Dataset columns:
blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 3 to 281)
content_id: string (length 40)
detected_licenses: list (length 0 to 57)
license_type: string (2 classes)
repo_name: string (length 6 to 116)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (313 classes)
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 (18.2k to 668M, nullable)
star_events_count: int64 (0 to 102k)
fork_events_count: int64 (0 to 38.2k)
gha_license_id: string (17 classes)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string (107 classes)
src_encoding: string (20 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (4 to 6.02M)
extension: string (78 classes)
content: string (length 2 to 6.02M)
authors: list (length 1)
author: string (length 0 to 175)
d8b556c744a5f3c7a16f4531c97f2d448deeae84
|
05c3f49bf4c204c77bee6c67d33501d7a1544a0b
|
/hw2/hw2.py.bak
|
818705cad97fcd5240258ae3481720e46d3fc10c
|
[] |
no_license
|
krieghan/cs_homework
|
8b0fd4f7abe07b945ec179fefb8accfbaf950c78
|
35a96ba06dd5895c029bd066e3d1f4a62876340d
|
refs/heads/master
| 2021-01-08T03:27:59.829154 | 2020-02-20T14:07:56 | 2020-02-20T14:07:56 | 241,898,952 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,695 |
bak
|
#!/usr/bin/env python
# -*- coding: ISO-8859-1 -*-
# generated by wxGlade 0.4.1 on Mon Apr 03 13:01:10 2006
import wx
from turtle import GLPane
class MyFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.panel_1 = wx.Panel(self, -1)
self.window_1 = GLPane(self.panel_1, -1)
self.label_1 = wx.StaticText(self.panel_1, -1, "Number of Iterations:")
self.text_ctrl_1 = wx.TextCtrl(self.panel_1, -1, "")
self.label_2 = wx.StaticText(self.panel_1, -1, "Seed String:")
self.text_ctrl_2 = wx.TextCtrl(self.panel_1, -1, "")
self.label_3 = wx.StaticText(self.panel_1, -1, "Turn Angle:")
self.text_ctrl_3 = wx.TextCtrl(self.panel_1, -1, "")
self.label_4 = wx.StaticText(self.panel_1, -1, "F Production: ")
self.text_ctrl_4 = wx.TextCtrl(self.panel_1, -1, "")
self.label_5 = wx.StaticText(self.panel_1, -1, "X Production:")
self.text_ctrl_5 = wx.TextCtrl(self.panel_1, -1, "")
self.label_6 = wx.StaticText(self.panel_1, -1, "Y Production:")
self.text_ctrl_6 = wx.TextCtrl(self.panel_1, -1, "")
self.button_1 = wx.Button(self.panel_1, -1, "Display:")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.Handle_Button, self.button_1)
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyFrame.__set_properties
self.SetTitle("frame_1")
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFrame.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_2 = wx.BoxSizer(wx.VERTICAL)
grid_sizer_1 = wx.FlexGridSizer(7, 2, 0, 0)
sizer_2.Add(self.window_1, 1, wx.EXPAND, 0)
grid_sizer_1.Add(self.label_1, 0, wx.ADJUST_MINSIZE, 0)
grid_sizer_1.Add(self.text_ctrl_1, 0, wx.ADJUST_MINSIZE, 0)
grid_sizer_1.Add(self.label_2, 0, wx.ADJUST_MINSIZE, 0)
grid_sizer_1.Add(self.text_ctrl_2, 0, wx.ADJUST_MINSIZE, 0)
grid_sizer_1.Add(self.label_3, 0, wx.ADJUST_MINSIZE, 0)
grid_sizer_1.Add(self.text_ctrl_3, 0, wx.ADJUST_MINSIZE, 0)
grid_sizer_1.Add(self.label_4, 0, wx.ADJUST_MINSIZE, 0)
grid_sizer_1.Add(self.text_ctrl_4, 0, wx.ADJUST_MINSIZE, 0)
grid_sizer_1.Add(self.label_5, 0, wx.ADJUST_MINSIZE, 0)
grid_sizer_1.Add(self.text_ctrl_5, 0, wx.ADJUST_MINSIZE, 0)
grid_sizer_1.Add(self.label_6, 0, wx.ADJUST_MINSIZE, 0)
grid_sizer_1.Add(self.text_ctrl_6, 0, wx.ADJUST_MINSIZE, 0)
grid_sizer_1.Add(self.button_1, 0, wx.ADJUST_MINSIZE, 0)
sizer_2.Add(grid_sizer_1, 1, wx.EXPAND, 0)
self.panel_1.SetAutoLayout(True)
self.panel_1.SetSizer(sizer_2)
sizer_2.Fit(self.panel_1)
sizer_2.SetSizeHints(self.panel_1)
sizer_1.Add(self.panel_1, 1, wx.EXPAND, 0)
self.SetAutoLayout(True)
self.SetSizer(sizer_1)
sizer_1.Fit(self)
sizer_1.SetSizeHints(self)
self.Layout()
# end wxGlade
def Handle_Button(self, event): # wxGlade: MyFrame.<event_handler>
print "Event handler `Handle_Button' not implemented!"
event.Skip()
# end of class MyFrame
class MyApp(wx.App):
def OnInit(self):
wx.InitAllImageHandlers()
frame_1 = MyFrame(None, -1, "")
self.SetTopWindow(frame_1)
frame_1.Show()
return 1
# end of class MyApp
if __name__ == "__main__":
app = MyApp(0)
app.MainLoop()
|
[
"[email protected]"
] | |
60cbed3e8914742866345a1fab9116a02d3a65ab
|
57c1f699f03f1bf015e99712b8445f0a4f57db9a
|
/Models/4/modelParams4.py
|
97cec0fd58111ca5c61e2ea4d2ab04122ffef38a
|
[] |
no_license
|
DaphneHB/sBCBG
|
693cb610c7bc8a33d3148edc25afa9f6236c523b
|
ca08181121c4ba27b6f1df82bc92756f158e4058
|
refs/heads/master
| 2021-01-17T21:07:03.008080 | 2017-07-31T08:44:20 | 2017-07-31T08:44:20 | 84,159,012 | 1 | 1 | null | 2017-03-07T05:34:43 | 2017-03-07T05:34:43 | null |
UTF-8
|
Python
| false | false | 1,623 |
py
|
#!/apps/free/python/2.7.10/bin/python
# defines the value of the parameters that will be used by testFullbG.py
# should be generated by sangoScript.py
params = {'LG14modelID':4,
'nbCh': 2,
'nbcpu': 6,
'nbMSN': 2644.,
'nbFSI': 53.,
'nbSTN': 8.,
'nbGPe': 25.,
'nbGPi': 14.,
'nbCSN': 3000.,
'nbPTN': 100.,
'nbCMPf': 9.,
'GMSN': 5.,
'GFSI': 1.3,
'GSTN': 1.3,
'GGPe': 1.3,
'GGPi': 1.,
'IeGPe': 13.,
'IeGPi': 11.,
'inDegCSNMSN': 97.,
'inDegPTNMSN': 1.,
'inDegCMPfMSN': 1.,
'inDegFSIMSN': 30., # 30 : according to Humphries et al. 2010, 30-150 FSIs->MSN
'inDegMSNMSN': 70., # 70 = 210/3 : according to Koos et al. 2004, cited by Humphries et al., 2010, on avg 3 synapses per MSN-MSN connection
'inDegSTNMSN': 0.024,
'inDegGPeMSN': 0.,
'inDegCSNFSI': 76.,
'inDegPTNFSI': 1.,
'inDegSTNFSI': 2.,
'inDegGPeFSI': 16.,
'inDegCMPfFSI': 9.,
'inDegFSIFSI': 15., # 15 : according to Humphries et al., 2010, 13-63 FSIs->FSI
'inDegPTNSTN': 25.,
'inDegCMPfSTN': 9.,
'inDegGPeSTN': 25.,
'inDegCMPfGPe': 9.,
'inDegSTNGPe': 8.,
'inDegMSNGPe':2644.,
'inDegGPeGPe': 25.,
'inDegMSNGPi':2644.,
'inDegSTNGPi': 8.,
'inDegGPeGPi': 23.,
'inDegCMPfGPi': 9.,
}
|
[
"[email protected]"
] | |
be9b4d3e9c5944913c7318af6a50c9b2159cfa7c
|
fb4bf309b4d2db0597647c4dadda996b101f53f6
|
/Artificial Intelligence/r04exercise/logic.py
|
61c42f6a349b812f2151415b192e1ca7c266cac7
|
[
"Apache-2.0"
] |
permissive
|
JayWu7/Machine-Learning-Courses-Study-Record
|
3eea44552287b1e48d50bb2a75a29aaf0bd68cfe
|
7586c3429514bc21c7cfe42f85ca8c0fcf8f072b
|
refs/heads/master
| 2020-07-24T18:08:41.445694 | 2020-05-27T03:55:46 | 2020-05-27T03:55:46 | 208,004,954 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,576 |
py
|
#!/usr/bin/python3
import itertools
# Representation of propositional formulas in Python.
#
# The basic connectives are NOT, AND and OR.
# IMPL and EQVI are reduced to these through the obvious reductions.
# We have a separate class for formulas with different outermost
# connectives, as well as for atomic formulas (ATOM).
#
# The methods supported are:
# negin(self) Negation of formula; negation pushed in one level (if possible)
# clauses(self) Return clauses representing the formula
# vars(self) Return variables occurring in a formula
#
# Translation to CNF:
# Instead of applying the logical equivalences to incrementally transform
# a formula to CNF, the 'clauses' methods below perform a recursive
# transformation to sets of clauses for each formula type after
# the subformulas have been translated into clauses.
#
# A clause set is a list of lists of literals. Literals here are represented
# as pairs (nameOfAtom,truthValue), where the truthValue is either True or False.
#
# The translations for ATOM, NOT(ATOM ...), and AND(...) are trivial.
# For example, a negative literal NOT(ATOM("X")) is simply [[("X",False)]],
# i.e. a clause set consisting of a single clause with only one literal.
# For AND, the lists of the constituent clause sets are simply concatenated.
# The complicated part is the translation for OR after its subformulas
# have been translated to clauses, i.e. computing the disjunction
# of two or more clause sets. See the accompanying Standard ML code in
# FORMULAS.sml for more explanation.
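# Illustrative example (added as a sketch, not in the original file): the clause
# sets for A & B and for C are [[("A",True)],[("B",True)]] and [[("C",True)]];
# their OR is the Cartesian product of clauses, giving
# [[("A",True),("C",True)], [("B",True),("C",True)]],
# i.e. (A & B) | C is equivalent to (A | C) & (B | C).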
# auxiliary functions
def concatlists(ll):
return list(itertools.chain.from_iterable(ll))
# Both AND and OR will inherit __init__ and vars from NaryFormula
# NaryFormula means formulas with multiple subformulas.
# Conjunction (AND) and disjunction (OR) are traditionally defined
# as binary connectives, that is, with two subformulas.
# Because of associativity, i.e. A & (B & C) and (A & B) & C are equivalent,
# it is often more convenient to write A & B & C.
class NaryFormula: # N-ary formulas with multiple subformulas
def __init__(self, subformulas):
self.subformulas = subformulas
def vars(self):
vs = [f.vars() for f in self.subformulas]
return set.union(*vs)
class BinaryFormula: # Not used here
def __init__(self, subformula1, subformula2):
self.subformula1 = subformula1
self.subformula2 = subformula2
# AND and OR are defined with multiple subformulas (phi1 & phi2 & ... & phiN)
class AND(NaryFormula):
def __repr__(self):
return "(and " + (' '.join([str(x) for x in self.subformulas])) + ")"
def negin(self):
return OR([NOT(f) for f in self.subformulas])
def clauses(self):
cclauses = [c.clauses() for c in self.subformulas]
return concatlists(cclauses)
### IMPLEMENT THIS ## IMPLEMENT THIS ## IMPLEMENT THIS ## IMPLEMENT THIS ###
class OR(NaryFormula):
def __repr__(self):
return "(or " + (' '.join([str(x) for x in self.subformulas])) + ")"
def negin(self):
return AND([NOT(f) for f in self.subformulas])  # push negation in: -(A v B) == (-A & -B)
### IMPLEMENT THIS ## IMPLEMENT THIS ## IMPLEMENT THIS ## IMPLEMENT THIS ###
def clauses(self):
cclauses = [c.clauses() for c in self.subformulas]
return [concatlists(list(c)) for c in itertools.product(*cclauses)]
class NOT:
def __init__(self, subformula):
self.subformula = subformula
def __repr__(self):
return "(not " + str(self.subformula) + ")"
def negin(self):
return self.subformula
def clauses(self):
if isinstance(self.subformula, ATOM):
return [[(self.subformula.name, False)]]
else:
negsubformula = self.subformula.negin()
return negsubformula.clauses()
def vars(self):
return self.subformula.vars()
class ATOM:
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
def negin(self):
return NOT(self)  # negation of an atom cannot be pushed further in
### IMPLEMENT THIS ## IMPLEMENT THIS ## IMPLEMENT THIS ## IMPLEMENT THIS ###
def clauses(self):
return [[(self.name, True)]]
### IMPLEMENT THIS ## IMPLEMENT THIS ## IMPLEMENT THIS ## IMPLEMENT THIS ###
def vars(self):
return {self.name}
# Implication and equivalence reduced to the primitive connectives
# A -> B is reduced to -A V B
def IMPL(f1, f2):
return OR([NOT(f1), f2])
# A <-> B is reduced to (-A V B) & (-B V A)
def EQVI(f1, f2):
return AND([IMPL(f1, f2), IMPL(f2, f1)])
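# Minimal usage sketch (added for illustration; not part of the original exercise file).
# It exercises the clause translation defined above on a small formula.
if __name__ == "__main__":
    A = ATOM("A")
    B = ATOM("B")
    # EQVI(A, B) reduces to (-A v B) & (-B v A)
    print(EQVI(A, B).clauses())  # [[('A', False), ('B', True)], [('B', False), ('A', True)]]
    print(EQVI(A, B).vars())     # {'A', 'B'}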
|
[
"[email protected]"
] | |
b7e90fcda58d0566c7be66b95c4c017ebea9525f
|
d31c99522f70cf9888c6411be8c1fbe0f7441efe
|
/leapyear Testing/leapYear.py
|
02091de04405b188075a56b602034891bb68266a
|
[] |
no_license
|
bhaveesh09/CS362-Homework-7
|
0e96d5390fd31b6abb9da3c7846c34b698d8fc19
|
b8979328ef53245ec423b222ac0965f6492e220a
|
refs/heads/main
| 2023-05-06T02:22:40.765563 | 2021-05-31T20:34:22 | 2021-05-31T20:34:22 | 372,339,566 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 500 |
py
|
def leapYear(x):
if (x<0): #Now check if user input is a positive number
print("Enter a postive number, program has quit")
return False
elif (x % 4== 0) and (x % 100 != 0): #conditions for leap year updated
print(x, "is a leap year")
return True
elif (x % 400 == 0):
print(x, "is a leap year")
return True
else:
print(x, "is not a leap year")
return False
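# Illustrative checks (added sketch, not part of the original file):
# leapYear(2012) -> True  (divisible by 4, not by 100)
# leapYear(1900) -> False (divisible by 100 but not by 400)
# leapYear(2000) -> True  (divisible by 400)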
|
[
"[email protected]"
] | |
9189ff6a8da588f5c8d74e069de80277af625faf
|
14c5bc2d802d5373efda230dd49c3f25fd8fbad0
|
/roboto_gui/pySSRL_bServer/bServer/bl_interaction.py
|
d8a93a91b0f0f003d771a313f7a5888db16ca4ce
|
[
"MIT"
] |
permissive
|
rwalroth/roboto_gui
|
58c4e6712fc1cbb1a546191432443733d3de8190
|
210ab4fc4fbcddd1863f251cb7206fc7173a93d2
|
refs/heads/main
| 2023-04-04T15:26:46.019258 | 2021-04-14T18:55:50 | 2021-04-14T18:55:50 | 317,326,993 | 0 | 0 | null | 2021-03-22T19:51:47 | 2020-11-30T19:38:37 |
Python
|
UTF-8
|
Python
| false | false | 20,271 |
py
|
import socket
import time
import types
import sys
import yaml
import copy
import logging
from logging import info, warning, critical, debug
import asyncio
import re
from bServer.bl_communication import *
from bServer.bl_command import *
from bServer.BL_Error import *
from bServer.PulpitObject import *
from bServer.BL_Variables import *
class BL_Interaction():
"""This class is holds all of the beamline interaction objects and initilizes all of
their functionality. This includes the beamline variables, rosetta object, beamline state configuration object, and
subroutines for executing commands on the beamlines. Each beamline will have it's own LMD_beamlineInteraction()
object."""
def __init__(self, beamline_name, loop):
"""Sets up all of the objects discussed above. The only argument is the 'fun' unique name of the beamline that
appears in the beamline config YAML files."""
#Lets save the "fun" name
self.beamline_name = beamline_name
#Save a pointer to the loop for all the async functions
self.loop = loop
#Do the rest of the initialization in an async function - can't be done directly in __init__ since async tag is not allowed
asyncio.ensure_future(self.setup(), loop=self.loop)
async def setup(self):
"""Setup anything that depends on async functionality be available"""
# Initialize the beamline communications
try:
self.beamline = BL_SPEC(self)
await self.beamline.setup()
print("Finished beamline setup for '{}'".format(self.beamline_name))
except:
print("Caught exception in setup of beamline communication")
raise RuntimeError("Caught exception in setup of communication for '{}': {}".format(self.beamline_name, sys.exc_info()[0]))
#Get the mnemonic
self.mnemonic = self.beamline.mnemonic
#Lets initialize the SPEC Infoserver Interaction.
try:
print("interface:: '{}'".format(getattr(self.beamline, 'interface')))
if self.beamline.interface == "SPEC Infoserver":
print("starting beamline interface")
debug("Setting up beamline interface for {}".format(self.beamline_name))
self.sis = self.SPECInfoserverInteraction(self)
print("done starting beamline interface")
else:
debug("Could not find correct interface for {}".format(self.beamline_name))
raise RuntimeError("Could not find the correct interface to initialize")
except:
print("Caught exception starting SPEC Infoserver Interaction Class: {}".format(sys.exc_info()[0]))
raise RuntimeError("Caught exception in setup of SPEC Infoserver Interaction for'{}': {}".format(self.beamline_name, sys.exc_info()[0]))
# Lets refresh all of the values in the beamline registers
debug("bl_interaction: init beamline variables")
self.variables = BL_Variables(self)
self.variables.manage_beamline_variables(self.variables, self)
#await self.beamline_variables.refresh_variables()
#create the pulpit objects. This framework uses multiple pulpits since the SIS framework actually has a few different ways of getting info
self.command_pulpit = PulpitObject(loop=self.loop)
self.motor_info_pulpit = PulpitObject(loop=self.loop)
self.counter_pulpit = PulpitObject(loop=self.loop)
#Create a lock for the communication and execution of commands for this beamline. This is necessary since we have multiple pulpits, and socket communication may need to be grouped to prevent our concurrency framework from returning the wrong information between concurrent commands (or trying to execute concurrently)
self.beamline.comm_lock = asyncio.Lock(loop=self.loop)
class SPECInfoserverInteraction:
"""Low level SPEC infoserver commands. These are just out of the SPEC infoserver doc"""
def __init__(self, bi):
debug("SPECInfoserverInteraction: init()")
self.bi = bi
self.beamline = bi.beamline
#SIS has a bunch of canned text responses. Lets put them in once as a variable within the class so we don't make any mistakes when comparing values
self.sis_text = {}
self.sis_text['in_control'] = "client in control."
self.sis_text['not_in_control'] = "client not in control."
self.sis_text['control_not_available'] = "control not available."
self.sis_text['logging on'] = "Logging is ON."
self.sis_text['logging off'] = "Logging is OFF."
async def get_version(self):
"""Run the ?ver command for spec infoserver"""
cmd_text = "?ver"
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True)
await cmd.execute()
print("cmd: {}".format(cmd))
response = cmd.response[0]
return response
async def get_user(self):
"""Run the ?usr command for spec infoserver"""
cmd_text = "?usr"
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True)
await cmd.execute()
response = cmd.response[0]
return response
async def get_console_output_buffer(self, N=None, return_after=False, get_buffer_index=False):
"""Run the ?usr command for spec infoserver"""
if N is None and get_buffer_index is False:
cmd_text = "?con"
elif N is None and get_buffer_index is True:
cmd_text = "?con idx"
elif N is not None and return_after is True:
cmd_text = "?con {}-".format(N)
elif N is not None and return_after is False:
cmd_text = "?con {}".format(N)
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True, listSeparator="\n")
await cmd.execute()
if N is None and get_buffer_index is True:
response = int(cmd.response[0])
else:
response = cmd.response
return response
async def is_available(self):
"""Run ?avl. Check if SPEC is available."""
cmd_text = "?avl"
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True)
await cmd.execute()
availability = cmd.typecastResponse([bool])[0]
return availability
async def is_busy(self):
"""Run ?bsy. This is the opposite of ?avl. Check if spec is busy."""
cmd_text = "?bsy"
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True)
await cmd.execute()
busy = cmd.typecastResponse([bool])[0]
return busy
async def get_motor_position(self, motor_name):
"""Run ?mp"""
cmd_text = "?mp {}".format(motor_name)
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True)
await cmd.execute()
response = cmd.typecastResponse([float])[0]
return response
async def get_motor_information(self, motor_name):
"""Run ?mi and get the motor info. Parse result and return a dict"""
cmd_text = "?mi {}".format(motor_name)
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True)
await cmd.execute()
response = cmd.typecastResponse([int])[0]
mi_dict = self.decode_motor_status_bits(response)
return mi_dict
def decode_motor_status_bits(self, status):
"""Decode the 4-bit state of the motor. See SPEC infoserver doc. Returns a dictionary"""
mi_dict = {}
mi_dict['motor moving'] = True if status & 0x1 else False
mi_dict['motor disabled'] = True if status & 0x2 else False
mi_dict['low limit active'] = True if status & 0x4 else False
mi_dict['high limit active'] = True if status & 0x8 else False
return mi_dict
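# Example (illustrative): status 0b0101 (5) decodes to
# {'motor moving': True, 'motor disabled': False, 'low limit active': True, 'high limit active': False}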
async def get_all_motor_mnemonics(self):
"""Get all of the motor names as a list"""
cmd_text = "?mne"
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True, listSeparator=", ")
await cmd.execute()
response = cmd.response
print(cmd)
print(response)
return response
async def get_all_counter_mnemonics(self):
"""Get all of the counter names as a list. This is undocumented in the SIS documentation but I found it in Stefans code."""
cmd_text = "?mne c"
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True, listSeparator=", ")
await cmd.execute()
response = cmd.response
return response
async def get_all_motor_positions(self):
"""Get all motor positions"""
cmd_text = "?mpa"
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True, listSeparator=", ")
await cmd.execute()
#Could have done the listSeparator above
response = cmd.response
converted_response = [float(x) for x in response]
return converted_response
async def get_all_motor_status(self):
"""Get all motor positions"""
cmd_text = "?mia"
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True, listSeparator=", ")
await cmd.execute()
response = cmd.response
all_motor_status =[]
for motor_status in response:
all_motor_status.append(self.decode_motor_status_bits(int(motor_status)))
return all_motor_status
async def get_all_counters(self):
"""Get all motor positions"""
cmd_text = "?cta"
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True, listSeparator=", ")
await cmd.execute()
response = cmd.response
converted_response = [float(x) for x in response] #Is floating point the right thing? I guess....
return converted_response
async def get_detector_status(self):
"""Get the detector status string. Not sure what this actually is"""
cmd_text = "?det"
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True, readterminator='\r\n')
await cmd.execute()
response = cmd.response[0]
print(cmd)
return response
async def get_all_status_motors_and_counters(self):
"""This function will eventually return everything that is returned by ?all. Right now, not sure what the
counter statuses actually mean. Returning the string right now."""
cmd_text = "?all"
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True, listSeparator=", ")
await cmd.execute()
response = cmd.response
#converted_response = [float(x) for x in response] #Is floating point the right thing? I guess....
return response
async def get_current_scan_details(self):
"""Get the detector status string. Not sure what this actually is"""
cmd_text = "?sci"
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True, readterminator='\r\n')
await cmd.execute()
response = cmd.response[0]
print(cmd)
return response
async def get_current_plot_point_index(self):
"""Return the current buffer index"""
cmd_text = "?plt idx"
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True)
await cmd.execute()
response = cmd.response[0]
print(cmd)
return response
async def get_plot_points(self, num_pts = None):
"""Return the full buffer or the last 'num_pts' points. Default is the full buffer"""
if num_pts is not None: #return the last num_pts points
cmd_text = "?plt {}".format(num_pts)
else: #Get everything
cmd_text = "?plt all"
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True, listSeparator="/")
await cmd.execute()
response = cmd.response
print("len response: {}".format(len(response)))
converted_response = []
t0 = time.time()
for data_series in response:
#c_line = list(map(float, data_series.split(", ")))
c_line = data_series.split(", ")
converted_response.append(c_line)
print("conversion took {}sec".format(time.time()-t0))
return converted_response
async def are_we_in_control(self):
"""Check if we already have control."""
cmd_text = "?inc"
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True)
await cmd.execute()
inc = cmd.typecastResponse([bool])[0]
return inc
async def get_remote_control(self):
"""Get the current plot. Not sure what this actually is"""
cmd_text = "!rqc"
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True)
await cmd.execute()
response = cmd.response[0]
print(response)
return response
async def release_remote_control(self):
"""Get the current plot. Not sure what this actually is"""
cmd_text = "!rlc"
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True)
await cmd.execute()
response = cmd.response[0]
print(response)
return response
async def execute_unix_command(self, unix_cmd):
"""Get the current plot. Not sure what this actually is. untested"""
cmd_text = "!unx {}".format(unix_cmd)
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True)
await cmd.execute()
response = cmd.response[0]
m = re.match(r"(\d+), (.*)", response, re.DOTALL) #DOTALL catches newline characters as well", response)
if m is not None:
cmd_success = bool(int(m.groups()[0]))
ret_val = m.groups()[1]
debug("execute_unix_command: success = {}, response = '{}'".format(cmd_success, ret_val))
return {'success': cmd_success, 'response': ret_val}
elif response == self.sis_text['not_in_control']:
raise SISControlError(msg='execute_unix_command: not in control. Raising error')
else:
raise SPECCommandError(msg="execute_unix_command: unknown response: '{}'".format(response))
print(response)
return response
async def abort_command(self):
"""Abort the current SPEC command - equivelent to Ctrl^C"""
cmd_text = "!abr"
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True)
await cmd.execute()
#can return not in control!
response = cmd.response[0]
print(response)
return response
async def execute_command(self, spec_cmd):
"""Execute a spec command and return the uuid of the command. In the case of SIS, this just an increasing
number."""
cmd_text = "!cmd {}".format(spec_cmd)
debug("execute_command: executing '{}'".format(cmd_text))
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True)
await cmd.execute()
response = cmd.response[0]
debug("execute_command: received'{}'".format(cmd_text))
m = re.match(r"#(\d+)", response)
if m is not None:
spec_cmd_uuid = int(m.groups()[0])
debug("execute_command: command executed as #{}".format(spec_cmd_uuid))
return spec_cmd_uuid
elif response == self.sis_text['not_in_control']:
raise SISControlError(msg='execute_command: not in control. Raising error')
else:
raise SPECCommandError(msg="execute_command: unknown response: '{}'".format(response))
async def retrieve_result(self):
"""Get the most recent result from SIS. Sadly SIS only keeps track of the most recent result. Returns just the last one in memory. Does not block in any way."""
cmd_text = "?res"
debug("retrieve_result: sending")
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True)
await cmd.execute()
response = cmd.response[0]
debug("retrieve_result: response '{}'".format(response))
spec_result = {}
m = re.match(r"#(\d+), (\w+), (.*)", response, re.DOTALL) #DOTALL catches newline characters as well
if m is not None:
matched = m.groups()
spec_result['uuid'] = int(matched[0])
spec_result['status ok'] = True if matched[1] == "OK" else False
spec_result['result'] = matched[2]
else:
raise SPECCommandError(message="retrieve_result: Got a result we couldn't parse")
debug('retrieve_result: parsed: {}'.format(spec_result))
return spec_result
async def set_sis_logging(self, set_logging_on=None):
"""This is a poorly documented feature that appears to return enteries in the SIS log. Could be useful for
debugging or potentially getting the response of commands in case we miss calling the ?res before executing
another command. Need to experiment. untested"""
if set_logging_on is None:
set_logging_on = True
if set_logging_on is True:
cmd_text = "!log on"
debug('set_sis_logging: turning on')
else:
cmd_text = "!log off"
debug('set_sis_logging: turning off')
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True)
await cmd.execute()
response = cmd.response[0]
debug("set_sis_logging: response '{}'".format(response))
if response == self.sis_text['logging on'] and set_logging_on is True:
pass
elif response == self.sis_text['logging off'] and set_logging_on is False:
pass
else:
print("response vs loggingoff:", response == self.sis_text['logging off'])
raise SPECCommandError(message="set_sis_logging: unknown response: '{}'".format(response))
return response
async def get_sis_logs(self, num_entries=None):
"""A poorly documented feature that appears to return entries of SIS logging.
Specify either the number of recent entries to return or 'None' to return the full log.
I believe this is capped at 1000 entries from Stefan's code. untested"""
debug('get_sis_logs: starting')
if num_entries is not None: #return last N
cmd_text = "!log {}".format(num_entries)
else: #return everything
cmd_text = "!log"
cmd = BLCommand(self.beamline, cmd_text, needsResponse=True, listSeparator="\n")
await cmd.execute()
response = cmd.response
debug("get_sis_log: response '{}'".format(response))
return response
###################################################################################
# Universal Functions
###################################################################################
######################################################################################
# Lets add some fast diagnostic functions. These will rely on the get_many_vars
########################################################################################
|
[
"[email protected]"
] | |
16ea0ace51907afcbb377c98ac45a4b447646221
|
8af68426584e78d1913ed2342498d6bdf02a8923
|
/manage.py
|
3fd3eee148f2566cfe0e2d852573618afff5ded4
|
[] |
no_license
|
RabbaniM/duafi_old
|
8edaccc78b38818ebf3df9397363f59b557ef185
|
791844efdbe80cea0d106edc660310d390531871
|
refs/heads/master
| 2020-12-28T19:46:24.236639 | 2015-07-13T02:09:59 | 2015-07-13T02:09:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 248 |
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "duafi.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
20744b85032062eeca0b6a483efed89f604b68a1
|
45887243a656490eafb6607dd7fb75111cc802ac
|
/app/models/models.py
|
9be81aab85aa6d928b2627b4e15e79bbe6880989
|
[] |
no_license
|
liuxiaowei/dnf_server
|
f0d21f8ecf94b085c9bb7122c38c043e872164b2
|
f3b4c96fc54b9ffda940d1ccc292a938299d99a3
|
refs/heads/master
| 2020-04-30T15:20:48.296542 | 2019-04-09T08:52:38 | 2019-04-09T08:52:38 | 176,918,003 | 0 | 0 | null | 2019-04-09T08:52:39 | 2019-03-21T10:00:40 |
Python
|
UTF-8
|
Python
| false | false | 485 |
py
|
import json
import string
import random
from datetime import datetime
import hashlib
from .base import Base, db
class TUser(Base):
__tablename__ = 't_user'
mac = db.Column(db.String(255))
note = db.Column(db.String(255))
last_login = db.Column(db.DateTime)
status = db.Column(db.SmallInteger)
ip = db.Column(db.String(255))
class TUserConfig(Base):
__tablename__ = 't_user_config'
user_id = db.Column(db.Integer)
config = db.Column(db.JSON)
|
[
"[email protected]"
] | |
a9029ca71800fd3d14b935d41f8dd774c74c1973
|
f3381f1b1f995b87eaf240428e902531e68ba95c
|
/algoprog/A2_3.py
|
408f7926360f45b75075246455faa0329623b210
|
[] |
no_license
|
MorZxd/py
|
6565716d156511c6fb515ad50232edbce935dfea
|
b5291b2dbd4db59cdcf2f05344e145b32f1e74e2
|
refs/heads/master
| 2023-02-07T17:36:41.823961 | 2020-12-26T10:05:31 | 2020-12-26T10:05:31 | 321,710,657 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 90 |
py
|
f = int(input())
if (f>0):
print("1")
elif (f<0):
print("-1")
else:
print("0")
|
[
"[email protected]"
] | |
0b747c2a7cc8c26a6f70888af30f80f3d9044a71
|
699c5894668e07bbb4ddae632730f0d218f72558
|
/Yeji-Lee/baekjoon/b_100/1297.py
|
24bcc07f02942fb4ba2f944171f1e631c24cbec9
|
[] |
no_license
|
Sumsan38/learning-algorithm
|
0914ddbbd8786381dda807562e4529773b4aa987
|
59a1d7b53d4348a0320b0cbf48ee75b5086b3a29
|
refs/heads/master
| 2023-07-15T08:16:29.896218 | 2021-08-14T11:47:01 | 2021-08-14T11:47:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 113 |
py
|
# TV size
c, a, b = map(int, input().split())
r = c / ((a ** 2 + b ** 2) ** 0.5)
print(int(a * r), int(b * r))
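# Worked example (illustrative): c=5, a=3, b=4 -> r = 5/sqrt(3**2 + 4**2) = 1, so the output is "3 4".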
|
[
"[email protected]"
] | |
0a7c0934e651558320e4ccc999fab5b29f046a66
|
3e54f3ad08a8d3e4f17b77394491e3f625672fbe
|
/hybrid_AC_DC_networks/optimal_power_flows/optimal_power_flow_hybrid_AC_DC_networks.py
|
56a26c844b511870eb9ff1ffdc9e590e8acb8383
|
[
"MIT"
] |
permissive
|
shubhampachori12110095/EnergyManagementSourceCodes
|
ccb6f38c155e955624330a0f20b9ed2f4941b08a
|
1ea824941fe87528622ec7aa8148024752a3947c
|
refs/heads/master
| 2023-08-01T23:57:45.271895 | 2021-09-26T04:55:05 | 2021-09-26T04:55:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 16,158 |
py
|
"""
Optimal power flow models for hybrid AC/DC microgrids
@author: Tianyang Zhao
@email: [email protected]
A few modelling assumptions should be noted.
1) The energy losses on the bi-directional converters are modelled simply, as in
[1]Concerted action on computer modeling and simulation
[2]Energy management and operation modelling of hybrid AC–DC microgrid
There are more complex modelling methods for different types of converters; see the following references for details.
[1]Mathematical Efficiency Modeling of Static Power Converters
[2]Power Loss Modeling of Isolated AC/DC Converter
The variations in the mathematical modelling result in significant differences in the mathematical properties.
2) Even renewable energy sources are assigned an operational cost, e.g., linear in this case.
3) Power losses are ignored in the real-time operation.
@Reference:
[1]
"""
from pypower import runopf
from gurobipy import *
from numpy import zeros, c_, shape, ix_, ones, r_, arange, sum, diag, concatenate, power
from scipy.sparse import csr_matrix as sparse
from scipy.sparse import hstack, vstack, diags
from distribution_system_optimization.test_cases import case33
from distribution_system_optimization.data_format import case_converters
# The following cases, data formats are imported from the Pypower package.
from pypower import case6ww, case9, case30, case118, case300
from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, TAP, SHIFT, BR_STATUS, RATE_A
from pypower.idx_cost import MODEL, NCOST, PW_LINEAR, COST, POLYNOMIAL
from pypower.idx_bus import BUS_TYPE, REF, VA, VM, PD, GS, VMAX, VMIN, BUS_I, QD
from pypower.idx_gen import GEN_BUS, VG, PG, QG, PMAX, PMIN, QMAX, QMIN
from pypower.ext2int import ext2int
def main(Case_AC=None, Case_DC=None, Converters=None):
"""
:param Case_AC: AC case
:param Case_DC: DC case
:param Converters: Bi-directional converters
:return: Obtained solutions for hybrid AC DC networks
"""
# 1) Problem formulation
model_AC = AC_network_formulation(Case_AC)
model_DC = DC_network_formulation(Case_DC)
# 2) Solve the initial problems
sol_AC = AC_opf_solver(model_AC)
sol_DC = DC_opf_solver(model_DC)
# 3) Connect two systems via the BIC networks
model_converters = BIC_network_formulation(model_AC, model_DC, Converters)
# 4) Solve the merged functions
# 4.1) Solve the problem
return model_converters
def DC_network_formulation(case):
"""
:param case:
:return:
"""
case = ext2int(case)
baseMVA, bus, gen, branch, gencost = case["baseMVA"], case["bus"], case["gen"], case["branch"], case["gencost"]
nb = shape(case['bus'])[0] ## number of buses
nl = shape(case['branch'])[0] ## number of branches
ng = shape(case['gen'])[0] ## number of dispatchable injections
f = branch[:, F_BUS] ## list of "from" buses
t = branch[:, T_BUS] ## list of "to" buses
i = range(nl) ## double set of row indices
# Connection matrix
Cf = sparse((ones(nl), (i, f)), (nl, nb))
Ct = sparse((ones(nl), (i, t)), (nl, nb))
Cg = sparse((ones(ng), (gen[:, GEN_BUS], range(ng))), (nb, ng))
# Modify the branch resistance
Branch_R = branch[:, BR_X]
for i in range(nl):
if Branch_R[i] <= 0:
Branch_R[i] = max(Branch_R)
Cf = Cf.T
Ct = Ct.T
# Obtain the boundary information
Slmax = branch[:, RATE_A] / baseMVA
Pij_l = -Slmax
Iij_l = zeros(nl)
Vm_l = power(bus[:, VMIN], 2)
Pg_l = gen[:, PMIN] / baseMVA
Pij_u = Slmax
Iij_u = Slmax
# Vm_u = [max(turn_to_power(bus[:, VMAX], 2))] * nb
Vm_u = power(bus[:, VMAX], 2)
Pg_u = gen[:, PMAX] / baseMVA
# Pg_l = -Pg_u
lx = concatenate([Pij_l, Iij_l, Vm_l, Pg_l])
ux = concatenate([Pij_u, Iij_u, Vm_u, Pg_u])
# KCL equation
Aeq_p = hstack([Ct - Cf, -diag(Ct * Branch_R) * Ct, zeros((nb, nb)), Cg])
beq_p = bus[:, PD] / baseMVA
# KVL equation
Aeq_KVL = hstack([-2 * diags(Branch_R), diags(power(Branch_R, 2)), Cf.T - Ct.T, zeros((nl, ng))])
beq_KVL = zeros(nl)
Aeq = vstack([Aeq_p, Aeq_KVL])
Aeq = Aeq.todense()
beq = concatenate([beq_p, beq_KVL])
neq = len(beq)
nx = 2 * nl + nb + ng
Q = zeros(nx)
c = zeros(nx)
c0 = zeros(nx)
for i in range(ng):
Q[i + 2 * nl + nb] = gencost[i, 4] * baseMVA * baseMVA
c[i + 2 * nl + nb] = gencost[i, 5] * baseMVA
c0[i + 2 * nl + nb] = gencost[i, 6]
model = {"Q": Q,
"c": c,
"c0": c0,
"Aeq": Aeq,
"beq": beq,
"lx": lx,
"ux": ux,
"nx": nx,
"nb": nb,
"nl": nl,
"ng": ng,
"f": f,
"neq": neq}
return model
def AC_network_formulation(case):
"""
:param case:
:return:
"""
case = ext2int(case)
baseMVA, bus, gen, branch, gencost = case["baseMVA"], case["bus"], case["gen"], case["branch"], case["gencost"]
nb = shape(case['bus'])[0] ## number of buses
nl = shape(case['branch'])[0] ## number of branches
ng = shape(case['gen'])[0] ## number of dispatchable injections
f = branch[:, F_BUS] ## list of "from" buses
t = branch[:, T_BUS] ## list of "to" buses
i = range(nl) ## double set of row indices
# Connection matrix
Cf = sparse((ones(nl), (i, f)), (nl, nb))
Ct = sparse((ones(nl), (i, t)), (nl, nb))
Cg = sparse((ones(ng), (gen[:, GEN_BUS], range(ng))), (nb, ng))
Branch_R = branch[:, BR_R]
Branch_X = branch[:, BR_X]
Cf = Cf.T
Ct = Ct.T
# Obtain the boundary information
Slmax = branch[:, RATE_A] / baseMVA
Pij_l = -Slmax
Qij_l = -Slmax
Iij_l = zeros(nl)
Vm_l = power(bus[:, VMIN], 2)
Pg_l = gen[:, PMIN] / baseMVA
Qg_l = gen[:, QMIN] / baseMVA
Pij_u = Slmax
Qij_u = Slmax
Iij_u = Slmax
Vm_u = 2 * power(bus[:, VMAX], 2)
Pg_u = 2 * gen[:, PMAX] / baseMVA
Qg_u = gen[:, QMAX] / baseMVA
# Problem formulation
lx = concatenate([Pij_l, Qij_l, Iij_l, Vm_l, Pg_l, Qg_l])
ux = concatenate([Pij_u, Qij_u, Iij_u, Vm_u, Pg_u, Qg_u])
# KCL equation, active power
Aeq_p = hstack([Ct - Cf, zeros((nb, nl)), -diag(Ct * Branch_R) * Ct, zeros((nb, nb)), Cg, zeros((nb, ng))])
beq_p = bus[:, PD] / baseMVA
# KCL equation, reactive power
Aeq_q = hstack([zeros((nb, nl)), Ct - Cf, -diag(Ct * Branch_X) * Ct, zeros((nb, nb)), zeros((nb, ng)), Cg])
beq_q = bus[:, QD] / baseMVA
# KVL equation
Aeq_KVL = hstack([-2 * diags(Branch_R), -2 * diags(Branch_X),
diags(power(Branch_R, 2)) + diags(power(Branch_X, 2)), Cf.T - Ct.T,
zeros((nl, 2 * ng))])
beq_KVL = zeros(nl)
Aeq = vstack([Aeq_p, Aeq_q, Aeq_KVL])
Aeq = Aeq.todense()
beq = concatenate([beq_p, beq_q, beq_KVL])
neq = len(beq)
nx = 3 * nl + nb + 2 * ng
Q = zeros(nx)
c = zeros(nx)
c0 = zeros(nx)
for i in range(ng):
Q[i + 3 * nl + nb] = gencost[i, 4] * baseMVA * baseMVA
c[i + 3 * nl + nb] = gencost[i, 5] * baseMVA
c0[i + 3 * nl + nb] = gencost[i, 6]
for i in range(nl):
c[i + 3 * nl] = Branch_R[i]
model = {"Q": Q,
"c": c,
"c0": c0,
"Aeq": Aeq,
"beq": beq,
"lx": lx,
"ux": ux,
"nx": nx,
"nb": nb,
"nl": nl,
"ng": ng,
"f": f,
"neq": neq}
return model
def AC_opf_solver(case):
"""
Optimal power flow solver for AC networks
:param model:
:return: AC OPF solution
"""
nl = case["nl"]
nb = case["nb"]
ng = case["ng"]
f = case["f"]
nx = case["nx"]
lx = case["lx"]
ux = case["ux"]
Aeq = case["Aeq"]
beq = case["beq"]
neq = len(beq)
Q = case["Q"]
c = case["c"]
c0 = case["c0"]
model = Model("OPF")
# Define the decision variables
x = {}
for i in range(nx):
x[i] = model.addVar(lb=lx[i], ub=ux[i], vtype=GRB.CONTINUOUS)
for i in range(neq):
expr = 0
for j in range(nx):
expr += x[j] * Aeq[i, j]
model.addConstr(lhs=expr, sense=GRB.EQUAL, rhs=beq[i])
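# Relaxed branch-flow (second-order cone) constraints: Pij^2 + Qij^2 <= Iij * V_from,
# with the decision vector ordered as [Pij, Qij, Iij, Vm, Pg, Qg].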
for i in range(nl):
model.addConstr(x[i] * x[i] + x[i + nl] * x[i + nl] <= x[i + 2 * nl] * x[f[i] + 3 * nl])
obj = 0
for i in range(nx):
obj += Q[i] * x[i] * x[i] + c[i] * x[i] + c0[i]
model.setObjective(obj)
model.Params.OutputFlag = 0
model.Params.LogToConsole = 0
model.Params.DisplayInterval = 1
model.optimize()
xx = []
for v in model.getVars():
xx.append(v.x)
obj = obj.getValue()
Pij = xx[0:nl]
Qij = xx[nl + 0:2 * nl]
Iij = xx[2 * nl:3 * nl]
Vi = xx[3 * nl:3 * nl + nb]
Pg = xx[3 * nl + nb:3 * nl + nb + ng]
Qg = xx[3 * nl + nb + ng:3 * nl + nb + 2 * ng]
primal_residual = zeros(nl)
for i in range(nl):
primal_residual[i] = Pij[i] * Pij[i] + Qij[i] * Qij[i] - Iij[i] * Vi[int(f[i])]
sol = {"Pij": Pij,
"Qij": Qij,
"Iij": Iij,
"Vm": power(Vi, 0.5),
"Pg": Pg,
"Qg": Qg,
"obj": obj}
return sol, primal_residual
def DC_opf_solver(case):
"""
Optimal power flow solver for DC networks
:param model:
:return: DC OPF solution
"""
nl = case["nl"]
nb = case["nb"]
ng = case["ng"]
f = case["f"]
nx = case["nx"]
lx = case["lx"]
ux = case["ux"]
Aeq = case["Aeq"]
beq = case["beq"]
neq = len(beq)
Q = case["Q"]
c = case["c"]
c0 = case["c0"]
model = Model("OPF_DC")
# Define the decision variables
x = {}
for i in range(nx):
x[i] = model.addVar(lb=lx[i], ub=ux[i], vtype=GRB.CONTINUOUS)
for i in range(neq):
expr = 0
for j in range(nx):
expr += x[j] * Aeq[i, j]
model.addConstr(lhs=expr, sense=GRB.EQUAL, rhs=beq[i])
for i in range(nl):
model.addConstr(x[i] * x[i] <= x[i + nl] * x[f[i] + 2 * nl])
obj = 0
for i in range(nx):
obj += Q[i] * x[i] * x[i] + c[i] * x[i] + c0[i]
model.setObjective(obj)
model.Params.OutputFlag = 0
model.Params.LogToConsole = 0
model.Params.DisplayInterval = 1
model.optimize()
xx = []
for v in model.getVars():
xx.append(v.x)
obj = obj.getValue()
Pij = xx[0:nl]
Iij = xx[nl:2 * nl]
Vi = xx[2 * nl:2 * nl + nb]
Pg = xx[2 * nl + nb:2 * nl + nb + ng]
primal_residual = zeros(nl)
for i in range(nl):
primal_residual[i] = Pij[i] * Pij[i] - Iij[i] * Vi[int(f[i])]
sol = {"Pij": Pij,
"Iij": Iij,
"Vm": power(Vi, 0.5),
"Pg": Pg,
"obj": obj}
return sol, primal_residual
def BIC_network_formulation(case_AC, case_DC, case_BIC):
"""
Merge the AC network and DC networks
:param case_AC:
:param case_DC:
:param case_BIC:
:return:
"""
from distribution_system_optimization.data_format.case_converters import AC_ID, DC_ID, EFF_A2D, EFF_D2A, \
SMAX
nx_BIC = shape(case_BIC["con"])[0]
nx_AC = case_AC["nx"]
nx_DC = case_DC["nx"]
nx = nx_AC + nx_DC + nx_BIC * 2
lx = concatenate([case_AC["lx"], case_DC["lx"], zeros(2 * nx_BIC)])
ux = concatenate([case_AC["ux"], case_DC["ux"], case_BIC["con"][:, SMAX] / case_BIC["baseMVA"],
case_BIC["con"][:, SMAX] / case_BIC["baseMVA"]])
Q = concatenate([case_AC["Q"], case_DC["Q"], zeros(nx_BIC * 2)])
c = concatenate([case_AC["c"], case_DC["c"], zeros(nx_BIC * 2)])
c0 = concatenate([case_AC["c0"], case_DC["c0"], zeros(nx_BIC * 2)])
# Update the equality constraints
neq = case_AC["neq"] + case_DC["neq"]
Aeq = zeros((neq, nx))
Aeq[0:case_AC["neq"], 0:case_AC["nx"]] = case_AC["Aeq"]
Aeq[case_AC["neq"]:neq, case_AC["nx"]:case_AC["nx"] + case_DC["nx"]] = case_DC["Aeq"]
# Update the KCL equations
for i in range(nx_BIC):
# Update the AC network information
Aeq[int(case_BIC["con"][i][AC_ID]), case_AC["nx"] + case_DC["nx"] + i] = -1
Aeq[int(case_BIC["con"][i][AC_ID]), case_AC["nx"] + case_DC["nx"] + nx_BIC + i] = case_BIC["con"][
i, EFF_D2A]
# Update the DC network information
Aeq[case_AC["nx"] + int(case_BIC["con"][i][DC_ID]), case_AC["nx"] + case_DC["nx"] + nx_BIC + i] = -1
Aeq[case_AC["nx"] + int(case_BIC["con"][i][DC_ID]), case_AC["nx"] + case_DC["nx"] + i] = \
case_BIC["con"][i, EFF_A2D]
beq = concatenate([case_AC["beq"], case_DC["beq"]])
model = Model("OPF_AC_DC")
# Define the decision variables
x = {}
for i in range(nx):
x[i] = model.addVar(lb=lx[i], ub=ux[i], vtype=GRB.CONTINUOUS)
for i in range(neq):
expr = 0
for j in range(nx):
expr += x[j] * Aeq[i, j]
model.addConstr(lhs=expr, sense=GRB.EQUAL, rhs=beq[i])
for i in range(case_AC["nl"]):
model.addConstr(x[i] * x[i] + x[i + case_AC["nl"]] * x[i + case_AC["nl"]] <= x[i + 2 * case_AC["nl"]] * x[
case_AC["f"][i] + 3 * case_AC["nl"]])
for i in range(case_DC["nl"]):
model.addConstr(
x[case_AC["nx"] + i] * x[case_AC["nx"] + i] <= x[case_AC["nx"] + i + case_DC["nl"]] * x[
case_AC["nx"] + case_DC["f"][i] + 2 * case_DC["nl"]])
obj = 0
for i in range(nx):
obj += Q[i] * x[i] * x[i] + c[i] * x[i] + c0[i]
model.setObjective(obj)
model.Params.OutputFlag = 0
model.Params.LogToConsole = 0
model.Params.DisplayInterval = 1
model.optimize()
xx = []
for v in model.getVars():
xx.append(v.x)
obj = obj.getValue()
Pij_AC = xx[0:case_AC["nl"]]
Qij_AC = xx[case_AC["nl"]:2 * case_AC["nl"]]
Iij_AC = xx[2 * case_AC["nl"]:3 * case_AC["nl"]]
Vi_AC = xx[3 * case_AC["nl"]:3 * case_AC["nl"] + case_AC["nb"]]
Pg_AC = xx[3 * case_AC["nl"] + case_AC["nb"]:3 * case_AC["nl"] + case_AC["nb"] + case_AC["ng"]]
Qg_AC = xx[3 * case_AC["nl"] + case_AC["nb"] + case_AC["ng"]:3 * case_AC["nl"] + case_AC["nb"] + 2 * case_AC["ng"]]
primal_residual_AC = zeros(case_AC["nl"])
for i in range(case_AC["nl"]):
primal_residual_AC[i] = Pij_AC[i] * Pij_AC[i] + Qij_AC[i] * Qij_AC[i] - Iij_AC[i] * Vi_AC[int(case_AC["f"][i])]
Pij_DC = xx[case_AC["nx"]:case_AC["nx"] + case_DC["nl"]]
Iij_DC = xx[case_AC["nx"] + case_DC["nl"]:case_AC["nx"] + 2 * case_DC["nl"]]
Vi_DC = xx[case_AC["nx"] + 2 * case_DC["nl"]:case_AC["nx"] + 2 * case_DC["nl"] + case_DC["nb"]]
Pg_DC = xx[case_AC["nx"] + 2 * case_DC["nl"] + case_DC["nb"]:case_AC["nx"] + 2 * case_DC["nl"] + case_DC["nb"] +
case_DC["ng"]]
primal_residual_DC = zeros(case_DC["nl"])
for i in range(case_DC["nl"]):
primal_residual_DC[i] = Pij_DC[i] * Pij_DC[i] - Iij_DC[i] * Vi_DC[int(case_DC["f"][i])]
primal_residual_BIC = zeros(nx_BIC)
for i in range(nx_BIC):
primal_residual_BIC[i] = xx[case_AC["nx"] + 2 * case_DC["nl"] + case_DC["nb"] +
case_DC["ng"] + i] * xx[case_AC["nx"] + 2 * case_DC["nl"] + case_DC["nb"] +
case_DC["ng"] + i + nx_BIC]
sol = {"Pij_AC": Pij_AC,
"Qij_AC": Qij_AC,
"Iij_AC": Iij_AC,
"Vm_AC": power(Vi_AC, 0.5),
"Pg_AC": Pg_AC,
"Qg_AC": Qg_AC,
"Pij_DC": Pij_DC,
"Iij_DC": Iij_DC,
"Vm_DC": power(Vi_DC, 0.5),
"Pg_DC": Pg_DC,
"residual_AC": primal_residual_AC,
"residual_DC": primal_residual_DC,
"residual_BIC": primal_residual_BIC,
"obj": obj}
return sol
if __name__ == '__main__':
# A test hybrid AC DC network is connected via BIC networks
caseAC = case33.case33()
caseDC = case118.case118()
converters = case_converters.con()
sol = main(Case_AC=caseAC, Case_DC=caseDC, Converters=converters)
|
[
"[email protected]"
] | |
1b78135398abeca244e835d6de11727d963c8134
|
49ee49ee34fa518b0df934081f5ea44a0faa3451
|
/study-crow-framework/crow/examples/example_test.py
|
d252df0b805e995dadd5e2d37ab2bed1e000c5f6
|
[
"BSD-3-Clause",
"MIT",
"ISC"
] |
permissive
|
kingsamchen/Eureka
|
a9458fcc7d955910bf2cefad3a1561cec3559702
|
e38774cab5cf757ed858547780a8582951f117b4
|
refs/heads/master
| 2023-09-01T11:32:35.575951 | 2023-08-27T15:21:42 | 2023-08-27T15:22:31 | 42,903,588 | 28 | 16 |
MIT
| 2023-09-09T07:33:29 | 2015-09-22T01:27:05 |
C++
|
UTF-8
|
Python
| false | false | 1,401 |
py
|
import urllib
assert "Hello World!" == urllib.urlopen('http://localhost:18080').read()
assert "About Crow example." == urllib.urlopen('http://localhost:18080/about').read()
assert 404 == urllib.urlopen('http://localhost:18080/list').getcode()
assert "3 bottles of beer!" == urllib.urlopen('http://localhost:18080/hello/3').read()
assert "100 bottles of beer!" == urllib.urlopen('http://localhost:18080/hello/100').read()
assert 400 == urllib.urlopen('http://localhost:18080/hello/500').getcode()
assert "3" == urllib.urlopen('http://localhost:18080/add_json', data='{"a":1,"b":2}').read()
assert "3" == urllib.urlopen('http://localhost:18080/add/1/2').read()
# test persistent connection
import socket
import time
s = socket.socket()
s.connect(('localhost', 18080))
for i in xrange(10):
s.send('''GET / HTTP/1.1
Host: localhost\r\n\r\n''');
assert 'Hello World!' in s.recv(1024)
# test large
s = socket.socket()
s.connect(('localhost', 18080))
s.send('''GET /large HTTP/1.1
Host: localhost\r\nConnection: close\r\n\r\n''')
r = ''
while True:
d = s.recv(1024*1024)
if not d:
break;
r += d
print len(r), len(d)
print len(r), r[:100]
assert len(r) > 512*1024
# test timeout
s = socket.socket()
s.connect(('localhost', 18080))
# invalid request, connection will be closed after timeout
s.send('''GET / HTTP/1.1
hHhHHefhwjkefhklwejfklwejf
''')
print s.recv(1024)
|
[
"[email protected]"
] | |
8fb3f79b350977c88931c3266b2db486922dcec9
|
ffad717edc7ab2c25d5397d46e3fcd3975ec845f
|
/Python/pyesri/ANSWERS/countwords.py
|
3cb94d4482bdf35763fd40b40028fc5136cad2d1
|
[] |
no_license
|
shaunakv1/esri-developer-conference-2015-training
|
2f74caea97aa6333aa38fb29183e12a802bd8f90
|
68b0a19aac0f9755202ef4354ad629ebd8fde6ba
|
refs/heads/master
| 2021-01-01T20:35:48.543254 | 2015-03-09T22:13:14 | 2015-03-09T22:13:14 | 31,855,365 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 390 |
py
|
#!/usr/bin/python
import sys
if len(sys.argv) < 3:
print "Syntax: countwords.py PATTERN FILE ..."
sys.exit()
pattern = sys.argv[1]
for fname in sys.argv[2:]:
count = 0
with open(fname) as f:
for line in f:
if pattern in line:
count += 1
print '''"{0}" occurred on {1} lines in {2}'''.format(pattern,count,fname)
|
[
"[email protected]"
] | |
2c14b342ece31335f536bac793332b879a2c8b94
|
7f54637e347e5773dfbfded7b46b58b50544cfe5
|
/8-1/chainxy/settings.py
|
0f222740778cd9f63c7bbb6304924cd66e17b44f
|
[] |
no_license
|
simba999/all-scrapy
|
5cc26fd92b1d03366b74d4fff58c4a0641c85609
|
d48aeb3c00fa2474153fbc8d131cf58402976e1d
|
refs/heads/master
| 2021-01-25T14:24:04.715550 | 2018-03-03T13:43:13 | 2018-03-03T13:43:13 | 123,695,640 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,587 |
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for chainxy project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'chainxy'
SPIDER_MODULES = ['chainxy.spiders']
NEWSPIDER_MODULE = 'chainxy.spiders'
# Feed export
FEED_FORMAT = 'csv' # exports to csv
FEED_EXPORT_FIELDS = ['store_number', 'address'] # which fields should be exported
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'chainxy (+http://www.yourdomain.com)'
USER_AGENT = "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36"
DOWNLOADER_MIDDLEWARES = {'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,}
# Obey robots.txt rules
# ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'chainxy.middlewares.ChainxySpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'chainxy.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'chainxy.pipelines.ChainxyPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"[email protected]"
] | |
509ca1afcbfe5bbdeb744ed4f48259dbb9978d9f
|
775d3690f09f34347e2a7918b060f9dd9f83c10d
|
/research/vrgripper/vrgripper_env_models.py
|
36c7825380aeab82828057bd703230deea3186e6
|
[
"Apache-2.0"
] |
permissive
|
HK2-D/tensor2robot
|
aa0ccc9a997ba72447a48d0dc3acf71d2f4af827
|
58d71467eecf02d3a1646d26cc9011f81753f560
|
refs/heads/master
| 2023-02-04T03:18:22.863436 | 2020-12-24T01:20:50 | 2020-12-24T01:21:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 18,221 |
py
|
# coding=utf-8
# Copyright 2020 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T2RModels for VRGripper env tasks."""
from typing import Callable, Dict, List, Optional, Text, Tuple
import gin
import numpy as np
from tensor2robot.layers import mdn
from tensor2robot.layers import vision_layers
from tensor2robot.meta_learning import meta_tfdata
from tensor2robot.models import abstract_model
from tensor2robot.models import regression_model
from tensor2robot.preprocessors import abstract_preprocessor
from tensor2robot.preprocessors import distortion
from tensor2robot.utils import tensorspec_utils
import tensorflow.compat.v1 as tf # tf
import tensorflow_probability as tfp
from tensorflow.contrib import layers as contrib_layers
TensorSpec = tensorspec_utils.ExtendedTensorSpec
TRAIN = tf.estimator.ModeKeys.TRAIN
PREDICT = tf.estimator.ModeKeys.PREDICT
FLOAT_DTYPES = [tf.bfloat16, tf.float32, tf.float64]
@gin.configurable
class DefaultVRGripperPreprocessor(abstract_preprocessor.AbstractPreprocessor):
"""The default VRGripperEnv preprocessor."""
def __init__(self,
src_img_res = (220, 300),
crop_size = (200, 280),
mixup_alpha = 0.0,
**kwargs):
"""Construct the preprocessor.
Args:
src_img_res: The true height and width of the image data. If the model
expects images of a different size, we automatically resize the images.
crop_size: Before resizing the image, take a crop of the image to this
height and width. This is a no-op if equal to src_img_res. The crop is taken
randomly at train time, and from the center otherwise.
mixup_alpha: If > 0., turns on Mixup data augmentation for features and
labels.
**kwargs: Keyword args passed to parent class.
"""
super(DefaultVRGripperPreprocessor, self).__init__(**kwargs)
self._src_img_res = src_img_res
self._crop_size = crop_size
self._mixup_alpha = mixup_alpha
def get_in_feature_specification(self, mode
):
"""See base class."""
feature_spec = tensorspec_utils.copy_tensorspec(
self._model_feature_specification_fn(mode))
# Don't parse the original_image here; this feature is added in preprocess_fn
# to satisfy the model's inputs.
if mode != PREDICT and 'original_image' in feature_spec:
del feature_spec['original_image']
if 'image' in feature_spec:
true_img_shape = feature_spec.image.shape.as_list()
# Overwrite the H, W dimensions.
true_img_shape[-3:-1] = self._src_img_res
feature_spec.image = TensorSpec.from_spec(
feature_spec.image, shape=true_img_shape, dtype=tf.uint8)
return tensorspec_utils.flatten_spec_structure(feature_spec)
def get_in_label_specification(self, mode
):
"""See base class."""
return tensorspec_utils.flatten_spec_structure(
self._model_label_specification_fn(mode))
def get_out_feature_specification(self, mode
):
"""See base class."""
return tensorspec_utils.flatten_spec_structure(
self._model_feature_specification_fn(mode))
def get_out_label_specification(self, mode
):
"""See base class."""
return tensorspec_utils.flatten_spec_structure(
self._model_label_specification_fn(mode))
def _preprocess_fn(
self, features,
labels,
mode
):
"""Resize images and convert them from uint8 -> float32."""
if 'image' in features:
ndim = len(features.image.shape)
is_sequence = (ndim > 4)
input_size = self._src_img_res
target_size = self._crop_size
features.original_image = features.image
features.image = distortion.preprocess_image(features.image, mode,
is_sequence, input_size,
target_size)
features.image = tf.image.convert_image_dtype(features.image, tf.float32)
out_feature_spec = self.get_out_feature_specification(mode)
if out_feature_spec.image.shape != features.image.shape:
features.image = meta_tfdata.multi_batch_apply(
tf.image.resize_images, 2, features.image,
out_feature_spec.image.shape.as_list()[-3:-1])
if self._mixup_alpha > 0. and labels and mode == TRAIN:
lmbda = tfp.distributions.Beta(
self._mixup_alpha, self._mixup_alpha).sample()
for key, x in features.items():
if x.dtype in FLOAT_DTYPES:
features[key] = lmbda * x + (1-lmbda)*tf.reverse(x, axis=[0])
if labels is not None:
for key, x in labels.items():
if x.dtype in FLOAT_DTYPES:
labels[key] = lmbda * x + (1 - lmbda) * tf.reverse(x, axis=[0])
return features, labels
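# The mixup branch in _preprocess_fn above blends every float-typed feature and
# label batch with its reversed batch, using a single weight drawn from a
# Beta(mixup_alpha, mixup_alpha) distribution. A minimal NumPy sketch of the
# same idea (standalone and illustrative only; mixup_batch is not part of
# tensor2robot):
def mixup_batch(x, alpha=0.2, seed=0):
  """Return a convex combination of a batch with its reversal."""
  rng = np.random.RandomState(seed)
  lam = rng.beta(alpha, alpha)  # mixing coefficient in [0, 1]
  # Example i is paired with example N-1-i, mirroring tf.reverse(x, axis=[0]).
  return lam * x + (1.0 - lam) * x[::-1]
# Example (not executed at import time): mixup_batch(np.arange(8.).reshape(4, 2))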
@gin.configurable
class VRGripperRegressionModel(regression_model.RegressionModel):
"""Continuous regression output model for VRGripper Env."""
def __init__(self,
use_gripper_input = True,
normalize_outputs = False,
output_mean = None,
output_stddev = None,
outer_loss_multiplier = 1.,
num_mixture_components = 1,
output_mixture_sample = False,
condition_mixture_stddev = False,
episode_length = 40,
**kwargs):
"""Initialize the VRGripperRegressionModel.
Args:
use_gripper_input: If True, concatenate gripper pose with input to the
fully connected layers when predicting actions.
normalize_outputs: If True, scale actions by `output_stddev` and
translate by `output_mean`.
output_mean: The empirical mean of demonstration actions.
output_stddev: The empirical standard deviation of demonstration actions.
outer_loss_multiplier: A scaling factor for the outer loss.
num_mixture_components: The number of gaussian mixture components. Use 1
for standard mean squared error regression.
output_mixture_sample: If True (and num_mixture_components > 1), output
actions by sampling from a gaussian mixture. Otherwise, we use the mean
of the most likely component.
condition_mixture_stddev: If True, the mixture standard deviations will be
output from a neural net and thus conditioned on image/state. Otherwise,
they will simply be learned variables (unconditioned on image/state).
episode_length: The fixed length of an episode in the data.
**kwargs: Passed to parent.
Raises:
ValueError: If `output_mean` or `output_stddev` have incorrect length.
"""
super(VRGripperRegressionModel, self).__init__(**kwargs)
self._use_gripper_input = use_gripper_input
self._normalize_outputs = normalize_outputs
self._output_mean = None
self._output_stddev = None
self._outer_loss_multiplier = outer_loss_multiplier
self._num_mixture_components = num_mixture_components
self._output_mixture_sample = output_mixture_sample
self._condition_mixture_stddev = condition_mixture_stddev
self._episode_length = episode_length
if output_mean and output_stddev:
if not len(output_mean) == len(output_stddev) == self.action_size:
raise ValueError(
'Output mean and stddev have lengths {:d} and {:d}.'.format(
len(output_mean), len(output_stddev)))
self._output_mean = np.array([output_mean])
self._output_stddev = np.array([output_stddev])
@property
def default_preprocessor_cls(self):
return DefaultVRGripperPreprocessor
def get_feature_specification(self, mode):
del mode
image_spec = TensorSpec(
shape=(100, 100, 3),
dtype=tf.float32,
name='image0',
data_format='jpeg')
gripper_pose_spec = TensorSpec(
shape=(14,), dtype=tf.float32, name='world_pose_gripper')
tspec = tensorspec_utils.TensorSpecStruct(
image=image_spec, gripper_pose=gripper_pose_spec)
return tensorspec_utils.copy_tensorspec(
tspec, batch_size=self._episode_length)
def get_label_specification(self, mode):
del mode
action_spec = TensorSpec(
shape=(self._action_size,), dtype=tf.float32, name='action_world')
tspec = tensorspec_utils.TensorSpecStruct(action=action_spec)
return tensorspec_utils.copy_tensorspec(
tspec, batch_size=self._episode_length)
@property
def action_size(self):
return self._action_size
def _single_batch_a_func(self,
features,
scope,
mode,
context_fn=None,
reuse=tf.AUTO_REUSE):
"""A state -> action regression function that expects a single batch dim."""
gripper_pose = features.gripper_pose if self._use_gripper_input else None
with tf.variable_scope(scope, reuse=reuse, use_resource=True):
with tf.variable_scope('state_features', reuse=reuse, use_resource=True):
feature_points, end_points = vision_layers.BuildImagesToFeaturesModel(
features.image,
is_training=(mode == TRAIN),
normalizer_fn=contrib_layers.layer_norm)
if context_fn:
feature_points = context_fn(feature_points)
fc_input = tf.concat([feature_points, gripper_pose], -1)
outputs = {}
if self._num_mixture_components > 1:
dist_params = mdn.predict_mdn_params(
fc_input,
self._num_mixture_components,
self._action_size,
condition_sigmas=self._condition_mixture_stddev)
gm = mdn.get_mixture_distribution(
dist_params, self._num_mixture_components, self._action_size,
self._output_mean if self._normalize_outputs else None)
if self._output_mixture_sample:
# Output a mixture sample as action.
action = gm.sample()
else:
action = mdn.gaussian_mixture_approximate_mode(gm)
outputs['dist_params'] = dist_params
else:
action, _ = vision_layers.BuildImageFeaturesToPoseModel(
fc_input, num_outputs=self._action_size)
        if self._normalize_outputs:
          action = self._output_mean + self._output_stddev * action
outputs.update({
'inference_output': action,
'image': features.image,
'feature_points': feature_points,
'softmax': end_points['softmax']
})
return outputs
def a_func(self,
features,
scope,
mode,
context_fn=None,
reuse=tf.AUTO_REUSE,
config=None,
params=None):
"""A (state) regression function.
This function can return a stochastic or a deterministic tensor.
Args:
features: This is the first item returned from the input_fn and parsed by
tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
        requirements of the self.get_feature_specification.
scope: String specifying variable scope.
mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
context_fn: Optional python function that takes in features and returns
new features of same shape. For merging information like in RL^2.
reuse: Whether or not to reuse variables under variable scope 'scope'.
config: Optional configuration object. Will receive what is passed to
Estimator in config parameter, or the default config. Allows updating
things in your model_fn based on configuration such as num_ps_replicas,
or model_dir.
params: An optional dict of hyper parameters that will be passed into
input_fn and model_fn. Keys are names of parameters, values are basic
python types. There are reserved keys for TPUEstimator, including
'batch_size'.
Returns:
outputs: A {key: Tensor} mapping. The key 'action' is required.
"""
del config, params
return meta_tfdata.multi_batch_apply(self._single_batch_a_func, 2, features,
scope, mode, context_fn, reuse)
def loss_fn(self, labels, inference_outputs, mode, params=None):
"""This implements outer loss and configurable inner losses."""
if params and params.get('is_outer_loss', False):
pass
if self._num_mixture_components > 1:
gm = mdn.get_mixture_distribution(
inference_outputs['dist_params'], self._num_mixture_components,
self._action_size,
self._output_mean if self._normalize_outputs else None)
return -tf.reduce_mean(gm.log_prob(labels.action))
else:
return self._outer_loss_multiplier * tf.losses.mean_squared_error(
labels=labels.action,
predictions=inference_outputs['inference_output'])
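# For the mixture case, loss_fn above maximizes the likelihood of demonstrated
# actions under a Gaussian mixture built by the mdn module. A standalone sketch
# of that negative log-likelihood, with hypothetical tensors logits [B, K],
# means/stddevs [B, K, A] and actions [B, A] (illustrative only, not the mdn
# module's implementation):
def _example_mixture_nll(logits, means, stddevs, actions):
  tfd = tfp.distributions
  gm = tfd.MixtureSameFamily(
      mixture_distribution=tfd.Categorical(logits=logits),
      components_distribution=tfd.MultivariateNormalDiag(
          loc=means, scale_diag=stddevs))
  # gm.log_prob(actions) has shape [B]; average it into a scalar training loss.
  return -tf.reduce_mean(gm.log_prob(actions))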
@gin.configurable
class VRGripperDomainAdaptiveModel(VRGripperRegressionModel):
"""Base model which uses a learned loss to do domain adaptive imitation.
The model conditions on video only (no actions or gripper pose).
"""
  def __init__(self,
               predict_con_gripper_pose=False,
               learned_loss_conv1d_layers=(10, 10, 6),
               **kwargs):
"""Initialize the model.
Args:
predict_con_gripper_pose: If True, predict the condition gripper pose
input from the image features. Otherwise, set to zeros.
learned_loss_conv1d_layers: A tuple describing the conv1d layers of the
learned loss. If None, the learned loss won't use conv1d layers.
**kwargs: Passed to parent.
"""
super(VRGripperDomainAdaptiveModel, self).__init__(**kwargs)
self._predict_con_gripper_pose = predict_con_gripper_pose
self._learned_loss_conv1d_layers = learned_loss_conv1d_layers
def _predict_gripper_pose(self, feature_points):
"""Predict the condition gripper pose from feature points."""
out = feature_points
out = tf.layers.dense(out, 40, activation=tf.nn.relu, use_bias=False)
out = contrib_layers.layer_norm(out)
out = tf.layers.dense(out, 14, activation=None)
return out
def single_batch_a_func(
self, features, scope,
mode,
context_fn, reuse,
config,
params):
"""Single step action predictor when there is a single batch dim."""
del config
with tf.variable_scope(scope, reuse=reuse, use_resource=True):
with tf.variable_scope('state_features', reuse=reuse, use_resource=True):
feature_points, end_points = vision_layers.BuildImagesToFeaturesModel(
features.image,
is_training=(mode == TRAIN),
normalizer_fn=contrib_layers.layer_norm)
if context_fn:
feature_points = context_fn(feature_points)
if params and params.get('is_inner_loop', False):
if self._predict_con_gripper_pose:
gripper_pose = self._predict_gripper_pose(feature_points)
else:
gripper_pose = tf.zeros_like(features.gripper_pose)
else:
gripper_pose = features.gripper_pose
action, _ = vision_layers.BuildImageFeaturesToPoseModel(
feature_points, aux_input=gripper_pose, num_outputs=self._action_size)
      if self._normalize_outputs:
        action = self._output_mean + self._output_stddev * action
return {
'inference_output': action,
'image': features.image,
'feature_points': feature_points,
'softmax': end_points['softmax'],
}
def a_func(self,
features,
scope,
mode,
context_fn = None,
reuse=tf.AUTO_REUSE,
config = None,
params = None
):
"""Single step action predictor. See parent class."""
return meta_tfdata.multi_batch_apply(self.single_batch_a_func, 2, features,
scope, mode, context_fn, reuse, config,
params)
def model_train_fn(self,
features,
labels,
inference_outputs,
mode,
config = None,
params = None
):
"""Output learned loss if inner loop, or behavior clone if outer loop."""
if params and params.get('is_outer_loss', False):
# Outer loss case: use standard RegressionModel loss.
return self.loss_fn(labels, inference_outputs, mode, params)
# Inner loss case: compute learned loss function.
with tf.variable_scope(
'learned_loss', reuse=tf.AUTO_REUSE, use_resource=True):
predicted_action, _ = meta_tfdata.multi_batch_apply(
vision_layers.BuildImageFeaturesToPoseModel,
2,
inference_outputs['feature_points'],
num_outputs=self._action_size)
if self._learned_loss_conv1d_layers is None:
      return tf.losses.mean_squared_error(predicted_action,
                                          inference_outputs['inference_output'])
ll_input = tf.concat([
predicted_action, inference_outputs['feature_points'],
inference_outputs['inference_output']
], -1)
net = ll_input
for num_filters in self._learned_loss_conv1d_layers[:-1]:
net = tf.layers.conv1d(
net, num_filters, 10, activation=tf.nn.relu, use_bias=False)
net = contrib_layers.layer_norm(net)
net = tf.layers.conv1d(net, self._learned_loss_conv1d_layers[-1],
1) # 1x1 convolution.
return tf.reduce_mean(tf.reduce_sum(tf.square(net), axis=(1, 2)))
|
[
"[email protected]"
] | |
0ea9bc7a81b5477b94cd0a274d0e8e74a3e7a894
|
97b2688fb68523f8350c951972c472425e5671d5
|
/test/test_invoice_line_tax_m.py
|
a9cc1195e7b0af6850828d47dd0fa4c3a0a50aff
|
[] |
no_license
|
calebdlarson/aria-sdk-object-query
|
ab45e5a36d629f98cd58957eef72e53a28b3706b
|
b44abc9e6e3b86c109e65fdc4edf22d7c51edc2c
|
refs/heads/master
| 2020-06-26T19:33:36.880213 | 2019-07-30T22:04:10 | 2019-07-30T22:04:10 | 199,733,517 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 897 |
py
|
# coding: utf-8
"""
Object Query API
Object Query API for Aria billing # noqa: E501
OpenAPI spec version: 23
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.vendor.ariasystems.model.invoice_line_tax_m import InvoiceLineTaxM # noqa: E501
from swagger_client.rest import ApiException
class TestInvoiceLineTaxM(unittest.TestCase):
"""InvoiceLineTaxM unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testInvoiceLineTaxM(self):
"""Test InvoiceLineTaxM"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.invoice_line_tax_m.InvoiceLineTaxM() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
d4f824fdedc2fe9a5500cdeaba29ec74be9b9839
|
aa96ac0dda227bbbfc647c710524915dfae0240f
|
/tests/basic/vargs.py
|
31ca3e5fa2b9b2a9cc757e9658a8a34120b995b0
|
[
"BSD-3-Clause",
"Python-2.0",
"MIT"
] |
permissive
|
certik/test3
|
c963e806e0ccba7a751543303df7137332a34e8a
|
5ed67b37bbff59981c3a9e55bb3d8bdc8924a1aa
|
refs/heads/master
| 2016-09-06T11:38:01.939907 | 2010-04-29T17:31:50 | 2010-04-29T17:31:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 124 |
py
|
def myfunc(a,b,*c):
print a
print b
for i in c:
print i
myfunc(1,2)
myfunc('a','b','c','d')
myfunc(3,4,5,6,'hello')
|
[
"[email protected]"
] | |
3ef0aa3cd77321b32b02be7e5729cddcb6ed997a
|
425aa72cb1ca1edf15a6234647dd546e0d29a71b
|
/turtle_pkg/scripts/grid_path_planning.py
|
3b725eca7f70f7512c62e4ee74c8723b5a202f34
|
[] |
no_license
|
TechnoYantra/robot-teleoperation
|
bbc652deaf1ed15fe54b91fef27d542c124705ef
|
c633169ac57ab1db36b0763fb7498c1722c93881
|
refs/heads/master
| 2023-04-06T03:41:21.440297 | 2020-04-24T06:51:28 | 2020-04-24T06:51:28 | 301,329,974 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,647 |
py
|
#! /home/hunter/development/anaconda3/bin/python
import rospy
from grid_based_sweep_path_planner import planning
from sensor_msgs.msg import NavSatFix
from move_base_msgs.msg import MoveBaseActionResult
from std_srvs.srv import Empty
class GridPathPlannig:
def __init__(self):
rospy.init_node('grid_path_planning')
lat = 49.9000869191
lng = 8.89990548393
ox = [lat, lat+0.0000200, lat+0.0000200, lat, lat]
oy = [lng, lng, lng+0.0000200, lng+0.0000200, lng]
reso = 0.0000025
self.px, self.py = planning(ox, oy, reso)
self.i = 0
rospy.wait_for_service('/move_base/clear_costmaps')
self.clear_costmap = rospy.ServiceProxy('/move_base/clear_costmaps',Empty)
move_sub = rospy.Subscriber('/move_base/result',MoveBaseActionResult,self.feedback_cb)
rospy.spin()
def feedback_cb(self,msg):
if (msg.status.status == 3):
rospy.loginfo('result is success')
rospy.logwarn('clearing costmap')
ret = self.clear_costmap()
self.publish_next_goal(self.i)
elif (msg.status.status == 2):
rospy.loginfo('there is some error so clearing the costmap')
ret = self.clear_costmap()
self.publish_previous_goal(self.i)
elif (msg.status.status == 4):
rospy.loginfo('there is some warning so clearing the costmap')
ret = self.clear_costmap()
self.publish_previous_goal(self.i)
def publish_previous_goal(self,i):
gps_pub = rospy.Publisher('/gps_goal_fix', NavSatFix, queue_size=10)
gps_data = NavSatFix()
gps_data.header.frame_id='/world'
gps_data.latitude=self.px[self.i]
rospy.loginfo('latitude is : '+str(self.px[self.i]))
gps_data.longitude=self.py[self.i]
rospy.loginfo('longitude is : '+str(self.py[self.i]))
rospy.logerr('number is '+str(self.i))
rospy.loginfo('publishing next goal')
gps_pub.publish(gps_data)
def publish_next_goal(self,i):
self.i = self.i+1
gps_pub = rospy.Publisher('/gps_goal_fix',NavSatFix,queue_size=10)
gps_data = NavSatFix()
gps_data.header.frame_id='/world'
rospy.logerr('number is '+str(self.i))
gps_data.latitude=self.px[self.i]
rospy.loginfo('latitude is : '+str(self.px[self.i]))
gps_data.longitude=self.py[self.i]
rospy.loginfo('longitude is : '+str(self.py[self.i]))
rospy.loginfo('publishing next goal')
gps_pub.publish(gps_data)
if __name__ == '__main__':
grdpthpln= GridPathPlannig()
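# Rough scale of the sweep area above (assuming the usual ~111 km per degree of
# latitude): the 0.0000200-degree square is about 2.2 m on a side, and the
# 0.0000025-degree resolution spaces the sweep rows roughly 0.28 m apart.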
|
[
"[email protected]"
] | |
33ce5299478cdbc5d4c69e68261c0090083414ce
|
7f46fda936a5f4358e8b229668d7f4d94bc7fa33
|
/backend/env/bin/django-admin
|
9d20155efcf8be1a359c9b0f866b8abe7d34fd03
|
[] |
no_license
|
souksavanhlkl/djreact-ant-test
|
22de7c2aaa202e002baa41020fd15a46269439dc
|
66f90a19770b39a5c3cb79a14b5c6fe12e49ca55
|
refs/heads/master
| 2020-06-18T07:26:30.601504 | 2019-07-12T17:51:57 | 2019-07-12T17:51:57 | 196,211,377 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 309 |
#!/Users/ton/Downloads/Test/djreact/backend/env/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
"[email protected]"
] | ||
58e695680127bb42f2f78903fc84e26e9f79b012
|
7822e658e88f3f948732e6e3e588ca4b2eb5662a
|
/guias/2012-2/octubre-17/torneos.py
|
3cc64246b4e91ed046f843aea8d045bff0ea5db2
|
[] |
no_license
|
carlos2020Lp/progra-utfsm
|
632b910e96c17b9f9bb3d28329e70de8aff64570
|
a0231d62837c54d4eb8bbf00bb1b84484efc1af2
|
refs/heads/master
| 2021-05-28T06:00:35.711630 | 2015-02-05T02:19:18 | 2015-02-05T02:19:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,803 |
py
|
def contar_partidos(partidos):
return len(partidos)
def obtener_equipos(partidos):
equipos = set()
for local, visita in partidos:
equipos.add(local)
equipos.add(visita)
equipos = list(equipos)
equipos.sort()
return equipos
def obtener_fechas(partidos):
fechas = set()
for p in partidos:
fecha, _ = partidos[p]
fechas.add(fecha)
fechas = list(fechas)
fechas.sort()
return fechas
def calcular_puntos(partidos, equipo):
puntos = 0
for p in partidos:
_, resultado = partidos[p]
if resultado == None:
continue
local, visita = p
gl, gv = resultado
if equipo == local:
if gl > gv:
puntos += 3
elif gl == gv:
puntos += 1
elif equipo == visita:
if gl < gv:
puntos += 3
elif gl == gv:
puntos += 1
return puntos
def calcular_diferencia(partidos, equipo):
diferencia = 0
for p in partidos:
_, resultado = partidos[p]
if resultado == None:
continue
gl, gv = resultado
local, visita = p
if equipo == local:
diferencia += (gl - gv)
elif equipo == visita:
diferencia += (gv - gl)
return diferencia
def ordenar_equipos(partidos):
equipos = obtener_equipos(partidos)
estadisticas = []
for equipo in equipos:
pts = calcular_puntos(partidos, equipo)
dif = calcular_diferencia(partidos, equipo)
estadisticas.append((pts, dif, equipo))
estadisticas.sort()
estadisticas.reverse()
equipos_ordenados = []
for _, _, equipo in estadisticas:
equipos_ordenados.append(equipo)
return equipos_ordenados
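# The functions above all assume the same layout for `partidos`: a dictionary
# whose keys are (local, visita) pairs and whose values are (fecha, resultado)
# tuples, where resultado is (goles_local, goles_visita) or None if the match
# has not been played. A hypothetical example (team names are made up):
if __name__ == '__main__':
    partidos = {
        ('Equipo A', 'Equipo B'): ('2012-10-17', (2, 1)),
        ('Equipo B', 'Equipo C'): ('2012-10-24', None),  # not played yet
    }
    print(contar_partidos(partidos))              # 2
    print(calcular_puntos(partidos, 'Equipo A'))  # 3 (won as local)
    print(ordenar_equipos(partidos))              # best team first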
|
[
"[email protected]"
] | |
9813d2f1469dc08e215edac52165f3615023264d
|
3b2940c38412e5216527e35093396470060cca2f
|
/top/api/rest/AlibabaOpendspAdgroupsAddRequest.py
|
ecc347df1177f0300f8f99e6b18777f4d00cdb29
|
[] |
no_license
|
akingthink/goods
|
842eb09daddc2611868b01ebd6e330e5dd7d50be
|
ffdb5868a8df5c2935fc6142edcdf4c661c84dca
|
refs/heads/master
| 2021-01-10T14:22:54.061570 | 2016-03-04T09:48:24 | 2016-03-04T09:48:24 | 45,093,302 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 301 |
py
|
'''
Created by auto_sdk on 2015-01-20 12:44:32
'''
from top.api.base import RestApi
class AlibabaOpendspAdgroupsAddRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
def getapiname(self):
return 'alibaba.opendsp.adgroups.add'
|
[
"[email protected]"
] | |
cbd5604e182ecd8be6874afcae31daabbc07241b
|
decfc623e13ee43829b4150db8687847ca6b2fd0
|
/MazeAgents.py
|
92953ad28e28fd1baa2002743778d755a4fa30a5
|
[] |
no_license
|
callaunchpad/MeTaL
|
da07d0bab5f70102d06d0c0f5407fe90cacc2fd1
|
a8f83d7db566fb205f47e2201956e35abae11c55
|
refs/heads/master
| 2020-03-28T18:03:19.364027 | 2018-12-06T19:10:59 | 2018-12-06T19:10:59 | 148,848,555 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,725 |
py
|
import dijkstra
import numpy as np
class MazeAgent():
import collections
def __init__(self, goal, walls, size):
self.size = size
self.goal = goal
self.loc = np.array([0,0])
self.graph = self.make_graph(walls)
self.ACTIONS = [0, 1, 2, 3, 4]
def get_action_distr(self, loc):
distr = np.array([0.0, 0.0, 0.0, 0.0, 0.0])
best_action = self.find_move_from_state(start=loc, state=self.find_optimal_first_move(loc))
distr[best_action] = 1.0
return distr
# TODO: make states an enum
def find_move_from_state(self, start, state):
#make sure we only differ in one position by one move
#NOTE! != is xor for booleans in python
assert bool((start[0]-state[0])**2 == 1) != bool((start[1]-state[1])**2 == 1)
if state[1] == start[1] - 1:
return 0 # up
elif state[1] == start[1] + 1:
return 1 # down
elif state[0] == start[0] + 1:
return 2 # right
elif state[0] == start[0] - 1:
return 3 # left
else:
raise ValueError("bad states")
def find_optimal_first_move(self, start):
_, paths_back = self.graph.dijkstra(start)
# starting with a dictionary of nodes to adjacent nodes closest to start, walk back to find best first move
second_state = self.goal
last_state = paths_back[self.goal]
        while last_state != start:  # compare by value, not identity
second_state = last_state
last_state = paths_back[last_state]
return second_state
def make_graph(self, walls):
x_coords = list(range(self.size))
y_coords = list(range(self.size))
states = []
for start_x in x_coords:
for start_y in y_coords:
states.append((start_x, start_y))
graph = dijkstra.Digraph(nodes=states)
for start_x in x_coords:
for start_y in y_coords:
start = (start_x, start_y)
left = (start_x-1, start_y)
right = (start_x+1, start_y)
up = (start_x, start_y+1)
down = (start_x, start_y-1)
if start_x - 1 >= 0 and left not in walls:
graph.addEdge(start, left, 1)
if start_x + 1 < self.size and right not in walls:
graph.addEdge(start, right, 1)
if start_y - 1 >= 0 and down not in walls:
graph.addEdge(start, down, 1)
                if start_y + 1 < self.size and up not in walls:
graph.addEdge(start, up, 1)
return graph
def move(self, distr):
action = np.random.choice(self.ACTIONS, 1, p=distr)[0]
if action == 0:
self.loc[1] += 1
elif action == 1:
self.loc[1] -= 1
elif action == 2:
self.loc[0] += 1
else:
self.loc[0] -= 1
class NoisyMazeAgent(MazeAgent):
def __init__(self, goal, walls, size, opt_prob):
self.opt_prob = opt_prob
self.noise = (1-opt_prob)/4
MazeAgent.__init__(self, goal, walls, size)
def get_action_distr(self, loc):
no_noise = list(MazeAgent.get_action_distr(self, loc))
print(no_noise)
noisy = no_noise
for index, elem in enumerate(no_noise):
if no_noise[index] == 1:
noisy[index] = self.opt_prob
else:
noisy[index] = self.noise
return noisy
if __name__ == '__main__':
agent = NoisyMazeAgent(goal=(2,0), walls=[], size=3, opt_prob=0.9)
print(agent.get_action_distr(loc=(0,0)))
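# Note on NoisyMazeAgent.get_action_distr: with 5 actions, the optimal action
# keeps probability opt_prob and each of the other 4 actions gets
# (1 - opt_prob) / 4, so the distribution still sums to
# opt_prob + 4 * (1 - opt_prob) / 4 = 1.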
|
[
"[email protected]"
] | |
bc6e7bce01224906af14992bd0824e147133d29d
|
cdfbe1e28af6c9b3653b450360d3e22b6d509aa7
|
/3_1_joern_vahland.py
|
915b17fe4d3b4038ab9a632bb30704cd0a3af8af
|
[] |
no_license
|
Ahemmetter/comp-physics
|
e58e731db72e5b3cc41f2d66b503e5c734830f96
|
f9a570a2c536c658e8150ee06454d2ea3ff4da6c
|
refs/heads/master
| 2020-03-12T11:08:57.377346 | 2018-04-22T17:41:57 | 2018-04-22T17:41:57 | 130,589,970 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,732 |
py
|
#! /usr/bin/env python
from __future__ import division
import numpy as np
from matplotlib import pyplot as plt
def fkt(x):
"""Auszuwertende Funktion"""
return np.sin(2*x)
def int_middle(function, x0, x1, n=1000):
"""
Integration der uebergebenen Funktion 'function' von x0 bis x1
auf n Teileintervallen. Mittelpunktmethode.
Rueckgabe: Integalwert, Diskretisierungsbreite dx
"""
# Array mit linken Intervallgrenzen. n Intervalle
links = np.linspace(x0, x1, num=n, endpoint=False, retstep=True)
dx = links[1] # Streifenbreite
werte = function(links[0] + dx/2) # Fkt-Werte in Intervallmitte
streifen = dx * werte # Streifenflaechen
return (np.sum(streifen), dx) # Streifen aufsummiert
def int_trapez(function, x0, x1, n=1000):
"""
Integration der uebergebenen Funktion 'function' von x0 bis x1
auf n Teilintervallen. Trapezmethode
Rueckgabe: Integalwert, Diskretisierungsbreite dx
"""
# Array mit Intervallgrenzen. n Stuetzstellen. rechter Rand inklusive
stuetz = np.linspace(x0, x1, num=n+1, endpoint=True, retstep=True)
dx = stuetz[1] # Streifenbreite
werte = function(stuetz[0]) # Fkt-Werte an Grenzen
streifen = (werte[:-1] + werte[1:]) *dx/2 # Streifenflaechen
return (np.sum(streifen), dx) # Streifen aufsummiert
def int_simps(function, x0, x1, n=1000):
"""
Integration der uebergebenen Funktion 'function' von x0 bis x1
auf n Teilintervallen. Simpsonmethode
Rueckgabe: Integalwert, Diskretisierungsbreite dx
"""
# Array mit Intervallgrenzen
stuetz = np.linspace(x0, x1, num=n+1, endpoint=True, retstep=True)
dx = stuetz [1] # Streifenbreite
werte = function(stuetz[0]) # Fkt-Werte an Grenzen
werte_mitte = function(stuetz[0][:-1] + dx/2) # Fkt-Werte in Mitte
streifen = dx/6 * (werte[:-1] + (4 * werte_mitte) + werte[1:])
return (np.sum(streifen), dx) # Streifen aufsummiert
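# For one strip [x_i, x_i + dx], the three rules implemented above are:
#   midpoint :  dx   *  f(x_i + dx/2)                           (error ~ dx**2)
#   trapezoid:  dx/2 * (f(x_i) + f(x_{i+1}))                    (error ~ dx**2)
#   Simpson  :  dx/6 * (f(x_i) + 4*f(x_i + dx/2) + f(x_{i+1}))  (error ~ dx**4)
# These error orders are exactly the reference slopes plotted in main() below.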
def main():
"""
Fuerht die numerische Integration der Funktion sin(2*x) mittels der
Mittelpunkt-, Trapez- und der Simpson-Methode auf dem Intervall
-pi/2 bis pi/3 durch
"""
    # Integration parameters
function = fkt
x0 = -np.pi/2
x1 = np.pi/3
analytic = -1/4
    # Integration over different numbers of subintervals N in [1, 10**5]
N = np.unique(np.int32(np.logspace(0, 5, 1000, endpoint=True)))
    # Arrays for the integral values
val_mid, val_trap, val_sim = np.zeros((3, len(N)))
    # Arrays for the discretization parameter dx (subinterval width)
dx_mid, dx_trap, dx_sim = np.zeros((3, len(N)))
    # Compute the integrals for the various numbers of subintervals [1, 10**5]
for i in np.arange(0, len(N)):
val_mid[i], dx_mid[i] = int_middle(function, x0, x1, N[i])
val_trap[i], dx_trap[i] = int_trapez(function, x0, x1, N[i])
val_sim[i], dx_sim[i] = int_simps(function, x0, x1, N[i])
    # Create and configure the plot
plt.subplot(111, xscale="log", yscale="log")
plt.title("Numerische Integration")
plt.xlabel("dx")
plt.ylabel(r"$\frac{\Delta I}{I}$ ", fontsize=20)
    # Plot the relative errors of the numerical integrals
plt.plot(dx_mid, np.abs((analytic - val_mid)/analytic),
label="Mittelpunkt", ls="None", color="r", marker=".", markersize=3)
plt.plot(dx_trap, np.abs((analytic - val_trap)/analytic),
label="Trapez", ls="None", color="g", marker=".", markersize=3)
plt.plot(dx_sim, np.abs((analytic - val_sim)/analytic),
label="Simpson", ls="None", color="b", marker=".", markersize=3)
    # Plot the expected scaling behaviour
plt.plot(dx_mid, dx_mid**2, label="Skalierung Mittelpunkt & Trapez",
color="r")
plt.plot(dx_sim, dx_sim**4, label="Skalierung Simpson", color="b")
    # Add the legend and show the figure
plt.legend(loc="upper left")
plt.show()
if __name__ == "__main__":
main()
# Analytical values of the integrals:
# a) I = -1/4
# b) I = sqrt(pi)/10 = 0.1772453850905516
# c) I = pi/3 = 1.0471975511965976
# Discussion of the numerical results:
# a) For all methods the double-logarithmic plot shows straight lines
#    (~ dx**2 and ~ dx**4, respectively).
#    For the midpoint and trapezoidal rules there is hardly any numerical
#    scatter. Simpson's rule follows the expected scaling for dx in
#    [2*10**(-3), 5/6 * pi].
#    For smaller dx the numerical error dominates --> noise
#
# b) The relative errors of all 3 methods have a similar shape, and all
#    methods appear to give results of roughly the same accuracy:
#    for dx < ~0.05 the errors stay below 10**-13. For larger dx the errors
#    grow drastically, which is due to the shape of the function. The only
#    values that contribute significantly to the integral lie in a narrow
#    band around the y-axis (x=0). If only a few support points fall into
#    this relevant interval, the integral becomes correspondingly
#    inaccurate.
#
# c) For all methods several straight lines appear, lying roughly in the
#    range of relative errors 10**-5 - 1. The different lines are caused by
#    the discontinuity at x=0 and by how the support points fall into that
#    region.
#    It is also noticeable that for a few dx values the relative error even
#    drops below 10**-12. These are presumably coincidental cases where a
#    support point happens to land favorably on the discontinuity at x=0.
|
[
"[email protected]"
] | |
9ccf673fc7f75b6ef56bdf197618ec1911503eb7
|
ed14247e130e538612bf8108a5ae5d95fc1d4eb5
|
/algorithm/DE_FBG.py
|
ae830ac181dc6f841844ddc010d8a2cbbe7841b5
|
[] |
no_license
|
chiuhans111/threeFBGpredict
|
b46f3df711ec0d5e37c19501d406c7331ca1ded9
|
109eb28393f66f8d596f686fbfa62ff29f12b930
|
refs/heads/main
| 2023-02-24T10:53:25.379452 | 2020-12-23T11:32:48 | 2020-12-23T11:32:48 | 323,494,418 | 0 | 0 | null | 2020-12-22T02:00:30 | 2020-12-22T02:00:30 | null |
UTF-8
|
Python
| false | false | 959 |
py
|
from algorithm.DE import DE
import numpy as np
from algorithm.FBG import simulate
def optimize(data, center, width, height, d, iterations=20, F=.5, CR=0.1, NP=50):
x = np.array([center, width, height])
down_lim = x - d[:, np.newaxis]
up_lim = x + d[:, np.newaxis]
de = DE(F, CR, NP)
X = de.init(down_lim, up_lim)
Perror = None
PX = None
for t in range(iterations):
if PX is not None:
X = de.mutate(PX)
spectra = simulate(data[0], X)
error = np.sum((spectra - data[1, np.newaxis, :])**2, 1)
if Perror is not None:
compare = error < Perror
PX += (X-PX) * compare[:, np.newaxis, np.newaxis]
Perror += (error-Perror) * compare
else:
Perror = error
PX = X
# plt.plot(spectra.T)
# plt.show()
# plt.plot(error)
# plt.show()
average = np.average(PX, 0)
return average
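# Hypothetical call, with array shapes inferred from the indexing above (none
# of these example values come from the original module):
#   data                  : ndarray of shape (2, N); data[0] = wavelengths,
#                           data[1] = measured spectrum
#   center, width, height : length-K arrays, one entry per FBG peak
#   d                     : length-3 array of search half-widths for
#                           [center, width, height]
# best = optimize(data, center, width, height, d=np.array([0.5, 0.1, 0.1]))
# `best` is the population average, an array of shape (3, K).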
|
[
"[email protected]"
] | |
e45404a58ceedb3cdcaa8689db61661f2f574385
|
6060a897acb9b47b52ce03b2400ada9474521a1a
|
/vecSubtract.py
|
0eb504c04d7fce55f665e0f41fb805f2e014f06b
|
[] |
no_license
|
madisonstewart2018/quiz-3
|
64c6761dd64ed213927720b2312419d4efe1a1de
|
da0473848f9a496859cd79e08bb73747df3438e1
|
refs/heads/master
| 2020-03-21T13:34:52.435332 | 2018-06-25T15:36:24 | 2018-06-25T15:36:24 | 138,613,649 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 876 |
py
|
# vecSubtract subtracts the second vector from the first.
def vecSubtract(vector01, vector02):
'''
    This function takes two vectors as its arguments and returns a new vector
    equal to the first vector minus the second, element by element.
'''
if len(vector01) != len(vector02):
        # if the two vectors differ in length, the input is invalid
print('invalid input')
return None
new = [] #since we want the answer to be in brackets instead of an integer.
for i in range(len(vector01)):
total = 0 #after the for i statement so it resets.
total += vector01[i] - vector02[i]
new.append(total) #adds the new total into the 'new' brackets
return new
#test elements
vector01 = [5, 12]
vector02 = [-3, 4]
vector03 = [1, 1, 1]
vector04 = [1, 2]
print(vecSubtract(vector01, vector02))
#print(vecSubtract(vector03, vector04))
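# Expected output of the test above: [8, 8] (5 - (-3) = 8 and 12 - 4 = 8).
# The commented-out call would print 'invalid input' and then None, because
# vector03 and vector04 have different lengths.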
|
[
"[email protected]"
] | |
93ae19d8347402fc48316e47a19dfa72c32bdf88
|
11d6c4caca42d885587b36ab754809dd14d2a4c1
|
/apps/messaging/tests/test_urls.py
|
001a2a3c8dc84bed586bfef75fdb8b7e3ef54283
|
[] |
no_license
|
illia-v/FortyTwoTestTask
|
b32b73832f40bf51ad74e6c1a8e91d1c425dfc22
|
c2dc855e0463e428d1d3876d8748ea90e42e0038
|
refs/heads/master
| 2021-01-10T23:52:47.517041 | 2016-11-13T23:27:43 | 2016-11-13T23:27:43 | 69,566,080 | 0 | 0 | null | 2016-09-29T12:37:56 | 2016-09-29T12:37:55 | null |
UTF-8
|
Python
| false | false | 4,392 |
py
|
from django.test import TestCase
from django.core.urlresolvers import resolve, reverse
class TestMessagingURLs(TestCase):
def test_messages_index_view_url(self):
"""
Ensures that a URL pattern name `messaging` is valid and the
pattern is resolved to `MessagingIndexView`
"""
messages = resolve('/messaging/')
self.assertEqual(reverse('messaging:index'), '/messaging/',
'A view name `messaging:index` should be reversed to '
'the URL `/messaging/`')
self.assertEqual(messages.func.__name__, 'MessagingIndexView',
'Should be resolved to `MessagingIndexView`')
def test_messages_detail_view_url(self):
"""
Ensures that a URL pattern name `messages_detail` is valid and
the pattern is resolved to `MessagingDetailView`
"""
messages_detail = resolve('/messaging/somebody/')
self.assertEqual(reverse('messaging:detail', args=['somebody']),
'/messaging/somebody/',
'A view name `messaging:detail` should be reversed '
'to the URL `/messaging/{username}/`')
self.assertEqual(messages_detail.func.__name__, 'MessagingDetailView',
'Should be resolved to `MessagingDetailView`')
def test_messaging_create_view_url(self):
"""
Ensures that a URL pattern name `messaging:create` is valid and
the pattern is resolved to `MessagingCreateView`
"""
messaging_create = resolve('/messaging/somebody/new/')
self.assertEqual(reverse('messaging:create', args=['somebody']),
'/messaging/somebody/new/',
'A view name `messaging:create` should be reversed '
'to the URL `/messaging/{username}/new/`')
self.assertEqual(messaging_create.func.__name__, 'MessagingCreateView',
'Should be resolved to `MessagingCreateView`')
def test_messaging_pull_view_url(self):
"""
Ensures that a URL pattern name `messaging:pull` is valid and
the pattern is resolved to `MessagingPullView`
"""
messaging_pull = resolve('/messaging/somebody/pull/')
self.assertEqual(reverse('messaging:pull', args=['somebody']),
'/messaging/somebody/pull/',
'A view name `messaging:pull` should be reversed '
'to the URL `/messaging/{username}/pull/`')
self.assertEqual(messaging_pull.func.__name__, 'MessagingPullView',
'Should be resolved to `MessagingPullView`')
def test_messaging_update_unread_count_view_url(self):
"""
Ensures that a URL pattern name `messaging:update_unread_count`
is valid and the pattern is resolved to
`MessagingUpdateUnreadCountView`
"""
update_unread_count = resolve(
'/messaging/update_unread_count/'
)
self.assertEqual(
reverse('messaging:update_unread_count'),
'/messaging/update_unread_count/',
'A view name `messaging:update_unread_count` should be reversed '
'to the URL `/messaging/update_unread_count/`'
)
self.assertEqual(
update_unread_count.func.__name__,
'MessagingUpdateUnreadCountView',
'Should be resolved to `MessagingUpdateUnreadCountView`'
)
def test_messaging_reset_unread_count_view_url(self):
"""
Ensures that a URL pattern name `messaging:reset_unread_count`
is valid and the pattern is resolved to
`MessagingResetUnreadCountView`
"""
reset_unread_count = resolve(
'/messaging/somebody/reset_unread_count/'
)
self.assertEqual(
reverse('messaging:reset_unread_count', args=['somebody']),
'/messaging/somebody/reset_unread_count/',
            'A view name `messaging:reset_unread_count` should be reversed '
'to the URL `/messaging/{username}/reset_unread_count/`'
)
self.assertEqual(
reset_unread_count.func.__name__,
'MessagingResetUnreadCountView',
'Should be resolved to `MessagingResetUnreadCountView`'
)
|
[
"[email protected]"
] | |
0981118c599df667436e798337a89c2f6dae448d
|
fd6f795e14a1f8d64860aa0dbac91cb4282d4675
|
/src/create_all_train_heading_matrices.py
|
1a390969f3441df2b9d75d6bd67f9cdce66d9155
|
[
"Apache-2.0"
] |
permissive
|
mesax1/Eafit-UdeA-Amazon-Last-Mile-Routing-Challenge
|
495e0eb46e4921c3696501f8bc31b20c017e99d7
|
51c4f1cf14529eee558395c4cf90c4685f786fbe
|
refs/heads/main
| 2023-07-05T02:09:50.231281 | 2021-08-18T18:57:39 | 2021-08-18T18:57:39 | 397,702,616 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,111 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 14 14:04:34 2021
@author: mesar
"""
import numpy as np
import pandas as pd
from lib import utils
from progressbar import progressbar as pbar
from pathlib import Path
import itertools
from time import time
import csv
if __name__ == "__main__":
Path("../data/model_build_outputs/all_prob_matrices_heading").mkdir(parents=True, exist_ok=True)
Path("../data/model_build_outputs/prob_matrices_heading").mkdir(parents=True, exist_ok=True)
Path("../data/model_build_outputs/prob_matrices_angle").mkdir(parents=True, exist_ok=True)
print("Calculating train_routes")
#train_routes = utils.get_train_routes()
routes = utils.get_routes()
hroutes = np.unique(routes.route_fid)
all_zroutes = utils.get_routes_as_zones()
zroutes = all_zroutes[all_zroutes.route_fid.isin(hroutes)]
print("Done reading routes")
t0 = time()
max_distances = [50, 100, 150, 200, 250, 300]
dwks = [0.01, 0.05, 0.1, 0.15]
r = []
for max_distance, dwk in itertools.product(max_distances, dwks):
tt = time()
#print ("\n----------\n%3d"%max_distance, "%.2f"%dwk, end=" || ", flush=True)
za = utils.ZrouteField(zroutes, max_distance=max_distance).compute_field(dwk=dwk, use_pbar=True)
h = za.get_estimated_headings(use_pbar=True)
rr = za.heading_estimations_cosdistance(h)
rr['max_distance'] = max_distance
rr['dwk'] = dwk
rr['zones_estimated'] = np.mean(h.cos_distance!=0)
rr['time'] = time()-t0
rr['nroutes'] = len(np.unique(za.zroutes.route_fid))
t0 = time()
r.append(rr)
print ("maxd %3d, "%max_distance, "dwk %.2f, "%dwk, f'time {time()-tt:.4f}, cos_sim {rr["cos_distance_mean"]:.4f}', flush=True)
r = pd.DataFrame(r)
r.to_hdf("../data/model_build_outputs/md_dkw_exploration.hdf", "data")
dwks = np.sort(np.unique(r.dwk))
max_distances = np.sort(np.unique(r.max_distance))
csims = np.zeros((len(dwks), len(max_distances)))
zcovered = np.zeros((len(dwks), len(max_distances)))
for i,dwk in enumerate(dwks):
for j,max_distance in enumerate(max_distances):
k = r[(r.max_distance==max_distance)&(r.dwk==dwk)].iloc[0]
csims[i,j] = k.cos_distance_mean
zcovered[i,j] = k.zones_estimated
for distance in max_distances:
k = r[r.max_distance==distance]
print(k)
estimated_zones_value = 1.0
best_options = r[r.zones_estimated >= estimated_zones_value]
if not best_options.empty:
best_options = r[r.zones_estimated >= estimated_zones_value]
best_combination = best_options[best_options.cos_distance_mean == best_options.cos_distance_mean.max()]
selected_max_distance = best_combination.max_distance.values[0]
selected_dwk = best_combination.dwk.values[0]
while best_options.empty:
print("Empty for value: " + str(estimated_zones_value))
estimated_zones_value = estimated_zones_value - 0.1
best_options = r[r.zones_estimated >= estimated_zones_value]
best_combination = best_options[best_options.cos_distance_mean == best_options.cos_distance_mean.max()]
selected_max_distance = best_combination.max_distance.values[0]
selected_dwk = best_combination.dwk.values[0]
print(selected_max_distance)
print(selected_dwk)
output_path = "../data/model_build_outputs/best_max_distance.csv"
with open(output_path, "w") as file:
writer = csv.writer(file, delimiter=',')
writer.writerow([selected_max_distance])
output_path = "../data/model_build_outputs/best_dwk.csv"
with open(output_path, "w") as file:
writer = csv.writer(file, delimiter=',')
writer.writerow([selected_dwk])
print("Max distance: " + str(selected_max_distance))
print("dwk: " + str(selected_dwk))
print("Calculating train_routes")
train_routes = utils.get_routes()
print("Calculating train_zroutes")
train_zroutes = utils.get_routes_as_zones()
print("Calculating z_route_fields")
za = utils.ZrouteField(train_zroutes, max_distance=selected_max_distance).compute_field(dwk=selected_dwk)
print("Calculating heading_matrices")
h = za.get_estimated_headings(zroutes=train_zroutes)
fname = f'../data/model_build_outputs/heading_estimations_md_{selected_max_distance}_dwk_{selected_dwk:.4f}.hdf'
h.to_hdf(fname, "data")
#h = pd.read_hdf("../data/model_apply_outputs/heading_estimations_md_200_dwk_0.1000.hdf")
zroutes = train_zroutes.copy()
print("Calculating prob_matrices")
for route_fid in pbar(np.unique(h.route_fid)):
probs = utils.get_heading_based_probmatrix(h, route_fid)
probs = probs[~probs.index.str.contains("Station")]
#probs.drop(probs.filter(regex='Station').columns, axis=1, inplace=True)
probs.to_csv(f"../data/model_build_outputs/prob_matrices_heading/{route_fid}_probs.csv", sep=',', na_rep='nan')
zones_id = zroutes.zone_id[zroutes.route_fid==route_fid]
zones_id = zones_id[~zones_id.str.contains("Station")]
zones_id.reset_index(inplace=True, drop=True)
cities = zroutes.city[zroutes.route_fid==route_fid]
cities.reset_index(inplace=True, drop=True)
city = cities[0]
city_size = len(city) + 2
        zones_id = [zones_id[i][city_size:] for i in range(0,len(zones_id))]  # drop the city prefix from each zone id
zones_df = pd.Series(zones_id)
zones_df = zones_df.append(pd.Series("nan"))
zones_df.to_csv(f"../data/model_build_outputs/prob_matrices_heading/{route_fid}_zroutes.csv", index=False, header=False, na_rep='nan')
prob_matrix = utils.get_angle_based_probmatrix(h, route_fid)
prob_matrix.to_csv(f"../data/model_build_outputs/prob_matrices_angle/{route_fid}_probs.csv", sep=',', na_rep='nan')
#probs.to_hdf(f"data/prob_matrices_based_on_heading/{route_fid}_probs.hdf", "data")
print("Done")
|
[
"[email protected]"
] | |
72d63ebd6c645be46a359af210e35f898b44baa4
|
7fc1de164e179c51f3c114aa31cc81edc89b3f91
|
/dopamine/discrete_domains/train.py
|
672479f1e553583f1efecb2c0704d272aef2b5ba
|
[
"MIT"
] |
permissive
|
SherlockShellingford/NotWorkingIQN
|
9b8609691ee43eaf65c81069e33a92ada386491a
|
99af0bdd25b67203f93e060d23f42fafce588ff7
|
refs/heads/master
| 2023-03-20T07:52:06.656742 | 2021-03-09T12:16:45 | 2021-03-09T12:16:45 | 339,854,241 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,830 |
py
|
# coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""The entry point for running a Dopamine agent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path = ['../../'] + sys.path
print (sys.path)
#exit(0)
from absl import app
from absl import flags
from dopamine.discrete_domains import run_experiment
import tensorflow as tf
flags.DEFINE_string('base_dir', None,
'Base directory to host all required sub-directories.')
flags.DEFINE_multi_string(
'gin_files', [], 'List of paths to gin configuration files (e.g.'
'"dopamine/agents/dqn/dqn.gin").')
flags.DEFINE_multi_string(
'gin_bindings', [],
'Gin bindings to override the values set in the config files '
'(e.g. "DQNAgent.epsilon_train=0.1",'
' "create_environment.game_name="Pong"").')
FLAGS = flags.FLAGS
def main(unused_argv):
"""Main method.
Args:
unused_argv: Arguments (unused).
"""
tf.logging.set_verbosity(tf.logging.INFO)
run_experiment.load_gin_configs(FLAGS.gin_files, FLAGS.gin_bindings)
runner = run_experiment.create_runner(FLAGS.base_dir)
runner.run_experiment()
if __name__ == '__main__':
flags.mark_flag_as_required('base_dir')
app.run(main)
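# Example invocation (the paths and gin file below are illustrative; any agent
# config from the Dopamine repository can be substituted):
#   python train.py \
#     --base_dir=/tmp/dopamine_run \
#     --gin_files='dopamine/agents/dqn/dqn.gin' \
#     --gin_bindings='DQNAgent.epsilon_train=0.1'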
|
[
"[email protected]"
] | |
10b46b68c63bd7cd2f94b81bd419e6d1e2b27176
|
605e05d30e9df15307c7414ec7d4137bea5b2b53
|
/docs/core/models/professor.py
|
349786ff2cc2462926df15bdc4573bdb863b67f2
|
[] |
no_license
|
tr0v40/Malakias
|
f4c4542373b038e833ce5d45848852d94ce05f3d
|
46d85758c515d858430b0174646ab3f8a37dc65b
|
refs/heads/master
| 2020-03-15T18:59:27.931071 | 2018-05-06T18:22:46 | 2018-05-06T18:22:46 | 132,297,500 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,460 |
py
|
class Professor:
def __init__(self, login, nome, email, celular, apelido):
self.__login = login
self.__nome = nome
self.__email = email
self.__celular = celular
self.__apelido = apelido
self.__disciplinas = []
def addDisciplina(self, disciplina):
if disciplina.getprofessor().getra() == self.__ra:
self.__disciplinas.append(disciplina)
else:
return "Professor nao associado a disciplina"
def getLogin(self):
return self.__login
def setLogin(self, novoLogin):
self.__login = novoLogin
def getNome(self):
return self.__nome
def setNome(self, novoNome):
self.__nome = novoNome
def getEmail(self):
return self.__email
def setEmail(self, novoEmail):
self.__email = novoEmail
def getRa(self):
return self.__ra
def setRa(self, novoRa):
self.__ra = novoRa
def getCelular(self):
return self.__celular
def setCelular(self, novoCelular):
self.__celular = novoCelular
def getApelido(self):
return self.__apelido
def setApelido(self, novoApelido):
self.__apelido = novoApelido
def getDisciplinas(self):
return self.__disciplinas
def retornaCargaHoraria(self):
soma_carga = 0
for d in self.__disciplinas:
soma_carga += d.getcargahoraria()/20
return soma_carga
|
[
"[email protected]"
] | |
990dae602fb84ac655bda5f1afc15ff366bfbd32
|
dfee0c908ad021f229d6d802408790a10272132c
|
/tests/python/relay/test_dataflow_pattern.py
|
d99e55b7c33ff3dc3db2e470777793a514c8712c
|
[
"Apache-2.0",
"Zlib",
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
cchung100m/tvm
|
788453bce13a659d991c599d7ee2979089538967
|
6258fae6d1e9ab77b8065d4ffb81a5033665e0cc
|
refs/heads/master
| 2023-07-09T20:36:14.055440 | 2021-01-02T00:07:41 | 2021-01-02T00:07:41 | 198,459,825 | 0 | 2 |
Apache-2.0
| 2019-07-23T15:37:37 | 2019-07-23T15:37:37 | null |
UTF-8
|
Python
| false | false | 47,618 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-wildcard-import
import numpy as np
import tvm
from tvm import relay
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.dataflow_pattern import *
from tvm.relay.testing import run_opt_pass
# NB: 1 corresponds to the C++ enum that specifies this;
# we lose the type safety due to the Python/C++ calling
# convention.
K_ELEMWISE = 0
K_BROADCAST = 1
## NODE TESTS
def test_expr_pattern():
ep = is_expr(relay.var("x", shape=(4, 1)))
assert isinstance(ep, ExprPattern)
assert isinstance(ep.expr, relay.Var)
def test_var_pattern():
v = is_var("x")
assert isinstance(v, VarPattern)
assert v.name == "x"
def test_constant_pattern():
c = is_constant()
assert isinstance(c, ConstantPattern)
def test_wildcard_pattern():
wc = wildcard()
assert isinstance(wc, WildcardPattern)
def test_CallPattern():
wc1 = wildcard()
wc2 = wildcard()
c = is_op("add")(wc1, wc2)
assert isinstance(c, CallPattern)
assert isinstance(c.args[0], WildcardPattern)
assert isinstance(c.args[1], WildcardPattern)
def test_FunctionPattern():
wc1 = wildcard()
wc2 = wildcard()
c = is_op("add")(wc1, wc2)
f = FunctionPattern([wc1, wc2], c)
assert isinstance(f, FunctionPattern)
assert isinstance(f.params[0], WildcardPattern)
assert isinstance(f.params[1], WildcardPattern)
assert isinstance(f.body, CallPattern)
assert isinstance(f.body.args[0], WildcardPattern)
assert isinstance(f.body.args[1], WildcardPattern)
def test_TuplePattern():
wc1 = wildcard()
wc2 = wildcard()
t = is_tuple([wc1, wc2])
assert isinstance(t, TuplePattern)
assert isinstance(t.fields[0], WildcardPattern)
assert isinstance(t.fields[1], WildcardPattern)
def test_TupleGetItemPattern():
wc1 = wildcard()
wc2 = wildcard()
t = is_tuple([wc1, wc2])
tgi = is_tuple_get_item(t, 1)
assert isinstance(tgi, TupleGetItemPattern)
assert isinstance(tgi.tuple, TuplePattern)
assert isinstance(tgi.tuple.fields[0], WildcardPattern)
assert isinstance(tgi.tuple.fields[1], WildcardPattern)
def test_AltPattern():
is_add_or_sub = is_op("add") | is_op("subtract")
assert isinstance(is_add_or_sub, AltPattern)
def test_TypePattern():
ttype = relay.TensorType((10, 10), "float32")
ty_pat = has_type(ttype)
assert isinstance(ty_pat, TypePattern)
assert ty_pat.type == ttype
def test_DataTypePattern():
dtype = "float16"
pattern = has_dtype(dtype)
assert isinstance(pattern, DataTypePattern)
assert pattern.dtype == dtype
def test_ShapePattern():
shape = [10, 10]
pattern = has_shape(shape)
assert isinstance(pattern, ShapePattern)
assert tvm.ir.structural_equal(pattern.shape, shape)
def test_AttrPattern():
op = is_op("add").has_attr({"TOpPattern": K_ELEMWISE})
assert isinstance(op, AttrPattern)
assert op.attrs["TOpPattern"] == K_ELEMWISE
## MATCHER TESTS
def test_match_op():
assert is_op("add").match(relay.op.op.get("add"))
def test_no_match_op():
assert not is_op("add").match(relay.op.op.get("subtract"))
def test_match_op_or():
is_add_or_sub = is_op("add") | is_op("subtract")
assert is_add_or_sub.match(relay.op.op.get("add"))
assert is_add_or_sub.match(relay.op.op.get("subtract"))
def test_match_call_commutive():
x = relay.var("x")
y = relay.var("y")
add_pattern = is_op("add")(is_var("x"), is_var("y"))
assert add_pattern.match(x + y)
assert add_pattern.match(y + x)
mul_pattern = is_op("multiply")(is_var("x"), is_var("y"))
assert mul_pattern.match(x * y)
assert mul_pattern.match(y * x)
def test_no_match_call_commutive():
x = relay.var("x")
y = relay.var("y")
add_pattern = is_op("subtract")(is_var("x"), is_var("y"))
assert add_pattern.match(x - y)
assert not add_pattern.match(y - x)
add_pattern = is_op("divide")(is_var("x"), is_var("y"))
assert add_pattern.match(x / y)
assert not add_pattern.match(y / x)
def test_match_call():
x = relay.var("x")
y = relay.var("y")
add_pattern = is_op("add")(wildcard(), wildcard())
assert add_pattern.match(x + y)
def test_no_match_call():
x = relay.var("x")
y = relay.var("y")
add_pattern = is_op("add")(wildcard(), wildcard())
assert not add_pattern.match(x - y)
def test_match_func():
x = relay.var("x")
y = relay.var("y")
wc1 = wildcard()
wc2 = wildcard()
func_pattern = FunctionPattern([wc1, wc2], wc1 + wc2)
assert func_pattern.match(relay.Function([x, y], x + y))
def test_no_match_func():
x = relay.var("x")
y = relay.var("y")
wc1 = wildcard()
wc2 = wildcard()
func_pattern = FunctionPattern([wc1, wc2], wc1 + wc2)
assert not func_pattern.match(relay.Function([x, y], x - y))
def test_match_option():
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
pattern = is_op("nn.relu")(
is_op("nn.conv2d")(wildcard(), wildcard()).optional(
lambda x: is_op("nn.bias_add")(x, wildcard())
)
)
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.nn.relu(conv2d)
assert pattern.match(relu)
conv2d = relay.op.nn.conv2d(x, w)
bias_add = relay.op.nn.bias_add(conv2d, b)
relu = relay.op.nn.relu(bias_add)
assert pattern.match(relu)
pattern = is_op("nn.conv2d")(wildcard(), wildcard())
pattern = pattern.optional(is_op("nn.relu")).optional(is_op("tanh"))
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.nn.relu(conv2d)
tanh = relay.op.tanh(conv2d)
tanh2 = relay.op.tanh(relu)
relu2 = relay.op.nn.relu(tanh)
assert pattern.match(conv2d)
assert pattern.match(relu)
assert pattern.match(tanh)
assert pattern.match(tanh2)
assert not pattern.match(relu2)
def test_no_match_option():
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
pattern = is_op("nn.relu")(
is_op("nn.conv2d")(wildcard(), wildcard()).optional(
lambda x: is_op("nn.bias_add")(x, wildcard())
)
)
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.tanh(conv2d)
assert not pattern.match(relu)
conv2d = relay.op.nn.dense(x, w)
relu = relay.op.tanh(conv2d)
assert not pattern.match(relu)
conv2d = relay.op.nn.dense(x, w)
bias_add = relay.op.nn.bias_add(conv2d, b)
relu = relay.op.nn.relu(bias_add)
assert not pattern.match(relu)
conv2d = relay.op.nn.conv2d(x, w)
bias_add = conv2d + w
relu = relay.op.nn.relu(bias_add)
assert not pattern.match(relu)
def test_match_const():
conv2d = is_op("nn.conv2d")(wildcard(), is_constant())
pattern = is_op("nn.bias_add")(conv2d, wildcard())
x = relay.var("x", shape=(1, 3, 224, 224))
w = relay.var("w", shape=(3, 3, 3, 3))
b = relay.var("b", shape=(3,))
conv2d = relay.op.nn.conv2d(x, w)
out = relay.op.nn.bias_add(conv2d, b)
func = relay.Function([x, w, b], out)
mod = tvm.IRModule.from_expr(func)
assert not pattern.match(mod["main"].body)
mod["main"] = bind_params_by_name(mod["main"], {"w": tvm.nd.array(np.ones(shape=(3, 3, 3, 3)))})
assert pattern.match(mod["main"].body)
def test_match_tuple():
x = relay.var("x")
y = relay.var("y")
z = relay.op.op.get("add")
tuple_pattern = is_tuple((is_var("x"), wildcard(), is_op("add")))
assert tuple_pattern.match(relay.expr.Tuple((x, y, z)))
tuple_pattern = is_tuple((is_var("x"), wildcard(), is_op("add")))
tuple_get_item_pattern = is_tuple_get_item(tuple_pattern, 1)
assert tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 1))
tuple_get_item_pattern = is_tuple_get_item(tuple_pattern) # Match any index
assert tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 0))
assert tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 1))
assert tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 2))
def test_no_match_tuple():
x = relay.var("x")
y = relay.var("y")
z = relay.op.op.get("add")
tuple_pattern = is_tuple((is_var("x"), wildcard(), is_op("add"), wildcard()))
assert not tuple_pattern.match(relay.expr.Tuple((x, y, z)))
tuple_pattern = is_tuple((is_var("x"), wildcard(), is_op("add")))
tuple_get_item_pattern = is_tuple_get_item(tuple_pattern, 1)
assert not tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 2))
def test_match_type():
x = relay.var("x", shape=(10, 10), dtype="float32")
ty_pat = has_type(relay.TensorType((10, 10), "float32"))
assert ty_pat.match(x)
def test_no_match_type():
x = relay.var("x", shape=(10, 10), dtype="int32")
ty_pat = has_type(relay.TensorType((10, 10), "float32"))
assert not ty_pat.match(x)
def test_match_dtype():
x = relay.var("x", shape=(10, 10), dtype="float32")
ty_pat = has_dtype("float32")
assert ty_pat.match(x)
def test_no_match_dtype():
x = relay.var("x", shape=(10, 10), dtype="int32")
ty_pat = has_dtype("float32")
assert not ty_pat.match(x)
def test_match_shape():
x = relay.var("x", shape=(10, 10), dtype="float32")
ty_pat = has_shape((10, 10))
assert ty_pat.match(x)
def test_no_match_shape():
x = relay.var("x", shape=(10, 10), dtype="int32")
ty_pat = has_shape((10, 5))
assert not ty_pat.match(x)
def test_match_op_attr():
op = is_op("add").has_attr({"TOpPattern": K_BROADCAST})
op_pat = op(wildcard(), wildcard())
x = relay.var("x")
y = relay.var("y")
assert op_pat.match(x + y)
def test_no_match_op_attr():
op = is_op("nn.dense").has_attr({"TOpPattern": K_ELEMWISE})
op_pat = op(wildcard(), wildcard())
x = relay.var("x")
y = relay.var("y")
assert not op_pat.match(relay.op.nn.dense(x, y))
op = is_op("add").has_attr({"TOpPattern": K_BROADCAST})
op_pat = op(wildcard(), wildcard())
x = relay.var("x")
y = relay.var("y")
assert not op_pat.match(x - y)
def test_match_func_attr():
pattern = wildcard().has_attr({"Composite": "add"})
x = relay.var("x")
y = relay.var("y")
f = relay.Function([x, y], x + y).with_attr("Composite", "add")
assert pattern.match(f)
def test_no_match_func_attr():
pattern = wildcard().has_attr({"Composite": "add"})
x = relay.var("x")
y = relay.var("y")
f = relay.Function([x, y], x + y).with_attr("RandomTest", "add")
assert not pattern.match(f)
f = relay.Function([x, y], x + y).with_attr("Composite", "conv_bias")
assert not pattern.match(f)
def test_match_call_attr():
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard()).has_attr({"data_layout": "NCHW"})
x = relay.var("x")
y = relay.var("y")
assert is_conv2d.match(relay.op.nn.conv2d(x, y))
def test_no_match_call_attr():
x = relay.var("x")
y = relay.var("y")
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard()).has_attr({"data_layout": "NHWC"})
assert not is_conv2d.match(relay.op.nn.conv2d(x, y))
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard()).has_attr({"RandomAttr": "NCHW"})
assert not is_conv2d.match(relay.op.nn.conv2d(x, y))
def test_match_diamond():
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
path1 = is_op("nn.relu")(is_conv2d)
path2 = is_op("nn.leaky_relu")(is_conv2d)
diamond = is_op("add")(path1, path2)
# Expr
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
# Check
assert diamond.match(out)
def test_no_match_diamond():
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
path1 = is_op("nn.relu")(is_conv2d)
path2 = is_op("nn.leaky_relu")(is_conv2d)
diamond = is_op("add")(path1, path2)
# Expr
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
# Check
assert not diamond.match(leaky_relu)
assert not diamond.match(relu)
def test_match_fake_diamond():
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
path1 = is_op("nn.relu")(is_conv2d)
path2 = is_op("nn.leaky_relu")(is_conv2d)
diamond = is_op("add")(path1, path2)
# Expr
input1 = relay.var("input1")
weight1 = relay.var("weight1")
conv2d1 = relay.op.nn.conv2d(input1, weight1)
inp2 = relay.var("input2")
weight2 = relay.var("weight2")
conv2d2 = relay.op.nn.conv2d(inp2, weight2)
relu = relay.op.nn.relu(conv2d1)
leaky_relu = relay.op.nn.leaky_relu(conv2d2, alpha=0)
out = relu + leaky_relu
# Check
assert not diamond.match(out)
def test_match_dominator():
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard())
reduction = is_op("add")(wildcard(), wildcard())
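    # dominates(parent, path, child): matches when every data-flow path from the conv2d
    # (parent) to the final add (child) passes only through ops matching the fuzzy
    # elemwise path pattern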
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
# Classic Diamond
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
# Check
assert diamond.match(out)
# Deeper Branch
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
relu = relay.op.tanh(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
# Check
assert diamond.match(out)
# Single Branch
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
tanh = relay.op.tanh(relu)
out = relu + tanh
# Check
assert diamond.match(out)
# Fuzzy path/nested Diamond
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()) | is_op(
"add"
)(wildcard(), wildcard())
reduction = is_op("add")(wildcard(), wildcard())
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relu + relu
tanh = relay.op.tanh(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = tanh + leaky_relu
assert diamond.match(out)
def test_not_match_dominator():
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard())
reduction = is_op("add")(wildcard(), wildcard())
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
# Fake Diamond
input1 = relay.var("input1")
weight1 = relay.var("weight1")
conv2d1 = relay.op.nn.conv2d(input1, weight1)
inp2 = relay.var("input2")
weight2 = relay.var("weight2")
conv2d2 = relay.op.nn.conv2d(inp2, weight2)
relu = relay.op.nn.relu(conv2d1)
leaky_relu = relay.op.nn.leaky_relu(conv2d2, alpha=0)
out = relu + leaky_relu
# Check
assert not diamond.match(out)
# Add op that doesn't match K_ELEMWISE
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relu + relu
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
# Check
assert not diamond.match(out)
# Relu on the input instead of the conv
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(inp)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
# Check
assert not diamond.match(out)
# No conv
inp = relay.var("input")
relu = relay.op.nn.relu(inp)
relu = relay.op.nn.relu(relu)
tanh = relay.op.tanh(relu)
out = relu + tanh
# Check
assert not diamond.match(out)
def test_match_typed_dominator():
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()).has_dtype(
"float32"
)
reduction = is_op("add")(wildcard(), wildcard()).has_shape([1, 3, 10, 10])
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
# Classic Diamond
inp = relay.var("input", relay.TensorType((1, 3, 12, 12), "float32"))
weight = relay.var("weight", relay.TensorType((3, 3, 3, 3), "float32"))
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
# Check
assert diamond.match(out)
def test_no_match_typed_dominator():
# Classic Diamond
inp = relay.var("input", relay.TensorType((1, 3, 12, 12), "float32"))
weight = relay.var("weight", relay.TensorType((3, 3, 3, 3), "float32"))
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()).has_dtype(
"float32"
)
reduction = is_op("add")(wildcard(), wildcard()).has_shape([1, 1, 10, 10])
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
# Check
assert not diamond.match(out)
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()).has_dtype(
"float16"
)
reduction = is_op("add")(wildcard(), wildcard()).has_shape([1, 3, 10, 10])
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
# Check
assert not diamond.match(out)
def test_rewrite():
x = relay.var("x")
y = relay.var("y")
add_pattern = is_op("add")(wildcard(), wildcard())
sub_pattern = is_op("subtract")(wildcard(), wildcard())
class TestRewrite(DFPatternCallback):
def __init__(self):
super(TestRewrite, self).__init__()
self.pattern = add_pattern
def callback(self, pre, post, node_map):
return post.args[0] - post.args[1]
out = rewrite(TestRewrite(), x + y)
assert sub_pattern.match(out)
def test_rewrite_func():
x = relay.var("x")
w = relay.var("w")
y = relay.var("y")
add_pattern = is_op("add")(wildcard(), wildcard())
sub_pattern = is_op("subtract")(wildcard(), wildcard())
class TestRewrite(DFPatternCallback):
def __init__(self):
super(TestRewrite, self).__init__()
self.pattern = add_pattern
def callback(self, pre, post, node_map):
return post.args[0] - post.args[1]
inpf = relay.var("input")
weightf = relay.var("weight")
func = relay.Function(
[inpf, weightf], relay.op.nn.relu(relay.op.nn.conv2d(inpf, weightf)), attrs=None
)
out = rewrite(TestRewrite(), func(x, w) + y)
assert sub_pattern.match(out)
def test_nested_rewrite():
class PatternCallback(DFPatternCallback):
def __init__(self, pattern):
super(PatternCallback, self).__init__()
self.pattern = pattern
def callback(self, pre, post, node_map):
return post
def gen():
x = relay.var("x")
y = relay.var("y")
y_add = relay.add(y, y)
n0 = relay.add(x, y_add)
n1 = relay.add(x, n0)
return relay.add(n1, n0)
def pattern():
a = wildcard()
b = wildcard()
n0 = is_op("add")(a, b)
n1 = is_op("add")(n0, a)
return is_op("add")(n0, n1)
out = gen()
pat = pattern()
new_out = rewrite(PatternCallback(pat), out)
assert tvm.ir.structural_equal(out, new_out)
def test_not_fuse_multi_diamond():
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
path1 = is_op("nn.relu")(is_conv2d)
path2 = is_op("nn.leaky_relu")(is_conv2d)
diamond = is_op("add")(path1, path2)
# Expr
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
out = out + conv2d
# Check
assert not diamond.match(out)
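# BatchnormCallback recognizes a batch norm written out as elementwise ops,
#   gamma * (x - mean) / sqrt(var + eps) + beta,
# and rewrites it into a single relay.op.nn.batch_norm call.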
class BatchnormCallback(DFPatternCallback):
def __init__(self):
super(BatchnormCallback, self).__init__()
self.x = wildcard()
self.var = wildcard()
self.mean = wildcard()
self.beta = wildcard()
self.gamma = wildcard()
self.eps = is_constant()
self.pattern = (
self.gamma * (self.x - self.mean) / is_op("sqrt")(self.var + self.eps) + self.beta
)
def callback(self, pre, post, node_map):
x = node_map[self.x][0]
var = node_map[self.var][0]
mean = node_map[self.mean][0]
beta = node_map[self.beta][0]
gamma = node_map[self.gamma][0]
eps = node_map[self.eps][0]
return relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=eps.data.asnumpy().item())[
0
]
def test_fuse_batchnorm():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
BN = gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5)) + beta
out = rewrite(BatchnormCallback(), BN)
assert tvm.ir.structural_equal(
out, relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
)
def test_no_fuse_batchnorm():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
fake_BN = gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5)) - beta
out = rewrite(BatchnormCallback(), fake_BN)
assert tvm.ir.structural_equal(out, fake_BN)
def test_fuse_double_batchnorm():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
BN = gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5)) + beta
BN2 = gamma * (BN - mean) / relay.op.sqrt(var + relay.const(1e-5)) + beta
out = rewrite(BatchnormCallback(), BN2)
bn = relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
bn2 = relay.op.nn.batch_norm(bn, gamma, beta, mean, var, epsilon=1e-5)[0]
assert tvm.ir.structural_equal(out, bn2)
def test_partial_fuse_double_batchnorm():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
BN = gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5)) - beta
BN2 = gamma * (BN - mean) / relay.op.sqrt(var + relay.const(1e-5)) + beta
out = rewrite(BatchnormCallback(), BN2)
bn2 = relay.op.nn.batch_norm(BN, gamma, beta, mean, var, epsilon=1e-5)[0]
assert tvm.ir.structural_equal(out, bn2)
def test_fuse_batchnorm_commutation():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
# commute add
BN = beta + gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5))
out = rewrite(BatchnormCallback(), BN)
assert tvm.ir.structural_equal(
out, relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
)
# associate divide/multiply
BN = (gamma * (x - mean)) / relay.op.sqrt(var + relay.const(1e-5)) + beta
out = rewrite(BatchnormCallback(), BN)
assert tvm.ir.structural_equal(
out, relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
)
# associate multiply/divide
BN = gamma * ((x - mean) / relay.op.sqrt(var + relay.const(1e-5))) + beta
out = rewrite(BatchnormCallback(), BN)
assert tvm.ir.structural_equal(
out, relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
)
def test_quadruple_rewrite_dominator():
class DominatorRemovalCallback(DFPatternCallback):
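        # Each matched dominator region (conv2d feeding fuzzy elemwise ops into an add)
        # is rewritten to a bare conv2d, so the four stacked diamonds built below
        # collapse into a chain of four conv2d calls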
def __init__(self):
super(DominatorRemovalCallback, self).__init__()
self.inp = wildcard()
self.weight = wildcard()
is_conv2d = is_op("nn.conv2d")(self.inp, self.weight)
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(
wildcard()
) | is_op("add")(wildcard(), wildcard())
reduction = is_op("add")(wildcard(), wildcard())
self.pattern = dominates(is_conv2d, is_unary_elemwise, reduction)
def callback(self, pre, post, node_map):
inp = node_map[self.inp][0]
weight = node_map[self.weight][0]
return relay.op.nn.conv2d(inp, weight)
inp = relay.var("input")
weight = relay.var("weight")
# Classic Diamond
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
# Deeper Branch
conv2d = relay.op.nn.conv2d(out, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
relu = relay.op.tanh(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
# Single Branch
conv2d = relay.op.nn.conv2d(out, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
tanh = relay.op.tanh(relu)
out = relu + tanh
# Fuzzy path/nested Diamond
conv2d = relay.op.nn.conv2d(out, weight)
relu = relay.op.nn.relu(conv2d)
relu = relu + relu
tanh = relay.op.tanh(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = tanh + leaky_relu
one = relay.op.nn.conv2d(inp, weight)
two = relay.op.nn.conv2d(one, weight)
three = relay.op.nn.conv2d(two, weight)
four = relay.op.nn.conv2d(three, weight)
assert tvm.ir.structural_equal(DominatorRemovalCallback().rewrite(out), four)
def algebraic_simplify(expr):
zero = is_expr(relay.const(0)) | is_expr(relay.const(0.0))
one = is_expr(relay.const(1)) | is_expr(relay.const(1.0))
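    # The callbacks below rewrite the identities x + 0 -> x, x - 0 -> x, x * 1 -> x,
    # x / 1 -> x, 0 * x -> 0, and 0 / x -> 0 (for both integer and float constants)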
class ElwiseNullCallback(DFPatternCallback):
def callback(self, pre, post, node_map):
return node_map[self.x][0] # pylint: disable=no-member
class AddCallback(ElwiseNullCallback):
def __init__(self):
super(AddCallback, self).__init__()
self.x = wildcard()
self.pattern = self.x + zero
class SubCallback(ElwiseNullCallback):
def __init__(self):
super(SubCallback, self).__init__()
self.x = wildcard()
self.pattern = self.x - zero
class MulCallback(ElwiseNullCallback):
def __init__(self):
super(MulCallback, self).__init__()
self.x = wildcard()
self.pattern = self.x * one
class DivCallback(ElwiseNullCallback):
def __init__(self):
super(DivCallback, self).__init__()
self.x = wildcard()
self.pattern = self.x / one
class MulZeroCallback(ElwiseNullCallback):
def __init__(self):
super(MulZeroCallback, self).__init__()
self.x = zero
self.pattern = self.x * wildcard()
class ZeroDivCallback(ElwiseNullCallback):
def __init__(self):
super(ZeroDivCallback, self).__init__()
self.x = zero
self.pattern = self.x / wildcard()
return rewrite(
[
AddCallback(),
SubCallback(),
MulCallback(),
DivCallback(),
MulZeroCallback(),
ZeroDivCallback(),
],
expr,
)
def test_algebraic_simplify():
x = relay.Var("x")
y = relay.Var("y")
one = relay.const(1)
zero = relay.const(0)
onef = relay.const(1.0)
zerof = relay.const(0.0)
assert algebraic_simplify(x + zero) == x
assert algebraic_simplify(x + zerof) == x
assert algebraic_simplify(zero + x) == x
assert algebraic_simplify(zerof + x) == x
assert algebraic_simplify(x - zero) == x
assert algebraic_simplify(x - zerof) == x
assert algebraic_simplify(x * one) == x
assert algebraic_simplify(x * onef) == x
assert algebraic_simplify(one * x) == x
assert algebraic_simplify(onef * x) == x
assert algebraic_simplify(x * zero) == zero
assert algebraic_simplify(x * zerof) == zerof
assert algebraic_simplify(x / one) == x
assert algebraic_simplify(x / onef) == x
assert algebraic_simplify(zero / x) == zero
assert algebraic_simplify(zerof / x) == zerof
assert tvm.ir.structural_equal(
algebraic_simplify((x + zero * y) / one + (y * one) - zero / x), x + y
)
def test_double_partition():
# Pattern 1
conv2d_p = is_op("nn.conv2d")(wildcard(), wildcard())
bias_add_p = is_op("nn.bias_add")(conv2d_p, wildcard())
relu_p = is_op("nn.relu")(bias_add_p)
# Graph
x = relay.var("input")
w = relay.var("weight")
b = relay.var("bias")
w2 = relay.var("weight")
b2 = relay.var("bias")
conv2d = relay.op.nn.conv2d(x, w)
bias_add = relay.op.nn.bias_add(conv2d, b)
relu = relay.op.nn.relu(bias_add)
conv2d2 = relay.op.nn.conv2d(relu, w2)
bias_add2 = relay.op.nn.bias_add(conv2d2, b2)
partitioned = bias_add2
for pat, label in [(relu_p, "conv_bias_relu"), (bias_add_p, "conv_bias")]:
partitioned = pat.partition(partitioned, {"Composite": label})
inpf = relay.var("input")
weightf = relay.var("weight")
biasf = relay.var("bias")
func0 = (
relay.Function(
[inpf, weightf, biasf],
relay.op.nn.relu(relay.op.nn.bias_add(relay.op.nn.conv2d(inpf, weightf), biasf)),
)
.with_attr("Composite", "conv_bias_relu")
.with_attr("PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_")
)
inpf = relay.var("input")
weightf = relay.var("weight")
biasf = relay.var("bias")
func1 = (
relay.Function(
[inpf, weightf, biasf], relay.op.nn.bias_add(relay.op.nn.conv2d(inpf, weightf), biasf)
)
.with_attr("Composite", "conv_bias")
.with_attr("PartitionedFromPattern", "nn.conv2d_nn.bias_add_")
)
expected = func1(func0(x, w, b), w2, b2)
assert tvm.ir.structural_equal(partitioned, expected)
def test_partition_dominator():
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard())
reduction = is_op("add")(wildcard(), wildcard())
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
# Classic Diamond
inp = relay.var("input")
weight = relay.var("weight")
def generate_diamond(inp, weight):
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
return relu + leaky_relu
out = generate_diamond(inp * inp, weight * weight)
# Check
partitioned = diamond.partition(out)
i = relay.Var("input")
w = relay.Var("weight")
f = relay.Function([i, w], generate_diamond(i, w)).with_attr(
"PartitionedFromPattern", "nn.conv2d_nn.relu_nn.relu_nn.leaky_relu_add_"
)
assert tvm.ir.structural_equal(partitioned, f(inp * inp, weight * weight))
def test_quadruple_partition_dominator():
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()) | is_op(
"add"
)(wildcard(), wildcard())
reduction = is_op("add")(wildcard(), wildcard())
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
inp = relay.var("input")
weight = relay.var("weight")
# Classic Diamond
def classic_diamond(inp, weight):
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
return relu + leaky_relu
# Deeper Branch
def deeper_diamond(inp, weight):
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
relu = relay.op.tanh(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
return relu + leaky_relu
# Single Branch
def single_branch(inp, weight):
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
tanh = relay.op.tanh(relu)
return relu + tanh
# Fuzzy path/nested Diamond
def nested_diamond(inp, weight):
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relu + relu
tanh = relay.op.tanh(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
return tanh + leaky_relu
partitioned = diamond.partition(
nested_diamond(
single_branch(deeper_diamond(classic_diamond(inp, weight), weight), weight), weight
)
)
functions = []
partition_names = [
"nn.conv2d_nn.relu_nn.relu_nn.leaky_relu_add_",
"nn.conv2d_nn.relu_nn.relu_tanh_nn.leaky_relu_add_",
"nn.conv2d_nn.relu_nn.relu_tanh_add_",
"nn.conv2d_nn.relu_add_tanh_nn.leaky_relu_add_",
]
for i, f in enumerate([classic_diamond, deeper_diamond, single_branch, nested_diamond]):
inpf = relay.var("input")
weightf = relay.var("weight")
functions.append(
relay.Function([inpf, weightf], f(inpf, weightf)).with_attr(
"PartitionedFromPattern", partition_names[i]
)
)
reference = functions[3](
functions[2](functions[1](functions[0](inp, weight), weight), weight), weight
)
assert tvm.ir.structural_equal(partitioned, reference)
def get_BN(x, var, mean, beta, gamma, eps):
return gamma * (x - mean) / relay.op.sqrt(var + eps) + beta
def test_partition_batchnorm():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
eps = relay.const(1e-5)
BN = get_BN(x, var, mean, beta, gamma, eps)
xf = relay.var("xf")
varf = relay.var("varf")
meanf = relay.var("meanf")
betaf = relay.var("betaf")
gammaf = relay.var("gammaf")
    # Put the arguments in topological order for the reference
f = relay.Function(
[gammaf, xf, meanf, varf, betaf], get_BN(xf, varf, meanf, betaf, gammaf, eps)
).with_attr("PartitionedFromPattern", "subtract_multiply_add_sqrt_divide_add_")
partitioned = BatchnormCallback().pattern.partition(BN)
reference = f(gamma, x, mean, var, beta)
assert tvm.ir.structural_equal(partitioned, reference)
def test_partition_double_batchnorm():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
eps = relay.const(1e-5)
BN = gamma * (x - mean) / relay.op.sqrt(var + eps) + beta
BN2 = gamma * (BN - mean) / relay.op.sqrt(var + eps) + beta
xf = relay.var("xf")
varf = relay.var("varf")
meanf = relay.var("meanf")
betaf = relay.var("betaf")
gammaf = relay.var("gammaf")
f1 = relay.Function(
[gammaf, xf, meanf, varf, betaf], get_BN(xf, varf, meanf, betaf, gammaf, eps)
).with_attr("PartitionedFromPattern", "subtract_multiply_add_sqrt_divide_add_")
# The partitioner doesn't replace duplicates, so we use two copies of the function
xf2 = relay.var("xf2")
varf2 = relay.var("varf2")
meanf2 = relay.var("meanf2")
betaf2 = relay.var("betaf2")
gammaf2 = relay.var("gammaf2")
f2 = relay.Function(
[gammaf2, xf2, meanf2, varf2, betaf2], get_BN(xf2, varf2, meanf2, betaf2, gammaf2, eps)
).with_attr("PartitionedFromPattern", "subtract_multiply_add_sqrt_divide_add_")
partitioned = BatchnormCallback().pattern.partition(BN2)
reference = f2(gamma, f1(gamma, x, mean, var, beta), mean, var, beta)
assert tvm.ir.structural_equal(partitioned, reference)
def test_overlapping_partitions():
x = wildcard()
gamma = wildcard()
beta = wildcard()
moving_mean = wildcard()
moving_var = wildcard()
bn_node = is_op("nn.batch_norm")(x, gamma, beta, moving_mean, moving_var)
tuple_get_item_node = TupleGetItemPattern(bn_node, 0)
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
BN = relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)
T1 = BN[0]
T2 = BN[0]
add = T1 + T2
assert tuple_get_item_node.partition(add) == add
def test_partition_overused():
pattern = is_op("nn.relu")(is_op("nn.conv2d")(wildcard(), wildcard()))
x = relay.var("input")
w = relay.var("weight")
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.nn.relu(conv2d)
out = relu + conv2d
assert pattern.partition(out) == out
def test_partition_check():
pattern = is_op("nn.relu")(is_op("nn.conv2d")(is_var("input"), wildcard()))
def check(pre):
return pre.args[0].attrs.data_layout == "NCHW"
x = relay.var("input")
w = relay.var("weight")
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.nn.relu(conv2d)
xf = relay.var("input")
wf = relay.var("weight")
conv2df = relay.op.nn.conv2d(xf, wf)
reluf = relay.op.nn.relu(conv2df)
func = relay.Function([xf, wf], reluf).with_attr("PartitionedFromPattern", "nn.conv2d_nn.relu_")
reference = func(x, w)
partitioned = pattern.partition(relu, check=check)
assert tvm.ir.structural_equal(partitioned, reference)
conv2d = relay.op.nn.conv2d(x, w, data_layout="NHWC")
relu = relay.op.nn.relu(conv2d)
assert relu == pattern.partition(relu, check=check)
def test_partition_check_types():
pattern = is_op("nn.relu")(is_op("nn.conv2d")(wildcard(), wildcard()))
def check(pre):
conv = pre.args[0]
return (conv.attrs.data_layout == "NCHW") and bool(conv.checked_type.shape[0] == 1)
x = relay.var("input", shape=(1, 10, 10, 10))
w = relay.var("weight", shape=(10, 10, 3, 3))
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.nn.relu(conv2d)
relu = run_opt_pass(relu, relay.transform.InferType())
partitioned = pattern.partition(relu, check=check)
assert partitioned.op.attrs["PartitionedFromPattern"] == "nn.conv2d_nn.relu_"
conv2d = relay.op.nn.conv2d(x, w, data_layout="NHWC")
relu = relay.op.nn.relu(conv2d)
relu = run_opt_pass(relu, relay.transform.InferType())
assert relu == pattern.partition(relu, check=check)
x = relay.var("input", shape=(2, 10, 10, 10))
w = relay.var("weight", shape=(10, 10, 3, 3))
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.nn.relu(conv2d)
relu = run_opt_pass(relu, relay.transform.InferType())
assert relu == pattern.partition(relu, check=check)
def conv_bias_relu(x, w, b):
conv2d = relay.op.nn.conv2d(x, w)
bias_add = relay.op.nn.bias_add(conv2d, b)
relu = relay.op.nn.relu(bias_add)
return relu
def test_partition_option():
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
bias = conv2d.optional(lambda x: is_op("nn.bias_add")(x, wildcard()))
pattern1 = is_op("nn.relu")(bias)
conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
bias = is_op("nn.bias_add")(conv2d, wildcard())
pattern2 = bias.optional(lambda x: is_op("nn.relu")(x))
relu = conv_bias_relu(x, w, b)
xf = relay.var("x")
wf = relay.var("w")
bf = relay.var("b")
func = relay.Function([xf, wf, bf], conv_bias_relu(xf, wf, bf)).with_attr(
"PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_"
)
assert pattern1.match(relu)
assert tvm.ir.structural_equal(func(x, w, b), pattern1.partition(relu))
assert pattern2.match(relu)
assert tvm.ir.structural_equal(func(x, w, b), pattern2.partition(relu))
def test_partition_function():
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
x1 = relay.var("x1")
w1 = relay.var("w1")
wc_x = wildcard()
wc_w = wildcard()
wc_b = wildcard()
wc_x1 = wildcard()
wc_w1 = wildcard()
func_pattern = FunctionPattern([wc_x1, wc_w1], is_op("nn.conv2d")(wc_x1, wc_w1))
pattern = func_pattern(wc_x, wc_w) + wc_b
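    # The pattern matches a call to any two-argument function whose body is a conv2d,
    # followed by an add of the call result; partitioning wraps that region in a new function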
func = relay.Function([x1, w1], relay.nn.conv2d(x1, w1))
expr = func(x, w) + b + b
x2 = relay.var("x2")
w2 = relay.var("w2")
b2 = relay.var("b2")
func2 = relay.Function([x2, w2, b2], func(x2, w2) + b2).with_attr(
"PartitionedFromPattern", "nn.conv2d_FunctionCall_add_"
)
expr2 = func2(x, w, b) + b
assert tvm.ir.structural_equal(pattern.partition(expr), expr2)
def test_match_match():
add_pattern = is_op("add")(wildcard(), wildcard())
class TestRewrite(DFPatternCallback):
def __init__(self):
super(TestRewrite, self).__init__()
self.pattern = add_pattern
def callback(self, pre, post, node_map):
return post.args[0] - post.args[1]
mod = tvm.IRModule({})
tvm.relay.prelude.Prelude(mod)
# Apply rewrite on IR including relay.Match
out = rewrite(TestRewrite(), mod["tensor_concatenate_int64"])
assert tvm.ir.structural_equal(mod["tensor_concatenate_int64"], out)
def test_partition_constant_embedding():
x = relay.var("x")
w = relay.var("w")
wc = relay.const(1)
b = relay.var("b")
xf = relay.var("x")
wf = relay.var("w")
bf = relay.var("b")
embeded_func = relay.Function([xf, bf], conv_bias_relu(xf, wc, bf)).with_attr(
"PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_"
)
xf = relay.var("x")
wf = relay.var("w")
bf = relay.var("b")
lifted_func = relay.Function([xf, wf, bf], conv_bias_relu(xf, wf, bf)).with_attr(
"PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_"
)
relu = conv_bias_relu(x, w, b)
reluc = conv_bias_relu(x, wc, b)
# Check lifting of wildcard matches
pattern = is_op("nn.relu")(
is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), wildcard()), wildcard())
)
assert tvm.ir.structural_equal(lifted_func(x, w, b), pattern.partition(relu))
assert tvm.ir.structural_equal(lifted_func(x, wc, b), pattern.partition(reluc))
# Check lifting of input matches
pattern = is_op("nn.relu")(
is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), is_var()), wildcard())
)
assert tvm.ir.structural_equal(lifted_func(x, w, b), pattern.partition(relu))
assert tvm.ir.structural_equal(reluc, pattern.partition(reluc)) # Constants are not Inputs
# Check embedding of constant matches
pattern = is_op("nn.relu")(
is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), is_constant()), wildcard())
)
assert tvm.ir.structural_equal(relu, pattern.partition(relu))
assert tvm.ir.structural_equal(embeded_func(x, b), pattern.partition(reluc))
# Check embedding of constant ExprPatterns
pattern = is_op("nn.relu")(
is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), is_expr(wc)), wildcard())
)
assert tvm.ir.structural_equal(relu, pattern.partition(relu))
assert tvm.ir.structural_equal(embeded_func(x, b), pattern.partition(reluc))
# Check lifting/embedding of Alt matches
pattern = is_op("nn.relu")(
is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), is_var() | is_constant()), wildcard())
)
assert tvm.ir.structural_equal(lifted_func(x, w, b), pattern.partition(relu))
assert tvm.ir.structural_equal(embeded_func(x, b), pattern.partition(reluc))
# Check lifting/embedding of Alt matches with the other ordering
pattern = is_op("nn.relu")(
is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), is_constant() | is_var()), wildcard())
)
assert tvm.ir.structural_equal(lifted_func(x, w, b), pattern.partition(relu))
assert tvm.ir.structural_equal(embeded_func(x, b), pattern.partition(reluc))
if __name__ == "__main__":
test_expr_pattern()
test_var_pattern()
test_constant_pattern()
test_wildcard_pattern()
test_CallPattern()
test_TuplePattern()
test_TupleGetItemPattern()
test_AltPattern()
test_TypePattern()
test_DataTypePattern()
test_ShapePattern()
test_AttrPattern()
test_match_op()
test_no_match_op()
test_match_op_or()
test_match_call_commutive()
test_no_match_call_commutive()
test_match_call()
test_no_match_call()
test_match_option()
test_no_match_option()
test_match_const()
test_match_tuple()
test_no_match_tuple()
test_match_type()
test_no_match_type()
test_match_dtype()
test_no_match_dtype()
test_match_shape()
test_no_match_shape()
test_match_op_attr()
test_no_match_op_attr()
test_match_func_attr()
test_no_match_func_attr()
test_match_call_attr()
test_no_match_call_attr()
test_match_diamond()
test_no_match_diamond()
test_match_fake_diamond()
test_match_dominator()
    test_not_match_dominator()
    test_match_typed_dominator()
    test_no_match_typed_dominator()
test_rewrite()
test_rewrite_func()
test_nested_rewrite()
test_not_fuse_multi_diamond()
test_fuse_batchnorm()
test_no_fuse_batchnorm()
test_fuse_double_batchnorm()
test_partial_fuse_double_batchnorm()
test_fuse_batchnorm_commutation()
test_quadruple_rewrite_dominator()
test_algebraic_simplify()
test_double_partition()
test_partition_dominator()
test_quadruple_partition_dominator()
test_partition_batchnorm()
    test_partition_double_batchnorm()
    test_overlapping_partitions()
    test_partition_overused()
test_partition_check()
test_partition_check_types()
    test_partition_option()
    test_partition_function()
test_match_match()
test_partition_constant_embedding()
|
[
"[email protected]"
] | |
323a9f902674ad4556e5501d6aa72e33fa93729c
|
6e33b919b56ac94c61ba2df328b544a09a6ce2ad
|
/simple_mine_f.py
|
c4983ce2e68a7e974c6c2d7f95c5886ec5653fe8
|
[
"MIT"
] |
permissive
|
ReyesDeJong/MINE
|
5cc51838fdc5a111c2dd07bb2354cd3d9def9b68
|
50f02110f8c2f4b5a11e1e37c31a4a9c271aa19c
|
refs/heads/master
| 2020-03-27T04:08:06.797991 | 2018-10-20T22:59:16 | 2018-10-20T22:59:16 | 145,914,435 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,803 |
py
|
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import time
import datetime
from grad_corrected_mine import GradMINE
class MINEf(GradMINE):
def __init__(self, params, name="mine_f"):
tf.reset_default_graph()
self.name = name
self.p = self._fix_to_default(params)
# Directories
self.model_path, self.ckpt_path, self.tb_path = self._create_directories()
# Input pipeline
self.iterator, self.inputs_x_ph, self.inputs_z_ph = self._iterator_init(self.p["batch_size"])
self.loss, self.train_step = self._build_graph(self.iterator, self.p["learning_rate"])
# Init
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
self.sess.run(tf.global_variables_initializer())
def train(self, inputs_x, inputs_z, max_it, stat_every):
        # Initialize the iterator with the training data
self.sess.run(self.iterator.initializer, feed_dict={self.inputs_x_ph: inputs_x, self.inputs_z_ph: inputs_z})
# Initialization of log and saver
saver = tf.train.Saver(keep_checkpoint_every_n_hours=1)
summ_writer = tf.summary.FileWriter(self.tb_path, self.sess.graph)
merged = tf.summary.merge_all()
start_time = time.time()
print("Beginning training of " + self.name)
for it in range(1, max_it + 1):
_, train_loss, summ = self.sess.run([self.train_step, self.loss, merged])
if it % stat_every == 0:
elapsed_time = time.time() - start_time
print("Iteration %i / %i: loss %f -- elapsed time %f [s]" % (it, max_it, train_loss, elapsed_time),
flush=True)
summ_writer.add_summary(summ, it)
save_path = saver.save(self.sess, self.ckpt_path, global_step=max_it)
print("Model saved to: %s" % save_path)
def _fix_to_default(self, params):
if "batch_size" not in params:
params["batch_size"] = 256
if "learning_rate" not in params:
params["input_dim"] = 1e-4
if "input_dim" not in params:
raise AttributeError("Dimensions of input needed")
if "ema_decay" not in params:
params["ema_decay"] = 0.999
return params
def _create_directories(self):
date = datetime.datetime.now().strftime("%Y%m%d")
self.model_path = "results/" + self.name + "_" + date + "/"
self.ckpt_path = self.model_path + "ckpt/model"
self.tb_path = self.model_path + "tb_summ/"
        # Delete previous content of the tensorboard logs
if tf.gfile.Exists(self.tb_path):
tf.gfile.DeleteRecursively(self.tb_path)
return self.model_path, self.ckpt_path, self.tb_path
def _iterator_init(self, batch_size):
with tf.device('/cpu:0'):
with tf.name_scope("input"):
inputs_x_ph = tf.placeholder(dtype=tf.float32, shape=[None, self.p["input_dim"]])
inputs_z_ph = tf.placeholder(dtype=tf.float32, shape=[None, self.p["input_dim"]])
# Dataset
dataset = tf.data.Dataset.from_tensor_slices((inputs_x_ph, inputs_z_ph))
dataset = dataset.repeat()
dataset = dataset.shuffle(buffer_size=5000)
dataset = dataset.batch(batch_size=batch_size)
dataset = dataset.prefetch(buffer_size=4)
# Iterator
iterator = dataset.make_initializable_iterator()
return iterator, inputs_x_ph, inputs_z_ph
def _build_graph(self, iterator, lr):
x_it, z_it = iterator.get_next()
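        # The second get_next() draws an independent shuffled batch, so z_hat serves as a
        # sample from the product of the marginals rather than from the joint distribution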
_, z_hat_it = iterator.get_next()
# Inputs
self.x = tf.placeholder_with_default(x_it, shape=[None, self.p["input_dim"]], name="x")
self.z = tf.placeholder_with_default(z_it, shape=[None, self.p["input_dim"]], name="z")
self.z_hat = tf.placeholder_with_default(z_hat_it, shape=[None, self.p["input_dim"]], name="z_hat")
# Model
with tf.name_scope("stat_net_t"):
out_t = self._stat_net(self.x, self.z)
with tf.name_scope("stat_net_t_prime"):
out_t_prime = self._stat_net(self.x, self.z_hat, reuse=True)
tf.summary.histogram("out_t", out_t)
tf.summary.histogram("out_t_prime", out_t_prime)
loss, self.term_1, self.term_2 = self._loss_init(out_t, out_t_prime)
train_step = self._optimizer_init(loss, lr)
return loss, train_step
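    # MINE-f objective: the f-divergence lower bound I(X;Z) >= E_joint[T] - E_marginal[exp(T - 1)]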
def _loss_init(self, out_t, out_t_prime):
with tf.name_scope("loss"):
term_1 = tf.reduce_mean(out_t)
term_2 = tf.reduce_mean(tf.exp(out_t_prime - 1))
loss = term_1 - term_2
tf.summary.scalar("term_1", term_1)
tf.summary.scalar("term_2", term_2)
tf.summary.scalar("mine_loss", loss)
return loss, term_1, term_2
def _optimizer_init(self, loss, lr):
with tf.name_scope("optimizer"):
self.stat_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="stat_net")
self.optimizer = tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-6)
train_step = self.optimizer.minimize(-loss, var_list=self.stat_vars) # Maximization <=> Neg minimization
# corrected_gradients = self.gradient_bias_correction()
# train_step = self.optimizer.apply_gradients(zip(corrected_gradients, self.stat_vars)) #train_step = optimizer.minimize(-loss, var_list=stat_vars) # Maximization <=> Neg minimization
# train_step = self.optimizer.minimize(-loss, var_list=self.stat_vars) # Maximization <=> Neg minimization
return train_step
|
[
"[email protected]"
] | |
838a4c8d30bc51f929d302ad820dcf88dccc9bdc
|
5fde25a6683cf1fc71cf351c70145b67de5c2890
|
/python/common.py
|
7ea09674fb82eca895c8a28631199a636b487949
|
[] |
no_license
|
lymslive/expcode
|
e1a37a8aa7e2bf610088b2aa2665567ec97cb38e
|
43f06f1da3d71d6e8bed9f03b3e126c2ba2c0151
|
refs/heads/master
| 2021-06-21T04:23:58.633222 | 2019-07-30T10:04:01 | 2019-07-30T10:04:01 | 66,209,383 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 122 |
py
|
#! /bin/env python3
VERSION = '3.7'
def hello(str):
print('Hello ' + str)
if __name__ == '__main__':
hello('python')
|
[
"[email protected]"
] | |
705628d355732a3aba43be82e8cdeb9a979abf95
|
1282705c88423f06d9d8dfd946759149866a3ef5
|
/recommender.py
|
b545bc3c40cc0a12834fe6debf2e456b2c8364e8
|
[] |
no_license
|
zlq54321/recommend_system
|
03f4b6bce5a47b4ada1f2ed42d884366e6bd2138
|
5f18fb162da4f250ecfc61c48001ba4c253ebfaa
|
refs/heads/master
| 2020-11-26T22:16:07.169278 | 2019-12-20T08:04:47 | 2019-12-20T08:04:47 | 229,215,834 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,751 |
py
|
import pickle
import tensorflow as tf
import numpy as np
load_dir = './save/model'
title_count, title_set, genres2int, features, targets_values, ratings, users, \
movies, data, movies_orig, users_orig = pickle.load(open('preprocess.p', mode='rb'))
title_col = movies['Title']
len_col = np.array([len(x) for x in title_col], dtype=np.int32)
movies['Title_size'] = len_col
# users_matrics = pickle.load(open('users_matrics.p', mode='rb'))
movie_matrics = pickle.load(open('movie_matrics.p', mode='rb'))
movieid2idx = {val[0]: i for i, val in enumerate(movies.values)}
def recommend_same_type_movie(movie_id_val, top_k=20):
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# load model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
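        # L2-normalize the movie embeddings; the matmul below then produces scores
        # proportional to the cosine similarity between the query movie and every movie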
norm_movie_matrics = tf.sqrt(tf.reduce_sum(tf.square(movie_matrics), 1, keep_dims=True))
normalized_movie_matrics = movie_matrics / norm_movie_matrics
        # Recommend similar movies
probs_embeddings = (movie_matrics[movieid2idx[movie_id_val]]).reshape([1, 200])
probs_similarity = tf.matmul(probs_embeddings, tf.transpose(normalized_movie_matrics))
sim = probs_similarity.eval()
print("您看的电影是:{}".format(movies_orig[movieid2idx[movie_id_val]]))
print("以下是给您的推荐:")
p = np.squeeze(sim)
p[np.argsort(p)[:-top_k]] = 0
p = p / np.sum(p)
results = set()
while len(results) != 5:
c = np.random.choice(3883, 1, p=p)[0]
results.add(c)
for val in results:
print(val)
print(movies_orig[val])
return results
recommend_same_type_movie(1401, 20)
|
[
"[email protected]"
] | |
2f1c6182fa0804fc0d4d1db18e26043816332d5e
|
55c7a20dfd2b6aa03bdfc53890519d44a7e7d399
|
/tweet/models.py
|
03533b17c9292305ae7042a6535bc9f361057598
|
[] |
no_license
|
shijuqb/cmtest
|
4ff78e1fb031fb45168d0109aee4e57426fab46f
|
bb1b15cb807476ca15013e58d947c6b44ba9bb23
|
refs/heads/master
| 2021-01-25T05:22:48.038160 | 2013-04-02T06:05:20 | 2013-04-02T06:05:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 874 |
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class TwitterProfile(models.Model):
USER_TYPE = (
('1', 'editor'),
('2', 'author')
)
"""holds additional fields of a user"""
user = models.OneToOneField(User, unique=True, related_name='twitter_profile')
level = models.CharField(max_length=20, choices=USER_TYPE, blank=False, null=False)
def __unicode__(self):
return self.user.username
class Tweet(models.Model):
tweet_id = models.IntegerField(blank=True, null=True)
content = models.CharField(max_length=140)
created_by = models.ForeignKey(User, related_name='tweets')
created_at = models.DateTimeField('Created At', auto_now_add=True)
is_dirty = models.BooleanField(default=False)
def __unicode__(self):
return self.content[:20]+"..."
|
[
"shiju@shiju-desktop"
] |
shiju@shiju-desktop
|
18d41e510e6cab64ca3b008c8058c0880ca3ff05
|
da64994d73d250d19a30381de7462c5729372f81
|
/apps/trainingClass/models/tuitionmodel.py
|
68d65fed22d12da00ba68e01232acb2bcfb7699f
|
[] |
no_license
|
Mid0Riii/psybackend
|
2f872c1dd21e97ba0a46efa10f2b3246ac8bb2b5
|
2cd477f01111a816b17725a00ffa77a156dec7b0
|
refs/heads/master
| 2023-03-26T07:55:17.580161 | 2021-03-14T01:45:19 | 2021-03-14T01:45:19 | 305,083,821 | 0 | 1 | null | 2021-03-14T01:45:20 | 2020-10-18T11:15:48 |
Python
|
UTF-8
|
Python
| false | false | 3,142 |
py
|
from django.db import models
from .classmodel import TrainClass
from .trainingclassmodel import TrainBasic
from django.utils.html import format_html
class TrainTuition(models.Model):
class Meta:
verbose_name = '训练班交费信息'
verbose_name_plural = verbose_name
relate_class = models.ForeignKey(TrainClass, on_delete=models.CASCADE, verbose_name='班级', null=True, blank=True)
relate_trainingclass = models.OneToOneField(TrainBasic, on_delete=models.CASCADE, verbose_name='学号',
primary_key=True)
fee_train = models.CharField(max_length=128, verbose_name='培训费', blank=True, null=True, default='空')
fee_material = models.CharField(max_length=128, verbose_name='资料费', blank=True, null=True, default='空')
fee_date = models.CharField(max_length=128, verbose_name='缴费日期', blank=True, null=True, default='空')
fee_method = models.CharField(max_length=128, verbose_name='缴费方式', blank=True, null=True, default='空')
fee_id = models.CharField(max_length=128, verbose_name='收据号', blank=True, null=True, default='空')
fee_tax = models.CharField(max_length=128, verbose_name='发票号', blank=True, null=True, default='空')
fee_invoice_header = models.CharField(max_length=128, verbose_name='发票抬头', blank=True, null=True, default='空')
fee_invoice_id = models.CharField(max_length=128, verbose_name='发票机构代码', blank=True, null=True, default='空')
fee_invoice_date = models.CharField(max_length=128, verbose_name='发票开票日期', blank=True, null=True, default='空')
fee_invoice_inc = models.CharField(max_length=128, verbose_name='出票单位', blank=True, null=True, default='空')
fee_exam = models.CharField(max_length=128, verbose_name='考试费', blank=True, null=True, default='空')
fee_total = models.CharField(max_length=128, verbose_name='总费用', blank=True, null=True, default='空')
fee_exam_extra = models.CharField(max_length=128, verbose_name='补考费', blank=True, null=True, default='空')
fee_info = models.TextField(max_length=128, verbose_name='备注', blank=True, null=True, default='空')
    # TODO CODE REVIEW: the three model inheritance styles and custom methods
def get_tra_name(self):
info = self.relate_trainingclass.tra_name
if self.fee_date == '空':
color_code = 'red'
else:
color_code = 'black'
return format_html('<span style="color:{};">{}</span>', color_code, info)
get_tra_name.short_description = u'姓名'
get_tra_name.allow_tags = get_tra_name.is_column = True
def get_tra_class(self):
return self.relate_trainingclass.tra_class.class_name
get_tra_class.short_description = u'班级'
    get_tra_class.allow_tags = get_tra_class.is_column = True
def get_tra_num(self):
return self.relate_trainingclass.tra_number
get_tra_num.short_description = '学号'
    get_tra_num.allow_tags = get_tra_num.is_column = True
def __str__(self):
return str(self.relate_trainingclass.tra_name)
|
[
"[email protected]"
] | |
53a945c90a5f13d985d4f692365b5b5705914dcd
|
94b727c9dd370738d3c7c6a45bef183e44330fb9
|
/dojo.py
|
ad0b114bb6cf9a2171c5190e3fe10e99f169304c
|
[] |
no_license
|
kevinidias/Testes_Python
|
315c84106ce7649f74c2049167044a5eca4e57fe
|
6654d3827cb64706fd4a200423093334d58ecfef
|
refs/heads/master
| 2023-08-26T04:10:01.315708 | 2021-10-27T17:02:56 | 2021-10-27T17:02:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 497 |
py
|
"""
Como saber se um número é feliz ou triste?
1- Dado um número inteiro positivo
2- Substitua o número pela soma dos quadrados dos seus dígitos.
3- Se o resultado for 1, o número é feliz
4- Caso contrário, repita o processo indefinidamente.
"""
def happy(number):
next_ = sum(int(char) ** 2 for char in str(number))
return number in (1, 7) if number < 10 else happy(next_)
assert all([happy(n) for n in (1, 10, 100, 130, 97)])
assert not all(happy(n) for n in (2,3,4,5,6,8,9))
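# A hedged alternative sketch (not part of the original solution): the same happiness
# test written iteratively with cycle detection, so it needs no single-digit base case.
def happy_iterative(number):
    seen = set()
    while number != 1 and number not in seen:
        seen.add(number)
        number = sum(int(char) ** 2 for char in str(number))
    return number == 1
assert happy_iterative(19) and not happy_iterative(4)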
|
[
"[email protected]"
] | |
958bffbcef5c0c35574ec6229d4eb3360c9cde5e
|
9d9fcf401bb47ccaaa6c3fd3fe7a8be255762855
|
/libs/numpy/sort/argsort.py
|
2725c26fb628d43f78413d5fa7ac417f25fcd07d
|
[] |
no_license
|
hanhiver/PythonBasic
|
f05ef9fe713f69610860c63e5223317decee09ad
|
8e012855cce61fb53437758021416e5f6deb02ea
|
refs/heads/master
| 2022-10-11T22:57:47.931313 | 2020-12-30T12:32:44 | 2020-12-30T12:32:44 | 148,477,052 | 0 | 3 | null | 2022-10-01T05:35:03 | 2018-09-12T12:29:33 |
Python
|
UTF-8
|
Python
| false | false | 187 |
py
|
import numpy as np
a = np.random.randint(0, 10, (4, 5))
print(a, '\n')
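# argsort along axis 0 returns, for each column, the row indices that would sort it;
# indexing a with column 3's ordering reorders the rows so that column 3 is ascending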
index = np.argsort(a, axis=0)
print(index, '\n')
index_3 = index[..., 3]
print(index_3, '\n')
print(a[index_3])
|
[
"[email protected]"
] | |
43346fa0feeefd499bf34c8866c7fe30287cc8d3
|
d5fa9cd39f283642047f2657343e3db79b7726e8
|
/cya_server/models.py
|
187f71442df2ba244c3c5026a3443c958c718c6b
|
[] |
no_license
|
doanac/cya
|
6004d7d90bcf790e1357ab36da7f43140e1fa2eb
|
a623060dbf29b024592a68d023e135a546a81c0f
|
refs/heads/master
| 2021-01-21T04:55:23.529810 | 2016-06-30T03:39:29 | 2016-06-30T03:39:29 | 41,894,462 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,733 |
py
|
import datetime
import os
import random
import time
import string
from cya_server.settings import (
MODELS_DIR, CONTAINER_TYPES, CLIENT_SCRIPT)
from cya_server.simplemodels import (
Field, Model, ModelManager, ModelError, SecretField)
def client_version():
return str(os.stat(CLIENT_SCRIPT).st_mtime)
class ContainerMount(Model):
FIELDS = [
Field('type', data_type=str),
Field('source', data_type=str),
Field('directory', data_type=str),
]
class InitScript(Model):
FIELDS = [
Field('content', data_type=str),
]
class Container(Model):
FIELDS = [
Field('template', data_type=str, required=False),
Field('release', data_type=str, required=False),
Field('init_script', data_type=str, required=False),
Field('date_requested', int, required=False),
Field('date_created', int, required=False),
Field('max_memory', int, required=False),
Field('re_create', data_type=bool, def_value=False, required=False),
Field('state', data_type=str, def_value='UNKNOWN', required=False),
Field('keep_running', data_type=bool, def_value=True, required=False),
Field('ips', data_type=str, required=False),
Field('one_shot', data_type=bool, def_value=False, required=False),
Field('requested_by', data_type=str, required=False),
]
CHILDREN = [ContainerMount, InitScript]
@property
def requested_str(self):
v = self.date_requested
if v:
return datetime.datetime.fromtimestamp(v)
return '?'
@property
def created_str(self):
v = self.date_created
if v:
return datetime.datetime.fromtimestamp(v)
return '?'
def update(self, data):
if self.date_created and \
data.get('date_created', 0) > self.date_created:
data['re_create'] = False
return super(Container, self).update(data)
def _get_log_file(self, logname):
logdir = os.path.join(self._modeldir, 'logs')
if not os.path.exists(logdir):
os.mkdir(logdir)
return os.path.join(logdir, logname)
def append_log(self, logname, content):
with open(self._get_log_file(logname), 'a') as f:
f.write(content)
def get_log_names(self):
logdir = os.path.join(self._modeldir, 'logs')
if os.path.exists(logdir):
return os.listdir(logdir)
return []
def get_log(self, logname):
with open(self._get_log_file(logname)) as f:
return f.read()
def __repr__(self):
return self.name
@staticmethod
def validate_template_release(template, release):
releases = CONTAINER_TYPES.get(template)
if not releases:
raise KeyError('Invalid template type: %s' % template)
if release not in releases:
raise KeyError('Invalid release for template: %s' % release)
class Host(Model):
FIELDS = [
Field('distro_id', data_type=str),
Field('distro_release', data_type=str),
Field('distro_codename', data_type=str),
Field('mem_total', data_type=int),
Field('cpu_total', data_type=int),
Field('cpu_type', data_type=str),
Field('enlisted', data_type=bool, def_value=False, required=False),
Field('max_containers', data_type=int, def_value=0, required=False),
SecretField('api_key'),
]
CHILDREN = [
Container,
]
def __repr__(self):
return self.name
def get_container(self, name):
for c in self.containers:
if c.name == name:
return c
raise ModelError('Container not found: %s' % name, 404)
def _get_ping_file(self):
return os.path.join(self._modeldir, 'pings.log')
def ping(self):
with open(self._get_ping_file(), mode='a') as f:
f.write('%d\n' % time.time())
@property
def online(self):
"""Online means we've been "pinged" in the last 3 minutes."""
ping_file = self._get_ping_file()
if not os.path.exists(ping_file):
return False
now = time.time()
mtime = os.path.getmtime(self._get_ping_file())
return now - mtime < 180 # pinged in last 3 minutes
class User(Model):
FIELDS = [
Field('nickname', data_type=str),
Field('openid', data_type=str),
Field('approved', data_type=bool, def_value=False),
Field('admin', data_type=bool, def_value=False, required=False),
Field('api_key', data_type=str, required=False)
]
CHILDREN = [
InitScript,
]
class SharedStorage(Model):
FIELDS = [
Field('type', data_type=str),
Field('source', data_type=str)
]
class ContainerRequest(Container):
pass
hosts = ModelManager(MODELS_DIR, Host)
users = ModelManager(MODELS_DIR, User)
shared_storage = ModelManager(MODELS_DIR, SharedStorage)
container_requests = ModelManager(MODELS_DIR, ContainerRequest)
def _get_user_by_openid(openid):
for x in users.list():
x = users.get(x)
if x.openid == openid:
return x
return None
users.get_user_by_openid = _get_user_by_openid
def _generate_api_key():
chars = string.ascii_letters + string.digits + '!@#$%^&*~-+'
return ''.join(random.choice(chars) for _ in range(32))
users.generate_api_key = _generate_api_key
def _container_request_handle(host):
'''Dumb logic but find host with least number of containers.
It also honors allowing max_containers on a host.
'''
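    # Candidate hosts are ranked by fewest running containers (ties broken by larger memory);
    # this host claims the first pending request only if it is the top-ranked candidate,
    # moving the request into its own containers directory with an atomic os.rename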
if not host.enlisted or (host.max_containers and
host.max_containers <= host.containers.count()):
return # no point in checking
requests = list(container_requests.list())
if not requests:
return
candidates = []
for h in hosts.list():
h = hosts.get(h)
h.count_cache = h.containers.count()
if h.enlisted and h.online and (h.max_containers == 0 or
h.count_cache < h.max_containers):
candidates.append(h)
candidates = sorted(candidates, key=lambda x: -1 * x.mem_total)
candidates = sorted(candidates, key=lambda x: x.count_cache)
match = candidates and host.name == candidates[0].name
if match:
# use os.rename which is atomic
src = os.path.join(container_requests._model_dir, requests[0])
dst = os.path.join(host.containers._model_dir, requests[0])
try:
os.mkdir(host.containers._model_dir)
except FileExistsError:
pass
os.rename(src, dst)
container_requests.handle = _container_request_handle
|
[
"[email protected]"
] | |
385dc29e8a96a82daa9709d0c22d2c368662202c
|
be0d83dde6b499b60f36c14c961a125581f36a57
|
/preprocess_files/mv_img.py
|
592114d7036909b48ede59be9b6dcca5df06b54f
|
[] |
no_license
|
zhengziqiang/gan_learning
|
4acaf18f452fed0e2eeb0ddb45d861e9d10af835
|
d9ffb1c18e592715b62df684e23a362f8d07ac41
|
refs/heads/master
| 2021-01-01T13:35:28.696378 | 2017-10-29T13:42:51 | 2017-10-29T13:42:51 | 97,583,619 | 7 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 745 |
py
|
import os
import glob
# d={}
# for files in glob.glob('/home/zzq/research/windows_file/IIIT-CFW1.0/tmp/*.jpg'):
# filepath, filename = os.path.split(files)
# # print filename
# l=filename.split('.')
# # print l[0]
# my_namee=filter(str.isalpha, l[0])
# print my_namee
# if d.has_key(my_namee):
# d[my_namee]+=1
# else:
# d[my_namee]=1
# print d
dest='/home/zzq/research/windows_file/IIIT-CFW1.0/dest/'
name={}
for files in glob.glob('/home/zzq/research/windows_file/IIIT-CFW1.0/realFaces/*.jpg'):
filepath, filename = os.path.split(files)
l=filename.split('.')
my_name=filter(str.isalpha,l[0])
if name.has_key(my_name):
name[my_name]+=1
else:
name[my_name]=1
|
[
"[email protected]"
] | |
c94f97bced0e065103a9eee317568b7acb7efafa
|
8294a144dc40e2da77cdbdb623301dc53b2697bb
|
/book_control/views.py
|
a0253bfe82d3487b8de25759c64e7d3697091bef
|
[] |
no_license
|
md-asif-shahriar/slim
|
dd3cea3df33738c6fc146992fdc7b7ce5eaeed41
|
52401ea02de3de184164843ded36628335bca4c3
|
refs/heads/main
| 2023-07-23T23:07:45.536996 | 2021-04-12T14:55:36 | 2021-04-12T14:55:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 799 |
py
|
from django.shortcuts import render, redirect
from .forms import *
def post_book_view(request):
user = PublisherProfileModel.objects.get(user=request.user)
task = "Post New"
form = PostBookForm()
if request.method == 'POST':
form = PostBookForm(request.POST, request.FILES)
if form.is_valid():
new_ad = form.save(commit=False)
new_ad.publisher = user
form.save()
return redirect('home')
else:
context = {
'task': task,
'form': form
}
return render(request, 'book_control/post-update-book.html', context)
context = {
'task': task,
'form': form
}
return render(request, "publisher/post-update-book.html", context)
|
[
"[email protected]"
] | |
1be57f985a05ec18b7b9bbaa2505fdf735efee35
|
f55ed49e77f2983f9118a5228a0f6d777c4eac97
|
/apps/hbase/src/hbase/conf.py
|
e72da5dbabc2a8effb22142b59233d8ca77a8cf8
|
[
"Apache-2.0"
] |
permissive
|
mravi/hue
|
feb8543e1490fdbfdaff069c021ae168f72b28c6
|
1190bc41c560edf239c5dfc9689d25f3b4b3ab95
|
refs/heads/master
| 2020-12-25T21:55:41.294305 | 2013-11-07T11:49:05 | 2013-11-08T01:36:42 | 14,227,040 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,253 |
py
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Configuration options for the hbase application.
"""
import re
from desktop.lib.conf import Config
HBASE_CLUSTERS = Config(
key="hbase_clusters",
default="(Cluster|localhost:9090)",
help="Comma-separated list of HBase Thrift servers for clusters in the format of '(name|host:port)'.",
type=str)
TRUNCATE_LIMIT = Config(
key="truncate_limit",
default="500",
help="Hard limit of rows or columns per row fetched before truncating.",
type=int)
|
[
"[email protected]"
] | |
138535dfe2fb66e2a63cac24f3a9230738a73b8e
|
6aef2d7d320e8ec35f304955b35295f6cde8b1c2
|
/r00tphish.py
|
c4b9779ecd88ac455c7b929ced519c94f73d8eeb
|
[] |
no_license
|
r00tapple/wizardbible
|
3ba074eb56a313df07b29033ddf6ffb466f63fd7
|
ba8073567ec03caa12c63b5fa0e0c1aaa7baf7d9
|
refs/heads/master
| 2021-01-10T04:48:03.836427 | 2015-12-13T18:27:10 | 2015-12-13T18:27:10 | 46,774,367 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,785 |
py
|
from lxml import html
import subprocess
import os,sys,time,re,shutil,urllib2
###setting###
html_parser = html.HTMLParser(encoding="utf-8")
#############
def setdirectory():
if check_os() == "posix":
return os.path.join(os.path.expanduser('~'), '/clone')
if check_os() == "windows":
return "src/program_junk/"
def check_os():
if os.name == "nt":
operating_system = "windows"
if os.name == "posix":
operating_system = "posix"
return operating_system
def makephp(RAW_URL):
logpath = "/clone/"
filewrite = file("%s/post.php" % (logpath), "w")
filewrite.write("""<?php $file = 'log.txt';file_put_contents($file, print_r($_POST, true), FILE_APPEND);?><meta http-equiv="refresh" content="0; url=%s" />""" % (RAW_URL))
filewrite.close()
filewrite = file("%s/log.txt" % (logpath), "w")
filewrite.write("")
filewrite.close()
if sys.platform == "darwin":
subprocess.Popen("chown _www:_www '%s/log.txt'" % (logpath), shell=True).wait()
else:
subprocess.Popen("chown www-data:www-data '%s/log.txt'" % (logpath), shell=True).wait()
def relative(clonesite, base):
    fullpath = "/clone/index.html"
    site = "index.html"
    with open(clonesite, "r") as rf:
        doc = html.parse(rf).getroot()
        doc.make_links_absolute(base)
    # Write the absolutized document back so the form rewrite below picks it up
    with open(clonesite, "w") as wf:
        wf.write(html.tostring(doc))
fileopen=file("/clone/index.html","r").readlines()
filewrite=file(fullpath,"w")
try:
for line in fileopen:
counter=0
match=re.search('post',line, flags=re.IGNORECASE)
method_post=re.search("method=post", line, flags=re.IGNORECASE)
if match or method_post:
                line = re.sub('action="[^"]*"', 'action="post.php"', line)
filewrite.write(line)
print "action attribute it was rewritten to post.php.."
except:
print "file write error.."
finally:
filewrite.close()
def clone(url):
user_agent = "Mozilla/5.0 (Windows; Intel Mac OS X) Chrome/45.0.2454.101 Safari/537.36"
try:
wget = 0
if os.path.isfile("/usr/local/bin/wget"):
wget = 1
if os.path.isfile("/usr/bin/wget"):
wget = 1
if os.path.isfile("/usr/local/wget"):
wget = 1
if wget == 1:
subprocess.Popen('cd %s;wget --no-check-certificate -O index.html -c -k -U "%s" "%s";' % (setdir,user_agent,url), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
if wget == 0:
headers = { 'User-Agent' : user_agent }
req = urllib2.Request(url, None, headers)
html = urllib2.urlopen(req).read()
if len(html) > 1:
try:
filewrite = file(setdir + "/index.html", "w")
filewrite.write(html)
except:
print "index.html write error"
finally:
                    filewrite.close()
except:
print "Sorry error to be continue .."
pass
if __name__ == '__main__':
print """
    ################################################
    # A Python script that clones a web site,      #
    # makes its links absolute and rewrites form   #
    # actions to post.php - for r00tapple          #
    ################################################
"""
setdir = setdirectory()
if not os.path.isdir(setdir):
os.makedirs(setdir + "/web_clone")
#input url
    URL = raw_input("URL of the site to clone: ")
clone(URL)
    domain = raw_input("Enter the http://****.com/ base URL of the cloned site: ")
makephp(domain)
path = setdir + "/index.html"
relaive(path,domain)
print "END"
|
[
"[email protected]"
] | |
afe0ccc8e45fcfa053086a87b4a310df15eceab1
|
73ce13a41156bb4a3c076a1a3a29fa746d716a4b
|
/python-/pdfScrapper.py
|
a5513894876a5c0f7b142a47d3290bb5f8200b3e
|
[] |
no_license
|
Wsaleh123/Python
|
b31da424dcf3e0a9d0978371e83f3aa7e17a0ff1
|
308014bfd632f61c7bb4cd1faae974c637db0179
|
refs/heads/master
| 2021-08-16T13:02:49.042530 | 2017-11-19T22:40:14 | 2017-11-19T22:40:14 | 111,337,836 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 91 |
py
|
import pdfquery
# Open the sample PDF for reading; the original call used undefined
# name[...] / mode[...] lookups instead of plain string arguments.
file = open("ast_sci_data_tables_sample.pdf", mode="rb")
print(file)
|
[
"[email protected]"
] | |
916eae5a842222453fbd7561d41098dcb8e049fa
|
80ebd1937d184206221ef34489f77a5f4f54d6d4
|
/venv/bin/pyhtmlizer
|
aaf0384c060c59066b63ee13038d5e7a41872a3f
|
[] |
no_license
|
cv121189mav/chat
|
e259ec7b24f23db76f32d96e135d5fd30608e0ca
|
11832deeda4a31b12b045b6e53c55853043b2e6e
|
refs/heads/master
| 2020-06-03T21:43:58.777537 | 2019-06-13T10:36:13 | 2019-06-13T10:36:13 | 191,741,778 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 423 |
#!/home/sanya/PycharmProjects/chat/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==19.2.1','console_scripts','pyhtmlizer'
__requires__ = 'Twisted==19.2.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('Twisted==19.2.1', 'console_scripts', 'pyhtmlizer')()
)
|
[
"[email protected]"
] | ||
6fdb87404571a689574fb5666416aa230b4b70f2
|
7c718926b1f4091190e5faefb34c34bacdc312a6
|
/modules.py
|
64f3f1ea9a56da0081a68e7a3f5e690bec8d7973
|
[] |
no_license
|
mindis/Recsys
|
617d8d9196e6aa82d3a1194d2d2f14baa0cd7966
|
3eed212cfefc76417689e506825a4332524375e2
|
refs/heads/master
| 2020-04-07T00:08:29.945753 | 2018-06-11T11:29:11 | 2018-06-11T11:29:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,091 |
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
#embedding
class Embed(nn.Module):
def __init__(self, input_dim, embedding_dim, item = False):
super(Embed, self).__init__()
self.embedding_table = nn.Embedding(input_dim, embedding_dim)
#ensure that the representation of paddings are tensors of zeros, thus, will
#not contribute in potential average pooling session representations
if(item):
self.embedding_table.weight.data[0] = torch.zeros(embedding_dim)
def forward(self, input):
output = self.embedding_table(input)
return output
#inter session RNN module
class Inter_RNN(nn.Module):
def __init__(self, input_dim, hidden_dim, dropout_rate):
super(Inter_RNN, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout = nn.Dropout(dropout_rate)
self.gru = nn.GRU(self.input_dim, self.hidden_dim, batch_first=True)
def forward(self, input, hidden, rep_indicies):
input = self.dropout(input)
gru_output, _ = self.gru(input, hidden)
        #find the last hidden state of each sequence in the batch (i.e. the last steps that are not padding)
hidden_indices = rep_indicies.view(-1,1,1).expand(gru_output.size(0), 1, gru_output.size(2))
hidden_out = torch.gather(gru_output,1,hidden_indices)
hidden_out = hidden_out.squeeze().unsqueeze(0)
hidden_out = self.dropout(hidden_out)
return hidden_out
def init_hidden(self, batch_size):
hidden = Variable(torch.cuda.FloatTensor(1, batch_size, self.hidden_dim).fill_(0))
return hidden
#intra session RNN module
class Intra_RNN(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, dropout_rate):
super(Intra_RNN, self).__init__()
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.dropout = nn.Dropout(dropout_rate)
self.gru = nn.GRU(input_dim, hidden_dim, batch_first=True)
self.linear = nn.Linear(hidden_dim, output_dim)
def forward(self, input, hidden, lengths):
input = self.dropout(input)
gru_output, _ = self.gru(input, hidden)
output = self.dropout(gru_output)
output = self.linear(output)
hidden_indices = lengths.view(-1,1,1).expand(gru_output.size(0), 1, gru_output.size(2))
hidden_out = torch.gather(gru_output,1,hidden_indices)
hidden_out = hidden_out.squeeze().unsqueeze(0)
return output, hidden_out
#time loss module
class Time_Loss(nn.Module):
def __init__(self):
super(Time_Loss, self).__init__()
self.w = nn.Parameter(torch.FloatTensor([-0.1]))
#self.w.data.uniform_(-0.1,0.1)
def forward(self, time, target, epsilon):
time_exp = torch.exp(time)
w_target = self.w*torch.pow(target, epsilon)
exps = (time_exp*(1-torch.exp(w_target)))/self.w
output = time+w_target+exps
return -output
def get_w(self):
return self.w
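# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the original project):
# a minimal CPU-only illustration of how Embed and Intra_RNN fit together.
# All sizes are made up, and torch.zeros replaces Inter_RNN.init_hidden
# (which assumes a CUDA device).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    batch, seq_len, n_items, emb_dim, hid_dim = 4, 7, 100, 16, 32
    item_embed = Embed(n_items, emb_dim, item=True)
    intra_rnn = Intra_RNN(emb_dim, hid_dim, n_items, dropout_rate=0.1)
    sessions = torch.randint(1, n_items, (batch, seq_len))        # fake item ids (0 is padding)
    hidden = Variable(torch.zeros(1, batch, hid_dim))             # initial GRU state
    lengths = torch.full((batch,), seq_len - 1, dtype=torch.long) # index of last real step
    scores, last_hidden = intra_rnn(item_embed(sessions), hidden, lengths)
    print(scores.shape, last_hidden.shape)  # (batch, seq_len, n_items), (1, batch, hid_dim)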
|
[
"[email protected]"
] | |
2ed62811591195923fcfee6e88f11490c620d28a
|
9c70ae1761d9d28f619e96f335a0807de5fad1b7
|
/Modules and Packages/MyMainPackages/some_main_script.py
|
139031511681c6ff4a0145195f21b10d7b11c471
|
[] |
no_license
|
hareeshkavumkulath/CPB
|
eb35cf86c8d6bfa85388d82a742c4b09074bf8b2
|
6f606b4e09f09fbb8c3f9a521cc7f6f52f30e301
|
refs/heads/master
| 2020-09-25T13:59:24.372643 | 2019-12-31T07:30:45 | 2019-12-31T07:30:45 | 226,018,264 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 77 |
py
|
def report_main():
print("Hey I am in some main script and main package")
|
[
"[email protected]"
] | |
138bed068a1b35a2872be88c51cc61ea599cc5fc
|
c431e47c7231e9ba8c3f2709da5fae653595bfcd
|
/prodigy/player.py
|
93e76c2220c3e366b93395a93a4a277e5d0381bb
|
[
"MIT"
] |
permissive
|
TheFinality/Prodigy
|
732a298d04e7bb00bd8aa7dd9a2ed1774465e4f9
|
59148734b9925d460a2d7139d4fe53e4dbf4c5bb
|
refs/heads/master
| 2023-07-03T03:00:00.676336 | 2021-08-02T11:25:40 | 2021-08-02T11:25:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,087 |
py
|
import json
import requests
from prodigy.get_userID import get_userID
def player(token: str, userID: int = None, log: bool = False) -> dict:
if userID:
        return requests.get(f"https://api.prodigygame.com/game-api/v2/characters/{userID}?fields=inventory%2Cdata%2CisMember%2Ctutorial%2Cpets%2Cencounters%2Cquests%2Cappearance%2Cequipment%2Chouse%2Cachievements%2Cstate&userID={get_userID(token)}", headers={'Authorization': f"Bearer {token}"}).json()[str(userID)]
if log:
print("Fetching data from token...")
userID = get_userID(token)
return requests.get(f"https://api.prodigygame.com/game-api/v1/character/{userID}?isMember=0&userID={userID}", headers={'Authorization': f"Bearer {token}"}).json()
def update_player(token: str, data: dict, log: bool = False) -> str:
userID = get_userID(token)
    result = requests.post(f"https://api.prodigygame.com/game-api/v3/characters/{userID}", headers={"Authorization": f"Bearer {token}"}, data={"data": json.dumps(data), "userID": userID}).text
if log:
print("Successfully updated.")
return result
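# Hedged usage sketch (editor's addition; the "level" field below is an
# illustrative assumption, not a documented part of the Prodigy API):
#
#   data = player(token)                # fetch the current character's data
#   data["level"] = 100                 # mutate whatever fields you need
#   print(update_player(token, data))   # push the modified blob back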
|
[
"[email protected]"
] | |
44d2f48c0e7d31ec05ea19b6bc744d08a1727187
|
e82361444469e95dbe19d5b1660ff1d74839bf22
|
/webapp/models.py
|
fed80bee131c2854beafa0b83639f0dd677ffd4c
|
[] |
no_license
|
salimbencosme/movierentalapp-api
|
86271bac1ce9ecad7b8c912c4496db11b18b4d37
|
ea222a8bd9b4caa568866456a3575c8981641988
|
refs/heads/master
| 2021-08-30T12:01:40.912900 | 2017-12-17T20:59:43 | 2017-12-17T20:59:43 | 114,559,449 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,428 |
py
|
from django.db import models
from datetime import datetime
class Movie(models.Model):
movieid=models.AutoField(primary_key=True)
name=models.CharField(max_length=30)
year=models.IntegerField()
director=models.CharField(max_length=30)
imgurl=models.TextField()
rentalprice = models.DecimalField(default=0.00,decimal_places=2,max_digits=12)
active=models.BooleanField(default=True)
class User(models.Model):
userid=models.AutoField(primary_key=True)
fullname=models.CharField(max_length=40)
username=models.CharField(max_length=30)
password=models.CharField(max_length=30)
type=models.IntegerField(default=2)# 1-Admin 2-Client
active=models.BooleanField(default=True)
class Rent(models.Model):
rentid=models.AutoField(primary_key=True)
user = models.ForeignKey(User,on_delete=models.CASCADE,)
renteddate = models.DateTimeField(default=datetime.now)
totalamount = models.DecimalField(default=0.00,decimal_places=2,max_digits=12)
haspenalty=models.BooleanField(default=False)
delivereddate = models.DateTimeField(null=True, blank=True)
inprocess=models.BooleanField(default=True)
active=models.BooleanField(default=True)
class RentMovies(models.Model):
rentmoviesid=models.AutoField(primary_key=True)
rent = models.ForeignKey(Rent,on_delete=models.CASCADE,)
movie = models.ForeignKey(Movie,on_delete=models.CASCADE,)
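# Hedged usage sketch (editor's addition): creating a rental through the
# Django ORM; all field values are illustrative only.
#
#   user = User.objects.create(fullname="Jane Doe", username="jane", password="secret")
#   movie = Movie.objects.create(name="Alien", year=1979, director="Ridley Scott",
#                                imgurl="http://example.com/alien.jpg", rentalprice="3.99")
#   rent = Rent.objects.create(user=user, totalamount=movie.rentalprice)
#   RentMovies.objects.create(rent=rent, movie=movie)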
|
[
"[email protected]"
] | |
23b98df514699d790cd687bab8dee1a7e4fd757d
|
2a1802605a47bdcc9eeffda35082f8a73268df11
|
/rest-api/app.py
|
85edd4b3a3e52bb98c6fb2b2ce38893a90465f96
|
[] |
no_license
|
RemusSino/reddit-consumer
|
1e05929860cd3f118d38a35c03b49000d42769f2
|
535a98f1a593e8fb462be9d8c86fac061efc2ebb
|
refs/heads/master
| 2020-05-27T07:49:22.337057 | 2019-06-02T08:21:03 | 2019-06-02T08:23:44 | 188,535,987 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 783 |
py
|
import json
from bson import json_util
from flask import Flask, request
from mongo import read_from_mongo
app = Flask(__name__)
@app.route('/items', methods=['GET'])
def get_items():
subreddit = request.args.get('subreddit')
from_timestamp = request.args.get('from')
to_timestamp = request.args.get('to')
keyword = request.args.get('keyword')
if subreddit is None or from_timestamp is None or to_timestamp is None:
return "ERROR"
if int(from_timestamp) > int(to_timestamp):
return "Error: from_timestamp > to_timestamp"
docs = read_from_mongo(subreddit, from_timestamp, to_timestamp, keyword)
return json.dumps(docs, default=json_util.default)
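# Example request this endpoint serves (editor's addition; values are illustrative):
#   GET /items?subreddit=python&from=1546300800&to=1548979200&keyword=flask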
return "ok"
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
|
[
"[email protected]"
] | |
6967008ed0deda0ace65515c54b28d2858ba4210
|
7f594817970f5c9c774e7d2e7640068f5c00787d
|
/src/logistic_regression_eda.py
|
de850bab89e033243d05a7ab9b3f92363b8761a0
|
[] |
no_license
|
kellypeng/churn_prediction
|
3aa4b83cf8ed4d74a2c957b0f310fc5200d802c8
|
827bf6634b491e872e4a5d01b291cca4bdb60e61
|
refs/heads/master
| 2021-01-23T09:10:50.433224 | 2017-09-09T23:29:57 | 2017-09-09T23:29:57 | 102,563,838 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,718 |
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from main import load_data, data_processing
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score, make_scorer, recall_score, confusion_matrix, precision_score
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import StandardScaler
def cross_val(model,X,y,cv):
f1 = make_scorer(f1_score)
scores = cross_val_score(model,X,y,scoring=f1,cv=cv)
return scores
def base_design_matrix(df):
y = df.pop('churn').values
df['extreme_weekday_usage'] = ((df['weekday_pct'] == 0)|(df['weekday_pct'] == 100)).astype(int)
df['I am rich'] = (df['surge_pct'] == 100).astype(int)
df['unhappy_customer'] = ((df['avg_rating_of_driver'] > 0) & (df['avg_rating_of_driver'] < 4)).astype(int)
    df['unhappy_driver'] = ((df['avg_rating_by_driver'] > 0) & (df['avg_rating_by_driver'] < 4)).astype(int)
# df['trips_per_day'] = df['trips_in_first_30_days'] / 30.
# df,sc1 = standardize_col(df,'avg_rating_by_driver')
# df,sc2 = standardize_col(df,'avg_rating_of_driver')
X = df.values
X_train,X_test,y_train,y_test = train_test_split(X,y,random_state = 0)
print 'Data imbalance check: ',np.mean(y)
return X_train,X_test,y_train,y_test, df.columns
def standardize_col(df,col):
sc = StandardScaler()
df[col+'_std'] = sc.fit_transform(df[col])
return df,sc
def design_matrix(df):
y = df.pop('churn').values
# df['avg_total_surge'] = df['avg_surge']*df['surge_pct']
df['extreme_weekday_usage'] = ((df['weekday_pct'] == 0)|(df['weekday_pct'] == 100)).astype(int)
df['I am rich'] = (df['surge_pct'] == 100).astype(int)
# df['unhappy_customer'] = ((df['avg_rating_of_driver'] > 0) & (df['avg_rating_of_driver'] < 4)).astype(int)
cols = ['avg_rating_by_driver_isnull','avg_rating_of_driver_isnull',"King's Landing",\
'luxury_car_user','avg_surge','Winterfell','iPhone','extreme_weekday_usage','I am rich']
X = df[cols].values
# print 'Data imbalance check: ',np.mean(y)
X_train,X_test,y_train,y_test = train_test_split(X,y,random_state = 0)
return X_train,X_test,y_train,y_test, cols
def lr_search(X_train,X_test,y_train,y_test,cols):
lr = LogisticRegression()
valid_scores = cross_val(lr,X_train,y_train,5)
lr.fit(X_train,y_train)
coef = lr.coef_
print '-'*50
print 'cross_valid_f1_scores: {}, avg_valid_score: {}'.format(valid_scores,np.mean(valid_scores))
print 'validation accuracy score:{}'.format(lr.score(X_test,y_test))
print 'validation f1 score:{}'.format(f1_score(y_test,lr.predict(X_test)))
pred_y = lr.predict(X_test)
print 'validation recall score:{}'.format(recall_score(y_test,pred_y))
# print cols
print '-'*50
print 'confusion_matrix'
print confusion_matrix(y_test,pred_y)
print len(cols), len(coef[0])
abs_value = np.abs(coef[0])
for i,j,k in sorted(zip(abs_value,coef[0],cols))[::-1]:
print k+': ', j
return lr
def test_processing(df):
y = df.pop('churn').values
df['extreme_weekday_usage'] = ((df['weekday_pct'] == 0)|(df['weekday_pct'] == 100)).astype(int)
df['I am rich'] = (df['surge_pct'] == 100).astype(int)
cols = ['avg_rating_by_driver_isnull','avg_rating_of_driver_isnull',"King's Landing",\
'luxury_car_user','avg_surge','Winterfell','iPhone','extreme_weekday_usage','I am rich']
X = df[cols].values
return X,y, cols
def test_final_model(model):
test_data = pd.read_csv('data/churn_test.csv',parse_dates=['last_trip_date','signup_date'])
date_cutoff = datetime.date(2014, 6, 1)
test_data['churn'] = (test_data.last_trip_date < date_cutoff).astype(int)
test_data = data_processing(test_data)
X,y,cols = test_processing(test_data)
test_pred = model.predict(X)
print '='*50
print 'confusion_matrix\n',confusion_matrix(y,test_pred)
print 'test data f1 score: ',f1_score(y,test_pred)
print 'test data precision score: ',precision_score(y,test_pred)
print 'test data recall score: ',recall_score(y,test_pred)
def plot_distribution_by_churn(df,col):
# fig,axes = plt.subplots(1,2,figsize=(8,5))
# axes[0].hist(df[col][df['churn'] == 0],label='not churn')
# axes[1].hist(df[col][df['churn'] == 1],label='churn')
# axes[0].legend()
# axes[1].legend()
plt.hist(df[col][df['churn'] == 0],bins=20,alpha=.3,label='not churn')
plt.hist(df[col][df['churn'] == 1],bins=20,alpha=.3,label='churn')
plt.xlabel(col)
plt.ylabel('# of users')
plt.legend()
plt.savefig('images/{}.png'.format(col))
plt.show()
plt.clf()
def plot_dists(df):
plot_distribution_by_churn(df,'avg_surge')
plot_distribution_by_churn(df,'surge_pct')
plot_distribution_by_churn(df,'avg_dist')
plot_distribution_by_churn(df,'avg_rating_by_driver')
plot_distribution_by_churn(df,'avg_rating_of_driver')
plot_distribution_by_churn(df,'trips_in_first_30_days')
plot_distribution_by_churn(df,'weekday_pct')
def plot_category(df,col):
print pd.crosstab(df['churn'],df[col])
def plot_cats(df):
print '-'*50
plot_category(df,'city')
print '-'*50
plot_category(df,'phone')
print '-'*50
plot_category(df,'luxury_car_user')
print '-'*50
if __name__ == '__main__':
df = load_data()
# plot_cats(df)
df = data_processing(df)
# plot_category(df,'avg_rating_by_driver_isnull')
# plot_category(df,'avg_rating_of_driver_isnull')
# plot_dists(df)
X_train,X_test,y_train,y_test,cols = design_matrix(df)
model = lr_search(X_train,X_test,y_train,y_test,cols)
test_final_model(model)
|
[
"[email protected]"
] | |
cf634701ce51fc3cb9c14499ec878f065f7baad4
|
427cb811a465677542172b59f5e5f102e3cafb1a
|
/python/classes/subClass.py
|
2244d735db508c992121644c9b9e179b8a63ef61
|
[] |
no_license
|
IzaakWN/CodeSnippets
|
1ecc8cc97f18f77a2fbe980f322242c04dacfb89
|
07ad94d9126ea72c1a8ee5b7b2af176c064c8854
|
refs/heads/master
| 2023-07-26T21:57:10.660979 | 2023-07-20T20:35:59 | 2023-07-20T20:35:59 | 116,404,943 | 18 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,236 |
py
|
# http://www.jesshamrick.com/2011/05/18/an-introduction-to-classes-and-inheritance-in-python/
# https://stackoverflow.com/questions/2843165/python-how-to-inherite-and-override
# http://blog.thedigitalcatonline.com/blog/2014/05/19/method-overriding-in-python/
# https://docs.python.org/2.7/library/functions.html#super
class Animal(object):
def __init__(self,name,age):
self.name = name
self.age = age
def makeNoise(self):
print ">>> %s makes a noise"%(self.name)
def printName(self):
print ">>> Animal name = \"%s\""%(self.name)
def printClassification(self):
print ">>> Animal"
class Dog(Animal):
def __init__(self,name,age):
Animal.__init__(self,name,age)
# or super(Dog,self).__init__(name,age)]
def makeNoise(self):
print ">>> %s says \"%s\""%(self.name,"Woof!")
def printName(self):
print ">>> Dog name = \"%s\""%(self.name)
def printClassification(self):
super(Dog,self).printClassification()
print ">>> Dog"
animal1 = Animal("Carrol",2)
animal2 = Dog("Yeller",4)
print "\n>>> animal 1"
animal1.makeNoise()
animal1.printName()
print ">>>\n>>> animal 2"
animal2.makeNoise()
animal2.printName()
animal2.printClassification()
print
|
[
"[email protected]"
] | |
2c11e8ac451f26810d32dc499dcaf475406a5e30
|
a5161d122e9ac7f733f5af1ca320ef19cf87154a
|
/extractors/host_feature.py
|
c7127ec92eea1d9f91da3a383ffa3d6690c1ff05
|
[] |
no_license
|
zihnan/spider
|
f8c315e35e92ba7c7e0ff4d1e7b353ac98dea5ab
|
0823387171ec34a2945328f898f9a40535fe6c4f
|
refs/heads/master
| 2021-01-11T09:50:00.840323 | 2017-07-21T08:36:32 | 2017-07-21T08:36:32 | 78,271,428 | 1 | 1 | null | 2017-06-23T10:42:33 | 2017-01-07T09:57:36 |
Python
|
UTF-8
|
Python
| false | false | 711 |
py
|
import re
from lxml import html
from extractor import Extractor
class HostExtract(Extractor):
def __init__(self, host_str, **kwargs):
self.ipv4_address_list = []
self.ipv6_address_list = []
for row in host_str.split('\n'):
if row:
cols = row.split(' ')
if cols[2] == 'address':
self.ipv4_address_list.append(cols[3])
elif cols[2] == 'IPv6':
self.ipv6_address_list.append(cols[4])
self.features = []
def ipv4_numbers(self):
return len(self.ipv4_address_list)
def ipv6_numbers(self):
return len(self.ipv6_address_list)
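# Hedged usage sketch (editor's addition): host_str is expected to look like
# the output of the Unix `host` command, e.g.
#
#   example.com has address 93.184.216.34
#   example.com has IPv6 address 2606:2800:220:1:248:1893:25c8:1946
#
#   extractor = HostExtract(host_str)
#   print(extractor.ipv4_numbers(), extractor.ipv6_numbers())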
|
[
"[email protected]"
] | |
a8664286f8358d03bcf8e11702b53d8ee5865ef0
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/matplotlib/2018/8/axes3d.py
|
efbaedfef284ed36e1b8f3d87fbaaf0aa8ba1c71
|
[] |
no_license
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null |
UTF-8
|
Python
| false | false | 103,008 |
py
|
"""
axes3d.py, original mplot3d version by John Porter
Created: 23 Sep 2005
Parts fixed by Reinier Heeres <[email protected]>
Minor additions by Ben Axelrod <[email protected]>
Significant updates and revisions by Ben Root <[email protected]>
Module containing Axes3D, an object which can plot 3D objects on a
2D matplotlib figure.
"""
from functools import reduce
from collections import defaultdict
import math
import warnings
import numpy as np
from matplotlib import artist
import matplotlib.axes as maxes
import matplotlib.cbook as cbook
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.docstring as docstring
import matplotlib.scale as mscale
import matplotlib.transforms as mtransforms
from matplotlib.axes import Axes, rcParams
from matplotlib.colors import Normalize, LightSource
from matplotlib.transforms import Bbox
from matplotlib.tri.triangulation import Triangulation
from . import art3d
from . import proj3d
from . import axis3d
def unit_bbox():
box = Bbox(np.array([[0, 0], [1, 1]]))
return box
class Axes3D(Axes):
"""
3D axes object.
"""
name = '3d'
_shared_z_axes = cbook.Grouper()
def __init__(
self, fig, rect=None, *args,
azim=-60, elev=30, zscale=None, sharez=None, proj_type='persp',
**kwargs):
'''
Build an :class:`Axes3D` instance in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* in
:class:`~matplotlib.figure.Figure` coordinates
Optional keyword arguments:
================ =========================================
Keyword Description
================ =========================================
*azim* Azimuthal viewing angle (default -60)
*elev* Elevation viewing angle (default 30)
*zscale* [%(scale)s]
*sharez* Other axes to share z-limits with
*proj_type* 'persp' or 'ortho' (default 'persp')
================ =========================================
.. versionadded :: 1.2.1
*sharez*
''' % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
if rect is None:
rect = [0.0, 0.0, 1.0, 1.0]
self._cids = []
self.initial_azim = azim
self.initial_elev = elev
self.set_proj_type(proj_type)
self.xy_viewLim = unit_bbox()
self.zz_viewLim = unit_bbox()
self.xy_dataLim = unit_bbox()
self.zz_dataLim = unit_bbox()
        # inhibit autoscale_view until the axes are defined
# they can't be defined until Axes.__init__ has been called
self.view_init(self.initial_elev, self.initial_azim)
self._ready = 0
self._sharez = sharez
if sharez is not None:
self._shared_z_axes.join(self, sharez)
self._adjustable = 'datalim'
super().__init__(fig, rect, frameon=True, *args, **kwargs)
# Disable drawing of axes by base class
super().set_axis_off()
# Enable drawing of axes by Axes3D class
self.set_axis_on()
self.M = None
# func used to format z -- fall back on major formatters
self.fmt_zdata = None
if zscale is not None:
self.set_zscale(zscale)
if self.zaxis is not None:
self._zcid = self.zaxis.callbacks.connect(
'units finalize', lambda: self._on_units_changed(scalez=True))
else:
self._zcid = None
self._ready = 1
self.mouse_init()
self.set_top_view()
self.patch.set_linewidth(0)
# Calculate the pseudo-data width and height
pseudo_bbox = self.transLimits.inverted().transform([(0, 0), (1, 1)])
self._pseudo_w, self._pseudo_h = pseudo_bbox[1] - pseudo_bbox[0]
self.figure.add_axes(self)
def set_axis_off(self):
self._axis3don = False
self.stale = True
def set_axis_on(self):
self._axis3don = True
self.stale = True
def have_units(self):
"""
Return *True* if units are set on the *x*, *y*, or *z* axes
"""
return (self.xaxis.have_units() or self.yaxis.have_units() or
self.zaxis.have_units())
def convert_zunits(self, z):
"""
For artists in an axes, if the zaxis has units support,
convert *z* using zaxis unit type
.. versionadded :: 1.2.1
"""
return self.zaxis.convert_units(z)
def _process_unit_info(self, xdata=None, ydata=None, zdata=None,
kwargs=None):
"""
Look for unit *kwargs* and update the axis instances as necessary
"""
super()._process_unit_info(xdata=xdata, ydata=ydata, kwargs=kwargs)
if self.xaxis is None or self.yaxis is None or self.zaxis is None:
return
if zdata is not None:
# we only need to update if there is nothing set yet.
if not self.zaxis.have_units():
self.zaxis.update_units(xdata)
# process kwargs 2nd since these will override default units
if kwargs is not None:
zunits = kwargs.pop('zunits', self.zaxis.units)
if zunits != self.zaxis.units:
self.zaxis.set_units(zunits)
# If the units being set imply a different converter,
# we need to update.
if zdata is not None:
self.zaxis.update_units(zdata)
def set_top_view(self):
# this happens to be the right view for the viewing coordinates
# moved up and to the left slightly to fit labels and axes
xdwl = (0.95/self.dist)
xdw = (0.9/self.dist)
ydwl = (0.95/self.dist)
ydw = (0.9/self.dist)
# This is purposely using the 2D Axes's set_xlim and set_ylim,
# because we are trying to place our viewing pane.
super().set_xlim(-xdwl, xdw, auto=None)
super().set_ylim(-ydwl, ydw, auto=None)
def _init_axis(self):
'''Init 3D axes; overrides creation of regular X/Y axes'''
self.w_xaxis = axis3d.XAxis('x', self.xy_viewLim.intervalx,
self.xy_dataLim.intervalx, self)
self.xaxis = self.w_xaxis
self.w_yaxis = axis3d.YAxis('y', self.xy_viewLim.intervaly,
self.xy_dataLim.intervaly, self)
self.yaxis = self.w_yaxis
self.w_zaxis = axis3d.ZAxis('z', self.zz_viewLim.intervalx,
self.zz_dataLim.intervalx, self)
self.zaxis = self.w_zaxis
for ax in self.xaxis, self.yaxis, self.zaxis:
ax.init3d()
def get_children(self):
return [self.zaxis] + super().get_children()
def _get_axis_list(self):
return super()._get_axis_list() + (self.zaxis, )
def unit_cube(self, vals=None):
minx, maxx, miny, maxy, minz, maxz = vals or self.get_w_lims()
return [(minx, miny, minz),
(maxx, miny, minz),
(maxx, maxy, minz),
(minx, maxy, minz),
(minx, miny, maxz),
(maxx, miny, maxz),
(maxx, maxy, maxz),
(minx, maxy, maxz)]
def tunit_cube(self, vals=None, M=None):
if M is None:
M = self.M
xyzs = self.unit_cube(vals)
tcube = proj3d.proj_points(xyzs, M)
return tcube
def tunit_edges(self, vals=None, M=None):
tc = self.tunit_cube(vals, M)
edges = [(tc[0], tc[1]),
(tc[1], tc[2]),
(tc[2], tc[3]),
(tc[3], tc[0]),
(tc[0], tc[4]),
(tc[1], tc[5]),
(tc[2], tc[6]),
(tc[3], tc[7]),
(tc[4], tc[5]),
(tc[5], tc[6]),
(tc[6], tc[7]),
(tc[7], tc[4])]
return edges
@artist.allow_rasterization
def draw(self, renderer):
# draw the background patch
self.patch.draw(renderer)
self._frameon = False
# first, set the aspect
# this is duplicated from `axes._base._AxesBase.draw`
# but must be called before any of the artist are drawn as
# it adjusts the view limits and the size of the bounding box
# of the axes
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
# add the projection matrix to the renderer
self.M = self.get_proj()
renderer.M = self.M
renderer.vvec = self.vvec
renderer.eye = self.eye
renderer.get_axis_position = self.get_axis_position
# Calculate projection of collections and patches and zorder them.
# Make sure they are drawn above the grids.
zorder_offset = max(axis.get_zorder()
for axis in self._get_axis_list()) + 1
for i, col in enumerate(
sorted(self.collections,
key=lambda col: col.do_3d_projection(renderer),
reverse=True)):
col.zorder = zorder_offset + i
for i, patch in enumerate(
sorted(self.patches,
key=lambda patch: patch.do_3d_projection(renderer),
reverse=True)):
patch.zorder = zorder_offset + i
if self._axis3don:
# Draw panes first
for axis in self._get_axis_list():
axis.draw_pane(renderer)
# Then axes
for axis in self._get_axis_list():
axis.draw(renderer)
# Then rest
super().draw(renderer)
def get_axis_position(self):
vals = self.get_w_lims()
tc = self.tunit_cube(vals, self.M)
xhigh = tc[1][2] > tc[2][2]
yhigh = tc[3][2] > tc[2][2]
zhigh = tc[0][2] > tc[2][2]
return xhigh, yhigh, zhigh
def _on_units_changed(self, scalex=False, scaley=False, scalez=False):
"""
Callback for processing changes to axis units.
Currently forces updates of data limits and view limits.
"""
self.relim()
self.autoscale_view(scalex=scalex, scaley=scaley, scalez=scalez)
def update_datalim(self, xys, **kwargs):
pass
def get_autoscale_on(self):
"""
Get whether autoscaling is applied for all axes on plot commands
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
return super().get_autoscale_on() and self.get_autoscalez_on()
def get_autoscalez_on(self):
"""
Get whether autoscaling for the z-axis is applied on plot commands
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
return self._autoscaleZon
def set_autoscale_on(self, b):
"""
Set whether autoscaling is applied on plot commands
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
Parameters
----------
b : bool
"""
super().set_autoscale_on(b)
self.set_autoscalez_on(b)
def set_autoscalez_on(self, b):
"""
Set whether autoscaling for the z-axis is applied on plot commands
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
Parameters
----------
b : bool
"""
self._autoscaleZon = b
def set_zmargin(self, m):
"""
Set padding of Z data limits prior to autoscaling.
*m* times the data interval will be added to each
end of that interval before it is used in autoscaling.
accepts: float in range 0 to 1
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if m < 0 or m > 1 :
raise ValueError("margin must be in range 0 to 1")
self._zmargin = m
self.stale = True
def margins(self, *margins, x=None, y=None, z=None, tight=True):
"""
Convenience method to set or retrieve autoscaling margins.
signatures::
margins()
returns xmargin, ymargin, zmargin
::
margins(margin)
margins(xmargin, ymargin, zmargin)
margins(x=xmargin, y=ymargin, z=zmargin)
margins(..., tight=False)
All forms above set the xmargin, ymargin and zmargin
parameters. All keyword parameters are optional. A single
positional argument specifies xmargin, ymargin and zmargin.
Passing both positional and keyword arguments for xmargin,
ymargin, and/or zmargin is invalid.
The *tight* parameter
is passed to :meth:`autoscale_view`, which is executed after
a margin is changed; the default here is *True*, on the
assumption that when margins are specified, no additional
padding to match tick marks is usually desired. Setting
*tight* to *None* will preserve the previous setting.
Specifying any margin changes only the autoscaling; for example,
if *xmargin* is not None, then *xmargin* times the X data
interval will be added to each end of that interval before
it is used in autoscaling.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if margins and x is not None and y is not None and z is not None:
raise TypeError('Cannot pass both positional and keyword '
'arguments for x, y, and/or z.')
elif len(margins) == 1:
x = y = z = margins[0]
elif len(margins) == 3:
x, y, z = margins
elif margins:
raise TypeError('Must pass a single positional argument for all '
'margins, or one for each margin (x, y, z).')
if x is None and y is None and z is None:
if tight is not True:
warnings.warn('ignoring tight=%r in get mode' % (tight,))
return self._xmargin, self._ymargin, self._zmargin
if x is not None:
self.set_xmargin(x)
if y is not None:
self.set_ymargin(y)
if z is not None:
self.set_zmargin(z)
self.autoscale_view(
tight=tight, scalex=(x is not None), scaley=(y is not None),
scalez=(z is not None)
)
def autoscale(self, enable=True, axis='both', tight=None):
"""
Convenience method for simple axis view autoscaling.
See :meth:`matplotlib.axes.Axes.autoscale` for full explanation.
Note that this function behaves the same, but for all
three axes. Therefore, 'z' can be passed for *axis*,
and 'both' applies to all three axes.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if enable is None:
scalex = True
scaley = True
scalez = True
else:
if axis in ['x', 'both']:
self._autoscaleXon = scalex = bool(enable)
else:
scalex = False
if axis in ['y', 'both']:
self._autoscaleYon = scaley = bool(enable)
else:
scaley = False
if axis in ['z', 'both']:
self._autoscaleZon = scalez = bool(enable)
else:
scalez = False
self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley,
scalez=scalez)
def auto_scale_xyz(self, X, Y, Z=None, had_data=None):
x, y, z = map(np.asarray, (X, Y, Z))
try:
x, y = x.flatten(), y.flatten()
if Z is not None:
z = z.flatten()
except AttributeError:
raise
# This updates the bounding boxes as to keep a record as
# to what the minimum sized rectangular volume holds the
# data.
self.xy_dataLim.update_from_data_xy(np.array([x, y]).T, not had_data)
if z is not None:
self.zz_dataLim.update_from_data_xy(np.array([z, z]).T, not had_data)
# Let autoscale_view figure out how to use this data.
self.autoscale_view()
def autoscale_view(self, tight=None, scalex=True, scaley=True,
scalez=True):
"""
Autoscale the view limits using the data limits.
See :meth:`matplotlib.axes.Axes.autoscale_view` for documentation.
Note that this function applies to the 3D axes, and as such
adds the *scalez* to the function arguments.
.. versionchanged :: 1.1.0
Function signature was changed to better match the 2D version.
*tight* is now explicitly a kwarg and placed first.
.. versionchanged :: 1.2.1
This is now fully functional.
"""
if not self._ready:
return
# This method looks at the rectangular volume (see above)
# of data and decides how to scale the view portal to fit it.
if tight is None:
# if image data only just use the datalim
_tight = self._tight or (
len(self.images) > 0
and len(self.lines) == len(self.patches) == 0)
else:
_tight = self._tight = bool(tight)
if scalex and self._autoscaleXon:
self._shared_x_axes.clean()
x0, x1 = self.xy_dataLim.intervalx
xlocator = self.xaxis.get_major_locator()
try:
x0, x1 = xlocator.nonsingular(x0, x1)
except AttributeError:
x0, x1 = mtransforms.nonsingular(x0, x1, increasing=False,
expander=0.05)
if self._xmargin > 0:
delta = (x1 - x0) * self._xmargin
x0 -= delta
x1 += delta
if not _tight:
x0, x1 = xlocator.view_limits(x0, x1)
self.set_xbound(x0, x1)
if scaley and self._autoscaleYon:
self._shared_y_axes.clean()
y0, y1 = self.xy_dataLim.intervaly
ylocator = self.yaxis.get_major_locator()
try:
y0, y1 = ylocator.nonsingular(y0, y1)
except AttributeError:
y0, y1 = mtransforms.nonsingular(y0, y1, increasing=False,
expander=0.05)
if self._ymargin > 0:
delta = (y1 - y0) * self._ymargin
y0 -= delta
y1 += delta
if not _tight:
y0, y1 = ylocator.view_limits(y0, y1)
self.set_ybound(y0, y1)
if scalez and self._autoscaleZon:
self._shared_z_axes.clean()
z0, z1 = self.zz_dataLim.intervalx
zlocator = self.zaxis.get_major_locator()
try:
z0, z1 = zlocator.nonsingular(z0, z1)
except AttributeError:
z0, z1 = mtransforms.nonsingular(z0, z1, increasing=False,
expander=0.05)
if self._zmargin > 0:
delta = (z1 - z0) * self._zmargin
z0 -= delta
z1 += delta
if not _tight:
z0, z1 = zlocator.view_limits(z0, z1)
self.set_zbound(z0, z1)
def get_w_lims(self):
'''Get 3D world limits.'''
minx, maxx = self.get_xlim3d()
miny, maxy = self.get_ylim3d()
minz, maxz = self.get_zlim3d()
return minx, maxx, miny, maxy, minz, maxz
def _determine_lims(self, xmin=None, xmax=None, *args, **kwargs):
if xmax is None and np.iterable(xmin):
xmin, xmax = xmin
if xmin == xmax:
xmin -= 0.05
xmax += 0.05
return (xmin, xmax)
def set_xlim3d(self, left=None, right=None, emit=True, auto=False,
*, xmin=None, xmax=None):
"""
Set 3D x limits.
See :meth:`matplotlib.axes.Axes.set_xlim` for full documentation.
"""
if right is None and np.iterable(left):
left, right = left
if xmin is not None:
cbook.warn_deprecated('3.0', name='`xmin`',
alternative='`left`', obj_type='argument')
if left is not None:
raise TypeError('Cannot pass both `xmin` and `left`')
left = xmin
if xmax is not None:
cbook.warn_deprecated('3.0', name='`xmax`',
alternative='`right`', obj_type='argument')
if right is not None:
raise TypeError('Cannot pass both `xmax` and `right`')
right = xmax
self._process_unit_info(xdata=(left, right))
left = self._validate_converted_limits(left, self.convert_xunits)
right = self._validate_converted_limits(right, self.convert_xunits)
old_left, old_right = self.get_xlim()
if left is None:
left = old_left
if right is None:
right = old_right
if left == right:
warnings.warn(('Attempting to set identical left==right results\n'
'in singular transformations; automatically expanding.\n'
'left=%s, right=%s') % (left, right))
left, right = mtransforms.nonsingular(left, right, increasing=False)
left, right = self.xaxis.limit_range_for_scale(left, right)
self.xy_viewLim.intervalx = (left, right)
if auto is not None:
self._autoscaleXon = bool(auto)
if emit:
self.callbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.xy_viewLim.intervalx,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
self.stale = True
return left, right
set_xlim = set_xlim3d
def set_ylim3d(self, bottom=None, top=None, emit=True, auto=False,
*, ymin=None, ymax=None):
"""
Set 3D y limits.
See :meth:`matplotlib.axes.Axes.set_ylim` for full documentation.
"""
if top is None and np.iterable(bottom):
bottom, top = bottom
if ymin is not None:
cbook.warn_deprecated('3.0', name='`ymin`',
alternative='`bottom`', obj_type='argument')
if bottom is not None:
raise TypeError('Cannot pass both `ymin` and `bottom`')
bottom = ymin
if ymax is not None:
cbook.warn_deprecated('3.0', name='`ymax`',
alternative='`top`', obj_type='argument')
if top is not None:
raise TypeError('Cannot pass both `ymax` and `top`')
top = ymax
self._process_unit_info(ydata=(bottom, top))
bottom = self._validate_converted_limits(bottom, self.convert_yunits)
top = self._validate_converted_limits(top, self.convert_yunits)
old_bottom, old_top = self.get_ylim()
if bottom is None:
bottom = old_bottom
if top is None:
top = old_top
if top == bottom:
warnings.warn(('Attempting to set identical bottom==top results\n'
'in singular transformations; automatically expanding.\n'
'bottom=%s, top=%s') % (bottom, top))
bottom, top = mtransforms.nonsingular(bottom, top, increasing=False)
bottom, top = self.yaxis.limit_range_for_scale(bottom, top)
self.xy_viewLim.intervaly = (bottom, top)
if auto is not None:
self._autoscaleYon = bool(auto)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.xy_viewLim.intervaly,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
self.stale = True
return bottom, top
set_ylim = set_ylim3d
def set_zlim3d(self, bottom=None, top=None, emit=True, auto=False,
*, zmin=None, zmax=None):
"""
Set 3D z limits.
See :meth:`matplotlib.axes.Axes.set_ylim` for full documentation
"""
if top is None and np.iterable(bottom):
bottom, top = bottom
if zmin is not None:
cbook.warn_deprecated('3.0', name='`zmin`',
alternative='`bottom`', obj_type='argument')
if bottom is not None:
raise TypeError('Cannot pass both `zmin` and `bottom`')
bottom = zmin
if zmax is not None:
cbook.warn_deprecated('3.0', name='`zmax`',
alternative='`top`', obj_type='argument')
if top is not None:
raise TypeError('Cannot pass both `zmax` and `top`')
top = zmax
self._process_unit_info(zdata=(bottom, top))
bottom = self._validate_converted_limits(bottom, self.convert_zunits)
top = self._validate_converted_limits(top, self.convert_zunits)
old_bottom, old_top = self.get_zlim()
if bottom is None:
bottom = old_bottom
if top is None:
top = old_top
if top == bottom:
warnings.warn(('Attempting to set identical bottom==top results\n'
'in singular transformations; automatically expanding.\n'
'bottom=%s, top=%s') % (bottom, top))
bottom, top = mtransforms.nonsingular(bottom, top, increasing=False)
bottom, top = self.zaxis.limit_range_for_scale(bottom, top)
self.zz_viewLim.intervalx = (bottom, top)
if auto is not None:
self._autoscaleZon = bool(auto)
if emit:
self.callbacks.process('zlim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_z_axes.get_siblings(self):
if other is not self:
other.set_zlim(self.zz_viewLim.intervalx,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
self.stale = True
return bottom, top
set_zlim = set_zlim3d
def get_xlim3d(self):
return tuple(self.xy_viewLim.intervalx)
get_xlim3d.__doc__ = maxes.Axes.get_xlim.__doc__
get_xlim = get_xlim3d
if get_xlim.__doc__ is not None:
get_xlim.__doc__ += """
.. versionchanged :: 1.1.0
This function now correctly refers to the 3D x-limits
"""
def get_ylim3d(self):
return tuple(self.xy_viewLim.intervaly)
get_ylim3d.__doc__ = maxes.Axes.get_ylim.__doc__
get_ylim = get_ylim3d
if get_ylim.__doc__ is not None:
get_ylim.__doc__ += """
.. versionchanged :: 1.1.0
This function now correctly refers to the 3D y-limits.
"""
def get_zlim3d(self):
'''Get 3D z limits.'''
return tuple(self.zz_viewLim.intervalx)
get_zlim = get_zlim3d
def get_zscale(self):
"""
Return the zaxis scale string %s
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
""" % (", ".join(mscale.get_scale_names()))
return self.zaxis.get_scale()
# We need to slightly redefine these to pass scalez=False
# to their calls of autoscale_view.
def set_xscale(self, value, **kwargs):
self.xaxis._set_scale(value, **kwargs)
self.autoscale_view(scaley=False, scalez=False)
self._update_transScale()
if maxes.Axes.set_xscale.__doc__ is not None:
set_xscale.__doc__ = maxes.Axes.set_xscale.__doc__ + """
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
def set_yscale(self, value, **kwargs):
self.yaxis._set_scale(value, **kwargs)
self.autoscale_view(scalex=False, scalez=False)
self._update_transScale()
self.stale = True
if maxes.Axes.set_yscale.__doc__ is not None:
set_yscale.__doc__ = maxes.Axes.set_yscale.__doc__ + """
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
@docstring.dedent_interpd
def set_zscale(self, value, **kwargs):
"""
Set the scaling of the z-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
.. note ::
Currently, Axes3D objects only supports linear scales.
Other scales may or may not work, and support for these
is improving with each release.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
self.zaxis._set_scale(value, **kwargs)
self.autoscale_view(scalex=False, scaley=False)
self._update_transScale()
self.stale = True
def set_zticks(self, *args, **kwargs):
"""
Set z-axis tick locations.
See :meth:`matplotlib.axes.Axes.set_yticks` for more details.
.. note::
Minor ticks are not supported.
.. versionadded:: 1.1.0
"""
return self.zaxis.set_ticks(*args, **kwargs)
def get_zticks(self, minor=False):
"""
Return the z ticks as a list of locations
See :meth:`matplotlib.axes.Axes.get_yticks` for more details.
.. note::
Minor ticks are not supported.
.. versionadded:: 1.1.0
"""
return self.zaxis.get_ticklocs(minor=minor)
def get_zmajorticklabels(self):
"""
Get the ztick labels as a list of Text instances
.. versionadded :: 1.1.0
"""
return cbook.silent_list('Text zticklabel',
self.zaxis.get_majorticklabels())
def get_zminorticklabels(self):
"""
Get the ztick labels as a list of Text instances
.. note::
Minor ticks are not supported. This function was added
only for completeness.
.. versionadded :: 1.1.0
"""
return cbook.silent_list('Text zticklabel',
self.zaxis.get_minorticklabels())
def set_zticklabels(self, *args, **kwargs):
"""
Set z-axis tick labels.
See :meth:`matplotlib.axes.Axes.set_yticklabels` for more details.
.. note::
Minor ticks are not supported by Axes3D objects.
.. versionadded:: 1.1.0
"""
return self.zaxis.set_ticklabels(*args, **kwargs)
def get_zticklabels(self, minor=False):
"""
Get ztick labels as a list of Text instances.
See :meth:`matplotlib.axes.Axes.get_yticklabels` for more details.
.. note::
Minor ticks are not supported.
.. versionadded:: 1.1.0
"""
return cbook.silent_list('Text zticklabel',
self.zaxis.get_ticklabels(minor=minor))
def zaxis_date(self, tz=None):
"""
Sets up z-axis ticks and labels that treat the z data as dates.
*tz* is a timezone string or :class:`tzinfo` instance.
Defaults to rc value.
.. note::
This function is merely provided for completeness.
Axes3D objects do not officially support dates for ticks,
and so this may or may not work as expected.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
self.zaxis.axis_date(tz)
def get_zticklines(self):
"""
Get ztick lines as a list of Line2D instances.
Note that this function is provided merely for completeness.
These lines are re-calculated as the display changes.
.. versionadded:: 1.1.0
"""
return self.zaxis.get_ticklines()
def clabel(self, *args, **kwargs):
"""
This function is currently not implemented for 3D axes.
Returns *None*.
"""
return None
def view_init(self, elev=None, azim=None):
"""
Set the elevation and azimuth of the axes.
This can be used to rotate the axes programmatically.
'elev' stores the elevation angle in the z plane.
'azim' stores the azimuth angle in the x,y plane.
if elev or azim are None (default), then the initial value
is used which was specified in the :class:`Axes3D` constructor.
"""
self.dist = 10
if elev is None:
self.elev = self.initial_elev
else:
self.elev = elev
if azim is None:
self.azim = self.initial_azim
else:
self.azim = azim
def set_proj_type(self, proj_type):
"""
Set the projection type.
Parameters
----------
proj_type : str
Type of projection, accepts 'persp' and 'ortho'.
"""
if proj_type == 'persp':
self._projection = proj3d.persp_transformation
elif proj_type == 'ortho':
self._projection = proj3d.ortho_transformation
else:
raise ValueError("unrecognized projection: %s" % proj_type)
def get_proj(self):
"""
Create the projection matrix from the current viewing position.
elev stores the elevation angle in the z plane
azim stores the azimuth angle in the x,y plane
dist is the distance of the eye viewing point from the object
point.
"""
relev, razim = np.pi * self.elev/180, np.pi * self.azim/180
xmin, xmax = self.get_xlim3d()
ymin, ymax = self.get_ylim3d()
zmin, zmax = self.get_zlim3d()
# transform to uniform world coordinates 0-1.0,0-1.0,0-1.0
worldM = proj3d.world_transformation(xmin, xmax,
ymin, ymax,
zmin, zmax)
# look into the middle of the new coordinates
R = np.array([0.5, 0.5, 0.5])
xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist
yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist
zp = R[2] + np.sin(relev) * self.dist
E = np.array((xp, yp, zp))
self.eye = E
self.vvec = R - E
self.vvec = self.vvec / proj3d.mod(self.vvec)
if abs(relev) > np.pi/2:
# upside down
V = np.array((0, 0, -1))
else:
V = np.array((0, 0, 1))
zfront, zback = -self.dist, self.dist
viewM = proj3d.view_transformation(E, R, V)
projM = self._projection(zfront, zback)
M0 = np.dot(viewM, worldM)
M = np.dot(projM, M0)
return M
def mouse_init(self, rotate_btn=1, zoom_btn=3):
"""Initializes mouse button callbacks to enable 3D rotation of
the axes. Also optionally sets the mouse buttons for 3D rotation
and zooming.
============ =======================================================
Argument Description
============ =======================================================
*rotate_btn* The integer or list of integers specifying which mouse
button or buttons to use for 3D rotation of the axes.
Default = 1.
*zoom_btn* The integer or list of integers specifying which mouse
button or buttons to use to zoom the 3D axes.
Default = 3.
============ =======================================================
"""
self.button_pressed = None
canv = self.figure.canvas
if canv is not None:
c1 = canv.mpl_connect('motion_notify_event', self._on_move)
c2 = canv.mpl_connect('button_press_event', self._button_press)
c3 = canv.mpl_connect('button_release_event', self._button_release)
self._cids = [c1, c2, c3]
else:
warnings.warn(
"Axes3D.figure.canvas is 'None', mouse rotation disabled. "
"Set canvas then call Axes3D.mouse_init().")
# coerce scalars into array-like, then convert into
# a regular list to avoid comparisons against None
# which breaks in recent versions of numpy.
self._rotate_btn = np.atleast_1d(rotate_btn).tolist()
self._zoom_btn = np.atleast_1d(zoom_btn).tolist()
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
3D axes objects do not use the zoom box button.
"""
return False
def can_pan(self):
"""
Return *True* if this axes supports the pan/zoom button functionality.
3D axes objects do not use the pan/zoom button.
"""
return False
def cla(self):
"""
Clear axes
"""
# Disabling mouse interaction might have been needed a long
# time ago, but I can't find a reason for it now - BVR (2012-03)
#self.disable_mouse_rotation()
super().cla()
self.zaxis.cla()
if self._sharez is not None:
self.zaxis.major = self._sharez.zaxis.major
self.zaxis.minor = self._sharez.zaxis.minor
z0, z1 = self._sharez.get_zlim()
self.set_zlim(z0, z1, emit=False, auto=None)
self.zaxis._set_scale(self._sharez.zaxis.get_scale())
else:
self.zaxis._set_scale('linear')
try:
self.set_zlim(0, 1)
except TypeError:
pass
self._autoscaleZon = True
self._zmargin = 0
self.grid(rcParams['axes3d.grid'])
def disable_mouse_rotation(self):
"""Disable mouse button callbacks.
"""
# Disconnect the various events we set.
for cid in self._cids:
self.figure.canvas.mpl_disconnect(cid)
self._cids = []
def _button_press(self, event):
if event.inaxes == self:
self.button_pressed = event.button
self.sx, self.sy = event.xdata, event.ydata
def _button_release(self, event):
self.button_pressed = None
def format_zdata(self, z):
"""
Return *z* string formatted. This function will use the
:attr:`fmt_zdata` attribute if it is callable, else will fall
back on the zaxis major formatter
"""
try: return self.fmt_zdata(z)
except (AttributeError, TypeError):
func = self.zaxis.get_major_formatter().format_data_short
val = func(z)
return val
def format_coord(self, xd, yd):
"""
Given the 2D view coordinates attempt to guess a 3D coordinate.
Looks for the nearest edge to the point and then assumes that
the point is at the same z location as the nearest point on the edge.
"""
if self.M is None:
return ''
if self.button_pressed in self._rotate_btn:
return 'azimuth=%d deg, elevation=%d deg ' % (self.azim, self.elev)
# ignore xd and yd and display angles instead
# nearest edge
p0, p1 = min(self.tunit_edges(),
key=lambda edge: proj3d.line2d_seg_dist(
edge[0], edge[1], (xd, yd)))
# scale the z value to match
x0, y0, z0 = p0
x1, y1, z1 = p1
d0 = np.hypot(x0-xd, y0-yd)
d1 = np.hypot(x1-xd, y1-yd)
dt = d0+d1
z = d1/dt * z0 + d0/dt * z1
x, y, z = proj3d.inv_transform(xd, yd, z, self.M)
xs = self.format_xdata(x)
ys = self.format_ydata(y)
zs = self.format_zdata(z)
return 'x=%s, y=%s, z=%s' % (xs, ys, zs)
def _on_move(self, event):
"""Mouse moving
button-1 rotates by default. Can be set explicitly in mouse_init().
button-3 zooms by default. Can be set explicitly in mouse_init().
"""
if not self.button_pressed:
return
if self.M is None:
return
x, y = event.xdata, event.ydata
# In case the mouse is out of bounds.
if x is None:
return
dx, dy = x - self.sx, y - self.sy
w = self._pseudo_w
h = self._pseudo_h
self.sx, self.sy = x, y
# Rotation
if self.button_pressed in self._rotate_btn:
# rotate viewing point
# get the x and y pixel coords
if dx == 0 and dy == 0:
return
self.elev = art3d.norm_angle(self.elev - (dy/h)*180)
self.azim = art3d.norm_angle(self.azim - (dx/w)*180)
self.get_proj()
self.stale = True
self.figure.canvas.draw_idle()
# elif self.button_pressed == 2:
# pan view
# project xv,yv,zv -> xw,yw,zw
# pan
# pass
# Zoom
elif self.button_pressed in self._zoom_btn:
# zoom view
# hmmm..this needs some help from clipping....
minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()
df = 1-((h - dy)/h)
dx = (maxx-minx)*df
dy = (maxy-miny)*df
dz = (maxz-minz)*df
self.set_xlim3d(minx - dx, maxx + dx)
self.set_ylim3d(miny - dy, maxy + dy)
self.set_zlim3d(minz - dz, maxz + dz)
self.get_proj()
self.figure.canvas.draw_idle()
def set_zlabel(self, zlabel, fontdict=None, labelpad=None, **kwargs):
'''
Set zlabel. See doc for :meth:`set_ylabel` for description.
'''
if labelpad is not None : self.zaxis.labelpad = labelpad
return self.zaxis.set_label_text(zlabel, fontdict, **kwargs)
def get_zlabel(self):
"""
Get the z-label text string.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
label = self.zaxis.get_label()
return label.get_text()
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the 3D axes panels are drawn.
.. versionadded :: 1.1.0
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the 3D axes panels are drawn.
.. versionadded :: 1.1.0
Parameters
----------
b : bool
"""
self._frameon = bool(b)
self.stale = True
def grid(self, b=True, **kwargs):
'''
Set / unset 3D grid.
.. note::
Currently, this function does not behave the same as
:meth:`matplotlib.axes.Axes.grid`, but it is intended to
eventually support that behavior.
.. versionchanged :: 1.1.0
This function was changed, but not tested. Please report any bugs.
'''
# TODO: Operate on each axes separately
if len(kwargs):
b = True
self._draw_grid = cbook._string_to_bool(b)
self.stale = True
def ticklabel_format(
self, *, style='', scilimits=None, useOffset=None, axis='both'):
"""
Convenience method for manipulating the ScalarFormatter
        used by default for linear axes in Axes3D objects.
See :meth:`matplotlib.axes.Axes.ticklabel_format` for full
documentation. Note that this version applies to all three
axes of the Axes3D object. Therefore, the *axis* argument
will also accept a value of 'z' and the value of 'both' will
apply to all three axes.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
style = style.lower()
axis = axis.lower()
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
if style[:3] == 'sci':
sb = True
elif style == 'plain':
sb = False
elif style == '':
sb = None
else:
raise ValueError("%s is not a valid style value")
try:
if sb is not None:
                if axis in ['both', 'x']:
self.xaxis.major.formatter.set_scientific(sb)
if axis in ['both', 'y']:
self.yaxis.major.formatter.set_scientific(sb)
if axis in ['both', 'z'] :
self.zaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis in ['both', 'x']:
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis in ['both', 'y']:
self.yaxis.major.formatter.set_powerlimits(scilimits)
if axis in ['both', 'z']:
self.zaxis.major.formatter.set_powerlimits(scilimits)
if useOffset is not None:
if axis in ['both', 'x']:
self.xaxis.major.formatter.set_useOffset(useOffset)
if axis in ['both', 'y']:
self.yaxis.major.formatter.set_useOffset(useOffset)
if axis in ['both', 'z']:
self.zaxis.major.formatter.set_useOffset(useOffset)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def locator_params(self, axis='both', tight=None, **kwargs):
"""
Convenience method for controlling tick locators.
See :meth:`matplotlib.axes.Axes.locator_params` for full
documentation. Note that this is for Axes3D objects,
therefore, setting *axis* to 'both' will result in the
parameters being set for all three axes. Also, *axis*
can also take a value of 'z' to apply parameters to the
z axis.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
_x = axis in ['x', 'both']
_y = axis in ['y', 'both']
_z = axis in ['z', 'both']
if _x:
self.xaxis.get_major_locator().set_params(**kwargs)
if _y:
self.yaxis.get_major_locator().set_params(**kwargs)
if _z:
self.zaxis.get_major_locator().set_params(**kwargs)
self.autoscale_view(tight=tight, scalex=_x, scaley=_y, scalez=_z)
def tick_params(self, axis='both', **kwargs):
"""
Convenience method for changing the appearance of ticks and
tick labels.
See :meth:`matplotlib.axes.Axes.tick_params` for more complete
documentation.
The only difference is that setting *axis* to 'both' will
mean that the settings are applied to all three axes. Also,
the *axis* parameter also accepts a value of 'z', which
would mean to apply to only the z-axis.
Also, because of how Axes3D objects are drawn very differently
from regular 2D axes, some of these settings may have
ambiguous meaning. For simplicity, the 'z' axis will
accept settings as if it was like the 'y' axis.
.. note::
While this function is currently implemented, the core part
of the Axes3D object may ignore some of these settings.
Future releases will fix this. Priority will be given to
those who file bugs.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
super().tick_params(axis, **kwargs)
if axis in ['z', 'both'] :
zkw = dict(kwargs)
zkw.pop('top', None)
zkw.pop('bottom', None)
zkw.pop('labeltop', None)
zkw.pop('labelbottom', None)
self.zaxis.set_tick_params(**zkw)
### data limits, ticks, tick labels, and formatting
def invert_zaxis(self):
"""
Invert the z-axis.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
bottom, top = self.get_zlim()
self.set_zlim(top, bottom, auto=None)
def zaxis_inverted(self):
'''
Returns True if the z-axis is inverted.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
'''
bottom, top = self.get_zlim()
return top < bottom
def get_zbound(self):
"""
Returns the z-axis numerical bounds where::
lowerBound < upperBound
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
bottom, top = self.get_zlim()
if bottom < top:
return bottom, top
else:
return top, bottom
def set_zbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the z-axis.
This method will honor axes inversion regardless of parameter order.
It will not change the :attr:`_autoscaleZon` attribute.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
        if upper is None and np.iterable(lower):
            lower, upper = lower
        old_lower, old_upper = self.get_zbound()
        if lower is None:
            lower = old_lower
        if upper is None:
            upper = old_upper
        if self.zaxis_inverted():
            if lower < upper:
                self.set_zlim(upper, lower, auto=None)
            else:
                self.set_zlim(lower, upper, auto=None)
        else:
            if lower < upper:
                self.set_zlim(lower, upper, auto=None)
            else:
                self.set_zlim(upper, lower, auto=None)
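    # Illustrative usage sketch (assumed axes object `ax`): the z-limit helpers
    # above behave like their 2D counterparts, e.g.
    #
    #     ax.set_zbound(0.0, 10.0)        # respects any existing inversion
    #     ax.invert_zaxis()
    #     assert ax.zaxis_inverted()
    #     lower, upper = ax.get_zbound()  # always ordered so lower <= upper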
def text(self, x, y, z, s, zdir=None, **kwargs):
'''
Add text to the plot. kwargs will be passed on to Axes.text,
except for the `zdir` keyword, which sets the direction to be
used as the z direction.
'''
text = super().text(x, y, s, **kwargs)
art3d.text_2d_to_3d(text, z, zdir)
return text
text3D = text
text2D = Axes.text
def plot(self, xs, ys, *args, zdir='z', **kwargs):
'''
Plot 2D or 3D data.
========== ================================================
Argument Description
========== ================================================
*xs*, *ys* x, y coordinates of vertices
*zs* z value(s), either one for all points or one for
each point.
*zdir* Which direction to use as z ('x', 'y' or 'z')
when plotting a 2D set.
========== ================================================
Other arguments are passed on to
:func:`~matplotlib.axes.Axes.plot`
'''
had_data = self.has_data()
# `zs` can be passed positionally or as keyword; checking whether
# args[0] is a string matches the behavior of 2D `plot` (via
# `_process_plot_var_args`).
if args and not isinstance(args[0], str):
zs = args[0]
args = args[1:]
if 'zs' in kwargs:
raise TypeError("plot() for multiple values for argument 'z'")
else:
zs = kwargs.pop('zs', 0)
# Match length
zs = np.broadcast_to(zs, len(xs))
lines = super().plot(xs, ys, *args, **kwargs)
for line in lines:
art3d.line_2d_to_3d(line, zs=zs, zdir=zdir)
xs, ys, zs = art3d.juggle_axes(xs, ys, zs, zdir)
self.auto_scale_xyz(xs, ys, zs, had_data)
return lines
plot3D = plot
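    # Illustrative usage sketch (assumed names): a parametric helix drawn with the
    # `plot` method above; `zs` supplies the third coordinate and `zdir` chooses
    # which axis it is mapped to.
    #
    #     import numpy as np
    #     import matplotlib.pyplot as plt
    #     theta = np.linspace(0, 4 * np.pi, 200)
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111, projection='3d')
    #     ax.plot(np.cos(theta), np.sin(theta), zs=theta, zdir='z')
    #     plt.show()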
def plot_surface(self, X, Y, Z, *args, norm=None, vmin=None,
vmax=None, lightsource=None, **kwargs):
"""
Create a surface plot.
By default it will be colored in shades of a solid color, but it also
supports color mapping by supplying the *cmap* argument.
.. note::
The *rcount* and *ccount* kwargs, which both default to 50,
determine the maximum number of samples used in each direction. If
the input data is larger, it will be downsampled (by slicing) to
these numbers of points.
Parameters
----------
X, Y, Z : 2d arrays
Data values.
rcount, ccount : int
Maximum number of samples used in each direction. If the input
data is larger, it will be downsampled (by slicing) to these
numbers of points. Defaults to 50.
.. versionadded:: 2.0
rstride, cstride : int
Downsampling stride in each direction. These arguments are
mutually exclusive with *rcount* and *ccount*. If only one of
*rstride* or *cstride* is set, the other defaults to 10.
'classic' mode uses a default of ``rstride = cstride = 10`` instead
of the new default of ``rcount = ccount = 50``.
color : color-like
Color of the surface patches.
cmap : Colormap
Colormap of the surface patches.
facecolors : array-like of colors.
Colors of each individual patch.
norm : Normalize
Normalization for the colormap.
vmin, vmax : float
Bounds for the normalization.
shade : bool
Whether to shade the face colors.
**kwargs :
Other arguments are forwarded to `.Poly3DCollection`.
"""
had_data = self.has_data()
if Z.ndim != 2:
raise ValueError("Argument Z must be 2-dimensional.")
# TODO: Support masked arrays
X, Y, Z = np.broadcast_arrays(X, Y, Z)
rows, cols = Z.shape
has_stride = 'rstride' in kwargs or 'cstride' in kwargs
has_count = 'rcount' in kwargs or 'ccount' in kwargs
if has_stride and has_count:
raise ValueError("Cannot specify both stride and count arguments")
rstride = kwargs.pop('rstride', 10)
cstride = kwargs.pop('cstride', 10)
rcount = kwargs.pop('rcount', 50)
ccount = kwargs.pop('ccount', 50)
if rcParams['_internal.classic_mode']:
# Strides have priority over counts in classic mode.
# So, only compute strides from counts
# if counts were explicitly given
compute_strides = has_count
else:
# If the strides are provided then it has priority.
# Otherwise, compute the strides from the counts.
compute_strides = not has_stride
if compute_strides:
rstride = int(max(np.ceil(rows / rcount), 1))
cstride = int(max(np.ceil(cols / ccount), 1))
if 'facecolors' in kwargs:
fcolors = kwargs.pop('facecolors')
else:
color = kwargs.pop('color', None)
if color is None:
color = self._get_lines.get_next_color()
color = np.array(mcolors.to_rgba(color))
fcolors = None
cmap = kwargs.get('cmap', None)
shade = kwargs.pop('shade', cmap is None)
# Shade the data
if shade and cmap is not None and fcolors is not None:
fcolors = self._shade_colors_lightsource(Z, cmap, lightsource)
# evenly spaced, and including both endpoints
row_inds = list(range(0, rows-1, rstride)) + [rows-1]
col_inds = list(range(0, cols-1, cstride)) + [cols-1]
colset = [] # the sampled facecolor
polys = []
for rs, rs_next in zip(row_inds[:-1], row_inds[1:]):
for cs, cs_next in zip(col_inds[:-1], col_inds[1:]):
ps = [
# +1 ensures we share edges between polygons
cbook._array_perimeter(a[rs:rs_next+1, cs:cs_next+1])
for a in (X, Y, Z)
]
# ps = np.stack(ps, axis=-1)
ps = np.array(ps).T
polys.append(ps)
if fcolors is not None:
colset.append(fcolors[rs][cs])
def get_normals(polygons):
"""
            Take a list of polygons and return an array of their normals.
"""
v1 = np.empty((len(polygons), 3))
v2 = np.empty((len(polygons), 3))
for poly_i, ps in enumerate(polygons):
# pick three points around the polygon at which to find the normal
# doesn't vectorize because polygons is jagged
i1, i2, i3 = 0, len(ps)//3, 2*len(ps)//3
v1[poly_i, :] = ps[i1, :] - ps[i2, :]
v2[poly_i, :] = ps[i2, :] - ps[i3, :]
return np.cross(v1, v2)
# note that the striding causes some polygons to have more coordinates
# than others
polyc = art3d.Poly3DCollection(polys, *args, **kwargs)
if fcolors is not None:
if shade:
colset = self._shade_colors(colset, get_normals(polys))
polyc.set_facecolors(colset)
polyc.set_edgecolors(colset)
elif cmap:
# doesn't vectorize because polys is jagged
avg_z = np.array([ps[:,2].mean() for ps in polys])
polyc.set_array(avg_z)
if vmin is not None or vmax is not None:
polyc.set_clim(vmin, vmax)
if norm is not None:
polyc.set_norm(norm)
else:
if shade:
colset = self._shade_colors(color, get_normals(polys))
else:
colset = color
polyc.set_facecolors(colset)
self.add_collection(polyc)
self.auto_scale_xyz(X, Y, Z, had_data)
return polyc
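    # Illustrative usage sketch (assumed names): a colormapped surface over a
    # regular grid, relying on the rcount/ccount downsampling documented above.
    #
    #     import numpy as np
    #     import matplotlib.pyplot as plt
    #     X, Y = np.meshgrid(np.linspace(-3, 3, 120), np.linspace(-3, 3, 120))
    #     Z = np.sin(np.hypot(X, Y))
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111, projection='3d')
    #     ax.plot_surface(X, Y, Z, rcount=50, ccount=50, cmap='viridis')
    #     plt.show()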
def _generate_normals(self, polygons):
'''
Generate normals for polygons by using the first three points.
        This normal, of course, might not make sense for polygons whose
        points do not all lie in a single plane.
'''
normals = []
for verts in polygons:
v1 = np.array(verts[0]) - np.array(verts[1])
v2 = np.array(verts[2]) - np.array(verts[0])
normals.append(np.cross(v1, v2))
return normals
def _shade_colors(self, color, normals):
'''
Shade *color* using normal vectors given by *normals*.
*color* can also be an array of the same length as *normals*.
'''
shade = np.array([np.dot(n / proj3d.mod(n), [-1, -1, 0.5])
if proj3d.mod(n) else np.nan
for n in normals])
mask = ~np.isnan(shade)
if len(shade[mask]) > 0:
norm = Normalize(min(shade[mask]), max(shade[mask]))
shade[~mask] = min(shade[mask])
color = mcolors.to_rgba_array(color)
# shape of color should be (M, 4) (where M is number of faces)
# shape of shade should be (M,)
# colors should have final shape of (M, 4)
alpha = color[:, 3]
colors = (0.5 + norm(shade)[:, np.newaxis] * 0.5) * color
colors[:, 3] = alpha
else:
colors = np.asanyarray(color).copy()
return colors
def _shade_colors_lightsource(self, data, cmap, lightsource):
if lightsource is None:
lightsource = LightSource(azdeg=135, altdeg=55)
return lightsource.shade(data, cmap)
def plot_wireframe(self, X, Y, Z, *args, **kwargs):
"""
Plot a 3D wireframe.
.. note::
The *rcount* and *ccount* kwargs, which both default to 50,
determine the maximum number of samples used in each direction. If
the input data is larger, it will be downsampled (by slicing) to
these numbers of points.
Parameters
----------
X, Y, Z : 2d arrays
Data values.
rcount, ccount : int
Maximum number of samples used in each direction. If the input
data is larger, it will be downsampled (by slicing) to these
numbers of points. Setting a count to zero causes the data to be
not sampled in the corresponding direction, producing a 3D line
plot rather than a wireframe plot. Defaults to 50.
.. versionadded:: 2.0
rstride, cstride : int
Downsampling stride in each direction. These arguments are
mutually exclusive with *rcount* and *ccount*. If only one of
*rstride* or *cstride* is set, the other defaults to 1. Setting a
stride to zero causes the data to be not sampled in the
corresponding direction, producing a 3D line plot rather than a
wireframe plot.
'classic' mode uses a default of ``rstride = cstride = 1`` instead
of the new default of ``rcount = ccount = 50``.
**kwargs :
Other arguments are forwarded to `.Line3DCollection`.
"""
had_data = self.has_data()
if Z.ndim != 2:
raise ValueError("Argument Z must be 2-dimensional.")
# FIXME: Support masked arrays
X, Y, Z = np.broadcast_arrays(X, Y, Z)
rows, cols = Z.shape
has_stride = 'rstride' in kwargs or 'cstride' in kwargs
has_count = 'rcount' in kwargs or 'ccount' in kwargs
if has_stride and has_count:
raise ValueError("Cannot specify both stride and count arguments")
rstride = kwargs.pop('rstride', 1)
cstride = kwargs.pop('cstride', 1)
rcount = kwargs.pop('rcount', 50)
ccount = kwargs.pop('ccount', 50)
if rcParams['_internal.classic_mode']:
# Strides have priority over counts in classic mode.
# So, only compute strides from counts
# if counts were explicitly given
if has_count:
rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0
cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0
else:
# If the strides are provided then it has priority.
# Otherwise, compute the strides from the counts.
if not has_stride:
rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0
cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0
# We want two sets of lines, one running along the "rows" of
# Z and another set of lines running along the "columns" of Z.
# This transpose will make it easy to obtain the columns.
tX, tY, tZ = np.transpose(X), np.transpose(Y), np.transpose(Z)
if rstride:
rii = list(range(0, rows, rstride))
# Add the last index only if needed
if rows > 0 and rii[-1] != (rows - 1):
rii += [rows-1]
else:
rii = []
if cstride:
cii = list(range(0, cols, cstride))
# Add the last index only if needed
if cols > 0 and cii[-1] != (cols - 1):
cii += [cols-1]
else:
cii = []
if rstride == 0 and cstride == 0:
raise ValueError("Either rstride or cstride must be non zero")
# If the inputs were empty, then just
# reset everything.
if Z.size == 0:
rii = []
cii = []
xlines = [X[i] for i in rii]
ylines = [Y[i] for i in rii]
zlines = [Z[i] for i in rii]
txlines = [tX[i] for i in cii]
tylines = [tY[i] for i in cii]
tzlines = [tZ[i] for i in cii]
lines = ([list(zip(xl, yl, zl))
for xl, yl, zl in zip(xlines, ylines, zlines)]
+ [list(zip(xl, yl, zl))
for xl, yl, zl in zip(txlines, tylines, tzlines)])
linec = art3d.Line3DCollection(lines, *args, **kwargs)
self.add_collection(linec)
self.auto_scale_xyz(X, Y, Z, had_data)
return linec
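    # Illustrative usage sketch (assumed names): a wireframe over the same kind of
    # grid; per the docstring above, setting e.g. ccount=0 samples only along rows
    # and yields a family of 3D lines instead of a mesh.
    #
    #     import numpy as np
    #     import matplotlib.pyplot as plt
    #     X, Y = np.meshgrid(np.linspace(-3, 3, 60), np.linspace(-3, 3, 60))
    #     Z = np.cos(X) * np.sin(Y)
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111, projection='3d')
    #     ax.plot_wireframe(X, Y, Z, rcount=20, ccount=20)
    #     plt.show()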
def plot_trisurf(self, *args, color=None, norm=None, vmin=None, vmax=None,
lightsource=None, **kwargs):
"""
============= ================================================
Argument Description
============= ================================================
*X*, *Y*, *Z* Data values as 1D arrays
*color* Color of the surface patches
*cmap* A colormap for the surface patches.
*norm* An instance of Normalize to map values to colors
*vmin* Minimum value to map
*vmax* Maximum value to map
*shade* Whether to shade the facecolors
============= ================================================
The (optional) triangulation can be specified in one of two ways;
either::
plot_trisurf(triangulation, ...)
where triangulation is a :class:`~matplotlib.tri.Triangulation`
object, or::
plot_trisurf(X, Y, ...)
plot_trisurf(X, Y, triangles, ...)
plot_trisurf(X, Y, triangles=triangles, ...)
in which case a Triangulation object will be created. See
        :class:`~matplotlib.tri.Triangulation` for an explanation of
these possibilities.
The remaining arguments are::
plot_trisurf(..., Z)
where *Z* is the array of values to contour, one per point
in the triangulation.
Other arguments are passed on to
:class:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
**Examples:**
.. plot:: gallery/mplot3d/trisurf3d.py
.. plot:: gallery/mplot3d/trisurf3d_2.py
.. versionadded:: 1.2.0
This plotting function was added for the v1.2.0 release.
"""
had_data = self.has_data()
# TODO: Support custom face colours
if color is None:
color = self._get_lines.get_next_color()
color = np.array(mcolors.to_rgba(color))
cmap = kwargs.get('cmap', None)
shade = kwargs.pop('shade', cmap is None)
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
if 'Z' in kwargs:
z = np.asarray(kwargs.pop('Z'))
else:
z = np.asarray(args[0])
# We do this so Z doesn't get passed as an arg to PolyCollection
args = args[1:]
triangles = tri.get_masked_triangles()
xt = tri.x[triangles]
yt = tri.y[triangles]
zt = z[triangles]
verts = np.stack((xt, yt, zt), axis=-1)
polyc = art3d.Poly3DCollection(verts, *args, **kwargs)
if cmap:
# average over the three points of each triangle
avg_z = verts[:, :, 2].mean(axis=1)
polyc.set_array(avg_z)
if vmin is not None or vmax is not None:
polyc.set_clim(vmin, vmax)
if norm is not None:
polyc.set_norm(norm)
else:
if shade:
v1 = verts[:, 0, :] - verts[:, 1, :]
v2 = verts[:, 1, :] - verts[:, 2, :]
normals = np.cross(v1, v2)
colset = self._shade_colors(color, normals)
else:
colset = color
polyc.set_facecolors(colset)
self.add_collection(polyc)
self.auto_scale_xyz(tri.x, tri.y, z, had_data)
return polyc
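    # Illustrative usage sketch (assumed names): a triangulated surface from
    # scattered samples; the Triangulation is built implicitly from x and y.
    #
    #     import numpy as np
    #     import matplotlib.pyplot as plt
    #     x = np.random.uniform(-2, 2, 200)
    #     y = np.random.uniform(-2, 2, 200)
    #     z = np.exp(-(x ** 2 + y ** 2))
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111, projection='3d')
    #     ax.plot_trisurf(x, y, z, cmap='plasma')
    #     plt.show()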
def _3d_extend_contour(self, cset, stride=5):
'''
Extend a contour in 3D by creating
'''
levels = cset.levels
colls = cset.collections
dz = (levels[1] - levels[0]) / 2
for z, linec in zip(levels, colls):
paths = linec.get_paths()
if not paths:
continue
topverts = art3d.paths_to_3d_segments(paths, z - dz)
botverts = art3d.paths_to_3d_segments(paths, z + dz)
color = linec.get_color()[0]
polyverts = []
normals = []
nsteps = np.round(len(topverts[0]) / stride)
if nsteps <= 1:
if len(topverts[0]) > 1:
nsteps = 2
else:
continue
stepsize = (len(topverts[0]) - 1) / (nsteps - 1)
for i in range(int(np.round(nsteps)) - 1):
i1 = int(np.round(i * stepsize))
i2 = int(np.round((i + 1) * stepsize))
polyverts.append([topverts[0][i1],
topverts[0][i2],
botverts[0][i2],
botverts[0][i1]])
v1 = np.array(topverts[0][i1]) - np.array(topverts[0][i2])
v2 = np.array(topverts[0][i1]) - np.array(botverts[0][i1])
normals.append(np.cross(v1, v2))
colors = self._shade_colors(color, normals)
colors2 = self._shade_colors(color, normals)
polycol = art3d.Poly3DCollection(polyverts,
facecolors=colors,
edgecolors=colors2)
polycol.set_sort_zpos(z)
self.add_collection3d(polycol)
for col in colls:
self.collections.remove(col)
def add_contour_set(self, cset, extend3d=False, stride=5, zdir='z', offset=None):
zdir = '-' + zdir
if extend3d:
self._3d_extend_contour(cset, stride)
else:
for z, linec in zip(cset.levels, cset.collections):
if offset is not None:
z = offset
art3d.line_collection_2d_to_3d(linec, z, zdir=zdir)
def add_contourf_set(self, cset, zdir='z', offset=None):
zdir = '-' + zdir
for z, linec in zip(cset.levels, cset.collections):
if offset is not None :
z = offset
art3d.poly_collection_2d_to_3d(linec, z, zdir=zdir)
linec.set_sort_zpos(z)
def contour(self, X, Y, Z, *args,
extend3d=False, stride=5, zdir='z', offset=None, **kwargs):
'''
Create a 3D contour plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*extend3d* Whether to extend contour in 3D (default: False)
*stride* Stride (step size) for extending contour
*zdir* The direction to use: x, y or z (default)
        *offset*   If specified, plot a projection of the contour
                   lines at this position in a plane normal to zdir
========== ================================================
The positional and other keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.contour`
Returns a :class:`~matplotlib.axes.Axes.contour`
'''
had_data = self.has_data()
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
cset = super().contour(jX, jY, jZ, *args, **kwargs)
self.add_contour_set(cset, extend3d, stride, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
contour3D = contour
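    # Illustrative usage sketch (assumed names): contour lines drawn in 3D, plus a
    # flattened copy projected onto the z = -2 plane via the `offset` keyword that
    # `add_contour_set` handles above.
    #
    #     import numpy as np
    #     import matplotlib.pyplot as plt
    #     X, Y = np.meshgrid(np.linspace(-3, 3, 80), np.linspace(-3, 3, 80))
    #     Z = np.sin(X) * np.cos(Y)
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111, projection='3d')
    #     ax.contour(X, Y, Z)
    #     ax.contour(X, Y, Z, zdir='z', offset=-2)
    #     ax.set_zlim(-2, 1)
    #     plt.show()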
def tricontour(self, *args,
extend3d=False, stride=5, zdir='z', offset=None, **kwargs):
"""
Create a 3D contour plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*extend3d* Whether to extend contour in 3D (default: False)
*stride* Stride (step size) for extending contour
*zdir* The direction to use: x, y or z (default)
        *offset*   If specified, plot a projection of the contour
                   lines at this position in a plane normal to zdir
========== ================================================
Other keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.tricontour`
Returns a :class:`~matplotlib.axes.Axes.contour`
.. versionchanged:: 1.3.0
Added support for custom triangulations
EXPERIMENTAL: This method currently produces incorrect output due to a
longstanding bug in 3D PolyCollection rendering.
"""
had_data = self.has_data()
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(
*args, **kwargs)
X = tri.x
Y = tri.y
if 'Z' in kwargs:
Z = kwargs.pop('Z')
else:
Z = args[0]
# We do this so Z doesn't get passed as an arg to Axes.tricontour
args = args[1:]
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
tri = Triangulation(jX, jY, tri.triangles, tri.mask)
cset = super().tricontour(tri, jZ, *args, **kwargs)
self.add_contour_set(cset, extend3d, stride, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
def contourf(self, X, Y, Z, *args, zdir='z', offset=None, **kwargs):
'''
Create a 3D contourf plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*zdir* The direction to use: x, y or z (default)
        *offset*   If specified, plot a projection of the filled contour
                   at this position in a plane normal to zdir
========== ================================================
The positional and keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.contourf`
Returns a :class:`~matplotlib.axes.Axes.contourf`
.. versionchanged :: 1.1.0
The *zdir* and *offset* kwargs were added.
'''
had_data = self.has_data()
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
cset = super().contourf(jX, jY, jZ, *args, **kwargs)
self.add_contourf_set(cset, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
contourf3D = contourf
def tricontourf(self, *args, zdir='z', offset=None, **kwargs):
"""
Create a 3D contourf plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*zdir* The direction to use: x, y or z (default)
        *offset*   If specified, plot a projection of the contour
                   lines at this position in a plane normal to zdir
========== ================================================
Other keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.tricontour`
Returns a :class:`~matplotlib.axes.Axes.contour`
.. versionchanged :: 1.3.0
Added support for custom triangulations
EXPERIMENTAL: This method currently produces incorrect output due to a
longstanding bug in 3D PolyCollection rendering.
"""
had_data = self.has_data()
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(
*args, **kwargs)
X = tri.x
Y = tri.y
if 'Z' in kwargs:
Z = kwargs.pop('Z')
else:
Z = args[0]
# We do this so Z doesn't get passed as an arg to Axes.tricontourf
args = args[1:]
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
tri = Triangulation(jX, jY, tri.triangles, tri.mask)
cset = super().tricontourf(tri, jZ, *args, **kwargs)
self.add_contourf_set(cset, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
def add_collection3d(self, col, zs=0, zdir='z'):
'''
Add a 3D collection object to the plot.
2D collection types are converted to a 3D version by
modifying the object and adding z coordinate information.
Supported are:
- PolyCollection
- LineCollection
- PatchCollection
'''
zvals = np.atleast_1d(zs)
if len(zvals) > 0 :
zsortval = min(zvals)
else :
zsortval = 0 # FIXME: Fairly arbitrary. Is there a better value?
# FIXME: use issubclass() (although, then a 3D collection
# object would also pass.) Maybe have a collection3d
# abstract class to test for and exclude?
if type(col) is mcoll.PolyCollection:
art3d.poly_collection_2d_to_3d(col, zs=zs, zdir=zdir)
col.set_sort_zpos(zsortval)
elif type(col) is mcoll.LineCollection:
art3d.line_collection_2d_to_3d(col, zs=zs, zdir=zdir)
col.set_sort_zpos(zsortval)
elif type(col) is mcoll.PatchCollection:
art3d.patch_collection_2d_to_3d(col, zs=zs, zdir=zdir)
col.set_sort_zpos(zsortval)
super().add_collection(col)
def scatter(self, xs, ys, zs=0, zdir='z', s=20, c=None, depthshade=True,
*args, **kwargs):
'''
Create a scatter plot.
============ ========================================================
Argument Description
============ ========================================================
*xs*, *ys* Positions of data points.
*zs* Either an array of the same length as *xs* and
*ys* or a single value to place all points in
the same plane. Default is 0.
*zdir* Which direction to use as z ('x', 'y' or 'z')
when plotting a 2D set.
*s* Size in points^2. It is a scalar or an array of the
same length as *x* and *y*.
*c* A color. *c* can be a single color format string, or a
sequence of color specifications of length *N*, or a
sequence of *N* numbers to be mapped to colors using the
*cmap* and *norm* specified via kwargs (see below). Note
that *c* should not be a single numeric RGB or RGBA
sequence because that is indistinguishable from an array
of values to be colormapped. *c* can be a 2-D array in
which the rows are RGB or RGBA, however, including the
case of a single row to specify the same color for
all points.
*depthshade*
Whether or not to shade the scatter markers to give
the appearance of depth. Default is *True*.
============ ========================================================
Keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.scatter`.
Returns a :class:`~mpl_toolkits.mplot3d.art3d.Patch3DCollection`
'''
had_data = self.has_data()
xs, ys, zs = np.broadcast_arrays(
*[np.ravel(np.ma.filled(t, np.nan)) for t in [xs, ys, zs]])
s = np.ma.ravel(s) # This doesn't have to match x, y in size.
xs, ys, zs, s, c = cbook.delete_masked_points(xs, ys, zs, s, c)
patches = super().scatter(xs, ys, s=s, c=c, *args, **kwargs)
is_2d = not np.iterable(zs)
zs = np.broadcast_to(zs, len(xs))
art3d.patch_collection_2d_to_3d(patches, zs=zs, zdir=zdir,
depthshade=depthshade)
if self._zmargin < 0.05 and xs.size > 0:
self.set_zmargin(0.05)
#FIXME: why is this necessary?
if not is_2d:
self.auto_scale_xyz(xs, ys, zs, had_data)
return patches
scatter3D = scatter
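    # Illustrative usage sketch (assumed names): a depth-shaded scatter whose
    # marker colors are mapped from the z values, as described above.
    #
    #     import numpy as np
    #     import matplotlib.pyplot as plt
    #     xs, ys, zs = np.random.rand(3, 100)
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111, projection='3d')
    #     ax.scatter(xs, ys, zs, c=zs, s=30, depthshade=True)
    #     plt.show()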
def bar(self, left, height, zs=0, zdir='z', *args, **kwargs):
'''
Add 2D bar(s).
========== ================================================
Argument Description
========== ================================================
*left* The x coordinates of the left sides of the bars.
*height* The height of the bars.
*zs* Z coordinate of bars, if one value is specified
they will all be placed at the same z.
*zdir* Which direction to use as z ('x', 'y' or 'z')
when plotting a 2D set.
========== ================================================
Keyword arguments are passed onto :func:`~matplotlib.axes.Axes.bar`.
Returns a :class:`~mpl_toolkits.mplot3d.art3d.Patch3DCollection`
'''
had_data = self.has_data()
patches = super().bar(left, height, *args, **kwargs)
zs = np.broadcast_to(zs, len(left))
verts = []
verts_zs = []
for p, z in zip(patches, zs):
vs = art3d.get_patch_verts(p)
verts += vs.tolist()
verts_zs += [z] * len(vs)
art3d.patch_2d_to_3d(p, z, zdir)
if 'alpha' in kwargs:
p.set_alpha(kwargs['alpha'])
if len(verts) > 0 :
# the following has to be skipped if verts is empty
# NOTE: Bugs could still occur if len(verts) > 0,
# but the "2nd dimension" is empty.
xs, ys = list(zip(*verts))
else :
xs, ys = [], []
xs, ys, verts_zs = art3d.juggle_axes(xs, ys, verts_zs, zdir)
self.auto_scale_xyz(xs, ys, verts_zs, had_data)
return patches
def bar3d(self, x, y, z, dx, dy, dz, color=None,
zsort='average', shade=True, *args, **kwargs):
"""Generate a 3D barplot.
        This method creates a three-dimensional barplot where the width,
depth, height, and color of the bars can all be uniquely set.
Parameters
----------
x, y, z : array-like
The coordinates of the anchor point of the bars.
dx, dy, dz : scalar or array-like
The width, depth, and height of the bars, respectively.
color : sequence of valid color specifications, optional
The color of the bars can be specified globally or
individually. This parameter can be:
- A single color value, to color all bars the same color.
- An array of colors of length N bars, to color each bar
independently.
- An array of colors of length 6, to color the faces of the
bars similarly.
- An array of colors of length 6 * N bars, to color each face
independently.
When coloring the faces of the boxes specifically, this is
the order of the coloring:
1. -Z (bottom of box)
2. +Z (top of box)
3. -Y
4. +Y
5. -X
6. +X
zsort : str, optional
The z-axis sorting scheme passed onto
:func:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
shade : bool, optional (default = True)
When true, this shades the dark sides of the bars (relative
to the plot's source of light).
Any additional keyword arguments are passed onto
:func:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
Returns
-------
collection : Poly3DCollection
A collection of three dimensional polygons representing
the bars.
"""
had_data = self.has_data()
x, y, z, dx, dy, dz = np.broadcast_arrays(
np.atleast_1d(x), y, z, dx, dy, dz)
minx = np.min(x)
maxx = np.max(x + dx)
miny = np.min(y)
maxy = np.max(y + dy)
minz = np.min(z)
maxz = np.max(z + dz)
polys = []
for xi, yi, zi, dxi, dyi, dzi in zip(x, y, z, dx, dy, dz):
polys.extend([
((xi, yi, zi), (xi + dxi, yi, zi),
(xi + dxi, yi + dyi, zi), (xi, yi + dyi, zi)),
((xi, yi, zi + dzi), (xi + dxi, yi, zi + dzi),
(xi + dxi, yi + dyi, zi + dzi), (xi, yi + dyi, zi + dzi)),
((xi, yi, zi), (xi + dxi, yi, zi),
(xi + dxi, yi, zi + dzi), (xi, yi, zi + dzi)),
((xi, yi + dyi, zi), (xi + dxi, yi + dyi, zi),
(xi + dxi, yi + dyi, zi + dzi), (xi, yi + dyi, zi + dzi)),
((xi, yi, zi), (xi, yi + dyi, zi),
(xi, yi + dyi, zi + dzi), (xi, yi, zi + dzi)),
((xi + dxi, yi, zi), (xi + dxi, yi + dyi, zi),
(xi + dxi, yi + dyi, zi + dzi), (xi + dxi, yi, zi + dzi)),
])
facecolors = []
if color is None:
color = [self._get_patches_for_fill.get_next_color()]
if len(color) == len(x):
# bar colors specified, need to expand to number of faces
for c in color:
facecolors.extend([c] * 6)
else:
# a single color specified, or face colors specified explicitly
facecolors = list(mcolors.to_rgba_array(color))
if len(facecolors) < len(x):
facecolors *= (6 * len(x))
if shade:
normals = self._generate_normals(polys)
sfacecolors = self._shade_colors(facecolors, normals)
else:
sfacecolors = facecolors
col = art3d.Poly3DCollection(polys,
zsort=zsort,
facecolor=sfacecolors,
*args, **kwargs)
self.add_collection(col)
self.auto_scale_xyz((minx, maxx), (miny, maxy), (minz, maxz), had_data)
return col
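    # Illustrative usage sketch (assumed names): a small grid of bars anchored at
    # z = 0; `shade=True` darkens the faces pointing away from the light source,
    # using the shading helpers above.
    #
    #     import numpy as np
    #     import matplotlib.pyplot as plt
    #     _x, _y = np.meshgrid(np.arange(4), np.arange(4))
    #     x, y = _x.ravel(), _y.ravel()
    #     top = x + y + 1
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111, projection='3d')
    #     ax.bar3d(x, y, np.zeros_like(top), 0.8, 0.8, top, shade=True)
    #     plt.show()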
def set_title(self, label, fontdict=None, loc='center', **kwargs):
ret = super().set_title(label, fontdict=fontdict, loc=loc, **kwargs)
(x, y) = self.title.get_position()
self.title.set_y(0.92 * y)
return ret
set_title.__doc__ = maxes.Axes.set_title.__doc__
def quiver(self, *args,
length=1, arrow_length_ratio=.3, pivot='tail', normalize=False,
**kwargs):
"""
Plot a 3D field of arrows.
call signatures::
quiver(X, Y, Z, U, V, W, **kwargs)
Arguments:
*X*, *Y*, *Z*:
The x, y and z coordinates of the arrow locations (default is
tail of arrow; see *pivot* kwarg)
*U*, *V*, *W*:
The x, y and z components of the arrow vectors
        The arguments can be array-like or scalars, so long as they
        can be broadcast together. The arguments can also be
masked arrays. If an element in any of argument is masked, then
that corresponding quiver element will not be plotted.
Keyword arguments:
*length*: [1.0 | float]
The length of each quiver, default to 1.0, the unit is
the same with the axes
*arrow_length_ratio*: [0.3 | float]
The ratio of the arrow head with respect to the quiver,
default to 0.3
*pivot*: [ 'tail' | 'middle' | 'tip' ]
The part of the arrow that is at the grid point; the arrow
rotates about this point, hence the name *pivot*.
Default is 'tail'
*normalize*: bool
When True, all of the arrows will be the same length. This
defaults to False, where the arrows will be different lengths
depending on the values of u,v,w.
Any additional keyword arguments are delegated to
:class:`~matplotlib.collections.LineCollection`
"""
def calc_arrow(uvw, angle=15):
"""
            Calculate the two arrow-head direction vectors for *uvw*, which
            should be a unit vector; each head vector is *uvw* rotated by
            *angle* degrees about an axis perpendicular to it.
"""
# get unit direction vector perpendicular to (u,v,w)
norm = np.linalg.norm(uvw[:2])
if norm > 0:
x = uvw[1] / norm
y = -uvw[0] / norm
else:
x, y = 0, 1
# compute the two arrowhead direction unit vectors
ra = math.radians(angle)
c = math.cos(ra)
s = math.sin(ra)
# construct the rotation matrices
Rpos = np.array([[c+(x**2)*(1-c), x*y*(1-c), y*s],
[y*x*(1-c), c+(y**2)*(1-c), -x*s],
[-y*s, x*s, c]])
# opposite rotation negates all the sin terms
Rneg = Rpos.copy()
Rneg[[0,1,2,2],[2,2,0,1]] = -Rneg[[0,1,2,2],[2,2,0,1]]
# multiply them to get the rotated vector
return Rpos.dot(uvw), Rneg.dot(uvw)
had_data = self.has_data()
# handle args
argi = 6
if len(args) < argi:
raise ValueError('Wrong number of arguments. Expected %d got %d' %
(argi, len(args)))
# first 6 arguments are X, Y, Z, U, V, W
input_args = args[:argi]
# if any of the args are scalar, convert into list
input_args = [[k] if isinstance(k, (int, float)) else k
for k in input_args]
# extract the masks, if any
masks = [k.mask for k in input_args if isinstance(k, np.ma.MaskedArray)]
# broadcast to match the shape
bcast = np.broadcast_arrays(*(input_args + masks))
input_args = bcast[:argi]
masks = bcast[argi:]
if masks:
# combine the masks into one
mask = reduce(np.logical_or, masks)
# put mask on and compress
input_args = [np.ma.array(k, mask=mask).compressed()
for k in input_args]
else:
input_args = [k.flatten() for k in input_args]
if any(len(v) == 0 for v in input_args):
# No quivers, so just make an empty collection and return early
linec = art3d.Line3DCollection([], *args[argi:], **kwargs)
self.add_collection(linec)
return linec
# Following assertions must be true before proceeding
# must all be ndarray
assert all(isinstance(k, np.ndarray) for k in input_args)
# must all in same shape
assert len({k.shape for k in input_args}) == 1
shaft_dt = np.linspace(0, length, num=2)
arrow_dt = shaft_dt * arrow_length_ratio
if pivot == 'tail':
shaft_dt -= length
elif pivot == 'middle':
shaft_dt -= length/2.
elif pivot != 'tip':
raise ValueError('Invalid pivot argument: ' + str(pivot))
XYZ = np.column_stack(input_args[:3])
UVW = np.column_stack(input_args[3:argi]).astype(float)
# Normalize rows of UVW
norm = np.linalg.norm(UVW, axis=1)
# If any row of UVW is all zeros, don't make a quiver for it
mask = norm > 0
XYZ = XYZ[mask]
if normalize:
UVW = UVW[mask] / norm[mask].reshape((-1, 1))
else:
UVW = UVW[mask]
if len(XYZ) > 0:
# compute the shaft lines all at once with an outer product
shafts = (XYZ - np.multiply.outer(shaft_dt, UVW)).swapaxes(0, 1)
# compute head direction vectors, n heads by 2 sides by 3 dimensions
head_dirs = np.array([calc_arrow(d) for d in UVW])
# compute all head lines at once, starting from where the shaft ends
heads = shafts[:, :1] - np.multiply.outer(arrow_dt, head_dirs)
# stack left and right head lines together
heads.shape = (len(arrow_dt), -1, 3)
# transpose to get a list of lines
heads = heads.swapaxes(0, 1)
lines = [*shafts, *heads]
else:
lines = []
linec = art3d.Line3DCollection(lines, *args[argi:], **kwargs)
self.add_collection(linec)
self.auto_scale_xyz(XYZ[:, 0], XYZ[:, 1], XYZ[:, 2], had_data)
return linec
quiver3D = quiver
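    # Illustrative usage sketch (assumed names): a coarse vector field; with
    # `normalize=True` every arrow is drawn at the same length, scaled by `length`.
    #
    #     import numpy as np
    #     import matplotlib.pyplot as plt
    #     x, y, z = np.meshgrid(np.arange(0.0, 1.0, 0.25),
    #                           np.arange(0.0, 1.0, 0.25),
    #                           np.arange(0.0, 1.0, 0.25))
    #     u, v, w = np.sin(x), np.cos(y), np.ones_like(z)
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111, projection='3d')
    #     ax.quiver(x, y, z, u, v, w, length=0.15, normalize=True)
    #     plt.show()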
def voxels(self, *args, facecolors=None, edgecolors=None, **kwargs):
"""
ax.voxels([x, y, z,] /, filled, **kwargs)
Plot a set of filled voxels
All voxels are plotted as 1x1x1 cubes on the axis, with filled[0,0,0]
placed with its lower corner at the origin. Occluded faces are not
plotted.
Call signatures::
voxels(filled, facecolors=fc, edgecolors=ec, **kwargs)
voxels(x, y, z, filled, facecolors=fc, edgecolors=ec, **kwargs)
.. versionadded:: 2.1
Parameters
----------
filled : 3D np.array of bool
A 3d array of values, with truthy values indicating which voxels
to fill
x, y, z : 3D np.array, optional
The coordinates of the corners of the voxels. This should broadcast
to a shape one larger in every dimension than the shape of `filled`.
These can be used to plot non-cubic voxels.
If not specified, defaults to increasing integers along each axis,
like those returned by :func:`~numpy.indices`.
As indicated by the ``/`` in the function signature, these arguments
can only be passed positionally.
facecolors, edgecolors : array_like, optional
The color to draw the faces and edges of the voxels. Can only be
passed as keyword arguments.
This parameter can be:
- A single color value, to color all voxels the same color. This
can be either a string, or a 1D rgb/rgba array
- ``None``, the default, to use a single color for the faces, and
the style default for the edges.
- A 3D ndarray of color names, with each item the color for the
corresponding voxel. The size must match the voxels.
- A 4D ndarray of rgb/rgba data, with the components along the
last axis.
**kwargs
Additional keyword arguments to pass onto
:func:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
Returns
-------
faces : dict
A dictionary indexed by coordinate, where ``faces[i,j,k]`` is a
`Poly3DCollection` of the faces drawn for the voxel
``filled[i,j,k]``. If no faces were drawn for a given voxel, either
because it was not asked to be drawn, or it is fully occluded, then
``(i,j,k) not in faces``.
Examples
--------
.. plot:: gallery/mplot3d/voxels.py
.. plot:: gallery/mplot3d/voxels_rgb.py
.. plot:: gallery/mplot3d/voxels_torus.py
.. plot:: gallery/mplot3d/voxels_numpy_logo.py
"""
# work out which signature we should be using, and use it to parse
# the arguments. Name must be voxels for the correct error message
if len(args) >= 3:
# underscores indicate position only
def voxels(__x, __y, __z, filled, **kwargs):
return (__x, __y, __z), filled, kwargs
else:
def voxels(filled, **kwargs):
return None, filled, kwargs
xyz, filled, kwargs = voxels(*args, **kwargs)
# check dimensions
if filled.ndim != 3:
raise ValueError("Argument filled must be 3-dimensional")
size = np.array(filled.shape, dtype=np.intp)
# check xyz coordinates, which are one larger than the filled shape
coord_shape = tuple(size + 1)
if xyz is None:
x, y, z = np.indices(coord_shape)
else:
x, y, z = (np.broadcast_to(c, coord_shape) for c in xyz)
def _broadcast_color_arg(color, name):
if np.ndim(color) in (0, 1):
# single color, like "red" or [1, 0, 0]
return np.broadcast_to(color, filled.shape + np.shape(color))
elif np.ndim(color) in (3, 4):
# 3D array of strings, or 4D array with last axis rgb
if np.shape(color)[:3] != filled.shape:
raise ValueError(
"When multidimensional, {} must match the shape of "
"filled".format(name))
return color
else:
raise ValueError("Invalid {} argument".format(name))
# broadcast and default on facecolors
if facecolors is None:
facecolors = self._get_patches_for_fill.get_next_color()
facecolors = _broadcast_color_arg(facecolors, 'facecolors')
# broadcast but no default on edgecolors
edgecolors = _broadcast_color_arg(edgecolors, 'edgecolors')
# always scale to the full array, even if the data is only in the center
self.auto_scale_xyz(x, y, z)
# points lying on corners of a square
square = np.array([
[0, 0, 0],
[0, 1, 0],
[1, 1, 0],
[1, 0, 0]
], dtype=np.intp)
voxel_faces = defaultdict(list)
def permutation_matrices(n):
""" Generator of cyclic permutation matices """
mat = np.eye(n, dtype=np.intp)
for i in range(n):
yield mat
mat = np.roll(mat, 1, axis=0)
# iterate over each of the YZ, ZX, and XY orientations, finding faces to
# render
for permute in permutation_matrices(3):
# find the set of ranges to iterate over
pc, qc, rc = permute.T.dot(size)
pinds = np.arange(pc)
qinds = np.arange(qc)
rinds = np.arange(rc)
square_rot = square.dot(permute.T)
# iterate within the current plane
for p in pinds:
for q in qinds:
# iterate perpendicularly to the current plane, handling
# boundaries. We only draw faces between a voxel and an
# empty space, to avoid drawing internal faces.
# draw lower faces
p0 = permute.dot([p, q, 0])
i0 = tuple(p0)
if filled[i0]:
voxel_faces[i0].append(p0 + square_rot)
# draw middle faces
for r1, r2 in zip(rinds[:-1], rinds[1:]):
p1 = permute.dot([p, q, r1])
p2 = permute.dot([p, q, r2])
i1 = tuple(p1)
i2 = tuple(p2)
if filled[i1] and not filled[i2]:
voxel_faces[i1].append(p2 + square_rot)
elif not filled[i1] and filled[i2]:
voxel_faces[i2].append(p2 + square_rot)
# draw upper faces
pk = permute.dot([p, q, rc-1])
pk2 = permute.dot([p, q, rc])
ik = tuple(pk)
if filled[ik]:
voxel_faces[ik].append(pk2 + square_rot)
# iterate over the faces, and generate a Poly3DCollection for each voxel
polygons = {}
for coord, faces_inds in voxel_faces.items():
# convert indices into 3D positions
if xyz is None:
faces = faces_inds
else:
faces = []
for face_inds in faces_inds:
ind = face_inds[:, 0], face_inds[:, 1], face_inds[:, 2]
face = np.empty(face_inds.shape)
face[:, 0] = x[ind]
face[:, 1] = y[ind]
face[:, 2] = z[ind]
faces.append(face)
poly = art3d.Poly3DCollection(faces,
facecolors=facecolors[coord],
edgecolors=edgecolors[coord],
**kwargs
)
self.add_collection3d(poly)
polygons[coord] = poly
return polygons
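# Illustrative usage sketch (assumed names): drawing a 4x4x4 block of voxels with
# one corner voxel removed; only faces between a filled voxel and empty space are
# generated, as implemented above.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     filled = np.ones((4, 4, 4), dtype=bool)
#     filled[0, 0, 0] = False
#     fig = plt.figure()
#     ax = fig.add_subplot(111, projection='3d')
#     ax.voxels(filled, facecolors='tab:blue', edgecolors='k')
#     plt.show()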
def get_test_data(delta=0.05):
'''
Return a tuple X, Y, Z with a test data set.
'''
x = y = np.arange(-3.0, 3.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = np.exp(-(X**2 + Y**2) / 2) / (2 * np.pi)
Z2 = (np.exp(-(((X - 1) / 1.5)**2 + ((Y - 1) / 0.5)**2) / 2) /
(2 * np.pi * 0.5 * 1.5))
Z = Z2 - Z1
X = X * 10
Y = Y * 10
Z = Z * 500
return X, Y, Z
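# Illustrative usage sketch (assumed names): the helper above provides a canned
# surface for quick demos, e.g.
#
#     import matplotlib.pyplot as plt
#     from mpl_toolkits.mplot3d import axes3d
#     X, Y, Z = axes3d.get_test_data(0.1)
#     fig = plt.figure()
#     ax = fig.add_subplot(111, projection='3d')
#     ax.plot_wireframe(X, Y, Z, rstride=5, cstride=5)
#     plt.show()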
########################################################
# Register Axes3D as a 'projection' object available
# for use just like any other axes
########################################################
import matplotlib.projections as proj
proj.projection_registry.register(Axes3D)
|
[
"[email protected]"
] | |
3b431dde97db8ea84f5bdc268f350d1f65c5b6bf
|
870a749904a488193b8bc52d3dc86335ebab812d
|
/example4.py
|
f6f06cc69bce0a50d2a7038c5b4341e475ac8c50
|
[] |
no_license
|
Majd96/DebuggingPython
|
e8dcf23bc0fea9acc758a2fd85babb79c17f3185
|
da965b63f0c2e63e08353b3c4a188d7e1b5bb363
|
refs/heads/master
| 2020-03-23T14:54:57.219202 | 2018-07-20T12:12:30 | 2018-07-20T12:12:30 | 141,707,926 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 120 |
py
|
#print the first letters until reaching the half
word = "Jerusamlem"
pos=0
while(pos<len(word)/2):
print(word[pos])
|
[
"[email protected]"
] | |
f69c16a19a338a4c13d918bc24780962c2d0cd5e
|
1c7e774fbf663771e404b086a61aa14468091e40
|
/myprofile/asgi.py
|
86c9ced93554a16e75fb33dbc5fb94858288c5fe
|
[] |
no_license
|
aliasgartaksali/myprofile
|
3d37340bd460989750d6ced52c35fb245f48a44f
|
dc2a9704316f2e3fb7d6de3f0d52f4ae4651a3e7
|
refs/heads/main
| 2023-01-07T18:53:06.123253 | 2020-10-19T11:53:41 | 2020-10-19T11:53:41 | 305,365,005 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 411 |
py
|
"""
ASGI config for myprofile project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myprofile.settings')
application = get_asgi_application()
|
[
"[email protected]"
] | |
55226d6399a5558f11abc91e3f66bf5e0d190179
|
5c6a2b2ba00b28803f1172393d44ac65cd762a0d
|
/blog/urls.py
|
0ffc4b886fac8171d260afd8db284d43329e9f9e
|
[] |
no_license
|
PankajBasera/My-first-blog
|
ac8b141a1bafd3febbcf34fde9c4a6c1a72623ba
|
8b8afb055c0581250f5610c77630c548d3173d94
|
refs/heads/master
| 2023-09-02T20:43:36.797082 | 2021-10-28T15:16:46 | 2021-10-28T15:16:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 162 |
py
|
from django.urls import path
from django.urls.resolvers import URLPattern
from . import views
urlpatterns = [
path('', views.post_list, name='post_list'),
]
|
[
"[email protected]"
] | |
1025f53cec2feecc8996ae422282e42729e32047
|
52400f92d663d920cfaceeb48ab05c54b5065302
|
/npsemrel/npsemrel/ml/features/syntax/__init__.py
|
2c11642198c3dab3127537e58e50870f69e7457d
|
[] |
no_license
|
Barkar19/wsd-nlp
|
22cd2f4414b217f8660e2b36b5eb07e86b4fa72f
|
01b4412246c877eb30c82cefa3f2ec0b05612f9c
|
refs/heads/master
| 2021-09-05T06:26:40.261572 | 2018-01-24T18:17:18 | 2018-01-24T18:17:18 | 111,022,523 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 32 |
py
|
# import feature_i
# import pos
|
[
"[email protected]"
] | |
b5f7b40cdab61e773d1bec1c144966fc8c019ad5
|
b9878c92b857f73ff0452fc51c822cfc9fa4dc1c
|
/watson_machine_learning_client/libs/repo/swagger_client/models/connection_object_target_experiments.py
|
f8548c105d870dc07cfbde41d0896b443cf3f175
|
[] |
no_license
|
DavidCastilloAlvarado/WMLC_mod
|
35f5d84990c59b623bfdd27369fe7461c500e0a5
|
f2673b9c77bd93c0e017831ee4994f6d9789d9a1
|
refs/heads/master
| 2022-12-08T02:54:31.000267 | 2020-09-02T15:49:21 | 2020-09-02T15:49:21 | 292,322,284 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,806 |
py
|
# coding: utf-8
"""
No descripton provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class ConnectionObjectTargetExperiments(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, type=None, connection=None, target=None):
"""
ConnectionObjectTargetExperiments - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'type': 'str',
'connection': 'dict(str, str)',
'target': 'object'
}
self.attribute_map = {
'type': 'type',
'connection': 'connection',
'target': 'target'
}
self._type = type
self._connection = connection
self._target = target
@property
def type(self):
"""
Gets the type of this ConnectionObjectTargetExperiments.
:return: The type of this ConnectionObjectTargetExperiments.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this ConnectionObjectTargetExperiments.
:param type: The type of this ConnectionObjectTargetExperiments.
:type: str
"""
self._type = type
@property
def connection(self):
"""
Gets the connection of this ConnectionObjectTargetExperiments.
:return: The connection of this ConnectionObjectTargetExperiments.
:rtype: dict(str, str)
"""
return self._connection
@connection.setter
def connection(self, connection):
"""
Sets the connection of this ConnectionObjectTargetExperiments.
:param connection: The connection of this ConnectionObjectTargetExperiments.
:type: dict(str, str)
"""
self._connection = connection
@property
def target(self):
"""
Gets the target of this ConnectionObjectTargetExperiments.
:return: The target of this ConnectionObjectTargetExperiments.
:rtype: object
"""
return self._target
@target.setter
def target(self, target):
"""
Sets the target of this ConnectionObjectTargetExperiments.
:param target: The target of this ConnectionObjectTargetExperiments.
:type: object
"""
self._target = target
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"[email protected]"
] | |
b67e95b7e22f46ae001f3e019af40c357b893231
|
c3cf5e0dc112b9118eee75264d069c0267a8d810
|
/utils/no_segment_processer_add_validation.py
|
6ca87b9268e86daa93d65dd879a7cfc5af3d5c22
|
[] |
no_license
|
chengtbf/ImageCaption
|
e8f724f594ef7122940a6dcb3f40c4def67092b1
|
76c2284d3f05a6c34c16744ac2e1ddc6d0cf83d5
|
refs/heads/master
| 2021-04-12T11:27:41.926871 | 2018-07-02T08:35:30 | 2018-07-02T08:35:30 | 126,583,051 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,381 |
py
|
def copy_value(values):
    # Return a shallow copy so clearing `content` later does not empty stored sentences
    return list(values)
in_file = 'D:/train.txt'
in2_file = 'D:/valid.txt'
out_dic_file = 'D:/valid_dictionary.txt'
out_vec_file = 'D:/valid_train_vector.txt'
out_vec2_file = 'D:/valid_valid_vector.txt'
sentences = []
valid_sentences = []
content = []
for line in open(in_file):
line = line.split('\n')[0]
if line.isdigit():
if len(content) > 0:
sentences.append(copy_value(content))
content.clear()
else:
content.append(line)
sentences.append(copy_value(content))
content.clear()
for line in open(in2_file):
line = line.split('\n')[0]
if line.isdigit():
if len(content) > 0:
valid_sentences.append(copy_value(content))
content.clear()
else:
content.append(line)
valid_sentences.append(copy_value(content))
dic = {}
dic["<S>"] = 1
dic["</S>"] = 2
dic_size = 3
for st in sentences:
for sent in st:
for i in range(len(sent)):
if sent[i] in dic:
continue
else:
dic[sent[i]] = dic_size
dic_size += 1
for st in valid_sentences:
for sent in st:
for i in range(len(sent)):
if sent[i] in dic:
continue
else:
dic[sent[i]] = dic_size
dic_size += 1
dic_size -= 1
#print(dic['大'])
#output map
with open(out_dic_file, 'w') as f:
id2word = {}
for k, v in dic.items():
id2word[v] = k
for i in range(dic_size):
f.write(id2word[i+1] + " " + repr(i+1) + '\n')
#output vector
with open(out_vec_file, 'w') as f:
for index in range(len(sentences)):
f.write(repr(index+1) + "\n")
for sent in sentences[index]:
f.write("1 ")
for i in range(len(sent)):
#print(sent[i])
#print(dic[sent[i]])
f.write(repr(dic[sent[i]]) + " ")
f.write("2\n")
with open(out_vec2_file, 'w') as f:
for index in range(len(valid_sentences)):
f.write(repr(index+8001) + "\n")
for sent in valid_sentences[index]:
f.write("1 ")
for i in range(len(sent)):
#print(sent[i])
#print(dic[sent[i]])
f.write(repr(dic[sent[i]]) + " ")
f.write("2\n")
|
[
"[email protected]"
] | |
16632e1cfd929360e81b6b66540741a40107d618
|
113d9082d153adbccd637da76318b984f249baf5
|
/setup.py
|
b2cce85ef433c74f9b005df1a6e7c62d9261ca91
|
[
"BSD-3-Clause"
] |
permissive
|
jorcast18462/django-applepodcast
|
bebb6f85d4c3ed98c96e6628443ece613898ca32
|
50732acfbe1ca258e5afb44c117a6ac5fa0c1219
|
refs/heads/master
| 2023-03-21T13:05:08.576831 | 2018-10-06T22:19:12 | 2018-10-06T22:19:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,675 |
py
|
from __future__ import unicode_literals
import os
from setuptools import find_packages, setup
setup(
name='django-applepodcast',
version='0.3.7',
description='A Django podcast app optimized for Apple Podcasts',
long_description=open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'README.rst')).read(),
author='Richard Cornish',
author_email='[email protected]',
url='https://github.com/richardcornish/django-applepodcast',
license='BSD',
zip_safe=False,
include_package_data=True,
packages=find_packages(exclude=('tests',)),
install_requires=[
'bleach',
'mutagen',
'pillow',
],
test_suite='podcast.tests',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
[
"[email protected]"
] | |
369004a9e9a87da680156595de04557b95c67f10
|
5e966da612d69f8428d1d2216d75e0e63c3f6b02
|
/predict_test.py
|
ffff667db80f97fd3f91e1d594804806ddf02d7c
|
[] |
no_license
|
ieiriyuki/titanic-survival
|
8f061afcf9d16308970cabc3fa3e605573cb8b6b
|
fd33b976cbf0184b1e2e3f9ebbe5545e6d5d218d
|
refs/heads/master
| 2020-03-21T01:46:45.159306 | 2018-06-22T07:10:22 | 2018-06-22T07:10:22 | 137,961,998 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,108 |
py
|
#!/usr/bin/python
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from time import time
import pandas as pd
train0 = pd.read_csv("./data/train.csv", sep=",", header=0)
test0 = pd.read_csv("./data/test.csv", sep=",", header=0)
train1 = train0[['Survived','Pclass','Sex', 'Parch']]
train1 = train1.mask(train1 == 'female', 1)
train1 = train1.mask(train1 == 'male', 0)
test1 = test0[['Pclass','Sex', 'Parch']]
test1 = test1.mask(test1 == 'female', 1)
test1 = test1.mask(test1 == 'male', 0)
mdl = LogisticRegression(C=0.2)
x_train, x_test, y_train, y_test = train_test_split(
train1[['Pclass','Sex','Parch']], train1[['Survived']], test_size=0.2
)
mdl.fit(x_train, y_train)
print("score for train is: {0}".format(mdl.score(x_train, y_train)))
print("score for test is: {0}".format(mdl.score(x_test, y_test)))
pred = mdl.predict(test1[['Pclass','Sex', 'Parch']])
submit = pd.DataFrame({'PassengerId': test0['PassengerId'],
'Survived': pred})
submit.to_csv("./data/pc_sx_pr_submission.csv", index=False)
#end of file
|
[
"[email protected]"
] | |
68740806ca9fdcb8c924b5a4b88a4c98f0efd8d7
|
3b831eedb7afede666088b6e018c829219938a93
|
/Grouping_Values.py
|
d73419177b17ac18330e2f7223561e75e54c044e
|
[] |
no_license
|
joydas65/GeeksforGeeks
|
f03ed1aaea88d894f4d8ac0d70f574c4cd78a64b
|
e58c42cb3c9fe3a87e6683d8e3fda442dc83b45b
|
refs/heads/master
| 2023-01-12T02:19:54.967779 | 2023-01-10T17:28:41 | 2023-01-10T17:28:41 | 161,937,667 | 9 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 432 |
py
|
class Solution:
def isPossible(self, N, arr, K):
# code here
d = dict()
for i in arr:
if i in d:
d[i] += 1
else:
d[i] = 1
for i in d:
if d[i] > K*2:
return 0
return 1
|
[
"[email protected]"
] | |
695df45d3c45d2a3dddc563aa799797402a2e546
|
91ac436e321e37edc4c35a92ad8c190da31a2dca
|
/pickle/pickle_recursive.py
|
986e15fbd7d3038c2931c9c281b0ad65b759b444
|
[] |
no_license
|
jagan/ze-learnpythehardway
|
64d1fce42c7bce851377d1a0b96f42c295805335
|
2304ff09904a14ee5fe87e204f2c3732ac934d50
|
refs/heads/master
| 2021-01-13T01:36:35.254041 | 2013-10-25T16:24:11 | 2013-10-25T16:24:11 | 13,439,561 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 761 |
py
|
import cPickle as pik
def dump_an_obj(obj, file):
with open(file, "wb") as f:
pik.dump(obj, f, 2) # Pickle protocol version 2
def read_the_obj(file):
with open(file, "rb") as f:
obj = pik.load(f)
return obj
if __name__ == "__main__":
l = [1, 2, 3]
l.append(l)
print l
print l[3]
fname = 'recursive.pyobj'
dump_an_obj(l, fname)
m = read_the_obj(fname)
print m
print m[3]
####
l1 = [1, 2, 3]
print l1
l2 = l1
l2.append(4)
print l1, l2
fname = 'tuple.pyobj'
dump_an_obj((l1, l2), fname)
m1, m2 = read_the_obj(fname)
print m1, m2
m2.append(5)
print 'm1, m2:', m1, m2
assert m1 is m2
print 'l1, l2:', l1, l2
assert not l1 is m1
|
[
"[email protected]"
] | |
09c1b393b0f9f6175dd19bafbe061ba24c38973d
|
2119abe2e865dabaac5f8df5bd5688df5c817ba8
|
/g13gui/g13/displaydevice_tests.py
|
afa1eba93a4340255898e6240ca604d6e52d7cbc
|
[
"MIT"
] |
permissive
|
ddnut/g13gui
|
a4daba079367ff24f910a6614671b2cf8c7a9867
|
aa07ee91b0fd89eb8d9991291e11ca3a97ca11cc
|
refs/heads/master
| 2023-06-17T23:40:09.841290 | 2021-07-13T19:05:16 | 2021-07-13T19:05:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,271 |
py
|
import unittest
import time
from dbus.mainloop.glib import DBusGMainLoop
from g13gui.model.prefs import Preferences
from g13gui.bitwidgets.display import Display
from g13gui.bitwidgets.screen import Screen
from g13gui.bitwidgets.label import Label
from g13gui.bitwidgets.rectangle import Rectangle
from g13gui.g13.displaydevice import G13DisplayDevice
from g13gui.g13.manager import DeviceManager
from g13gui.bitwidgets import DISPLAY_WIDTH
from g13gui.bitwidgets import DISPLAY_HEIGHT
class DisplayDeviceTests(unittest.TestCase):
def setUp(self):
self.prefs = Preferences()
self.manager = DeviceManager(self.prefs)
self.manager.start()
time.sleep(1)
self.dd = G13DisplayDevice(self.manager)
self.d = Display(self.dd)
self.s = Screen(self.d)
def tearDown(self):
time.sleep(1)
self.manager.shutdown()
def testDisplay(self):
rect = Rectangle(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT)
rect.show()
self.s.addChild(rect)
label = Label(0, 0, 'Hello, world!')
label.show()
self.s.addChild(label)
self.s.buttonBar.hide()
self.s.nextFrame()
if __name__ == '__main__':
DBusGMainLoop(set_as_default=True)
unittest.main()
|
[
"[email protected]"
] | |
68da6883582383a3328f9aa8107fe126c7fa7346
|
554bac52953d73db04c686ebbef3eb9c1600c650
|
/novice/01-01/latihan/latihan5.py
|
614bbc7f90eacc63fd4649a1458badde6a56c640
|
[] |
no_license
|
pradana1/praxis-academy
|
aad37c1326e638d666415c4e515262bd21a52e48
|
fbfef9659fa192b873aac551b8770ae6c774d717
|
refs/heads/master
| 2021-01-06T16:13:29.183400 | 2020-04-04T13:15:05 | 2020-04-04T13:15:05 | 241,390,845 | 0 | 1 | null | 2020-07-21T14:45:05 | 2020-02-18T15:04:04 |
Python
|
UTF-8
|
Python
| false | false | 114 |
py
|
list(range(3, 6))
def concat(*args, sep="/"):
return sep.join(args)
concat("earth", "mars", "venus", sep=".")
|
[
"[email protected]"
] | |
a2169a0f6f86b2f788d0140f3c55b55953c3e356
|
39446bc5f68d38f972d29de009178c9a910b6302
|
/TextAnalysis/util/miscellaneous.py
|
8811d16339072e888edffb4b9560cb381c3ec4c7
|
[] |
no_license
|
pyceileen/master_thesis
|
a94030f1b4a96e869f814d4bfe909dbf7699dfcf
|
2aa6b4059b2750410d1d12e583106dc43e5d423b
|
refs/heads/master
| 2022-12-29T19:21:33.698948 | 2020-10-01T12:39:00 | 2020-10-01T12:39:00 | 250,233,930 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,344 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 12 11:22:57 2020
@author: Pei-yuChen
"""
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.utils.class_weight import compute_class_weight, compute_sample_weight
from sklearn.metrics import classification_report, confusion_matrix
from scipy import stats
def get_class_weight(y_array):
weights_list = compute_class_weight('balanced',
np.unique(y_array), y_array)
weights_list = dict(enumerate(weights_list))
return(weights_list)
def get_sample_weight(y_array):
count = np.unique(y_array, return_counts=True)
weight = len(y_array) / (len(count[0]) * count[1])
class_dict = dict(zip(np.unique(y_array),weight))
sample_weight = list(map(class_dict.get, y_array))
return(np.array(sample_weight))
def m(x, w):
"""Weighted Mean"""
return np.sum(x * w) / np.sum(w)
def cov(x, y, w):
"""Weighted Covariance"""
return np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w)
def weighted_corr(x, y, w):
"""Weighted Correlation"""
return cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w))
def y2string(raw_y):
y_string = []
for i in raw_y:
if i == 0:
x = 'negative'
elif i == 1:
x = 'neutral'
else:
x = 'positive'
y_string.append(x)
return(y_string)
def make_classfication_report(y_pred, y_true):
y_pred_str = y2string(y_pred)
y_true_str = y2string(y_true)
return(print(classification_report(y_true_str, y_pred_str)))
def draw_confusion_matrix(y_pred, y_true, title_name):
y_pred_str = y2string(y_pred)
y_true_str = y2string(y_true)
label_list = ["negative", "neutral", "positive"]
cfm = confusion_matrix(y_true_str, y_pred_str, labels=label_list)
df_cm = pd.DataFrame(cfm, index=label_list, columns=label_list)
cfm_norm = df_cm.astype('float') / df_cm.sum(axis=1)[:, np.newaxis]
labels = (100. * cfm_norm).round(2).astype(str) + '%'
plt.figure(figsize=(10,7))
sns.set(font_scale=1) # for label size
    sns.despine(offset=10, trim=True)
ax = sns.heatmap(cfm_norm, annot=labels, annot_kws={"size": 12}, fmt='',
vmin=0, vmax=0.70, cmap="Purples", linewidths=.5, cbar=False) # font size
cbar = ax.figure.colorbar(ax.collections[0])
cbar.set_ticks([0, 0.30, 0.60])
cbar.set_ticklabels(["0%", "30%", "60%"])
plt.xlabel("Predicted", labelpad=10)
plt.ylabel("True", labelpad=10)
plt.title(title_name)
plt.show()
def draw_regression(y_true, y_pred, title):
plt.figure()
plt.axis('square')
plt.xlim([1, 5.3])
plt.ylim([1, 5.3])
plt.yticks(np.arange(1, 5.3, 0.5))
plt.xticks(np.arange(1, 5.3, 0.5))
plt.xlabel("True")
plt.ylabel("Predicted")
plt.scatter(y_true, y_pred, s=25, alpha=0.8)
slope, intercept, r_value, p_value, std_err = stats.linregress(y_true,y_pred)
line = slope*y_true+intercept
    plt.plot(y_true, line, color='lime', label='r = {:.3f}'.format(r_value))
plt.legend(fontsize=9)
plt.title(title)
plt.savefig("output\\figures\\"+title)
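# Minimal usage sketch for the weighted-statistics helpers above (the arrays
# below are made-up illustration data, not results from any experiment).
if __name__ == '__main__':
    y = np.array([0, 0, 0, 1, 1, 2])
    print(get_class_weight(y))       # balanced per-class weights, roughly {0: 0.67, 1: 1.0, 2: 2.0}
    print(get_sample_weight(y))      # one weight per sample
    x1 = np.array([1.0, 2.0, 3.0, 4.0])
    x2 = np.array([1.1, 1.9, 3.2, 3.8])
    w = np.array([1.0, 1.0, 2.0, 2.0])
    print(weighted_corr(x1, x2, w))  # weighted Pearson correlation, close to 1.0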
|
[
"[email protected]"
] | |
23d6a04e73cb64a8b99b1049956a491e698cfc84
|
86dc81e21f5b9e784dd087666d4d980c34781536
|
/udp_bro_send.py
|
596343dd578225cf7d1f4e55544f7bb7e2be5825
|
[] |
no_license
|
sheltie03/udp_python
|
37b4e1f3377979c26e247a020efb958b3dfc28e5
|
cb0551fc4026a3baff968e81b758ea4d7d7e5fd6
|
refs/heads/master
| 2021-07-09T15:37:46.684924 | 2017-10-02T08:06:25 | 2017-10-02T08:06:25 | 105,496,943 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 492 |
py
|
# -*- coding: utf-8 -*-
import socket
import time
def main():
host = ''
port = 4000
# local_addr = '192.168.10.255'
local_addr = '255.255.255.255'
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.bind((host, port))
    while True:
        msg = 'Hello Server'.encode('utf-8')
        print(msg)
        sock.sendto(msg, (local_addr, port))
        time.sleep(1)  # throttle the broadcast; `time` was imported but never used
if __name__ == '__main__':
main()
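# A matching receiver is not part of this file; a minimal sketch (assumption:
# same port, listening on all interfaces) would look like:
#
#   import socket
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#   sock.bind(('', 4000))
#   while True:
#       data, addr = sock.recvfrom(1024)
#       print(data.decode('utf-8'), 'from', addr)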
|
[
"[email protected]"
] | |
74462d92064ecb85d151749d3d98de50ec838a75
|
f2e9eabc8ea32c4381525f8dfb7865aaa98af460
|
/TreeMaker/python/makeTreeFromPAT_cff.py
|
c7962a496e8076e09c87bbf7342ebdf7c2010217
|
[] |
no_license
|
kheine/RA2Classic
|
a75977dc3ae1ce5a51bc5471111c69c00137bfdb
|
0f48e482da6859dad96002ad68fb78b9a56fac57
|
refs/heads/master
| 2020-04-13T18:49:31.530643 | 2013-08-02T13:28:46 | 2013-08-02T13:28:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,407 |
py
|
# $Id: makeTreeFromPAT_cff.py,v 1.18 2013/02/13 12:44:04 mschrode Exp $
#
import FWCore.ParameterSet.Config as cms
def makeTreeFromPAT(process,
outFileName,
NJetsMin=2,
HTMin=350.,
MHTMin=0.,
globalTag="none",
isData=True,
hltPath=[],
reportEveryEvt=10,
testFileName="",
numProcessedEvt=100):
## --- Log output ------------------------------------------------------
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr = cms.untracked.PSet(
placeholder = cms.untracked.bool(True)
)
process.MessageLogger.cout = cms.untracked.PSet(
INFO = cms.untracked.PSet(reportEvery = cms.untracked.int32(reportEveryEvt))
)
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
## --- Files to process ------------------------------------------------
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(numProcessedEvt)
)
process.source = cms.Source(
"PoolSource",
fileNames = cms.untracked.vstring(testFileName)
)
## --- Output file -----------------------------------------------------
process.TFileService = cms.Service(
"TFileService",
fileName = cms.string(outFileName+".root")
)
## --- Selection sequences ---------------------------------------------
# HLT
process.load('HLTrigger.HLTfilters.hltHighLevel_cfi')
process.hltHighLevel.HLTPaths = cms.vstring(hltPath)
process.hltHighLevel.andOr = cms.bool(True)
process.hltHighLevel.throw = cms.bool(False)
process.HLTSelection = cms.Sequence(
process.hltHighLevel
)
if not isData:
print "Running over MC: removing HLT selection"
process.HLTSelection.remove(process.hltHighLevel)
elif not hltPath:
print "Empty list of HLT paths: removing HLT selection"
process.HLTSelection.remove(process.hltHighLevel)
# Filter-related selection
process.load('RA2Classic.TreeMaker.filterSelection_cff')
from RecoMET.METFilters.jetIDFailureFilter_cfi import jetIDFailure
process.PBNRFilter = jetIDFailure.clone(
JetSource = cms.InputTag('patJetsPF'),
MinJetPt = cms.double(30.0),
taggingMode = cms.bool(True)
)
process.filterSelection += process.PBNRFilter
from RecoMET.METFilters.multiEventFilter_cfi import multiEventFilter
process.HCALLaserEvtFilterList2012 = multiEventFilter.clone(
file = cms.FileInPath('RA2Classic/AdditionalInputFiles/data/HCALLaserEventList_20Nov2012-v2_HT-HTMHT.txt'),
taggingMode = cms.bool(True)
)
process.filterSelection += process.HCALLaserEvtFilterList2012
from SandBox.Skims.hoNoiseFilter_cfi import hoNoiseFilter
process.RA2HONoiseFilter = hoNoiseFilter.clone(
patJetsInputTag = cms.InputTag('patJetsPF'),
jetPtMin = cms.double(30),
jetEtaMax = cms.double(5),
maxHOEfrac = cms.double(0.4),
taggingMode = cms.bool(True)
)
process.filterSelection += process.RA2HONoiseFilter
process.load('JetMETCorrections.Configuration.DefaultJEC_cff')
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = globalTag
process.load('SandBox.Skims.RA2CaloVsPFMHTFilterSequence_cff')
process.RA2CaloVsPFMHTFilter.TaggingMode = cms.bool(True)
process.filterSelection += process.RA2CaloVsPFMHTFilterSequence
process.load('SandBox.Skims.RA2Leptons_cff')
process.LeptonVeto = cms.Sequence(
process.ra2PFMuonVeto *
process.ra2ElectronVeto
)
# Produce RA2 jets (produces the collections HTJets and MHTJets)
process.load('RA2Classic.Utils.produceRA2JetsPFCHS_cff')
process.ProduceRA2Jets = cms.Sequence(
process.produceRA2JetsPFCHS
)
# Select events with at least 'NJetsMin' of the above jets
from PhysicsTools.PatAlgos.selectionLayer1.jetCountFilter_cfi import countPatJets
process.NumJetSelection = countPatJets.clone(
src = cms.InputTag('HTJets'),
minNumber = cms.uint32(NJetsMin)
)
# HT selection
htInputCol = 'htPFchs'
from SandBox.Skims.RA2HT_cff import htPFFilter
process.HTSelection = htPFFilter.clone(
HTSource = cms.InputTag(htInputCol),
MinHT = cms.double(HTMin)
)
# MHT selection
mhtInputCol = 'mhtPFchs'
from SandBox.Skims.RA2MHT_cff import mhtPFFilter
process.MHTSelection = mhtPFFilter.clone(
MHTSource = cms.InputTag(mhtInputCol),
MinMHT = cms.double(MHTMin)
)
## --- HLT decisions --------------------------------------------------
process.load('RA2Classic.TreeMaker.hltDecisions_cff')
## --- Setup WeightProducer -------------------------------------------
from RA2Classic.WeightProducer.getWeightProducer_cff import getWeightProducer
process.WeightProducer = getWeightProducer(testFileName)
process.WeightProducer.Lumi = cms.double(19466)
process.WeightProducer.PU = cms.int32(3) # PU S10
process.WeightProducer.FileNamePUDataDistribution = cms.string("RA2Classic/WeightProducer/data/DataPileupHistogram_RA2Summer12_190456-208686_ABCD.root")
## --- Setup of TreeMaker ----------------------------------------------
FilterNames = cms.VInputTag()
FilterNames.append(cms.InputTag("HBHENoiseFilterRA2","HBHENoiseFilterResult","PAT"))
FilterNames.append(cms.InputTag("beamHaloFilter"))
FilterNames.append(cms.InputTag("trackingFailureFilter"))
FilterNames.append(cms.InputTag("inconsistentMuons"))
FilterNames.append(cms.InputTag("greedyMuons"))
FilterNames.append(cms.InputTag("ra2EcalTPFilter"))
FilterNames.append(cms.InputTag("ra2EcalBEFilter"))
FilterNames.append(cms.InputTag("hcalLaserEventFilter"))
FilterNames.append(cms.InputTag("ecalLaserCorrFilter"))
FilterNames.append(cms.InputTag("eeBadScFilter"))
FilterNames.append(cms.InputTag("PBNRFilter"))
FilterNames.append(cms.InputTag("HCALLaserEvtFilterList2012"))
FilterNames.append(cms.InputTag("manystripclus53X"))
FilterNames.append(cms.InputTag("toomanystripclus53X"))
FilterNames.append(cms.InputTag("logErrorTooManyClusters"))
FilterNames.append(cms.InputTag("RA2CaloVsPFMHTFilter"))
FilterNames.append(cms.InputTag("RA2HONoiseFilter"))
# for f in process.hltDecisions.moduleNames():
# FilterNames.append(cms.InputTag(f))
from RA2Classic.TreeMaker.treemaker_cfi import TreeMaker
process.RA2TreeMaker = TreeMaker.clone(
TreeName = cms.string("RA2PreSelection"),
VertexCollection = cms.InputTag('goodVertices'),
HT = cms.InputTag(htInputCol),
HTJets = cms.InputTag('HTJets'),
MHT = cms.InputTag(mhtInputCol),
MHTJets = cms.InputTag('MHTJets'),
VarsDouble = cms.VInputTag(cms.InputTag('WeightProducer:weight')),
VarsDoubleNamesInTree = cms.vstring('Weight'),
METs = cms.VInputTag(mhtInputCol,'mhtCalo'),
METNamesInTree = cms.vstring('PFMHT','CaloMHT'),
PatJetCollInputTag = cms.InputTag('patJetsPF'),
PatJetsMinPt = cms.double(30.),
PatJetsNameInTree = cms.string('Jets'),
Filters = FilterNames
)
## --- Final paths ----------------------------------------------------
# process.dump = cms.EDAnalyzer("EventContentAnalyzer")
process.WriteTree = cms.Path(
process.HLTSelection *
#process.hltDecisions *
process.ProduceRA2Jets *
#process.filterSelection *
process.PBNRFilter * process.HCALLaserEvtFilterList2012 * process.RA2CaloVsPFMHTFilterSequence * process.RA2HONoiseFilter *
process.LeptonVeto *
process.NumJetSelection *
process.HTSelection *
process.MHTSelection *
## process.dump
process.WeightProducer *
process.RA2TreeMaker
)
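# Example top-level configuration (sketch only; the input file, global tag and
# HLT path below are placeholders, not values prescribed by this package):
#
#   import FWCore.ParameterSet.Config as cms
#   from RA2Classic.TreeMaker.makeTreeFromPAT_cff import makeTreeFromPAT
#   process = cms.Process("RA2TreeMaker")
#   makeTreeFromPAT(process,
#                   outFileName="RA2Tree_Data",
#                   NJetsMin=2, HTMin=500., MHTMin=200.,
#                   globalTag="GR_P_V42_AN3::All",
#                   isData=True,
#                   hltPath=["HLT_PFNoPUHT350_PFMET100_v*"],
#                   testFileName="file:patTuple.root",
#                   numProcessedEvt=1000)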
|
[
""
] | |
3f845c49314e2570d97f59eb93593ac5247fdd9f
|
3d8ece9b0291d38064abbcde1da7078b07cc0323
|
/elevator/positioner.py
|
ea75186e5c49a7d04c29636037e5ad1fbc71abf0
|
[] |
no_license
|
EMS-TU-Ilmenau/Elevator
|
b92ff5c078d5d644b3e556d95ced868ecda611c5
|
38d2cd1c9d6c5b708ea680efedd552de9894a19f
|
refs/heads/master
| 2020-07-17T18:11:17.599973 | 2020-07-02T16:32:52 | 2020-07-02T16:32:52 | 206,069,682 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,642 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import nested_scopes, generators, with_statement, unicode_literals, absolute_import, division, print_function # for compatibility
import serial # for RS232/UART/COM port
import time # for sleeping
import math # for calculations
import logging # for warnings, debugging, etc.
log = logging.getLogger(__name__)
class Positioner:
'''Class to lift the elevator platform up and down'''
def __init__(self, port='COM10', axisID=1, diameter=0.0324, tarStartPos=0.):
'''
:param port: serial port to communicate with the motor controller
:param axisID: motor controller board identifier number
:param tarStartPos: target start position in meters
:param diameter: motor axis diameter which moves the belt'''
self.id = axisID
self.tarStartPos = tarStartPos
self.diameter = diameter
self.dev = None
# connect to motor and turn on
self.connect(port)
self.turnOn()
def __del__(self):
# close connection properly when destructing the instance
self.disconnect()
def connect(self, port):
'''Connect to serial port'''
try:
self.dev = serial.Serial(port, 9600, timeout=5)
except:
raise IOError('Cannot connect to elevator')
log.info('Connection opened to elevator')
def disconnect(self):
'''Disconnect from serial port'''
if self.dev:
self.dev.close()
def send(self, cmd):
'''sends a command via the port to the motor controller(s)
:param cmd: command (without line endings) to send
:returns: when the command was a query, the response is returned'''
self.dev.write(bytes(cmd+'\n', 'ascii'))
if '?' in cmd:
resp = self.dev.readline()
return str(bytes(filter(lambda c: c > 32, resp)), 'ascii') # remove control chars
return None
def len2rot(self, l):
'''returns: rotation angle for path-length l'''
return 360.0*l/(math.pi*self.diameter)
def rot2len(self, r):
'''returns: path-length for rotation angle r'''
return r*math.pi*self.diameter/360.0
@property
def acceleration(self):
''':returns: acceleration value (no units)'''
return int(self.send('AX{}:ACC?'.format(self.id)))
@acceleration.setter
def acceleration(self, val):
self.send('AX{}:ACC {}'.format(self.id, int(val)))
@property
def deceleration(self):
''':returns: deceleration value (no units)'''
return int(self.send('AX{}:DEC?'.format(self.id)))
@deceleration.setter
def deceleration(self, val):
self.send('AX{}:DEC {}'.format(self.id, int(val)))
def turnOn(self):
'''Turns motor power on.
Note: this will reset the motors internal position state!'''
log.info('Turning motor power on')
self.send('AX{}:POW ON'.format(self.id))
time.sleep(0.05)
resp = self.send('AX{}:POW?'.format(self.id))
if 'ON' in resp:
log.debug('Motor is ready')
else:
log.error('Motor is not powered')
def turnOff(self):
'''Turns motor power off'''
log.info('Turning motor power off')
self.send('AX{}:POW OFF'.format(self.id))
def home(self, vel=0.01):
'''moves in negative position until the reference is found.'''
if int(self.send('AX{}:HOME?'.format(self.id))):
log.info('Already at home')
return
# set max rate, then search home
self.send('AX{}:LIM:MAX {:.2f}'.format(self.id, self.len2rot(vel)))
time.sleep(0.05)
oldAcc = self.acceleration
self.acceleration = 100 # to instantly drive to home position
time.sleep(0.05)
self.send('AX{}:HOME -1'.format(self.id))
log.info('Homing')
# wait until home found
while True:
time.sleep(0.1)
onHome = int(self.send('AX{}:HOME?'.format(self.id)))
log.debug('Wait for homing done. Last reply: {}'.format(onHome))
if onHome == 1:
break
self.acceleration = oldAcc
def moveToPos(self, pos, vel=0.01, block=True):
'''moves the target to a new position
:param pos: new target position in meters
:param vel: speed in m/s to move to the position'''
log.info('Moving target to {} m with {} mm/s'.format(pos, vel*1000))
# calculations
tarRot = self.len2rot(pos-self.tarStartPos)
rate = self.len2rot(vel)
# move to position
self.send('AX{}:LIM:MAX {:.2f}'.format(self.id, rate)) # set rate
time.sleep(0.05)
self.send('AX{}:POS {:.2f}'.format(self.id, tarRot)) # set position
if not block:
return
# make sure that the motor reached their positions
notThereCnt = 0
while True:
notThereCnt += 1
curRot = self.getRot()
delta = abs(tarRot-curRot)
if delta < 1.:
log.debug('Position reached')
break
log.debug('Motor still not on position ({} deg is, {} deg should)'.format(curRot, tarRot))
duration = delta/rate+0.05
log.debug('Waiting {:.2f} s for motors to reach position...'.format(duration))
time.sleep(duration)
# check for serious problem
if notThereCnt == 20:
log.warning('Re-sending position')
# re-send strings
self.send('AX{}:LIM:MAX {:.2f}'.format(self.id, rate)) # set rate
time.sleep(0.05)
self.send('AX{}:POS {:.2f}'.format(self.id, tarRot)) # set position
# something is kaputt
if notThereCnt > 100:
log.error('Position cannot not be reached ({} deg is, {} deg should)'.format(curRot, tarRot))
break
def getRot(self):
''':returns: motor axis angle in degree'''
resp = None
while not resp:
resp = self.send('AX{}:POS?'.format(self.id))
return float(resp)
def getPos(self):
''':returns: current target on platform position in m'''
rot = self.getRot()
return self.rot2len(rot)+self.tarStartPos
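if __name__ == '__main__':
    # Minimal usage sketch (assumptions: a controller is attached and the port
    # name matches the actual system; this obviously needs the real hardware).
    logging.basicConfig(level=logging.INFO)
    positioner = Positioner(port='COM10', axisID=1)
    positioner.home(vel=0.01)            # find the reference position first
    positioner.moveToPos(0.5, vel=0.02)  # lift the platform to 0.5 m
    print('Current position: {:.3f} m'.format(positioner.getPos()))
    positioner.turnOff()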
|
[
"[email protected]"
] | |
c261a3aa2393582101930b0d509c572623981a2b
|
29eacf3b29753d65d8ec0ab4a60ea1f7ddecbd68
|
/lightly/openapi_generated/swagger_client/models/docker_run_scheduled_priority.py
|
8f59946a24631b8670f78eced6e272cd1b4e2588
|
[
"MIT"
] |
permissive
|
lightly-ai/lightly
|
5b655fe283b7cc2ddf1d7f5bd098603fc1cce627
|
5650ee8d4057139acf8aa10c884d5d5cdc2ccb17
|
refs/heads/master
| 2023-08-17T11:08:00.135920 | 2023-08-16T12:43:02 | 2023-08-16T12:43:02 | 303,705,119 | 2,473 | 229 |
MIT
| 2023-09-14T14:47:16 | 2020-10-13T13:02:56 |
Python
|
UTF-8
|
Python
| false | false | 1,014 |
py
|
# coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import json
import pprint
import re # noqa: F401
from enum import Enum
from aenum import no_arg # type: ignore
class DockerRunScheduledPriority(str, Enum):
"""
DockerRunScheduledPriority
"""
"""
allowed enum values
"""
LOW = 'LOW'
MID = 'MID'
HIGH = 'HIGH'
CRITICAL = 'CRITICAL'
@classmethod
def from_json(cls, json_str: str) -> 'DockerRunScheduledPriority':
"""Create an instance of DockerRunScheduledPriority from a JSON string"""
return DockerRunScheduledPriority(json.loads(json_str))
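# Minimal usage sketch (assuming the imports above resolve): the enum can be
# built from a plain value or from a JSON-encoded string.
if __name__ == '__main__':
    print(DockerRunScheduledPriority('HIGH'))             # DockerRunScheduledPriority.HIGH
    print(DockerRunScheduledPriority.from_json('"LOW"'))  # DockerRunScheduledPriority.LOW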
|
[
"[email protected]"
] | |
752c131107a11c4cca9973aa5a08f2fc22b37083
|
25a565679443dc00be245c00cd68dde43601df50
|
/workrobot/libs/region/class_regionmatcher.py
|
4dff7eae558942da43aac12cab870177c1aa13fc
|
[] |
no_license
|
plutoese/workrobot
|
2d4e929a05be5aea1d6679769ac8c30aa42a1595
|
097571be9d61a120dd676464941cb9d0618963f6
|
refs/heads/master
| 2020-04-12T06:45:18.737553 | 2017-04-18T17:40:24 | 2017-04-18T17:40:24 | 63,757,855 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,751 |
py
|
# coding=UTF-8
"""
=========================================
Region matching classes
=========================================
:Author: glen
:Date: 2016.10.26
:Tags: region
:abstract: Match region names against a reference region list
**Classes**
==================
RegionMatcher
    Region matching class
**Usage**
==================
**Example code**
==================
"""
import re
from libs.imexport.class_mongodb import MongoDB,MonDatabase,MonCollection
from libs.imexport.class_Excel import Excel
import regex
import pandas as pd
class RegionMatcher:
def __init__(self, region_query=None):
# 设置查询结果
if region_query is None:
mongo = MongoDB()
mdb = MonDatabase(mongodb=mongo, database_name='region')
collection = MonCollection(database=mdb, collection_name='admincode')
self.collection = collection.collection
else:
self.collection = region_query
self.collection = None
def match(self,regions=None,year=None):
pass
class RegionMatchingAlgorithm:
def __init__(self,to_be_matched=None):
self._to_be_matched = to_be_matched
class RegionMatchingOrderAlgorithm(RegionMatchingAlgorithm):
    """ Sequential matching algorithm class
    :param pandas.DataFrame to_be_matched: DataFrame of regions to be matched
    :param pandas.DataFrame to_be_compared: DataFrame of reference (standard) regions
    :return: no return value
"""
def __init__(self,to_be_matched=None,to_be_compared=None):
RegionMatchingAlgorithm.__init__(self,to_be_matched=to_be_matched)
self._to_be_compared = to_be_compared
        # The matching result is stored in _result
self._result = None
def correct(self,correction='auto'):
        if isinstance(correction, dict):
pass
else:
if re.match('^auto$',correction):
correction = self.auto_correction()
else:
correction = pd.read_excel(correction)
is_index_rid = False
if correction.index.name == 'rid':
is_index_rid = True
if 'rid' in correction.columns:
correction = correction.set_index('rid')
is_index_rid = True
if is_index_rid:
for ind in correction.index:
self._result.loc[ind,'region'] = correction.loc[ind,'matched']
else:
correction_dict = dict([(correction.loc[ind,'region'],correction.loc[ind,'matched']) for ind in correction.index])
for ind in RegionMatchingOrderAlgorithm.not_matched(self._result).index:
if self._result.loc[ind,'region'] in correction_dict:
self._result.loc[ind,'region'] = correction_dict[self._result.loc[ind,'region']]
@property
def simu_auto_corrected_region_list(self):
correction = self.auto_correction()
if correction.size > 0:
corr_result = pd.merge(self._result,self._to_be_compared[['cid','region']],how='left',on='cid')
corr_result = corr_result.rename(columns={'region_x':'region','region_y':'compared'})
corr_result['supplement'] = None
for ind in correction.index:
corr_result.loc[ind,'compared'] = correction.loc[ind,'matched']
corr_result.loc[ind,'acode'] = correction.loc[ind,'acode']
corr_result.loc[ind,'cid'] = correction.loc[ind,'cid']
corr_result.loc[ind,'supplement'] = self.output_of_region_set_mapping.loc[ind,'matching_regions']
del corr_result['_id']
return corr_result
@property
def simu_auto_corrected_region_list_short_version(self):
select_index = set()
for num in sorted(self.region_set_dict):
            select_index.update([max(0, num-1), num, min(self._result.shape[0]-1, num+1)])
result = self.simu_auto_corrected_region_list.loc[sorted(list(select_index)),]
return result
    def find_anchor(self,type='merge'):
        """ Find the anchors, i.e. the regions in the list whose match is already certain
        :param str type: anchoring algorithm type: merge (anchor via a pandas.DataFrame merge)
        :return: modifies _result in place, no return value
"""
if re.match('^merge$',type) is not None:
self._result = self._merge_matching()
@property
    def region_set_mapping(self):
        """ After anchoring, build the mapping from unmatched regions to candidate reference regions
        :return: the mapping
"""
not_matched = RegionMatchingOrderAlgorithm.not_matched(self._result)
all_matched = RegionMatchingOrderAlgorithm.all_matched(self._result)
refer_regions_map = []
for i in not_matched.index:
region = not_matched.loc[i]['region']
            # Locate the anchored context: nearest matched rows before and after this one
            for m in range(i,-1,-1):
                if m in all_matched.index:
                    search_start = int(all_matched.loc[m]['cid'])
                    break
            for m in range(i,self._result.shape[0]):
                if m in all_matched.index:
                    search_end = int(all_matched.loc[m]['cid'])
                    break
            # Candidate regions between the two anchors
            refer_regions = [self._to_be_compared.loc[n] for n in range(search_start+1,search_end)]
            # Build the mapping: a list whose elements are (region name, position, candidate matching regions)
refer_regions_map.append((region,i,refer_regions))
return refer_regions_map
@property
    def output_of_region_set_mapping(self):
        """ Return the region selection mapping
        :return: the region selection mapping as a DataFrame indexed by rid
"""
result = []
for record in self.region_set_mapping:
result.append([record[0],record[1],','.join([item['region'] for item in record[2]])])
result = pd.DataFrame(result,columns=['region','rid','matching_regions'])
result = result.set_index('rid')
return result
    def auto_correction(self,error='auto'):
        """ Return the auto-corrected matching result
        :param error: allowed number of fuzzy-match errors, or 'auto'
        :return: the auto-corrected matching result
"""
correction = []
map = self.region_set_mapping
for record in map:
region = record[0]
index = record[1]
refer_regions = record[2]
for n in range(len(refer_regions)):
if self.fuzzy_region_matching(region,refer_regions[n]['region'],error):
correction.append([region,index,refer_regions[n]['region'],refer_regions[n]['acode'],refer_regions[n]['cid']])
correction = pd.DataFrame(correction,columns=['region','rid','matched','acode','cid'])
correction = correction.set_index('rid')
return correction
    def exactly_matching_from_region_set(self):
        """ Pick regions from the candidate set using exact matching
        :return: no return value
"""
map = self.region_set_mapping
for record in map:
region = record[0]
index = record[1]
refer_regions = record[2]
for n in range(len(refer_regions)):
if re.match(region,refer_regions[n]['region']) is not None:
self._result.loc[index,'acode'] = refer_regions[n]['acode']
self._result.loc[index,'cid'] = refer_regions[n]['cid']
self._result.loc[index,'_id'] = refer_regions[n]['_id']
break
@staticmethod
def fuzzy_region_matching(region,compared,error='auto'):
if re.match('^auto$',error) is not None:
error = max(1,int(len(region)*0.4))
return regex.fullmatch('(?:%s){e<=%s}' % (region, str(error)),compared) is not None
    def _merge_matching(self):
        """ Anchoring: match via merge
        When finished, self._result is a pandas.DataFrame such as
            region  mid    acode  cid
        0     北京市    0  110000    0
        1     市辖区    1
        2     东城区    2  110101    2
        3     西城区    3  110102    3
        :return: no return value
        """
        # Return the initially anchored object: pandas.DataFrame
merge_result = pd.merge(self._to_be_matched, self._to_be_compared, how='left', on='region')
merge_result = merge_result.drop_duplicates(subset='rid',keep=False)
#merge_result = pd.merge(self._to_be_matched,merge_result,how='left',on='rid')
#del merge_result['region_y']
return merge_result.rename(columns={'region_x':'region'})
@property
def accuracy(self):
accuracy = 100*(RegionMatchingOrderAlgorithm.all_matched(self._result).shape[0]/(self._result.shape[0]))
return accuracy
@staticmethod
def not_matched(pdata=None):
return pdata[pdata.isnull().any(axis=1)]
@staticmethod
def all_matched(pdata=None):
return pdata[pdata.notnull().all(axis=1)]
@property
def region_set_dict(self):
ref_regions_dict = dict()
for record in self.region_set_mapping:
to_be_selected = []
for item in record[2]:
to_be_selected.append(item['region'])
ref_regions_dict[record[1]] = to_be_selected
return ref_regions_dict
@property
def matched_region(self):
return self._result
if __name__ == '__main__':
pop_year = '2010'
pop_region_file_2010 = r'E:\data\popcensus\origin\var_temp.xls'
raw_region_2010 = Excel(pop_region_file_2010).read()
to_be_matched = [re.sub('\s+','',item[0]) for item in raw_region_2010 if re.match('^\s*$',item[0]) is None]
pd_to_be_matched = pd.DataFrame(to_be_matched,columns=['region'])
pd_to_be_matched['rid'] = range(pd_to_be_matched.shape[0])
collection = MonCollection(database=MonDatabase(mongodb=MongoDB(), database_name='region'), collection_name='admincode')
found = collection.collection.find(filter={'year':'2010'},
projection={'acode':True,'region':True,'_id':True},
sort=[('acode',1)])
pd_to_be_compared = pd.DataFrame(list(found))
pd_to_be_compared['cid'] = range(pd_to_be_compared.shape[0])
#pd_to_be_compared['_id'] = pd_to_be_compared['_id'].apply(str)
print(pd_to_be_matched,pd_to_be_compared)
algo = RegionMatchingOrderAlgorithm(pd_to_be_matched,pd_to_be_compared)
    # First, find reliable matches to serve as anchors
    algo.find_anchor()
    # Next, run strict sequential matching against the candidate sets
    algo.exactly_matching_from_region_set()
    print(algo.matched_region)
    # Print the match rate
print('Accuracy Rate: {:.2f}%.'.format(algo.accuracy))
'''
    # Correct errors
    #algo.correct(correction=r'E:\data\popcensus\origin\correction.xlsx')
    algo.auto_correction().to_excel(r'E:\data\popcensus\origin\correction.xlsx')
    #algo.matched_region.to_excel(r'E:\data\popcensus\origin\pdoutput_before.xlsx')
    algo.correct()
    # Re-run the matching
    algo.exactly_matching_from_region_set()
    print('Accuracy Rate: {:.2f}%.'.format(algo.accuracy))
    # Write out the completed matching result
algo.matched_region.to_excel(r'E:\data\popcensus\origin\pdoutput.xlsx')
algo.output_of_region_set_mapping.to_excel(r'E:\data\popcensus\origin\reference_regions.xlsx')
print(algo.auto_correction())
algo.auto_correction().to_excel(r'E:\data\popcensus\origin\correction.xlsx')
print(algo.auto_correction().size)
algo.simu_auto_corrected_region_list.to_excel(r'E:\data\popcensus\origin\sim_output.xlsx')
algo.simu_auto_corrected_region_list_short_version.to_excel(r'E:\data\popcensus\origin\sim_output_short.xlsx')
result.to_excel(r'E:\data\popcensus\origin\pdoutput.xlsx')
cor_file = r'E:\data\popcensus\origin\correction.xlsx'
pdata = pd.read_excel(cor_file)
print(pdata)
'''
|
[
"[email protected]"
] | |
f97850dc6668b6a0383ae0dade7bb9e5b0360eb3
|
2f46f893621f184ad7a37dea03001fa7c8704c0b
|
/team/urls.py
|
09142cd3d5718b65dd0291fad99f4654d8938a17
|
[] |
no_license
|
DarkMagician0611/IITZL
|
b55c909cc94675af63e7ad5be0d5e959bc5cb437
|
b7e7ab304706ab00d9e9c747262387d6defe9865
|
refs/heads/master
| 2021-09-11T13:57:53.662298 | 2018-04-08T08:34:32 | 2018-04-08T08:34:32 | 125,329,743 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 946 |
py
|
from django.urls import path
from . import views
app_name = 'team'
urlpatterns = [
path('', views.selectMatch, name='selectMatch'),
path('index/', views.index, name='index'),
path('teamAddIndex/', views.teamAddIndex, name='teamAddIndex'),
path('playerAdd/', views.playerAdd, name='playerAdd'),
path('addPlayerLater/', views.addPlayerLater, name='addPlayerLater'),
path('addBlackMamba/', views.addBlackMamba, name='addBlackMamba'),
path('loadPlayer/', views.loadPlayer, name='loadPlayer'),
path('updateMamba/', views.updateMamba, name='updateMamba'),
path('update/<str:name>/', views.update, name='update'),
path('deleteTeamPlayer/<str:name>/', views.deleteTeamPlayer, name='deleteTeamPlayer'),
path('playerUpdate/', views.playerUpdate, name='playerUpdate'),
path('resetTeam/', views.resetTeam, name='resetTeam'),
path('listTeams/', views.listTeams, name='listTeams'),
path('createTeams/', views.createTeams, name='createTeams'),
]
|
[
"[email protected]"
] | |
ccbe60417f748533bab61dc6d6cde0b17cca9c8b
|
a03131bb68abb94eafacdc56e803551e93aa2b94
|
/test/test_offline/test_defaults.py
|
382a6f8f0cc9486998a0249ed69ab4370083f2ea
|
[
"Apache-2.0"
] |
permissive
|
gistart/prometheus-push-client
|
b1a50d7173e57817ca0011dbca213125e34577f7
|
fcaddb31e81d5c04d86c01d219f73b7d2e1736ed
|
refs/heads/master
| 2023-04-24T02:11:32.984284 | 2021-05-17T07:17:27 | 2021-05-17T07:17:27 | 363,989,153 | 14 | 3 |
Apache-2.0
| 2021-05-16T19:14:55 | 2021-05-03T16:16:29 |
Python
|
UTF-8
|
Python
| false | false | 1,107 |
py
|
import pytest
import prometheus_push_client as ppc
from testutils import make_metric_fixture, collect_metrics
NS = "test_offline"
SUB = "defaults"
_cnt1 = ppc.Counter(
name="c1",
namespace=NS,
subsystem=SUB,
labelnames=["host", "l1", "l2"],
default_labelvalues={
"host": "localhost",
"l1": 0,
}
)
@pytest.fixture
def counter1(request):
return make_metric_fixture(request, _cnt1)
@pytest.fixture
def test_defaults_expected():
return \
"""
test_offline_defaults_c1_total{host="localhost",l1="0",l2="2"} 2.0
test_offline_defaults_c1_total{host="localhost",l1="1",l2="2"} 2.0
test_offline_defaults_c1_total{host="H",l1="1",l2="2"} 2.0
""".strip()
def test_default_labelvalues_usage(counter1, test_defaults_expected):
# pairs of equivalent actions:
counter1.labels(l2=2).inc()
counter1.labels(2).inc()
counter1.labels(l1=1, l2=2).inc()
counter1.labels(1, 2).inc()
counter1.labels(host="H", l1=1, l2=2).inc()
counter1.labels("H", 1, 2).inc()
res = collect_metrics(counter1._name)
assert res == test_defaults_expected
|
[
"[email protected]"
] | |
ba99a49368036925459bfa5fcb8f14164b87077e
|
06d21b5ce670b719e1c68f890e48c61d55dd9c84
|
/exercises/three/bmicalculator.py
|
4a1b243c7cb4c990529571b944f705aebc692924
|
[] |
no_license
|
kukuu/python
|
1613444d2c773a9d82e3f0df079cfbffea20fb65
|
5c6b8b9d7c3a187c90531bdd28347bbbec584ebe
|
refs/heads/master
| 2021-01-20T04:01:53.890753 | 2019-09-04T14:35:29 | 2019-09-04T14:35:29 | 33,211,288 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 397 |
py
|
def bmi_calculate():
    # Python 2 style: input() evaluates "150, 65" into the (weight, height) tuple
    weight, height = input("Enter weight and height separated by a comma:")
    bmi = (weight * 720.0) / (height * height)
    if bmi < 19:
        where = 'below'
    elif bmi > 25:
        where = 'high'
    else:
        where = 'healthy'
    print ("your body mass index is:", bmi)
    print ("that is considered:", where)
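# Minimal usage sketch (assumption: run interactively and enter e.g. "150, 65",
# i.e. weight in pounds and height in inches to match the 720 constant above).
if __name__ == '__main__':
    bmi_calculate()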
|
[
"[email protected]"
] | |
a0f8f7b8b1b8309bccf987e46c698b39e152970c
|
9c56151ff0c981f4d24aaaefd8896893225be8c2
|
/fotochest/apps/administrator/__init__.py
|
01dce1499601edfe311a5b67dd72fabd730a6561
|
[
"MIT"
] |
permissive
|
ginking/fotochest
|
9da4c34abb7df758e29f5f3284c93e3cd6933bcc
|
0f9e6e72c7b587dec91cd5a0c3b081e28d056c62
|
refs/heads/master
| 2021-01-18T02:45:14.377309 | 2015-04-16T02:58:47 | 2015-04-16T02:58:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 68 |
py
|
default_app_config = 'fotochest.apps.administrator.apps.AdminConfig'
|
[
"[email protected]"
] | |
bfe6eb6e9734dbfe24074e1964400cdb06a23cc3
|
fce1b262820539e8574e5476692096f599ca2b27
|
/luffycity_s8/luffy/views/article.py
|
fecd0d8cf009734ca9798d3523d3afb6d261806e
|
[] |
no_license
|
iyouyue/green_hand
|
9386082a0589ee6e1805aafe189ee38e823c8202
|
7b80e8cc0622e4d8e9d07dde37c72ac7d6e3261c
|
refs/heads/master
| 2020-03-26T14:39:02.224727 | 2018-08-16T14:27:57 | 2018-08-16T14:27:57 | 144,997,556 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,851 |
py
|
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer, BrowsableAPIRenderer
from django.core.exceptions import ObjectDoesNotExist
from luffy import models
from luffy.response.base import BaseResponse
from luffy.serializers.article import ArticleSerializer, ArticleDetailSerializer
from luffy.pagination.page import LuffyPageNumberPagination
class MyException(Exception):
def __init__(self, msg):
self.msg = msg
class ArticleView(GenericViewSet):
renderer_classes = [JSONRenderer,]
def list(self, request, *args, **kwargs):
ret = BaseResponse()
try:
# 1. 获取数据
article_list = models.Article.objects.all().only('id', 'title','brief').order_by('-id')
# 2. 对数据进行分页
page = LuffyPageNumberPagination()
page_article_list = page.paginate_queryset(article_list, request, self)
# 3. 对数据序列化
ser = ArticleSerializer(instance=page_article_list, many=True)
ret.data = ser.data
except Exception as e:
ret.code = 1001
ret.error = '获取数据失败'
return Response(ret.dict)
def retrieve(self, request, pk, *args, **kwargs):
ret = BaseResponse()
try:
obj = models.Article.objects.get(id=pk)
ser = ArticleDetailSerializer(instance=obj, many=False)
ret.data = ser.data
except ObjectDoesNotExist as e:
ret.code = 1001
ret.error = '查询数据不存在'
except Exception as e:
ret.code = 1002
ret.error = "查询失败"
return Response(ret.dict)
|
[
"[email protected]"
] | |
773663c4df0ccd2fbf185f8bbedf2977848846c9
|
3c8b1a4d9e7d53fd643e02dabae50298a8122763
|
/tests/__init__.py
|
6ae0126758f6be63f55461a5786077a39670ba77
|
[
"MIT"
] |
permissive
|
greyli/fanxiangce
|
f0866ed5dfd32a2cd795db92dec9e8785833d480
|
c6eb8410867c7a743d1ede920b0858158fec961c
|
refs/heads/master
| 2021-09-18T10:35:02.317823 | 2018-07-13T02:32:25 | 2018-07-13T02:32:25 | 67,604,143 | 79 | 40 | null | null | null | null |
UTF-8
|
Python
| false | false | 21 |
py
|
# -*-coding: utf-8-*-
|
[
"[email protected]"
] | |
556cfb872153af8a3f4f6cac55f0cb4b264c5155
|
995f655b116cd61e0d7e2e139d414c83764af142
|
/estudos/python/ObjectSensing/OpenCV/webcam_example_1/webcam_basic_1.py
|
7815f65f99022a3f84f5cd02673d8938f380132e
|
[] |
no_license
|
LPAE/lpae.github.io
|
a7513ab383b232e997e6d19d865be207dc7e417c
|
05cffe4bb25da037d1af5ae110a5d0e2fe1af9b2
|
refs/heads/master
| 2022-11-22T23:07:00.128668 | 2022-11-11T12:09:05 | 2022-11-11T12:09:05 | 171,523,476 | 2 | 7 | null | 2019-06-12T23:19:54 | 2019-02-19T18:03:16 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 518 |
py
|
import cv2
# capture = cv2.VideoCapture(0)
capture = cv2.VideoCapture(1)
while 1:
ret, frame = capture.read()
cv2.imshow("Video", frame)
# ------------------------------------------------------------------------------------------------------------------
# Esc -> EXIT while
k = cv2.waitKey(30) & 0xff
if k == 27:
break
# ------------------------------------------------------------------------------------------------------------------
capture.release()
cv2.destroyAllWindows()
|
[
"[email protected]"
] | |
777644e2f2338f3db883ddb2a888fb753aed70c2
|
ecfcd725b4df54b7bab9b5e05a4618d6615a1277
|
/currency_1.0.py
|
a074c4ea8fe51533837eea2efbbc9c721319640b
|
[] |
no_license
|
lyb0596/RMB
|
7695f4d39cd2c2002123e791409a5ef324490959
|
1b14d125dc04bb91a8627cb9ad8d2dcff8442226
|
refs/heads/master
| 2020-04-05T11:18:20.668810 | 2018-11-09T08:37:31 | 2018-11-09T08:37:31 | 156,831,615 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 258 |
py
|
rmb_str_value = input('input: ')
lenth = len(rmb_str_value)
print(lenth)
#value = rmb_str_value[lenth-3:lenth]
value = rmb_str_value[-3:]
print(value)
# transe = 6.77
# rmb_value = eval(rmb_str_value)
# usa = rmb_value/transe
# print(rmb_value,' && ',usa)
|
[
"[email protected]"
] | |
f0fadc6c349ecd2154f74fb36092ac1ba6e56a02
|
66f867e6939dd641498d8ce3e9c9ec375ac65305
|
/python week3/spelletje.py
|
36faadbd84d5e6dcd1cb5580ebd3e5459470a79e
|
[] |
no_license
|
Mischadehaan/PythonAchievements
|
dc45a211344a6d6c2af4f04fe2b9616f120b7839
|
08436115ae4618211aa656c433ddf2fbf2af0cf4
|
refs/heads/master
| 2023-01-10T16:00:59.700131 | 2020-10-30T12:15:44 | 2020-10-30T12:15:44 | 294,099,119 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 673 |
py
|
# Goal:
# The higher/lower game should work as follows.
# The player is shown a random number from 1 up to and including 10.
# The player then has to guess whether the next number will be higher or lower.
# At the end, the player must be told whether he/she has won.
# Step plan:
# 1. Generate a random number from 1 to 10.
#    (Store a random number in a variable.)
# 2. The player enters whether the next number will be higher or lower.
# 3. A next number must be generated.
# 4. Check whether the player guessed correctly.
# 5. The player must be told whether he/she has won or lost.
# (A minimal sketch following this plan is appended at the end of this file.)
import random
willekeur = random.randrange(1, 11)
#print(willekeur)
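# A minimal sketch following the step plan above (an illustrative completion,
# not the original author's solution):
def play():
    first = random.randrange(1, 11)   # step 1: random number from 1 to 10
    print("The number is:", first)
    guess = input("Will the next number be higher or lower? (h/l): ")  # step 2
    second = random.randrange(1, 11)  # step 3: generate the next number
    print("The next number is:", second)
    correct = (second > first and guess.lower().startswith('h')) or \
              (second < first and guess.lower().startswith('l'))      # step 4
    print("You win!" if correct else "You lose!")                     # step 5
if __name__ == '__main__':
    play()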
|
[
"[email protected]"
] | |
2613f41ca4dc3a52d8a9eba8b22d5db1b4f73c1e
|
04d9ee05feb6dddf19b9f7653f4dd9e9ce3ee95c
|
/rbtools/commands/install.py
|
03724c98f41a1da2e2262344e0806a96951d6e81
|
[
"MIT"
] |
permissive
|
pbwkoswara/rbtools
|
2fa44ade1c60b4f076198bb8206a5d624dd40cd2
|
8ea5ff8843d2a3d44056ad4358d75c81a066cf28
|
refs/heads/master
| 2021-07-17T22:22:20.906220 | 2017-10-20T22:11:03 | 2017-10-25T17:05:21 | 108,022,324 | 0 | 0 | null | 2017-10-23T18:26:30 | 2017-10-23T18:26:30 | null |
UTF-8
|
Python
| false | false | 7,398 |
py
|
from __future__ import division, print_function, unicode_literals
import hashlib
import logging
import os
import shutil
import tempfile
import zipfile
import tqdm
from six.moves.urllib.error import HTTPError, URLError
from six.moves.urllib.request import urlopen
from rbtools.commands import Command, CommandError
from rbtools.utils.appdirs import user_data_dir
from rbtools.utils.checks import check_install
from rbtools.utils.process import execute
class Install(Command):
"""Install a dependency.
This allows RBTools to install external dependencies that may be needed for
some features.
"""
name = 'install'
author = 'The Review Board Project'
description = 'Install an optional dependency.'
args = '<package>'
option_list = []
package_urls = {
'tfs': 'http://downloads.beanbaginc.com/rb-tfs/rb-tfs.zip'
}
def main(self, package):
"""Run the command.
Args:
package (unicode):
The name of the package to install.
Raises:
rbtools.commands.CommandError:
An error occurred during installation.
"""
try:
url = self.package_urls[package]
except KeyError:
err = 'Package "%s" not found. Available packages are:\n' % package
err += '\n'.join(
' %s' % package_name
for package_name in self.package_urls.keys()
)
raise CommandError(err)
label = 'Downloading %s' % package
zip_filename = self.download_file(url, label=label)
try:
self.check_download(url, zip_filename)
self.unzip(
zip_filename,
os.path.join(user_data_dir('rbtools'), 'packages', package))
finally:
os.unlink(zip_filename)
def check_download(self, url, zip_filename):
"""Check to see if the file was successfully downloaded.
If the user has :command:`gpg` installed on their system, use that to
check that the package was signed. Otherwise, check the sha256sum.
Args:
url (unicode):
The URL that the file came from.
zip_filename (unicode):
The filename of the downloaded copy.
Raises:
rbtools.commands.CommandError:
The authenticity of the file could not be verified.
"""
if check_install('gpg'):
execute(['gpg', '--recv-keys', '4ED1F993'])
sig_filename = self.download_file('%s.asc' % url)
try:
retcode, output, errors = execute(
['gpg', '--verify', sig_filename, zip_filename],
with_errors=False, ignore_errors=True,
return_error_code=True, return_errors=True)
if retcode == 0:
logging.debug('Verified file signature')
else:
raise CommandError(
'Unable to verify authenticity of file downloaded '
'from %s:\n%s' % (url, errors))
finally:
os.unlink(sig_filename)
else:
logging.info('"gpg" not installed. Skipping signature validation.')
try:
sha_url = '%s.sha256sum' % url
logging.debug('Downloading %s', sha_url)
response = urlopen(sha_url)
real_sha = response.read().split(' ')[0]
except (HTTPError, URLError) as e:
raise CommandError('Error when downloading file: %s' % e)
        with open(zip_filename, 'rb') as f:
our_sha = hashlib.sha256(f.read()).hexdigest()
if real_sha == our_sha:
logging.debug('Verified SHA256 hash')
else:
logging.debug('SHA256 hash does not match!')
logging.debug(' Downloaded file hash was: %s', our_sha)
logging.debug(' Expected hash was: %s', real_sha)
raise CommandError(
'Unable to verify the checksum of the downloaded copy of '
'%s.\n'
'This could be due to an invasive proxy or an attempted '
'man-in-the-middle attack.' % url)
def unzip(self, zip_filename, package_dir):
"""Unzip a .zip file.
This method will unpack the contents of a .zip file into a target
directory. If that directory already exists, it will first be removed.
Args:
zip_filename (unicode):
The absolute path to the .zip file to unpack.
package_dir (unicode):
The directory to unzip the files into.
Raises:
rbtools.commands.CommandError:
The file could not be unzipped.
"""
logging.debug('Extracting %s to %s', zip_filename, package_dir)
try:
if os.path.exists(package_dir):
if os.path.isdir(package_dir):
shutil.rmtree(package_dir)
else:
os.remove(package_dir)
os.makedirs(package_dir)
except (IOError, OSError) as e:
raise CommandError('Failed to set up package directory %s: %s'
% (package_dir, e))
zip_file = zipfile.ZipFile(zip_filename, 'r')
try:
zip_file.extractall(package_dir)
except Exception as e:
raise CommandError('Failed to extract file: %s' % e)
finally:
zip_file.close()
def download_file(self, url, label=None):
"""Download the given file.
This is intended to be used as a context manager, and the bound value
will be the filename of the downloaded file.
Args:
url (unicode):
The URL of the file to download.
label (unicode, optional):
The label to use for the progress bar. If this is not
specified, no progress bar will be shown.
Yields:
unicode:
The filename of the downloaded file.
Raises:
rbtools.commands.CommandError:
An error occurred while downloading the file.
"""
logging.debug('Downloading %s', url)
try:
response = urlopen(url)
total_bytes = int(
response.info().getheader('Content-Length').strip())
read_bytes = 0
bar_format = '{desc} {bar} {percentage:3.0f}% [{remaining}]'
with tqdm.tqdm(total=total_bytes, desc=label or '',
ncols=80, disable=label is None,
bar_format=bar_format) as bar:
try:
f = tempfile.NamedTemporaryFile(delete=False)
while read_bytes != total_bytes:
chunk = response.read(8192)
chunk_length = len(chunk)
read_bytes += chunk_length
f.write(chunk)
bar.update(chunk_length)
finally:
f.close()
return f.name
except (HTTPError, URLError) as e:
raise CommandError('Error when downloading file: %s' % e)
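# Typical invocation from the shell (assumption, based on the usual rbt command
# dispatcher; this module only defines the command class):
#
#   rbt install tfs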
|
[
"[email protected]"
] | |
a9340662bebfa1cdd1adef79408712eb2e5883fd
|
7188e4eca6bb6ba03453e5c1d9e3134e9ef1b588
|
/apps/clndr/apps.py
|
29d6fb8f53be320b7e1c8a59f9267f426baf18ea
|
[] |
no_license
|
mitshel/ghc_yapokaju
|
c85eb2c3cbfd9802f6fac16a6d6192ae85ad2511
|
d70b53235223dc935792aac3838678cb1b4d2b2e
|
refs/heads/master
| 2020-05-15T21:50:15.646729 | 2019-04-21T08:48:31 | 2019-04-21T08:48:31 | 182,509,831 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 177 |
py
|
from django.apps import AppConfig
class ClndrConfig(AppConfig):
name = 'apps.clndr'
verbose_name = 'The calendar'
def ready(self):
from . import signals
|
[
"[email protected]"
] | |
40482d11844a15da5e63667740be2f0721b99c48
|
3f1873d63ccb215ec88105661f1cd44927e222b3
|
/dl4mir/chords/data.py
|
5144796b4d0b592a6db6d5ba99adf69daadbd0ec
|
[] |
no_license
|
bmcfee/dl4mir
|
50dd467687991163a819fc7560b9d9f9c4c6e568
|
c9438a221d1e853c25e1867673c1c3efca2e1756
|
refs/heads/master
| 2023-07-11T10:35:54.041154 | 2016-01-20T03:57:58 | 2016-01-20T03:57:58 | 69,293,002 | 1 | 0 | null | 2016-09-26T21:09:15 | 2016-09-26T21:09:14 | null |
UTF-8
|
Python
| false | false | 25,686 |
py
|
import itertools
import numpy as np
import biggie
import pescador
import mir_eval
from dl4mir.chords import labels as L
import dl4mir.chords.pipefxs as FX
from dl4mir.common import util
import dl4mir.chords.lexicon as lex
def intervals_to_durations(intervals):
return np.abs(np.diff(np.asarray(intervals), axis=1)).flatten()
def slice_cqt_entity(entity, length, idx=None):
"""Return a windowed slice of a chord Entity.
Parameters
----------
entity : Entity, with at least {cqt, chord_labels} fields
Observation to window.
Note that entity.cqt is shaped (num_channels, num_frames, num_bins).
length : int
Length of the sliced array.
idx : int, or None
Centered frame index for the slice, or random if not provided.
Returns
-------
sample: biggie.Entity with fields {data, chord_label}
The windowed chord observation.
"""
idx = np.random.randint(entity.cqt.shape[1]) if idx is None else idx
cqt = np.array([util.slice_tile(x, idx, length) for x in entity.cqt])
return biggie.Entity(data=cqt, chord_label=entity.chord_labels[idx])
def slice_note_entity(entity, length, idx=None):
"""Return a windowed slice of a chord Entity.
Parameters
----------
entity : Entity, with at least {cqt, chord_labels} fields
Observation to window.
Note that entity.cqt is shaped (num_channels, num_frames, num_bins).
length : int
Length of the sliced array.
idx : int, or None
Centered frame index for the slice, or random if not provided.
Returns
-------
sample: biggie.Entity with fields {data, chord_label}
The windowed chord observation.
"""
idx = np.random.randint(entity.cqt.shape[1]) if idx is None else idx
cqt = np.array([util.slice_tile(x, idx, length) for x in entity.cqt])
return biggie.Entity(data=cqt, note_numbers=entity.note_numbers[idx])
def slice_chroma_entity(entity, length, idx=None):
"""Return a windowed slice of a chord Entity.
Parameters
----------
entity : Entity, with at least {cqt, chord_labels} fields
Observation to window.
Note that entity.cqt is shaped (num_channels, num_frames, num_bins).
length : int
Length of the sliced array.
idx : int, or None
Centered frame index for the slice, or random if not provided.
Returns
-------
sample: biggie.Entity with fields {data, chord_label}
The windowed chord observation.
"""
idx = np.random.randint(entity.cqt.shape[1]) if idx is None else idx
chroma = util.slice_tile(entity.chroma, idx, length)
# chroma = np.array([util.slice_tile(x, idx, length) for x in entity.chroma])
return biggie.Entity(data=chroma, chord_label=entity.chord_labels[idx])
def chord_sampler(key, stash, win_length=20, index=None, max_samples=None,
sample_func=slice_cqt_entity):
"""Generator for sampling windowed chord observations from an entity.
Parameters
----------
key : str
Key for the entity of interest; must be consistent across both `stash`
and `index`, when the latter is provided.
stash : dict_like
Dict or biggie.Stash of chord entities.
win_length: int
Length of centered observation window for the CQT.
index: dict of index arrays, default=None
Indexing object for constrained sampling of the chord entity.
If provided, must have a np.ndarray of integers under `key`; otherwise,
this method will fail.
max_samples: int, or None
Maximum number of samples to return from this Generator; if None, runs
indefinitely.
Yields
------
sample: biggie.Entity with fields {cqt, chord_label}
The windowed chord observation.
"""
entity = stash.get(key)
has_labels = hasattr(entity, 'chord_labels')
label_key = 'note_numbers' if not has_labels else 'chord_labels'
num_samples = len(getattr(entity, label_key))
if index is None:
index = {key: np.arange(num_samples)}
valid_samples = index.get(key, [])
idx = np.inf
max_samples = np.inf if max_samples is None else max_samples
count = 0
while count < max_samples and len(valid_samples):
if idx >= len(valid_samples):
np.random.shuffle(valid_samples)
idx = 0
yield sample_func(entity, win_length, valid_samples[idx])
idx += 1
count += 1
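# Minimal usage sketch (assumption: `stash` is a biggie.Stash of chord entities
# opened elsewhere; draws five windowed observations from a single key):
#
#   key = stash.keys()[0]
#   for obs in chord_sampler(key, stash, win_length=20, max_samples=5):
#       print(obs.data.shape, obs.chord_label)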
def cqt_buffer(entity, win_length=20, valid_samples=None):
"""Generator for stepping windowed chord observations from an entity.
Parameters
----------
entity : biggie.Entity
CQT entity to step through
win_length: int
Length of centered observation window for the CQT.
Yields
------
sample: biggie.Entity with fields {cqt, chord_label}
The windowed chord observation.
"""
num_samples = len(entity.chord_labels)
if valid_samples is None:
valid_samples = np.arange(num_samples)
idx = 0
count = 0
while count < len(valid_samples):
yield slice_cqt_entity(entity, win_length, valid_samples[idx])
idx += 1
count += 1
def lazy_cqt_buffer(key, stash, win_length=20, index=None):
"""Generator for stepping windowed chord observations from an entity; note
that the entity is not queried until the generator is called.
Parameters
----------
key : str
Key for the entity of interest; must be consistent across both `stash`
and `index`, when the latter is provided.
stash : dict_like
Dict or biggie.Stash of chord entities.
win_length: int
Length of centered observation window for the CQT.
Yields
------
sample: biggie.Entity with fields {cqt, chord_label}
The windowed chord observation.
"""
entity = stash.get(key)
num_samples = len(entity.chord_labels)
if index is None:
index = {key: np.arange(num_samples)}
valid_samples = index.get(key, [])
for x in cqt_buffer(entity, win_length, valid_samples):
yield x
def map_chord_labels(entity, lexicon):
if hasattr(entity, 'chord_label'):
labels = entity.chord_label
else:
labels = entity.chord_labels
return lexicon.label_to_index(labels)
def map_bigrams(entity, lexicon):
return lexicon.label_to_index(entity.bigrams)
def create_chord_index_stream(stash, win_length, lexicon,
index_mapper=map_chord_labels,
sample_func=slice_cqt_entity,
pitch_shift_func=FX.pitch_shift_cqt,
max_pitch_shift=0, working_size=50,
partition_labels=None, valid_idx=None):
"""Return an unconstrained stream of chord samples with class indexes.
Parameters
----------
stash : biggie.Stash
A collection of chord entities.
win_length : int
Length of a given tile slice.
lexicon : lexicon.Lexicon
Instantiated chord lexicon for mapping labels to indices.
working_size : int
Number of open streams at a time.
pitch_shift : int
Maximum number of semitones (+/-) to rotate an observation.
partition_labels : dict
Returns
-------
stream : generator
Data stream of windowed chord entities.
"""
if partition_labels is None:
partition_labels = util.partition(stash, index_mapper, lexicon)
if valid_idx is None:
valid_idx = range(lexicon.num_classes)
chord_index = util.index_partition_arrays(partition_labels, valid_idx)
entity_pool = [pescador.Streamer(chord_sampler, key, stash,
win_length, chord_index,
sample_func=sample_func)
for key in stash.keys()]
stream = pescador.mux(entity_pool, None, working_size, lam=25)
if max_pitch_shift > 0:
stream = pitch_shift_func(stream, max_pitch_shift=max_pitch_shift)
return FX.map_to_class_index(stream, index_mapper, lexicon)
def create_target_stream(stash, win_length, working_size=50, max_pitch_shift=0,
bins_per_pitch=1, sample_func=slice_cqt_entity,
mapper=FX.map_to_chroma):
"""Return an unconstrained stream of chord samples with class indexes.
Parameters
----------
stash : biggie.Stash
A collection of chord entities.
win_length : int
Length of a given tile slice.
lexicon : lexicon.Lexicon
Instantiated chord lexicon for mapping labels to indices.
working_size : int
Number of open streams at a time.
max_pitch_shift : int
Maximum number of semitones (+/-) to rotate an observation.
partition_labels : dict
Returns
-------
stream : generator
Data stream of windowed chord entities.
"""
entity_pool = [pescador.Streamer(chord_sampler, key, stash, win_length,
sample_func=sample_func)
for key in stash.keys()]
stream = pescador.mux(entity_pool, None, working_size, lam=25)
if max_pitch_shift > 0:
stream = FX.pitch_shift_cqt(stream, max_pitch_shift=max_pitch_shift)
return mapper(stream, bins_per_pitch)
def create_uniform_chord_index_stream(stash, win_length, lexicon,
index_mapper=map_chord_labels,
sample_func=slice_cqt_entity,
pitch_shift_func=FX.pitch_shift_cqt,
max_pitch_shift=0, working_size=4,
partition_labels=None, valid_idx=None):
"""Return a stream of chord samples, with uniform quality presentation.
Parameters
----------
stash : biggie.Stash
A collection of chord entities.
win_length : int
Length of a given tile slice.
lexicon : lexicon.Lexicon
Instantiated chord lexicon for mapping labels to indices.
working_size : int
Number of open streams at a time.
pitch_shift : int
Maximum number of semitones (+/-) to rotate an observation.
partition_labels : dict
Returns
-------
stream : generator
Data stream of windowed chord entities.
"""
if partition_labels is None:
partition_labels = util.partition(stash, index_mapper, lexicon)
if valid_idx is None:
valid_idx = range(lexicon.num_classes)
chord_pool = []
for chord_idx in valid_idx:
subindex = util.index_partition_arrays(partition_labels, [chord_idx])
entity_pool = [pescador.Streamer(chord_sampler, key, stash,
win_length, subindex,
sample_func=sample_func)
for key in subindex.keys()]
if len(entity_pool) == 0:
continue
stream = pescador.mux(
entity_pool, n_samples=None, k=working_size, lam=20)
chord_pool.append(pescador.Streamer(stream))
stream = pescador.mux(chord_pool, n_samples=None, k=lexicon.vocab_dim,
lam=None, with_replacement=False)
if max_pitch_shift > 0:
stream = pitch_shift_func(stream, max_pitch_shift=max_pitch_shift)
return FX.map_to_class_index(stream, index_mapper, lexicon)
def create_uniform_chroma_stream(stash, win_length, lexicon, working_size=5,
bins_per_pitch=1, max_pitch_shift=0,
partition_labels=None, valid_idx=None):
"""Return an unconstrained stream of chord samples with class indexes.
Parameters
----------
stash : biggie.Stash
A collection of chord entities.
win_length : int
Length of a given tile slice.
lexicon : lexicon.Lexicon
Instantiated chord lexicon for mapping labels to indices.
working_size : int
Number of open streams at a time.
pitch_shift : int
Maximum number of semitones (+/-) to rotate an observation.
partition_labels : dict
Returns
-------
stream : generator
Data stream of windowed chord entities.
"""
if partition_labels is None:
partition_labels = util.partition(stash, map_chord_labels, lexicon)
if valid_idx is None:
valid_idx = range(lexicon.num_classes)
chord_pool = []
for chord_idx in valid_idx:
subindex = util.index_partition_arrays(partition_labels, [chord_idx])
entity_pool = [pescador.Streamer(chord_sampler, key, stash,
win_length, subindex,
sample_func=slice_cqt_entity)
for key in subindex.keys()]
if len(entity_pool) == 0:
continue
stream = pescador.mux(
entity_pool, n_samples=None, k=working_size, lam=20)
chord_pool.append(pescador.Streamer(stream))
stream = pescador.mux(chord_pool, n_samples=None, k=lexicon.vocab_dim,
lam=None, with_replacement=False)
if max_pitch_shift > 0:
stream = FX.pitch_shift_cqt(stream, max_pitch_shift=max_pitch_shift)
return FX.map_to_chroma(stream, bins_per_pitch)
def muxed_uniform_chord_stream(stash, synth_stash, win_length, vocab_dim=157,
pitch_shift=0, working_size=4):
"""Return a stream of chord samples, merging two separate datasets."""
partition_labels = util.partition(stash, chord_map)
synth_partition_labels = util.partition(synth_stash, chord_map)
valid_idx = range(vocab_dim)
valid_idx_synth = range(60, vocab_dim - 1)
chord_pool = []
for chord_idx in valid_idx:
subindex = util.index_partition_arrays(partition_labels, [chord_idx])
entity_pool = [pescador.Streamer(chord_sampler, key, stash,
win_length, subindex)
for key in subindex.keys()]
if chord_idx in valid_idx_synth:
subindex = util.index_partition_arrays(
synth_partition_labels, [chord_idx])
synth_pool = [pescador.Streamer(chord_sampler, key, synth_stash,
win_length, subindex)
for key in subindex.keys()]
entity_pool.extend(synth_pool)
if len(entity_pool) == 0:
continue
stream = pescador.mux(
entity_pool, n_samples=None, k=working_size, lam=20)
chord_pool.append(pescador.Streamer(stream))
stream = pescador.mux(chord_pool, n_samples=None, k=vocab_dim, lam=None,
with_replacement=False)
if pitch_shift:
stream = FX.pitch_shift_cqt(stream, max_pitch_shift=pitch_shift)
return FX.map_to_chord_index(stream, vocab_dim)
def create_uniform_factored_stream(stash, win_length, partition_labels=None,
working_size=50, vocab_dim=157,
pitch_shift=True):
"""Return a stream of chord samples, with uniform quality presentation."""
if partition_labels is None:
partition_labels = util.partition(stash, quality_map)
quality_pool = []
for qual_idx in range(13):
quality_subindex = util.index_partition_arrays(
partition_labels, [qual_idx])
entity_pool = [pescador.Streamer(chord_sampler, key, stash,
win_length, quality_subindex)
for key in quality_subindex.keys()]
stream = pescador.mux(entity_pool, n_samples=None, k=25, lam=20)
quality_pool.append(pescador.Streamer(stream))
stream = pescador.mux(quality_pool, n_samples=None, k=working_size,
lam=None, with_replacement=False)
if pitch_shift:
stream = FX.pitch_shift_cqt(stream)
return FX.map_to_joint_index(stream, vocab_dim)
def create_contrastive_chord_stream(stash, win_length, valid_idx=None,
partition_labels=None, working_size=2,
vocab_dim=157, pitch_shift=0,
neg_probs=None):
"""Return a stream of chord samples, with equal positive and negative
examples."""
if partition_labels is None:
partition_labels = util.partition(stash, chord_map)
if valid_idx is None:
valid_idx = range(vocab_dim)
if neg_probs is None:
neg_probs = np.ones([vocab_dim]*2)
neg_probs[np.eye(vocab_dim, dtype=bool)] = 0.0
neg_probs = util.normalize(neg_probs, axis=1)
chord_streams = []
has_data = np.ones(vocab_dim, dtype=bool)
for chord_idx in valid_idx:
subindex = util.index_partition_arrays(partition_labels, [chord_idx])
entity_pool = [pescador.Streamer(chord_sampler, key, stash,
win_length, subindex)
for key in subindex.keys()]
if len(entity_pool) == 0:
has_data[chord_idx] = False
stream = None
else:
stream = pescador.mux(
entity_pool, n_samples=None, k=working_size, lam=20)
chord_streams.append(stream)
chord_streams = np.array(chord_streams)
binary_pool = []
for chord_idx in range(vocab_dim):
if chord_streams[chord_idx] is None:
continue
# Skip contrast streams with (a) no data or (b) no probability.
not_chord_probs = neg_probs[chord_idx]
not_chord_probs[chord_idx] = 0.0
not_chord_probs *= has_data
nidx = not_chord_probs > 0.0
assert not_chord_probs.sum() > 0.0
chord_pool = [pescador.Streamer(x)
for x in chord_streams[nidx]]
neg_stream = pescador.mux(chord_pool, n_samples=None,
k=len(chord_pool), lam=None,
with_replacement=False,
pool_weights=not_chord_probs[nidx])
pair_stream = itertools.izip(chord_streams[chord_idx], neg_stream)
binary_pool.append(pescador.Streamer(pair_stream))
cstream = pescador.mux(binary_pool, n_samples=None, k=len(binary_pool),
lam=None, with_replacement=False)
return FX.unpack_contrastive_pairs(cstream, vocab_dim)
def chroma_stepper(key, stash, index=None):
"""writeme."""
entity = stash.get(key)
num_samples = len(entity.chord_labels)
if index is None:
index = {key: np.arange(num_samples)}
valid_samples = index.get(key, [])
idx = 0
count = 0
while count < len(valid_samples):
n = valid_samples[idx]
if n >= entity.chroma.shape[0]:
print "Out of range! %s" % key
break
yield biggie.Entity(chroma=entity.chroma[n],
chord_label=entity.chord_labels[n])
idx += 1
count += 1
def count_transitions_v157(stash):
"""writeme."""
vocab = lex.Strict(157)
transitions = np.zeros([14, 157])
for k in stash.keys():
chord_labels = stash.get(k).chord_labels
chord_idx = vocab.label_to_index(chord_labels)
for n in range(len(chord_idx) - 1):
if chord_idx[n] is None or chord_idx[n + 1] is None:
continue
from_idx = int(chord_idx[n]) / 12
if 156 in chord_idx[n:n+2]:
to_idx = chord_idx[n + 1]
else:
to_idx = L.subtract_mod(chord_idx[n], chord_idx[n + 1], 12)
transitions[from_idx, to_idx] += 1
trans_mat = []
for row in transitions[:-1]:
for _ in range(12):
trans_mat.append(L.rotate(row, 0-_))
trans_mat.append(trans_mat[-1])
return np.array(trans_mat)
def count_labels(reference_set, vocab_dim=157):
labels = dict()
for labeled_intervals in reference_set.values():
rootless = [L.join(*list([''] + list(L.split(l)[1:])))
for l in labeled_intervals['labels']]
intervals = np.array(labeled_intervals['intervals'])
durations = np.abs(np.diff(intervals, axis=1)).flatten()
for y, w in zip(rootless, durations):
if not y in labels:
labels[y] = 0
labels[y] += w
qlabels = labels.keys()
counts = [labels[y] for y in qlabels]
idx = np.argsort(counts)[::-1]
return [qlabels[i] for i in idx], [counts[i] for i in idx]
def count_states(reference_set, lexicon):
states = dict()
for labeled_intervals in reference_set.values():
chord_idx = lexicon.label_to_index(labeled_intervals['labels'])
intervals = np.array(labeled_intervals['intervals'])
durations = np.abs(np.diff(intervals, axis=1)).flatten()
for y, w in zip(chord_idx, durations):
s = L.relative_chord_index(y, y, 157)
if not s in states:
states[s] = 0
states[s] += w
labels = states.keys()
counts = [states[y] for y in labels]
idx = np.argsort(counts)[::-1]
return [labels[i] for i in idx], [counts[i] for i in idx]
def count_bigrams(reference_set, vocab_dim=157):
states = dict()
for labeled_intervals in reference_set.values():
chord_idx = L.chord_label_to_class_index(labeled_intervals['labels'],
vocab_dim)
intervals = np.array(labeled_intervals['intervals'])
durations = np.abs(np.diff(intervals, axis=1)).flatten()
for n in range(1, len(chord_idx)):
s = tuple([L.relative_chord_index(chord_idx[n],
chord_idx[n + i], 157)
for i in range(-1, 1)])
if not s in states:
states[s] = 0
states[s] += durations[n]
labels = states.keys()
counts = [states[y] for y in labels]
idx = np.argsort(counts)[::-1]
return [labels[i] for i in idx], [counts[i] for i in idx]
def count_trigrams(reference_set, vocab_dim=157):
states = dict()
for labeled_intervals in reference_set.values():
chord_idx = L.chord_label_to_class_index_soft(
labeled_intervals['labels'], vocab_dim)
intervals = np.array(labeled_intervals['intervals'])
durations = np.abs(np.diff(intervals, axis=1)).flatten()
for n in range(1, len(chord_idx) - 1):
s = tuple([L.relative_chord_index(chord_idx[n],
chord_idx[n + i], 157)
for i in range(-1, 2)])
if not s in states:
states[s] = 0
states[s] += durations[n]
labels = states.keys()
counts = [states[y] for y in labels]
idx = np.argsort(counts)[::-1]
return [labels[i] for i in idx], [counts[i] for i in idx]
def chroma_trigrams(ref_set):
states = dict()
for v in ref_set.values():
labels = v['labels']
y = L.chord_label_to_class_index(labels, 157)
intervals = np.array(v['intervals'])
durations = np.abs(np.diff(intervals, axis=1)).flatten()
for n in range(1, len(y) - 1):
sidx = [L.relative_chord_index(y[n], y[n + i], 157)
for i in range(-1, 2)]
if None in sidx:
continue
rot_labels = [L.index_to_chord_label(s, 157) for s in sidx]
c = tuple(["".join(["%d" % _
for _ in L.chord_label_to_chroma(l).flatten()])
for l in rot_labels])
if not c in states:
states[c] = dict(labels=set(), duration=0.0)
states[c]['duration'] += durations[n]
states[c]['labels'].add(labels[n])
return states
def ideal_chroma_fr(ref_set, stash, framerate=20.0):
sample_size = 1./framerate
for k, v in ref_set.iteritems():
chroma = L.chord_label_to_chroma(v['labels'])
time_points, chroma = mir_eval.util.intervals_to_samples(
np.asarray(v['intervals']), chroma.tolist(),
sample_size=sample_size, fill_value=[0]*12)
time_points, labels = mir_eval.util.intervals_to_samples(
np.asarray(v['intervals']), v['labels'],
sample_size=sample_size, fill_value='N')
stash.add(str(k), biggie.Entity(
chroma=chroma, chord_labels=[str(l) for l in labels],
time_points=time_points), overwrite=True)
print k
def ideal_chroma_ss(ref_set, stash):
for k, v in ref_set.iteritems():
intervals, labels = L.compress_labeled_intervals(**v)
chord_labels = [str(l) for l in labels]
chroma = L.chord_label_to_chroma(chord_labels)
durations = intervals_to_durations(intervals)
stash.add(str(k), biggie.Entity(
chroma=chroma, chord_labels=chord_labels,
time_points=intervals[:, 0], durations=durations), overwrite=True)
print k
def class_prior_v157(stash, lexicon):
"""writeme."""
assert lexicon.num_classes == 157
total_count = np.zeros(lexicon.num_classes, dtype=float)
for k in stash.keys():
entity = stash.get(k)
chord_idx = lexicon.label_to_index(entity.chord_labels)
y_true = chord_idx[np.not_equal(chord_idx, None)].astype(int)
counts = np.bincount(y_true)
total_count[:len(counts)] += counts
for q in range(13):
total_count[12*q:(q+1)*12] = total_count[12*q:(q+1)*12].sum()
return total_count / total_count.sum()
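# Illustrative check (assumes a populated `stash`): the prior returned above
# is a length-157 vector that sums to one, with equal mass spread across the
# twelve roots of each quality.
#
#   prior = class_prior_v157(stash, lex.Strict(157))
#   assert prior.shape == (157,)
#   assert abs(prior.sum() - 1.0) < 1e-6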
|
[
"[email protected]"
] | |
51577add3faa8b6ac3b28fcbd7f7c2b1f86e5db7
|
ed7917283ed68d706a232f78cd29eb53e25f47d5
|
/src/datafinder/persistence/adapters/ldap_/principal_search/__init__.py
|
66259ef380c242dc9192870a975711c23f473e62
|
[] |
no_license
|
DLR-SC/DataFinder
|
b08ae7fe360d8f7424c6b629ab3bc7fb5c370384
|
958fda4f3064f9f6b2034da396a20ac9d9abd52f
|
refs/heads/master
| 2021-01-18T23:25:38.683720 | 2017-01-27T13:09:43 | 2017-01-30T11:37:14 | 52,777,594 | 9 | 7 | null | 2017-01-30T11:37:16 | 2016-02-29T08:58:04 |
Python
|
UTF-8
|
Python
| false | false | 1,805 |
py
|
# $Filename$
# $Authors$
#
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements LDAP-specific adapters for the principal search.
"""
__version__ = "$Revision-Id:$"
|
[
""
] | |
aa73b5feecaf9671d0248fac60f0816e78ff5f24
|
77462a326307065eda78f309e3c1a5e28114bc48
|
/bth7-4.py
|
fddcd2228937ba9a56407bff03e341a5e4e95e3d
|
[] |
no_license
|
PhamHongQuyet/bai-TH7
|
e0b7f2ea7ca92430662c2d29a366d805fc75727f
|
ca233e02c7b6d4f88f0d68b3b06cb3a5e29fec9d
|
refs/heads/master
| 2020-05-24T20:17:05.355919 | 2019-05-19T08:32:22 | 2019-05-19T08:32:22 | 187,452,203 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 201 |
py
|
def file_read_from_head(fname, nlines):
    """Print the first `nlines` lines of the file `fname`."""
    from itertools import islice
with open(fname) as f:
for line in islice(f, nlines):
print(line)
file_read_from_head('test.txt',2)
|
[
"[email protected]"
] | |
01d8a592e57f2900c2948ab287bc01ca07685ce4
|
6bf11db369218e0d7b2213b3781cef9e4bf67b6d
|
/YourSpace/post/migrations/0004_auto_20160727_0247.py
|
25989b6954004dff6bb8011ab84784f132964dfc
|
[] |
no_license
|
georgesitoniv/Your-Space
|
4ebed257c43e36379540d7404f0d9737e0a7c1fa
|
c6db36cde99e36ea99fee0ae97ab73118700d4e0
|
refs/heads/master
| 2022-12-03T05:54:47.485939 | 2020-05-20T13:14:45 | 2020-05-20T13:14:45 | 64,178,791 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 411 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('post', '0003_auto_20160725_0857'),
]
operations = [
migrations.AlterField(
model_name='post',
name='content',
field=models.TextField(max_length=600, blank=True),
),
]
|
[
"[email protected]"
] | |
098128b5007241360b8b280b534fcda40d51d2dd
|
313e7752b94935d265e215a2f6095a7b9697f587
|
/reforcer.py
|
84634e1f5adcbd4b6eebafc6a8a07057eac4c388
|
[] |
no_license
|
HeroOnline/autoReinforce
|
b1cbb9f7df79734dc32698b874f514425811e913
|
cd569b66b67a2f1fbe5800b877fa06ab5abaff20
|
refs/heads/master
| 2021-05-17T18:48:21.406220 | 2019-09-25T09:42:28 | 2019-09-25T09:42:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 790 |
py
|
import os
import sys
import signer
import tools
sid = ""
skey = ""
downloadDir = "./legu"
bash = "java -Dfile.encoding=utf-8 -jar ms-shield.jar -sid {sid} -skey {skey} -uploadPath {uploadPath} -downloadPath {downloadPath}"
def reforce():
print("============开始加固============")
tar = tools.findFile("apk")
if(not tar):
raise Exception("didn't find apk, do you put it in root?")
apk = tar
if not os.path.exists(downloadDir):
os.mkdir(downloadDir)
if len(sid) > 0 and len(skey) > 0:
cmd = bash.format(sid=sid, skey=skey, uploadPath=apk, downloadPath=downloadDir)
os.system(cmd)
print("============加固完成============")
return True
else:
raise Exception("pls set sid & skey value in reforcer.py")
|
[
"[email protected]"
] | |
9de2abc269bc9cdcf02113bb509871159393a39e
|
fc8063db27ae5806c7bf2d2ebc10c3d3380e33a0
|
/2019_issue_counts_combined/combine_state_counts.py
|
44b665c7425cf4f9663e7595eadea350644243d3
|
[] |
no_license
|
cjehr/chronam_api
|
270c794b643a201a22c0200a104868558e993afc
|
4afbf49493ceea503353b083d653e2060b5d3734
|
refs/heads/master
| 2020-05-17T10:55:17.031310 | 2019-05-08T15:45:10 | 2019-05-08T15:45:10 | 183,670,812 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 502 |
py
|
import glob
import shutil
# Opens .csv files in a directory and appends them to a single file.
# combined_state_count.csv is the combined file name.
count = 0
with open('combined_state_count.csv', 'wb') as outfile:
for filename in glob.glob('*.csv'):
if filename == 'combined_state_count.csv':
continue
with open(filename, 'rb') as readfile:
count += 1
shutil.copyfileobj(readfile, outfile)
print(str(count) + ' files combined')
|
[
"[email protected]"
] | |
7efaf3614bfa50e9f908d810d6bfbf19d5941535
|
315fae59067002bc5d48f1f9998283d68026f406
|
/Pregunta7.py
|
64253ce89dbd88b0aab7a0afed87e604f354620b
|
[] |
no_license
|
krozmok/InfotechSystemsTest
|
3eae738c6b8fa893bf8316d2c61563f84074aa5b
|
de0235e9696158bc4581821dd070cee7b8d91509
|
refs/heads/main
| 2023-02-20T18:09:29.013527 | 2021-01-28T17:21:22 | 2021-01-28T17:21:22 | 333,793,335 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 691 |
py
|
'''
Write a function that returns the n-th Fibonacci number.
Remember that the Fibonacci series starts at one, i.e. fibonacci(1) = 1, and
fibonacci(3) = fibonacci(2) + fibonacci(1).
Note: implement it iteratively.
'''
def fibonacci(n):
if n == 1 or n == 2:
return 1
else:
primero = 1
siguiente = 1
temporal = 0
for i in range(n-2):
temporal = siguiente
siguiente = primero + siguiente
primero = temporal
return siguiente
def main():
n = int(input("Ingrese n: "))
print("Fibonacchi({0}) = {1}".format(n, fibonacci(n)))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
0399d3b723e13b3d7a6e31812104913ca82907df
|
3217bdfe91c65a6897b2cffedd6cf7a5ce154ce6
|
/bin/pip3
|
413059cf4d4139fb647f908d41dd4e5179163dd4
|
[] |
no_license
|
merrillkoshy/dt
|
eaf6f5bda45169c90c634bdc59b5db9cfae206fa
|
04852a7abc426470fa00770627260096af4a7a73
|
refs/heads/master
| 2022-12-09T03:33:27.668964 | 2018-09-08T17:46:00 | 2018-09-08T17:46:00 | 145,230,821 | 0 | 1 | null | 2022-12-07T20:03:42 | 2018-08-18T15:24:16 |
Python
|
UTF-8
|
Python
| false | false | 227 |
#!/Users/Bdb/dt/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | ||
f2429859aaddc700ac346e102f30f493ad8da85f
|
cfb5782653073bdc15ee7df6e198fc2f73f83b4d
|
/sourcing/SEC-10K/SECAzure.py
|
ebfe18f7e1dcd5b4a7fae0bb032c3ed22869c6c5
|
[] |
no_license
|
ravithejaburugu/ravitest
|
bde0bfd11709becc20935bd5b8bd97108e55fed4
|
ff0c13a76c875c32c7b380fe7a69e58aae8ddb9e
|
refs/heads/master
| 2021-08-14T20:19:43.406682 | 2017-11-16T18:04:16 | 2017-11-16T18:04:16 | 110,831,847 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,012 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 31 12:56:40 2017
@author: ANSHUL
"""
import requests
import io
import re
from bs4 import BeautifulSoup
from urllib2 import urlopen
from azure.storage.blob import BlockBlobService, ContainerPermissions
from datetime import datetime, timedelta
from os import path
class SEC_Azure():
def __init__(self, azure_account_name, azure_account_key, azure_container):
# initializing class variables
self.azure_container = azure_container
self.block_blob_service = BlockBlobService(
account_name=azure_account_name,
account_key=azure_account_key)
# creating azure container, iff container doesn't exist.
if not self.block_blob_service.exists(self.azure_container):
self.block_blob_service.create_container(self.azure_container)
def createDocumentList(self, cik, year):
if (len(cik) != 10):
while(len(cik) != 10):
cik = '0' + cik
# generate the url to crawl
base_url = "http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK=" + str(cik) + "&type=10-K&dateb=&owner=exclude&output=xml&start=0&count="
r = requests.get(base_url)
data = r.text
        # parse fetched data using BeautifulSoup
soup = BeautifulSoup(data)
# store the link in the list
link_list = list()
year_link_list = soup.find_all(['datefiled', 'filinghref'])
year_link_string = [str(e) for e in year_link_list]
arg = year + '-'
for year_str in year_link_string:
if arg in year_str:
i = year_link_string.index(year_str)
if len(year_link_string) > i + 1:
link = year_link_string[i + 1]
url = re.split('[< >]', link)[2]
# If the link is .htm, convert it to .html
if url.split(".")[len(url.split(".")) - 1] == "htm":
url += "l"
link_list.append(url)
azure_urls, file_types = self.downloadToAzure(cik, link_list)
return azure_urls, file_types
def downloadToAzure(self, cik, link_list):
# Get all the doc
azure_urls = set()
file_types = set()
for k in range(len(link_list)):
original_url = ''
try:
soup1 = BeautifulSoup(urlopen(link_list[k]))
except BaseException:
continue
tablecheck = soup1.findAll("table", {"class": "tableFile"})
table1 = soup1.findAll(
"table", {
"class": "tableFile", "summary": "Document Format Files"})
if(len(tablecheck) == 2):
xbrl_zip_file_url = link_list[k].replace(
'-index.html', '') + "-xbrl.zip"
xbrl_zip_file_name = xbrl_zip_file_url.split("/")[-1]
r = requests.get(xbrl_zip_file_url, stream=True)
stream = io.BytesIO(r.content)
self.block_blob_service.create_blob_from_stream(path.join(
self.azure_container, cik), xbrl_zip_file_name, stream)
sas_token = self.block_blob_service.generate_blob_shared_access_signature(
self.azure_container,
path.join(cik, xbrl_zip_file_name),
expiry=datetime.utcnow() + timedelta(weeks=52),
permission=ContainerPermissions.READ)
download_url = self.block_blob_service.make_blob_url(
self.azure_container, path.join(
cik, xbrl_zip_file_name), sas_token=sas_token)
azure_urls.add(download_url)
file_types.add(xbrl_zip_file_url.split(".")[-1])
for tbl in table1:
rows = tbl.findAll('tr')
for tr in rows:
cols = tr.findAll('td')
for td in cols:
url = tr.find('a', href=True)
original_url = url['href']
arc = "https://www.sec.gov" + original_url
file_name = original_url.split("/")[-1]
file_type = original_url.split(".")[-1]
if(file_name != ''):
if(file_type == 'pdf' or
file_type == 'gif' or
file_type == 'jpg'):
r = requests.get(arc, stream=True)
stream = io.BytesIO(r.content)
self.block_blob_service.create_blob_from_stream(
path.join(self.azure_container, cik),
file_name, stream)
else:
f = requests.get(arc)
self.block_blob_service.create_blob_from_text(
path.join(self.azure_container, cik),
file_name, f.text)
sas_token = self.block_blob_service.generate_blob_shared_access_signature(
self.azure_container,
path.join(cik, file_name),
expiry=datetime.utcnow() + timedelta(weeks=52),
permission=ContainerPermissions.READ)
download_url = self.block_blob_service.make_blob_url(
self.azure_container, path.join(cik, file_name), sas_token=sas_token)
azure_urls.add(download_url)
file_types.add(file_type)
break
return azure_urls, file_types
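# Illustrative usage sketch; the Azure credentials, container name, CIK and
# year below are placeholders.
#
#   sec = SEC_Azure(azure_account_name='<account>',
#                   azure_account_key='<key>',
#                   azure_container='sec-10k-filings')
#   urls, types = sec.createDocumentList(cik='320193', year='2016')
#   for url in urls:
#       print url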
|
[
"[email protected]"
] | |
c462f2a96c5c7c43976b510cb1bc5edc5d2ab16d
|
bd0af18980fe7817f7abde7dd23fddaf10548c72
|
/sgvehiclepark/wsgi.py
|
176e0bc1b010290176471f2c555714d573afed6a
|
[] |
no_license
|
suneelmurthy/sgvehiclepark
|
4f5ef5419db9219e5cbf1545ea60a7bbe8356849
|
1ee4c9aeef3fef85f520c060a919bd41a8caa64c
|
refs/heads/master
| 2020-04-14T18:48:44.687883 | 2015-12-14T09:35:53 | 2015-12-14T09:35:53 | 40,391,562 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,435 |
py
|
"""
WSGI config for myproject project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "sgvehiclepark.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sgvehiclepark.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
[
"[email protected]"
] | |
98a1ce18006a597696a1f8dc8530add0fd6103ab
|
72a0f1c73a6267d35b793dd8aab1aa65997278d2
|
/Posterior-Computation/compute-posteriors-factored-model.py
|
13eaac08f152792f2a4d5fcaa26d109df3df1024
|
[
"BSD-3-Clause"
] |
permissive
|
CrowdDynamicsLab/CMAP
|
e3c74b48983c9e302b7f357a649e4701e44fee30
|
1db002cfcd5e9805dc93d27582381f1388f8fe9f
|
refs/heads/master
| 2020-04-06T13:12:31.399393 | 2018-11-22T23:35:41 | 2018-11-22T23:35:41 | 157,488,963 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,447 |
py
|
import codecs
import os
import math
import numpy as np
import scipy.special
import operator
import datetime
import argparse
def load_phi_w(phi_w_path):
PHI_W = []
file = codecs.open(phi_w_path, 'r', 'utf-8')
for row in file:
s = row.strip().split(' ')
curr_li = []
for elem in s:
prob = float(elem)
curr_li.append(prob)
PHI_W.append(curr_li)
file.close()
return PHI_W
def load_phi_b(phi_b_path):
PHI_B = []
file = codecs.open(phi_b_path, 'r', 'utf-8')
for row in file:
s = row.strip().split(' ')
curr_li = []
for elem in s:
prob = float(elem)
curr_li.append(prob)
PHI_B.append(curr_li)
file.close()
return PHI_B
def load_time_map(mapping_path):
TIME_MAP = {}
mapping_file = codecs.open(mapping_path, 'r', 'utf-8')
idx = 0
for row in mapping_file:
# UserId, PostId, Behav, TimeStamp
s = row.strip().split('\t')
struct_time = datetime.datetime.strptime(s[3], "%Y-%m-%d %H:%M:%S")
# Id = int(s[1])
# actual_ts = int(s[3].strip())
TIME_MAP[idx] = struct_time
idx+=1
return TIME_MAP
def load_link_probs(link_prob_path):
LINK_PROB = []
link_prob_file = codecs.open(link_prob_path, 'r', 'utf-8')
for row in link_prob_file:
s = row.strip().split(' ')
curr_li = []
for elem in s:
prob = float(elem)
curr_li.append(prob)
LINK_PROB.append(curr_li)
link_prob_file.close()
return LINK_PROB
def load_alpha_beta_k():
ALPHA_K = []
alpha_file = codecs.open(alpha_k_path, 'r', 'utf-8')
for row in alpha_file:
ALPHA_K.append(float(row.strip()))
alpha_file.close()
BETA_K = []
beta_file = codecs.open(beta_k_path, 'r', 'utf-8')
for row in beta_file:
BETA_K.append(float(row.strip()))
beta_file.close()
return ALPHA_K, BETA_K
def load_alpha_beta_g(alpha_g_path, beta_g_path):
ALPHA_G = []
alpha_file = codecs.open(alpha_g_path, 'r', 'utf-8')
for row in alpha_file:
curr_alpha = []
s = row.strip().split(' ')
for elem in s:
curr_alpha.append(float(elem))
ALPHA_G.append(curr_alpha)
alpha_file.close()
BETA_G = []
beta_file = codecs.open(beta_g_path, 'r', 'utf-8')
for row in beta_file:
curr_beta = []
s = row.strip().split(' ')
for elem in s:
curr_beta.append(float(elem))
BETA_G.append(curr_beta)
beta_file.close()
return ALPHA_G, BETA_G
def load_group_user_distr(group_user_distr_path):
GROUP_USER = []
group_user_distr_file = codecs.open(group_user_distr_path, 'r', 'utf-8')
for row in group_user_distr_file:
s = row.strip().split(' ')
curr_li = []
for elem in s:
prob = float(elem)
curr_li.append(prob)
GROUP_USER.append(curr_li)
group_user_distr_file.close()
return GROUP_USER
def load_group_prior(group_prior_path):
GROUP_PRIOR = []
group_prior_file = codecs.open(group_prior_path, 'r', 'utf-8')
for row in group_prior_file:
GROUP_PRIOR.append(float(row.strip()))
group_prior_file.close()
return GROUP_PRIOR
def load_group_topic_distr(group_b_topic_distr_path, group_w_topic_distr_path):
GROUP_B_TOPIC = []
GROUP_W_TOPIC = []
group_topic_distr_file = codecs.open(group_b_topic_distr_path, 'r', 'utf-8')
for row in group_topic_distr_file:
s = row.strip().split(' ')
curr_li = []
for elem in s:
prob = float(elem)
curr_li.append(prob)
GROUP_B_TOPIC.append(curr_li)
group_topic_distr_file.close()
group_topic_distr_file = codecs.open(group_w_topic_distr_path, 'r', 'utf-8')
for row in group_topic_distr_file:
s = row.strip().split(' ')
curr_li = []
for elem in s:
prob = float(elem)
curr_li.append(prob)
GROUP_W_TOPIC.append(curr_li)
group_topic_distr_file.close()
return GROUP_B_TOPIC, GROUP_W_TOPIC
def load_word_idx(word_idx_path):
WORD_IDX = {}
file = codecs.open(word_idx_path, 'r', 'utf-8')
idx = 0
for row in file:
# idx+=1
# print(idx)
try:
s = row.strip().split('\t')
WORD_IDX[s[1]] = int(s[0])
except:
print("Error: ",row.strip())
file.close()
return WORD_IDX
def load_behav_idx(behav_idx_path):
BEHAV_IDX = {}
file = codecs.open(behav_idx_path, 'r', 'utf-8')
for row in file:
s = row.strip().split('\t')
BEHAV_IDX[s[1]] = int(s[0])
file.close()
return BEHAV_IDX
def load_user_map(user_map_path):
USER_MAP = {}
file = codecs.open(user_map_path, 'r', 'utf-8')
for row in file:
s = row.strip().split('\t')
USER_MAP[int(s[1])] = s[0]
file.close()
return USER_MAP
def load_user_group_topic(table_assign_path, discount):
USER_GROUP = {}
USER_TOPIC = {}
USER_TABLE = {}
PY_TERM = [[0.0]*K_b]*G
# N_g = [0]*G
# N_k = [0]*K
C = 0
table_idx = 0
table_assign_file = codecs.open(table_assign_path, 'r', 'utf-8')
for row in table_assign_file:
# Num_intr, Group, Topic, (Interactions separated by ,)
s = row.strip().split('\t')
if(int(s[0]) == 0):
table_idx+=1
continue
group = int(s[1])
topic = int(s[2])
intr_list = s[3].strip().split(",")
for elem in intr_list:
if elem.strip() == '':
continue
user = int(elem.strip())
USER_GROUP[user] = group
USER_TOPIC[user] = topic
USER_TABLE[user] = table_idx
# N_g[group]+=1
# N_k[topic]+=1
C+=1
PY_TERM[group][topic] += (int(s[0]) - discount)
table_idx+=1
table_assign_file.close()
return USER_GROUP, USER_TOPIC, USER_TABLE, PY_TERM
def load_post_ids(mapping_path):
POST_IDs = {}
mapping_file = codecs.open(mapping_path, 'r', 'utf-8')
idx = 0
for row in mapping_file:
# UserId, Postid, Behav, CreationDate
s = row.strip().split('\t')
Id = int(s[1])
POST_IDs[idx] = Id
idx+=1
return POST_IDs
def compute_time_prob(alpha, beta, t):
prob = (1.0*(math.pow(t, alpha - 1))*(math.pow(1-t, beta - 1)))/(scipy.special.beta(alpha, beta))
return prob
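# Sanity-check sketch: the expression above is the standard Beta(alpha, beta)
# density on [0, 1], so (assuming scipy.stats is available alongside
# scipy.special) it should agree with scipy.stats.beta.pdf:
#
#   from scipy.stats import beta as beta_dist
#   assert abs(compute_time_prob(2.0, 5.0, 0.3) - beta_dist.pdf(0.3, 2.0, 5.0)) < 1e-9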
def compute_posteriors(INTR_GROUP, INTR_TOPIC, USER_MAP, WORD_IDX, BEHAV_IDX, PHI_W, PHI_B, GROUP_USER, ALPHA_G, BETA_G, ALPHA_K, BETA_K, GROUP_PRIOR, GROUP_B_TOPIC_PRIOR, GROUP_W_TOPIC_PRIOR, POST_IDs, LINKS_i_j, LINK_PROB, USER_TABLE, PY_TERM, TIME_MAP, model, dataset, discount, intr_path):
DICT_USER_POSTERIORS = {}
group_posteriors = []
intr_file = codecs.open(intr_path, 'r', 'utf-8')
idx = 0
for row in intr_file:
# Text, u, b, ts
s = row.split('\t')
# if s[1].startswith("TEMP_USER"):
# continue
text = s[0].strip()
u = int(s[1].strip())
b = s[2].strip()
ts = float(s[3].strip())
b = BEHAV_IDX[b]
text_li = text.strip().split(' ')
curr_posterior_g = []
curr_posterior_k = []
for g in range(G):
prob_g = 0.0
for k_w in range(K_w):
prob_k = 1.0
for word in text_li:
if word == '':
continue
w = WORD_IDX[word]
prob_k = prob_k * PHI_W[k_w][w]
prob_k = prob_k * compute_time_prob(ALPHA_G[g][k_w], BETA_G[g][k_w], ts)
prob_k *= GROUP_W_TOPIC_PRIOR[g][k_w]
prob_g+=prob_k
curr_posterior_g.append(prob_g)
group_posteriors.append(curr_posterior_g)
idx+=1
if idx%1000 == 0:
print(model, dataset, discount, idx)
intr_file.close()
print("Without link prob loaded")
intr_file = codecs.open(intr_path, 'r', 'utf-8')
idx = 0
for row in intr_file:
# Text, u, b, ts
s = row.split('\t')
# if s[1].startswith("TEMP_USER"):
# continue
text = s[0].strip()
u = int(s[1].strip())
if u == -1:
idx+=1
continue
b = s[2].strip()
ts = float(s[3].strip())
b = BEHAV_IDX[b]
text_li = text.strip().split(' ')
curr_posterior_g_k_b = []
for g in range(G):
for k_b in range(K_b):
prob = 1.0
prob *= GROUP_PRIOR[g]
prob *= PY_TERM[g][k_b]
prob *= PHI_B[k_b][b]
prob_k_w = 0.0
for k_w in range(K_w):
prob_k = 1.0
for word in text_li:
if word == '':
continue
w = WORD_IDX[word]
prob_k = prob_k * PHI_W[k_w][w]
prob_k = prob_k * compute_time_prob(ALPHA_G[g][k_w], BETA_G[g][k_w], ts)
prob_k *= GROUP_W_TOPIC_PRIOR[g][k_w]
prob_k_w+=prob_k
prob *= prob_k_w
if idx in LINKS_i_j:
curr_links = LINKS_i_j[idx]
for j in curr_links:
posterior_j = group_posteriors[j]
max_index, max_value = max(enumerate(posterior_j), key=operator.itemgetter(1))
prob = prob * LINK_PROB[g][max_index]
curr_posterior_g_k_b.append(prob)
curr_posterior_g = []
li_idx = 0
for g in range(G):
prob_g = 0.0
for k in range(K_b):
prob_g += curr_posterior_g_k_b[li_idx]
li_idx+=1
curr_posterior_g.append(prob_g)
curr_posterior_k = [0.0]*K_b
li_idx = 0
for g in range(G):
for k in range(K_b):
curr_posterior_k[k] += curr_posterior_g_k_b[li_idx]
li_idx+=1
sum_li = np.sum(curr_posterior_g)
if sum_li == 0.0:
sum_li = np.sum([1.0])
curr_posterior_g = curr_posterior_g/sum_li
curr_posterior_g = curr_posterior_g.tolist()
sum_li = np.sum(curr_posterior_k)
if sum_li == 0.0:
sum_li = np.sum([1.0])
curr_posterior_k = curr_posterior_k/sum_li
curr_posterior_k = curr_posterior_k.tolist()
b = s[2].strip()
# actual_ts = TIME_MAP[idx]
# actual_user = USER_MAP[u]
actual_user = u
if actual_user not in DICT_USER_POSTERIORS:
DICT_USER_POSTERIORS[actual_user] = []
# g = INTR_GROUP[idx]
DICT_USER_POSTERIORS[actual_user].append(curr_posterior_g+curr_posterior_k)
idx+=1
if idx%1000==0:
print(model, dataset, discount, idx)
intr_file.close()
return DICT_USER_POSTERIORS
def load_links(links_path):
LINKS_i_j = {}
links_file = codecs.open(links_path, 'r', 'utf-8')
for row in links_file:
# Interaction i -> Interaction j
s = row.strip().split('\t')
i = int(s[0])
j = int(s[1])
if i not in LINKS_i_j:
LINKS_i_j[i] = []
LINKS_i_j[i].append(j)
links_file.close()
return LINKS_i_j
def generate_posteriors(basepath, intr_path, links_path, discount):
# INTR_GROUP, INTR_TOPIC, USER_MAP, WORD_IDX, BEHAV_IDX, PHI_W, PHI_B, GROUP_USER, ALPHA_G, BETA_G, ALPHA_K, BETA_K, GROUP_PRIOR, GROUP_TOPIC_PRIOR
# basepath = "../Output/" +model+"_"+str(K_b)+"_"+str(G)+"_"+dataset+"_"+str(discount)+"00000/"
phi_w_path = basepath+"topic-word-distribution.txt"
phi_b_path = basepath+"topic-behavior-distribution.txt"
alpha_k_path = basepath+"topic-time-alpha.txt"
beta_k_path = basepath+"topic-time-beta.txt"
alpha_g_path = basepath+"group-time-alpha.txt"
beta_g_path = basepath+"group-time-beta.txt"
group_user_distr_path = basepath+"group-user-distribution.txt"
group_prior_path = basepath+"group-priors.txt"
group_b_topic_distr_path = basepath+"group-b-topic-distribution.txt"
group_w_topic_distr_path = basepath+"group-w-topic-distribution.txt"
word_idx_path = basepath+"vocab-mapping.txt"
behav_idx_path = basepath+"behavior-mapping.txt"
table_assign_path = basepath+"table-assignment-status.txt"
# user_map_path = "../Data/"+dataset+"-user-map.txt"
# intr_path = "../Data/"+dataset+"_pre_processed.txt"
# mapping_path = "../Data/"+dataset+"_map.txt"
# links_path = "../Data/"+dataset+"_links.txt"
posterior_path = basepath+"posteriors-user-interactions.txt"
link_prob_path = basepath+"link-prob.txt"
    USER_MAP = {}  # user-map loading is disabled; posteriors key on raw user ids in compute_posteriors
WORD_IDX = load_word_idx(word_idx_path)
BEHAV_IDX = load_behav_idx(behav_idx_path)
PHI_W = load_phi_w(phi_w_path)
PHI_B = load_phi_b(phi_b_path)
GROUP_USER = load_group_user_distr(group_user_distr_path)
ALPHA_G, BETA_G = load_alpha_beta_g(alpha_g_path, beta_g_path)
# ALPHA_K, BETA_K = load_alpha_beta_k()
ALPHA_K = []
BETA_K = []
GROUP_PRIOR = load_group_prior(group_prior_path)
GROUP_B_TOPIC_PRIOR, GROUP_W_TOPIC_PRIOR = load_group_topic_distr(group_b_topic_distr_path, group_w_topic_distr_path)
# INTR_GROUP, INTR_TOPIC = load_intr_group_topic()
USER_GROUP, USER_TOPIC, USER_TABLE, PY_TERM = load_user_group_topic(table_assign_path, discount)
# USER_GROUP = {}
# USER_TOPIC = {}
# POST_IDs = load_post_ids(mapping_path)
POST_IDs = {}
LINKS_i_j = load_links(links_path)
LINK_PROB = load_link_probs(link_prob_path)
# LINKS_i_j = {}
# LINK_PROB = []
# TIME_MAP = load_time_map(mapping_path)
TIME_MAP = {}
print("Loading Done")
DICT_USER_POSTERIORS = compute_posteriors(USER_GROUP, USER_TOPIC, USER_MAP, WORD_IDX, BEHAV_IDX, PHI_W, PHI_B, GROUP_USER, ALPHA_G, BETA_G, ALPHA_K, BETA_K, GROUP_PRIOR, GROUP_B_TOPIC_PRIOR, GROUP_W_TOPIC_PRIOR, POST_IDs, LINKS_i_j, LINK_PROB, USER_TABLE, PY_TERM, TIME_MAP, model, dataset, discount, intr_path)
print("User Posterior Dict: ", len(DICT_USER_POSTERIORS))
posterior_file = codecs.open(posterior_path, 'w', 'utf-8')
for user in DICT_USER_POSTERIORS:
print(str(user)+'\t'+str(DICT_USER_POSTERIORS[user]), file = posterior_file)
posterior_file.close()
print('Posteriors Saved to '+ posterior_path)
parser = argparse.ArgumentParser("Posterior_Factored")
parser.add_argument("--output_path", help="Path to CMAP output files")
parser.add_argument("--corpus_path", help="Path to pre_processed file")
parser.add_argument("--links_path", help="Path to links file")
parser.add_argument("--discount", help="Value of Discount Parameter", default = "0.5")
parser.add_argument("--K_b", help="Number of Behavior Topics", default = "5")
parser.add_argument("--K_w", help="Number of Text Topics", default = "20")
parser.add_argument("--G", help="Number of Profiles", default = "20")
args = parser.parse_args()
K_b = int(args.K_b)
K_w = int(args.K_w)
G = int(args.G)
discount = float(args.discount)
basepath = args.output_path
intr_path = args.corpus_path
links_path = args.links_path
# `model` and `dataset` are only used as tags in the progress prints inside
# compute_posteriors.
model = 'factored'
dataset = ''
generate_posteriors(basepath, intr_path, links_path, discount)
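# Example invocation (paths are placeholders):
#   python compute-posteriors-factored-model.py \
#       --output_path ./output/run1/ \
#       --corpus_path ./data/corpus_pre_processed.txt \
#       --links_path ./data/corpus_links.txt \
#       --discount 0.5 --K_b 5 --K_w 20 --G 20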
|
[
"[email protected]"
] | |
129be795f76f25fb388feb32df14d708ab666aea
|
d1b1c14d90ba2bf8752f3019e178d6042cc12c95
|
/intersog/chat/models.py
|
45bd41070236d9da4e60738f49c062771d7ba07b
|
[] |
no_license
|
NatalyMac/intersog
|
923584cf9bc2cd803791744ab309098cf8706a0a
|
bc700dd812e10aa67adccaca9c363b8caf1f9573
|
refs/heads/master
| 2021-01-20T20:38:53.365299 | 2016-06-28T07:54:02 | 2016-06-28T07:54:02 | 60,775,698 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 862 |
py
|
# coding: utf-8
from __future__ import unicode_literals
from django.db import models
from django.conf import settings
from swampdragon.models import SelfPublishModel
from .serializer import ChatSerializer
class Chat(SelfPublishModel, models.Model):
class Meta(object):
verbose_name = u"Чат"
verbose_name_plural = u"Чат"
serializer_class = ChatSerializer
user_sent = models.ForeignKey(settings.AUTH_USER_MODEL,
blank=True)
user_receive_id = models.IntegerField(
blank=True,
verbose_name=u"Получатель")
text = models.TextField(
blank=True,)
created = models.DateField(
auto_now=False,
auto_now_add=True,
blank=True,
verbose_name=u"Дата создания")
def __unicode__(self):
return '{}'.format(self.text)
|
[
"[email protected]"
] | |
9117f9f2cce95c3f9c960a40127f7cde6384a932
|
d21864a26233d32913c44fd87d6f6e67ca9aabd8
|
/prosodic/lib/Phoneme.py
|
876171217cb508068e7a472fe4fc487bf116ba6c
|
[
"MIT"
] |
permissive
|
quadrismegistus/litlab-poetry
|
7721a8849667f2130bb6fa6b9f18a7f6beb9912e
|
28fff4c73344ed95d19d7e9a14e5a20697599605
|
refs/heads/master
| 2021-01-23T20:14:05.537155 | 2018-11-19T08:56:55 | 2018-11-19T08:56:55 | 27,054,260 | 16 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,454 |
py
|
from ipa import ipa,ipakey,ipa2cmu,formantd
from entity import entity
class Phoneme(entity):
def __init__(self,phons,ipalookup=True):
self.feats = {}
self.children = [] # should remain empty unless dipthong
self.featpaths={}
self.phon=None
if type(phons)==type([]):
for phon in phons:
if type(phon)==type(""):
self.children.append(Phoneme(phon))
else:
self.children.append(phon)
self.feat('dipthong',True)
else:
self.phon=phons.strip()
if ipalookup and self.phon:
if(self.phon in ipa):
k=-1
for v in ipa[self.phon]:
k+=1
self.feat(ipakey[k],v)
self.finished = True
if self.isLong() or self.isDipthong():
self.len=2
else:
self.len=1
def str_cmu(self):
strself=str(self)
if strself in ipa2cmu:
return ipa2cmu[strself].lower()
else:
print "<error> no cmu transcription for phoneme: ["+strself+"]"
return strself
def __str__(self):
if self.children:
return self.u2s(u"".join([x.phon for x in self.children]))
else:
return self.u2s(self.phon)
def __repr__(self):
#return "["+str(self)+"]"
return str(self)
def isConsonant(self):
return self.feature('cons')
def isVowel(self):
return (self.isDipthong() or self.isPeak())
def isPeak(self):
return self.feature('syll')
def isDipthong(self):
return self.feature('dipthong')
def isLong(self):
return self.feature('long')
def isHigh(self):
return self.feature('high')
@property
def phon_str(self):
if self.phon: return self.phon
return u''.join(phon.phon for phon in self.children)
@property
def featset(self):
if self.children:
featset=set()
for child in self.children:
featset|=child.featset
return featset
else:
return {feat for feat in self.feats if self.feats[feat]}
@property
def featspace(self):
fs={}
if self.children:
for child in self.children:
#print "CHILD:",child,child.featset
for f,v in child.feats.items():
fs[f]=int(v) if v!=None else 0
else:
for f,v in self.feats.items():
fs[f]=int(v) if v!=None else 0
return fs
def CorV(self):
if self.isDipthong() or self.isLong():
return "VV"
if self.isPeak():
return "V"
else:
return "C"
def distance(self,other):
lfs1=[self.featspace] if not self.children else [c.featspace for c in self.children]
lfs2=[other.featspace] if not other.children else [c.featspace for c in other.children]
dists=[]
for fs1 in lfs1:
for fs2 in lfs2:
allkeys=set(fs1.keys() + fs2.keys())
f=sorted(list(allkeys))
v1=[float(fs1.get(fx,0)) for fx in f]
v2=[float(fs2.get(fx,0)) for fx in f]
from scipy.spatial import distance
dists+=[distance.euclidean(v1,v2)]
return sum(dists)/float(len(dists))
def distance0(self,other):
import math
feats1=self.featset
feats2=other.featset
jc=len(feats1&feats2) / float(len(feats1 | feats2))
vdists=[]
if not 'cons' in feats1 and not 'cons' in feats2:
## ADD VOWEL F1,F2 DIST
v1=[p for p in self.phon_str if p in formantd]
v2=[p for p in other.phon_str if p in formantd]
if not v1 or not v2:
vdists+=[2]
for v1x in v1:
for v2x in v2:
#print v1x,v2x
vdist=math.sqrt( (formantd[v1x][0] - formantd[v2x][0])**2 + (formantd[v1x][1] - formantd[v2x][1])**2)
#print "ADDING",vdist
vdists+=[vdist]
#print self,other,feats1,feats2
return jc + sum(vdists)
def __eq__(self,other):
return self.feats == other.feats
|
[
"[email protected]"
] | |
f532d35eec7418c8dbe4d20673d602c6a8a643a4
|
30b10e92cf7db28910f44602dbb75135c0655ec9
|
/CelebA/src/construct_celeba_dataset.py
|
043b07d050d856c8e48f8d2604ae601bce77b98b
|
[] |
no_license
|
zjsong/CDNet
|
c267824ddb48a851764d702b9dc19d8c2dd4f324
|
914770230b9629db1d0aac97686c42b26956ee1c
|
refs/heads/master
| 2022-11-11T06:42:22.132804 | 2020-06-30T01:55:05 | 2020-06-30T01:55:05 | 266,239,399 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,564 |
py
|
"""
Construct the CelebA dataset class.
"""
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.utils.data import Dataset
class CelebADataset(Dataset):
def __init__(self, csv_file, imgs_dir, transform=None):
"""
Args:
csv_file (string): Path to the csv file with annotations.
imgs_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied on a sample.
"""
self.attributes_frame = pd.read_csv(csv_file)
self.images_dir = imgs_dir
self.transform = transform
def __len__(self):
return len(self.attributes_frame)
def __getitem__(self, idx):
img_name = os.path.join(self.images_dir, self.attributes_frame.iloc[idx, 0])
image = plt.imread(img_name)
if self.transform:
image = self.transform(image)
attributes = self.attributes_frame.iloc[idx, 1:]
        # Build the 1 x 40 attribute label vector:
        # convert the attribute values (1 and -1) into binary representations (1 and 0).
attrs_binary = np.zeros(len(attributes))
for id_attr, value_attr in enumerate(attributes):
if value_attr == 1:
attrs_binary[id_attr] = 1
else:
pass
attrs_binary = torch.from_numpy(attrs_binary).type(torch.FloatTensor)
sample = {'image': image, 'attributes': attrs_binary}
return sample
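# Illustrative usage sketch; the csv/image paths are placeholders and the
# ToTensor transform is one typical torchvision choice.
#
#   from torch.utils.data import DataLoader
#   from torchvision import transforms
#
#   dataset = CelebADataset(csv_file='list_attr_celeba.csv',
#                           imgs_dir='img_align_celeba/',
#                           transform=transforms.ToTensor())
#   loader = DataLoader(dataset, batch_size=64, shuffle=True, num_workers=2)
#   batch = next(iter(loader))
#   print(batch['image'].shape, batch['attributes'].shape)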
|
[
"[email protected]"
] | |
e82d4bec70a3688bdd17fec5b4f3ac82eccb3528
|
d37493c5d109c1396881bbaf01fa4341789c2503
|
/src/symbol_table/partial_symbol_table.py
|
31c4fb7dce1a598d6da6ffe0bfe841741d3f3bff
|
[] |
no_license
|
xkozar/VYP-compiler
|
338baed8e45e21846a734e225fe867d8182251a0
|
76b113ad066ddbef4ae9beea488321c14eb94ffa
|
refs/heads/master
| 2023-06-11T01:45:15.535879 | 2021-07-11T11:50:59 | 2021-07-11T11:50:59 | 302,682,862 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,985 |
py
|
'''
project: VYPlanguage compiler
authors: Tomáš Kožár, xkozar02
'''
from compiler import SemanticGeneralError
from symbol_table.class_partial_symbol_table import ClassPartialSymbolTable
class PartialSymbolTable:
def __init__(self, parentClosure):
self.symbols = {}
self.__parentClosure = parentClosure
def getSymbol(self, key: str):
return self.symbols.get(key) or self.__parentClosure.getSymbol(key)
def isSymbolDefined(self, key):
if self.symbols.get(key):
return True
else:
return self.__parentClosure.isSymbolDefined(key)
def addSymbol(self, key, symbol):
if self.symbols.get(key):
raise SemanticGeneralError(f'Symbol with id:\'{key}\' is already defined in this scope')
self.symbols.update({key: symbol})
def getParentClosure(self):
return self.__parentClosure.getClosure()
def getClosure(self):
return self
def containsKey(self, key):
return key in self.symbols
def getAllCurrentSymbols(self):
return list(self.symbols.values())
def getAllCurrentSymbolsAsDict(self):
return self.symbols.copy()
def getAllSymbols(self):
return self.getAllCurrentSymbols() + self.__parentClosure.getAllSymbols()
def __str__(self):
return f'{{ \n {self.__parentClosure.__str__()} \n\t {self.symbols.keys()} \n }}'
class PartialClassSymbolTable(PartialSymbolTable):
def __init__(self):
super().__init__(ClassPartialSymbolTable())
def addSymbol(self, key, symbol):
if self.symbols.get(key):
raise SemanticGeneralError(f'Symbol with id:\'{key}\' is already defined in this class')
self.symbols.update({key: symbol})
def getLength(self):
return len(list(self.symbols.keys()))
def copy(self):
returnValue = PartialClassSymbolTable()
returnValue.symbols = self.symbols.copy()
return returnValue
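# Illustrative scoping sketch (symbol values are arbitrary strings): a symbol
# missing from the inner scope is resolved through its parent closure.
#
#   outer = PartialClassSymbolTable()
#   outer.addSymbol('this', 'class-level symbol')
#   inner = PartialSymbolTable(outer)
#   inner.addSymbol('x', 'local symbol')
#   inner.getSymbol('x')      # -> 'local symbol'
#   inner.getSymbol('this')   # -> 'class-level symbol' (via parent closure)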
|
[
"[email protected]"
] | |
7112580637970329d57785ff0bc48507d4d081ea
|
08cfc4fb5f0d2f11e4e226f12520a17c5160f0a2
|
/kubernetes/client/apis/storage_v1_api.py
|
89b1839daafcce6aca004b352e8dd5927d723d95
|
[
"Apache-2.0"
] |
permissive
|
ex3cv/client-python
|
5c6ee93dff2424828d064b5a2cdbed3f80b74868
|
2c0bed9c4f653472289324914a8f0ad4cbb3a1cb
|
refs/heads/master
| 2021-07-12T13:37:26.049372 | 2017-10-16T20:19:01 | 2017-10-16T20:19:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 52,194 |
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class StorageV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_storage_class(self, body, **kwargs):
"""
create a StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_storage_class(body, async=True)
>>> result = thread.get()
:param async bool
:param V1StorageClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_storage_class_with_http_info(body, **kwargs)
else:
(data) = self.create_storage_class_with_http_info(body, **kwargs)
return data
def create_storage_class_with_http_info(self, body, **kwargs):
"""
create a StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_storage_class_with_http_info(body, async=True)
>>> result = thread.get()
:param async bool
:param V1StorageClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'pretty']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_storage_class" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_storage_class`")
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClass',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_storage_class(self, **kwargs):
"""
delete collection of StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_collection_storage_class(async=True)
>>> result = thread.get()
:param async bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.delete_collection_storage_class_with_http_info(**kwargs)
else:
(data) = self.delete_collection_storage_class_with_http_info(**kwargs)
return data
def delete_collection_storage_class_with_http_info(self, **kwargs):
"""
delete collection of StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_collection_storage_class_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['pretty', '_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_storage_class" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_storage_class(self, name, body, **kwargs):
"""
delete a StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_storage_class(name, body, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the StorageClass (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.delete_storage_class_with_http_info(name, body, **kwargs)
else:
(data) = self.delete_storage_class_with_http_info(name, body, **kwargs)
return data
def delete_storage_class_with_http_info(self, name, body, **kwargs):
"""
delete a StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_storage_class_with_http_info(name, body, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the StorageClass (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'body', 'pretty', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_storage_class" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_storage_class`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `delete_storage_class`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_api_resources(async=True)
>>> result = thread.get()
:param async bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_api_resources_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_storage_class(self, **kwargs):
"""
list or watch objects of kind StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_storage_class(async=True)
>>> result = thread.get()
:param async bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1StorageClassList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.list_storage_class_with_http_info(**kwargs)
else:
(data) = self.list_storage_class_with_http_info(**kwargs)
return data
def list_storage_class_with_http_info(self, **kwargs):
"""
list or watch objects of kind StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_storage_class_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1StorageClassList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['pretty', '_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_storage_class" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClassList',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_storage_class(self, name, body, **kwargs):
"""
partially update the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.patch_storage_class(name, body, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the StorageClass (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.patch_storage_class_with_http_info(name, body, **kwargs)
else:
(data) = self.patch_storage_class_with_http_info(name, body, **kwargs)
return data
def patch_storage_class_with_http_info(self, name, body, **kwargs):
"""
partially update the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.patch_storage_class_with_http_info(name, body, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the StorageClass (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'body', 'pretty']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_storage_class" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_storage_class`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_storage_class`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClass',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_storage_class(self, name, **kwargs):
"""
read the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.read_storage_class(name, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the StorageClass (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.read_storage_class_with_http_info(name, **kwargs)
else:
(data) = self.read_storage_class_with_http_info(name, **kwargs)
return data
def read_storage_class_with_http_info(self, name, **kwargs):
"""
read the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.read_storage_class_with_http_info(name, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the StorageClass (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'pretty', 'exact', 'export']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_storage_class" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_storage_class`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClass',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_storage_class(self, name, body, **kwargs):
"""
replace the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_storage_class(name, body, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the StorageClass (required)
:param V1StorageClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.replace_storage_class_with_http_info(name, body, **kwargs)
else:
(data) = self.replace_storage_class_with_http_info(name, body, **kwargs)
return data
def replace_storage_class_with_http_info(self, name, body, **kwargs):
"""
replace the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_storage_class_with_http_info(name, body, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the StorageClass (required)
:param V1StorageClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'body', 'pretty']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_storage_class" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_storage_class`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_storage_class`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClass',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
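# --- Editor's sketch (not part of the generated client) ----------------------
# Minimal usage of the StorageClass methods above, assuming the matching
# `kubernetes` package release (this generation still uses the `async`
# keyword, renamed `async_req` in later versions), a cluster reachable via
# ~/.kube/config, and an illustrative StorageClass name "fast-ssd".
#
#   from kubernetes import client, config
#
#   config.load_kube_config()                # or config.load_incluster_config()
#   storage_api = client.StorageV1Api()
#
#   # Synchronous call: returns a V1StorageClassList directly.
#   for sc in storage_api.list_storage_class(limit=10).items:
#       print(sc.metadata.name, sc.provisioner)
#
#   # Asynchronous call: returns a thread whose get() yields the V1StorageClass.
#   thread = storage_api.read_storage_class("fast-ssd", async=True)
#   print(thread.get().metadata.name)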
|
[
"[email protected]"
] | |
66ba06ef2ef8a06a93dcc455b15df488909eda2c
|
8e71389d52ea845d37e99c5fa412ede317fc6b84
|
/simulation/PacketServer.py
|
f8e194acb7e49e410a2d2b5611d4535a80bcb417
|
[] |
no_license
|
JackHarnett/core_ospf
|
098d993a367a1815fa52fd93e7e6d71433b2251c
|
54a31b36646820ebe7b5a7c0c5dc66688613ea3b
|
refs/heads/master
| 2023-03-29T20:46:45.456909 | 2021-04-06T10:36:05 | 2021-04-06T10:36:05 | 351,663,531 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 266 |
py
|
import random
import socket

# A UDP (SOCK_DGRAM) socket is connectionless: datagrams are read directly
# from the bound socket with recvfrom(); accept() only exists for TCP sockets.
server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_socket.bind(('', 12000))

while True:
    rand = random.randint(0, 10)
    # Receive up to 1024 bytes of the next datagram along with the sender address.
    data, address = server_socket.recvfrom(1024)
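# --- Editor's sketch (not part of the original file) --------------------------
# A minimal UDP client that could exercise this server, assuming it runs on the
# same host and uses the port 12000 bound above; the message payload is made up.
#
#   import socket
#
#   client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   client_socket.sendto(b"ping", ("localhost", 12000))
#   client_socket.close()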
|
[
"[email protected]"
] | |
2bd1992e4ff2693c9c485b27dd08954a36955f40
|
b01e55da3542cd3ec298ef4d5d3da4b01d002881
|
/common/__init__.py
|
667af8f340636855a7bfcec1f901a333abcfcfe6
|
[] |
no_license
|
kongjingchun/gift
|
85a3a3ac3dc6a4e50bb81d7e86c582a5f0bda5f5
|
36c380d74d88fc4e6e5d14fb1bceb941446e139f
|
refs/heads/master
| 2023-04-09T20:57:09.474169 | 2021-04-19T11:06:42 | 2021-04-19T11:06:42 | 355,089,670 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 88 |
py
|
# coding:utf-8
# @Create time: 2021/3/29 4:39 PM
# @Author: KongJingchun
# @remark:
|
[
"[email protected]"
] | |
0d84e04b93554f7da8d2ad5ffe00b4e1b02e5b4b
|
ec8af7eac3f600224c44026d6112a0fbc7f7ed14
|
/boa/wikidocs/05/wikidocs-05-2.2.py
|
e25aec6e8486919f717742e935b25f8ef26978f6
|
[] |
no_license
|
YangBoaa/first-repository
|
eb2befdd6e17125106983e4a3284c8c343ae8efc
|
631a1afddc891a688805577f429fa10179900a19
|
refs/heads/master
| 2022-12-07T08:55:32.302685 | 2020-08-28T01:47:57 | 2020-08-28T01:47:57 | 279,538,060 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 177 |
py
|
# the meaning of if __name__ == "__main__":
def add(a, b):
return a + b
def sub(a, b):
return a - b
if __name__ == "__main__":
print(add(1, 4))
print(sub(5, 2))
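# --- Editor's note (sketch) ---------------------------------------------------
# If this module were saved under an importable name, e.g. wikidocs_05.py
# (name assumed for the example), importing it would not run the two print()
# calls above, because __name__ is then "wikidocs_05" rather than "__main__":
#
#   >>> import wikidocs_05      # no output
#   >>> wikidocs_05.add(1, 4)
#   5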
|
[
"[email protected]"
] |