code | docs
---|---|
def list_members(self, list_id):
return [User(user._json) for user in self._client.list_members(list_id=list_id)] | List users in a list
:param list_id: list ID number
:return: list of :class:`~responsebot.models.User` objects |
def is_list_member(self, list_id, user_id):
try:
return bool(self._client.show_list_member(list_id=list_id, user_id=user_id))
except TweepError as e:
if e.api_code == TWITTER_USER_IS_NOT_LIST_MEMBER_SUBSCRIBER:
return False
raise | Check if a user is a member of a list
:param list_id: list ID number
:param user_id: user ID number
:return: :code:`True` if user is member of list, :code:`False` otherwise |
def subscribe_list(self, list_id):
return List(tweepy_list_to_json(self._client.subscribe_list(list_id=list_id))) | Subscribe to a list
:param list_id: list ID number
:return: :class:`~responsebot.models.List` object |
def unsubscribe_list(self, list_id):
return List(tweepy_list_to_json(self._client.unsubscribe_list(list_id=list_id))) | Unsubscribe from a list
:param list_id: list ID number
:return: :class:`~responsebot.models.List` object |
def list_subscribers(self, list_id):
return [User(user._json) for user in self._client.list_subscribers(list_id=list_id)] | List subscribers of a list
:param list_id: list ID number
:return: list of :class:`~responsebot.models.User` objects |
def is_subscribed_list(self, list_id, user_id):
try:
return bool(self._client.show_list_subscriber(list_id=list_id, user_id=user_id))
except TweepError as e:
if e.api_code == TWITTER_USER_IS_NOT_LIST_MEMBER_SUBSCRIBER:
return False
raise | Check if a user is a subscriber of the specified list
:param list_id: list ID number
:param user_id: user ID number
:return: :code:`True` if user is subscribed to the list, :code:`False` otherwise |
def auth(config):
auth = tweepy.OAuthHandler(config.get('consumer_key'), config.get('consumer_secret'))
auth.set_access_token(config.get('token_key'), config.get('token_secret'))
api = tweepy.API(auth)
try:
api.verify_credentials()
except RateLimitError as e:
raise APIQuotaError(e.args[0][0]['message'])
except TweepError as e:
raise AuthenticationError(e.args[0][0]['message'])
else:
logging.info('Successfully authenticated as %s' % api.me().screen_name)
return ResponseBotClient(config=config, client=api) | Perform authentication with Twitter and return a client instance to communicate with Twitter
:param config: ResponseBot config
:type config: :class:`~responsebot.utils.config_utils.ResponseBotConfig`
:return: client instance to execute twitter action
:rtype: :class:`~responsebot.responsebot_client.ResponseBotClient`
:raises: :class:`~responsebot.common.exceptions.AuthenticationError`: If failed to authenticate
:raises: :class:`~responsebot.common.exceptions.APIQuotaError`: If API call rate reached limit |
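A minimal usage sketch, assuming a dict-like config (ResponseBotConfig exposes the same .get() interface) and placeholder credentials; the exception classes come from the module path named in the docstring above.
from responsebot.common.exceptions import APIQuotaError, AuthenticationError

config = {
    'consumer_key': 'YOUR_CONSUMER_KEY',
    'consumer_secret': 'YOUR_CONSUMER_SECRET',
    'token_key': 'YOUR_ACCESS_TOKEN',
    'token_secret': 'YOUR_ACCESS_TOKEN_SECRET',
}
try:
    client = auth(config)
except AuthenticationError as e:
    print('Bad credentials: {}'.format(e))
except APIQuotaError as e:
    print('Hit the API rate limit: {}'.format(e))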
def json2py(json_obj):
for key, value in json_obj.items():
if type(value) not in (str, unicode):
continue
# restore a datetime
if re.match(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}:\d+$', value):
value = datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S:%f')
elif re.match(r'^\d{4}-\d{2}-\d{2}$', value):
year, month, day = map(int, value.split('-'))
value = datetime.date(year, month, day)
elif re.match(r'^\d{2}:\d{2}:\d{2}:\d+$', value):
hour, minute, second, micro = map(int, value.split(':'))
value = datetime.time(hour, minute, second, micro)
else:
found = False
for decoder in _decoders:
success, new_value = decoder(value)
if success:
value = new_value
found = True
break
if not found:
continue
json_obj[key] = value
return json_obj | Converts the given JSON object to a Python value.
:param json_obj | <variant> |
def jsonify(py_data, default=None, indent=4, sort_keys=True):
return json.dumps(py_data, default=py2json, indent=indent, sort_keys=sort_keys) | Converts the given Python data to JSON format.
:param py_data | <variant> |
def py2json(py_obj):
method = getattr(py_obj, '__json__', None)
if method:
return method()
elif type(py_obj) == datetime.datetime:
return py_obj.isoformat()
elif type(py_obj) == datetime.date:
return py_obj.isoformat()
elif type(py_obj) == datetime.time:
return py_obj.isoformat()
elif type(py_obj) == set:
return list(py_obj)
elif type(py_obj) == decimal.Decimal:
return str(py_obj)
else:
# look through custom plugins
for encoder in _encoders:
success, value = encoder(py_obj)
if success:
return value
opts = (py_obj, type(py_obj))
raise TypeError('Unserializable object {} of type {}'.format(*opts)) | Converts the given Python object to JSON format.
:param py_obj | <variant> |
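A short round trip through the three helpers above (Python 2-flavored, since json2py references unicode); the date string matches the ^\d{4}-\d{2}-\d{2}$ pattern that json2py restores.
import datetime
import json

payload = {'name': 'report', 'due': datetime.date(2021, 6, 30)}
text = jsonify(payload)                # py2json renders the date as "2021-06-30"
restored = json2py(json.loads(text))   # the date regex turns it back into a date
assert restored['due'] == datetime.date(2021, 6, 30)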
def register(encoder=None, decoder=None):
if encoder:
_encoders.append(encoder)
if decoder:
_decoders.append(decoder) | Registers an encoder method and/or a decoder method for processing
custom values. Encoder and decoders should take a single argument
for the value to encode or decode, and return a tuple of (<bool>
success, <variant> value). A successful decode or encode should
return True and the value.
:param encoder | <callable> || None
decoder | <callable> || None |
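A sketch of the (success, value) contract described above: a hypothetical encoder/decoder pair that round-trips complex numbers through a tagged string (the 'complex:' tag is invented for illustration).
def encode_complex(value):
    # encoders receive any Python object; claim only the ones we handle
    if isinstance(value, complex):
        return True, 'complex:{}:{}'.format(value.real, value.imag)
    return False, None

def decode_complex(value):
    # decoders receive string values during json2py()
    if value.startswith('complex:'):
        _, real, imag = value.split(':')
        return True, complex(float(real), float(imag))
    return False, None

register(encoder=encode_complex, decoder=decode_complex)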
def _read_file(fname):
with open(fname) as input_file:
re_grammar = [x.strip('\n') for x in input_file.readlines()]
return re_grammar | Args:
fname (str): Name of the grammar file to be parsed
Return:
list: The grammar rules |
def get(self, statediag, dfaaccepted):
newstatediag = {}
newstate = PDAState()
newstate.id = 'AI,I' # BE CAREFUL WHEN SIMPLIFYING...
newstate.type = 1
newstate.sym = '@wrapping'
transitions = {}
transitions[(0, 0)] = [0]
newstate.trans = transitions
i = 0
newstatediag[i] = newstate
# print 'accepted:'
# print dfaaccepted
for stateid in statediag:
state = statediag[stateid]
# print state.id
if state.type == 2:
for state2id in dfaaccepted:
# print state.id[1]
if state.id[1] == state2id:
# print 'adding...'
state.trans['AI,I'] = ['@wrapping']
# print state.trans
break
i = i + 1
newstatediag[i] = state
return newstatediag | - Remove all the POP (type - 2) transitions to state 0 (non-DFA-accepted)
for symbol @closing
- Generate the accepted transitions
- Replace DFA accepted states with a push-pop symbol and two extra states
Args:
statediag (list): The states of the PDA
dfaaccepted (list):The list of DFA accepted states
Returns:
list: A cleaned, smaller list of DFA states |
def bfs(self, graph, start):
newstatediag = {}
# maintain a queue of paths
queue = []
visited = []
# push the first path into the queue
queue.append(start)
while queue:
# get the first path from the queue
state = queue.pop(0)
# get the last node from the path
# visited
visited.append(state.id)
# enumerate all adjacent nodes, construct a new path and push it
# into the queue
for key in state.trans:
if state.trans[key] != []:
if key not in visited:
for nextstate in graph:
if graph[nextstate].id == key:
queue.append(graph[nextstate])
break
i = 0
for state in graph:
if graph[state].id in visited:
newstatediag[i] = graph[state]
i = i + 1
return newstatediag | Performs BFS operation for eliminating useless loop transitions
Args:
graph (PDA): the PDA object
start (PDA state): The PDA initial state
Returns:
list: A cleaned, smaller list of DFA states |
def get(self, statediag):
if len(statediag) < 1:
print 'PDA is empty and can not be reduced'
return statediag
newstatediag = self.bfs(statediag, statediag[0])
return newstatediag | Args:
statediag (list): The states of the PDA
Returns:
list: A reduced list of states using BFS |
def get(self, statediag, accepted=None):
count = 0
statesmap = {}
newstatediag = {}
for state in statediag:
# Simplify state IDs
if statediag[state].id not in statesmap:
statesmap[statediag[state].id] = count
mapped = count
count = count + 1
else:
mapped = statesmap[statediag[state].id]
# Simplify transitions IDs
transitions = {}
for nextstate in statediag[state].trans:
if nextstate not in statesmap:
statesmap[nextstate] = count
transmapped = count
count = count + 1
else:
transmapped = statesmap[nextstate]
transitions[transmapped] = statediag[state].trans[nextstate]
newstate = PDAState()
newstate.id = mapped
newstate.type = statediag[state].type
newstate.sym = statediag[state].sym
newstate.trans = transitions
newstatediag[mapped] = newstate
newaccepted = None
if accepted is not None:
newaccepted = []
for accepted_state in accepted :
if (0, accepted_state) in statesmap:
newaccepted.append(statesmap[(0, accepted_state)])
return newstatediag, count, newaccepted | Replaces complex state IDs, as generated from the product operation,
with simple sequential numbers. A dictionary is maintained in order
to map the existing IDs.
Args:
statediag (list): The states of the PDA
accepted (list): the list of DFA accepted states
Returns:
list: |
def _generate_state(self, trans):
state = PDAState()
state.id = self.nextstate()
state.type = 2
state.sym = state.id
state.trans = trans.copy()
self.toadd.append(state)
return state.id | Creates a new POP state (type - 2) with the same transitions.
The POPed symbol is the unique number of the state.
Args:
trans (dict): Transition dictionary
Returns:
Int: The state identifier |
def replace_read(self):
for statenum in self.statediag:
state = self.statediag[statenum]
if state.type == 3: # READ state
state.type = 1
destination_and_symbol = self._generate_state(state.trans)
state.sym = destination_and_symbol
state.trans = {}
state.trans[destination_and_symbol] = [0]
statenumber_identifier = len(self.statediag) + 1
for state in self.toadd:
self.statediag[statenumber_identifier] = state
statenumber_identifier = statenumber_identifier + 1
return self.statediag | Replaces all READ (type - 3) states to a PUSH (type - 1) and a POP (type - 2).
The actual state is replaced with the PUSH, and a new POP is created. |
def insert_self_to_empty_and_insert_all_intemediate(self, optimized):
for state_a in self.statediag:
self.rules.append('A' + repr(state_a.id) + ',' + repr(state_a.id) + ': @empty_set')
# If CFG is not requested, avoid the following O(n^3) rule.
# It can be solved and a string can be generated faster with BFS or DFS
if optimized == 0:
for state_b in self.statediag:
if state_b.id != state_a.id:
for state_c in self.statediag:
if state_c.id != state_a.id \
and state_b.id != state_c.id:
self.rules.append('A' + repr(state_a.id)
+ ',' + repr(state_c.id)
+ ': A' + repr(state_a.id)
+ ',' + repr(state_b.id)
+ ' A' + repr(state_b.id)
+ ',' + repr(state_c.id)
+ '') | For each state qi of the PDA, we add the rule Aii -> e
For each triplet of states qi, qj and qk, we add the rule Aij -> Aik Akj.
Args:
optimized (bool): Enable or Disable optimization - Do not produce O(n^3) |
def get_rules(self, optimized):
self.insert_start_to_accepting()
# If CFG is not requested, avoid the following O(n^3) rule.
# It can be solved and a string can be generated faster with BFS or DFS
if optimized == 0:
self.insert_self_to_empty_and_insert_all_intemediate(optimized)
self.insert_symbol_pushpop()
return self.rules | Args:
optimized (bool): Enable or Disable optimization - Do not produce O(n^3)
Return:
list: The CFG rules |
def provision(self, instance_id: str, service_details: ProvisionDetails, async_allowed: bool) -> ProvisionedServiceSpec:
if service_details.plan_id == self._backend.config.UUID_PLANS_EXISTING_CLUSTER:
# Provision the instance on an Existing Atlas Cluster
# Find or create the instance
instance = self._backend.find(instance_id)
# Create the instance if needed
return self._backend.create(instance, service_details.parameters, existing=True)
# Plan not supported
raise ErrPlanUnsupported(service_details.plan_id) | Provision the new instance
see openbrokerapi documentation
Returns:
ProvisionedServiceSpec |
def unbind(self, instance_id: str, binding_id: str, details: UnbindDetails):
# Find the instance
instance = self._backend.find(instance_id)
# Find the binding
binding = self._backend.find(binding_id, instance)
if not binding.isProvisioned():
# The binding does not exist
raise ErrBindingDoesNotExist()
# Delete the binding
self._backend.unbind(binding) | Unbinding the instance
see openbrokerapi documentation
Raises:
ErrBindingDoesNotExist: Binding does not exist. |
def bind(self, instance_id: str, binding_id: str, details: BindDetails) -> Binding:
# Find the instance
instance = self._backend.find(instance_id)
# Find or create the binding
binding = self._backend.find(binding_id, instance)
# Create the binding if needed
return self._backend.bind(binding, details.parameters) | Binding the instance
see openbrokerapi documentation |
def deprovision(self, instance_id: str, details: DeprovisionDetails, async_allowed: bool) -> DeprovisionServiceSpec:
# Find the instance
instance = self._backend.find(instance_id)
if not instance.isProvisioned():
# the instance does not exist
raise ErrInstanceDoesNotExist()
return self._backend.delete(instance) | Deprovision an instance
see openbrokerapi documentation
Raises:
ErrInstanceDoesNotExist: Instance does not exist. |
def _ddns(self, ip):
headers = {"Accept": "text/json", "User-Agent": "ddns/0.1.0 ([email protected])"}
data = {
'login_token': self.login_token,
'format': "json",
'domain_id': self.domain_id,
'record_id': self.record_id,
'sub_domain': self.sub_domain,
'record_line': '默认',
'value': ip
}
res = requests.post(Ddns.DNSPOD_API, data, headers=headers)
logger.debug(res.json())
return res.json()['status']['code'] == '1' | curl -X POST https://dnsapi.cn/Record.Ddns -d 'login_token=LOGIN_TOKEN&format=json&domain_id=2317346&record_id=16894439&record_line=默认&sub_domain=www'
:return: :code:`True` if the update succeeded (DNSPod status code '1') |
def post(self, path, data={}):
response = requests.post(API_URL + path, data=json.dumps(data), headers=self._set_headers())
return self._check_response(response, self.post, path, data) | Perform POST Request |
def delete(self, path, data={}):
if len(data) != 0:
parameter_string = ''
for k,v in data.items():
parameter_string += '{}={}'.format(k,v)
parameter_string += '&'
path += '?' + parameter_string
response = requests.delete(API_URL + path, headers=self._set_headers())
return self._check_response(response, self.delete, path, data) | Perform DELETE Request |
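The loop above leaves a trailing '&' on the query string and does no percent-escaping; here is a sketch of an equivalent using the standard library's urlencode (assuming the same API_URL, _set_headers and _check_response as the snippet).
try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode        # Python 2

def delete(self, path, data={}):
    if data:
        path += '?' + urlencode(data)   # joins and escapes key=value pairs
    response = requests.delete(API_URL + path, headers=self._set_headers())
    return self._check_response(response, self.delete, path, data)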
def parsed(self):
if not self._parsed:
self._parsed = ConfigParser()
self._parsed.readfp(io.StringIO(self.content))
return self._parsed | Get the ConfigParser object which represents the content.
This property is cached and only parses the content once. |
def create_cache(directory, compress_level=6, value_type_is_binary=False, **kwargs):
cache = diskcache.Cache(
directory,
disk=CompressedDisk,
disk_compress_level=compress_level,
disk_value_type_is_binary=value_type_is_binary,
**kwargs
)
return cache | Create an html cache. Html strings will be automatically compressed.
:param directory: path for the cache directory.
:param compress_level: 0 ~ 9, 9 is slowest and smallest.
:param kwargs: other arguments.
:return: a `diskcache.Cache()` |
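A usage sketch; diskcache.Cache supports set() with an expire argument (in seconds) and get() for reads, so compressed HTML entries age out automatically.
cache = create_cache('/tmp/html_cache', compress_level=9)
url = 'https://example.com'
cache.set(url, '<html>...</html>', expire=3600)  # keep for one hour
html = cache.get(url)                            # None once expired or missing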
def timeticks(tdiff):
if isinstance(tdiff, xarray.DataArray): # len==1
tdiff = timedelta(seconds=tdiff.values / np.timedelta64(1, 's'))
assert isinstance(tdiff, timedelta), 'expecting datetime.timedelta'
if tdiff > timedelta(hours=2):
return None, None
elif tdiff > timedelta(minutes=20):
return MinuteLocator(byminute=range(0, 60, 5)), MinuteLocator(byminute=range(0, 60, 2))
elif (timedelta(minutes=10) < tdiff) & (tdiff <= timedelta(minutes=20)):
return MinuteLocator(byminute=range(0, 60, 2)), MinuteLocator(byminute=range(0, 60, 1))
elif (timedelta(minutes=5) < tdiff) & (tdiff <= timedelta(minutes=10)):
return MinuteLocator(byminute=range(0, 60, 1)), SecondLocator(bysecond=range(0, 60, 30))
elif (timedelta(minutes=1) < tdiff) & (tdiff <= timedelta(minutes=5)):
return SecondLocator(bysecond=range(0, 60, 30)), SecondLocator(bysecond=range(0, 60, 10))
elif (timedelta(seconds=30) < tdiff) & (tdiff <= timedelta(minutes=1)):
return SecondLocator(bysecond=range(0, 60, 10)), SecondLocator(bysecond=range(0, 60, 2))
else:
return SecondLocator(bysecond=range(0, 60, 2)), SecondLocator(bysecond=range(0, 60, 1)) | NOTE do NOT use "interval" or ticks are misaligned! use "bysecond" only! |
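A sketch of wiring the returned locator pair into a matplotlib axes; with a 15-minute span this lands in the 2-minute-major/1-minute-minor branch above.
from datetime import timedelta
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
major, minor = timeticks(timedelta(minutes=15))
if major is not None:          # spans over 2 hours return (None, None)
    ax.xaxis.set_major_locator(major)
    ax.xaxis.set_minor_locator(minor)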
def consume(self, msg):
self.log.info(msg)
body = msg['body']
topic = body['topic']
repo = None
if 'rawhide' in topic:
arch = body['msg']['arch']
self.log.info('New rawhide %s compose ready', arch)
repo = 'rawhide'
elif 'branched' in topic:
arch = body['msg']['arch']
branch = body['msg']['branch']
self.log.info('New %s %s branched compose ready', branch, arch)
log = body['msg']['log']
if log != 'done':
self.log.warn('Compose not done?')
return
repo = branch
elif 'updates.fedora' in topic:
self.log.info('New Fedora %(release)s %(repo)s compose ready',
body['msg'])
repo = 'f%(release)s-%(repo)s' % body['msg']
else:
self.log.warn('Unknown topic: %s', topic)
release = self.releases[repo]
reactor.callInThread(self.compose, release) | Called with each incoming fedmsg.
From here we trigger an rpm-ostree compose by touching a specific file
under the `touch_dir`. Then our `doRead` method is called with the
output of the rpm-ostree-toolbox treecompose, which we monitor to
determine when it has completed. |
def parse_addr(text):
if text:
parts = text.split(':')
length = len(parts)
if length == 3:
return parts[0], parts[1], int(parts[2])
elif length == 2:
return None, parts[0], int(parts[1])
elif length == 1:
return None, '', int(parts[0])
return None, None, None | Parse a 1- to 3-part address spec. |
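What each spec shape returns (values illustrative; the meaning of the first two parts is up to the caller):
parse_addr('a:b:8080')  # ('a', 'b', 8080)
parse_addr('b:8080')    # (None, 'b', 8080)
parse_addr('8080')      # (None, '', 8080)
parse_addr('')          # (None, None, None)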
def step_note_that(context, remark):
log = getattr(context, "log", None)
if log:
log.info(u"NOTE: %s;" % remark) | Used as generic step that provides an additional remark/hint
and enhance the readability/understanding without performing any check.
.. code-block:: gherkin
Given that today is "April 1st"
But note that "April 1st is Fools day (and beware)" |
def section(self, resources):
section = [p for p in self.parents(resources) if p.rtype == 'section']
if section:
return section[0]
return None | Which section is this in, if any |
def in_navitem(self, resources, nav_href):
# The navhref might end with '/index' so remove it if so
if nav_href.endswith('/index'):
nav_href = nav_href[:-6]
return self.docname.startswith(nav_href) | Given href of nav item, determine if resource is in it |
def is_published(self):
now = datetime.now()
published = self.props.published
if published:
return published < now
return False | Return true if this resource has a published date in the past
def request(self, method, path, query=None, content=None):
headers = {"accept": "application/json"}  # assumed default; `headers` was undefined in the original snippet
kwargs = {
"headers": headers,
"timeout": self.timeout,
"verify": False,
}
if self.username and self.password:
kwargs["auth"] = self.username, self.password
if content is not None:
kwargs["data"] = self._json_encoder.encode(content)
if query:
prepare_query(query)
kwargs["params"] = query
url = self._base_url + path
# print "Sending request to %s %s" % (url, kwargs)
try:
r = requests.request(method, url, **kwargs)
except requests.ConnectionError:
raise GanetiApiError("Couldn't connect to %s" % self._base_url)
except requests.Timeout:
raise GanetiApiError("Timed out connecting to %s" %
self._base_url)
if r.status_code != requests.codes.ok:
raise NotOkayError(str(r.status_code), code=r.status_code)
if r.content:
return json.loads(r.content)
else:
return None | Sends an HTTP request.
This constructs a full URL, encodes and decodes HTTP bodies, and
handles invalid responses in a pythonic way.
@type method: string
@param method: HTTP method to use
@type path: string
@param path: HTTP URL path
@type query: list of two-tuples
@param query: query arguments to pass to urllib.urlencode
@type content: str or None
@param content: HTTP body content
@rtype: object
@return: JSON-Decoded response
@raises GanetiApiError: If an invalid response is returned |
def start(self):
version = self.request("get", "/version")
if version != 2:
raise GanetiApiError("Can't work with Ganeti RAPI version %d" %
version)
logging.info("Accessing Ganeti RAPI, version %d" % version)
self.version = version
try:
features = self.request("get", "/2/features")
except NotOkayError, noe:
if noe.code == 404:
# Okay, let's calm down, this is totally reasonable. Certain
# older Ganeti RAPIs don't have a list of features.
features = []
else:
# No, wait, panic was the correct thing to do.
raise
logging.info("RAPI features: %r" % (features,))
self.features = features | Confirm that we may access the target cluster. |
def _create_driver(self, **kwargs):
if self.driver is None:
self.driver = self.create_driver(**kwargs)
self.init_driver_func(self.driver) | Create webdriver, assign it to ``self.driver``, and run webdriver
initiation process, which is usually used for manual login. |
def get_html(self,
url,
params=None,
cache_cb=None,
**kwargs):
url = add_params(url, params)
cache_consumed, value = self.try_read_cache(url)
if cache_consumed:
html = value
else:
self._create_driver()
self.driver.get(url)
html = self.driver.page_source
if self.should_we_update_cache(html, cache_cb, cache_consumed):
self.cache.set(
url, html,
expire=kwargs.get("cache_expire", self.cache_expire),
)
return html | Get html of an url. |
def deserialize_time(data):
parsed = parser.parse(data)
return parsed.time().replace(tzinfo=parsed.tzinfo) | Return a time instance based on the values of the data param |
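A usage sketch, assuming parser is dateutil's parser (the interface the helper relies on); the offset survives because the tzinfo is copied back onto the bare time.
from dateutil import parser

t = deserialize_time('14:30:00+02:00')
print(t)         # 14:30:00+02:00
print(t.tzinfo)  # tzoffset(None, 7200)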
def freeze():
installations = {}
for dist in get_installed_distributions():
req = pip.FrozenRequirement.from_dist(dist, [], find_tags=False)
installations[req.name] = req
return [str(installation).rstrip() for installation in
sorted(installations.values(), key=lambda x: x.name.lower())] | Show arguments to require() to recreate what has been installed |
def require(*args, **kwargs):
# If called with no arguments, returns requirements list
if not args and not kwargs:
return freeze()
# Construct array of requirements
requirements = list(args)
extra = ['{}{}'.format(kw, kwargs[kw]) for kw in kwargs]
requirements.extend(extra)
args = ['install', '-q']
args.extend(requirements)
pip.main(args) | Install a set of packages using pip
This is designed to be an interface for IPython notebooks that
replicates the requirements.txt pip format. This lets notebooks
specify which versions of packages they need inside the notebook
itself.
This function is the general-purpose interface that lets
the caller specify any version string for any package. |
def handle(self, *args, **options):
self.db = options.get("database", DEFAULT_DB_ALIAS)
self.current_name = connections[self.db].settings_dict["NAME"]
self.compare_name = options.get("db_name")
self.lines = options.get("lines")
self.ignore = int(options.get('ignore'))
if not self.compare_name:
self.compare_name = "%s_compare" % self.current_name
command = NASHVEGAS.get("dumpdb", "pg_dump -s {dbname}")
print "Getting schema for current database..."
current_sql = Popen(
command.format(dbname=self.current_name),
shell=True,
stdout=PIPE
).stdout.readlines()
print "Getting schema for fresh database..."
self.setup_database()
connections[self.db].close()
connections[self.db].settings_dict["NAME"] = self.compare_name
try:
call_command("syncdb", interactive=False, verbosity=0, migrations=False)
new_sql = Popen(
command.format(dbname=self.compare_name).split(),
stdout=PIPE
).stdout.readlines()
finally:
connections[self.db].close()
connections[self.db].settings_dict["NAME"] = self.current_name
self.teardown_database()
print "Outputing diff between the two..."
print "".join(difflib.unified_diff(normalize_sql(current_sql, self.ignore),
normalize_sql(new_sql, self.ignore),
n=int(self.lines))) | Compares the current database with the migrations.
Creates a temporary database, applies all the migrations to it, and
then dumps the schema from both current and temporary, diffs them,
then reports the diffs to the user. |
def render_widgets(kb_app: kb,
sphinx_app: Sphinx,
doctree: doctree,
fromdocname: str,
):
builder: StandaloneHTMLBuilder = sphinx_app.builder
for node in doctree.traverse(widget):
# Render the output
w = sphinx_app.env.widgets.get(node.name)
context = builder.globalcontext.copy()
# Add in certain globals
context['resources'] = sphinx_app.env.resources
context['references'] = sphinx_app.env.references
output = w.render(sphinx_app, context)
# Put the output into the node contents
listing = [nodes.raw('', output, format='html')]
node.replace_self(listing) | Go through docs and replace widget directive with rendering |
def attr_string(filterKeys=(), filterValues=(), **kwargs):
return ', '.join([str(k)+'='+repr(v) for k, v in kwargs.items()
if k not in filterKeys and v not in filterValues]) | Build a string consisting of 'key=value' substrings for each keyword
argument in :kwargs:
@param filterKeys: list of key names to ignore
@param filterValues: list of values to ignore (e.g. None will ignore all
key=value pairs that have that value) |
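A quick illustration (dict ordering assumed, as in Python 3.7+):
attr_string(filterValues=(None,), name='db', retries=3, timeout=None)
# => "name='db', retries=3"   (timeout is dropped because its value is None)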
def auth_string(self):
if not self._token:
self.execute()
if not self._token.expired:
return 'Bearer {}'.format(self._token.access_token)
if self.auto_refresh:
self.execute()
return 'Bearer {}'.format(self._token.access_token)
raise TokenExpired() | Get the auth string. If the token is expired and auto refresh enabled,
a new token will be fetched
:return: the auth string
:rtype: str |
async def main():
async with aiohttp.ClientSession() as session:
data = Hole('192.168.0.215', loop, session)
await data.get_data()
# Get the raw data
print(json.dumps(data.data, indent=4, sort_keys=True))
print("Status:", data.status)
print("Domains being blocked:", data.domains_being_blocked) | Get the data from a *hole instance. |
async def enable():
async with aiohttp.ClientSession() as session:
data = Hole('192.168.0.215', loop, session, api_token=API_TOKEN)
await data.enable() | Enable ad-blocking on a *hole instance (requires an API token). |
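Driving the coroutines above, assuming the module-level loop they reference:
import asyncio

loop = asyncio.get_event_loop()
loop.run_until_complete(main())    # fetch and print the stats
loop.run_until_complete(enable())  # re-enable blocking (requires API_TOKEN)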
def admin_penalty(self, column=None, value=None, **kwargs):
return self._resolve_call('PCS_ADMIN_PENALTY_ORDER', column,
value, **kwargs) | An enforcement action that results in levying the permit holder with a
penalty or fine. It is used to track judicial hearing dates, penalty
amounts, and type of administrative penalty order.
>>> PCS().admin_penalty('enfor_action_date', '16-MAR-01') |
def audit(self, column=None, value=None, **kwargs):
return self._resolve_call('PCS_PCI_AUDIT', column, value, **kwargs) | Pretreatment Compliance Inspections (PCI) or Pretreatment Audits
collect information resulting from inspections pertaining to Publicly
Owned Treatment Works (POTWs) that receive pollutants from indirect
dischargers.
>>> PCS().audit('insp_date', '16-MAR-01') |
def code_description(self, column=None, value=None, **kwargs):
return self._resolve_call('PCS_CODE_DESC', column, value, **kwargs) | The Permit Compliance System (PCS) records milestones, events, and many
other parameters in code format. To provide text descriptions that
explain the code meanings, the PCS_CODE_DESC provides complete
information on all types of codes, and for each type, the text
description of each possible code value.
>>> PCS().code_description('code', 110) |
def compliance_schedule(self, column=None, value=None, **kwargs):
return self._resolve_call('PCS_CMPL_SCHD', column, value, **kwargs) | A sequence of activities with associated milestones which pertains to a
given permit.
>>> PCS().compliance_schedule('cmpl_schd_evt', '62099') |
def compliance_violation(self, column=None, value=None, **kwargs):
return self._resolve_call('PCS_CMPL_SCHD_VIOL', column, value, **kwargs) | A compliance schedule violation reflects the non-achievement of a
given compliance schedule event, including the type of violation and
type of resolution.
>>> PCS().compliance_violation('cs_rnc_detect_date', '16-MAR-04') |
def dmr_measurement(self, column=None, value=None, **kwargs):
return self._resolve_call('PCS_DMR_MEASUREMENT', column, value, **kwargs) | Measurements of effluents reported on the Discharge Monitoring Report
(DMR). The violations are detected by comparing the measurement values
against the corresponding effluent limit.
>>> PCS().dmr_measurement('season_num', 2) |
def enforcement_action(self, column=None, value=None, **kwargs):
return self._resolve_call('PCS_ENFOR_ACTION', column, value, **kwargs) | A disciplinary action taken against a permit facility. The action may
be applicable to one or more violations.
>>> PCS().enforcement_action('ea_code', '09') |
def hearing(self, column=None, value=None, **kwargs):
return self._resolve_call('PCS_EVIDENTIARY_HEARING_EVENT', column,
value, **kwargs) | An evidentiary hearing.
>>> PCS().hearing('event_date', '23-MAY-01') |
def industrial_user(self, column=None, value=None, **kwargs):
return self._resolve_call('PCS_INDUSTRIAL_USER_INFO', column,
value, **kwargs) | Information from the PCI_AUDIT table pertaining to industrial users,
i.e. the number of significant industrial users.
>>> PCS().industrial_user('insp_date', '16-MAR-01') |
def inspection(self, column=None, value=None, **kwargs):
return self._resolve_call('PCS_INSPECTION', column, value, **kwargs) | An official visit to the permit facility on a periodic basis which
consists of the following inspection types: NPDES, Biomonitoring,
Pretreatment, and Industrial User.
>>> PCS().inspection('insp_date', '16-MAR-01') |
def permit_event(self, column=None, value=None, **kwargs):
return self._resolve_call('PCS_PERMIT_EVENT', column, value, **kwargs) | A permit event tracks the lifecycle of a permit from issuance to
expiration. Examples include 'Application Received' and 'Permit
Issued', etc.
>>> PCS().permit_event('event_actual_date', '16-MAR-04') |
def pipe_schedule(self, column=None, value=None, **kwargs):
return self._resolve_call('PCS_PIPE_SCHED', column, value, **kwargs) | Particular discharge points at a permit facility that are governed by
effluent limitations and monitoring and submission requirements.
>>> PCS().pipe_schedule('state_submission_units', 'M') |
def single_violation(self, column=None, value=None, **kwargs):
return self._resolve_call('PCS_SINGLE_EVENT_VIOL', column,
value, **kwargs) | A single event violation is a one-time event that occurred on a fixed
date, and is associated with one permitted facility.
>>> PCS().single_violation('single_event_viol_date', '16-MAR-01') |
def sludge(self, column=None, value=None, **kwargs):
return self._resolve_call('PCS_SLUDGE', column, value, **kwargs) | Sludge information describes the volume of sludge produced at a
facility, identification information on a sludge handler, and
classification/permitting information on a facility that handles
sludge, such as a pretreatment POTW.
>>> PCS().sludge('county_name', 'San Francisco') |
def typify(value: Union[dict, list, set, str]):
if type(value) == dict:
return walk_values(typify, value)
if type(value) in [list, set]:
return list(map(typify, value))
if type(value) == str:
if re.match(r'^\d+\.\d+ (STEEM|SBD|VESTS)$', value):
return keep_in_dict(dict(Amount(value)), ['amount', 'asset'])
if re.match(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$', value):
return parse_time(value)
return value | Enhance block operation with native types.
Typify takes a blockchain operation or dict/list/value,
and then it parses and converts string types into native data types where appropriate. |
def json_expand(json_op):
if type(json_op) == dict and 'json' in json_op:
return update_in(json_op, ['json'], safe_json_loads)
return json_op | For custom_json ops. |
def delete(self, subnet_id):
# 1 : show subnet
subnet = self.client.describe_subnets(
SubnetIds=[subnet_id]).get('Subnets')[0]
vpc_id = subnet.get('VpcId')
# 2 : delete subnet
self.client.delete_subnet(SubnetId=subnet_id)
# 3 : delete vpc
return self.client.delete_vpc(VpcId=vpc_id) | This is a naive delete function,
because one VPC can have more than one subnet.
It is OK if the user only uses CAL to manage cloud resources.
We will update ASAP. |
def generate(self):
result = self._gen(self.optimized, self.splitstring)
if self.splitstring and result is not None:
result = result[1:]
return result | Generates a new random string from the start symbol
Args:
None
Returns:
str: The generated string |
def _clean_terminals(self):
new_terminals = []
for term in self.grammar.grammar_terminals:
x_term = term.rfind('@')
y_term = term.rfind('A')
if y_term > x_term:
x_term = y_term
ids = term[x_term + 1:].split(',')
if len(ids) < 2:
"""It'input_string a normal terminal, not a state"""
new_terminals.append(term)
self.grammar.grammar_terminals = new_terminals | Because of the optimization, there are some non-existent terminals
in the generated list. Remove them by checking for terms in the form Ax,x |
def _check_self_to_empty(self, stateid):
x_term = stateid.rfind('@')
y_term = stateid.rfind('A')
if y_term > x_term:
x_term = y_term
ids = stateid[x_term + 1:].split(',')
if len(ids) < 2:
return 0
if ids[0] == ids[1]:
# print 'empty'
return 1
return 0 | Because of the optimization, the rule for empty states is missing.
A check takes place live.
Args:
stateid (int): The state identifier
Returns:
bool: A true or false response |
def _check_self_replicate(self, myntr):
# print 'BFS Dictionary Update - Self Replicate'
find = 0
for nonterm in self.grammar.grammar_nonterminals_map:
for i in self.grammar.grammar_nonterminals_map[nonterm]:
if self.grammar.grammar_rules[i][0] not in self.resolved and not isinstance(
self.grammar.grammar_rules[i][1], (set, tuple)) \
and self.grammar.grammar_rules[i][1] == myntr:
self.resolved[self.grammar.grammar_rules[i][0]] = self.resolved[myntr]
if self._checkfinal(self.grammar.grammar_rules[i][0]):
return self.resolved[self.grammar.grammar_rules[i][0]]
if self.grammar.grammar_rules[i][0] not in self.bfs_queue:
self.bfs_queue.append(self.grammar.grammar_rules[i][0])
find = 1
if find == 1:
return 1
return 0 | For each rule B -> c where c is a known terminal, this function
searches for B occurrences in rules of the form A -> B and sets
A -> c. |
def project_workspace_addsitedir(sitedir):
assert os.path.isdir(sitedir)
try:
from site import addsitedir
except ImportError:
# -- USE: Python2.7 site.py package
from pysite import addsitedir
next_package_pos = len(sys.path)
addsitedir(sitedir)
# -- POST-PROCESS: Move new packages from end to begin of sys.path list.
pos = 0
new_packages = sys.path[next_package_pos:]
del sys.path[next_package_pos:]
sys.path[pos:pos] = new_packages | Similar to site.addsitedir() but prefers new sitedir over existing ones.
Therefore, prefers local packages over installed packages.
.. note::
This allows to support *.pth files and zip-/egg-imports
similar to an installed site-packages directory. |
def create(self, name, description=None, units=None,
agg_method="priority_fill", overwrite=False):
sym = self.try_to_get(name)
if sym is not None:
if overwrite:
print "Deleting {}".format(sym.name)
self.ses.delete(sym)
self.ses.commit()
else:
msg = 'Symbol {} already exists.\n' + \
'Consider setting overwrite to True.'
msg = msg.format(name)
raise Exception(msg)
sym = Symbol(name, description, units, agg_method)
self.ses.add(sym)
print "Creating {}".format(sym.name)
sym.add_alias(name)
sym.handle = SymbolHandle(sym=sym)
self.ses.commit()
return sym | Create, or get if exists, a Symbol.
Parameters
----------
name : str
A symbol's name is a primary key, used across
the Trump ORM.
description : str, optional
An arbitrary string, used to store user information
related to the symbol.
units : str, optional
This is a string used to denote the units of the final
data Series.
agg_method : str, optional
The aggregation method, used to calculate
the final feed. Defaults to priority_fill.
overwrite : bool, optional
Set to True, to force deletion an existing symbol.
defaults to False.
Returns
-------
Symbol |
def delete(self, symbol):
if isinstance(symbol, (str, unicode)):
sym = self.get(symbol)
elif isinstance(symbol, Symbol):
sym = symbol
else:
raise Exception("Invalid symbol {}".format((repr(symbol))))
# Has to handle the case where the table would exist already
# and where it wouldn't.
try:
sym.datatable = Table(sym.name, Base.metadata, autoload=True)
sym.datatable.drop(self.eng, checkfirst=True)
except NoSuchTableError:
print "No worries, {} never existed to begin with.".format(sym.name)
self.ses.delete(sym)
self.ses.commit() | Deletes a Symbol.
Parameters
----------
symbol : str or Symbol |
def exists(self, symbol):
if isinstance(symbol, str):
sym = symbol
elif isinstance(symbol, Symbol):
sym = symbol.name
syms = self.ses.query(Symbol).filter(Symbol.name == sym).all()
if len(syms) == 0:
return False
else:
return True | Checks to if a symbol exists, by name.
Parameters
----------
symbol : str or Symbol
Returns
-------
bool |
def get(self, symbol):
syms = self.try_to_get(symbol)
if syms is None:
raise Exception("Symbol {} does not exist".format(symbol))
else:
return syms | Gets a Symbol based on name, which is expected to exist.
Parameters
----------
symbol : str or Symbol
Returns
-------
Symbol
Raises
------
Exception
If it does not exist. Use .try_to_get(),
if the symbol may or may not exist. |
def try_to_get(self, symbol):
syms = self.ses.query(Symbol).filter(Symbol.name == symbol).all()
if len(syms) == 0:
return None
else:
return syms[0] | Gets a Symbol based on name, which may or may not exist.
Parameters
----------
symbol : str
Returns
-------
Symbol or None.
Note
----
Use .get(), if the symbol should exist, and an exception
is needed if it doesn't. |
def search_meta(self, attr, value=None, stronly=False):
if stronly:
qry = self.ses.query(Symbol.name).join(SymbolMeta)
else:
qry = self.ses.query(Symbol).join(SymbolMeta)
crits = []
if value is None:
crits.append(SymbolMeta.attr == attr)
else:
if isinstance(value, str):
values = [value]
elif isinstance(value, (tuple, list)):
values = value
for v in values:
crits.append(and_(SymbolMeta.attr == attr, SymbolMeta.value.like(v)))
if len(crits):
qry = qry.filter(or_(*crits))
qry = qry.order_by(Symbol.name)
if stronly:
return [sym[0] for sym in qry.distinct()]
else:
return [sym for sym in qry.distinct()] | Get a list of Symbols by searching a specific meta
attribute, and optionally the value.
Parameters
----------
attr : str
The meta attribute to query.
value : None, str or list
The value to match against. If you pass a float, or an int,
it'll be converted to a string, prior to searching.
stronly : bool, optional, default True
Return only a list of symbol names, as opposed
to the (entire) Symbol objects.
Returns
-------
List of Symbols or empty list |
def search(self, usrqry=None, name=False, desc=False, tags=False, meta=False, stronly=False, dolikelogic=True):
if stronly:
qry = self.ses.query(Symbol.name)
else:
qry = self.ses.query(Symbol)
if tags:
qry = qry.join(SymbolTag)
if meta:
qry = qry.join(SymbolMeta)
if dolikelogic:
if usrqry is not None:
if '%' not in usrqry:
usrqry = '%' + usrqry + '%'
crits = []
if name:
crits.append(Symbol.name.like(usrqry))
if tags:
crits.append(SymbolTag.tag.like(usrqry))
if desc:
crits.append(Symbol.description.like(usrqry))
if meta:
crits.append(SymbolMeta.value.like(usrqry))
if len(crits):
qry = qry.filter(or_(*crits))
qry = qry.order_by(Symbol.name)
if stronly:
return [sym[0] for sym in qry.distinct()]
else:
return [sym for sym in qry.distinct()] | Get a list of Symbols by searching a combination of
a Symbol's name, description, tags or meta values.
Parameters
----------
usrqry : str
The string used to query. Appending '%' will use SQL's "LIKE"
functionality.
name : bool, optional, default False
Search by symbol name.
desc : bool, optional, default False
Search by symbol descriptions.
tags : bool, optional, default False
Search by symbol tags.
meta : bool, optional, default False
Search within a symbol's meta attribute's value.
stronly : bool, optional, default True
Return only a list of symbol names, as opposed
to the (entire) Symbol objects.
dolikelogic :
Append '%' to either side of the string, if the string
doesn't already have % specified.
Returns
-------
List of Symbols or empty list |
def search_tag(self, tag, symbols=True, feeds=False):
syms = []
if isinstance(tag, (str, unicode)):
tags = [tag]
else:
tags = tag
if symbols:
crits = []
for tag in tags:
if "%" in tag:
crit = SymbolTag.tag.like(tag)
else:
crit = SymbolTag.tag == tag
crits.append(crit)
qry = self.ses.query(SymbolTag)
qry = qry.filter(or_(*crits))
syms = qry.all()
syms = [tagged.symbol for tagged in syms]
if feeds:
crits = []
for tag in tags:
if "%" in tag:
crit = FeedTag.tag.like(tag)
else:
crit = FeedTag.tag == tag
crits.append(crit)
qry = self.ses.query(Symbol).select_from(FeedTag)
qry = qry.join(FeedTag.feed).join(Feed.symbol)
qry = qry.filter(or_(*crits))
fds = qry.distinct()
syms = syms + [sym for sym in fds]
return list(set(syms))
return syms | Get a list of Symbols by searching a tag or partial tag.
Parameters
----------
tag : str
The tag to search. Appending '%' will use SQL's "LIKE"
functionality.
symbols : bool, optional
Search for Symbol's based on their tags.
feeds : bool, optional
Search for Symbol's based on their Feeds' tags.
Returns
-------
List of Symbols or empty list |
def search_meta_specific(self, **avargs):
qry = self.ses.query(Symbol).join(SymbolMeta.symbol)
for attr, value in avargs.iteritems():
SMA = aliased(SymbolMeta)
if "%" in value:
acrit = SMA.value.like(value)
else:
acrit = SMA.value == value
crit = and_(acrit, SMA.attr == attr)
qry = qry.filter(crit).join(SMA, SMA.symname == SymbolMeta.symname)
qry = qry.order_by(Symbol.name)
return qry.all() | Search the list of Symbol objects by querying specific
meta attributes and their respective values.
Parameters
----------
avargs
The attributes and values passed as key word arguments.
If more than one criteria is specified, AND logic is applied.
Appending '%' to values will use SQL's "LIKE" functionality.
Example
-------
>>> sm.search_meta_specific(geography='Canada', sector='Gov%')
Returns
-------
List of Symbols or empty list |
def tag_counts(self):
qry = self.ses.query(SymbolTag.tag, func.count(SymbolTag.tag))
qry = qry.group_by(SymbolTag.tag)
qry = qry.order_by(SymbolTag.tag)
tags = list(qry.all())
return tags | Get a list of tags and the number of each.
Returns
-------
List of tuples, in order (tag, # of Symbols w/Tag) |
def bulk_cache_of_tag(self, tag):
syms = self.search_tag(tag)
name = 'Bulk Cache of Symbols tagged {}'.format(tag)
tr = TrumpReport(name)
for sym in syms:
sr = sym.cache()
tr.add_symbolreport(sr)
return tr | Caches all the symbols with a certain tag.
For now, this is no different than
caching each symbol individually. In the future,
this functionality could have speed improvements.
Parameters
----------
tag : str
Use '%' to enable SQL's "LIKE" functionality.
Returns
-------
TrumpReport |
def build_view_from_tag(self, tag):
syms = self.search_tag(tag)
names = [sym.name for sym in syms]
subs = ["SELECT indx, '{}' AS symbol, final FROM {}".format(s, s) for s in names]
qry = " UNION ALL ".join(subs)
qry = "CREATE VIEW {} AS {};".format(tag, qry)
self.ses.execute("DROP VIEW IF EXISTS {};".format(tag))
self.ses.commit()
self.ses.execute(qry)
self.ses.commit() | Build a view of group of Symbols based on their tag.
Parameters
----------
tag : str
Use '%' to enable SQL's "LIKE" functionality.
Note
----
This function is written without SQLAlchemy,
so it has only been tested on Postgres. |
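For two hypothetical symbols tagged 'fx', the string templates above expand to a statement like this sketch:
names = ['usdcad', 'eurusd']
subs = ["SELECT indx, '{}' AS symbol, final FROM {}".format(s, s) for s in names]
qry = "CREATE VIEW fx AS {};".format(" UNION ALL ".join(subs))
# CREATE VIEW fx AS SELECT indx, 'usdcad' AS symbol, final FROM usdcad
#   UNION ALL SELECT indx, 'eurusd' AS symbol, final FROM eurusd;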
def _add_orfs(self, which, symbol, ind, val, dt_log=None, user=None, comment=None):
if not isinstance(symbol, (str, unicode)):
symbol = symbol.name
if not dt_log:
dt_log = dt.datetime.now()
if which.lower() == 'override':
qry = self.ses.query(func.max(Override.ornum).label('max_ornum'))
override = True
elif which.lower() == 'failsafe':
qry = self.ses.query(func.max(FailSafe.fsnum).label('max_fsnum'))
override = False
qry = qry.filter_by(symname = symbol)
cur_num = qry.one()
if cur_num[0] is None:
next_num = 0
else:
next_num = cur_num[0] + 1
if override:
tmp = Override(symname=symbol,
ind=ind,
val=val,
dt_log=dt_log,
user=user,
comment=comment,
ornum=next_num)
else:
tmp = FailSafe(symname=symbol,
ind=ind,
val=val,
dt_log=dt_log,
user=user,
comment=comment,
fsnum=next_num)
self.ses.add(tmp)
self.ses.commit() | Appends a single indexed-value pair, to a symbol object, to be
used during the final steps of the aggregation of the datatable.
See add_override and add_fail_safe.
Parameters
----------
which : str
Fail Safe or Override?
symbol : Symbol or str
The Symbol to apply the fail safe
ind : obj
The index value where the fail safe should be applied
val : obj
The data value which will be used in the fail safe
dt_log : datetime
A log entry, for saving when this fail safe was created.
user : str
A string representing which user made the fail safe
comment : str
A string to store any notes related to this fail safe. |
def add_override(self, symbol, ind, val, dt_log=None, user=None, comment=None):
self._add_orfs('override', symbol, ind, val, dt_log, user, comment) | Appends a single indexed-value pair, to a symbol object, to be
used during the final steps of the aggregation of the datatable.
With default settings Overrides, get applied with highest priority.
Parameters
----------
symbol : Symbol or str
The Symbol to override
ind : obj
The index value where the override should be applied
val : obj
The data value which will be used in the override
dt_log : datetime
A log entry, for saving when this override was created.
user : str
A string representing which user made the override
comment : str
A string to store any notes related to this override. |
def get_converted(self, symbol, units='CAD', system=None, tag=None):
if isinstance(symbol, (str, unicode)):
sym = self.get(symbol)
df = sym.df
curu = sym.units
requ = units
elif isinstance(symbol, tuple):
df = symbol[0]
curu = symbol[1]
requ = units
else:
raise TypeError("Expected str or (DataFrame, str), found {}".format(type(symbol)))
system = system or self.default_system
tag = tag or self.default_tag
conv = self.converters[system][tag]
newdf = conv.convert(df, curu, requ)
newdf = pd.merge(df, newdf, left_index=True, right_index=True)
newdf = newdf[df.columns[0] + "_y"].to_frame()
newdf.columns = df.columns
return newdf | Uses a Symbol's Dataframe, to build a new Dataframe,
with the data converted to the new units
Parameters
----------
symbol : str or tuple of the form (Dataframe, str)
String representing a symbol's name, or a dataframe
with the data required to be converted. If supplying a
dataframe, units must be passed.
units : str, optional
Specify the units to convert the symbol to, default to CAD
system : str, optional
If None, the default system specified at instantiation
is used. System defines which conversion approach to take.
tag : str, optional
Tags define which set of conversion data is used. If None, the
default tag specified at instantiation is used. |
def last_cache(self,result='COMPLETE'):
crit = and_(SymbolLogEvent.event == 'CACHE',
SymbolLogEvent.evresult == result)
qry = self.log.filter(crit)
qry = qry.order_by(SymbolLogEvent.evtime.desc())
t = qry.first()
if t:
return t.evtime
else:
return None | The date and time of the previous cache.
Parameters
----------
result : string, default 'COMPLETE'
A string to choose which point in the log,
should be returned.
- COMPLETE - the last time a cache was completed
- STARTED - the last time a cache was started
Returns
-------
datetime.datetime |
def set_indexing(self, index_template):
objs = object_session(self)
if self.index.indimp != index_template.imp_name:
self._refresh_datatable_schema()
self.index.name = index_template.name
self.index.indimp = index_template.imp_name
self.index.case = index_template.case
self.index.setkwargs(**index_template.kwargs)
objs.commit() | Update a symbol's indexing strategy
Parameters
----------
index_template : bIndex or bIndex-like
An index template used to overwrite all
details about the symbol's current index. |
def add_meta(self, **metadict):
objs = object_session(self)
for attr,val in metadict.iteritems():
newmeta = SymbolMeta(self, attr, val)
self.meta.append(newmeta)
objs.commit() | Add meta information to a Symbol.
Parameters
----------
metadict
Attributes are passed as keywords, with their
associated values as strings. For meta attributes with spaces,
use an unpacked dict. |
def add_validator(self, val_template):
validator = val_template.validator
args = []
for arg in SymbolValidity.argnames:
if arg in val_template.__dict__.keys():
args.append(getattr(val_template, arg))
objs = object_session(self)
qry = objs.query(func.max(SymbolValidity.vid).label('max_vid'))
qry = qry.filter_by(symname = self.name)
cur_vid = qry.one()[0]
if cur_vid is None:
next_vid = 0
else:
next_vid = cur_vid + 1
self.validity.append(SymbolValidity(self, next_vid, validator, *args))
objs.commit() | Creates and adds a SymbolValidity object to the Symbol.
Parameters
----------
val_template : bValidity or bValidity-like
a validity template. |
def check_validity(self, checks=None, report=True):
if report:
reportpoints = []
allchecks = []
checks_specified=False
if isinstance(checks, (str, unicode)):
checks = [checks]
checks_specified = True
elif isinstance(checks, (list, tuple)):
checks_specified = True
else:
checks = []
for val in self.validity:
if (val.validator in checks) or (not checks_specified):
ValCheck = validitychecks[val.validator]
anum = ValCheck.__init__.func_code.co_argcount - 2
args = []
for arg in SymbolValidity.argnames:
args.append(getattr(val, arg))
valid = ValCheck(self.datatable_df, *args[:anum])
res = valid.result
allchecks.append(res)
rp = ReportPoint('validation', val.validator, res, str(args[:anum]))
reportpoints.append(rp)
if report:
return all(allchecks), reportpoints
else:
return all(allchecks) | Runs a Symbol's validity checks.
Parameters
----------
checks : str, [str,], optional
Only run certain checks.
report : bool, optional
If set to False, the method will return only the result of the
check checks (True/False). Set to True, to have a
SymbolReport returned as well.
Returns
-------
Bool, or a Tuple of the form (Bool, SymbolReport) |
def describe(self):
lines = []
lines.append("Symbol = {}".format(self.name))
if len(self.tags):
tgs = ", ".join(x.tag for x in self.tags)
lines.append(" tagged = {}".format(tgs))
if len(self.aliases):
als = ", ".join(x.alias for x in self.aliases)
lines.append(" aliased = {}".format(als))
if len(self.feeds):
lines.append(" feeds:")
for fed in self.feeds:
lines.append(" {}. {}".format(fed.fnum,
fed.ftype))
return "\n".join(lines) | describes a Symbol, returns a string |
def del_tags(self, tags):
# SQLA: Adding a SymbolTag object feels awkward/unnecessary.
# Should I be implementing this functionality a different way?
if isinstance(tags, (str, unicode)):
tags = [tags]
objs = object_session(self)
docommit = False
for symboltag in self.tags:
if symboltag.tag in tags:
objs.delete(symboltag)
docommit = True
if docommit:
objs.commit() | remove a tag or tags from a symbol
Parameters
----------
tags : str or [str,]
Tags to be removed |
def add_tags(self, tags):
# SQLA: Adding a SymbolTag object feels awkward/unnecessary.
# Should I be implementing this functionality a different way?
if isinstance(tags, (str, unicode)):
tags = [tags]
objs = object_session(self)
tmps = [SymbolTag(tag=t, sym=self) for t in tags]
objs.add_all(tmps)
objs.commit() | add a tag or tags to a symbol
Parameters
----------
tags : str or [str,]
Tags to be added |
def _log_an_event(self, event, evresult='No Result', note='No Note'):
objs = object_session(self)
evnt = SymbolLogEvent(event, evresult, note, sym=self.name)
objs.add(evnt)
objs.commit() | log an event
Parameters
----------
event : string
evresult : string
note : string |
def add_feed(self, feedlike, **kwargs):
if 'fnum' in kwargs:
fnum = kwargs['fnum']
del kwargs['fnum']
else:
fnum = None
if isinstance(feedlike, bFeed):
munging = feedlike.munging
if 'munging' in kwargs:
explicit_munging = kwargs['munging'].as_odict
for key in explicit_munging:
munging[key] = explicit_munging[key]
fed = Feed(self, feedlike.ftype,
feedlike.sourcing,
munging,
feedlike.meta,
fnum)
elif isinstance(feedlike, Feed):
fed = feedlike
else:
raise Exception("Invalid Feed {}".format(repr(feedlike)))
self.feeds.append(fed)
objs = object_session(self)
objs.add(fed)
objs.commit() | Add a feed to the Symbol
Parameters
----------
feedlike : Feed or bFeed-like
The feed template, or Feed object to be added.
kwargs
Munging instructions |
def add_alias(self, alias):
objs = object_session(self)
if isinstance(alias, list):
raise NotImplementedError
elif isinstanceofany(alias, (str, unicode)):
a = SymbolAlias(self, alias)
self.aliases.append(a)
objs.add(a) | Add an alias to a Symbol
Parameters
----------
alias : str
The alias |
def _final_data(self):
dtbl = self.datatable
objs = object_session(self)
if isinstance(dtbl, Table):
return objs.query(dtbl.c.indx, dtbl.c.final).all()
else:
raise Exception("Symbol has no datatable, likely need to cache first.") | Returns
-------
A list of tuples representing rows from the datatable's index
and final column, sorted accordingly. |
def _max_min(self):
dtbl = self.datatable
objs = object_session(self)
if isinstance(dtbl, Table):
return objs.query(func.max(dtbl.c.indx).label("max_indx"),
func.min(dtbl.c.indx).label("min_indx")).one()
else:
raise Exception("Symbol has no datatable") | Returns
-------
A tuple consisting of (max, min) of the index. |
def _all_datatable_data(self):
dtbl = self.datatable
objs = object_session(self)
imcols = [dtbl.c.indx, dtbl.c.final, dtbl.c.override_feed000, dtbl.c.failsafe_feed999]
cols = imcols[:3] + [c for c in dtbl.c if c not in (imcols)] + [imcols[3]]
if isinstance(dtbl, Table):
return objs.query(*cols).order_by(dtbl.c.indx).all()
else:
raise Exception("Symbol has no datatable") | Returns
-------
A list of tuples representing rows from all columns of the datatable,
sorted accordingly. |