| Column | Type | Range / distinct values |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–616 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |
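The header above describes the schema of a code-provenance dataset: each row pairs a source file's content with repository, license, and GitHub-activity metadata. As a hedged sketch (the parquet file names below are placeholders, not taken from this page), rows with this schema could be inspected with the Hugging Face `datasets` library:

```python
# Illustrative sketch only: the data_files glob is a placeholder, not a real path
# from this page. Assumes the rows are available as parquet with the schema above.
from datasets import load_dataset

ds = load_dataset("parquet", data_files={"train": "data/*.parquet"}, split="train")

for row in ds.select(range(2)):  # peek at the first two records
    # Provenance metadata sits alongside the raw file content.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the stored source file
```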
816ab3ea2f2aa70a194ab70f5b4edf02058b1926
|
8f0ce1be6cc093d962c64179eec99c7ccc20ffc4
|
/fabrication/migrations/0002_auto_20170522_2054.py
|
bb9f50cf4571262b07c8bd43de137ffccd69989e
|
[] |
no_license
|
dmeehan/futuregreenstudio
|
cf5e12c6ead8f0c7023ba09d5868749888068b72
|
e6e2b7f7ffa2ed251d21e6b1d07573ab4f70782f
|
refs/heads/master
| 2023-08-30T20:12:24.814970 | 2023-08-28T14:55:26 | 2023-08-28T14:55:26 | 89,943,081 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,047 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-22 20:54
from __future__ import unicode_literals

from django.db import migrations
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtailembeds.blocks
import wagtail.wagtailimages.blocks


class Migration(migrations.Migration):

    dependencies = [
        ('fabrication', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='fabricationpage',
            name='fabrication_content',
            field=wagtail.wagtailcore.fields.StreamField([(b'item_list', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock()), (b'text', wagtail.wagtailcore.blocks.TextBlock())]), icon='list-ul')), (b'numbered_item_list', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock()), (b'text', wagtail.wagtailcore.blocks.TextBlock())]), icon='list-ol')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'video', wagtail.wagtailembeds.blocks.EmbedBlock())]),
        ),
        migrations.AlterField(
            model_name='fabricationpage',
            name='process_content',
            field=wagtail.wagtailcore.fields.StreamField([(b'item_list', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock()), (b'text', wagtail.wagtailcore.blocks.TextBlock())]), icon='list-ul')), (b'numbered_item_list', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock()), (b'text', wagtail.wagtailcore.blocks.TextBlock())]), icon='list-ol')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'video', wagtail.wagtailembeds.blocks.EmbedBlock())]),
        ),
    ]
|
[
"[email protected]"
] | |
269de419f647a554c8e7218a5395f7f52ae9865f
|
de0a69e419b98bcf2556456d9c60208e4804d50b
|
/curator/actions.py
|
3da90e41de526d8d0e276919177f652c372bb0fb
|
[
"Apache-2.0"
] |
permissive
|
sk163/curator-1
|
754cf447e0fd94914e03fd5ac09f1dc7cbb1efc2
|
76b396c7d34e650ad8de1e9040c109094eb5e672
|
refs/heads/master
| 2020-12-30T11:28:10.694444 | 2017-05-16T13:03:30 | 2017-05-16T13:03:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 65,524 |
py
|
from .exceptions import *
from .utils import *
import logging
import time
from datetime import datetime
class Alias(object):
def __init__(self, name=None, extra_settings={}, **kwargs):
"""
Define the Alias object.
:arg name: The alias name
:arg extra_settings: Extra settings, including filters and routing. For
more information see
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html
:type extra_settings: dict, representing the settings.
"""
if not name:
raise MissingArgument('No value for "name" provided.')
#: Instance variable
#: The strftime parsed version of `name`.
self.name = parse_date_pattern(name)
#: The list of actions to perform. Populated by
#: :mod:`curator.actions.Alias.add` and
#: :mod:`curator.actions.Alias.remove`
self.actions = []
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = None
#: Instance variable.
#: Any extra things to add to the alias, like filters, or routing.
self.extra_settings = extra_settings
self.loggit = logging.getLogger('curator.actions.alias')
def add(self, ilo, warn_if_no_indices=False):
"""
Create `add` statements for each index in `ilo` for `alias`, then
append them to `actions`. Add any `extras` that may be there.
:arg ilo: A :class:`curator.indexlist.IndexList` object
"""
verify_index_list(ilo)
if not self.client:
self.client = ilo.client
try:
ilo.empty_list_check()
except NoIndices:
# Add a warning if there are no indices to add, if so set in options
if warn_if_no_indices:
self.loggit.warn(
'No indices found after processing filters. '
'Nothing to add to {0}'.format(self.name)
)
return
else:
# Re-raise the NoIndices so it will behave as before
raise NoIndices
for index in ilo.working_list():
self.loggit.debug(
'Adding index {0} to alias {1} with extra settings '
'{2}'.format(index, self.name, self.extra_settings)
)
add_dict = { 'add' : { 'index' : index, 'alias': self.name } }
add_dict['add'].update(self.extra_settings)
self.actions.append(add_dict)
def remove(self, ilo, warn_if_no_indices=False):
"""
Create `remove` statements for each index in `ilo` for `alias`,
then append them to `actions`.
:arg ilo: A :class:`curator.indexlist.IndexList` object
"""
verify_index_list(ilo)
if not self.client:
self.client = ilo.client
try:
ilo.empty_list_check()
except NoIndices:
# Add a warning if there are no indices to add, if so set in options
if warn_if_no_indices:
self.loggit.warn(
'No indices found after processing filters. '
'Nothing to remove from {0}'.format(self.name)
)
return
else:
# Re-raise the NoIndices so it will behave as before
raise NoIndices
aliases = self.client.indices.get_alias()
for index in ilo.working_list():
if index in aliases:
self.loggit.debug(
'Index {0} in get_aliases output'.format(index))
# Only remove if the index is associated with the alias
if self.name in aliases[index]['aliases']:
self.loggit.debug(
'Removing index {0} from alias '
'{1}'.format(index, self.name)
)
self.actions.append(
{ 'remove' : { 'index' : index, 'alias': self.name } })
else:
self.loggit.debug(
'Can not remove: Index {0} is not associated with alias'
' {1}'.format(index, self.name)
)
def body(self):
"""
Return a `body` string suitable for use with the `update_aliases` API
call.
"""
if not self.actions:
raise ActionError('No "add" or "remove" operations')
self.loggit.debug('Alias actions: {0}'.format(self.actions))
return { 'actions' : self.actions }
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
self.loggit.info('DRY-RUN MODE. No changes will be made.')
for item in self.body()['actions']:
job = list(item.keys())[0]
index = item[job]['index']
alias = item[job]['alias']
# We want our log to look clever, so if job is "remove", strip the
# 'e' so "remove" can become "removing". "adding" works already.
self.loggit.info(
'DRY-RUN: alias: {0}ing index "{1}" {2} alias '
'"{3}"'.format(
job.rstrip('e'),
index,
'to' if job == 'add' else 'from',
alias
)
)
def do_action(self):
"""
Run the API call `update_aliases` with the results of `body()`
"""
self.loggit.info('Updating aliases...')
self.loggit.info('Alias actions: {0}'.format(self.body()))
try:
self.client.indices.update_aliases(body=self.body())
except Exception as e:
report_failure(e)
class Allocation(object):
def __init__(self, ilo, key=None, value=None, allocation_type='require',
wait_for_completion=False, wait_interval=3, max_wait=-1,
):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
:arg key: An arbitrary metadata attribute key. Must match the key
assigned to at least some of your nodes to have any effect.
:arg value: An arbitrary metadata attribute value. Must correspond to
values associated with `key` assigned to at least some of your nodes
to have any effect. If a `None` value is provided, it will remove
any setting associated with that `key`.
:arg allocation_type: Type of allocation to apply. Default is `require`
:arg wait_for_completion: Wait (or not) for the operation
to complete before returning. (default: `False`)
:type wait_for_completion: bool
:arg wait_interval: How long in seconds to wait between checks for
completion.
:arg max_wait: Maximum number of seconds to `wait_for_completion`
.. note::
See:
https://www.elastic.co/guide/en/elasticsearch/reference/current/shard-allocation-filtering.html
"""
verify_index_list(ilo)
if not key:
raise MissingArgument('No value for "key" provided')
if allocation_type not in ['require', 'include', 'exclude']:
raise ValueError(
'{0} is an invalid allocation_type. Must be one of "require", '
'"include", "exclude".'.format(allocation_type)
)
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
self.loggit = logging.getLogger('curator.actions.allocation')
#: Instance variable.
#: Populated at instance creation time. Value is
#: ``index.routing.allocation.`` `allocation_type` ``.`` `key` ``.`` `value`
bkey = 'index.routing.allocation.{0}.{1}'.format(allocation_type, key)
self.body = { bkey : value }
#: Instance variable.
#: Internal reference to `wait_for_completion`
self.wfc = wait_for_completion
#: Instance variable
#: How many seconds to wait between checks for completion.
self.wait_interval = wait_interval
#: Instance variable.
#: How long in seconds to `wait_for_completion` before returning with an
#: exception. A value of -1 means wait forever.
self.max_wait = max_wait
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
show_dry_run(self.index_list, 'allocation', body=self.body)
def do_action(self):
"""
Change allocation settings for indices in `index_list.indices` with the
settings in `body`.
"""
self.loggit.debug(
'Cannot change shard routing allocation of closed indices. '
'Omitting any closed indices.'
)
self.index_list.filter_closed()
self.index_list.empty_list_check()
self.loggit.info('Updating index setting {0}'.format(self.body))
try:
index_lists = chunk_index_list(self.index_list.indices)
for l in index_lists:
self.client.indices.put_settings(
index=to_csv(l), body=self.body
)
if self.wfc:
logger.debug(
'Waiting for shards to complete relocation for indices:'
' {0}'.format(to_csv(l))
)
wait_for_it(
self.client, 'allocation',
wait_interval=self.wait_interval, max_wait=self.max_wait
)
except Exception as e:
report_failure(e)
class Close(object):
def __init__(self, ilo, delete_aliases=False):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
:arg delete_aliases: If `True`, will delete any associated aliases
before closing indices.
:type delete_aliases: bool
"""
verify_index_list(ilo)
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
#: Instance variable.
#: Internal reference to `delete_aliases`
self.delete_aliases = delete_aliases
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
self.loggit = logging.getLogger('curator.actions.close')
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
show_dry_run(
self.index_list, 'close', **{'delete_aliases':self.delete_aliases})
def do_action(self):
"""
Close open indices in `index_list.indices`
"""
self.index_list.filter_closed()
self.index_list.empty_list_check()
self.loggit.info(
'Closing selected indices: {0}'.format(self.index_list.indices))
try:
index_lists = chunk_index_list(self.index_list.indices)
for l in index_lists:
if self.delete_aliases:
self.loggit.info(
'Deleting aliases from indices before closing.')
self.loggit.debug('Deleting aliases from: {0}'.format(l))
try:
self.client.indices.delete_alias(
index=to_csv(l), name='_all')
except Exception as e:
self.loggit.warn(
'Some indices may not have had aliases. Exception:'
' {0}'.format(e)
)
self.client.indices.flush_synced(
index=to_csv(l), ignore_unavailable=True)
self.client.indices.close(
index=to_csv(l), ignore_unavailable=True)
except Exception as e:
report_failure(e)
class ClusterRouting(object):
def __init__(
self, client, routing_type=None, setting=None, value=None,
wait_for_completion=False, wait_interval=9, max_wait=-1
):
"""
For now, the cluster routing settings are hardcoded to be ``transient``
:arg client: An :class:`elasticsearch.Elasticsearch` client object
:arg routing_type: Type of routing to apply. Either `allocation` or
`rebalance`
:arg setting: Currently, the only acceptable value for `setting` is
``enable``. This is here in case that changes.
:arg value: Used only if `setting` is `enable`. Semi-dependent on
`routing_type`. Acceptable values for `allocation` and `rebalance`
are ``all``, ``primaries``, and ``none`` (string, not `NoneType`).
If `routing_type` is `allocation`, this can also be
``new_primaries``, and if `rebalance`, it can be ``replicas``.
:arg wait_for_completion: Wait (or not) for the operation
to complete before returning. (default: `False`)
:type wait_for_completion: bool
:arg wait_interval: How long in seconds to wait between checks for
completion.
:arg max_wait: Maximum number of seconds to `wait_for_completion`
"""
verify_client_object(client)
#: Instance variable.
#: An :class:`elasticsearch.Elasticsearch` client object
self.client = client
self.loggit = logging.getLogger('curator.actions.cluster_routing')
#: Instance variable.
#: Internal reference to `wait_for_completion`
self.wfc = wait_for_completion
#: Instance variable
#: How many seconds to wait between checks for completion.
self.wait_interval = wait_interval
#: Instance variable.
#: How long in seconds to `wait_for_completion` before returning with an
#: exception. A value of -1 means wait forever.
self.max_wait = max_wait
if setting != 'enable':
raise ValueError(
'Invalid value for "setting": {0}.'.format(setting)
)
if routing_type == 'allocation':
if value not in ['all', 'primaries', 'new_primaries', 'none']:
raise ValueError(
'Invalid "value": {0} with "routing_type":'
'{1}.'.format(value, routing_type)
)
elif routing_type == 'rebalance':
if value not in ['all', 'primaries', 'replicas', 'none']:
raise ValueError(
'Invalid "value": {0} with "routing_type":'
'{1}.'.format(value, routing_type)
)
else:
raise ValueError(
'Invalid value for "routing_type": {0}.'.format(routing_type)
)
bkey = 'cluster.routing.{0}.{1}'.format(routing_type,setting)
self.body = { 'transient' : { bkey : value } }
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
logger.info('DRY-RUN MODE. No changes will be made.')
self.loggit.info(
'DRY-RUN: Update cluster routing settings with arguments: '
'{0}'.format(self.body)
)
def do_action(self):
"""
Change cluster routing settings with the settings in `body`.
"""
self.loggit.info('Updating cluster settings: {0}'.format(self.body))
try:
self.client.cluster.put_settings(body=self.body)
if self.wfc:
logger.debug(
'Waiting for shards to complete routing and/or rebalancing'
)
wait_for_it(
self.client, 'cluster_routing',
wait_interval=self.wait_interval, max_wait=self.max_wait
)
except Exception as e:
report_failure(e)
class CreateIndex(object):
def __init__(self, client, name, extra_settings={}):
"""
:arg client: An :class:`elasticsearch.Elasticsearch` client object
:arg name: A name, which can contain :py:func:`time.strftime`
strings
:arg extra_settings: The `settings` and `mappings` for the index. For
more information see
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html
:type extra_settings: dict, representing the settings and mappings.
"""
verify_client_object(client)
if not name:
raise ConfigurationError('Value for "name" not provided.')
#: Instance variable.
#: The parsed version of `name`
self.name = parse_date_pattern(name)
#: Instance variable.
#: Extracted from the config yaml, it should be a dictionary of
#: mappings and settings suitable for index creation.
self.body = extra_settings
#: Instance variable.
#: An :class:`elasticsearch.Elasticsearch` client object
self.client = client
self.loggit = logging.getLogger('curator.actions.create_index')
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
logger.info('DRY-RUN MODE. No changes will be made.')
self.loggit.info(
'DRY-RUN: create_index "{0}" with arguments: '
'{1}'.format(self.name, self.body)
)
def do_action(self):
"""
Create index identified by `name` with settings in `body`
"""
self.loggit.info(
'Creating index "{0}" with settings: '
'{1}'.format(self.name, self.body)
)
try:
self.client.indices.create(index=self.name, body=self.body)
except Exception as e:
report_failure(e)
class DeleteIndices(object):
def __init__(self, ilo, master_timeout=30):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
:arg master_timeout: Number of seconds to wait for master node response
"""
verify_index_list(ilo)
if not type(master_timeout) == type(int()):
raise TypeError(
'Incorrect type for "master_timeout": {0}. '
'Should be integer value.'.format(type(master_timeout))
)
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
#: Instance variable.
#: String value of `master_timeout` + 's', for seconds.
self.master_timeout = str(master_timeout) + 's'
self.loggit = logging.getLogger('curator.actions.delete_indices')
self.loggit.debug('master_timeout value: {0}'.format(
self.master_timeout))
def _verify_result(self, result, count):
"""
Breakout method to aid readability
:arg result: A list of indices from `_get_result_list`
:arg count: The number of tries that have occurred
:rtype: bool
"""
if len(result) > 0:
self.loggit.error(
'The following indices failed to delete on try '
'#{0}:'.format(count)
)
for idx in result:
self.loggit.error("---{0}".format(idx))
return False
else:
self.loggit.debug(
'Successfully deleted all indices on try #{0}'.format(count)
)
return True
def __chunk_loop(self, chunk_list):
"""
Loop through deletes 3 times to ensure they complete
:arg chunk_list: A list of indices pre-chunked so it won't overload the
URL size limit.
"""
working_list = chunk_list
for count in range(1, 4): # Try 3 times
for i in working_list:
self.loggit.info("---deleting index {0}".format(i))
self.client.indices.delete(
index=to_csv(working_list), master_timeout=self.master_timeout)
result = [ i for i in working_list if i in get_indices(self.client)]
if self._verify_result(result, count):
return
else:
working_list = result
self.loggit.error(
'Unable to delete the following indices after 3 attempts: '
'{0}'.format(result)
)
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
show_dry_run(self.index_list, 'delete_indices')
def do_action(self):
"""
Delete indices in `index_list.indices`
"""
self.index_list.empty_list_check()
self.loggit.info(
'Deleting selected indices: {0}'.format(self.index_list.indices))
try:
index_lists = chunk_index_list(self.index_list.indices)
for l in index_lists:
self.__chunk_loop(l)
except Exception as e:
report_failure(e)
class ForceMerge(object):
def __init__(self, ilo, max_num_segments=None, delay=0):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
:arg max_num_segments: Number of segments per shard to forceMerge
:arg delay: Number of seconds to delay between forceMerge operations
"""
verify_index_list(ilo)
if not max_num_segments:
raise MissingArgument('Missing value for "max_num_segments"')
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
#: Instance variable.
#: Internally accessible copy of `max_num_segments`
self.max_num_segments = max_num_segments
#: Instance variable.
#: Internally accessible copy of `delay`
self.delay = delay
self.loggit = logging.getLogger('curator.actions.forcemerge')
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
show_dry_run(
self.index_list, 'forcemerge',
max_num_segments=self.max_num_segments,
delay=self.delay,
)
def do_action(self):
"""
forcemerge indices in `index_list.indices`
"""
self.index_list.empty_list_check()
self.index_list.filter_forceMerged(
max_num_segments=self.max_num_segments)
self.loggit.info('forceMerging selected indices')
try:
for index_name in self.index_list.indices:
self.loggit.info(
'forceMerging index {0} to {1} segments per shard. '
'Please wait...'.format(index_name, self.max_num_segments)
)
self.client.indices.forcemerge(index=index_name,
max_num_segments=self.max_num_segments)
if self.delay > 0:
self.loggit.info(
'Pausing for {0} seconds before continuing...'.format(
self.delay)
)
time.sleep(self.delay)
except Exception as e:
report_failure(e)
class Open(object):
def __init__(self, ilo):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
"""
verify_index_list(ilo)
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
self.loggit = logging.getLogger('curator.actions.open')
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
show_dry_run(self.index_list, 'open')
def do_action(self):
"""
Open closed indices in `index_list.indices`
"""
self.index_list.empty_list_check()
self.loggit.info(
'Opening selected indices: {0}'.format(self.index_list.indices))
try:
index_lists = chunk_index_list(self.index_list.indices)
for l in index_lists:
self.client.indices.open(index=to_csv(l))
except Exception as e:
report_failure(e)
class Replicas(object):
def __init__(self, ilo, count=None, wait_for_completion=False,
wait_interval=9, max_wait=-1):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
:arg count: The count of replicas per shard
:arg wait_for_completion: Wait (or not) for the operation
to complete before returning. (default: `False`)
:type wait_for_completion: bool
:arg wait_interval: How long in seconds to wait between checks for
completion.
:arg max_wait: Maximum number of seconds to `wait_for_completion`
"""
verify_index_list(ilo)
# It's okay for count to be zero
if count == 0:
pass
elif not count:
raise MissingArgument('Missing value for "count"')
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
#: Instance variable.
#: Internally accessible copy of `count`
self.count = count
#: Instance variable.
#: Internal reference to `wait_for_completion`
self.wfc = wait_for_completion
#: Instance variable
#: How many seconds to wait between checks for completion.
self.wait_interval = wait_interval
#: Instance variable.
#: How long in seconds to `wait_for_completion` before returning with an
#: exception. A value of -1 means wait forever.
self.max_wait = max_wait
self.loggit = logging.getLogger('curator.actions.replicas')
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
show_dry_run(self.index_list, 'replicas', count=self.count)
def do_action(self):
"""
Update the replica count of indices in `index_list.indices`
"""
self.index_list.empty_list_check()
self.loggit.debug(
'Cannot update the replica count of closed indices. '
'Omitting any closed indices.'
)
self.index_list.filter_closed()
self.loggit.info(
'Setting the replica count to {0} for indices: '
'{1}'.format(self.count, self.index_list.indices)
)
try:
index_lists = chunk_index_list(self.index_list.indices)
for l in index_lists:
self.client.indices.put_settings(index=to_csv(l),
body={'number_of_replicas' : self.count})
if self.wfc and self.count > 0:
logger.debug(
'Waiting for shards to complete replication for '
'indices: {0}'.format(to_csv(l))
)
wait_for_it(
self.client, 'replicas',
wait_interval=self.wait_interval, max_wait=self.max_wait
)
except Exception as e:
report_failure(e)
class Rollover(object):
def __init__(
self, client, name, conditions, extra_settings=None,
wait_for_active_shards=1
):
"""
:arg client: An :class:`elasticsearch.Elasticsearch` client object
:arg name: The name of the single-index-mapped alias to test for
rollover conditions.
:arg conditions: A dictionary of conditions to test
:arg extra_settings: Must be either `None`, or a dictionary of settings
to apply to the new index on rollover. This is used in place of
`settings` in the Rollover API, mostly because it's already existent
in other places here in Curator
:arg wait_for_active_shards: The number of shards expected to be active
before returning.
"""
verify_client_object(client)
self.loggit = logging.getLogger('curator.actions.rollover')
if not isinstance(conditions, dict):
raise ConfigurationError('"conditions" must be a dictionary')
else:
self.loggit.debug('"conditions" is {0}'.format(conditions))
if not isinstance(extra_settings, dict) and extra_settings is not None:
raise ConfigurationError(
'"extra_settings" must be a dictionary or None')
#: Instance variable.
#: The Elasticsearch Client object
self.client = client
#: Instance variable.
#: Internal reference to `conditions`
self.conditions = conditions
#: Instance variable.
#: Internal reference to `extra_settings`
self.settings = extra_settings
#: Instance variable.
#: Internal reference to `wait_for_active_shards`
self.wait_for_active_shards = wait_for_active_shards
# Verify that `conditions` and `settings` are good?
# Verify that `name` is an alias, and is only mapped to one index.
if rollable_alias(client, name):
self.name = name
else:
raise ValueError(
'Unable to perform index rollover with alias '
'"{0}". See previous logs for more details.'.format(name)
)
def body(self):
"""
Create a body from conditions and settings
"""
retval = {}
retval['conditions'] = self.conditions
if self.settings:
retval['settings'] = self.settings
return retval
def doit(self, dry_run=False):
"""
This exists solely to prevent having to have duplicate code in both
`do_dry_run` and `do_action`
"""
return self.client.indices.rollover(
alias=self.name,
body=self.body(),
dry_run=dry_run,
wait_for_active_shards=self.wait_for_active_shards,
)
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
logger.info('DRY-RUN MODE. No changes will be made.')
result = self.doit(dry_run=True)
logger.info('DRY-RUN: rollover: {0} result: '
'{1}'.format(self.name, result))
def do_action(self):
"""
Rollover the index referenced by alias `name`
"""
self.loggit.info('Performing index rollover')
try:
self.doit()
except Exception as e:
report_failure(e)
class DeleteSnapshots(object):
def __init__(self, slo, retry_interval=120, retry_count=3):
"""
:arg slo: A :class:`curator.snapshotlist.SnapshotList` object
:arg retry_interval: Number of seconds to delay between retries. Default:
120 (seconds)
:arg retry_count: Number of attempts to make. Default: 3
"""
verify_snapshot_list(slo)
#: Instance variable.
#: The Elasticsearch Client object derived from `slo`
self.client = slo.client
#: Instance variable.
#: Internally accessible copy of `retry_interval`
self.retry_interval = retry_interval
#: Instance variable.
#: Internally accessible copy of `retry_count`
self.retry_count = retry_count
#: Instance variable.
#: Internal reference to `slo`
self.snapshot_list = slo
#: Instance variable.
#: The repository name derived from `slo`
self.repository = slo.repository
self.loggit = logging.getLogger('curator.actions.delete_snapshots')
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
logger.info('DRY-RUN MODE. No changes will be made.')
mykwargs = {
'repository' : self.repository,
'retry_interval' : self.retry_interval,
'retry_count' : self.retry_count,
}
for snap in self.snapshot_list.snapshots:
logger.info('DRY-RUN: delete_snapshot: {0} with arguments: '
'{1}'.format(snap, mykwargs))
def do_action(self):
"""
Delete snapshots in `slo`
Retry up to `retry_count` times, pausing `retry_interval`
seconds between retries.
"""
self.snapshot_list.empty_list_check()
self.loggit.info('Deleting selected snapshots')
if not safe_to_snap(
self.client, repository=self.repository,
retry_interval=self.retry_interval, retry_count=self.retry_count):
raise FailedExecution(
'Unable to delete snapshot(s) because a snapshot is in '
'state "IN_PROGRESS"')
try:
for s in self.snapshot_list.snapshots:
self.loggit.info('Deleting snapshot {0}...'.format(s))
self.client.snapshot.delete(
repository=self.repository, snapshot=s)
except Exception as e:
report_failure(e)
class Reindex(object):
def __init__(self, ilo, request_body, refresh=True,
requests_per_second=-1, slices=1, timeout=60, wait_for_active_shards=1,
wait_for_completion=True, max_wait=-1, wait_interval=9,
remote_url_prefix=None, remote_ssl_no_validate=None,
remote_certificate=None, remote_client_cert=None,
remote_client_key=None, remote_aws_key=None, remote_aws_secret_key=None,
remote_aws_region=None, remote_filters={}):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
:arg request_body: The body to send to
:class:`elasticsearch.Indices.Reindex`, which must be complete and
usable, as Curator will do no vetting of the request_body. If it
fails to function, Curator will return an exception.
:arg refresh: Whether to refresh the entire target index after the
operation is complete. (default: `True`)
:type refresh: bool
:arg requests_per_second: The throttle to set on this request in
sub-requests per second. ``-1`` means set no throttle as does
``unlimited`` which is the only non-float this accepts. (default:
``-1``)
:arg slices: The number of slices this task should be divided into. 1
means the task will not be sliced into subtasks. (default: ``1``)
:arg timeout: The length in seconds each individual bulk request should
wait for shards that are unavailable. (default: ``60``)
:arg wait_for_active_shards: Sets the number of shard copies that must
be active before proceeding with the reindex operation. (default:
``1``) means the primary shard only. Set to ``all`` for all shard
copies, otherwise set to any non-negative value less than or equal
to the total number of copies for the shard (number of replicas + 1)
:arg wait_for_completion: Wait (or not) for the operation
to complete before returning. (default: `True`)
:type wait_for_completion: bool
:arg wait_interval: How long in seconds to wait between checks for
completion.
:arg max_wait: Maximum number of seconds to `wait_for_completion`
:arg remote_url_prefix: `Optional` url prefix, if needed to reach the
Elasticsearch API (i.e., it's not at the root level)
:type remote_url_prefix: str
:arg remote_ssl_no_validate: If `True`, do not validate the certificate
chain. This is an insecure option and you will see warnings in the
log output.
:type remote_ssl_no_validate: bool
:arg remote_certificate: Path to SSL/TLS certificate
:arg remote_client_cert: Path to SSL/TLS client certificate (public key)
:arg remote_client_key: Path to SSL/TLS private key
:arg remote_aws_key: AWS IAM Access Key (Only used if the
:mod:`requests-aws4auth` python module is installed)
:arg remote_aws_secret_key: AWS IAM Secret Access Key (Only used if the
:mod:`requests-aws4auth` python module is installed)
:arg remote_aws_region: AWS Region (Only used if the
:mod:`requests-aws4auth` python module is installed)
:arg remote_filters: Apply these filters to the remote client for
remote index selection.
"""
self.loggit = logging.getLogger('curator.actions.reindex')
verify_index_list(ilo)
# Normally, we'd check for an empty list here. But since we can reindex
# from remote, we might just be starting with an empty one.
# ilo.empty_list_check()
if not isinstance(request_body, dict):
raise ConfigurationError('"request_body" is not of type dictionary')
#: Instance variable.
#: Internal reference to `request_body`
self.body = request_body
self.loggit.debug('REQUEST_BODY = {0}'.format(request_body))
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
#: Instance variable.
#: Internal reference to `refresh`
self.refresh = refresh
#: Instance variable.
#: Internal reference to `requests_per_second`
self.requests_per_second = requests_per_second
#: Instance variable.
#: Internal reference to `slices`
self.slices = slices
#: Instance variable.
#: Internal reference to `timeout`, and add "s" for seconds.
self.timeout = '{0}s'.format(timeout)
#: Instance variable.
#: Internal reference to `wait_for_active_shards`
self.wait_for_active_shards = wait_for_active_shards
#: Instance variable.
#: Internal reference to `wait_for_completion`
self.wfc = wait_for_completion
#: Instance variable
#: How many seconds to wait between checks for completion.
self.wait_interval = wait_interval
#: Instance variable.
#: How long in seconds to `wait_for_completion` before returning with an
#: exception. A value of -1 means wait forever.
self.max_wait = max_wait
# This is for error logging later...
self.remote = False
if 'remote' in self.body['source']:
self.remote = True
# REINDEX_SELECTION is the designated token. If you use this for the
# source "index," it will be replaced with the list of indices from the
# provided 'ilo' (index list object).
if self.body['source']['index'] == 'REINDEX_SELECTION' \
and 'remote' not in self.body['source']:
self.body['source']['index'] = self.index_list.indices
# Remote section
elif 'remote' in self.body['source']:
self.loggit.debug('Remote reindex request detected')
if 'host' not in self.body['source']['remote']:
raise ConfigurationError('Missing remote "host"')
rclient_info = {}
for k in ['host', 'username', 'password']:
rclient_info[k] = self.body['source']['remote'][k] \
if k in self.body['source']['remote'] else None
rhost = rclient_info['host']
try:
# Save these for logging later
a = rhost.split(':')
self.remote_port = a[2]
self.remote_host = a[1][2:]
except Exception as e:
raise ConfigurationError(
'Host must be in the form [scheme]://[host]:[port] but '
'was [{0}]'.format(rhost)
)
rhttp_auth = '{0}:{1}'.format(
rclient_info['username'],rclient_info['password']) \
if (rclient_info['username'] and rclient_info['password']) \
else None
if rhost[:5] == 'http:':
use_ssl = False
elif rhost[:5] == 'https':
use_ssl = True
else:
raise ConfigurationError(
'Host must be in URL format. You provided: '
'{0}'.format(rclient_info['host'])
)
# The rest only applies if using filters for remote indices
if self.body['source']['index'] == 'REINDEX_SELECTION':
self.loggit.debug('Filtering indices from remote')
from .indexlist import IndexList
self.loggit.debug('Remote client args: '
'host={0} '
'http_auth={1} '
'url_prefix={2} '
'use_ssl={3} '
'ssl_no_validate={4} '
'certificate={5} '
'client_cert={6} '
'client_key={7} '
'aws_key={8} '
'aws_secret_key={9} '
'aws_region={10}'
'skip_version_test=True'.format(
rhost,
rhttp_auth,
remote_url_prefix,
use_ssl,
remote_ssl_no_validate,
remote_certificate,
remote_client_cert,
remote_client_key,
remote_aws_key,
remote_aws_secret_key,
remote_aws_region
)
)
try: # let's try to build a remote connection with these!
rclient = get_client(
host=rhost,
http_auth=rhttp_auth,
url_prefix=remote_url_prefix,
use_ssl=use_ssl,
ssl_no_validate=remote_ssl_no_validate,
certificate=remote_certificate,
client_cert=remote_client_cert,
client_key=remote_client_key,
aws_key=remote_aws_key,
aws_secret_key=remote_aws_secret_key,
aws_region=remote_aws_region,
skip_version_test=True,
)
except Exception as e:
self.loggit.error(
'Unable to establish connection to remote Elasticsearch'
' with provided credentials/certificates/settings.'
)
report_failure(e)
try:
rio = IndexList(rclient)
rio.iterate_filters({'filters': remote_filters})
try:
rio.empty_list_check()
except NoIndices:
raise FailedExecution(
'No actionable remote indices selected after '
'applying filters.'
)
self.body['source']['index'] = rio.indices
except Exception as e:
self.loggit.error(
'Unable to get/filter list of remote indices.'
)
report_failure(e)
self.loggit.debug(
'Reindexing indices: {0}'.format(self.body['source']['index']))
def show_run_args(self):
"""
Show what will run
"""
return ('request body: {0} with arguments: '
'refresh={1} '
'requests_per_second={2} '
'slices={3} '
'timeout={4} '
'wait_for_active_shards={5} '
'wait_for_completion={6}'.format(
self.body,
self.refresh,
self.requests_per_second,
self.slices,
self.timeout,
self.wait_for_active_shards,
self.wfc
)
)
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
self.loggit.info('DRY-RUN MODE. No changes will be made.')
self.loggit.info('DRY-RUN: REINDEX: {0}'.format(self.show_run_args()))
def do_action(self):
"""
Execute :class:`elasticsearch.Elasticsearch.reindex` operation with the
provided request_body and arguments.
"""
try:
self.loggit.info('Commencing reindex operation')
self.loggit.debug('REINDEX: {0}'.format(self.show_run_args()))
# Always set wait_for_completion to False. Let 'wait_for_it' do its
# thing if wait_for_completion is set to True. Report the task_id
# either way.
version = get_version(self.client)
if version >= (5,1,0):
response = self.client.reindex(
body=self.body, refresh=self.refresh,
requests_per_second=self.requests_per_second,
slices=self.slices,
timeout=self.timeout,
wait_for_active_shards=self.wait_for_active_shards,
wait_for_completion=False
)
else: # No slices for you
self.loggit.info(
'Your version of elasticsearch ({0}) does not support '
'sliced scroll for reindex, so that setting will not be '
'used'.format(version)
)
response = self.client.reindex(
body=self.body, refresh=self.refresh,
requests_per_second=self.requests_per_second,
timeout=self.timeout,
wait_for_active_shards=self.wait_for_active_shards,
wait_for_completion=False
)
self.loggit.debug('TASK ID = {0}'.format(response['task']))
if self.wfc:
wait_for_it(
self.client, 'reindex', task_id=response['task'],
wait_interval=self.wait_interval, max_wait=self.max_wait
)
# Verify the destination index is there after the fact
post_run = get_indices(self.client)
alias_instead = self.client.exists_alias(
name=self.body['dest']['index'])
if self.body['dest']['index'] not in post_run \
and not alias_instead:
self.loggit.error(
'Index "{0}" not found after reindex operation. Check '
'Elasticsearch logs for more '
'information.'.format(self.body['dest']['index'])
)
if self.remote:
self.loggit.error(
'Did you forget to add "reindex.remote.whitelist: '
'{0}:{1}" to the elasticsearch.yml file on the '
'"dest" node?'.format(
self.remote_host, self.remote_port
)
)
raise FailedExecution(
'Reindex failed. Index "{0}" not found.'.format(
self.body['dest']['index'])
)
else:
self.loggit.warn(
'"wait_for_completion" set to {0}.'
'Remember to check task_id "{1}" for successful completion '
'manually.'.format(self.wfc, response['task'])
)
except Exception as e:
report_failure(e)
class Snapshot(object):
def __init__(self, ilo, repository=None, name=None,
ignore_unavailable=False, include_global_state=True,
partial=False, wait_for_completion=True, wait_interval=9,
max_wait=-1, skip_repo_fs_check=False):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
:arg repository: The Elasticsearch snapshot repository to use
:arg name: What to name the snapshot.
:arg wait_for_completion: Wait (or not) for the operation
to complete before returning. (default: `True`)
:type wait_for_completion: bool
:arg wait_interval: How long in seconds to wait between checks for
completion.
:arg max_wait: Maximum number of seconds to `wait_for_completion`
:arg ignore_unavailable: Ignore unavailable shards/indices.
(default: `False`)
:type ignore_unavailable: bool
:arg include_global_state: Store cluster global state with snapshot.
(default: `True`)
:type include_global_state: bool
:arg partial: Do not fail if primary shard is unavailable. (default:
`False`)
:type partial: bool
:arg skip_repo_fs_check: Do not validate write access to repository on
all cluster nodes before proceeding. (default: `False`). Useful for
shared filesystems where intermittent timeouts can affect
validation, but won't likely affect snapshot success.
:type skip_repo_fs_check: bool
"""
verify_index_list(ilo)
# Check here and don't bother with the rest of this if there are no
# indices in the index list.
ilo.empty_list_check()
if not repository_exists(ilo.client, repository=repository):
raise ActionError(
'Cannot snapshot indices to missing repository: '
'{0}'.format(repository)
)
if not name:
raise MissingArgument('No value for "name" provided.')
#: Instance variable.
#: The parsed version of `name`
self.name = parse_date_pattern(name)
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
#: Instance variable.
#: Internally accessible copy of `repository`
self.repository = repository
#: Instance variable.
#: Internally accessible copy of `wait_for_completion`
self.wait_for_completion = wait_for_completion
#: Instance variable
#: How many seconds to wait between checks for completion.
self.wait_interval = wait_interval
#: Instance variable.
#: How long in seconds to `wait_for_completion` before returning with an
#: exception. A value of -1 means wait forever.
self.max_wait = max_wait
#: Instance variable.
#: Internally accessible copy of `skip_repo_fs_check`
self.skip_repo_fs_check = skip_repo_fs_check
self.state = None
#: Instance variable.
#: Populated at instance creation time by calling
#: :mod:`curator.utils.create_snapshot_body` with `ilo.indices` and the
#: provided arguments: `ignore_unavailable`, `include_global_state`,
#: `partial`
self.body = create_snapshot_body(
ilo.indices,
ignore_unavailable=ignore_unavailable,
include_global_state=include_global_state,
partial=partial
)
self.loggit = logging.getLogger('curator.actions.snapshot')
def get_state(self):
"""
Get the state of the snapshot
"""
try:
self.state = self.client.snapshot.get(
repository=self.repository,
snapshot=self.name)['snapshots'][0]['state']
return self.state
except IndexError:
raise CuratorException(
'Snapshot "{0}" not found in repository '
'"{1}"'.format(self.name, self.repository)
)
def report_state(self):
"""
Log the state of the snapshot
"""
self.get_state()
if self.state == 'SUCCESS':
self.loggit.info(
'Snapshot {0} successfully completed.'.format(self.name))
else:
self.loggit.warn(
'Snapshot {0} completed with state: {1}'.format(self.name, self.state))
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
self.loggit.info('DRY-RUN MODE. No changes will be made.')
self.loggit.info(
'DRY-RUN: snapshot: {0} in repository {1} with arguments: '
'{2}'.format(self.name, self.repository, self.body)
)
def do_action(self):
"""
Snapshot indices in `index_list.indices`, with options passed.
"""
if not self.skip_repo_fs_check:
test_repo_fs(self.client, self.repository)
if snapshot_running(self.client):
raise SnapshotInProgress('Snapshot already in progress.')
try:
self.loggit.info('Creating snapshot "{0}" from indices: '
'{1}'.format(self.name, self.index_list.indices)
)
# Always set wait_for_completion to False. Let 'wait_for_it' do its
# thing if wait_for_completion is set to True. Report the task_id
# either way.
self.client.snapshot.create(
repository=self.repository, snapshot=self.name, body=self.body,
wait_for_completion=False
)
if self.wait_for_completion:
wait_for_it(
self.client, 'snapshot', snapshot=self.name,
repository=self.repository,
wait_interval=self.wait_interval, max_wait=self.max_wait
)
else:
self.loggit.warn(
'"wait_for_completion" set to {0}.'
'Remember to check for successful completion '
'manually.'.format(self.wait_for_completion)
)
except Exception as e:
report_failure(e)
class Restore(object):
def __init__(self, slo, name=None, indices=None, include_aliases=False,
ignore_unavailable=False, include_global_state=False,
partial=False, rename_pattern=None, rename_replacement=None,
extra_settings={}, wait_for_completion=True, wait_interval=9,
max_wait=-1, skip_repo_fs_check=False):
"""
:arg slo: A :class:`curator.snapshotlist.SnapshotList` object
:arg name: Name of the snapshot to restore. If no name is provided, it
will restore the most recent snapshot by age.
:type name: str
:arg indices: A list of indices to restore. If no indices are provided,
it will restore all indices in the snapshot.
:type indices: list
:arg include_aliases: If set to `True`, restore aliases with the
indices. (default: `False`)
:type include_aliases: bool
:arg ignore_unavailable: Ignore unavailable shards/indices.
(default: `False`)
:type ignore_unavailable: bool
:arg include_global_state: Restore cluster global state with snapshot.
(default: `False`)
:type include_global_state: bool
:arg partial: Do not fail if primary shard is unavailable. (default:
`False`)
:type partial: bool
:arg rename_pattern: A regular expression pattern with one or more
captures, e.g. ``index_(.+)``
:type rename_pattern: str
:arg rename_replacement: A target index name pattern with `$#` numbered
references to the captures in ``rename_pattern``, e.g.
``restored_index_$1``
:type rename_replacement: str
:arg extra_settings: Extra settings, including shard count and settings
to omit. For more information see
https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html#_changing_index_settings_during_restore
:type extra_settings: dict, representing the settings.
:arg wait_for_completion: Wait (or not) for the operation
to complete before returning. (default: `True`)
:arg wait_interval: How long in seconds to wait between checks for
completion.
:arg max_wait: Maximum number of seconds to `wait_for_completion`
:type wait_for_completion: bool
:arg skip_repo_fs_check: Do not validate write access to repository on
all cluster nodes before proceeding. (default: `False`). Useful for
shared filesystems where intermittent timeouts can affect
validation, but won't likely affect snapshot success.
:type skip_repo_fs_check: bool
"""
self.loggit = logging.getLogger('curator.actions.snapshot')
verify_snapshot_list(slo)
# Get the most recent snapshot.
most_recent = slo.most_recent()
self.loggit.debug('"most_recent" snapshot: {0}'.format(most_recent))
#: Instance variable.
#: Will use a provided snapshot name, or the most recent snapshot in slo
self.name = name if name else most_recent
# Stop here now, if it's not a successful snapshot.
if slo.snapshot_info[self.name]['state'] == 'PARTIAL' \
and partial == True:
self.loggit.warn(
'Performing restore of snapshot in state PARTIAL.')
elif slo.snapshot_info[self.name]['state'] != 'SUCCESS':
raise CuratorException(
'Restore operation can only be performed on snapshots with '
'state "SUCCESS", or "PARTIAL" if partial=True.'
)
#: Instance variable.
#: The Elasticsearch Client object derived from `slo`
self.client = slo.client
#: Instance variable.
#: Internal reference to `slo`
self.snapshot_list = slo
#: Instance variable.
#: `repository` derived from `slo`
self.repository = slo.repository
if indices:
self.indices = ensure_list(indices)
else:
self.indices = slo.snapshot_info[self.name]['indices']
self.wfc = wait_for_completion
#: Instance variable
#: How many seconds to wait between checks for completion.
self.wait_interval = wait_interval
#: Instance variable.
#: How long in seconds to `wait_for_completion` before returning with an
#: exception. A value of -1 means wait forever.
self.max_wait = max_wait
#: Instance variable version of ``rename_pattern``
self.rename_pattern = rename_pattern if rename_replacement is not None \
else ''
#: Instance variable version of ``rename_replacement``
self.rename_replacement = rename_replacement if rename_replacement \
is not None else ''
#: Also an instance variable version of ``rename_replacement``
#: but with Java regex group designations of ``$#``
#: converted to Python's ``\\#`` style.
self.py_rename_replacement = self.rename_replacement.replace('$', '\\')
#: Instance variable.
#: Internally accessible copy of `skip_repo_fs_check`
self.skip_repo_fs_check = skip_repo_fs_check
#: Instance variable.
#: Populated at instance creation time from the other options
self.body = {
'indices' : self.indices,
'include_aliases' : include_aliases,
'ignore_unavailable' : ignore_unavailable,
'include_global_state' : include_global_state,
'partial' : partial,
'rename_pattern' : self.rename_pattern,
'rename_replacement' : self.rename_replacement,
}
if extra_settings:
self.loggit.debug(
'Adding extra_settings to restore body: '
'{0}'.format(extra_settings)
)
try:
self.body.update(extra_settings)
except:
self.loggit.error(
'Unable to apply extra settings to restore body')
self.loggit.debug('REPOSITORY: {0}'.format(self.repository))
self.loggit.debug('WAIT_FOR_COMPLETION: {0}'.format(self.wfc))
self.loggit.debug(
'SKIP_REPO_FS_CHECK: {0}'.format(self.skip_repo_fs_check))
self.loggit.debug('BODY: {0}'.format(self.body))
# Populate the expected output index list.
self._get_expected_output()
def _get_expected_output(self):
if not self.rename_pattern and not self.rename_replacement:
self.expected_output = self.indices
return # Don't stick around if we're not replacing anything
self.expected_output = []
for index in self.indices:
self.expected_output.append(
re.sub(
self.rename_pattern,
self.py_rename_replacement,
index
)
)
self.loggit.debug('index: {0} replacement: '
'{1}'.format(index, self.expected_output[-1])
)
def report_state(self):
"""
Log the state of the restore
This should only be done if ``wait_for_completion`` is `True`, and only
after completing the restore.
"""
all_indices = get_indices(self.client)
found_count = 0
missing = []
for index in self.expected_output:
if index in all_indices:
found_count += 1
self.loggit.info('Found restored index {0}'.format(index))
else:
missing.append(index)
if found_count == len(self.expected_output):
self.loggit.info('All indices appear to have been restored.')
else:
self.loggit.error(
'Some of the indices do not appear to have been restored. '
'Missing: {0}'.format(missing)
)
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
logger.info('DRY-RUN MODE. No changes will be made.')
logger.info(
'DRY-RUN: restore: Repository: {0} Snapshot name: {1} Arguments: '
'{2}'.format(
self.name, self.repository,
{ 'wait_for_completion' : self.wfc, 'body' : self.body }
)
)
for index in self.indices:
if self.rename_pattern and self.rename_replacement:
replacement_msg = 'as {0}'.format(
re.sub(
self.rename_pattern,
self.py_rename_replacement,
index
)
)
else:
replacement_msg = ''
logger.info(
'DRY-RUN: restore: Index {0} {1}'.format(index, replacement_msg)
)
def do_action(self):
"""
Restore indices with options passed.
"""
if not self.skip_repo_fs_check:
test_repo_fs(self.client, self.repository)
if snapshot_running(self.client):
raise SnapshotInProgress(
'Cannot restore while a snapshot is in progress.')
try:
self.loggit.info('Restoring indices "{0}" from snapshot: '
'{1}'.format(self.indices, self.name)
)
# Always set wait_for_completion to False. Let 'wait_for_it' do its
# thing if wait_for_completion is set to True. Report the task_id
# either way.
self.client.snapshot.restore(
repository=self.repository, snapshot=self.name, body=self.body,
wait_for_completion=False
)
if self.wfc:
wait_for_it(
self.client, 'restore', index_list=self.expected_output,
wait_interval=self.wait_interval, max_wait=self.max_wait
)
else:
self.loggit.warn(
'"wait_for_completion" set to {0}. '
'Remember to check for successful completion '
'manually.'.format(self.wfc)
)
except Exception as e:
report_failure(e)
|
[
"[email protected]"
] | |
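The actions module stored in the row above never takes raw index names; every action is driven by a pre-filtered IndexList (or SnapshotList) object, as its docstrings describe. As a hedged sketch only (assuming the curator package of that era exposes `IndexList` and `Alias` at package level, and with a placeholder host URL), wiring up the Alias action could look like this:

```python
# Illustrative sketch, not part of the file stored in the row above.
# Assumes elasticsearch-py and elasticsearch-curator (5.x era) are installed.
import elasticsearch
import curator

client = elasticsearch.Elasticsearch(hosts=['http://localhost:9200'])  # placeholder host

ilo = curator.IndexList(client)                      # start from all indices
ilo.filter_by_regex(kind='prefix', value='logs-')    # keep only the ones to alias

alias_action = curator.Alias(name='logs-current')
alias_action.add(ilo)       # queue {'add': ...} statements for each selected index
alias_action.do_action()    # sends alias_action.body() via indices.update_aliases
```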
95ffd688e970ee7591bc3d5adc0e0f6570dfb5dd
|
67c92dd802a14b41956589dafc6c8fad9f5043cb
|
/venv/bin/easy_install
|
b3dabbbbbad2ac29c808766ee81b920ecbc5a971
|
[] |
no_license
|
sunny-kathuria/ReconnTool
|
f9a68aca869cb27ad45351a1f5b8f59178590a75
|
274d2aad29f0b8c408772821b8066adfd43a9540
|
refs/heads/master
| 2021-01-05T02:36:59.159207 | 2020-02-16T07:49:10 | 2020-02-16T07:49:10 | 240,848,236 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 431 |
#!/root/PycharmProjects/Recon/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
|
[
"[email protected]"
] | ||
b9e506df9c12fad0bd4108510fa77486a23a356d
|
4f0192ccd0b29b4b28428daa9813010cd70f49a3
|
/news/migrations/0010_auto_20140519_1249.py
|
9ffe01021e97f71a6319b89b0b3a6c86147bb51f
|
[] |
no_license
|
nbedi/appletini
|
03c2a7286cb5775a63e17c41c3ccd2af48f0b90a
|
dd1f34f0fa3948fa808979e35844b6d58d46c0ea
|
refs/heads/master
| 2016-08-05T00:08:35.461725 | 2015-03-08T06:02:56 | 2015-03-08T06:02:56 | 31,575,783 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,006 |
py
|
# encoding: utf8
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('news', '0009_page_parent'),
    ]

    operations = [
        migrations.AddField(
            model_name='category',
            name='twitter',
            field=models.CharField(default='', max_length=15, blank=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='category',
            name='twitter_profile_image',
            field=models.ImageField(null=True, upload_to=b'news/category/twitter_profile/', blank=True),
            preserve_default=True,
        ),
        migrations.RemoveField(
            model_name='category',
            name='default_card_2x',
        ),
        migrations.AlterField(
            model_name='category',
            name='default_card',
            field=models.ImageField(upload_to=b'news/category/default_card/'),
        ),
    ]
|
[
"[email protected]"
] | |
069bd1201208b6e6dcd4c8d5a3897aaf17dfad90
|
8f2f83bc1381d4ce7fc968aec72fa400aae4155d
|
/pybitcoin/types/address.py
|
99c9195e3bd8727bf34cc0d36170c29765b43ee1
|
[
"MIT"
] |
permissive
|
nifrali/pyStratis
|
c855fb33be77064c9a741255e324003319a4789f
|
b1a80bf155b7941e9ef8fc2ea93fa1b08a0c4366
|
refs/heads/master
| 2023-06-20T16:02:30.863589 | 2021-07-01T19:24:18 | 2021-07-01T19:24:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,100 |
py
|
from __future__ import annotations
from typing import Callable
from pybitcoin.networks import BaseNetwork


class Address:
    """An address model. Address is validated by the network."""
    def __init__(self, address: str, network: BaseNetwork):
        self.validate_values(address=address, network=network)
        self.address = address
        self.network = network

    def __repr__(self) -> str:
        return self.address

    def __str__(self) -> str:
        return self.address

    def __eq__(self, other) -> bool:
        return self.address == other

    def json(self) -> str:
        return self.address

    @classmethod
    def __get_validators__(cls) -> Callable:
        yield cls.validate_class

    @classmethod
    def validate_class(cls, value) -> Address:
        cls.validate_values(address=value.address, network=value.network)
        return value

    @staticmethod
    def validate_values(address: str, network: BaseNetwork) -> bool:
        if network.validate_address(address):
            return True
        raise ValueError('Invalid address for given network.')
|
[
"[email protected]"
] | |
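The Address class above delegates all validation to whatever network object it is handed, and its `__get_validators__`/`validate_class` pair looks like the pydantic v1 custom-type hook, so it can plausibly be used directly as a field type in pydantic models. A purely illustrative sketch (DummyNetwork is invented for the example and is not part of pybitcoin):

```python
# Illustrative only: DummyNetwork is a stand-in, not a real pybitcoin network class.
class DummyNetwork:
    """Pretend network that accepts any address starting with 'X'."""
    def validate_address(self, address: str) -> bool:
        return address.startswith('X')


addr = Address('Xq1exampleaddress', network=DummyNetwork())
print(addr)                          # -> Xq1exampleaddress (str/repr return the raw string)
print(addr == 'Xq1exampleaddress')   # -> True (__eq__ compares against the plain string)

try:
    Address('not-valid', network=DummyNetwork())
except ValueError as err:
    print(err)                       # -> Invalid address for given network.
```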
b62904185be0fc06afc95aca81b753375fbeeef0
|
a1dbad34ad37f0856c29791bdc961233807ba49c
|
/gstudio/management/__init__.py
|
3606a89de7b56484b245fd9dbd151b3e22a09694
|
[
"BSD-3-Clause"
] |
permissive
|
gnowgi/django-gstudio
|
d03b6cc845d3a3c1ef2683f940386b8e4384605c
|
d515883fc4ffe01dd8b4b876d5a3dd023f862d30
|
refs/heads/master
| 2020-12-24T10:23:38.958910 | 2012-03-14T12:09:50 | 2012-03-14T12:09:50 | 2,724,426 | 1 | 9 | null | null | null | null |
UTF-8
|
Python
| false | false | 35 |
py
|
"""Management module of Gstudio"""
|
[
"[email protected]"
] | |
736cb5b4a52da061d34b42230a957d40324f6fb9
|
b76615ff745c6d66803506251c3d4109faf50802
|
/pyobjc-framework-LocalAuthentication/setup.py
|
a2118691526813fa2bc6cb10fc669f8a09239aa3
|
[
"MIT"
] |
permissive
|
danchr/pyobjc-git
|
6ef17e472f54251e283a0801ce29e9eff9c20ac0
|
62b787fddeb381184043c7ff136f1c480755ab69
|
refs/heads/master
| 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 703 |
py
|
"""
Wrappers for the "LocalAuthentication" framework on macOS.
These wrappers don't include documentation; please check Apple's documentation
for information on how to use this framework and PyObjC's documentation
for general tips and tricks regarding the translation between Python
and (Objective-)C frameworks
"""
from pyobjc_setup import setup
VERSION = "6.2b1"
setup(
name="pyobjc-framework-LocalAuthentication",
description="Wrappers for the framework LocalAuthentication on macOS",
min_os_level="10.10",
packages=["LocalAuthentication"],
version=VERSION,
install_requires=["pyobjc-core>=" + VERSION, "pyobjc-framework-Cocoa>=" + VERSION],
long_description=__doc__,
)
|
[
"[email protected]"
] | |
08ba34c09978507b3c6bcd1dcf0f9689f5613194
|
555b9f764d9bca5232360979460bc35c2f5ad424
|
/google/ads/google_ads/v2/services/transports/campaign_bid_modifier_service_grpc_transport.py
|
5f1bd483bd72991b807a4acf3847203f0f421fc9
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
juanmacugat/google-ads-python
|
b50256163782bc0223bcd8b29f789d74f4cfad05
|
0fc8a7dbf31d9e8e2a4364df93bec5f6b7edd50a
|
refs/heads/master
| 2021-02-18T17:00:22.067673 | 2020-03-05T16:13:57 | 2020-03-05T16:13:57 | 245,215,877 | 1 | 0 |
Apache-2.0
| 2020-03-05T16:39:34 | 2020-03-05T16:39:33 | null |
UTF-8
|
Python
| false | false | 5,103 |
py
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.ads.google_ads.v2.proto.services import campaign_bid_modifier_service_pb2_grpc
class CampaignBidModifierServiceGrpcTransport(object):
"""gRPC transport class providing stubs for
google.ads.googleads.v2.services CampaignBidModifierService API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = (
)
def __init__(self, channel=None, credentials=None,
address='googleads.googleapis.com:443'):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
'The `channel` and `credentials` arguments are mutually '
'exclusive.',
)
# Create the channel.
if channel is None:
channel = self.create_channel(
address=address,
credentials=credentials,
)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
'campaign_bid_modifier_service_stub': campaign_bid_modifier_service_pb2_grpc.CampaignBidModifierServiceStub(channel),
}
@classmethod
def create_channel(
cls,
address='googleads.googleapis.com:443',
credentials=None,
**kwargs):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
kwargs (dict): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address,
credentials=credentials,
scopes=cls._OAUTH_SCOPES,
**kwargs
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def get_campaign_bid_modifier(self):
"""Return the gRPC stub for :meth:`CampaignBidModifierServiceClient.get_campaign_bid_modifier`.
Returns the requested campaign bid modifier in full detail.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['campaign_bid_modifier_service_stub'].GetCampaignBidModifier
@property
def mutate_campaign_bid_modifiers(self):
"""Return the gRPC stub for :meth:`CampaignBidModifierServiceClient.mutate_campaign_bid_modifiers`.
Creates, updates, or removes campaign bid modifiers.
Operation statuses are returned.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['campaign_bid_modifier_service_stub'].MutateCampaignBidModifiers
|
[
"[email protected]"
] | |
f57d887b142a32218aa4c3df1b3b08198019563f
|
8f68af7b8854d8c5000f8ecbe3a3c4330b4d6a7c
|
/docs/interviewPrep/designPatterns/Behavioral_patterns/Memento/python/Memento.py
|
13b40cf9c3bcb0f1aa3c53ea95daf48f89fa96a0
|
[] |
no_license
|
reshinto/reshinto.github.io
|
7590d0fb26cbf239b2545fd3b745416ab31aa7aa
|
71e5b82d49a11d9a9171a38bcb3ac23dd07ee62f
|
refs/heads/dev
| 2022-12-05T13:45:53.578262 | 2022-12-01T15:34:59 | 2022-12-01T15:34:59 | 211,689,735 | 6 | 0 | null | 2022-08-07T22:07:36 | 2019-09-29T16:11:25 |
TypeScript
|
UTF-8
|
Python
| false | false | 357 |
py
|
from abc import ABC, abstractmethod
class Memento(ABC):
"""
The Memento interface provides a way to retrieve the memento's metadata,
such as creation date or name. However, it doesn't expose the Originator's
state.
"""
@abstractmethod
def get_name(self):
pass
@abstractmethod
def get_date(self):
pass
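# Added illustration: one possible concrete Memento (not part of the original
# file). It captures an originator's state privately and only exposes metadata
# through get_name()/get_date(), as the interface above requires.
from datetime import datetime

class ConcreteMemento(Memento):
    def __init__(self, state):
        self._state = state                      # hidden from clients
        self._date = datetime.now().isoformat()  # creation timestamp

    def get_name(self):
        # Short human-readable label combining date and a state preview
        return "{} / ({}...)".format(self._date, str(self._state)[:9])

    def get_date(self):
        return self._date

    def get_state(self):
        # Typically only the Originator calls this to restore itself
        return self._state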
|
[
"[email protected]"
] | |
2d2104f7dd3191c1a45bdf24ccc0559a181e3bfd
|
3d83e5d6c5c3b264dbca94f2fedcd1abaf522278
|
/docs/source/conf.py
|
34117acfb1003d1e24d7e473b7f2a321cbd12283
|
[
"Apache-2.0"
] |
permissive
|
cp4cds/c4cds-wps
|
4767d779a2338d46d52f0c23bb89f0072928c482
|
5abd9281195548bbd1e7653fe2ab1fee26745200
|
refs/heads/master
| 2020-04-02T06:43:19.383112 | 2020-01-14T16:05:36 | 2020-01-14T16:05:36 | 154,164,988 | 0 | 0 |
NOASSERTION
| 2020-01-14T16:05:37 | 2018-10-22T15:09:32 |
Python
|
UTF-8
|
Python
| false | false | 5,253 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# c4cds documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'pywps.ext_autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'c4cds-wps'
copyright = u"2019, Carsten Ehbrecht"
author = u"Carsten Ehbrecht"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = '0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/birdhouse_logo.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'c4cdsdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'c4cds.tex',
u'c4cds-wps Documentation',
u'Carsten Ehbrecht', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'c4cds',
u'c4cds-wps Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'c4cds',
u'c4cds-wps Documentation',
author,
'c4cds',
'One line description of project.',
'Miscellaneous'),
]
|
[
"[email protected]"
] | |
29e29ae5ff4022a72a00e62679cd8b9718301532
|
d75cbad7a79e24b49f405c6529633ea65c9b286d
|
/most_contributive_feature.py
|
c3628be932c6847796e1899376eca75bccc8b7f3
|
[] |
no_license
|
aj2622/ML_HW1
|
bc49e61781f108c66dfd598423915e27c72f7b3a
|
7497f8d71f6b731fc232058d6a0597af4884a53f
|
refs/heads/master
| 2020-04-22T08:49:52.188349 | 2017-10-31T14:23:02 | 2017-10-31T14:23:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 898 |
py
|
import numpy as np
import scipy.io as io
import poly_regression
feature = ['sepal length', 'sepal width', 'petal length', 'petal width']
if __name__ == '__main__':
data = io.loadmat('data/5_X.mat')['X']
target = io.loadmat('data/5_T.mat')['T']
train_x = np.concatenate((data[:40], data[50:90], data[100:140]))
train_t = np.concatenate((target[:40], target[50:90], target[100:140]))
error_list = []
for i in range(4):
trans_train_x = poly_regression.transform(np.delete(train_x, i, 1), 3, 2)
w = poly_regression.getCoef(trans_train_x, train_t)
train_y = np.dot(trans_train_x, w)
error_list.append(poly_regression.RMSE(train_y, train_t))
print 'The RMS error after remove feature <<', feature[i], '>> is', error_list[len(error_list)-1]
print 'The most contributive attribute is', feature[error_list.index(max(error_list))]
|
[
"[email protected]"
] | |
786af33be62d301d22854f723ab696e318419bdc
|
f97b8cd110b651a13628a2f394b018bed3d8957d
|
/screenshot_to_csv.py
|
ce2fa840bc41658c62a9de726137505330fe9596
|
[] |
no_license
|
sebastiengilbert73/chess_scribe
|
d09a9bcca37a15216342245bbddb546d87bf75c9
|
3073a8f12f086592e0eebb67f800112c914515e8
|
refs/heads/main
| 2023-03-30T15:18:56.852056 | 2021-03-29T00:46:39 | 2021-03-29T00:46:39 | 352,422,592 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,944 |
py
|
import cv2
import argparse
import logging
import os
import numpy as np
from tesserocr import PyTessBaseAPI
from PIL import Image
parser = argparse.ArgumentParser()
parser.add_argument('ImageFilepath', help="The filepath to the screenshot")
parser.add_argument('--outputDirectory', help="The directory where the output will be written. Default: '/tmp/chess_scribe/'", default='/tmp/chess_scribe/')
parser.add_argument('--rangeThreshold', help="The threshold on the grayscale range, for a row. Default: 100", type=int, default=100)
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG, format='%(asctime)-15s %(message)s')
def main():
logging.info("screenshot_to_csv.py main()")
# If the output folder doesn't exist, create it. Cf. https://www.tutorialspoint.com/How-can-I-create-a-directory-if-it-does-not-exist-using-Python
if not os.path.exists(args.outputDirectory):
os.makedirs(args.outputDirectory)
# Open the image
screenshot = cv2.imread(args.ImageFilepath, cv2.IMREAD_COLOR)
screenshot_shapeHWC = screenshot.shape
# Convert to grayscale
grayscale_screenshot = cv2.cvtColor(screenshot, cv2.COLOR_BGR2GRAY)
# Find the text break rows
text_line_delimiters = TextLineDelimiters(args.outputDirectory, grayscale_screenshot, args.rangeThreshold)
logging.debug("text_line_delimiters = {}".format(text_line_delimiters))
# Append text lines to form a single text line
single_line_img = AppendTextLines(args.outputDirectory, screenshot, text_line_delimiters)
#single_line_rgb = cv2.cvtColor(single_line_img, cv2.COLOR_BGR2RGB)
with PyTessBaseAPI() as tesser_api:
tesser_api.SetImage(Image.fromarray(single_line_img))
logging.debug("tesser_api.GetUTF8Text() = {}".format(tesser_api.GetUTF8Text()))
#text_str = pytesseract.image_to_string(Image.fromarray(single_line_rgb))
def TextLineDelimiters(output_directory, grayscale_screenshot, range_threshold):
text_line_delimiters = [0]
img_sizeHW = grayscale_screenshot.shape
row_ranges = []
for y in range(img_sizeHW[0]):
min_value, max_value, _, _ = cv2.minMaxLoc(grayscale_screenshot[y, :])
row_ranges.append(max_value - min_value)
with open(os.path.join(output_directory, "TextLineDelimiters_rowRange.csv"), 'w+') as stats_file:
stats_file.write("y,range\n")
we_are_in_text = False
for y in range(len(row_ranges)):
grayscale_range = row_ranges[y]
stats_file.write("{},{}\n".format(y, grayscale_range))
if grayscale_range >= range_threshold:
we_are_in_text = True
else:
if we_are_in_text:
text_line_delimiters.append(y)
we_are_in_text = False
return text_line_delimiters
def AppendTextLines(output_directory, screenshot, text_line_delimiters):
deltas = [text_line_delimiters[i] - text_line_delimiters[i - 1] for i in range(1, len(text_line_delimiters))]
text_line_height = max(deltas)
deltas.append(text_line_height)
logging.debug("text_line_height = {}".format(text_line_height))
text_line_width = screenshot.shape[1] * len(text_line_delimiters)
single_line_img = np.zeros((text_line_height, text_line_width, 3), dtype=np.uint8)
for lineNdx in range(len(text_line_delimiters) - 1):
#logging.debug("lineNdx = {}; text_line_delimiters[lineNdx] = {}; deltas[lineNdx] = {}".format(lineNdx, text_line_delimiters[lineNdx], deltas[lineNdx]))
single_line_img[0: deltas[lineNdx], lineNdx * screenshot.shape[1] : (lineNdx + 1) * screenshot.shape[1]] = \
screenshot[text_line_delimiters[lineNdx]: text_line_delimiters[lineNdx] + deltas[lineNdx], :]
single_line_filepath = os.path.join(output_directory, "AppendTextLines_singleLine.png")
cv2.imwrite(single_line_filepath, single_line_img)
return single_line_img
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
eea28399f23ba03c93add6c9473bc9bab6478311
|
942ee5e8d54e8ebe9c5c841fbfdd1da652946944
|
/1001-1500/1029.Two City Scheduling.py
|
aa44653739248dc7f890e08295a3dd9bd2cedb30
|
[] |
no_license
|
kaiwensun/leetcode
|
0129c174457f32887fbca078fb448adce46dd89d
|
6b607f4aae3a4603e61f2e2b7480fdfba1d9b947
|
refs/heads/master
| 2023-08-31T07:30:50.459062 | 2023-08-27T07:59:16 | 2023-08-27T07:59:16 | 57,526,914 | 69 | 9 | null | 2023-08-20T06:34:41 | 2016-05-01T05:37:29 |
Python
|
UTF-8
|
Python
| false | false | 343 |
py
|
class Solution(object):
def twoCitySchedCost(self, costs):
"""
:type costs: List[List[int]]
:rtype: int
"""
diff = [(cost[0] - cost[1], cost) for cost in costs]
diff.sort()
        N = len(costs) // 2  # integer division; behaves the same in Python 2 and 3
return sum(pair[1][0] for pair in diff[:N]) + sum(pair[1][1] for pair in diff[N:])
|
[
"[email protected]"
] | |
79e4094ee8c558cf5207293f1b1395a43cd41174
|
aef8eb6681e555ecb61ac67151e4c54d6fdd1023
|
/plots/plotsDaniel/regions/covarianceMatrix.py
|
395fda4f1921f5142a11da1b8d19d03b0329829a
|
[] |
no_license
|
HephyAnalysisSW/TopEFT
|
0e2dc89f7a43bacf50c77a042f56663e9d4f3404
|
53174807c96dffa6654e4dc63bef92f2b71706ee
|
refs/heads/master
| 2022-11-07T02:41:53.120759 | 2020-03-31T08:08:27 | 2020-03-31T08:08:27 | 98,643,866 | 0 | 3 | null | 2019-10-14T09:02:09 | 2017-07-28T11:38:23 |
Python
|
UTF-8
|
Python
| false | false | 7,589 |
py
|
import shutil, os
import ROOT
from array import array
import math
import pickle
import numpy as np
import copy
from TopEFT.Tools.user import combineReleaseLocation as releaseLocation
import re
def natural_sort(list, key=lambda s:s):
"""
Sort the list into natural alphanumeric order.
http://stackoverflow.com/questions/4836710/does-python-have-a-built-in-function-for-string-natural-sort
"""
def get_alphanum_key_func(key):
convert = lambda text: int(text) if text.isdigit() else text
return lambda s: [convert(c) for c in re.split('([0-9]+)', key(s))]
sort_key = get_alphanum_key_func(key)
lc = sorted(list, key=sort_key)
return lc
def getCovariance(fname):
import uuid, os
ustr = str(uuid.uuid4())
uniqueDirname = os.path.join(releaseLocation, ustr)
print "Creating %s"%uniqueDirname
os.makedirs(uniqueDirname)
if fname is not None: # Assume card is already written when fname is not none
filename = os.path.abspath(fname)
else:
filename = fname if fname else os.path.join(uniqueDirname, ustr+".txt")
self.writeToFile(filename)
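        # NOTE (added): `self` above is a leftover from the class method this code was
        # copied from; the else-branch raises a NameError unless fname is provided.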
covFilename = filename.replace('.txt', '_mlfit.root')
shapeFilename = filename.replace('.txt', '_shape.txt')
assert os.path.exists(filename), "File not found: %s"%filename
combineCommand = "cd "+uniqueDirname+";eval `scramv1 runtime -sh`;combineCards.py %s -S > myshapecard.txt "%fname
#set workspace
workspaceCommand = "cd "+uniqueDirname+";eval `scramv1 runtime -sh`;text2workspace.py myshapecard.txt"
##Run fit
#fitCommand = "cd "+uniqueDirname+";eval `scramv1 runtime -sh`;combine -M FitDiagnostics --freezeParameters r --saveShapes --saveWithUnc --numToysForShape 100 --saveOverall myshapecard.root"
fitCommand = "cd "+uniqueDirname+";eval `scramv1 runtime -sh`;combine -M FitDiagnostics --saveShapes --saveWithUnc --numToysForShape 100 --saveOverall myshapecard.root"
print fitCommand
os.system(combineCommand)
os.system(workspaceCommand)
os.system(fitCommand)
f1 = ROOT.TFile(uniqueDirname+"/fitDiagnostics.root")
postfit = f1.Get("shapes_fit_s")
prefit = f1.Get("shapes_prefit")
# should also extract yields here to directly obtain chi2
cov_postfit = copy.deepcopy(postfit.Get("overall_total_covar"))
cov_prefit = copy.deepcopy(prefit.Get("overall_total_covar"))
total_postfit = copy.deepcopy(postfit.Get("total_overall"))
total_prefit = copy.deepcopy(prefit.Get("total_overall"))
data = copy.deepcopy(postfit.Get("total_data"))
f1.Close()
del postfit, prefit, f1
shutil.rmtree(uniqueDirname)
return {"postfit":cov_postfit, "prefit":cov_prefit, "yield_postfit":total_postfit, "yield_prefit":total_prefit, "data":data}
#f1 = ROOT.TFile("/afs/hephy.at/work/d/dspitzbart/top/devel/CMSSW_8_1_0/src/fitDiagnostics.root")
def getMatrix(h2, binNumbers):
binNames = []
matrix = {}
nbins = len(binNumbers)
for i in range(1, nbins+1):
#binNames.append(h2.GetXaxis().GetBinLabel(i))
matrix[h2.GetXaxis().GetBinLabel(i)] = {}
for j in range(1, nbins+1):
matrix[h2.GetXaxis().GetBinLabel(i)][h2.GetXaxis().GetBinLabel(j)] = h2.GetBinContent(i,j)
sorted_cov = ROOT.TH2D('cov','',nbins,0,nbins,nbins,0,nbins)
#binNames = natural_sort(binNames)
cov = np.zeros((nbins,nbins))
diag = np.zeros((nbins,nbins))
diag_corr = np.zeros((nbins, nbins))
for i,k in enumerate(binNumbers):
diag_corr[i,i] = math.sqrt(h2.GetBinContent(k,k))
for j,l in enumerate(binNumbers):
cov[i][j] = h2.GetBinContent(k,l)#matrix[k][l]
if i==j:
diag[i][j] = h2.GetBinContent(k,l)
return cov,diag, diag_corr
def getSortedBinNumber(h1):
binNames = []
indices = []
nbins = h1.GetNbinsX()
for i in range(1, nbins+1):
binNames.append(h1.GetXaxis().GetBinLabel(i))
sortedBinNames = natural_sort(binNames)
#sortedBinNames = sortedBinNames[0:15]# + sortedBinNames[30:45]
for x in sortedBinNames:
binNumber = binNames.index(x)+1
if h1.GetBinContent(binNumber)>0:
indices.append(binNames.index(x)+1)
return indices, sortedBinNames
def getVectorFromHist(h1, binNumbers):
vector = []
for b in binNumbers:
vector.append(h1.GetBinContent(b))
return np.array(vector)
def getVectorFromGraph(graph, binNumbers):
vector = []
for b in binNumbers:
vector.append(graph.Eval(b-0.5))
return np.array(vector)
cov = getCovariance("/afs/hephy.at/data/dspitzbart01/TopEFT/results/cardFiles/regionsE_COMBINED_xsec_shape_lowUnc_SRandCR/dim6top_LO_currents/dim6top_LO_ttZ_ll.txt")
binNumbers,sortedBinNames = getSortedBinNumber(cov["yield_postfit"])
cov_prefit, cov_prefit_diag, cov_prefit_diag_corr = getMatrix(cov["prefit"], binNumbers)
cov_postfit, cov_postfit_diag, cov_postfit_diag_corr = getMatrix(cov["postfit"], binNumbers)
obs = getVectorFromGraph(cov["data"], binNumbers)
exp_postfit = getVectorFromHist(cov["yield_postfit"], binNumbers)
exp_prefit = getVectorFromHist(cov["yield_prefit"], binNumbers)
# Chi2 for postfit
R_postfit = obs - exp_postfit
cov_postfit_BU = copy.deepcopy(cov_postfit)
cov_postfit_inv = np.linalg.inv(cov_postfit)
chi2_postfit = np.dot(cov_postfit_inv, R_postfit)
chi2_postfit = np.dot(R_postfit,chi2_postfit)
cov_postfit_diag_inv = np.linalg.inv(cov_postfit_diag)
cov_postfit_diag_corr_inv = np.linalg.inv(cov_postfit_diag_corr)
chi2_postfit_uncor = np.dot(cov_postfit_diag_inv, R_postfit)
chi2_postfit_uncor = np.dot(R_postfit, chi2_postfit_uncor)
## get the correlation matrix
corr = np.dot(cov_postfit_diag_corr_inv, cov_postfit)
corr = np.dot(corr, cov_postfit_diag_corr_inv)
nbins = len(binNumbers)
sorted_corr = ROOT.TH2D('corr','',nbins,0,nbins,nbins,0,nbins)
for i,k in enumerate(sortedBinNames[:nbins]):
#if i < nSR:
sorted_corr.GetXaxis().SetBinLabel(i+1, str(i+1))#SRnames[i])
sorted_corr.GetYaxis().SetBinLabel(i+1, str(i+1))#SRnames[i])
for j,l in enumerate(sortedBinNames[:nbins]):
sorted_corr.SetBinContent(i+1, j+1, corr[i][j])
sorted_corr.GetXaxis().LabelsOption("v")
sorted_corr.GetZaxis().SetRangeUser(-1.0, 1.0)
c3 = ROOT.TCanvas('c3','c3',700,700)
pad2=ROOT.TPad("pad2","Main",0.,0.,1.,1.)
pad2.SetRightMargin(0.15)
pad2.SetTopMargin(0.06)
pad2.SetBottomMargin(0.12)
pad2.Draw()
pad2.cd()
sorted_corr.Draw("colz")
latex1 = ROOT.TLatex()
latex1.SetNDC()
latex1.SetTextSize(0.04)
latex1.SetTextAlign(11) # align right
latex1.DrawLatex(0.10,0.95,'CMS #bf{#it{Private Work}}')
outname = 'correlation'
filetypes = ['.png','.pdf','.root']
plot_dir = '/afs/hephy.at/user/d/dspitzbart/www/TopEFT/correlation/'
for f in filetypes:
c3.Print(plot_dir+outname+f)
chi2_primitive = 0
chi2_primitives_postfit = [0,0,0,0]
chi2_primitives_prefit = [0,0,0,0]
for i,r in enumerate(R_postfit):
#if i >= 30 and i<45:
chi2_primitives_postfit[i/15] += (r**2 / cov_postfit_BU[i][i])
chi2_primitives_prefit[i/15] += (r**2 / cov_prefit[i][i])
chi2_primitive += (r**2 / cov_postfit[i][i])
print "Results"
print chi2_postfit
print chi2_primitive
print "postfit", chi2_primitives_postfit
print "prefit", chi2_primitives_prefit
# Chi2 for prefit
R_prefit = obs - exp_prefit
cov_prefit_inv = np.linalg.inv(cov_prefit)
chi2_prefit = np.dot(cov_prefit_inv, R_prefit)
chi2_prefit = np.dot(R_prefit,chi2_prefit)
#cov_inv = np.linalg.inv(cov)
#pickle.dump(cov_inv, file('cov_inv.pkl','w'))
|
[
"[email protected]"
] | |
cf390af645cb88d0be8039c0bb54d5193044fcf1
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/5/lrx.py
|
494875a7ada72dddb9e71b6b3dbf5fbf9203dc27
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 486 |
py
|
import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'lRX':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1])
|
[
"[email protected]"
] | |
7ce1d9097321b92cdf001e1ecbcd96d23b2ed402
|
551b75f52d28c0b5c8944d808a361470e2602654
|
/huaweicloud-sdk-tms/huaweicloudsdktms/v1/model/predefine_tag.py
|
5bbfd3f3ded7213f0f5b7bef8e19d6e57181afd3
|
[
"Apache-2.0"
] |
permissive
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
9d6597ce8ab666a9a297b3d936aeb85c55cf5877
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
refs/heads/master
| 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 |
NOASSERTION
| 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null |
UTF-8
|
Python
| false | false | 4,468 |
py
|
# coding: utf-8
import pprint
import re
import six
class PredefineTag:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'key': 'str',
'value': 'str',
'update_time': 'datetime'
}
attribute_map = {
'key': 'key',
'value': 'value',
'update_time': 'update_time'
}
def __init__(self, key=None, value=None, update_time=None):
"""PredefineTag - a model defined in huaweicloud sdk"""
self._key = None
self._value = None
self._update_time = None
self.discriminator = None
self.key = key
self.value = value
self.update_time = update_time
@property
def key(self):
"""Gets the key of this PredefineTag.
        Key. Maximum length: 36 characters. Allowed characters: A-Z, a-z, 0-9, '-', '_', and UNICODE characters (\\u4E00-\\u9FFF).
:return: The key of this PredefineTag.
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""Sets the key of this PredefineTag.
        Key. Maximum length: 36 characters. Allowed characters: A-Z, a-z, 0-9, '-', '_', and UNICODE characters (\\u4E00-\\u9FFF).
:param key: The key of this PredefineTag.
:type: str
"""
self._key = key
@property
def value(self):
"""Gets the value of this PredefineTag.
        Value. Each value has a maximum length of 43 characters and may be an empty string. Allowed characters: A-Z, a-z, 0-9, '.', '-', '_', and UNICODE characters (\\u4E00-\\u9FFF).
:return: The value of this PredefineTag.
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this PredefineTag.
        Value. Each value has a maximum length of 43 characters and may be an empty string. Allowed characters: A-Z, a-z, 0-9, '.', '-', '_', and UNICODE characters (\\u4E00-\\u9FFF).
:param value: The value of this PredefineTag.
:type: str
"""
self._value = value
@property
def update_time(self):
"""Gets the update_time of this PredefineTag.
        Update time, expressed in UTC, e.g. 2016-12-09T00:00:00Z.
:return: The update_time of this PredefineTag.
:rtype: datetime
"""
return self._update_time
@update_time.setter
def update_time(self, update_time):
"""Sets the update_time of this PredefineTag.
        Update time, expressed in UTC, e.g. 2016-12-09T00:00:00Z.
:param update_time: The update_time of this PredefineTag.
:type: datetime
"""
self._update_time = update_time
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PredefineTag):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
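# Minimal usage sketch (added): the tag values below are made up; to_dict()
# serializes the three declared attributes, leaving unset ones as None.
if __name__ == "__main__":
    tag = PredefineTag(key="env", value="prod")
    print(tag.to_dict())  # {'key': 'env', 'value': 'prod', 'update_time': None}
    print(tag == PredefineTag(key="env", value="prod"))  # True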
|
[
"[email protected]"
] | |
16f3de4254710303d6a86d6135995be72fcc7625
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_199/664.py
|
4fc3d46c0a13dbea8850ff91f3343512c00a2b8e
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 711 |
py
|
def flip(pancakes, left_index, spatula_size):
for i in range(left_index, left_index + spatula_size):
pancakes[i] = not pancakes[i]
def solve(pancakes, spatula_size):
"""Flip left-to-right."""
flips = 0
for left_index in range(0, len(pancakes) - spatula_size + 1):
if not pancakes[left_index]:
flips += 1
flip(pancakes, left_index, spatula_size)
# print('FLIP: ', pancakes)
return flips if all(pancakes) else 'IMPOSSIBLE'
def main():
cases = int(input())
for case_num in range(1, cases + 1):
pancakes, spatula_size = input().split()
solution = solve([p == '+' for p in pancakes], int(spatula_size))
print('Case #{}: {}'.format(case_num, solution))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
36bbf0f5a20d0315d125827ef6e66ec12b5d9e74
|
2f71665621698da42e7f6d9245deea95325f3320
|
/energy.py
|
551577081cac09255f411ae0a2d53168fcb49ee4
|
[
"MIT"
] |
permissive
|
MiroK/fenics-rigid-motions
|
dfee68a4d726f16db2d293f0aef492d43698dba2
|
cd50c1641d0137ac7653f032fba15d0b23b26ac6
|
refs/heads/master
| 2020-12-02T10:03:37.532532 | 2017-07-13T06:16:59 | 2017-07-13T06:16:59 | 96,685,308 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,298 |
py
|
from dolfin import *
from block import block_mat, block_vec, block_transpose
from block.iterative import ConjGrad
from block.algebraic.petsc import AMG
from rigid_motions import first
import rigid_motions
def energy(lmbda, mu, f, h, mesh, Z=None):
'''
Solves
-div(sigma) = f in Omega
sigma.n = h on boundary
where sigma(u) = 2*mu*eps(u) + lambda*div(u)*I. The problem is reformulated by
considering a complemented strain energy (for which rigid motions are not
    transparent). The system is solved with CG as
P*[A+E]*[u] = P'*b
    with P a preconditioner. We run on a series of meshes to show mesh independence
of the solver.
'''
if not isinstance(mesh, Mesh):
# Precompute the 'symbolic' basis
mesh0 = first(mesh)
Z = rigid_motions.rm_basis(mesh0)
return [energy(lmbda, mu, f, h, mesh_, Z) for mesh_ in mesh]
# For cube
V = VectorFunctionSpace(mesh, 'CG', 1)
u, v = TrialFunction(V), TestFunction(V)
# Strain
epsilon = lambda u: sym(grad(u))
# Stress
gdim = mesh.geometry().dim()
sigma = lambda u: 2*mu*epsilon(u) + lmbda*tr(epsilon(u))*Identity(gdim)
# Energy of elastic deformation
a = inner(sigma(u), epsilon(v))*dx
A = assemble(a)
# Mass matrix for B
m = inner(u, v)*dx
M = assemble(m)
# NOTE: Avoiding use of Q space in the assembly - dense blocks!
Q = VectorFunctionSpace(mesh, 'R', 0, dim=6)
Zh = rigid_motions.RMBasis(V, Q, Z) # L^2 orthogonal
B = M*Zh
# System operator
AA = A + B*block_transpose(B)
# Right hand side
L = inner(f, v)*dx + inner(h, v)*ds
# Orthogonalize
P = rigid_motions.Projector(Zh)
b = assemble(L)
b0 = block_transpose(P)*b
# Preconditioner
AM = assemble(a + m)
BB = AMG(AM)
# Solve, using random initial guess
x0 = AA.create_vec()
as_backend_type(x0).vec().setRandom()
AAinv = ConjGrad(AA, precond=BB, initial_guess=x0, maxiter=100, tolerance=1E-8,
show=2, relativeconv=True)
x = AAinv*b0
# Functions from coefficients
# uh = Function(V, x) # Displacement
niters = len(AAinv.residuals) - 1
assert niters < 100
P*x # to get orthogonality
if MPI.rank(mesh.mpi_comm()) == 0:
print '\033[1;37;31m%s\033[0m' % ('Orthogonality %g' % max(P.alphas))
return V.dim(), niters
def test_energy():
'''Number of iterations should not blow up'''
lmbda = Constant(1)
mu = Constant(1)
f = Expression(('A*sin(2*x[0])', 'A*cos(3*(x[0]+x[1]+x[2]))', 'A*sin(x[2])'),
degree=3, A=0.01)
h = Constant((0, 0, 0))
comm = mpi_comm_world().tompi4py()
Ns = [2, 4, 8, 16, 32]
if comm.size > 2:
Ns.extend([64, 128])
meshes = (BoxMesh(Point(1, 1, 1), Point(2, 1.5, 1.25), N, N, N) for N in Ns)
converged = energy(lmbda, mu, f, h, meshes)
assert all(converged)
# Dump data for plotting
if comm.rank == 0:
from numpy import savetxt, array
savetxt('./.energy.txt', array(converged), fmt=['%d', '%d'])
return True
# ------------------------------------------------------------------------------
if __name__ == '__main__':
set_log_level(PROGRESS)
assert test_energy()
|
[
"[email protected]"
] | |
619d787fd1c5414403f3e6be8bac6d920d0a6743
|
9ac793d32e70775bb119aaddeb832624e3cf9281
|
/consoverloading.py
|
d6ef77beed8a28d74e31642020cc01266eca143a
|
[] |
no_license
|
prabhatpal77/Adv-python-polymorphism
|
9368311732e1bca9b54e099489c255e3498fbb9b
|
d68375e4816a746a1ffbffa6d179c50227267feb
|
refs/heads/master
| 2020-07-29T00:41:08.162385 | 2019-09-19T16:35:32 | 2019-09-19T16:35:32 | 209,601,547 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 457 |
py
|
# Constructor overloading: defining multiple constructors with the same or a different
# number of parameters within a class is known as constructor overloading.
class X:
def __init__(self):
print("in no parameter constructor of x")
def __init__(self, a, b):
print("in two parameter constructor of x")
def __init__(self, a):
print("in one parameter constructor of x")
x1=X(1000)
# x2=X()  # would raise TypeError: only the last __init__ (one required parameter) survives
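# Added note: Python keeps only the last __init__ defined above, so X behaves as a
# one-parameter constructor and X() with no arguments fails. The usual idiom is a
# single __init__ with optional parameters; class Y below is an illustrative sketch,
# not part of the original example.
class Y:
    def __init__(self, a=None, b=None):
        if a is None and b is None:
            print("in no parameter constructor of Y")
        elif b is None:
            print("in one parameter constructor of Y")
        else:
            print("in two parameter constructor of Y")

y1 = Y(1000)   # one parameter
y2 = Y()       # no parameters
y3 = Y(1, 2)   # two parameters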
|
[
"[email protected]"
] | |
ca92618105b8fcd2c360427f208e240fccd36c7b
|
031d0c267bef0cb8dad9a39b9863b2946a93e8bd
|
/pymap3d/azelradec.py
|
2338ab71388c7ae4544ca4e327c51ffd1a164e08
|
[
"BSD-2-Clause"
] |
permissive
|
nhz2/pymap3d
|
70a8e8987d7d739ff6d801b608830adc6de0d4fc
|
74dbe48fe794a27e67c599c0740d88e84d22b3c5
|
refs/heads/master
| 2020-08-01T10:52:25.182699 | 2019-09-24T15:21:51 | 2019-09-24T15:21:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,014 |
py
|
"""
Azimuth / elevation <==> Right ascension, declination
"""
from typing import Tuple
from datetime import datetime
from .vallado import azel2radec as vazel2radec, radec2azel as vradec2azel
from .timeconv import str2dt # astropy can't handle xarray times (yet)
try:
from astropy.time import Time
from astropy import units as u
from astropy.coordinates import Angle, SkyCoord, EarthLocation, AltAz, ICRS
except ImportError:
Time = None
__all__ = ["radec2azel", "azel2radec"]
def azel2radec(
az_deg: float, el_deg: float, lat_deg: float, lon_deg: float, time: datetime, usevallado: bool = False
) -> Tuple[float, float]:
"""
viewing angle (az, el) to sky coordinates (ra, dec)
Parameters
----------
az_deg : float
        azimuth [degrees clockwise from North]
el_deg : float
elevation [degrees above horizon (neglecting aberration)]
lat_deg : float
observer latitude [-90, 90]
lon_deg : float
observer longitude [-180, 180] (degrees)
time : datetime.datetime or str
time of observation
usevallado : bool, optional
default use astropy. If true, use Vallado algorithm
Returns
-------
ra_deg : float
        ecliptic right ascension (degrees)
dec_deg : float
ecliptic declination (degrees)
"""
if usevallado or Time is None: # non-AstroPy method, less accurate
return vazel2radec(az_deg, el_deg, lat_deg, lon_deg, time)
obs = EarthLocation(lat=lat_deg * u.deg, lon=lon_deg * u.deg)
direc = AltAz(location=obs, obstime=Time(str2dt(time)), az=az_deg * u.deg, alt=el_deg * u.deg)
sky = SkyCoord(direc.transform_to(ICRS()))
return sky.ra.deg, sky.dec.deg
def radec2azel(
ra_deg: float, dec_deg: float, lat_deg: float, lon_deg: float, time: datetime, usevallado: bool = False
) -> Tuple[float, float]:
"""
sky coordinates (ra, dec) to viewing angle (az, el)
Parameters
----------
ra_deg : float
        ecliptic right ascension (degrees)
dec_deg : float
ecliptic declination (degrees)
lat_deg : float
observer latitude [-90, 90]
lon_deg : float
observer longitude [-180, 180] (degrees)
time : datetime.datetime or str
time of observation
usevallado : bool, optional
default use astropy. If true, use Vallado algorithm
Returns
-------
az_deg : float
        azimuth [degrees clockwise from North]
el_deg : float
elevation [degrees above horizon (neglecting aberration)]
"""
if usevallado or Time is None:
return vradec2azel(ra_deg, dec_deg, lat_deg, lon_deg, time)
obs = EarthLocation(lat=lat_deg * u.deg, lon=lon_deg * u.deg)
points = SkyCoord(Angle(ra_deg, unit=u.deg), Angle(dec_deg, unit=u.deg), equinox="J2000.0")
altaz = points.transform_to(AltAz(location=obs, obstime=Time(str2dt(time))))
return altaz.az.degree, altaz.alt.degree
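# Minimal usage sketch (added): the observer location and time below are arbitrary.
# Run via `python -m pymap3d.azelradec` (the relative imports above need package
# context); with usevallado=True the astropy dependency is not required.
if __name__ == "__main__":
    t = datetime(2020, 1, 1, 12, 0, 0)
    ra, dec = azel2radec(180.0, 45.0, 42.0, -71.0, t, usevallado=True)
    az, el = radec2azel(ra, dec, 42.0, -71.0, t, usevallado=True)
    print("ra/dec:", ra, dec, "-> az/el:", az, el)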
|
[
"[email protected]"
] | |
291a36f12c47975a6010052d2f3be3a8a91bf36b
|
548ca3821601f3085eb9dde36b76b558a4334f0f
|
/apps/common/__init__.py
|
7cb93a9bca26544725a42c47ecb58ecef962471f
|
[] |
no_license
|
Erick-LONG/flbbs
|
9257726b120012f118999e267a63f15d558d3756
|
f2ac48d522799e4bb1ab278bd5a5cc7d56ca1fc8
|
refs/heads/master
| 2020-03-18T06:51:03.494630 | 2018-06-04T05:57:17 | 2018-06-04T05:57:17 | 134,418,124 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 28 |
py
|
from .views import common_bp
|
[
"[email protected]"
] | |
91d9a979cbfbb76e781425a6e40b815428a3fba0
|
6ec91b363b077bffd33f15300a0935124e9fb915
|
/Cracking_the_Code_Interview/Leetcode/1.Array/581.Shortest_Unsorted_Continuous_Subarray.py
|
59ee17c1d77f092052632f84b1267f3614dfbe84
|
[] |
no_license
|
lzxyzq/Cracking_the_Coding_Interview
|
03232515ae8eb50394d46322d36b230d1a626fcf
|
79dee7dab41830c4ff9e38858dad229815c719a0
|
refs/heads/master
| 2023-06-05T19:52:15.595289 | 2021-06-23T22:46:02 | 2021-06-23T22:46:02 | 238,068,000 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,967 |
py
|
# Given an integer array, you need to find one continuous subarray that if you only sort this subarray in ascending order, then the whole array will be sorted in ascending order, too.
# You need to find the shortest such subarray and output its length.
'''
Example 1:
Input: [2, 6, 4, 8, 10, 9, 15]
Output: 5
Explanation: You need to sort [6, 4, 8, 10, 9] in ascending order to make the whole array sorted in ascending order.
'''
from typing import List  # needed for the List[int] annotations used below

# Method 1
class Solution:
def findUnsortedSubarray(self, nums: List[int]) -> int:
nums_copy = sorted(nums)
start = len(nums)
end = 0
for i in range(len(nums)):
if nums_copy[i] != nums[i]:
start = min(start,i)
end = max(end,i)
return end - start + 1 if end - start >= 0 else 0
# Time complexity : O(nlogn). Sorting takes nlogn time.
# Space complexity : O(n). We are making copy of original array.
# Method 2 (TLE)
# Better Brute Force
class Solution:
def findUnsortedSubarray(self, nums: List[int]) -> int:
start = len(nums)
end = 0
for i in range(len(nums)):
for j in range(i+1,len(nums)):
if nums[j] < nums[i]:
start = min(start,i)
end = max(end,j)
return end - start + 1 if end - start >= 0 else 0
# Time complexity : O(n2).Two nested loops are there.
# Space complexity : O(1). Constant space is used.
# Method 3
# Using Stack
class Solution:
def findUnsortedSubarray(self, nums: List[int]) -> int:
start = len(nums)
end = 0
stack = []
for i in range(len(nums)):
while stack and nums[stack[-1]] > nums[i]:
start = min(start,stack.pop())
stack.append(i)
stack.clear()
for i in range(len(nums)-1,-1,-1):
while stack and nums[stack[-1]] < nums[i]:
end = max(end,stack.pop())
stack.append(i)
return end - start + 1 if end - start >= 0 else 0
# Method 4
class Solution:
def findUnsortedSubarray(self, nums: List[int]) -> int:
if len(nums) == 1:
return 0
min_ = float("inf")
max_ = float("-inf")
flag = False
for i in range(1,len(nums)):
if nums[i] < nums[i-1]:
flag = True
if flag:
min_ = min(min_,nums[i])
flag = False
for i in range(len(nums)-2,-1,-1):
if nums[i] > nums[i+1]:
flag = True
if flag:
max_ = max(max_, nums[i])
for l in range(len(nums)):
if min_ < nums[l]:
break
for r in range(len(nums)-1,-1,-1):
if max_ > nums[r]:
break
return r - l + 1 if r - l > 0 else 0
# Time complexity : O(n). Four O(n) loops are used.
# Space complexity : O(1). Constant space is used.
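# Sanity check (added): exercises whichever Solution class is bound last
# (Method 4 above) on the example from the problem statement; expected answer 5.
if __name__ == "__main__":
    assert Solution().findUnsortedSubarray([2, 6, 4, 8, 10, 9, 15]) == 5
    print("ok")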
|
[
"[email protected]"
] | |
7270bd3fa6b6b145daf3013f882bea4b4cdd44be
|
0343e1bf2154ea4b1515264d365faecfe3506e5b
|
/Atom.py
|
4788d33ee37b23ab32baa3e618f02369867cf7ee
|
[] |
no_license
|
declanoller/hyperevo
|
4273fb1d94b670a69c4f25c2e5552e735b934630
|
ff3d62f8305c5ecbdcc5e51603d1bbfb042c1408
|
refs/heads/master
| 2020-04-20T16:59:07.431135 | 2019-02-03T18:02:19 | 2019-02-03T18:02:19 | 168,975,673 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,969 |
py
|
import numpy as np
from math import tanh
import sympy
import os
import FileSystemTools as fst
import json
from copy import copy
'''
I guess for this one, the output_weights should probably be tuples of the form (atom, input #, weight),
because a given atom can have multiple inputs.
Likewise, the output_weights for this should probably have an entry for each output.
So it's:
-a dict for each output
-each entry of that is a dict of the index of each atom this output goes to
-each entry of that is a dict from the input index of that atom to the corresponding weight.
err, or the second two could just be combined into a (index, input) : weight dict?
Hmm, might make it harder to get all the indices easily.
inputs_received should also be a dict now, one for each atom input. Also for input_indices.
value should now be an array, one for each output.
needs:
--clear atom
--inputs, outputs
--addToInputsReceived (probably have to specify the atom AND the input index now?)
eventually: make a simple dict that has the number of times outputted to a given atom
from this atom (counting all its output indices), so you don't have to run the slow getAllChildAtoms()
every time.
eventually: more complicated atoms also don't actually need to be composed of nodes, they
should just be a huge function that's composed of all the smaller ones.
Before I was using None for the value, but we want the values to be 0 by default,
for example of atoms that get no input.
'''
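# Added illustration (indices and weights are made up) of the structures described above:
# an atom whose output 0 feeds atom 7 at its inputs 0 and 2 stores
#     output_weights = {0: {7: {0: 0.31, 2: -0.05}}}
# while atom 7, assuming this atom's index is 3, records the mirror image as
#     input_indices = {0: {3: [0]}, 1: {}, 2: {3: [0]}}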
class Atom:
def __init__(self, atom_index, type, **kwargs):
self.atom_index = atom_index
self.type = type
self.value = None
self.is_input_atom = False
self.is_output_atom = False
self.is_bias_atom = False
#print('creating atom (index {}) of type {}'.format(self.atom_index, self.type))
self.loadAtomFromModuleName(self.type)
'''if type=='Node':
self.N_inputs = 1
self.N_outputs = 1
elif type=='module':
pass
#self.atom = EPANN()
# Import atom properties here from json file
#self.N_inputs = self.atom.N_inputs
#self.N_outputs = self.atom.N_outputs'''
# input_indices is going to be a dict, with a dict for each of its
# inputs. Then, each of those dicts is going to be filled with entries of the form:
# par_atom_index : [par_atom_input_index1, par_atom_input_index2, ...]
self.input_indices = {i : {} for i in range(self.N_inputs)}
self.inputs_received = {i : [] for i in range(self.N_inputs)}
# output_weights is going to be a dict, with a dict for each of its
# outputs. Then, each of those dicts is going to be filled with entries of the form:
# child_atom_index : {child_atom_input_index1 : w1, child_atom_input_index2: w2, ...}
self.output_weights = {i : {} for i in range(self.N_outputs)}
################# setup stuff
def setToInputAtom(self):
self.is_input_atom = True
def setToOutputAtom(self):
self.is_output_atom = True
def setToBiasAtom(self):
self.is_bias_atom = True
self.value = [1.0]
def loadAtomFromModuleName(self, module_name):
module_dir = 'atom_modules'
ext = '.json'
module_fname = fst.combineDirAndFile(module_dir, f'{module_name}{ext}')
assert os.path.isfile(module_fname), f'File {module_fname} doesnt exist!'
self.loadAtomFromFile(module_fname)
def loadAtomFromFile(self, fname):
# This loads a NN from a .json file that was saved with
# saveNetworkToFile(). Note that it will overwrite any existing NN
# for this object.
with open(fname) as json_file:
NN_dict = json.load(json_file)
self.N_inputs = NN_dict['N_inputs']
self.N_outputs = NN_dict['N_outputs']
if NN_dict['Name'] == 'InputNode':
self.setToInputAtom()
if NN_dict['Name'] == 'OutputNode':
self.setToOutputAtom()
if NN_dict['Name'] == 'BiasNode':
self.setToBiasAtom()
self.input_symbols = [sympy.symbols('a_{}'.format(ind)) for ind in range(self.N_inputs)]
self.atom_function_vec = NN_dict['atom_function_vec']
# This converts it from a list of strings to a list of sympy expressions
self.atom_function_vec = [sympy.sympify(fn) for fn in self.atom_function_vec]
# This lambdify's them, so they can just be given arbitrary input vectors to be eval'd.
self.atom_fn = sympy.lambdify(self.input_symbols, self.atom_function_vec)
################################# Getters
def getAllInputIndices(self):
# This just gives a list of the input indices for this node
return(list(self.input_indices.keys()))
def getAllOutputIndices(self):
# This just gives a list of the output indices for this node
return(list(self.output_weights.keys()))
def getAllParentAtoms(self):
# This returns only the par atom indices, not which of their output indices.
# So it's giving the pars of all inputs, which is why it uses a set (in case
# two inputs have the same par node)
par_list = []
for v in self.input_indices.values():
if v:
par_list += list(v.keys())
return(list(set(par_list)))
def getParentAtomsOfInput(self, atom_input_index):
# Returns the par atoms of a single input index.
# This returns only the par atom indices, not which of their output indices.
return(list(self.input_indices[atom_input_index].keys()))
def getAllChildAtoms(self):
children_list = []
for v in self.output_weights.values():
if v:
children_list += list(v.keys())
return(list(set(children_list)))
def getChildAtomsOfOutput(self, atom_input_index):
# This returns only the child atom indices, not which of their output indices.
return(list(self.output_weights[atom_input_index].keys()))
def getChildAtomInputIndices(self, par_output_ind, child_atom_ind):
# This is for, if you have the output index of this atom,
# and the child atom you're looking at, which input indices it goes to and
# the weights for those inputs.
'''print('\n\nind: ', self.atom_index)
print('ow:', self.output_weights)
print('ow[par_out_ind]', self.output_weights[par_output_ind])'''
return(list(self.output_weights[par_output_ind][child_atom_ind].keys()))
def getOutputWeight(self, par_output_ind, child_atom_ind, child_atom_input_ind):
# par_output_ind is the index of the output of this atom that we're changing.
# child_atom_ind is the index of the atom that's being output to.
# child_atom_input_ind is the index of the input of that atom (will be 0
# for nodes, could be higher for more complex atoms).
# val is if you want to set the val, std is if you want to set it to a random gaussian.
return(self.output_weights[par_output_ind][child_atom_ind][child_atom_input_ind])
def getOutputWeightStr(self):
w_str = ', '.join(['{} : {}'.format(k,v) for k,v in self.output_weights.items()])
s = '[{}]'.format(w_str)
return(s)
################### atom modifiers
def addToInputIndices(self, par_atom_index, par_atom_output_index, child_atom_input_index):
#self.input_indices[input_ind].append(new_input_ind)
# If that atom ind isn't in the output_weights dict yet, add an empty dict for it.
if par_atom_index not in self.getParentAtomsOfInput(child_atom_input_index):
self.input_indices[child_atom_input_index][par_atom_index] = []
self.input_indices[child_atom_input_index][par_atom_index].append(par_atom_output_index)
def removeFromInputIndices(self, par_atom_index, par_atom_output_index, child_atom_input_index):
self.input_indices[child_atom_input_index][par_atom_index].remove(par_atom_output_index)
# if that was the last output index from that par, this atom no longer gets input from
# that par and we can remove it.
if not self.input_indices[child_atom_input_index][par_atom_index]:
self.input_indices[child_atom_input_index].pop(par_atom_index, None)
def addToOutputWeights(self, par_output_index, child_atom_index, child_atom_input_index, val=None, std=0.1):
# par_output_index is the index of the output of this atom that we're changing.
# child_atom_index is the index of the atom that's being output to.
# child_atom_input_index is the index of the input of that atom (will be 0
# for nodes, could be higher for more complex atoms).
# val is if you want to set the val, std is if you want to set it to a random gaussian.
if val is None:
val = np.random.normal(scale=std)
# If that atom ind isn't in the output_weights dict yet, add an empty dict for it.
if child_atom_index not in self.getChildAtomsOfOutput(par_output_index):
self.output_weights[par_output_index][child_atom_index] = {}
self.output_weights[par_output_index][child_atom_index][child_atom_input_index] = val
def removeFromOutputWeights(self, par_output_index, child_atom_index, child_atom_input_index):
self.output_weights[par_output_index][child_atom_index].pop(child_atom_input_index, None)
if not self.output_weights[par_output_index][child_atom_index]:
self.output_weights[par_output_index].pop(child_atom_index, None)
def mutateOutputWeight(self, par_output_index, child_atom_index, child_atom_input_index, std=0.1):
self.output_weights[par_output_index][child_atom_index][child_atom_input_index] += np.random.normal(scale=std)
##################### I/O stuff
def setInputAtomValue(self, val):
assert self.is_input_atom, 'Can only directly set the value of an input atom!'
# Uhhh I think here I'm gonna assume that only Node atoms will be inputs.
self.value = [val]
def getValue(self):
# Think carefully how to do this!
# So... I think what I'll do is, for each input ind, just sum
# the inputs. Then it will be up to the atom itself to do a nonlinearity
# or whatever.
# So for a simple Node, that will happen in its forward pass.
if self.value is not None:
# Bias atoms should have a .value by default.
# Input atoms should have been given a .value before this was called.
return(self.value)
else:
assert not self.is_bias_atom, '.value attr must already be set with bias atom to call getValue()!'
assert not self.is_input_atom, '.value attr must already be set with input atom to call getValue()!'
self.value = self.forwardPass()
def getValueOfOutputIndex(self, output_ind):
if self.value is None:
self.getValue()
return(self.value[output_ind])
def forwardPass(self):
'''
This will assume that the atom has already gotten all the inputs it needs
to. You'll need to do clearAtom() on this.
Right now this is just for figuring out the analytic form of the NN function.
'''
atom_input_vec = [sum(v) for v in self.inputs_received.values()]
#print('input vec for atom {}: {}'.format(self.atom_index, atom_input_vec))
output_vec = copy(self.atom_function_vec)
#print('atom {} output vec before: {}'.format(self.atom_index, output_vec))
# For each output index...
for output_index in range(self.N_outputs):
# Replace the atom input with the input to that input index of the atom.
for input_index in range(self.N_inputs):
output_vec[output_index] = output_vec[output_index].subs('a_{}'.format(input_index), atom_input_vec[input_index])
#print('atom {} output vec after: {}'.format(self.atom_index, output_vec))
return(output_vec)
def clearInputs(self):
# I'm not sure why it had the if statement... is that needed for some reason??
#if not self.is_input_atom:
self.inputs_received = {i : [] for i in range(self.N_inputs)}
def clearAtom(self):
# N.B.: this clears the VALUE of the node, which is just the thing it
# stores.
self.clearInputs()
if not self.is_bias_atom:
self.value = None
def addToInputsReceived(self, input_ind, val):
self.inputs_received[input_ind].append(val)
''' SCRAP
def getValue(self):
# Think carefully how to do this!
# So... I think what I'll do is, for each input ind, just sum
# the inputs. Then it will be up to the atom itself to do a nonlinearity
# or whatever.
# So for a simple Node, that will happen in its forward pass.
if self.value is not None:
# Bias atoms should have a .value by default.
# Input atoms should have been given a .value before this was called.
return(self.value)
else:
assert not self.is_bias_atom, '.value attr must already be set with bias atom to call getValue()!'
assert not self.is_input_atom, '.value attr must already be set with input atom to call getValue()!'
self.value = self.forwardPass()
return(self.value)
if self.type == 'Node':
sum_tot = [sum(v) for v in self.inputs_received.values()]
if self.is_output_atom:
self.value = sum_tot
else:
self.value = [self.nonlinear(s) for s in sum_tot]
return(self.value)
else:
self.value = self.forwardPass()
'''
#
|
[
"[email protected]"
] | |
6c43c50d4f5bd3c8db2cb275e43b1e2924c155c4
|
6569158699caec02fca237748b537863b861460c
|
/src/similarity/heterogeneous/PathSimStrategy.py
|
d3d3a468eb321db70008c2b917cd2cd3494c99d0
|
[] |
no_license
|
wfnuser/RicherPathSIM
|
7c570ed35680c99643408ca9d1ccc40e935b4a36
|
253906f9a2fe4fb4d3451ebd1d3b51de51e0d239
|
refs/heads/master
| 2021-01-11T06:19:12.669466 | 2013-06-20T23:05:46 | 2013-06-20T23:05:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,378 |
py
|
import numpy
from src.similarity.MetaPathSimilarityStrategy import MetaPathSimilarityStrategy
__author__ = 'jontedesco'
class PathSimStrategy(MetaPathSimilarityStrategy):
"""
Class that implements the PathSim similarity measure for same-typed nodes on heterogeneous graphs. Based on
publication by Yizhou Sun et al. NOTE: This assumes that any given meta path is symmetric.
@see http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.220.2455
"""
def findSimilarityScore(self, source, destination):
"""
        Find the PathSim similarity score between the source and destination nodes.
"""
partialMetaPath = self.metaPath[:len(self.metaPath)/2 + len(self.metaPath) % 2]
# Get the number of meta paths between source and destination
if self.conserveMemory:
# Slow, but less in-memory storage
numSourceDestinationPaths = len(self.metaPathUtility.findMetaPaths(self.graph, source, destination, self.metaPath, True))
else:
# Faster, but requires more memory
firstHalfAdjMatrix, firstHalfIndex = self.metaPathUtility.getAdjacencyMatrixFromGraph(
self.graph, partialMetaPath, project=True, symmetric=True)
secHalfAdjMatrix, secHalfIndex = self.metaPathUtility.getAdjacencyMatrixFromGraph(
self.graph, list(reversed(partialMetaPath)), project=True, symmetric=True)
adjMatrix = numpy.dot(firstHalfAdjMatrix, secHalfAdjMatrix)
numSourceDestinationPaths = adjMatrix[firstHalfIndex[source]][secHalfIndex[destination]]
# Get cycle counts
sourceNeighbors = self.metaPathUtility.findMetaPathNeighbors(self.graph, source, partialMetaPath, True)
destinationNeighbors = self.metaPathUtility.findMetaPathNeighbors(self.graph, destination, partialMetaPath, True)
numSourceDestinationCycles = 0
for node, neighbors in [(source, sourceNeighbors), (destination, destinationNeighbors)]:
for neighbor in neighbors:
paths = self.metaPathUtility.findMetaPaths(self.graph, node, neighbor, partialMetaPath, True)
numSourceDestinationCycles += len(paths) ** 2
# Compute the PathSim similarity scores of the two nodes
similarityScore = (2.0 * numSourceDestinationPaths) / float(numSourceDestinationCycles)
return similarityScore
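# Note: the score above is the PathSim measure
#   s(x, y) = 2 * |P(x ~> y)| / (|P(x ~> x)| + |P(y ~> y)|),
# with the denominator accumulated as squared half-path counts because the meta
# path is assumed to be symmetric.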
|
[
"[email protected]"
] | |
22b8bc857da47675a13651377ee28938d3bc1028
|
5c7f2ff956b1fd1477d56486e239b6e661a08efd
|
/supervised_learning/0x06-keras/4-train.py
|
3af2ede58a820421ddda52a4111f58e7d5ef1030
|
[] |
no_license
|
diego0096/holbertonschool-machine_learning
|
60c5f40e185df04d02d9887d966542e85a981896
|
64b8984846c2b2b88bbf11125b55b482c7b74eea
|
refs/heads/master
| 2023-04-02T01:27:59.263397 | 2021-04-02T21:33:51 | 2021-04-02T21:33:51 | 279,229,931 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 524 |
py
|
#!/usr/bin/env python3
"""Module used to"""
import tensorflow.keras as K
def train_model(
network,
data,
labels,
batch_size,
epochs,
verbose=True,
shuffle=False):
"""Function that trains a model"""
history = network.fit(data,
labels,
epochs=epochs,
batch_size=batch_size,
shuffle=shuffle,
verbose=verbose)
return(history)
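# Usage sketch (hypothetical; assumes a compiled Keras model `network` and
# NumPy arrays `X_train`, `Y_train` already exist):
#   history = train_model(network, X_train, Y_train, batch_size=32, epochs=5)
#   print(history.history['loss'])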
|
[
"[email protected]"
] | |
43690166d40128ffbbd3a038c62b64dc7eeb5ea7
|
ebcc57cbd7bc4c951fe3cf9826efc2d03d1e47e8
|
/Chapter1/Q1.8.py
|
d8d57988cc79c56d9f0a1356d1eb38f885e22581
|
[] |
no_license
|
Vahid-Esmaeelzadeh/CTCI-Python
|
17a672e95f1d886f4fb66239a4aa22a87f38382a
|
867360ab13dd63d24d6f3e45b5ac223755942b54
|
refs/heads/master
| 2022-10-26T16:43:54.939188 | 2020-06-11T21:42:15 | 2020-06-11T21:42:15 | 190,065,582 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,358 |
py
|
'''
Zero Matrix: Write an algorithm such that if an element in an MxN matrix is 0, its entire row and
column are set to 0.
'''
def zeroMatrix(mat: list):
rowNum = len(mat)
colNum = len(mat[0]) if rowNum != 0 else 0
if rowNum == 0 or colNum == 0:
return
firstRowHasZero = mat[0].count(0) > 0
firstColHasZero = False
for x in mat:
if x[0] == 0:
firstColHasZero = True
break
# use the first row and col to save the zero existence in rows and cols
for i in range(1, rowNum):
for j in range(1, colNum):
if mat[i][j] == 0:
mat[0][j] = 0
mat[i][0] = 0
# make zero the rows
for i in range(1, rowNum):
if mat[i][0] == 0:
for j in range(1, colNum):
mat[i][j] = 0
# make zero the cols
for j in range(1, colNum):
if mat[0][j] == 0:
for i in range(1, rowNum):
mat[i][j] = 0
# make zero the first row
if firstRowHasZero:
for j in range(colNum):
mat[0][j] = 0
# make zero the first col
if firstColHasZero:
for i in range(rowNum):
mat[i][0] = 0
a = [[0, 2, 3, 4, 5],
[4, 1, 6, 7, 7],
[4, 7, 0, 6, 2],
[1, 4, 5, 7, 8],
[6, 6, 6, 6, 0]]
zeroMatrix(a)
for x in a:
print(x)
|
[
"[email protected]"
] | |
bee1cfef342afe3e9ebadd5185c7059521be9dfc
|
54417b54c6e025a5d9bd89ae119c9134ccca4510
|
/test/test_connectomics/datastructures/testvoxel.py
|
43b80fd448b4b48a89d428df47cb82d55b2de147
|
[] |
no_license
|
SheetsKG/py-connectome-analysis
|
15481d579c089010b031d57141486114a2a153b2
|
169274d562b2981bc6f04032797c87ca5a66bbb0
|
refs/heads/master
| 2021-01-22T15:42:33.947279 | 2014-09-16T21:06:25 | 2014-09-16T21:06:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,230 |
py
|
'''
Created on Feb 3, 2014
@author: u0490822
'''
import unittest
import numpy as np
import connectome_analysis as ca
import connectome_analysis.datastructures.voxel as voxel
import scipy.spatial.distance as sdist
class VoxelTest(unittest.TestCase):
def checkIndicies(self, vol, position, expectedIndicies, msg=None):
indicies = vol.CoordToIndex(position)
outstr = "Position %s should map to indicies of %s not %s" % (str(position), str(indicies), str(expectedIndicies))
if not msg is None:
outstr = outstr + "\n" + msg
self.assertTrue(np.all(indicies == expectedIndicies), outstr)
return
def testCoordinates(self):
vox_size = np.array([10, 10, 10.0])
vol_dim = np.array([8, 16, 32])
vol_origin = np.array([15, 0.0, 10.0])
vol = ca.voxel.Volume.Create(voxel_size=vox_size, voxel_count=vol_dim, origin=vol_origin)
indicies = vol.CoordToIndex(vol_origin)
self.checkIndicies(vol, vol_origin, np.array([0, 0, 0]), "Origin should map to zero indicies")
self.checkIndicies(vol, vol_origin - (vox_size / 2.0), np.array([-1, -1, -1]))
self.checkIndicies(vol, vol_origin + (vox_size / 2.0), np.array([0, 0, 0]))
self.checkIndicies(vol, vol_origin + vox_size, np.array([1, 1, 1]))
self.checkIndicies(vol, vol_origin + (vox_size * 1.5), np.array([1, 1, 1]))
vol.voxels[0:2, 0:4, 0:8] = True
vol.Save('C:\\Temp\\TestVoxels.nrrd')
vol.Save('C:\\Temp\\TestVoxels.binvox')
pass
def testSphere(self):
'''Create a voxellized sphere to ensure our voxel pipeline works'''
vox_size = np.array([1, 1, 1])
vol_dim = np.array([32, 32, 32])
vol_origin = np.array([0, 0.0, 0.0])
sphere_center = (vol_dim / 2.0) * vox_size
vol = ca.voxel.Volume.Create(voxel_size=vox_size, voxel_count=vol_dim, origin=vol_origin)
for iX in range(0, vol_dim[0]):
print "X: " + str(iX)
for iY in range(0, vol_dim[1]):
for iZ in range(0, vol_dim[2]):
coord = np.array([iX, iY, iZ])
dist = sdist.pdist(np.vstack((sphere_center, coord)))
vol.voxels[iX, iY, iZ] = np.any(dist < 12.0)
vol.Save('C:\\Temp\\TestSphere.binvox')
def AddBoundingBoxToVolume(self, voxvol, BoundingBox):
(RegionOrigin, RegionVoxCount) = voxel.VoxelRegion(BoundingBox, voxvol.voxsize, voxvol.origin)
indicies = RegionOrigin / voxvol.voxsize # vol.CoordToIndex(RegionOrigin)
endIndex = indicies + RegionVoxCount
if indicies.ndim == 1:
voxvol.voxels[indicies[0]:endIndex[0],
indicies[1]:endIndex[1],
indicies[2]:endIndex[2]] = True
else:
for iRow in range(0, indicies.shape[0]):
voxvol.voxels[indicies[iRow, 0]:endIndex[iRow, 0],
indicies[iRow, 1]:endIndex[iRow, 1],
indicies[iRow, 2]:endIndex[iRow, 2]] = True
def testBoundingBox(self):
vox_size = np.array([10, 10, 10.0])
vol_dim = np.array([32, 32, 32])
vol_origin = np.array([0, 0.0, 0.0])
BoundingBox = np.array([10, 19, 31, 19, 40, 50])
(RegionOrigin, RegionVoxCount) = voxel.VoxelRegion(BoundingBox, vox_size)
self.assertTrue(np.all(RegionOrigin == np.array([10, 10, 30])))
self.assertTrue(np.all(RegionVoxCount == np.array([1, 3, 2])))
def testBoundingBox2(self):
'''Ensure all voxels within a bounding box are reported'''
vox_size = np.array([10, 10, 10])
vol_dim = np.array([32, 32, 32])
vol_origin = np.array([15, 0.0, -10.0])
vol = voxel.Volume.Create(voxel_size=vox_size, voxel_count=vol_dim, origin=vol_origin)
# BoundingBox = [MinX MinY MinZ MaxX MaxY MaxZ]
BoundingBox = np.array([27, 20, 1, 49, 40, 19])
# BoundingBox = np.array([25, 20, -10, 55, 80, 30])
self.AddBoundingBoxToVolume(vol, BoundingBox)
BoundingBoxes = np.array([[75, 50, 30, 95, 80, 40],
[75, 50, 50, 95, 120, 90]])
self.AddBoundingBoxToVolume(vol, BoundingBoxes)
vol.Save('C:\\Temp\\TestBoundingBox.binvox')
def testBigVolume(self):
'''Ensure all voxels within a bounding box are reported'''
vox_size = np.array([.5, 1, 2])
vol_dim = np.array([512, 512, 512])
vol_origin = np.array([0, 0.0, 0])
vol = voxel.Volume.Create(voxel_size=vox_size, voxel_count=vol_dim, origin=vol_origin)
# BoundingBox = [MinX MinY MinZ MaxX MaxY MaxZ]
BoundingBox = np.array([64, 1, 255, 255, 511, 356])
# BoundingBox = np.array([25, 20, -10, 55, 80, 30])
self.AddBoundingBoxToVolume(vol, BoundingBox)
# BoundingBoxes = np.array([[75, 50, 30, 95, 80, 40],
# [75, 50, 50, 95, 120, 90]])
# self.AddBoundingBoxToVolume(vol, BoundingBoxes)
vol.Save('C:\\Temp\\TestLargeVolume.binvox')
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
[
"[email protected]"
] | |
2b8c4867f20c06f28ecc9fbcf2774b04be05a04e
|
c15a28ae62eb94dbf3ed13e2065195e572a9988e
|
/Fluent Python/20/20.2.py
|
c2c01f531425d86d457d80596f247fa9439c1bda
|
[] |
no_license
|
xuyuchends1/python
|
10798c92840a1a59d50f5dc5738b2881e65f7865
|
545d950a3d2fee799902658e8133e3692939496b
|
refs/heads/master
| 2021-01-25T07:07:04.812140 | 2020-02-28T09:25:15 | 2020-02-28T09:25:15 | 93,647,064 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 815 |
py
|
class Quantity:
_count=0
def __init__(self):
cls=self.__class__
prefix=cls.__name__
index=cls._count
self.storage_name='_{}#{}'.format(prefix,index)
cls._count+=1
def __set__(self, instance, value):
if value>0:
instance.__dict__[self.storage_name]=value
else:
raise ValueError("value must be >0")
def __get__(self, instance, owner):
return getattr(instance,self.storage_name)
class LineItem:
weight=Quantity()
price=Quantity()
def __init__(self,description,weight,price):
self.description=description
self.weight=weight
self.price=price
def subtotal(self):
return self.weight*self.price
count=LineItem('test',20,18.3)
temp=getattr(count,'_Quantity#0')
pass
|
[
"[email protected]"
] | |
25c8366f63832321581fc1dce1f206218e841d65
|
2dc17d12ff6ea9794177c81aa4f385e4e09a4aa5
|
/archive/698PartitiontoKEqualSumSubsets.py
|
1a8ec7581d9cf1bb49975573f231f9da53963e1b
|
[] |
no_license
|
doraemon1293/Leetcode
|
924b19f840085a80a9e8c0092d340b69aba7a764
|
48ba21799f63225c104f649c3871444a29ab978a
|
refs/heads/master
| 2022-10-01T16:20:07.588092 | 2022-09-08T02:44:56 | 2022-09-08T02:44:56 | 122,086,222 | 0 | 0 | null | null | null | null |
WINDOWS-1252
|
Python
| false | false | 1,607 |
py
|
# coding=utf-8
'''
Created on 2017-10-16
@author: Administrator
'''
class Solution(object):
def canPartitionKSubsets(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
summ = sum(nums)
if summ % k != 0: return False
target = summ / k
if max(nums) > target:
return False
used = [False] * len(nums)
nums = sorted(nums, reverse = True)
memo = set()
def dfs(cur_target, k, nums, used, st,):
# print cur_target, k, nums, used, st
key = (cur_target, k)
if key in memo:
return False
if k == 1:
return True
if cur_target == 0:
if dfs(target, k - 1, nums, used, 0):
return True
else:
memo.add(key)
for i in range(st, len(nums)):
if used[i] == False and nums[i] <= cur_target:
used[i] = True
if dfs(cur_target - nums[i], k, nums, used, i + 1):
return True
used[i] = False
if dfs(cur_target, k, nums, used, i + 1):
return True
memo.add(key)
return False
return dfs(target, k, nums, used, 0)
nums = [4, 3, 2, 3, 5, 2, 1]
k = 4
nums = [2, 2, 2, 2, 3, 4, 5]
nums = [3522, 181, 521, 515, 304, 123, 2512, 312, 922, 407, 146, 1932, 4037, 2646, 3871, 269]
k = 5
print Solution().canPartitionKSubsets(nums, k)
|
[
"[email protected]"
] | |
7cf12da9090e35e42593c445e9e5eb711089d0fb
|
a52066a5f390e1372fd4de78c69c16b5e247e46a
|
/property/admin.py
|
2d37a754e348b2cdbdd1282e97ca6faed46d8e92
|
[] |
no_license
|
bl4ck4ndbr0wn/landville-backend-web-api
|
48de112b50a16da81611b550a91bd71486b20824
|
2248e95a91ffabc0c69fad25ba69a7ade1081512
|
refs/heads/develop
| 2022-12-14T11:18:29.294693 | 2019-09-17T07:58:41 | 2019-09-17T07:58:41 | 230,882,054 | 0 | 0 | null | 2022-12-08T05:34:56 | 2019-12-30T08:54:55 | null |
UTF-8
|
Python
| false | false | 283 |
py
|
from django.contrib import admin
from .models import (Property, PropertyReview,
PropertyInspection, PropertyEnquiry)
admin.site.register(Property)
admin.site.register(PropertyReview)
admin.site.register(PropertyEnquiry)
admin.site.register(PropertyInspection)
|
[
"[email protected]"
] | |
f3b2047bb8c15d009d5cfdc2294fd57939f37105
|
ff3eb18d5c3234a7e23a03fac7f8cc2a9bf94547
|
/glearn/_likelihood/_base_likelihood.py
|
be0f78987b4f72087c2946cdb8b65715e075c570
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ameli/glearn
|
afe9f73edcf1719a9a59600d3934ce3653d7e43a
|
c5183c746306522e74e163b64ef115a65681266c
|
refs/heads/main
| 2023-08-16T16:36:37.097729 | 2023-08-15T23:38:08 | 2023-08-15T23:38:08 | 373,664,668 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,372 |
py
|
# SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <[email protected]>
# SPDX-License-Identifier: BSD-3-Clause
# SPDX-FileType: SOURCE
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the license found in the LICENSE.txt file in the root directory
# of this source tree.
# =======
# Imports
# =======
from ..device._timer import Timer
# ===============
# Base Likelihood
# ===============
class BaseLikelihood(object):
"""
"""
# ====
# init
# ====
def __init__(self, mean, cov, z):
"""
"""
# Input attributes
self.mean = mean
self.cov = cov
self.z = z
# Member data
self.X = self.mean.X
self.b = self.mean.b
self.B = self.mean.B
self.Binv = self.mean.Binv
self.mixed_cor = self.cov.mixed_cor
if self.B is not None:
# Translate data to the mean of prior of beta.
self.z = self.z - self.X @ self.b
# Degrees of freedom of linear model
if self.B is None:
m = self.X.shape[1]
self.dof = m
else:
self.dof = 0
# Residual degrees of freedom
n = self.X.shape[0]
self.rdof = n - self.dof
# Counting elapsed wall time and cpu process time
self.timer = Timer()
|
[
"[email protected]"
] | |
748335d095a70db2081c5a1775861689f7ca3d8a
|
1fd7d0ac2903beb5ef70370b22485a3b43df7466
|
/Machine Learning/Klassifikation/Logistische Regression.py
|
b830413869dfe89c624468c86e2c1fc8d7429582
|
[
"MIT"
] |
permissive
|
stanman71/Python_Basics
|
a34b3ea95b035ced5e607a8ba4841836c7667666
|
fe442e421362b22f61d05235e835a568d9ce3aef
|
refs/heads/master
| 2021-06-07T21:29:58.565300 | 2019-09-22T21:07:56 | 2019-09-22T21:07:56 | 161,891,286 | 1 | 0 |
MIT
| 2021-05-08T16:50:05 | 2018-12-15T09:47:23 |
CSS
|
UTF-8
|
Python
| false | false | 2,234 |
py
|
""" Unter logistischer Regression oder Logit-Modell versteht man Regressionsanalysen zur
Modellierung der Verteilung abhängiger zufälliger (diskreter) Variablen und Zuordnung
zu einer Klasse.
Ziel: Kurvenverlauf mit möglichst geringen Abstand zu den einezlnen Punkten """
## ##########################
## Part 0: Reading the data
## ##########################
import pandas as pd
df = pd.read_csv("./Python_Training/Machine Learning/Klassifikation/CSV/classification.csv")
## ################################################################
## Part 1: Split into training and test data (here: 75% / 25%)
## ################################################################
from sklearn.model_selection import train_test_split
# Which columns should be used for prediction
X = df[["age", "interest"]].values
""" Oder: Die Spalte "success" soll nicht zur Vorhersage verwendet werden:
X = df.drop("success", axis = 1).values """
y = df["success"].values
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 0, test_size = 0.25)
## ########################
## Part 2: Scale the data
## ########################
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
## #########################
## Part 3: Train the model
## #########################
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(solver='lbfgs')
model.fit(X_train, y_train)
# Quality of the model
print(model.score(X_test, y_test))
## ##########################
## Part 4: Plot the results
## ##########################
""" Hinweis: Benötigt plot_classifier.py """
from Support.plot_classifier import plot_classifier
# Plot the training data (proba refers to the visualization of the decision boundary)
plot_classifier(model, X_train, y_train, proba = True, xlabel = "Alter", ylabel = "Interesse")
# Plot the test data (proba refers to the visualization of the decision boundary)
plot_classifier(model, X_test, y_test, proba = True, xlabel = "Alter", ylabel = "Interesse")
|
[
"[email protected]"
] | |
49d717a463fe1aa2aba56a457a609fd5ef28eaef
|
99fca8eaa3fb5e93ed4ed857b439293bc0952c79
|
/Code Testing/test_survey.py
|
a5a66d73334b39d02acfeb0c00b35ea6e63ad6e4
|
[] |
no_license
|
Ebyy/python_projects
|
7adb377f4e8eec94613e4e348f02c2ded306efac
|
0cacfab443d3eeeb274836b7be4b7205585f7758
|
refs/heads/master
| 2020-05-19T22:28:17.672051 | 2019-05-19T19:32:19 | 2019-05-19T19:32:19 | 185,240,041 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 912 |
py
|
import unittest
from survey import AnonymousSurvey
class TestAnonymousSurvey(unittest.TestCase):
"""Test for class AnonymousSurvey."""
def test_store_single_response(self):
"""Test that a single response is stored."""
question = "What language did you first learn to speak?"
my_survey = AnonymousSurvey(question)
my_survey.store_response('English')
self.assertIn('English',my_survey.responses)
def test_store_three_responses(self):
"""Test that 3 individual responses are stored properly."""
question = "What language did you first learn to speak?"
my_survey = AnonymousSurvey(question)
responses = ['English','German','French']
for response in responses:
my_survey.store_response(response)
for response in responses:
self.assertIn(response, my_survey.responses)
unittest.main()
|
[
"[email protected]"
] | |
a6a11b9f02b65505c57c67c82f445c38bf20424f
|
9cbab916088192af67a19aaee25fe7d6e5d27a31
|
/web/flask/mvc/mvc.py
|
4b9513e871f0b010000c26f6a1b3992bdfc99267
|
[] |
no_license
|
ddayzzz/Pythonlearning
|
806c75304d7d954f2c935031d4d7516be7ce7300
|
54e92aa5282da97b6d4bd2355a668a16c272ee68
|
refs/heads/master
| 2020-12-30T12:44:49.465356 | 2017-05-25T15:12:53 | 2017-05-25T15:12:53 | 91,356,527 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,300 |
py
|
# coding=utf-8
# A first look at an MVC structure
from flask import Flask, render_template, request
import json
import os
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def home():
return render_template('home.html')
@app.route('/signin', methods=['GET'])
def signin_form():
return render_template('form.html')
@app.route('/signin', methods=['POST'])
def signin():
username = request.form['username']
passwd = request.form['password']
if username == 'admin' and passwd == '123':
return render_template('signin-ok.html', username=username)
    # Record the failed attempt
if os.path.exists('record.json') == False:
data = {'count': 1}
jsfptr = open('record.json', 'w')
jsfptr.write(json.dumps(data))
jsfptr.close()
return render_template('form.html', message='Bad username or password!', count=range(1, 2), username=username)
jsfptr = open('record.json', 'r')
data = json.load(jsfptr)
jsfptr.close()
cnt = data['count']
data['count'] = data['count'] + 1
jsfptr = open('record.json', 'w')
jsfptr.write(json.dumps(data))
jsfptr.close()
return render_template('form.html', message='Bad username or password!', count=range(1, cnt + 2), username=username)
if __name__ == '__main__':
app.run()
|
[
"[email protected]"
] | |
183146db1c12b5739955d5dd2905e1e8753a16e5
|
b872ccff0c2f79886c0136b32da5f04cb8d3276c
|
/etcewrappers/emane/emaneeventtdmaschedule.py
|
55875966e00e3b6b7a7d46e29ca2f41d9efe2459
|
[] |
no_license
|
prj8121/python-etce
|
9c22b3a182f103f46b1d865d13ded277482e4a34
|
bbd74a65280a09f3edc05457961b8c51ec009165
|
refs/heads/master
| 2022-11-18T05:19:19.324966 | 2020-04-02T15:15:47 | 2020-04-02T15:15:47 | 276,674,792 | 0 | 0 | null | 2020-07-02T14:57:07 | 2020-07-02T14:57:06 | null |
UTF-8
|
Python
| false | false | 3,961 |
py
|
#
# Copyright (c) 2015-2018 - Adjacent Link LLC, Bridgewater, New Jersey
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Adjacent Link LLC nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import etce.timeutils
from etce.eelsequencer import EELSequencer
from etce.wrapper import Wrapper
class EmaneEventTDMASchedule(Wrapper):
"""
Issue TDMA schedule events using emaneevent-tdmaschedule based on events
listed in the input EEL file. EEL lines require this format:
TIME NEMIDS tdmaschedule SCHEDULEXMLFILE
Example: Issue schedule events at time 3.0 and 47.0 to different NEM
groups.
3.0 nem:1-5,7 tdmaschedule schedule-003.xml
47.0 nem:9 tdmaschedule schedule-047.xml
"""
def register(self, registrar):
registrar.register_infile_name('scenario.eel')
registrar.register_outfile_name('tdmaschedule.log')
registrar.register_argument('eventservicegroup',
'224.1.2.8:45703',
'The Event Service multicast group and port.')
registrar.register_argument('eventservicedevice',
None,
                                    'Event channel multicast device.')
def run(self, ctx):
if not ctx.args.infile:
return
if not ctx.args.eventservicedevice:
message = 'Wrapper emane.emaneeventtdmaschedule mandatory ' \
'argument "eventservicedevice" not specified. Quitting.'
raise RuntimeError(message)
mcgroup,mcport = ctx.args.eventservicegroup.split(':')
sequencer = EELSequencer(ctx.args.infile,
ctx.args.starttime,
('tdmaschedule',))
for _,_,eventargline in sequencer:
# parse inputs
# 0.0 nem:1-5 tdmaschedule tdmaschedules/t000.xml
eventargs = eventargline.split()
schedulexml = eventargs[0]
# build argstr
argstr = \
'--device %s --group %s --port %s %s' \
% (ctx.args.eventservicedevice, mcgroup, mcport, schedulexml)
ctx.run('emaneevent-tdmaschedule', argstr, genpidfile=False)
# and log it
with open(ctx.args.outfile,'a') as lf:
lf.write('%s: emaneevent-tdmaschedule %s\n' % (etce.timeutils.getstrtimenow(),
argstr))
def stop(self, ctx):
pass
|
[
"[email protected]"
] | |
bf26cfc8f60644c52cb50448bc33a7c5e8dddb18
|
505343f6ace00d22f8753c1a943a5794a619e698
|
/katas/Python/6 kyu/Schrdingers Boolean 5a5f9f80f5dc3f942b002309.py
|
e09f00fb145d91c745d59a94f46968afea841d81
|
[] |
no_license
|
bullet1337/codewars
|
7652e50bf768bc47976a9124dd98b93602d4d458
|
ba7f13ddd766158b41e036dae5d6b15f7f08761a
|
refs/heads/master
| 2020-03-27T05:04:03.751302 | 2019-04-30T17:45:39 | 2019-04-30T17:45:39 | 145,991,995 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 145 |
py
|
# https://www.codewars.com/kata/5a5f9f80f5dc3f942b002309
class Omnibool:
def __eq__(self, other):
return True
omnibool = Omnibool()
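# Because __eq__ unconditionally returns True, `omnibool` compares equal to
# every value, e.g. omnibool == 42, omnibool == None and omnibool == "anything"
# all evaluate to True.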
|
[
"[email protected]"
] | |
6d4ba7dbe4ec8724bcfc6c95ce18bb818fc5b124
|
0ebcfdb5a98ff3e3975fb16e5f3b0616447b27e5
|
/DPSPipeline/database/userassignments.py
|
c07e0f32c85254e6e9223126fbb45bb0ef4c8edb
|
[] |
no_license
|
satishgoda/DPS_PIPELINE
|
ff7723dba09c54dca4caaaf390c398f33d474bf3
|
49100eea1f81bb0b86a5fed1bb5c3b1b5411b912
|
refs/heads/master
| 2021-01-21T18:34:46.613939 | 2016-03-04T22:42:47 | 2016-03-04T22:42:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,241 |
py
|
import sharedDB
#timestamp
from datetime import datetime
from PyQt4 import QtCore
from PyQt4.QtCore import QObject
'''
Group by project/phase
order by due date
For percentage completed Get the number of unfinished tasks / total number of tasks
To get estimated hours per assignment, take (8 hr * work days left in phase) / number of incomplete tasks under phase
'''
class UserAssignment(QObject):
userAssignmentChanged = QtCore.pyqtSignal(QtCore.QString)
userAssignmentAdded = QtCore.pyqtSignal(QtCore.QString)
def __init__(self,_iduserassignments = -1, _idusers = -1, _assignmentid = -1, _assignmenttype = '', _idstatuses = 1, _timestamp = datetime.now(), _hours = 0, _updated = 0, _new = 0):
super(QObject, self).__init__()
# define custom properties
self._iduserassignments = _iduserassignments
self._idusers = _idusers
self._assignmentid = _assignmentid
self._assignmenttype = _assignmenttype
self._idstatuses = _idstatuses
self._hours = _hours
self._timestamp = _timestamp
self._updated = _updated
self._new = _new
self._type = "userassignment"
self._hidden = False
self._new = _new
self.statusButton = ''
#if self._idstatuses == 3 or self._idstatuses == 5:
#self._hidden = True
if not self._new:
self.connectToDBClasses()
self._estimatedHoursLeft = 0
'''
if self.assignmentType() == "phase_assignment":
self._scarcityIndex = sharedDB.myPhaseAssignments(str(self._assignmentid))._scarcityIndex
else:
self._scarcityIndex = 0
'''
self.userAssignmentChanged.connect(sharedDB.myAvailabilityManager.CalculateBooking)
def __eq__(self, another):
return hasattr(another, '_iduserassignments') and self._iduserassignments == another._iduserassignments
def __hash__(self):
return hash(self._iduserassignments)
def id(self):
return self._iduserassignments
def Save(self):
if self._new:
self.AddUserAssignmentToDB()
print "User Assignment '"+str(self._iduserassignments)+"' Added to Database!"
elif self._updated:
#print self._number+" Updated!"
self.UpdateUserAssignmentInDB()
print "User Assignment '"+str(self._iduserassignments)+"' Updated in Database!"
def AddUserAssignmentToDB(self):
rows,self._iduserassignments = sharedDB.mySQLConnection.query("INSERT INTO userassignments (idusers, assignmentid, assignmenttype, idstatuses, lasteditedbyname, lasteditedbyip, appsessionid, hours) VALUES ('"+str(self._idusers)+"', '"+str(self._assignmentid)+"', '"+str(self._assignmenttype)+"', '"+str(self._idstatuses)+"', '"+str(sharedDB.currentUser._name)+"', '"+str(sharedDB.mySQLConnection.myIP)+"', '"+str(sharedDB.app.sessionId())+"', '"+str(self._hours)+"');","commit")
#self._iduserassignments = sharedDB.mySQLConnection._lastInsertId
sharedDB.myUserAssignments[str(self._iduserassignments)] = self
self.userAssignmentAdded.emit(str(self._iduserassignments))
self._new = 0
def UpdateUserAssignmentInDB (self):
if self.id() is not None:
sharedDB.mySQLConnection.query("UPDATE userassignments SET idusers = '"+str(self._idusers)+"', assignmentid = '"+str(self._assignmentid)+"', assignmenttype = '"+str(self._assignmenttype)+"', idstatuses = '"+str(self._idstatuses)+"', lasteditedbyname = '"+str(sharedDB.currentUser._name)+"', lasteditedbyip = '"+str(sharedDB.mySQLConnection.myIP)+"', appsessionid = '"+str(sharedDB.app.sessionId())+"', hours = '"+str(self._hours)+"' WHERE iduserassignments = "+str(self._iduserassignments)+";","commit")
self._updated = 0
def SetValues(self,_iduserassignments = -1, _idusers = -1, _assignmentid = -1, _assignmenttype = '', _idstatuses = 1, _hours = 0, _timestamp = datetime.now()):
print ("Downloaded update for UserAssignment '"+str(self._iduserassignments)+"'")
self._iduserassignments = _iduserassignments
self._idusers = _idusers
self._assignmentid = _assignmentid
self._assignmenttype = _assignmenttype
self._idstatuses = _idstatuses
self._hours = _hours
self._timestamp = _timestamp
#update views containing project
#update calendar view
#self.UpdateCalendarView()
self.userAssignmentChanged.emit(str(self._iduserassignments))
#self.UpdateProjectView()
##if current project changed, update values
##else just update project list
def setStatus(self,newStatus):
self._status = newStatus
self._updated = 1
def setHours(self, hours):
#if hours <1 delete assignment?
self._hours = hours
self.userAssignmentChanged.emit(str(self._iduserassignments))
self._updated = 1
def connectToDBClasses(self):
#connect to users
if str(self._idusers) in sharedDB.myUsers:
user = sharedDB.myUsers[str(self._idusers)]
user._assignments[str(self.id())] = self
if self.assignmentType() == "phase_assignment":
#for phase in sharedDB.myPhaseAssignments:
#if phase.idphaseassignments() == self.assignmentID():
if str(self.assignmentID()) in sharedDB.myPhaseAssignments:
phase = sharedDB.myPhaseAssignments[str(self.assignmentID())]
phase.addUserAssignment(self)
if self.hours():
if not phase.assigned():
phase.setAssigned(1)
def assignmentID(self):
return self._assignmentid
def assignmentType(self):
return self._assignmenttype
def idUsers(self):
return self._idusers
def idUserAssignment(self):
return self._iduserassignments
def hours(self):
return self._hours
#
'''if self._assignmenttype = 'phaseassignment':
#iterate through shots
for shot in sharedDB.myShots:
##if idsequences matches
#print "Shot id:" +str(shot._idshots)+" Task Id shots: "+str(myTask._idshots)
if shot._idshots == myUserAssignment._idshots:
###add to shot's task list
if shot._tasks is not None:
#print "Appending shot: "+str(shot._idshots)+"'s task list"
shot._tasks.append(myUserAssignment)
else:
#print "Creating shot: "+str(shot._idshots)+"'s task list"
shot._tasks = [myUserAssignment]
sharedDB.mySQLConnection.newTaskSignal.emit(str(myUserAssignment._idtasks))
break
'''
|
[
"[email protected]"
] | |
6475dfcd1638382b256d2c080aa332986c8c10aa
|
bc5dd7be84a43ec53f8e4215761badb9b61a13ad
|
/kurs_2/vertualenv/Lib/site-packages/django/db/models/functions/text.py
|
b896b61e28ff97d18e7080780b35a37804a24f9f
|
[] |
no_license
|
MaximMak/DL_Academy_Lessons
|
ef4758be02e43954748031ac95c970077f71cd7e
|
427576859657e88fd81683494397af3df920c674
|
refs/heads/master
| 2023-01-29T19:53:11.650096 | 2020-12-13T21:40:58 | 2020-12-13T21:40:58 | 276,397,551 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,906 |
py
|
from django.db import NotSupportedError
from django.db.models.expressions import Func, Value
from django.db.models.fields import IntegerField
from django.db.models.functions import Coalesce
from django.db.models.lookups import Transform
class BytesToCharFieldConversionMixin:
"""
Convert CharField results from bytes to str.
MySQL returns long data types (bytes) instead of chars when it can't
determine the length of the result string. For example:
LPAD(column1, CHAR_LENGTH(column2), ' ')
returns the LONGTEXT (bytes) instead of VARCHAR.
"""
def convert_value(self, value, expression, connection):
if connection.features.db_functions_convert_bytes_to_str:
if self.output_field.get_internal_type() == 'CharField' and isinstance(value, bytes):
return value.decode()
return super().convert_value(value, expression, connection)
class MySQLSHA2Mixin:
def as_mysql(self, compiler, connection, **extra_content):
return super().as_sql(
compiler,
connection,
template='SHA2(%%(expressions)s, %s)' % self.function[3:],
**extra_content,
)
class OracleHashMixin:
def as_oracle(self, compiler, connection, **extra_context):
return super().as_sql(
compiler,
connection,
template=(
"LOWER(RAWTOHEX(STANDARD_HASH(UTL_I18N.STRING_TO_RAW("
"%(expressions)s, 'AL32UTF8'), '%(function)s')))"
),
**extra_context,
)
class PostgreSQLSHAMixin:
def as_postgresql(self, compiler, connection, **extra_content):
return super().as_sql(
compiler,
connection,
template="ENCODE(DIGEST(%(expressions)s, '%(function)s'), 'hex')",
function=self.function.lower(),
**extra_content,
)
class Chr(Transform):
function = 'CHR'
lookup_name = 'chr'
def as_mysql(self, compiler, connection, **extra_context):
return super().as_sql(
compiler, connection, function='CHAR',
template='%(function)s(%(expressions)s USING utf16)',
**extra_context
)
def as_oracle(self, compiler, connection, **extra_context):
return super().as_sql(
compiler, connection,
template='%(function)s(%(expressions)s USING NCHAR_CS)',
**extra_context
)
def as_sqlite(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='CHAR', **extra_context)
class ConcatPair(Func):
"""
Concatenate two arguments together. This is used by `Concat` because not
    all backend databases support more than two arguments.
"""
function = 'CONCAT'
def as_sqlite(self, compiler, connection, **extra_context):
coalesced = self.coalesce()
return super(ConcatPair, coalesced).as_sql(
compiler, connection, template='%(expressions)s', arg_joiner=' || ',
**extra_context
)
def as_mysql(self, compiler, connection, **extra_context):
# Use CONCAT_WS with an empty separator so that NULLs are ignored.
return super().as_sql(
compiler, connection, function='CONCAT_WS',
template="%(function)s('', %(expressions)s)",
**extra_context
)
def coalesce(self):
# null on either side results in null for expression, wrap with coalesce
c = self.copy()
c.set_source_expressions([
Coalesce(expression, Value('')) for expression in c.get_source_expressions()
])
return c
class Concat(Func):
"""
Concatenate text fields together. Backends that result in an entire
null expression when any arguments are null will wrap each argument in
coalesce functions to ensure a non-null result.
"""
function = None
template = "%(expressions)s"
def __init__(self, *expressions, **extra):
if len(expressions) < 2:
raise ValueError('Concat must take at least two expressions')
paired = self._paired(expressions)
super().__init__(paired, **extra)
def _paired(self, expressions):
# wrap pairs of expressions in successive concat functions
# exp = [a, b, c, d]
# -> ConcatPair(a, ConcatPair(b, ConcatPair(c, d))))
if len(expressions) == 2:
return ConcatPair(*expressions)
return ConcatPair(expressions[0], self._paired(expressions[1:]))
class Left(Func):
function = 'LEFT'
arity = 2
def __init__(self, expression, length, **extra):
"""
expression: the name of a field, or an expression returning a string
length: the number of characters to return from the start of the string
"""
if not hasattr(length, 'resolve_expression'):
if length < 1:
raise ValueError("'length' must be greater than 0.")
super().__init__(expression, length, **extra)
def get_substr(self):
return Substr(self.source_expressions[0], Value(1), self.source_expressions[1])
def as_oracle(self, compiler, connection, **extra_context):
return self.get_substr().as_oracle(compiler, connection, **extra_context)
def as_sqlite(self, compiler, connection, **extra_context):
return self.get_substr().as_sqlite(compiler, connection, **extra_context)
class Length(Transform):
"""Return the number of characters in the expression."""
function = 'LENGTH'
lookup_name = 'length'
output_field = IntegerField()
def as_mysql(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='CHAR_LENGTH', **extra_context)
class Lower(Transform):
function = 'LOWER'
lookup_name = 'lower'
class LPad(BytesToCharFieldConversionMixin, Func):
function = 'LPAD'
def __init__(self, expression, length, fill_text=Value(' '), **extra):
if not hasattr(length, 'resolve_expression') and length is not None and length < 0:
raise ValueError("'length' must be greater or equal to 0.")
super().__init__(expression, length, fill_text, **extra)
class LTrim(Transform):
function = 'LTRIM'
lookup_name = 'ltrim'
class MD5(OracleHashMixin, Transform):
function = 'MD5'
lookup_name = 'md5'
class Ord(Transform):
function = 'ASCII'
lookup_name = 'ord'
output_field = IntegerField()
def as_mysql(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='ORD', **extra_context)
def as_sqlite(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='UNICODE', **extra_context)
class Repeat(BytesToCharFieldConversionMixin, Func):
function = 'REPEAT'
def __init__(self, expression, number, **extra):
if not hasattr(number, 'resolve_expression') and number is not None and number < 0:
raise ValueError("'number' must be greater or equal to 0.")
super().__init__(expression, number, **extra)
def as_oracle(self, compiler, connection, **extra_context):
expression, number = self.source_expressions
length = None if number is None else Length(expression) * number
rpad = RPad(expression, length, expression)
return rpad.as_sql(compiler, connection, **extra_context)
class Replace(Func):
function = 'REPLACE'
def __init__(self, expression, text, replacement=Value(''), **extra):
super().__init__(expression, text, replacement, **extra)
class Reverse(Transform):
function = 'REVERSE'
lookup_name = 'reverse'
def as_oracle(self, compiler, connection, **extra_context):
# REVERSE in Oracle is undocumented and doesn't support multi-byte
# strings. Use a special subquery instead.
return super().as_sql(
compiler, connection,
template=(
'(SELECT LISTAGG(s) WITHIN GROUP (ORDER BY n DESC) FROM '
'(SELECT LEVEL n, SUBSTR(%(expressions)s, LEVEL, 1) s '
'FROM DUAL CONNECT BY LEVEL <= LENGTH(%(expressions)s)) '
'GROUP BY %(expressions)s)'
),
**extra_context
)
class Right(Left):
function = 'RIGHT'
def get_substr(self):
return Substr(self.source_expressions[0], self.source_expressions[1] * Value(-1))
class RPad(LPad):
function = 'RPAD'
class RTrim(Transform):
function = 'RTRIM'
lookup_name = 'rtrim'
class SHA1(OracleHashMixin, PostgreSQLSHAMixin, Transform):
function = 'SHA1'
lookup_name = 'sha1'
class SHA224(MySQLSHA2Mixin, PostgreSQLSHAMixin, Transform):
function = 'SHA224'
lookup_name = 'sha224'
def as_oracle(self, compiler, connection, **extra_context):
raise NotSupportedError('SHA224 is not supported on Oracle.')
class SHA256(MySQLSHA2Mixin, OracleHashMixin, PostgreSQLSHAMixin, Transform):
function = 'SHA256'
lookup_name = 'sha256'
class SHA384(MySQLSHA2Mixin, OracleHashMixin, PostgreSQLSHAMixin, Transform):
function = 'SHA384'
lookup_name = 'sha384'
class SHA512(MySQLSHA2Mixin, OracleHashMixin, PostgreSQLSHAMixin, Transform):
function = 'SHA512'
lookup_name = 'sha512'
class StrIndex(Func):
"""
Return a positive integer corresponding to the 1-indexed position of the
first occurrence of a substring inside another string, or 0 if the
substring is not found.
"""
function = 'INSTR'
arity = 2
output_field = IntegerField()
def as_postgresql(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='STRPOS', **extra_context)
class Substr(Func):
function = 'SUBSTRING'
def __init__(self, expression, pos, length=None, **extra):
"""
expression: the name of a field, or an expression returning a string
pos: an integer > 0, or an expression returning an integer
length: an optional number of characters to return
"""
if not hasattr(pos, 'resolve_expression'):
if pos < 1:
raise ValueError("'pos' must be greater than 0")
expressions = [expression, pos]
if length is not None:
expressions.append(length)
super().__init__(*expressions, **extra)
def as_sqlite(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='SUBSTR', **extra_context)
def as_oracle(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='SUBSTR', **extra_context)
class Trim(Transform):
function = 'TRIM'
lookup_name = 'trim'
class Upper(Transform):
function = 'UPPER'
lookup_name = 'upper'
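# Usage sketch (hypothetical; assumes an `Author` model with `first_name` and
# `last_name` CharFields):
#   from django.db.models import Value
#   from django.db.models.functions import Concat
#   Author.objects.annotate(full_name=Concat('first_name', Value(' '), 'last_name'))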
|
[
"[email protected]"
] | |
44101645cfe34b8ec96656991a6c490fbcb09586
|
48de70e198d6c8f446dcba7efcebbae971ede3b3
|
/FireHydrant/BDT/xgbcallbacks.py
|
14b26f322fa93a2388352358000bc7c27f34187c
|
[] |
no_license
|
phylsix/FireHydrant
|
9e2eb95b9e20c2fcc38fb42af572a5d7ce3de7d1
|
0848bbe5595b73ee4b4b15aec00f5723a9a58752
|
refs/heads/master
| 2020-06-16T00:27:20.157672 | 2019-12-24T23:06:40 | 2019-12-24T23:06:40 | 195,430,826 | 0 | 3 | null | 2019-10-01T22:39:25 | 2019-07-05T15:24:18 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 8,297 |
py
|
#!/usr/bin/env python
"""callbacks for xgboost
"""
import time
import numpy as np
import xgboost as xgb
from xgboost import rabit
def callback_overtraining(best_test_auc, callback_status):
"""overtrain callback for xgboost
:param best_test_auc: best auc score on test set
:type best_test_auc: float
:param callback_status: dictionary holding callback status
:type callback_status: dict
:raises xgb.core.EarlyStopException: raised in case early stop triggered
:return: callback function to be registered
:rtype: function
"""
def callback(env):
train_auc = env.evaluation_result_list[0][1]
test_auc = env.evaluation_result_list[1][1]
if train_auc < best_test_auc:
return
if train_auc - test_auc > 1 - best_test_auc:
print("We have an overtraining problem! Stop boosting.")
callback_status["status"] = 2
raise xgb.core.EarlyStopException(env.iteration)
return callback
def callback_timeout(max_time, best_test_auc, callback_status, n_fit=10):
"""timeout callback for xgboost
:param max_time: max time allowed in seconds
:type max_time: float
:param best_test_auc: best auc score on test set
:type best_test_auc: float
:param callback_status: dictionary holding callback status
:type callback_status: dictionary
:param n_fit: maximum continuous times when fit test auc score less than previous best result allowed, defaults to 10
:type n_fit: int, optional
:raises xgb.core.EarlyStopException: raised in case early stop triggered
:raises xgb.core.EarlyStopException: raised for a second time
:raises xgb.core.EarlyStopException: raised in case continuous ``n_fit`` times test_auc fit result less than previous best
:raises xgb.core.EarlyStopException: raised for a second time
:return: callback function to be registered
:rtype: function
"""
start_time = time.time()
last_n_times = [] # holding last `n_fit` runtime
last_n_test_auc = [] # holding last `n_fit` auc score on test set
status = {"counter": 0}
def callback(env):
if max_time == None:
return
run_time = time.time() - start_time
if run_time > max_time:
callback_status["status"] = 3
raise xgb.core.EarlyStopException(env.iteration)
print("Xgboost training took too long. Stop boosting.")
raise xgb.core.EarlyStopException(env.iteration)
last_n_test_auc.append(env.evaluation_result_list[1][1])
if len(last_n_test_auc) > n_fit:
del last_n_test_auc[0]
last_n_times.append(run_time)
if len(last_n_times) > n_fit:
del last_n_times[0]
if len(last_n_test_auc) < n_fit:
return
poly = np.polyfit(last_n_times, last_n_test_auc, deg=1)
guessed_test_auc_at_max_time = np.polyval(poly, max_time)
if guessed_test_auc_at_max_time < best_test_auc and best_test_auc > 0.0:
status["counter"] = status["counter"] + 1
else:
status["counter"] = 0
if status["counter"] == n_fit:
callback_status["status"] = 2
raise xgb.core.EarlyStopException(env.iteration)
print("Test AUC does not converge well. Stop boosting.")
raise xgb.core.EarlyStopException(env.iteration)
return callback
# from official repo
# https://github.com/dmlc/xgboost/blob/master/python-package/xgboost/callback.py
def _fmt_metric(value, show_stdv=True):
"""format metric string"""
if len(value) == 2:
return "%s:%g" % (value[0], value[1])
elif len(value) == 3:
if show_stdv:
return "%s:%g+%g" % (value[0], value[1], value[2])
else:
return "%s:%g" % (value[0], value[1])
else:
raise ValueError("wrong metric value")
# Modification of the official early_stop callback to only trigger it from the nth round on
def early_stop(stopping_rounds, start_round=0, maximize=False, verbose=True, eval_idx=-1):
"""Create a callback that activates early stoppping.
Validation error needs to decrease at least
every **stopping_rounds** round(s) to continue training.
Requires at least one item in **evals**.
If there's more than one, will use the last.
Returns the model from the last iteration (not the best one).
If early stopping occurs, the model will have three additional fields:
``bst.best_score``, ``bst.best_iteration`` and ``bst.best_ntree_limit``.
(Use ``bst.best_ntree_limit`` to get the correct value if ``num_parallel_tree``
and/or ``num_class`` appears in the parameters)
Parameters
----------
    stopping_rounds : int
        The number of rounds without improvement after which boosting stops.
maximize : bool
Whether to maximize evaluation metric.
verbose : optional, bool
Whether to print message about early stopping information.
Returns
-------
callback : function
The requested callback function.
"""
state = {}
def init(env):
"""internal function"""
bst = env.model
if len(env.evaluation_result_list) == 0:
raise ValueError("For early stopping you need at least one set in evals.")
if len(env.evaluation_result_list) > 1 and verbose:
msg = "Multiple eval metrics have been passed: " "'{0}' will be used for early stopping.\n\n"
rabit.tracker_print(msg.format(env.evaluation_result_list[eval_idx][0]))
maximize_metrics = ("auc", "map", "ndcg")
maximize_at_n_metrics = ("auc@", "map@", "ndcg@")
maximize_score = maximize
metric_label = env.evaluation_result_list[eval_idx][0]
metric = metric_label.split("-", 1)[-1]
if any(metric.startswith(x) for x in maximize_at_n_metrics):
maximize_score = True
if any(metric.split(":")[0] == x for x in maximize_metrics):
maximize_score = True
if verbose and env.rank == 0:
msg = "Will train until {} hasn't improved in {} rounds.\n"
rabit.tracker_print(msg.format(metric_label, stopping_rounds))
state["maximize_score"] = maximize_score
state["best_iteration"] = 0
if maximize_score:
state["best_score"] = float("-inf")
else:
state["best_score"] = float("inf")
if bst is not None:
if bst.attr("best_score") is not None:
state["best_score"] = float(bst.attr("best_score"))
state["best_iteration"] = int(bst.attr("best_iteration"))
state["best_msg"] = bst.attr("best_msg")
else:
bst.set_attr(best_iteration=str(state["best_iteration"]))
bst.set_attr(best_score=str(state["best_score"]))
else:
assert env.cvfolds is not None
def callback(env):
"""internal function"""
if env.iteration < start_round:
return
score = env.evaluation_result_list[eval_idx][1]
if len(state) == 0:
init(env)
best_score = state["best_score"]
best_iteration = state["best_iteration"]
maximize_score = state["maximize_score"]
if (maximize_score and score > best_score) or (not maximize_score and score < best_score):
msg = "[%d]\t%s" % (env.iteration, "\t".join([_fmt_metric(x) for x in env.evaluation_result_list]))
state["best_msg"] = msg
state["best_score"] = score
state["best_iteration"] = env.iteration
# save the property to attributes, so they will occur in checkpoint.
if env.model is not None:
env.model.set_attr(
best_score=str(state["best_score"]),
best_iteration=str(state["best_iteration"]),
best_msg=state["best_msg"],
)
elif env.iteration - best_iteration >= stopping_rounds:
best_msg = state["best_msg"]
if verbose and env.rank == 0:
msg = "Stopping. Best iteration:\n{}\n\n"
rabit.tracker_print(msg.format(best_msg))
raise xgb.core.EarlyStopException(best_iteration)
return callback
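# Usage sketch (hypothetical; assumes xgb.DMatrix objects `dtrain`/`dtest` and a
# `params` dict already exist):
#   bst = xgb.train(params, dtrain, num_boost_round=500,
#                   evals=[(dtrain, 'train'), (dtest, 'test')],
#                   callbacks=[early_stop(stopping_rounds=20, start_round=50)])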
|
[
"[email protected]"
] | |
1d1a3137d03706cbb5c91955085020705c30c27e
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Scripts/pyinstaller/PyInstaller/loader/pyimod02_archive.py
|
9296bbd14c1aef42a27da920eae5501b3e9edccd
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:c97ad5e579ca97d5b44d77cfa269fe74c6d46c1c0b473a1c8de6cf7df2228569
size 7279
|
[
"[email protected]"
] | |
2b9265fbb0e7033162161f8319ba5d843fec6c6e
|
7c79c8caee77d08aa05cdc59eb68e569abf54a7e
|
/ics 33/solutions/ile2 solutions/Lab 8/PhanChristopher/poly.py
|
5eebdb03bbe68e9c447e9c4bb418dcfd86860a0f
|
[] |
no_license
|
solomc1/python
|
2e4715cc24e7b23d91c879fc95954f615a615982
|
119e388fb6f4ab42f581e48393919d4052a08ef6
|
refs/heads/master
| 2021-01-17T16:48:02.671810 | 2016-07-29T05:27:50 | 2016-07-29T05:27:50 | 64,452,881 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,060 |
py
|
class Poly:
def __init__(self,*terms) -> None:
'''
> self.terms is a dictionary
> keys are *powers* of x
> values are *coefficients* of x
'''
self.terms = {}
if len(terms) == 0:
return
else:
for coefficient,power in terms:
#--------------------
assert type(coefficient) == int
assert type(power) == int
assert power >= 0
assert power not in self.terms
#--------------------
if coefficient == 0:
pass
else:
self.terms[power] = coefficient
#--------------------
return
def __str__(self) -> str:
def term(c,p,var):
return (str(c) if p == 0 or c != 1 else '') +\
('' if p == 0 else var+('^'+str(p) if p != 1 else ''))
if len(self.terms) == 0:
return '0'
else:
return ' + '.join([term(c,p,'x') for p,c in sorted(self.terms.items(),reverse=True)]).replace('+ -','- ')
def __repr__(self) -> str:
'''
Can Do Better
'''
s = "Poly("
for x in self.terms.items():
s += str((x[1],x[0]))
s +=","
s = s.rstrip(",")
s += ")"
return s
    def __len__(self):
        '''
        Return the degree of the polynomial: the largest power with a
        non-zero coefficient (0 for an empty polynomial).
        '''
        if len(self.terms) == 0:
            return 0
        return max(self.terms)
def __call__(self,arg:int or float):
'''
Can Do Better
'''
term = 0
if type(arg) == int or type(arg) == float:
for key,value in self.terms.items():
term += value*(arg**key)
return term
else:
raise TypeError("arg was not an int or float object")
def __iter__(self):
'''
Can Do Better
'''
l = sorted(self.terms.items(),reverse=True)
for t in l:
yield (t[1],t[0])
def __getitem__(self,index):
'''
Can Do Better
'''
if type(index) == int:
if index >= 0:
if index in self.terms:
return self.terms[index]
else:
return 0
else:
raise TypeError("Incorrect Input")
else:
raise TypeError("Incorrect Input")
def __setitem__(self,index,value):
if type(index) == int and index >= 0:
if value == 0:
if index in self.terms:
del self.terms[index]
# equavelent to self.terms.__delitem__(index)
else:
self.terms[index] = value
else:
raise TypeError("Incorrect Input")
def __delitem__(self,index) -> None:
'''
Is it this simple?
'''
if type(index) == int and index >= 0:
if index in self.terms:
self.terms.__delitem__(index)
else:
raise TypeError("Incorrect Input")
return
    def _add_term(self,c,p) -> None:
        if type(c) == int or type(c) == float:
            if type(p) == int and p >= 0:
                if p not in self.terms:
                    self.terms[p] = c
                else:
                    self.terms[p] += c
                    if self.terms[p] == 0:
                        del self.terms[p]
            else:
                raise TypeError("Power is either not an int or negative")
        else:
            raise TypeError("Coefficient is neither a float or int object")
        return
def __add__(self,right):
pass
def __radd__(self,left):
pass
def __mul__(self,right):
pass
def __rmul__(self,left):
pass
def __eq__(self,right):
pass
if __name__ == '__main__':
# Some simple tests; you can comment them out and/or add your own before
# the driver is called.
print('Start simple tests')
p = Poly((3,2),(-2,1), (4,0))
print(' For Polynomial: 3x^2 - 2x + 4')
print(' str(p):',p)
print(' repr(p):',repr(p))
print(' len(p):',len(p))
print(' p(2):',p(2))
print(' list collecting iterator results:',[t for t in p])
print(' p+p:',p+p)
print(' p+2:',p+2)
print(' p*p:',p*p)
print(' p*2:',p*2)
print('End simple tests\n')
import driver
#driver.default_show_exception=True
#driver.default_show_exception_message=True
#driver.default_show_traceback=True
driver.driver()
|
[
"[email protected]"
] | |
35e4465ea5c36e613c6aa15fad18b6c59e34aca2
|
d8e23b9eaaea8080aa7a910b06fe1ae04b7f2a74
|
/flavio/math/test_optimize.py
|
ace5779277eed50b329b85089cccbea69c4cecf4
|
[
"MIT"
] |
permissive
|
flav-io/flavio
|
7ba0f8735193f2014ee69b4b64e139714637f1df
|
cf9fe5c56b2a6930e366142894ddc66951c1ce52
|
refs/heads/master
| 2023-07-07T00:45:48.923555 | 2023-06-01T13:25:59 | 2023-06-01T13:25:59 | 50,420,265 | 76 | 65 |
MIT
| 2023-06-29T06:57:05 | 2016-01-26T10:03:12 |
Python
|
UTF-8
|
Python
| false | false | 1,149 |
py
|
import unittest
import numpy as np
import numpy.testing as npt
import flavio
def f(x):
return (x[0]-2)**2 + (x[1]-1)**2
def g(x):
return -f(x)
def h(x, a):
return (x[0]-a)**2 + (x[1]-1)**2
class TestOptimize(unittest.TestCase):
def test_slsqp(self):
res = flavio.math.optimize.minimize_robust(f, [0, 0], disp=False, methods=('SLSQP',))
npt.assert_array_almost_equal(res.x, [2, 1])
res = flavio.math.optimize.maximize_robust(g, [5, 5], disp=False, methods=('SLSQP',))
npt.assert_array_almost_equal(res.x, [2, 1])
res = flavio.math.optimize.minimize_robust(h, [0, 0], args=(3,), methods=('SLSQP',))
npt.assert_array_almost_equal(res.x, [3, 1])
def test_minuit(self):
res = flavio.math.optimize.minimize_migrad(f, [0, 0], print_level=0)
npt.assert_array_almost_equal(res.x, [2, 1])
res = flavio.math.optimize.minimize_robust(f, [0, 0], methods=('MIGRAD',))
npt.assert_array_almost_equal(res.x, [2, 1])
res = flavio.math.optimize.minimize_robust(h, [0, 0], args=(3,), methods=('MIGRAD',))
npt.assert_array_almost_equal(res.x, [3, 1])
|
[
"[email protected]"
] | |
42c767941b8f7a8d86d7e4cb597956535457e53c
|
d324b3d4ce953574c5945cda64e179f33c36c71b
|
/php/php-sky/grpc/src/python/grpcio_tests/tests/unit/framework/foundation/stream_testing.py
|
32b0341ad7966e0c9bd1fa259e35f43792aea7cf
|
[
"Apache-2.0"
] |
permissive
|
Denticle/docker-base
|
decc36cc8eb01be1157d0c0417958c2c80ac0d2f
|
232115202594f4ea334d512dffb03f34451eb147
|
refs/heads/main
| 2023-04-21T10:08:29.582031 | 2021-05-13T07:27:52 | 2021-05-13T07:27:52 | 320,431,033 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,017 |
py
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing stream-related code."""
from grpc.framework.foundation import stream
class TestConsumer(stream.Consumer):
"""A stream.Consumer instrumented for testing.
Attributes:
calls: A sequence of value-termination pairs describing the history of calls
made on this object.
"""
def __init__(self):
self.calls = []
def consume(self, value):
"""See stream.Consumer.consume for specification."""
self.calls.append((value, False))
def terminate(self):
"""See stream.Consumer.terminate for specification."""
self.calls.append((None, True))
def consume_and_terminate(self, value):
"""See stream.Consumer.consume_and_terminate for specification."""
self.calls.append((value, True))
def is_legal(self):
"""Reports whether or not a legal sequence of calls has been made."""
terminated = False
for value, terminal in self.calls:
if terminated:
return False
elif terminal:
terminated = True
elif value is None:
return False
else: # pylint: disable=useless-else-on-loop
return True
def values(self):
"""Returns the sequence of values that have been passed to this Consumer."""
return [value for value, _ in self.calls if value]
|
[
"[email protected]"
] | |
71df8e684ed3dae9857b6460d28c08af04f7c124
|
8ab46cba46fb2938835dde6e49af61937ab192af
|
/poetry/__version__.py
|
77eaf592dd6b7bd8809d79dca477381f98d3ac82
|
[
"MIT",
"GPL-2.0-only",
"Apache-2.0",
"BSD-2-Clause",
"LGPL-2.1-only",
"GPL-2.0-or-later",
"BSD-4-Clause",
"GPL-3.0-only",
"LGPL-3.0-only",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"GPL-3.0-or-later",
"LGPL-3.0-or-later",
"LGPL-2.1-or-later"
] |
permissive
|
isabella232/poetry
|
c5c056a29db3bf611bf6d97fcef3f156aa72c871
|
05a3c85f8a7902f572cbda8eb23701a9a56dafa2
|
refs/heads/master
| 2022-03-29T05:03:57.764249 | 2019-12-11T20:16:12 | 2019-12-11T20:16:12 | 474,336,367 | 0 | 0 |
MIT
| 2022-03-26T22:04:05 | 2022-03-26T12:08:50 | null |
UTF-8
|
Python
| false | false | 24 |
py
|
__version__ = "1.0.0b9"
|
[
"[email protected]"
] | |
60b0db1dcc7d38d48d1c419268514304c1806bd6
|
9c4508b340f7f84fc5084decc64ebff75afaec68
|
/analysis/webservice/webmodel/StatsComputeOptions.py
|
86e5d597215a4585d335caa8d2b389c2df4ae911
|
[
"Apache-2.0"
] |
permissive
|
apache/incubator-sdap-nexus
|
4590d6417b362acd88ac3ec6b315da06f7460718
|
76f3e4d617abbf283804d6f52aa2eff86e15a744
|
refs/heads/master
| 2023-09-01T12:52:17.381622 | 2023-08-22T21:35:26 | 2023-08-22T21:35:26 | 108,511,090 | 21 | 34 |
Apache-2.0
| 2023-09-14T20:18:33 | 2017-10-27T07:00:11 |
Python
|
UTF-8
|
Python
| false | false | 2,586 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class StatsComputeOptions(object):
def __init__(self):
pass
def get_apply_seasonal_cycle_filter(self, default="false"):
raise Exception("Please implement")
def get_max_lat(self, default=90.0):
raise Exception("Please implement")
def get_min_lat(self, default=-90.0):
raise Exception("Please implement")
def get_max_lon(self, default=180):
raise Exception("Please implement")
def get_min_lon(self, default=-180):
raise Exception("Please implement")
def get_dataset(self):
raise Exception("Please implement")
def get_environment(self):
raise Exception("Please implement")
def get_start_time(self):
raise Exception("Please implement")
def get_end_time(self):
raise Exception("Please implement")
def get_start_year(self):
raise Exception("Please implement")
def get_end_year(self):
raise Exception("Please implement")
def get_clim_month(self):
raise Exception("Please implement")
def get_start_row(self):
raise Exception("Please implement")
def get_end_row(self):
raise Exception("Please implement")
def get_content_type(self):
raise Exception("Please implement")
def get_apply_low_pass_filter(self, default=False):
raise Exception("Please implement")
def get_low_pass_low_cut(self, default=12):
raise Exception("Please implement")
def get_low_pass_order(self, default=9):
raise Exception("Please implement")
def get_plot_series(self, default="mean"):
raise Exception("Please implement")
def get_plot_type(self, default="default"):
raise Exception("Please implement")
def get_nparts(self):
raise Exception("Please implement")
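# Hedged sketch (illustration only): concrete request classes are expected to subclass this
# interface and override the getters they support, e.g.
# class FixedBoundsOptions(StatsComputeOptions):
#     def get_min_lat(self, default=-90.0):
#         return default
# (FixedBoundsOptions is a hypothetical name, not a class from this project.)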
|
[
"[email protected]"
] | |
e2aecbe9701cbaf4eaa6a72fdedbf8eeb13950b0
|
0a727f3ffde045805b9b789abbaa9c8497667f8e
|
/CrossMgrCamera/AddPhotoHeader.py
|
a9ab798acf0f813bf0c8bc2a0fb48a2709f0153d
|
[
"MIT"
] |
permissive
|
esitarski/CrossMgr
|
ff4a632089a144f6ecc57970e2b29a7c31a15118
|
a95ac1d65f2d0cab712cc6e5f9393668c1bbf83c
|
refs/heads/master
| 2023-08-30T22:48:43.457978 | 2023-08-24T14:12:44 | 2023-08-24T14:12:44 | 1,042,402 | 33 | 20 |
MIT
| 2023-04-30T13:32:11 | 2010-11-01T17:25:15 |
Python
|
UTF-8
|
Python
| false | false | 6,801 |
py
|
import wx
from wx.lib.agw.aui import LightColour
import os
import math
import Utils
class dotdict( object ):
pass
def formatTime( secs ):
f, ss = math.modf( secs or 0.0 )
secs = int(ss)
hours = secs // (60*60)
minutes = (secs // 60) % 60
secs = secs % 60 + f
return '{:02d}:{:02d}:{:06.3f}'.format( hours, minutes, secs )
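# Hedged example (illustration only): formatTime(3723.5) == '01:02:03.500'.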
def PilImageToWxImage( pil ):
image = wx.Image( *pil.size )
image.SetData( pil.convert('RGB').tobytes() )
return image
drawResources = None # Cached resource for drawing the photo header.
def setDrawResources( dc, w, h ):
global drawResources
drawResources = dotdict()
drawResources.w = w
drawResources.h = h
fontHeight = int(h/36.0)
fontFace = Utils.FontFace
drawResources.bibFontSize = fontHeight * 1.5
drawResources.bibFont = wx.Font(
(0, drawResources.bibFontSize),
wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD,
faceName=fontFace,
)
dc.SetFont( drawResources.bibFont )
drawResources.bibWidth, drawResources.bibHeight = dc.GetTextExtent( u' 9999' )
drawResources.bibTextColour = wx.Colour(0,0,200)
drawResources.bibSpaceWidth = dc.GetTextExtent( u'9999' )[0] / 4
drawResources.nameFontSize = drawResources.bibFontSize
drawResources.nameFont = wx.Font(
(0, drawResources.nameFontSize),
wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,
faceName=fontFace,
)
drawResources.nameTextColour = drawResources.bibTextColour
drawResources.fontSize = fontHeight * 1.0
drawResources.font = wx.Font(
(0, drawResources.fontSize),
wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,
faceName=fontFace,
)
dc.SetFont( drawResources.font )
drawResources.spaceWidth = dc.GetTextExtent( u'9999' )[0] / 4
drawResources.smallFontSize = drawResources.fontSize * 0.9
drawResources.smallFont = wx.Font(
(0, drawResources.smallFontSize),
wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,
faceName=fontFace,
)
drawResources.fontColour = wx.BLACK
dc.SetFont( drawResources.font )
drawResources.fontHeight = dc.GetTextExtent( u'ATWgjjy' )[1]
bitmapHeight = drawResources.bibHeight * 2.8
bitmap = wx.Bitmap( os.path.join(Utils.getImageFolder(), 'CrossMgrHeader.png'), wx.BITMAP_TYPE_PNG )
scaleMult = float(bitmapHeight) / float(bitmap.GetHeight())
image = bitmap.ConvertToImage()
drawResources.bitmapWidth, drawResources.bitmapHeight = int(image.GetWidth() * scaleMult), int(image.GetHeight() * scaleMult)
image.Rescale( drawResources.bitmapWidth, drawResources.bitmapHeight, wx.IMAGE_QUALITY_HIGH )
drawResources.bitmap = image.ConvertToBitmap()
drawResources.fadeDark = wx.Colour(114+80,119+80,168+80)
drawResources.fadeLight = LightColour( drawResources.fadeDark, 50 )
drawResources.borderColour = wx.Colour( 71+50, 75+50, 122+50 )
def AddPhotoHeader( image, bib=None, time=None, raceSeconds=None, firstNameTxt=u'', lastNameTxt=u'', teamTxt=u'', raceNameTxt=u'' ):
global drawResources
if not image:
return wx.Bitmap(8, 8)
bitmap = wx.Bitmap( image )
w, h = bitmap.GetSize()
dcMemory = wx.MemoryDC( bitmap )
dc = wx.GCDC( dcMemory )
if drawResources is None or drawResources.w != w or drawResources.h != h:
setDrawResources( dc, w, h )
bibTxt = '{}'.format(bib) if bib else ''
if time and raceSeconds:
timeTxt = _('{} {}').format(
formatTime(raceSeconds),
time.strftime('%Y-%m-%d %H:%M:%S'),
)
else:
timeTxt = u''
if timeTxt.startswith('0'):
timeTxt = timeTxt[1:]
nameTxt = u' '.join( n for n in [firstNameTxt, lastNameTxt] if n )
frameWidth = 4
borderWidth = 1
bitmapWidth = drawResources.bitmapWidth
bitmapHeight = drawResources.bitmapHeight
bibSpaceWidth = drawResources.bibSpaceWidth
spaceWidth = drawResources.spaceWidth
xText, yText = bitmapWidth, 0
x = borderWidth
y = borderWidth
def shadedRect( x, y, w, h ):
highlightTop = int(h/4.0)
dc.GradientFillLinear( wx.Rect(0, y, w, highlightTop),
drawResources.fadeDark, drawResources.fadeLight, wx.SOUTH )
dc.GradientFillLinear( wx.Rect(0, y+highlightTop, w, h-highlightTop),
drawResources.fadeDark, drawResources.fadeLight, wx.NORTH )
def textInRect( txt, x, y, width, height, font=None, colour=None, alignment=wx.ALIGN_CENTER|wx.ALIGN_CENTRE_VERTICAL ):
if font:
dc.SetFont( font )
if colour:
dc.SetTextForeground( colour )
dc.DrawLabel( txt, wx.Rect(x, y, width, height), alignment )
lineHeight = int(drawResources.bibHeight * 1.25 + 0.5)
x += xText + frameWidth + bibSpaceWidth
dc.SetPen( wx.Pen(drawResources.borderColour, borderWidth) )
shadedRect( x, 0, w, lineHeight + borderWidth )
dc.DrawLine( 0, 0, w, 0 )
dc.DrawLine( xText, lineHeight, w, lineHeight )
# Draw the bib.
dc.SetFont( drawResources.bibFont )
tWidth = dc.GetTextExtent( bibTxt )[0]
textInRect( bibTxt, x, y, tWidth, lineHeight, drawResources.bibFont, drawResources.bibTextColour )
# Draw the name and team.
x += tWidth + spaceWidth
textInRect( nameTxt, x, y, dc.GetTextExtent(nameTxt)[0], lineHeight, drawResources.nameFont, drawResources.bibTextColour )
x += dc.GetTextExtent(nameTxt)[0] + spaceWidth
remainingWidth = w - x - spaceWidth - borderWidth
dc.SetFont( drawResources.font )
teamTxtWidth = dc.GetTextExtent(teamTxt)[0]
if teamTxtWidth < remainingWidth:
textInRect( teamTxt, x, y, remainingWidth, lineHeight, drawResources.font, wx.BLACK, alignment=wx.ALIGN_RIGHT|wx.ALIGN_CENTRE_VERTICAL )
y += lineHeight
lineHeight = int( drawResources.fontHeight * 1.25 + 0.5 )
shadedRect( 0, y, w, lineHeight )
dc.DrawLine( 0, y+lineHeight, w, y+lineHeight )
# Draw the time, race time and raceName.
dc.SetFont( drawResources.font )
x = borderWidth
x += xText + frameWidth + bibSpaceWidth
textInRect( timeTxt, x, y, w-x, lineHeight, drawResources.font, wx.BLACK, alignment=wx.ALIGN_LEFT|wx.ALIGN_CENTRE_VERTICAL )
x += dc.GetTextExtent(timeTxt)[0] + spaceWidth
remainingWidth = w - x - spaceWidth - borderWidth
raceNameTxtWidth = dc.GetTextExtent(raceNameTxt)[0]
if raceNameTxtWidth < remainingWidth:
textInRect( raceNameTxt, x, y, remainingWidth, lineHeight, drawResources.font, wx.BLACK, alignment=wx.ALIGN_RIGHT|wx.ALIGN_CENTRE_VERTICAL )
# Draw the bitmap.
dc.DrawBitmap( drawResources.bitmap, frameWidth, frameWidth )
# Draw a frame around the bitmap.
dc.SetBrush( wx.TRANSPARENT_BRUSH )
frameHalf = frameWidth // 2
dc.SetPen( wx.Pen(drawResources.borderColour, frameWidth) )
dc.DrawRectangle( frameHalf, frameHalf, bitmapWidth+frameHalf, bitmapHeight+frameHalf )
dc.SetPen( wx.Pen(wx.WHITE, frameHalf) )
dc.DrawRectangle( frameHalf, frameHalf, bitmapWidth+frameHalf, bitmapHeight+frameHalf )
# Draw a border on the right side.
dc.SetPen( wx.Pen(drawResources.borderColour, 1) )
dc.DrawLine( w-1, 0, w-1, y+lineHeight )
return bitmap.ConvertToImage()
|
[
"[email protected]"
] | |
860636ada08867c77753a3c24ddd474e3242f227
|
94f5e16caf4d10a6ac4ba7e9896b33c8d503f1bb
|
/migrations/versions/5f7a5ddbf7af_.py
|
9a94c55035b427c0504ccd18b74553992c051ce5
|
[] |
no_license
|
hornLK/LKproject
|
3dd760ad1c83b2d6faaddf66c32d4be16349c2d2
|
9f9b7c324b740aa215d5dd0ac7a7eecdb0a4ef0c
|
refs/heads/master
| 2021-09-05T22:05:20.321201 | 2018-01-31T07:43:27 | 2018-01-31T07:43:27 | 112,689,101 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 771 |
py
|
"""empty message
Revision ID: 5f7a5ddbf7af
Revises: 1d0cc801c49f
Create Date: 2018-01-15 20:24:46.074233
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5f7a5ddbf7af'
down_revision = '1d0cc801c49f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('ips', sa.Column('status', sa.Boolean(), nullable=True))
op.add_column('networks', sa.Column('desc', sa.String(length=64), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('networks', 'desc')
op.drop_column('ips', 'status')
# ### end Alembic commands ###
|
[
"[email protected]"
] | |
9fb1d67a0e9b750a78cab4ce36eddb0fad97d4ca
|
fa33d9994e45348b28a4aa375575460a0a5ef100
|
/bazaar/urls.py
|
96301ff62b6682a4206f791fd7476f3220ddb817
|
[] |
no_license
|
KushalVijay/Smart-Buy
|
6730b4cbb9951bfb9c59045af2c479574a6ad6e5
|
4bdcfc44826d6c1aaa2f10b507b181cd092e3cb0
|
refs/heads/master
| 2022-04-09T21:38:31.927219 | 2020-03-23T10:12:50 | 2020-03-23T10:12:50 | 249,395,588 | 11 | 8 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,734 |
py
|
"""project3 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls.static import static
from django.conf import settings
from django.conf.urls import url, include
from django.contrib import admin
from products.views import all_products
from accounts import urls as urls_accounts
from products import urls as urls_products
from cart import urls as urls_cart
from search import urls as urls_search
from checkout import urls as urls_checkout
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', all_products, name='index'),
url(r'^accounts/', include(urls_accounts)),
url(r'^products/', include(urls_products)),
url(r'^checkout/', include(urls_checkout)),
url(r'^cart/', include(urls_cart)),
url(r'^user/', include(urls_accounts)),
url(r'^search/', include(urls_search)),
#url(r'^media/(?P<path>.*)$', static.serve,{'document_root': MEDIA_ROOT}),
]
if settings.DEBUG:
urlpatterns = urlpatterns + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
27ee28679d9eb1bffb2f1e431d2bc7787ec15af2
|
bb37226fe4f918cec1b5d3b9f3c2abd227c740fb
|
/library/setup.py
|
da6190df6b671fac3be9afc2c85b4235220dd3c5
|
[
"BSD-2-Clause"
] |
permissive
|
piecafe/rpi_ws281x-python
|
e942f44b77cb9c61f10a4777b1140463020b5ab5
|
7c7513aec0aa60dd2d8c3d8fdcb2e8bba6fa3ef6
|
refs/heads/master
| 2021-01-23T07:16:31.825159 | 2017-08-18T12:54:47 | 2017-08-18T12:54:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,424 |
py
|
#!/usr/bin/env python
# Python wrapper for the rpi_ws281x library.
# Authors:
# Phil Howard ([email protected])
# Tony DiCola ([email protected])
from setuptools import setup, find_packages, Extension
from setuptools.command.build_py import build_py
import subprocess
class CustomInstallCommand(build_py):
"""Customized install to run library Makefile"""
def run(self):
print("Compiling ws281x library...")
        proc = subprocess.Popen(["make"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print(proc.stderr.read())
build_py.run(self)
setup(name = 'rpi_ws281x',
version = '3.0.1',
author = 'Jeremy Garff <[email protected]>, Phil Howard <[email protected]>',
author_email = '[email protected]',
description = 'Userspace Raspberry Pi PWM/PCM/SPI library for SK6812 and WS281X LEDs.',
license = 'MIT',
url = 'https://github.com/pimoroni/rpi_ws281x-python/',
cmdclass = {'build_py':CustomInstallCommand},
packages = ['neopixel', 'rpi_ws281x'],
ext_modules = [Extension('_rpi_ws281x',
sources=['rpi_ws281x_wrap.c'],
include_dirs=['lib/'],
library_dirs=['lib-built/'],
libraries=['ws2811'])])
|
[
"[email protected]"
] | |
2b694d9643bf16ae008b20db0368e0915d9f8158
|
0674b9d8a34036a6bbe2052e1cae0eee9a44554b
|
/Baekjoon/2941.py
|
ee51dc09161855679d9cb3d0955c5af4efebcde3
|
[] |
no_license
|
timebird7/Solve_Problem
|
02fb54e90844a42dc69a78afb02cc10a87eda71c
|
2d54b6ecbe3edf9895fd8303cbca99b3f50f68f3
|
refs/heads/master
| 2020-04-14T23:37:15.354476 | 2019-04-15T14:32:41 | 2019-04-15T14:32:41 | 164,208,673 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 457 |
py
|
s = input()
while s.find('c=') >= 0:
s = s.replace('c=','1')
while s.find('c-') >= 0:
s = s.replace('c-','2')
while s.find('dz=') >= 0:
s = s.replace('dz=','3')
while s.find('d-') >= 0:
s = s.replace('d-','4')
while s.find('lj') >= 0:
s = s.replace('lj','5')
while s.find('nj') >= 0:
s = s.replace('nj','6')
while s.find('s=') >= 0:
s = s.replace('s=','7')
while s.find('z=') >= 0:
s = s.replace('z=','8')
print(len(s))
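# Hedged alternative sketch (illustration only): str.replace already substitutes every
# occurrence, so one pass over the digraphs works too, provided 'dz=' is handled before 'z=':
# for i, pattern in enumerate(('c=', 'c-', 'dz=', 'd-', 'lj', 'nj', 's=', 'z=')):
#     s = s.replace(pattern, str(i))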
|
[
"[email protected]"
] | |
fc3fdfec5df7869e6e4c00786c0cb1712f5f7405
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p2DJ/New/program/pyquil/startPyquil191.py
|
5d80d7ba626c297f6554e4295cdaa0a9f0341404
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,154 |
py
|
# qubit number=2
# total number=9
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += SWAP(1,0) # number=2
prog += SWAP(1,0) # number=3
prog += X(1) # number=5
prog += RX(-2.73004401596953,1) # number=6
prog += Z(1) # number=4
prog += SWAP(1,0) # number=7
prog += SWAP(1,0) # number=8
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
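# Hedged example (illustration only): summrise_results(['00', '01', '00']) == {'00': 2, '01': 1}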
if __name__ == '__main__':
prog = make_circuit()
    qvm = get_qc('2q-qvm')  # the circuit above acts on qubits 0 and 1
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil191.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
|
[
"[email protected]"
] | |
be9f2bb0defc1f2082a178d6bb37135cf836224c
|
5b71e2952f34dd3bb20148874d952fee06d31857
|
/app/mf/crud/migrations/0107_auto_20210208_2328.py
|
37f44934ff8cd007fd1f0c5693956ab187222266
|
[] |
no_license
|
isela1998/facebook
|
a937917cddb9ef043dd6014efc44d59d034102b1
|
a0f2f146eb602b45c951995a5cb44409426250c5
|
refs/heads/master
| 2023-07-18T02:14:50.293774 | 2021-08-28T03:26:06 | 2021-08-28T03:26:06 | 400,613,743 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 431 |
py
|
# Generated by Django 3.1.1 on 2021-02-09 03:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crud', '0106_auto_20210208_1919'),
]
operations = [
migrations.AlterField(
model_name='cancelledinvoices',
name='provider',
field=models.CharField(max_length=255, verbose_name='Proveedor/Cliente'),
),
]
|
[
"[email protected]"
] | |
e46899246ef5d5ccc4e48374f590c0591205b6dc
|
567e89b21aca23db5f14032889fdd1cb7c7700f7
|
/Ia de morpion 1.py
|
85df14cede381f6300d00965b3de944a959dfe20
|
[] |
no_license
|
MarcPartensky/Python-2018
|
7ab83d42eb28b34bed88fc6fb77892e62094dd8d
|
27d2a57a6b6d6cdaa883fd2ce55e1c5eefd13ccc
|
refs/heads/master
| 2020-04-17T13:12:41.448439 | 2019-01-19T23:55:05 | 2019-01-19T23:55:05 | 166,605,846 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,487 |
py
|
import math
import numpy as np
M = [0]*9
Grille = [0]*9
Liste = np.zeros((9, 8, 7, 6, 5, 4, 3, 2, 1))
Avantage = 0  # running heuristic score accumulated in the loops below
Enjeux = np.array([[9, -5],
                   [-9, 8],
                   [7, -4],
                   [-8, 6],
                   [5, -3],
                   [-7, 4],
                   [3, -2],
                   [-6, 2],
                   [1, -1]])
for a in range(0,9):
M[0]=a
Avantage=Avantage+Enjeux[0,testwin(Grille)]/2**0
for b in range(0,8):
M[1]=b
Avantage=Avantage+Enjeux[1,testwin(Grille)]/2**1
for c in range(0,7):
M[2]=c
Avantage=Avantage+Enjeux[2,testwin(Grille)]/2**2
for d in range(0,6):
M[3]=d
Avantage=Avantage+Enjeux[3,testwin(Grille)]/2**3
for e in range(0,5):
M[4]=e
Avantage=Avantage+Enjeux[4,testwin(Grille)]/2**4
for f in range(0,4):
M[5]=f
Avantage=Avantage+Enjeux[5,testwin(Grille)]/2**5
for g in range(0,3):
M[6]=g
Avantage=Avantage+Enjeux[6,testwin(Grille)]/2**6
for h in range(0,2):
M[7]=h
Avantage=Avantage+Enjeux[7,testwin(Grille)]/2**7
i=1
M[8]=i
Avantage=Avantage+Enjeux[8,testwin(Grille)]/2**8
Liste[M]=Avantage
Mouvements=[0]*9
Avantage=0
def testwin(grille):
if (grille[0]==grille[1]) and (grille[0]==grille[2]) and (grille[0]!=0):
return 1
elif (grille[3]==grille[4]) and (grille[3]==grille[5]) and (grille[3]!=0):
return 1
elif (grille[6]==grille[7]) and (grille[6]==grille[8]) and (grille[6]!=0):
return 1
elif (grille[0]==grille[3]) and (grille[0]==grille[6]) and (grille[0]!=0):
return 1
elif (grille[1]==grille[4]) and (grille[1]==grille[7]) and (grille[1]!=0):
return 1
elif (grille[2]==grille[5]) and (grille[2]==grille[8]) and (grille[2]!=0):
return 1
elif (grille[0]==grille[4]) and (grille[0]==grille[8]) and (grille[0]!=0):
return 1
elif (grille[2]==grille[4]) and (grille[2]==grille[6]) and (grille[2]!=0):
return 1
    else:
return 0
|
[
"[email protected]"
] | |
baff098ad1a4ceb0d889a74177b3d6da526db038
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_cog/optimized_10324.py
|
32be4825222f5f15db073907b679ed582a5c4b98
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,841 |
py
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((472.606, 446.682, 470.856), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((516.459, 394.597, 488.707), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((571.719, 340.395, 514.422), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((604.141, 475.395, 499.212), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((669.213, 182.358, 565.998), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((501.799, 410.933, 475.449), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((501.119, 411.638, 474.683), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((492.036, 433.382, 459.424), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((496.054, 444.944, 434.373), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((517.696, 437.497, 418.189), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((530.208, 413.462, 409.88), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((518.505, 411.817, 384.057), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((476.39, 422.347, 481.528), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((556.118, 393.695, 285.889), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((626.632, 238.367, 394.303), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((626.632, 238.367, 394.303), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((616.263, 263.746, 403.109), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((605.358, 288.369, 413.896), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((592.197, 311.371, 425.56), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((576.028, 330.828, 439.474), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((554.51, 344.992, 452.872), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((529.441, 354.442, 464.686), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((687.527, 305.769, 267.106), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((361.666, 402.873, 657.674), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((530.413, 323.206, 494.803), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((530.413, 323.206, 494.803), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((551.203, 342.059, 487.69), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((567.106, 365.498, 493.926), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((582.623, 371.871, 517.687), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((514.22, 474.272, 501.131), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((654.362, 272.069, 543.897), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((522.246, 421.449, 497.816), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((522.265, 421.573, 497.894), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((507.4, 408.043, 517.841), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((486.66, 396.124, 502.563), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((481.143, 384.959, 476.356), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((485.229, 388.144, 447.738), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((487.932, 390.293, 418.814), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((494.166, 390.217, 390.619), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((464.363, 348.833, 458.48), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((526.866, 432.712, 320.545), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((472.52, 348.796, 503.932), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((496.003, 360.047, 507.294), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((546.219, 385.465, 516.788), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((596.532, 410.747, 526.199), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((550.694, 472.176, 554.058), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((700.631, 400.298, 519.022), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((498.661, 469.596, 534.397), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((509.477, 445.121, 544), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((518.831, 418.415, 548.415), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((528.679, 391.297, 552.097), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((541.86, 366.143, 559.39), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((555.229, 340.187, 565.018), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((508.2, 381.965, 516.924), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((604.056, 290.983, 611.756), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
[
"[email protected]"
] | |
1c34d14ba96c3625da9c663dd7e84a221f2a03a4
|
025660ec946f46cb7458abb12ce32fb4a2e437bb
|
/event/arguments/loss.py
|
60b776d0dcc85bde9d1d031e11f8f8068f8c034e
|
[
"Apache-2.0"
] |
permissive
|
edvisees/DDSemantics
|
af22070a239ac227694b87218db5e9c2021ac57b
|
9044e4afa6f9d6d7504de028633295f30679278d
|
refs/heads/master
| 2022-12-01T10:50:32.221836 | 2020-06-18T04:29:44 | 2020-06-18T04:29:44 | 283,858,855 | 0 | 0 |
Apache-2.0
| 2020-07-30T19:19:55 | 2020-07-30T19:19:55 | null |
UTF-8
|
Python
| false | false | 224 |
py
|
import torch
def cross_entropy(y_hat, y):
print(y_hat)
print(y)
if y_hat == 1:
return -torch.log(y)
else:
return -torch.log(1 - y)
def hinge(y_hat, y):
    # hinge loss: max(0, 1 - y_hat * y), computed with torch so no numpy import is needed
    return torch.clamp(1 - y_hat * y, min=0)
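# Hedged usage sketch (illustration only), with scalar tensors:
# cross_entropy(torch.tensor(1.0), torch.tensor(0.9))  # ~0.105, i.e. -log(0.9)
# hinge(torch.tensor(1.0), torch.tensor(0.3))          # 0.7, i.e. max(0, 1 - 1.0*0.3)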
|
[
"[email protected]"
] | |
8989121be2a99eb5d69e650fe7256dd5990e7d0b
|
046cd4903994a736e31a50601550d05b53462372
|
/vision/vis.py
|
7692876d8b6aa2702b8a7e0af2bcb33c6e6ae775
|
[] |
no_license
|
Kyriection/compexp
|
baa0a13e8a16296c0088a2633476ee7d3de74891
|
1038c4cd66f47f4d56998927a8f64dc70839ab48
|
refs/heads/master
| 2023-04-20T03:34:03.692825 | 2021-05-10T14:04:26 | 2021-05-10T14:04:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,748 |
py
|
import settings
from loader.model_loader import loadmodel
from dissection.neuron import hook_feature, NeuronOperator
from dissection import contrib
from visualize.report import (
neuron as vneuron,
final as vfinal,
index as vindex,
)
from util.clean import clean
from util.misc import safe_layername
from tqdm import tqdm
from scipy.spatial import distance
import torch
# import torch.nn.functional as F
import pickle
import os
import pandas as pd
from loader.data_loader import ade20k
from loader.data_loader import formula as F
import numpy as np
import pycocotools.mask as cmask
from PIL import Image, ImageFilter
import matplotlib.pyplot as plt
from scipy import ndimage
import matplotlib.colors
MG = 1
BLUE_TINT = np.array([-MG * 255, -MG * 255, MG * 255])
PURPLE_TINT = np.array([MG * 255, -MG * 600, MG * 255])
RED_TINT = np.array([MG * 255, -MG * 255, -MG * 255])
def add_heatmap(img, acts, actmin=0.0, actmax=1.0):
cmap = plt.cm.rainbow
norm = matplotlib.colors.Normalize(vmin=actmin, vmax=actmax)
act_colors = cmap(norm(acts))
act_colors = act_colors[:, :, :3]
# weighted combination of colors and mask
weighted = (0.5 * act_colors * 255) + (0.5 * img)
weighted = np.clip(weighted, 0, 255)
weighted = np.round(weighted).astype(np.uint8)
return weighted
def iou(m, n):
intersection = np.logical_and(m, n).sum()
union = np.logical_or(m, n).sum()
return (intersection) / (union + 1e-10)
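# Hedged example (illustration only):
# iou(np.array([1, 1, 0], dtype=bool), np.array([1, 0, 0], dtype=bool))  # ~0.5 (1 / 2)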
def iou_mask(img, neuron, mask):
img = img.astype(np.int64)
nborder = get_border(neuron)
mborder = get_border(mask)
border = np.logical_or(nborder, mborder)
intersection = np.logical_and(neuron, mask)
# union = np.logical_xor(neuron, mask)
int_mask = intersection[:, :, np.newaxis] * PURPLE_TINT
int_mask = np.round(int_mask).astype(np.int64)
neuron_mask = neuron[:, :, np.newaxis] * RED_TINT
neuron_mask = np.round(neuron_mask).astype(np.int64)
mask_mask = mask[:, :, np.newaxis] * BLUE_TINT
mask_mask = np.round(mask_mask).astype(np.int64)
img += neuron_mask + mask_mask + int_mask
img[border] = (255, 255, 0)
img = np.clip(img, 0, 255).astype(np.uint8)
return img
def noop(*args, **kwargs):
return None
layernames = list(map(safe_layername, settings.FEATURE_NAMES))
hook_modules = []
model = loadmodel(hook_feature, hook_modules=hook_modules)
fo = NeuronOperator()
# ==== STEP 1: Feature extraction ====
# features: list of activations - one 63305 x c x h x w tensor for each feature
# layer (defined by settings.FEATURE_NAMES; default is just layer4)
# maxfeature: the maximum activation across the input map for each channel (e.g. for layer 4, there is a 7x7 input map; what's the max value). one 63305 x c tensor for each feature
features, maxfeature, preds, logits = fo.feature_extraction(model=model)
# ==== STEP 2: Threshold quantization ====
thresholds = [
fo.quantile_threshold(lf, savepath=f"quantile_{ln}.npy")
for lf, ln in zip(features, layernames)
]
# ==== New: multilayer case - neuron contributions ====
contrs_spread = [{} for _ in layernames + ["final"]]
# Zip it all together
ranger = tqdm(
zip(
layernames,
features,
maxfeature,
thresholds,
[None, *layernames],
[None, *features],
[None, *thresholds],
contrs_spread,
),
total=len(layernames),
)
tallies = []
# Load cached card htmls if they exist - will be overwritten if not skipping
# summarization step
all_card_htmls_fname = os.path.join(settings.OUTPUT_FOLDER, "card_htmls.pkl")
if os.path.exists(all_card_htmls_fname):
print(f"Loading cached card htmls {all_card_htmls_fname}")
with open(all_card_htmls_fname, "rb") as f:
all_card_htmls = pickle.load(f)
else:
all_card_htmls = {}
ds = fo.data
def upsample(actmap, shape):
actmap_im_rsz = Image.fromarray(actmap).resize(shape, resample=Image.BILINEAR)
actmap_rsz = np.array(actmap_im_rsz)
return actmap_rsz
def get_border(x):
x = x.astype(np.uint8)
border = x - ndimage.morphology.binary_dilation(x)
border = border.astype(np.bool)
return border
def add_mask(img, mask):
img = img.astype(np.int64)
mask_border = get_border(mask)
mask_weights = np.clip(mask.astype(np.float32), 0.25, 1.0)
img_masked = img * mask_weights[:, :, np.newaxis]
img_masked[mask_border] = (255, 255, 0)
img_masked = np.clip(np.round(img_masked), 0, 255).astype(np.uint8)
return img_masked
def friendly(mstr):
return mstr.replace('(', '').replace(')', '').replace(' ', '_').replace(',', '')
for (
layername,
layer_features,
layer_maxfeature,
layer_thresholds,
prev_layername,
prev_features,
prev_thresholds,
layer_contrs,
) in ranger:
ranger.set_description(f"Layer {layername}")
# ==== STEP 3: calculating IoU scores ====
# Get tally dfname
if settings.UNIT_RANGE is None:
tally_dfname = f"tally_{layername}.csv"
else:
# Only use a subset of units
tally_dfname = f"tally_{layername}_{min(settings.UNIT_RANGE)}_{max(settings.UNIT_RANGE)}.csv"
tally_result, mc = fo.tally(
layer_features, layer_thresholds, savepath=tally_dfname
)
N = 483
top = np.argsort(layer_maxfeature, 0)[: -1 -20 : -1, :].transpose()
visdir = os.path.join(settings.OUTPUT_FOLDER, f'vis_{N}')
os.makedirs(visdir, exist_ok=True)
    # DO THE FULL UPSAMPLING OF THIS FEATURE MASK (this is very inefficient)
feats = layer_features[:, N]
from tqdm import tqdm
acts = np.stack([
upsample(feats_i, (112, 112))
for feats_i in tqdm(feats, desc='upsampling')
])
neuron_masks = acts > layer_thresholds[N]
actmin = acts.min()
actmax = acts.max()
for record in tally_result:
if record['unit'] != N:
continue
def get_labs(label):
# Image masks
labs_enc = mc.get_mask(label)
# Mask labels
labs = cmask.decode(labs_enc)
labs = labs.reshape((layer_features.shape[0], *mc.mask_shape))
return labs
labd = {}
def rec_add(lab_f):
lab_str = lab_f.to_str(lambda x: ds.name(None, x))
labd[lab_str] = get_labs(lab_f)
# Measure IoU again.
# print(f"{lab_str} IoU: {iou(labd[lab_str], neuron_masks):f}")
if isinstance(lab_f, F.Leaf):
return
elif isinstance(lab_f, F.Not):
rec_add(lab_f.val)
elif isinstance(lab_f, F.And) or isinstance(lab_f, F.Or):
# binary op
rec_add(lab_f.left)
rec_add(lab_f.right)
else:
raise ValueError(f"Unknown formula {lab_f}")
root_f = F.parse(record["label"], reverse_namer=ds.rev_name)
rec_add(root_f)
# Neuron mask
# Upsample
for i, index in enumerate(tqdm(top[N], desc='topn')):
# Most popular images
imfn = ds.filename(index)
img = np.array(Image.open(imfn))
# Neuron activations - upsample
# Here's your neuron mask
acts_up = acts[index]
acts_up = upsample(acts_up, img.shape[:2])
img_hm = add_heatmap(img, acts_up, actmin, actmax)
new_imfn = os.path.join(visdir, f"{i}_neuron_act.jpg")
Image.fromarray(img_hm).save(new_imfn)
# Find borders
neuron_mask = acts_up > layer_thresholds[N]
img_masked = add_mask(img, neuron_mask)
new_imfn = os.path.join(visdir, f"{i}_neuron.jpg")
Image.fromarray(img_masked).save(new_imfn)
# Go through primitives and masks
# Save original
orig_imfn = os.path.join(visdir, f"{i}_orig.jpg")
Image.fromarray(img).save(orig_imfn)
for mstr, m in labd.items():
m = m[index]
m = upsample(m, img.shape[:2])
mstr_friendly = friendly(mstr)
img_masked = add_mask(img, m)
new_imfn = os.path.join(visdir, f"{i}_{mstr_friendly}.jpg")
Image.fromarray(img_masked).save(new_imfn)
# IoU masks
iou_imfn = os.path.join(visdir, f"{i}_{mstr_friendly}_iou.jpg")
iou_img = iou_mask(img, neuron_mask, m)
Image.fromarray(iou_img).save(iou_imfn)
# Find examples - water river AND blue
if N == 483:
mstr = "((water OR river) AND blue-c)"
new_m = F.parse(mstr, reverse_namer=ds.rev_name)
mstr_friendly = friendly(mstr)
new_labs = get_labs(new_m)
# Sort by most common
top = np.argsort(new_labs.sum((1, 2)))[::-1]
top = top[30:50]
for i, index in enumerate(tqdm(top, desc='blue')):
# Most popular images
imfn = ds.filename(index)
img = np.array(Image.open(imfn))
# Neuron activations - upsample
actmap = acts[index]
actmap = upsample(actmap, img.shape[:2])
img_hm = add_heatmap(img, actmap, actmin, actmax)
new_imfn = os.path.join(visdir, f"BLUE-{i}_neuron_act.jpg")
Image.fromarray(img_hm).save(new_imfn)
# Here's your neuron mask
neuron_mask = actmap > layer_thresholds[N]
img_masked = add_mask(img, neuron_mask)
new_imfn = os.path.join(visdir, f"BLUE-{i}_neuron.jpg")
Image.fromarray(img_masked).save(new_imfn)
# Go through primitives and masks
m = new_labs[index]
m = upsample(m, img.shape[:2])
img_masked = add_mask(img, m)
new_imfn = os.path.join(visdir, f"BLUE-{i}_{mstr_friendly}.jpg")
Image.fromarray(img_masked).save(new_imfn)
for mstr, m in labd.items():
m = m[index]
m = upsample(m, img.shape[:2])
mstr_friendly = friendly(mstr)
img_masked = add_mask(img, m)
new_imfn = os.path.join(visdir, f"BLUE-{i}_{mstr_friendly}.jpg")
Image.fromarray(img_masked).save(new_imfn)
iou_imfn = os.path.join(visdir, f"BLUE-{i}_{mstr_friendly}_iou.jpg")
iou_img = iou_mask(img, neuron_mask, m)
Image.fromarray(iou_img).save(iou_imfn)
# Save original
orig_imfn = os.path.join(visdir, f"BLUE-{i}_orig.jpg")
Image.fromarray(img).save(orig_imfn)
|
[
"[email protected]"
] | |
33f283df6d6a2614a08df515391e4bc8357670c2
|
9c5b9e4197bfdcf74cec45dcca47d9caa3317a4b
|
/main.py
|
06e1a299d1af620fd21b58c3165b1705a3f4d14e
|
[] |
no_license
|
rhuidean/terrain_data
|
614c1a8f5420538b74e2e72bc20e19ccce7e565f
|
596b2e3993d7b445c8115cd316349a026ad63dae
|
refs/heads/master
| 2021-01-25T06:36:11.245301 | 2017-06-07T05:21:27 | 2017-06-07T05:21:27 | 93,593,657 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,530 |
py
|
#!/usr/bin/python
""" Complete the code in ClassifyNB.py with the sklearn
Naive Bayes classifier to classify the terrain data.
The objective of this exercise is to recreate the decision
boundary found in the lesson video, and make a plot that
visually shows the decision boundary """
from prep_terrain_data import makeTerrainData
from class_vis import prettyPicture, output_image
from classify_nb import classify
import numpy as np
import pylab as pl
features_train, labels_train, features_test, labels_test = makeTerrainData()
### the training data (features_train, labels_train) have both "fast" and "slow" points mixed
### in together--separate them so we can give them different colors in the scatterplot,
### and visually identify them
grade_fast = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==0]
bumpy_fast = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==0]
grade_slow = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==1]
bumpy_slow = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==1]
# You will need to complete this function imported from the ClassifyNB script.
# Be sure to change to that code tab to complete this quiz.
clf = classify(features_train, labels_train)
### draw the decision boundary with the test points overlaid
prettyPicture(clf, features_test, labels_test)
output_image("test.png", "png", open("test.png", "rb").read())
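# Hedged sketch (illustration only): the quiz asks for sklearn's Naive Bayes classifier, so
# classify() in classify_nb is expected to look roughly like this hypothetical version
# (the name classify_sketch and the choice of GaussianNB are assumptions, not graded code):
from sklearn.naive_bayes import GaussianNB
def classify_sketch(features_train, labels_train):
    clf = GaussianNB()
    clf.fit(features_train, labels_train)
    return clf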
|
[
"[email protected]"
] | |
663e6787be5b9db16ae4072efaf3ea2dbae9775f
|
64509ed67a7c6594485f15eb1a3014d25c02957f
|
/plugins/module_utils/network/avi/ansible_utils.py
|
9abd7b8c9b6f1e6eea3d8a51afff8ae382c7a26b
|
[] |
no_license
|
ansible-collection-migration/ansible.avi
|
4a2673f10ba5196ea02ddc6f7b5509898dcbd0a6
|
bef71c337f967ef2f7a9969bac78dc71df14c78f
|
refs/heads/master
| 2020-12-18T13:02:59.075595 | 2020-02-03T22:02:18 | 2020-02-03T22:02:18 | 235,393,245 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 21,449 |
py
|
from __future__ import absolute_import
"""
Created on Aug 16, 2016
@author: Gaurav Rastogi ([email protected])
"""
import os
import re
import logging
import sys
from copy import deepcopy
from ansible.module_utils.basic import env_fallback
try:
from ansible_collections.ansible.avi.plugins.module_utils.network.avi.avi_api import (
ApiSession, ObjectNotFound, avi_sdk_syslog_logger, AviCredentials, HAS_AVI)
except ImportError:
HAS_AVI = False
if os.environ.get('AVI_LOG_HANDLER', '') != 'syslog':
log = logging.getLogger(__name__)
else:
# Ansible does not allow logging from the modules.
log = avi_sdk_syslog_logger()
def _check_type_string(x):
"""
:param x:
:return: True if it is of type string
"""
if isinstance(x, str):
return True
if sys.version_info[0] < 3:
try:
return isinstance(x, unicode)
except NameError:
return False
class AviCheckModeResponse(object):
"""
Class to support ansible check mode.
"""
def __init__(self, obj, status_code=200):
self.obj = obj
self.status_code = status_code
def json(self):
return self.obj
def ansible_return(module, rsp, changed, req=None, existing_obj=None,
api_context=None):
"""
:param module: AnsibleModule
:param rsp: ApiResponse from avi_api
:param changed: boolean
:param req: ApiRequest to avi_api
:param existing_obj: object to be passed debug output
:param api_context: api login context
helper function to return the right ansible based on the error code and
changed
Returns: specific ansible module exit function
"""
if rsp is not None and rsp.status_code > 299:
return module.fail_json(
msg='Error %d Msg %s req: %s api_context:%s ' % (
rsp.status_code, rsp.text, req, api_context))
api_creds = AviCredentials()
api_creds.update_from_ansible_module(module)
key = '%s:%s:%s' % (api_creds.controller, api_creds.username,
api_creds.port)
disable_fact = module.params.get('avi_disable_session_cache_as_fact')
fact_context = None
if not disable_fact:
fact_context = module.params.get('api_context', {})
if fact_context:
fact_context.update({key: api_context})
else:
fact_context = {key: api_context}
obj_val = rsp.json() if rsp else existing_obj
if (obj_val and module.params.get("obj_username", None) and
"username" in obj_val):
obj_val["obj_username"] = obj_val["username"]
if (obj_val and module.params.get("obj_password", None) and
"password" in obj_val):
obj_val["obj_password"] = obj_val["password"]
old_obj_val = existing_obj if changed and existing_obj else None
api_context_val = api_context if disable_fact else None
ansible_facts_val = dict(
avi_api_context=fact_context) if not disable_fact else {}
return module.exit_json(
changed=changed, obj=obj_val, old_obj=old_obj_val,
ansible_facts=ansible_facts_val, api_context=api_context_val)
def purge_optional_fields(obj, module):
"""
It purges the optional arguments to be sent to the controller.
:param obj: dictionary of the ansible object passed as argument.
:param module: AnsibleModule
return modified obj
"""
purge_fields = []
for param, spec in module.argument_spec.items():
if not spec.get('required', False):
if param not in obj:
# these are ansible common items
continue
if obj[param] is None:
purge_fields.append(param)
log.debug('purging fields %s', purge_fields)
for param in purge_fields:
obj.pop(param, None)
return obj
def cleanup_absent_fields(obj):
"""
cleans up any field that is marked as state: absent. It needs to be removed
from the object if it is present.
:param obj:
:return: Purged object
"""
if type(obj) != dict:
return obj
cleanup_keys = []
for k, v in obj.items():
if type(v) == dict:
if (('state' in v and v['state'] == 'absent') or
(v == "{'state': 'absent'}")):
cleanup_keys.append(k)
else:
cleanup_absent_fields(v)
if not v:
cleanup_keys.append(k)
elif type(v) == list:
new_list = []
for elem in v:
elem = cleanup_absent_fields(elem)
if elem:
# remove the item from list
new_list.append(elem)
if new_list:
obj[k] = new_list
else:
cleanup_keys.append(k)
        elif isinstance(v, str):
if v == "{'state': 'absent'}":
cleanup_keys.append(k)
for k in cleanup_keys:
del obj[k]
return obj
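# Hedged example (illustration only; the key names are made up):
# cleanup_absent_fields({'name': 'p1', 'analytics_policy': {'state': 'absent'}, 'enabled': True})
# returns {'name': 'p1', 'enabled': True} -- the field marked state: absent is purged.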
RE_REF_MATCH = re.compile(r'^/api/[\w/]+\?name\=[\w]+[^#<>]*$')
# if HTTP ref match then strip out the #name
HTTP_REF_MATCH = re.compile(r'https://[\w.0-9:-]+/api/.+')
HTTP_REF_W_NAME_MATCH = re.compile(r'https://[\w.0-9:-]+/api/.*#.+')
def ref_n_str_cmp(x, y):
"""
compares two references
1. check for exact reference
2. check for obj_type/uuid
3. check for name
if x is ref=name then extract uuid and name from y and use it.
if x is http_ref then
strip x and y
compare them.
if x and y are urls then match with split on #
if x is a RE_REF_MATCH then extract name
if y is a REF_MATCH then extract name
:param x: first string
:param y: second string from controller's object
Returns
True if they are equivalent else False
"""
    if type(y) in (int, float, bool, complex):
y = str(y)
x = str(x)
if not (_check_type_string(x) and _check_type_string(y)):
return False
y_uuid = y_name = str(y)
x = str(x)
if RE_REF_MATCH.match(x):
x = x.split('name=')[1]
elif HTTP_REF_MATCH.match(x):
x = x.rsplit('#', 1)[0]
y = y.rsplit('#', 1)[0]
elif RE_REF_MATCH.match(y):
y = y.split('name=')[1]
if HTTP_REF_W_NAME_MATCH.match(y):
path = y.split('api/', 1)[1]
# Fetching name or uuid from path /xxxx_xx/xx/xx_x/uuid_or_name
uuid_or_name = path.split('/')[-1]
parts = uuid_or_name.rsplit('#', 1)
y_uuid = parts[0]
y_name = parts[1] if len(parts) > 1 else ''
    # x is just a string but y is a url so match either uuid or name
result = (x in (y, y_name, y_uuid))
if not result:
log.debug('x: %s y: %s y_name %s y_uuid %s',
x, y, y_name, y_uuid)
return result
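# Hedged example (illustration only; controller address and object names are made up):
# ref_n_str_cmp('/api/pool?name=p1', 'https://10.10.25.42/api/pool/pool-abc#p1') is True,
# because the name extracted from the relative reference matches the #name suffix of the URL.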
def avi_obj_cmp(x, y, sensitive_fields=None):
"""
compares whether x is fully contained in y. The comparision is different
from a simple dictionary compare for following reasons
1. Some fields could be references. The object in controller returns the
full URL for those references. However, the ansible script would have
it specified as /api/pool?name=blah. So, the reference fields need
to match uuid, relative reference based on name and actual reference.
2. Optional fields with defaults: In case there are optional fields with
defaults then controller automatically fills it up. This would
cause the comparison with Ansible object specification to always return
changed.
3. Optional fields without defaults: This is most tricky. The issue is
how to specify deletion of such objects from ansible script. If the
ansible playbook has object specified as Null then Avi controller will
reject for non Message(dict) type fields. In addition, to deal with the
defaults=null issue all the fields that are set with None are purged
out before comparing with Avi controller's version
So, the solution is to pass state: absent if any optional field needs
to be deleted from the configuration. The script would return changed
=true if it finds a key in the controller version and it is marked with
state: absent in ansible playbook. Alternatively, it would return
false if key is not present in the controller object. Before, doing
put or post it would purge the fields that are marked state: absent.
:param x: first string
:param y: second string from controller's object
:param sensitive_fields: sensitive fields to ignore for diff
Returns:
True if x is subset of y else False
"""
if not sensitive_fields:
sensitive_fields = set()
    if isinstance(x, str):
# Special handling for strings as they can be references.
return ref_n_str_cmp(x, y)
if type(x) not in [list, dict]:
# if it is not list or dict or string then simply compare the values
return x == y
if type(x) == list:
# should compare each item in the list and that should match
if len(x) != len(y):
log.debug('x has %d items y has %d', len(x), len(y))
return False
for i in zip(x, y):
if not avi_obj_cmp(i[0], i[1], sensitive_fields=sensitive_fields):
# no need to continue
return False
if type(x) == dict:
x.pop('_last_modified', None)
x.pop('tenant', None)
y.pop('_last_modified', None)
x.pop('api_version', None)
        y.pop('api_version', None)
d_xks = [k for k in x.keys() if k in sensitive_fields]
if d_xks:
# if there is sensitive field then always return changed
return False
# pop the keys that are marked deleted but not present in y
# return false if item is marked absent and is present in y
d_x_absent_ks = []
for k, v in x.items():
if v is None:
d_x_absent_ks.append(k)
continue
if isinstance(v, dict):
if ('state' in v) and (v['state'] == 'absent'):
if type(y) == dict and k not in y:
d_x_absent_ks.append(k)
else:
return False
elif not v:
d_x_absent_ks.append(k)
elif isinstance(v, list) and not v:
d_x_absent_ks.append(k)
# Added condition to check key in dict.
elif isinstance(v, str) or (k in y and isinstance(y[k], str)):
# this is the case when ansible converts the dictionary into a
# string.
if v == "{'state': 'absent'}" and k not in y:
d_x_absent_ks.append(k)
elif not v and k not in y:
# this is the case when x has set the value that qualifies
# as not but y does not have that value
d_x_absent_ks.append(k)
for k in d_x_absent_ks:
x.pop(k)
x_keys = set(x.keys())
y_keys = set(y.keys())
if not x_keys.issubset(y_keys):
# log.debug('x has %s and y has %s keys', len(x_keys), len(y_keys))
return False
for k, v in x.items():
if k not in y:
# log.debug('k %s is not in y %s', k, y)
return False
if not avi_obj_cmp(v, y[k], sensitive_fields=sensitive_fields):
# log.debug('k %s v %s did not match in y %s', k, v, y[k])
return False
return True
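# Hedged example (illustration only; object values are made up):
# avi_obj_cmp({'name': 'pool1'}, {'name': 'pool1', 'uuid': 'pool-xyz'}) is True, since the
# ansible-side dict is fully contained in the controller object, while
# avi_obj_cmp({'name': 'pool1', 'enabled': True}, {'name': 'pool1'}) is False ("changed").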
POP_FIELDS = ['state', 'controller', 'username', 'password', 'api_version',
'avi_credentials', 'avi_api_update_method', 'avi_api_patch_op',
'api_context', 'tenant', 'tenant_uuid', 'avi_disable_session_cache_as_fact']
def get_api_context(module, api_creds):
api_context = module.params.get('api_context')
if api_context and module.params.get('avi_disable_session_cache_as_fact'):
return api_context
elif api_context and not module.params.get(
'avi_disable_session_cache_as_fact'):
key = '%s:%s:%s' % (api_creds.controller, api_creds.username,
api_creds.port)
return api_context.get(key)
else:
return None
def avi_ansible_api(module, obj_type, sensitive_fields):
"""
This converts the Ansible module into AVI object and invokes APIs
:param module: Ansible module
:param obj_type: string representing Avi object type
:param sensitive_fields: sensitive fields to be excluded for comparison
purposes.
Returns:
success: module.exit_json with obj=avi object
        failure: module.fail_json
"""
api_creds = AviCredentials()
api_creds.update_from_ansible_module(module)
api_context = get_api_context(module, api_creds)
if api_context:
api = ApiSession.get_session(
api_creds.controller,
api_creds.username,
password=api_creds.password,
timeout=api_creds.timeout,
tenant=api_creds.tenant,
tenant_uuid=api_creds.tenant_uuid,
token=api_context['csrftoken'],
port=api_creds.port,
session_id=api_context['session_id'],
csrftoken=api_context['csrftoken'])
else:
api = ApiSession.get_session(
api_creds.controller,
api_creds.username,
password=api_creds.password,
timeout=api_creds.timeout,
tenant=api_creds.tenant,
tenant_uuid=api_creds.tenant_uuid,
token=api_creds.token,
port=api_creds.port)
state = module.params['state']
# Get the api version.
avi_update_method = module.params.get('avi_api_update_method', 'put')
avi_patch_op = module.params.get('avi_api_patch_op', 'add')
api_version = api_creds.api_version
name = module.params.get('name', None)
# Added Support to get uuid
uuid = module.params.get('uuid', None)
check_mode = module.check_mode
if uuid and obj_type != 'cluster':
obj_path = '%s/%s' % (obj_type, uuid)
else:
obj_path = '%s/' % obj_type
obj = deepcopy(module.params)
tenant = obj.pop('tenant', '')
tenant_uuid = obj.pop('tenant_uuid', '')
# obj.pop('cloud_ref', None)
for k in POP_FIELDS:
obj.pop(k, None)
purge_optional_fields(obj, module)
# Special code to handle situation where object has a field
# named username. This is used in case of api/user
# The following code copies the username and password
# from the obj_username and obj_password fields.
if 'obj_username' in obj:
obj['username'] = obj['obj_username']
obj.pop('obj_username')
if 'obj_password' in obj:
obj['password'] = obj['obj_password']
obj.pop('obj_password')
if 'full_name' not in obj and 'name' in obj and obj_type == "user":
obj['full_name'] = obj['name']
# Special case as name represent full_name in user module
# As per API response, name is always same as username regardless of full_name
obj['name'] = obj['username']
log.info('passed object %s ', obj)
if uuid:
# Get the object based on uuid.
try:
existing_obj = api.get(
obj_path, tenant=tenant, tenant_uuid=tenant_uuid,
params={'include_refs': '', 'include_name': ''},
api_version=api_version)
existing_obj = existing_obj.json()
except ObjectNotFound:
existing_obj = None
elif name:
params = {'include_refs': '', 'include_name': ''}
if obj.get('cloud_ref', None):
# this is the case when gets have to be scoped with cloud
cloud = obj['cloud_ref'].split('name=')[1]
params['cloud_ref.name'] = cloud
existing_obj = api.get_object_by_name(
obj_type, name, tenant=tenant, tenant_uuid=tenant_uuid,
params=params, api_version=api_version)
# Need to check if tenant_ref was provided and the object returned
# is actually in admin tenant.
if existing_obj and 'tenant_ref' in obj and 'tenant_ref' in existing_obj:
# https://10.10.25.42/api/tenant/admin#admin
existing_obj_tenant = existing_obj['tenant_ref'].split('#')[1]
obj_tenant = obj['tenant_ref'].split('name=')[1]
if obj_tenant != existing_obj_tenant:
existing_obj = None
else:
# added api version to avi api call.
existing_obj = api.get(obj_path, tenant=tenant, tenant_uuid=tenant_uuid,
params={'include_refs': '', 'include_name': ''},
api_version=api_version).json()
if state == 'absent':
rsp = None
changed = False
err = False
if not check_mode and existing_obj:
try:
if name is not None:
# added api version to avi api call.
rsp = api.delete_by_name(
obj_type, name, tenant=tenant, tenant_uuid=tenant_uuid,
api_version=api_version)
else:
# added api version to avi api call.
rsp = api.delete(
obj_path, tenant=tenant, tenant_uuid=tenant_uuid,
api_version=api_version)
except ObjectNotFound:
pass
if check_mode and existing_obj:
changed = True
if rsp:
if rsp.status_code == 204:
changed = True
else:
err = True
if not err:
return ansible_return(
module, rsp, changed, existing_obj=existing_obj,
api_context=api.get_context())
elif rsp:
return module.fail_json(msg=rsp.text)
rsp = None
req = None
if existing_obj:
# this is case of modify as object exists. should find out
# if changed is true or not
if name is not None and obj_type != 'cluster':
obj_uuid = existing_obj['uuid']
obj_path = '%s/%s' % (obj_type, obj_uuid)
if avi_update_method == 'put':
changed = not avi_obj_cmp(obj, existing_obj, sensitive_fields)
obj = cleanup_absent_fields(obj)
if changed:
req = obj
if check_mode:
# No need to process any further.
rsp = AviCheckModeResponse(obj=existing_obj)
else:
rsp = api.put(
obj_path, data=req, tenant=tenant,
tenant_uuid=tenant_uuid, api_version=api_version)
elif check_mode:
rsp = AviCheckModeResponse(obj=existing_obj)
else:
if check_mode:
# No need to process any further.
rsp = AviCheckModeResponse(obj=existing_obj)
changed = True
else:
obj.pop('name', None)
patch_data = {avi_patch_op: obj}
rsp = api.patch(
obj_path, data=patch_data, tenant=tenant,
tenant_uuid=tenant_uuid, api_version=api_version)
obj = rsp.json()
changed = not avi_obj_cmp(obj, existing_obj)
if changed:
log.debug('EXISTING OBJ %s', existing_obj)
log.debug('NEW OBJ %s', obj)
else:
changed = True
req = obj
if check_mode:
rsp = AviCheckModeResponse(obj=None)
else:
rsp = api.post(obj_type, data=obj, tenant=tenant,
tenant_uuid=tenant_uuid, api_version=api_version)
return ansible_return(module, rsp, changed, req, existing_obj=existing_obj,
api_context=api.get_context())
def avi_common_argument_spec():
"""
Returns common arguments for all Avi modules
:return: dict
"""
credentials_spec = dict(
controller=dict(fallback=(env_fallback, ['AVI_CONTROLLER'])),
username=dict(fallback=(env_fallback, ['AVI_USERNAME'])),
password=dict(fallback=(env_fallback, ['AVI_PASSWORD']), no_log=True),
api_version=dict(default='16.4.4', type='str'),
tenant=dict(default='admin'),
tenant_uuid=dict(default='', type='str'),
port=dict(type='int'),
timeout=dict(default=300, type='int'),
token=dict(default='', type='str', no_log=True),
session_id=dict(default='', type='str', no_log=True),
csrftoken=dict(default='', type='str', no_log=True)
)
return dict(
controller=dict(fallback=(env_fallback, ['AVI_CONTROLLER'])),
username=dict(fallback=(env_fallback, ['AVI_USERNAME'])),
password=dict(fallback=(env_fallback, ['AVI_PASSWORD']), no_log=True),
tenant=dict(default='admin'),
tenant_uuid=dict(default=''),
api_version=dict(default='16.4.4', type='str'),
avi_credentials=dict(default=None, type='dict',
options=credentials_spec),
api_context=dict(type='dict'),
avi_disable_session_cache_as_fact=dict(default=False, type='bool'))
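# --- Hedged usage sketch (illustrative addition, not part of the original
# module): a minimal way for an Avi Ansible module to build its argument
# spec from the helper above. The extra 'name'/'state' options shown here
# are assumptions, not options every module needs.
if __name__ == '__main__':
    from ansible.module_utils.basic import AnsibleModule
    argument_specs = avi_common_argument_spec()
    argument_specs.update(dict(
        state=dict(default='present', choices=['absent', 'present']),
        name=dict(type='str'),
    ))
    module = AnsibleModule(argument_spec=argument_specs,
                           supports_check_mode=True)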
|
[
"[email protected]"
] | |
6572a45e896e65f747dd399131d451027c8a0f8e
|
bfb113c3076f5b0570953583e7a2321c774d73ea
|
/Classes/class_example106.py
|
fb14ae80d41b7c5e58d8fbd7205723b8647d23bc
|
[] |
no_license
|
gsudarshan1990/Training_Projects
|
82c48d5492cb4be94db09ee5c66142c370794e1c
|
2b7edfafc4e448bd558c034044570496ca68bf2d
|
refs/heads/master
| 2022-12-10T15:56:17.535096 | 2020-09-04T06:02:31 | 2020-09-04T06:02:31 | 279,103,151 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 310 |
py
|
# This Python program is about static methods
class Rectangle:
def area(length, breadth):
return length*breadth
Rectangle.area=staticmethod(Rectangle.area)
print(Rectangle.area(10,12))
class Square:
def area(side):
return side**2
area=staticmethod(area)
print(Square.area(7))
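# Added sketch (illustrative, not part of the original example): the same
# effect is usually written with the @staticmethod decorator instead of
# reassigning the attribute afterwards.
class Circle:
    @staticmethod
    def area(radius):
        return 3.14159*radius**2
print(Circle.area(7))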
|
[
"[email protected]"
] | |
5ec2bd27d104ad261ca02ea4c6eb402c72836eb5
|
e1c7c25c22c2f854aa8e3d8f6fffdf80a0b4dfbf
|
/CodeForces/CodeForces_Solution_In_Python/watermelon_problem.py
|
e4682dc0816385ec70cb341507d02ceba1752e7f
|
[] |
no_license
|
roshan13ghimire/Competitive_Programming
|
efc85f9fe6fa46edff96931ca3a1cca78628918b
|
0c238a391c6acee8763968ef298b765c133b7111
|
refs/heads/master
| 2023-04-15T16:35:07.711491 | 2021-04-12T03:00:05 | 2021-04-12T03:00:05 | 273,682,360 | 4 | 1 | null | 2020-08-05T02:11:53 | 2020-06-20T09:59:57 |
Python
|
UTF-8
|
Python
| false | false | 132 |
py
|
#watermelon_problem
n=int(input())
if(n==2):
print("NO")
exit()
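# Added note: 2 is the only even weight that cannot be split into two even,
# positive parts; every even n >= 4 can (for example, 2 and n-2).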
if(n%2==0):
print("YES")
else:
print("NO")
|
[
"[email protected]"
] | |
523b04f22ef940fae42d54f7acd0945edab44cd0
|
26c8a9bda50bb2ea9d44529d803477e788d102a2
|
/MODEL1302180002/model.py
|
a2c7fdcded362373ccd9ab2e6a9483cf516de075
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
biomodels/MODEL1302180002
|
20a378031da3750921062cd69752e11eb9ff6645
|
fd9c884345a84dcf4c75d3db87f27520d6c3853f
|
refs/heads/master
| 2020-05-31T02:54:36.073367 | 2014-10-16T05:49:30 | 2014-10-16T05:49:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 427 |
py
|
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL1302180002.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString)
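# Added note: importlib.util.find_spec('libsbml') would be an alternative way
# to probe for the package without the try/except import in module_exists().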
|
[
"[email protected]"
] | |
14280dd87f4eec14382e48e5062801018048ead1
|
99deab5f52fd7262a26de9aa5d0163bfa738590f
|
/python/leetcode/geometry/593_valid_square.py
|
10db741d5cf9151696a35d715fbce4d92f80f27b
|
[] |
no_license
|
zchen0211/topcoder
|
e47fc07c928b83138e27fd6681b373ce499480b0
|
4d73e4c1f2017828ff2d36058819988146356abe
|
refs/heads/master
| 2022-01-17T16:54:35.871026 | 2019-05-08T19:26:23 | 2019-05-13T05:19:46 | 84,052,683 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,474 |
py
|
"""
593. Valid Square (Medium)
Given the coordinates of four points in 2D space, return whether the four points could construct a square.
The coordinate (x,y) of a point is represented by an integer array with two integers.
Example:
Input: p1 = [0,0], p2 = [1,1], p3 = [1,0], p4 = [0,1]
Output: True
Note:
All the input integers are in the range [-10000, 10000].
A valid square has four equal sides with positive length and four equal angles (90-degree angles).
Input points have no order.
"""
import collections
class Solution(object):
def validSquare(self, p1, p2, p3, p4):
"""
:type p1: List[int]
:type p2: List[int]
:type p3: List[int]
:type p4: List[int]
:rtype: bool
"""
dis12 = self.distance(p1, p2)
dis13 = self.distance(p1, p3)
dis14 = self.distance(p1, p4)
dis23 = self.distance(p2, p3)
dis24 = self.distance(p2, p4)
dis34 = self.distance(p3, p4)
dis_stat = collections.Counter([dis12,dis13,dis14,dis23,dis24,dis34])
dis_stat = dict(dis_stat)
if len(dis_stat.keys()) == 2:
max_ = max(dis_stat.keys())
min_ = min(dis_stat.keys())
if dis_stat[max_]==2 and dis_stat[min_]==4 and max_==2*min_:
return True
return False
def distance(self, p1, p2):
x1, y1 = p1
x2, y2 = p2
return (x1-x2)**2 + (y1-y2)**2
if __name__ == '__main__':
a = Solution()
# print a.validSquare([0,0],[1,1],[1,0],[0,2])
print a.validSquare([0,0],[-1,0],[1,0],[0,1])
|
[
"[email protected]"
] | |
b7ab08da2f79c2419645422a6de099e4cd1df741
|
72880d033c9948098291efebf934255635f8c6ea
|
/pythonexamples/constructor2.py
|
0dec20edf9b0368a505ee3cb68138df8219450b0
|
[] |
no_license
|
manutdmohit/mypythonexamples
|
729347aec300bda01f629224337c84d5838a71f2
|
b189c201d07b1a345478699bbb3852c02eb96ce5
|
refs/heads/master
| 2023-04-18T01:55:22.026867 | 2021-05-13T05:59:09 | 2021-05-13T05:59:09 | 366,946,854 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 306 |
py
|
class Student:
    def __init__(self,name,rollno,marks):
print('Creating instance variables and performing initialization...')
self.name=name
self.rollno=rollno
self.marks=marks
s1=Student('Ram',101,90)
s2=Student('Sita',102,95)
print(s1.name,s1.rollno,s1.marks)
print(s2.name,s2.rollno,s2.marks)
|
[
"[email protected]"
] | |
070e6ab9841df7311809ebd17f01a2e542e6a9bb
|
7a4ed01a40e8d79126b26f5e8fca43c8e61e78fd
|
/Geeky Shows/Advance Pyhton/220.Module[34]/PythonProject/Example-16.py
|
e95b5e1423d89cfc81cdc3ab590dfb301de598a5
|
[] |
no_license
|
satyam-seth-learnings/python_learning
|
5a7f75bb613dcd7fedc31a1567a434039b9417f8
|
7e76c03e94f5c314dcf1bfae6f26b4a8a6e658da
|
refs/heads/main
| 2023-08-25T14:08:11.423875 | 2021-10-09T13:00:49 | 2021-10-09T13:00:49 | 333,840,032 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 166 |
py
|
# Example-16.py <---- Main Module
from thired import Myclass,Myschool
from fourth import Mycollege
c=Myclass()
c.name()
s=Myschool()
s.show()
cl=Mycollege()
cl.disp()
|
[
"[email protected]"
] | |
e2f30408e62e31ec33316a8fdad1c5f5e6477b7c
|
b95e71dcc1b42ebf3459ee57bd0119c618a79796
|
/HashTable/Easy/811.py
|
e34b3be698d497e6d4f22a44b9eb0185ba053eed
|
[] |
no_license
|
anton-dovnar/LeetCode
|
e47eece7de28d76b0c3b997d4dacb4f151a839b5
|
6ed9e1bd4a0b48e343e1dd8adaebac26a3bc2ed7
|
refs/heads/master
| 2023-06-29T07:21:06.335041 | 2021-07-31T15:08:05 | 2021-07-31T15:08:05 | 361,205,679 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 630 |
py
|
"""
Subdomain Visit Count
"""
from typing import List
from collections import deque
class Solution:
def subdomainVisits(self, cpdomains: List[str]) -> List[str]:
hash_table = dict()
for record in cpdomains:
count, domain = record.split()
queue = deque(domain.split("."))
while queue:
if domain in hash_table:
hash_table[domain] += int(count)
else:
hash_table[domain] = int(count)
queue.popleft()
domain = ".".join(queue)
return [f"{value} {key}" for key, value in hash_table.items()]
|
[
"[email protected]"
] | |
f01df49e553d025d48a9b6321804b23a2568aec9
|
b00840e56173dc2a196442bd354b9e3cc13b17df
|
/code_dust_fargo/plotBothDensityMaps.py
|
44ec031d239796ac37957df88912b7f62710b65e
|
[] |
no_license
|
Sportsfan77777/vortex
|
56c28fb760f6c98de4a7c8fdcf1168d78b4e57af
|
780ec14937d1b79e91a367d58f75adc905b8eef2
|
refs/heads/master
| 2023-08-31T02:50:09.454230 | 2023-08-24T10:55:05 | 2023-08-24T10:55:05 | 41,785,163 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,817 |
py
|
"""
plot 2-D gas and dust density maps side-by-side
usage: plotBothDensityMaps.py [-h] [-c NUM_CORES] [--dir SAVE_DIRECTORY]
[--hide] [-v VERSION] [--range R_LIM R_LIM]
                              [--shift] [--cmapGas GAS_CMAP] [--cmapDust DUST_CMAP]
                              [--cmaxGas GAS_CMAX] [--cmaxDust DUST_CMAX]
                              [--fontsize FONTSIZE] [--dpi DPI]
frames [frames ...]
positional arguments:
frames select single frame or range(start, end, rate). error
if nargs != 1 or 3
optional arguments:
-h, --help show this help message and exit
-c NUM_CORES number of cores (default: 1)
--dir SAVE_DIRECTORY save directory (default: bothDensityMaps)
--hide for single plot, do not display plot (default: display
plot)
-v VERSION version number (up to 4 digits) for this set of plot
parameters (default: None)
--range R_LIM R_LIM radial range in plot (default: [r_min, r_max])
--shift center frame on vortex peak or middle (default: do not
center)
  --cmapGas GAS_CMAP    gas color map (default: viridis)
  --cmapDust DUST_CMAP  dust color map (default: inferno)
  --cmaxGas GAS_CMAX    gas maximum density in colorbar (default: 2)
  --cmaxDust DUST_CMAX  dust maximum density in colorbar (default: 10 for hcm+,
                        2.5 otherwise)
--fontsize FONTSIZE fontsize of plot annotations (default: 16)
--dpi DPI dpi of plot annotations (default: 100)
"""
import sys, os, subprocess
import pickle, glob
from multiprocessing import Pool
import argparse
import math
import numpy as np
import matplotlib
from matplotlib import rcParams as rc
from matplotlib import pyplot as plot
from pylab import rcParams
from pylab import fromfile
import util
import azimuthal as az
from readTitle import readTitle
from colormaps import cmaps
for key in cmaps:
plot.register_cmap(name = key, cmap = cmaps[key])
###############################################################################
### Input Parameters ###
def new_argument_parser(description = "Plot dust density maps."):
parser = argparse.ArgumentParser()
# Frame Selection
parser.add_argument('frames', type = int, nargs = '+',
help = 'select single frame or range(start, end, rate). error if nargs != 1 or 3')
parser.add_argument('-c', dest = "num_cores", type = int, default = 1,
help = 'number of cores (default: 1)')
# Files
parser.add_argument('--dir', dest = "save_directory", default = "bothDensityMaps",
help = 'save directory (default: bothDensityMaps)')
# Plot Parameters (variable)
parser.add_argument('--hide', dest = "show", action = 'store_false', default = True,
help = 'for single plot, do not display plot (default: display plot)')
parser.add_argument('-v', dest = "version", type = int, default = None,
help = 'version number (up to 4 digits) for this set of plot parameters (default: None)')
parser.add_argument('--range', dest = "r_lim", type = float, nargs = 2, default = None,
help = 'radial range in plot (default: [r_min, r_max])')
parser.add_argument('--shift', dest = "center", action = 'store_true', default = False,
help = 'center frame on vortex peak or middle (default: do not center)')
# Plot Parameters (rarely need to change)
parser.add_argument('--cmapGas', dest = "gas_cmap", default = "viridis",
help = 'gas color map (default: viridis)')
parser.add_argument('--cmapDust', dest = "dust_cmap", default = "inferno",
help = 'dust color map (default: inferno)')
parser.add_argument('--cmaxGas', dest = "gas_cmax", type = float, default = 2,
help = 'gas maximum density in colorbar (default: 2)')
parser.add_argument('--cmaxDust', dest = "dust_cmax", type = float, default = None,
help = 'dust maximum density in colorbar (default: 10 for hcm+, 2.5 otherwise)')
parser.add_argument('--fontsize', dest = "fontsize", type = int, default = 16,
help = 'fontsize of plot annotations (default: 16)')
parser.add_argument('--dpi', dest = "dpi", type = int, default = 100,
help = 'dpi of plot annotations (default: 100)')
return parser
###############################################################################
### Parse Arguments ###
args = new_argument_parser().parse_args()
### Get Fargo Parameters ###
fargo_par = util.get_pickled_parameters()
num_rad = fargo_par["Nrad"]; num_theta = fargo_par["Nsec"]
r_min = fargo_par["Rmin"]; r_max = fargo_par["Rmax"]
jupiter_mass = 1e-3
planet_mass = fargo_par["PlanetMass"] / jupiter_mass
surface_density_zero = fargo_par["Sigma0"]
disk_mass = 2 * np.pi * surface_density_zero * (r_max - r_min) / jupiter_mass # M_{disk} = (2 \pi) * \Sigma_0 * r_p * (r_out - r_in)
scale_height = fargo_par["AspectRatio"]
taper = fargo_par["MassTaper"]
size = fargo_par["PSIZE"]
### Get Input Parameters ###
# Frames
frame_range = util.get_frame_range(args.frames)
# Number of Cores
num_cores = args.num_cores
# Files
save_directory = args.save_directory
if not os.path.isdir(save_directory):
os.mkdir(save_directory) # make save directory if it does not already exist
# Plot Parameters (variable)
show = args.show
rad = np.linspace(r_min, r_max, num_rad)
theta = np.linspace(0, 2 * np.pi, num_theta)
version = args.version
if args.r_lim is None:
x_min = r_min; x_max = r_max
else:
x_min = args.r_lim[0]; x_max = args.r_lim[1]
center = args.center
# Plot Parameters (constant)
gas_cmap = args.gas_cmap
dust_cmap = args.dust_cmap
gas_cmax = args.gas_cmax
dust_cmax = args.dust_cmax
if dust_cmax is None:
if size > 0.2:
dust_cmax = 10
else:
dust_cmax = 2.5
gas_clim = [0, gas_cmax]
dust_clim = [0, dust_cmax]
fontsize = args.fontsize
dpi = args.dpi
### Add new parameters to dictionary ###
fargo_par["rad"] = rad
fargo_par["theta"] = theta
###############################################################################
##### PLOTTING #####
def make_plot(frame, show = False):
# Set up figure
fig = plot.figure(figsize = (1400 / dpi, 600 / dpi), dpi = dpi)
# Data
gas_density = (fromfile("gasdens%d.dat" % frame).reshape(num_rad, num_theta))
dust_density = (fromfile("gasddens%d.dat" % frame).reshape(num_rad, num_theta))
if center:
if taper < 10.1:
shift_c = az.get_azimuthal_peak(dust_density, fargo_par)
else:
shift_c = az.get_azimuthal_center(dust_density, fargo_par, threshold = 0.05 * surface_density_zero)
gas_density = np.roll(gas_density, shift_c)
dust_density = np.roll(dust_density, shift_c)
############################ Gas Density ##################################
plot.subplot(1, 2, 1)
# Data
normalized_density = gas_density / surface_density_zero
### Plot ###
x = rad
y = theta * (180.0 / np.pi)
result = plot.pcolormesh(x, y, np.transpose(normalized_density), cmap = gas_cmap)
fig.colorbar(result)
result.set_clim(gas_clim[0], gas_clim[1])
# Axes
plot.xlim(x_min, x_max)
plot.ylim(0, 360)
angles = np.linspace(0, 360, 7)
plot.yticks(angles)
# Annotate Axes
time = fargo_par["Ninterm"] * fargo_par["DT"]
orbit = (time / (2 * np.pi)) * frame
title = readTitle()
plot.xlabel("Radius", fontsize = fontsize)
plot.ylabel(r"$\phi$", fontsize = fontsize)
if title is None:
plot.title("Gas Density Map\n(t = %.1f)" % (orbit), fontsize = fontsize + 1)
else:
plot.title("Gas Density Map\n%s\n(t = %.1f)" % (title, orbit), fontsize = fontsize + 1)
############################ Dust Density #################################
plot.subplot(1, 2, 2)
# Data
normalized_density = dust_density / (surface_density_zero / 100.0)
### Plot ###
x = rad
y = theta * (180.0 / np.pi)
result = plot.pcolormesh(x, y, np.transpose(normalized_density), cmap = dust_cmap)
fig.colorbar(result)
result.set_clim(dust_clim[0], dust_clim[1])
# Axes
plot.xlim(x_min, x_max)
plot.ylim(0, 360)
angles = np.linspace(0, 360, 7)
plot.yticks(angles)
# Annotate Axes
time = fargo_par["Ninterm"] * fargo_par["DT"]
orbit = (time / (2 * np.pi)) * frame
title = readTitle()
plot.xlabel("Radius", fontsize = fontsize)
plot.ylabel(r"$\phi$", fontsize = fontsize)
if title is None:
plot.title("Dust Density Map\n(t = %.1f)" % (orbit), fontsize = fontsize + 1)
else:
plot.title("Dust Density Map\n%s\n(t = %.1f)" % (title, orbit), fontsize = fontsize + 1)
###########################################################################
# Save, Show, and Close
if version is None:
save_fn = "%s/bothDensityMaps_%04d.png" % (save_directory, frame)
else:
save_fn = "%s/v%04d_bothDensityMaps_%04d.png" % (save_directory, version, frame)
plot.savefig(save_fn, bbox_inches = 'tight', dpi = dpi)
if show:
plot.show()
plot.close(fig) # Close Figure (to avoid too many figures)
##### Make Plots! #####
# Iterate through frames
if len(frame_range) == 1:
make_plot(frame_range[0], show = show)
else:
if num_cores > 1:
p = Pool(num_cores) # default number of processes is multiprocessing.cpu_count()
p.map(make_plot, frame_range)
p.terminate()
else:
for frame in frame_range:
make_plot(frame)
|
[
"[email protected]"
] | |
f0a7ba891391990b621ed21caa25719191d833b9
|
7bb9ffe61491817e999de40266653063ae4f8cfb
|
/swamp_angel/sa_sa2_vars/allNcFiles_sa2.py
|
1df7d70733f86ceacc5088842ad6eb38a63c481a
|
[] |
no_license
|
cycle13/chapter1
|
b0e43d7c3297aa24109d0fd00e9bfaa9a95cea2c
|
18638f5ef806fa16d7b3b14b43501674478e220e
|
refs/heads/master
| 2022-04-09T23:55:21.936738 | 2020-03-31T01:44:44 | 2020-03-31T01:44:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,752 |
py
|
av_ncfiles = ["C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_ljh_2006-2007_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_ljh_2007-2008_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_ljp_2006-2007_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_ljp_2007-2008_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_ljc_2006-2007_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_ljc_2007-2008_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_lth_2006-2007_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_lth_2007-2008_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_ltp_2006-2007_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_ltp_2007-2008_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_ltc_2006-2007_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_ltc_2007-2008_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_sjh_2006-2007_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_sjh_2007-2008_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_sjp_2006-2007_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_sjp_2007-2008_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_sjc_2006-2007_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_sjc_2007-2008_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_sth_2006-2007_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_sth_2007-2008_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_stp_2006-2007_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_stp_2007-2008_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_stc_2006-2007_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_stc_2007-2008_senatorVariableDecayRate_1.nc",
]#
av_ncfiles13p = ["C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_lsh_2006-2007_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_lsh_2007-2008_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_lsp_2006-2007_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_lsp_2007-2008_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_lsc_2006-2007_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_lsc_2007-2008_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_ssh_2006-2007_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_ssh_2007-2008_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_ssp_2006-2007_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_ssp_2007-2008_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_ssc_2006-2007_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/sa_sa2_vars/sa_sa2_ssc_2007-2008_senatorVariableDecayRate_1.nc",
]#
|
[
"[email protected]"
] | |
9d72d1fe8cfc2b68d11f5698576f8ab92bb82e06
|
d66fd976d66632267551467c3df9b2fbfb8be1cd
|
/Interview01/100 python传参还是传址.py
|
ab4c8e34f9f1c32e2d62671c0823d23c97d79687
|
[] |
no_license
|
bobopython/PythonInterviewQuestions
|
d8f580932f36bd85432aaafd5c00924183bac16a
|
9f38a9368bbca32d071062d59748518c0c4f0d09
|
refs/heads/master
| 2020-06-07T06:37:25.038426 | 2019-02-22T16:46:00 | 2019-02-22T16:46:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 497 |
py
|
# _*_ coding: utf-8 _*_
__author__ = 'jevoly'
__date__ = '2019/2/16 0016 下午 11:29'
"""
In Python, function arguments are passed by object reference.
For immutable types (numbers, strings, tuples) the object itself cannot be
modified, so operations inside the function do not affect the caller's variable;
for mutable types (lists, dicts) operations in the function body may change the
argument that was passed in.
"""
def selfAdd(a):
a += a
a_int = 1
print(a_int)
selfAdd(a_int)
print(a_int)
a_list = [1, 2]
print(a_list)
selfAdd(a_list)
print(a_list)
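# Expected output (added note): the int is unchanged by selfAdd, while the
# list is extended in place by its own contents:
# 1
# 1
# [1, 2]
# [1, 2, 1, 2]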
|
[
"[email protected]"
] | |
1fd6bc312a6a1bc45018278de45bf926b5e94a90
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nnn1875.py
|
87e2540084a7b287dfde7037cd1f346bc054f5ca
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 |
Python
|
UTF-8
|
Python
| false | false | 120 |
py
|
ii = [('UnitAI.py', 1), ('WadeJEB.py', 1), ('MereHHB3.py', 2), ('MereHHB.py', 1), ('StorJCC.py', 1), ('SadlMLP2.py', 1)]
|
[
"[email protected]"
] | |
a76f71b1b0a7bbd3e2980596c7741c93c2f0397d
|
6f4ee285871ee52ea4c1143d54581ead795bca87
|
/example/asr/preprocess_aihub.py
|
966a54cb9783dff1ab5f8ed68a23ec6dd026ed95
|
[
"MIT"
] |
permissive
|
rosinality/imputer-pytorch
|
f3773074ddec615c8eaffd1b89a67402790aa3cc
|
7ff8f73dcd7bd62a98c5b8a126946c5fe381d895
|
refs/heads/master
| 2022-05-24T16:30:05.588670 | 2020-05-03T00:36:28 | 2020-05-03T00:36:28 | 257,501,256 | 52 | 8 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,933 |
py
|
import argparse
import io
import re
import os
import string
import wave
import pickle
from multiprocessing import Pool
from functools import partial
import lmdb
import librosa
import torch
from tqdm import tqdm
from audio import FilterbankFeature
from text import collapse_whitespace
re_pronunciation = re.compile(r'\((.*?)\)\/\((.*?)\)')
re_noise = re.compile(r'b\/|l\/|o\/|n\/')
table_punctuation = str.maketrans(string.punctuation, ' ' * len(string.punctuation))
PCM_CHANNELS = 1
PCM_BIT_DEPTH = 16
PCM_SAMPLING_RATE = 16000
N_META_CHAR = 3
def use_pronunciation(text):
return re_pronunciation.sub(r'\2', text)
def remove_noise(text):
return re_noise.sub(' ', text)
def remove_punctuation(text):
return text.translate(table_punctuation)
def process_text(text):
return collapse_whitespace(
remove_punctuation(remove_noise(use_pronunciation(text)))
).strip()
def load_pcm(filename):
with open(filename, 'rb') as f:
pcmdata = f.read()
wav_write = io.BytesIO()
wav = wave.open(wav_write, 'wb')
wav.setparams(
(PCM_CHANNELS, PCM_BIT_DEPTH // 8, PCM_SAMPLING_RATE, 0, 'NONE', 'NONE')
)
wav.writeframes(pcmdata)
wav_write.seek(0)
wav, _ = librosa.load(wav_write, sr=PCM_SAMPLING_RATE)
return wav
def load_text(filename):
with open(filename, encoding='cp949') as f:
return f.read()
def process_worker(filename, root):
file = os.path.join(root, filename)
wav = load_pcm(file + '.pcm')
text = load_text(file + '.txt')
wav_feat = wav_feature(torch.from_numpy(wav).unsqueeze(0), PCM_SAMPLING_RATE)
text_feat = process_text(text)
record = (wav_feat, text_feat, filename)
return record
if __name__ == '__main__':
os.environ['OMP_NUM_THREADS'] = '1'
parser = argparse.ArgumentParser()
parser.add_argument('--n_mels', type=int, default=80)
parser.add_argument('--path', type=str, required=True)
parser.add_argument('output', metavar='OUTPUT')
args = parser.parse_args()
speech_files = []
wav_feature = FilterbankFeature(args.n_mels)
for dirpath, dirs, files in os.walk(args.path):
if len(dirs) == 0:
speech_keys = set()
for file in files:
speech_keys.add(os.path.splitext(file)[0])
speech_keys = list(sorted(speech_keys))
relpath = os.path.relpath(dirpath, args.path)
for key in speech_keys:
speech_files.append(os.path.join(relpath, key))
vocab = {}
worker = partial(process_worker, root=args.path)
with Pool(processes=8) as pool, lmdb.open(
args.output, map_size=1024 ** 4, readahead=False
) as env:
pbar = tqdm(pool.imap(worker, speech_files), total=len(speech_files))
mel_lengths = []
text_lengths = []
for i, record in enumerate(pbar):
record_buffer = io.BytesIO()
torch.save(record, record_buffer)
with env.begin(write=True) as txn:
txn.put(str(i).encode('utf-8'), record_buffer.getvalue())
for char in record[1]:
if char not in vocab:
vocab[char] = len(vocab) + N_META_CHAR
mel_lengths.append(record[0].shape[0])
text_lengths.append(len(record[1]))
pbar.set_description(record[2])
with env.begin(write=True) as txn:
txn.put(b'length', str(len(speech_files)).encode('utf-8'))
txn.put(
b'meta',
pickle.dumps(
{
'sr': PCM_SAMPLING_RATE,
'channels': PCM_CHANNELS,
'bit_depth': PCM_BIT_DEPTH,
'vocab': vocab,
'mel_lengths': mel_lengths,
'text_lengths': text_lengths,
}
),
)
|
[
"[email protected]"
] | |
3f25f208b39b728d049043a65f1698f402063b63
|
d4dda2e2992ca16b8fe628e417f8a4243af0ed4a
|
/step10_plottingutilities/nexpectedeventsDbkg0.5.py
|
694952c3137cf9cfe4b69eb75d783cd8b5acd850
|
[] |
no_license
|
hroskes/anomalouscouplings
|
01f46c0d38f5332c58538b0bdea373704cf06fcc
|
391eb7fbd52d8605b09ca2e461b1789e019b1da0
|
refs/heads/production
| 2021-11-24T22:37:48.932830 | 2021-10-29T18:38:54 | 2021-10-29T18:38:54 | 60,651,233 | 0 | 2 | null | 2017-01-24T14:20:56 | 2016-06-07T22:37:23 |
Python
|
UTF-8
|
Python
| false | false | 1,170 |
py
|
#!/usr/bin/env python
import os
import ROOT
from helperstuff import config
from helperstuff.enums import analyses, channels, categories
from projections import Projections
assert len(config.productionsforcombine) == 1
production = config.productionsforcombine[0]
class NExpected(Projections):
@property
def nexpected(self):
disc = self.discriminants[0]
rootfile = os.path.join(self.saveasdir, disc.name+".root")
f = ROOT.TFile(rootfile)
c = f.c1
hstack = c.GetListOfPrimitives()[1]
total = 0
for h in hstack.GetHists():
if h.GetLineColor() in (1, 6, 2, ROOT.kOrange+6, ROOT.kViolet+7): total += h.Integral()
elif h.GetLineColor() in (ROOT.kCyan, ROOT.kGreen+3, 4): pass
else: assert False
return total
if __name__ == "__main__":
for analysis in analyses:
print sum(NExpected(production, channel, category, "rescalemixtures", analysis, "fullrange").nexpected for channel in channels for category in categories),
print
for analysis in analyses:
print sum(NExpected(production, channel, category, "rescalemixtures", analysis, "enrich").nexpected for channel in channels for category in categories),
|
[
"[email protected]"
] | |
7e18b441ca5e77bb4d99946436324fdc717db390
|
c8836eac0f6a20a6d3056fc3651c9daf5ce0572d
|
/test_hu/test_cadastro.py
|
dac1ceb79c100055f70f3db34a7f50d2195c4645
|
[] |
no_license
|
rafaelcoelho/code_interview_training
|
cb44eb6701b6902a28eaf1c0025aea4921dfccb4
|
fa7484487bf1a2fa9fb4a4abe135c6114fd98bf8
|
refs/heads/master
| 2021-01-18T20:05:59.486916 | 2020-11-27T00:00:56 | 2020-11-27T00:00:56 | 100,544,717 | 0 | 0 | null | 2017-08-17T00:36:50 | 2017-08-17T00:36:49 | null |
UTF-8
|
Python
| false | false | 3,447 |
py
|
"""
Using the concept of dictionaries, build a registration record for 4 employees with the following
structure:
Key: Name  Data: Age, email, department, salary
Include the employees:
Joao Pereira, 25, [email protected], marketing, 1950
Maria Silva, 23, [email protected], comercial, 2300
Pedro Peixoto, 32, [email protected], operacao, 2625
Luiza Almeida, 28, [email protected], atendimento, 2120
Write a program that returns the name, email and department of all employees older than 25.
"""
import unittest
from typing import List, Tuple, Union
def filtrar_maiores_de_25(colaboradores):
resultado = []
for nome, (idade, email, setor, *_) in colaboradores:
if idade > 25:
adicionar(email, nome, resultado, setor)
return resultado
def adicionar(email, nome, resultado, setor):
resultado.append((nome, email, setor))
def extrair_nome_salario(colaborador):
dados_pessoais = colaborador[1]
salario = dados_pessoais[-1]
nome = colaborador[0]
return nome, salario
def extrair_salario(tpl):
return tpl[-1]
def top_colaborador(colaboradores: List[Tuple[str, List[Union[str, int]]]]) -> Tuple[str, int]:
"""
    Computes the employee with the highest salary.
    Example:
>>> colaboradores = [
... ('Joao Pereira', [25, '[email protected]', 'marketing', 1950]),
... ('Maria Silva', [23, '[email protected]', 'comercial', 2300]),
... ('Pedro Peixoto', [32, '[email protected]', 'operacao', 2625]),
... ('Pedro Peixoto', [32, '[email protected]', 'operacao', 2625]),
... ('Luiza Almeida', [28, '[email protected]', 'atendimento', 2120]),
... ]
...
>>> top_colaborador(colaboradores)
('Pedro Peixoto', 2625)
    :param colaboradores: list of employees
    :return: tuple with name and salary
"""
return max([extrair_nome_salario(colaborador) for colaborador in colaboradores], key=extrair_salario)
class TesteColaborabores(unittest.TestCase):
def test_filtragem_colabores(self) -> None:
colaboradores = [
('Joao Pereira', [25, '[email protected]', 'marketing', 1950]),
('Maria Silva', [23, '[email protected]', 'comercial', 2300]),
('Pedro Peixoto', [32, '[email protected]', 'operacao', 2625]),
('Pedro Peixoto', [32, '[email protected]', 'operacao', 2625]),
('Luiza Almeida', [28, '[email protected]', 'atendimento', 2120]),
]
resultado = filtrar_maiores_de_25(colaboradores)
self.assertEqual(3, len(resultado))
self.assertSetEqual(
{
('Pedro Peixoto', '[email protected]', 'operacao'),
('Luiza Almeida', '[email protected]', 'atendimento'),
},
set(resultado)
)
def test_salario_maximo(self) -> None:
colaboradores = [
('Joao Pereira', [25, '[email protected]', 'marketing', 1950]),
('Maria Silva', [23, '[email protected]', 'comercial', 2300]),
('Pedro Peixoto', [32, '[email protected]', 'operacao', 2625]),
('Luiza Almeida', [28, '[email protected]', 'atendimento', 2120]),
('Zoraide', [28, '[email protected]', 'atendimento', 2120]),
]
resultado = top_colaborador(colaboradores)
self.assertEqual(('Pedro Peixoto', 2625), resultado)
|
[
"[email protected]"
] | |
7410a60093d30f7f552fe587527803a22d586e60
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/scrabble-score/4f89ec23c2c3437681d68f5c9bf4d3d9.py
|
ee4dbb86106d575505a2c999db6b7e251a7494d5
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null |
UTF-8
|
Python
| false | false | 382 |
py
|
def score(phrase):
score_dict = {
"a": 1, "b": 3, "c": 3, "d": 2, "e": 1,
"f": 4, "g": 2, "h": 4, "i": 1, "j": 8,
"k": 5, "l": 1, "m": 3, "n": 1, "o": 1,
"p": 3, "q": 10, "r": 1, "s": 1, "t": 1,
"u": 1, "v": 4, "w": 4, "x": 8, "y": 4,
"z": 10
}
s = 0
for char in phrase.strip().lower():
s += score_dict[char]
return s
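# Example (added note):
#   score("cabbage")  # 3 + 1 + 3 + 3 + 1 + 2 + 1 == 14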
|
[
"[email protected]"
] | |
1d945a7c1bf6726a99ca1c625db31858855c0347
|
5ba0e63ff6d94efbd28ed425bb6074e74d3148a0
|
/app.py
|
c9fe431141f706fd0d4bbe760be77e916c60c677
|
[] |
no_license
|
SmartPracticeschool/SBSPS-Challenge-4160-Sentiment-Analysis
|
ad25e307f38828dd219e26a2bcbb7250a2cf023e
|
b448d93e8464bf5af5bd70cacf631ed124d96a18
|
refs/heads/master
| 2022-11-20T08:31:50.487634 | 2020-07-29T11:23:24 | 2020-07-29T11:23:24 | 272,616,702 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,313 |
py
|
from flask import Flask, render_template,request, jsonify
import tweepy
from textblob import TextBlob
app = Flask(__name__)
app.config["DEBUG"] = True
cons_key="zJQyiylJYFuutBTOzomm2ZgDc"
cons_sec="gPXYZSZ7jVqjTOIG48p4CYzs7fx9fmaWHFPnmSMp4DF10Bla3D"
acc_token="1269151933559762945-wZYKQZRbSRaTuDkxW29PnLVaTUJmea"
acc_sec="uGyK2OpmhiCyE20b7D0b26adNOosmdDT0FPmtCsLjHqqt"
auth = tweepy.OAuthHandler(cons_key,cons_sec)
auth.set_access_token(acc_token,acc_sec)
api = tweepy.API(auth)
@app.route('/')
def hello_world():
return render_template('home.html')
@app.route('/results',methods=['GET', 'POST'])
def show_result():
if request.method=='POST':
result = request.form['keyword']
neutral, positive, negative = 0,0,0
tweetData = {}
id = 0
tweets = api.search(q=result,count = 100,rpp = 1500, _id="23424848")
for tweet in tweets:
blob = TextBlob(tweet.text)
polarity = blob.sentiment.polarity
if polarity == 0:
tweetData[id] = {
'text': tweet.text,
'polarity': round(polarity, 2),
}
neutral += 1
elif polarity > 0:
tweetData[id] = {
'text': tweet.text,
'polarity': round(polarity, 2),
}
positive += 1
elif polarity < 0:
tweetData[id] = {
'text': tweet.text,
'polarity': round(polarity, 2),
}
negative += 1
id += 1
if(positive>negative) and (positive>neutral):
outcome = 'positive'
msg = "Outcome: Over the analysis the result falls on a positive edge. :)"
elif(negative> neutral):
outcome = 'negative'
msg = "Outcome: Over the analysis the result falls on the negative edge. :("
else:
outcome = 'neutral'
msg = "Outcome: Over the analysis, the results are claimed to be neutral. :| "
values = [positive, negative, neutral]
labels = ["positive", "negative", "neutral"]
return render_template('result.html', msg=msg, labels=labels, values=values, keyword=result, outcome=outcome, tweetData=tweetData)
app.run()
|
[
"[email protected]"
] | |
d31b596c5a1b60f72386ba04c19e591e3b4d2eca
|
7ef2308e51d1d5700fbd092177ee15e2a03ebdd8
|
/WorkLean/Python/Scrapy/testCrawler3_0/testCrawler3_0/items.py
|
6672b1af9e9bd24d5a6602896aef9d975422922e
|
[] |
no_license
|
STAWZW/STAWZW1.0
|
741002eb35c2883e5739fee8d14ff430e9622c01
|
a835ac27aba17f968116e321bd201b26c9fb3578
|
refs/heads/master
| 2020-07-21T20:21:59.753992 | 2019-09-26T09:21:28 | 2019-09-26T09:21:28 | 206,965,347 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 293 |
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class Testcrawler30Item(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
|
[
"[email protected]"
] | |
9eaa568ed36cd7532c669f74ff9bdcb0cae51b8e
|
bbefcbb5fdaa571b1bd674269a51d855eadbb2cb
|
/ticketing/migrations/0037_auto_20200207_1606.py
|
a6ef78cdee50e70b0274b2594896c195a3920f96
|
[] |
no_license
|
fadhlimulyana20/event_organizer
|
8a0e87e71ca24f9ca82af5c90b39be1e61e7867d
|
4995d529a533d0a6b855e42283c2aaf441fa5a9c
|
refs/heads/master
| 2020-12-13T17:11:49.562499 | 2020-03-01T22:19:29 | 2020-03-01T22:19:29 | 234,480,025 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 711 |
py
|
# Generated by Django 2.2.7 on 2020-02-07 09:06
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('ticketing', '0036_auto_20200206_1445'),
]
operations = [
migrations.AlterField(
model_name='event',
name='due_registration',
field=models.DateField(default=datetime.datetime(2020, 2, 4, 16, 6, 8, 775718)),
),
migrations.AlterField(
model_name='eventpayment',
name='due_date',
field=models.DateTimeField(default=datetime.datetime(2020, 2, 9, 9, 6, 8, 779718, tzinfo=utc)),
),
]
|
[
"[email protected]"
] | |
1026e8e368b714ae254ae648462cc092ae78c161
|
499c37bf1fe03b0beeb0121e9fa9ffd795b93392
|
/crowd.py
|
7ab6970834b393b0f05307b01bb7c3549db5c428
|
[
"ISC"
] |
permissive
|
wezu/p3d_gpu_anim
|
b2788c1950fbaa588068fc587aa112a33259b9ff
|
8429da56ffe52fd130fd8bbd0977e92200e9177a
|
refs/heads/master
| 2020-12-02T17:52:32.393525 | 2017-07-16T14:46:23 | 2017-07-16T14:46:23 | 96,442,664 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,980 |
py
|
from panda3d.core import *
from direct.interval.MetaInterval import Sequence
from direct.interval.FunctionInterval import Func
from direct.interval.FunctionInterval import Wait
from random import random
from math import floor
import json
__author__ = "wezu"
__copyright__ = "Copyright 2017"
__license__ = "ISC"
__version__ = "0.13"
__email__ = "[email protected]"
__all__ = ['Crowd']
class Crowd(object):
"""Class allows to make multiple hardware animated and instanced actors.
For custom use one probably needs to alter the shaders (included):
shaders/anim_v.glsl and shaders/anim_f.glsl
Once a Crowd is created you can controll individual actors by index
my_crowd=Crowd(...)
my_crowd[12].set_pos(...)
my_crowd[7].set_hpr(...)
my_crowd[1].play(...)
The object retured by __getitem__ ( [] brackets) is a CrowdActor,
it uses NodePath methods (and has a NodePath inside if you need it
eg. my_crowd[0].node), it also has play(), loop(), pose(), stop(),
and get_current_frame() functions.
You Should never create a CrowdActor by yourself, it will do nothing
without a Crowd controling it.
model - a loaded model (with the proper joint and weight vertex attributes)
anim_texture - a loaded texture with the animation data
animations - a dict with the names of animations as keys and [start_frame, number_of_frames] as value
num_actors -initial number of actor instances
frame_blend -True/False should inter frame blending be used
"""
def __init__(self, model, anim_texture=None, animations=None, num_actors=1, frame_blend=False):
#load the model, instance it and set omni bounds
if isinstance(model, NodePath):
self.model=model
else:
self.model=loader.load_model(model)
self.model.set_instance_count(num_actors)
self.model.node().set_bounds(OmniBoundingVolume())
self.model.node().set_final(True)
#make sure the animation texture is set up right
if anim_texture is not None:
self.anim_texture=anim_texture
else:
tex_name=self._find_first_tag(self.model, 'anim_tex')
self.anim_texture=loader.load_texture(tex_name)
self.anim_texture.set_wrap_u(SamplerState.WM_clamp)
self.anim_texture.set_wrap_v(SamplerState.WM_clamp)
self.anim_texture.set_magfilter(SamplerState.FT_nearest)
self.anim_texture.set_minfilter(SamplerState.FT_nearest)
self.anim_texture.set_format(Texture.F_rgba32)
#set the shader
self.shader_define={'NUM_ACTORS':num_actors,
'MAX_Y':self.anim_texture.get_y_size()-1}
self.frame_blend=frame_blend
if frame_blend:
self.shader_define['FRAME_BLEND']=1
self.model.set_shader(self._load_shader(v_shader='shaders/anim_v.glsl',
f_shader='shaders/anim_f.glsl',
define=self.shader_define))
#send the tex to the shader
self.model.set_shader_input('anim_texture', self.anim_texture)
#make an array of mat4 for each actor
self.matrix_data=PTALMatrix4f()
for i in range(num_actors):
self.matrix_data.push_back(Mat4())
self.model.set_shader_input('matrix_data', self.matrix_data)
#make an array of vec4 for each actor
self.anim_data=PTALVecBase4f()
for i in range(num_actors):
self.anim_data.push_back(Vec4(0.0, 1.0, 30.0, 0.0)) #start_frame, num_frame, fps, time_offset
self.model.set_shader_input('anim_data', self.anim_data)
#dict of named animations
if animations is not None:
self.animations=animations
else:
_anim_names=self._find_first_tag(self.model, 'anim_names').replace('\n', ' ').split()
_anim_numbers=self._find_first_tag(self.model, 'anim_range').replace('\n', ' ').split()
_anim_numbers=[[int(i) for i in j.split(":")] for j in _anim_numbers]
self.animations=dict(zip(_anim_names,_anim_numbers))
#list of actor nodes, so one can use crowd[12].play('some_anim')
self.actors=[CrowdActor(i, self.animations) for i in range(num_actors)]
#list for attaching nodes
self.attached_nodes=[]
self.task=taskMgr.add(self._update, "crowd_update", sort=-50)
def get_joint_id(self, joint_name):
if hasattr(self, 'joint_names'):
return self.joint_names.index(joint_name)
else:
self.joint_names=self._find_first_tag(self.model, 'joint_names').replace('\n', ' ').split()
return self.joint_names.index(joint_name)
def set_count(self, target_actors):
"""Set the number of actor instances to target_actors
"""
current_actors=len(self.actors)
#add actors if needed
while current_actors < target_actors:
self.actors.append(CrowdActor(current_actors, self.animations))
current_actors=len(self.actors)
#remove actors if needed
self.actors=self.actors[:target_actors]
self.model.set_instance_count(target_actors)
self.shader_define={'NUM_ACTORS':target_actors,
'MAX_Y':self.anim_texture.get_y_size()-1}
if self.frame_blend:
self.shader_define['FRAME_BLEND']=1
self.model.set_shader(self._load_shader(v_shader='shaders/anim_v.glsl',
f_shader='shaders/anim_f.glsl',
define=self.shader_define))
def attach_node_to_joint(self, node, joint_name, actor_id):
node.node().set_bounds(OmniBoundingVolume())
node.node().set_final(True)
node.reparent_to(self.model)
node.set_shader(self._load_shader(v_shader='shaders/attach_v.glsl',
f_shader='shaders/attach_f.glsl',
define=self.shader_define))
node.set_shader_inputs(id=actor_id, joint_id=self.get_joint_id(joint_name))
self.attached_nodes.append(node)
def reparent_to(self, node):
"""Reparents the Crowd to a node for rendering (usually render)
No transformations are used.
"""
self.model.reparent_to(node)
def set_frame_blend(self, state=True):
""" If state is True turns inter frame blending on, else turns it off
"""
self.frame_blend=state
self.shader_define={'NUM_ACTORS':len(self.actors),
'MAX_Y':self.anim_texture.get_y_size()-1}
if state:
self.shader_define['FRAME_BLEND']=1
self.model.set_shader(self._load_shader(v_shader='shaders/anim_v.glsl',
f_shader='shaders/anim_f.glsl',
define=self.shader_define))
for node in self.attached_nodes:
node.set_shader(self._load_shader(v_shader='shaders/attach_v.glsl',
f_shader='shaders/attach_f.glsl',
define=self.shader_define))
def _find_first_tag(self, node, tag):
for child in node.get_children():
if child.has_tag(tag):
return child.get_tag(tag)
else:
                child_tag=self._find_first_tag(child, tag)
if child_tag:
return child_tag
return None
def _update(self, task):
for n, actor in enumerate(self.actors):
self.matrix_data.set_element(n, actor.node.get_mat())
self.anim_data.set_element(n, actor.anim_data)
return task.again
def _load_shader(self, v_shader, f_shader, define=None, version='#version 140'):
# load the shader text
with open(getModelPath().find_file(v_shader).to_os_specific()) as f:
v_shader_txt = f.read()
with open(getModelPath().find_file(f_shader).to_os_specific()) as f:
f_shader_txt = f.read()
# make the header
if define:
header = version + '\n'
for name, value in define.items():
header += '#define {0} {1}\n'.format(name, value)
# put the header on top
v_shader_txt = v_shader_txt.replace(version, header)
f_shader_txt = f_shader_txt.replace(version, header)
# make the shader
shader = Shader.make(Shader.SL_GLSL, v_shader_txt, f_shader_txt)
try:
shader.set_filename(Shader.ST_vertex, v_shader)
shader.set_filename(Shader.ST_fragment, f_shader)
except:
print('Shader filenames will not be available')
return shader
def __getitem__(self, index):
return self.actors[index]
def __iter__(self):
for actor in self.actors:
yield actor
def remove(self):
"""Remove the whole thing
"""
taskMgr.remove(self.task)
self.model.remove_node()
self.actors=None
self.matrix_data=None
self.anim_data=None
self.attached_nodes=None
class CrowdActor(object):
"""CrowdActor is a helper class for the Crowd class.
    You should never create a CrowdActor by yourself, it will do nothing
    without a Crowd controlling it.
    You can use NodePath methods on a CrowdActor (not recommended to use remove_node())
or get the NodePath directly (eg. my_crowd[0].node).
"""
def __init__(self, id, animations):
self.animations=animations
self.id=id
self.node=NodePath('CrowdActor_'+str(id))
self.anim_data=Vec4(0.0, 1.0, 30.0, 0.0) #start_frame, num_frame, fps, time_offset
self.seq=None
self.time=globalClock.get_frame_time()
def __getattr__(self,attr):
"""Delegate the function call to the internal NodePath
"""
return self.node.__getattribute__(attr)
def loop(self, name, fps=30.0, sync=True):
"""Play the 'name' animation in a loop at 'fps' frames per second speed.
if sync is False the animation will start at a random frame
"""
if self.seq:
self.seq.finish()
self.seq=None
if sync:
sync=0.0
else:
sync=random()
self.time=sync-globalClock.get_frame_time()
self.anim_data=Vec4(self.animations[name][0],
self.animations[name][1],
fps,
self.time)
def play(self, name, fps=30.0, sync=True):
"""Play the 'name' animation ONCE at 'fps' frames per second speed.
if sync is False the animation will start at a random frame
"""
self.loop(name, fps, sync)
time= float(self.animations[name][1])/float(fps)
self.seq=Sequence(Wait(time), Func(self.stop))
self.seq.start()
def pose(self, frame_number):
"""Pause the playback on frame_number
"""
self.anim_data=Vec4(frame_number, 1.0, 0.0, 0.0)
def get_current_frame(self):
"""Returns the currently played frame (if a animation is playing/looping)
"""
start_frame=self.anim_data[0]
num_frame=self.anim_data[1]
fps=self.anim_data[2]
offset_time=self.anim_data[3]
time=globalClock.get_frame_time()
return int(start_frame + floor((time+offset_time)*fps)%num_frame)
def stop(self):
"""Pause the playback, there's no 'resume' so the function name is not 'pause'
"""
self.pose(self.get_current_frame())
def __del__(self):
"""Clean up"""
try:
if self.seq:
self.seq.finish()
self.seq=None
self.node.remove_node()
except:
pass
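# --- Hedged usage sketch (illustrative addition, not part of the original
# module; the model path 'my_actor.bam' and the animation name 'walk' are
# assumptions, and the model must already carry the anim_tex/anim_names tags):
#
# from direct.showbase.ShowBase import ShowBase
# base = ShowBase()
# crowd = Crowd('my_actor.bam', num_actors=16, frame_blend=True)
# crowd.reparent_to(render)
# for i, actor in enumerate(crowd):
#     actor.set_pos(i * 2.0, 0, 0)
#     actor.loop('walk', sync=False)
# base.run()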
|
[
"[email protected]"
] | |
c0a5fb209b3e0ee8dd0b5293a6afb6b3dd4fe217
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/dbforpostgresql/v20210410privatepreview/server.py
|
78315aa30f6e698fdd1320e012bb19f618b33a14
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 33,962 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ServerArgs', 'Server']
@pulumi.input_type
class ServerArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
administrator_login: Optional[pulumi.Input[str]] = None,
administrator_login_password: Optional[pulumi.Input[str]] = None,
availability_zone: Optional[pulumi.Input[str]] = None,
create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None,
delegated_subnet_arguments: Optional[pulumi.Input['ServerPropertiesDelegatedSubnetArgumentsArgs']] = None,
display_name: Optional[pulumi.Input[str]] = None,
ha_enabled: Optional[pulumi.Input['HAEnabledEnum']] = None,
identity: Optional[pulumi.Input['IdentityArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
maintenance_window: Optional[pulumi.Input['MaintenanceWindowArgs']] = None,
point_in_time_utc: Optional[pulumi.Input[str]] = None,
private_dns_zone_arguments: Optional[pulumi.Input['ServerPropertiesPrivateDnsZoneArgumentsArgs']] = None,
server_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input['SkuArgs']] = None,
source_resource_group_name: Optional[pulumi.Input[str]] = None,
source_server_name: Optional[pulumi.Input[str]] = None,
source_subscription_id: Optional[pulumi.Input[str]] = None,
storage_profile: Optional[pulumi.Input['StorageProfileArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
version: Optional[pulumi.Input[Union[str, 'ServerVersion']]] = None):
"""
The set of arguments for constructing a Server resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] administrator_login: The administrator's login name of a server. Can only be specified when the server is being created (and is required for creation).
:param pulumi.Input[str] administrator_login_password: The administrator login password (required for server creation).
:param pulumi.Input[str] availability_zone: availability Zone information of the server.
:param pulumi.Input[Union[str, 'CreateMode']] create_mode: The mode to create a new PostgreSQL server.
:param pulumi.Input[str] display_name: The display name of a server.
:param pulumi.Input['HAEnabledEnum'] ha_enabled: stand by count value can be either enabled or disabled
:param pulumi.Input['IdentityArgs'] identity: The Azure Active Directory identity of the server.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input['MaintenanceWindowArgs'] maintenance_window: Maintenance window of a server.
:param pulumi.Input[str] point_in_time_utc: Restore point creation time (ISO8601 format), specifying the time to restore from.
:param pulumi.Input[str] server_name: The name of the server.
:param pulumi.Input['SkuArgs'] sku: The SKU (pricing tier) of the server.
:param pulumi.Input[str] source_resource_group_name: The resource group name of source PostgreSQL server name to restore from.
:param pulumi.Input[str] source_server_name: The source PostgreSQL server name to restore from.
:param pulumi.Input[str] source_subscription_id: The subscription id of source PostgreSQL server name to restore from.
:param pulumi.Input['StorageProfileArgs'] storage_profile: Storage profile of a server.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[Union[str, 'ServerVersion']] version: PostgreSQL Server version.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if administrator_login is not None:
pulumi.set(__self__, "administrator_login", administrator_login)
if administrator_login_password is not None:
pulumi.set(__self__, "administrator_login_password", administrator_login_password)
if availability_zone is not None:
pulumi.set(__self__, "availability_zone", availability_zone)
if create_mode is not None:
pulumi.set(__self__, "create_mode", create_mode)
if delegated_subnet_arguments is not None:
pulumi.set(__self__, "delegated_subnet_arguments", delegated_subnet_arguments)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if ha_enabled is not None:
pulumi.set(__self__, "ha_enabled", ha_enabled)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if location is not None:
pulumi.set(__self__, "location", location)
if maintenance_window is not None:
pulumi.set(__self__, "maintenance_window", maintenance_window)
if point_in_time_utc is not None:
pulumi.set(__self__, "point_in_time_utc", point_in_time_utc)
if private_dns_zone_arguments is not None:
pulumi.set(__self__, "private_dns_zone_arguments", private_dns_zone_arguments)
if server_name is not None:
pulumi.set(__self__, "server_name", server_name)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if source_resource_group_name is not None:
pulumi.set(__self__, "source_resource_group_name", source_resource_group_name)
if source_server_name is not None:
pulumi.set(__self__, "source_server_name", source_server_name)
if source_subscription_id is not None:
pulumi.set(__self__, "source_subscription_id", source_subscription_id)
if storage_profile is not None:
pulumi.set(__self__, "storage_profile", storage_profile)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="administratorLogin")
def administrator_login(self) -> Optional[pulumi.Input[str]]:
"""
The administrator's login name of a server. Can only be specified when the server is being created (and is required for creation).
"""
return pulumi.get(self, "administrator_login")
@administrator_login.setter
def administrator_login(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "administrator_login", value)
@property
@pulumi.getter(name="administratorLoginPassword")
def administrator_login_password(self) -> Optional[pulumi.Input[str]]:
"""
The administrator login password (required for server creation).
"""
return pulumi.get(self, "administrator_login_password")
@administrator_login_password.setter
def administrator_login_password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "administrator_login_password", value)
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> Optional[pulumi.Input[str]]:
"""
availability Zone information of the server.
"""
return pulumi.get(self, "availability_zone")
@availability_zone.setter
def availability_zone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "availability_zone", value)
@property
@pulumi.getter(name="createMode")
def create_mode(self) -> Optional[pulumi.Input[Union[str, 'CreateMode']]]:
"""
The mode to create a new PostgreSQL server.
"""
return pulumi.get(self, "create_mode")
@create_mode.setter
def create_mode(self, value: Optional[pulumi.Input[Union[str, 'CreateMode']]]):
pulumi.set(self, "create_mode", value)
@property
@pulumi.getter(name="delegatedSubnetArguments")
def delegated_subnet_arguments(self) -> Optional[pulumi.Input['ServerPropertiesDelegatedSubnetArgumentsArgs']]:
return pulumi.get(self, "delegated_subnet_arguments")
@delegated_subnet_arguments.setter
def delegated_subnet_arguments(self, value: Optional[pulumi.Input['ServerPropertiesDelegatedSubnetArgumentsArgs']]):
pulumi.set(self, "delegated_subnet_arguments", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
The display name of a server.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="haEnabled")
def ha_enabled(self) -> Optional[pulumi.Input['HAEnabledEnum']]:
"""
stand by count value can be either enabled or disabled
"""
return pulumi.get(self, "ha_enabled")
@ha_enabled.setter
def ha_enabled(self, value: Optional[pulumi.Input['HAEnabledEnum']]):
pulumi.set(self, "ha_enabled", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['IdentityArgs']]:
"""
The Azure Active Directory identity of the server.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['IdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="maintenanceWindow")
def maintenance_window(self) -> Optional[pulumi.Input['MaintenanceWindowArgs']]:
"""
Maintenance window of a server.
"""
return pulumi.get(self, "maintenance_window")
@maintenance_window.setter
def maintenance_window(self, value: Optional[pulumi.Input['MaintenanceWindowArgs']]):
pulumi.set(self, "maintenance_window", value)
@property
@pulumi.getter(name="pointInTimeUTC")
def point_in_time_utc(self) -> Optional[pulumi.Input[str]]:
"""
Restore point creation time (ISO8601 format), specifying the time to restore from.
"""
return pulumi.get(self, "point_in_time_utc")
@point_in_time_utc.setter
def point_in_time_utc(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "point_in_time_utc", value)
@property
@pulumi.getter(name="privateDnsZoneArguments")
def private_dns_zone_arguments(self) -> Optional[pulumi.Input['ServerPropertiesPrivateDnsZoneArgumentsArgs']]:
return pulumi.get(self, "private_dns_zone_arguments")
@private_dns_zone_arguments.setter
def private_dns_zone_arguments(self, value: Optional[pulumi.Input['ServerPropertiesPrivateDnsZoneArgumentsArgs']]):
pulumi.set(self, "private_dns_zone_arguments", value)
@property
@pulumi.getter(name="serverName")
def server_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the server.
"""
return pulumi.get(self, "server_name")
@server_name.setter
def server_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server_name", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['SkuArgs']]:
"""
The SKU (pricing tier) of the server.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input['SkuArgs']]):
pulumi.set(self, "sku", value)
@property
@pulumi.getter(name="sourceResourceGroupName")
def source_resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The resource group name of source PostgreSQL server name to restore from.
"""
return pulumi.get(self, "source_resource_group_name")
@source_resource_group_name.setter
def source_resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_resource_group_name", value)
@property
@pulumi.getter(name="sourceServerName")
def source_server_name(self) -> Optional[pulumi.Input[str]]:
"""
The source PostgreSQL server name to restore from.
"""
return pulumi.get(self, "source_server_name")
@source_server_name.setter
def source_server_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_server_name", value)
@property
@pulumi.getter(name="sourceSubscriptionId")
def source_subscription_id(self) -> Optional[pulumi.Input[str]]:
"""
The subscription id of source PostgreSQL server name to restore from.
"""
return pulumi.get(self, "source_subscription_id")
@source_subscription_id.setter
def source_subscription_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_subscription_id", value)
@property
@pulumi.getter(name="storageProfile")
def storage_profile(self) -> Optional[pulumi.Input['StorageProfileArgs']]:
"""
Storage profile of a server.
"""
return pulumi.get(self, "storage_profile")
@storage_profile.setter
def storage_profile(self, value: Optional[pulumi.Input['StorageProfileArgs']]):
pulumi.set(self, "storage_profile", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[Union[str, 'ServerVersion']]]:
"""
PostgreSQL Server version.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[Union[str, 'ServerVersion']]]):
pulumi.set(self, "version", value)
class Server(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
administrator_login: Optional[pulumi.Input[str]] = None,
administrator_login_password: Optional[pulumi.Input[str]] = None,
availability_zone: Optional[pulumi.Input[str]] = None,
create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None,
delegated_subnet_arguments: Optional[pulumi.Input[pulumi.InputType['ServerPropertiesDelegatedSubnetArgumentsArgs']]] = None,
display_name: Optional[pulumi.Input[str]] = None,
ha_enabled: Optional[pulumi.Input['HAEnabledEnum']] = None,
identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
maintenance_window: Optional[pulumi.Input[pulumi.InputType['MaintenanceWindowArgs']]] = None,
point_in_time_utc: Optional[pulumi.Input[str]] = None,
private_dns_zone_arguments: Optional[pulumi.Input[pulumi.InputType['ServerPropertiesPrivateDnsZoneArgumentsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
source_resource_group_name: Optional[pulumi.Input[str]] = None,
source_server_name: Optional[pulumi.Input[str]] = None,
source_subscription_id: Optional[pulumi.Input[str]] = None,
storage_profile: Optional[pulumi.Input[pulumi.InputType['StorageProfileArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
version: Optional[pulumi.Input[Union[str, 'ServerVersion']]] = None,
__props__=None):
"""
Represents a server.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] administrator_login: The administrator's login name of a server. Can only be specified when the server is being created (and is required for creation).
:param pulumi.Input[str] administrator_login_password: The administrator login password (required for server creation).
:param pulumi.Input[str] availability_zone: availability Zone information of the server.
:param pulumi.Input[Union[str, 'CreateMode']] create_mode: The mode to create a new PostgreSQL server.
:param pulumi.Input[str] display_name: The display name of a server.
:param pulumi.Input['HAEnabledEnum'] ha_enabled: stand by count value can be either enabled or disabled
:param pulumi.Input[pulumi.InputType['IdentityArgs']] identity: The Azure Active Directory identity of the server.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[pulumi.InputType['MaintenanceWindowArgs']] maintenance_window: Maintenance window of a server.
:param pulumi.Input[str] point_in_time_utc: Restore point creation time (ISO8601 format), specifying the time to restore from.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] server_name: The name of the server.
:param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The SKU (pricing tier) of the server.
:param pulumi.Input[str] source_resource_group_name: The resource group name of source PostgreSQL server name to restore from.
:param pulumi.Input[str] source_server_name: The source PostgreSQL server name to restore from.
:param pulumi.Input[str] source_subscription_id: The subscription id of source PostgreSQL server name to restore from.
:param pulumi.Input[pulumi.InputType['StorageProfileArgs']] storage_profile: Storage profile of a server.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[Union[str, 'ServerVersion']] version: PostgreSQL Server version.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ServerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Represents a server.
:param str resource_name: The name of the resource.
:param ServerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ServerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
administrator_login: Optional[pulumi.Input[str]] = None,
administrator_login_password: Optional[pulumi.Input[str]] = None,
availability_zone: Optional[pulumi.Input[str]] = None,
create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None,
delegated_subnet_arguments: Optional[pulumi.Input[pulumi.InputType['ServerPropertiesDelegatedSubnetArgumentsArgs']]] = None,
display_name: Optional[pulumi.Input[str]] = None,
ha_enabled: Optional[pulumi.Input['HAEnabledEnum']] = None,
identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
maintenance_window: Optional[pulumi.Input[pulumi.InputType['MaintenanceWindowArgs']]] = None,
point_in_time_utc: Optional[pulumi.Input[str]] = None,
private_dns_zone_arguments: Optional[pulumi.Input[pulumi.InputType['ServerPropertiesPrivateDnsZoneArgumentsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
source_resource_group_name: Optional[pulumi.Input[str]] = None,
source_server_name: Optional[pulumi.Input[str]] = None,
source_subscription_id: Optional[pulumi.Input[str]] = None,
storage_profile: Optional[pulumi.Input[pulumi.InputType['StorageProfileArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
version: Optional[pulumi.Input[Union[str, 'ServerVersion']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ServerArgs.__new__(ServerArgs)
__props__.__dict__["administrator_login"] = administrator_login
__props__.__dict__["administrator_login_password"] = administrator_login_password
__props__.__dict__["availability_zone"] = availability_zone
__props__.__dict__["create_mode"] = create_mode
__props__.__dict__["delegated_subnet_arguments"] = delegated_subnet_arguments
__props__.__dict__["display_name"] = display_name
__props__.__dict__["ha_enabled"] = ha_enabled
__props__.__dict__["identity"] = identity
__props__.__dict__["location"] = location
__props__.__dict__["maintenance_window"] = maintenance_window
__props__.__dict__["point_in_time_utc"] = point_in_time_utc
__props__.__dict__["private_dns_zone_arguments"] = private_dns_zone_arguments
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["server_name"] = server_name
__props__.__dict__["sku"] = sku
__props__.__dict__["source_resource_group_name"] = source_resource_group_name
__props__.__dict__["source_server_name"] = source_server_name
__props__.__dict__["source_subscription_id"] = source_subscription_id
__props__.__dict__["storage_profile"] = storage_profile
__props__.__dict__["tags"] = tags
__props__.__dict__["version"] = version
__props__.__dict__["byok_enforcement"] = None
__props__.__dict__["fully_qualified_domain_name"] = None
__props__.__dict__["ha_state"] = None
__props__.__dict__["name"] = None
__props__.__dict__["public_network_access"] = None
__props__.__dict__["standby_availability_zone"] = None
__props__.__dict__["state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:dbforpostgresql/v20200214preview:Server"), pulumi.Alias(type_="azure-native:dbforpostgresql/v20200214privatepreview:Server"), pulumi.Alias(type_="azure-native:dbforpostgresql/v20210601:Server"), pulumi.Alias(type_="azure-native:dbforpostgresql/v20210601preview:Server"), pulumi.Alias(type_="azure-native:dbforpostgresql/v20210615privatepreview:Server")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Server, __self__).__init__(
'azure-native:dbforpostgresql/v20210410privatepreview:Server',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Server':
"""
Get an existing Server resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ServerArgs.__new__(ServerArgs)
__props__.__dict__["administrator_login"] = None
__props__.__dict__["availability_zone"] = None
__props__.__dict__["byok_enforcement"] = None
__props__.__dict__["delegated_subnet_arguments"] = None
__props__.__dict__["display_name"] = None
__props__.__dict__["fully_qualified_domain_name"] = None
__props__.__dict__["ha_enabled"] = None
__props__.__dict__["ha_state"] = None
__props__.__dict__["identity"] = None
__props__.__dict__["location"] = None
__props__.__dict__["maintenance_window"] = None
__props__.__dict__["name"] = None
__props__.__dict__["point_in_time_utc"] = None
__props__.__dict__["private_dns_zone_arguments"] = None
__props__.__dict__["public_network_access"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["source_resource_group_name"] = None
__props__.__dict__["source_server_name"] = None
__props__.__dict__["source_subscription_id"] = None
__props__.__dict__["standby_availability_zone"] = None
__props__.__dict__["state"] = None
__props__.__dict__["storage_profile"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["version"] = None
return Server(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="administratorLogin")
def administrator_login(self) -> pulumi.Output[Optional[str]]:
"""
The administrator's login name of a server. Can only be specified when the server is being created (and is required for creation).
"""
return pulumi.get(self, "administrator_login")
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> pulumi.Output[Optional[str]]:
"""
availability Zone information of the server.
"""
return pulumi.get(self, "availability_zone")
@property
@pulumi.getter(name="byokEnforcement")
def byok_enforcement(self) -> pulumi.Output[str]:
"""
Status showing whether the data encryption is enabled with customer-managed keys.
"""
return pulumi.get(self, "byok_enforcement")
@property
@pulumi.getter(name="delegatedSubnetArguments")
def delegated_subnet_arguments(self) -> pulumi.Output[Optional['outputs.ServerPropertiesResponseDelegatedSubnetArguments']]:
return pulumi.get(self, "delegated_subnet_arguments")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[Optional[str]]:
"""
The display name of a server.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="fullyQualifiedDomainName")
def fully_qualified_domain_name(self) -> pulumi.Output[str]:
"""
The fully qualified domain name of a server.
"""
return pulumi.get(self, "fully_qualified_domain_name")
@property
@pulumi.getter(name="haEnabled")
def ha_enabled(self) -> pulumi.Output[Optional[str]]:
"""
stand by count value can be either enabled or disabled
"""
return pulumi.get(self, "ha_enabled")
@property
@pulumi.getter(name="haState")
def ha_state(self) -> pulumi.Output[str]:
"""
A state of a HA server that is visible to user.
"""
return pulumi.get(self, "ha_state")
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.IdentityResponse']]:
"""
The Azure Active Directory identity of the server.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="maintenanceWindow")
def maintenance_window(self) -> pulumi.Output[Optional['outputs.MaintenanceWindowResponse']]:
"""
Maintenance window of a server.
"""
return pulumi.get(self, "maintenance_window")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="pointInTimeUTC")
def point_in_time_utc(self) -> pulumi.Output[Optional[str]]:
"""
Restore point creation time (ISO8601 format), specifying the time to restore from.
"""
return pulumi.get(self, "point_in_time_utc")
@property
@pulumi.getter(name="privateDnsZoneArguments")
def private_dns_zone_arguments(self) -> pulumi.Output[Optional['outputs.ServerPropertiesResponsePrivateDnsZoneArguments']]:
return pulumi.get(self, "private_dns_zone_arguments")
@property
@pulumi.getter(name="publicNetworkAccess")
def public_network_access(self) -> pulumi.Output[str]:
"""
public network access is enabled or not
"""
return pulumi.get(self, "public_network_access")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
"""
The SKU (pricing tier) of the server.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="sourceResourceGroupName")
def source_resource_group_name(self) -> pulumi.Output[Optional[str]]:
"""
The resource group name of source PostgreSQL server name to restore from.
"""
return pulumi.get(self, "source_resource_group_name")
@property
@pulumi.getter(name="sourceServerName")
def source_server_name(self) -> pulumi.Output[Optional[str]]:
"""
The source PostgreSQL server name to restore from.
"""
return pulumi.get(self, "source_server_name")
@property
@pulumi.getter(name="sourceSubscriptionId")
def source_subscription_id(self) -> pulumi.Output[Optional[str]]:
"""
The subscription id of source PostgreSQL server name to restore from.
"""
return pulumi.get(self, "source_subscription_id")
@property
@pulumi.getter(name="standbyAvailabilityZone")
def standby_availability_zone(self) -> pulumi.Output[str]:
"""
availability Zone information of the server.
"""
return pulumi.get(self, "standby_availability_zone")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
A state of a server that is visible to user.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="storageProfile")
def storage_profile(self) -> pulumi.Output[Optional['outputs.StorageProfileResponse']]:
"""
Storage profile of a server.
"""
return pulumi.get(self, "storage_profile")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def version(self) -> pulumi.Output[Optional[str]]:
"""
PostgreSQL Server version.
"""
return pulumi.get(self, "version")
|
[
"[email protected]"
] | |
140d5a49d274013ddeace0c911a004f3725a8cd0
|
3432efd194137e1d0cb05656eb547c9992229f02
|
/web_test/nineteen cookie/cookie.py
|
d0991972774ab8c00cf93a1c3cace275a13d7ecc
|
[] |
no_license
|
zhanganxia/other_code
|
31747d7689ae1e91fcf3f9f758df130246e7d495
|
8d09d9d0b6d6a1a9b8755487f926ac6fafd761fa
|
refs/heads/master
| 2021-09-04T02:22:38.632685 | 2018-01-14T15:37:14 | 2018-01-14T15:37:14 | 107,007,482 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 231 |
py
|
#coding=utf-8
from selenium import webdriver
import time
driver = webdriver.Chrome()
driver.get("http://www.youdao.com")
# Retrieve the cookies for the current browser session
cookie = driver.get_cookies()
# Print the retrieved cookie information
print(cookie)
driver.quit()
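# Illustrative note (an assumption, not part of the original script): before
# driver.quit(), a single cookie can also be added or read by name with the
# standard Selenium cookie API, e.g.
#   driver.add_cookie({'name': 'demo', 'value': 'value'})
#   print(driver.get_cookie('demo'))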
|
[
"[email protected]"
] | |
5926a539dc685e3b284ebafa5e13af890c809432
|
5a281cb78335e06c631181720546f6876005d4e5
|
/openstack-cyborg-2.0.0/cyborg/accelerator/drivers/gpu/utils.py
|
15a926e52a1cf7a63768cf2955192eb569ff9f26
|
[
"Apache-2.0"
] |
permissive
|
scottwedge/OpenStack-Stein
|
d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8
|
7077d1f602031dace92916f14e36b124f474de15
|
refs/heads/master
| 2021-03-22T16:07:19.561504 | 2020-03-15T01:31:10 | 2020-03-15T01:31:10 | 247,380,811 | 0 | 0 |
Apache-2.0
| 2020-03-15T01:24:15 | 2020-03-15T01:24:15 | null |
UTF-8
|
Python
| false | false | 4,150 |
py
|
# Copyright 2018 Beijing Lenovo Software Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utils for GPU driver.
"""
from oslo_log import log as logging
from oslo_serialization import jsonutils
import re
import subprocess
from cyborg.objects.driver_objects import driver_deployable, driver_device, \
driver_attach_handle, driver_controlpath_id
from cyborg.common import constants
LOG = logging.getLogger(__name__)
GPU_FLAGS = ["VGA compatible controller", "3D controller"]
GPU_INFO_PATTERN = re.compile("(?P<devices>[0-9]{4}:[0-9]{2}:[0-9]{2}\.[0-9]) "
"(?P<controller>.*) [\[].*]: (?P<name>.*) .*"
"[\[](?P<vendor_id>[0-9a-fA-F]"
"{4}):(?P<product_id>[0-9a-fA-F]{4})].*")
# NOTE(wangzhh): The implementation of current release doesn't support virtual
# GPU.
def discover_vendors():
cmd = "sudo lspci -nnn -D | grep -E '%s'"
cmd = cmd % "|".join(GPU_FLAGS)
# FIXME(wangzhh): Use oslo.privsep instead of subprocess here to prevent
# shell injection attacks.
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
p.wait()
gpus = p.stdout.readlines()
vendors = set()
for gpu in gpus:
m = GPU_INFO_PATTERN.match(gpu)
if m:
vendor_id = m.groupdict().get("vendor_id")
vendors.add(vendor_id)
return vendors
def discover_gpus(vender_id=None):
cmd = "sudo lspci -nnn -D| grep -E '%s'"
cmd = cmd % "|".join(GPU_FLAGS)
if vender_id:
cmd = cmd + "| grep " + vender_id
# FIXME(wangzhh): Use oslo.privsep instead of subprocess here to prevent
# shell injection attacks.
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
p.wait()
gpus = p.stdout.readlines()
gpu_list = []
for gpu in gpus:
m = GPU_INFO_PATTERN.match(gpu)
if m:
gpu_dict = m.groupdict()
gpu_list.append(_generate_driver_device(gpu_dict))
return gpu_list
def _generate_driver_device(gpu):
driver_device_obj = driver_device.DriverDevice()
driver_device_obj.vendor = gpu["vendor_id"]
driver_device_obj.model = gpu.get('model', 'miss model info')
std_board_info = {'product_id': gpu.get('product_id', None),
'controller': gpu.get('controller', None)}
driver_device_obj.std_board_info = jsonutils.dumps(std_board_info)
driver_device_obj.type = constants.DEVICE_GPU
driver_device_obj.controlpath_id = _generate_controlpath_id(gpu)
driver_device_obj.deployable_list = _generate_dep_list(gpu)
return driver_device_obj
def _generate_controlpath_id(gpu):
driver_cpid = driver_controlpath_id.DriverControlPathID()
driver_cpid.cpid_type = "PCI"
driver_cpid.cpid_info = gpu["devices"]
return driver_cpid
def _generate_dep_list(gpu):
dep_list = []
driver_dep = driver_deployable.DriverDeployable()
driver_dep.attach_handle_list = []
# NOTE(wangzhh): The name of deployable should be unique, its format is
    # under discussion, and may look like
# <ComputeNodeName>_<NumaNodeName>_<CyborgName>_<NumInHost>, now simply
# named <Device_name>_<Device_address>
driver_dep.name = gpu.get('name', '') + '_' + gpu["devices"]
driver_dep.num_accelerators = 1
driver_dep.attach_handle_list = \
[_generate_attach_handle(gpu)]
dep_list.append(driver_dep)
return dep_list
def _generate_attach_handle(gpu):
driver_ah = driver_attach_handle.DriverAttachHandle()
driver_ah.attach_type = "PCI"
driver_ah.in_use = False
driver_ah.attach_info = gpu["devices"]
return driver_ah
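# Usage sketch (an illustrative assumption, not part of the original module): a GPU
# driver would typically enumerate vendors first and then report their devices, e.g.
#   for vendor in discover_vendors():
#       for device in discover_gpus(vendor):
#           LOG.info("Reporting GPU device: %s", device)
# Both helpers shell out to `sudo lspci`, so lspci and sudo access are required.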
|
[
"Wayne [email protected]"
] |
Wayne [email protected]
|
5b8c473a88965f160d27bbdbf6da45b8cdebd7e6
|
f8e778d31a83fdbacb0c498f6c71aa2c48f2d000
|
/scenarios/card_hold_show/executable.py
|
2631ad59e96fdaab9c937f252adb2b5251b792f3
|
[
"MIT"
] |
permissive
|
bsmartt13/balanced-python
|
a254c695032a80bb1919b8e3fc5cb4c28b4ef44b
|
c5e192f9547f7b251486cf78d98933410a31daca
|
refs/heads/master
| 2020-12-27T15:01:01.237398 | 2014-03-11T00:00:22 | 2014-03-11T00:06:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 157 |
py
|
import balanced
balanced.configure('ak-test-1kvvievk0Qqw5wQPsrlM9g7wQwNe62cyc')
card_hold = balanced.CardHold.fetch('/card_holds/HL2bT9uMRkTZkfSPmA2pBD9S')
|
[
"[email protected]"
] | |
4202d20e3558a69f15aca8363391280f1b305552
|
bdc10ba57424040129cc72ad018ff26bc8bca66a
|
/ConfigDefinitions/UserConfigs/SMHTT_2018_AntiIso_Config_Deep/WZ2L2QConfig.py
|
202bcd9c8f5a2cfb9428c372ab52265bd277a253
|
[] |
no_license
|
aloeliger/Jesterworks
|
61e0ac38ca325fefbbd8ccedaa8eb02d8a76ebbe
|
96a22bac4ce20b91aba5884eb0e5667fcea3bc9a
|
refs/heads/master
| 2021-06-09T15:39:06.976110 | 2021-04-23T11:25:06 | 2021-04-23T11:25:06 | 157,698,363 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,139 |
py
|
from ConfigDefinitions.JesterworksConfigurations import JesterworksConfiguration as Config
from ConfigDefinitions.BranchAdditions.UserDefinedCollections.SMHTT_2018_MC_Collection import MCCollection as BranchCollection
from ConfigDefinitions.BranchAdditions.UserDefinedCollections.Differential_2018_Collection import DifferentialCollection as PostfixCollection
from ConfigDefinitions.CuttingDefinitions.UserCutConfigs.SMHTT2018Cuts_MC_NoEmbeddedOverlap_wDeep import SMHTT2018Cuts as CutConfig
from ConfigDefinitions.EndActionDefinitions.UserConfigs.GrabHistograms import HistogramGrabber as HistogramGrabber
DataConfig = Config()
DataConfig.Path = "/data/ccaillol/differentialmt2018_svfitted_3aug/"
DataConfig.Files = ["WZ2L2Q.root"]
DataConfig.InputTreeName = "mutau_tree"
DataConfig.SampleName = "WZ2L2Q"
DataConfig.OutputPath = "/data/aloeliger/SMHTT_Selected_2018_AntiIso_Deep/"
DataConfig.OutputFile = "WZ2L2Q.root"
DataConfig.OutputTreeName = "mt_Selected"
DataConfig.BranchCollection = BranchCollection
DataConfig.PostfixBranchCollection = PostfixCollection
DataConfig.CutConfig = CutConfig
DataConfig.EndAction = HistogramGrabber
|
[
"[email protected]"
] | |
351d6d1a08897caeb5585ccbdb3876717d0023d5
|
e10a6d844a286db26ef56469e31dc8488a8c6f0e
|
/drops/losses_lt.py
|
c869e8d540379dbcd7601885b983516301ccee98
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
Jimmy-INL/google-research
|
54ad5551f97977f01297abddbfc8a99a7900b791
|
5573d9c5822f4e866b6692769963ae819cb3f10d
|
refs/heads/master
| 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 |
Apache-2.0
| 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null |
UTF-8
|
Python
| false | false | 11,409 |
py
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Loss of long-tail experiments.
"""
import functools
import numpy as np
import tensorflow as tf
def MakeLossFunc(loss_name, samples_per_cls,
gamma=0.9, beta=1.0, s=1, tau=1.0):
"""Make a loss function that takes y_true and y_pred (logits) as input."""
loss_function = loss_name
if loss_function == 'ce':
return functools.partial(
CELoss,
sample_per_cls=samples_per_cls,
from_logits=True)
if loss_function == 'up_ce':
return functools.partial(
CELoss,
sample_per_cls=samples_per_cls,
from_logits=True)
if loss_function == 'ldam':
return functools.partial(
LDAMLoss,
sample_per_cls=samples_per_cls,
gamma=gamma,
s=s)
if loss_function == 'focal':
return functools.partial(
FocalLoss,
gamma=gamma,
sample_per_cls=samples_per_cls,
from_logits=True)
if loss_function == 'cb':
return functools.partial(
CBLoss,
sample_per_cls=samples_per_cls,
beta=beta)
if loss_function == 'cb_focal':
return functools.partial(
CBFocal,
gamma=gamma,
beta=beta,
sample_per_cls=samples_per_cls,
from_logits=True)
if loss_function == 'bsm':
return functools.partial(
BalancedSoftmax,
from_logits=True,
sample_per_cls=samples_per_cls)
if loss_function == 'logit_adj':
return functools.partial(
LogitAdjust,
sample_per_cls=samples_per_cls,
tau=tau)
if loss_function == 'posthoc_ce':
return functools.partial(
CELoss,
sample_per_cls=samples_per_cls,
from_logits=True)
if loss_function == 'posthoc':
return functools.partial(
LogitAdjust,
sample_per_cls=samples_per_cls,
tau=tau)
if loss_function == 'drops':
return functools.partial(
CELoss,
sample_per_cls=samples_per_cls,
from_logits=True)
raise ValueError('Unsupported loss function.')
def CELoss(y_true,
y_pred,
sample_per_cls,
from_logits=False):
"""ce loss.
Args:
y_true: True labels, categorical of shape (batch_size,).
y_pred: logits, a float32 tensor of shape [batch_size,num_classes].
sample_per_cls: number of samples per class [num_classes].
from_logits: True if y_pred is not soft-maxed.
Returns:
    loss: A scalar tensor, the mean cross-entropy loss over the batch.
"""
labels_oh = MaybeOneHot(y_true, depth=len(sample_per_cls))
loss = tf.keras.backend.categorical_crossentropy(
labels_oh, y_pred, from_logits=from_logits)
return tf.reduce_mean(loss)
def CBLoss(y_true,
y_pred,
sample_per_cls,
beta):
"""Computer class balanced loss for MULTICLASS classification.
Paper link: https://arxiv.org/pdf/1901.05555.pdf
Args:
y_true: True labels, categorical of shape (batch_size,).
y_pred: logits, a float32 tensor of shape [batch_size,num_classes].
sample_per_cls: number of samples per class [num_classes].
beta: A scalar for CBloss hyper-parameter.
Returns:
    loss: A scalar tensor, the class-balanced loss averaged over the batch.
"""
batch_size = y_pred.shape[0]
class_weight = [(1-beta)/(1-beta**i) for i in sample_per_cls]
class_weight = tf.convert_to_tensor(class_weight, dtype=tf.float32)
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.dtypes.cast(y_true, dtype=tf.dtypes.int64)
# Equations:
# loss = -[(1-beta) / (1-beta^n_y)] * log(prob_y)
xent_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=y_true, logits=y_pred)
loss = xent_loss
class_weight = tf.gather(class_weight, y_true, axis=0,
batch_dims=y_true.shape.rank)
scaler_weight = float(batch_size) * class_weight / tf.reduce_sum(class_weight)
loss *= scaler_weight
return tf.reduce_sum(loss)/batch_size
def CBFocal(y_true,
y_pred,
sample_per_cls,
beta,
gamma,
from_logits=False):
"""Computer class balanced loss for MULTICLASS classification.
Args:
y_true: True labels, categorical of shape (batch_size,).
y_pred: logits, a float32 tensor of shape [batch_size,num_classes].
sample_per_cls: number of samples per class [num_classes].
beta: A scalar for CBloss hyper-parameter.
gamma: A scalar for CBloss hyper-parameter.
from_logits: True if y_pred is not soft-maxed.
Returns:
    loss: A scalar tensor, the class-balanced focal loss averaged over the batch.
"""
batch_size = y_pred.shape[0]
class_weight = [(1-beta)/(1-beta**i) for i in sample_per_cls]
class_weight = tf.convert_to_tensor(class_weight, dtype=tf.float32)
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.dtypes.cast(y_true, dtype=tf.dtypes.int64)
if from_logits:
y_pred_prob = tf.nn.softmax(y_pred, axis=-1)
else:
y_pred_prob = y_pred
# Equations:
# loss = -[(1-beta) / (1-beta^n_y)] * (1-prob_y)^gamma * log(prob_y)
xent_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=y_true, logits=y_pred)
probs = tf.gather(y_pred_prob, y_true, axis=-1, batch_dims=y_true.shape.rank)
focal_modulation = (1 - probs) ** gamma
loss = focal_modulation * xent_loss
class_weight = tf.gather(class_weight, y_true, axis=0,
batch_dims=y_true.shape.rank)
scaler_weight = float(batch_size) * class_weight / tf.reduce_sum(class_weight)
loss *= scaler_weight
return tf.reduce_sum(loss)/batch_size
def LDAMLoss(y_true,
y_pred,
sample_per_cls,
gamma,
s=30):
"""Computer LDAM loss for MULTICLASS classification.
Args:
y_true: True labels, categorical of shape (batch_size,).
y_pred: logits, a float32 tensor of shape [batch_size,num_classes].
sample_per_cls: number of samples per class [num_classes].
gamma: A scalar for the re-weighting of hyper-parameter.
s: hyper-parameter.
Returns:
    loss: A scalar tensor, the mean LDAM loss over the batch.
"""
num_classes = y_pred.shape[1]
class_weight = 1.0 / np.sqrt(np.sqrt(sample_per_cls))
class_weight = class_weight * (gamma / np.max(class_weight))
class_weight = tf.convert_to_tensor(class_weight, dtype=tf.float32)
y_true_oh = ConvertToOneHot(y_true, depth=num_classes)
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.dtypes.cast(y_true, dtype=tf.dtypes.int64)
batch_w = tf.matmul(class_weight[None, :], tf.transpose(y_true_oh))
batch_w = tf.reshape(batch_w, (-1, 1))
y_pred_m = y_pred - batch_w
# if condition is true, return y_pred_m[index], otherwise return y_pred[index]
index_bool = tf.cast(y_true_oh, tf.bool)
output = tf.where(index_bool, y_pred_m, y_pred)
logits = output
loss = tf.nn.softmax_cross_entropy_with_logits(
labels=y_true_oh, logits=logits*s)
return tf.reduce_mean(loss)
def FocalLoss(y_true,
y_pred,
sample_per_cls,
gamma,
from_logits=False):
"""Computer focal loss for MULTICLASS classification.
Args:
y_true: True labels, categorical of shape (batch_size,).
y_pred: logits, a float32 tensor of shape [batch_size,num_classes].
sample_per_cls: number of samples per class [num_classes].
gamma: A scalar for focal loss gamma hyper-parameter.
from_logits: True if y_pred is not soft-maxed.
Returns:
    loss: A scalar tensor, the focal loss averaged over the batch.
"""
batch_size = y_pred.shape[0]
recip_spc = [1/i for i in sample_per_cls]
class_weight = tf.cast(recip_spc, dtype=tf.float32)
# Normalizer to ensure that sum of class weights is equal to batch_size (like
# in ERM)
class_weight_norm = float(batch_size) * class_weight
class_weight_norm /= tf.reduce_sum(class_weight)
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.dtypes.cast(y_true, dtype=tf.dtypes.int64)
if from_logits:
y_pred_prob = tf.nn.softmax(y_pred, axis=-1)
else:
y_pred_prob = y_pred
# Equations:
# loss = -alpha_y * (1-prob_y)^gamma * log(prob_y)
xent_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=y_true, logits=y_pred)
probs = tf.gather(y_pred_prob, y_true, axis=-1, batch_dims=y_true.shape.rank)
focal_modulation = (1 - probs) ** gamma
loss = focal_modulation * xent_loss
class_weight_norm = tf.gather(class_weight_norm, y_true, axis=0,
batch_dims=y_true.shape.rank)
loss *= class_weight_norm
return tf.reduce_sum(loss)/batch_size
def LogitAdjust(y_true, y_pred, sample_per_cls, tau):
"""Implementation of logit adjustment loss.
Args:
y_true: True labels, categorical of shape (batch_size,).
y_pred: logits, a float32 tensor of shape [batch_size,num_classes].
sample_per_cls: number of samples per class [num_classes].
tau: Temperature scaling parameter for the base probabilities.
Returns:
    loss: A scalar tensor, the mean logit-adjusted cross-entropy over the batch.
"""
spc = tf.cast(sample_per_cls, dtype=tf.float32)
# Generate class prior (a list of probabilities: P(Y=i))
spc_norm = spc / tf.reduce_sum(spc)
y_pred = tf.cast(y_pred, dtype=tf.float32)
y_pred = y_pred + tau * tf.math.log(
tf.cast(spc_norm + 1e-12, dtype=tf.float32))
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=y_true, logits=y_pred)
return tf.reduce_mean(loss, axis=0)
def BalancedSoftmax(y_true,
y_pred,
sample_per_cls,
from_logits=False):
"""Implementation of balacned softmax https://arxiv.org/abs/2007.10740.
Args:
y_true: True labels, categorical of shape (batch_size,).
y_pred: logits, a float32 tensor of shape [batch_size,num_classes].
sample_per_cls: number of samples per class [num_classes].
from_logits: True if y_pred is not soft-maxed.
Returns:
    loss: A scalar tensor, the mean balanced-softmax cross-entropy over the batch.
"""
num_classes = y_pred.shape[1]
# batch_size = y_pred.shape[0]
y_true_oh = ConvertToOneHot(y_true, depth=num_classes)
y_pred = tf.cast(y_pred, dtype=tf.float32)
spc = tf.cast(sample_per_cls, dtype=tf.float32)
# Generate class prior (a list of probabilities: P(Y=i))
spc_norm = spc / tf.reduce_sum(spc)
if from_logits:
# reweight exponential of logits with class prior
y_b_exp = tf.exp(y_pred) * spc_norm
logits_modified = y_b_exp / tf.reduce_sum(y_b_exp, 1, keepdims=True)
else:
raise ValueError(
        'BalancedSoftmax expects logits as input; please set from_logits=True.'
)
cce = tf.keras.losses.CategoricalCrossentropy()
loss = cce(y_true_oh, logits_modified)
return loss
def ConvertToOneHot(labels, depth):
if len(labels.shape) > 1:
return labels
else:
return tf.one_hot(labels, depth=depth)
def MaybeOneHot(labels, depth):
if len(labels.shape) > 1:
return labels
else:
return tf.one_hot(labels, depth=depth)
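if __name__ == '__main__':
  # Minimal usage sketch (added for illustration; the 3-class long-tail setup below
  # is an assumption, not part of the original experiments).
  samples_per_cls = [100, 10, 1]
  loss_fn = MakeLossFunc('logit_adj', samples_per_cls, tau=1.0)
  y_true = tf.constant([0, 2, 1])
  y_pred = tf.random.normal([3, 3])
  print('logit-adjusted loss:', float(loss_fn(y_true, y_pred)))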
|
[
"[email protected]"
] | |
c75c44f6362a5182bd3aaf113b8dcc84a58fecf2
|
f4e69d05d4bea5198f5bd15c968562fac654c88e
|
/openapi_client/model/otoroshi_models_bad_response.py
|
f69321f6aba8eab34a6902ec62e1ba31844058e6
|
[] |
no_license
|
krezreb/openapi-client-otoroshi
|
2877ae9230b1ca29024880994420101a232cb906
|
0dafc780777857b9a0d0d8264e215bd6e0557224
|
refs/heads/master
| 2023-05-06T07:23:45.988523 | 2021-05-27T13:00:18 | 2021-05-27T13:00:18 | 371,374,475 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,681 |
py
|
"""
Otoroshi Admin API
Admin API of the Otoroshi reverse proxy # noqa: E501
The version of the OpenAPI document: 1.5.0-alpha.14
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from openapi_client.exceptions import ApiAttributeError
class OtoroshiModelsBadResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'status': (int,), # noqa: E501
'body': (str,), # noqa: E501
'headers': ({str: (str,)},), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'status': 'status', # noqa: E501
'body': 'body', # noqa: E501
'headers': 'headers', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""OtoroshiModelsBadResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
status (int): The HTTP status for the response. [optional] # noqa: E501
body (str): The body of the HTTP response. [optional] # noqa: E501
headers ({str: (str,)}): The HTTP headers of the response. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""OtoroshiModelsBadResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
status (int): The HTTP status for the response. [optional] # noqa: E501
body (str): The body of the HTTP response. [optional] # noqa: E501
headers ({str: (str,)}): The HTTP headers of the response. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
[
"[email protected]"
] | |
74dfd81b92c42b2eb058bc68e74d6c4e6896e6b6
|
b57d337ddbe946c113b2228a0c167db787fd69a1
|
/scr/py00587orc_shaman.py
|
49d5f9d1d140d117755bced2b4c7db596c67a640
|
[] |
no_license
|
aademchenko/ToEE
|
ebf6432a75538ae95803b61c6624e65b5cdc53a1
|
dcfd5d2de48b9d9031021d9e04819b309d71c59e
|
refs/heads/master
| 2020-04-06T13:56:27.443772 | 2018-11-14T09:35:57 | 2018-11-14T09:35:57 | 157,520,715 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,064 |
py
|
from toee import *
from utilities import *
from combat_standard_routines import *
def san_dying( attachee, triggerer ):
if should_modify_CR( attachee ):
modify_CR( attachee, get_av_level() )
return RUN_DEFAULT
def san_start_combat( attachee, triggerer ):
webbed = break_free( attachee, 3)
if (attachee != OBJ_HANDLE_NULL and critter_is_unconscious(attachee) != 1 and not attachee.d20_query(Q_Prone) and attachee.leader_get() == OBJ_HANDLE_NULL):
if (obj_percent_hp(attachee) <= 75):
if (attachee.name == 8909):
## hb gatekeeper east
if (game.global_vars[786] <= 17):
attachee.obj_set_int(obj_f_critter_strategy, 541)
game.global_vars[786] = game.global_vars[786] + 1
else:
attachee.obj_set_int(obj_f_critter_strategy, 535)
elif (attachee.name == 8920):
                ## hb gatekeeper west
if (game.global_vars[789] <= 17):
attachee.obj_set_int(obj_f_critter_strategy, 541)
game.global_vars[789] = game.global_vars[789] + 1
else:
attachee.obj_set_int(obj_f_critter_strategy, 535)
elif (attachee.name == 8960):
## hb ravine north
if (game.global_vars[792] <= 17):
attachee.obj_set_int(obj_f_critter_strategy, 541)
game.global_vars[792] = game.global_vars[792] + 1
else:
attachee.obj_set_int(obj_f_critter_strategy, 535)
elif (attachee.name == 8966):
## hb ravine south
if (game.global_vars[793] <= 17):
attachee.obj_set_int(obj_f_critter_strategy, 541)
game.global_vars[793] = game.global_vars[793] + 1
else:
attachee.obj_set_int(obj_f_critter_strategy, 535)
elif (obj_percent_hp(attachee) >= 76):
if (attachee.name == 8909):
## hb gatekeeper east
orcserg01 = find_npc_near( attachee, 8894 )
orcdomi01 = find_npc_near( attachee, 8895 )
orcbowm01 = find_npc_near( attachee, 8899 )
orcbowm02 = find_npc_near( attachee, 8900 )
orcarch01 = find_npc_near( attachee, 8901 )
orcarch02 = find_npc_near( attachee, 8902 )
orcsnip01 = find_npc_near( attachee, 8903 )
hilgian01 = find_npc_near( attachee, 8904 )
hilgian02 = find_npc_near( attachee, 8905 )
hilgian03 = find_npc_near( attachee, 8906 )
hilgian04 = find_npc_near( attachee, 8907 )
hilgian05 = find_npc_near( attachee, 8908 )
orcmedi01 = find_npc_near( attachee, 8910 )
if (obj_percent_hp(orcserg01) <= 75 or obj_percent_hp(orcdomi01) <= 75 or obj_percent_hp(orcbowm01) <= 75 or obj_percent_hp(orcbowm02) <= 75 or obj_percent_hp(orcarch01) <= 75 or obj_percent_hp(orcarch02) <= 75 or obj_percent_hp(orcsnip01) <= 75 or obj_percent_hp(hilgian01) <= 75 or obj_percent_hp(hilgian02) <= 75 or obj_percent_hp(hilgian03) <= 75 or obj_percent_hp(hilgian04) <= 75 or obj_percent_hp(hilgian05) <= 75 or obj_percent_hp(orcmedi01) <= 75):
if (game.global_vars[786] <= 17):
attachee.obj_set_int(obj_f_critter_strategy, 475)
game.global_vars[786] = game.global_vars[786] + 1
else:
attachee.obj_set_int(obj_f_critter_strategy, 535)
else:
attachee.obj_set_int(obj_f_critter_strategy, 539)
elif (attachee.name == 8920):
                ## hb gatekeeper west
ogrexxx01 = find_npc_near( attachee, 8911 )
ettinxx01 = find_npc_near( attachee, 8912 )
ettinxx02 = find_npc_near( attachee, 8913 )
ettinxx03 = find_npc_near( attachee, 8914 )
ettinxx04 = find_npc_near( attachee, 8915 )
ettinxx05 = find_npc_near( attachee, 8916 )
orcbowm01 = find_npc_near( attachee, 8917 )
orcbowm02 = find_npc_near( attachee, 8918 )
orcarch01 = find_npc_near( attachee, 8919 )
orcmedi01 = find_npc_near( attachee, 8921 )
if (obj_percent_hp(ogrexxx01) <= 75 or obj_percent_hp(ettinxx01) <= 75 or obj_percent_hp(ettinxx02) <= 75 or obj_percent_hp(ettinxx03) <= 75 or obj_percent_hp(ettinxx04) <= 75 or obj_percent_hp(ettinxx05) <= 75 or obj_percent_hp(orcbowm01) <= 75 or obj_percent_hp(orcbowm02) <= 75 or obj_percent_hp(orcarch01) <= 75 or obj_percent_hp(orcmedi01) <= 75):
if (game.global_vars[789] <= 17):
attachee.obj_set_int(obj_f_critter_strategy, 475)
game.global_vars[789] = game.global_vars[789] + 1
else:
attachee.obj_set_int(obj_f_critter_strategy, 535)
else:
attachee.obj_set_int(obj_f_critter_strategy, 539)
elif (attachee.name == 8960):
## hb ravine north
gnollxx01 = find_npc_near( attachee, 8931 )
gnollxx02 = find_npc_near( attachee, 8932 )
gnollxx03 = find_npc_near( attachee, 8933 )
gnollxx04 = find_npc_near( attachee, 8934 )
gnollxx05 = find_npc_near( attachee, 8935 )
bugbear01 = find_npc_near( attachee, 8936 )
bugbear02 = find_npc_near( attachee, 8937 )
bugbear03 = find_npc_near( attachee, 8938 )
bugbear04 = find_npc_near( attachee, 8939 )
ogrexxx01 = find_npc_near( attachee, 8969 )
ogrexxx02 = find_npc_near( attachee, 8970 )
ogrexxx03 = find_npc_near( attachee, 8971 )
orcmedi01 = find_npc_near( attachee, 8961 )
orcbowm01 = find_npc_near( attachee, 8978 )
orcarch01 = find_npc_near( attachee, 8979 )
orcsnip01 = find_npc_near( attachee, 8980 )
orcmark01 = find_npc_near( attachee, 8981 )
orcsnip02 = find_npc_near( attachee, 8982 )
orcarch02 = find_npc_near( attachee, 8983 )
if (obj_percent_hp(gnollxx01) <= 75 or obj_percent_hp(gnollxx02) <= 75 or obj_percent_hp(gnollxx03) <= 75 or obj_percent_hp(gnollxx04) <= 75 or obj_percent_hp(gnollxx05) <= 75 or obj_percent_hp(bugbear01) <= 75 or obj_percent_hp(bugbear02) <= 75 or obj_percent_hp(bugbear03) <= 75 or obj_percent_hp(bugbear04) <= 75 or obj_percent_hp(ogrexxx01) <= 75 or obj_percent_hp(ogrexxx02) <= 75 or obj_percent_hp(ogrexxx03) <= 75 or obj_percent_hp(orcmedi01) <= 75 or obj_percent_hp(orcbowm01) <= 75 or obj_percent_hp(orcarch01) <= 75 or obj_percent_hp(orcsnip01) <= 75 or obj_percent_hp(orcmark01) <= 75 or obj_percent_hp(orcsnip02) <= 75 or obj_percent_hp(orcarch02) <= 75):
if (game.global_vars[792] <= 17):
attachee.obj_set_int(obj_f_critter_strategy, 475)
game.global_vars[792] = game.global_vars[792] + 1
else:
attachee.obj_set_int(obj_f_critter_strategy, 535)
else:
attachee.obj_set_int(obj_f_critter_strategy, 539)
elif (attachee.name == 8966):
## hb ravine south
orcserg02 = find_npc_near( attachee, 8950 )
bugbear06 = find_npc_near( attachee, 8951 )
bugbear07 = find_npc_near( attachee, 8952 )
bugbear08 = find_npc_near( attachee, 8953 )
bugbear09 = find_npc_near( attachee, 8954 )
gnollxx06 = find_npc_near( attachee, 8955 )
gnollxx07 = find_npc_near( attachee, 8956 )
gnollxx08 = find_npc_near( attachee, 8957 )
gnollxx09 = find_npc_near( attachee, 8958 )
ettinxx01 = find_npc_near( attachee, 8975 )
ettinxx02 = find_npc_near( attachee, 8976 )
ettinxx03 = find_npc_near( attachee, 8977 )
orcmedi02 = find_npc_near( attachee, 8965 )
orcarch04 = find_npc_near( attachee, 8985 )
orcsnip03 = find_npc_near( attachee, 8987 )
orcbowm03 = find_npc_near( attachee, 8989 )
orcarch05 = find_npc_near( attachee, 8991 )
if (obj_percent_hp(orcserg02) <= 75 or obj_percent_hp(bugbear06) <= 75 or obj_percent_hp(bugbear07) <= 75 or obj_percent_hp(bugbear08) <= 75 or obj_percent_hp(bugbear09) <= 75 or obj_percent_hp(gnollxx06) <= 75 or obj_percent_hp(gnollxx07) <= 75 or obj_percent_hp(gnollxx08) <= 75 or obj_percent_hp(gnollxx09) <= 75 or obj_percent_hp(ettinxx01) <= 75 or obj_percent_hp(ettinxx02) <= 75 or obj_percent_hp(ettinxx03) <= 75 or obj_percent_hp(orcmedi02) <= 75 or obj_percent_hp(orcarch04) <= 75 or obj_percent_hp(orcsnip03) <= 75 or obj_percent_hp(orcbowm03) <= 75 or obj_percent_hp(orcarch05) <= 75):
if (game.global_vars[793] <= 17):
attachee.obj_set_int(obj_f_critter_strategy, 475)
game.global_vars[793] = game.global_vars[793] + 1
else:
attachee.obj_set_int(obj_f_critter_strategy, 535)
else:
attachee.obj_set_int(obj_f_critter_strategy, 539)
elif (attachee.leader_get() != OBJ_HANDLE_NULL):
attachee.obj_set_int(obj_f_critter_strategy, 0)
return RUN_DEFAULT
##########################################################################################
## SCRIPT DETAIL FOR START COMBAT ##
##########################################################################################
## if not dead, unconscious, prone, or in party ##
## if under 75% health ##
## if haven't cast all 18 healing spells ##
## set strategy to self healing ##
## increment healing variable ##
## otherwise ##
## set strategy to melee ##
## otherwise (if over 75% health) ##
## find friends ##
## if any are under 75% health ##
## if haven't cast all 18 healing spells ##
## set strategy to friend healing ##
## increment healing variable ##
## otherwise ##
## set strategy to melee ##
## otherwise ##
## set strategy to guard ##
## otherwise, if in party ##
## set strategy to default ##
## run default ##
##########################################################################################
|
[
"[email protected]"
] | |
13229a7fa36de42831537b95efa97c775f649005
|
edf125be37a40caeb14c7fe32bd9f7511cf0ce9b
|
/09-manipulating-dataFrames-with-pandas/1-extracting-and-transforming-data/indexing_and_column_rearrangement.py
|
745bece0cdcb2ca968e8a8950bf5140557d4b32e
|
[] |
no_license
|
vedpbharti/Datacamp
|
1d3d2ca0722a3a19733e91fa054f64e0c3b7114a
|
b6d019efebe1b46765f19212ba2d8ebb9d90de57
|
refs/heads/master
| 2020-04-05T05:47:28.528088 | 2019-02-10T22:34:00 | 2019-02-10T22:34:00 | 156,610,704 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,135 |
py
|
'''Indexing and column rearrangement
There are circumstances in which it's useful to modify the order of your DataFrame columns. We do that now by extracting just three columns from the Pennsylvania election results DataFrame.
Your job is to read the CSV file and set the index to 'county'. You'll then assign a new DataFrame by selecting the list of columns ['winner', 'total', 'voters']. The CSV file is provided to you in the variable filename.
Instructions
100 XP
Import pandas as pd.
Read in filename using pd.read_csv() and set the index to 'county' by specifying the index_col parameter.
Create a separate DataFrame results with the columns ['winner', 'total', 'voters'].
Print the output using results.head(). This has been done for you, so hit 'Submit Answer' to see the new DataFrame!'''
# Import pandas
import pandas as pd
# Read in filename and set the index: election
election = pd.read_csv(filename, index_col='county')
# Create a separate dataframe with the columns ['winner', 'total', 'voters']: results
results = election[['winner', 'total', 'voters']]
# Print the output of results.head()
print(results.head())
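For comparison, a minimal sketch (assuming the same election DataFrame built above; this is not part of the original exercise) showing that the bracket selection is equivalent to label-based selection with .loc:
# Equivalent label-based selection: all rows, the same three columns
results_loc = election.loc[:, ['winner', 'total', 'voters']]
print(results_loc.equals(results))  # expected to print True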
|
[
"[email protected]"
] | |
f1771c403450a958403c400739cb654460474052
|
bb2a7aacab41acb8e804d823c98b9b4dd3267f0c
|
/modules/vector_distance.py
|
382c89c12b6bc1894a47194efedb0bf9387fc871
|
[
"MIT"
] |
permissive
|
nicolasying/WordNet-Embeddings
|
7f9f5c57f534d5ea1db956a9e2d7f0dd178e4998
|
a6a5782dca97376e487df41fb83542729f284197
|
refs/heads/master
| 2020-04-21T16:58:48.668802 | 2019-06-11T14:10:10 | 2019-06-11T14:10:10 | 169,720,827 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,003 |
py
|
# coding=utf-8
#! /usr/bin/env python3.4
"""
MIT License
Copyright (c) 2018 NLX-Group
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This code calculates the cosine similarity between two given vectors
[email protected]
"""
import math
from modules.input_output import *
from scipy.spatial import distance
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
def word_similarity(wrd1, wrd2, for_WSD, from_emb_file):
vec_extractor(wrd1, wrd2, for_WSD, from_emb_file)
def vec_extractor(wrd1, wrd2, for_WSD, from_emb_file):
if from_emb_file == "auto":
final_vec = array_loader("embeddings_matrix")
word_list = array_loader("word_list")
"""
all_words =[]
for itm in word_list:
all_words.append(itm.split("\t")[0])
"""
if for_WSD:
all_words = [itm.split("_offset")[0].replace("\n","") for itm in word_list]
else:
all_words = word_list
all_words = np.array(all_words)
indx1 = np.where(all_words == wrd1)[0]
indx2 = np.where(all_words == wrd2)[0]
com_wrd1 = [word_list[itm].split("\t")[0] for itm in indx1]
com_wrd2 = [word_list[itm].split("\t")[0] for itm in indx2]
else:
indx1 = []
indx2 = []
com_wrd1 = []
com_wrd2 = []
final_vec = []
indx = 0
path = os.getcwd() + '/data/output/' + from_emb_file
with open(path) as infile:
for line in infile:
if for_WSD:
if line[0:len(wrd1)] == wrd1 and line[len(wrd1):len(wrd1)+7] == "_offset":
temp = line[line.index(" ")+1:].replace(" \n","").replace("\n","").replace("'","").split(" ")
temp = [float(i) for i in temp]
final_vec.append(temp)
indx1.append(indx)
com_wrd1.append(line.split(" ")[0])
indx += 1
if line[0:len(wrd2)] == wrd2 and line[len(wrd2):len(wrd2)+7] == "_offset":
temp = line[line.index(" ")+1:].replace(" \n","").replace("\n","").replace("'","").split(" ")
temp = [float(i) for i in temp]
final_vec.append(temp)
indx2.append(indx)
com_wrd2.append(line.split(" ")[0])
indx += 1
else:
if line[0:len(wrd1)] == wrd1 and line[len(wrd1):len(wrd1) + 1] == " ":
temp = line[line.index(" ") + 1:].replace(" \n", "").replace("\n", "").replace("'", "").split(" ")
temp = [float(i) for i in temp]
final_vec.append(temp)
indx1.append(indx)
com_wrd1.append(line.split(" ")[0])
indx += 1
if line[0:len(wrd2)] == wrd2 and line[len(wrd2):len(wrd2) + 1] == " ":
temp = line[line.index(" ") + 1:].replace(" \n", "").replace("\n", "").replace("'", "").split(" ")
temp = [float(i) for i in temp]
final_vec.append(temp)
indx2.append(indx)
com_wrd2.append(line.split(" ")[0])
indx += 1
final_vec = np.array(final_vec)
if len(indx1) > 1 :
print(' "%s" is ambiguous with "%d" senses' % (wrd1, len(indx1)))
if len(indx2) > 1:
print(' "%s" is ambiguous with "%d" senses' % (wrd2, len(indx2)))
if len(indx1) == 0 or len(indx2) == 0:
print(' Cannot find both "%s" and "%s" in current word list' % (wrd1, wrd2))
else:
for i in range(len(indx1)):
for j in range(len(indx2)):
v1 = final_vec[indx1[i]]
v2 = final_vec[indx2[j]]
print(' Cosine similarity between "%s" and "%s": %f' % (com_wrd1[i],com_wrd2[j], cosine_sim(v1, v2, "auto")))
def cosine_sim(v1,v2,mode):
if mode == "auto":
#return(1 - distance.cosine(v1,v2))
return(cosine_similarity(v1.reshape(1, -1),v2.reshape(1, -1)))
else:
"compute cosine similarity of v1 to v2: (v1 dot v2)/{||v1||*||v2||)"
#synsDim = v2.split(" ")
sumxx, sumxy, sumyy = 0, 0, 0
j = 0
for i in range(len(v1)):
if v2[j] == "":
j += 1
y = float(v2[j])
j += 1
x = v1[i]
sumxx += x*x
sumyy += y*y
sumxy += x*y
if math.sqrt(sumxx*sumyy) == 0 :
return (0.00000001)
return (sumxy/math.sqrt(sumxx*sumyy))
def element_product(v1, v2):
"compute elementwise product of v1 to v2: (v11 dot v21) (v12 dot v22) ..."
if v2[0] == " ":
v2 = v2.replace(" ","",1)
synsVec = [float(a) for a in v2]
vector1 = np.array(v1)
vector2 = np.array(synsVec)
return(vector1 * vector2)
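A minimal usage sketch for the helpers above (the vectors are made up; note that in "auto" mode the sklearn call returns a 1x1 array rather than a plain float, and any other mode string takes the manual path, which expects v2 as numeric strings):
if __name__ == "__main__":
    v1 = np.array([1.0, 2.0, 3.0])
    v2 = np.array([3.0, 2.0, 1.0])
    # sklearn-backed path: cosine of the angle between v1 and v2 (about 0.714)
    print(cosine_sim(v1, v2, "auto"))
    # manual path: the same value computed element by element from string tokens
    print(cosine_sim(v1, [str(x) for x in v2], "manual"))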
|
[
"[email protected]"
] | |
4896c1d2ea4c5fa667528ba15bcffab3b2030178
|
295d37dcad3ad6f2cf71ded944eb1a86b3404e6b
|
/firstsite/learn/views.py
|
9093870d9a45add351fddacba712e9b8c63be4a4
|
[] |
no_license
|
guwenfeng/Django
|
c47f9cd9d7d58b75867665a9574fc5d84235a970
|
86a3af2c50f85de0fe57d74224ac145e735aa960
|
refs/heads/master
| 2021-04-03T01:43:54.115874 | 2018-03-09T08:50:15 | 2018-03-09T08:50:15 | 124,513,609 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,395 |
py
|
#coding=utf-8
from django.shortcuts import render,render_to_response,redirect
from django.http import HttpResponse,HttpResponseRedirect,request
from django.template import RequestContext
from django import forms
from .models import ResUser
from django.db.models import Q
# Form definition
class UserForm(forms.Form):
username = forms.CharField(label='用户名',max_length=100)
password = forms.CharField(label='密码',widget=forms.PasswordInput(),max_length=16)
email = forms.EmailField(label='邮箱')
phone = forms.CharField(label='电话',max_length=13)
def web_login(request):
return render(request, 'login.html')
# Registration page
def regist(request):
return render(request, 'registration.html')
# Registration handler
def regist_create(request):
if request.method == 'POST':
uf = UserForm(request.POST)
if uf.is_valid():
            # Get the form data
username = uf.cleaned_data['username']
password = uf.cleaned_data['password']
email = uf.cleaned_data['email']
phone = uf.cleaned_data['phone']
print (username,password)
            # Save the new user to the database
ResUser.objects.create(username= username,password=password,email= email,phone=phone)
return redirect('/web/login')
else:
uf = UserForm()
return render_to_response('login.html',{'uf':uf}, context_instance=RequestContext(request))
# Login
def login(request):
if request.method == 'POST':
        # Get the username and password from the form
data=request.POST
username = data['username']
password = data['password']
print (username,password)
        # Compare the submitted form data against the database
        user = ResUser.objects.filter(
            Q(username__exact=username) | Q(email__exact=username) | Q(phone__exact=username),
            password__exact=password).first()
        if user:
            # Match found, redirect to index
            response = HttpResponseRedirect('/index')
            # Write username into a browser cookie with a 3600-second lifetime
response.set_cookie('username',username,3600)
return response
return redirect('/web/login')
# Logout
def logout(request):
response = HttpResponseRedirect('/web/login')
    # Remove the username cookie
response.delete_cookie('username')
return response
|
[
"[email protected]"
] | |
ce1f1e15dd9fb2cc99a49b1ed54484e2d655871c
|
bad62c2b0dfad33197db55b44efeec0bab405634
|
/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_configuration.py
|
625f14a52e1f2f4aa3eb2678b58649f3107f1ead
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
test-repo-billy/azure-sdk-for-python
|
20c5a2486456e02456de17515704cb064ff19833
|
cece86a8548cb5f575e5419864d631673be0a244
|
refs/heads/master
| 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 |
MIT
| 2019-07-25T22:28:52 | 2019-04-19T20:59:15 |
Python
|
UTF-8
|
Python
| false | false | 3,531 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ConversationAnalysisClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for ConversationAnalysisClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param endpoint: Supported Cognitive Services endpoint (e.g.,
https://:code:`<resource-name>`.cognitiveservices.azure.com). Required.
:type endpoint: str
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:keyword api_version: Api Version. Default value is "2022-05-15-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(self, endpoint: str, credential: "AsyncTokenCredential", **kwargs: Any) -> None:
super(ConversationAnalysisClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop("api_version", "2022-05-15-preview") # type: str
if endpoint is None:
raise ValueError("Parameter 'endpoint' must not be None.")
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
self.endpoint = endpoint
self.credential = credential
self.api_version = api_version
self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"])
kwargs.setdefault("sdk_moniker", "ai-language-conversations/{}".format(VERSION))
self._configure(**kwargs)
def _configure(self, **kwargs: Any) -> None:
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(
self.credential, *self.credential_scopes, **kwargs
)
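A minimal construction sketch from application code (hedged: the endpoint is a placeholder and DefaultAzureCredential from azure-identity is only one possible AsyncTokenCredential):
from azure.identity.aio import DefaultAzureCredential
config = ConversationAnalysisClientConfiguration(
    endpoint="https://<resource-name>.cognitiveservices.azure.com",  # placeholder resource endpoint
    credential=DefaultAzureCredential(),
)
print(config.api_version)  # "2022-05-15-preview" unless overridden via the api_version keyword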
|
[
"[email protected]"
] | |
487f69e10372f0cca2d9dc1ca97d415961b90453
|
a248ebfced3e5892d763c8fff1f5e5ebd0ffb02e
|
/src/shortener/migrations/0005_auto_20180813_1614.py
|
aafc0011e8f31d9f752193ba370df885856a28ff
|
[] |
no_license
|
gauravsaxena1997/URLshortener
|
78b9a33ae1640ae27759085f47e72605ae8f0b94
|
d1350760dc0436c9a2eea0a549f030e6f4f734f7
|
refs/heads/master
| 2020-03-26T06:05:45.229523 | 2018-08-15T17:56:04 | 2018-08-15T17:56:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 628 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-08-13 16:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shortener', '0004_auto_20180813_1448'),
]
operations = [
migrations.AddField(
model_name='url',
name='active',
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name='url',
name='shortcode',
field=models.CharField(blank=True, max_length=15, unique=True),
),
]
|
[
"[email protected]"
] | |
513164c5b81b86296b7c191d7adb745634d4d6b1
|
3e306d0ec56608259e36c9fe28c95ab5bd58147c
|
/keras/layers/activation/leaky_relu.py
|
bc563705cd88ddff65155e02ca7ada49e1c43903
|
[
"Apache-2.0"
] |
permissive
|
Alan-love/keras
|
8012319eb3f88bfb3806e9df913f62b442701137
|
6c392b5ad96fb47a05019e6dda42d2af1f1ec08e
|
refs/heads/master
| 2023-08-22T17:44:36.217261 | 2022-03-29T23:06:19 | 2022-03-29T23:06:50 | 209,978,278 | 0 | 0 |
Apache-2.0
| 2022-03-31T03:09:20 | 2019-09-21T12:05:44 |
Python
|
UTF-8
|
Python
| false | false | 2,538 |
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Leaky version of a Rectified Linear Unit activation layer."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
from keras import backend
from keras.engine.base_layer import Layer
from keras.utils import tf_utils
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.LeakyReLU')
class LeakyReLU(Layer):
"""Leaky version of a Rectified Linear Unit.
It allows a small gradient when the unit is not active:
```
f(x) = alpha * x if x < 0
f(x) = x if x >= 0
```
Usage:
>>> layer = tf.keras.layers.LeakyReLU()
>>> output = layer([-3.0, -1.0, 0.0, 2.0])
>>> list(output.numpy())
[-0.9, -0.3, 0.0, 2.0]
>>> layer = tf.keras.layers.LeakyReLU(alpha=0.1)
>>> output = layer([-3.0, -1.0, 0.0, 2.0])
>>> list(output.numpy())
[-0.3, -0.1, 0.0, 2.0]
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the batch axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Args:
alpha: Float >= 0. Negative slope coefficient. Default to 0.3.
"""
def __init__(self, alpha=0.3, **kwargs):
super(LeakyReLU, self).__init__(**kwargs)
if alpha is None:
raise ValueError(
'The alpha value of a Leaky ReLU layer cannot be None, '
f'Expecting a float. Received: {alpha}')
self.supports_masking = True
self.alpha = backend.cast_to_floatx(alpha)
def call(self, inputs):
return backend.relu(inputs, alpha=self.alpha)
def get_config(self):
config = {'alpha': float(self.alpha)}
base_config = super(LeakyReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
|
[
"[email protected]"
] | |
22917a83efbde931e2a785846a9a92c36e59b834
|
704976ea552111c6a5af9cd7cb62b9d9abaf3996
|
/pypy/module/test_lib_pypy/ctypes_tests/conftest.py
|
fdc368945bef163758eaf608e7187c32b4663584
|
[
"BSD-3-Clause"
] |
permissive
|
mesalock-linux/mesapy
|
4f02c5819ce7f2f6e249d34840f1aa097577645d
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
refs/heads/mesapy2.7
| 2023-08-16T21:33:02.239581 | 2019-08-13T10:29:43 | 2019-08-13T18:06:45 | 136,080,721 | 396 | 33 |
NOASSERTION
| 2020-04-01T03:05:18 | 2018-06-04T20:45:17 |
Python
|
UTF-8
|
Python
| false | false | 3,400 |
py
|
import py
import pytest
import sys
import os
def pytest_ignore_collect(path):
if '__pypy__' not in sys.builtin_module_names:
return True
# XXX: copied from pypy/tool/cpyext/extbuild.py
if os.name != 'nt':
so_ext = 'so'
else:
so_ext = 'dll'
def _build(cfilenames, outputfilename, compile_extra, link_extra,
include_dirs, libraries, library_dirs):
try:
# monkeypatch distutils for some versions of msvc compiler
import setuptools
except ImportError:
# XXX if this fails and is required,
# we must call pypy -mensurepip after translation
pass
from distutils.ccompiler import new_compiler
from distutils import sysconfig
# XXX for Darwin running old versions of CPython 2.7.x
sysconfig.get_config_vars()
compiler = new_compiler(force=1)
sysconfig.customize_compiler(compiler) # XXX
objects = []
for cfile in cfilenames:
cfile = py.path.local(cfile)
old = cfile.dirpath().chdir()
try:
res = compiler.compile([cfile.basename],
include_dirs=include_dirs, extra_preargs=compile_extra)
assert len(res) == 1
cobjfile = py.path.local(res[0])
assert cobjfile.check()
objects.append(str(cobjfile))
finally:
old.chdir()
compiler.link_shared_object(
objects, str(outputfilename),
libraries=libraries,
extra_preargs=link_extra,
library_dirs=library_dirs)
def c_compile(cfilenames, outputfilename,
compile_extra=None, link_extra=None,
include_dirs=None, libraries=None, library_dirs=None):
compile_extra = compile_extra or []
link_extra = link_extra or []
include_dirs = include_dirs or []
libraries = libraries or []
library_dirs = library_dirs or []
if sys.platform == 'win32':
link_extra = link_extra + ['/DEBUG'] # generate .pdb file
if sys.platform == 'darwin':
# support Fink & Darwinports
for s in ('/sw/', '/opt/local/'):
if (s + 'include' not in include_dirs
and os.path.exists(s + 'include')):
include_dirs.append(s + 'include')
if s + 'lib' not in library_dirs and os.path.exists(s + 'lib'):
library_dirs.append(s + 'lib')
outputfilename = py.path.local(outputfilename).new(ext=so_ext)
saved_environ = os.environ.copy()
try:
_build(
cfilenames, outputfilename,
compile_extra, link_extra,
include_dirs, libraries, library_dirs)
finally:
# workaround for a distutils bugs where some env vars can
# become longer and longer every time it is used
for key, value in saved_environ.items():
if os.environ.get(key) != value:
os.environ[key] = value
return outputfilename
# end copy
def compile_so_file():
udir = pytest.ensuretemp('_ctypes_test')
cfile = py.path.local(__file__).dirpath().join("_ctypes_test.c")
if sys.platform == 'win32':
libraries = ['oleaut32']
else:
libraries = []
return c_compile([cfile], str(udir / '_ctypes_test'), libraries=libraries)
# we need to run after the "tmpdir" plugin which installs pytest.ensuretemp
@pytest.mark.trylast
def pytest_configure(config):
global sofile
sofile = compile_so_file()
|
[
"[email protected]"
] | |
409fdc1ded1a89799c4f089c66d3b135a74ab98c
|
6a34b039ededb2e1dcdc07c6976475654ca0ae0a
|
/code_all/day19/review01.py
|
8a5cedfdc9e72fce003c67cf1aa52efc1de9219f
|
[
"MIT"
] |
permissive
|
testcg/python
|
57c62671ab1aad18205c1dee4457b55009cef098
|
4db4bd5d0e44af807d2df80cf8c8980b40cc03c4
|
refs/heads/main
| 2023-07-09T13:19:24.740751 | 2021-08-11T09:25:20 | 2021-08-11T09:25:20 | 394,932,987 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,791 |
py
|
"""
总复习 - Python 核心
一、自动化内存管理机制
1. 引用计数:
每个对象记录被变量绑定(引用)的数量,
当为0时被销毁。
缺点-循环引用:
两个垃圾互相引用,但是计数不为0
2. 标记清除
全盘扫描,标记不再使用的数据
缺点-速度慢
3. 分代回收
程序运行时,将内存分为小0、中1、大2三块.
每次创建新数据,一定在0代分配空间.
如果内存告急,触发标记清除
再将有用的数据升代,清空上一代
内存优化:尽少产生垃圾、对象池、配置垃圾回收器参数
二、跳转语句
三、函数参数
实际参数:与形参进行对应
位置实参:按顺序
函数名(数据1,数据2)
序列实参:拆
函数名(*序列)
关键字实参:按名字
函数名(形参名1 = 数据1,形参名2 = 数据2)
字典实参:拆
函数名(**字典)
形式参数
位置形参:必填
def 函数名(形参名1,形参名2)
默认形参:可选
def 函数名(形参名1=默认值,形参名2=默认值)
星号元组形参:收集位置实参
def 函数名(*args)
双星号字典形参:收集关键字实参
def 函数名(**kwargs)
命名关键字形参:必须是关键字实参
def 函数名(*args,形参名)
def 函数名(形参名,*,形参名)
"""
def func01(p1,p2):
pass
func01(p1 =1 ,p2 =2)
list01 = []  # reference count += 1
list02 = list01  # reference count += 1
del list01  # reference count -= 1
list02 = []  # reference count -= 1
list01 = []
list02 = []
list01.append(list02)
list02.append(list01)
del list01, list02  # cyclic reference: both are garbage, but their counts are not 0
# Concatenating strings in a loop keeps producing garbage
# str_result = ""
# for i in range(10):
# str_result += str(i)
# print(str_result)
# Appending elements to a list does not produce garbage
result = []
for i in range(10):
result.append(str(i))
print("".join(result))
# Object pool
# Each time data is created, first check whether the same data already exists in the pool;
# if it does, return its address directly, otherwise create the new data
data01 = ["10.1234",]
data02 = ["10.1234",]
print(id(data01))
print(id(data02))
# Jump statements
def func01():
while True:
while True:
            break  # 1. break: jump out of the loop
        continue  # 2. continue: skip to the next iteration
    return "单个数据"  # 3. return: exit the function
def func02():
yield "多个数据" # 4. 暂时离开
def func03():
    raise Exception("错误信息")  # 5. raise: propagate upward until handled
|
[
"[email protected]"
] | |
556fce78a0b96456125b3b21025368e300672f3f
|
4f1598d02567fe3329ccb55f4e65299589dd339b
|
/examples/mxnet/cnn/imagenet_inference.py
|
06aaaf8590ef323a59c486ef91569d41ee69a37c
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
ashokei/lp-inference-kit
|
ad64a774ce5982a2dab8166dcf6101da18c14d15
|
24aafdf9459dcab34647bbf187b0d924f364360b
|
refs/heads/master
| 2022-11-23T14:57:30.207647 | 2020-07-09T06:20:25 | 2020-07-09T06:20:25 | 279,693,622 | 2 | 0 | null | 2020-07-14T21:09:57 | 2020-07-14T21:09:57 | null |
UTF-8
|
Python
| false | false | 13,972 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import logging
import os
import time
import numpy as np
import mxnet as mx
from mxnet import nd
from mxnet.contrib.quantization import *
from mxnet.contrib import amp
import sys
def download_dataset(dataset_url, dataset_dir, logger=None):
if logger is not None:
logger.info('Downloading dataset for inference from %s to %s' % (dataset_url, dataset_dir))
mx.test_utils.download(dataset_url, dataset_dir)
def load_model(symbol_file, param_file, logger=None):
cur_path = os.path.dirname(os.path.realpath(__file__))
symbol_file_path = os.path.join(cur_path, symbol_file)
if logger is not None:
logger.info('Loading symbol from file %s' % symbol_file_path)
symbol = mx.sym.load(symbol_file_path)
param_file_path = os.path.join(cur_path, param_file)
if logger is not None:
logger.info('Loading params from file %s' % param_file_path)
save_dict = nd.load(param_file_path)
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return symbol, arg_params, aux_params
def advance_data_iter(data_iter, n):
assert n >= 0
if n == 0:
return data_iter
has_next_batch = True
while has_next_batch:
try:
data_iter.next()
n -= 1
if n == 0:
return data_iter
except StopIteration:
has_next_batch = False
def score(sym, arg_params, aux_params, data, devs, label_name, max_num_examples, logger=None):
metrics = [mx.metric.create('acc'),
mx.metric.create('top_k_accuracy', top_k=5)]
if not isinstance(metrics, list):
metrics = [metrics, ]
mod = mx.mod.Module(symbol=sym, context=devs, label_names=[label_name, ])
mod.bind(for_training=False,
data_shapes=data.provide_data,
label_shapes=data.provide_label)
mod.set_params(arg_params, aux_params)
tic = time.time()
num = 0
for batch in data:
mod.forward(batch, is_train=False)
for m in metrics:
mod.update_metric(m, batch.label)
num += batch_size
if max_num_examples is not None and num >= max_num_examples:
break
speed = num / (time.time() - tic)
if logger is not None:
logger.info('Finished inference with %d images' % num)
logger.info('Finished with %f images per second', speed)
logger.warn('Note: GPU performance is expected to be slower than CPU. Please refer quantization/README.md for details')
for m in metrics:
logger.info(m.get())
def low_precison_convert(model_name, low_precision, sym, arg_params, aux_params, excluded_sym_names=[]):
if low_precision == 'bfloat16':
if model_name.find('imagenet1k-resnet-152') != -1:
excluded_sym_names += ['conv0']
elif model_name.find('imagenet1k-inception-bn') != -1:
excluded_sym_names += ['conv_1']
elif model_name.find('resnet') != -1 and model_name.find('v1') != -1:
excluded_sym_names += ['resnetv10_conv0_fwd']
elif model_name.find('resnet') != -1 and model_name.find('v2') != -1:
excluded_sym_names += ['resnetv20_conv0_fwd']
elif model_name.find('vgg') != -1:
excluded_sym_names += ['vgg0_conv0_fwd']
elif model_name.find('squeezenet1') != -1:
excluded_sym_names += ['squeezenet0_conv0_fwd']
elif model_name.find('mobilenet') != -1 and model_name.find('v2') == -1:
excluded_sym_names += ['mobilenet0_conv0_fwd']
elif model_name.find('mobilenet') != -1 and model_name.find('v2') != -1:
excluded_sym_names += ['mobilenetv20_conv0_fwd']
elif model_name.find('inceptionv3') != -1:
excluded_sym_names += ['inception30_conv0_fwd']
return amp.convert_model(sym,
arg_params,
aux_params,
target_dtype=low_precision,
excluded_sym_names=excluded_sym_names,
cast_optional_params=True)
def benchmark_score(symbol_file, ctx, batch_size, num_batches, data_layer_type, low_precision, logger=None):
# get mod
cur_path = os.path.dirname(os.path.realpath(__file__))
symbol_file_path = os.path.join(cur_path, symbol_file)
if logger is not None:
logger.info('Loading symbol from file %s' % symbol_file_path)
sym = mx.sym.load(symbol_file_path)
mod = mx.mod.Module(symbol=sym, context=ctx)
if data_layer_type == "int8":
dshape = mx.io.DataDesc(name='data', shape=(
batch_size,) + data_shape, dtype=np.int8)
elif data_layer_type == 'uint8':
dshape = mx.io.DataDesc(name='data', shape=(
batch_size,) + data_shape, dtype=np.uint8)
else: # float32
dshape = mx.io.DataDesc(name='data', shape=(
batch_size,) + data_shape, dtype=np.float32)
mod.bind(for_training=False,
inputs_need_grad=False,
data_shapes=[dshape])
mod.init_params(initializer=mx.init.Xavier(magnitude=2.))
if low_precision:
arg_params, aux_params = mod.get_params()
sym, arg_params, aux_params = low_precison_convert(symbol_file,
low_precision,
sym, arg_params,
aux_params)
mod = mx.mod.Module(symbol=sym, context=ctx)
mod.bind(for_training=False,
inputs_need_grad=False,
data_shapes=[dshape],
label_shapes=[['softmax_label', (batch_size,)]])
mod.set_params(arg_params, aux_params)
# get data
if data_layer_type == "float32":
data = [mx.random.uniform(-1.0, 1.0, shape=shape, ctx=ctx, dtype=data_layer_type)
for _, shape in mod.data_shapes]
else:
data = [mx.nd.full(shape=shape, val=127, ctx=ctx, dtype=data_layer_type)
for _, shape in mod.data_shapes]
batch = mx.io.DataBatch(data, []) # empty label
# run
dry_run = 5 # use 5 iterations to warm up
for i in range(dry_run+num_batches):
if i == dry_run:
tic = time.time()
mod.forward(batch, is_train=False)
for output in mod.get_outputs():
output.wait_to_read()
# return num images per second
return num_batches*batch_size/(time.time() - tic)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Score a model on a dataset')
parser.add_argument('--ctx', type=str, default='gpu')
parser.add_argument('--benchmark', type=bool, default=False, help='dummy data benchmark')
parser.add_argument('--symbol-file', type=str, required=True, help='symbol file path')
parser.add_argument('--param-file', type=str, required=False, help='param file path')
parser.add_argument('--batch-size', type=int, default=32)
parser.add_argument('--label-name', type=str, default='softmax_label')
parser.add_argument('--dataset', type=str, required=False, help='dataset path')
parser.add_argument('--rgb-mean', type=str, default='0,0,0')
parser.add_argument('--rgb-std', type=str, default='1,1,1')
parser.add_argument('--image-shape', type=str, default='3,224,224')
parser.add_argument('--data-nthreads', type=int, default=60, help='number of threads for data decoding')
parser.add_argument('--num-skipped-batches', type=int, default=0, help='skip the number of batches for inference')
parser.add_argument('--num-inference-batches', type=int, required=True, help='number of images used for inference')
parser.add_argument('--shuffle-dataset', action='store_true', default=True,
help='shuffle the calibration dataset')
parser.add_argument('--shuffle-chunk-seed', type=int, default=3982304,
help='shuffling chunk seed, see'
' https://mxnet.apache.org/api/python/io/io.html?highlight=imager#mxnet.io.ImageRecordIter'
' for more details')
parser.add_argument('--shuffle-seed', type=int, default=48564309,
help='shuffling seed, see'
' https://mxnet.apache.org/api/python/io/io.html?highlight=imager#mxnet.io.ImageRecordIter'
' for more details')
parser.add_argument('--data-layer-type', type=str, default='float32',
choices=['float32', 'int8', 'uint8'],
help='data type for data layer')
parser.add_argument('--low-precision', type=str, default='',
choices=['', 'float16', 'bfloat16'],
help='enable low precision')
args = parser.parse_args()
if args.ctx == 'gpu':
ctx = mx.gpu(0)
elif args.ctx == 'cpu':
ctx = mx.cpu(0)
else:
raise ValueError('ctx %s is not supported in this script' % args.ctx)
logging.basicConfig()
logger = logging.getLogger('logger')
logger.setLevel(logging.INFO)
symbol_file = args.symbol_file
param_file = args.param_file
data_nthreads = args.data_nthreads
batch_size = args.batch_size
logger.info('batch size = %d for inference' % batch_size)
rgb_mean = args.rgb_mean
logger.info('rgb_mean = %s' % rgb_mean)
rgb_mean = [float(i) for i in rgb_mean.split(',')]
mean_args = {'mean_r': rgb_mean[0], 'mean_g': rgb_mean[1], 'mean_b': rgb_mean[2]}
rgb_std = args.rgb_std
logger.info('rgb_std = %s' % rgb_std)
rgb_std = [float(i) for i in rgb_std.split(',')]
std_args = {'std_r': rgb_std[0], 'std_g': rgb_std[1], 'std_b': rgb_std[2]}
combine_mean_std = {}
combine_mean_std.update(mean_args)
combine_mean_std.update(std_args)
label_name = args.label_name
logger.info('label_name = %s' % label_name)
image_shape = args.image_shape
data_shape = tuple([int(i) for i in image_shape.split(',')])
logger.info('Input data shape = %s' % str(data_shape))
data_layer_type = args.data_layer_type
if args.low_precision:
if args.ctx == 'gpu':
assert args.low_precision == 'float16', "Not supported low-precision options for GPU."
elif args.ctx == 'cpu':
assert args.low_precision == 'bfloat16', "Not supported low-precision options for CPU."
if args.benchmark == False:
dataset = args.dataset
download_dataset('http://data.mxnet.io/data/val_256_q90.rec', dataset)
logger.info('Dataset for inference: %s' % dataset)
# creating data iterator
data = mx.io.ImageRecordIter(
path_imgrec=dataset,
label_width=1,
preprocess_threads=data_nthreads,
batch_size=batch_size,
data_shape=data_shape,
label_name=label_name,
rand_crop=False,
rand_mirror=False,
shuffle=args.shuffle_dataset,
shuffle_chunk_seed=args.shuffle_chunk_seed,
seed=args.shuffle_seed,
dtype=data_layer_type,
ctx=args.ctx,
**combine_mean_std)
# loading model
fp32_model = load_model(symbol_file, param_file, logger)
from ilit import Tuner
        calib_data = mx.io.ImageRecordIter(
            path_imgrec=dataset, label_width=1, preprocess_threads=data_nthreads,
            batch_size=batch_size, data_shape=data_shape, label_name=label_name,
            rand_crop=False, rand_mirror=False, shuffle=args.shuffle_dataset,
            shuffle_chunk_seed=args.shuffle_chunk_seed, seed=args.shuffle_seed,
            dtype=data_layer_type, ctx=args.ctx, **combine_mean_std)
cnn_tuner = Tuner("./cnn.yaml")
cnn_tuner.tune(fp32_model, q_dataloader=calib_data, eval_dataloader=data)
sys.exit()
if args.low_precision:
sym, arg_params, aux_params = low_precison_convert(symbol_file,
args.low_precision,
sym, arg_params,
aux_params)
# make sure that fp32 inference works on the same images as calibrated quantized model
logger.info('Skipping the first %d batches' % args.num_skipped_batches)
data = advance_data_iter(data, args.num_skipped_batches)
num_inference_images = args.num_inference_batches * batch_size
logger.info('Running model %s for inference' % symbol_file)
score(sym, arg_params, aux_params, data, [ctx], label_name,
max_num_examples=num_inference_images, logger=logger)
else:
logger.info('Running model %s for inference' % symbol_file)
speed = benchmark_score(symbol_file, ctx, batch_size,
args.num_inference_batches, data_layer_type, args.low_precision, logger)
logger.info('batch size %2d, image/sec: %f', batch_size, speed)
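A hypothetical invocation of the dummy-data benchmark path, using only flags defined in the argparse block above (the symbol file name is a placeholder):
# python imagenet_inference.py --ctx=cpu --benchmark=True \
#     --symbol-file=resnet50_v1-symbol.json --batch-size=32 \
#     --num-inference-batches=100 --data-layer-type=float32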
|
[
"[email protected]"
] | |
bc19c45df8d46e53fc84287d52e1a50a79d9f27f
|
08ee36e0bb1c250f7f2dfda12c1a73d1984cd2bc
|
/src/mnistk/networks/resnetstyle_22.py
|
93fa67898bf4b568a507a10d0985d17c6c533350
|
[] |
no_license
|
ahgamut/mnistk
|
58dadffad204602d425b18549e9b3d245dbf5486
|
19a661185e6d82996624fc6fcc03de7ad9213eb0
|
refs/heads/master
| 2021-11-04T07:36:07.394100 | 2021-10-27T18:37:12 | 2021-10-27T18:37:12 | 227,103,881 | 2 | 1 | null | 2020-02-19T22:07:24 | 2019-12-10T11:33:09 |
Python
|
UTF-8
|
Python
| false | false | 1,077 |
py
|
# -*- coding: utf-8 -*-
"""
resnetstyle_22.py
:copyright: (c) 2019 by Gautham Venkatasubramanian.
:license: MIT
"""
import torch
from torch import nn
from torchvision.models.resnet import BasicBlock
class ResNetStyle_22(nn.Module):
def __init__(self):
nn.Module.__init__(self)
self.f0 = nn.Conv2d(in_channels=1, out_channels=43, kernel_size=(13, 13), stride=(1, 1), padding=(0, 0), dilation=(1, 1), groups=1, bias=True, padding_mode='zeros')
self.f1 = BasicBlock(inplanes=43, planes=43)
self.f2 = nn.Conv2d(in_channels=43, out_channels=45, kernel_size=(9, 9), stride=(1, 1), padding=(0, 0), dilation=(1, 1), groups=1, bias=True, padding_mode='zeros')
self.f3 = nn.Linear(in_features=2880, out_features=10, bias=False)
self.f4 = nn.LogSoftmax(dim=1)
def forward(self, *inputs):
x = inputs[0]
x = x.view(x.shape[0],1,28,28)
x = self.f0(x)
x = self.f1(x)
x = self.f2(x)
x = x.view(x.shape[0],2880)
x = self.f3(x)
x = self.f4(x)
return x
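A minimal smoke-test sketch (batch size 4 and the random input are arbitrary); with the 13x13 and 9x9 valid convolutions, a 28x28 input becomes 45 channels of 8x8 = 2880 features before the linear layer:
if __name__ == "__main__":
    net = ResNetStyle_22()
    net.eval()
    dummy = torch.randn(4, 1, 28, 28)  # forward() reshapes, so a flat (4, 784) tensor would also work
    with torch.no_grad():
        out = net(dummy)
    print(out.shape)  # expected: torch.Size([4, 10]) of log-probabilities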
|
[
"[email protected]"
] | |
390a58d4b13f9ee7281e3f5ab74fa8d5329df6c7
|
519aa4942b6eb6663811dd2a050f498c8d3e0f95
|
/Python 2.X/ZERO/GUI/Backup/Backup 1.0.pyw
|
6ba365f5534e462ebd99d91fcad5f01a8c949418
|
[] |
no_license
|
jacobbridges/my-chaos
|
2b5aab5dcac703b268f03efb07fc54e9d4984f29
|
45837fc39f99b5f7f69919ed2f6732e6b7bec936
|
refs/heads/master
| 2020-05-20T03:21:32.747460 | 2016-08-13T02:12:25 | 2016-08-13T02:12:25 | 29,456,630 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,008 |
pyw
|
import os
import sys
import Tkinter
import tkFileDialog
import tkMessageBox
def main():
global master, source_dialog, destination_dialog, source_entry, destination_entry
# Create the main window.
master = Tkinter.Tk()
master.title('Backup 1.0')
master.resizable(False, False)
# Create the file dialogs.
options = {'mustexist': True, 'parent': master, 'title': 'Please choose a source directory and then select OK.'}
if os.name == 'nt':
if os.path.exists('C:\\Documents and Settings'):
options['initialdir'] = 'C:\\Documents and Settings'
elif os.path.exists('C:\\'):
options['initialdir'] = 'C:\\'
source_dialog = tkFileDialog.Directory(master, **options)
options['title'] = options['title'].replace('source', 'destination')
destination_dialog = tkFileDialog.Directory(master, **options)
# Create widgets.
source = Tkinter.LabelFrame(master, text='Source')
source_entry = Tkinter.Entry(source, width=30)
source_button = Tkinter.Button(source, text='Browse ...', command=browse_source)
destination = Tkinter.LabelFrame(master, text='Destination')
destination_entry = Tkinter.Entry(destination, width=30)
destination_button = Tkinter.Button(destination, text='Browse ...', command=browse_destination)
okay_button = Tkinter.Button(master, text='Okay', command=okay)
exit_button = Tkinter.Button(master, text='Exit', command=terminate)
# Create bindings.
source_button.bind('<Return>', browse_source)
destination_button.bind('<Return>', browse_destination)
okay_button.bind('<Return>', okay)
exit_button.bind('<Return>', terminate)
# Display widgets.
source_entry.grid(row=0, column=0, padx=5, pady=5)
source_button.grid(row=0, column=1, padx=5, pady=5)
destination_entry.grid(row=0, column=0, padx=5, pady=5)
destination_button.grid(row=0, column=1, padx=5, pady=5)
source.grid(row=0, column=0, padx=5, pady=5, columnspan=2)
destination.grid(row=1, column=0, padx=5, pady=5, columnspan=2)
okay_button.grid(row=2, column=0, padx=5, pady=5, sticky='news')
exit_button.grid(row=2, column=1, padx=5, pady=5, sticky='news')
# Execute the main loop.
master.mainloop()
def browse_source(event=None):
# Get the selected source.
path = source_dialog.show()
if path:
# Replace the text.
source_entry.delete(0, Tkinter.END)
source_entry.insert(0, os.path.realpath(path))
def browse_destination(event=None):
# Get the selected destination.
path = destination_dialog.show()
if path:
# Replace the text.
destination_entry.delete(0, Tkinter.END)
destination_entry.insert(0, os.path.realpath(path))
def okay(event=None):
source = source_entry.get()
# Does the source exist?
if os.path.exists(source):
# Is the source a directory?
if os.path.isdir(source):
destination = destination_entry.get()
# Does the destination exist?
if os.path.exists(destination):
# Is the destination a directory?
if os.path.isdir(destination):
master.withdraw()
try:
backup(source, destination)
except:
tkMessageBox.showerror(title='Error', message='The backup could not be completed.')
master.deiconify()
else:
tkMessageBox.showwarning(title='Warning', message='The destination is not a directory.')
else:
tkMessageBox.showwarning(title='Warning', message='The destination does not exist.')
else:
tkMessageBox.showwarning(title='Warning', message='The source is not a directory.')
else:
tkMessageBox.showwarning(title='Warning', message='The source does not exist.')
def backup(source, destination, errors=None):
# Check for recursion level.
if errors is None:
errors = list()
root = True
else:
root = False
# Copy all directories and files from source to destination.
for name in os.listdir(source):
source_name = os.path.join(source, name)
destination_name = os.path.join(destination, name)
try:
if os.path.isdir(source_name):
os.mkdir(destination_name)
backup(source_name, destination_name, errors)
elif os.path.isfile(source_name):
binary = open(source_name, 'rb')
file(destination_name, 'wb').write(binary.read())
binary.close()
except:
errors.append('%s\n%s' % (source_name, destination_name))
# Write an error log if needed.
if root and errors:
file(os.path.join(os.path.dirname(sys.argv[0]), 'error.log'), 'w').write('\n\n'.join(errors))
def terminate(event=None):
# Terminate the program.
master.quit()
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
7609647aad7ffa3b5252aa5069b284baa0eb4d7f
|
86813bf514f3e0257f92207f40a68443f08ee44b
|
/0378 有序矩阵中第K小的元素/0378 有序矩阵中第K小的元素.py
|
c74fd3302ed9d2435bde0f0195ed617c845e2b4e
|
[] |
no_license
|
Aurora-yuan/Leetcode_Python3
|
4ce56679b48862c87addc8cd870cdd525c9d926c
|
720bb530850febc2aa67a56a7a0b3a85ab37f415
|
refs/heads/master
| 2021-07-12T13:23:19.399155 | 2020-10-21T03:14:36 | 2020-10-21T03:14:36 | 212,998,500 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 935 |
py
|
#label: binary search  difficulty: medium
"""
Approach:
For a matrix that is ordered (or partially ordered) like this, binary search is the usual optimization.
Initially the left pointer is the smallest element in the matrix and the right pointer is the largest
(note: the pointers stand for element values, not positions). Count the matrix elements that are less
than or equal to the midpoint value, call that count c, then move the left or right pointer by
comparing c with k. Repeat until l >= r.
"""
import bisect
from typing import List
class Solution:
def kthSmallest(self, matrix: List[List[int]], k: int) -> int:
l = matrix[0][0]
r = matrix[-1][-1]
while l < r:
mid = (l+r)//2
c = sum(bisect.bisect_right(row,mid) for row in matrix)
            # bisect.bisect_right(row, mid) counts how many elements in row are <= mid
if c < k:
l = mid + 1
else:
r = mid
return l
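A quick check against the classic example matrix from the problem statement (uses the bisect/typing imports added above):
if __name__ == "__main__":
    matrix = [[1, 5, 9], [10, 11, 13], [12, 13, 15]]
    print(Solution().kthSmallest(matrix, 8))  # expected output: 13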
|
[
"[email protected]"
] | |
fd1bf15834426fd092f37b7667afda9bd7e54df2
|
d4ea1f9747799bf503523b86b8b5ee29bab65eff
|
/gyun/cli/iaas_client/actions/s2/enable_s2_shared_targets.py
|
eab03da093de332254257c250d9d7fe7bc891cbd
|
[
"Apache-2.0"
] |
permissive
|
gyun-gome/gyun-cli
|
88b5493d90a19c5bf56a1bba4bf301d1b4a3156d
|
275b6664335e2ef21a01a48f8c06d6a89dd63467
|
refs/heads/master
| 2021-06-28T13:53:01.300135 | 2017-09-13T04:44:01 | 2017-09-13T04:44:01 | 103,353,093 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,814 |
py
|
# encoding: utf-8
# =========================================================================
# ©2017-2018 北京国美云服科技有限公司
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from gyun.cli.misc.utils import explode_array
from gyun.cli.iaas_client.actions.base import BaseAction
class EnableS2SharedTargetsAction(BaseAction):
action = 'EnableS2SharedTargets'
command = 'enable-s2-shared-targets'
usage = '%(prog)s -s <shared_targets> [-f <conf_file>]'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument("-s", "--shared-targets", dest="shared_targets",
action="store", type=str, default=None,
help="the IDs of shared targets you want to enable.")
@classmethod
def build_directive(cls, options):
for key in ['shared_targets']:
if not hasattr(options, key):
print("error: [%s] should be specified." % key)
return None
directive = {
"shared_targets": explode_array(options.shared_targets),
}
return directive
|
[
"[email protected]"
] | |
80e2f36e6bc596fd3d476900e6c9fe46833f12de
|
caaf1b0754db1e676c37a6f1e58f19183754e654
|
/sdk/tables/azure-data-tables/azure/data/tables/_entity.py
|
aa1ccae843a4eeb457a282f8013be5b4f718ea84
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
rdomenzain/azure-sdk-for-python
|
45dfb39121a0abda048c22e7309733a56259f525
|
58984255aeb904346b6958c5ba742749a2cc7d1b
|
refs/heads/master
| 2023-07-07T06:53:12.967120 | 2023-07-04T16:27:37 | 2023-07-04T16:27:37 | 258,050,134 | 0 | 0 |
MIT
| 2020-04-23T00:12:14 | 2020-04-23T00:12:13 | null |
UTF-8
|
Python
| false | false | 2,551 |
py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from enum import Enum
from typing import Any, Dict, Union, NamedTuple
from azure.core import CaseInsensitiveEnumMeta
class TableEntity(dict):
"""
An Entity dictionary with additional metadata
"""
_metadata = {} # type: Dict[str, Any]
@property
def metadata(self) -> Dict[str, Any]:
"""Resets metadata to be a part of the entity
:return Dict of entity metadata
:rtype: Dict[str, Any]
"""
return self._metadata
class EdmType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""
Used by :class:`~.EntityProperty` to represent the type of the entity property
to be stored by the Table service.
"""
BINARY = "Edm.Binary"
""" Represents byte data. This type will be inferred for Python bytes. """
INT64 = "Edm.Int64"
""" Represents a number between -(2^31) and 2^31. Must be specified or numbers will default to INT32. """
GUID = "Edm.Guid"
""" Represents a GUID. This type will be inferred for uuid.UUID. """
DATETIME = "Edm.DateTime"
""" Represents a date. This type will be inferred for Python datetime objects. """
STRING = "Edm.String"
""" Represents a string. This type will be inferred for Python strings. """
INT32 = "Edm.Int32"
""" Represents a number between -(2^15) and 2^15. This is the default type for Python numbers. """
DOUBLE = "Edm.Double"
""" Represents a double. This type will be inferred for Python floating point numbers. """
BOOLEAN = "Edm.Boolean"
""" Represents a boolean. This type will be inferred for Python booleans. """
EntityProperty = NamedTuple("EntityProperty", [("value", Any), ("edm_type", Union[str, EdmType])])
"""
An entity property. Used to explicitly set :class:`~EdmType` when necessary.
Values which require explicit typing are GUID, INT64, and BINARY. Other EdmTypes
may be explicitly create as EntityProperty objects but need not be. For example,
the below with both create STRING typed properties on the entity::
entity = TableEntity()
entity.a = 'b'
entity.x = EntityProperty('y', EdmType.STRING)
:param value:
:type value: Any
:param edm_type: Type of the value
:type edm_type: str or :class:`~azure.data.tables.EdmType`
"""
|
[
"[email protected]"
] | |
e12bb3c1b2e7f6ba0856850ff98c3e2c05ab5f88
|
95a2924dfbed2b07587c9c6516df4ac248f2586c
|
/Data Visualization/Bokeh/Streaming Real Time Data/random-generator.py
|
fdd31efee2d80d3eb98b68af91a68676a18d6868
|
[] |
no_license
|
souviksaha97/Data-Science-Lab
|
49f50ef80dff3bcfed6f26d735707ec485a393ae
|
d18681460f51e83252d5b6a491b997a3600c7715
|
refs/heads/master
| 2020-12-18T19:50:00.327882 | 2019-06-24T17:41:20 | 2019-06-24T17:41:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 973 |
py
|
"""
Created on Sat Feb 9 2019
@author: Nodar Okroshiashvili
"""
# Serve Random Number Generation in Bokeh Server
#import libraries
from bokeh.io import curdoc
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
from random import randrange
# Create Figure
f = figure(x_range=(0,11),y_range=(0,11)) # Set range for axes
# Create ColumnDataSource
source=ColumnDataSource(data=dict(x=[],y=[]))
# Create Glyphs
f.circle(x='x',
y='y',
size=10,
fill_color='olive',
line_color='brown',
source=source)
f.line(x='x', y='y', source=source)
# Create Periodic Function
def update():
new_data=dict(x=[randrange(1,10)],y=[randrange(1,10)])
source.stream(new_data,rollover=20)
#print(source.data)
# Add figure to curdoc
curdoc().add_root(f)
# Configure callback
curdoc().add_periodic_callback(update, 1000) # callback every 1000 milliseconds
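The app above is meant to be launched with the Bokeh server (for example: bokeh serve --show random-generator.py, matching the path recorded for this file) so the periodic callback actually fires. As a standalone illustration of the rollover behaviour, a minimal sketch with made-up values:
demo_source = ColumnDataSource(data=dict(x=[], y=[]))
for i in range(25):
    demo_source.stream(dict(x=[i], y=[i * i]), rollover=20)
print(len(demo_source.data['x']))  # 20 -- rollover keeps only the newest 20 points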
|
[
"[email protected]"
] |