Dataset schema (one record per source file):

| column | dtype | values / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 11 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 251 |
| max_stars_repo_name | string | length 4 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 251 |
| max_issues_repo_name | string | length 4 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 251 |
| max_forks_repo_name | string | length 4 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 1 to 1.05M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.04M |
| alphanum_fraction | float64 | 0 to 1 |

The records below repeat these fields per file. Where the stars/issues/forks repository fields coincide, they are shown once as repo/path/head/licenses; each record ends with its per-file statistics (avg_line_length, max_line_length, alphanum_fraction).
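For orientation only, here is a minimal sketch of how a table with these columns could be loaded and inspected with the Hugging Face `datasets` library. The dataset identifier below is a hypothetical placeholder (this dump does not name the dataset); only the column names are taken from the schema above.

```python
# Minimal sketch, not a definitive recipe: the dataset id is a hypothetical
# placeholder; only the column names come from the schema table above.
from datasets import load_dataset

ds = load_dataset(
    "some-org/some-code-dataset",  # placeholder id, not named in this dump
    split="train",
    streaming=True,  # stream records instead of downloading the full corpus
)

for record in ds.take(3):
    # Fields listed in the schema table above.
    print(record["hexsha"], record["size"], record["max_stars_repo_name"])
    print(record["content"][:200])  # first 200 characters of the source file
```

Streaming keeps memory bounded, which matters here because the `content` column can hold files of up to about 1.05 MB per record.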

hexsha: 86f77106c10502d0d37cea800a21ab20ce83f638 | size: 1,218 | ext: py | lang: Python
repo: MERegistro/meregistro | path: meregistro/apps/registro/models/EstablecimientoDomicilio.py | head: 6cde3cab2bd1a8e3084fa38147de377d229391e3 | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
content:
```python
# -*- coding: utf-8 -*-
from django.db import models
from apps.registro.models.TipoDomicilio import TipoDomicilio
from apps.registro.models.Localidad import Localidad
from apps.registro.models.Establecimiento import Establecimiento
from django.core.exceptions import ValidationError
from apps.seguridad.audit import audit
```
avg_line_length: 32.918919 | max_line_length: 85 | alphanum_fraction: 0.76601

hexsha: 86f7b299e6e411fb0020928642f34720d9448cf2 | size: 301 | ext: py | lang: Python
repo: timothyyu/p4e-prac | path: python_for_everybody/py2_p4i_old/6.5findslicestringextract.py | head: f978b71ce147b6e9058372929f2666c2e67d0741 | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: 1 (2020-04-18T16:09:04.000Z to 2020-04-18T16:09:04.000Z)
content:
```python
# 6.5 Write code using find() and string slicing (see section 6.10) to extract
# the number at the end of the line below.
# Convert the extracted value to a floating point number and print it out.
text = "X-DSPAM-Confidence: 0.8475";
pos = text.find(':')
text = float(text[pos+1:])
print text
```
avg_line_length: 27.363636 | max_line_length: 79 | alphanum_fraction: 0.697674

hexsha: 86f7e1041ab1f4accc4c1f71bcc457ad4e75b7b3 | size: 6,672 | ext: py | lang: Python
max_stars repo: Petr-By/qtpyvis | path: tools/lucid/engine.py | head: 0b9a151ee6b9a56b486c2bece9c1f03414629efc | licenses: ["MIT"] | stars: 3 (2017-10-04T14:51:26.000Z to 2017-10-22T09:35:50.000Z)
max_issues repo: CogSciUOS/DeepLearningToolbox | path: tools/lucid/engine.py | head: bf07578b9486d8c48e25df357bc4b9963b513b46 | licenses: ["MIT"] | issues: 13 (2017-09-05T12:56:11.000Z to 2017-11-22T10:38:27.000Z)
max_forks repo: CogSciUOS/DeepLearningToolbox | path: tools/lucid/engine.py | head: bf07578b9486d8c48e25df357bc4b9963b513b46 | licenses: ["MIT"] | forks: 2 (2017-09-24T21:39:42.000Z to 2017-10-04T15:29:54.000Z)
content:
```python
import logging
logger = logging.getLogger(__name__)
print(f"!!!!!!!!!! getEffectiveLevel: {logger.getEffectiveLevel()} !!!!!!!!!!!!!")
from dltb.base.observer import Observable, change
from network import Network, loader
from network.lucid import Network as LucidNetwork
# lucid.modelzoo.vision_models:
# A module providinge the pretrained networks by name, e.g.
# models.AlexNet
import lucid.modelzoo.vision_models as models
import lucid.modelzoo.nets_factory as nets
from lucid.modelzoo.vision_base import Model as LucidModel
import lucid.optvis.objectives as objectives
import lucid.optvis.param as param
import lucid.optvis.render as render
import lucid.optvis.transform as transform
# FIXME[old]: this is too make old code happy. New code should use
# Engine.Change and Engine.Observer directly.
EngineChange = Engine.Change
EngineObserver = Engine.Observer
```
avg_line_length: 30.190045 | max_line_length: 82 | alphanum_fraction: 0.590528

hexsha: 86f8485704c303133a8ffd7f513a5c4076214c94 | size: 87,649 | ext: py | lang: Python
repo: natamelo/synapse | path: synapse/storage/events.py | head: 3d870ecfc5353e455917166cb5c2bb8ba48a6ebd | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
```python
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
from collections import Counter as c_counter, OrderedDict, deque, namedtuple
from functools import wraps
from six import iteritems, text_type
from six.moves import range
from canonicaljson import json
from prometheus_client import Counter, Histogram
from twisted.internet import defer
import synapse.metrics
from synapse.api.constants import EventTypes
from synapse.api.errors import SynapseError
from synapse.events import EventBase # noqa: F401
from synapse.events.snapshot import EventContext # noqa: F401
from synapse.metrics import BucketCollector
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.state import StateResolutionStore
from synapse.storage.background_updates import BackgroundUpdateStore
from synapse.storage.event_federation import EventFederationStore
from synapse.storage.events_worker import EventsWorkerStore
from synapse.storage.state import StateGroupWorkerStore
from synapse.types import RoomStreamToken, get_domain_from_id
from synapse.util import batch_iter
from synapse.util.async_helpers import ObservableDeferred
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
from synapse.util.frozenutils import frozendict_json_encoder
from synapse.util.logcontext import PreserveLoggingContext, make_deferred_yieldable
from synapse.util.logutils import log_function
from synapse.util.metrics import Measure
logger = logging.getLogger(__name__)
persist_event_counter = Counter("synapse_storage_events_persisted_events", "")
event_counter = Counter(
"synapse_storage_events_persisted_events_sep",
"",
["type", "origin_type", "origin_entity"],
)
# The number of times we are recalculating the current state
state_delta_counter = Counter("synapse_storage_events_state_delta", "")
# The number of times we are recalculating state when there is only a
# single forward extremity
state_delta_single_event_counter = Counter(
"synapse_storage_events_state_delta_single_event", ""
)
# The number of times we are reculating state when we could have resonably
# calculated the delta when we calculated the state for an event we were
# persisting.
state_delta_reuse_delta_counter = Counter(
"synapse_storage_events_state_delta_reuse_delta", ""
)
# The number of forward extremities for each new event.
forward_extremities_counter = Histogram(
"synapse_storage_events_forward_extremities_persisted",
"Number of forward extremities for each new event",
buckets=(1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
)
# The number of stale forward extremities for each new event. Stale extremities
# are those that were in the previous set of extremities as well as the new.
stale_forward_extremities_counter = Histogram(
"synapse_storage_events_stale_forward_extremities_persisted",
"Number of unchanged forward extremities for each new event",
buckets=(0, 1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
)
def encode_json(json_object):
"""
Encode a Python object as JSON and return it in a Unicode string.
"""
out = frozendict_json_encoder.encode(json_object)
if isinstance(out, bytes):
out = out.decode("utf8")
return out
_EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event"))
def _retry_on_integrity_error(func):
"""Wraps a database function so that it gets retried on IntegrityError,
with `delete_existing=True` passed in.
Args:
func: function that returns a Deferred and accepts a `delete_existing` arg
"""
return f
# inherits from EventFederationStore so that we can call _update_backward_extremities
# and _handle_mult_prev_events (though arguably those could both be moved in here)
def _update_current_state_txn(self, txn, state_delta_by_room, stream_id):
for room_id, current_state_tuple in iteritems(state_delta_by_room):
to_delete, to_insert = current_state_tuple
# First we add entries to the current_state_delta_stream. We
# do this before updating the current_state_events table so
# that we can use it to calculate the `prev_event_id`. (This
# allows us to not have to pull out the existing state
# unnecessarily).
#
# The stream_id for the update is chosen to be the minimum of the stream_ids
# for the batch of the events that we are persisting; that means we do not
# end up in a situation where workers see events before the
# current_state_delta updates.
#
sql = """
INSERT INTO current_state_delta_stream
(stream_id, room_id, type, state_key, event_id, prev_event_id)
SELECT ?, ?, ?, ?, ?, (
SELECT event_id FROM current_state_events
WHERE room_id = ? AND type = ? AND state_key = ?
)
"""
txn.executemany(
sql,
(
(
stream_id,
room_id,
etype,
state_key,
None,
room_id,
etype,
state_key,
)
for etype, state_key in to_delete
# We sanity check that we're deleting rather than updating
if (etype, state_key) not in to_insert
),
)
txn.executemany(
sql,
(
(
stream_id,
room_id,
etype,
state_key,
ev_id,
room_id,
etype,
state_key,
)
for (etype, state_key), ev_id in iteritems(to_insert)
),
)
# Now we actually update the current_state_events table
txn.executemany(
"DELETE FROM current_state_events"
" WHERE room_id = ? AND type = ? AND state_key = ?",
(
(room_id, etype, state_key)
for etype, state_key in itertools.chain(to_delete, to_insert)
),
)
self._simple_insert_many_txn(
txn,
table="current_state_events",
values=[
{
"event_id": ev_id,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
}
for key, ev_id in iteritems(to_insert)
],
)
txn.call_after(
self._curr_state_delta_stream_cache.entity_has_changed,
room_id,
stream_id,
)
# Invalidate the various caches
# Figure out the changes of membership to invalidate the
# `get_rooms_for_user` cache.
# We find out which membership events we may have deleted
# and which we have added, then we invlidate the caches for all
# those users.
members_changed = set(
state_key
for ev_type, state_key in itertools.chain(to_delete, to_insert)
if ev_type == EventTypes.Member
)
for member in members_changed:
txn.call_after(
self.get_rooms_for_user_with_stream_ordering.invalidate, (member,)
)
self._invalidate_state_caches_and_stream(txn, room_id, members_changed)
def _update_room_depths_txn(self, txn, events_and_contexts, backfilled):
"""Update min_depth for each room
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
backfilled (bool): True if the events were backfilled
"""
depth_updates = {}
for event, context in events_and_contexts:
# Remove the any existing cache entries for the event_ids
txn.call_after(self._invalidate_get_event_cache, event.event_id)
if not backfilled:
txn.call_after(
self._events_stream_cache.entity_has_changed,
event.room_id,
event.internal_metadata.stream_ordering,
)
if not event.internal_metadata.is_outlier() and not context.rejected:
depth_updates[event.room_id] = max(
event.depth, depth_updates.get(event.room_id, event.depth)
)
for room_id, depth in iteritems(depth_updates):
self._update_min_depth_for_room_txn(txn, room_id, depth)
def _update_outliers_txn(self, txn, events_and_contexts):
"""Update any outliers with new event info.
This turns outliers into ex-outliers (unless the new event was
rejected).
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
Returns:
list[(EventBase, EventContext)] new list, without events which
are already in the events table.
"""
txn.execute(
"SELECT event_id, outlier FROM events WHERE event_id in (%s)"
% (",".join(["?"] * len(events_and_contexts)),),
[event.event_id for event, _ in events_and_contexts],
)
have_persisted = {event_id: outlier for event_id, outlier in txn}
to_remove = set()
for event, context in events_and_contexts:
if event.event_id not in have_persisted:
continue
to_remove.add(event)
if context.rejected:
# If the event is rejected then we don't care if the event
# was an outlier or not.
continue
outlier_persisted = have_persisted[event.event_id]
if not event.internal_metadata.is_outlier() and outlier_persisted:
# We received a copy of an event that we had already stored as
# an outlier in the database. We now have some state at that
# so we need to update the state_groups table with that state.
# insert into event_to_state_groups.
try:
self._store_event_state_mappings_txn(txn, ((event, context),))
except Exception:
logger.exception("")
raise
metadata_json = encode_json(event.internal_metadata.get_dict())
sql = (
"UPDATE event_json SET internal_metadata = ?" " WHERE event_id = ?"
)
txn.execute(sql, (metadata_json, event.event_id))
# Add an entry to the ex_outlier_stream table to replicate the
# change in outlier status to our workers.
stream_order = event.internal_metadata.stream_ordering
state_group_id = context.state_group
self._simple_insert_txn(
txn,
table="ex_outlier_stream",
values={
"event_stream_ordering": stream_order,
"event_id": event.event_id,
"state_group": state_group_id,
},
)
sql = "UPDATE events SET outlier = ?" " WHERE event_id = ?"
txn.execute(sql, (False, event.event_id))
# Update the event_backward_extremities table now that this
# event isn't an outlier any more.
self._update_backward_extremeties(txn, [event])
return [ec for ec in events_and_contexts if ec[0] not in to_remove]
def _store_event_txn(self, txn, events_and_contexts):
"""Insert new events into the event and event_json tables
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
"""
if not events_and_contexts:
# nothing to do here
return
self._simple_insert_many_txn(
txn,
table="event_json",
values=[
{
"event_id": event.event_id,
"room_id": event.room_id,
"internal_metadata": encode_json(
event.internal_metadata.get_dict()
),
"json": encode_json(event_dict(event)),
"format_version": event.format_version,
}
for event, _ in events_and_contexts
],
)
self._simple_insert_many_txn(
txn,
table="events",
values=[
{
"stream_ordering": event.internal_metadata.stream_ordering,
"topological_ordering": event.depth,
"depth": event.depth,
"event_id": event.event_id,
"room_id": event.room_id,
"type": event.type,
"processed": True,
"outlier": event.internal_metadata.is_outlier(),
"origin_server_ts": int(event.origin_server_ts),
"received_ts": self._clock.time_msec(),
"sender": event.sender,
"contains_url": (
"url" in event.content
and isinstance(event.content["url"], text_type)
),
}
for event, _ in events_and_contexts
],
)
def _store_rejected_events_txn(self, txn, events_and_contexts):
"""Add rows to the 'rejections' table for received events which were
rejected
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
Returns:
list[(EventBase, EventContext)] new list, without the rejected
events.
"""
# Remove the rejected events from the list now that we've added them
# to the events table and the events_json table.
to_remove = set()
for event, context in events_and_contexts:
if context.rejected:
# Insert the event_id into the rejections table
self._store_rejections_txn(txn, event.event_id, context.rejected)
to_remove.add(event)
return [ec for ec in events_and_contexts if ec[0] not in to_remove]
def _update_metadata_tables_txn(
self, txn, events_and_contexts, all_events_and_contexts, backfilled
):
"""Update all the miscellaneous tables for new events
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
all_events_and_contexts (list[(EventBase, EventContext)]): all
events that we were going to persist. This includes events
we've already persisted, etc, that wouldn't appear in
events_and_context.
backfilled (bool): True if the events were backfilled
"""
# Insert all the push actions into the event_push_actions table.
self._set_push_actions_for_event_and_users_txn(
txn,
events_and_contexts=events_and_contexts,
all_events_and_contexts=all_events_and_contexts,
)
if not events_and_contexts:
# nothing to do here
return
for event, context in events_and_contexts:
if event.type == EventTypes.Redaction and event.redacts is not None:
# Remove the entries in the event_push_actions table for the
# redacted event.
self._remove_push_actions_for_event_id_txn(
txn, event.room_id, event.redacts
)
# Remove from relations table.
self._handle_redaction(txn, event.redacts)
# Update the event_forward_extremities, event_backward_extremities and
# event_edges tables.
self._handle_mult_prev_events(
txn, events=[event for event, _ in events_and_contexts]
)
for event, _ in events_and_contexts:
if event.type == EventTypes.Name:
# Insert into the room_names and event_search tables.
self._store_room_name_txn(txn, event)
elif event.type == EventTypes.Topic:
# Insert into the topics table and event_search table.
self._store_room_topic_txn(txn, event)
elif event.type == EventTypes.Message:
# Insert into the event_search table.
self._store_room_message_txn(txn, event)
elif event.type == EventTypes.Redaction:
# Insert into the redactions table.
self._store_redaction(txn, event)
elif event.type == EventTypes.RoomHistoryVisibility:
# Insert into the event_search table.
self._store_history_visibility_txn(txn, event)
elif event.type == EventTypes.GuestAccess:
# Insert into the event_search table.
self._store_guest_access_txn(txn, event)
self._handle_event_relations(txn, event)
# Insert into the room_memberships table.
self._store_room_members_txn(
txn,
[
event
for event, _ in events_and_contexts
if event.type == EventTypes.Member
],
backfilled=backfilled,
)
# Insert event_reference_hashes table.
self._store_event_reference_hashes_txn(
txn, [event for event, _ in events_and_contexts]
)
state_events_and_contexts = [
ec for ec in events_and_contexts if ec[0].is_state()
]
state_values = []
for event, context in state_events_and_contexts:
vals = {
"event_id": event.event_id,
"room_id": event.room_id,
"type": event.type,
"state_key": event.state_key,
}
# TODO: How does this work with backfilling?
if hasattr(event, "replaces_state"):
vals["prev_state"] = event.replaces_state
state_values.append(vals)
self._simple_insert_many_txn(txn, table="state_events", values=state_values)
# Prefill the event cache
self._add_to_cache(txn, events_and_contexts)
def get_current_backfill_token(self):
"""The current minimum token that backfilled events have reached"""
return -self._backfill_id_gen.get_current_token()
def get_current_events_token(self):
"""The current maximum token that events have reached"""
return self._stream_id_gen.get_current_token()
def get_all_new_forward_event_rows(self, last_id, current_id, limit):
if last_id == current_id:
return defer.succeed([])
return self.runInteraction(
"get_all_new_forward_event_rows", get_all_new_forward_event_rows
)
def get_all_new_backfill_event_rows(self, last_id, current_id, limit):
if last_id == current_id:
return defer.succeed([])
return self.runInteraction(
"get_all_new_backfill_event_rows", get_all_new_backfill_event_rows
)
def purge_history(self, room_id, token, delete_local_events):
"""Deletes room history before a certain point
Args:
room_id (str):
token (str): A topological token to delete events before
delete_local_events (bool):
if True, we will delete local events as well as remote ones
(instead of just marking them as outliers and deleting their
state groups).
"""
return self.runInteraction(
"purge_history",
self._purge_history_txn,
room_id,
token,
delete_local_events,
)
def _find_unreferenced_groups_during_purge(self, txn, state_groups):
"""Used when purging history to figure out which state groups can be
deleted and which need to be de-delta'ed (due to one of its prev groups
being scheduled for deletion).
Args:
txn
state_groups (set[int]): Set of state groups referenced by events
that are going to be deleted.
Returns:
tuple[set[int], set[int]]: The set of state groups that can be
deleted and the set of state groups that need to be de-delta'ed
"""
# Graph of state group -> previous group
graph = {}
# Set of events that we have found to be referenced by events
referenced_groups = set()
# Set of state groups we've already seen
state_groups_seen = set(state_groups)
# Set of state groups to handle next.
next_to_search = set(state_groups)
while next_to_search:
# We bound size of groups we're looking up at once, to stop the
# SQL query getting too big
if len(next_to_search) < 100:
current_search = next_to_search
next_to_search = set()
else:
current_search = set(itertools.islice(next_to_search, 100))
next_to_search -= current_search
# Check if state groups are referenced
sql = """
SELECT DISTINCT state_group FROM event_to_state_groups
LEFT JOIN events_to_purge AS ep USING (event_id)
WHERE state_group IN (%s) AND ep.event_id IS NULL
""" % (
",".join("?" for _ in current_search),
)
txn.execute(sql, list(current_search))
referenced = set(sg for sg, in txn)
referenced_groups |= referenced
# We don't continue iterating up the state group graphs for state
# groups that are referenced.
current_search -= referenced
rows = self._simple_select_many_txn(
txn,
table="state_group_edges",
column="prev_state_group",
iterable=current_search,
keyvalues={},
retcols=("prev_state_group", "state_group"),
)
prevs = set(row["state_group"] for row in rows)
# We don't bother re-handling groups we've already seen
prevs -= state_groups_seen
next_to_search |= prevs
state_groups_seen |= prevs
for row in rows:
# Note: Each state group can have at most one prev group
graph[row["state_group"]] = row["prev_state_group"]
to_delete = state_groups_seen - referenced_groups
to_dedelta = set()
for sg in referenced_groups:
prev_sg = graph.get(sg)
if prev_sg and prev_sg in to_delete:
to_dedelta.add(sg)
return to_delete, to_dedelta
def get_all_updated_current_state_deltas(self, from_token, to_token, limit):
return self.runInteraction(
"get_all_updated_current_state_deltas",
get_all_updated_current_state_deltas_txn,
)
AllNewEventsResult = namedtuple(
"AllNewEventsResult",
[
"new_forward_events",
"new_backfill_events",
"forward_ex_outliers",
"backward_ex_outliers",
],
)
```
avg_line_length: 39.339767 | max_line_length: 89 | alphanum_fraction: 0.578455

hexsha: 86f92c20143e35ec634b684ad280aeb864a0957c | size: 3,074 | ext: py | lang: Python
repo: premm1983/Spinnaker | path: dev/buildtool/metrics.py | head: 535f78b8f5402eea942c260cb9ca26682772a3e6 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
```python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics support manager."""
import logging
from buildtool import in_memory_metrics
from buildtool import prometheus_metrics
from buildtool import stackdriver_metrics
from buildtool.util import add_parser_argument
```
avg_line_length: 39.410256 | max_line_length: 77 | alphanum_fraction: 0.754717

hexsha: 86f9d5c800a3592d64ffbc26d845ced72a00288c | size: 4,005 | ext: py | lang: Python
max_stars repo: hythloday/pants | path: src/python/pants/backend/android/tasks/aapt_builder.py | head: 107e9b0957f6949ac4bd535fbef8d2d8cba05c5c | licenses: ["Apache-2.0"] | stars: 11 (2015-01-20T01:39:41.000Z to 2019-08-08T07:27:44.000Z)
max_issues repo: hythloday/pants | path: src/python/pants/backend/android/tasks/aapt_builder.py | head: 107e9b0957f6949ac4bd535fbef8d2d8cba05c5c | licenses: ["Apache-2.0"] | issues: 1 (2016-03-15T20:35:18.000Z to 2016-03-15T20:35:18.000Z)
max_forks repo: fakeNetflix/square-repo-pants | path: src/python/pants/backend/android/tasks/aapt_builder.py | head: 28a018c7f47900aec4f576c81a52e0e4b41d9fec | licenses: ["Apache-2.0"] | forks: 5 (2015-03-30T02:46:53.000Z to 2018-03-08T20:10:43.000Z)
content:
```python
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import os
import subprocess
from twitter.common import log
from pants.backend.android.targets.android_binary import AndroidBinary
from pants.backend.android.targets.android_resources import AndroidResources
from pants.backend.android.tasks.aapt_task import AaptTask
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnit
from pants.util.dirutil import safe_mkdir
```
avg_line_length: 41.71875 | max_line_length: 106 | alphanum_fraction: 0.698127

hexsha: 86fafa9c771d30389c672ab69f2d0d2991d82592 | size: 4,967 | ext: py | lang: Python
repo: kiss2u/google-research | path: fat/fat_bert_nq/ppr/apr_lib.py | head: 2cd66234656f9e2f4218ed90a2d8aa9cf3139093 | licenses: ["Apache-2.0"]
stars: 1 (2020-05-27T15:40:17.000Z to 2020-05-27T15:40:17.000Z) | issues: 7 (2021-08-25T16:15:53.000Z to 2022-02-10T03:26:55.000Z) | forks: null
content:
```python
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a class which acts as a wrapper around the PPR algorithm.
This class has the following functionality:
1. Load the KB graph,
2. Given list of seed entities, get topk entities from PPR.
3. Get unique facts between all extracted entities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from fat.fat_bert_nq.ppr.apr_algo import csr_personalized_pagerank
from fat.fat_bert_nq.ppr.apr_algo import csr_topk_fact_extractor
from fat.fat_bert_nq.ppr.kb_csr_io import CsrData
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_bool(
'verbose_logging', False,
'If true, all of the warnings related to data processing will be printed. '
'A number of warnings are expected for a normal NQ evaluation.')
```
avg_line_length: 35.478571 | max_line_length: 80 | alphanum_fraction: 0.684518

hexsha: 86fc7c6a00ab6863dd9ce69648b4b5568994e8af | size: 6,941 | ext: py | lang: Python
repo: evanlynch/optimal-gardening | path: src/optimal_gardening.py | head: 447ca8575efac1ad5cdd975091f3cbb67721e167 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
```python
import os
import sys
import time
from IPython.display import Image
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
sb.set_style("dark")
#### Initial Setup ####
#plant info
plant_info = pd.read_csv('../data/plant_data.csv')
plant_info.index.name = 'plant_index'
plants = plant_info.name.to_numpy()
plant_index = plant_info.index.to_numpy()
num_plants = len(plants)
plant_sun_req = plant_info.sun.to_numpy()
perennials = plant_info[plant_info.perennial==1].index.to_list()
problem_plants = plant_info[plant_info.problem_plant==1].index.to_list()
#calculate weighted average preference for each plant
family = ['evan','gina','liesse','lizzie','jack']
plant_info['avg_pref'] = np.average(plant_info[family],axis=1,weights=[.5,.5,0,0,0])
plant_info.drop(family,axis=1,inplace=True)
preferences = plant_info.avg_pref.to_numpy()
#bed info
bed_info = pd.read_csv('../data/bed_data.csv')
bed_info.index.name = 'bed_index'
beds = bed_info.bed.to_numpy()
bed_index = bed_info.index.to_numpy()
bed_sun_req = bed_info.sun.to_numpy()
num_beds = len(beds)
#time dimension
num_years = 3
years = np.array(range(1,num_years+1))
year_index = np.array(range(num_years))
#for keeping track of what axis is which
plant_axis = 0
bed_axis = 1
year_axis = 2
##### Constraints #####
#initialize sun constraint. 1 where plant can feasibly be planted in bed. 0 where sun requirements do not match.
sun_constraint = np.ones(shape=(num_plants,num_beds,num_years))
for p in plant_index:
for b in bed_index:
p_sun = plant_sun_req[p]
b_sun = bed_sun_req[b]
if p_sun != b_sun:
sun_constraint[p,b,:] = 0
def enforce_sun_constraint(plan,sun_constraint):
"""
Force plan to be 0 where sun requirements for plant and bed do not match.
"""
return plan*sun_constraint
def enforce_perennial_constraint(plan,plant,bed,year,perennials):
"""Forward fill plan for perennial plants. If 1 in a given bed/year, it will be 1 in same bed thereafter."""
perennial_plan = plan.copy()
#what was planted the year before
plant_last_year = perennial_plan[:,bed,year-1].argmax()
#if the plant is a perennial, plant it this year and every year thereafter
if plant in perennials:
perennial_plan[:,bed,year:] = 0 # zeros out anything else that may have been planted in bed in current and subsequent years during a previous make_neighbor call
perennial_plan[plant,bed,year:] = 1 #sets plant to 1 in bed every year after the current year
#if what was planted already in this bed was a perennial, remove it from previous years
elif plant_last_year in perennials:
perennial_plan[plant_last_year,bed,:year] = 0
return perennial_plan
def enforce_disease_constraint(plan,problem_plants):
"""Creates a mask to determine if the same veg was planted in the same bed over multiple years.
Multiplies the plan for problem plants by 0 in subsequent years where we planned to put them in the same bed
"""
disease_plan = plan.copy()
#mask to determine cases where same thing was planted in the same bed yoy
same_veg_in_bed_yoy = disease_plan.cumsum(axis=year_axis)>1
#multiply plan for specific problem plants by 0
disease_plan[problem_plants] = disease_plan[problem_plants]*(abs(1-same_veg_in_bed_yoy)[problem_plants])
return disease_plan
##### Objectives #####
#the most satisfied you could be (planting fruit or vegetable with highest preference in all beds every year)
max_yums = num_beds*num_years*np.max(preferences)
def compute_yummy_score(plan,preferences,max_yums):
"""Takes the weighted average of the preferences of each plant, weighted by the total qty of plants
in the current plan for each plant. Maximization encourages plants with higher preferences to be planted in higher quantities."""
plan_yummy = plan.copy()
plan_by_plant = plan_yummy.sum(axis=(bed_axis,year_axis))
yums = round(np.dot(preferences,plan_by_plant)/max_yums*100,1)
return yums
def compute_variety_score(plan,num_plants):
"""Sums the number of unique plants that are actually planted in the garden. Counts the number of plants that are being planted across all beds.
Then counts the number of plants with non-zero planting plan.
Maximization encourages more unique plants to be planted."""
plan_variety = plan.copy()
num_plants_in_plan = (plan_variety.sum(axis=(bed_axis,year_axis)) > 0).sum()
variety_score = round(num_plants_in_plan/num_plants*100,1)
return variety_score
#### Analysis & Visualization ####
```
avg_line_length: 39.662857 | max_line_length: 168 | alphanum_fraction: 0.711713

hexsha: 86fca5740e3caf795c7b7090059ab5992cec0e59 | size: 9,453 | ext: py | lang: Python
repo: Daulbaev/adversarial-library | path: adv_lib/utils/attack_utils.py | head: 6f979a511ad78908374cd55855a9e2c5a874be7d | licenses: ["BSD-3-Clause"]
stars: 55 (2020-11-25T10:47:48.000Z to 2022-03-21T12:11:31.000Z) | issues: 4 (2021-03-10T19:25:31.000Z to 2021-08-06T00:10:49.000Z) | forks: 8 (2020-11-26T08:42:04.000Z to 2022-01-13T02:55:47.000Z)
content:
```python
import warnings
from collections import OrderedDict
from distutils.version import LooseVersion
from functools import partial
from inspect import isclass
from typing import Callable, Optional, Dict, Union
import numpy as np
import torch
import tqdm
from torch import Tensor, nn
from torch.nn import functional as F
from adv_lib.distances.lp_norms import l0_distances, l1_distances, l2_distances, linf_distances
from adv_lib.utils import ForwardCounter, BackwardCounter, predict_inputs
def generate_random_targets(labels: Tensor, num_classes: int) -> Tensor:
"""
Generates one random target in (num_classes - 1) possibilities for each label that is different from the original
label.
Parameters
----------
labels: Tensor
Original labels. Generated targets will be different from labels.
num_classes: int
Number of classes to generate the random targets from.
Returns
-------
targets: Tensor
Random target for each label. Has the same shape as labels.
"""
random = torch.rand(len(labels), num_classes, device=labels.device, dtype=torch.float)
random.scatter_(1, labels.unsqueeze(-1), 0)
return random.argmax(1)
def get_all_targets(labels: Tensor, num_classes: int):
"""
Generates all possible targets that are different from the original labels.
Parameters
----------
labels: Tensor
Original labels. Generated targets will be different from labels.
num_classes: int
Number of classes to generate the random targets from.
Returns
-------
targets: Tensor
Random targets for each label. shape: (len(labels), num_classes - 1).
"""
all_possible_targets = torch.zeros(len(labels), num_classes - 1, dtype=torch.long)
all_classes = set(range(num_classes))
for i in range(len(labels)):
this_label = labels[i].item()
other_labels = list(all_classes.difference({this_label}))
all_possible_targets[i] = torch.tensor(other_labels)
return all_possible_targets
_default_metrics = OrderedDict([
('linf', linf_distances),
('l0', l0_distances),
('l1', l1_distances),
('l2', l2_distances),
])
```
avg_line_length: 41.643172 | max_line_length: 117 | alphanum_fraction: 0.66307

hexsha: 86fd1a571a9b46918806e9e8e71337c7e3431481 | size: 2,559 | ext: py | lang: Python
repo: Summer0328/ChangeDet_DL-1 | path: thawSlumpChangeDet/polygons_compare.py | head: f2474ee4200d9ad093c0e5a27a94bfbd3bd038e7 | licenses: ["MIT"]
stars: 3 (2021-07-03T14:33:37.000Z to 2021-08-03T20:35:32.000Z) | issues: null | forks: 2 (2021-07-29T01:45:33.000Z to 2021-08-10T09:13:58.000Z)
content:
```python
#!/usr/bin/env python
# Filename: polygons_cd
"""
introduction: compare two polygons in to shape file
authors: Huang Lingcao
email:[email protected]
add time: 26 February, 2020
"""
import sys,os
from optparse import OptionParser
# added path of DeeplabforRS
sys.path.insert(0, os.path.expanduser('~/codes/PycharmProjects/DeeplabforRS'))
import basic_src.io_function as io_function
import basic_src.basic as basic
import basic_src.map_projection as map_projection
import parameters
import polygons_cd_multi
import polygons_cd
if __name__ == "__main__":
usage = "usage: %prog [options] old_shape_file new_shape_file "
parser = OptionParser(usage=usage, version="1.0 2020-02-26")
parser.description = 'Introduction: compare two groups of polygons '
parser.add_option("-p", "--para",
action="store", dest="para_file",
help="the parameters file")
parser.add_option('-o', '--output',
action="store", dest = 'output',
help='the path to save the change detection results')
(options, args) = parser.parse_args()
if len(sys.argv) < 2:
parser.print_help()
sys.exit(2)
# # set parameters files
# if options.para_file is None:
# print('error, no parameters file')
# parser.print_help()
# sys.exit(2)
# else:
# parameters.set_saved_parafile_path(options.para_file)
basic.setlogfile('polygons_changeDetection.log')
main(options, args)
```
avg_line_length: 30.105882 | max_line_length: 108 | alphanum_fraction: 0.709261

hexsha: 86ff60feba2c3198cb1453cbf355794a3010628a | size: 195 | ext: py | lang: Python
repo: fcsiba/Smart-Cart | path: CV Model/Model - JupyterNotebook/mrcnn/tfliteconverter.py | head: 7d45b9f2a2be2015936c2a61068b2fd8b6c95fe5 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
```python
import tensorflow as tf
# Convert the model.
converter = tf.lite.TFLiteConverter.from_saved_model('model.py')
tflite_model = converter.convert()
open("trash_ai.tflite", "wb").write(tflite_model) | 32.5 | 64 | 0.784615 |

hexsha: 86ffc174e23653c3f067117004b1a24f8234310f | size: 711 | ext: py | lang: Python
repo: shivamsinghal212/Url-Shortener | path: basicapp/cron.py | head: 4127a993272744f6f8592415314c8e8514d43153 | licenses: ["MIT"]
stars: null | issues: 8 (2020-06-05T18:23:15.000Z to 2022-03-11T23:23:57.000Z) | forks: null
content:
```python
from django_cron import CronJobBase, Schedule
from .models import Link
from django.utils import timezone
```
avg_line_length: 30.913043 | max_line_length: 65 | alphanum_fraction: 0.610408

hexsha: 810028e77fa49197f58461ee88815f3bf00eba42 | size: 2,029 | ext: py | lang: Python
repo: Smylers/WeasyPrint | path: weasyprint/tests/test_stacking.py | head: 25ce91a34755386b3350d898aa1638c349723b57 | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
content:
```python
# coding: utf8
"""
weasyprint.tests.stacking
-------------------------
:copyright: Copyright 2011-2012 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
from ..stacking import StackingContext
from .test_boxes import serialize
from .test_layout import parse
from .testing_utils import assert_no_logs
```
avg_line_length: 25.683544 | max_line_length: 78 | alphanum_fraction: 0.556925

hexsha: 8100832c0d5bc42056a44079c84eec3ee522ecd0 | size: 1,143 | ext: py | lang: Python
max_stars repo: industrydive/sourcelist | path: django-magic-link/customers/views.py | head: 9db4ec5c9cb9246a644615ca401a3c8f8d560b6e | licenses: ["MIT"] | stars: 5 (2017-10-28T17:17:35.000Z to 2020-06-24T21:43:22.000Z)
max_issues repo: greglinch/sourcelist | path: django-magic-link/customers/views.py | head: 8267bb060e55f036d6d2dd9648698a5b8e48c0b3 | licenses: ["MIT"] | issues: 2 (2020-02-11T21:50:49.000Z to 2021-04-08T18:25:26.000Z)
max_forks repo: industrydive/sourcelist | path: django-magic-link/customers/views.py | head: 9db4ec5c9cb9246a644615ca401a3c8f8d560b6e | licenses: ["MIT"] | forks: 2 (2017-11-02T02:14:25.000Z to 2019-05-28T15:35:44.000Z)
content:
```python
from django.shortcuts import render
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from sesame import utils
from django.core.mail import send_mail
```
avg_line_length: 31.75 | max_line_length: 107 | alphanum_fraction: 0.630796

hexsha: 81014d9aa2fe1551a160b448cdc227329bca693d | size: 3,079 | ext: py | lang: Python
repo: dataiku/dss-plugin-nlp-analysis | path: python-lib/config/dss_parameter.py | head: ff9dce56500dc8f28f83158afbdf7db01074ee38 | licenses: ["Apache-2.0"]
stars: 1 (2021-03-12T10:45:53.000Z to 2021-03-12T10:45:53.000Z) | issues: 22 (2021-03-01T18:49:54.000Z to 2021-06-08T15:16:30.000Z) | forks: 1 (2021-02-22T15:19:43.000Z to 2021-02-22T15:19:43.000Z)
content:
```python
from .custom_check import CustomCheck, CustomCheckError
from typing import Any, List
import logging
logger = logging.getLogger(__name__)
```
avg_line_length: 33.835165 | max_line_length: 104 | alphanum_fraction: 0.618383

hexsha: 81017170c94b85c3925c1875676b310a658ce79c | size: 21,868 | ext: py | lang: Python
repo: mstarikov/transitfeed | path: misc/import_ch_zurich.py | head: c018d7b14f6fccaa670629c00c83a390b5461fc1 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
```python
#!/usr/bin/python2.4
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""imports Zurich timetables, converting them from DIVA export format
to Google Transit format."""
from __future__ import print_function
# This was written before transitfeed.py and we haven't yet found the
# motivation to port it. Please see the examples directory for better
# examples.
try:
from io import StringIO as cStringIO
except ImportError:
import cStringIO
import csv
import datetime
import optparse
import sys
import urllib
import zipfile
# Zurich tram lines
TRAM_LINES = {'2': ['FF3300', 'FFFFFF'],
'3': ['009933', 'FFFFFF'],
'4': ['333399', 'FFFFFF'],
'5': ['996600', 'FFFFFF'],
'6': ['CC9933', 'FFFFFF'],
'7': ['000000', 'FFFFFF'],
'8': ['99CC00', '000000'],
'9': ['333399', 'FFFFFF'],
'10': ['FF6699', 'FFFFFF'],
'11': ['009933', 'FFFFFF'],
'12': ['FFFFFF', '000000'],
'13': ['FFCC33', '000000'],
'14': ['3399CC', 'FFFFFF'],
'15': ['FF3300', 'FFFFFF']}
# Terms that indicate points of interest. Used to split station names
# to (name, city).
POI_TERMS = {'Bahnhof': 1, 'Dorfzentrum': 1, 'Schiffstation': 1,
'Station': 1, u'Zentrum': 1,
'Dorfplatz': 1, 'Zentrum/Bahnhof': 1, 'Dorf': 1}
# Maps station names to (name, city). Used as exception list where our
# simple heuristcs doesn't work.
SPECIAL_NAMES = {
'Freienbach SOB, Bahnhof': ('Freienbach SOB', 'Freienbach'),
'Herrliberg-Feldmeilen,Bhf West': ('Bahnhof West', 'Herrliberg-Feldmeilen'),
'Neue Forch': ('Neue Forch', u'Z\u00fcrich'),
'Oberrieden Dorf Bahnhof': ('Oberrieden Dorf', 'Oberrieden'),
'Spital Zollikerberg': ('Spital', 'Zollikerberg'),
'Triemli': ('Triemli', u'Z\u00fcrich'),
'Zentrum Glatt': ('Zentrum Glatt', 'Wallisellen'),
}
# Cities whose names we want to prettify/correct at import time.
SPECIAL_CITIES = {
'Affoltern a. A.': 'Affoltern am Albis',
'Wangen b. D.': 'Wangen'
}
def convert_c_h1903(x, y):
"Converts coordinates from the 1903 Swiss national grid system to WGS-84."
yb = (x - 600000.0) / 1e6;
xb = (y - 200000.0) / 1e6;
lam = 2.6779094 \
+ 4.728982 * yb \
+ 0.791484 * yb * xb \
+ 0.1306 * yb * xb * xb \
- 0.0436 * yb * yb * yb
phi = 16.9023892 \
+ 3.238372 * xb \
- 0.270978 * yb * yb \
- 0.002582 * xb * xb \
- 0.0447 * yb * yb * xb \
- 0.0140 * xb * xb * xb
return phi * 100.0 / 36.0, lam * 100.0 / 36.0
def encode_for_csv(x):
"Encodes one value for CSV."
k = x.encode('utf-8')
if ',' in k or '"' in k:
return '"%s"' % k.replace('"', '""')
else:
return k
def write_row(stream, values):
"writes one row of comma-separated values to stream."
stream.write(','.join([encode_for_csv(val) for val in values]))
stream.write('\n')
# https://developers.google.com/transit/gtfs/
TYPE_TRAM = 0
TYPE_BUS = 3
if __name__ == '__main__':
main(sys.argv)
```
avg_line_length: 42.62768 | max_line_length: 94 | alphanum_fraction: 0.559813

hexsha: 8101825b7fae5806f4a1d2d670c101bc508918db | size: 5,681 | ext: py | lang: Python
repo: rotsee/protokollen | path: modules/documents.py | head: a001a1db86df57adcf5c53c95c4c2fae426340f1 | licenses: ["MIT", "Apache-2.0", "CC0-1.0", "Unlicense"]
stars: 4 (2015-03-22T20:23:36.000Z to 2015-12-09T14:31:34.000Z) | issues: 4 (2015-03-24T10:42:00.000Z to 2016-06-21T08:44:01.000Z) | forks: null
content:
```python
# -*- coding: utf-8 -*-
"""This module contains classes for documents, and lists of documents.
Documents are defined by the document rules in settings.py
A file can contain one or more document. However, a document can
not be constructed from more than one file. This is a limitation,
obvious in cases like Gotlands kommun, where meeting minutes are
split up in a large number of files.
"""
import settings
from modules.utils import make_unicode, last_index
from modules.extractors.documentBase import ExtractionNotAllowed
document_headers = {
"Content-Type": "text/plain",
"Content-Disposition": "attachment",
"Cache-Control": "public"
}
if __name__ == "__main__":
print "This module is only intended to be called from other scripts."
import sys
sys.exit()
```
avg_line_length: 33.417647 | max_line_length: 80 | alphanum_fraction: 0.582996

hexsha: 8101888cafdd6d738a67d105df6945c67e4d48e2 | size: 773 | ext: py | lang: Python
max_stars repo: saratkumar/galaxy | path: tools/amp_segment/ina_speech_segmenter.py | head: 35cd0987239c1b006d6eaf70b4a03a58fb857a12 | licenses: ["CC-BY-3.0"] | stars: 1 (2020-03-11T15:17:32.000Z to 2020-03-11T15:17:32.000Z)
max_issues repo: saratkumar/galaxy | path: tools/amp_segment/ina_speech_segmenter.py | head: 35cd0987239c1b006d6eaf70b4a03a58fb857a12 | licenses: ["CC-BY-3.0"] | issues: 72 (2019-06-06T18:52:41.000Z to 2022-02-17T02:53:18.000Z)
max_forks repo: AudiovisualMetadataPlatform/amp_mgms | path: tools/done/amp_segment/ina_speech_segmenter.py | head: 593d4f4d40b597a7753cd152cd233976e6b28c75 | licenses: ["Apache-2.0"] | forks: 1 (2022-03-01T08:07:54.000Z to 2022-03-01T08:07:54.000Z)
content:
```python
#!/usr/bin/env python3
import os
import os.path
import shutil
import subprocess
import sys
import tempfile
import uuid
import mgm_utils
if __name__ == "__main__":
main()
```
avg_line_length: 21.472222 | max_line_length: 83 | alphanum_fraction: 0.742561

hexsha: 8102b4059df75e35791f0dfa9ba091b4276e3aff | size: 12,313 | ext: py | lang: Python
repo: jhill1/thetis | path: test/tracerEq/test_steady_adv-diff_mms.py | head: 1be5d28d5d0d7248f2bbce4986b3e886116e103a | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
```python
"""
Testing 3D tracer advection-diffusion equation with method of manufactured solution (MMS).
"""
from thetis import *
import numpy
from scipy import stats
import pytest
def run(setup, refinement, order, do_export=True, **options):
"""Run single test and return L2 error"""
print_output('--- running {:} refinement {:}'.format(setup.__name__, refinement))
# domain dimensions
lx = 15e3
ly = 10e3
area = lx*ly
t_end = 200.0
setup_obj = setup()
# mesh
n_layers = 4*refinement
nx = 4*refinement
ny = 4*refinement
mesh2d = RectangleMesh(nx, ny, lx, ly)
# outputs
outputdir = 'outputs'
if do_export:
out_t = File(os.path.join(outputdir, 'T.pvd'))
# bathymetry
x_2d, y_2d = SpatialCoordinate(mesh2d)
p1_2d = get_functionspace(mesh2d, 'CG', 1)
bathymetry_2d = Function(p1_2d, name='Bathymetry')
bathymetry_2d.project(setup_obj.bath(x_2d, y_2d, lx, ly))
solver_obj = solver.FlowSolver(mesh2d, bathymetry_2d, n_layers)
solver_obj.options.element_family = 'dg-dg'
solver_obj.options.polynomial_degree = order
solver_obj.options.horizontal_velocity_scale = Constant(1.0)
solver_obj.options.use_bottom_friction = False
solver_obj.options.no_exports = not do_export
solver_obj.options.output_directory = outputdir
solver_obj.options.simulation_end_time = t_end
solver_obj.options.fields_to_export = ['salt_3d', 'uv_3d', 'w_3d']
solver_obj.options.horizontal_viscosity_scale = Constant(50.0)
solver_obj.options.update(options)
solver_obj.create_function_spaces()
# functions for source terms
x, y, z = SpatialCoordinate(solver_obj.mesh)
solver_obj.options.salinity_source_3d = setup_obj.residual(x, y, z, lx, ly)
# diffusivuty
solver_obj.options.horizontal_diffusivity = setup_obj.kappa(x, y, z, lx, ly)
# analytical solution
trac_ana = setup_obj.tracer(x, y, z, lx, ly)
bnd_salt = {'value': trac_ana}
solver_obj.bnd_functions['salt'] = {1: bnd_salt, 2: bnd_salt,
3: bnd_salt, 4: bnd_salt}
# NOTE use symmetic uv condition to get correct w
bnd_mom = {'symm': None}
solver_obj.bnd_functions['momentum'] = {1: bnd_mom, 2: bnd_mom,
3: bnd_mom, 4: bnd_mom}
solver_obj.create_equations()
dt = solver_obj.dt
# elevation field
solver_obj.fields.elev_2d.project(setup_obj.elev(x_2d, y_2d, lx, ly))
# update mesh and fields
solver_obj.mesh_updater.update_mesh_coordinates()
# salinity field
solver_obj.fields.salt_3d.project(setup_obj.tracer(x, y, z, lx, ly))
# velocity field
solver_obj.fields.uv_3d.project(setup_obj.uv(x, y, z, lx, ly))
solver_obj.w_solver.solve()
if do_export:
out_t.write(trac_ana)
solver_obj.export()
# solve salinity advection-diffusion equation with residual source term
ti = solver_obj.timestepper
ti.timesteppers.salt_expl.initialize(ti.fields.salt_3d)
t = 0
while t < t_end - 1e-5:
ti.timesteppers.salt_expl.advance(t)
if ti.options.use_limiter_for_tracers:
ti.solver.tracer_limiter.apply(ti.fields.salt_3d)
t += dt
if do_export:
out_t.write(trac_ana)
solver_obj.export()
l2_err = errornorm(trac_ana, solver_obj.fields.salt_3d)/numpy.sqrt(area)
print_output('L2 error {:.12f}'.format(l2_err))
return l2_err
def run_convergence(setup, ref_list, order, do_export=False, save_plot=False, **options):
"""Runs test for a list of refinements and computes error convergence rate"""
l2_err = []
for r in ref_list:
l2_err.append(run(setup, r, order, do_export=do_export, **options))
x_log = numpy.log10(numpy.array(ref_list, dtype=float)**-1)
y_log = numpy.log10(numpy.array(l2_err))
check_convergence(x_log, y_log, order+1, 'tracer', save_plot)
# ---------------------------
# standard tests for pytest
# ---------------------------
def test_convergence(setup, timestepper_type):
run_convergence(setup, [1, 2, 3], 1, save_plot=False,
timestepper_type=timestepper_type)
if __name__ == '__main__':
run_convergence(Setup4, [1, 2, 3], 1, save_plot=True, timestepper_type='SSPRK22')
```
avg_line_length: 37.769939 | max_line_length: 1,095 | alphanum_fraction: 0.559165

hexsha: 810397b3d3caaed833502145ab5542c3eb653710 | size: 723 | ext: py | lang: Python
repo: GaretJax/csat | path: csat/django/fields.py | head: db63441136369436140a91c9657444353c8944e6 | licenses: ["MIT"]
stars: null | issues: 7 (2020-06-05T17:15:29.000Z to 2022-02-11T03:38:15.000Z) | forks: null
content:
```python
from lxml import etree
from django import forms
from django.db import models
```
avg_line_length: 28.92 | max_line_length: 74 | alphanum_fraction: 0.60166

hexsha: 8104cf1d7fdf7aff507bf4b2cd4aa7b19708a446 | size: 15,658 | ext: py | lang: Python
repo: RishabhSehgal/keras_cv_attention_models | path: keras_cv_attention_models/yolox/yolox.py | head: c1e20e45815339d70a987ec7dd9e6f926b4eb21d | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
```python
import tensorflow as tf
from tensorflow import keras
from keras_cv_attention_models.attention_layers import (
activation_by_name,
batchnorm_with_activation,
conv2d_no_bias,
depthwise_conv2d_no_bias,
add_pre_post_process,
)
from keras_cv_attention_models import model_surgery
from keras_cv_attention_models.download_and_load import reload_model_weights
from keras_cv_attention_models.coco.eval_func import DecodePredictions
PRETRAINED_DICT = {
"yolox_nano": {"coco": "7c97d60d4cc9d54321176f844acee627"},
"yolox_tiny": {"coco": "f9b51ff24290090c86a10a45f811140b"},
"yolox_s": {"coco": "a989f5a808ddc4a8242157a6a3e64977"},
"yolox_m": {"coco": "5c2333d2f12b2f48e3ec8555b29d242f"},
"yolox_l": {"coco": "a07c48994b7a67dba421025ef39b858b"},
"yolox_x": {"coco": "de9741d3f67f50c54856bcae0f07b7ef"},
}
""" CSPDarknet backbone """
BATCH_NORM_EPSILON = 1e-3
BATCH_NORM_MOMENTUM = 0.03
""" path aggregation fpn """
""" YOLOXHead """
""" YOLOX models """
```
avg_line_length: 54.940351 | max_line_length: 156 | alphanum_fraction: 0.709158

hexsha: 8104ee7a90ab52a7bdf79ad5abbc624a2b356482 | size: 4,064 | ext: py | lang: Python
repo: mrod0101/opentrons | path: robot-server/tests/service/json_api/test_response.py | head: 6450edb0421f1c2484c292f8583602d8f6fd13b8 | licenses: ["Apache-2.0"]
stars: null | issues: 2 (2022-02-15T03:28:35.000Z to 2022-02-28T01:34:18.000Z) | forks: null
content:
```python
from pytest import raises
from pydantic import ValidationError
from robot_server.service.json_api.response import (
ResponseDataModel,
ResponseModel,
MultiResponseModel,
)
from tests.service.helpers import ItemResponseModel
```
avg_line_length: 26.913907 | max_line_length: 78 | alphanum_fraction: 0.483514

hexsha: 81050df4590617cea7e0daedc54d45bd783c7cfa | size: 367 | ext: py | lang: Python
repo: MichaelMcFarland98/cse210-project | path: stickmanZ/__main__.py | head: 9e5a45a75f465fe123e33712d3c19dd88e98246a | licenses: ["MIT"]
stars: 1 (2021-07-24T00:40:14.000Z to 2021-07-24T00:40:14.000Z) | issues: null | forks: null
content:
```python
from game.game_view import GameView
from game.menu_view import menu_view
from game import constants
import arcade
SCREEN_WIDTH = constants.SCREEN_WIDTH
SCREEN_HEIGHT = constants.SCREEN_HEIGHT
SCREEN_TITLE = constants.SCREEN_TITLE
window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
start_view = menu_view()
window.show_view(start_view)
arcade.run()
```
avg_line_length: 22.9375 | max_line_length: 65 | alphanum_fraction: 0.836512

hexsha: 81053c6c0f8dac07d6cae3bc4a12cf5b1f575105 | size: 2,300 | ext: py | lang: Python
repo: congnt95/neutron | path: neutron/db/migration/alembic_migrations/versions/mitaka/contract/c6c112992c9_rbac_qos_policy.py | head: 6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1 | licenses: ["Apache-2.0"]
stars: 1,080 (2015-01-04T08:35:00.000Z to 2022-03-27T09:15:52.000Z) | issues: 24 (2015-02-21T01:48:28.000Z to 2021-11-26T02:38:56.000Z) | forks: 1,241 (2015-01-02T10:47:10.000Z to 2022-03-27T09:42:23.000Z)
content:
```python
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from alembic import op
from oslo_utils import uuidutils
import sqlalchemy as sa
from neutron.db import rbac_db_models
"""rbac_qos_policy
Revision ID: c6c112992c9
Revises: 8a6d8bdae39
Create Date: 2015-11-25 18:45:03.831359
"""
# revision identifiers, used by Alembic.
revision = 'c6c112992c9'
down_revision = 'e3278ee65050'
depends_on = ('15e43b934f81',)
qos_rbacs = sa.Table(
'qospolicyrbacs', sa.MetaData(),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('tenant_id', sa.String(length=255),
nullable=True),
sa.Column('target_tenant', sa.String(length=255),
nullable=False),
sa.Column('action', sa.String(length=255), nullable=False),
sa.Column('object_id', sa.String(length=36), nullable=False))
# A simple model of the qos_policies table with only the fields needed for
# the migration.
qos_policy = sa.Table('qos_policies', sa.MetaData(),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('tenant_id',
sa.String(length=255)),
sa.Column('shared', sa.Boolean(), nullable=False))
| 33.333333 | 78 | 0.665652 |
8105a101c915deb0c3d41bd2462e33e9a3a8584e | 1,200 | py | Python | chapter5/ch5_gcp_subscriber.py | ericchou1/network-devops-kafka-up-and-running | c128cf7359ba40c3005a02d3033b16b67c196779 | [
"Apache-2.0"
] | 1 | 2021-12-30T08:55:09.000Z | 2021-12-30T08:55:09.000Z | chapter5/ch5_gcp_subscriber.py | ericchou1/network-devops-kafka-up-and-running | c128cf7359ba40c3005a02d3033b16b67c196779 | [
"Apache-2.0"
] | null | null | null | chapter5/ch5_gcp_subscriber.py | ericchou1/network-devops-kafka-up-and-running | c128cf7359ba40c3005a02d3033b16b67c196779 | [
"Apache-2.0"
] | 2 | 2021-11-22T09:56:30.000Z | 2022-02-06T22:55:55.000Z | from concurrent.futures import TimeoutError
from google.cloud import pubsub_v1
project_id = "pubsub-testing-331300"
subscription_id = "test-sub"
# Number of seconds the subscriber should listen for messages
timeout = 5.0
subscriber = pubsub_v1.SubscriberClient()
# The `subscription_path` method creates a fully qualified identifier
# in the form `projects/{project_id}/subscriptions/{subscription_id}`
subscription_path = subscriber.subscription_path(project_id, subscription_id)
# Minimal message handler (assumption -- the original callback definition is not
# shown here): print each message, then acknowledge it so it is not redelivered.
def callback(message: pubsub_v1.subscriber.message.Message) -> None:
    print(f"Received {message}.")
    message.ack()
streaming_pull_future = subscriber.subscribe(subscription_path, callback=callback)
print(f"Listening for messages on {subscription_path}..\n")
# Wrap subscriber in a 'with' block to automatically call close() when done.
with subscriber:
try:
# When `timeout` is not set, result() will block indefinitely,
# unless an exception is encountered first.
streaming_pull_future.result(timeout=timeout)
except TimeoutError:
streaming_pull_future.cancel() # Trigger the shutdown.
streaming_pull_future.result() # Block until the shutdown is complete.
| 38.709677 | 82 | 0.766667 |
8107dd8d87df5ce3c83ed8d4993880ee03266544 | 2,136 | py | Python | odoo-13.0/addons/google_drive/models/res_config_settings.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/google_drive/models/res_config_settings.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/google_drive/models/res_config_settings.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
| 45.446809 | 142 | 0.69382 |
81081edbfb2a07d5868f34f5440db42fe2a2e90a | 17,308 | py | Python | dataloaders/loader.py | sanger640/attMPTI | a2784b784e0900f3603baa3779631da67bcd0562 | [
"MIT"
] | 93 | 2021-03-18T13:56:42.000Z | 2022-03-30T03:31:35.000Z | dataloaders/loader.py | sanger640/attMPTI | a2784b784e0900f3603baa3779631da67bcd0562 | [
"MIT"
] | 20 | 2021-03-30T12:36:05.000Z | 2022-03-28T09:01:34.000Z | dataloaders/loader.py | sanger640/attMPTI | a2784b784e0900f3603baa3779631da67bcd0562 | [
"MIT"
] | 14 | 2021-04-17T17:19:19.000Z | 2022-03-09T13:49:30.000Z | """ Data Loader for Generating Tasks
Author: Zhao Na, 2020
"""
import os
import random
import math
import glob
import numpy as np
import h5py as h5
import transforms3d
from itertools import combinations
import torch
from torch.utils.data import Dataset
def sample_K_pointclouds(data_path, num_point, pc_attribs, pc_augm, pc_augm_config,
scan_names, sampled_class, sampled_classes, is_support=False):
'''sample K pointclouds and the corresponding labels for one class (one_way)'''
ptclouds = []
labels = []
for scan_name in scan_names:
ptcloud, label = sample_pointcloud(data_path, num_point, pc_attribs, pc_augm, pc_augm_config,
scan_name, sampled_classes, sampled_class, support=is_support)
ptclouds.append(ptcloud)
labels.append(label)
ptclouds = np.stack(ptclouds, axis=0)
labels = np.stack(labels, axis=0)
return ptclouds, labels
def augment_pointcloud(P, pc_augm_config):
"""" Augmentation on XYZ and jittering of everything """
M = transforms3d.zooms.zfdir2mat(1)
if pc_augm_config['scale'] > 1:
s = random.uniform(1 / pc_augm_config['scale'], pc_augm_config['scale'])
M = np.dot(transforms3d.zooms.zfdir2mat(s), M)
if pc_augm_config['rot'] == 1:
angle = random.uniform(0, 2 * math.pi)
M = np.dot(transforms3d.axangles.axangle2mat([0, 0, 1], angle), M) # z=upright assumption
if pc_augm_config['mirror_prob'] > 0: # mirroring x&y, not z
if random.random() < pc_augm_config['mirror_prob'] / 2:
M = np.dot(transforms3d.zooms.zfdir2mat(-1, [1, 0, 0]), M)
if random.random() < pc_augm_config['mirror_prob'] / 2:
M = np.dot(transforms3d.zooms.zfdir2mat(-1, [0, 1, 0]), M)
P[:, :3] = np.dot(P[:, :3], M.T)
if pc_augm_config['jitter']:
sigma, clip = 0.01, 0.05 # https://github.com/charlesq34/pointnet/blob/master/provider.py#L74
P = P + np.clip(sigma * np.random.randn(*P.shape), -1 * clip, clip).astype(np.float32)
return P
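# Illustrative usage sketch (not part of the original loader): exercise
# augment_pointcloud() with the config keys it reads ('scale', 'rot',
# 'mirror_prob', 'jitter') on a random XYZ+feature point cloud.
if __name__ == '__main__':
    _demo_cfg = {'scale': 1.2, 'rot': 1, 'mirror_prob': 0.5, 'jitter': True}
    _demo_pts = np.random.rand(2048, 6).astype(np.float32)
    print(augment_pointcloud(_demo_pts, _demo_cfg).shape)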
################################################ Static Testing Dataset ################################################
################################################ Pre-train Dataset ################################################ | 46.526882 | 129 | 0.621389 |
8109517cd2448992084aac4cf51be9ed93b5e56f | 467 | py | Python | greendoge/types/condition_with_args.py | grayfallstown/greendoge-blockchain | 31e325913374d694dc0859140d006a642e7f95ac | [
"Apache-2.0"
] | 44 | 2021-07-06T10:09:06.000Z | 2022-02-09T04:30:14.000Z | greendoge/types/condition_with_args.py | grayfallstown/greendoge-blockchain | 31e325913374d694dc0859140d006a642e7f95ac | [
"Apache-2.0"
] | 67 | 2021-07-06T11:57:18.000Z | 2022-02-02T16:14:15.000Z | greendoge/types/condition_with_args.py | grayfallstown/greendoge-blockchain | 31e325913374d694dc0859140d006a642e7f95ac | [
"Apache-2.0"
] | 16 | 2021-07-06T10:36:37.000Z | 2022-03-15T08:35:16.000Z | from dataclasses import dataclass
from typing import List
from greendoge.types.condition_opcodes import ConditionOpcode
from greendoge.util.streamable import Streamable, streamable
| 25.944444 | 83 | 0.770878 |
810978ce9b3f6467c879457442fbbbac1342a8e8 | 3,065 | py | Python | homeassistant/components/hunterdouglas_powerview/entity.py | pp81381/home-assistant | 23e362faf387c1535be0abab81b30d8e4631df4b | [
"Apache-2.0"
] | null | null | null | homeassistant/components/hunterdouglas_powerview/entity.py | pp81381/home-assistant | 23e362faf387c1535be0abab81b30d8e4631df4b | [
"Apache-2.0"
] | 31 | 2020-07-23T07:13:38.000Z | 2021-06-07T13:21:18.000Z | homeassistant/components/hunterdouglas_powerview/entity.py | pp81381/home-assistant | 23e362faf387c1535be0abab81b30d8e4631df4b | [
"Apache-2.0"
] | null | null | null | """The nexia integration base entity."""
from aiopvapi.resources.shade import ATTR_TYPE
from homeassistant.const import ATTR_MODEL, ATTR_SW_VERSION
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
DEVICE_FIRMWARE,
DEVICE_MAC_ADDRESS,
DEVICE_MODEL,
DEVICE_NAME,
DEVICE_SERIAL_NUMBER,
DOMAIN,
FIRMWARE,
FIRMWARE_BUILD,
FIRMWARE_REVISION,
FIRMWARE_SUB_REVISION,
MANUFACTURER,
)
| 32.606383 | 114 | 0.669494 |
810a3b9f5eeaf3e888067a624f744f48f465345b | 9,244 | py | Python | keycast_env/lib/python3.8/site-packages/Xlib/ext/res.py | daxter-army/key-cast | cadc88c6760839b37b7fef969294800d4c38fb1b | [
"MIT"
] | 10 | 2021-09-15T16:29:59.000Z | 2022-01-15T11:51:56.000Z | lib/Xlib/ext/res.py | ITZProGamerDieYT/SpeedrunningTimerLinux | 4383c8fdfff476fdb81a99a1d6271218e6e9eee3 | [
"CC-BY-3.0"
] | 7 | 2021-09-16T06:21:44.000Z | 2022-03-18T03:11:25.000Z | lib/Xlib/ext/res.py | ITZProGamerDieYT/SpeedrunningTimerLinux | 4383c8fdfff476fdb81a99a1d6271218e6e9eee3 | [
"CC-BY-3.0"
] | 3 | 2021-09-20T13:08:43.000Z | 2022-03-18T03:09:08.000Z | # Xlib.ext.res -- X-Resource extension module
#
# Copyright (C) 2021 Aleksei Bavshin <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street,
# Fifth Floor,
# Boston, MA 02110-1301 USA
"""X-Resource extension allows a client to query the X server about its usage
of various resources.
For detailed description see any of the following documents.
Protocol specification:
https://www.x.org/releases/current/doc/resourceproto/resproto.txt
XCB Protocol specification:
https://cgit.freedesktop.org/xcb/proto/tree/src/res.xml
"""
from Xlib.protocol import rq
RES_MAJOR_VERSION = 1
RES_MINOR_VERSION = 2
extname = "X-Resource"
# v1.0
ResQueryVersion = 0
ResQueryClients = 1
ResQueryClientResources = 2
ResQueryClientPixmapBytes = 3
# v1.2
ResQueryClientIds = 4
ResQueryResourceBytes = 5
def query_version(self, client_major=RES_MAJOR_VERSION,
client_minor=RES_MINOR_VERSION):
""" Query the protocol version supported by the X server.
The client sends the highest supported version to the server and the
server sends the highest version it supports, but no higher than the
requested version."""
return QueryVersion(
display=self.display,
opcode=self.display.get_extension_major(extname),
client_major=client_major,
client_minor=client_minor)
Client = rq.Struct(
rq.Card32("resource_base"),
rq.Card32("resource_mask"))
def query_clients(self):
"""Request the list of all currently connected clients."""
return QueryClients(
display=self.display,
opcode=self.display.get_extension_major(extname))
Type = rq.Struct(
rq.Card32("resource_type"),
rq.Card32("count"))
def query_client_resources(self, client):
"""Request the number of resources owned by a client.
The server will return the counts of each type of resource.
"""
return QueryClientResources(
display=self.display,
opcode=self.display.get_extension_major(extname),
client=client)
def query_client_pixmap_bytes(self, client):
"""Query the pixmap usage of some client.
The returned number is a sum of memory usage of each pixmap that can be
attributed to the given client.
"""
return QueryClientPixmapBytes(
display=self.display,
opcode=self.display.get_extension_major(extname),
client=client)
ClientXIDMask = 1 << 0
LocalClientPIDMask = 1 << 1
ClientIdSpec = rq.Struct(
rq.Card32("client"),
rq.Card32("mask"))
# Helper assumed here so that the ClientIdValue struct below resolves; the
# original definition is not shown. Sketch: a LengthOf variant that stores the
# size of a variable-length field in bytes and converts it to an item count.
class SizeOf(rq.LengthOf):
    def __init__(self, name, size, item_size):
        rq.LengthOf.__init__(self, name, size)
        self.item_size = item_size
    def parse_value(self, length, display):
        return length // self.item_size
ClientIdValue = rq.Struct(
rq.Object("spec", ClientIdSpec),
SizeOf("value", 4, 4),
rq.List("value", rq.Card32Obj))
def query_client_ids(self, specs):
"""Request to identify a given set of clients with some identification method.
The request sends a list of specifiers that select clients and
identification methods to server. The server then tries to identify the
chosen clients using the identification methods specified for each client.
The server returns IDs for those clients that were successfully identified.
"""
return QueryClientIds(
display=self.display,
opcode=self.display.get_extension_major(extname),
specs=specs)
ResourceIdSpec = rq.Struct(
rq.Card32("resource"),
rq.Card32("type"))
ResourceSizeSpec = rq.Struct(
# inline struct ResourceIdSpec to work around
# a parser bug with nested objects
rq.Card32("resource"),
rq.Card32("type"),
rq.Card32("bytes"),
rq.Card32("ref_count"),
rq.Card32("use_count"))
ResourceSizeValue = rq.Struct(
rq.Object("size", ResourceSizeSpec),
rq.LengthOf("cross_references", 4),
rq.List("cross_references", ResourceSizeSpec))
def query_resource_bytes(self, client, specs):
"""Query the sizes of resources from X server.
The request sends a list of specifiers that selects resources for size
calculation. The server tries to calculate the sizes of chosen resources
and returns an estimate for a resource only if the size could be determined
"""
return QueryResourceBytes(
display=self.display,
opcode=self.display.get_extension_major(extname),
client=client,
specs=specs)
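# Illustrative usage sketch, not part of the original module. Assumptions: the
# extension is initialised the usual python-xlib way (the init()/registration
# code is not shown here), the requests above are exposed on Display under
# res_* names, and reply attributes follow the structs defined above.
if __name__ == '__main__':
    from Xlib import display
    _d = display.Display()
    _d.res_query_version(RES_MAJOR_VERSION, RES_MINOR_VERSION)
    for _client in _d.res_query_clients().clients:
        print(_client.resource_base, _client.resource_mask)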
| 31.986159 | 83 | 0.609801 |
810a45957301a3d3e19c056d8cdd8e9cf5349711 | 1,690 | py | Python | rubra/cmdline_args.py | scwatts/rubra | 0be2c1e8d56badf134954baab9705f3aeb38d426 | [
"MIT"
] | 14 | 2015-04-13T04:10:43.000Z | 2022-03-28T08:42:43.000Z | rubra/cmdline_args.py | afcarl/rubra | 82905bbbd7077d201363b96ffbbc78c099095764 | [
"MIT"
] | 3 | 2016-12-27T17:24:04.000Z | 2018-12-21T17:43:36.000Z | rubra/cmdline_args.py | afcarl/rubra | 82905bbbd7077d201363b96ffbbc78c099095764 | [
"MIT"
] | 9 | 2015-04-29T03:00:16.000Z | 2020-01-30T00:56:52.000Z | # Process the unix command line of the pipeline.
import argparse
from version import rubra_version
parser = argparse.ArgumentParser(
description='A bioinformatics pipeline system.')
parser.add_argument(
'pipeline',
metavar='PIPELINE_FILE',
type=str,
help='Your Ruffus pipeline stages (a Python module)')
parser.add_argument(
'--config',
metavar='CONFIG_FILE',
type=str,
nargs='+',
required=True,
help='One or more configuration files (Python modules)')
parser.add_argument(
'--verbose',
type=int,
choices=(0, 1, 2),
required=False,
default=1,
help='Output verbosity level: 0 = quiet; 1 = normal; \
2 = chatty (default is 1)')
parser.add_argument(
'--style',
type=str,
choices=('print', 'run', 'flowchart', 'touchfiles'),
required=False,
default='print',
help='Pipeline behaviour: print; run; touchfiles; flowchart (default is print)')
parser.add_argument(
'--force',
metavar='TASKNAME',
type=str,
required=False,
default=[],
nargs='+',
help='tasks which are forced to be out of date regardless of timestamps')
parser.add_argument(
'--end',
metavar='TASKNAME',
type=str,
required=False,
help='end points (tasks) for the pipeline')
parser.add_argument(
'--rebuild',
type=str,
choices=('fromstart', 'fromend'),
required=False,
default='fromstart',
help='rebuild outputs by working back from end tasks or forwards \
from start tasks (default is fromstart)')
parser.add_argument(
'--version', action='version', version='%(prog)s ' + rubra_version)
| 26.825397 | 84 | 0.657396 |
810ae31f2bd87d1c18d17e372bfc5f6a1cddf8a3 | 268 | py | Python | main.py | KH241/Geohashing | d5d51278776c4dc0e3d6e6c39cbd31c1f4442fc1 | [
"CC0-1.0"
] | null | null | null | main.py | KH241/Geohashing | d5d51278776c4dc0e3d6e6c39cbd31c1f4442fc1 | [
"CC0-1.0"
] | null | null | null | main.py | KH241/Geohashing | d5d51278776c4dc0e3d6e6c39cbd31c1f4442fc1 | [
"CC0-1.0"
] | null | null | null | import webbrowser
import config
from Generator import Generator
if __name__ == '__main__':
main()
| 17.866667 | 67 | 0.735075 |
810b128cc1280e3c864be85f0fd7db633ecb097d | 35,104 | py | Python | knx-test.py | WAvdBeek/CoAPthon3 | 5aa9d6a6d9a2903d86b113da538df9bd970e6b44 | [
"MIT"
] | 1 | 2021-11-05T08:04:33.000Z | 2021-11-05T08:04:33.000Z | knx-test.py | WAvdBeek/CoAPthon3 | 5aa9d6a6d9a2903d86b113da538df9bd970e6b44 | [
"MIT"
] | 1 | 2021-07-21T12:40:54.000Z | 2021-07-21T14:42:42.000Z | knx-test.py | WAvdBeek/CoAPthon3 | 5aa9d6a6d9a2903d86b113da538df9bd970e6b44 | [
"MIT"
] | 1 | 2021-07-20T10:18:17.000Z | 2021-07-20T10:18:17.000Z | #!/usr/bin/env python
import getopt
import socket
import sys
import cbor
#from cbor2 import dumps, loads
import json
import time
import traceback
from coapthon.client.helperclient import HelperClient
from coapthon.utils import parse_uri
from coapthon import defines
client = None
paths = {}
paths_extend = {}
my_base = ""
# no json tags as strings
# id ==> 0
# href ==> 11
# ga ==> 7
# cflag ==> 8
# id ==> 0
# ia ==> 12
# path ==> 112
# url ==> 10
# ga ==> 7
# id ==> 0
# ia ==> 12
# path ==> 112
# url ==> 10
# ga ==> 7
# cmd ==> 2
# ./knx resource
# sia ==> 4
# ga ==> 7
# st 6
# ./knx resource
if __name__ == '__main__': # pragma: no cover
main()
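# Illustration of the integer-tag convention listed above (hypothetical values;
# the real payload construction lived in helper functions that are not shown):
# an entry keyed by sia (4), ga (7) and st (6) would be CBOR-encoded as
#     cbor.dumps({4: 123, 7: 1, 6: "w"})
# rather than as JSON with string keys like {"sia": 123, "ga": 1, "st": "w"}.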
| 33.786333 | 149 | 0.546206 |
810b43515d14811ec32c14454930c4b55606640a | 210 | py | Python | SWIM-Executables/Windows/pyinstaller-2.0 for windows/PyInstaller/hooks/hook-PyQt4.phonon.py | alexsigaras/SWIM | 1a35df8acb26bdcb307a1b8f60e9feba68ed1715 | [
"MIT"
] | 47 | 2020-03-08T08:43:28.000Z | 2022-03-18T18:51:55.000Z | SWIM-Executables/Windows/pyinstaller-2.0 for windows/PyInstaller/hooks/hook-PyQt4.phonon.py | alexsigaras/SWIM | 1a35df8acb26bdcb307a1b8f60e9feba68ed1715 | [
"MIT"
] | null | null | null | SWIM-Executables/Windows/pyinstaller-2.0 for windows/PyInstaller/hooks/hook-PyQt4.phonon.py | alexsigaras/SWIM | 1a35df8acb26bdcb307a1b8f60e9feba68ed1715 | [
"MIT"
] | 16 | 2020-03-08T08:43:30.000Z | 2022-01-10T22:05:57.000Z | hiddenimports = ['sip', 'PyQt4.QtGui', 'PyQt4._qt']
from PyInstaller.hooks.hookutils import qt4_plugins_binaries
| 23.333333 | 63 | 0.757143 |
810b6e9e54a3c45eed3b42ac6920a9d12535f63c | 6,579 | py | Python | PyTradier/data.py | zlopez101/PyTradier | 83397cf38bd636c471993b57fb71a12885affcb7 | [
"MIT"
] | 1 | 2021-04-30T23:59:20.000Z | 2021-04-30T23:59:20.000Z | PyTradier/data.py | zlopez101/PyTradier | 83397cf38bd636c471993b57fb71a12885affcb7 | [
"MIT"
] | 7 | 2021-05-08T00:47:59.000Z | 2021-05-12T01:45:37.000Z | PyTradier/data.py | zlopez101/PyTradier | 83397cf38bd636c471993b57fb71a12885affcb7 | [
"MIT"
] | null | null | null | from PyTradier.base import BasePyTradier
from typing import Union
from datetime import datetime
if __name__ == "__main__":
from utils import printer
data = MarketData()
symbol = "AAPL"
response = data.option_lookup(symbol)
# response = data.option_strike(symbol, dates[0])
printer(response)
| 42.173077 | 289 | 0.637635 |
810c05c71eb3fa5c73eabbeb8e2c1122faa7ac10 | 3,528 | py | Python | joulescope_ui/meter_widget.py | Axel-Jacobsen/pyjoulescope_ui | 7d296b1ead0d36c6524dc399372f7888a340e9fa | [
"Apache-2.0"
] | 1 | 2019-08-08T21:10:26.000Z | 2019-08-08T21:10:26.000Z | joulescope_ui/meter_widget.py | Axel-Jacobsen/pyjoulescope_ui | 7d296b1ead0d36c6524dc399372f7888a340e9fa | [
"Apache-2.0"
] | null | null | null | joulescope_ui/meter_widget.py | Axel-Jacobsen/pyjoulescope_ui | 7d296b1ead0d36c6524dc399372f7888a340e9fa | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PySide2 import QtCore, QtWidgets
from . import joulescope_rc
from .meter_value_widget import MeterValueWidget
import logging
log = logging.getLogger(__name__)
FIELDS = [
('current', 'A', 'Amps'),
('voltage', 'V', 'Volts'),
('power', 'W', 'Watts'),
('energy', 'J', 'Joules'),
]
| 40.090909 | 122 | 0.693311 |
810c343fb0a1f912fe6668116ca4d1081009f872 | 7,677 | py | Python | rpyc/core/service.py | bbonf/rpyc | 2c66dd6936a0d9e6e36c1ba0cda1139676acf95c | [
"MIT"
] | null | null | null | rpyc/core/service.py | bbonf/rpyc | 2c66dd6936a0d9e6e36c1ba0cda1139676acf95c | [
"MIT"
] | null | null | null | rpyc/core/service.py | bbonf/rpyc | 2c66dd6936a0d9e6e36c1ba0cda1139676acf95c | [
"MIT"
] | null | null | null | """
Services are the heart of RPyC: each side of the connection exposes a *service*,
which defines the capabilities available to the other side.
Note that the services by both parties need not be symmetric, e.g., one side may
expose *service A*, while the other may expose *service B*. As long as the two
can interoperate, you're good to go.
"""
from functools import partial
from rpyc.lib import hybridmethod
from rpyc.lib.compat import execute, is_py3k
from rpyc.core.protocol import Connection
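# Minimal illustration (not part of this module): a service is a class whose
# ``exposed_`` members become available to the peer once a connection is
# served over it, e.g.
#
#     import rpyc
#
#     class CalculatorService(rpyc.Service):
#         def exposed_add(self, a, b):
#             return a + b
#
#     # client side, assuming a server publishes CalculatorService on port 18861
#     conn = rpyc.connect("localhost", 18861)
#     print(conn.root.add(2, 3))   # -> 5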
| 34.426009 | 86 | 0.64804 |
810c8d17b4b4f09855fad0d286b79401e57777c2 | 307 | py | Python | tests/task/manager_test.py | altenia/taskmator | 4090d414125614a57649c5c92a017c12a231a2ef | [
"MIT"
] | 2 | 2015-06-06T04:59:53.000Z | 2020-08-15T22:45:01.000Z | tests/task/manager_test.py | altenia/taskmator | 4090d414125614a57649c5c92a017c12a231a2ef | [
"MIT"
] | 1 | 2015-06-06T05:02:24.000Z | 2015-06-06T05:02:24.000Z | tests/task/manager_test.py | altenia/taskmator | 4090d414125614a57649c5c92a017c12a231a2ef | [
"MIT"
] | null | null | null | import unittest
from testbase import TaskmatorTestBase
from taskmator.task import core, util
from taskmator import context
if __name__ == '__main__':
unittest.main()
| 17.055556 | 38 | 0.710098 |
810ccb8df33ca9c859d68156c3d23f37b798cbf1 | 1,301 | py | Python | tests/components/zwave_js/test_discovery.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 1 | 2020-12-18T12:23:04.000Z | 2020-12-18T12:23:04.000Z | tests/components/zwave_js/test_discovery.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 60 | 2020-07-06T15:10:30.000Z | 2022-03-31T06:01:46.000Z | tests/components/zwave_js/test_discovery.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 4 | 2017-01-10T04:17:33.000Z | 2021-09-02T16:37:24.000Z | """Test discovery of entities for device-specific schemas for the Z-Wave JS integration."""
| 34.236842 | 91 | 0.731745 |
810dcd1a1c119a6c004be66c020243fbafedf1ee | 5,229 | py | Python | boto3_type_annotations/boto3_type_annotations/guardduty/client.py | cowboygneox/boto3_type_annotations | 450dce1de4e066b939de7eac2ec560ed1a7ddaa2 | [
"MIT"
] | 119 | 2018-12-01T18:20:57.000Z | 2022-02-02T10:31:29.000Z | boto3_type_annotations/boto3_type_annotations/guardduty/client.py | cowboygneox/boto3_type_annotations | 450dce1de4e066b939de7eac2ec560ed1a7ddaa2 | [
"MIT"
] | 15 | 2018-11-16T00:16:44.000Z | 2021-11-13T03:44:18.000Z | boto3_type_annotations/boto3_type_annotations/guardduty/client.py | cowboygneox/boto3_type_annotations | 450dce1de4e066b939de7eac2ec560ed1a7ddaa2 | [
"MIT"
] | 11 | 2019-05-06T05:26:51.000Z | 2021-09-28T15:27:59.000Z | from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import List
| 35.331081 | 175 | 0.669344 |
810e20d4bc8d21dc6f3aae023a1133ca2d856392 | 1,218 | py | Python | test/workload/tpch_loop_workload_test.py | ChenYi015/Raven | e732e03f8dd118ed805a143fc6916f0e5fc53c2c | [
"Apache-2.0"
] | 1 | 2022-03-03T05:54:25.000Z | 2022-03-03T05:54:25.000Z | test/workload/tpch_loop_workload_test.py | ChenYi015/Raven | e732e03f8dd118ed805a143fc6916f0e5fc53c2c | [
"Apache-2.0"
] | null | null | null | test/workload/tpch_loop_workload_test.py | ChenYi015/Raven | e732e03f8dd118ed805a143fc6916f0e5fc53c2c | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Raven Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from queue import Queue
from threading import Thread
from benchmark.workload.tpch import TpchLoopWorkload
# Consumer used by the printer thread below (assumption -- the original
# definition is not shown): take queries off the shared queue and print them.
def print_queries(queue: Queue) -> None:
    while True:
        print(queue.get())
if __name__ == '__main__':
workload = TpchLoopWorkload()
print(workload)
queue = Queue()
generate_thread = Thread(
target=workload.generate_one_loop_queries,
args=(queue,),
name='QueryGenerator'
)
generate_thread.start()
print_thread = Thread(
target=print_queries,
args=(queue,),
name='QueryPrinter'
)
print_thread.start()
| 26.478261 | 74 | 0.705255 |
810e3e3e48092c408dee59bf8a6eb974e84689eb | 1,475 | py | Python | Final-Project/server/art/serializers.py | wendy006/Web-Dev-Course | 2f0cfddb7ab4db88ffb4483c7cd4a00abf36c720 | [
"MIT"
] | null | null | null | Final-Project/server/art/serializers.py | wendy006/Web-Dev-Course | 2f0cfddb7ab4db88ffb4483c7cd4a00abf36c720 | [
"MIT"
] | null | null | null | Final-Project/server/art/serializers.py | wendy006/Web-Dev-Course | 2f0cfddb7ab4db88ffb4483c7cd4a00abf36c720 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from .models import *
| 35.97561 | 124 | 0.626441 |
810f24ca6e713fb7958aa28861ebd60291bab8c3 | 2,089 | bzl | Python | google/cloud/google_cloud_cpp_common_unit_tests.bzl | joezqren/google-cloud-cpp | 325d312b0a21569f3c57515aec7d91f3540d3b48 | [
"Apache-2.0"
] | null | null | null | google/cloud/google_cloud_cpp_common_unit_tests.bzl | joezqren/google-cloud-cpp | 325d312b0a21569f3c57515aec7d91f3540d3b48 | [
"Apache-2.0"
] | null | null | null | google/cloud/google_cloud_cpp_common_unit_tests.bzl | joezqren/google-cloud-cpp | 325d312b0a21569f3c57515aec7d91f3540d3b48 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DO NOT EDIT -- GENERATED BY CMake -- Change the CMakeLists.txt file if needed
"""Automatically generated unit tests list - DO NOT EDIT."""
google_cloud_cpp_common_unit_tests = [
"common_options_test.cc",
"future_generic_test.cc",
"future_generic_then_test.cc",
"future_void_test.cc",
"future_void_then_test.cc",
"iam_bindings_test.cc",
"internal/algorithm_test.cc",
"internal/api_client_header_test.cc",
"internal/backoff_policy_test.cc",
"internal/base64_transforms_test.cc",
"internal/big_endian_test.cc",
"internal/compiler_info_test.cc",
"internal/credentials_impl_test.cc",
"internal/env_test.cc",
"internal/filesystem_test.cc",
"internal/format_time_point_test.cc",
"internal/future_impl_test.cc",
"internal/invoke_result_test.cc",
"internal/log_impl_test.cc",
"internal/pagination_range_test.cc",
"internal/parse_rfc3339_test.cc",
"internal/random_test.cc",
"internal/retry_policy_test.cc",
"internal/status_payload_keys_test.cc",
"internal/strerror_test.cc",
"internal/throw_delegate_test.cc",
"internal/tuple_test.cc",
"internal/type_list_test.cc",
"internal/user_agent_prefix_test.cc",
"internal/utility_test.cc",
"kms_key_name_test.cc",
"log_test.cc",
"options_test.cc",
"polling_policy_test.cc",
"project_test.cc",
"status_or_test.cc",
"status_test.cc",
"stream_range_test.cc",
"terminate_handler_test.cc",
"tracing_options_test.cc",
]
| 34.245902 | 79 | 0.727621 |
8111119b844622ccdb3004ede98c4e13a46f452c | 398 | py | Python | api/tests/ver1/test_base.py | codacy-badger/politico-api | 10d926bf34f12631cb19bb9c82ccded36557c790 | [
"MIT"
] | null | null | null | api/tests/ver1/test_base.py | codacy-badger/politico-api | 10d926bf34f12631cb19bb9c82ccded36557c790 | [
"MIT"
] | null | null | null | api/tests/ver1/test_base.py | codacy-badger/politico-api | 10d926bf34f12631cb19bb9c82ccded36557c790 | [
"MIT"
] | null | null | null | import unittest
from api import create_app
| 23.411765 | 49 | 0.640704 |
811134f08b2c67534a9093ee9d1a20f045af6b48 | 865 | py | Python | socialdistribution/app/templatetags/filters.py | CMPUT404-Project-Group/CMPUT404-Group-Project | e541cc609f260d7221fe0be8975c5b2444d74af0 | [
"W3C-20150513"
] | null | null | null | socialdistribution/app/templatetags/filters.py | CMPUT404-Project-Group/CMPUT404-Group-Project | e541cc609f260d7221fe0be8975c5b2444d74af0 | [
"W3C-20150513"
] | 44 | 2021-10-14T15:44:46.000Z | 2021-12-05T00:57:23.000Z | socialdistribution/app/templatetags/filters.py | CMPUT404-Project-Group/Social-Distribution-CMPUT404-Group-Project | e541cc609f260d7221fe0be8975c5b2444d74af0 | [
"W3C-20150513"
] | 1 | 2021-12-07T01:14:14.000Z | 2021-12-07T01:14:14.000Z | from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import SafeString
import markdown
import urllib
register = template.Library() | 23.378378 | 76 | 0.721387 |
81118158b2fe646b1e3b2899f2e0b74a521117c9 | 3,234 | py | Python | alipay/aop/api/domain/MetroOdItem.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/MetroOdItem.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/MetroOdItem.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.CloudbusUserInfo import CloudbusUserInfo
| 26.95 | 70 | 0.548856 |
81145bece0e3560e4fd661b7085c6a1e4f6811f2 | 910 | py | Python | djangocms_redirect/migrations/0003_auto_20190810_1009.py | vsalat/djangocms-redirect | a2577f08430b6b65ae4a51293f861b697bf4ab9d | [
"BSD-3-Clause"
] | null | null | null | djangocms_redirect/migrations/0003_auto_20190810_1009.py | vsalat/djangocms-redirect | a2577f08430b6b65ae4a51293f861b697bf4ab9d | [
"BSD-3-Clause"
] | null | null | null | djangocms_redirect/migrations/0003_auto_20190810_1009.py | vsalat/djangocms-redirect | a2577f08430b6b65ae4a51293f861b697bf4ab9d | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.2.4 on 2019-08-10 08:09
from django.db import migrations, models
| 37.916667 | 239 | 0.679121 |
811898bc6c0124ca8489662af03fc5f7195a1876 | 5,191 | py | Python | octopart/scrape_octopart.py | nicholaschiang/dl-datasheets | 1c5ab2545a85c1ea7643fc655005259544617d90 | [
"MIT"
] | null | null | null | octopart/scrape_octopart.py | nicholaschiang/dl-datasheets | 1c5ab2545a85c1ea7643fc655005259544617d90 | [
"MIT"
] | null | null | null | octopart/scrape_octopart.py | nicholaschiang/dl-datasheets | 1c5ab2545a85c1ea7643fc655005259544617d90 | [
"MIT"
] | 1 | 2019-12-07T20:13:06.000Z | 2019-12-07T20:13:06.000Z | #! /usr/bin/env python
import sys
import json
import urllib
import urllib2
import time
import argparse
import re
# Category ID for Discrete Semiconductors > Transistors > BJTs
TRANSISTOR_ID = 'b814751e89ff63d3'
def find_total_hits(search_query):
"""
Function: find_total_hits
--------------------
Returns the number of hits that correspond to the search query.
"""
url = "http://octopart.com/api/v3/categories/"
# NOTE: Use your API key here (https://octopart.com/api/register)
url += "?apikey=09b32c6c"
args = [
('q', search_query),
('start', 0),
('limit', 1), #change to increase number of datasheets
('include[]','datasheets')
]
url += '&' + urllib.urlencode(args)
data = urllib.urlopen(url).read() # perform a SearchRequest
search_response = json.loads(data) # Grab the SearchResponse
# return number of hits
return search_response['hits']
def download_datasheets(search_query):
"""
Function: download_datasheets
--------------------
Uses the OctoPart API to download all datasheets associated with a given
set of search keywords.
"""
MAX_RESULTS = 100
counter = 0
total_hits = find_total_hits(search_query)
# print number of hits
print "[info] Search Response Hits: %s" % (total_hits)
# Calculate how many multiples of 100s of hits there are
num_hundreds = total_hits / MAX_RESULTS
print "[info] Performing %s iterations of %s results." % (num_hundreds, MAX_RESULTS)
for i in range(num_hundreds+1):
url = "http://octopart.com/api/v3/parts/search"
# NOTE: Use your API key here (https://octopart.com/api/register)
url += "?apikey=09b32c6c"
args = [
('q', search_query),
('start', (i * MAX_RESULTS)),
('limit', MAX_RESULTS), # change to edit number of datasheets
('include[]','datasheets')
# ('include[]','specs'),
# ('include[]','descriptions')
]
url += '&' + urllib.urlencode(args)
data = urllib.urlopen(url).read() # perform a SearchRequest
search_response = json.loads(data) # Grab the SearchResponse
# Iterate through the SearchResults in the SearchResponse
if not search_response.get('results'):
print "[error] no results returned in outer loop: " + str(i)
continue
for result in search_response['results']:
part = result['item'] # Grab the Part in the SearchResult
print ("[info] %s_%s..." % (part['brand']['name'].replace(" ", ""), part['mpn'])),
sys.stdout.flush()
# Iterate through list of datasheets for the given part
for datasheet in part['datasheets']:
# Grab the Datasheet URL
pdflink = datasheet['url']
if pdflink is not None:
# Download the PDF
try:
response = urllib2.urlopen(pdflink)
except urllib2.HTTPError, err:
if err.code == 404:
print "[error] Page not found!...",
elif err.code == 403:
print "[error] Access Denied!...",
else:
print "[error] HTTP Error code ", err.code,
continue; # advance to next datasheet rather than crashing
try:
filename = re.search('([^/]*)\.[^.]*$', datasheet['url']).group(1)
except AttributeError:
continue; # skip to next datasheet rather than crashing
file = open("../datasheets/%s.pdf" % filename, 'w')
file.write(response.read())
file.close()
counter += 1 # Increment the counter of files downloaded
# NOTE: Not sure if this is necessary. Just a precaution.
time.sleep(0.4) # Limit ourselves to 3 HTTP Requests/second
print("DONE")
print("[info] %s Parts Completed." % MAX_RESULTS)
print("[info] COMPLETED: %s datasheets for the query were downloaded." % counter)
def parse_args():
"""
Function: parse_args
--------------------
Parse the arguments for the Octopart Datasheet Scraper
"""
# Define what commandline arguments can be accepted
parser = argparse.ArgumentParser()
parser.add_argument('query',metavar="\"SEARCH_KEYWORDS\"",
help="keywords to query in quotes (required)")
parser.add_argument('--version', action='version', version='%(prog)s 0.1.0')
args = parser.parse_args()
return args.query
# Main Function
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
search_query = parse_args() # Parse commandline arguments
start_time = time.time()
print "[info] Download datasheets for %s" % search_query
download_datasheets(search_query)
finish_time = time.time()
print '[info] Took', finish_time - start_time, 'sec total.'
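# Example invocation (hypothetical keywords), run from a shell:
#   python scrape_octopart.py "2N2222 NPN transistor"
# The quoted search string is the single required positional argument parsed by
# parse_args() above; downloaded PDFs are written to ../datasheets/.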
| 38.169118 | 97 | 0.571374 |
811909fd3d9bc00f5888c3293282a4df3cefdd8c | 14,970 | py | Python | extras/python/fogbench/__main__.py | foglamp/FogLAMP | 918dff88b440e6ad580efdaa5f0fbdf4143a73d4 | [
"Apache-2.0"
] | 65 | 2017-05-15T21:55:04.000Z | 2022-01-19T01:30:42.000Z | extras/python/fogbench/__main__.py | foglamp/FogLAMP | 918dff88b440e6ad580efdaa5f0fbdf4143a73d4 | [
"Apache-2.0"
] | 576 | 2017-05-22T05:41:07.000Z | 2020-02-13T07:48:58.000Z | extras/python/fogbench/__main__.py | foglamp/FogLAMP | 918dff88b440e6ad580efdaa5f0fbdf4143a73d4 | [
"Apache-2.0"
] | 52 | 2017-05-09T22:45:47.000Z | 2022-03-10T18:49:02.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" fogbench -- a Python script used to test FogLAMP.
The objective is to simulate payloads for input, REST and other requests against one or
more FogLAMP instances. This version of fogbench is meant to test the CoAP and HTTP plugins
interface of FogLAMP southbound services.
fogbench
[IN] -h --help Print this help
-i --interval The interval in seconds between each iteration (default: 0)
[IN] -k --keep Do not delete (keep) the running sample (default: no)
[IN] -o --output Set the output file for statistics
[IN] -p --payload Type of payload and protocol (default: coap)
[IN] -t --template Set the template to use
[IN] -v --version Display the version and exit
[IN] -H --host The FogLAMP host (default: localhost)
-I --iterations The number of iterations of the test (default: 1)
[IN] -O --occurrences The number of occurrences of the template (default: 1)
[IN] -P --port The FogLAMP port. Default depends on payload and protocol
[IN] -S --statistic The type of statistics to collect
Example:
$ cd $FOGLAMP_ROOT/bin
$ ./fogbench
Help:
$ ./fogbench -h
* Create reading objects from given template, as per the json file name specified with -t
* Save those objects to the file, as per the file name specified with -o
* Read those objects
* Send those to CoAP or HTTP south plugin server, on specific host and port
.. todo::
* Try generators
"""
import sys
import os
import random
import json
from datetime import datetime, timezone
import argparse
import collections
import asyncio
import aiohttp
from .exceptions import *
__author__ = "Praveen Garg"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_FOGBENCH_VERSION = u"0.1.1"
_start_time = []
_end_time = []
_tot_msgs_transferred = []
_tot_byte_transferred = []
_num_iterated = 0
"""Statistics to be collected"""
# _logger = logger.setup(__name__)
def local_timestamp():
"""
:return: str - current time stamp with microseconds and machine timezone info
:example '2018-05-08 14:06:40.517313+05:30'
"""
return str(datetime.now(timezone.utc).astimezone())
def get_statistics(_stats_type=None, _out_file=None):
stat = ''
global _start_time
global _end_time
global _tot_msgs_transferred
global _tot_byte_transferred
global _num_iterated
if _stats_type == 'total':
stat += u"Total Statistics:\n"
stat += (u"\nStart Time: {}".format(datetime.strftime(_start_time[0], "%Y-%m-%d %H:%M:%S.%f")))
stat += (u"\nEnd Time: {}\n".format(datetime.strftime(_end_time[-1], "%Y-%m-%d %H:%M:%S.%f")))
stat += (u"\nTotal Messages Transferred: {}".format(sum(_tot_msgs_transferred)))
stat += (u"\nTotal Bytes Transferred: {}\n".format(sum(_tot_byte_transferred)))
stat += (u"\nTotal Iterations: {}".format(_num_iterated))
stat += (u"\nTotal Messages per Iteration: {}".format(sum(_tot_msgs_transferred)/_num_iterated))
stat += (u"\nTotal Bytes per Iteration: {}\n".format(sum(_tot_byte_transferred)/_num_iterated))
_msg_rate = []
_byte_rate = []
for itr in range(_num_iterated):
time_taken = _end_time[itr] - _start_time[itr]
_msg_rate.append(_tot_msgs_transferred[itr]/(time_taken.seconds+time_taken.microseconds/1E6))
_byte_rate.append(_tot_byte_transferred[itr] / (time_taken.seconds+time_taken.microseconds/1E6))
stat += (u"\nMin messages/second: {}".format(min(_msg_rate)))
stat += (u"\nMax messages/second: {}".format(max(_msg_rate)))
stat += (u"\nAvg messages/second: {}\n".format(sum(_msg_rate)/_num_iterated))
stat += (u"\nMin Bytes/second: {}".format(min(_byte_rate)))
stat += (u"\nMax Bytes/second: {}".format(max(_byte_rate)))
stat += (u"\nAvg Bytes/second: {}".format(sum(_byte_rate)/_num_iterated))
if _out_file:
with open(_out_file, 'w') as f:
f.write(stat)
else:
print(stat)
# should we also show total time diff? end_time - start_time
def check_server(payload_type='coap'):
template_str = ">>> Make sure south {} plugin service is running \n & listening on specified host and port \n"
if payload_type == 'coap':
print(template_str.format("CoAP"))
elif payload_type == 'http':
print(template_str.format("HTTP"))
parser = argparse.ArgumentParser(prog='fogbench')
parser.description = '%(prog)s -- a Python script used to test FogLAMP (simulate payloads)'
parser.epilog = 'The initial version of %(prog)s is meant to test the south plugin interface of ' \
'FogLAMP using CoAP or HTTP'
parser.add_argument('-v', '--version', action='version', version='%(prog)s {0!s}'.format(_FOGBENCH_VERSION))
parser.add_argument('-k', '--keep', default=False, choices=['y', 'yes', 'n', 'no'],
help='Do not delete the running sample (default: no)')
parser.add_argument('-t', '--template', required=True, help='Set the template file, json extension')
parser.add_argument('-o', '--output', default=None, help='Set the statistics output file')
parser.add_argument('-p', '--payload', default='coap', choices=['coap', 'http'], help='Type of payload '
'and protocol (default: coap)')
parser.add_argument('-I', '--iterations', help='The number of iterations of the test (default: 1)')
parser.add_argument('-O', '--occurrences', help='The number of occurrences of the template (default: 1)')
parser.add_argument('-H', '--host', help='Server host address (default: localhost)')
parser.add_argument('-P', '--port', help='The FogLAMP port. (default: 5683)')
parser.add_argument('-i', '--interval', default=0, help='The interval in seconds for each iteration (default: 0)')
parser.add_argument('-S', '--statistics', default='total', choices=['total'], help='The type of statistics to collect '
'(default: total)')
namespace = parser.parse_args(sys.argv[1:])
infile = '{0}'.format(namespace.template if namespace.template else '')
statistics_file = os.path.join(os.path.dirname(__file__), "out/{}".format(namespace.output)) if namespace.output else None
keep_the_file = True if namespace.keep in ['y', 'yes'] else False
# iterations and occurrences
arg_iterations = int(namespace.iterations) if namespace.iterations else 1
arg_occurrences = int(namespace.occurrences) if namespace.occurrences else 1
# interval between each iteration
arg_interval = int(namespace.interval) if namespace.interval else 0
arg_stats_type = '{0}'.format(namespace.statistics) if namespace.statistics else 'total'
if namespace.payload:
arg_payload_protocol = namespace.payload
arg_host = '{0}'.format(namespace.host) if namespace.host else 'localhost'
default_port = 6683 if arg_payload_protocol == 'http' else 5683
arg_port = int(namespace.port) if namespace.port else default_port
check_server(arg_payload_protocol)
sample_file = os.path.join("/tmp", "foglamp_running_sample.{}".format(os.getpid()))
parse_template_and_prepare_json(_template_file=infile, _write_to_file=sample_file, _occurrences=arg_occurrences)
read_out_file(_file=sample_file, _keep=keep_the_file, _iterations=arg_iterations, _interval=arg_interval,
send_to=arg_payload_protocol)
get_statistics(_stats_type=arg_stats_type, _out_file=statistics_file)
# TODO: Change below per local_timestamp() values
""" Expected output from given template
{
"timestamp" : "2017-08-04T06:59:57.503Z",
"asset" : "TI sensorTag/luxometer",
"sensor_values" : { "lux" : 49 }
}
{
"timestamp" : "2017-08-04T06:59:57.863Z",
"asset" : "TI sensorTag/pressure",
"sensor_values" : { "pressure" : 1021.2 }
}
{
"timestamp" : "2017-08-04T06:59:58.863Z",
"asset" : "TI sensorTag/humidity",
"sensor_values" : { "humidity" : 71.2, "temperature" : 18.6 }
}
{
"timestamp" : "2017-08-04T06:59:59.863Z",
"asset" : "TI sensorTag/temperature",
"sensor_values" : { "object" : 18.2, "ambient" : 21.6 }
}
{
"timestamp" : "2017-08-04T07:00:00.863Z",
"asset" : "TI sensorTag/accelerometer",
"sensor_values" : { "x" : 1.2, "y" : 0.0, "z" : -0.6 }
}
{
"timestamp" : "2017-08-04T07:00:01.863Z",
"asset" : "TI sensorTag/gyroscope",
"sensor_values" : { "x" : 101.2, "y" : 46.2, "z" : -12.6 }
}
{
"timestamp" : "2017-08-04T07:00:02.863Z",
"asset" : "TI sensorTag/magnetometer",
"sensor_values" : { "x" : 101.2, "y" : 46.2, "z" : -12.6 }
}
{
"timestamp" : "2017-08-04T07:00:03.863Z",
"asset" : "mouse",
"sensor_values" : { "button" : "down" }
}
{
"timestamp" : "2017-08-04T07:00:04.863Z",
"asset" : "wall clock",
"sensor_values" : { "tick" : "tock" }
}
"""
| 36.601467 | 122 | 0.638277 |
81197e9fdd38be14f8210f08e7cec2020796f260 | 19,888 | py | Python | qiskit/ignis/mitigation/measurement/filters.py | paulineollitrault/qiskit-ignis | 99f24ea6533cd284be4c44a48d43e54f62f05674 | [
"Apache-2.0"
] | 182 | 2019-02-19T22:52:42.000Z | 2022-02-28T05:48:07.000Z | qiskit/ignis/mitigation/measurement/filters.py | paulineollitrault/qiskit-ignis | 99f24ea6533cd284be4c44a48d43e54f62f05674 | [
"Apache-2.0"
] | 384 | 2019-02-19T21:30:18.000Z | 2021-12-02T21:13:34.000Z | qiskit/ignis/mitigation/measurement/filters.py | paulineollitrault/qiskit-ignis | 99f24ea6533cd284be4c44a48d43e54f62f05674 | [
"Apache-2.0"
] | 203 | 2019-02-19T21:06:27.000Z | 2022-03-02T14:16:50.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=cell-var-from-loop,invalid-name
"""
Measurement correction filters.
"""
from typing import List, Union
from copy import deepcopy
from scipy.optimize import minimize
import scipy.linalg as la
import numpy as np
import qiskit
from qiskit import QiskitError
from qiskit.tools import parallel_map
from qiskit.ignis.verification.tomography import count_keys
def _apply_correction(self, resultidx, raw_data, method):
"""Wrapper to call apply with a counts dictionary."""
new_counts = self.apply(
raw_data.get_counts(resultidx), method=method)
return resultidx, new_counts
class TensoredFilter():
"""
Tensored measurement error mitigation filter.
Produced from a tensored measurement calibration fitter and can be applied
to data.
"""
def __init__(self,
cal_matrices: np.matrix,
substate_labels_list: list,
mit_pattern: list):
"""
Initialize a tensored measurement error mitigation filter using
the cal_matrices from a tensored measurement calibration fitter.
        A simple usage of this class is explained [here]
(https://qiskit.org/documentation/tutorials/noise/3_measurement_error_mitigation.html).
Args:
cal_matrices: the calibration matrices for applying the correction.
substate_labels_list: for each calibration matrix
a list of the states (as strings, states in the subspace)
mit_pattern: for each calibration matrix
a list of the logical qubit indices (as int, states in the subspace)
"""
self._cal_matrices = cal_matrices
self._qubit_list_sizes = []
self._indices_list = []
self._substate_labels_list = []
self.substate_labels_list = substate_labels_list
self._mit_pattern = mit_pattern
def apply(self,
raw_data: Union[qiskit.result.result.Result, dict],
method: str = 'least_squares',
meas_layout: List[int] = None):
"""
Apply the calibration matrices to results.
Args:
raw_data (dict or Result): The data to be corrected. Can be in one of two forms:
* A counts dictionary from results.get_counts
* A Qiskit Result
method (str): fitting method. The following methods are supported:
* 'pseudo_inverse': direct inversion of the cal matrices.
Mitigated counts can contain negative values
and the sum of counts would not equal to the shots.
Mitigation is conducted qubit wise:
For each qubit, mitigate the whole counts using the calibration matrices
which affect the corresponding qubit.
For example, assume we are mitigating the 3rd bit of the 4-bit counts
using '2\times 2' calibration matrix `A_3`.
When mitigating the count of '0110' in this step,
the following formula is applied:
`count['0110'] = A_3^{-1}[1, 0]*count['0100'] + A_3^{-1}[1, 1]*count['0110']`.
The total time complexity of this method is `O(m2^{n + t})`,
where `n` is the size of calibrated qubits,
`m` is the number of sets in `mit_pattern`,
and `t` is the size of largest set of mit_pattern.
If the `mit_pattern` is shaped like `[[0], [1], [2], ..., [n-1]]`,
which corresponds to the tensor product noise model without cross-talk,
then the time complexity would be `O(n2^n)`.
If the `mit_pattern` is shaped like `[[0, 1, 2, ..., n-1]]`,
which exactly corresponds to the complete error mitigation,
then the time complexity would be `O(2^(n+n)) = O(4^n)`.
* 'least_squares': constrained to have physical probabilities.
Instead of directly applying inverse calibration matrices,
                this method solves a constrained optimization problem to find
the closest probability vector to the result from 'pseudo_inverse' method.
Sequential least square quadratic programming (SLSQP) is used
in the internal process.
Every updating step in SLSQP takes `O(m2^{n+t})` time.
Since this method is using the SLSQP optimization over
                the vector with length `2^n`, the mitigation for 8 bit counts
with the `mit_pattern = [[0], [1], [2], ..., [n-1]]` would
take 10 seconds or more.
* If `None`, 'least_squares' is used.
meas_layout (list of int): the mapping from classical registers to qubits
* If you measure qubit `2` to clbit `0`, `0` to `1`, and `1` to `2`,
the list becomes `[2, 0, 1]`
* If `None`, flatten(mit_pattern) is used.
Returns:
dict or Result: The corrected data in the same form as raw_data
Raises:
QiskitError: if raw_data is not in a one of the defined forms.
"""
all_states = count_keys(self.nqubits)
num_of_states = 2**self.nqubits
if meas_layout is None:
meas_layout = []
for qubits in self._mit_pattern:
meas_layout += qubits
# check forms of raw_data
if isinstance(raw_data, dict):
# counts dictionary
# convert to list
raw_data2 = [np.zeros(num_of_states, dtype=float)]
for state, count in raw_data.items():
stateidx = int(state, 2)
raw_data2[0][stateidx] = count
elif isinstance(raw_data, qiskit.result.result.Result):
# extract out all the counts, re-call the function with the
# counts and push back into the new result
new_result = deepcopy(raw_data)
new_counts_list = parallel_map(
self._apply_correction,
[resultidx for resultidx, _ in enumerate(raw_data.results)],
task_args=(raw_data, method, meas_layout))
for resultidx, new_counts in new_counts_list:
new_result.results[resultidx].data.counts = new_counts
return new_result
else:
raise QiskitError("Unrecognized type for raw_data.")
if method == 'pseudo_inverse':
pinv_cal_matrices = []
for cal_mat in self._cal_matrices:
pinv_cal_matrices.append(la.pinv(cal_mat))
meas_layout = meas_layout[::-1] # reverse endian
qubits_to_clbits = [-1 for _ in range(max(meas_layout) + 1)]
for i, qubit in enumerate(meas_layout):
qubits_to_clbits[qubit] = i
# Apply the correction
for data_idx, _ in enumerate(raw_data2):
if method == 'pseudo_inverse':
for pinv_cal_mat, pos_qubits, indices in zip(pinv_cal_matrices,
self._mit_pattern,
self._indices_list):
inv_mat_dot_x = np.zeros([num_of_states], dtype=float)
pos_clbits = [qubits_to_clbits[qubit] for qubit in pos_qubits]
for state_idx, state in enumerate(all_states):
first_index = self.compute_index_of_cal_mat(state, pos_clbits, indices)
for i in range(len(pinv_cal_mat)): # i is index of pinv_cal_mat
source_state = self.flip_state(state, i, pos_clbits)
second_index = self.compute_index_of_cal_mat(source_state,
pos_clbits,
indices)
inv_mat_dot_x[state_idx] += pinv_cal_mat[first_index, second_index]\
* raw_data2[data_idx][int(source_state, 2)]
raw_data2[data_idx] = inv_mat_dot_x
elif method == 'least_squares':
x0 = np.random.rand(num_of_states)
x0 = x0 / sum(x0)
nshots = sum(raw_data2[data_idx])
cons = ({'type': 'eq', 'fun': lambda x: nshots - sum(x)})
bnds = tuple((0, nshots) for x in x0)
res = minimize(fun, x0, method='SLSQP',
constraints=cons, bounds=bnds, tol=1e-6)
raw_data2[data_idx] = res.x
else:
raise QiskitError("Unrecognized method.")
# convert back into a counts dictionary
new_count_dict = {}
for state_idx, state in enumerate(all_states):
if raw_data2[0][state_idx] != 0:
new_count_dict[state] = raw_data2[0][state_idx]
return new_count_dict
def flip_state(self, state: str, mat_index: int, flip_poses: List[int]) -> str:
"""Flip the state according to the chosen qubit positions"""
flip_poses = [pos for i, pos in enumerate(flip_poses) if (mat_index >> i) & 1]
flip_poses = sorted(flip_poses)
new_state = ""
pos = 0
for flip_pos in flip_poses:
new_state += state[pos:flip_pos]
new_state += str(int(state[flip_pos], 2) ^ 1) # flip the state
pos = flip_pos + 1
new_state += state[pos:]
return new_state
def compute_index_of_cal_mat(self, state: str, pos_qubits: List[int], indices: dict) -> int:
"""Return the index of (pseudo inverse) calibration matrix for the input quantum state"""
sub_state = ""
for pos in pos_qubits:
sub_state += state[pos]
return indices[sub_state]
def _apply_correction(self,
resultidx: int,
raw_data: qiskit.result.result.Result,
method: str,
meas_layout: List[int]):
"""Wrapper to call apply with a counts dictionary."""
new_counts = self.apply(
raw_data.get_counts(resultidx), method=method, meas_layout=meas_layout)
return resultidx, new_counts
| 40.422764 | 100 | 0.569389 |
811a461fc321525abd67f11d9522903e94b00815 | 3,067 | py | Python | 2017/adv2017-1.py | fcharlier/AdventOfCode | 6b2765da9e4d6f6b1f201897bb56043482a65bb2 | [
"WTFPL"
] | null | null | null | 2017/adv2017-1.py | fcharlier/AdventOfCode | 6b2765da9e4d6f6b1f201897bb56043482a65bb2 | [
"WTFPL"
] | null | null | null | 2017/adv2017-1.py | fcharlier/AdventOfCode | 6b2765da9e4d6f6b1f201897bb56043482a65bb2 | [
"WTFPL"
] | null | null | null | #!/usr/bin/python
def meh(captcha):
"""Returns the sum of the digits which match the next one in the captcha
input string.
>>> meh('1122')
3
>>> meh('1111')
4
>>> meh('1234')
0
>>> meh('91212129')
9
"""
result = 0
for n in range(len(captcha)):
if captcha[n] == captcha[(n + 1) % len(captcha)]:
result += int(captcha[n])
return result
def meh2(captcha):
"""Returns the sum of the digits which match the next one in the captcha
input string.
>>> meh2('1212')
6
>>> meh2('1221')
0
>>> meh2('123425')
4
>>> meh2('123123')
12
>>> meh2('12131415')
4
"""
result = 0
for n in range(len(captcha)):
if captcha[n] == captcha[(n + len(captcha) / 2) % len(captcha)]:
result += int(captcha[n])
return result
if __name__ == '__main__':
input = '57276274387944537823652626177853384411146325384494935924454336611953119173638191671326254832624841593421667683474349154668177743437745965461678636631863541462893547616877914914662358836365421198516263335926544716331814125295712581158399321372683742773423626286669759415959391374744214595682795818615532673877868424196926497731144319736445141728123322962547288572434564178492753681842244888368542423832228211172842456231275738182764232265933625119312598161192193214898949267765417468348935134618964683127194391796165368145548814473129857697989322621368744725685183346825333247866734735894493395218781464346951777873929898961358796274889826894529599645442657423438562423853247543621565468819799931598754753467593832328147439341586125262733737128386961596394728159719292787597426898945198788211417854662948358422729471312456437778978749753927251431677533575752312447488337156956217451965643454445329758327129966657189332824969141448538681979632611199385896965946849725421978137753366252459914913637858783146735469758716752765718189175583956476935185985918536318424248425426398158278111751711911227818826766177996223718837428972784328925743869885232266127727865267881592395643836999244218345184474613129823933659422223685422732186536199153988717455568523781673393698356967355875123554797755491181791593156433735591529495984256519631187849654633243225118132152549712643273819314433877592644693826861523243946998615722951182474773173215527598949553185313259992227879964482121769617218685394776778423378182462422788277997523913176326468957342296368178321958626168785578977414537368686438348124283789748775163821457641135163495649331144436157836647912852483177542224864952271874645274572426458614384917923623627532487625396914111582754953944965462576624728896917137599778828769958626788685374749661741223741834844643725486925886933118382649581481351844943368484853956759877215252766294896496444835264357169642341291412768946589781812493421379575569593678354241223363739129813633236996588711791919421574583924743119867622229659211793468744163297478952475933163259769578345894367855534294493613767564497137369969315192443795512585'
print meh(input)
print meh2(input)
| 61.34 | 2,134 | 0.852625 |
811bbfb3266a619b867f934c6f82a6ecb7783e88 | 111,660 | py | Python | pymatgen/analysis/graphs.py | Roy027/pymatgen | a4aa91d011033c1151b82335abd080e2b1a310d5 | [
"MIT"
] | null | null | null | pymatgen/analysis/graphs.py | Roy027/pymatgen | a4aa91d011033c1151b82335abd080e2b1a310d5 | [
"MIT"
] | null | null | null | pymatgen/analysis/graphs.py | Roy027/pymatgen | a4aa91d011033c1151b82335abd080e2b1a310d5 | [
"MIT"
] | null | null | null | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module for graph representations of crystals.
"""
import copy
import logging
import os.path
import subprocess
import warnings
from collections import defaultdict, namedtuple
from itertools import combinations
from operator import itemgetter
import networkx as nx
import networkx.algorithms.isomorphism as iso
import numpy as np
from monty.json import MSONable
from monty.os.path import which
from networkx.drawing.nx_agraph import write_dot
from networkx.readwrite import json_graph
from scipy.spatial import KDTree
from scipy.stats import describe
from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure
from pymatgen.core.structure import FunctionalGroups
from pymatgen.util.coord import lattice_points_in_supercell
from pymatgen.vis.structure_vtk import EL_COLORS
try:
import igraph
IGRAPH_AVAILABLE = True
except ImportError:
IGRAPH_AVAILABLE = False
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
__author__ = "Matthew Horton, Evan Spotte-Smith, Samuel Blau"
__version__ = "0.1"
__maintainer__ = "Matthew Horton"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "August 2017"
ConnectedSite = namedtuple("ConnectedSite", "site, jimage, index, weight, dist")
def _compare(g1, g2, i1, i2):
"""
Helper function called by isomorphic to ensure comparison of node identities.
"""
return g1.vs[i1]["species"] == g2.vs[i2]["species"]
def _igraph_from_nxgraph(graph):
"""
Helper function that converts a networkx graph object into an igraph graph object.
"""
nodes = graph.nodes(data=True)
new_igraph = igraph.Graph()
for node in nodes:
new_igraph.add_vertex(name=str(node[0]), species=node[1]["specie"], coords=node[1]["coords"])
new_igraph.add_edges([(str(edge[0]), str(edge[1])) for edge in graph.edges()])
return new_igraph
def _isomorphic(frag1, frag2):
"""
Internal function to check if two graph objects are isomorphic, using igraph if
    it is available and networkx if it is not.
"""
f1_nodes = frag1.nodes(data=True)
f2_nodes = frag2.nodes(data=True)
if len(f1_nodes) != len(f2_nodes):
return False
    f1_edges = frag1.edges()
    f2_edges = frag2.edges()
    if len(f1_edges) != len(f2_edges):
return False
f1_comp_dict = {}
f2_comp_dict = {}
for node in f1_nodes:
if node[1]["specie"] not in f1_comp_dict:
f1_comp_dict[node[1]["specie"]] = 1
else:
f1_comp_dict[node[1]["specie"]] += 1
for node in f2_nodes:
if node[1]["specie"] not in f2_comp_dict:
f2_comp_dict[node[1]["specie"]] = 1
else:
f2_comp_dict[node[1]["specie"]] += 1
if f1_comp_dict != f2_comp_dict:
return False
if IGRAPH_AVAILABLE:
ifrag1 = _igraph_from_nxgraph(frag1)
ifrag2 = _igraph_from_nxgraph(frag2)
return ifrag1.isomorphic_vf2(ifrag2, node_compat_fn=_compare)
nm = iso.categorical_node_match("specie", "ERROR")
return nx.is_isomorphic(frag1.to_undirected(), frag2.to_undirected(), node_match=nm)
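# Illustrative sketch (arbitrary example graphs): _isomorphic expects two
# networkx graphs whose nodes carry a "specie" attribute (and "coords", which
# the igraph path also reads), e.g.
#   g1 = nx.MultiDiGraph()
#   g1.add_node(0, specie="C", coords=[0.0, 0.0, 0.0])
#   g1.add_node(1, specie="O", coords=[1.2, 0.0, 0.0])
#   g1.add_edge(0, 1)
#   g2 = nx.MultiDiGraph()
#   g2.add_node(0, specie="O", coords=[0.0, 0.0, 0.0])
#   g2.add_node(1, specie="C", coords=[1.2, 0.0, 0.0])
#   g2.add_edge(0, 1)
#   _isomorphic(g1, g2)  # True: same composition and connectivity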
def insert_node(
self,
i,
species,
coords,
coords_are_cartesian=False,
validate_proximity=False,
site_properties=None,
edges=None,
):
"""
A wrapper around Molecule.insert(), which also incorporates the new
site into the MoleculeGraph.
:param i: Index at which to insert the new site
:param species: Species for the new site
:param coords: 3x1 array representing coordinates of the new site
:param coords_are_cartesian: Whether coordinates are cartesian.
Defaults to False.
:param validate_proximity: For Molecule.insert(); if True (default
False), distance will be checked to ensure that site can be safely
added.
:param site_properties: Site properties for Molecule
:param edges: List of dicts representing edges to be added to the
MoleculeGraph. These edges must include the index of the new site i,
and all indices used for these edges should reflect the
MoleculeGraph AFTER the insertion, NOT before. Each dict should at
least have a "to_index" and "from_index" key, and can also have a
"weight" and a "properties" key.
:return:
"""
self.structure.insert(
i,
species,
coords,
coords_are_cartesian=coords_are_cartesian,
validate_proximity=validate_proximity,
properties=site_properties,
)
mapping = {}
for j in range(len(self.structure) - 1):
if j < i:
mapping[j] = j
else:
mapping[j] = j + 1
nx.relabel_nodes(self.graph, mapping, copy=False)
self.graph.add_node(i)
self.set_node_attributes()
if edges is not None:
for edge in edges:
try:
self.add_edge(
edge["from_index"],
edge["to_index"],
from_jimage=(0, 0, 0),
to_jimage=edge["to_jimage"],
weight=edge.get("weight", None),
edge_properties=edge.get("properties", None),
)
except KeyError:
raise RuntimeError("Some edges are invalid.")
def set_node_attributes(self):
"""
Gives each node a "specie" and a "coords" attribute, updated with the
current species and coordinates.
:return:
"""
species = {}
coords = {}
properties = {}
for node in self.graph.nodes():
species[node] = self.structure[node].specie.symbol
coords[node] = self.structure[node].coords
properties[node] = self.structure[node].properties
nx.set_node_attributes(self.graph, species, "specie")
nx.set_node_attributes(self.graph, coords, "coords")
nx.set_node_attributes(self.graph, properties, "properties")
def alter_edge(
self,
from_index,
to_index,
to_jimage=None,
new_weight=None,
new_edge_properties=None,
):
"""
Alters either the weight or the edge_properties of
an edge in the StructureGraph.
:param from_index: int
:param to_index: int
:param to_jimage: tuple
:param new_weight: alter_edge does not require
that weight be altered. As such, by default, this
is None. If weight is to be changed, it should be a
float.
:param new_edge_properties: alter_edge does not require
that edge_properties be altered. As such, by default,
this is None. If any edge properties are to be changed,
it should be a dictionary of edge properties to be changed.
:return:
"""
existing_edges = self.graph.get_edge_data(from_index, to_index)
# ensure that edge exists before attempting to change it
if not existing_edges:
raise ValueError(
"Edge between {} and {} cannot be altered;\
no edge exists between those sites.".format(
from_index, to_index
)
)
if to_jimage is None:
edge_index = 0
else:
for i, properties in existing_edges.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
if new_weight is not None:
self.graph[from_index][to_index][edge_index]["weight"] = new_weight
if new_edge_properties is not None:
for prop in list(new_edge_properties.keys()):
self.graph[from_index][to_index][edge_index][prop] = new_edge_properties[prop]
def break_edge(self, from_index, to_index, to_jimage=None, allow_reverse=False):
"""
Remove an edge from the StructureGraph. If no image is given, this method will fail.
:param from_index: int
:param to_index: int
:param to_jimage: tuple
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return:
"""
# ensure that edge exists before attempting to remove it
existing_edges = self.graph.get_edge_data(from_index, to_index)
existing_reverse = None
if to_jimage is None:
raise ValueError("Image must be supplied, to avoid ambiguity.")
if existing_edges:
for i, properties in existing_edges.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
self.graph.remove_edge(from_index, to_index, edge_index)
else:
if allow_reverse:
existing_reverse = self.graph.get_edge_data(to_index, from_index)
if existing_reverse:
for i, properties in existing_reverse.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
self.graph.remove_edge(to_index, from_index, edge_index)
else:
raise ValueError(
"Edge cannot be broken between {} and {};\
no edge exists between those sites.".format(
from_index, to_index
)
)
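    # Illustrative sketch (hypothetical indices): breaking a bond requires the
    # periodic image of the destination site, e.g.
    #   sg.break_edge(0, 1, to_jimage=(0, 0, 0))
    # assuming ``sg`` is a StructureGraph with an edge between sites 0 and 1 that
    # stays inside the original unit cell.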
def remove_nodes(self, indices):
"""
A wrapper for Molecule.remove_sites().
:param indices: list of indices in the current Molecule (and graph) to
be removed.
:return:
"""
self.structure.remove_sites(indices)
self.graph.remove_nodes_from(indices)
mapping = {}
for correct, current in enumerate(sorted(self.graph.nodes)):
mapping[current] = correct
nx.relabel_nodes(self.graph, mapping, copy=False)
self.set_node_attributes()
def substitute_group(
self,
index,
func_grp,
strategy,
bond_order=1,
graph_dict=None,
strategy_params=None,
):
"""
Builds off of Structure.substitute to replace an atom in self.structure
with a functional group. This method also amends self.graph to
incorporate the new functional group.
NOTE: Care must be taken to ensure that the functional group that is
substituted will not place atoms to close to each other, or violate the
dimensions of the Lattice.
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are two options:
1. Providing an actual Molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:return:
"""
if isinstance(func_grp, Molecule):
func_grp = copy.deepcopy(func_grp)
else:
try:
func_grp = copy.deepcopy(FunctionalGroups[func_grp])
except Exception:
raise RuntimeError("Can't find functional group in list. " "Provide explicit coordinate instead")
self.structure.substitute(index, func_grp, bond_order=bond_order)
mapping = map_indices(func_grp)
# Remove dummy atom "X"
func_grp.remove_species("X")
if graph_dict is not None:
for (u, v) in graph_dict.keys():
edge_props = graph_dict[(u, v)]
if "to_jimage" in edge_props.keys():
to_jimage = edge_props["to_jimage"]
del edge_props["to_jimage"]
else:
# By default, assume that all edges should stay remain
# inside the initial image
to_jimage = (0, 0, 0)
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
self.add_edge(
mapping[u],
mapping[v],
to_jimage=to_jimage,
weight=weight,
edge_properties=edge_props,
)
else:
if strategy_params is None:
strategy_params = {}
strat = strategy(**strategy_params)
for site in mapping.values():
neighbors = strat.get_nn_info(self.structure, site)
for neighbor in neighbors:
self.add_edge(
from_index=site,
from_jimage=(0, 0, 0),
to_index=neighbor["site_index"],
to_jimage=neighbor["image"],
weight=neighbor["weight"],
warn_duplicates=False,
)
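    # Illustrative sketch (hypothetical bond data): ``graph_dict`` maps bonds within
    # the functional group to edge properties, e.g.
    #   graph_dict = {(0, 1): {"weight": 1.09}, (0, 2): {"weight": 1.09}, (0, 3): {"weight": 1.09}}
    #   sg.substitute_group(5, methyl, None, graph_dict=graph_dict)
    # where ``methyl`` is an X-CH3 Molecule built elsewhere, site 5 is the atom being
    # replaced, and 1.09 is an assumed C-H bond length in angstroms.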
def get_connected_sites(self, n, jimage=(0, 0, 0)):
"""
Returns a named tuple of neighbors of site n:
periodic_site, jimage, index, weight.
Index is the index of the corresponding site
in the original structure, weight can be
None if not defined.
:param n: index of Site in Structure
:param jimage: lattice vector of site
:return: list of ConnectedSite tuples,
sorted by closest first
"""
connected_sites = set()
connected_site_images = set()
out_edges = [(u, v, d, "out") for u, v, d in self.graph.out_edges(n, data=True)]
in_edges = [(u, v, d, "in") for u, v, d in self.graph.in_edges(n, data=True)]
for u, v, d, dir in out_edges + in_edges:
to_jimage = d["to_jimage"]
if dir == "in":
u, v = v, u
to_jimage = np.multiply(-1, to_jimage)
to_jimage = tuple(map(int, np.add(to_jimage, jimage)))
site_d = self.structure[v].as_dict()
site_d["abc"] = np.add(site_d["abc"], to_jimage).tolist()
site = PeriodicSite.from_dict(site_d)
# from_site if jimage arg != (0, 0, 0)
relative_jimage = np.subtract(to_jimage, jimage)
dist = self.structure[u].distance(self.structure[v], jimage=relative_jimage)
weight = d.get("weight", None)
if (v, to_jimage) not in connected_site_images:
connected_site = ConnectedSite(site=site, jimage=to_jimage, index=v, weight=weight, dist=dist)
connected_sites.add(connected_site)
connected_site_images.add((v, to_jimage))
# return list sorted by closest sites first
connected_sites = list(connected_sites)
connected_sites.sort(key=lambda x: x.dist)
return connected_sites
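    # Illustrative sketch: each entry of the returned list is a ConnectedSite
    # namedtuple, so a coordination shell can be inspected as, e.g.
    #   for cs in sg.get_connected_sites(0):
    #       print(cs.index, cs.jimage, cs.dist, cs.weight)
    # assuming ``sg`` is an existing StructureGraph.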
def get_coordination_of_site(self, n):
"""
Returns the number of neighbors of site n.
In graph terms, simply returns degree
of node corresponding to site n.
:param n: index of site
:return (int):
"""
number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v])
return self.graph.degree(n) - number_of_self_loops
def draw_graph_to_file(
self,
filename="graph",
diff=None,
hide_unconnected_nodes=False,
hide_image_edges=True,
edge_colors=False,
node_labels=False,
weight_labels=False,
image_labels=False,
color_scheme="VESTA",
keep_dot=False,
algo="fdp",
):
"""
Draws graph using GraphViz.
The networkx graph object itself can also be drawn
with networkx's in-built graph drawing methods, but
note that this might give misleading results for
multigraphs (edges are super-imposed on each other).
If visualization is difficult to interpret,
`hide_image_edges` can help, especially in larger
graphs.
:param filename: filename to output, will detect filetype
from extension (any graphviz filetype supported, such as
pdf or png)
:param diff (StructureGraph): an additional graph to
compare with, will color edges red that do not exist in diff
and edges green that are in diff graph but not in the
reference graph
:param hide_unconnected_nodes: if True, hide unconnected
nodes
:param hide_image_edges: if True, do not draw edges that
go through periodic boundaries
:param edge_colors (bool): if True, use node colors to
color edges
:param node_labels (bool): if True, label nodes with
species and site index
:param weight_labels (bool): if True, label edges with
weights
:param image_labels (bool): if True, label edges with
their periodic images (usually only used for debugging,
edges to periodic images always appear as dashed lines)
:param color_scheme (str): "VESTA" or "JMOL"
:param keep_dot (bool): keep GraphViz .dot file for later
visualization
:param algo: any graphviz algo, "neato" (for simple graphs)
or "fdp" (for more crowded graphs) usually give good outputs
:return:
"""
if not which(algo):
raise RuntimeError("StructureGraph graph drawing requires " "GraphViz binaries to be in the path.")
# Developer note: NetworkX also has methods for drawing
# graphs using matplotlib, these also work here. However,
# a dedicated tool like GraphViz allows for much easier
# control over graph appearance and also correctly displays
# mutli-graphs (matplotlib can superimpose multiple edges).
g = self.graph.copy()
g.graph = {"nodesep": 10.0, "dpi": 300, "overlap": "false"}
# add display options for nodes
for n in g.nodes():
# get label by species name
label = "{}({})".format(str(self.structure[n].specie), n) if node_labels else ""
# use standard color scheme for nodes
c = EL_COLORS[color_scheme].get(str(self.structure[n].specie.symbol), [0, 0, 0])
# get contrasting font color
# magic numbers account for perceived luminescence
# https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color
fontcolor = "#000000" if 1 - (c[0] * 0.299 + c[1] * 0.587 + c[2] * 0.114) / 255 < 0.5 else "#ffffff"
# convert color to hex string
color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2])
g.add_node(
n,
fillcolor=color,
fontcolor=fontcolor,
label=label,
fontname="Helvetica-bold",
style="filled",
shape="circle",
)
edges_to_delete = []
# add display options for edges
for u, v, k, d in g.edges(keys=True, data=True):
# retrieve from/to images, set as origin if not defined
to_image = d["to_jimage"]
# set edge style
d["style"] = "solid"
if to_image != (0, 0, 0):
d["style"] = "dashed"
if hide_image_edges:
edges_to_delete.append((u, v, k))
# don't show edge directions
d["arrowhead"] = "none"
# only add labels for images that are not the origin
if image_labels:
d["headlabel"] = "" if to_image == (0, 0, 0) else "to {}".format((to_image))
d["arrowhead"] = "normal" if d["headlabel"] else "none"
# optionally color edges using node colors
color_u = g.nodes[u]["fillcolor"]
color_v = g.nodes[v]["fillcolor"]
d["color_uv"] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000"
# optionally add weights to graph
if weight_labels:
units = g.graph.get("edge_weight_units", "")
if d.get("weight"):
d["label"] = "{:.2f} {}".format(d["weight"], units)
# update edge with our new style attributes
g.edges[u, v, k].update(d)
# optionally remove periodic image edges,
# these can be confusing due to periodic boundaries
if hide_image_edges:
for edge_to_delete in edges_to_delete:
g.remove_edge(*edge_to_delete)
# optionally hide unconnected nodes,
# these can appear when removing periodic edges
if hide_unconnected_nodes:
            g = g.subgraph([n for n, d in g.degree() if d != 0])
# optionally highlight differences with another graph
if diff:
diff = self.diff(diff, strict=True)
green_edges = []
red_edges = []
for u, v, k, d in g.edges(keys=True, data=True):
if (u, v, d["to_jimage"]) in diff["self"]:
# edge has been deleted
red_edges.append((u, v, k))
elif (u, v, d["to_jimage"]) in diff["other"]:
# edge has been added
green_edges.append((u, v, k))
for u, v, k in green_edges:
g.edges[u, v, k].update({"color_uv": "#00ff00"})
for u, v, k in red_edges:
g.edges[u, v, k].update({"color_uv": "#ff0000"})
basename, extension = os.path.splitext(filename)
extension = extension[1:]
write_dot(g, basename + ".dot")
with open(filename, "w") as f:
args = [algo, "-T", extension, basename + ".dot"]
rs = subprocess.Popen(args, stdout=f, stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if rs.returncode != 0:
raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode))
if not keep_dot:
os.remove(basename + ".dot")
def types_of_coordination_environments(self, anonymous=False):
"""
Extract information on the different co-ordination environments
present in the graph.
:param anonymous: if anonymous, will replace specie names
with A, B, C, etc.
:return: a list of co-ordination environments,
e.g. ['Mo-S(6)', 'S-Mo(3)']
"""
motifs = set()
for idx, site in enumerate(self.structure):
centre_sp = site.species_string
connected_sites = self.get_connected_sites(idx)
connected_species = [connected_site.site.species_string for connected_site in connected_sites]
labels = []
for sp in set(connected_species):
count = connected_species.count(sp)
labels.append((count, sp))
labels = sorted(labels, reverse=True)
if anonymous:
mapping = {centre_sp: "A"}
available_letters = [chr(66 + i) for i in range(25)]
for label in labels:
sp = label[1]
if sp not in mapping:
mapping[sp] = available_letters.pop(0)
centre_sp = "A"
labels = [(label[0], mapping[label[1]]) for label in labels]
labels = ["{}({})".format(label[1], label[0]) for label in labels]
motif = "{}-{}".format(centre_sp, ",".join(labels))
motifs.add(motif)
return sorted(list(motifs))
def as_dict(self):
"""
As in :Class: `pymatgen.core.Structure` except
with using `to_dict_of_dicts` from NetworkX
to store graph information.
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"graphs": json_graph.adjacency_data(self.graph),
}
return d
def __mul__(self, scaling_matrix):
"""
Replicates the graph, creating a supercell,
intelligently joining together
edges that lie on periodic boundaries.
In principle, any operations on the expanded
graph could also be done on the original
graph, but a larger graph can be easier to
visualize and reason about.
:param scaling_matrix: same as Structure.__mul__
:return:
"""
# Developer note: a different approach was also trialed, using
# a simple Graph (instead of MultiDiGraph), with node indices
# representing both site index and periodic image. Here, the
# number of nodes != number of sites in the Structure. This
# approach has many benefits, but made it more difficult to
# keep the graph in sync with its corresponding Structure.
# Broadly, it would be easier to multiply the Structure
# *before* generating the StructureGraph, but this isn't
# possible when generating the graph using critic2 from
# charge density.
# Multiplication works by looking for the expected position
# of an image node, and seeing if that node exists in the
# supercell. If it does, the edge is updated. This is more
        # computationally expensive than just keeping track of
        # which new lattice images are present, but should hopefully be
# easier to extend to a general 3x3 scaling matrix.
# code adapted from Structure.__mul__
scale_matrix = np.array(scaling_matrix, np.int16)
if scale_matrix.shape != (3, 3):
scale_matrix = np.array(scale_matrix * np.eye(3), np.int16)
else:
# TODO: test __mul__ with full 3x3 scaling matrices
raise NotImplementedError("Not tested with 3x3 scaling matrices yet.")
new_lattice = Lattice(np.dot(scale_matrix, self.structure.lattice.matrix))
f_lat = lattice_points_in_supercell(scale_matrix)
c_lat = new_lattice.get_cartesian_coords(f_lat)
new_sites = []
new_graphs = []
for v in c_lat:
# create a map of nodes from original graph to its image
mapping = {n: n + len(new_sites) for n in range(len(self.structure))}
for idx, site in enumerate(self.structure):
s = PeriodicSite(
site.species,
site.coords + v,
new_lattice,
properties=site.properties,
coords_are_cartesian=True,
to_unit_cell=False,
)
new_sites.append(s)
new_graphs.append(nx.relabel_nodes(self.graph, mapping, copy=True))
new_structure = Structure.from_sites(new_sites)
# merge all graphs into one big graph
new_g = nx.MultiDiGraph()
for new_graph in new_graphs:
new_g = nx.union(new_g, new_graph)
edges_to_remove = [] # tuple of (u, v, k)
edges_to_add = [] # tuple of (u, v, attr_dict)
# list of new edges inside supercell
# for duplicate checking
edges_inside_supercell = [{u, v} for u, v, d in new_g.edges(data=True) if d["to_jimage"] == (0, 0, 0)]
new_periodic_images = []
orig_lattice = self.structure.lattice
# use k-d tree to match given position to an
# existing Site in Structure
kd_tree = KDTree(new_structure.cart_coords)
# tolerance in for sites to be considered equal
# this could probably be a lot smaller
tol = 0.05
for u, v, k, d in new_g.edges(keys=True, data=True):
to_jimage = d["to_jimage"] # for node v
# reduce unnecessary checking
if to_jimage != (0, 0, 0):
# get index in original site
n_u = u % len(self.structure)
n_v = v % len(self.structure)
# get fractional co-ordinates of where atoms defined
# by edge are expected to be, relative to original
# lattice (keeping original lattice has
# significant benefits)
v_image_frac = np.add(self.structure[n_v].frac_coords, to_jimage)
u_frac = self.structure[n_u].frac_coords
# using the position of node u as a reference,
# get relative Cartesian co-ordinates of where
# atoms defined by edge are expected to be
v_image_cart = orig_lattice.get_cartesian_coords(v_image_frac)
u_cart = orig_lattice.get_cartesian_coords(u_frac)
v_rel = np.subtract(v_image_cart, u_cart)
# now retrieve position of node v in
                # new supercell, and get absolute Cartesian
# co-ordinates of where atoms defined by edge
# are expected to be
v_expec = new_structure[u].coords + v_rel
# now search in new structure for these atoms
# query returns (distance, index)
v_present = kd_tree.query(v_expec)
v_present = v_present[1] if v_present[0] <= tol else None
# check if image sites now present in supercell
# and if so, delete old edge that went through
# periodic boundary
if v_present is not None:
new_u = u
new_v = v_present
new_d = d.copy()
# node now inside supercell
new_d["to_jimage"] = (0, 0, 0)
edges_to_remove.append((u, v, k))
# make sure we don't try to add duplicate edges
# will remove two edges for everyone one we add
if {new_u, new_v} not in edges_inside_supercell:
# normalize direction
if new_v < new_u:
new_u, new_v = new_v, new_u
edges_inside_supercell.append({new_u, new_v})
edges_to_add.append((new_u, new_v, new_d))
else:
# want to find new_v such that we have
# full periodic boundary conditions
# so that nodes on one side of supercell
# are connected to nodes on opposite side
v_expec_frac = new_structure.lattice.get_fractional_coords(v_expec)
# find new to_jimage
# use np.around to fix issues with finite precision leading to incorrect image
v_expec_image = np.around(v_expec_frac, decimals=3)
v_expec_image = v_expec_image - v_expec_image % 1
v_expec_frac = np.subtract(v_expec_frac, v_expec_image)
v_expec = new_structure.lattice.get_cartesian_coords(v_expec_frac)
v_present = kd_tree.query(v_expec)
v_present = v_present[1] if v_present[0] <= tol else None
if v_present is not None:
new_u = u
new_v = v_present
new_d = d.copy()
new_to_jimage = tuple(map(int, v_expec_image))
# normalize direction
if new_v < new_u:
new_u, new_v = new_v, new_u
new_to_jimage = tuple(np.multiply(-1, d["to_jimage"]).astype(int))
new_d["to_jimage"] = new_to_jimage
edges_to_remove.append((u, v, k))
if (new_u, new_v, new_to_jimage) not in new_periodic_images:
edges_to_add.append((new_u, new_v, new_d))
new_periodic_images.append((new_u, new_v, new_to_jimage))
logger.debug("Removing {} edges, adding {} new edges.".format(len(edges_to_remove), len(edges_to_add)))
# add/delete marked edges
        for edge_to_remove in edges_to_remove:
            new_g.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
new_g.add_edge(u, v, **d)
# return new instance of StructureGraph with supercell
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": new_structure.as_dict(),
"graphs": json_graph.adjacency_data(new_g),
}
sg = StructureGraph.from_dict(d)
return sg
def __len__(self):
"""
:return: length of Structure / number of nodes in graph
"""
return len(self.structure)
def sort(self, key=None, reverse=False):
"""
Same as Structure.sort(), also remaps nodes in graph.
:param key:
:param reverse:
:return:
"""
old_structure = self.structure.copy()
# sort Structure
self.structure._sites = sorted(self.structure._sites, key=key, reverse=reverse)
# apply Structure ordering to graph
mapping = {idx: self.structure.index(site) for idx, site in enumerate(old_structure)}
self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)
# normalize directions of edges
edges_to_remove = []
edges_to_add = []
for u, v, k, d in self.graph.edges(keys=True, data=True):
if v < u:
new_v, new_u, new_d = u, v, d.copy()
new_d["to_jimage"] = tuple(np.multiply(-1, d["to_jimage"]).astype(int))
edges_to_remove.append((u, v, k))
edges_to_add.append((new_u, new_v, new_d))
# add/delete marked edges
        for edge_to_remove in edges_to_remove:
            self.graph.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
self.graph.add_edge(u, v, **d)
def __eq__(self, other):
"""
Two StructureGraphs are equal if they have equal Structures,
and have the same edges between Sites. Edge weights can be
different and StructureGraphs can still be considered equal.
:param other: StructureGraph
:return (bool):
"""
# sort for consistent node indices
# PeriodicSite should have a proper __hash__() value,
# using its frac_coords as a convenient key
mapping = {tuple(site.frac_coords): self.structure.index(site) for site in other.structure}
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])
edges = {(u, v, d["to_jimage"]) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v, d["to_jimage"]) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
return (edges == edges_other) and (self.structure == other_sorted.structure)
def diff(self, other, strict=True):
"""
Compares two StructureGraphs. Returns dict with
keys 'self', 'other', 'both' with edges that are
present in only one StructureGraph ('self' and
'other'), and edges that are present in both.
The Jaccard distance is a simple measure of the
dissimilarity between two StructureGraphs (ignoring
edge weights), and is defined by 1 - (size of the
intersection / size of the union) of the sets of
edges. This is returned with key 'dist'.
Important note: all node indices are in terms
of the StructureGraph this method is called
from, not the 'other' StructureGraph: there
is no guarantee the node indices will be the
same if the underlying Structures are ordered
differently.
:param other: StructureGraph
:param strict: if False, will compare bonds
from different Structures, with node indices
replaced by Species strings, will not count
number of occurrences of bonds
:return:
"""
if self.structure != other.structure and strict:
return ValueError("Meaningless to compare StructureGraphs if " "corresponding Structures are different.")
if strict:
# sort for consistent node indices
# PeriodicSite should have a proper __hash__() value,
# using its frac_coords as a convenient key
mapping = {tuple(site.frac_coords): self.structure.index(site) for site in other.structure}
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])
edges = {(u, v, d["to_jimage"]) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v, d["to_jimage"]) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
else:
edges = {
(str(self.structure[u].specie), str(self.structure[v].specie))
for u, v, d in self.graph.edges(keys=False, data=True)
}
edges_other = {
(str(other.structure[u].specie), str(other.structure[v].specie))
for u, v, d in other.graph.edges(keys=False, data=True)
}
if len(edges) == 0 and len(edges_other) == 0:
jaccard_dist = 0 # by definition
else:
jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))
return {
"self": edges - edges_other,
"other": edges_other - edges,
"both": edges.intersection(edges_other),
"dist": jaccard_dist,
}
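    # Worked example of the Jaccard distance above: if this graph has 4 edges,
    # ``other`` has 5, and 3 edges are common, the union holds 6 distinct edges,
    # so dist = 1 - 3 / 6 = 0.5.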
def get_subgraphs_as_molecules(self, use_weights=False):
"""
Retrieve subgraphs as molecules, useful for extracting
molecules from periodic crystals.
Will only return unique molecules, not any duplicates
present in the crystal (a duplicate defined as an
isomorphic subgraph).
:param use_weights (bool): If True, only treat subgraphs
as isomorphic if edges have the same weights. Typically,
this means molecules will need to have the same bond
lengths to be defined as duplicates, otherwise bond
lengths can differ. This is a fairly robust approach,
but will treat e.g. enantiomers as being duplicates.
:return: list of unique Molecules in Structure
"""
# creating a supercell is an easy way to extract
# molecules (and not, e.g., layers of a 2D crystal)
# without adding extra logic
if getattr(self, "_supercell_sg", None) is None:
self._supercell_sg = supercell_sg = self * (3, 3, 3)
# make undirected to find connected subgraphs
supercell_sg.graph = nx.Graph(supercell_sg.graph)
# find subgraphs
all_subgraphs = [supercell_sg.graph.subgraph(c) for c in nx.connected_components(supercell_sg.graph)]
# discount subgraphs that lie across *supercell* boundaries
        # these will be subgraphs representing crystals
molecule_subgraphs = []
for subgraph in all_subgraphs:
intersects_boundary = any(d["to_jimage"] != (0, 0, 0) for u, v, d in subgraph.edges(data=True))
if not intersects_boundary:
molecule_subgraphs.append(nx.MultiDiGraph(subgraph))
# add specie names to graph to be able to test for isomorphism
for subgraph in molecule_subgraphs:
for n in subgraph:
subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))
        # now define how we test for isomorphism
        def node_match(n1, n2):
            return n1["specie"] == n2["specie"]

        def edge_match(e1, e2):
            if use_weights:
                return e1["weight"] == e2["weight"]
            return True

        # prune duplicate subgraphs
unique_subgraphs = []
for subgraph in molecule_subgraphs:
already_present = [
nx.is_isomorphic(subgraph, g, node_match=node_match, edge_match=edge_match) for g in unique_subgraphs
]
if not any(already_present):
unique_subgraphs.append(subgraph)
# get Molecule objects for each subgraph
molecules = []
for subgraph in unique_subgraphs:
coords = [supercell_sg.structure[n].coords for n in subgraph.nodes()]
species = [supercell_sg.structure[n].specie for n in subgraph.nodes()]
molecule = Molecule(species, coords)
# shift so origin is at center of mass
molecule = molecule.get_centered_molecule()
molecules.append(molecule)
return molecules
class MolGraphSplitError(Exception):
"""
    Raised when a molecule graph fails to split into two disconnected
subgraphs
"""
pass
class MoleculeGraph(MSONable):
"""
This is a class for annotating a Molecule with
bond information, stored in the form of a graph. A "bond" does
not necessarily have to be a chemical bond, but can store any
kind of information that connects two Sites.
"""
def __init__(self, molecule, graph_data=None):
"""
If constructing this class manually, use the `with_empty_graph`
method or `with_local_env_strategy` method (using an algorithm
provided by the `local_env` module, such as O'Keeffe).
This class that contains connection information:
relationships between sites represented by a Graph structure,
and an associated structure object.
This class uses the NetworkX package to store and operate
on the graph itself, but contains a lot of helper methods
to make associating a graph with a given molecule easier.
Use cases for this include storing bonding information,
NMR J-couplings, Heisenberg exchange parameters, etc.
:param molecule: Molecule object
:param graph_data: dict containing graph information in
dict format (not intended to be constructed manually,
see as_dict method for format)
"""
if isinstance(molecule, MoleculeGraph):
# just make a copy from input
graph_data = molecule.as_dict()["graphs"]
self.molecule = molecule
self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)
# tidy up edge attr dicts, reading to/from json duplicates
# information
for u, v, k, d in self.graph.edges(keys=True, data=True):
if "id" in d:
del d["id"]
if "key" in d:
del d["key"]
# ensure images are tuples (conversion to lists happens
# when serializing back from json), it's important images
# are hashable/immutable
if "to_jimage" in d:
d["to_jimage"] = tuple(d["to_jimage"])
if "from_jimage" in d:
d["from_jimage"] = tuple(d["from_jimage"])
self.set_node_attributes()
def add_edge(
self,
from_index,
to_index,
weight=None,
warn_duplicates=True,
edge_properties=None,
):
"""
Add edge to graph.
Since physically a 'bond' (or other connection
between sites) doesn't have a direction, from_index,
from_jimage can be swapped with to_index, to_jimage.
        However, images will always be shifted so that
from_index < to_index and from_jimage becomes (0, 0, 0).
:param from_index: index of site connecting from
:param to_index: index of site connecting to
:param weight (float): e.g. bond length
:param warn_duplicates (bool): if True, will warn if
trying to add duplicate edges (duplicate edges will not
be added in either case)
:param edge_properties (dict): any other information to
store on graph edges, similar to Structure's site_properties
:return:
"""
# this is not necessary for the class to work, but
# just makes it neater
if to_index < from_index:
to_index, from_index = from_index, to_index
# sanitize types
from_index, to_index = int(from_index), int(to_index)
# check we're not trying to add a duplicate edge
# there should only ever be at most one edge
# between two sites
existing_edge_data = self.graph.get_edge_data(from_index, to_index)
if existing_edge_data and warn_duplicates:
warnings.warn(
"Trying to add an edge that already exists from " "site {} to site {}.".format(from_index, to_index)
)
return
# generic container for additional edge properties,
# similar to site properties
edge_properties = edge_properties or {}
if weight:
self.graph.add_edge(from_index, to_index, weight=weight, **edge_properties)
else:
self.graph.add_edge(from_index, to_index, **edge_properties)
def insert_node(
self,
i,
species,
coords,
validate_proximity=False,
site_properties=None,
edges=None,
):
"""
A wrapper around Molecule.insert(), which also incorporates the new
site into the MoleculeGraph.
:param i: Index at which to insert the new site
:param species: Species for the new site
:param coords: 3x1 array representing coordinates of the new site
:param validate_proximity: For Molecule.insert(); if True (default
False), distance will be checked to ensure that site can be safely
added.
:param site_properties: Site properties for Molecule
:param edges: List of dicts representing edges to be added to the
MoleculeGraph. These edges must include the index of the new site i,
and all indices used for these edges should reflect the
MoleculeGraph AFTER the insertion, NOT before. Each dict should at
least have a "to_index" and "from_index" key, and can also have a
"weight" and a "properties" key.
:return:
"""
self.molecule.insert(
i,
species,
coords,
validate_proximity=validate_proximity,
properties=site_properties,
)
mapping = {}
for j in range(len(self.molecule) - 1):
if j < i:
mapping[j] = j
else:
mapping[j] = j + 1
nx.relabel_nodes(self.graph, mapping, copy=False)
self.graph.add_node(i)
self.set_node_attributes()
if edges is not None:
for edge in edges:
try:
self.add_edge(
edge["from_index"],
edge["to_index"],
weight=edge.get("weight", None),
edge_properties=edge.get("properties", None),
)
except KeyError:
raise RuntimeError("Some edges are invalid.")
def set_node_attributes(self):
"""
Replicates molecule site properties (specie, coords, etc.) in the
MoleculeGraph.
:return:
"""
species = {}
coords = {}
properties = {}
for node in self.graph.nodes():
species[node] = self.molecule[node].specie.symbol
coords[node] = self.molecule[node].coords
properties[node] = self.molecule[node].properties
nx.set_node_attributes(self.graph, species, "specie")
nx.set_node_attributes(self.graph, coords, "coords")
nx.set_node_attributes(self.graph, properties, "properties")
def alter_edge(self, from_index, to_index, new_weight=None, new_edge_properties=None):
"""
Alters either the weight or the edge_properties of
an edge in the MoleculeGraph.
:param from_index: int
:param to_index: int
:param new_weight: alter_edge does not require
that weight be altered. As such, by default, this
is None. If weight is to be changed, it should be a
float.
:param new_edge_properties: alter_edge does not require
that edge_properties be altered. As such, by default,
this is None. If any edge properties are to be changed,
it should be a dictionary of edge properties to be changed.
:return:
"""
existing_edge = self.graph.get_edge_data(from_index, to_index)
# ensure that edge exists before attempting to change it
if not existing_edge:
raise ValueError(
"Edge between {} and {} cannot be altered;\
no edge exists between those sites.".format(
from_index, to_index
)
)
# Third index should always be 0 because there should only be one edge between any two nodes
if new_weight is not None:
self.graph[from_index][to_index][0]["weight"] = new_weight
if new_edge_properties is not None:
for prop in list(new_edge_properties.keys()):
self.graph[from_index][to_index][0][prop] = new_edge_properties[prop]
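    # Illustrative sketch (hypothetical values): updating an existing bond in place, e.g.
    #   mg.alter_edge(0, 1, new_weight=1.54, new_edge_properties={"bond_order": 1})
    # assuming ``mg`` is a MoleculeGraph that already has a 0-1 edge.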
def break_edge(self, from_index, to_index, allow_reverse=False):
"""
Remove an edge from the MoleculeGraph
:param from_index: int
:param to_index: int
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return:
"""
# ensure that edge exists before attempting to remove it
existing_edge = self.graph.get_edge_data(from_index, to_index)
existing_reverse = None
if existing_edge:
self.graph.remove_edge(from_index, to_index)
else:
if allow_reverse:
existing_reverse = self.graph.get_edge_data(to_index, from_index)
if existing_reverse:
self.graph.remove_edge(to_index, from_index)
else:
raise ValueError(
"Edge cannot be broken between {} and {};\
no edge exists between those sites.".format(
from_index, to_index
)
)
def remove_nodes(self, indices):
"""
A wrapper for Molecule.remove_sites().
:param indices: list of indices in the current Molecule (and graph) to
be removed.
:return:
"""
self.molecule.remove_sites(indices)
self.graph.remove_nodes_from(indices)
mapping = {}
for correct, current in enumerate(sorted(self.graph.nodes)):
mapping[current] = correct
nx.relabel_nodes(self.graph, mapping, copy=False)
self.set_node_attributes()
def get_disconnected_fragments(self):
"""
Determine if the MoleculeGraph is connected. If it is not, separate the
MoleculeGraph into different MoleculeGraphs, where each resulting
MoleculeGraph is a disconnected subgraph of the original.
Currently, this function naively assigns the charge
of the total molecule to a single submolecule. A
later effort will be to actually accurately assign
charge.
NOTE: This function does not modify the original
MoleculeGraph. It creates a copy, modifies that, and
returns two or more new MoleculeGraph objects.
:return: list of MoleculeGraphs
"""
if nx.is_weakly_connected(self.graph):
return [copy.deepcopy(self)]
original = copy.deepcopy(self)
sub_mols = list()
# Had to use nx.weakly_connected_components because of deprecation
# of nx.weakly_connected_component_subgraphs
subgraphs = [original.graph.subgraph(c) for c in nx.weakly_connected_components(original.graph)]
for subg in subgraphs:
nodes = sorted(list(subg.nodes))
# Molecule indices are essentially list-based, so node indices
# must be remapped, incrementing from 0
mapping = {}
for i, n in enumerate(nodes):
mapping[n] = i
# just give charge to whatever subgraph has node with index 0
# TODO: actually figure out how to distribute charge
if 0 in nodes:
charge = self.molecule.charge
else:
charge = 0
# relabel nodes in graph to match mapping
new_graph = nx.relabel_nodes(subg, mapping)
species = nx.get_node_attributes(new_graph, "specie")
coords = nx.get_node_attributes(new_graph, "coords")
raw_props = nx.get_node_attributes(new_graph, "properties")
properties = {}
for prop_set in raw_props.values():
for prop in prop_set.keys():
if prop in properties:
properties[prop].append(prop_set[prop])
else:
properties[prop] = [prop_set[prop]]
# Site properties must be present for all atoms in the molecule
# in order to be used for Molecule instantiation
for k, v in properties.items():
if len(v) != len(species):
del properties[k]
new_mol = Molecule(species, coords, charge=charge, site_properties=properties)
graph_data = json_graph.adjacency_data(new_graph)
# create new MoleculeGraph
sub_mols.append(MoleculeGraph(new_mol, graph_data=graph_data))
return sub_mols
def split_molecule_subgraphs(self, bonds, allow_reverse=False, alterations=None):
"""
Split MoleculeGraph into two or more MoleculeGraphs by
breaking a set of bonds. This function uses
MoleculeGraph.break_edge repeatedly to create
disjoint graphs (two or more separate molecules).
This function does not only alter the graph
information, but also changes the underlying
Molecules.
If the bonds parameter does not include sufficient
bonds to separate two molecule fragments, then this
function will fail.
Currently, this function naively assigns the charge
of the total molecule to a single submolecule. A
later effort will be to actually accurately assign
charge.
NOTE: This function does not modify the original
MoleculeGraph. It creates a copy, modifies that, and
returns two or more new MoleculeGraph objects.
:param bonds: list of tuples (from_index, to_index)
representing bonds to be broken to split the MoleculeGraph.
:param alterations: a dict {(from_index, to_index): alt},
where alt is a dictionary including weight and/or edge
properties to be changed following the split.
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return: list of MoleculeGraphs
"""
self.set_node_attributes()
original = copy.deepcopy(self)
for bond in bonds:
original.break_edge(bond[0], bond[1], allow_reverse=allow_reverse)
if nx.is_weakly_connected(original.graph):
raise MolGraphSplitError(
"Cannot split molecule; \
MoleculeGraph is still connected."
)
# alter any bonds before partition, to avoid remapping
if alterations is not None:
for (u, v) in alterations.keys():
if "weight" in alterations[(u, v)]:
weight = alterations[(u, v)]["weight"]
del alterations[(u, v)]["weight"]
edge_properties = alterations[(u, v)] if len(alterations[(u, v)]) != 0 else None
original.alter_edge(u, v, new_weight=weight, new_edge_properties=edge_properties)
else:
original.alter_edge(u, v, new_edge_properties=alterations[(u, v)])
return original.get_disconnected_fragments()
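    # Illustrative sketch (hypothetical indices): splitting across a single bond, e.g.
    #   frag_a, frag_b = mg.split_molecule_subgraphs([(3, 4)])
    # assuming ``mg`` is a MoleculeGraph in which removing the 3-4 bond disconnects
    # the graph; if the graph stays connected, MolGraphSplitError is raised.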
def build_unique_fragments(self):
"""
Find all possible fragment combinations of the MoleculeGraphs (in other
words, all connected induced subgraphs)
:return:
"""
self.set_node_attributes()
graph = self.graph.to_undirected()
# find all possible fragments, aka connected induced subgraphs
frag_dict = {}
for ii in range(1, len(self.molecule)):
for combination in combinations(graph.nodes, ii):
mycomp = []
for idx in combination:
mycomp.append(str(self.molecule[idx].specie))
mycomp = "".join(sorted(mycomp))
subgraph = nx.subgraph(graph, combination)
if nx.is_connected(subgraph):
mykey = mycomp + str(len(subgraph.edges()))
if mykey not in frag_dict:
frag_dict[mykey] = [copy.deepcopy(subgraph)]
else:
frag_dict[mykey].append(copy.deepcopy(subgraph))
# narrow to all unique fragments using graph isomorphism
unique_frag_dict = {}
for key in frag_dict:
unique_frags = []
for frag in frag_dict[key]:
found = False
for f in unique_frags:
if _isomorphic(frag, f):
found = True
break
if not found:
unique_frags.append(frag)
unique_frag_dict[key] = copy.deepcopy(unique_frags)
# convert back to molecule graphs
unique_mol_graph_dict = {}
for key in unique_frag_dict:
unique_mol_graph_list = []
for fragment in unique_frag_dict[key]:
mapping = {e: i for i, e in enumerate(sorted(fragment.nodes))}
remapped = nx.relabel_nodes(fragment, mapping)
species = nx.get_node_attributes(remapped, "specie")
coords = nx.get_node_attributes(remapped, "coords")
edges = {}
for from_index, to_index, key in remapped.edges:
edge_props = fragment.get_edge_data(from_index, to_index, key=key)
edges[(from_index, to_index)] = edge_props
unique_mol_graph_list.append(
self.with_edges(
Molecule(species=species, coords=coords, charge=self.molecule.charge),
edges,
)
)
frag_key = (
str(unique_mol_graph_list[0].molecule.composition.alphabetical_formula)
+ " E"
+ str(len(unique_mol_graph_list[0].graph.edges()))
)
unique_mol_graph_dict[frag_key] = copy.deepcopy(unique_mol_graph_list)
return unique_mol_graph_dict
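    # Illustrative note: the returned dict is keyed by the fragment's alphabetical
    # formula plus its edge count (a key of the form "C1 H3 E3"), each mapping to a
    # list of unique MoleculeGraph fragments with that composition and bond count.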
def substitute_group(
self,
index,
func_grp,
strategy,
bond_order=1,
graph_dict=None,
strategy_params=None,
):
"""
Builds off of Molecule.substitute to replace an atom in self.molecule
with a functional group. This method also amends self.graph to
incorporate the new functional group.
NOTE: using a MoleculeGraph will generally produce a different graph
compared with using a Molecule or str (when not using graph_dict).
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are three options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
3. A MoleculeGraph object.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:return:
"""
# Work is simplified if a graph is already in place
if isinstance(func_grp, MoleculeGraph):
self.molecule.substitute(index, func_grp.molecule, bond_order=bond_order)
mapping = map_indices(func_grp.molecule)
for (u, v) in list(func_grp.graph.edges()):
edge_props = func_grp.graph.get_edge_data(u, v)[0]
weight = None
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
self.add_edge(mapping[u], mapping[v], weight=weight, edge_properties=edge_props)
else:
if isinstance(func_grp, Molecule):
func_grp = copy.deepcopy(func_grp)
else:
try:
func_grp = copy.deepcopy(FunctionalGroups[func_grp])
except Exception:
raise RuntimeError("Can't find functional group in list. " "Provide explicit coordinate instead")
self.molecule.substitute(index, func_grp, bond_order=bond_order)
mapping = map_indices(func_grp)
# Remove dummy atom "X"
func_grp.remove_species("X")
if graph_dict is not None:
for (u, v) in graph_dict.keys():
edge_props = graph_dict[(u, v)]
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
self.add_edge(
mapping[u],
mapping[v],
weight=weight,
edge_properties=edge_props,
)
else:
if strategy_params is None:
strategy_params = {}
strat = strategy(**strategy_params)
graph = self.with_local_env_strategy(func_grp, strat)
for (u, v) in list(graph.graph.edges()):
edge_props = graph.graph.get_edge_data(u, v)[0]
weight = None
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
if 0 not in list(graph.graph.nodes()):
# If graph indices have different indexing
u, v = (u - 1), (v - 1)
self.add_edge(
mapping[u],
mapping[v],
weight=weight,
edge_properties=edge_props,
)
def replace_group(
self,
index,
func_grp,
strategy,
bond_order=1,
graph_dict=None,
strategy_params=None,
):
"""
Builds off of Molecule.substitute and MoleculeGraph.substitute_group
to replace a functional group in self.molecule with a functional group.
This method also amends self.graph to incorporate the new functional
group.
TODO: Figure out how to replace into a ring structure.
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are three options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
3. A MoleculeGraph object.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:return:
"""
self.set_node_attributes()
neighbors = self.get_connected_sites(index)
# If the atom at index is terminal
if len(neighbors) == 1:
self.substitute_group(
index,
func_grp,
strategy,
bond_order=bond_order,
graph_dict=graph_dict,
strategy_params=strategy_params,
)
else:
rings = self.find_rings(including=[index])
if len(rings) != 0:
                raise RuntimeError(
                    "Currently functional group replacement "
                    "cannot occur at an atom within a ring structure."
                )
to_remove = set()
sizes = dict()
disconnected = self.graph.to_undirected()
disconnected.remove_node(index)
for neighbor in neighbors:
sizes[neighbor[2]] = len(nx.descendants(disconnected, neighbor[2]))
keep = max(sizes, key=lambda x: sizes[x])
for i in sizes.keys():
if i != keep:
to_remove.add(i)
self.remove_nodes(list(to_remove))
self.substitute_group(
index,
func_grp,
strategy,
bond_order=bond_order,
graph_dict=graph_dict,
strategy_params=strategy_params,
)
def find_rings(self, including=None):
"""
Find ring structures in the MoleculeGraph.
:param including: list of site indices. If
including is not None, then find_rings will
only return those rings including the specified
sites. By default, this parameter is None, and
all rings will be returned.
        :return: list of cycles, where each cycle is represented as a list of
        edges (pairs of site indices) forming the ring. If ``including`` is
        given, only rings containing at least one of those indices are
        returned; if no such ring exists, the list is empty.
"""
# Copies self.graph such that all edges (u, v) matched by edges (v, u)
undirected = self.graph.to_undirected()
directed = undirected.to_directed()
cycles_nodes = []
cycles_edges = []
# Remove all two-edge cycles
all_cycles = [c for c in nx.simple_cycles(directed) if len(c) > 2]
# Using to_directed() will mean that each cycle always appears twice
# So, we must also remove duplicates
unique_sorted = []
unique_cycles = []
for cycle in all_cycles:
if sorted(cycle) not in unique_sorted:
unique_sorted.append(sorted(cycle))
unique_cycles.append(cycle)
if including is None:
cycles_nodes = unique_cycles
else:
for i in including:
for cycle in unique_cycles:
if i in cycle and cycle not in cycles_nodes:
cycles_nodes.append(cycle)
for cycle in cycles_nodes:
edges = []
for i, e in enumerate(cycle):
edges.append((cycle[i - 1], e))
cycles_edges.append(edges)
return cycles_edges
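    # Illustrative sketch: for a six-membered ring over sites 0-5, the return value
    # contains one entry of ring edges such as
    #   [[(5, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5)]]
    # (the starting point and traversal direction may differ).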
def get_connected_sites(self, n):
"""
Returns a named tuple of neighbors of site n:
periodic_site, jimage, index, weight.
Index is the index of the corresponding site
in the original structure, weight can be
None if not defined.
:param n: index of Site in Molecule
:return: list of ConnectedSite tuples,
sorted by closest first
"""
connected_sites = set()
out_edges = list(self.graph.out_edges(n, data=True))
in_edges = list(self.graph.in_edges(n, data=True))
for u, v, d in out_edges + in_edges:
weight = d.get("weight", None)
if v == n:
site = self.molecule[u]
dist = self.molecule[v].distance(self.molecule[u])
connected_site = ConnectedSite(site=site, jimage=(0, 0, 0), index=u, weight=weight, dist=dist)
else:
site = self.molecule[v]
dist = self.molecule[u].distance(self.molecule[v])
connected_site = ConnectedSite(site=site, jimage=(0, 0, 0), index=v, weight=weight, dist=dist)
connected_sites.add(connected_site)
# return list sorted by closest sites first
connected_sites = list(connected_sites)
connected_sites.sort(key=lambda x: x.dist)
return connected_sites
def get_coordination_of_site(self, n):
"""
Returns the number of neighbors of site n.
In graph terms, simply returns degree
of node corresponding to site n.
:param n: index of site
:return (int):
"""
number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v])
return self.graph.degree(n) - number_of_self_loops
def draw_graph_to_file(
self,
filename="graph",
diff=None,
hide_unconnected_nodes=False,
hide_image_edges=True,
edge_colors=False,
node_labels=False,
weight_labels=False,
image_labels=False,
color_scheme="VESTA",
keep_dot=False,
algo="fdp",
):
"""
Draws graph using GraphViz.
The networkx graph object itself can also be drawn
with networkx's in-built graph drawing methods, but
note that this might give misleading results for
multigraphs (edges are super-imposed on each other).
If visualization is difficult to interpret,
`hide_image_edges` can help, especially in larger
graphs.
:param filename: filename to output, will detect filetype
from extension (any graphviz filetype supported, such as
pdf or png)
:param diff (StructureGraph): an additional graph to
compare with, will color edges red that do not exist in diff
and edges green that are in diff graph but not in the
reference graph
:param hide_unconnected_nodes: if True, hide unconnected
nodes
:param hide_image_edges: if True, do not draw edges that
go through periodic boundaries
:param edge_colors (bool): if True, use node colors to
color edges
:param node_labels (bool): if True, label nodes with
species and site index
:param weight_labels (bool): if True, label edges with
weights
:param image_labels (bool): if True, label edges with
their periodic images (usually only used for debugging,
edges to periodic images always appear as dashed lines)
:param color_scheme (str): "VESTA" or "JMOL"
:param keep_dot (bool): keep GraphViz .dot file for later
visualization
:param algo: any graphviz algo, "neato" (for simple graphs)
or "fdp" (for more crowded graphs) usually give good outputs
:return:
"""
if not which(algo):
raise RuntimeError("StructureGraph graph drawing requires " "GraphViz binaries to be in the path.")
# Developer note: NetworkX also has methods for drawing
# graphs using matplotlib, these also work here. However,
# a dedicated tool like GraphViz allows for much easier
# control over graph appearance and also correctly displays
# mutli-graphs (matplotlib can superimpose multiple edges).
g = self.graph.copy()
g.graph = {"nodesep": 10.0, "dpi": 300, "overlap": "false"}
# add display options for nodes
for n in g.nodes():
# get label by species name
label = "{}({})".format(str(self.molecule[n].specie), n) if node_labels else ""
# use standard color scheme for nodes
c = EL_COLORS[color_scheme].get(str(self.molecule[n].specie.symbol), [0, 0, 0])
# get contrasting font color
# magic numbers account for perceived luminescence
# https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color
fontcolor = "#000000" if 1 - (c[0] * 0.299 + c[1] * 0.587 + c[2] * 0.114) / 255 < 0.5 else "#ffffff"
# convert color to hex string
color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2])
g.add_node(
n,
fillcolor=color,
fontcolor=fontcolor,
label=label,
fontname="Helvetica-bold",
style="filled",
shape="circle",
)
edges_to_delete = []
# add display options for edges
for u, v, k, d in g.edges(keys=True, data=True):
# retrieve from/to images, set as origin if not defined
if "to_jimage" in d:
to_image = d["to_jimage"]
else:
to_image = (0, 0, 0)
# set edge style
d["style"] = "solid"
if to_image != (0, 0, 0):
d["style"] = "dashed"
if hide_image_edges:
edges_to_delete.append((u, v, k))
# don't show edge directions
d["arrowhead"] = "none"
# only add labels for images that are not the origin
if image_labels:
d["headlabel"] = "" if to_image == (0, 0, 0) else "to {}".format((to_image))
d["arrowhead"] = "normal" if d["headlabel"] else "none"
# optionally color edges using node colors
color_u = g.node[u]["fillcolor"]
color_v = g.node[v]["fillcolor"]
d["color_uv"] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000"
# optionally add weights to graph
if weight_labels:
units = g.graph.get("edge_weight_units", "")
if d.get("weight"):
d["label"] = "{:.2f} {}".format(d["weight"], units)
# update edge with our new style attributes
g.edges[u, v, k].update(d)
# optionally remove periodic image edges,
# these can be confusing due to periodic boundaries
if hide_image_edges:
for edge_to_delete in edges_to_delete:
g.remove_edge(*edge_to_delete)
# optionally hide unconnected nodes,
# these can appear when removing periodic edges
if hide_unconnected_nodes:
g = g.subgraph([n for n in g.degree() if g.degree()[n] != 0])
# optionally highlight differences with another graph
if diff:
diff = self.diff(diff, strict=True)
green_edges = []
red_edges = []
for u, v, k, d in g.edges(keys=True, data=True):
if (u, v, d["to_jimage"]) in diff["self"]:
# edge has been deleted
red_edges.append((u, v, k))
elif (u, v, d["to_jimage"]) in diff["other"]:
# edge has been added
green_edges.append((u, v, k))
for u, v, k in green_edges:
g.edges[u, v, k].update({"color_uv": "#00ff00"})
for u, v, k in red_edges:
g.edges[u, v, k].update({"color_uv": "#ff0000"})
basename, extension = os.path.splitext(filename)
extension = extension[1:]
write_dot(g, basename + ".dot")
with open(filename, "w") as f:
args = [algo, "-T", extension, basename + ".dot"]
rs = subprocess.Popen(args, stdout=f, stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if rs.returncode != 0:
raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode))
if not keep_dot:
os.remove(basename + ".dot")
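# Editor-added usage sketch (not part of the original source): rendering the
# graph to a PDF, assuming GraphViz binaries are installed and on the PATH.
#
#   >>> mg.draw_graph_to_file("molecule.pdf", algo="neato",
#   ...                       node_labels=True, weight_labels=True)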
def as_dict(self):
"""
As in :class:`pymatgen.core.Molecule` except
with using `to_dict_of_dicts` from NetworkX
to store graph information.
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"molecule": self.molecule.as_dict(),
"graphs": json_graph.adjacency_data(self.graph),
}
return d
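# Editor-added usage sketch (not part of the original source): round-tripping
# through the dict representation; MoleculeGraph.from_dict is the usual
# MSONable counterpart and is assumed to be defined elsewhere in this class.
#
#   >>> d = mg.as_dict()
#   >>> mg_copy = MoleculeGraph.from_dict(d)
#   >>> mg_copy == mg
#   True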
def __str__(self):
s = "Molecule Graph"
s += "\nMolecule: \n{}".format(self.molecule.__str__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __repr__(self):
s = "Molecule Graph"
s += "\nMolecule: \n{}".format(self.molecule.__repr__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __len__(self):
"""
:return: length of Molecule / number of nodes in graph
"""
return len(self.molecule)
def sort(self, key=None, reverse=False):
"""
Same as Molecule.sort(), also remaps nodes in graph.
:param key:
:param reverse:
:return:
"""
old_molecule = self.molecule.copy()
# sort Molecule
self.molecule._sites = sorted(self.molecule._sites, key=key, reverse=reverse)
# apply Molecule ordering to graph
mapping = {idx: self.molecule.index(site) for idx, site in enumerate(old_molecule)}
self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)
# normalize directions of edges
edges_to_remove = []
edges_to_add = []
for u, v, k, d in self.graph.edges(keys=True, data=True):
if v < u:
new_v, new_u, new_d = u, v, d.copy()
new_d["to_jimage"] = (0, 0, 0)
edges_to_remove.append((u, v, k))
edges_to_add.append((new_u, new_v, new_d))
# add/delete marked edges
for edge_to_remove in edges_to_remove:
self.graph.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
self.graph.add_edge(u, v, **d)
def __eq__(self, other):
"""
Two MoleculeGraphs are equal if they have equal Molecules,
and have the same edges between Sites. Edge weights can be
different and MoleculeGraphs can still be considered equal.
:param other: MoleculeGraph
:return (bool):
"""
# sort for consistent node indices
# Site.coords is used as a convenient lookup key here;
# Molecule sites are not periodic, so frac_coords do not apply
try:
mapping = {tuple(site.coords): self.molecule.index(site) for site in other.molecule}
except ValueError:
return False
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])
edges = {(u, v) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
return (edges == edges_other) and (self.molecule == other_sorted.molecule)
def isomorphic_to(self, other):
"""
Checks if the graphs of two MoleculeGraphs are isomorphic to one
another. In order to prevent problems with misdirected edges, both
graphs are converted into undirected nx.Graph objects.
:param other: MoleculeGraph object to be compared.
:return: bool
"""
if len(self.molecule) != len(other.molecule):
return False
if self.molecule.composition.alphabetical_formula != other.molecule.composition.alphabetical_formula:
return False
if len(self.graph.edges()) != len(other.graph.edges()):
return False
return _isomorphic(self.graph, other.graph)
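# Editor-added usage sketch (not part of the original source): isomorphism is
# independent of site ordering, so two graphs of the same molecule built from
# differently ordered Molecules still compare as isomorphic.
#
#   >>> mg.isomorphic_to(mg_reordered)     # mg_reordered: hypothetical graph
#   True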
def diff(self, other, strict=True):
"""
Compares two MoleculeGraphs. Returns dict with
keys 'self', 'other', 'both' with edges that are
present in only one MoleculeGraph ('self' and
'other'), and edges that are present in both.
The Jaccard distance is a simple measure of the
dissimilarity between two MoleculeGraphs (ignoring
edge weights), and is defined by 1 - (size of the
intersection / size of the union) of the sets of
edges. This is returned with key 'dist'.
Important note: all node indices are in terms
of the MoleculeGraph this method is called
from, not the 'other' MoleculeGraph: there
is no guarantee the node indices will be the
same if the underlying Molecules are ordered
differently.
:param other: MoleculeGraph
:param strict: if False, will compare bonds
from different Molecules, with node indices
replaced by Species strings, will not count
number of occurrences of bonds
:return:
"""
if self.molecule != other.molecule and strict:
raise ValueError("Meaningless to compare MoleculeGraphs if " "corresponding Molecules are different.")
if strict:
# sort for consistent node indices
# Site.coords is used as a convenient lookup key, mirroring __eq__;
# Molecule sites are not periodic and have no frac_coords
mapping = {tuple(site.coords): self.molecule.index(site) for site in other.molecule}
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])
edges = {(u, v, d.get("to_jimage", (0, 0, 0))) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {
(u, v, d.get("to_jimage", (0, 0, 0))) for u, v, d in other_sorted.graph.edges(keys=False, data=True)
}
else:
edges = {
(str(self.molecule[u].specie), str(self.molecule[v].specie))
for u, v, d in self.graph.edges(keys=False, data=True)
}
edges_other = {
(str(other.molecule[u].specie), str(other.molecule[v].specie))
for u, v, d in other.graph.edges(keys=False, data=True)
}
if len(edges) == 0 and len(edges_other) == 0:
jaccard_dist = 0 # by definition
else:
jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))
return {
"self": edges - edges_other,
"other": edges_other - edges,
"both": edges.intersection(edges_other),
"dist": jaccard_dist,
}
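# Editor-added usage sketch (not part of the original source): comparing two
# bonding assignments for the same Molecule, e.g. graphs built with two
# different local-environment strategies (names and values are illustrative).
#
#   >>> report = mg_babel.diff(mg_cutoff)
#   >>> sorted(report)
#   ['both', 'dist', 'other', 'self']
#   >>> report["dist"]          # Jaccard distance between the two edge sets
#   0.25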
| 37.748479 | 118 | 0.579393 |
811be09e8efd00c1aea606c4e23d536a962dbfd3 | 65 | py | Python | maple/backend/singularity/__init__.py | akashdhruv/maple | 11e562f51b18b2251ea507c629a1981b031d2f35 | [
"MIT"
] | null | null | null | maple/backend/singularity/__init__.py | akashdhruv/maple | 11e562f51b18b2251ea507c629a1981b031d2f35 | [
"MIT"
] | 5 | 2021-12-24T08:55:42.000Z | 2022-02-13T16:59:30.000Z | maple/backend/singularity/__init__.py | akashdhruv/maple | 11e562f51b18b2251ea507c629a1981b031d2f35 | [
"MIT"
] | null | null | null | from . import image
from . import container
from . import system
| 16.25 | 23 | 0.769231 |
811c0a3b1e48996b84a2d4750219f62c35f29d83 | 1,064 | py | Python | articles/views.py | Ahmed-skb/blogyfy | 2cfa3d9503f1846ccd89c2bf1934293eb97ad44a | [
"MIT"
] | null | null | null | articles/views.py | Ahmed-skb/blogyfy | 2cfa3d9503f1846ccd89c2bf1934293eb97ad44a | [
"MIT"
] | null | null | null | articles/views.py | Ahmed-skb/blogyfy | 2cfa3d9503f1846ccd89c2bf1934293eb97ad44a | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import Article
from django.contrib.auth.decorators import login_required
from . import forms
| 34.322581 | 81 | 0.693609 |
811c9730165b0d65d78610ed7c5cc6d9f073addc | 5,039 | py | Python | sifter/grammar/grammar.py | russell/sifter | 03e85349fd2329439ae3f7eb3c1f484ba2ebf807 | [
"BSD-2-Clause"
] | null | null | null | sifter/grammar/grammar.py | russell/sifter | 03e85349fd2329439ae3f7eb3c1f484ba2ebf807 | [
"BSD-2-Clause"
] | null | null | null | sifter/grammar/grammar.py | russell/sifter | 03e85349fd2329439ae3f7eb3c1f484ba2ebf807 | [
"BSD-2-Clause"
] | 1 | 2020-08-19T06:30:47.000Z | 2020-08-19T06:30:47.000Z | # Parser based on RFC 5228, especially the grammar as defined in section 8. All
# references are to sections in RFC 5228 unless stated otherwise.
import ply.yacc
import sifter.grammar
from sifter.grammar.lexer import tokens
import sifter.handler
import logging
__all__ = ('parser',)
def p_commands_list(p):
"""commands : commands command"""
p[0] = p[1]
# section 3.2: REQUIRE command must come before any other commands
if p[2].RULE_IDENTIFIER == 'REQUIRE':
if any(command.RULE_IDENTIFIER != 'REQUIRE'
for command in p[0].commands):
log = logging.getLogger("sifter")
log.error(("REQUIRE command on line %d must come before any "
"other non-REQUIRE commands" % p.lineno(2)))
raise SyntaxError
# section 3.1: ELSIF and ELSE must follow IF or another ELSIF
elif p[2].RULE_IDENTIFIER in ('ELSIF', 'ELSE'):
if p[0].commands[-1].RULE_IDENTIFIER not in ('IF', 'ELSIF'):
log = logging.getLogger("sifter")
log.error(("ELSIF/ELSE command on line %d must follow an IF/ELSIF "
"command" % p.lineno(2)))
raise SyntaxError
p[0].commands.append(p[2])
def p_commands_empty(p):
"""commands : """
p[0] = sifter.grammar.CommandList()
def p_command(p):
"""command : IDENTIFIER arguments ';'
| IDENTIFIER arguments block"""
#print("COMMAND:", p[1], p[2], p[3])
tests = p[2].get('tests')
block = None
if p[3] != ';': block = p[3]
handler = sifter.handler.get('command', p[1])
if handler is None:
log = logging.getLogger("sifter")
log.error(("No handler registered for command '%s' on line %d" %
(p[1], p.lineno(1))))
raise SyntaxError
p[0] = handler(arguments=p[2]['args'], tests=tests, block=block)
def p_command_error(p):
"""command : IDENTIFIER error ';'
| IDENTIFIER error block"""
log = logging.getLogger("sifter")
log.error(("Syntax error in command definition after %s on line %d" %
(p[1], p.lineno(1))))
raise SyntaxError
def p_block(p):
"""block : '{' commands '}' """
# section 3.2: REQUIRE command must come before any other commands,
# which means it can't be in the block of another command
if any(command.RULE_IDENTIFIER == 'REQUIRE'
for command in p[2].commands):
log = logging.getLogger("sifter")
log.error(("REQUIRE command not allowed inside of a block (line %d)" %
(p.lineno(2))))
raise SyntaxError
p[0] = p[2]
def p_block_error(p):
"""block : '{' error '}'"""
log = logging.getLogger("sifter")
log.error(("Syntax error in command block that starts on line %d" %
(p.lineno(1),)))
raise SyntaxError
def p_arguments(p):
"""arguments : argumentlist
| argumentlist test
| argumentlist '(' testlist ')'"""
p[0] = { 'args' : p[1], }
if len(p) > 2:
if p[2] == '(':
p[0]['tests'] = p[3]
else:
p[0]['tests'] = [ p[2] ]
def p_testlist_error(p):
"""arguments : argumentlist '(' error ')'"""
log = logging.getLogger("sifter")
log.error(("Syntax error in test list that starts on line %d" % p.lineno(2)))
raise SyntaxError
def p_argumentlist_list(p):
"""argumentlist : argumentlist argument"""
p[0] = p[1]
p[0].append(p[2])
def p_argumentlist_empty(p):
"""argumentlist : """
p[0] = []
def p_test(p):
"""test : IDENTIFIER arguments"""
#print("TEST:", p[1], p[2])
tests = p[2].get('tests')
handler = sifter.handler.get('test', p[1])
if handler is None:
log = logging.getLogger("sifter")
log.error(("No handler registered for test '%s' on line %d" %
(p[1], p.lineno(1))))
raise SyntaxError
p[0] = handler(arguments=p[2]['args'], tests=tests)
def p_testlist_list(p):
"""testlist : test ',' testlist"""
p[0] = p[3]
p[0].insert(0, p[1])
def p_testlist_single(p):
"""testlist : test"""
p[0] = [ p[1] ]
def p_argument_stringlist(p):
"""argument : '[' stringlist ']'"""
p[0] = p[2]
def p_argument_string(p):
"""argument : string"""
# for simplicity, we treat all single strings as a string list
p[0] = [ p[1] ]
def p_argument_number(p):
"""argument : NUMBER"""
p[0] = p[1]
def p_argument_tag(p):
"""argument : TAG"""
p[0] = sifter.grammar.Tag(p[1])
def p_stringlist_error(p):
"""argument : '[' error ']'"""
log = logging.getLogger("sifter")
log.error(("Syntax error in string list that starts on line %d" %
p.lineno(1)))
raise SyntaxError
def p_stringlist_list(p):
"""stringlist : string ',' stringlist"""
p[0] = p[3]
p[0].insert(0, p[1])
def p_stringlist_single(p):
"""stringlist : string"""
p[0] = [ p[1] ]
def p_string(p):
"""string : QUOTED_STRING"""
p[0] = sifter.grammar.String(p[1])
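# Editor note: `__all__` above exports a `parser` object built from the p_*
# rules in this module. A minimal sketch of that final step (the original
# module may define further rules such as p_error, or pass extra options
# like debug flags or an output directory):
parser = ply.yacc.yacc()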
| 29.467836 | 81 | 0.581663 |
811ce2660d66f66cb91158b2b6a72ae00e0a02c5 | 3,904 | py | Python | multidoc_mnb.py | dropofwill/author-attr-experiments | a90e2743591358a6253f3b3664f5e398517f84bc | [
"Unlicense"
] | 2 | 2015-01-06T12:53:39.000Z | 2018-02-01T13:57:09.000Z | multidoc_mnb.py | dropofwill/author-attr-experiments | a90e2743591358a6253f3b3664f5e398517f84bc | [
"Unlicense"
] | null | null | null | multidoc_mnb.py | dropofwill/author-attr-experiments | a90e2743591358a6253f3b3664f5e398517f84bc | [
"Unlicense"
] | null | null | null | from sklearn import datasets
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import ShuffleSplit
from sklearn.cross_validation import Bootstrap
from sklearn.naive_bayes import MultinomialNB
from sklearn.grid_search import GridSearchCV
from scipy.stats import sem
from pprint import pprint
import numpy as np
import pylab as pl
import string
import matplotlib.pyplot as plt
# Calculates the mean of the scores with the standard deviation
rand_baseline = list()
test_results = list()
sem_results = list()
com_results = list()
#test_docs("problemA")
for i in string.uppercase[:13]:
test_docs("problem"+i)
#graph(rand_baseline,test_results,com_results,13)
import os
import time as tm
sub_dir = "Results/"
location = "multiDoc" + tm.strftime("%Y%m%d-%H%M%S") + ".txt"
with open(os.path.join(sub_dir, location), 'w') as myFile:
myFile.write(str(rand_baseline))
myFile.write("\n")
myFile.write(str(test_results))
myFile.write("\n")
myFile.write(str(sem_results))
myFile.write("\n")
myFile.write(str(com_results))
# CV with ShuffleSpit
'''
cv = ShuffleSplit(n_samples, n_iter=100, test_size=0.2, random_state=0)
test_scores = cross_val_score(mnb, X, y, cv=cv)
print np.mean(test_scores)
'''
# Single run through
'''
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
print X_train.shape
print y_train.shape
print X_test.shape
print y_test.shape
mnb = MultinomialNB().fit(X_train, y_train)
print mnb.score(X_test, y_test)
''' | 27.111111 | 175 | 0.733863 |
811e73ee0c3fc584081650f0224040703f26ea00 | 386 | py | Python | tabular/__init__.py | yamins81/tabular | 1caf091c8c395960a9ad7078f95158b533cc52dd | [
"MIT"
] | 6 | 2015-05-24T20:59:31.000Z | 2021-05-31T14:34:18.000Z | tabular/__init__.py | yamins81/tabular | 1caf091c8c395960a9ad7078f95158b533cc52dd | [
"MIT"
] | 3 | 2016-06-17T20:02:27.000Z | 2020-02-13T19:20:40.000Z | tabular/__init__.py | yamins81/tabular | 1caf091c8c395960a9ad7078f95158b533cc52dd | [
"MIT"
] | 8 | 2015-08-22T17:09:40.000Z | 2022-02-10T14:47:40.000Z | import io
import fast
import spreadsheet
import tab
import utils
import web
from io import *
from fast import *
from spreadsheet import *
from tab import *
from utils import *
from web import *
__all__ = []
__all__.extend(io.__all__)
__all__.extend(fast.__all__)
__all__.extend(spreadsheet.__all__)
__all__.extend(tab.__all__)
__all__.extend(utils.__all__)
__all__.extend(web.__all__) | 18.380952 | 35 | 0.795337 |
811eb205fb191ad48270915e49e393d586962cb9 | 26,184 | py | Python | smipyping/_targetstable.py | KSchopmeyer/smipyping | 9c60b3489f02592bd9099b8719ca23ae43a9eaa5 | [
"MIT"
] | null | null | null | smipyping/_targetstable.py | KSchopmeyer/smipyping | 9c60b3489f02592bd9099b8719ca23ae43a9eaa5 | [
"MIT"
] | 19 | 2017-10-18T15:31:25.000Z | 2020-03-04T19:31:59.000Z | smipyping/_targetstable.py | KSchopmeyer/smipyping | 9c60b3489f02592bd9099b8719ca23ae43a9eaa5 | [
"MIT"
] | null | null | null | # (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define the base of targets (i.e. systems to be tested)
TargetID = Column(Integer(11), primary_key=True)
IPAddress = Column(String(15), nullable=False)
CompanyID = Column(Integer(11), ForeignKey("Companies.CompanyID"))
Namespace = Column(String(30), nullable=False)
SMIVersion = Column(String(15), nullable=False)
Product = Column(String(30), nullable=False)
Principal = Column(String(30), nullable=False)
Credential = Column(String(30), nullable=False)
CimomVersion = Column(String(30), nullable=False)
InteropNamespace = Column(String(30), nullable=False)
Notify = Column(Enum('Enabled', 'Disabled'), default='Disabled')
NotifyUsers = Column(String(12), nullable=False)
ScanEnabled = Column(Enum('Enabled', 'Disabled'), default='Enabled')
Protocol = Column(String(10), default='http')
Port = Column(String(10), nullable=False)
"""
# TODO change ip_address to hostname where host name is name : port
from __future__ import print_function, absolute_import
import os
import csv
import re
from collections import OrderedDict
from textwrap import wrap
import six
from mysql.connector import Error as mysqlerror
from ._dbtablebase import DBTableBase
from ._mysqldbmixin import MySQLDBMixin
from ._common import get_url_str
from ._logging import AUDIT_LOGGER_NAME, get_logger
from ._companiestable import CompaniesTable
__all__ = ['TargetsTable']
| 37.512894 | 80 | 0.588718 |
811f8f9936b98c79ae19160b05e347adf2265632 | 1,131 | py | Python | dev/Code/Framework/AzFramework/CodeGen/AzEBusInline.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 1,738 | 2017-09-21T10:59:12.000Z | 2022-03-31T21:05:46.000Z | dev/Code/Framework/AzFramework/CodeGen/AzEBusInline.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 427 | 2017-09-29T22:54:36.000Z | 2022-02-15T19:26:50.000Z | dev/Code/Framework/AzFramework/CodeGen/AzEBusInline.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 671 | 2017-09-21T08:04:01.000Z | 2022-03-29T14:30:07.000Z | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import os
from az_code_gen.base import *
from AzReflectionCpp import format_cpp_annotations
# Factory function - called from launcher
def create_drivers(env):
return [AZEBusInline_Driver(env)]
| 39 | 95 | 0.763042 |
812066ffbcb9013a2cc703f8d57626a63964c5af | 9,057 | py | Python | QUANTAXIS/QASU/crawl_eastmoney.py | QUANTAXISER/QUANTAXIS | 6ebd727b2900e8910fa45814bf45eeffca395250 | [
"MIT"
] | 1 | 2018-09-09T02:55:10.000Z | 2018-09-09T02:55:10.000Z | QUANTAXIS/QASU/crawl_eastmoney.py | frosthaoz/QUANTAXIS | f5f482418e5f6e23ac3530089b8d17300d931b48 | [
"MIT"
] | null | null | null | QUANTAXIS/QASU/crawl_eastmoney.py | frosthaoz/QUANTAXIS | f5f482418e5f6e23ac3530089b8d17300d931b48 | [
"MIT"
] | 3 | 2018-11-29T07:07:56.000Z | 2021-02-09T17:24:56.000Z | import os
from QUANTAXIS.QASetting import QALocalize
#from QUANTAXIS_CRAWLY.run_selenium_alone import (read_east_money_page_zjlx_to_sqllite, open_chrome_driver, close_chrome_dirver)
from QUANTAXIS_CRAWLY.run_selenium_alone import *
import urllib
import pandas as pd
import time
from QUANTAXIS.QAUtil import (DATABASE)
'''
request
''' | 30.392617 | 128 | 0.510765 |
8120aa4d76824186b0ed660869921ca64f9eaede | 667 | py | Python | wsgi.py | javicacheiro/salt-git-synchronizer-proxy | c93de5c0b26afe2b9ec72156497894df7f15d692 | [
"Apache-2.0"
] | null | null | null | wsgi.py | javicacheiro/salt-git-synchronizer-proxy | c93de5c0b26afe2b9ec72156497894df7f15d692 | [
"Apache-2.0"
] | null | null | null | wsgi.py | javicacheiro/salt-git-synchronizer-proxy | c93de5c0b26afe2b9ec72156497894df7f15d692 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import logging
import sys
from app import app as application
# Set default log level for the general logger
# each handler can then restrict the messages logged
application.logger.setLevel(logging.INFO)
setup_flask_logging()
if __name__ == '__main__':
application.run()
| 24.703704 | 64 | 0.721139 |
8120c7633f5990d446bca4c8a9b2275a25f1de63 | 202 | py | Python | game/base/enemy.py | PythonixCoders/PyWeek29 | 5c7492466481dec40619272a3da7fa4b9a72c1d6 | [
"MIT"
] | 8 | 2020-03-15T14:58:46.000Z | 2020-04-26T13:44:10.000Z | game/base/enemy.py | flipcoder/Butterfly-Destroyers | 855b1981ea67796e0ce0d82b525a1cb75a9e358b | [
"MIT"
] | null | null | null | game/base/enemy.py | flipcoder/Butterfly-Destroyers | 855b1981ea67796e0ce0d82b525a1cb75a9e358b | [
"MIT"
] | 4 | 2020-03-23T12:38:55.000Z | 2021-12-25T16:32:54.000Z | #!/usr/bin/env python
from game.base.being import Being
| 20.2 | 46 | 0.648515 |
8120d71f9f5dd96debf5a9a973a1d872ce6a5597 | 561 | py | Python | main/rates/migrations/0002_auto_20170625_1510.py | Hawk94/coin_tracker | 082909e17308a8dd460225c1b035751d12a27106 | [
"MIT"
] | null | null | null | main/rates/migrations/0002_auto_20170625_1510.py | Hawk94/coin_tracker | 082909e17308a8dd460225c1b035751d12a27106 | [
"MIT"
] | null | null | null | main/rates/migrations/0002_auto_20170625_1510.py | Hawk94/coin_tracker | 082909e17308a8dd460225c1b035751d12a27106 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-25 15:10
from __future__ import unicode_literals
from django.db import migrations
| 21.576923 | 48 | 0.568627 |
8120ff16ad9dc50c24922ea0574196c48067cace | 2,357 | py | Python | setup.py | dojeda/quetzal-openapi-client | d9d4dc99bb425a3f89dcbb80d5096f554bc42fff | [
"BSD-3-Clause"
] | null | null | null | setup.py | dojeda/quetzal-openapi-client | d9d4dc99bb425a3f89dcbb80d5096f554bc42fff | [
"BSD-3-Clause"
] | 1 | 2019-09-17T09:11:01.000Z | 2019-09-17T09:11:01.000Z | setup.py | dojeda/quetzal-openapi-client | d9d4dc99bb425a3f89dcbb80d5096f554bc42fff | [
"BSD-3-Clause"
] | 1 | 2021-04-28T13:06:38.000Z | 2021-04-28T13:06:38.000Z | # coding: utf-8
"""
Quetzal API
Quetzal: an API to manage data files and their associated metadata.
OpenAPI spec version: 0.5.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from setuptools import setup, find_packages # noqa: H301
NAME = "quetzal-openapi-client"
VERSION = "0.5.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="Quetzal API auto-generated client",
author='David Ojeda',
author_email="[email protected]",
url="https://github.com/quet.zal/quetzal-openapi-client",
project_urls={
"Documentation": "https://quetzal-openapi-client.readthedocs.io",
"Code": "https://github.com/quetz-al/quetzal-openapi-client",
"Issue tracker": "https://github.com/quetz-al/quetzal-openapi-client/issues",
},
license="BSD-3-Clause",
keywords=["OpenAPI", "OpenAPI-Generator", "Quetzal API"],
install_requires=REQUIRES,
packages=find_packages(exclude=['test', 'docs']),
namespace_packages=['quetzal'],
include_package_data=True,
long_description="""\
quetzal-openapi-client
======================
This is an auto-generated package using
[openapi-generator](https://github.com/OpenAPITools/openapi-generator)
from an OpenAPI specification of the Quetzal API.
An improvement layer on this client exists in the quetzal-client package.
Quetzal is an API to manage data files and their associated metadata.
See more at [quetz.al](https://quetz.al) and its
[readthedocs documentation](https://quetzal-api.readthedocs.io).
""",
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Database :: Front-Ends',
'Topic :: Internet :: WWW/HTTP',
'Topic :: System :: Archiving',
],
)
| 32.287671 | 85 | 0.666101 |
81229a54be34a90af845ce0b0f142321ea5ad691 | 11,115 | py | Python | youtube_dl/extractor/turner.py | jonyg80/youtube-dl | ef3a87fb77891329de1d3dbebfee53bf50645261 | [
"Unlicense"
] | 66,635 | 2019-03-10T21:34:18.000Z | 2022-03-31T23:50:31.000Z | youtube_dl/extractor/turner.py | jonyg80/youtube-dl | ef3a87fb77891329de1d3dbebfee53bf50645261 | [
"Unlicense"
] | 10,936 | 2019-03-10T21:35:47.000Z | 2022-03-31T23:46:52.000Z | youtube_dl/extractor/turner.py | jonyg80/youtube-dl | ef3a87fb77891329de1d3dbebfee53bf50645261 | [
"Unlicense"
] | 15,194 | 2019-03-10T21:09:27.000Z | 2022-03-31T22:13:49.000Z | # coding: utf-8
from __future__ import unicode_literals
import re
from .adobepass import AdobePassIE
from ..compat import compat_str
from ..utils import (
fix_xml_ampersands,
xpath_text,
int_or_none,
determine_ext,
float_or_none,
parse_duration,
xpath_attr,
update_url_query,
ExtractorError,
strip_or_none,
url_or_none,
)
| 42.586207 | 134 | 0.506163 |
81231c1bf7b40bb3a00ed96fce4e7257f1de32c5 | 1,188 | py | Python | ml/sandbox/00-data.py | robk-dev/algo-trading | aa8d76ee739431ab24407fe094e0753c588dc8c6 | [
"MIT"
] | 1 | 2021-03-14T23:52:04.000Z | 2021-03-14T23:52:04.000Z | ml/sandbox/00-data.py | robk-dev/algo-trading | aa8d76ee739431ab24407fe094e0753c588dc8c6 | [
"MIT"
] | null | null | null | ml/sandbox/00-data.py | robk-dev/algo-trading | aa8d76ee739431ab24407fe094e0753c588dc8c6 | [
"MIT"
] | null | null | null | from alpha_vantage.timeseries import TimeSeries
from pprint import pprint
import json
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('symbol', type=str, help="the stock symbol you want to download")
parser.add_argument('time_window', type=str, choices=[
'intraday', 'daily', 'daily_adj'], help="the time period you want to download the stock history for")
namespace = parser.parse_args()
save_dataset(**vars(namespace))
| 34.941176 | 125 | 0.683502 |
8123847da358e93698586a58b0a106958f59df07 | 12,570 | py | Python | tests/zpill.py | al3pht/cloud-custodian | ce6613d1b716f336384c5e308eee300389e6bf50 | [
"Apache-2.0"
] | 2,415 | 2018-12-04T00:37:58.000Z | 2022-03-31T12:28:56.000Z | tests/zpill.py | al3pht/cloud-custodian | ce6613d1b716f336384c5e308eee300389e6bf50 | [
"Apache-2.0"
] | 3,272 | 2018-12-03T23:58:17.000Z | 2022-03-31T21:15:32.000Z | tests/zpill.py | al3pht/cloud-custodian | ce6613d1b716f336384c5e308eee300389e6bf50 | [
"Apache-2.0"
] | 773 | 2018-12-06T09:43:23.000Z | 2022-03-30T20:44:43.000Z | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import fnmatch
from io import StringIO
import json
import os
import shutil
import zipfile
import re
from datetime import datetime, timedelta, tzinfo
from distutils.util import strtobool
import boto3
import placebo
from botocore.response import StreamingBody
from placebo import pill
from c7n.testing import CustodianTestCore
from .constants import ACCOUNT_ID
# Custodian Test Account. This is used only for testing.
# Access is available for community project maintainers.
###########################################################################
# BEGIN PLACEBO MONKEY PATCH
#
# Placebo is effectively abandoned upstream, since mitch went back to work at AWS, irony...
# These monkeypatch patches represent fixes on trunk of that repo that have not been released
# into an extant version, we carry them here. We can drop this when this issue is resolved
#
# https://github.com/garnaat/placebo/issues/63
#
# License - Apache 2.0
# Copyright (c) 2015 Mitch Garnaat
utc = UTC()
def deserialize(obj):
"""Convert JSON dicts back into objects."""
# Be careful of shallow copy here
target = dict(obj)
class_name = None
if "__class__" in target:
class_name = target.pop("__class__")
if "__module__" in obj:
obj.pop("__module__")
# Use getattr(module, class_name) for custom types if needed
if class_name == "datetime":
return datetime(tzinfo=utc, **target)
if class_name == "StreamingBody":
return StringIO(target["body"])
# Return unrecognized structures as-is
return obj
def serialize(obj):
"""Convert objects into JSON structures."""
# Record class and module information for deserialization
result = {"__class__": obj.__class__.__name__}
try:
result["__module__"] = obj.__module__
except AttributeError:
pass
# Convert objects to dictionary representation based on type
if isinstance(obj, datetime):
result["year"] = obj.year
result["month"] = obj.month
result["day"] = obj.day
result["hour"] = obj.hour
result["minute"] = obj.minute
result["second"] = obj.second
result["microsecond"] = obj.microsecond
return result
if isinstance(obj, StreamingBody):
result["body"] = obj.read()
obj._raw_stream = StringIO(result["body"])
obj._amount_read = 0
return result
if isinstance(obj, bytes):
return obj.decode('utf8')
# Raise a TypeError if the object isn't recognized
raise TypeError("Type not serializable")
pill.FakeHttpResponse.raw = None
placebo.pill.serialize = serialize
placebo.pill.deserialize = deserialize
# END PLACEBO MONKEY
##########################################################################
def attach(session, data_path, prefix=None, debug=False):
pill = ZippedPill(data_path, prefix=prefix, debug=debug)
pill.attach(session, prefix)
return pill
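# Editor-added usage sketch (not part of the original module): how `attach`
# is typically used from a test, assuming placebo-style response data has
# been recorded under `data_path` (the path below is illustrative).
#
#   >>> session = boto3.Session(region_name="us-east-1")
#   >>> pill = attach(session, "tests/data/placebo/test_ec2_describe")
#   >>> pill.playback()
#   >>> ec2 = session.client("ec2")   # API calls now replay recorded responses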
| 33.699732 | 93 | 0.605091 |
8123d51391f52c37336172ab4d3305871857e10f | 16,865 | py | Python | flexget/tests/test_next_series_seasons.py | metaMMA/Flexget | a38986422461d7935ead1e2b4ed4c88bcd0a90f5 | [
"MIT"
] | null | null | null | flexget/tests/test_next_series_seasons.py | metaMMA/Flexget | a38986422461d7935ead1e2b4ed4c88bcd0a90f5 | [
"MIT"
] | 1 | 2017-10-09T23:06:44.000Z | 2017-10-09T23:06:44.000Z | flexget/tests/test_next_series_seasons.py | metaMMA/Flexget | a38986422461d7935ead1e2b4ed4c88bcd0a90f5 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import pytest
from flexget.entry import Entry
# TODO Add more standard tests
| 37.645089 | 116 | 0.520308 |
8123dd148da3e7a93c319e5be784b12da6c27afd | 22,630 | py | Python | pymatgen/analysis/wulff.py | hpatel1567/pymatgen | 8304b25464206c74305214e45935df90bab95500 | [
"MIT"
] | 1 | 2020-02-08T08:20:45.000Z | 2020-02-08T08:20:45.000Z | pymatgen/analysis/wulff.py | hpatel1567/pymatgen | 8304b25464206c74305214e45935df90bab95500 | [
"MIT"
] | null | null | null | pymatgen/analysis/wulff.py | hpatel1567/pymatgen | 8304b25464206c74305214e45935df90bab95500 | [
"MIT"
] | null | null | null | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module define a WulffShape class to generate the Wulff shape from
a lattice, a list of indices and their corresponding surface energies,
and the total area and volume of the wulff shape,the weighted surface energy,
the anisotropy and shape_factor can also be calculated.
In support of plotting from a given view in terms of miller index.
The lattice is from the conventional unit cell, and (hkil) for hexagonal
lattices.
If you use this code extensively, consider citing the following:
Tran, R.; Xu, Z.; Radhakrishnan, B.; Winston, D.; Persson, K. A.; Ong, S. P.
(2016). Surface energies of elemental crystals. Scientific Data.
"""
from pymatgen.core.structure import Structure
from pymatgen.util.coord import get_angle
import numpy as np
import scipy as sp
from scipy.spatial import ConvexHull
import logging
import warnings
__author__ = 'Zihan Xu, Richard Tran, Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Zihan Xu'
__email__ = '[email protected]'
__date__ = 'May 5 2016'
logger = logging.getLogger(__name__)
def hkl_tuple_to_str(hkl):
"""
Prepare for display on plots
"(hkl)" for surfaces
Args:
hkl: in the form of [h, k, l] or (h, k, l)
"""
str_format = '($'
for x in hkl:
if x < 0:
str_format += '\\overline{' + str(-x) + '}'
else:
str_format += str(x)
str_format += '$)'
return str_format
def get_tri_area(pts):
"""
Given a list of coords for 3 points,
Compute the area of this triangle.
Args:
pts: [a, b, c] three points
"""
a, b, c = pts[0], pts[1], pts[2]
v1 = np.array(b) - np.array(a)
v2 = np.array(c) - np.array(a)
area_tri = abs(sp.linalg.norm(sp.cross(v1, v2)) / 2)
return area_tri
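# Editor-added usage sketch (not part of the original module): area of a
# right triangle with unit legs along x and y.
#
#   >>> get_tri_area([[0, 0, 0], [1, 0, 0], [0, 1, 0]])
#   0.5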
| 35.194401 | 107 | 0.576712 |
812522326c06afbf43f1bd6cee31bd8b7b273277 | 6,805 | py | Python | app/sensor.py | sosprz/nettemp | 334b3124263267c931bd7dc5c1bd8eb70614b4ef | [
"MIT"
] | 51 | 2015-01-03T01:37:25.000Z | 2021-11-03T18:07:42.000Z | app/sensor.py | sosprz/nettemp | 334b3124263267c931bd7dc5c1bd8eb70614b4ef | [
"MIT"
] | 18 | 2015-03-06T18:46:51.000Z | 2021-04-02T08:02:01.000Z | app/sensor.py | sosprz/nettemp | 334b3124263267c931bd7dc5c1bd8eb70614b4ef | [
"MIT"
] | 51 | 2015-02-04T18:53:54.000Z | 2022-02-16T20:40:45.000Z | from app import app
from flask import Flask, request, jsonify, g
import sqlite3
import os
import json
from random import randint
from flask_jwt_extended import jwt_required
import datetime
from flask_mysqldb import MySQL
mysql = MySQL()
| 27.439516 | 246 | 0.603527 |
812528562760727190e13a89643039e9938a674f | 632 | py | Python | tests/either_catch_test.py | funnel-io/python-on-rails | cccd2284c7dab32a37d573042531a54454164f6a | [
"MIT"
] | 1 | 2022-02-08T11:12:12.000Z | 2022-02-08T11:12:12.000Z | tests/either_catch_test.py | funnel-io/python-on-rails | cccd2284c7dab32a37d573042531a54454164f6a | [
"MIT"
] | 1 | 2022-02-08T11:26:24.000Z | 2022-02-08T11:26:24.000Z | tests/either_catch_test.py | funnel-io/python-on-rails | cccd2284c7dab32a37d573042531a54454164f6a | [
"MIT"
] | null | null | null | from python_on_rails.either import as_either, Failure, Success
def test_success_executes_bindings():
result = Success(1).bind(add_one).bind(times_five)
assert isinstance(result, Success)
assert result.value == 10
def test_a_failure_stops_the_execution_of_later_bindings():
result = Success("NaN").bind(add_one).bind(times_five)
assert isinstance(result, Failure)
assert type(result.value) == TypeError
assert repr(result.value) == "TypeError('can only concatenate str (not \"int\") to str')"
| 25.28 | 93 | 0.726266 |
812594dced1920626bd6e5484a03e5c3aa5dda9e | 1,943 | py | Python | ServerSide/models.py | Coullence/DRF_Percels-Couriers_API_V.0.0.2 | 906786115861b316f8ecf023c8af82f2dacff68e | [
"MIT"
] | null | null | null | ServerSide/models.py | Coullence/DRF_Percels-Couriers_API_V.0.0.2 | 906786115861b316f8ecf023c8af82f2dacff68e | [
"MIT"
] | null | null | null | ServerSide/models.py | Coullence/DRF_Percels-Couriers_API_V.0.0.2 | 906786115861b316f8ecf023c8af82f2dacff68e | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
# Station
# Customers
# Items
# Payments
| 32.932203 | 66 | 0.716933 |
81265c7215ed57cef680d0ec0a27f1c4d35a191a | 5,340 | bzl | Python | tao_compiler/mlir/disc/tests/glob_op_test.bzl | JamesTheZ/BladeDISC | e6c76ee557ebfccd560d44f6b6276bbc4e0a8a34 | [
"Apache-2.0"
] | 328 | 2021-12-20T03:29:35.000Z | 2022-03-31T14:27:23.000Z | tao_compiler/mlir/disc/tests/glob_op_test.bzl | JamesTheZ/BladeDISC | e6c76ee557ebfccd560d44f6b6276bbc4e0a8a34 | [
"Apache-2.0"
] | 82 | 2021-12-20T09:15:16.000Z | 2022-03-31T09:33:48.000Z | tao_compiler/mlir/disc/tests/glob_op_test.bzl | JamesTheZ/BladeDISC | e6c76ee557ebfccd560d44f6b6276bbc4e0a8a34 | [
"Apache-2.0"
] | 66 | 2021-12-21T17:28:27.000Z | 2022-03-29T12:08:34.000Z | # Test definitions for Lit, the LLVM test runner.
#
# This is reusing the LLVM Lit test runner in the interim until the new build
# rules are upstreamed.
# TODO(b/136126535): remove this custom rule.
"""Lit runner globbing test
"""
load("//tensorflow:tensorflow.bzl", "filegroup")
load("@bazel_skylib//lib:paths.bzl", "paths")
load("//tensorflow:tensorflow.bzl", "tf_cc_test", "tf_native_cc_binary", "tf_copts")
# Default values used by the test runner.
_default_test_file_exts = ["mlir", ".pbtxt", ".td"]
_default_driver = "@llvm-project//mlir:run_lit.sh"
_default_size = "small"
_default_tags = []
# These are patterns which we should never match, for tests, subdirectories, or
# test input data files.
_ALWAYS_EXCLUDE = [
"**/LICENSE.txt",
"**/README.txt",
"**/lit.local.cfg",
# Exclude input files that have spaces in their names, since bazel
# cannot cope with such "targets" in the srcs list.
"**/* *",
"**/* */**",
]
def _run_lit_test(name, test_file, data, size, tags, driver, features, exec_properties):
"""Runs lit on all tests it can find in `data` under tensorflow/compiler/mlir.
Note that, due to Bazel's hermetic builds, lit only sees the tests that
are included in the `data` parameter, regardless of what other tests might
exist in the directory searched.
Args:
name: str, the name of the test, including extension.
test_file: [str], label(s) of the file(s) containing the test sources.
data: [str], the data input to the test.
size: str, the size of the test.
tags: [str], tags to attach to the test.
driver: str, label of the driver shell script.
Note: use of a custom driver is not currently supported
and specifying a default driver will abort the tests.
features: [str], list of extra features to enable.
exec_properties: a dictionary of properties to pass on.
"""
name_without_suffix = test_file[0].split('.')[0]
local_test_files = name + ".test_files"
filegroup(
name = local_test_files,
srcs = native.glob([
"data/" + name_without_suffix + "*.mlir",
]),
)
tf_cc_test(
name = name,
srcs = test_file,
size = size,
deps = [
"//tensorflow/compiler/mlir/disc/tests:mlir_feature_test",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"//tensorflow/core:testlib",
],
data = [":" + local_test_files] + data + [
"//tensorflow/compiler/mlir/disc:disc_compiler_main",
"//tensorflow/compiler/mlir:tf-mlir-translate",
"//tensorflow/compiler/mlir:tf-opt",
],
)
def glob_op_tests(
exclude = [],
test_file_exts = _default_test_file_exts,
default_size = _default_size,
size_override = {},
data = [],
per_test_extra_data = {},
default_tags = _default_tags,
tags_override = {},
driver = _default_driver,
features = [],
exec_properties = {}):
"""Creates all plausible Lit tests (and their inputs) under this directory.
Args:
exclude: [str], paths to exclude (for tests and inputs).
test_file_exts: [str], extensions for files that are tests.
default_size: str, the test size for targets not in "size_override".
size_override: {str: str}, sizes to use for specific tests.
data: [str], additional input data to the test.
per_test_extra_data: {str: [str]}, extra data to attach to a given file.
default_tags: [str], additional tags to attach to the test.
tags_override: {str: str}, tags to add to specific tests.
driver: str, label of the driver shell script.
Note: use of a custom driver is not currently supported
and specifying a default driver will abort the tests.
features: [str], list of extra features to enable.
exec_properties: a dictionary of properties to pass on.
"""
# Ignore some patterns by default for tests and input data.
exclude = _ALWAYS_EXCLUDE + exclude
tests = native.glob(
["*." + ext for ext in test_file_exts],
exclude = exclude,
)
# Run tests individually such that errors can be attributed to a specific
# failure.
for i in range(len(tests)):
curr_test = tests[i]
# Instantiate this test with updated parameters.
lit_test(
name = curr_test,
data = data + per_test_extra_data.get(curr_test, []),
size = size_override.get(curr_test, default_size),
tags = default_tags + tags_override.get(curr_test, []),
driver = driver,
features = features,
exec_properties = exec_properties,
)
def lit_test(
name,
data = [],
size = _default_size,
tags = _default_tags,
driver = _default_driver,
features = [],
exec_properties = {}):
"""Runs test files under lit.
Args:
name: str, the name of the test.
data: [str], labels that should be provided as data inputs.
size: str, the size of the test.
tags: [str], tags to attach to the test.
driver: str, label of the driver shell script.
Note: use of a custom driver is not currently supported
and specifying a default driver will abort the tests.
features: [str], list of extra features to enable.
"""
_run_lit_test(name + ".test", [name], data, size, tags, driver, features, exec_properties)
| 35.364238 | 94 | 0.639888 |
812723d2076c258aebc37a64fed06e3f495c2735 | 2,181 | py | Python | build-scripts/PackageCheckHelpers.py | yulicrunchy/JALoP | a474b464d4916fe559cf1df97c855232e5ec24ab | [
"Apache-2.0"
] | 4 | 2016-01-18T20:49:23.000Z | 2020-03-04T22:23:57.000Z | build-scripts/PackageCheckHelpers.py | yulicrunchy/JALoP | a474b464d4916fe559cf1df97c855232e5ec24ab | [
"Apache-2.0"
] | 2 | 2019-09-23T21:04:25.000Z | 2020-01-31T18:10:17.000Z | build-scripts/PackageCheckHelpers.py | yulicrunchy/JALoP | a474b464d4916fe559cf1df97c855232e5ec24ab | [
"Apache-2.0"
] | 2 | 2021-04-01T20:53:12.000Z | 2021-04-01T21:10:53.000Z | """
These are functions to add to the configure context.
"""
def __checkCanLink(context, source, source_type, message_libname, real_libs=[]):
"""
Check that source can be successfully compiled and linked against real_libs.
Keyword arguments:
source -- source to try to compile
source_type -- type of source file, (probably should be ".c")
message_libname -- library name to show in the message output from scons
real_libs -- list of actual libraries to link against (defaults to a list
with one element, the value of messager_libname)
"""
if not real_libs:
real_libs = [message_libname]
context.Message("Checking for %s..." % message_libname)
libsave = context.env.get('LIBS')
context.env.AppendUnique(LIBS=real_libs)
ret = context.TryLink(source, source_type)
context.Result( ret )
if libsave is None:
del(context.env['LIBS'])
else:
context.env['LIBS'] = libsave
return ret
libuuid_source = '''
#include <uuid/uuid.h>
int main() {
uuid_t uu;
char uuid_str[37];
uuid_generate(uu);
uuid_unparse(uu, uuid_str);
return 0;
}
'''
selinux_source = '''
#include <selinux/selinux.h>
int main() {
security_context_t ctx;
getpeercon(0, &ctx);
return 0;
}
'''
byteswap_source = '''
#include <byteswap.h>
#include <stdint.h>
int main() {
uint16_t b16 = 0x00FF;
uint32_t b32 = 0x0011EEFF;
uint64_t b64 = 0x00112233CCDDEEFF;
bswap_16(b16);
bswap_32(b32);
bswap_64(b64);
return 0;
}
'''
bdb_source = '''
#include <db.h>
#if defined(DB_VERSION_MAJOR) && DB_VERSION_MAJOR >= 4
#if DB_VERSION_MAJOR == 4
#if defined(DB_VERSION_MINOR) && DB_VERSION_MINOR >= 3
#else
#error ""
#endif
#endif
#else
#error ""
#endif
'''
| 22.484536 | 80 | 0.710683 |
81290326c9beb0af3fd98f2bdd52b65974d13cd3 | 12,950 | py | Python | src/transformers/modeling_tf_pytorch_utils.py | ari-holtzman/transformers | 8725c545e8feeecdcee0ad92ca1d80cee8f0c6e4 | [
"Apache-2.0"
] | 5,129 | 2019-09-30T11:21:03.000Z | 2022-03-31T22:35:12.000Z | src/transformers/modeling_tf_pytorch_utils.py | ari-holtzman/transformers | 8725c545e8feeecdcee0ad92ca1d80cee8f0c6e4 | [
"Apache-2.0"
] | 604 | 2019-10-05T00:39:46.000Z | 2022-03-31T11:12:07.000Z | src/transformers/modeling_tf_pytorch_utils.py | ari-holtzman/transformers | 8725c545e8feeecdcee0ad92ca1d80cee8f0c6e4 | [
"Apache-2.0"
] | 1,034 | 2019-09-30T15:01:32.000Z | 2022-03-31T06:14:50.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch - TF 2.0 general utilities."""
import logging
import os
import re
import numpy
logger = logging.getLogger(__name__)
def convert_tf_weight_name_to_pt_weight_name(tf_name, start_prefix_to_remove=""):
""" Convert a TF 2.0 model variable name in a pytorch model weight name.
Conventions for TF2.0 scopes -> PyTorch attribute names conversions:
- '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch)
- '_._' is replaced by a new level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList)
return tuple with:
- pytorch model weight name
- transpose: boolean indicating whether TF2.0 and PyTorch weights matrices are transposed with regard to each other
"""
tf_name = tf_name.replace(":0", "") # device ids
tf_name = re.sub(
r"/[^/]*___([^/]*)/", r"/\1/", tf_name
) # '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch)
tf_name = tf_name.replace(
"_._", "/"
) # '_._' is replaced by a level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList)
tf_name = re.sub(r"//+", "/", tf_name) # Remove empty levels at the end
tf_name = tf_name.split("/") # Convert from TF2.0 '/' separators to PyTorch '.' separators
tf_name = tf_name[1:] # Remove level zero
# When should we transpose the weights
transpose = bool(tf_name[-1] == "kernel" or "emb_projs" in tf_name or "out_projs" in tf_name)
# Convert standard TF2.0 names in PyTorch names
if tf_name[-1] == "kernel" or tf_name[-1] == "embeddings" or tf_name[-1] == "gamma":
tf_name[-1] = "weight"
if tf_name[-1] == "beta":
tf_name[-1] = "bias"
# Remove prefix if needed
tf_name = ".".join(tf_name)
if start_prefix_to_remove:
tf_name = tf_name.replace(start_prefix_to_remove, "", 1)
return tf_name, transpose
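# Editor-added example (not part of the original module) of the mapping this
# helper performs; the TF variable name below is an illustrative assumption.
#
#   >>> convert_tf_weight_name_to_pt_weight_name(
#   ...     "tf_bert_model/bert/encoder/layer_._0/attention/self/query/kernel:0",
#   ...     start_prefix_to_remove="bert.")
#   ('encoder.layer.0.attention.self.query.weight', True)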
#####################
# PyTorch => TF 2.0 #
#####################
def load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch checkpoints in a TF 2.0 model
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
pt_path = os.path.abspath(pytorch_checkpoint_path)
logger.info("Loading PyTorch weights from {}".format(pt_path))
pt_state_dict = torch.load(pt_path, map_location="cpu")
logger.info("PyTorch checkpoint contains {:,} parameters".format(sum(t.numel() for t in pt_state_dict.values())))
return load_pytorch_weights_in_tf2_model(
tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys
)
def load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch checkpoints in a TF 2.0 model
"""
pt_state_dict = pt_model.state_dict()
return load_pytorch_weights_in_tf2_model(
tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys
)
def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch state_dict in a TF 2.0 model.
"""
try:
import torch # noqa: F401
import tensorflow as tf # noqa: F401
from tensorflow.python.keras import backend as K
except ImportError:
logger.error(
"Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure model is built
# Adapt state dict - TODO remove this and update the AWS weights files instead
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in pt_state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
pt_state_dict[new_key] = pt_state_dict.pop(old_key)
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ""
if not any(s.startswith(tf_model.base_model_prefix) for s in pt_state_dict.keys()):
start_prefix_to_remove = tf_model.base_model_prefix + "."
symbolic_weights = tf_model.trainable_weights + tf_model.non_trainable_weights
tf_loaded_numel = 0
weight_value_tuples = []
all_pytorch_weights = set(list(pt_state_dict.keys()))
for symbolic_weight in symbolic_weights:
sw_name = symbolic_weight.name
name, transpose = convert_tf_weight_name_to_pt_weight_name(
sw_name, start_prefix_to_remove=start_prefix_to_remove
)
# Find associated numpy array in pytorch model state dict
if name not in pt_state_dict:
if allow_missing_keys:
continue
raise AttributeError("{} not found in PyTorch model".format(name))
array = pt_state_dict[name].numpy()
if transpose:
array = numpy.transpose(array)
if len(symbolic_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(symbolic_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
try:
assert list(symbolic_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (symbolic_weight.shape, array.shape)
raise e
tf_loaded_numel += array.size
# logger.warning("Initialize TF weight {}".format(symbolic_weight.name))
weight_value_tuples.append((symbolic_weight, array))
all_pytorch_weights.discard(name)
K.batch_set_value(weight_value_tuples)
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure restore ops are run
logger.info("Loaded {:,} parameters in the TF 2.0 model.".format(tf_loaded_numel))
logger.info("Weights or buffers not loaded from PyTorch model: {}".format(all_pytorch_weights))
return tf_model
#####################
# TF 2.0 => PyTorch #
#####################
def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
""" Load TF 2.0 HDF5 checkpoint in a PyTorch model
We use HDF5 to easily do transfer learning
(see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357).
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
import transformers
logger.info("Loading TensorFlow weights from {}".format(tf_checkpoint_path))
# Instantiate and load the associated TF 2.0 model
tf_model_class_name = "TF" + pt_model.__class__.__name__ # Add "TF" at the beginning
tf_model_class = getattr(transformers, tf_model_class_name)
tf_model = tf_model_class(pt_model.config)
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure model is built
tf_model.load_weights(tf_checkpoint_path, by_name=True)
return load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=allow_missing_keys)
def load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=False):
""" Load TF 2.0 model in a pytorch model
"""
weights = tf_model.weights
return load_tf2_weights_in_pytorch_model(pt_model, weights, allow_missing_keys=allow_missing_keys)
def load_tf2_weights_in_pytorch_model(pt_model, tf_weights, allow_missing_keys=False):
""" Load TF2.0 symbolic weights in a PyTorch model
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
new_pt_params_dict = {}
current_pt_params_dict = dict(pt_model.named_parameters())
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ""
if not any(s.startswith(pt_model.base_model_prefix) for s in current_pt_params_dict.keys()):
start_prefix_to_remove = pt_model.base_model_prefix + "."
# Build a map from potential PyTorch weight names to TF 2.0 Variables
tf_weights_map = {}
for tf_weight in tf_weights:
pt_name, transpose = convert_tf_weight_name_to_pt_weight_name(
tf_weight.name, start_prefix_to_remove=start_prefix_to_remove
)
tf_weights_map[pt_name] = (tf_weight.numpy(), transpose)
all_tf_weights = set(list(tf_weights_map.keys()))
loaded_pt_weights_data_ptr = {}
missing_keys_pt = []
for pt_weight_name, pt_weight in current_pt_params_dict.items():
        # Handle PyTorch shared weight (not duplicated in TF 2.0)
if pt_weight.data_ptr() in loaded_pt_weights_data_ptr:
new_pt_params_dict[pt_weight_name] = loaded_pt_weights_data_ptr[pt_weight.data_ptr()]
continue
# Find associated numpy array in pytorch model state dict
if pt_weight_name not in tf_weights_map:
if allow_missing_keys:
missing_keys_pt.append(pt_weight_name)
continue
raise AttributeError("{} not found in TF 2.0 model".format(pt_weight_name))
array, transpose = tf_weights_map[pt_weight_name]
if transpose:
array = numpy.transpose(array)
if len(pt_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(pt_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
try:
assert list(pt_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (pt_weight.shape, array.shape)
raise e
# logger.warning("Initialize PyTorch weight {}".format(pt_weight_name))
new_pt_params_dict[pt_weight_name] = torch.from_numpy(array)
loaded_pt_weights_data_ptr[pt_weight.data_ptr()] = torch.from_numpy(array)
all_tf_weights.discard(pt_weight_name)
missing_keys, unexpected_keys = pt_model.load_state_dict(new_pt_params_dict, strict=False)
missing_keys += missing_keys_pt
if len(missing_keys) > 0:
logger.info(
"Weights of {} not initialized from TF 2.0 model: {}".format(pt_model.__class__.__name__, missing_keys)
)
if len(unexpected_keys) > 0:
logger.info(
"Weights from TF 2.0 model not used in {}: {}".format(pt_model.__class__.__name__, unexpected_keys)
)
logger.info("Weights or buffers not loaded from TF 2.0 model: {}".format(all_tf_weights))
return pt_model
| 39.242424 | 155 | 0.680386 |
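The conversion helpers above are normally driven through a model's configuration object. A minimal usage sketch follows; the BERT configuration and the checkpoint path are placeholders, not part of the original module.

from transformers import BertConfig, BertModel

config = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=4)
pt_model = BertModel(config)

# Assumed HDF5 checkpoint previously written with tf_model.save_weights(...)
pt_model = load_tf2_checkpoint_in_pytorch_model(
    pt_model, "./tf_model.h5", allow_missing_keys=True
)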
81292f7ed0f85cfcaaa5e1e9abfd5ae7b048469d | 4,906 | py | Python | hail/python/test/hail/helpers.py | mitochon/hail | 25e5e5b8da1d978468d2cee393426ade46484a87 | [
"MIT"
] | null | null | null | hail/python/test/hail/helpers.py | mitochon/hail | 25e5e5b8da1d978468d2cee393426ade46484a87 | [
"MIT"
] | 3 | 2017-06-16T18:10:45.000Z | 2017-07-21T17:44:13.000Z | hail/python/test/hail/helpers.py | mitochon/hail | 25e5e5b8da1d978468d2cee393426ade46484a87 | [
"MIT"
] | 2 | 2018-01-30T00:50:52.000Z | 2018-03-22T20:04:01.000Z | import os
from timeit import default_timer as timer
import unittest
import pytest
from decorator import decorator
from hail.utils.java import Env
import hail as hl
from hail.backend.local_backend import LocalBackend
_initialized = False
_test_dir = os.environ.get('HAIL_TEST_RESOURCES_DIR', '../src/test/resources')
_doctest_dir = os.environ.get('HAIL_DOCTEST_DATA_DIR', 'hail/docs/data')
_dataset = None
fails_local_backend = pytest.mark.xfail(
os.environ.get('HAIL_QUERY_BACKEND') == 'local',
reason="doesn't yet work on local backend",
strict=True)
| 28.858824 | 92 | 0.637994 |
812941051eea955290efb0cfdb0e29b4664e5ad1 | 2,728 | py | Python | src/entity_linker/models/figer_model/labeling_model.py | mjstrobl/WEXEA | 0af0be1cdb93fc00cd81f885aa15ef8d6579b304 | [
"Apache-2.0"
] | 10 | 2020-06-14T15:46:53.000Z | 2021-04-29T15:02:23.000Z | src/entity_linker/models/figer_model/labeling_model.py | mjstrobl/WEXEA | 0af0be1cdb93fc00cd81f885aa15ef8d6579b304 | [
"Apache-2.0"
] | 3 | 2021-08-25T16:16:45.000Z | 2022-02-10T04:29:10.000Z | src/entity_linker/models/figer_model/labeling_model.py | mjstrobl/WEXEA | 0af0be1cdb93fc00cd81f885aa15ef8d6579b304 | [
"Apache-2.0"
] | 1 | 2021-02-17T17:44:06.000Z | 2021-02-17T17:44:06.000Z | """
Modifications copyright (C) 2020 Michael Strobl
"""
import time
import tensorflow as tf
import numpy as np
from entity_linker.models.base import Model
| 40.716418 | 92 | 0.615836 |
8129c2d37ff5ea88cc0452e98c6e15446ea19cc4 | 329 | py | Python | python/molecular_diameter.py | wutobias/collection | fdac4ce5bb99c31115efdbed7db3316eea4b2826 | [
"MIT"
] | 2 | 2020-03-25T14:41:53.000Z | 2021-04-08T05:38:02.000Z | python/molecular_diameter.py | wutobias/collection | fdac4ce5bb99c31115efdbed7db3316eea4b2826 | [
"MIT"
] | null | null | null | python/molecular_diameter.py | wutobias/collection | fdac4ce5bb99c31115efdbed7db3316eea4b2826 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import parmed as pmd
import numpy as np
from scipy.spatial import distance
if len(sys.argv) < 2:
print "Usage: molecular_diameter.py <mymolecule.mol2>"
exit(1)
mol = pmd.load_file(sys.argv[1])
crds = mol.coordinates
dist = distance.cdist(crds, crds, 'euclidean')
print(np.max(dist))
exit(0) | 19.352941 | 55 | 0.735562 |
812a80140e19ea007dd9ab25b6b57d63cc6eb659 | 2,425 | py | Python | examples/text_classification/yelp_reviews_polarity/train.py | liorshk/simpletransformers | 226cf4d11edf5157c1beafcc44aaa78f65ccc985 | [
"Apache-2.0"
] | 3,151 | 2019-10-05T11:14:44.000Z | 2022-03-31T17:02:54.000Z | examples/text_classification/yelp_reviews_polarity/train.py | liorshk/simpletransformers | 226cf4d11edf5157c1beafcc44aaa78f65ccc985 | [
"Apache-2.0"
] | 1,165 | 2019-10-05T14:48:55.000Z | 2022-03-31T11:12:58.000Z | examples/text_classification/yelp_reviews_polarity/train.py | liorshk/simpletransformers | 226cf4d11edf5157c1beafcc44aaa78f65ccc985 | [
"Apache-2.0"
] | 739 | 2019-10-06T15:11:54.000Z | 2022-03-28T11:07:36.000Z | import sys
import pandas as pd
from simpletransformers.classification import ClassificationModel
prefix = "data/"
train_df = pd.read_csv(prefix + "train.csv", header=None)
train_df.head()
eval_df = pd.read_csv(prefix + "test.csv", header=None)
eval_df.head()
train_df[0] = (train_df[0] == 2).astype(int)
eval_df[0] = (eval_df[0] == 2).astype(int)
train_df = pd.DataFrame(
{"text": train_df[1].replace(r"\n", " ", regex=True), "labels": train_df[0]}
)
print(train_df.head())
eval_df = pd.DataFrame(
{"text": eval_df[1].replace(r"\n", " ", regex=True), "labels": eval_df[0]}
)
print(eval_df.head())
model_type = sys.argv[1]
if model_type == "bert":
model_name = "bert-base-cased"
elif model_type == "roberta":
model_name = "roberta-base"
elif model_type == "distilbert":
model_name = "distilbert-base-cased"
elif model_type == "distilroberta":
model_type = "roberta"
model_name = "distilroberta-base"
elif model_type == "electra-base":
model_type = "electra"
model_name = "google/electra-base-discriminator"
elif model_type == "electra-small":
model_type = "electra"
model_name = "google/electra-small-discriminator"
elif model_type == "xlnet":
model_name = "xlnet-base-cased"
train_args = {
"reprocess_input_data": True,
"overwrite_output_dir": True,
"use_cached_eval_features": True,
"output_dir": f"outputs/{model_type}",
"best_model_dir": f"outputs/{model_type}/best_model",
"evaluate_during_training": True,
"max_seq_length": 128,
"num_train_epochs": 3,
"evaluate_during_training_steps": 1000,
"wandb_project": "Classification Model Comparison",
"wandb_kwargs": {"name": model_name},
"save_model_every_epoch": False,
"save_eval_checkpoints": False,
# "use_early_stopping": True,
# "early_stopping_metric": "mcc",
# "n_gpu": 2,
# "manual_seed": 4,
# "use_multiprocessing": False,
"train_batch_size": 128,
"eval_batch_size": 64,
# "config": {
# "output_hidden_states": True
# }
}
if model_type == "xlnet":
train_args["train_batch_size"] = 64
train_args["gradient_accumulation_steps"] = 2
# Create a ClassificationModel
model = ClassificationModel(model_type, model_name, args=train_args)
# Train the model
model.train_model(train_df, eval_df=eval_df)
# # # Evaluate the model
# result, model_outputs, wrong_predictions = model.eval_model(eval_df)
| 25.260417 | 80 | 0.68701 |
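Once training finishes, the fine-tuned classifier can score new reviews. A small hedged example (the review texts are made up):

predictions, raw_outputs = model.predict(
    [
        "The food was wonderful and the staff were friendly.",
        "Terrible service, I will not come back.",
    ]
)
print(predictions)  # e.g. [1, 0], where 1 corresponds to the positive class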
812b4e30304f24bb277705592e38799b83099f91 | 1,049 | py | Python | LoadGraph.py | mahdi-zafarmand/SNA | a7188a2ceb63355183e470648f6ae4fa90a22faa | [
"MIT"
] | null | null | null | LoadGraph.py | mahdi-zafarmand/SNA | a7188a2ceb63355183e470648f6ae4fa90a22faa | [
"MIT"
] | null | null | null | LoadGraph.py | mahdi-zafarmand/SNA | a7188a2ceb63355183e470648f6ae4fa90a22faa | [
"MIT"
] | 1 | 2020-10-28T01:52:36.000Z | 2020-10-28T01:52:36.000Z | import networkx as nx
import os.path
| 20.98 | 70 | 0.625357 |
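Only the imports of LoadGraph.py are visible in this excerpt; the reader functions themselves are not shown. As an illustration only, such a loader usually builds a networkx graph from an edge list (the function name and file format below are assumptions):

def load_graph(path, weighted=False):
    # Build an undirected graph from a whitespace-separated edge list.
    if not os.path.isfile(path):
        raise FileNotFoundError(path)
    graph = nx.Graph()
    with open(path) as edge_file:
        for line in edge_file:
            parts = line.split()
            if len(parts) < 2:
                continue
            if weighted and len(parts) >= 3:
                graph.add_edge(parts[0], parts[1], weight=float(parts[2]))
            else:
                graph.add_edge(parts[0], parts[1])
    return graph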
812bc4e483e6787a26d9b7a22c0e31832c78af55 | 5,853 | py | Python | mayan/apps/document_signatures/models.py | wan1869/dushuhu | 934dd178e67140cffc6b9203e793fdf8bbc73a54 | [
"Apache-2.0"
] | null | null | null | mayan/apps/document_signatures/models.py | wan1869/dushuhu | 934dd178e67140cffc6b9203e793fdf8bbc73a54 | [
"Apache-2.0"
] | null | null | null | mayan/apps/document_signatures/models.py | wan1869/dushuhu | 934dd178e67140cffc6b9203e793fdf8bbc73a54 | [
"Apache-2.0"
] | 1 | 2021-04-30T09:44:14.000Z | 2021-04-30T09:44:14.000Z | import logging
import uuid
from django.db import models
from django.urls import reverse
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from model_utils.managers import InheritanceManager
from mayan.apps.django_gpg.exceptions import VerificationError
from mayan.apps.django_gpg.models import Key
from mayan.apps.documents.models import DocumentVersion
from mayan.apps.storage.classes import DefinedStorageLazy
from .literals import STORAGE_NAME_DOCUMENT_SIGNATURES_DETACHED_SIGNATURE
from .managers import DetachedSignatureManager, EmbeddedSignatureManager
logger = logging.getLogger(name=__name__)
| 34.02907 | 79 | 0.651973 |
812c3f30e6e3ff5facc02e59cfdcff8d05e984ea | 2,226 | py | Python | scripts/sync_reports_config.py | ramezrawas/galaxy-1 | c03748dd49c060a68d07bce56eae33e0ba154414 | [
"CC-BY-3.0"
] | 6 | 2018-11-03T22:43:35.000Z | 2022-02-15T17:51:33.000Z | scripts/sync_reports_config.py | igorhollaender/OBSOLETE_sirv_dashboard | 85aec60b80ef6f561d89398e3da5963d3d0f2aa4 | [
"CC-BY-3.0"
] | 7 | 2016-12-07T22:19:37.000Z | 2019-01-30T15:04:26.000Z | scripts/sync_reports_config.py | igorhollaender/OBSOLETE_sirv_dashboard | 85aec60b80ef6f561d89398e3da5963d3d0f2aa4 | [
"CC-BY-3.0"
] | 10 | 2017-04-10T21:40:22.000Z | 2022-02-21T16:50:10.000Z | from ConfigParser import ConfigParser
from sys import argv
REPLACE_PROPERTIES = ["file_path", "database_connection", "new_file_path"]
MAIN_SECTION = "app:main"
if __name__ == '__main__':
sync()
| 35.903226 | 88 | 0.700359 |
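The sync() function called by the __main__ block is not included in this excerpt. A plausible sketch, assuming the script receives the main Galaxy ini file and the reports ini file as its two command-line arguments, is:

def sync():
    # Assumed argument order: main config first, reports config second.
    main_path, reports_path = argv[1], argv[2]
    main_config = ConfigParser()
    main_config.read(main_path)
    reports_config = ConfigParser()
    reports_config.read(reports_path)
    for prop in REPLACE_PROPERTIES:
        if main_config.has_option(MAIN_SECTION, prop):
            value = main_config.get(MAIN_SECTION, prop)
            reports_config.set(MAIN_SECTION, prop, value)
    with open(reports_path, "w") as config_file:
        reports_config.write(config_file)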
812c923f7680b63727b8c0d8a0b724feb7e64f73 | 1,448 | py | Python | src/gausskernel/dbmind/xtuner/test/test_ssh.py | wotchin/openGauss-server | ebd92e92b0cfd76b121d98e4c57a22d334573159 | [
"MulanPSL-1.0"
] | 1 | 2020-06-30T15:00:50.000Z | 2020-06-30T15:00:50.000Z | src/gausskernel/dbmind/xtuner/test/test_ssh.py | wotchin/openGauss-server | ebd92e92b0cfd76b121d98e4c57a22d334573159 | [
"MulanPSL-1.0"
] | null | null | null | src/gausskernel/dbmind/xtuner/test/test_ssh.py | wotchin/openGauss-server | ebd92e92b0cfd76b121d98e4c57a22d334573159 | [
"MulanPSL-1.0"
] | null | null | null | # Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# -------------------------------------------------------------------------
#
# test_ssh.py
#
# IDENTIFICATION
# src/gausskernel/dbmind/xtuner/test/test_ssh.py
#
# -------------------------------------------------------------------------
from ssh import ExecutorFactory
if __name__ == "__main__":
test_remote()
test_local()
| 33.674419 | 108 | 0.631215 |
812e0d88c6e6c1e7a35a42781edb6b394196778c | 3,838 | py | Python | models/utils.py | wyshi/Unsupervised-Structure-Learning | 19b49320b46e5f7d990ab9e5b3054b331b86e59d | [
"Apache-2.0"
] | 34 | 2019-06-25T06:21:03.000Z | 2022-01-24T06:57:40.000Z | models/utils.py | wyshi/Unsupervised-Structure-Learning | 19b49320b46e5f7d990ab9e5b3054b331b86e59d | [
"Apache-2.0"
] | 3 | 2019-07-19T02:33:03.000Z | 2021-11-03T09:06:25.000Z | models/utils.py | wyshi/Unsupervised-Structure-Learning | 19b49320b46e5f7d990ab9e5b3054b331b86e59d | [
"Apache-2.0"
] | 4 | 2019-06-25T06:46:12.000Z | 2021-01-13T06:57:06.000Z | # Original work Copyright (C) 2017 Tiancheng Zhao, Carnegie Mellon University
# Modified work Copyright 2018 Weiyan Shi.
import tensorflow as tf
import numpy as np
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import SmoothingFunction
def get_bow(embedding, avg=False):
"""
Assumption, the last dimension is the embedding
The second last dimension is the sentence length. The rank must be 3
"""
embedding_size = embedding.get_shape()[2].value
if avg:
return tf.reduce_mean(embedding, reduction_indices=[1]), embedding_size
else:
return tf.reduce_sum(embedding, reduction_indices=[1]), embedding_size
def get_rnn_encode(embedding, cell, length_mask=None, scope=None, reuse=None):
"""
Assumption, the last dimension is the embedding
The second last dimension is the sentence length. The rank must be 3
The padding should have zero
"""
with tf.variable_scope(scope, 'RnnEncoding', reuse=reuse):
if length_mask is None:
length_mask = tf.reduce_sum(tf.sign(tf.reduce_max(tf.abs(embedding), reduction_indices=2)),reduction_indices=1)
length_mask = tf.to_int32(length_mask)
_, encoded_input = tf.nn.dynamic_rnn(cell, embedding, sequence_length=length_mask, dtype=tf.float32)
return encoded_input, cell.state_size
def get_bi_rnn_encode(embedding, f_cell, b_cell, length_mask=None, scope=None, reuse=None):
"""
Assumption, the last dimension is the embedding
The second last dimension is the sentence length. The rank must be 3
The padding should have zero
"""
with tf.variable_scope(scope, 'RnnEncoding', reuse=reuse):
if length_mask is None:
length_mask = tf.reduce_sum(tf.sign(tf.reduce_max(tf.abs(embedding), reduction_indices=2)),reduction_indices=1)
length_mask = tf.to_int32(length_mask)
_, encoded_input = tf.nn.bidirectional_dynamic_rnn(f_cell, b_cell, embedding, sequence_length=length_mask, dtype=tf.float32)
encoded_input = tf.concat(encoded_input, 1)
return encoded_input, f_cell.state_size+b_cell.state_size
def get_prob_for_one_sent(vocab_prob, sent, length_mask=None):
"""
:param vocab_prob:
:param sent:
:param length_mask:
:return:
"""
tf.boolean_mask(tf.reshape(usr_input_sent, [-1, 50]), tf.sequence_mask(length_mask, 50))
def tf_repeat(tensor, repeats):
"""
:param tensor:
:param repeats:
:return:
"""
with tf.variable_scope("repeat"):
expanded_tensor = tf.expand_dims(tensor, -1)
multiples = [1] + repeats
tiled_tensor = tf.tile(expanded_tensor, multiples=multiples)
repeated_tensor = tf.reshape(tiled_tensor, tf.shape(tensor) * repeats)
return repeated_tensor | 38 | 132 | 0.680823 |
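A short usage sketch for the encoders above; it assumes a TensorFlow 1.x graph-mode session (tf is the tensorflow module already imported in this file), and the shapes and cell sizes are made up.

utt_embedding = tf.placeholder(tf.float32, shape=[None, 50, 300])  # batch x tokens x emb
bow_vec, bow_size = get_bow(utt_embedding, avg=True)

fwd_cell = tf.nn.rnn_cell.GRUCell(200)
bwd_cell = tf.nn.rnn_cell.GRUCell(200)
utt_encoding, encoding_size = get_bi_rnn_encode(
    utt_embedding, fwd_cell, bwd_cell, scope="utterance_encoder")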
812eae9e0a007577935e4a756403808aa1018593 | 4,927 | py | Python | gluoncv/data/transforms/block.py | Kh4L/gluon-cv | 849411ed56632cd854850b07142087d599f97dcb | [
"Apache-2.0"
] | 5,447 | 2018-04-25T18:02:51.000Z | 2022-03-31T00:59:49.000Z | gluoncv/data/transforms/block.py | Kh4L/gluon-cv | 849411ed56632cd854850b07142087d599f97dcb | [
"Apache-2.0"
] | 1,566 | 2018-04-25T21:14:04.000Z | 2022-03-31T06:42:42.000Z | gluoncv/data/transforms/block.py | Kh4L/gluon-cv | 849411ed56632cd854850b07142087d599f97dcb | [
"Apache-2.0"
] | 1,345 | 2018-04-25T18:44:13.000Z | 2022-03-30T19:32:53.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
# pylint: disable= missing-docstring
"Addtional image transforms."
import random
import math
import numpy as np
from mxnet import image, nd
from mxnet.gluon import Block
__all__ = ['RandomCrop', 'RandomErasing']
| 36.496296 | 99 | 0.628983 |
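The RandomCrop and RandomErasing class bodies are not visible in this excerpt. For illustration only, a gluon Block wrapping mxnet's random_crop could look roughly like this; details of the real implementation may differ.

class RandomCrop(Block):
    """Randomly crop a region of the requested size from the input image."""
    def __init__(self, size, interp=2):
        super(RandomCrop, self).__init__()
        if isinstance(size, (int, float)):
            size = (int(size), int(size))
        self._size = size
        self._interp = interp

    def forward(self, x):
        # image.random_crop returns the cropped NDArray and the crop box.
        cropped, _ = image.random_crop(x, self._size, self._interp)
        return cropped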
812fdf7c80ff79f192233383d15152b1c334cad3 | 3,631 | py | Python | explore.py | lribiere/explore-mit-bih-arrhythmia-db | 44eb2601ed437cb9766ae9cfd3c3553bf108d4f1 | [
"MIT"
] | 3 | 2020-02-26T20:01:11.000Z | 2020-06-24T17:44:11.000Z | explore.py | lribiere/explore-mit-bih-arrhythmia-db | 44eb2601ed437cb9766ae9cfd3c3553bf108d4f1 | [
"MIT"
] | 2 | 2020-07-01T09:38:58.000Z | 2020-07-01T09:40:02.000Z | explore.py | lribiere/explore-mit-bih-arrhythmia-db | 44eb2601ed437cb9766ae9cfd3c3553bf108d4f1 | [
"MIT"
] | null | null | null | import plotly.graph_objects as go
import streamlit as st
import pandas as pd
from utils import *
import glob
import wfdb
import os
ANNOTATIONS_COL_NAME = 'annotations'
'''
# MIT-BIH Arrhythmia DB Exploration
'''
record_ids = [os.path.basename(file)[:-4] for file in glob.glob('data/*.dat')]
if len(record_ids) == 0:
st.write('Warning ! No data could be found under the ./data/ directory.',
'*\*.dat*, *\*.hea*, *\*.atr* files and such should be placed ',
'immediately under the ./data/ directory')
else:
record_ids.sort()
record_id = st.selectbox('Select a record id', record_ids)
record = wfdb.rdrecord(f'data/{record_id}')
annotation = wfdb.rdann(f'data/{record_id}', 'atr')
st.write('Signals found in this record :')
for idx, signal in enumerate(record.sig_name):
st.write(f'- `{signal}` : in {record.units[idx]}, with a frequency of '
f'{record.fs * record.samps_per_frame[idx]}hz')
st.write(f'Comments for this record : {record.comments}')
signals_df = pd.DataFrame(record.p_signal, columns=record.sig_name)
annot_serie = pd.Series(annotation.symbol, index=annotation.sample,
name=ANNOTATIONS_COL_NAME)
full_df = pd.concat([signals_df, annot_serie], axis=1)
''' ## Annotations '''
beat_annot_count = annot_serie.isin(dict(beat_annotations)).sum()
non_beat_annot_count = annot_serie.isin(dict(non_beat_annotations)).sum()
unique_annot = annot_serie.value_counts().index.values
st.write(f'This record contains `{annot_serie.size}` annotations '
f'among which `{beat_annot_count}` beat annotations and '
f'`{non_beat_annot_count}` non beat annotation(s).')
st.write('The annotations are the followings :')
for annot in unique_annot:
st.write(f'- `{annot}` : {annotation_definitions[annot]}')
st.write('More explanations on the annotations are available here : '
'https://archive.physionet.org/physiobank/annotations.shtml')
# Plot counts for each annotation
annot_counts_df = annot_serie \
.value_counts() \
.rename_axis(ANNOTATIONS_COL_NAME) \
.reset_index(name='counts')
bar_fig = go.Figure(data=[go.Bar(x=annot_counts_df[ANNOTATIONS_COL_NAME],
y=annot_counts_df['counts'],
text=annot_counts_df['counts'],
textposition='auto'
)])
bar_fig.update_layout(title='Annotations by count', yaxis_title='counts',
xaxis_title='annotations')
st.write(bar_fig)
''' ## Explore full dataset '''
signal = st.selectbox('Select a signal', record.sig_name)
# Plot signals and annotations
matching_rows_by_annot = {}
for annot in unique_annot:
matching_rows_by_annot[annot] = full_df[ANNOTATIONS_COL_NAME] == annot
fig = go.Figure(layout=go.Layout(title=go.layout.Title(
text='{} signal with annotations'.format(signal))))
fig.add_trace(go.Scatter(x=full_df.index.values,
y=full_df[signal],
mode='lines',
name=signal))
for annot, annot_matching_rows in matching_rows_by_annot.items():
fig.add_trace(go.Scatter(x=full_df.index[annot_matching_rows].values,
y=full_df[annot_matching_rows][signal].values,
mode='markers',
name='{} (annot)'.format(annot)))
st.plotly_chart(fig)
| 44.82716 | 79 | 0.619664 |
8133672c14b4b385a9d7555f731ee05a987d9f73 | 143 | py | Python | src/home_automation_hub/config.py | levidavis/py-home | 3cc30e19d506824de9816ad9dbcfba4338a7dfa8 | [
"MIT"
] | null | null | null | src/home_automation_hub/config.py | levidavis/py-home | 3cc30e19d506824de9816ad9dbcfba4338a7dfa8 | [
"MIT"
] | null | null | null | src/home_automation_hub/config.py | levidavis/py-home | 3cc30e19d506824de9816ad9dbcfba4338a7dfa8 | [
"MIT"
] | null | null | null | from .config_store import ConfigStore
config = ConfigStore()
config.set_mqtt_broker("mqtt", 1883)
config.set_redis_config("redis", 6379, 0)
| 17.875 | 41 | 0.776224 |
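config_store.ConfigStore itself is not part of this excerpt; a minimal stand-in that supports the two calls above could be as simple as the following (attribute names are assumptions):

class ConfigStore:
    def __init__(self):
        self.mqtt_host, self.mqtt_port = None, None
        self.redis_host, self.redis_port, self.redis_db = None, None, None

    def set_mqtt_broker(self, host, port):
        self.mqtt_host, self.mqtt_port = host, port

    def set_redis_config(self, host, port, db):
        self.redis_host, self.redis_port, self.redis_db = host, port, db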
81338229b9f75f52ae6ffcf7ef860588b32f5b97 | 3,915 | py | Python | Harpe-website/website/contrib/communication/utils.py | Krozark/Harpe-Website | 1038a8550d08273806c9ec244cb8157ef9e9101e | [
"BSD-2-Clause"
] | null | null | null | Harpe-website/website/contrib/communication/utils.py | Krozark/Harpe-Website | 1038a8550d08273806c9ec244cb8157ef9e9101e | [
"BSD-2-Clause"
] | null | null | null | Harpe-website/website/contrib/communication/utils.py | Krozark/Harpe-Website | 1038a8550d08273806c9ec244cb8157ef9e9101e | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import socket as csocket
from struct import pack,unpack
from website.contrib.communication.models import *
| 29.659091 | 110 | 0.527458 |
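Only the imports of this module survive in the excerpt. Given the socket and struct imports, length-prefixed packet helpers along these lines are a likely shape for its contents; the function names and the framing are assumptions, not the project's actual protocol.

def send_packet(sock, payload):
    # Prefix the payload with its length as a 4-byte big-endian integer.
    sock.sendall(pack('!I', len(payload)) + payload)

def recv_packet(sock):
    (length,) = unpack('!I', sock.recv(4))
    chunks, received = [], 0
    while received < length:
        chunk = sock.recv(length - received)
        if not chunk:
            break
        chunks.append(chunk)
        received += len(chunk)
    return b''.join(chunks)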
81353ee4f1a632a7e8022d2ce8c431b95559fb7b | 12,262 | py | Python | traitarm/reconstruction/visualize_recon.py | hzi-bifo/Model-T | 197b52f6fe9b73e0411dbfc66f6d2a43081f5697 | [
"Apache-2.0"
] | 1 | 2021-04-07T16:10:55.000Z | 2021-04-07T16:10:55.000Z | traitarm/reconstruction/visualize_recon.py | hzi-bifo/Model-T | 197b52f6fe9b73e0411dbfc66f6d2a43081f5697 | [
"Apache-2.0"
] | null | null | null | traitarm/reconstruction/visualize_recon.py | hzi-bifo/Model-T | 197b52f6fe9b73e0411dbfc66f6d2a43081f5697 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import ete2
from ete2 import faces, Tree, AttrFace, TreeStyle
import pylab
from matplotlib.colors import hex2color, rgb2hex, hsv_to_rgb, rgb_to_hsv
kelly_colors_hex = [
0xFFB300, # Vivid Yellow
0x803E75, # Strong Purple
0xFF6800, # Vivid Orange
0xA6BDD7, # Very Light Blue
0xC10020, # Vivid Red
0xCEA262, # Grayish Yellow
0x817066, # Medium Gray
# The following don't work well for people with defective color vision
0x007D34, # Vivid Green
0xF6768E, # Strong Purplish Pink
0x00538A, # Strong Blue
0xFF7A5C, # Strong Yellowish Pink
0x53377A, # Strong Violet
0xFF8E00, # Vivid Orange Yellow
0xB32851, # Strong Purplish Red
0xF4C800, # Vivid Greenish Yellow
0x7F180D, # Strong Reddish Brown
0x93AA00, # Vivid Yellowish Green
0x593315, # Deep Yellowish Brown
0xF13A13, # Vivid Reddish Orange
0x232C16, # Dark Olive Green
]
def adjust_kelly_brightness(hex_color, val, recon_min, recon_max):
"""set brightness according to change in continuous reconstruction value"""
h, s, v = rgb_to_hsv(hex2color('#{0:06X}'.format(hex_color)))
scale_factor = 1 - (recon_max - val) / (recon_max - recon_min)
v_new = v - (v * (scale_factor))
return rgb2hex(hsv_to_rgb(pd.np.array([h, s, v_new])))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("""visualize target list of features""")
parser.add_argument("node_recon", help = "node ancestral character state reconstruction")
parser.add_argument("gain_recon", help = "gain events ancestral character state reconstruction")
parser.add_argument("loss_recon", help = "loss events ancestral character state reconstruction")
parser.add_argument("tree", help = "tree with internal nodes labeled")
parser.add_argument("pfam_mapping", help = "feature mapping/list")
parser.add_argument("feat_list", help = "list of features")
parser.add_argument("--target_node", default = "N1", help = "list of features")
parser.add_argument("phenotype", help = "target phenotype")
parser.add_argument("--are_continuous_features_with_discrete_phenotype", action = 'store_true', help = "set if using continuous features with a discrete phenotype")
parser.add_argument("threshold", type = float, help = "threshold to call genotype/phenotype events")
parser.add_argument("sample_mapping", help = "mapping between sample ids and names")
parser.add_argument("out", help = "output file")
parser.add_argument("--max_feats", type = int, default = 10, help = "visualize at most max_feats features")
parser.add_argument("--miscl", help = "table of misclassified samples")
parser.add_argument("--node_annotation", help = "table of binary features for labeling the nodes")
a = parser.parse_args()
pt_tree, feats, pf2color = get_tree(node_recon = a.node_recon, gain_recon = a.gain_recon, loss_recon = a.loss_recon, pfam_mapping = a.pfam_mapping, tree = a.tree, feat_list = a.feat_list, phenotype = a.phenotype, target_node = a.target_node, threshold = a.threshold, sample_mapping = a.sample_mapping, are_continuous_features_with_discrete_phenotype = a.are_continuous_features_with_discrete_phenotype, max_feats = a.max_feats, miscl = a.miscl, node_annotation = a.node_annotation)
plot_tree(pt_tree, a.target_node, a.out)
plot_legend(feats, a.out, pf2color)
| 52.178723 | 485 | 0.621921 |
81367db3d2084fd41d74cdefdf3b14a53b5730ea | 46,827 | py | Python | scripts/misc/operator_condition_number_scipy.py | volpatto/firedrake_scripts | ba9c935bb0c9a6bbc6de69f476e42ad0ea8bb1c6 | [
"MIT"
] | 5 | 2019-01-19T14:18:51.000Z | 2022-02-10T14:22:12.000Z | scripts/misc/operator_condition_number_scipy.py | volpatto/firedrake_scripts | ba9c935bb0c9a6bbc6de69f476e42ad0ea8bb1c6 | [
"MIT"
] | null | null | null | scripts/misc/operator_condition_number_scipy.py | volpatto/firedrake_scripts | ba9c935bb0c9a6bbc6de69f476e42ad0ea8bb1c6 | [
"MIT"
] | 1 | 2021-06-14T07:32:26.000Z | 2021-06-14T07:32:26.000Z | import attr
from firedrake import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from scipy.linalg import svd
from scipy.sparse.linalg import svds
from scipy.sparse import csr_matrix
from slepc4py import SLEPc
import pandas as pd
from tqdm import tqdm
import os
matplotlib.use('Agg')
def plot_matrix(assembled_form, **kwargs):
"""Provides a plot of a matrix."""
fig, ax = plt.subplots(1, 1)
petsc_mat = assembled_form.M.handle
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
return plot
def plot_matrix_mixed(assembled_form, **kwargs):
"""Provides a plot of a mixed matrix."""
fig, ax = plt.subplots(1, 1)
petsc_mat = assembled_form.M.handle
f0_size = assembled_form.M[0, 0].handle.getSize()
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axhline(y=f0_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] - 0.5, color="k")
return plot
def plot_matrix_primal_hybrid_full(a_form, bcs=[], **kwargs):
"""Provides a plot of a full hybrid-mixed matrix."""
fig, ax = plt.subplots(1, 1)
assembled_form = assemble(a_form, bcs=bcs, mat_type="aij")
petsc_mat = assembled_form.M.handle
f0_size = assembled_form.M[0, 0].handle.getSize()
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axhline(y=f0_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] - 0.5, color="k")
return plot
def plot_matrix_mixed_hybrid_full(a_form, bcs=[], **kwargs):
"""Provides a plot of a full hybrid-mixed matrix."""
fig, ax = plt.subplots(1, 1)
assembled_form = assemble(a_form, bcs=bcs, mat_type="aij")
petsc_mat = assembled_form.M.handle
f0_size = assembled_form.M[0, 0].handle.getSize()
f1_size = assembled_form.M[1, 1].handle.getSize()
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axhline(y=f0_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] - 0.5, color="k")
ax.axhline(y=f0_size[0] + f1_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] + f1_size[0] - 0.5, color="k")
return plot
def plot_matrix_hybrid_multiplier(a_form, trace_index=2, bcs=[], **kwargs):
"""Provides a plot of a condensed hybrid-mixed matrix for single scale problems."""
fig, ax = plt.subplots(1, 1)
_A = Tensor(a_form)
A = _A.blocks
idx = trace_index
S = A[idx, idx] - A[idx, :idx] * A[:idx, :idx].inv * A[:idx, idx]
Smat = assemble(S, bcs=bcs)
petsc_mat = Smat.M.handle
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Below there is the spy alternative
# plot = plt.spy(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
return plot
def filter_real_part_in_array(array: np.ndarray, imag_threshold: float = 1e-5) -> np.ndarray:
"""Utility function to filter real part in a numpy array.
:param array:
Array with real and complex numbers.
:param imag_threshold:
Threshold to cut off imaginary part in complex number.
:return:
Filtered array with only real numbers.
"""
    real_part_array = array.real[abs(array.imag) < imag_threshold]
return real_part_array
# Solver options
solvers_options = {
# "cg": solve_poisson_cg,
# "cgls": solve_poisson_cgls,
# "dgls": solve_poisson_dgls,
# "sdhm": solve_poisson_sdhm,
# "ls": solve_poisson_ls,
# "dls": solve_poisson_dls,
"lsh": solve_poisson_lsh,
# "vms": solve_poisson_vms,
# "dvms": solve_poisson_dvms,
# "mixed_RT": solve_poisson_mixed_RT,
# "hdg": solve_poisson_hdg,
# "cgh": solve_poisson_cgh,
# "ldgc": solve_poisson_ldgc,
# "sipg": solve_poisson_sipg,
}
degree = 1
last_degree = 1
for current_solver in solvers_options:
# Setting the output file name
name = f"{current_solver}"
# Selecting the solver and its kwargs
solver = solvers_options[current_solver]
# Performing the convergence study
hp_refinement_cond_number_calculation(
solver,
min_degree=degree,
max_degree=degree + last_degree,
quadrilateral=True,
name=name
)
# N = 5
# mesh = UnitSquareMesh(N, N, quadrilateral=True)
# result = solve_poisson_lsh(mesh, degree=1)
# print(f'Is symmetric? {result.is_operator_symmetric}')
# print(f'nnz: {result.nnz}')
# print(f'DoFs: {result.number_of_dofs}')
# print(f'Condition Number: {result.condition_number}')
# # Plotting the resulting matrix
# matplotlib.use('TkAgg')
# import copy
# my_cmap = copy.copy(plt.cm.get_cmap("winter"))
# my_cmap.set_bad(color="lightgray")
# # plot_matrix_primal_hybrid_full(result.form, result.bcs, cmap=my_cmap)
# # plot_matrix_mixed_hybrid_full(result.form, result.bcs, cmap=my_cmap)
# plot_matrix_hybrid_multiplier(result.form, trace_index=2, bcs=result.bcs, cmap=my_cmap)
# # plot_matrix(result.assembled_form, cmap=my_cmap)
# # plot_matrix_mixed(result.assembled_form, cmap=my_cmap)
# plt.tight_layout()
# plt.savefig("sparse_pattern.png")
# plt.show() | 30.807237 | 114 | 0.617528 |
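The solve_poisson_* functions and hp_refinement_cond_number_calculation referenced above are defined earlier in the script and are not reproduced here. The core of such a study is estimating the condition number of the assembled operator; a hedged sketch consistent with the imports used above (scipy svd/svds over the PETSc CSR data) is:

def estimate_condition_number(assembled_form, sparse=False, zero_tol=1e-12):
    # Pull the assembled PETSc matrix into a scipy CSR matrix.
    petsc_mat = assembled_form.M.handle
    size = petsc_mat.getSize()
    matrix = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
    matrix.eliminate_zeros()
    if sparse:
        # Extremal singular values via iterative sparse SVD.
        largest = svds(matrix, k=1, which="LM", return_singular_vectors=False)[0]
        smallest = svds(matrix, k=1, which="SM", return_singular_vectors=False)[0]
        return largest / max(smallest, zero_tol)
    # Dense SVD: ratio of largest to smallest nonzero singular value.
    singular_values = svd(matrix.toarray(), compute_uv=False)
    singular_values = singular_values[singular_values > zero_tol]
    return singular_values.max() / singular_values.min()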
81368cbcf7560067152788c0a732e279491b5a68 | 7,884 | py | Python | pydeap/feature_extraction/_time_domain_features.py | Wlgls/pyDEAP | b7cec369cedd4a69ea82bc49a2fb8376260e4ad2 | [
"Apache-2.0"
] | null | null | null | pydeap/feature_extraction/_time_domain_features.py | Wlgls/pyDEAP | b7cec369cedd4a69ea82bc49a2fb8376260e4ad2 | [
"Apache-2.0"
] | null | null | null | pydeap/feature_extraction/_time_domain_features.py | Wlgls/pyDEAP | b7cec369cedd4a69ea82bc49a2fb8376260e4ad2 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
'''
@File :_time_domain_features.py
@Time :2021/04/16 20:02:55
@Author :wlgls
@Version :1.0
'''
import numpy as np
def statistics(data, combined=True):
"""Statistical features include Power, Mean, Std, 1st differece, Normalized 1st difference, 2nd difference, Normalized 2nd difference.
Parameters
----------
data array
        data, for DEAP dataset, its shape may be (n_trials, n_channels, points)
    Return
    ----------
    f:
        Solved feature, its shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [13]: d.shape, l.shape
Out[13]: ((40, 32, 8064), (40, 1))
    In [14]: statistics(d).shape
Out[14]: (40, 32, 7)
"""
# Power
power = np.mean(data**2, axis=-1)
# Mean
ave = np.mean(data, axis=-1)
# Standard Deviation
std = np.std(data, axis=-1)
    # the mean of the absolute values of the 1st difference
diff_1st = np.mean(np.abs(np.diff(data,n=1, axis=-1)), axis=-1)
# the mean of the absolute values of Normalized 1st difference
normal_diff_1st = diff_1st / std
# the mean of the absolute values of 2nd difference mean
diff_2nd = np.mean(np.abs(data[..., 2:] - data[..., :-2]), axis=-1)
# the mean of the absolute values of Normalized 2nd difference
normal_diff_2nd = diff_2nd / std
# Features.append(np.concatenate((Power, Mean, Std, diff_1st, normal_diff_1st, diff_2nd, normal_diff_2nd), axis=2))
f = np.stack((power, ave, std, diff_1st, normal_diff_1st, diff_2nd, normal_diff_2nd), axis=-1)
if combined:
        f = f.reshape((*f.shape[:-2], -1))
return f
def hjorth(data, combined=True):
"""Solving Hjorth features include activity, mobility, complexity
Parameters
----------
data array
        data, for DEAP dataset, its shape may be (n_trials, n_channels, points)
    Return
    ----------
    f:
        Solved feature, its shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [15]: d.shape, l.shape
Out[15]: ((40, 32, 8064), (40, 1))
    In [16]: hjorth(d).shape
Out[16]: (40, 32, 3)
"""
data = np.array(data)
ave = np.mean(data, axis=-1)[..., np.newaxis]
diff_1st = np.diff(data, n=1, axis=-1)
# print(diff_1st.shape)
diff_2nd = data[..., 2:] - data[..., :-2]
# Activity
activity = np.mean((data-ave)**2, axis=-1)
# print(Activity.shape)
# Mobility
varfdiff = np.var(diff_1st, axis=-1)
# print(varfdiff.shape)
mobility = np.sqrt(varfdiff / activity)
# Complexity
varsdiff = np.var(diff_2nd, axis=-1)
complexity = np.sqrt(varsdiff/varfdiff) / mobility
f = np.stack((activity, mobility, complexity), axis=-1)
if combined:
        f = f.reshape((*f.shape[:-2], -1))
return f
def higher_order_crossing(data, k=10, combined=True):
"""Solving the feature of hoc. Hoc is a high order zero crossing quantity.
Parameters
----------
data : array
        data, for DEAP dataset, its shape may be (n_trials, n_channels, points)
    k : int, optional
        Order, by default 10
    Return
    ----------
    nzc:
        Solved feature, its shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [4]: d, l = load_deap(path, 0)
In [5]: hoc(d, k=10).shape
Out[5]: (40, 32, 10)
In [6]: hoc(d, k=5).shape
Out[6]: (40, 32, 5)
"""
nzc = []
for i in range(k):
curr_diff = np.diff(data, n=i)
x_t = curr_diff >= 0
x_t = np.diff(x_t)
x_t = np.abs(x_t)
count = np.count_nonzero(x_t, axis=-1)
nzc.append(count)
f = np.stack(nzc, axis=-1)
if combined:
        f = f.reshape((*f.shape[:-2], -1))
return f
def sevcik_fd(data, combined=True):
"""Fractal dimension feature is solved, which is used to describe the shape information of EEG time series data. It seems that this feature can be used to judge the electrooculogram and EEG.The calculation methods include Sevcik, fractal Brownian motion, box counting, Higuchi and so on.
Sevcik method: fast calculation and robust analysis of noise
Higuchi: closer to the theoretical value than box counting
The Sevick method is used here because it is easier to implement
    Parameters
    ----------
data array
        data, for DEAP dataset, its shape may be (n_trials, n_channels, points)
    Return
    ----------
    f:
        Solved feature, its shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [7]: d.shape, l.shape
Out[7]: ((40, 32, 8064), (40, 1))
In [8]: sevcik_fd(d).shape
Out[8]: (40, 32, 1)
"""
points = data.shape[-1]
x = np.arange(1, points+1)
x_ = x / np.max(x)
miny = np.expand_dims(np.min(data, axis=-1), axis=-1)
maxy = np.expand_dims(np.max(data, axis=-1), axis=-1)
y_ = (data-miny) / (maxy-miny)
L = np.expand_dims(np.sum(np.sqrt(np.diff(y_, axis=-1)**2 + np.diff(x_)**2), axis=-1), axis=-1)
f = 1 + np.log(L) / np.log(2 * (points-1))
# print(FD.shape)
if combined:
        f = f.reshape((*f.shape[:-2], -1))
return f
def calc_L(X, k, m):
"""
Return Lm(k) as the length of the curve.
"""
N = X.shape[-1]
n = np.floor((N-m)/k).astype(np.int64)
norm = (N-1) / (n*k)
ss = np.sum(np.abs(np.diff(X[..., m::k], n=1)), axis=-1)
Lm = (ss*norm) / k
return Lm
def calc_L_average(X, k):
"""
Return <L(k)> as the average value over k sets of Lm(k).
"""
calc_L_series = np.frompyfunc(lambda m: calc_L(X, k, m), 1, 1)
L_average = np.average(calc_L_series(np.arange(1, k+1)))
return L_average
def higuchi_fd(data, k_max, combined=True):
"""Fractal dimension feature is solved, which is used to describe the shape information of EEG time series data. It seems that this feature can be used to judge the electrooculogram and EEG.The calculation methods include Sevcik, fractal Brownian motion, box counting, Higuchi and so on.
Sevcik method: fast calculation and robust analysis of noise
Higuchi: closer to the theoretical value than box counting
    The Higuchi method is used here because it is closer to the theoretical value
    Parameters
    ----------
data array
        data, for DEAP dataset, its shape may be (n_trials, n_channels, points)
    Return
    ----------
    f:
        Solved feature, its shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [7]: d.shape, l.shape
Out[7]: ((40, 32, 8064), (40, 1))
    In [8]: higuchi_fd(d, k_max=10).shape
Out[8]: (40, 32, 1)
"""
calc_L_average_series = np.frompyfunc(lambda k: calc_L_average(data, k), 1, 1)
k = np.arange(1, k_max+1)
L = calc_L_average_series(k)
L = np.stack(L, axis=-1)
fd = np.zeros(data.shape[:-1])
for ind in np.argwhere(L[..., 0]):
tmp = L[ind[0], ind[1], ind[2]]
D, _= np.polyfit(np.log2(k), np.log2(tmp), 1)
        fd[ind[0], ind[1], ind[2]] = - D
f = np.expand_dims(fd, axis=-1)
if combined:
        f = f.reshape((*f.shape[:-2], -1))
return f
| 29.977186 | 291 | 0.597793 |
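A quick smoke test of the extractors above on DEAP-shaped random data; the array is only a stand-in for real EEG trials.

if __name__ == '__main__':
    rng = np.random.default_rng(seed=0)
    eeg = rng.normal(size=(40, 32, 8064))  # trials x channels x samples
    print(statistics(eeg, combined=False).shape)                    # (40, 32, 7)
    print(hjorth(eeg, combined=False).shape)                        # (40, 32, 3)
    print(higher_order_crossing(eeg, k=10, combined=False).shape)   # (40, 32, 10)
    print(sevcik_fd(eeg, combined=False).shape)                     # (40, 32, 1)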
8137ad04173c8650a7a1905ad7cd6c799cdbd81c | 39 | py | Python | pymutual/__init__.py | kimballh/pymutual | 7d7f588099eee7bdd669d613756509c6ab44a911 | [
"MIT"
] | null | null | null | pymutual/__init__.py | kimballh/pymutual | 7d7f588099eee7bdd669d613756509c6ab44a911 | [
"MIT"
] | null | null | null | pymutual/__init__.py | kimballh/pymutual | 7d7f588099eee7bdd669d613756509c6ab44a911 | [
"MIT"
] | null | null | null | from .session import Session, MutualAPI | 39 | 39 | 0.846154 |
81388e81430c58269d1e2c8e97af2f8dbcc4ac2d | 936 | py | Python | forms.py | Joshua-Barawa/pitches-IP | 41d9d0d2fbecab50e82a4ee64a036952b8d785e1 | [
"PostgreSQL",
"Unlicense"
] | null | null | null | forms.py | Joshua-Barawa/pitches-IP | 41d9d0d2fbecab50e82a4ee64a036952b8d785e1 | [
"PostgreSQL",
"Unlicense"
] | null | null | null | forms.py | Joshua-Barawa/pitches-IP | 41d9d0d2fbecab50e82a4ee64a036952b8d785e1 | [
"PostgreSQL",
"Unlicense"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import InputRequired, Email, ValidationError
from models import User
| 40.695652 | 84 | 0.746795 |
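The form classes themselves did not make it into this excerpt. A sketch of the kind of registration form these imports point to (field names and the User lookup are assumptions about the pitches app):

class RegistrationForm(FlaskForm):
    email = StringField('Your Email Address', validators=[InputRequired(), Email()])
    username = StringField('Enter your username', validators=[InputRequired()])
    password = PasswordField('Password', validators=[InputRequired()])
    submit = SubmitField('Sign Up')

    def validate_username(self, data_field):
        # Assumes a Flask-SQLAlchemy style User model.
        if User.query.filter_by(username=data_field.data).first():
            raise ValidationError('That username is already taken')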
81399676f0bd08a3b07c20a3a444ab0c8669d9d3 | 1,064 | py | Python | plugins/barracuda_waf/komand_barracuda_waf/actions/create_security_policy/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/barracuda_waf/komand_barracuda_waf/actions/create_security_policy/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/barracuda_waf/komand_barracuda_waf/actions/create_security_policy/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
| 17.16129 | 80 | 0.566729 |
813a4523ca5ed1d20d9dca5c73420720f380885a | 1,090 | py | Python | examples/dhc/rule_example.py | fruttasecca/hay_checker | 2bbf4e8e90e0abc590dd74080fb6e4f445056354 | [
"MIT"
] | 2 | 2019-05-22T08:24:38.000Z | 2020-12-04T13:36:30.000Z | examples/dhc/rule_example.py | fruttasecca/hay_checker | 2bbf4e8e90e0abc590dd74080fb6e4f445056354 | [
"MIT"
] | null | null | null | examples/dhc/rule_example.py | fruttasecca/hay_checker | 2bbf4e8e90e0abc590dd74080fb6e4f445056354 | [
"MIT"
] | 3 | 2018-09-15T13:40:40.000Z | 2021-06-29T23:31:18.000Z | #!/usr/bin/python3
from pyspark.sql import SparkSession
from haychecker.dhc.metrics import rule
spark = SparkSession.builder.appName("rule_example").getOrCreate()
df = spark.read.format("csv").option("header", "true").load("examples/resources/employees.csv")
df.show()
condition1 = {"column": "salary", "operator": "gt", "value": 2100}
conditions = [condition1]
r1 = rule(conditions, df)[0]
print("Rule salary>2100: {}".format(r1))
condition1 = {"column": "salary", "operator": "lt", "value": 2100}
condition2 = {"column": "title", "operator": "eq", "value": "Sales Representative"}
conditions = [condition1, condition2]
task1 = rule(conditions)
condition1 = {"column": "salary", "operator": "lt", "value": 2100}
condition2 = {"column": "city", "operator": "eq", "value": "London"}
conditions = [condition1, condition2]
task2 = rule(conditions)
task3 = task1.add(task2)
result = task3.run(df)
r1 = result[0]["scores"][0]
r2 = result[1]["scores"][0]
print("Rule salary<2100 and title=\"Sales Representative\": {},"
" rule salary<2100 and city=\"London\": {}".format(r1, r2)) | 31.142857 | 95 | 0.678899 |
813a8ce209fa6c27b191963bd6e67321e4277566 | 10,579 | py | Python | secure_message/common/utilities.py | uk-gov-mirror/ONSdigital.ras-secure-message | 741eed651eea47dd1a13c7c93b1b1796584cdf2b | [
"MIT"
] | null | null | null | secure_message/common/utilities.py | uk-gov-mirror/ONSdigital.ras-secure-message | 741eed651eea47dd1a13c7c93b1b1796584cdf2b | [
"MIT"
] | null | null | null | secure_message/common/utilities.py | uk-gov-mirror/ONSdigital.ras-secure-message | 741eed651eea47dd1a13c7c93b1b1796584cdf2b | [
"MIT"
] | null | null | null | import collections
import logging
import urllib.parse
from structlog import wrap_logger
from secure_message.constants import MESSAGE_BY_ID_ENDPOINT, MESSAGE_LIST_ENDPOINT, MESSAGE_QUERY_LIMIT
from secure_message.services.service_toggles import party, internal_user_service
logger = wrap_logger(logging.getLogger(__name__))
MessageArgs = collections.namedtuple(
'MessageArgs',
'page limit business_id surveys cc label desc ce is_closed my_conversations new_respondent_conversations all_conversation_types unread_conversations')
def get_options(args): # NOQA pylint:disable=too-complex
"""extract options from request , allow label to be set by caller
:param args: contains search arguments. Not all end points support all args
:returns: MessageArgs named tuple containing the args for the search
    business_id If set, restricts search to conversations regarding this specific party id
    surveys If set, allows the count to be restricted by a list of survey_ids
    cc If set, allows the count to be restricted by a particular case
    ce If set, allows the count to be restricted by a particular collection exercise
    is_closed If set to 'true' only counts closed conversations, else only open conversations
    my_conversations If set to 'true' only counts my conversations,
    i.e. conversations where the current user id is the to actor id
    new_respondent_conversations If set to 'true' only counts conversations where the to actor is set to 'GROUP'
    all_conversation_types If set to 'true', overrides is_closed, my_conversations and new_respondent_conversations
    and returns 4 counts, 1 for each of: open, closed, my_conversations and new_respondent_conversations
page If set requests the specific page of information to return
limit If set it sets the maximum number of results to return
desc If present, requests the information in descending order
"""
fields = {'page': 1, 'limit': MESSAGE_QUERY_LIMIT, 'business_id': None, 'surveys': None,
'desc': True, 'cc': None, 'label': None, 'ce': None, 'is_closed': False,
'my_conversations': False, 'new_respondent_conversations': False, 'all_conversation_types': False,
'unread_conversations': False}
for field in ['cc', 'ce', 'business_id', 'label']:
if args.get(field):
fields[field] = str(args.get(field))
fields['surveys'] = args.getlist('survey')
for field in ['limit', 'page']:
if args.get(field):
fields[field] = int(args.get(field))
if args.get('desc') == 'false':
fields['desc'] = False
if args.get('is_closed') == 'true':
fields['is_closed'] = True
if args.get('my_conversations') == 'true':
fields['my_conversations'] = True
if args.get('new_respondent_conversations') == 'true':
fields['new_respondent_conversations'] = True
if args.get('all_conversation_types') == 'true':
fields['all_conversation_types'] = True
if args.get('unread_conversations') == 'true':
fields['unread_conversations'] = True
return MessageArgs(page=fields['page'], limit=fields['limit'], business_id=fields['business_id'],
surveys=fields['surveys'], cc=fields['cc'], label=fields['label'],
desc=fields['desc'], ce=fields['ce'], is_closed=fields['is_closed'],
my_conversations=fields['my_conversations'],
new_respondent_conversations=fields['new_respondent_conversations'],
all_conversation_types=fields['all_conversation_types'],
unread_conversations=fields['unread_conversations'])
def set_conversation_type_args(existing_args, is_closed=False, my_conversations=False, new_conversations=False,
all_types=False, unread_conversations=False):
"""Returns a new set of args based on the existing args which are a named tuple,
but allow the conversation type only to be changed"""
return MessageArgs(page=existing_args.page,
limit=existing_args.limit,
business_id=existing_args.business_id,
surveys=existing_args.surveys,
cc=existing_args.cc,
label=existing_args.label,
desc=existing_args.desc,
ce=existing_args.ce,
is_closed=is_closed,
my_conversations=my_conversations,
new_respondent_conversations=new_conversations,
all_conversation_types=all_types,
unread_conversations=unread_conversations)
def process_paginated_list(paginated_list, host_url, user, message_args, endpoint=MESSAGE_LIST_ENDPOINT, body_summary=True):
"""used to change a pagination object to json format with links"""
messages = []
string_query_args = generate_string_query_args(message_args)
for message in paginated_list.items:
msg = message.serialize(user, body_summary=body_summary)
msg['_links'] = {"self": {"href": f"{host_url}{MESSAGE_BY_ID_ENDPOINT}/{msg['msg_id']}"}}
messages.append(msg)
links = {'first': {"href": f"{host_url}{endpoint}"},
'self': {"href": f"{host_url}{endpoint}?{string_query_args}&page={message_args.page}"}}
if paginated_list.has_next:
links['next'] = {
"href": f"{host_url}{endpoint}?{string_query_args}&page={message_args.page + 1}"}
if paginated_list.has_prev:
links['prev'] = {
"href": f"{host_url}{endpoint}?{string_query_args}&page={message_args.page - 1}"}
return messages, links
def add_to_details(messages):
"""Adds a @msg_to key to every message in a list of messages.
Every msg_to uuid is resolved to include details of the user.
If the call for the internal user id fails, an exception will be thrown.
If the external user id cannot be found in the list that we got from the party service. There
won't be a @msg_to value returned in the payload. The API documentation notes that these elements
aren't guaranteed to be provided so we're not breaking the contract by doing this.
Note: Several of these lines of code could be combined into a more succinct view, spreading them out
is deliberate so that log stack traces are better able to identify the cause of log errors
"""
external_user_details = {}
for user in party.get_users_details(get_external_user_uuid_list(messages)):
external_user_details[user['id']] = user
for message in messages:
try:
msg_to = message["msg_to"][0]
from_internal = message["from_internal"]
if not from_internal:
msg_to_details = internal_user_service.get_user_details(msg_to)
message.update({"@msg_to": [msg_to_details]})
else:
msg_to_details = external_user_details.get(msg_to)
if msg_to_details:
message.update({'@msg_to': [msg_to_details]})
else:
logger.info("No details found for the message recipient", msg_to=msg_to)
except IndexError:
logger.exception("Exception adding to details", msg_to=msg_to, from_internal=from_internal)
raise
return messages
def add_from_details(messages):
"""Adds a @msg_from key to every message in a list of messages.
    Every msg_from uuid is resolved to include details of the user.
If the call for the internal user id fails, an exception will be thrown.
If the external user id cannot be found in the list that we got from the party service. There
won't be a @msg_from value returned in the payload. The API documentation notes that these elements
aren't guaranteed to be provided so we're not breaking the contract by doing this.
"""
external_user_details = {}
for user in party.get_users_details(get_external_user_uuid_list(messages)):
external_user_details[user['id']] = user
for message in messages:
try:
msg_from = message["msg_from"]
from_internal = message["from_internal"]
if from_internal:
message.update({"@msg_from": internal_user_service.get_user_details(msg_from)})
else:
if external_user_details.get(message['msg_from']):
message.update({'@msg_from': external_user_details.get(msg_from)})
except IndexError:
logger.exception("Exception adding from details message", msg_from=msg_from, from_internal=from_internal)
raise
return messages
def get_external_user_uuid_list(messages):
"""Compiles a list of all unique the external user (respondent) uuids from a list of messages"""
external_user_uuids = set()
external_msgs = [message for message in messages if message['from_internal'] is False]
for message in external_msgs:
external_user_uuids.add(message["msg_from"])
internal_messages = [message for message in messages if message['from_internal'] is True]
for uuid in internal_messages:
external_user_uuids.add(uuid["msg_to"][0])
return external_user_uuids
def add_business_details(messages):
"""Adds a @business_details key to every message in a list of messages."""
business_ids = set()
for message in messages:
business_ids.add(message['business_id'])
business_details = party.get_business_details(business_ids)
for message in messages:
message['@business_details'] = next((business for business in business_details if business["id"] == message['business_id']), None)
return messages
def add_users_and_business_details(messages):
"""Add both user and business details to messages based on data from party service"""
if not messages:
raise ValueError('messages is a required parameter and must not be empty')
messages = add_to_details(messages)
messages = add_from_details(messages)
logger.info("Successfully added to and from details")
messages = add_business_details(messages)
logger.info("Successfully added business details")
return messages
| 43.714876 | 154 | 0.680121 |
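process_paginated_list above relies on generate_string_query_args, which is not shown in this excerpt. A plausible sketch that re-encodes the non-empty MessageArgs fields with urllib (the exact parameter names the service expects are an assumption):

def generate_string_query_args(message_args):
    params = {'business_id': message_args.business_id,
              'cc': message_args.cc,
              'ce': message_args.ce,
              'label': message_args.label,
              'limit': message_args.limit,
              'is_closed': message_args.is_closed}
    query = {key: value for key, value in params.items() if value}
    if message_args.surveys:
        query['survey'] = message_args.surveys
    return urllib.parse.urlencode(query, doseq=True)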
813b149e48d21390532f6bf57e32e5f1ed05f482 | 8,353 | py | Python | notegame/games/nonogram/core/renderer.py | notechats/notegame | 3d9538b98cb6b0b240956b1271e028b22458fc54 | [
"Apache-2.0"
] | null | null | null | notegame/games/nonogram/core/renderer.py | notechats/notegame | 3d9538b98cb6b0b240956b1271e028b22458fc54 | [
"Apache-2.0"
] | null | null | null | notegame/games/nonogram/core/renderer.py | notechats/notegame | 3d9538b98cb6b0b240956b1271e028b22458fc54 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Defines various renderers for the game of nonogram
"""
from abc import ABC
from sys import stdout
from notetool.tool.log import logger
from six import integer_types, itervalues, text_type
from ..utils.iter import max_safe, pad
from ..utils.other import two_powers
from .common import BOX, SPACE, UNKNOWN, BlottedBlock, is_list_like
def draw(self, cells=None):
"""Calculate all the cells and draw an image of the board"""
self.draw_header()
self.draw_side()
self.draw_grid(cells=cells)
self.render()
def draw_header(self):
"""
Changes the internal state to be able to draw columns descriptions
"""
raise NotImplementedError()
def draw_side(self):
"""
Changes the internal state to be able to draw rows descriptions
"""
raise NotImplementedError()
def draw_grid(self, cells=None):
"""
Changes the internal state to be able to draw a main grid
"""
raise NotImplementedError()
def _register_renderers():
res = dict()
for obj in itervalues(globals()):
if isinstance(obj, type):
if issubclass(obj, StreamRenderer) and hasattr(obj, '__rend_name__'):
res[obj.__rend_name__] = obj
return res
RENDERERS = _register_renderers()
| 27.386885 | 81 | 0.587932 |
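The Renderer/StreamRenderer base classes and their drawing methods are elided in this excerpt. Purely as an illustration of how _register_renderers picks up implementations, a concrete subclass only needs a __rend_name__ attribute; the icon mapping below is an assumption, not the library's actual glyphs.

class AsciiRenderer(StreamRenderer):
    """Draw board cells with plain ASCII characters."""
    __rend_name__ = 'text'

    ICONS = {UNKNOWN: '_', BOX: 'X', SPACE: '.'}

    def cell_icon(self, cell):
        return self.ICONS.get(cell, '?')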
813bbe394d73b1fd28585f58879386377ceda809 | 9,047 | py | Python | sympy/printing/lambdarepr.py | Carreau/sympy | 168de33bb177936fa9517702b2c5a777b3989672 | [
"BSD-3-Clause"
] | 4 | 2018-07-04T17:20:12.000Z | 2019-07-14T18:07:25.000Z | sympy/printing/lambdarepr.py | Carreau/sympy | 168de33bb177936fa9517702b2c5a777b3989672 | [
"BSD-3-Clause"
] | null | null | null | sympy/printing/lambdarepr.py | Carreau/sympy | 168de33bb177936fa9517702b2c5a777b3989672 | [
"BSD-3-Clause"
] | 1 | 2018-09-03T03:02:06.000Z | 2018-09-03T03:02:06.000Z | from __future__ import print_function, division
from .str import StrPrinter
from sympy.utilities import default_sort_key
# numexpr works by altering the string passed to numexpr.evaluate
# rather than by populating a namespace. Thus a special printer...
def lambdarepr(expr, **settings):
"""
Returns a string usable for lambdifying.
"""
return LambdaPrinter(settings).doprint(expr)
| 34.139623 | 92 | 0.559191 |
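LambdaPrinter is defined earlier in this module and is not shown above. Typical use of the helper is just to turn an expression into the string that lambdify evaluates:

if __name__ == '__main__':
    from sympy.abc import x, y
    print(lambdarepr(x**2 + y))  # -> x**2 + y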
813c2d5f4577a87860a81df5e212cf9b2d380367 | 1,690 | py | Python | python/fill_na_v2.py | fredmell/CS229Project | b214127485ddc587b9fe3be253937ba8378f9db7 | [
"MIT"
] | null | null | null | python/fill_na_v2.py | fredmell/CS229Project | b214127485ddc587b9fe3be253937ba8378f9db7 | [
"MIT"
] | null | null | null | python/fill_na_v2.py | fredmell/CS229Project | b214127485ddc587b9fe3be253937ba8378f9db7 | [
"MIT"
] | 1 | 2020-06-01T00:36:06.000Z | 2020-06-01T00:36:06.000Z | """
Fill na with most common of the whole column
"""
import numpy as np
import pandas as pd
import time
import matplotlib.pyplot as plt
from datetime import datetime
import re
from collections import Counter
from statistics import median
from tqdm import tqdm
file = '/home/nicolasbievre/yelp_data.pkl'
file_na = '/home/nicolasbievre/yelp_data_no_na.pkl'
df = pd.read_pickle(file)
categories = list(set(df['categories'].values))
n = len(categories)
for i in tqdm(range(len(df.columns))):
col = df.columns[i]
if not col in {'review_id': 0, 'business_id': 0, 'user_id': 0, 'postal_code': 0}:
df_col = df[col].values
na = sum(pd.isna(df_col))
if na > 0:
most_commom_term = find_most_common_value(df_col)
if not pd.isna(most_commom_term):
df.loc[(pd.isna(df_col)), col] = most_commom_term
if i % 35 == 0 and i > 0:
df.to_pickle(file_na)
df.to_pickle(file_na)
| 23.472222 | 85 | 0.611243 |
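The script above relies on a `find_most_common_value` helper that is not shown in this excerpt. A plausible implementation, consistent with the module docstring and the `Counter` import, might look like the following (an assumption, not the author's code):

import pandas as pd
from collections import Counter

def find_most_common_value(values):
    """Return the most frequent non-null value, or NaN when every entry is null (hypothetical helper)."""
    non_null = [v for v in values if not pd.isna(v)]
    if not non_null:
        return float('nan')
    return Counter(non_null).most_common(1)[0][0]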
813cbfb2b0206a03eec11ec90ba51dbd9b92d6bd | 3,071 | py | Python | GUI Applications/calc.py | jaiswalIT02/pythonprograms | bc94e52121202b04c3e9112d9786f93ed6707f7a | [
"MIT"
] | null | null | null | GUI Applications/calc.py | jaiswalIT02/pythonprograms | bc94e52121202b04c3e9112d9786f93ed6707f7a | [
"MIT"
] | null | null | null | GUI Applications/calc.py | jaiswalIT02/pythonprograms | bc94e52121202b04c3e9112d9786f93ed6707f7a | [
"MIT"
] | null | null | null | from tkinter import Tk
from tkinter import Entry
from tkinter import Button
from tkinter import StringVar
t=Tk()
t.title("Tarun Jaiswal")
t.geometry("425x300")
t.resizable(0,0)
t.configure(background="black")  # background color
a=StringVar()
e1=Entry(font=("",30),justify="right",textvariable=a)
e1.place(x=0,y=0,width=425,height=50)
b1=Button(text="7",font=("",25),bg="gray",fg="white",activebackground="yellow",command=show)
b1.place(x=5,y=55,width=100,height=50)
b1.configure(command=lambda:show("7"))
b2=Button(text="8",font=("",25),bg="gray",fg="white",activebackground="yellow")
b2.place(x=110,y=55,width=100,height=50)
b2.configure(command=lambda:show("8"))
b3=Button(text="9",font=("",25),bg="gray",fg="white",activebackground="yellow")
b3.place(x=215,y=55,width=100,height=50)
b3.configure(command=lambda:show("9"))
b4=Button(text="+",font=("",25),bg="gray",fg="white",activebackground="yellow")
b4.place(x=320,y=55,width=100,height=50)
b4.configure(command=lambda:show("+"))
b5=Button(text="4",font=("",25),bg="gray",fg="white",activebackground="yellow")
b5.place(x=5,y=110,width=100,height=50)
b5.configure(command=lambda:show("4"))
b6=Button(text="5",font=("",25),bg="gray",fg="white",activebackground="yellow")
b6.place(x=110,y=110,width=100,height=50)
b6.configure(command=lambda:show("5"))
b7=Button(text="6",font=("",25),bg="gray",fg="white",activebackground="yellow")
b7.place(x=215,y=110,width=100,height=50)
b7.configure(command=lambda:show("6"))
b8=Button(text="-",font=("",25),bg="gray",fg="white",activebackground="yellow")
b8.place(x=320,y=110,width=100,height=50)
b8.configure(command=lambda:show("-"))
b9=Button(text="1",font=("",25),bg="gray",fg="white",activebackground="yellow")
b9.place(x=5,y=165,width=100,height=50)
b9.configure(command=lambda:show("1"))
b10=Button(text="2",font=("",25),bg="gray",fg="white",activebackground="yellow")
b10.place(x=110,y=165,width=100,height=50)
b10.configure(command=lambda:show("2"))
b11=Button(text="3",font=("",25),bg="gray",fg="white",activebackground="yellow")
b11.place(x=215,y=165,width=100,height=50)
b11.configure(command=lambda:show("3"))
b12=Button(text="*",font=("",25),bg="gray",fg="white",activebackground="yellow")
b12.place(x=320,y=165,width=100,height=50)
b12.configure(command=lambda:show("*"))
b13=Button(text="C",font=("",25),bg="gray",fg="white",activebackground="yellow")
b13.place(x=5,y=220,width=100,height=50)
b13.configure(command=clear)
b14=Button(text="0",font=("",25),bg="gray",fg="white",activebackground="yellow")
b14.place(x=110,y=220,width=100,height=50)
b14.configure(command=lambda:show("0"))
b15=Button(text="=",font=("",25),bg="gray",fg="white",activebackground="yellow",command=equal)
b15.place(x=215,y=220,width=100,height=50)
b15.configure(command=equal)
b16=Button(text="/",font=("",25),bg="gray",fg="white",activebackground="yellow")
b16.place(x=320,y=220,width=100,height=50)
b16.configure(command=lambda:show("/"))
t.mainloop() | 33.021505 | 94 | 0.699772 |
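calc.py wires its buttons to `show`, `clear` and `equal`, none of which appear in this excerpt. A minimal sketch of what those callbacks would need to do, assuming they operate on the `StringVar` `a` that backs the entry widget (hypothetical, not the author's code):

def show(value):
    # append the pressed digit/operator to the current display text
    a.set(a.get() + value)

def clear():
    # reset the display
    a.set("")

def equal():
    # evaluate the typed expression; eval is tolerable here only because
    # input comes solely from the calculator buttons
    try:
        a.set(str(eval(a.get())))
    except Exception:
        a.set("Error")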