blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 5-283) | content_id (string, length 40) | detected_licenses (sequence, length 0-41) | license_type (string, 2 classes) | repo_name (string, length 7-96) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, 58 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 12.7k-662M, may be null) | star_events_count (int64, 0-35.5k) | fork_events_count (int64, 0-20.6k) | gha_license_id (string, 11 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 43 classes) | src_encoding (string, 9 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 7-5.88M) | extension (string, 30 classes) | content (string, length 7-5.88M) | authors (sequence, length 1) | author (string, length 0-73) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
23636bc45d2c0e4e109f69232e7135e85b900d62 | 9c6a973071ca78369d4b33cc67dfb767cce7677e | /url_lib_demo.py | 41d5f767ca50497ce7354861fb09cadfacca5aee | [] | no_license | wittybones/learn-python | 141f7ad9b01f7d55b1fc65004f621e5a9ea1aa0a | 95de8b213213ec291a2c68b9d2153b55bc794773 | refs/heads/master | 2022-07-17T06:31:16.489713 | 2020-02-26T12:00:46 | 2020-02-26T12:00:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | import urllib.request
import urllib.error
try:
url = urllib.request.urlopen("https://www.python.org/")
content = url.read()
url.close()
except urllib.error.HTTPError:
print('url not found')
exit()
file = open('./python.html','wb')
file.write(content)
file.close()
| [
"[email protected]"
] | |
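A more idiomatic variant of the same download-and-save flow (editor-added sketch, not part of the source file; same URL and output path as above, with context managers so the response and file handles are always closed):
import urllib.request
import urllib.error
try:
    with urllib.request.urlopen("https://www.python.org/") as response:
        content = response.read()
except urllib.error.HTTPError:
    print('url not found')
else:
    with open('./python.html', 'wb') as outfile:
        outfile.write(content)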
701bfdc61f48a8890901fe3dbd55039c1658b9b4 | a9641bc1ea35edec8c6b3f9376121ad3d12bb40d | /python/imageconverter.py | 3f70850efe490d4d1f91bdd9db9e3f048ad90426 | [] | no_license | ihyunmin/workout_analysis | 24d3fdbd67ab24c774ee5822cad130140c4fe66f | 8003aa5d4dfcb45c7129f4fc93b77a106b9c9ae7 | refs/heads/main | 2023-08-27T18:48:54.830051 | 2021-10-19T00:10:54 | 2021-10-19T00:10:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,060 | py | import cv2
import math
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_pose = mp.solutions.pose
mp_holistic = mp.solutions.holistic
DESIRED_HEIGHT = 2944
DESIRED_WIDTH = 1324
def resize_and_show(image):
h, w = image.shape[:2]
if h < w:
img = cv2.resize(image, (DESIRED_WIDTH, math.floor(h/(w/DESIRED_WIDTH))))
else:
img = cv2.resize(image, (math.floor(w/(h/DESIRED_HEIGHT)), DESIRED_HEIGHT))
cv2.imshow("image",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# For static images:
def imageConvert(imgFile):
IMAGE_FILES = [imgFile +'.jpeg']
pose = mp_pose.Pose(
static_image_mode=True,
model_complexity=2,
min_detection_confidence=0.5)
for idx, file in enumerate(IMAGE_FILES):
image = cv2.imread(file)
image_height, image_width, _ = image.shape
# Convert the BGR image to RGB before processing.
results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
if not results.pose_landmarks:
continue
print(
f'Nose coordinates: ('
f'{results.pose_landmarks.landmark[mp_holistic.PoseLandmark.NOSE].x * image_width}, '
f'{results.pose_landmarks.landmark[mp_holistic.PoseLandmark.NOSE].y * image_height})'
)
# Draw pose landmarks on the image.
annotated_image = image.copy()
#print(results.pose_landmarks)
idx = 0
for i in results.pose_landmarks.landmark:
#print('part : ' + POSELANDMARKS[idx])
#print(i)
idx = idx +1
mp_drawing.draw_landmarks(
annotated_image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
cv2.imwrite('./annotated_image' + str(idx) + '.png', annotated_image)
resize_and_show(annotated_image)
# Plot pose world landmarks.
#mp_drawing.plot_landmarks(
# results.pose_world_landmarks, mp_pose.POSE_CONNECTIONS)
if __name__ == "__main__":
print('image main') | [
"[email protected]"
] | |
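A minimal driver sketch for the pose-annotation module above (the base name 'sample' is a hypothetical input and the module is assumed importable as imageconverter; imageConvert appends '.jpeg' itself, draws the MediaPipe landmarks, then saves and displays the result):
from imageconverter import imageConvert
imageConvert('sample')  # processes sample.jpeg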
4b36c4de3fa5f494a566c36b798d6cdeb615a695 | 8111a9c84545f39d937da6a186c024d1a339d936 | /custom_entities/article.py | e4b9e08dce56399c62f3c1e1effe1297e24a12ed | [] | no_license | paulohecosta/papers_abstract_analysis | 8c7039e724bd57d7d3356f18beb2cf0c342fe76f | 90ee56cd67023f4e3a6eb8a098eea8bfd72892eb | refs/heads/master | 2021-01-24T02:58:36.865617 | 2018-03-29T01:23:50 | 2018-03-29T01:23:50 | 122,869,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,462 | py | from custom_entities.author import AuthorData
from custom_entities.journal import JournalData
from custom_helpers.nltk_helper import get_important_words
class ArticleData(object):
def __init__(self, root):
self.article_title = root.find(".//ArticleTitle").text
self.pub_date = root.find(".//PubDate/Day").text + "/" + \
root.find(".//PubDate/Month").text + "/" + \
root.find(".//PubDate/Year").text
self.doi = root.find(".//*[@IdType='doi']").text
self.abstract_text = ""
for texts in root.findall(".//AbstractText"):
self.abstract_text += "\n" + texts.text
self.key_words = []
concat_words = ""
for key_word in root.findall(".//Keyword"):
concat_words += " " + key_word.text
for token in get_important_words(concat_words, 5):
self.key_words.append(str(token[0]))
self.journal = JournalData(root.find(".//Journal/ISSN").text, root.find(".//Journal/Title").text)
self.authors = []
for author in root.findall(".//Author"):
self.authors.append(AuthorData(author.find("LastName"), author.find("ForeName")))
self.important_words = []
for token in get_important_words(self.abstract_text, 10):
self.important_words.append(str(token[0]))
self.ordered_keys = list(set( list(set(self.key_words)) + list(set(self.important_words)) )) | [
"[email protected]"
] | |
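A hedged usage sketch for ArticleData (the XML file name is hypothetical, and parsing with xml.etree is an assumption; the class only needs an element-tree root containing PubMed-style tags):
import xml.etree.ElementTree as ET
from custom_entities.article import ArticleData
root = ET.parse('pubmed_record.xml').getroot()  # hypothetical PubMed XML export
article = ArticleData(root)
print(article.article_title, article.pub_date)
print(article.ordered_keys)  # merged keyword and abstract terms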
511019fa80316e4af585205c61008e1151ab602f | cb0e7d6493b23e870aa625eb362384a10f5ee657 | /solutions/python3/1317.py | dad7229f37b93dd49443920694f7bf8a41155432 | [] | no_license | sweetpand/LeetCode-1 | 0acfa603af254a3350d457803449a91322f2d1a7 | 65f4ef26cb8b2db0b4bf8c42bfdc76421b479f94 | refs/heads/master | 2022-11-14T07:01:42.502172 | 2020-07-12T12:25:56 | 2020-07-12T12:25:56 | 279,088,171 | 1 | 0 | null | 2020-07-12T15:03:20 | 2020-07-12T15:03:19 | null | UTF-8 | Python | false | false | 203 | py | class Solution:
def getNoZeroIntegers(self, n: int) -> List[int]:
for A in range(n):
B = n - A
if '0' not in str(A) and '0' not in str(B):
return A, B
| [
"[email protected]"
] | |
cf5fdae9151676f3423f1860f38fd0b0cfb4b23c | 42ad749273ded88ac99314f8340296dd7e523229 | /Tens digit.py | 1ead12766233230063bc88efd9c8c37c445c07d1 | [] | no_license | tahmidhasantanoy/Test | b2fd7ca077822438775586ae9b096e8e34211ecd | bdfe839e95328d69c0fbf113c323f165a986a435 | refs/heads/master | 2023-02-04T15:28:09.086477 | 2020-12-25T03:36:05 | 2020-12-25T03:36:05 | 324,277,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | n = int(input("Enter a numbers: "))
p = n // 10 % 10  # // 10 drops the last digit, then % 10 keeps the new last digit, i.e. the tens digit
print(p) | [
"[email protected]"
] | |
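A quick worked example of the digit arithmetic above (hypothetical value in place of the user input):
n = 1234
assert n // 10 % 10 == 3  # 1234 // 10 == 123 and 123 % 10 == 3, so the tens digit is 3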
dc13ded07cef731f34988cfe60e0118e2602f1ca | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v4/googleads-py/google/ads/googleads/v4/services/services/keyword_plan_campaign_service/transports/grpc.py | d4ce2c28895a684aaea5fddd74a5f6ff28d9b7c4 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,739 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v4.resources.types import keyword_plan_campaign
from google.ads.googleads.v4.services.types import keyword_plan_campaign_service
from .base import KeywordPlanCampaignServiceTransport, DEFAULT_CLIENT_INFO
class KeywordPlanCampaignServiceGrpcTransport(KeywordPlanCampaignServiceTransport):
"""gRPC backend transport for KeywordPlanCampaignService.
Service to manage Keyword Plan campaigns.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(self, *,
host: str = 'googleads.googleapis.com',
credentials: credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn("api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning)
host = api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443"
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
)
@classmethod
def create_channel(cls,
host: str = 'googleads.googleapis.com',
credentials: credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_keyword_plan_campaign(self) -> Callable[
[keyword_plan_campaign_service.GetKeywordPlanCampaignRequest],
keyword_plan_campaign.KeywordPlanCampaign]:
r"""Return a callable for the get keyword plan campaign method over gRPC.
Returns the requested Keyword Plan campaign in full
detail.
Returns:
Callable[[~.GetKeywordPlanCampaignRequest],
~.KeywordPlanCampaign]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_keyword_plan_campaign' not in self._stubs:
self._stubs['get_keyword_plan_campaign'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v4.services.KeywordPlanCampaignService/GetKeywordPlanCampaign',
request_serializer=keyword_plan_campaign_service.GetKeywordPlanCampaignRequest.serialize,
response_deserializer=keyword_plan_campaign.KeywordPlanCampaign.deserialize,
)
return self._stubs['get_keyword_plan_campaign']
@property
def mutate_keyword_plan_campaigns(self) -> Callable[
[keyword_plan_campaign_service.MutateKeywordPlanCampaignsRequest],
keyword_plan_campaign_service.MutateKeywordPlanCampaignsResponse]:
r"""Return a callable for the mutate keyword plan campaigns method over gRPC.
Creates, updates, or removes Keyword Plan campaigns.
Operation statuses are returned.
Returns:
Callable[[~.MutateKeywordPlanCampaignsRequest],
~.MutateKeywordPlanCampaignsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'mutate_keyword_plan_campaigns' not in self._stubs:
self._stubs['mutate_keyword_plan_campaigns'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v4.services.KeywordPlanCampaignService/MutateKeywordPlanCampaigns',
request_serializer=keyword_plan_campaign_service.MutateKeywordPlanCampaignsRequest.serialize,
response_deserializer=keyword_plan_campaign_service.MutateKeywordPlanCampaignsResponse.deserialize,
)
return self._stubs['mutate_keyword_plan_campaigns']
__all__ = (
'KeywordPlanCampaignServiceGrpcTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
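A hedged construction sketch for the transport defined above (assumes Application Default Credentials are resolvable via google.auth, which is what the constructor falls back to when no channel or credentials are passed; the import path simply mirrors this file's location in the package):
from google.ads.googleads.v4.services.services.keyword_plan_campaign_service.transports.grpc import (
    KeywordPlanCampaignServiceGrpcTransport,
)
transport = KeywordPlanCampaignServiceGrpcTransport(host="googleads.googleapis.com")
channel = transport.grpc_channel  # gRPC channel ready to back the generated service client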
a98f246ce117a1a0fcc3691b88d33c14687f75da | cd3b427d64740091fbf04f3c01fde65854f1f477 | /dungeon/hack-and-dash.py | 46fe2144ca8cc14bef703a1e5385bfba60e9c627 | [] | no_license | qboy1214/CodeCombat | e4f9a2c6f1e713eea76bb4eaa7977284b4e120cf | 1c55071e645025c2623a0ce7dde5ad56cb3d85ca | refs/heads/master | 2021-01-10T04:44:18.657934 | 2015-12-21T12:13:21 | 2015-12-21T12:13:21 | 48,008,205 | 2 | 1 | null | null | null | null | GB18030 | Python | false | false | 228 | py | # You can write code before the loop
# Use a loop to escape the maze
self.moveRight()
self.moveUp()
self.attack("Chest")
self.attack("Chest")
self.moveDown()
loop:
# Move 3 times
self.moveRight(3)
self.moveDown(3) | [
"[email protected]"
] | |
5b9ba271aefbb1a231d9688039fc8eb2d14ef7b8 | 7f8b68609f41b0d392b3f6bd6751b2ba8f6ca51f | /face_recognition.py | c9cd14fdec97fc30dec8b86aefd8224564775e8e | [] | no_license | ayajnik/Face-recognition-app | 9e29df256b913dcac10b483a2ac7e9ea95c6f979 | 79c66f2243c961785cce42d91e8cd542e91a0ac7 | refs/heads/master | 2020-12-17T21:20:52.984331 | 2020-01-21T09:04:41 | 2020-01-21T09:04:41 | 235,296,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,593 | py | try:
import os
import numpy as np
import cv2
print('\n')
print('Necessary libraries imported.')
print('\n')
except ImportError:
    print('Some required libraries are not installed.')
#Given an image below function returns rectangle for face detected alongwith gray scale image
def faceDetection(test_img):
gray_img=cv2.cvtColor(test_img,cv2.COLOR_BGR2GRAY) #convert color image to grayscale
face_haar_cascade=cv2.CascadeClassifier('C:\\Users\\Yadvi\\PycharmProjects\\face\\haarcascade_frontalface_default.xml')#Load haar classifier
faces=face_haar_cascade.detectMultiScale(gray_img,scaleFactor=2.32,minNeighbors=10)#detectMultiScale returns rectangles
return faces,gray_img
def labels_for_training_data(directory):
faces=[]
faceID=[]
for path,subdirnames,filenames in os.walk(directory):
for filename in filenames:
if filename.startswith("."):
print("Skipping system file")#Skipping files that startwith .
continue
id=os.path.basename(path)#fetching subdirectory names
img_path=os.path.join(path,filename)#fetching image path
print("img_path:",img_path)
print("id:",id)
test_img=cv2.imread(img_path)#loading each image one by one
if test_img is None:
print("Image not loaded properly")
continue
faces_rect,gray_img=faceDetection(test_img)#Calling faceDetection function to return faces detected in particular image
if len(faces_rect)!=1:
continue #Since we are assuming only single person images are being fed to classifier
(x,y,w,h)=faces_rect[0]
roi_gray=gray_img[y:y+w,x:x+h]#cropping region of interest i.e. face area from grayscale image
faces.append(roi_gray)
faceID.append(int(id))
return faces,faceID
#Below function trains haar classifier and takes faces,faceID returned by previous function as its arguments
def train_classifier(faces,faceID):
face_recognizer=cv2.face.LBPHFaceRecognizer_create()
face_recognizer.train(faces,np.array(faceID))
return face_recognizer
#Below function draws bounding boxes around detected face in image
def draw_rect(test_img,face):
(x,y,w,h)=face
cv2.rectangle(test_img,(x,y),(x+w,y+h),(255,0,0),thickness=5)
#Below function writes name of person for detected label
def put_text(test_img,text,x,y):
cv2.putText(test_img,text,(x,y),cv2.FONT_HERSHEY_DUPLEX,2,(255,0,0),4)
| [
"[email protected]"
] | |
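A hedged end-to-end sketch wiring the helpers above into a working recognizer (the training directory layout of one numbered sub-folder per person, the file names, and the module being importable as face_recognition are all assumptions; predict comes from the OpenCV LBPH recognizer returned by train_classifier):
import cv2
from face_recognition import faceDetection, labels_for_training_data, train_classifier, draw_rect, put_text
faces, faceID = labels_for_training_data('trainingImages')  # hypothetical folder of labeled face images
face_recognizer = train_classifier(faces, faceID)
test_img = cv2.imread('test.jpg')  # hypothetical test photo
faces_detected, gray_img = faceDetection(test_img)
for (x, y, w, h) in faces_detected:
    label, confidence = face_recognizer.predict(gray_img[y:y+h, x:x+w])
    draw_rect(test_img, (x, y, w, h))
    put_text(test_img, str(label), x, y)
cv2.imwrite('predicted.jpg', test_img)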
c60866dfbc108f2d2ae42d6dbc05b03f9de98fa9 | be360ba4982a4e3bf3ca547f5f1f8b68d8479c07 | /optlis/dynamic/problem_data.py | 90693ad9669e2f1082de41335d7f9bb6dfbab8aa | [
"MIT"
] | permissive | gacheiro/optlis | dd4da0913ae0ebe0d6bca51392210b3f2e57919b | 17c467a5dd35331cc7eaaeb7829c6da8ccc08608 | refs/heads/main | 2023-09-01T08:03:54.878739 | 2023-06-27T06:18:14 | 2023-06-27T06:18:14 | 336,266,523 | 2 | 0 | MIT | 2023-06-08T12:52:53 | 2021-02-05T12:33:38 | Python | UTF-8 | Python | false | false | 9,453 | py | from typing import Any, Dict, Union, Generator, Tuple, TextIO, Optional
from pathlib import Path
from functools import cached_property
import networkx as nx
import numpy as np
import numpy.typing as npt
from optlis.shared import set_product
from optlis.static.problem_data import Instance as StaticInstance
class Instance(StaticInstance):
EPSILON = 0.01
NEUTRALIZING_SPEED = 0.3
CLEANING_SPEED = 0.075
def __init__(
self, nodes, risk, degradation_rate, metabolization_rate, initial_concentration
):
super().__init__()
self.add_nodes_from(nodes)
self._risk = risk
self._degradation_rate = degradation_rate
self._metabolization_rate = metabolization_rate
self._initial_concentration = initial_concentration
@property
def node_resources(self):
raise DeprecationWarning
@property
def resources(self):
return dict(
Qn=sum(nx.get_node_attributes(self, "Qn").values()),
Qc=sum(nx.get_node_attributes(self, "Qc").values()),
)
# TODO: rename this
@cached_property
def nodes_duration(self):
duration = np.zeros((len(self.nodes), len(self.time_periods)), dtype=np.int32)
for i in self.nodes:
for t in self.time_periods:
duration[i][t] = self.cleaning_duration(i, t)
return duration
@cached_property
def cleaning_start_times(self):
"""Returns a 2d vector with the latest start times."""
nnodes, ntime_units = (len(self.nodes), len(self.time_units))
nodes_duration = np.zeros(shape=(nnodes, ntime_units), dtype=np.int32)
for i, t in set_product(self.nodes, self.time_units):
nodes_duration[i][t] = self._cleaning_start_times(i, t)
return nodes_duration
def _cleaning_start_times(self, site, time):
"""Returns the latest start time for an op. if it finishes exactly at time t.
If it is not possible for the task to finish exactly at time t, returns 0.
"""
for s in self.time_units:
v = max(self.initial_concentration(site, p) for p in self.products)
tt = s
while v > self.EPSILON:
v -= self.CLEANING_SPEED
tt += 1
else:
if tt == time:
return s
return 0
@cached_property
def neutralizing_start_times(self):
"""Returns a 3d vector with the latest start times."""
nnodes, nproducts, ntime_units = (
len(self.nodes),
len(self.products),
len(self.time_units),
)
duration = np.zeros(shape=(nnodes, nproducts, ntime_units), dtype=np.int32)
for i, p, t in set_product(self.nodes, self.products, self.time_units):
duration[i][p][t] = self._neutralizing_duration(i, p, t)
return duration
def _neutralizing_duration(self, site, product, time):
"""Returns the latest start time for an op. if it finishes exactly at time t."""
for s in self.time_units:
v = self.initial_concentration(site, product)
tt = s
while v > self.EPSILON:
v -= v * self.NEUTRALIZING_SPEED
tt += 1
else:
if tt == time:
return s
return 0
@cached_property
def risk(self):
raise DeprecationWarning
@cached_property
def products_risk(self):
return np.array(self._risk, dtype=np.float64)
@property
def products(self):
nproducts = len(self._risk)
return list(range(nproducts))
def initial_concentration(self, i, p):
return self._initial_concentration[i][p]
@cached_property
def degradation_rates(self):
return np.array(self._degradation_rate, dtype=np.float64)
def degradation_rate(self, p):
raise DeprecationWarning
def metabolization_rate(self, p, q):
raise DeprecationWarning
@cached_property
def metabolizing_rates(self):
nproducts = len(self.products)
rates = np.zeros(shape=(nproducts, nproducts), dtype=np.float64)
for p, q in set_product(self.products, self.products):
try:
# Used when instance is loaded from a file
rates[p][q] = self._metabolization_rate[p][q]
except KeyError:
# Used when instance is generated by the `instance_benchmark` module
rates[p][q] = self._metabolization_rate.get((p, q), 0)
return rates
@cached_property
def time_units(self):
sites, products = self.nodes, self.products
T = sum(
max(self.initial_concentration(i, p) for p in products)
/ self.CLEANING_SPEED
for i in sites
)
return np.array(range(int(T) + 1), dtype=np.int32)
@property
def time_periods(self):
raise DeprecationWarning
def c_struct(self) -> "c_instance":
nnodes = len(self.nodes)
ntasks = len(self.tasks)
nproducts = len(self.products)
ntime_units = len(self.time_units)
return c_instance(
c_size_t(nnodes),
c_size_t(ntasks),
c_size_t(nproducts),
c_size_t(ntime_units),
np.array(
[self.resources["Qn"], self.resources["Qc"]], dtype=np.int32
).ctypes.data_as(POINTER(c_int32)),
self.tasks.ctypes.data_as(POINTER(c_int32)),
self.cleaning_start_times.ctypes.data_as(POINTER(c_int32)),
self.neutralizing_start_times.ctypes.data_as(POINTER(c_int32)),
self.products_risk.ctypes.data_as(POINTER(c_double)),
self.degradation_rates.ctypes.data_as(POINTER(c_double)),
self.metabolizing_rates.ctypes.data_as(POINTER(c_double)),
)
def load_instance(path):
"""Loads an instance from a file."""
nodes = []
risk = []
degradation_rate = []
metabolization_rate = {}
initial_concentration = {}
with open(path, "r") as f:
lines = f.readlines()
assert lines[0].startswith("# format: dynamic")
instance_data = (l for l in lines if not l.startswith("#"))
nproducts = int(next(instance_data))
# Parses products' risk
for _ in range(nproducts):
line = next(instance_data)
id_, risk_ = line.split()
risk.append(float(risk_))
# Parses products' degradation rate
for _ in range(nproducts):
line = next(instance_data)
id_, degradation_rate_ = line.split()
degradation_rate.append(float(degradation_rate_))
# Parses products'
for _ in range(nproducts):
line = next(instance_data)
id_, *metabolization_rate_ = line.split()
metabolization_rate[int(id_)] = tuple(float(r) for r in metabolization_rate_)
nnodes = int(next(instance_data))
for _ in range(nnodes):
line = next(instance_data)
nid, ntype, Qn, Qc, D = line.split()
nodes.append(
(
int(nid),
{
"type": int(ntype),
"Qn": int(Qn),
"Qc": int(Qc),
"D": int(D),
},
)
)
nconcentration = int(next(instance_data))
for _ in range(nconcentration):
line = next(instance_data)
id_, *initial_concentration_ = line.split()
initial_concentration[int(id_)] = tuple(
float(c) for c in initial_concentration_
)
instance = Instance(
nodes, risk, degradation_rate, metabolization_rate, initial_concentration
)
instance.time_horizon = int(next(instance_data))
return instance
def export_instance(instance: Instance, outfile_path: Union[str, Path]) -> None:
"""Exports a problem instance to a file."""
with open(outfile_path, "w") as outfile:
_write_instance(instance, outfile)
def _write_instance(instance: Instance, outfile: TextIO) -> None:
"""Writes a problem instance to a file."""
outfile.write("# format: dynamic\n")
# Write product risk
outfile.write(f"{len(instance.products)}\n")
for pid in instance.products:
outfile.write(f"{pid} {instance.products_risk[pid]:.2f}\n")
# Write product degradation rate
for pid in instance.products:
outfile.write(f"{pid} {instance.degradation_rates[pid]:.2f}\n")
# Write product metabolization matrix
for pid in instance.products:
outfile.write(f"{pid}")
for sid in instance.products:
outfile.write(f" {instance.metabolizing_rates[pid][sid]:.2f}")
outfile.write("\n")
# Write nodes information
outfile.write(f"{len(instance.nodes)}\n")
for id, data in instance.nodes(data=True):
type, Qn, Qc, D = (data["type"], data["Qn"], data["Qc"], data["D"])
outfile.write(f"{id} {type} {Qn} {Qc} {D}\n")
# Write initial concentration
outfile.write(f"{len(instance.nodes)}\n")
for id in instance.nodes:
outfile.write(f"{id}")
for pid in instance.products:
outfile.write(f" {instance.initial_concentration(id, pid):.2f}")
outfile.write("\n")
T = instance.time_units[-1]
outfile.write(f"{T}\n")
from optlis.dynamic.models.ctypes import (
c_instance,
c_int32,
c_size_t,
c_double,
POINTER,
)
| [
"[email protected]"
] | |
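A hedged usage sketch for the loader and exporter defined above (the instance file name is hypothetical and must follow the '# format: dynamic' layout that load_instance expects):
from optlis.dynamic.problem_data import load_instance, export_instance
instance = load_instance('example_instance.dat')
print(instance.resources)  # total neutralizing/cleaning teams, e.g. {'Qn': ..., 'Qc': ...}
print(len(instance.products), len(instance.time_units))
export_instance(instance, 'example_copy.dat')  # round-trips the instance back to disk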
6356db0fd8ce3a87223a2fdc17cb4dadb42f2cbb | 1aff812df83de9535a6a7cb2423cf26136120fcb | /util/web.py | 1fe04a6d8a2eb1993fa5bac79c122f650daf8566 | [] | no_license | qqXiong/sprider_baidusubdomain | e0cb92ebfeca9a22b3ffb56f8e87351019b9ca6d | 5835c00f9231b4e91b56ac7642e0c9b9bc2ca8f3 | refs/heads/master | 2020-09-06T07:12:45.668210 | 2019-11-08T01:22:04 | 2019-11-08T01:22:04 | 220,360,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,285 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author : Lqq
# @Software: PyCharm
from urllib.parse import urlencode
'''
Set the request headers to mimic a normal browser visit.
'''
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
}
'''
Build the Baidu site-search request URL.
https://www.baidu.com/s?ie=utf-8&mod=1&isbd=1&isid=e3a83fe3000a02df&ie=utf-8&f=8&rsv_bp=1&rsv_idx=1&tn=baidu&wd=site%3A0-6.com&oq=site%253A0-6.com&rsv_pq=e3a83fe3000a02df&
rsv_t=ff987BDF4HNxewRNnG4P0tfDiwuhLD2dnhuHFy3G3tEbTi3TWPrB2eiwOs4&rqlang=cn&rsv_enter=0&rsv_dl=tb&bs=site%3A0-6.com&rsv_sid=1447_21101_29568_29221_26350&_ss=1&
clist=&hsug=&f4s=1&csor=12&_cr1=27364
@keyword
@page
'''
def get_url(keyword, page):
data = {
'wd': 'site:'+keyword, # search keyword; the 'site:' prefix restricts results to the target domain
'pn': page * 10, # result offset: Baidu returns 10 results per page
}
# encode the dict as the URL query string
url = 'https://www.baidu.com/s?' + urlencode(data)
return url
if __name__ == '__main__':
# get_list('site:qq.com')
# print(get_title('2231909.0-6.com'))
'https://www.csdn.net/'
# http://tool.chinaz.com/pagestatus/
# https://store.taobao.com
# print(get_website('2231909.0-6.com'))
| [
"1365236193@qq,com"
] | 1365236193@qq,com |
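A quick usage sketch for get_url (the domain and page index are arbitrary examples; the commented output follows directly from urlencode):
from util.web import get_url
print(get_url('qq.com', 0))
# https://www.baidu.com/s?wd=site%3Aqq.com&pn=0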
feec025c77e3c149732126d7d3e0fae6dc431d14 | e5e2b7da41fda915cb849f031a0223e2ac354066 | /sdk/python/pulumi_azure_native/dataprotection/backup_vault.py | 1b03c83ff88293d32331e1934fc85d6333165c73 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | johnbirdau/pulumi-azure-native | b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25 | d676cc331caa0694d8be99cb90b93fa231e3c705 | refs/heads/master | 2023-05-06T06:48:05.040357 | 2021-06-01T20:42:38 | 2021-06-01T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,752 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['BackupVaultArgs', 'BackupVault']
@pulumi.input_type
class BackupVaultArgs:
def __init__(__self__, *,
properties: pulumi.Input['BackupVaultArgs'],
resource_group_name: pulumi.Input[str],
e_tag: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input['DppIdentityDetailsArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vault_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a BackupVault resource.
:param pulumi.Input['BackupVaultArgs'] properties: BackupVaultResource properties
:param pulumi.Input[str] resource_group_name: The name of the resource group where the backup vault is present.
:param pulumi.Input[str] e_tag: Optional ETag.
:param pulumi.Input['DppIdentityDetailsArgs'] identity: Input Managed Identity Details
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] vault_name: The name of the backup vault.
"""
pulumi.set(__self__, "properties", properties)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if e_tag is not None:
pulumi.set(__self__, "e_tag", e_tag)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if location is not None:
pulumi.set(__self__, "location", location)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if vault_name is not None:
pulumi.set(__self__, "vault_name", vault_name)
@property
@pulumi.getter
def properties(self) -> pulumi.Input['BackupVaultArgs']:
"""
BackupVaultResource properties
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: pulumi.Input['BackupVaultArgs']):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group where the backup vault is present.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[pulumi.Input[str]]:
"""
Optional ETag.
"""
return pulumi.get(self, "e_tag")
@e_tag.setter
def e_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "e_tag", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['DppIdentityDetailsArgs']]:
"""
Input Managed Identity Details
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['DppIdentityDetailsArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="vaultName")
def vault_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the backup vault.
"""
return pulumi.get(self, "vault_name")
@vault_name.setter
def vault_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vault_name", value)
class BackupVault(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
e_tag: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['DppIdentityDetailsArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[pulumi.InputType['BackupVaultArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vault_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Backup Vault Resource
API Version: 2021-01-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] e_tag: Optional ETag.
:param pulumi.Input[pulumi.InputType['DppIdentityDetailsArgs']] identity: Input Managed Identity Details
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[pulumi.InputType['BackupVaultArgs']] properties: BackupVaultResource properties
:param pulumi.Input[str] resource_group_name: The name of the resource group where the backup vault is present.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] vault_name: The name of the backup vault.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: BackupVaultArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Backup Vault Resource
API Version: 2021-01-01.
:param str resource_name: The name of the resource.
:param BackupVaultArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(BackupVaultArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
e_tag: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['DppIdentityDetailsArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[pulumi.InputType['BackupVaultArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vault_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = BackupVaultArgs.__new__(BackupVaultArgs)
__props__.__dict__["e_tag"] = e_tag
__props__.__dict__["identity"] = identity
__props__.__dict__["location"] = location
if properties is None and not opts.urn:
raise TypeError("Missing required property 'properties'")
__props__.__dict__["properties"] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["vault_name"] = vault_name
__props__.__dict__["name"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:dataprotection:BackupVault"), pulumi.Alias(type_="azure-native:dataprotection/v20210101:BackupVault"), pulumi.Alias(type_="azure-nextgen:dataprotection/v20210101:BackupVault"), pulumi.Alias(type_="azure-native:dataprotection/v20210201preview:BackupVault"), pulumi.Alias(type_="azure-nextgen:dataprotection/v20210201preview:BackupVault")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(BackupVault, __self__).__init__(
'azure-native:dataprotection:BackupVault',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'BackupVault':
"""
Get an existing BackupVault resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = BackupVaultArgs.__new__(BackupVaultArgs)
__props__.__dict__["e_tag"] = None
__props__.__dict__["identity"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["properties"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return BackupVault(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> pulumi.Output[Optional[str]]:
"""
Optional ETag.
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.DppIdentityDetailsResponse']]:
"""
Input Managed Identity Details
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name associated with the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.BackupVaultResponse']:
"""
BackupVaultResource properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/...
"""
return pulumi.get(self, "type")
| [
"[email protected]"
] | |
e610d3d2979458725172dc84670d1c616f4bb89a | 660c51f793f9e9e49ecb2e83cc0c84f962a978cb | /manage.py | 9b2af418af6a0aa4659357fdbc8ee349dba34549 | [] | no_license | JosephQuayson/fortuneteller-app | 0166b280bb3356ce7d2104ad8a74aaba35755ce7 | 589fbffd21e9cafb0572dee425130cb3e8d99299 | refs/heads/main | 2023-08-10T09:50:17.333516 | 2021-09-13T12:41:08 | 2021-09-13T12:41:08 | 405,989,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Fortuneteller.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
25def2f8b0326e351a884cb6ffdd0ec8b3cd9305 | 19558e2aaaf823a4bd7f1e6c979cf63315a1ca62 | /pages/pay_shipping_page.py | c65dac37ac0818c05920e236acadcaeba6bd766b | [] | no_license | KomlevVladimir/Project-palto-deti.ru-Python | d26fda2c13089b3d019ce2cfb71e08b9d4de71b8 | a34477978a4b5fa3099884ec0f5b8ae6363419e3 | refs/heads/master | 2021-05-31T07:39:19.586324 | 2016-05-17T18:18:18 | 2016-05-17T18:18:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | # -*- coding: utf-8 -*-
from pages.page import Page
from selenium.webdriver.common.by import By
from selenium.webdriver.support.expected_conditions import presence_of_element_located
class PayShippingPage(Page):
@property
def is_this_page(self):
self.wait.until(presence_of_element_located((By.CLASS_NAME, "entry-title")))
if self.driver.find_element_by_class_name("entry-title").text == u"Оплата и доставка":  # "Payment and delivery"
return True
else:
return False | [
"[email protected]"
] | |
0fc7117687a0a35d78978c2faf2425533754d995 | 402cfc7a7992ab7ddfc8cc95cda6b72e96116194 | /test/multivariate/conftest.py | 2454476a32cd5a058d29f8e861b836ad27ab27b9 | [] | no_license | SymposiumOrganization/EQLearner | fa5b9d47a5f9a432155ad750a27ff1a708a5301b | bfdd29b96040225a3bb6c88f5bdca64b03c545f7 | refs/heads/main | 2023-02-06T12:33:54.030753 | 2020-12-21T06:15:59 | 2020-12-21T06:15:59 | 311,565,332 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py | # pathlib is great
from pathlib import Path
from _pytest.main import Session
import pytest
#from sympy import sin, Symbol, log, exp, zoo, Id, sqrt
# Let's define our failures.txt as a constant as we will need it later
FAILURES_FILE = Path() / "failures.txt"
@pytest.hookimpl()
def pytest_sessionstart(session: Session):
if FAILURES_FILE.exists():
# We want to delete the file if it already exists
# so we don't carry over failures form last run
FAILURES_FILE.unlink()
FAILURES_FILE.touch()
@pytest.fixture(scope="module")
def intialize_values_multivariate():
#symbols = [Symbol("x"),Symbol("y"),Symbol("z"),Symbol("n"),Symbol("p")]
#basis_functions = [Id,exp,log,sin,sqrt,]
basis_functions = ["Id","exp","log","sin","sqrt","inv"] #Pay attention as the order is indeed important, for testing we put it in alphabetical order (apart from x)
symbols = ["x","y","z","n","p"]
return basis_functions, symbols
| [
"[email protected]"
] | |
cd0447d0d5b8896c2c63e2db3bdd0930d1ea9eb1 | aaf89b226d715bccfa81354402b7dc7f8f7da8e4 | /kyoji/ABC153/SilverFoxVSMonster.py | 9e134d82bf26f5a7728a57bb8b9c4c4b25be8129 | [] | no_license | kyojinatsubori/RoadToRedCoder | 56eec77eaccd1d0cba36dfc7edc8e65d4d311f77 | 72f5330ba8d8fe15a1c3f62a0e1640c3217e6a93 | refs/heads/master | 2021-04-23T21:01:32.425419 | 2020-06-06T08:05:32 | 2020-06-06T08:05:32 | 250,003,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | n, d, a = map(int, input().split())
array = [0]*n
count = 0
for i in range(n):
array[i] = list(map(int, input().split()))
array.sort()
while array:
if array[0][1] <= 0:
array.pop(0)
else:
for i in range(len(array)):
if array[i][0] <= array[0][0] + 2 * d:
array[i][1] -= a
count += 1
print(count) | [
"[email protected]"
] | |
9f36e0bc6f11b4972c0240cafbb498fe8f7a0255 | 5a1a695829a2d1dbf4daa0736f0fbd6feffc7e63 | /swexpert/6109(추억의 2048게임2).py | 3d806eabb5a23e1f5ea7af2a3d83c9a286f46fe2 | [] | no_license | juyi212/Algorithm_study | f5d263c5329c994a457bbe897e5e1405d2b1d67a | f225cc593a50b74686111f654f7133707a1d1310 | refs/heads/master | 2023-03-21T20:02:36.138688 | 2021-03-16T14:16:40 | 2021-03-16T14:16:40 | 325,008,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,756 | py | from collections import deque
def dir():
# rotate the board 90 degrees clockwise
tmp = [[0] * N for _ in range(N)]
for i in range(N):
for j in range(N):
tmp[i][j] = matrix[N-1-j][i]
return tmp
def push():
# process each row, pushing tiles toward the left
for i in range(N):
q = deque()
for j in range(N):
if matrix[i][j]:
q.append(matrix[i][j])
# after queueing the value, clear the cell
matrix[i][j] = 0
idx = 0
while q:
if len(q) > 1:
# pop two tiles at a time and compare them
a, b = q.popleft(), q.popleft()
if a == b:
matrix[i][idx] = a + b
else:
# if they differ, place only the first tile
matrix[i][idx] = a
# and push the second one back onto the queue
q.appendleft(b)
# whether they merged or not, advance idx
idx += 1
else:
# only one tile left: just place it
matrix[i][idx] = q.popleft()
# rotation counts per direction: rotate the 2D board so every push can be done leftward
direction = {'left': (0, 0) , 'right': (2, 2), 'up': (3, 1) , 'down': (1, 3)}
for tc in range(1, int(input())+1):
# simulation problem
# can be solved by rotating the board and always pushing left
N, S = input().split()
N = int(N)
matrix = [list(map(int, input().split())) for _ in range(N)]
v1,v2 = direction[S]
for i in range(v1):
matrix = dir()
push()
for i in range(v2):
matrix = dir()
print(f'#{tc}')
for i in range(N):
print(*matrix[i])
'''
2
5 up
4 8 2 4 0
4 4 2 0 8
8 0 2 4 4
2 2 2 2 8
0 2 2 0 0
2 down
16 2
0 2
''' | [
"[email protected]"
] | |
3c482ab4d124c9bdb9090b978987ff36e4f63e6a | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/cloud/bigquery/migration/v2alpha/bigquery-migration-v2alpha-py/google/cloud/bigquery/migration_v2alpha/services/migration_service/pagers.py | 5d3a2e4628aa71e2c409d3bcc893fdc35af0ad99 | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,766 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.cloud.bigquery.migration_v2alpha.types import migration_entities
from google.cloud.bigquery.migration_v2alpha.types import migration_service
class ListMigrationWorkflowsPager:
"""A pager for iterating through ``list_migration_workflows`` requests.
This class thinly wraps an initial
:class:`google.cloud.bigquery.migration_v2alpha.types.ListMigrationWorkflowsResponse` object, and
provides an ``__iter__`` method to iterate through its
``migration_workflows`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListMigrationWorkflows`` requests and continue to iterate
through the ``migration_workflows`` field on the
corresponding responses.
All the usual :class:`google.cloud.bigquery.migration_v2alpha.types.ListMigrationWorkflowsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., migration_service.ListMigrationWorkflowsResponse],
request: migration_service.ListMigrationWorkflowsRequest,
response: migration_service.ListMigrationWorkflowsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.bigquery.migration_v2alpha.types.ListMigrationWorkflowsRequest):
The initial request object.
response (google.cloud.bigquery.migration_v2alpha.types.ListMigrationWorkflowsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = migration_service.ListMigrationWorkflowsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[migration_service.ListMigrationWorkflowsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[migration_entities.MigrationWorkflow]:
for page in self.pages:
yield from page.migration_workflows
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListMigrationWorkflowsAsyncPager:
"""A pager for iterating through ``list_migration_workflows`` requests.
This class thinly wraps an initial
:class:`google.cloud.bigquery.migration_v2alpha.types.ListMigrationWorkflowsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``migration_workflows`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListMigrationWorkflows`` requests and continue to iterate
through the ``migration_workflows`` field on the
corresponding responses.
All the usual :class:`google.cloud.bigquery.migration_v2alpha.types.ListMigrationWorkflowsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[migration_service.ListMigrationWorkflowsResponse]],
request: migration_service.ListMigrationWorkflowsRequest,
response: migration_service.ListMigrationWorkflowsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.bigquery.migration_v2alpha.types.ListMigrationWorkflowsRequest):
The initial request object.
response (google.cloud.bigquery.migration_v2alpha.types.ListMigrationWorkflowsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = migration_service.ListMigrationWorkflowsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[migration_service.ListMigrationWorkflowsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[migration_entities.MigrationWorkflow]:
async def async_generator():
async for page in self.pages:
for response in page.migration_workflows:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListMigrationSubtasksPager:
"""A pager for iterating through ``list_migration_subtasks`` requests.
This class thinly wraps an initial
:class:`google.cloud.bigquery.migration_v2alpha.types.ListMigrationSubtasksResponse` object, and
provides an ``__iter__`` method to iterate through its
``migration_subtasks`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListMigrationSubtasks`` requests and continue to iterate
through the ``migration_subtasks`` field on the
corresponding responses.
All the usual :class:`google.cloud.bigquery.migration_v2alpha.types.ListMigrationSubtasksResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., migration_service.ListMigrationSubtasksResponse],
request: migration_service.ListMigrationSubtasksRequest,
response: migration_service.ListMigrationSubtasksResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.bigquery.migration_v2alpha.types.ListMigrationSubtasksRequest):
The initial request object.
response (google.cloud.bigquery.migration_v2alpha.types.ListMigrationSubtasksResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = migration_service.ListMigrationSubtasksRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[migration_service.ListMigrationSubtasksResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[migration_entities.MigrationSubtask]:
for page in self.pages:
yield from page.migration_subtasks
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListMigrationSubtasksAsyncPager:
"""A pager for iterating through ``list_migration_subtasks`` requests.
This class thinly wraps an initial
:class:`google.cloud.bigquery.migration_v2alpha.types.ListMigrationSubtasksResponse` object, and
provides an ``__aiter__`` method to iterate through its
``migration_subtasks`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListMigrationSubtasks`` requests and continue to iterate
through the ``migration_subtasks`` field on the
corresponding responses.
All the usual :class:`google.cloud.bigquery.migration_v2alpha.types.ListMigrationSubtasksResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[migration_service.ListMigrationSubtasksResponse]],
request: migration_service.ListMigrationSubtasksRequest,
response: migration_service.ListMigrationSubtasksResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.bigquery.migration_v2alpha.types.ListMigrationSubtasksRequest):
The initial request object.
response (google.cloud.bigquery.migration_v2alpha.types.ListMigrationSubtasksResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = migration_service.ListMigrationSubtasksRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[migration_service.ListMigrationSubtasksResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[migration_entities.MigrationSubtask]:
async def async_generator():
async for page in self.pages:
for response in page.migration_subtasks:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
df3fe705979d3f7b8cda44fe3d812b0da3e38b26 | 5665618513b5a13b6d9eb5f90f66db759268342b | /app.py | 9f09682b14755fc722169f433751cbedee829aeb | [] | no_license | EddieGustafsson/TornadoSessionValidation | f7cadc24cb559efa6b04d06e211e05b6dfcccba5 | ac5b4d333292ff87640f785537e10260a0d374c7 | refs/heads/master | 2022-06-10T13:27:36.615796 | 2020-05-06T12:22:06 | 2020-05-06T12:22:06 | 261,742,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.options
import os.path
from tornado.options import define, options
from handlers.AuthHandler import LoginHandler, LogoutHandler
from handlers.MainHandler import MainHandler
define("port", default=8888, help="run on the given port", type=int)
class Application(tornado.web.Application):
def __init__(self):
base_dir = os.path.dirname(__file__)
settings = {
"cookie_secret": "bZJc2sWbQLKos6GkHn/VB9oXwQt8S0R0kRvJ5/xJ89E=",
"login_url": "/login",
'template_path': os.path.join(base_dir, "templates"),
'static_path': os.path.join(base_dir, "static"),
'debug': True,
"xsrf_cookies": True,
}
tornado.web.Application.__init__(self, [
tornado.web.url(r"/", MainHandler, name="main"),
tornado.web.url(r'/login', LoginHandler, name="login"),
tornado.web.url(r'/logout', LogoutHandler, name="logout"),
], **settings)
def main():
tornado.options.parse_command_line()
Application().listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
ebc003f5248cb180946a2771b35b1966aeb022d9 | 2233ead75a4e1960757c8af9f458e5156ea1e4b3 | /code/crawling-code/givemeallthelyrics.py | 2bff35e0ce10253c035dd4a8611b958dd9e73bc8 | [] | no_license | honghyeong/nlp-lyrics-creators | 0361548973a9b8f81d5ed6f5dc0adf043b174ef3 | 615380fb13c0260ff6e4b638901fd49625cd7fe0 | refs/heads/main | 2023-08-02T23:14:19.254416 | 2021-10-06T06:45:14 | 2021-10-06T06:45:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,012 | py | from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver import ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait as wait
import pandas as pd
import time
def crawler():
naver_id = input("네이버 아이디를 입력하세요: ")
naver_pwd = input("네이버 비밀번호를 입력하세요: ")
search_artists = input("검색할 가수를 쉼표와 띄어쓰기로 구분해서 입력하세요: ")
search_artists = search_artists.split(', ')
print('다음 가수들의 가사를 가져옵니다: ',search_artists)
    # Login is needed: without logging in, age-restricted (19+) songs can't be opened!
options = ChromeOptions()
options.add_argument('--headless')
driver = webdriver.Chrome(
executable_path='/Users/jonghyunlee/opt/anaconda3/pkgs/python-chromedriver-binary-92.0.4515.43.0-py38h50d1736_0/lib/python3.8/site-packages/chromedriver_binary/chromedriver')
    # Set this to your own absolute path to chromedriver!
driver.maximize_window()
vibe = "https://vibe.naver.com/"
driver.get(vibe)
popup = driver.find_element_by_xpath('//*[@id="app"]/div[2]/div/div/a[2]')
popup.click()
    # A promo event seems to be running right now, so a popup appears; once the event ends, the two lines above can be commented out.
loginbutton = driver.find_element_by_xpath('/html/body/div/div/header/div[2]/div[1]/a/span')
loginbutton.click()
id_input = driver.find_element_by_xpath('/html/body/div[2]/div[3]/div/form/fieldset/div[1]/div[1]/span/input')
pwd_input = driver.find_element_by_xpath('/html/body/div[2]/div[3]/div/form/fieldset/div[2]/div[1]/span/input')
final_login = driver.find_element_by_xpath('/html/body/div[2]/div[3]/div/form/fieldset/input')
id_input.send_keys(naver_id)
pwd_input.send_keys(naver_pwd)
final_login.click()
    # This is the login step
for artist in search_artists:
driver.get(vibe)
time.sleep(1)
searchbutton = driver.find_element_by_xpath('//*[@id="header"]/a[1]')
searchbar = driver.find_element_by_xpath('//*[@id="search_keyword"]')
searchbutton.click()
searchbar.click()
searchbar.send_keys(artist)
searchbar.send_keys(Keys.ENTER)
searchbar.send_keys(Keys.ESCAPE)
        # This is where the search query is typed in
wait(driver, 20).until(
EC.element_to_be_clickable((By.XPATH, '//*[@id="content"]/div[1]/div/a/div[2]/div[1]/div'))).click()
time.sleep(1)
        # This selects the searched artist (top search result)
driver.find_element_by_xpath('//*[@id="content"]/div[2]/h3/a').click()
time.sleep(0.5)
try:
driver.find_element_by_css_selector('#content > div:nth-child(1) > div.summary_section > div.summary_thumb > img')
time.sleep(1)
driver.back()
driver.find_element_by_xpath('//*[@id="content"]/div[3]/h3/a').click()
except NoSuchElementException:
pass
        # Go into the artist's track list
time.sleep(1)
while True:
try:
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
showmore = driver.find_element_by_xpath('/html/body/div/div/div[3]/div/div/div[3]/div[2]/a/span')
showmore.click()
time.sleep(1)
except NoSuchElementException:
break
while True:
try:
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
driver.find_element_by_xpath('/html/body/div/div/div[3]/div/div/div[3]/div[3]/a/span').click()
time.sleep(1)
except NoSuchElementException:
break
        # Artists with many songs need the "show more" button clicked to load the full list; otherwise just skip this
nums = list(range(1, 500))
links = []
time.sleep(1)
for num in nums:
try:
links.append(driver.find_element_by_xpath(f'//*[@id="content"]/div/div[3]/div[1]/div/\
table/tbody/tr[{num}]/td[3]/div[1]/span/a').get_attribute('href'))
except NoSuchElementException:
break
        # This scrapes the links of all the songs
linknumbers = [link[29:] for link in links]
        # Keep only the important key part of each link and drop the rest
lyrics = []
titles = []
for linknumber in linknumbers:
try:
driver.get("https://vibe.naver.com/track/" + str(linknumber))
time.sleep(0.5)
title = driver.find_element_by_class_name("title")
lyric = driver.find_element_by_class_name("lyrics")
titles.append(title.text)
lyrics.append(lyric.text)
time.sleep(0.5)
except NoSuchElementException:
continue
        # Here we actually open every link one by one and scrape the title and lyrics
titles = [_[3:] for _ in titles]
        # Preprocessing: strip the leading "곡명\n" ("song title") label from each title
lyrics_new = []
for i in lyrics:
lyrics_new.append(i.replace("\n", " "))
lyrics_new
        # Remove the line breaks from each lyric
crawled = pd.DataFrame({"제목": titles, "가사": lyrics_new})
crawled.to_excel(f'{artist} 가사 crawling.xlsx', encoding='utf-8')
print(f"{artist}의 총 곡수: ", len(titles))
        # Save!
print('완료!')
if __name__ == '__main__':
start_time = time.time()
crawler()
print((time.time() - start_time)/60, "분 소요")
| [
"[email protected]"
] | |
5a4e1e68d14e7731097d156af820f65fe39459af | 5c4ca19944ad9bc3bf55038635bea8f711ce5967 | /brunel/deprecated/test_excitability_configs2.py | 6a3d71d9d6ffe81d2e4d8967a383fa779ac0a9fd | [] | no_license | mer0mingian/gif2_model_nest | 41a3d685251499e2f481c3e6fd760a976506a06e | e8c912be4c74c605ba6c621b16806edc84c76874 | refs/heads/master | 2021-05-02T18:48:38.335411 | 2018-04-05T20:55:21 | 2018-04-05T20:55:21 | 69,229,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,060 | py | # coding=utf-8
"""
For all files in the excitability folder this script picks out the ten best
configurations per file and tests them on the Brunel network in gif2_brunel_f.
"""
import numpy as np
import time
import sys
import os
import copy
# import matplotlib as mpl
# mpl.use('Agg')
# import matplotlib.pyplot as plt
# import nest
# dt = 0.1
# nest.set_verbosity('M_WARNING')
# nest.ResetKernel()
# nest.SetKernelStatus(
# {"resolution": dt, "print_time": True, "overwrite_files": True})
# nest.SetKernelStatus({"local_num_threads": 16})
# try:
# nest.Install("gif2_module")
# except:
# pass
from mingtools1 import *
from elephant.statistics import isi, cv
from mc_connectivity_transformer import compute_new_connectivity
with open('brunel_array_best_params_0.csv', 'a') as output:
np.savetxt(output, allthetenbests, fmt="%12.6G", newline='\n')
output.write(' \n')
output.close()
# for i in np.arange(100):
# addindex = allthetenbests[ :, 8 ] == np.amax(allthetenbests[ :, 8 ])
# testconfigs = np.vstack((testconfigs, allthetenbests[ addindex, : ]))
# for j in np.arange(len(addindex)):
# if addindex[ j ]:
# allthetenbests = np.vstack((allthetenbests[ 0:j - 1, : ],
# allthetenbests[ (j + 1):, : ]))
# with open('brunel_array_best_results_0.csv', 'a') as output:
# testconfigs = np.reshape(testconfigs, (-1, 12))[ 1:, : ]
# np.savetxt(output, testconfigs, fmt="%12.6G", newline='\n')
# output.write(' \n')
# output.close()
#
# for i in np.arange(testconfigs.shape[ 0 ]):
# row = testconfigs[ i, : ]
# networkparamdict = {'p_rate': 65000.0, 'C_m': row[ 1 ], 'g': row[ 2 ],
# 'g_1': row[ 3 ], 'tau_1': row[ 4 ], 'V_dist': row[ 5 ],
# 'V_dist2': row[ 6 ]}
# fractionindex = 9
# fraction = np.arange(0.0, 20.0)[ fractionindex + 1 ] / 20.0
# resultlist, spikelists = run_brunel(networkparamdict, fraction)
# resultarray = np.array(resultlist)
# with open('brunel_array_results_0.csv', 'a') as output:
# np.savetxt(output, resultarray, fmt="%12.6G", newline=' ')
# output.write(' \n')
# output.close()
| [
"[email protected]"
] | |
e00c0ce80f37cc522e8ecc7b32190129879e8135 | c17eaa7c5abf4bfcb90d09184ac98b1163cf839e | /usb_print2.py | 8d96dbf56872d008aa71b9e2337bb346b19b8c56 | [] | no_license | meotimdihia/pythontest | 3afd1b035110dca1d2c63e37ee2fcd01cb347369 | 955ec5587c0e927d6aefbfa8dd241d975fe50c68 | refs/heads/master | 2020-04-15T01:50:17.340000 | 2014-11-22T11:32:19 | 2014-11-22T11:32:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | import usb.core
import usb.util
import usb.backend
import sys
VENDOR_ID = 0x10c4 # temperature sensor
PRODUCT_ID = 0xea60
# find our device
dev = usb.core.find(idVendor=VENDOR_ID, idProduct=PRODUCT_ID)
# was it found?
if dev is None:
raise ValueError('Device not found')
# set the active configuration. With no arguments, the first
# configuration will be the active one
dev.set_configuration()
# Let's fuzz around!
# Let's start by reading 1 byte from the device using different requests
# bRequest is a single byte, so there are 256 possible values (0-255)
for bRequest in range(256):
try:
ret = dev.ctrl_transfer(0xC0, bRequest, 0, 0, 1)
print "bRequest ",bRequest
print ret
except:
# failed to get data for this request
pass | [
"[email protected]"
] | |
5b539d58c43c31537bfe6367c53704ae5e7f9ca0 | 727339491e343aca95367e64b38c32922a0a43da | /log-classify/save_dataset.py | a6363011ba3736bd203534da52f1157f6ece5209 | [] | no_license | Tairy/python-lab | 98421a3c70bf4158d5684880e85bed0c7dd160cc | 06567c88490d113e75ac05a1345ef3d4215eb417 | refs/heads/master | 2021-01-17T12:56:49.559450 | 2019-07-17T11:16:21 | 2019-07-17T11:16:21 | 57,027,431 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,829 | py | # encoding:utf-8
from es import es
def get_tagged_logs():
query_json = {
"bool": {
"must": [
{
"exists": {"field": "tags"}
}
]
}
}
sort_json = [
{
"dt": {
"order": "desc"
}
}
]
res = es.search(index="app-server-log-prod-*",
body={"query": query_json, "sort": sort_json, "size": 5000, "from": 0})
hits = res['hits']['hits']
return hits
def get_origin_data():
query_json = {
"bool": {
"must": [
{
"match": {
"an": "sqkb-api-v2"
}
},
{
"match": {
"ll": "ERROR"
}
}
]
}
}
sort_json = [
{
"dt": {
"order": "desc"
}
}
]
res = es.search(index="app-server-log-prod-*",
body={"query": query_json, "sort": sort_json, "size": 2000, "from": 10000},
request_timeout=500)
hits = res['hits']['hits']
return hits
if __name__ == "__main__":
logs = get_origin_data()
for log in logs:
error_message = log['_source']['em'].replace("\n", " ")
if error_message.startswith('RecommendService') or 'RecommendService.php' in error_message:
tag = 'recommend_service_timeout'
elif error_message.startswith('call abservice error') or error_message.startswith(
'shopping cart ab Operation timed') or error_message.startswith(
'shopping cart ab Connection timed') or error_message.startswith(
' Operation timed out after 100 milliseconds '
'with 0 out of -1 bytes received [{"file":"'
'/home/www-data/sqkb-api-v2/vendor/arch/php-json-rpc/'
'src/ABService.php"'
):
tag = 'ab_service_timeout'
elif error_message.startswith('HotCouponService'):
tag = 'hot_coupon_service_timeout'
elif error_message.startswith('OrderService') or error_message.startswith(
'resend multi redPack msg failed cURL error 28'):
tag = 'order_service_timeout'
elif error_message.startswith('RebateService'):
tag = 'rebate_service_error'
elif error_message.startswith('SearchServiceRpc'):
tag = 'search_service_error'
elif error_message.startswith('ShopInfoService'):
tag = 'shop_info_service_timeout'
elif error_message.startswith('CouponCenter'):
tag = 'coupon_center_service_timeout'
elif error_message.startswith('get collect info') or error_message.startswith(
'collect failed cURL error') or error_message.startswith(
'collect cancel failed cURL error 28') or error_message.startswith(
'get collect history failed cURL error 28'
):
tag = 'collect_service_timeout'
elif error_message.startswith(' SQLSTATE'):
tag = 'sql_error'
elif error_message.startswith('get stay open redPack failed') or error_message.startswith(
'get user redPackTotalAmount'):
tag = 'bonus_service_timeout'
elif error_message.startswith('UserServiceRpc checkMobileBindHistory exception cURL error'):
tag = 'user_service_timeout'
elif error_message.startswith('getWechatUserInfo Exception cURL error '):
tag = 'http_request_time_out'
else:
tag = 'unknown'
dict_file = open("dataset/" + tag + ".txt", "a")
dict_file.write(error_message + "\n")
dict_file.close()
| [
"[email protected]"
] | |
f9fa64cb7f95abdf746a0d9d17e316d5a5705bd2 | 5cd64987b3b204930c848a035dbe76fdf7085e20 | /utils/uni_html_parser.py | 142894bbd0eccc9c726ea3cfafaf238227fcb104 | [
"Unlicense"
] | permissive | ITOO-UrFU/open-programs | 08a2cf13e045198417778f9164acf374810aac84 | 0ccc2740670d38cbd73fb3c611debd38bea994ef | refs/heads/dev | 2020-06-14T05:53:53.598267 | 2017-10-03T04:49:44 | 2017-10-03T04:49:44 | 75,224,934 | 0 | 1 | null | 2017-06-19T14:56:08 | 2016-11-30T20:35:14 | Python | UTF-8 | Python | false | false | 436 | py | """
Parse table with modules, disciplines from uni generated html
example file addr: C:\\Users\\User\\Downloads\\1.html
"""
# from bs4 import BeautifulSoup
#
#
# html_file = input("Html file address: ")
# html_file = open(html_file, encoding="utf-8")
#
# html_doc = html_file.read()
#
# soup = BeautifulSoup(html_doc, 'html.parser')
#
# plan_table = soup.find(id="EduVersionPlanTab.EduDisciplineList")
#
class Module:
pass
class | [
"[email protected]"
] | |
0e1e5845ba068efe8a4934624d75feab1573a380 | 1744ff9de02953646846be4d64e396c0c37c9355 | /assignment3 MG.py | 192ef057497dfb71b5e4f89619c99b9b9094a2f1 | [] | no_license | MatthewGerat/Past-Projects | af3953255a3205c693baea4bae576bf6f7591149 | 72e8b8e34263d634fdb4e6a1ab982a171df20a56 | refs/heads/master | 2020-06-27T12:58:23.673091 | 2019-08-01T02:53:28 | 2019-08-01T02:53:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,454 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 19 15:40:24 2017
@author: xfang13
"""
#Problem 1: (6 points)
#Create a list with 1001 randomly generated
#integers, each of which is generated in between -1000 and 1000.
#And using Python statements to find out the
#mean, median, minimum, and maximum value inside the list.
#Hint: You will need to import the random module and use the randint() method
#for generating the integers.
import random
#Your code goes below
L = []
for i in range(1001):
    L.append(random.randint(-1000, 1000))
print(L)
mean = sum(L) / len(L)
median = sorted(L)[len(L) // 2]
maximum = max(L)
minimum = min(L)
print("The mean is " + str(mean) + "\nThe median is " + str(median) + "\nThe maximum is " + str(maximum) + "\nThe minimum is " + str(minimum))
#Problem 2: (10 points)
#Write the procedure that computes the muliplication of two matrices
#Your code should work for any size matrices, assuming that
#the total number of columns in the first matrix matches the total number of
#rows in the second matrix
#Use the following matrices as testing data
matrix_A = [[1,2,3],[4,5,6]]
matrix_B = [[1,2,3,4],[4,5,6,7],[7,8,9,10]]
#Your code goes below
# build the result matrix with the right dimensions for any input sizes
result = [[0 for _ in range(len(matrix_B[0]))] for _ in range(len(matrix_A))]
for i in range(len(matrix_A)):
for j in range(len(matrix_B[0])):
for k in range(len(matrix_B)):
result[i][j] += matrix_A[i][k] * matrix_B[k][j]
for r in result:
print(r)
#Problem 3: (6 points)
#Implement the insertion-sort algorithm
#Pseudo code:
#Input: A Python list, A, with unsorted numbers
#Output: A sorted list, A, where the numbers are sorted ascendingly
#for i from 1 to n (n is the length of A)
# assign i-1 to j
# assign A[i] to key
# while j is greater than or equal to 0 and A[j] is greater than key
# assign A[j] to A[j+1]
#       decrement j by 1
# assign key to A[j+1]
A = [random.randint(0,1000) for i in range(50)]
#Your code goes below
def insertion_sort(numbers):
for k in range(1, len(numbers)):
j = k
while j > 0 and numbers[j] < numbers[j-1]:
numbers[j], numbers[j-1] = numbers[j-1], numbers[j]
j -= 1
return numbers
print(insertion_sort(A))
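# For comparison, a minimal sketch of the shift-based variant described by the
# pseudocode above (the helper name insertion_sort_shifting is only illustrative,
# not part of the assignment); it yields the same ascending order as insertion_sort.
def insertion_sort_shifting(numbers):
    for i in range(1, len(numbers)):
        key = numbers[i]                     # element being placed into the sorted prefix
        j = i - 1
        while j >= 0 and numbers[j] > key:
            numbers[j + 1] = numbers[j]      # shift larger elements one slot to the right
            j -= 1
        numbers[j + 1] = key                 # drop the key into its final position
    return numbers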
#Problem 4: (8 points)
#Write Python statements that create a dictionary of word counts.
#Specifically, keys of the dictionary are words; values of the dictionary
#are the number of occurrences of the words
#For example, given s = 'go go hurry up', the dictionary, D, should be like
# {'go':2, 'hurry':1, 'up':1}
s = '''
Deep learning also known as deep structured learning hierarchical learning or deep machine learning is a branch of machine learning
based on a set of algorithms that attempt to model high level abstractions in data In a simple case there might be two sets of neurons
ones that receive an input signal and ones that send an output signal When the input layer receives an input it passes on a modified
version of the input to the next layer In a deep network there are many layers between the input and output and the layers are not made
of neurons but it can help to think of it that way allowing the algorithm to use multiple processing layers composed of multiple linear
and non-linear transformations
'''
#Your code goes below
D = {}
L = s.split()
frequency = []
for i in L:
frequency.append(L.count(i))
print(frequency)
print(L)
D = dict(zip(L,frequency))
print(D) | [
"[email protected]"
] | |
676d3f096342d4cc8492353f9850df99daa236f1 | f56526a4bb9d0599be0d7ca89cba02900109d580 | /manage.py | 08fe28de5f270fd149c3ac065f8c8fbbb0913743 | [] | no_license | wongself/ScienceHammer-Spert | 2ae84ad5d25cbda070f5e121bdf70123393acfa3 | 5fadd8fa64b46ba205bb2eab5e5851c6e9895b42 | refs/heads/master | 2022-12-14T07:13:50.380635 | 2020-09-02T12:42:08 | 2020-09-02T12:42:08 | 289,702,598 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'scihammer.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
6cfb33715482388d145d3cb0ce8ebd3b23caca19 | 0ade9adceff8ce87773f8f4394335732483ea499 | /migrations/versions/b6eb51f7207c_.py | afe42fec9ec9feb0a95d5a47e4c2169e200af649 | [] | no_license | abira125/fyyur | 3f03bda82b6846b3f78d4e8a8ef22bb0526d9699 | c805175bb6bf2196516c85aa60b268a8f02b1e6a | refs/heads/master | 2023-02-03T00:36:21.008957 | 2020-12-03T03:53:44 | 2020-12-03T03:53:44 | 296,510,094 | 0 | 0 | null | 2020-12-03T03:53:45 | 2020-09-18T04:06:51 | Python | UTF-8 | Python | false | false | 828 | py | """empty message
Revision ID: b6eb51f7207c
Revises: 40c455786653
Create Date: 2020-09-20 12:01:16.460765
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'b6eb51f7207c'
down_revision = '40c455786653'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('show', 'start_time',
existing_type=postgresql.TIMESTAMP(),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('show', 'start_time',
existing_type=postgresql.TIMESTAMP(),
nullable=True)
# ### end Alembic commands ###
| [
"[email protected]"
] | |
6ac3ff9eeee8324164bd4e27ceab17129415edfc | e262cc25de2f5ad2bda611cc84f922575dd641db | /finance/admin.py | a416fe4d3314dd8bfaf1faf19f738688c288c969 | [] | no_license | tashaem/financeku | 0c55f6c52d5e111448c08e1af1de8120c4f5bc03 | ae3880adf0f54be25f52ee288f002a66a94d3b63 | refs/heads/master | 2022-12-08T20:04:25.517345 | 2020-08-15T20:04:15 | 2020-08-15T20:04:15 | 287,706,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(User)
admin.site.register(Finance)
admin.site.register(Percentages) | [
"[email protected]"
] | |
466859a2c01129ecbfc8ed814b1cdea9d75df04e | 0760d2eca52b92532707506138f5132d8757d039 | /gameparser.py | a7685633fca51a2956930e447ab9f7c5ab4064c2 | [] | no_license | kittyae99/CM1101 | 72b791ca6df44f5cbae02e970f39c9beb718d347 | 7ff0b9c46c0f59b2983a5da5fe203aaaf362ac73 | refs/heads/master | 2021-05-08T08:07:19.371878 | 2017-10-20T10:15:21 | 2017-10-20T10:15:21 | 106,994,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,108 | py | import string
# List of "unimportant" words (feel free to add more)
skip_words = ['a', 'about', 'all', 'an', 'another', 'any', 'around', 'at',
'bad', 'beautiful', 'been', 'better', 'big', 'can', 'every', 'for',
'from', 'good', 'have', 'her', 'here', 'hers', 'his', 'how',
'i', 'if', 'in', 'into', 'is', 'it', 'its', 'large', 'later',
'like', 'little', 'main', 'me', 'mine', 'more', 'my', 'now',
'of', 'off', 'oh', 'on', 'please', 'small', 'some', 'soon',
'that', 'the', 'then', 'this', 'those', 'through', 'till', 'to',
'towards', 'until', 'us', 'want', 'we', 'what', 'when', 'why',
'wish', 'with', 'would']
def filter_words(words, skip_words):
"""This function takes a list of words and returns a copy of the list from
which all words provided in the list skip_words have been removed.
For example:
>>> filter_words(["help", "me", "please"], ["me", "please"])
['help']
>>> filter_words(["go", "south"], skip_words)
['go', 'south']
>>> filter_words(['how', 'about', 'i', 'go', 'through', 'that', 'little', 'passage', 'to', 'the', 'south'], skip_words)
['go', 'passage', 'south']
"""
    return [word for word in words if word not in skip_words]
def remove_punct(text):
"""This function is used to remove all punctuation
marks from a string. Spaces do not count as punctuation and should
not be removed. The funcion takes a string and returns a new string
which does not contain any puctuation. For example:
>>> remove_punct("Hello, World!")
'Hello World'
>>> remove_punct("-- ...Hey! -- Yes?!...")
' Hey Yes'
>>> remove_punct(",go!So.?uTh")
'goSouTh'
"""
no_punct = ""
for char in text:
if not (char in string.punctuation):
no_punct = no_punct + char
return no_punct
def normalise_input(user_input):
"""This function removes all punctuation from the string and converts it to
lower case. It then splits the string into a list of words (also removing
any extra spaces between words) and further removes all "unimportant"
words from the list of words using the filter_words() function. The
resulting list of "important" words is returned. For example:
>>> normalise_input(" Go south! ")
['go', 'south']
>>> normalise_input("!!! tAkE,. LAmp!?! ")
['take', 'lamp']
>>> normalise_input("HELP!!!!!!!")
['help']
>>> normalise_input("Now, drop the sword please.")
['drop', 'sword']
>>> normalise_input("Kill ~ tHe :- gObLiN,. wiTH my SWORD!!!")
['kill', 'goblin', 'sword']
>>> normalise_input("I would like to drop my laptop here.")
['drop', 'laptop']
>>> normalise_input("I wish to take this large gem now!")
['take', 'gem']
>>> normalise_input("How about I go through that little passage to the south...")
['go', 'passage', 'south']
"""
    # Remove punctuation and convert to lower case
    no_punct = remove_punct(user_input).lower()
    # Split into words (this also removes any extra spaces between words)
    # and drop all "unimportant" words
    return filter_words(no_punct.split(), skip_words)
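# A quick illustrative check of the whole pipeline (example input chosen here,
# not taken from the original exercise):
# normalise_input("Take the LAMP, please!")  ->  ['take', 'lamp']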
| [
"[email protected]"
] | |
bfc3c8031c4d5e146222ba440d88baa5538b926a | 7a20dac7b15879b9453150b1a1026e8760bcd817 | /Curso/Challenges/URI/1097SequenceIJ3.py | 635109df951618ad1f484a58895383e866c6df7c | [
"MIT"
] | permissive | DavidBitner/Aprendizado-Python | 7afbe94c48c210ddf1ab6ae21109a8475e11bdbc | e1dcf18f9473c697fc2302f34a2d3e025ca6c969 | refs/heads/master | 2023-01-02T13:24:38.987257 | 2020-10-26T19:31:22 | 2020-10-26T19:31:22 | 283,448,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | inicio = 7
fim = 5
for i in range(1, 10, 2):
for j in range(inicio, fim - 1, -1):
print(f"I={i} J={j}")
inicio += 2
fim = inicio - 2
| [
"[email protected]"
] | |
a9cd6e8fb3d36f94246374a5f4ecb0f4ddd225a4 | 8a1b2c908e85ec7d2406fa39092dc87acaaa5f39 | /main.py | b51827a276b86620ef4268965e7035b95ecb8f11 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bluetyson/bluecap | 7639f25768fdca1d5d637f6105d1e2cda4b4268f | c52c2873f5c8a84431d4c45a01d175f136fa438e | refs/heads/master | 2022-11-09T05:13:49.196177 | 2020-06-17T07:51:45 | 2020-06-17T07:51:45 | 272,910,581 | 0 | 0 | NOASSERTION | 2020-06-17T07:44:45 | 2020-06-17T07:44:44 | null | UTF-8 | Python | false | false | 1,552 | py | #!/usr/bin/env python
"""
Copyright (C) 2019, Monash University, Geoscience Australia
Copyright (C) 2018, Stuart Walsh
Bluecap is released under the Apache License, Version 2.0 (the "License");
you may not use this software except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
The project uses third party components which may have different licenses.
Please refer to individual components for more details.
"""
import numpy as np
import pylab as pl
from Managers.ProblemManager import ProblemManager
from IO.CommandLine import ParseCommandLineArgs
# Initialisation
rv = ParseCommandLineArgs()
# from xml
theProblemManager = ProblemManager.FromXMLFile(rv.input)
theProblemManager.Initialize()
theProblemManager.Run()
# Record final state in xml
if(not theProblemManager.theRegionalCalculationManager.type):
if(not theProblemManager.outputType):
finalOutputFile = theProblemManager.outputPrefix + "_final.xml"
theProblemManager.ExportXMLFile(finalOutputFile)
theProblemManager.theMineDataManager.PlotResults()
elif (theProblemManager.outputType == "txt"):
theProblemManager.theMineDataManager.RecordResults(theProblemManager)
exit() | [
"[email protected]"
] | |
5ca46d6c34881844d2873314ea864076437b6c61 | 2793bb8f04a423c0af2bb3d6d1e6382c1a0183fb | /str01_zad04.py | dbe37eeb7e1ff423263137299bcda73eccb0796a | [] | no_license | andjelao/Zbirka2-15-zadataka | 44c0b0b70483dadfa1f0e07f9e02b0d61e46c2fc | 38525b71e468f53c68711f2ccb166f87ecb72bc3 | refs/heads/main | 2023-01-04T09:21:15.190961 | 2020-11-04T18:17:56 | 2020-11-04T18:17:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | import math
print("Unesite sljedece podatke o jednakokrakom trouglu: ")
b = int(input("krak: "))
a = int(input("osnovica: "))
def rotacija_jednakokrakog_trougla_oko_osnovice(s, a):
"""
    The rotation produces two cones, k1 and k2.
    Input parameters: leg b     -> becomes the slant height s of both cones
                      base a    -> half of it becomes the height H of both cones
    Computes the height of the triangle dropped onto the base -> it becomes the radius r of the cones' shared base
    surface area = lateral surface area of k1 + lateral surface area of k2
    volume = volume of k1 + volume of k2
    returns the surface area and volume of the solid of revolution
"""
pi = 3.14
H = a / 2
r = math.sqrt(s * s - H * H)
povrsina = 2 * r * pi * s
zapremina = (2 * (r * r * pi * H)) / 3
return povrsina, zapremina
print("Povrsina i zapremina rotacionog tijela iznose: ",rotacija_jednakokrakog_trougla_oko_osnovice(b, a))
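# A quick sanity check of the formulas with hypothetical inputs (not read from the user):
# for b = 5 and a = 6 -> H = 6 / 2 = 3, r = sqrt(5**2 - 3**2) = 4,
# surface area = 2 * r * pi * s = 2 * 4 * 3.14 * 5 = 125.6
# volume = 2 * (r**2 * pi * H) / 3 = 2 * (16 * 3.14 * 3) / 3 = 100.48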
| [
"[email protected]"
] | |
eb835bea53927e65127039af2a88cd9d1fd583be | ccfc4c0fe6029cfa6171264ec6714ab4e8ae7197 | /mip_drift_slices.py | b49995f0d8fe88f7d6c56a0ae817c8ce21ec312b | [] | no_license | alicebarthel/roms_tools | 2e42d9afbae0203b76bf19de3d327f87a1a3c488 | 72cd0bd976d1bf15c8eb52f3f6919f99cc8ff0db | refs/heads/master | 2020-04-09T00:44:13.179152 | 2018-11-09T12:52:11 | 2018-11-09T12:52:11 | 159,879,285 | 0 | 0 | null | 2018-11-30T21:13:19 | 2018-11-30T21:13:19 | null | UTF-8 | Python | false | false | 18,865 | py | from netCDF4 import Dataset
from numpy import *
from matplotlib.pyplot import *
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from calc_z import *
from interp_lon_roms import *
# Import FESOM scripts (have to modify path first)
import sys
sys.path.insert(0, '/short/y99/kaa561/fesomtools')
from fesom_grid import *
from fesom_sidegrid import *
from triangle_area import *
from in_triangle import *
# Make a 3x2 plot of temperature (left) and salinity (right) through 0E.
# The top row is the initial conditions from ECCO2. The middle and bottom rows
# are the last January of the simulation (monthly average) from MetROMS and
# FESOM respectively.
# Input:
# roms_grid = path to ROMS grid file
# roms_file = path to file containing Jan 2016 monthly average of temperature
# and salinity in ROMS
# fesom_mesh_path_lr, fesom_mesh_path_hr = paths to FESOM mesh directories for
# low-res and high-res respectively
# fesom_file_lr, fesom_file_hr = paths to files containing Jan 2016 monthly
# averages of temperature and salinity, in low-res FESOM and
# high-res FESOM respectively
def mip_drift_slices (roms_grid, roms_file, fesom_mesh_path_lr, fesom_file_lr, fesom_mesh_path_hr, fesom_file_hr):
# Paths to ECCO2 files with initial conditions for temp and salt
ecco_temp_file = '/short/m68/kaa561/metroms_iceshelf/data/originals/ECCO2/THETA.1440x720x50.199201.nc'
ecco_salt_file = '/short/m68/kaa561/metroms_iceshelf/data/originals/ECCO2/SALT.1440x720x50.199201.nc'
# Longitude to interpolate to (OE)
lon0 = 0
# Bounds on plot
lat_min = -73
lat_max = -30
depth_min = -6000
depth_max = 0
# ROMS grid parameters
theta_s = 7.0
theta_b = 2.0
hc = 250
N = 31
# Bounds on colour scales for temperature and salinity
temp_min = -2
temp_max = 6
salt_min = 33.9
salt_max = 34.9
# Contours to overlay
temp_contour = 0.75
salt_contour = 34.5
# Parameters for FESOM regular grid interpolation (needed for contours)
num_lat = 500
num_depth = 250
# Get longitude for the title
if lon0 < 0:
lon_string = str(int(round(-lon0))) + r'$^{\circ}$W'
else:
lon_string = str(int(round(lon0))) + r'$^{\circ}$E'
print 'Processing ECCO2'
id = Dataset(ecco_temp_file, 'r')
# Read grid variables
ecco_lat = id.variables['LATITUDE_T'][:]
ecco_depth = -1*id.variables['DEPTH_T'][:]
if lon0 == 0:
# Hard-coded lon0 = 0E: average between the first (0.125 E) and last
# (359.875 E = -0.125 W) indices in the regular ECCO2 grid
ecco_temp = 0.5*(id.variables['THETA'][0,:,:,0] + id.variables['THETA'][0,:,:,-1])
id.close()
id = Dataset(ecco_salt_file, 'r')
ecco_salt = 0.5*(id.variables['SALT'][0,:,:,0] + id.variables['SALT'][0,:,:,-1])
id.close()
else:
print 'lon0 is only coded for 0E at this time'
#return
print 'Processing ROMS'
# Read grid variables we need
id = Dataset(roms_grid, 'r')
roms_lon_2d = id.variables['lon_rho'][:,:]
roms_lat_2d = id.variables['lat_rho'][:,:]
roms_h = id.variables['h'][:,:]
roms_zice = id.variables['zice'][:,:]
id.close()
# Read temperature and salinity
id = Dataset(roms_file, 'r')
roms_temp_3d = id.variables['temp'][0,:,:,:]
roms_salt_3d = id.variables['salt'][0,:,:,:]
id.close()
# Get a 3D array of z-coordinates; sc_r and Cs_r are unused in this script
roms_z_3d, sc_r, Cs_r = calc_z(roms_h, roms_zice, theta_s, theta_b, hc, N)
# Make sure we are in the range 0-360
if lon0 < 0:
lon0 += 360
# Interpolate to lon0
roms_temp, roms_z, roms_lat = interp_lon_roms(roms_temp_3d, roms_z_3d, roms_lat_2d, roms_lon_2d, lon0)
roms_salt, roms_z, roms_lat = interp_lon_roms(roms_salt_3d, roms_z_3d, roms_lat_2d, roms_lon_2d, lon0)
# Switch back to range -180-180
if lon0 > 180:
lon0 -= 360
print 'Processing low-res FESOM'
# Build regular elements
elements_lr = fesom_grid(fesom_mesh_path_lr)
# Read temperature and salinity
id = Dataset(fesom_file_lr, 'r')
fesom_temp_nodes_lr = id.variables['temp'][0,:]
fesom_salt_nodes_lr = id.variables['salt'][0,:]
id.close()
# Make SideElements
selements_temp_lr = fesom_sidegrid(elements_lr, fesom_temp_nodes_lr, lon0, lat_max)
selements_salt_lr = fesom_sidegrid(elements_lr, fesom_salt_nodes_lr, lon0, lat_max)
# Build an array of quadrilateral patches for the plot, and of data values
# corresponding to each SideElement
patches_lr = []
fesom_temp_lr = []
for selm in selements_temp_lr:
# Make patch
coord = transpose(vstack((selm.y, selm.z)))
patches_lr.append(Polygon(coord, True, linewidth=0.))
# Save data value
fesom_temp_lr.append(selm.var)
# Repeat for salinity
fesom_salt_lr = []
for selm in selements_salt_lr:
fesom_salt_lr.append(selm.var)
# Interpolate to regular grid so we can overlay contours
lat_reg = linspace(lat_min, lat_max, num_lat)
depth_reg = linspace(-depth_max, -depth_min, num_depth)
temp_reg_lr = zeros([num_depth, num_lat])
salt_reg_lr = zeros([num_depth, num_lat])
temp_reg_lr[:,:] = NaN
salt_reg_lr[:,:] = NaN
# For each element, check if a point on the regular grid lies
# within. If so, do barycentric interpolation to that point, at each
# depth on the regular grid.
for elm in elements_lr:
# Check if this element crosses lon0
if amin(elm.lon) < lon0 and amax(elm.lon) > lon0:
# Check if we are within the latitude bounds
if amax(elm.lat) > lat_min and amin(elm.lat) < lat_max:
# Find largest regular latitude value south of Element
tmp = nonzero(lat_reg > amin(elm.lat))[0]
if len(tmp) == 0:
# Element crosses the southern boundary
jS = 0
else:
jS = tmp[0] - 1
# Find smallest regular latitude north of Element
tmp = nonzero(lat_reg > amax(elm.lat))[0]
if len(tmp) == 0:
# Element crosses the northern boundary
jN = num_lat
else:
jN = tmp[0]
for j in range(jS+1,jN):
# There is a chance that the regular gridpoint at j
# lies within this element
lat0 = lat_reg[j]
if in_triangle(elm, lon0, lat0):
# Yes it does
# Get area of entire triangle
area = triangle_area(elm.lon, elm.lat)
# Get area of each sub-triangle formed by (lon0, lat0)
area0 = triangle_area([lon0, elm.lon[1], elm.lon[2]], [lat0, elm.lat[1], elm.lat[2]])
area1 = triangle_area([lon0, elm.lon[0], elm.lon[2]], [lat0, elm.lat[0], elm.lat[2]])
area2 = triangle_area([lon0, elm.lon[0], elm.lon[1]], [lat0, elm.lat[0], elm.lat[1]])
# Find fractional area of each
cff = [area0/area, area1/area, area2/area]
# Interpolate each depth value
for k in range(num_depth):
# Linear interpolation in the vertical for the
# value at each corner of the triangle
node_vals_temp = []
node_vals_salt = []
for n in range(3):
id1, id2, coeff1, coeff2 = elm.nodes[n].find_depth(depth_reg[k])
if any(isnan(array([id1, id2, coeff1, coeff2]))):
# No ocean data here (seafloor or ice shelf)
node_vals_temp.append(NaN)
node_vals_salt.append(NaN)
else:
node_vals_temp.append(coeff1*fesom_temp_nodes_lr[id1] + coeff2*fesom_temp_nodes_lr[id2])
node_vals_salt.append(coeff1*fesom_salt_nodes_lr[id1] + coeff2*fesom_salt_nodes_lr[id2])
if any(isnan(node_vals_temp)):
pass
else:
# Barycentric interpolation for the value at
# lon0, lat0
temp_reg_lr[k,j] = sum(array(cff)*array(node_vals_temp))
salt_reg_lr[k,j] = sum(array(cff)*array(node_vals_salt))
temp_reg_lr = ma.masked_where(isnan(temp_reg_lr), temp_reg_lr)
salt_reg_lr = ma.masked_where(isnan(salt_reg_lr), salt_reg_lr)
print 'Processing high-res FESOM'
elements_hr = fesom_grid(fesom_mesh_path_hr)
id = Dataset(fesom_file_hr, 'r')
fesom_temp_nodes_hr = id.variables['temp'][0,:]
fesom_salt_nodes_hr = id.variables['salt'][0,:]
id.close()
selements_temp_hr = fesom_sidegrid(elements_hr, fesom_temp_nodes_hr, lon0, lat_max)
selements_salt_hr = fesom_sidegrid(elements_hr, fesom_salt_nodes_hr, lon0, lat_max)
patches_hr = []
fesom_temp_hr = []
for selm in selements_temp_hr:
coord = transpose(vstack((selm.y, selm.z)))
patches_hr.append(Polygon(coord, True, linewidth=0.))
fesom_temp_hr.append(selm.var)
fesom_salt_hr = []
for selm in selements_salt_hr:
fesom_salt_hr.append(selm.var)
lat_reg = linspace(lat_min, lat_max, num_lat)
temp_reg_hr = zeros([num_depth, num_lat])
salt_reg_hr = zeros([num_depth, num_lat])
temp_reg_hr[:,:] = NaN
salt_reg_hr[:,:] = NaN
for elm in elements_hr:
if amin(elm.lon) < lon0 and amax(elm.lon) > lon0:
if amax(elm.lat) > lat_min and amin(elm.lat) < lat_max:
tmp = nonzero(lat_reg > amin(elm.lat))[0]
if len(tmp) == 0:
jS = 0
else:
jS = tmp[0] - 1
tmp = nonzero(lat_reg > amax(elm.lat))[0]
if len(tmp) == 0:
jN = num_lat
else:
jN = tmp[0]
for j in range(jS+1,jN):
lat0 = lat_reg[j]
if in_triangle(elm, lon0, lat0):
area = triangle_area(elm.lon, elm.lat)
area0 = triangle_area([lon0, elm.lon[1], elm.lon[2]], [lat0, elm.lat[1], elm.lat[2]])
area1 = triangle_area([lon0, elm.lon[0], elm.lon[2]], [lat0, elm.lat[0], elm.lat[2]])
area2 = triangle_area([lon0, elm.lon[0], elm.lon[1]], [lat0, elm.lat[0], elm.lat[1]])
cff = [area0/area, area1/area, area2/area]
for k in range(num_depth):
node_vals_temp = []
node_vals_salt = []
for n in range(3):
id1, id2, coeff1, coeff2 = elm.nodes[n].find_depth(depth_reg[k])
if any(isnan(array([id1, id2, coeff1, coeff2]))):
node_vals_temp.append(NaN)
node_vals_salt.append(NaN)
else:
node_vals_temp.append(coeff1*fesom_temp_nodes_hr[id1] + coeff2*fesom_temp_nodes_hr[id2])
node_vals_salt.append(coeff1*fesom_salt_nodes_hr[id1] + coeff2*fesom_salt_nodes_hr[id2])
if any(isnan(node_vals_temp)):
pass
else:
temp_reg_hr[k,j] = sum(array(cff)*array(node_vals_temp))
salt_reg_hr[k,j] = sum(array(cff)*array(node_vals_salt))
temp_reg_hr = ma.masked_where(isnan(temp_reg_hr), temp_reg_hr)
salt_reg_hr = ma.masked_where(isnan(salt_reg_hr), salt_reg_hr)
depth_reg = -1*depth_reg
# Set up axis labels the way we want them
lat_ticks = arange(lat_min+3, lat_max+10, 10)
lat_labels = []
for val in lat_ticks:
lat_labels.append(str(int(round(-val))) + r'$^{\circ}$S')
depth_ticks = range(depth_min+1000, 0+1000, 1000)
depth_labels = []
for val in depth_ticks:
depth_labels.append(str(int(round(-val))))
print 'Plotting'
fig = figure(figsize=(14,24))
# ECCO2
gs1 = GridSpec(1,2)
gs1.update(left=0.1, right=0.95, bottom=0.7575, top=0.94, wspace=0.08)
# Temperature
ax = subplot(gs1[0,0])
pcolor(ecco_lat, ecco_depth, ecco_temp, vmin=temp_min, vmax=temp_max, cmap='jet')
# Overlay contour
    contour(ecco_lat, ecco_depth, ecco_temp, levels=[temp_contour], colors='black')
title(r'Temperature ($^{\circ}$C)', fontsize=24)
ylabel('Depth (m)', fontsize=18)
xlim([lat_min, lat_max])
ylim([depth_min, depth_max])
ax.set_xticks(lat_ticks)
ax.set_xticklabels(lat_labels, fontsize=16)
ax.set_yticks(depth_ticks)
ax.set_yticklabels(depth_labels, fontsize=16)
text(-64, 1000, 'a) ECCO2 initial conditions at ' + lon_string + ', January 1992', fontsize=28)
# Salinity
ax = subplot(gs1[0,1])
pcolor(ecco_lat, ecco_depth, ecco_salt, vmin=salt_min, vmax=salt_max, cmap='jet')
    contour(ecco_lat, ecco_depth, ecco_salt, levels=[salt_contour], colors='black')
title('Salinity (psu)', fontsize=24)
xlim([lat_min, lat_max])
ylim([depth_min, depth_max])
ax.set_xticks(lat_ticks)
ax.set_xticklabels(lat_labels, fontsize=16)
ax.set_yticks(depth_ticks)
ax.set_yticklabels([])
# MetROMS
gs2 = GridSpec(1,2)
gs2.update(left=0.1, right=0.95, bottom=0.525, top=0.7075, wspace=0.08)
# Temperature
ax = subplot(gs2[0,0])
pcolor(roms_lat, roms_z, roms_temp, vmin=temp_min, vmax=temp_max, cmap='jet')
    contour(roms_lat, roms_z, roms_temp, levels=[temp_contour], colors='black')
ylabel('Depth (m)', fontsize=18)
xlim([lat_min, lat_max])
ylim([depth_min, depth_max])
ax.set_xticks(lat_ticks)
ax.set_xticklabels(lat_labels, fontsize=16)
ax.set_yticks(depth_ticks)
ax.set_yticklabels(depth_labels, fontsize=16)
text(-49, 300, 'b) MetROMS, January 2016', fontsize=28)
# Salinity
ax = subplot(gs2[0,1])
pcolor(roms_lat, roms_z, roms_salt, vmin=salt_min, vmax=salt_max, cmap='jet')
    contour(roms_lat, roms_z, roms_salt, levels=[salt_contour], colors='black')
xlim([lat_min, lat_max])
ylim([depth_min, depth_max])
ax.set_xticks(lat_ticks)
ax.set_xticklabels(lat_labels, fontsize=16)
ax.set_yticks(depth_ticks)
ax.set_yticklabels([])
# FESOM low-res
gs3 = GridSpec(1,2)
gs3.update(left=0.1, right=0.95, bottom=0.2925, top=0.475, wspace=0.08)
# Temperature
ax = subplot(gs3[0,0])
img = PatchCollection(patches_lr, cmap='jet')
img.set_array(array(fesom_temp_lr))
img.set_edgecolor('face')
img.set_clim(vmin=temp_min, vmax=temp_max)
ax.add_collection(img)
# Overlay contour on regular grid
    contour(lat_reg, depth_reg, temp_reg_lr, levels=[temp_contour], colors='black')
ylabel('Depth (m)', fontsize=18)
xlim([lat_min, lat_max])
ylim([depth_min, depth_max])
ax.set_xticks(lat_ticks)
ax.set_xticklabels(lat_labels, fontsize=16)
ax.set_yticks(depth_ticks)
ax.set_yticklabels(depth_labels, fontsize=16)
text(-53, 300, 'c) FESOM (low-res), January 2016', fontsize=28)
# Salinity
ax = subplot(gs3[0,1])
img = PatchCollection(patches_lr, cmap='jet')
img.set_array(array(fesom_salt_lr))
img.set_edgecolor('face')
img.set_clim(vmin=salt_min, vmax=salt_max)
ax.add_collection(img)
    contour(lat_reg, depth_reg, salt_reg_lr, levels=[salt_contour], colors='black')
xlim([lat_min, lat_max])
ylim([depth_min, depth_max])
ax.set_xticks(lat_ticks)
ax.set_xticklabels(lat_labels, fontsize=16)
ax.set_yticks(depth_ticks)
ax.set_yticklabels([])
# FESOM high-res
gs4 = GridSpec(1,2)
gs4.update(left=0.1, right=0.95, bottom=0.06, top=0.2425, wspace=0.08)
# Temperature
ax = subplot(gs4[0,0])
img = PatchCollection(patches_hr, cmap='jet')
img.set_array(array(fesom_temp_hr))
img.set_edgecolor('face')
img.set_clim(vmin=temp_min, vmax=temp_max)
ax.add_collection(img)
    contour(lat_reg, depth_reg, temp_reg_hr, levels=[temp_contour], colors='black')
ylabel('Depth (m)', fontsize=18)
xlim([lat_min, lat_max])
ylim([depth_min, depth_max])
ax.set_xticks(lat_ticks)
ax.set_xticklabels(lat_labels, fontsize=16)
ax.set_yticks(depth_ticks)
ax.set_yticklabels(depth_labels, fontsize=16)
text(-53, 300, 'd) FESOM (high-res), January 2016', fontsize=28)
# Add a colorbar for temperature
cbaxes = fig.add_axes([0.17, 0.015, 0.3, 0.015])
cbar = colorbar(img, orientation='horizontal', cax=cbaxes, extend='both', ticks=arange(temp_min, temp_max+2, 2))
cbar.ax.tick_params(labelsize=16)
# Salinity
ax = subplot(gs4[0,1])
img = PatchCollection(patches_hr, cmap='jet')
img.set_array(array(fesom_salt_hr))
img.set_edgecolor('face')
img.set_clim(vmin=salt_min, vmax=salt_max)
ax.add_collection(img)
    contour(lat_reg, depth_reg, salt_reg_hr, levels=[salt_contour], colors='black')
xlim([lat_min, lat_max])
ylim([depth_min, depth_max])
ax.set_xticks(lat_ticks)
ax.set_xticklabels(lat_labels, fontsize=16)
ax.set_yticks(depth_ticks)
ax.set_yticklabels([])
# Add a colorbar for salinity
cbaxes = fig.add_axes([0.6, 0.015, 0.3, 0.02])
cbar = colorbar(img, orientation='horizontal', cax=cbaxes, extend='both', ticks=arange(salt_min+0.1, salt_max+0.1, 0.2))
cbar.ax.tick_params(labelsize=16)
fig.show()
fig.savefig('ts_drift.png')
# Command-line interface
if __name__ == "__main__":
roms_grid = raw_input("Path to ROMS grid file: ")
roms_file = raw_input("Path to ROMS file containing monthly averaged temperature and salinity for January 2016: ")
fesom_mesh_path_lr = raw_input("Path to FESOM low-res mesh directory: ")
fesom_file_lr = raw_input("Path to FESOM low-res file containing monthly averaged temperature and salinity for January 2016: ")
fesom_mesh_path_hr = raw_input("Path to FESOM high-res mesh directory: ")
fesom_file_hr = raw_input("Path to FESOM high-res file containing monthly averaged temperature and salinity for January 2016: ")
mip_drift_slices(roms_grid, roms_file, fesom_mesh_path_lr, fesom_file_lr, fesom_mesh_path_hr, fesom_file_hr)
| [
"[email protected]"
] | |
46d5903f347c4eb445c733dc1a2a7a8768475507 | 74a7bc91d401c69b4afae49fc9380d0605b501a4 | /python/queue/2.multi_threading.py | a3b682724df41cd849a179060c602af265667980 | [] | no_license | imzhangliang/LearningToys | b5e2e8fe255f57e85600c09dba36394d7c7448c7 | c782d8b1790dbc2aac945b5b23e6daee8183ce92 | refs/heads/master | 2020-03-27T14:57:20.306868 | 2019-04-24T02:31:26 | 2019-04-24T02:31:26 | 146,689,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,779 | py |
import queue
import threading
import time
def putTask(taskQueue):
    # Produce tasks: put one task on the queue at a fixed interval
for item in range(5):
taskQueue.put(item)
threadName = threading.currentThread().getName()
print('[{0}]'.format(threadName),': I put', item)
time.sleep(2)
def getTask(taskQueue):
    # Consume tasks: take a task from the queue whenever one is available, block otherwise
while True:
item = taskQueue.get()
threadName = threading.currentThread().getName()
print('[{0}]'.format(threadName),': I got', item)
# do_work(item)
        # the code that actually processes the task would go here
def getAndDoTask(taskQueue):
    # Consume tasks: take a task from the queue whenever one is available, block otherwise
while True:
item = taskQueue.get()
threadName = threading.currentThread().getName()
print('[{0}]'.format(threadName),': I got', item)
print('[{0}]'.format(threadName),': I am doing', item)
time.sleep(1)
def test1():
    '''Two threads: one puts tasks on the queue, the other takes them off'''
q = queue.Queue()
t1 = threading.Thread(name='putTask', target=putTask, args=(q,))
t2 = threading.Thread(name='getTask', target=getTask, args=(q,))
t2.start()
t1.start()
def multiWorkers():
    '''Multiple workers processing tasks concurrently'''
q = queue.Queue()
t1 = threading.Thread(name='putTask', target=putTask, args=(q,))
w1 = threading.Thread(name='worker1', target=getAndDoTask, args=(q,))
w2 = threading.Thread(name='worker2', target=getAndDoTask, args=(q,))
w3 = threading.Thread(name='worker3', target=getAndDoTask, args=(q,))
t1.start()
w1.start()
w2.start()
w3.start()
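# A minimal sketch of a graceful-shutdown variant (illustrative only, using the same
# queue.Queue): pairing task_done()/join() with a sentinel value lets workers exit
# cleanly once the producer signals that no more tasks are coming.
def workerWithSentinel(taskQueue):
    while True:
        item = taskQueue.get()
        if item is None:                     # sentinel: no more work, stop this worker
            taskQueue.task_done()
            break
        print('[{0}]'.format(threading.currentThread().getName()), ': I am doing', item)
        taskQueue.task_done()                # lets taskQueue.join() unblock when all tasks are processed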
if __name__ == '__main__':
#test1()
multiWorkers() | [
"[email protected]"
] | |
97d0ff599f148e696b8c2e0b340819f543d3184f | 91d20d715bf3bcfe4b275762142e8278dc2e6c7a | /tests/managers/test_manager_dcf.py | 89c348a045c0c06d60258c39dce58195a1ce0478 | [
"MIT"
] | permissive | aggreybosire/open-vot | 7d3c472d6b6bc45ad6b04d00a8662ca669f2f091 | 818356d0484138609489c28bb4e0b2a7f59703d7 | refs/heads/master | 2020-03-22T05:38:42.517237 | 2018-06-27T07:42:54 | 2018-06-27T07:42:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | from __future__ import absolute_import
import unittest
import random
from lib.managers import ManagerDCF
class TestManagerDCF(unittest.TestCase):
def setUp(self):
self.otb_dir = 'data/OTB'
self.manager = ManagerDCF()
def tearDown(self):
pass
def test_track(self):
self.manager.track(self.otb_dir)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
5b10bdd37e763c491295df1ab9ea5e71397ce6b6 | 9919b5ae6736156241b747fb7cc2fedb5e2d014a | /src/scripts/conversion/main.py | 15ba1e5aacdf4340fd37911f61837ffe2bc5097f | [] | no_license | MiniOK/companyone | 07336963897aa78e8d1c1475d572a2bbf536a0d2 | a6148ca6cc9c4500b2c272a47d217b3f6febec52 | refs/heads/master | 2020-07-21T05:21:35.924756 | 2019-09-06T09:28:33 | 2019-09-06T09:28:33 | 206,761,428 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,018 | py | import os
import random
from shutil import copyfile
from win32com.client import Dispatch
from conversion.UltraSoundReport import UltraSound
def run_main():
# DIR = "../../../data/20190722/"
# DIR = "M:\\PycharmProjects\\doc_conversion\\data\\20190722\\"
DIR = 'E:\\zjc\\'
# Get all files in list
word_documents = []
excel_spreadsheets = []
pdfs = []
finish_file = []
for version in os.listdir(DIR):
if version == "V期":
continue
if "/" in DIR:
sub_dir = "{}{}/".format(DIR, version)
else:
sub_dir = "{}{}\\".format(DIR, version)
for category in os.listdir(sub_dir):
if category == "Long_fu_survey_Carotid_ultrasound":
continue
print(category)
if "/" in DIR:
ssub_dir = sub_dir + category + "/"
else:
ssub_dir = sub_dir + category + "\\"
print("ssub_dir",ssub_dir)
for item in os.listdir(ssub_dir):
if item == "2201":
print("继续执行!!!")
print("item",item)
if "/" in DIR:
sssub_dir = ssub_dir + item + "/"
else:
sssub_dir = ssub_dir + item + "\\"
for file in os.listdir(sssub_dir):
if "~$" not in file:
if file.split(".")[-1].lower() in ["doc", "docx"]:
if file.split(".")[-2] in ["doc", "docx"]:
word_documents.append(sssub_dir + file)
continue
word_documents.append(sssub_dir + file)
elif file.split(".")[-1].lower() in ["xls", "xlsx"]:
excel_spreadsheets.append(sssub_dir + file)
elif file.split(".")[-1].lower() in ["pdf"]:
pdfs.append(sssub_dir + file)
    # seed so that every run gives the same order
    # random.seed(11)
    # shuffle the elements of the list into a random order
    # random.shuffle(word_documents)
word = Dispatch("Word.Application")
print("IV期高危的总数:", len(word_documents))
    # read in the list of files that are already finished
for line in open("../output/finish.txt", encoding='utf-8'):
finish_file.append(line)
if len(finish_file) != len(set(finish_file)):
print("重复前:", len(finish_file))
print("有重复")
finish_file = list(set(finish_file))
print("去重后:",len(finish_file))
else:
print("没重复:", len(finish_file))
    # filter out the files recorded in the err directory
err_list = os.listdir('../output/err')
try:
finish_txt = open('../output/finish-001.txt', 'r+', encoding='utf-8')
data = finish_txt.read()
# print(data)
finish_txt.write(
"{}\t\t{}\t\t\t{}\t\t\t{}\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t"
"{}\t\t\t{}\t\t\t{}{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\t\t\t{}\n"
.format("name", "ID", "sex", "date", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14","v15", "v16",
"v17","v18","v19","v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "v32", "v33", "v34"))
        # create/open the finish file
with open('../output/finish.txt', 'r+', encoding='utf-8') as finish:
finish.read()
for file in word_documents:
finish_file_split = "{}\n".format(file.split("\\")[-1])
                # skip files that are in the err list
if finish_file_split in err_list:
print("---------------过滤错误的文件")
continue
                # skip files that are already finished
if finish_file_split in finish_file:
# print("--------------过滤完成的文件")
continue
#
# if file == "E:\\zjc\\IV期\\Long_fu_survey_Carotid_ultrasound\\2202\\王欣华G2202301132.doc":
# continue
# if file == "E:\\zjc\\IV期\\Long_fu_survey_Carotid_ultrasound\\2202\\郝守方G2202301624.doc":
# continue
# if file == "E:\\zjc\\IV期\\Long_fu_survey_Carotid_ultrasound\\2202\\王志伟G2202300829.doc":
# continue
                # # missing the RCCA-IMT field
# if file == r"E:\zjc\IV期\High_risk_Carotid_ultrasound\1402\G1402404851报告单.docx":
# continue
print(file)
obj = UltraSound(file, word, finish, finish_txt)
finally:
finish.close()
finish_txt.close()
if __name__ == '__main__':
run_main()
pass
| [
"[email protected]"
] | |
bf6e59ff35b262d8aa742fb9d8b4002db1cd2635 | ab3d361d784ee2bf066802f9942371625f6b5b6d | /basics/venv/Scripts/easy_install-script.py | 1e0b07c8c348069b58e7033096b61d3479ce707f | [] | no_license | patricia8229/Dictionery | 9b74cdc990604007ead8952bd4ce390749ba679b | 8e5ec7a1510f6b53bd2ab0273d8f84a3e0d8a180 | refs/heads/master | 2020-08-17T04:22:03.811513 | 2019-10-25T18:00:57 | 2019-10-25T18:00:57 | 215,606,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | #!C:\Users\HP\PycharmProjects\basics\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] | |
99b76f70c93b4700993ef5a508844059278154ed | 01c3ff1d74e754e0d4ce0fb7f8a8b329ec3766e1 | /python_exercises/17exercises/prime1.py | db3154923a82096c6c0e30619f1f9d4a39239028 | [] | no_license | vineel2014/Pythonfiles | 5ad0a2b824b5fd18289d21aa8306099aea22c202 | 0d653cb9659fe750cf676a70035ab67176179905 | refs/heads/master | 2020-04-28T03:56:22.713558 | 2019-03-11T08:38:54 | 2019-03-11T08:38:54 | 123,681,939 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | def is_prime(n):
for i in range(2, n):
if n%i == 0:
return False
return True
n = int(input("What number should I go up to? "))
for p in range(2, n+1):
if is_prime(p):
print (p)
print ("Done")
| [
"[email protected]"
] | |
a53c4ac16941e738f96f042c8b60852349ed165d | dc0d3dcf9ee1ad6119a861b23f9ef5eb4ae8f989 | /aleatory_mixtape.py | c188f61a70285494f63129526842e6c8ffa81439 | [
"Unlicense"
] | permissive | SURVANT-Cryp/Algorithm-as-Ritual | c3c463947b63f5c95c455e551376ebf5e9b3073e | b9a28f26180872fe2113714e2b3a7431ea081b5e | refs/heads/master | 2022-05-28T06:59:49.137544 | 2022-05-08T20:13:19 | 2022-05-08T20:13:19 | 153,498,010 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,233 | py | #! /usr/bin/pythonw
# coding: utf-8
"""
aleatory_mixtape_v.1
"""
import random, textwrap
from random import randint
import webbrowser
text = ["LOVE098", "LOVE089", "SV128", "HDBD012", "PFL197", "N-037", "JAG316CD", "MUSLIMLIM003DI", "DTD 021DI", "HOS468", "HOS425", "MRP099LP", "SF105LP", "HAFTW008", "HAFTW015", "PRB018", "SFT0376", "DAIS103", "IDEAL134", "IDEAL076", "MRP093LP", "TRR224CD", "KRANK176LP", "ABDT055LP", "VOD91RE", "SV144", "RTRADDA725", "PTYT086LP", "ELP020", "CREP53", "0731383667800", "WARPLP43", "WAP345", "DEATH026", "DEATH013LP", "DEATH012", "PR015", "DEATH006", "RS51CD", "TTT022", "TTT064", "RPC 6", "CST003LP", "HDB098", "SV150", "SV120", "SV087", "LECKEY001", "RAVE002", "USCD55", "KFR2038", "LIES057", "LIES078", "KFR2016", "FV39", "DO-GC01", "DOJS005", "MTR056DNL", "SF031", "SF084DI", "PAL 039DI", "BFFP132I", "EMEGO128", "HOS-601", "HOS-496", "N-033", "N-043", "HOS-450EU", "EMEGO091", "DAIS028", "ARTYARDCD004", "711574835713PMI", "SDGBJ1303DI", "PF07602", "TJDCD326", "TBL1007", "MRP097LP", "MRP083LP", "HOS341", "HN133", "HN206", "TRR312LP", "LOVE079", "LOVE107", "LOVE092", "HDBCD002", "SUK 3021", "GOO022LP", "TRUE1236", "WAP375", "RS62", "655035034317", "655035006246", "BC KD", "SE002", "EMEGO189", "CY997", "CY989", "DAIS065LP", "SB1112", "HIT025WMG", "NA5120", "AKUCD1009-10", "AKUDI1007", "ATFA031CD", "HJRCD3", "HJRCD58", "HJRLP62", "STRUT148D", "ARTYARDCD008", "AAIMP228", "702397800620", "WARPCDD25", "FAITBACK01CD", "TNC1", "ELP006", "LACR001", "LACR005", "LACR013", "ABD008DI", "LITA097", "CREVV1087", "HJRLP111", "SUNN25", "BLP005", "LOVE099", "RBN073D", "ONLY3CD", "TO72", "TYPE046", "TYPE009", "BBQDA96", "RH RSS 25", "IMPREC368DI", "ALE001", "SV 09", "W-2383", "SND2SE", "DDD07T", "KRANK177LP", "TYPE074", "ND-25", "LACR008", "LOVE100", "HJP49", "SP185", "ED. RZ1015-16", "BB039", "BRATRALP1005", "P021", "HJRLP107", "634457458567", "FR6", "HDBC001", "ARTYARDLP014", "LSSN020", "METH022X", "STS279D", "H033", "H018-V", "MGART904", "ST07", "ST43", "WAP105", "TRR226CD", "ZIQ290", "JTR06", "SBR3016LP", "HAFTW016", "GI-204LP", "BLACKEST004", "PV007DI", "SF077DI", "KFR2025", "KFR2004", "NHNHCD1005", "DTD 026DI", "HN250", "SS-003", "SS047LP", "NNT007", "RH 115 CD", "LOVE069", "0889397884024", "TYPE063", "STRUT051CD", "STRUT129D", "HOS232", "5055300330895", "R-N 099", "SKA007", "SKA002", "DA006", "GR-006B", "UNITEDJNANA366", "W-0037", "HDB001", "WATER139", "IBFFP180BX", "BMD-1", "WARPCDD66", "SOMADA057", "DUSTDL054", "SHELTER 099", "REL004", "PAN30", "4797007", "SFW40534", "ESP1003D", "ESP1003LP", "TTW77", "SDGBJ1304DI", "CST126CD", "KRANK071", "ALIENCD66", "KRANK213", "MRP023LP", "SUK 3015", "SUK 3020", "NNT005", "NNT006", "NNT008", "MUSLIMLIM018DI", "NNT009", "FABRIC200D", "MEDI069", "PFL152", "634457442627", "SS-027", "ATFA011DIG", "AMMP 791C", "SS-01", "CREP07", "MODE 68", "SF019LP", "SF078DI", "TRS08", "BLUME005", "BLUME006", "MM105", "HJP066", "HJRCD21", "HOS403", "HH666-220", "HOS302", "HOS321-322-323", "HOS319", "VICTO CD 089", "EMEGO124", "RS1003D", "HES013", "RAVE014", "IDEAL160", "PAN87", "HDB116D", "PAN73", "HALC004", "HALCSCREW", "ZENDNLS438", "DED004", "TLOITC001", "TRANS368", "BMD-2", "RS-03", "RS-04", "RS-01", "RS-07", "BMD-3", "BC-08", "BC-01", "BC-03", "BC-04", "HOS581", "PRESH003DA", "PRESH001D", "HERWL002", "HER015", "GCA006", "GCA008", "TFR001", "HATE002", "HATE005", "HATE001", "LOVE096", "LOVE101", "HOS-489", "K7319DTM", "HJP062", "CST081", "KRANK064", "KRANK161LP", "WARPCDD252", "HDB090D", "TEKLIFE001DL", "TEKLIFE006DL", "DM300", "DM069", "DM129 2016", "SP104", "DM262", "DM138", "DM056", "GL002", "LACR015", "SP2104", "SP1204", "DNC1206D", "DNC1205D", "LCTRAX004", "STRUT114CD", "HOS407", 
"HOS-477", "HOS406", "DIAG043", "REL1", "SND1SE", "SND7", "5055300374622", "DNC1203CD", "HDB091D", "RUG977D2", "S&C001", "RAMP066", "THRILL115LP", "DVBGPM274", "DVBGPM294", "DVGIL 017", "SATURN101477", "SATURN 521", "SATURN92074", "SATURN 406", "SATURN208", "SATURN 529", "8013252888267", "6749300", "KAT 00440", "N006", "NUM5077DIG1", "NUM75LP", "NUM5072DIG1", "NUM207DIG1", "NUM207.4DIG1", "GRVTS003", "3610155332573", "8013252886355", "DTD 053DI", "ISTUMM32", "HOS79", "HOS106", "HOS401", "ELP037", "LOVE094", "WAP381D", "WARPCDD92", "WAP63CDD", "WAP60CDD", "HDB013", "NONPLUS043", "HDB100D", "WARPCDD38", "WARPDD364-1", "WARPCDD234", "WAP64CDD", "WAP256CDD", "FALL06", "OZITDAND9991D", "HYH-091-2", "HH666-236", "IDEAL149", "NFP-08", "TRR268LP", "20620901CD", "TRR225CD", "20620601CD", "SV109", "BING114", "IMPREC 174DI", "ABD019DI", "TYPE113", "ABD040DI", "SV086", "ESP1006D", "DTD-37", "SBR3008CD", "DTD 023DI", "DTD-47", "DTD 040DI", "DTD-36", "DTD 033DI", "DTD 031DI", "DTD 032DI", "DTD 017DI", "DTD 041DI", "ICE013", "CST098CD", "VIPERDL128", "VIPERDL097", "VIPERDL095", "MRP067LP", "MRP060LP", "HEL 92092", "H09", "HEL93031", "PDZD", "PB08D", "LFDL002", "LFDL003", "WE 031 / AT", "WE 018 / GOT", "WE 024 / AM234", "WE 020 / GOT", "WE 035 / HN106", "HN101", "HN241", "HN208", "HN132", "CEJ10", "HN207", "AMAZON003", "AMAZON013", "AMAZON009", "AMAZON001", "AMAZON021", "ABD011DI", "IDEAL136", "TCD11", "BLACKTRUFFLE011CD", "VDSQ10", "EMEGO166", "EMEGO119", "LOAD126", "LOAD086", "LOAD031", "LOAD022", "LOAD062", "LOAD113", "LOAD082", "LOAD039", "LOAD081", "LOAD064", "LOAD078", "0309", "0305", "0303", "KFR2023", "KFR2021", "KFR2015", "KFR2028", "KFR2003", "KFR2036", "KFR2042", "KFR2027", "KFR10290", "W13DL4", "WCD007", "WC047D", "WC030D", "WC046D", "WCD057", "NG065", "PD351", "PRCD-4049", "PRCD-4070", "PRCD4063", "PRCD-1513", "PRCD-1510", "PRCD4003", "SATURN519", "SATURN19842", "HALC013", "ALT26", "HOS606", "LIES092", "LIES095", "LIES090", "DSRLP025", "GR128", "HJP77", "DO-ADM071", "PSCD99", "WIG140D", "SN11", "WIG89D", "WIG343D", "RUG421D", "WIG59D", "RUG462D", "WIGCD074", "RERUG010D", "REWIG84D", "REWIG83D", "REWIG82D", "HOLY1240", "DC674", "HYP007", "HDB110D", "HDB075", "HDBCD008", "DOR04", "SUK 3018", "SUK 3014", "SUK 3017", "RE022", "WARPDD287C", "MFM019", "MFM035", "BCBR", "ISTUMM44", "NSK1I", "HOS221", "HOS284", "HOS394", "HOS470", "HOS427", "HOS318", "HOS372", "2D01", "HOS260", "HOS336", "HOS341", "HOS263", "HOS337", "HOS-64", "HOS320", "2D18", "PTYT06", "PTYT034-12", "N-028", "FV43", "FV57", "E#21E", "FLUOR001", "TES 158LP", "PLAN-S 20TES.154", "FDR619", "FV83LP", "FV27LP", "FV701", "FV69CD", "FV75CD", "MFM030", "WAP394DD", "WAP425D", "WAP406DA", "EMEGO152", "P!?014", "P026", "KERN004LP", "UTTU001", "UTTU_025", "K009", "ZIQ364", "ZIQ366", "5055300383013", "FTS5LP", "WINO002", "HDBLP002", "AMB3922LP", "HDB080", "YT098DS", "FTDIGI002", "NHS336DD", "METH015", "SNKR014", "PAN82", "RAVE011", "IDEAL133", "HVALUR2CD", "N-041", "N-039", "N-045-2", "N-031", "N-030", "N-044-2", "APATHYXXX", "HDBCD037DS", "NHS002", "DECISIONS12", "SHAM002DI", "AFCD012", "BTDL83", "OR001", "634457442610", "XLDS675EP", "AST027", "FUCKPUNK002", "PMAWS003", "GCA007", "TEC084D", "LOVE102", "634457458963", "VOD121.1RE", "ARCHIVE 020DI", "VOD121.5RE", "PAN74", "CJFD20", "E218", "E62", "E209", "E228", "E220", "E219", "E224", "WTN52", "ANM029", "HALC010", "EXIT077", "FUR101", "PAN59", "E197", "E171", "BKV015", "BKV012", "E200", "ASH113", "ASH49", "SV049", "TOE-CD-805", "LOVE033", "PR000", "PAN64", "LOVE103", "LOVE071", "TRIANGLE21", 
"WAP315DV", "COY006", "FOCUS1666666", "ILTECH010", "ILTECH007", "NAIL007", "AVN0025", "AVN031", "AVN022", "TYPE111", "DDS025", "655035002118", "655035037417", "RAVEDIT", "RAVE017", "RAVE024", "WMA / HIT022", "NSFM001", "NSFM002", "MAN101", "MAN 104", "MAN076", "RTRADDA880", "RTRADDS859", "MEDI096", "MEDI099", "IMRV013", "IMRV015", "BOKA044D", "HATE003", "TRI029LP", "LOVE024", "20620703CD", "TYPE064", "LSINV203LP", "SBR057LP", "SBR071CD", "SBR327LP", "MEOW186", "PP012", "VIS002", "HJP056", "HJP055", "655035008219", "655035007243", "BLEED003", "ANM009", "ANM006", "SS001", "PAN26", "HOS345", "HOS-448", "HOS-411CD", "HOS134", "ESP4058D", "ESP5011D", "RS67", "IMPREC203DI", "VON0172LP", "LOVE109", "KAT 256", "ARCHIVEFOURTYEIGHT", "ARCHIVE017DI", "BLACKESTDL008", "BLACKESTDL017", "DNLP2R", "AM-0", "AM-2", "TBD036", "CST137LP", "OTCR12010", "OTCR12003", "SRV411", "JNR275-8DIG1", "SOMA028", "SUC12", "SA038", "CREP51", "ALE007", "HABIBI007", "AALP084", "UNROCKLP007", "DOSER032", "AKUCD1004", "ADN001", "AKU1001", "OSTLP003", "ASS003", "MRBLP150", "BH31", "CLPM_R003", "CREP42", "SS-041", "JBLP001", "HJP57", "CREP35", "CREP33", "SF106LP", "SF074DI", "HJRCD48", "SOMA022", "SUNN31LP", "GZH084DP D", "1442TP7DL", "1444TP7DL", "MUS170", "MUS118", "MUS154", "ML-020", "FUCKPUNK009", "IDEAL167", "ZIQ356", "ZIQ368", "ZIQ384", "SNKRXDSK001", "SNKR008", "SNKR015", "UTTU_007", "UTTU_008", "VER083", "S&C006", "HDBLP024", "HDB082", "WIGLP094", "STH2183A", "HJRLP201", "HJRLP75", "HJRLP74", "HJP083", "PRB017", "HOS-486", "ZHARK0024", "ZHARK028", "ZHARK0030", "FANTOM 001", "VF271", "K7308", "K7S350D", "SR423", "180GLP02", "SCTR007", "SCTR042", "LITA146LP", "DEATH015LP", "DEATH024", "N49LP", "OMLP10", "PAN52", "XLDA834", "LISTUMM374", "4050538407617", "PAN66", "PAN53CD", "EMEGO202", "TRI046lP-C1", "3299039990322", "WARPDD9RX", "MUSLIMLIM030DI", "ARCHIVE025DI", "ARCHIVE002DI", "ARCHIVE027DI", "ARCHIVEFORTYTWO", "LOVE080", "HDB069", "HOS360", "TYPE105", "HOS424B", "KEM001", "SRV115", "EAD3119A", "PAN35", "HOS354", "HOS-592", "RAVE007", "RAVE022", "RAVE012", "RAVE025-COL", "HOS-498", "BK12X1204", "SP-56", "REAP001", "MSCUM001", "APCO-018", "APCO006", "HOS-435", "HOS-591CD", "HOS391", "HOS-493BLACK", "EDLX048LP", "EDLX053", "DNUM01", "HJP52", "FABRIC109D", "WARPLP247", "WARPDD297B", "KALKCD59", "TYPE080", "VHSX012", "LOVE086", "LOVE1005TESPRESSINGS", "EDRM427", "GM030", "EWR 0403/4", "BNSD018", "EF107", "HN 129", "HN143", "HN195", "TRO-275", "HN 204", "HN 066", "FJORD 006", "PRAXIS56", "102DSR/CFC-CD2", "DSR-X13", "SS067/68", "NW004", "FADELP002", "BD270", "HALC005", "HALC006", "HALC003", "BZH003", "GRVTS007", "OTRLP010", "GRVTS009", "GRVTS011", "DRUGCDR8", "USLP58", "SJRLP418", "HJRCD42", "DB164", "DB165", "AA 072", "AALP086", "AACD 068", "AACD 066", "AA064", "AACD087/AALP087", "BRG005", "MOTEER SAMPLER002", "BRG009", "SIGE 056", "THRILL434LP", "THRILL417LP", "SYR09", "GOO017CD", "GOO14CD", "TG141-5", "TG211-5", "TG184-6", "TG185-6", "TG218-6", "LM029D", "TRI037LP", "TRI022DIG1", "TRI022LP", "TRI008CD", "TRIANGLE34", "TRI040LP", "ONUDL0005", "ONUDL0011", "ONUDL1012", "ONULP18", "ONUDL136", "SWINGTING013", "MEDI097", "MEDI095", "SWINGTING011", "SWINGTING005", "MIX067", "LIES050", "LIES054", "LOL003", "ESTY005", "000000004", "STRUT098CDX", "STRUT195D", "STRUT056CD", "AA 073", "WCD070", "THRILL-256", "QTT11", "QTT5", "ONOTESLA001", "TESLATAPES013", "TESLA019", "RS73", "ABD006DI", "ABD023DI", "ABDT049DI", "ABD007DI", "AU10072", "ABD003DI", "ABD040DI", "ABD030DI", "ABDMAJ7002DI", "ABD050DI", "ABDT052DI", "ABDT053DI", "H GGA", 
"H FREAKED", "H RIP", "OQKO_009", "BDNOG003", "DC 458", "CZSZ019", "CZSZ023", "CZSZ013", "CZSZ016", "CZSZ018", "CZSZ017", "CZSZ012", "CZSZ009", "PRTLS017", "PRTLS013", "SLCD015", "SLCD028", "SLCD022", "PR008CDLP", "PAN54", "PR017", "PR017", "6MB006", "ENO2LP6", "EAD3614A", "STUMM386", "DC 360", "DC701", "SAHKO-020", "DC 494", "DC 637", "DC 513", "PROFAN034", "SBR166LP", "HEL92111", "SP009", "HJPCD78", "HDBCD021", "none (https://ascetichouse.bandcamp.com/album/hallucinognosis-loops)", "none (https://ascetichouse.bandcamp.com/album/morphism)", "none (https://ascetichouse.bandcamp.com/album/global-ecophony-audio-transmissions-from-the-exhibition)", "HN213", "none (https://hansonrecords.bandcamp.com/album/medicine-stunts)", "HN 241", "HN193", "HN174", "HN124", "LA 013", "LA 017", "MR-036", "MRL-004", "LAC-025", "LAC-038", "LA-011", "SPV 089-47972", "none (https://soundcloud.com/aids3d/cry-4-u)", "none (https://soundcloud.com/aids3d/aids-3d-raw-live-janus-better)", "none (http://www.s4lem.com/mixtapes/SALEM_TheFader_WeMakeItGood_MixSeries_Vol_11.mp3)", "none (http://www.s4lem.com/mixtapes/SALEM_RaverStayWifMe.mp3)", "none (http://www.s4lem.com/mixtapes/SALEM_onagainoffagainMIXTAPE.mp3)", "HG1704", "HG1702", "HG1706", "SBELLE002", "HDB094D", "HDBLP041", "HEK005", "MEA021", "FB01", "HG004", "FAIT-15", "FAIT12", "BACK07", "FAIT-13LP", "SEXES02", "BH029", "BH049", "DDS024", "KNV002", "PI12", "PI007", "DAIS121", "BKV022", "BKV019", "DPY001", "BKV006", "SR054", "MEOW188", "UIQLP003", "UIQ009", "UIOQINV001", "UIQ007", "LOKI-029", "CB023", "RDLP03", "WARP 011", "FAKE-008", "none (https://mnoad.bandcamp.com/album/dream-defenders)", "none (https://cedricthimon.bandcamp.com/album/chris-corsano-cedric-thimon)", "TLV-MCDR005", "none (https://absurdexposition.bandcamp.com/album/montreal-2018)", "INO-100", "none (https://absurdexposition.bandcamp.com/album/the-rita-worker)", "HOS-598", "MRP111LP", "ROWF49D", "GJ1007", "GJ1015", "GJ1001", "GJ1004", "GJ1005", "GJ1003", "GJ1016", "GJ1010", "WRND001", "GJ1008", "REWIG55D", "REWIG57D", "REWIG54D", "none (https://boomkat.com/products/american-primitive-the-complete-recordings)", "none (https://boomkat.com/products/sonny-rollins-in-france-aix-en-provence-1959)", "none (https://boomkat.com/products/mississippi-blues)", "GR-003", "GR-015", "GR-004", "TYPE060", "KFR2006", "5060091554634", "DA030", "SNDWCD028", "BLKRTZ008CD", "BLKRTZ 017 D", "127067", "TPT037", "THRILL-050", "TG132-6", "TG064LP", "AST026", "TR257", "TR255", "TRCD9075DD", "TR246", "TR250", "TR249", "TR245", "TR247", "TR234", "TR248", "LIES019.5", "TR239", "TR1043", "CJFD23", "CJFD30", "CJFD28", "CJFD31", "DM126", "CUR6849", "STRUT120LP", "STRUT144LP", "TIGM003", "STRUT089CD", "STRUT063CD", "none (https://gangstalkers.bandcamp.com/album/bricks-in-a-drought)", "none (https://gangstalkers.bandcamp.com/album/excommunicate)", "DS1", "ATFA020CD", "BB002D", "IL2036D", "CDRW59D", "DEATH004", "LDN047", "SMNCHR007", "PMAWS006", "PMAWS010", "ZEHNIN05D", "PMAWS007", "WHYTDS002", "SLP036", "HOS-456", "ELP027", "ELP038", "ELP018", "E012", "ELP09", "WS05", "634457443044", "WAP401DD", "R-N116", "R-N082", "GR079", "GR145", "GR141", "none (https://boomkat.com/products/dub-kings-king-jammy-at-king-tubby-s-70363524-739d-442d-ae24-9e776503f362)", "ZENDNLS298", "ONUDL9004", "GRE2031", "GR032", "GR121", "BRJT-0009", "W1383", "W-1002", "none (https://boomkat.com/products/the-leader-for-the-pack-sugar-minott-friends)", "GRE1082", "GR135", "GR136", "USD51", "FRKWYS09", "FRKWYS14CD", "RERVNG6LP", "FRKWYS10LP", "BKEDIT010", 
"MCR00002", "STRUT180D", "STRUT66LP", "STRUT123CD", "STRUT100CDX", "STRUT091EP", "STRUT095CDX", "8013252888236", "BBE453ADG", "KRANK210LP", "LAUNCH152", "LACR024", "LACR022", "LACR012", "LIES017", "DABJ-1202", "DABJ-1208", "DABJ-LP-001", "DABJ-1216", "DABJ-1227", "CJFD21", "IDEAL142", "EEAOA048", "H SOFT PUNK", "PAN 18", "MODE 56", "RPA 031", "RPA 077", "PD256", "ARTYARD101", "TO53", "PIASR241DB1", "PIASD4804D", "UMI_00894807002271", "ZORN49LP", "ZORN54LP", "CZ015", "SAHKO612", "PUU45", "SAHKO029", "SUBCD002", "PAN24", "HDB055", "HEK013", "LOVE111", "BK8849", "EMEGO160", "EMEGO242", "SKA028", "BDH001", "EXIT040"]
while len(text) > 10:
text.remove(random.choice(text))
print "\noutput:\n\n" + \
textwrap.fill(" :: ".join(text), 40) + random.choice([" /"]) + "\n"
urls = ['https://www.boomkat.com']
for url in urls:
webbrowser.open_new_tab(url)
| [
"[email protected]"
] | |
3c709a74c969ea58828797b91e149cd24b24c010 | 1cf26664b6b7c3737e62fdc3f5869e30c8ccfddb | /project/automation/test/dwdw.py | 176318398d3e6a96dfe7cff0fa54760f7d407662 | [] | no_license | yuku123/py_ex_all | 0c7429ed69fed37244a6dd8c3d445622e8e8e690 | 3152dbaf92e4beecad132b47bdc7a58058417bfd | refs/heads/master | 2021-06-24T06:41:06.453400 | 2021-01-15T12:26:09 | 2021-01-15T12:26:09 | 180,709,894 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | import time
import pygame
file=r'/Users/zifang/Music/网易云音乐/t.mp3'
pygame.mixer.init()
print("Playing music 1")
track = pygame.mixer.music.load(file)
pygame.mixer.music.play()
time.sleep(10)
pygame.mixer.music.stop() | [
"[email protected]"
] | |
6e9de5eadc04eb7c34af1ec1e2845bf529ed5b9c | 4ed9e987975f27017a615761da717e48eb09667c | /controller_modes/web_mode.py | 0307001f5ee00ca222042b54122294b31019fc9d | [] | no_license | deenski/desktop_peripherals | a1380879bdb681be01a10aabfbc4cf9fa06c0811 | 53ae17934e75aeca953e326a2dcb4a278b677b5d | refs/heads/main | 2023-02-22T03:58:27.496107 | 2021-01-18T19:14:55 | 2021-01-18T19:14:55 | 330,761,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | def get():
return {
"mode": "web",
't1:v0': 'https://google.com',
't1:v1': 'https://mail.google.com',
't1:v2': 'https://calendar.google.com',
't1:v3': 'https://domains.google.com',
't1:v4': 'https://twitter.com',
't1:v5': 'https://reddit.com',
't1:v6': 'https://pro.coinbase.com/trade/XLM-USD',
't1:v7': 'https://status.gitlab.com'
} | [
"[email protected]"
] | |
070c621faf5ceb32087b3f9ce6bbc2e3b4b100f7 | 72f1641d8965ebc5b481a9f6d02143fed80f8d27 | /labb2-5a.py | 7e7eb81b50d8b76010e627f6e8f6e665f3fdd7bc | [] | no_license | carfri/D0009E-lab | ea726c84e0ad3433313bc789f52bd4000a18cc3e | 267ea662f497a2c53a1607c635d8d891354f1a45 | refs/heads/master | 2021-09-03T06:38:14.640774 | 2018-01-06T14:11:29 | 2018-01-06T14:11:29 | 116,487,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | import math
def derivative(f, x, h):
k =(1.0/(2*h))*(f(x+h)-f(x-h))
return k
##print derivative(math.sin, math.pi, 0.0001)
##print derivative(math.cos, math.pi, 0.0001)
##print derivative(math.tan, math.pi, 0.0001)
def solve(f, x0, h):
lastX = x0
new = 0.0
    # iterate until Newton's update changes the estimate by less than the tolerance h
    while abs(lastX - new) > h:
new = lastX
lastX = lastX - f(lastX)/derivative(f, lastX, h)
return lastX
def function1(x):
return x**2-1
def function2(x):
return 2**x-1
def function3(x):
    return x - math.e**(-x)
print solve(function2, 4, 0.00001)
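# Added sanity check (not part of the original lab): Newton's method on
# function2(x) = 2**x - 1 should converge towards the analytic root x = 0,
# since 2**0 - 1 = 0, so the returned value is expected to be close to zero.
assert abs(solve(function2, 4, 0.00001)) < 0.01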
| [
"[email protected]"
] | |
babee140686adf70f2854c94b0200cb7bec97291 | b28c81feceac506df581d45cb139c4d5ed6d5d1d | /Problem-Set-7/ps7.py | 48227e4fca06c61651c9e278eed401066d66fd35 | [] | no_license | knnaraghi/MIT-6.00x | b2789f8b3160dbc1c7932e640a34221f2e474974 | 4e0126a128eb102211743b5349f6e28ab03110ef | refs/heads/master | 2021-01-10T08:28:55.556679 | 2016-01-02T20:19:59 | 2016-01-02T20:19:59 | 48,899,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,942 | py | # 6.00.1x Problem Set 7
# RSS Feed Filter
import feedparser
import string
import time
from project_util import translate_html
from Tkinter import *
#-----------------------------------------------------------------------
#
# Problem Set 7
#======================
# Code for retrieving and parsing RSS feeds
# Do not change this code
#======================
def process(url):
"""
Fetches news items from the rss url and parses them.
Returns a list of NewsStory-s.
"""
feed = feedparser.parse(url)
entries = feed.entries
ret = []
for entry in entries:
guid = entry.guid
title = translate_html(entry.title)
link = entry.link
summary = translate_html(entry.summary)
try:
subject = translate_html(entry.tags[0]['term'])
except AttributeError:
subject = ""
newsStory = NewsStory(guid, title, subject, summary, link)
ret.append(newsStory)
return ret
#======================
#======================
# Part 1
# Data structure design
#======================
# Problem 1
class NewsStory(object):
def __init__(self, guid, title, subject, summary, link):
"""create a class with these five attributes"""
self.guid = guid
self.title = title
self.subject = subject
self.summary = summary
self.link = link
def getGuid(self):
"""return self Guid"""
return self.guid
def getTitle(self):
"""return Title"""
return self.title
def getSubject(self):
"""return Subject"""
return self.subject
def getSummary(self):
"""return summary"""
return self.summary
def getLink(self):
"""return link"""
return self.link
#======================
# Part 2
# Triggers
#======================
class Trigger(object):
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
raise NotImplementedError
# Whole Word Triggers
# Problems 2-5
class WordTrigger(Trigger):
def __init__(self, word):
self.word = word
# function to check if word is in text
def isWordIn(self, text):
#punctuation string
punctuation = string.punctuation
#lowercase the text
lowercase_text = text.lower()
#lowercase word
lowercase_word = self.word.lower()
for i in punctuation:
if i in lowercase_text:
lowercase_text = lowercase_text.replace(i, ' ')
lowercase_text = lowercase_text.split(' ')
return lowercase_word in lowercase_text
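    # Illustrative example (not part of the original problem set):
    # WordTrigger("purple").isWordIn("Purple cow!") returns True, because the
    # text is lowercased and punctuation is replaced by spaces before the
    # whole-word check, while isWordIn("purplish cows") returns False since
    # "purple" only appears there as part of a longer word.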
class TitleTrigger(WordTrigger):
def evaluate(self, story):
return self.isWordIn(story.getTitle())
class SubjectTrigger(WordTrigger):
def evaluate(self, story):
return self.isWordIn(story.getSubject())
class SummaryTrigger(WordTrigger):
def evaluate(self, story):
return self.isWordIn(story.getSummary())
# Composite Triggers
# Problems 6-8
class NotTrigger(Trigger):
def __init__(self, trigger):
self.trigger = trigger
def evaluate(self, story):
return not self.trigger.evaluate(story)
class AndTrigger(Trigger):
def __init__(self, trigger1, trigger2):
self.trigger1 = trigger1
self.trigger2 = trigger2
def evaluate(self, story):
return self.trigger1.evaluate(story) and self.trigger2.evaluate(story)
class OrTrigger(Trigger):
def __init__(self, trigger1, trigger2):
self.trigger1 = trigger1
self.trigger2 = trigger2
def evaluate(self, story):
return self.trigger1.evaluate(story) or self.trigger2.evaluate(story)
# Phrase Trigger
# Question 9
class PhraseTrigger(Trigger):
def __init__(self, phrase):
self.phrase = phrase
def evaluate(self, story):
return self.phrase in story.getSubject() or self.phrase in story.getTitle() or self.phrase in story.getSummary()
#======================
# Part 3
# Filtering
#======================
def filterStories(stories, triggerlist):
"""
Takes in a list of NewsStory instances.
Returns: a list of only the stories for which a trigger in triggerlist fires.
"""
#make an empy list
new_stories = []
#iterate over stories
for story in stories:
#iterate over triggers
for trigger in triggerlist:
#if the trigger is in story
if trigger.evaluate(story):
#append to the list
new_stories.append(story)
#key part of loop ---> break when append once
break
stories = new_stories
return stories
#======================
# Part 4
# User-Specified Triggers
#======================
def makeTrigger(triggerMap, triggerType, params, name):
"""
Takes in a map of names to trigger instance, the type of trigger to make,
and the list of parameters to the constructor, and adds a new trigger
to the trigger map dictionary.
triggerMap: dictionary with names as keys (strings) and triggers as values
triggerType: string indicating the type of trigger to make (ex: "TITLE")
params: list of strings with the inputs to the trigger constructor (ex: ["world"])
name: a string representing the name of the new trigger (ex: "t1")
Modifies triggerMap, adding a new key-value pair for this trigger.
Returns a new instance of a trigger (ex: TitleTrigger, AndTrigger).
"""
if triggerType == "TITLE":
triggerMap[name] = TitleTrigger(params[0])
elif triggerType == "SUBJECT":
triggerMap[name] = SubjectTrigger(params[0])
elif triggerType == "SUMMARY":
triggerMap[name] = SummaryTrigger(params[0])
elif triggerType == "NOT":
triggerMap[name] = NotTrigger(triggerMap[params[0]])
elif triggerType == "AND":
triggerMap[name] = AndTrigger(triggerMap[params[0]], triggerMap[params[1]])
elif triggerType == "OR":
triggerMap[name] = OrTrigger(triggerMap[params[0]], triggerMap[params[1]])
elif triggerType == "PHRASE":
triggerMap[name] = PhraseTrigger(" ".join(params))
return triggerMap[name]
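# Illustrative usage (added for clarity, mirroring what readTriggerConfig does
# with config lines such as "t1 TITLE Obama" and "t3 AND t1 t2"):
# trigger_map = {}
# makeTrigger(trigger_map, "TITLE", ["Obama"], "t1")
# makeTrigger(trigger_map, "SUBJECT", ["Romney"], "t2")
# makeTrigger(trigger_map, "AND", ["t1", "t2"], "t3")  # t3 fires only if both fire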
def readTriggerConfig(filename):
"""
Returns a list of trigger objects
that correspond to the rules set
in the file filename
"""
# Here's some code that we give you
# to read in the file and eliminate
# blank lines and comments
triggerfile = open(filename, "r")
all = [ line.rstrip() for line in triggerfile.readlines() ]
lines = []
for line in all:
if len(line) == 0 or line[0] == '#':
continue
lines.append(line)
triggers = []
triggerMap = {}
# Be sure you understand this code - we've written it for you,
# but it's code you should be able to write yourself
for line in lines:
linesplit = line.split(" ")
# Making a new trigger
if linesplit[0] != "ADD":
trigger = makeTrigger(triggerMap, linesplit[1],
linesplit[2:], linesplit[0])
# Add the triggers to the list
else:
for name in linesplit[1:]:
triggers.append(triggerMap[name])
return triggers
import thread
SLEEPTIME = 60 #seconds -- how often we poll
def main_thread(master):
# A sample trigger list - you'll replace
# this with something more configurable in Problem 11
try:
# These will probably generate a few hits...
t1 = TitleTrigger("Obama")
t2 = SubjectTrigger("Romney")
t3 = PhraseTrigger("Election")
t4 = OrTrigger(t2, t3)
triggerlist = [t1, t4]
# TODO: Problem 11
# After implementing makeTrigger, uncomment the line below:
triggerlist = readTriggerConfig("triggers.txt") #change to proper directory
# **** from here down is about drawing ****
frame = Frame(master)
frame.pack(side=BOTTOM)
scrollbar = Scrollbar(master)
scrollbar.pack(side=RIGHT,fill=Y)
t = "Google & Yahoo Top News"
title = StringVar()
title.set(t)
ttl = Label(master, textvariable=title, font=("Helvetica", 18))
ttl.pack(side=TOP)
cont = Text(master, font=("Helvetica",14), yscrollcommand=scrollbar.set)
cont.pack(side=BOTTOM)
cont.tag_config("title", justify='center')
button = Button(frame, text="Exit", command=root.destroy)
button.pack(side=BOTTOM)
# Gather stories
guidShown = []
def get_cont(newstory):
if newstory.getGuid() not in guidShown:
cont.insert(END, newstory.getTitle()+"\n", "title")
cont.insert(END, "\n---------------------------------------------------------------\n", "title")
cont.insert(END, newstory.getSummary())
cont.insert(END, "\n*********************************************************************\n", "title")
guidShown.append(newstory.getGuid())
while True:
print "Polling . . .",
# Get stories from Google's Top Stories RSS news feed
stories = process("http://news.google.com/?output=rss")
# Get stories from Yahoo's Top Stories RSS news feed
stories.extend(process("http://rss.news.yahoo.com/rss/topstories"))
# Process the stories
stories = filterStories(stories, triggerlist)
map(get_cont, stories)
scrollbar.config(command=cont.yview)
print "Sleeping..."
time.sleep(SLEEPTIME)
except Exception as e:
print e
if __name__ == '__main__':
root = Tk()
root.title("Some RSS parser")
thread.start_new_thread(main_thread, (root,))
root.mainloop()
| [
"[email protected]"
] | |
0abef404fcc2fa915e6f66f2781b84dcca1baef2 | 834d61ad7839112b32061fa46ee0b30830cc4039 | /bsp/apm32/apm32f072vb-miniboard/board/SConscript | 5dabcec176d2c17c04036c3a64461e7a474f3035 | [
"Zlib",
"LicenseRef-scancode-proprietary-license",
"MIT",
"BSD-3-Clause",
"X11",
"BSD-4-Clause-UC",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | GitHubIDforRoger/rt-thread | d2001554e088f5e0b3fc8dfc8334e8b131697419 | ae78aedface2dc577803a078a6bfacb3c7cebfc2 | refs/heads/master | 2023-02-06T03:12:35.103940 | 2023-01-31T10:33:13 | 2023-02-01T02:32:55 | 166,341,266 | 1 | 0 | Apache-2.0 | 2019-01-18T04:00:27 | 2019-01-18T04:00:26 | null | UTF-8 | Python | false | false | 993 | import os
import rtconfig
from building import *
Import('SDK_LIB')
cwd = GetCurrentDir()
# add general drivers
src = Split('''
board.c
''')
path = [cwd]
startup_path_prefix = SDK_LIB
if rtconfig.PLATFORM in ['armcc', 'armclang']:
src += [startup_path_prefix + '/APM32F0xx_Library/Device/Geehy/APM32F0xx/Source/arm/startup_apm32f072.s']
if rtconfig.PLATFORM in ['iccarm']:
src += [startup_path_prefix + '/APM32F0xx_Library/Device/Geehy/APM32F0xx/Source/iar/startup_apm32f072.s']
if rtconfig.PLATFORM in ['gcc']:
src += [startup_path_prefix + '/APM32F0xx_Library/Device/Geehy/APM32F0xx/Source/gcc/startup_apm32f072.S']
# APM32F030x6 || APM32F030x8 || APM32F030xC
# APM32F051x6 || APM32F051x8
# APM32F070xB
# APM32F071x8 || APM32F071xB
# APM32F072x8 || APM32F072xB
# APM32F091xB || APM32F091xC
# You can select chips from the list above
CPPDEFINES = ['APM32F072xB']
group = DefineGroup('Drivers', src, depend = [''], CPPPATH = path, CPPDEFINES = CPPDEFINES)
Return('group')
| [
"[email protected]"
] | ||
92a7bf4da11be62b91aefa74217e407b45696494 | 0ee6fdfdd8d36cbabbb072896735364c286f5790 | /phoobe/__init__.py | b2f9d416029e684bb4de6b57b43b3e518f09a0b9 | [
"Apache-2.0"
] | permissive | berendt/phoobe | ce3992803093986d66b5d122d30d1241aefafc41 | dc49b4ce2cdf247c92e76f775970decd28c7eead | refs/heads/master | 2021-01-17T08:02:52.747683 | 2015-06-02T07:33:00 | 2015-06-15T21:37:27 | 36,715,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__all__ = ['__version__']
import pbr.version
version_info = pbr.version.VersionInfo('phoobe')
try:
__version__ = version_info.version_string()
except AttributeError:
__version__ = None
| [
"[email protected]"
] | |
bd5e30edab766b075a6fb46fc70cce8b37cba047 | c224ca07e54dbff06704a2a841871ac4bd404c63 | /stock/migrations/0002_stock_created_at.py | 825b31b40d46dbe4a76ad450caa56fb4e355624f | [] | no_license | kausaur/trading | 9fbac201363cdd133566f2bc8c32c3c68129a273 | 75e556dd129c9453b25bddeecdd1c03d19d9a6ca | refs/heads/main | 2023-05-05T14:18:04.752320 | 2021-05-28T19:11:57 | 2021-05-28T19:11:57 | 370,021,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | # Generated by Django 3.1.7 on 2021-05-08 09:12
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stock', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='stock',
name='created_at',
field=models.DateTimeField(default=datetime.datetime.now),
),
]
| [
"[email protected]"
] | |
7b3e58cda801fe3cc15d924b5baadfbc9834aa4d | d90d8422379e5de81e00cab939289c30721aec16 | /Linked List Cycle.py | 07ebd01c5b8663f81a292248a1c89ee68d8ad41b | [] | no_license | csvenja/LeetCode | 6a8f41cc205b494d6a50cdaa7c54db807a8fd553 | 034a6b52b847de7b12b81fbe32db64936f1f97b8 | refs/heads/master | 2016-09-10T21:46:22.478216 | 2014-08-01T04:04:40 | 2014-08-01T04:04:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | # Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @param head, a ListNode
# @return a boolean
def hasCycle(self, head):
fast = head
slow = head
while fast is not None and fast.next is not None:
fast = fast.next.next
slow = slow.next
if fast is slow:
return True
return False
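# Floyd's cycle detection in brief: the fast pointer gains one node on the slow
# pointer every step, so if a cycle exists the gap between them shrinks to zero
# and they must meet inside the cycle; if there is no cycle the fast pointer
# simply reaches the end of the list and the loop above returns False.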
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
#head.next.next.next = head.next
result = Solution()
print result.hasCycle(head)
| [
"[email protected]"
] | |
e0fafbf80e7c49525a2685fb55ae4c722452fa45 | c5986b13cccb77e9e82eea61f68cf1b9613a94a6 | /calculator.py | d17b3a5cc83a5f945c85115c3b7b98988f952de8 | [] | no_license | abhilashasokan/learnpython | 91d095aed73730837b5587c2c98d7eb44a4ad2f2 | 1ba94d77e2610b0b2d8ca2a0b007af3012846894 | refs/heads/master | 2020-03-28T03:38:43.083619 | 2018-09-07T12:59:51 | 2018-09-07T12:59:51 | 147,660,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | num1 = float(input("Enter the first number: "))
op = input("Enter the operation [+, -, /, *]: ")
num2 = float(input("Enter the second number: "))
if op == "+":
print(num1 + num2)
elif op == "-":
print(num1 - num2)
elif op == "/":
print(num1 / num2)
elif op == "*":
print(num1 * num2)
else:
print("Invalid operator!") | [
"[email protected]"
] | |
a77dc857467c4d4b3e09b57d319a26b8677f6534 | 8e95e79840005f6c34dfb978e8fe6e0ec4f7f643 | /2_statistical-thinking-in-python-part-1_/6_Computing_means_calcular la mediaMediana.py | 89ea0312977c4b543c7b127c7672e4baf5f878fa | [] | no_license | Naysla/Machine_Learning | a0593cac41ef1561f14bec55780570b82fc37720 | e75d5cd2894ccb005228ab3da87dde9025385a08 | refs/heads/master | 2023-02-01T17:19:32.413609 | 2020-12-22T20:36:45 | 2020-12-22T20:36:45 | 323,708,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | #Computing means
#The mean of all measurements gives an indication of the typical magnitude of a measurement. It is computed using np.mean().
# Compute the mean: mean_length_vers
# compute the mean
import numpy as np
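# Note: versicolor_petal_length is assumed to be provided by the exercise
# environment (it is not defined in this snippet). The fallback below is only
# an illustrative stand-in with made-up values so the snippet runs on its own.
try:
    versicolor_petal_length
except NameError:
    versicolor_petal_length = np.array([4.7, 4.5, 4.9, 4.0, 4.6])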
mean_length_vers= np.mean(versicolor_petal_length)
# Print the result with some nice formatting
print('I. versicolor:', mean_length_vers, 'cm')
# returns the mean | [
"[email protected]"
] | |
b045067e544e5dd53cc34403d0d68f38ae87d202 | 0b77a118d788d23301e03904ce59aca42c007782 | /bfs_AI.py | 03df6bb1e88b07a33a86099cd6ace70221733658 | [
"MIT"
] | permissive | Ishaan75/Hacktoberfest2020-1 | d3b3082c2e518f412f45d53dd8fed2cd2871a9d0 | d8c6d5a912b010fb079619dd3e5fc084ec67ad82 | refs/heads/master | 2022-12-19T16:04:23.832448 | 2020-10-15T12:32:57 | 2020-10-15T12:32:57 | 300,163,099 | 0 | 0 | null | 2020-10-01T05:56:22 | 2020-10-01T05:56:21 | null | UTF-8 | Python | false | false | 5,148 | py | initial_state = [[2,0,3],[1,8,4],[7,6,5]]
final_state = [[1,2,3],[8,0,4],[7,6,5]]
val = 0
#checking whether the inital abd final state are Same or not
def areSame(A,B):
for i in range(3):
for j in range(3):
if (A[i][j] != B[i][j]):
return False
return True
# counting the number of mismatches
def mismatches(A,B):
count_different = 0
for i in range(3):
for j in range(3):
if (A[i][j]!=B[i][j] and A[i][j]!=0):
count_different += 1
return count_different
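# Worked example: for the initial_state and final_state defined above, the
# heuristic is mismatches(initial_state, final_state) == 3, since tiles 2, 1
# and 8 are out of place and the blank (0) is deliberately never counted.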
# movements of the 0 box in all directions
# in this I have simply swappped the 0 box with the correct direction and then counted mismatches .
def moveDown(initial_state,row_index,column_index):
initial_state[row_index][column_index] , initial_state[row_index+1][column_index] = initial_state[row_index+1][column_index],initial_state[row_index][column_index]
different_elements = mismatches(initial_state,final_state)
initial_state[row_index+1][column_index],initial_state[row_index][column_index]=initial_state[row_index][column_index] , initial_state[row_index+1][column_index]
return different_elements
def moveUp(initial_state,row_index,column_index):
initial_state[row_index][column_index] , initial_state[row_index-1][column_index]= initial_state[row_index-1][column_index], initial_state[row_index][column_index]
different_elements=mismatches(initial_state,final_state)
initial_state[row_index-1][column_index], initial_state[row_index][column_index]=initial_state[row_index][column_index] , initial_state[row_index-1][column_index]
return different_elements
def moveLeft(initial_state,row_index,column_index):
initial_state[row_index][column_index], initial_state[row_index][column_index-1] = initial_state[row_index][column_index-1], initial_state[row_index][column_index]
different_elements=mismatches(initial_state,final_state)
initial_state[row_index][column_index-1], initial_state[row_index][column_index]=initial_state[row_index][column_index], initial_state[row_index][column_index-1]
return different_elements
def moveRight(initial_state,row_index,column_index):
initial_state[row_index][column_index] , initial_state[row_index][column_index+1] = initial_state[row_index][column_index+1],initial_state[row_index][column_index]
different_elements=mismatches(initial_state,final_state)
initial_state[row_index][column_index+1],initial_state[row_index][column_index]=initial_state[row_index][column_index] , initial_state[row_index][column_index+1]
return different_elements
parent = 65
open_list = []
parent_heuristic= mismatches(initial_state,final_state)
node = (chr(parent),parent_heuristic)
open_list.append(node)
closed_list = []
while areSame(initial_state,final_state)!=True:
entry_one =1000
entry_two = 1000
entry_three = 1000
entry_four = 1000
#this is the shorthand code for finding the index of 0 box , i copied it from stack overflow
index = [(index, row.index(val)) for index, row in enumerate(initial_state) if val in row]
(row_index , column_index) = index[0]
if row_index != 0:
entry_one= moveUp(initial_state,row_index,column_index)
if column_index!=0:
entry_four= moveLeft(initial_state,row_index,column_index)
if column_index != 2:
entry_two= moveRight(initial_state,row_index,column_index)
if row_index != 2:
entry_three = moveDown(initial_state,row_index,column_index)
# finding the minimum heuristic value
min_entry = min(entry_one,entry_two,entry_three,entry_four)
check_list = []
check_list.append(entry_one)
check_list.append(entry_two)
check_list.append(entry_three)
check_list.append(entry_four)
if min_entry == entry_one:
initial_state[row_index][column_index] , initial_state[row_index-1][column_index]= initial_state[row_index-1][column_index], initial_state[row_index][column_index]
elif min_entry == entry_two:
initial_state[row_index][column_index] , initial_state[row_index][column_index+1] = initial_state[row_index][column_index+1],initial_state[row_index][column_index]
elif min_entry == entry_three:
initial_state[row_index][column_index] , initial_state[row_index+1][column_index] = initial_state[row_index+1][column_index],initial_state[row_index][column_index]
else:
initial_state[row_index][column_index], initial_state[row_index][column_index-1] = initial_state[row_index][column_index-1], initial_state[row_index][column_index]
for i in range(4):
if check_list[i]!= 1000:
parent += 1
node = (chr(parent),check_list[i])
open_list.append(node)
for tup in open_list:
p,h = tup
if(h == min_entry):
closed_list.append(tup)
print(f"Open list elements are : {open_list}")
print(f"Closed list elements are : {closed_list}")
print("Final Path is ")
print("A")
for tup in closed_list:
print(f"{tup[0]}")
| [
"[email protected]"
] | |
ca3f76530fce192d3db0909f8dd7a2827667a60c | e22e20b81a0b5a60ec93bb542c3b9f866df313af | /intake/admin.py | c0e6af07ae4bcdffc2efdffa020c4a0a8e610b90 | [] | no_license | notdougwhite/office | bf46814b96bf5bf7131ec2b082753b09a8dbd847 | b5e503511f4bd766632267aff8bd9f073ebd01c4 | refs/heads/master | 2020-03-26T00:00:22.092724 | 2018-08-14T18:46:42 | 2018-08-14T18:46:42 | 144,178,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | from django.contrib import admin
from . import models
# Register your models here.
class ClientInline(admin.StackedInline):
model = models.Client
extra = 0
class KidInline(admin.StackedInline):
model = models.Child
extra = 0
class DocInline(admin.StackedInline):
model = models.Document
extra = 0
class CaseAdmin(admin.ModelAdmin):
list_display = ('__str__', 'entry_date', 'was_recently_entered')
list_filter = ['entry_date']
search_fields = ['your_name', 'childs_name']
inlines = [
ClientInline,
KidInline,
DocInline,
]
admin.site.register(models.Case, CaseAdmin)
| [
"[email protected]"
] | |
c4807d0f9ca9a0df704f28182153dadcf1b614fa | ea2543cac8aa706eff73d9b91abec2c1ec3ec33e | /agentDDPG.py | 0bd909c0cdd8a5ce6a60d9442ca99fb95b8d887e | [] | no_license | xuxie1031/DDPG | 3847d9d35ab2797f53ef2ddfa6017d6c761471e9 | 42dd0035f9741352bc25101f59c5b76904221d42 | refs/heads/master | 2020-03-25T12:30:05.295599 | 2018-08-08T17:50:36 | 2018-08-08T17:50:36 | 143,779,046 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,234 | py | import numpy as np
class DDPGAgent:
def __init__(self, config):
super(DDPGAgent, self).__init__()
self.config = config
self.task = config.task_fn('RoboschoolHopper-v1')
self.network = config.network_fn(self.task.state_dim, self.task.action_dim)
self.target_network = config.network_fn(self.task.state_dim, self.task.action_dim)
self.replay = config.replay_fn()
self.random_process = config.random_process_fn(self.task.action_dim)
self.total_step = 0
self.episodes_num = config.episodes_num
self.episode_rewards = []
def soft_update(self, target, src):
for target_param, param in zip(target.parameters(), src.parameters()):
target_param.detach_()
target_param.copy_(target_param*(1.0-self.config.target_network_mix)+param*self.config.target_network_mix)
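    # soft_update performs Polyak averaging: each target parameter becomes
    # (1 - target_network_mix) * target + target_network_mix * online. As an
    # illustrative calculation, with target_network_mix = 1e-3 a target weight
    # of 1.0 tracking an online weight of 0.0 moves to 0.999 after one update.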
def run_agent(self):
for episode in range(self.episodes_num):
episode_reward = 0.0
self.random_process.reset_states()
state = self.task.reset()
while True:
action = self.network.predict(np.stack([state]), True).flatten()
next_state, reward, terminal, _ = self.task.step(action)
episode_reward += reward
self.total_step += 1
self.replay.feed([state, action, reward, next_state, int(terminal)])
state = next_state
if self.replay.size() >= self.config.min_replay_size:
experiences = self.replay.sample()
states, actions, rewards, next_states, terminals = experiences
phi_next = self.target_network.feature(next_states)
a_next = self.target_network.actor(phi_next)
q_next = self.target_network.critic(phi_next, a_next)
terminals = self.network.tensor(terminals).unsqueeze(1)
rewards = self.network.tensor(rewards).unsqueeze(1)
q_next = self.config.discount * q_next * (1 - terminals)
q_next.add_(rewards)
q_next = q_next.detach()
phi = self.network.feature(states)
q = self.network.critic(phi, self.network.tensor(actions))
critic_loss = (q - q_next).pow(2).mul(0.5).sum(-1).mean()
self.network.zero_grad()
critic_loss.backward()
self.network.critic_opt.step()
phi = self.network.feature(states)
action = self.network.actor(phi)
policy_loss = -self.network.critic(phi.detach(), action).mean()
self.network.zero_grad()
policy_loss.backward()
self.network.actor_opt.step()
self.soft_update(self.target_network, self.network)
if terminal: break
self.episode_rewards.append(episode_reward)
print('episode %d total step %d avg reward %f' % (episode, self.total_step, np.mean(np.asarray(self.episode_rewards[-100:]))))
| [
"[email protected]"
] | |
f0ebbd1a304ba250b53fa8d92edff61672bf97d4 | c220f7c7007a67b76892d483b02dd11b97a6c9d9 | /testImport/sub/__init__.py | d29be5309a3298a1f4dc7835c9a10638dcca6d20 | [] | no_license | Y-Grace/ChineseNgoKnowledgeGraph | 934ee5e7ec76aa0b51c61cbac252502bf67ee3f2 | 27bbf06fa771a24609d6ec9220c2ca9d0efe2dc3 | refs/heads/master | 2020-05-26T19:34:07.479741 | 2018-01-06T08:56:56 | 2018-01-06T08:56:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25 | py | __all__ = ['suba','subb'] | [
"[email protected]"
] | |
5d72961519c84c8a410cab5639db9ff14b0e98c3 | 49536aafb22a77a6caf249c7fadef46d63d24dfe | /tensorflow/tensorflow/contrib/ndlstm/python/misc_test.py | 57674164141d3203dc4ba57fcd3e80a9b48ff764 | [
"Apache-2.0"
] | permissive | wangzhi01/deeplearning-1 | 4e5ad93f0d9ecd302b74352f80fe1fa6ae70bf0d | 46ab82253d956953b8aa98e97ceb6cd290e82288 | refs/heads/master | 2020-05-28T03:14:55.687567 | 2018-09-12T16:52:09 | 2018-09-12T16:52:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,953 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.ndlstm.python import misc as misc_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
misc = misc_lib
def _rand(*size):
return np.random.uniform(size=size).astype("f")
class LstmMiscTest(test_util.TensorFlowTestCase):
def testPixelsAsVectorDims(self):
with self.test_session():
inputs = constant_op.constant(_rand(2, 7, 11, 5))
outputs = misc.pixels_as_vector(inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (2, 7 * 11 * 5))
def testPoolAsVectorDims(self):
with self.test_session():
inputs = constant_op.constant(_rand(2, 7, 11, 5))
outputs = misc.pool_as_vector(inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (2, 5))
def testOneHotPlanes(self):
with self.test_session():
inputs = constant_op.constant([0, 1, 3])
outputs = misc.one_hot_planes(inputs, 4)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (3, 1, 1, 4))
target = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
self.assertAllClose(result.reshape(-1), target.reshape(-1))
def testOneHotMask(self):
with self.test_session():
data = np.array([[0, 1, 2], [2, 0, 1]]).reshape(2, 3, 1)
inputs = constant_op.constant(data)
outputs = misc.one_hot_mask(inputs, 3)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (2, 3, 3))
target = np.array([[[1, 0, 0], [0, 1, 0]], [[0, 1, 0], [0, 0, 1]],
[[0, 0, 1], [1, 0, 0]]]).transpose(1, 2, 0)
self.assertAllClose(result.reshape(-1), target.reshape(-1))
if __name__ == "__main__":
test.main()
| [
"[email protected]"
] | |
8578a3f95c94985a4cc0aa0321543c756179d831 | 7b3c78c042d1906ce22ffd00c6e9a9bd7da63471 | /facerecognition/haar/threaded_haar.py | 334819cddad22fe260d83dd48dd77e65b3f2a40e | [] | no_license | jchiefelk/AlgoLab | 3412840ed202728160fbe83a8d00d05a8a8bec5d | 7726f3d613a9f26b56c8acafa0145a9b5924a627 | refs/heads/master | 2020-05-20T17:55:48.598845 | 2017-01-22T16:43:31 | 2017-01-22T16:43:31 | 56,414,318 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | # Python script to recognize face
import numpy as np
import cv2
import sys
from threading import Thread
#
# Functions
#
'''
def runCamera(self):
# Capture frame-by-frame
video_capture = cv2.VideoCapture(0)
frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.namedWindow('video', cv2.WINDOW_NORMAL)
cv2.imshow('video', gray)
'''
'''
thread1 = Thread(runCamera())
thread1.start()
'''
| [
"[email protected]"
] | |
10ed61cfd37812ce65002d9d8d95891844128a31 | 9b422078f4ae22fe16610f2ebc54b8c7d905ccad | /xlsxwriter/test/comparison/test_button04.py | e8f33e3ff6d0969eac4cf278ec79b4baf1104d9b | [
"BSD-2-Clause-Views"
] | permissive | projectsmahendra/XlsxWriter | 73d8c73ea648a911deea63cb46b9069fb4116b60 | 9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45 | refs/heads/master | 2023-07-21T19:40:41.103336 | 2023-07-08T16:54:37 | 2023-07-08T16:54:37 | 353,636,960 | 0 | 0 | NOASSERTION | 2021-04-01T08:57:21 | 2021-04-01T08:57:20 | null | UTF-8 | Python | false | false | 846 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('button04.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
worksheet1.insert_button('C2', {})
worksheet2.insert_button('E5', {})
workbook.close()
self.assertExcelEqual()
| [
"[email protected]"
] | |
68ef6e343455f73bc4af601651462ffaa6c6131f | 689dfca7468b4196e84b5fafde7d8134d2ce5d9a | /Lesson_5/exam_6.py | dae12855133751adff3c9761a9a9bdd76a02d8ca | [
"Apache-2.0"
] | permissive | MisterHat-89/geekBrainsPython | 79d5913aa7c174aa205b0cde0391f76e9a342537 | ce0f54e35a746872318d2478c0e48f3bda99ad66 | main | 2023-03-24T22:52:36.713523 | 2021-03-13T12:39:48 | 2021-03-13T12:39:48 | 318,729,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,734 | py | # 6. Create (by hand, not with code) a text file in which every line describes a school
# subject and the lecture, practical and laboratory classes held for that subject, with their counts.
# It is important that a subject does not necessarily have every type of class. Build a dictionary
# that holds the subject name and the total number of classes for it. Print the dictionary to the screen.
# Example lines of the file:
# Informatics: 100(lec) 50(prac) 20(lab).
# Physics: 30(lec) — 10(lab)
# Physical Education: — 30(prac) —
# Example dictionary: {"Informatics": 170, "Physics": 40, "Physical Education": 30}
def clean_words(string):
return "".join(filter(str.isdigit, string))
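# For example, clean_words("30(lab)") returns "30", so int(clean_words(...))
# below recovers the class count from entries such as "50(pr)" or "20(lab)".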
print("")
dicter = {}
sum_m = 0
try:
with open(r"dz6.txt", "r", encoding="utf-8") as my_file:
for line in my_file:
clean_line = line.replace("\n", "")
lister = clean_line.split(" ")
if lister[1] != "—" and lister[1] != "-":
sum_m += int(clean_words(lister[1]))
if lister[2] != "—" and lister[2] != "-":
sum_m += int(clean_words(lister[2]))
if lister[3] != "—" and lister[3] != "-":
sum_m += int(clean_words(lister[3]))
dicter[lister[0][:-1]] = sum_m
sum_m = 0
except IOError:
print("Error")
print(dicter)
| [
"[email protected]"
] | |
2afc8c5146bd2a6dc844e2c9d18012c712ea238d | 67ad5fd08f5251b48ec402c78840b1f0f2c473f7 | /htseq/scripts/parse_utils.py | a7efa92ea5746244611ebdc72c7b06e4a08f0729 | [] | no_license | canzarlab/heterochr_silencing | b865d35af4fc6cd6436455273afb18869812c9bd | 9cbc70c0db62508717c03c3edad4ae84887603f0 | refs/heads/main | 2023-07-05T05:54:08.254188 | 2021-08-10T14:05:59 | 2021-08-10T14:05:59 | 393,033,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,501 | py | #!/usr/bin/env python
import os
import pandas as pd
# reading .odf (libre office spread-sheets)
import ezodf
import odf
## ----------------
## Path Environment
## ----------------
def get_data_dir(path_file, path_dir):
rel_path = os.path.relpath(path_file, path_dir)
data_dir = rel_path.split(os.sep)[0]
return data_dir
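# For example (hypothetical paths): get_data_dir("/proj/data/RNA/s1/counts.txt",
# "/proj/data") returns "RNA", i.e. the first directory of path_file below path_dir.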
def get_sequencing_type(sample_prefix):
if 'ChIP' in sample_prefix or 'INPUT' in sample_prefix:
return 'ChIP'
elif 'RIP' in sample_prefix:
return 'RIP'
elif 'totalRNA' in sample_prefix:
return 'totalRNA'
elif 'RNA' in sample_prefix or 'pA' in sample_prefix :
return 'RNA'
else:
return 'unknown'
## -------------
## Spread-sheets
## -------------
def read_odf_doc(file):
print("Importing odf file %s ... \n" % file)
doc = ezodf.opendoc(file)
print("Spreadsheet contains %d sheet(s)." % len(doc.sheets))
for sheet in doc.sheets:
print("-"*40)
print(" Sheet name : '%s'" % sheet.name)
print("Size of Sheet : (rows=%d, cols=%d)" % (sheet.nrows(), sheet.ncols()) )
# convert the first sheet to a pandas.DataFrame
sheet = doc.sheets[0]
df_dict = {}
n_cols = 0
for i, row in enumerate(sheet.rows()):
# row is a list of cells
# assume the header is on the first row
if i == 0:
#import pdb; pdb.set_trace()
# columns as lists in a dictionary
df_dict = {cell.value:[] for cell in row}
# remove empty columns
try:
del df_dict[None]
except:
pass
# create index for the column headers
col_index = {j:cell.value for j, cell in enumerate(row)}
# remove empty colums
col_index = {kk:vv for (kk, vv) in col_index.items() if not isinstance(vv, type(None))}
# init number of columns
n_cols = len(col_index)
continue
for j, cell in enumerate(row):
## only use non-empty cols!
if j <= n_cols - 1:
# use header instead of column index
df_dict[col_index[j]].append(cell.value)
# and convert to a DataFrame
df = pd.DataFrame(df_dict)
# drop empty columns
df = df[~df.isnull().all(axis='columns')]
print("\nDone.")
return df
| [
"[email protected]"
] | |
e3fe7a0c7aef0e61e259e0b599655412153a9b3e | d3426a5d1bbecde0fe480e7af64a54bfdb8295eb | /homeworks/models.py | 118ad76a24b11a1eb196bc1d8b3569f5eb862b5c | [
"MIT"
] | permissive | pu6ki/elsyser | 5a3b83f25f236b4a4903180985f60ced98b3fb53 | 52261c93b58422b0e39cae656ae9409ea03a488d | refs/heads/master | 2021-01-12T18:06:18.375185 | 2017-12-10T18:18:34 | 2017-12-10T18:18:34 | 71,325,732 | 5 | 4 | MIT | 2017-12-10T18:18:35 | 2016-10-19T06:26:47 | Python | UTF-8 | Python | false | false | 1,287 | py | from django.db import models
from students.models import Class, Subject, Teacher, Student
from news.models import BaseAbstractPost
class Homework(models.Model):
topic = models.CharField(default='Homework', max_length=50)
subject = models.ForeignKey(Subject, on_delete=models.CASCADE)
clazz = models.ForeignKey(Class, on_delete=models.CASCADE)
deadline = models.DateField(auto_now=False)
details = models.TextField(max_length=256, blank=True)
author = models.ForeignKey(Teacher, null=True, related_name='homeworks', on_delete=models.CASCADE)
def __str__(self):
return '{} ({}) - {}'.format(self.topic, self.subject, self.clazz)
class Meta:
ordering = ['deadline', 'clazz', 'subject']
class Submission(BaseAbstractPost):
homework = models.ForeignKey(Homework, related_name='submissions', on_delete=models.CASCADE)
student = models.ForeignKey(Student, related_name='submissions', on_delete=models.CASCADE)
content = models.TextField(max_length=2048)
solution_url = models.URLField(blank=True)
checked = models.BooleanField(default=False)
def __str__(self):
return '{} - {} ({})'.format(self.student, self.homework, self.posted_on)
class Meta:
ordering = ['-posted_on', '-last_edited_on']
| [
"[email protected]"
] | |
8a74a3f960e4fed4eb54e09f117ee66960597f49 | 7cea740de4d4afece91938b4c34feb628d9f5ec2 | /datasets/load_cifar10.py | 4cc106acaadc0ce1fc5b273e159aac7763989237 | [] | no_license | ofrimasad/cs3598-hw3 | a0a3de9d4bb0cf1d94d00afafcff48bfccf5e502 | 40e8db1c935e44049be240c97cb42dd2c9112444 | refs/heads/master | 2022-10-27T02:42:40.963777 | 2020-06-15T19:12:06 | 2020-06-15T19:12:06 | 270,960,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,258 | py | import pickle
import numpy as np
import os
import sys
import urllib
import tarfile
import zipfile
def _print_download_progress(count, block_size, total_size):
"""
Function used for printing the download progress.
Used as a call-back function in maybe_download_and_extract().
"""
# Percentage completion.
pct_complete = float(count * block_size) / total_size
# Limit it because rounding errors may cause it to exceed 100%.
pct_complete = min(1.0, pct_complete)
# Status-message. Note the \r which means the line should overwrite itself.
msg = "\r- Download progress: {0:.1%}".format(pct_complete)
# Print it.
sys.stdout.write(msg)
sys.stdout.flush()
def download(base_url, download_dir):
"""
Download the given file if it does not already exist in the download_dir.
    :param base_url: Full internet URL of the file to download; the local
        filename is taken from the last path component of this URL.
:param download_dir: Local directory for storing the file.
:return: Nothing.
"""
# Path for local file.
filename = base_url.split("/")[-1]
save_path = os.path.join(download_dir, filename)
# Check if the file already exists, otherwise we need to download it now.
if not os.path.exists(save_path):
# Check if the download directory exists, otherwise create it.
if not os.path.exists(download_dir):
os.makedirs(download_dir)
print("Downloading", filename, "...")
# Download the file from the internet.
file_path, _ = urllib.request.urlretrieve(url=base_url,
filename=save_path,
reporthook=_print_download_progress)
print(" Done!")
def maybe_download_and_extract(url, download_dir):
"""
Download and extract the data if it doesn't already exist.
Assumes the url is a tar-ball file.
:param url:
Internet URL for the tar-file to download.
Example: "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
:param download_dir:
Directory where the downloaded file is saved.
Example: "data/CIFAR-10/"
:return:
Nothing.
"""
# Filename for saving the file downloaded from the internet.
# Use the filename from the URL and add it to the download_dir.
filename = url.split('/')[-1]
file_path = os.path.join(download_dir, filename)
# Check if the file already exists.
# If it exists then we assume it has also been extracted,
# otherwise we need to download and extract it now.
if not os.path.exists(file_path):
# Check if the download directory exists, otherwise create it.
if not os.path.exists(download_dir):
os.makedirs(download_dir)
# Download the file from the internet.
file_path, _ = urllib.request.urlretrieve(url=url,
filename=file_path,
reporthook=_print_download_progress)
print()
print("Download finished. Extracting files.")
if file_path.endswith(".zip"):
# Unpack the zip-file.
zipfile.ZipFile(file=file_path, mode="r").extractall(download_dir)
elif file_path.endswith((".tar.gz", ".tgz")):
# Unpack the tar-ball.
tarfile.open(name=file_path, mode="r:gz").extractall(download_dir)
print("Done.")
else:
print("Data has apparently already been downloaded and unpacked.")
def get_CIFAR10_data(cifar10_dir, num_training=49000, num_validation=1000, num_test=1000):
'''
Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
it for the neural net classifier.
'''
# Load the raw CIFAR-10 data
X_train, y_train, X_test, y_test = load(cifar10_dir)
# Subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
X_train = X_train.astype(np.float64)
X_val = X_val.astype(np.float64)
X_test = X_test.astype(np.float64)
mean_image = np.mean(X_train, axis=0)
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
return X_train, y_train, X_val, y_val, X_test, y_test, mean_image
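# With the default arguments, the returned splits are expected to have shapes
# X_train (49000, 32, 32, 3), X_val (1000, 32, 32, 3), X_test (1000, 32, 32, 3),
# and mean_image (32, 32, 3); these numbers follow from the masking above and
# are noted here purely as a reading aid.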
def load_CIFAR_batch(filename):
''' load single batch of cifar '''
with open(filename, 'rb') as f:
datadict = pickle.load(f, encoding = 'latin1')
X = datadict['data']
Y = datadict['labels']
X = X.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype("float")
Y = np.array(Y)
return X, Y
def load(ROOT):
''' load all of cifar '''
xs = []
ys = []
for b in range(1, 6):
f = os.path.join(ROOT, 'data_batch_%d' % (b, ))
X, Y = load_CIFAR_batch(f)
xs.append(X)
ys.append(Y)
Xtr = np.concatenate(xs)
Ytr = np.concatenate(ys)
del X, Y
Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))
return Xtr, Ytr, Xte, Yte
| [
"[email protected]"
] | |
080cd8ea241aa7156fd2b66b2b5ff5f1e2f2ab09 | 437b9b422da726fb2b22f59ea284e319a2cba4cd | /tensorflow_graphics/rendering/opengl/tests/rasterization_backend_test.py | 49600dca718988cbf2b7e036822a5d2469f7a78b | [
"Apache-2.0"
] | permissive | rodrygojose/graphics | 27b86d04ca2276648f0963da77ca7ed11593212e | 089d58ffb170dc4021afecced6159a0345cd8830 | refs/heads/master | 2023-03-04T04:54:44.675059 | 2021-02-11T04:19:55 | 2021-02-11T04:20:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,297 | py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.geometry.representation import grid
from tensorflow_graphics.geometry.transformation import look_at
from tensorflow_graphics.rendering.camera import perspective
from tensorflow_graphics.rendering.opengl import math as glm
from tensorflow_graphics.rendering.opengl import rasterization_backend
from tensorflow_graphics.util import test_case
_IMAGE_HEIGHT = 5
_IMAGE_WIDTH = 7
_TRIANGLE_SIZE = 2.0
def _generate_vertices_and_view_matrices():
camera_origin = ((0.0, 0.0, 0.0), (0.0, 0.0, 0.0))
camera_up = ((0.0, 1.0, 0.0), (0.0, 1.0, 0.0))
look_at_point = ((0.0, 0.0, 1.0), (0.0, 0.0, -1.0))
field_of_view = ((60 * np.math.pi / 180,), (60 * np.math.pi / 180,))
near_plane = ((0.01,), (0.01,))
far_plane = ((400.0,), (400.0,))
aspect_ratio = ((float(_IMAGE_WIDTH) / float(_IMAGE_HEIGHT),),
(float(_IMAGE_WIDTH) / float(_IMAGE_HEIGHT),))
# Construct the view projection matrix.
world_to_camera = look_at.right_handed(camera_origin, look_at_point,
camera_up)
perspective_matrix = perspective.right_handed(field_of_view, aspect_ratio,
near_plane, far_plane)
view_projection_matrix = tf.linalg.matmul(perspective_matrix, world_to_camera)
depth = 1.0
vertices = (((-10.0 * _TRIANGLE_SIZE, 10.0 * _TRIANGLE_SIZE,
depth), (10.0 * _TRIANGLE_SIZE, 10.0 * _TRIANGLE_SIZE, depth),
(0.0, -10.0 * _TRIANGLE_SIZE, depth)),
((-_TRIANGLE_SIZE, 0.0, depth), (0.0, _TRIANGLE_SIZE, depth),
(0.0, 0.0, depth)))
return vertices, view_projection_matrix
def _proxy_rasterize(vertices, triangles, view_projection_matrices):
return rasterization_backend.rasterize(vertices, triangles,
view_projection_matrices,
(_IMAGE_WIDTH, _IMAGE_HEIGHT))
class RasterizationBackendTest(test_case.TestCase):
@parameterized.parameters(
("must have exactly 3 dimensions in axis -1", (2, 6, 32, 2), (17, 3),
(2, 6, 4, 4)),
("must have exactly 3 dimensions in axis -1", (2, 6, 32, 3), (17, 2),
(2, 6, 4, 4)),
("must have a rank of 2", (2, 6, 32, 3), (3, 17, 2), (2, 6, 4, 4)),
("must have exactly 4 dimensions in axis -1", (2, 6, 32, 3), (17, 3),
(2, 6, 4, 3)),
("must have exactly 4 dimensions in axis -2", (2, 6, 32, 3), (17, 3),
(2, 6, 3, 4)),
("Not all batch dimensions are broadcast-compatible", (3, 6, 32, 3),
(17, 3), (5, 6, 4, 4)),
)
def test_rasterize_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(_proxy_rasterize, error_msg, shapes)
@parameterized.parameters(
(((32, 3), (17, 3), (4, 4)), (tf.float32, tf.int32, tf.float32)),
(((None, 32, 3), (17, 3), (None, 4, 4)),
(tf.float32, tf.int32, tf.float32)),
(((None, 9, 32, 3), (17, 3), (None, 9, 4, 4)),
(tf.float32, tf.int32, tf.float32)),
)
def test_rasterize_exception_not_raised(self, shapes, dtypes):
self.assert_exception_is_not_raised(
_proxy_rasterize, shapes=shapes, dtypes=dtypes)
def test_rasterize_batch_vertices_only(self):
triangles = np.array(((0, 1, 2),), np.int32)
vertices, view_projection_matrix = _generate_vertices_and_view_matrices()
predicted_fb = rasterization_backend.rasterize(
vertices, triangles, view_projection_matrix[0],
(_IMAGE_WIDTH, _IMAGE_HEIGHT))
mask = predicted_fb.foreground_mask
self.assertAllEqual(mask[0, ...], tf.ones_like(mask[0, ...]))
gt_layer_1 = np.zeros((_IMAGE_HEIGHT, _IMAGE_WIDTH, 1), np.float32)
gt_layer_1[_IMAGE_HEIGHT // 2:, _IMAGE_WIDTH // 2:, 0] = 1.0
self.assertAllEqual(mask[1, ...], gt_layer_1)
def test_rasterize_batch_view_only(self):
triangles = np.array(((0, 1, 2),), np.int32)
vertices, view_projection_matrix = _generate_vertices_and_view_matrices()
predicted_fb = rasterization_backend.rasterize(
vertices[0], triangles, view_projection_matrix,
(_IMAGE_WIDTH, _IMAGE_HEIGHT))
self.assertAllEqual(predicted_fb.foreground_mask[0, ...],
tf.ones_like(predicted_fb.foreground_mask[0, ...]))
self.assertAllEqual(predicted_fb.foreground_mask[1, ...],
tf.zeros_like(predicted_fb.foreground_mask[1, ...]))
def test_rasterize_preset(self):
camera_origin = (0.0, 0.0, 0.0)
camera_up = (0.0, 1.0, 0.0)
look_at_point = (0.0, 0.0, 1.0)
field_of_view = (60 * np.math.pi / 180,)
near_plane = (0.01,)
far_plane = (400.0,)
# Construct the view projection matrix.
model_to_eye_matrix = look_at.right_handed(camera_origin, look_at_point,
camera_up)
perspective_matrix = perspective.right_handed(
field_of_view, (float(_IMAGE_WIDTH) / float(_IMAGE_HEIGHT),),
near_plane, far_plane)
view_projection_matrix = tf.linalg.matmul(perspective_matrix,
model_to_eye_matrix)
depth = 1.0
vertices = ((-2.0 * _TRIANGLE_SIZE, 0.0, depth), (0.0, _TRIANGLE_SIZE,
depth), (0.0, 0.0, depth),
(0.0, -_TRIANGLE_SIZE, depth))
triangles = np.array(((1, 2, 0), (0, 2, 3)), np.int32)
predicted_fb = rasterization_backend.rasterize(
vertices, triangles, view_projection_matrix,
(_IMAGE_WIDTH, _IMAGE_HEIGHT))
with self.subTest(name="triangle_index"):
groundtruth_triangle_index = np.zeros((_IMAGE_HEIGHT, _IMAGE_WIDTH, 1),
dtype=np.int32)
groundtruth_triangle_index[..., :_IMAGE_WIDTH // 2, 0] = 0
groundtruth_triangle_index[:_IMAGE_HEIGHT // 2, _IMAGE_WIDTH // 2:, 0] = 1
self.assertAllEqual(groundtruth_triangle_index, predicted_fb.triangle_id)
with self.subTest(name="mask"):
groundtruth_mask = np.ones((_IMAGE_HEIGHT, _IMAGE_WIDTH, 1),
dtype=np.int32)
groundtruth_mask[..., :_IMAGE_WIDTH // 2, 0] = 0
self.assertAllEqual(groundtruth_mask, predicted_fb.foreground_mask)
attributes = np.array(
((1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0))).astype(np.float32)
perspective_correct_interpolation = lambda geometry, pixels: glm.perspective_correct_interpolation( # pylint: disable=g-long-lambda,line-too-long
geometry, attributes, pixels, model_to_eye_matrix, perspective_matrix,
np.array((_IMAGE_WIDTH, _IMAGE_HEIGHT)).astype(np.float32),
np.array((0.0, 0.0)).astype(np.float32))
with self.subTest(name="barycentric_coordinates_triangle_0"):
geometry_0 = tf.gather(vertices, triangles[0, :])
pixels_0 = tf.transpose(
grid.generate((3.5, 2.5), (6.5, 4.5), (4, 3)), perm=(1, 0, 2))
barycentrics_gt_0 = perspective_correct_interpolation(
geometry_0, pixels_0)
self.assertAllClose(
barycentrics_gt_0,
predicted_fb.barycentrics.value[2:, 3:, :],
atol=1e-3)
with self.subTest(name="barycentric_coordinates_triangle_1"):
geometry_1 = tf.gather(vertices, triangles[1, :])
pixels_1 = tf.transpose(
grid.generate((3.5, 0.5), (6.5, 1.5), (4, 2)), perm=(1, 0, 2))
barycentrics_gt_1 = perspective_correct_interpolation(
geometry_1, pixels_1)
self.assertAllClose(
barycentrics_gt_1,
predicted_fb.barycentrics.value[0:2, 3:, :],
atol=1e-3)
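# Conventional test entry point for TF Graphics test modules (assumed here so
# the file can be run directly):
if __name__ == "__main__":
  test_case.main()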
| [
"[email protected]"
] | |
c2d09cf2d8b478dbefe1c63678ccd335d0052187 | 24ccb821154dd2cc0bfe405f4870019df2c47170 | /etl.py | 2aeca04e30a62edf491c543ab155870dc9949a51 | [] | no_license | pandilwar605/Data-Warehouse-on-AWS-Redshift | 48e611cc37cd979824b5af2f2d86195a33dddb1e | 7099b2c828f1255a2cb8bfbeacedae53e60f0fde | refs/heads/master | 2022-11-15T13:53:17.090204 | 2020-07-15T23:35:58 | 2020-07-15T23:35:58 | 280,004,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | import configparser
import psycopg2
from sql_queries import copy_table_queries, insert_table_queries
def load_staging_tables(cur, conn):
'''
load data from s3 to staging tables in redshift
'''
for query in copy_table_queries:
cur.execute(query)
conn.commit()
def insert_tables(cur, conn):
'''
insert data from staging to analytical tables in redshift
'''
for query in insert_table_queries:
cur.execute(query)
conn.commit()
def main():
config = configparser.ConfigParser()
config.read('dwh.cfg')
conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
cur = conn.cursor()
load_staging_tables(cur, conn)
insert_tables(cur, conn)
conn.close()
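# The script reads connection settings from `dwh.cfg`. One possible layout
# (key names are assumptions; only the CLUSTER section is used above, and its
# values are consumed positionally as host, dbname, user, password, port):
#
#   [CLUSTER]
#   HOST=<redshift-endpoint>
#   DB_NAME=<database>
#   DB_USER=<user>
#   DB_PASSWORD=<password>
#   DB_PORT=5439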
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
24d7a172fddbe33d234581d7f9cb43fe59a30eff | 83eeeb2a42f5c6ad527776038374e4fa351766c0 | /graphing/graphing.py | 098c627476c7b532011a56544ada7aaeb0070109 | [] | no_license | MarcusGrass/cookie_clicker_automation | f2c58767515cdaca48ca719f720a49d5d4f10313 | befbf5353c5e708a5c35450e39a39e140ff768eb | refs/heads/master | 2022-02-18T04:52:08.628431 | 2019-08-18T15:06:26 | 2019-08-18T15:06:26 | 173,552,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,378 | py | import matplotlib.pyplot as plt
from datetime import datetime
import math
class Grapher(object):
def __init__(self, heavenly_chips, balance, compensator, lc):
self.heavenly_chips = [heavenly_chips]
self.log_heavenly_chips = [math.log2(heavenly_chips)]
self.derivative_chips = [heavenly_chips]
self.amount = [balance.amount]
self.log_amount = [math.log2(balance.amount)]
self.cps = list()
self.log_cps = list()
self.cps_timestamps = list()
self.cps_initialized = False
self.timestamps = [0]
self.derivative = [0]
self.log_derivative = [0]
self.derivative_timestamps = [0]
self.starttime = datetime.now()
self.last_derivative_time = datetime.now()
self.lc = lc
self.lc.warn("Initialized grapher.")
if clear_to_draw_cps(compensator):
self.init_cps_plot(balance.cps)
def update_and_draw(self, heavenly_chip_count, balance, compensator):
self.heavenly_chips.append(heavenly_chip_count)
self.log_heavenly_chips.append(math.log2(heavenly_chip_count))
self.amount.append(balance.amount)
self.log_amount.append(math.log2(balance.amount))
current_time_in_seconds = (datetime.now() - self.starttime).seconds
self.timestamps.append(current_time_in_seconds)
self.draw_regular_plot()
self.draw_amount_plot()
if self.time_to_take_derivative():
self.derivative_chips.append(heavenly_chip_count)
self.derivative_timestamps.append(current_time_in_seconds)
self.calculate_simple_derivative()
self.draw_derivative_plot()
self.last_derivative_time = datetime.now()
if clear_to_draw_cps(compensator) and self.cps_initialized and balance.cps/self.cps[-1] < 10:
self.cps.append(balance.cps)
self.log_cps.append(math.log2(balance.cps))
self.cps_timestamps.append(current_time_in_seconds)
self.draw_cps_plot()
elif clear_to_draw_cps(compensator) and self.cps_initialized is False:
self.init_cps_plot(balance.cps)
def draw_regular_plot(self):
draw_plot(self.timestamps, self.heavenly_chips, self.log_heavenly_chips, "chips")
def draw_amount_plot(self):
draw_plot(self.timestamps, self.amount, self.log_amount, "amount")
def draw_cps_plot(self):
draw_plot(self.cps_timestamps, self.cps, self.log_cps, "cps")
def draw_derivative_plot(self):
draw_plot(self.derivative_timestamps, self.derivative, self.log_derivative, "derivative")
def calculate_simple_derivative(self):
derivative = (self.derivative_chips[-1] - self.derivative_chips[-2]) / \
(self.derivative_timestamps[-1] - self.derivative_timestamps[-2])
self.derivative.append(derivative)
log_derivative = math.log2(derivative)
self.log_derivative.append(log_derivative)
def time_to_take_derivative(self):
if (datetime.now() - self.last_derivative_time).seconds > 600:
return True
return False
def init_cps_plot(self, cps):
self.cps_timestamps = [0]
self.cps.append(cps)
self.log_cps.append(math.log2(cps))
self.cps_initialized = True
def clear_to_draw_cps(cps_compensator):
if cps_compensator != 50:
return True
return False
def draw_plot(x, y1, y2, name):
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.set_xlabel('time (s)')
ax1.set_ylabel('absolute', color=color)
ax1.plot(x, y1, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel('log', color=color) # we already handled the x-label with ax1
y_ticks = get_yticks(y2)
ax2.plot(x, y2, color=color)
ax2.tick_params(axis='y', labelcolor=color)
ax2.set_yticks(y_ticks)
fig.tight_layout() # otherwise the right y-label is slightly clipped
fig.savefig('D:\\Program\\PycharmProjects\\seleniumtest\\logs\\' + name + '.png')
plt.close()
def get_yticks(y2):
end_of_range = int(math.ceil(max(y2)))+1
start_of_range = int(math.floor(min(y2)))-1
y_ticks = [n for n in range(start_of_range, end_of_range)]
return y_ticks
| [
"[email protected]"
] | |
dee6a1b5287f10cf7249284570e705722ce16ebc | 1da08a05d5682f34f4870ddef35decf0d5365e86 | /jax/dueling_dqn/dueling_DQN_PER.py | ff53fcecef57d7c8128c5a6cad7ad98401d5c97b | [] | no_license | sharavsambuu/learning-drl | 69a17e875949bd62d95ce40baa259145127cc74a | fccf7c9b2fd2f0ff1bc2dcca6eec91c0f6903d37 | refs/heads/master | 2023-01-14T03:06:12.177136 | 2020-11-19T11:22:39 | 2020-11-19T11:22:39 | 273,596,977 | 5 | 0 | null | 2020-10-22T10:05:37 | 2020-06-19T22:24:09 | Python | UTF-8 | Python | false | false | 9,209 | py | import os
import random
import math
import gym
from collections import deque
import flax
import jax
from jax import numpy as jnp
import numpy as np
debug_render = True
debug = False
num_episodes = 500
batch_size = 64
learning_rate = 0.001
sync_steps = 100
memory_length = 4000
epsilon = 1.0
epsilon_decay = 0.001
epsilon_max = 1.0
epsilon_min = 0.01
gamma = 0.99 # discount factor
class SumTree:
write = 0
def __init__(self, capacity):
self.capacity = capacity
self.tree = np.zeros(2*capacity - 1)
self.data = np.zeros(capacity, dtype=object)
def _propagate(self, idx, change):
parent = (idx - 1) // 2
self.tree[parent] += change
if parent != 0:
self._propagate(parent, change)
def _retrieve(self, idx, s):
left = 2 * idx + 1
right = left + 1
if left >= len(self.tree):
return idx
if s <= self.tree[left]:
return self._retrieve(left, s)
else:
return self._retrieve(right, s-self.tree[left])
def total(self):
return self.tree[0]
def add(self, p, data):
idx = self.write + self.capacity - 1
self.data[self.write] = data
self.update(idx, p)
self.write += 1
if self.write >= self.capacity:
self.write = 0
def update(self, idx, p):
change = p - self.tree[idx]
self.tree[idx] = p
self._propagate(idx, change)
def get(self, s):
idx = self._retrieve(0, s)
dataIdx = idx - self.capacity + 1
return (idx, self.tree[idx], self.data[dataIdx])
class PERMemory:
e = 0.01
a = 0.6
def __init__(self, capacity):
self.tree = SumTree(capacity)
def _get_priority(self, error):
return (error+self.e)**self.a
def add(self, error, sample):
p = self._get_priority(error)
self.tree.add(p, sample)
def sample(self, n):
batch = []
segment = self.tree.total()/n
for i in range(n):
a = segment*i
b = segment*(i+1)
s = random.uniform(a, b)
(idx, p, data) = self.tree.get(s)
batch.append((idx, data))
return batch
def update(self, idx, error):
p = self._get_priority(error)
self.tree.update(idx, p)
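# Rough usage sketch of the PER memory (values are illustrative only):
#   memory = PERMemory(memory_length)
#   memory.add(td_error_value, (state, action, reward, next_state, done))
#   batch = memory.sample(batch_size)      # list of (tree_index, transition) pairs
#   memory.update(batch[0][0], new_error)  # refresh a priority after training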
class DuelingQNetwork(flax.nn.Module):
def apply(self, x, n_actions):
dense_layer_1 = flax.nn.Dense(x, 64)
activation_layer_1 = flax.nn.relu(dense_layer_1)
dense_layer_2 = flax.nn.Dense(activation_layer_1, 64)
activation_layer_2 = flax.nn.relu(dense_layer_2)
value_dense = flax.nn.Dense(activation_layer_2, 64)
value = flax.nn.relu(value_dense)
value = flax.nn.Dense(value, 1)
advantage_dense = flax.nn.Dense(activation_layer_2, 64)
advantage = flax.nn.relu(advantage_dense)
advantage = flax.nn.Dense(advantage, n_actions)
        # Dueling aggregation: Q(s, a) = V(s) + A(s, a) - mean_a A(s, a),
        # with the mean taken over the action axis so each state's advantages
        # are centred independently.
        advantage_average = jnp.mean(advantage, axis=-1, keepdims=True)
        q_values_layer = jnp.subtract(jnp.add(advantage, value), advantage_average)
return q_values_layer
env = gym.make('CartPole-v1')
state = env.reset()
n_actions = env.action_space.n
dueling_dqn_module = DuelingQNetwork.partial(n_actions=n_actions)
_, params = dueling_dqn_module.init_by_shape(jax.random.PRNGKey(0), [state.shape])
q_network = flax.nn.Model(dueling_dqn_module, params)
target_q_network = flax.nn.Model(dueling_dqn_module, params)
optimizer = flax.optim.Adam(learning_rate).create(q_network)
per_memory = PERMemory(memory_length)
@jax.jit
def policy(model, x):
predicted_q_values = model(x)
max_q_action = jnp.argmax(predicted_q_values)
return max_q_action, predicted_q_values
@jax.vmap
def calculate_td_error(q_value_vec, target_q_value_vec, action, reward):
td_target = reward + gamma*jnp.amax(target_q_value_vec)
td_error = td_target - q_value_vec[action]
return jnp.abs(td_error)
@jax.jit
def td_error(model, target_model, batch):
# batch[0] - states
# batch[1] - actions
# batch[2] - rewards
# batch[3] - next_states
predicted_q_values = model(batch[0])
target_q_values = target_model(batch[3])
return calculate_td_error(predicted_q_values, target_q_values, batch[1], batch[2])
@jax.vmap
def q_learning_loss(q_value_vec, target_q_value_vec, action, reward, done):
td_target = reward + gamma*jnp.amax(target_q_value_vec)*(1.-done)
td_error = jax.lax.stop_gradient(td_target) - q_value_vec[action]
return jnp.square(td_error)
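# The target above follows the standard Q-learning update:
#   y = r + gamma * max_a' Q_target(s', a') * (1 - done)
# and the loss is the squared TD error between y and Q(s, a).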
@jax.jit
def train_step(optimizer, target_model, batch):
# batch[0] - states
# batch[1] - actions
# batch[2] - rewards
# batch[3] - next_states
# batch[4] - dones
def loss_fn(model):
predicted_q_values = model(batch[0])
target_q_values = target_model(batch[3])
return jnp.mean(
q_learning_loss(
predicted_q_values,
target_q_values,
batch[1],
batch[2],
batch[4]
)
)
loss, gradients = jax.value_and_grad(loss_fn)(optimizer.target)
optimizer = optimizer.apply_gradient(gradients)
return optimizer, loss, td_error(optimizer.target, target_model, batch)
global_steps = 0
try:
for episode in range(num_episodes):
episode_rewards = []
state = env.reset()
while True:
global_steps = global_steps+1
if np.random.rand() <= epsilon:
action = env.action_space.sample()
else:
action, q_values = policy(optimizer.target, state)
if debug:
print("q утгууд :" , q_values)
print("сонгосон action :", action )
if epsilon>epsilon_min:
epsilon = epsilon_min+(epsilon_max-epsilon_min)*math.exp(-epsilon_decay*global_steps)
if debug:
#print("epsilon :", epsilon)
pass
new_state, reward, done, _ = env.step(int(action))
            # when adding a sample, first compute its temporal difference error
temporal_difference = float(td_error(optimizer.target, target_q_network, (
jnp.asarray([state]),
jnp.asarray([action]),
jnp.asarray([reward]),
jnp.asarray([new_state])
))[0])
per_memory.add(temporal_difference, (state, action, reward, new_state, int(done)))
            # sample a batch from the Prioritized Experience Replay memory and train the DQN
batch = per_memory.sample(batch_size)
states, actions, rewards, next_states, dones = [], [], [], [], []
for i in range(batch_size):
states.append (batch[i][1][0])
actions.append (batch[i][1][1])
rewards.append (batch[i][1][2])
next_states.append(batch[i][1][3])
dones.append (batch[i][1][4])
optimizer, loss, new_td_errors = train_step(
optimizer,
target_q_network,
                ( # copy the sampled batch data into accelerator (device) memory
jnp.asarray(states),
jnp.asarray(actions),
jnp.asarray(rewards),
jnp.asarray(next_states),
jnp.asarray(dones)
)
)
            # update the replay-memory priorities with the TD errors produced by this batch
new_td_errors = np.array(new_td_errors)
for i in range(batch_size):
idx = batch[i][0]
per_memory.update(idx, new_td_errors[i])
episode_rewards.append(reward)
state = new_state
            # every sync_steps steps, copy the improved online-network weights into the target network
if global_steps%sync_steps==0:
target_q_network = target_q_network.replace(params=optimizer.target.params)
if debug:
print("сайжруулсан жингүүдийг target неорон сүлжээрүү хууллаа")
if debug_render:
env.render()
if done:
print("{} - нийт reward : {}".format(episode, sum(episode_rewards)))
break
finally:
env.close()
| [
"[email protected]"
] | |
46c5ae5e26356785747347ea48c51bbee4873b11 | e8e918dcd948bd2c459456526dcb5199e796f338 | /pypeflow/tasks.py | 63500a9a0929d6ba806ccf430e02e3f2e07d774a | [
"BSD-3-Clause-Clear"
] | permissive | pb-cdunn/pypeFLOW | 201b8e01fa8904ec8dd99933ad6a0e730ce8898a | f66cc1e79c0ab2913f1691bf9d59a53e99ee3545 | refs/heads/master | 2021-01-18T17:34:08.557811 | 2018-07-18T19:09:21 | 2018-07-18T19:09:21 | 35,623,009 | 0 | 1 | null | 2015-05-14T16:38:31 | 2015-05-14T16:38:31 | null | UTF-8 | Python | false | false | 2,233 | py | from __future__ import absolute_import
import collections
import logging
import os
import pprint
from .simple_pwatcher_bridge import (PypeTask, Dist)
from . import io
LOG = logging.getLogger(__name__)
def task_generic_bash_script(self):
"""Generic script task.
The script template should be in
self.bash_template
The template will be substituted by
the content of "self" and of "self.parameters".
(That is a little messy, but good enough for now.)
"""
self_dict = dict()
self_dict.update(self.__dict__)
self_dict.update(self.parameters)
script_unsub = self.bash_template
script = script_unsub.format(**self_dict)
script_fn = 'script.sh'
with open(script_fn, 'w') as ofs:
ofs.write(script)
self.generated_script_fn = script_fn
def gen_task(script, inputs, outputs, parameters=None, dist=None):
"""
dist is used in two ways:
1) in the pwatcher, to control job-distribution
2) as additional parameters:
- params.pypeflow_nproc
- params.pypeflow_mb
"""
if parameters is None:
parameters = dict()
if dist is None:
dist = Dist()
LOG.debug('gen_task({}\n\tinputs={!r},\n\toutputs={!r})'.format(
script, inputs, outputs))
parameters = dict(parameters) # copy
parameters['pypeflow_nproc'] = dist.pypeflow_nproc
parameters['pypeflow_mb'] = dist.pypeflow_mb
LOG.debug(' parameters={}'.format(
pprint.pformat(parameters)))
LOG.debug(' dist.job_dict={}'.format(
pprint.pformat(dist.job_dict)))
def validate_dict(mydict):
"Python identifiers are illegal as keys."
try:
collections.namedtuple('validate', mydict.keys())
except ValueError as exc:
LOG.exception('Bad key name in task definition dict {!r}'.format(mydict))
raise
validate_dict(inputs)
validate_dict(outputs)
validate_dict(parameters)
make_task = PypeTask(
inputs={k: v for k,v in inputs.iteritems()},
outputs={k: v for k,v in outputs.iteritems()},
parameters=parameters,
bash_template=script,
dist=dist,
)
return make_task(task_generic_bash_script)
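# Rough usage sketch (paths and the script body are illustrative). Input,
# output and parameter keys must be usable as namedtuple field names, as
# enforced by validate_dict() above:
#
#   task = gen_task(
#       script='echo hello\n',
#       inputs={'reads': 'in/reads.fasta'},
#       outputs={'done': 'out/sentinel_done'},
#   )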
| [
"[email protected]"
] | |
25f0bf7c4128a06556e7b48b67c86602dc3f3c62 | 5e5de626395dec86f4bf127513c47a6acbaaff29 | /trainer_loader.py | 79f49219eacd2926c735fad43064f52f059f1282 | [] | no_license | wintercoming233/STConvLSTM | 9877416f2775aaab1d83143bc0bdd8de73023951 | 88575b75ac2d156224b39d58a4288e1a4877d99b | refs/heads/master | 2023-07-07T07:29:47.526322 | 2021-08-08T14:24:09 | 2021-08-08T14:24:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,081 | py | import os.path
import datetime
import cv2
import torch
import numpy as np
from skimage.metrics import structural_similarity
from utils import preprocess, metrics
from utils.hss import meteva_hss
def train(model, ims, real_input_flag, configs, itr=None):
cost = model.train(ims, real_input_flag, itr)
if configs.reverse_input:
ims_rev = torch.flip(ims, [1])
cost += model.train(ims_rev, real_input_flag)
cost = cost / 2
return cost
def test(model, test_input_handle, configs, itr=None):
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'test...')
res_path = os.path.join(configs.gen_frm_dir, str(itr))
os.mkdir(res_path)
avg_mse = 0
batch_id = 0
img_mse, ssim, psnr, fmae, sharp = [], [], [], [], []
hss = []
output_length = configs.total_length - configs.input_length
for i in range(configs.total_length - configs.input_length):
img_mse.append(0)
ssim.append(0)
psnr.append(0)
fmae.append(0)
sharp.append(0)
hss.append(0)
real_input_flag = np.zeros(
(configs.batch_size,
configs.total_length - configs.input_length - 1,
configs.img_width // configs.patch_size,
configs.img_width // configs.patch_size,
configs.patch_size ** 2 * configs.img_channel))
for ind, test_input in enumerate(test_input_handle):
test_ims = test_input.numpy() # test_ims shape: (batch, seq, channels, height, width)
test_ims = np.transpose(test_ims, (0, 1, 3, 4, 2))
batch_id = batch_id + 1
test_dat = preprocess.reshape_patch_tensor(test_input, configs.patch_size)
img_gen = model.test(test_dat, real_input_flag)
img_gen = preprocess.reshape_patch_back(img_gen, configs.patch_size)
img_out = img_gen[:, -output_length:]
# MSE per frame
for i in range(output_length):
x = test_ims[:, i + configs.input_length, :, :, :]
gx = img_out[:, i, :, :, :]
fmae[i] += metrics.batch_mae_frame_float(gx, x)
gx = np.maximum(gx, 0)
gx = np.minimum(gx, 1)
mse = np.square(x - gx).sum()
img_mse[i] += mse
avg_mse += mse
# hss[i] += meteva_hss(x, gx, [0.28, 0.43, 0.58, 0.73])
hss[i] += meteva_hss(x, gx, [0.5])
real_frm = np.uint8(x * 255)
pred_frm = np.uint8(gx * 255)
psnr[i] += metrics.batch_psnr(pred_frm, real_frm)
for b in range(configs.batch_size):
score, _ = structural_similarity(pred_frm[b], real_frm[b], full=True, multichannel=True)
ssim[i] += score
sharp[i] += np.max(cv2.convertScaleAbs(cv2.Laplacian(pred_frm[b], 3)))
# save prediction examples
if batch_id <= configs.num_save_samples:
path = os.path.join(res_path, str(batch_id))
os.mkdir(path)
for i in range(configs.total_length):
name = 'gt' + str(i + 1) + '.png'
file_name = os.path.join(path, name)
img_gt = np.uint8(test_ims[0, i, :, :, :] * 255)
cv2.imwrite(file_name, img_gt)
for i in range(output_length):
name = 'pd' + str(i + 1 + configs.input_length) + '.png'
file_name = os.path.join(path, name)
img_pd = img_out[0, i, :, :, :]
img_pd = np.maximum(img_pd, 0)
img_pd = np.minimum(img_pd, 1)
img_pd = np.uint8(img_pd * 255)
cv2.imwrite(file_name, img_pd)
avg_mse = avg_mse / (batch_id * configs.batch_size)
print('mse per seq: ' + str(avg_mse))
for i in range(configs.total_length - configs.input_length):
print(img_mse[i] / (batch_id * configs.batch_size))
ssim = np.asarray(ssim, dtype=np.float32) / (configs.batch_size * batch_id)
psnr = np.asarray(psnr, dtype=np.float32) / batch_id
fmae = np.asarray(fmae, dtype=np.float32) / batch_id
sharp = np.asarray(sharp, dtype=np.float32) / (configs.batch_size * batch_id)
hss = np.asarray(hss, dtype=np.float32) / batch_id
print('ssim per frame: ' + str(np.mean(ssim)))
for i in range(configs.total_length - configs.input_length):
print(ssim[i])
print('psnr per frame: ' + str(np.mean(psnr)))
for i in range(configs.total_length - configs.input_length):
print(psnr[i])
print('fmae per frame: ' + str(np.mean(fmae)))
for i in range(configs.total_length - configs.input_length):
print(fmae[i])
print('sharpness per frame: ' + str(np.mean(sharp)))
for i in range(configs.total_length - configs.input_length):
print(sharp[i])
print('HSS per frame: ' + str(np.mean(hss)))
for i in range(configs.total_length - configs.input_length):
print(hss[i])
return avg_mse, ssim, psnr, fmae, sharp
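# The `configs` object used above is assumed to be an argparse.Namespace (or
# similar) carrying at least these fields; the example values are illustrative:
#
#   configs.input_length      # e.g. 10 observed frames
#   configs.total_length      # e.g. 20 frames in total (10 to predict)
#   configs.img_width         # e.g. 64
#   configs.img_channel       # e.g. 1
#   configs.patch_size        # e.g. 4
#   configs.batch_size        # e.g. 8
#   configs.num_save_samples  # e.g. 10
#   configs.gen_frm_dir       # directory for generated frames
#   configs.reverse_input     # bool, used by train()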
| [
"[email protected]"
] | |
c71f8c6edf0ba58bc9d75fdb8e9ba3492c5ef660 | 1f8669bcef836c9ab425d5546669e4152072e026 | /polls/migrations/0002_auto_20180324_1400.py | c48496c101bdada224d346edeb45d3e53d029952 | [] | no_license | IsraelGboluwaga/simpleVotingApp | b28853ba6e92bed26b3704953cb5f1b0e8f6e89a | 273f6c5d9b4d330dfcc9938d30c281179479aa0c | refs/heads/master | 2021-04-15T13:07:13.203458 | 2018-04-02T14:16:08 | 2018-04-02T14:16:08 | 126,217,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-24 13:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('polls', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='question',
old_name='publication_date',
new_name='pub_date',
),
]
| [
"[email protected]"
] | |
70602041cbab8d4992431d76a1604543db8d9a90 | 27fc8b3dff7145b7356b855d194cb7581d4bc401 | /tools/abbr_military_json.py | f47c7d6e62030b707ce4d82a27cd420a5222b536 | [
"MIT"
] | permissive | dragstor/timezones | 0defc1f7c2a326caa507b2aa0eb880870673f845 | 03b100ec31e187760abbb03544211eff9357f3bc | refs/heads/master | 2023-03-20T08:17:39.521992 | 2021-03-16T12:33:03 | 2021-03-16T12:33:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | import json
import sys
def abbreviations():
data = {}
abbrs = {}
military_timezones_json = sys.argv[1]
with open(military_timezones_json) as f:
data = json.load(f)
for d in data:
short = d["short"]["standard"]
long = d["long"]["standard"]
offset = d["standard_offset"]
offset_hhmm = d["standard_offset_hhmm"]
abbrs[short] = {
"name": long,
"offset": offset,
"offset_hhmm": offset_hhmm,
}
print(json.dumps(abbrs))
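# Expected input (shape inferred from the fields read above; values are
# illustrative): a JSON array of military time zone records such as
#   [{"short": {"standard": "Z"}, "long": {"standard": "Zulu Time Zone"},
#     "standard_offset": 0, "standard_offset_hhmm": "+00:00"}, ...]
# Run as:  python abbr_military_json.py military_timezones.json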
if __name__ == "__main__":
abbreviations() | [
"[email protected]"
] | |
4b4597f60cfa9cd2707f7483474d21f69906a591 | 090a4e026addc9e78ed6118f09fd0d7d4d517857 | /validators/_surface.py | 31fb8ef2e83c14aa5d42ecd7683068c569c1b16b | [
"MIT"
] | permissive | wwwidonja/new_plotly | 0777365e53ea7d4b661880f1aa7859de19ed9b9a | 1bda35a438539a97c84a3ab3952e95e8848467bd | refs/heads/master | 2023-06-04T19:09:18.993538 | 2021-06-10T18:33:28 | 2021-06-10T18:33:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,887 | py | import _plotly_utils.basevalidators
class SurfaceValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="surface", parent_name="", **kwargs):
super(SurfaceValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Surface"),
data_docs=kwargs.pop(
"data_docs",
"""
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `colorscale`. In case
`colorscale` is unspecified or `autocolorscale`
is true, the default palette will be chosen
according to whether numbers in the `color`
array are all positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here z
or surfacecolor) or the bounds set in `cmin`
and `cmax` Defaults to `false` when `cmin` and
`cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Value
should have the same units as z or surfacecolor
and if set, `cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `cmin` and/or `cmax` to be equidistant
to this point. Value should have the same units
as z or surfacecolor. Has no effect when
`cauto` is `false`.
cmin
Sets the lower bound of the color domain. Value
should have the same units as z or surfacecolor
and if set, `cmax` must be set as well.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`new_plotly.graph_objects.surface.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an
array containing arrays mapping a normalized
value to an rgb, rgba, hex, hsl, hsv, or named
color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required.
For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use`cmin` and
`cmax`. Alternatively, `colorscale` may be a
palette name string of the following list: Grey
s,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,
Picnic,Rainbow,Portland,Jet,Hot,Blackbody,Earth
,Electric,Viridis,Cividis.
connectgaps
Determines whether or not gaps (i.e. {nan} or
missing values) in the `z` data are filled in.
contours
:class:`new_plotly.graph_objects.surface.Contours`
instance or dict with compatible properties
customdata
            Assigns extra data to each datum. This may be
            useful when listening to hover, click and
            selection events. Note that "scatter" traces
            also append customdata items in the markers
            DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud
for customdata .
hidesurface
Determines whether or not a surface is drawn.
For example, set `hidesurface` to False
`contours.x.show` to True and `contours.y.show`
to True to draw a wire frame plot.
hoverinfo
Determines which trace information appear on
hover. If `none` or `skip` are set, no
information is displayed upon hovering. But, if
`none` is set, click and hover events are still
fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud
for hoverinfo .
hoverlabel
:class:`new_plotly.graph_objects.surface.Hoverlabel
` instance or dict with compatible properties
hovertemplate
Template string used for rendering the
information that appear on hover box. Note that
this will override `hoverinfo`. Variables are
inserted using %{variable}, for example "y:
%{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for
several points, "xother" will be added to those
with different x positions from the first
point. An underscore before or after
"(x|y)other" will add a space on that side,
only when this field is shown. Numbers are
formatted using d3-format's syntax
%{variable:d3-format}, for example "Price:
%{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}".
https://github.com/d3/d3-time-
format#locale_format for details on the date
formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event
data described at this link
https://plotly.com/javascript/plotlyjs-
events/#event-data. Additionally, every
attributes that can be specified per-point (the
ones that are `arrayOk: true`) are available.
Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud
for hovertemplate .
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud
for hovertext .
ids
Assigns id labels to each datum. These ids for
object constancy of data points during
animation. Should be an array of strings, not
numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud
for ids .
legendgroup
Sets the legend group for this trace. Traces
part of the same legend group hide/show at the
same time when toggling legend items.
lighting
:class:`new_plotly.graph_objects.surface.Lighting`
instance or dict with compatible properties
lightposition
:class:`new_plotly.graph_objects.surface.Lightposit
ion` instance or dict with compatible
properties
meta
Assigns extra meta information associated with
this trace that can be used in various text
attributes. Attributes such as trace `name`,
graph, axis and colorbar `title.text`,
annotation `text` `rangeselector`,
`updatemenues` and `sliders` `label` text all
support `meta`. To access the trace `meta`
values in an attribute in the same trace,
simply use `%{meta[i]}` where `i` is the index
or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or
key of the `meta` and `n` is the trace index.
metasrc
Sets the source reference on Chart Studio Cloud
for meta .
name
Sets the trace name. The trace name appear as
the legend item and on hover.
opacity
Sets the opacity of the surface. Please note
that in the case of using high `opacity` values
for example a value greater than or equal to
0.5 on two surfaces (and 0.25 with four
surfaces), an overlay of multiple transparent
surfaces may not perfectly be sorted in depth
by the webgl API. This behavior may be improved
in the near future and is subject to change.
opacityscale
Sets the opacityscale. The opacityscale must be
an array containing arrays mapping a normalized
value to an opacity value. At minimum, a
mapping for the lowest (0) and highest (1)
values are required. For example, `[[0, 1],
[0.5, 0.2], [1, 1]]` means that higher/lower
values would have higher opacity values and
those in the middle would be more transparent
Alternatively, `opacityscale` may be a palette
name string of the following list: 'min',
'max', 'extremes' and 'uniform'. The default is
'uniform'.
reversescale
Reverses the color mapping if true. If true,
`cmin` will correspond to the last color in the
array and `cmax` will correspond to the first
color.
scene
Sets a reference between this trace's 3D
coordinate system and a 3D scene. If "scene"
(the default value), the (x,y,z) coordinates
refer to `layout.scene`. If "scene2", the
(x,y,z) coordinates refer to `layout.scene2`,
and so on.
showlegend
Determines whether or not an item corresponding
to this trace is shown in the legend.
showscale
Determines whether or not a colorbar is
displayed for this trace.
stream
:class:`new_plotly.graph_objects.surface.Stream`
instance or dict with compatible properties
surfacecolor
Sets the surface color values, used for setting
a color scale independent of `z`.
surfacecolorsrc
Sets the source reference on Chart Studio Cloud
for surfacecolor .
text
Sets the text elements associated with each z
value. If trace `hoverinfo` contains a "text"
flag and "hovertext" is not set, these elements
will be seen in the hover labels.
textsrc
Sets the source reference on Chart Studio Cloud
for text .
uid
Assign an id to this trace, Use this to provide
object constancy between traces during
animations and transitions.
uirevision
Controls persistence of some user-driven
changes to the trace: `constraintrange` in
`parcoords` traces, as well as some `editable:
true` modifications such as `name` and
`colorbar.title`. Defaults to
`layout.uirevision`. Note that other user-
driven trace attribute changes are controlled
by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and
`colorbar.(x|y)` (accessible with `config:
{editable: true}`) is controlled by
`layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on
trace index if no `uid` is provided. So if your
app can add/remove traces before the end of the
`data` array, such that the same trace has a
different index, you can still preserve user-
driven changes if you give each trace a `uid`
that stays with it as it moves.
visible
Determines whether or not this trace is
visible. If "legendonly", the trace is not
drawn, but can appear as a legend item
(provided that the legend itself is visible).
x
Sets the x coordinates.
xcalendar
Sets the calendar system to use with `x` date
data.
xhoverformat
Sets the hover text formatting rule for `x`
using d3 formatting mini-languages which are
very similar to those in Python. See:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-time-
format#locale_format By default the values are
formatted using `xaxis.hoverformat`.
xsrc
Sets the source reference on Chart Studio Cloud
for x .
y
Sets the y coordinates.
ycalendar
Sets the calendar system to use with `y` date
data.
yhoverformat
Sets the hover text formatting rule for `y`
using d3 formatting mini-languages which are
very similar to those in Python. See:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-time-
format#locale_format By default the values are
formatted using `yaxis.hoverformat`.
ysrc
Sets the source reference on Chart Studio Cloud
for y .
z
Sets the z coordinates.
zcalendar
Sets the calendar system to use with `z` date
data.
zhoverformat
Sets the hover text formatting rule for `z`
using d3 formatting mini-languages which are
very similar to those in Python. See:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-time-
format#locale_format By default the values are
formatted using `zaxis.hoverformat`.
zsrc
Sets the source reference on Chart Studio Cloud
for z .
""",
),
**kwargs
)
| [
"[email protected]"
] | |
f236ed6ef77bd2d25a490f8d3c104a5ccdbda779 | 937abf33679194b0d7f2dcd87701ea1cd8170e3a | /config/debug/wsgi.py | d9335a170b3bab110449aedf3afba55ca6ce19ed | [
"Apache-2.0"
] | permissive | saengate/django | 7630944e372eec358e3fc525d867c792bc5993fc | 699a42f661e7cf0d1461553775caee65f6823f09 | refs/heads/master | 2023-01-01T20:36:32.175154 | 2020-08-01T18:01:59 | 2020-08-01T18:01:59 | 280,951,392 | 0 | 0 | Apache-2.0 | 2020-08-01T18:01:14 | 2020-07-19T21:12:04 | Python | UTF-8 | Python | false | false | 394 | py | """
WSGI config for the mysite Django project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.debug.settings')
application = get_wsgi_application()
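# Typical usage with a WSGI server (server choice is an assumption):
#   gunicorn config.debug.wsgi:application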
| [
"[email protected]"
] | |
3ebdf22d2266da47cd2b18e8b8b07f155f61fd60 | 578c3b3edd7e40b4e6ac317cd4864a90a21d8498 | /homework04/network.py | 0da0b0b62d9ae1caee59af937c4ea88411ba2b0c | [] | no_license | h3ic/cs102 | 44527ecd27bdda1f96940d52ce05e2596827b020 | e60cf2f7eb01fca3b7fb09a8c4d56c43eefebbdd | refs/heads/master | 2020-08-02T11:13:37.718242 | 2020-02-15T17:57:43 | 2020-02-15T17:57:43 | 210,152,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,138 | py | from api import get_friends
import time
import igraph
from igraph import Graph, plot
import numpy as np
def get_network(user_id, as_edgelist=True, backoff_factor=0.3):
response = get_friends(user_id, 'id')['response']['items']
uid_list = []
network = []
sec_uid_list = []
for l in range(len(response)):
uid = response[l]['id']
name = response[l]['first_name'] + ' ' + response[l]['last_name']
uid_list.append((uid, name))
for i in range(len(uid_list)):
n = 1
try:
sec_friends = get_friends(
uid_list[i][0], 'id')['response']['items']
except KeyError:
print('Wait...')
delay = backoff_factor * (2 ** n)
time.sleep(delay)
continue
for j in range(len(sec_friends)):
sec_uid = sec_friends[j]['id']
for k in range(len(uid_list)):
if uid_list[k][0] == sec_uid:
network.append((i, k))
for i in range(len(uid_list)):
print(f'{i}: {uid_list[i][1]}')
return network
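# The returned `network` is an edge list of index pairs (i, k); each index
# refers to a position in `uid_list`, i.e. to one of the user's friends.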
def plot_graph(graph, get_list=True):
last_vert = 0
for m in range(len(graph)):
for n in graph[m]:
if last_vert > max(graph[m]):
continue
last_vert = max(graph[m])
vertices = [i for i in range(last_vert + 1)]
g = Graph(vertex_attrs={"label": vertices},
edges=graph, directed=False
)
N = len(vertices)
visual_style = {}
visual_style["layout"] = g.layout_fruchterman_reingold(
maxiter=1000,
area=N**3,
repulserad=N**3)
g.simplify(multiple=True, loops=True)
try:
communities = g.community_edge_betweenness(directed=False)
clusters = communities.as_clustering()
print(clusters)
pal = igraph.drawing.colors.ClusterColoringPalette(len(clusters))
g.vs['color'] = pal.get_many(clusters.membership)
except igraph._igraph.InternalError:
pass
finally:
plot(g, **visual_style)
if __name__ == '__main__':
    import sys
    # id of the VK user whose friend graph should be built and plotted
    user_id = int(sys.argv[1])
    plot_graph(get_network(user_id))
| [
"[email protected]"
] | |
dc80b8601ea6ff685a1f646069add3de3a332136 | e6a56bd42659de335208770ac875190e91d9a9c6 | /old/ca.py | a5ddd32f54cdb17e3cc0510c473796d8eb16bd2b | [] | no_license | daanjo3/ICS2017 | d3a812bc133106a2dff367cb438f79ea1045f8b2 | 73c73ac651345b45b662237198fe3199a22e2b2c | refs/heads/master | 2020-05-27T21:20:05.360683 | 2017-03-06T20:00:40 | 2017-03-06T20:00:40 | 83,663,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,753 | py | # Daan Meijers, 10727167
# Steven Raaijmakers, 10804242
from __future__ import division
import numpy as np
import random
from classes import Humans, Mosquito
from pyics import Model
class CASim(Model):
def __init__(self):
Model.__init__(self)
self.t = 0
self.config = None
self.mosq = []
#
self.healthy = 0
self.sick = 0
self.prevalence = 0
self.prevalences = []
self.make_param('humans', 0.7)
self.make_param('mosquitos', 1.2)
self.make_param('m_infected', 0.5)
self.make_param('p_mosquito_human', 1.0)
self.make_param('p_human_mosquito', 1.0)
self.make_param('prevention', 0.0)
self.make_param('width', 100)
self.make_param('height', 100)
# Builds the initial simulation grid w/ humans & mosquitos
def build(self):
m_amount = int(self.mosquitos * self.width * self.height)
for i in range(m_amount):
if random.random() < self.m_infected:
self.mosq.append(Mosquito(self.width, self.height, 1))
else:
self.mosq.append(Mosquito(self.width, self.height, 0))
self.config = Humans(self.width, self.height)
self.config.build(self.humans)
# Set a specific amount of states to 1, 2 or 3
# Create a list with a certain amount of mosquitos on random locations
def reset(self):
"""Initializes the configuration of the cells and converts the entered
rule number to a rule set."""
self.t = 0
self.build()
self.prevalences = []
def draw(self):
"""Draws the current state of the grid."""
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
cm = LinearSegmentedColormap.from_list("my_cmap", ['white','green','red','c'])
plt.cla()
if not plt.gca().yaxis_inverted():
plt.gca().invert_yaxis()
plt.imshow(self.config.state, interpolation='none', vmin=0, vmax=3,
cmap=cm)
plt.axis('image')
plt.title('t = %d' % self.t)
# First loops over the humans which are infected
def step(self):
"""Performs a single step of the simulation by advancing time (and thus
row) and applying the rule to determine the state of the cells."""
# Loop over mosquitos for interactions with humans
# Then update the states of all humans
# print "CHECK MOSQUITOS"
for m in self.mosq:
x, y = m.coordinate
state = self.config.state[x, y]
if m.hunger > 0:
m.walk()
elif state == 0:
m.walk()
else:
                # Mosquito bites
m.hunger = 7
if state == 1 and m.infected == 1:
# Chance of getting infected after 1 bite
if random.random() < self.p_human_mosquito:
# has net
if random.random() > self.prevention:
self.config.state[x, y] = 2
else:
self.config.state[x, y] = 1
if state == 2 and m.infected == 0:
if random.random() < self.p_mosquito_human:
m.infected = 1
else:
m.infected = 0
self.config.update()
# print "-UPDATE-"
if self.t % 100 == 0:
self.update_stats()
self.print_update()
self.t += 1
def update_stats(self):
healthy = 0
sick = 0
for i in range(3):
xnew, _ = np.where(self.config.state==i+1)
if i+1 != 2:
healthy += xnew.size
else:
sick += xnew.size
self.healthy = healthy
self.sick = sick
self.prevalence = sick / (healthy + sick) * 100.0
self.prevalences.append(self.prevalence)
def percentage(self, begin=0, end=14):
ages = []
zeros = 0
for i in range(0, self.width):
for j in range(0, self.height):
age = self.config.age[i, j] / 365
if age > begin and age < (end + 1):
ages.append(age)
elif age == 0:
zeros += 1
return len(ages) / ((self.height * self.width) - zeros) * 100
def print_update(self):
print "T:" + str(self.t) + ", Prevalence: " + str(self.prevalence)
# print "Healthy: " + str(self.healthy)
# print "Sick: " + str(self.sick)
def set_params(self, dict):
self.width = dict["width"]
self.height = dict["height"]
self.humans = dict["humans"]
self.mosquitos = dict["mosquitos"]
self.m_infected = dict["m_infected"]
self.p_mosquito_human = dict["p_mosquito_human"]
self.p_human_mosquito = dict["p_human_mosquito"]
self.prevention = dict["has_mosquito_net"]
def run(self, t=1000):
self.reset()
self.update_stats()
print "INITIAL CONDITIONS:"
print str(self.height) + " x " + str(self.width)
print "Humans: " + str(self.healthy + self.sick) + ", Age 0-14: " + str(self.percentage())
print "Mosquitos: " + str(self.mosquitos) + ", Infected: " + str(self.m_infected)
print "P_mosquito_human: " + str(self.p_mosquito_human) + ", P_human_mosquito: " + str(self.p_human_mosquito) + "\n"
for i in range(t + 1):
self.step()
return self.prevalences
if __name__ == '__main__':
sim = CASim()
parameters = {
# percentage of human on field
"humans": 0.5,
# percentage of mosquitos on field
"mosquitos": 1.2,
        # percentage of infected mosquitoes
"m_infected": 0.5,
"has_mosquito_net": 1.0,
# probability of mosquito getting infected by human with malaria
"p_mosquito_human": 1.0,
# probability of human getting infected by mosquito with malaria
"p_human_mosquito": 1.0,
}
n = 1
ls = 5
# TODO: choose ranges
    # try n randomly chosen parameter configurations
for i in range(0, n):
parameters["height"] = 100
parameters["width"] = 100
parameters["humans"] = 0.6
parameters["mosquitos"] = 2.0
parameters["m_infected"] = 0.5
parameters["p_mosquito_human"] = 1.0
parameters["p_human_mosquito"] = 0.3
parameters["has_mosquito_net"] = 0.3
sim.set_params(parameters)
sim.run()
# print "avg prevalence of last " + str(ls) + " items: " + str(sum(prevalences[-ls:]) / ls)
| [
"[email protected]"
] | |
b9a793d5f3cef9345946fb2a9d59e139da3e6d5c | 722e1701b36163fed54f9bd8a6e52187eb139371 | /TxGstPlayer.py | 5088c4504607672e02f7810d42dc00a7b7a44974 | [] | no_license | ystoto/AerialAcousticCommunication | efc555004b408a867519025150f75a0559a81ef3 | 8ca1312320f7b916e850af993b57fe4ae696108c | refs/heads/master | 2021-01-25T11:03:57.549367 | 2017-09-18T15:24:38 | 2017-09-18T15:24:38 | 93,912,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,611 | py | # import gi
# gi.require_version('Gst', '1.0')
# from gi.repository import Gst
# import time
# import asyncio
#
# VIDEO_PATH = "C:\\Users\\Public\\Videos\\Sample Videos\\SleepAway_stft.mp4"
#
#
# if __name__ == "__main__":
# Gst.init(None)
# pipeline = Gst.parse_launch("playbin uri=C:\\Users\\Public\\Videos\\Sample\ Videos\\SleepAway_stft.mp4")
# pipeline.set_state(Gst.State.PLAYING)
# bus = pipeline.get_bus()
# msg = bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE, Gst.MessageType.ERROR | Gst.MessageType.EOS)
# time.sleep(1000)
####################################################################################
# !/usr/bin/env python
import os
import sys
import ctypes
import pyaudio
import gi
import numpy as np
import wave
import struct
import datetime
gi.require_version('Gtk', '3.0')
gi.require_version('Gst', '1.0')
gi.require_version('GstVideo', '1.0')
from gi.repository import Gst, GObject, Gtk
from gi.repository import GdkX11, GstVideo
import asyncio
import Tx
import wm_util as UT
from wm_util import norm_fact
import time
from subprocess import call
VIDEO_PATH = "SleepAway_stft.mp4"
PLAYING = 0
PAUSED = 1
STOPPED = 2
bands_range = [29, 59, 119, 237, 474, 947, 1889, 3770, 7523, 15011]
class GTK_Main(object):
def __init__(self):
self.play_status = STOPPED
self.IS_GST010 = Gst.version()[0] == 0
self.volume = 100
window = Gtk.Window(Gtk.WindowType.TOPLEVEL)
window.set_title("WM-Player")
window.set_default_size(450, -1)
window.connect("destroy", Gtk.main_quit, "WM destroy")
self._init_audio_buffer()
vbox = Gtk.VBox()
#vbox = Gtk.Box(Gtk.Orientation.HORIZONTAL, 0)
vbox.set_margin_top(3)
vbox.set_margin_bottom(3)
window.add(vbox)
# input target media file
hbox_0st_line = Gtk.HBox()
vbox.pack_start(hbox_0st_line, False, False, 0)
self.label_target = Gtk.Label(label='target file')
self.label_target.set_margin_left(6)
self.label_target.set_margin_right(6)
hbox_0st_line.pack_start(self.label_target, False, False, 0)
self.entry_target = Gtk.Entry()
self.entry_target.set_text(VIDEO_PATH)
hbox_0st_line.add(self.entry_target)
hbox_1st_line = Gtk.HBox()
vbox.pack_start(hbox_1st_line, False, False, 0)
# self.entry = Gtk.Entry()
# hbox.add(self.entry)
# play button
self.playButtonImage = Gtk.Image()
self.playButtonImage.set_from_stock("gtk-media-play", Gtk.IconSize.BUTTON)
self.playButton = Gtk.Button.new()
self.playButton.add(self.playButtonImage)
self.playButton.connect("clicked", self.playToggled)
hbox_1st_line.pack_start(self.playButton, False, False, 0)
# stop button
self.stopButtonImage = Gtk.Image()
self.stopButtonImage.set_from_stock("gtk-media-stop", Gtk.IconSize.BUTTON)
self.stopButton = Gtk.Button.new()
self.stopButton.add(self.stopButtonImage)
self.stopButton.connect("clicked", self.stopToggled)
hbox_1st_line.pack_start(self.stopButton, False, False, 0)
# seek to given position
self.seek_entry = Gtk.Entry()
hbox_1st_line.add(self.seek_entry)
self.seekButtonImage = Gtk.Image()
self.seekButtonImage.set_from_stock("gtk-jump-to", Gtk.IconSize.BUTTON)
self.seekButton = Gtk.Button.new()
self.seekButton.add(self.seekButtonImage)
self.seekButton.connect("clicked", self.seekToggled)
hbox_1st_line.pack_start(self.seekButton, False, False, 0)
#hbox_1st_line.add(self.seekButton)
# seek slider
hbox_2nd_line = Gtk.HBox()
vbox.pack_start(hbox_2nd_line, False, False, 0)
self.label_progress = Gtk.Label(label='progress')
self.label_progress.set_margin_left(6)
self.label_progress.set_margin_right(6)
hbox_2nd_line.pack_start(self.label_progress, False, False, 0)
self.progress_slider = Gtk.HScale()
self.progress_slider.set_margin_left(6)
self.progress_slider.set_margin_right(6)
self.progress_slider.set_draw_value(False)
self.progress_slider.set_range(0, 100)
self.progress_slider.set_increments(1, 10)
hbox_2nd_line.pack_start(self.progress_slider, True, True, 0)
self.progress_label = Gtk.Label(label='0:00')
self.progress_label.set_margin_left(6)
self.progress_label.set_margin_right(6)
hbox_2nd_line.pack_start(self.progress_label, False, False, 0)
# # volume slider
hbox_3rd_line = Gtk.HBox()
vbox.pack_start(hbox_3rd_line, False, False, 0)
self.volume_label = Gtk.Label(label='volume ')
self.volume_label.set_margin_left(6)
self.volume_label.set_margin_right(6)
hbox_3rd_line.pack_start(self.volume_label, False, False, 0)
self.volume_slider = Gtk.HScale()
self.volume_slider.set_margin_left(6)
self.volume_slider.set_margin_right(6)
self.volume_slider.set_draw_value(False)
self.volume_slider.set_range(0, 100)
self.volume_slider.set_increments(1, 10)
self.volume_slider.set_value(self.volume)
self.volume_slider.connect("value-changed", self.volume_changed_cb)
hbox_3rd_line.pack_start(self.volume_slider, True, True, 0)
self.volume_value = Gtk.Label(label='0')
self.volume_value.set_margin_left(6)
self.volume_value.set_margin_right(6)
self.volume_value.set_text(str(self.volume))
hbox_3rd_line.pack_start(self.volume_value, False, False, 0)
# equalizer preset combobox
hbox_4rd_line = Gtk.Box()
vbox.pack_start(hbox_4rd_line, False, False, 0)
self.eq_label = Gtk.Label(label='equalizer')
self.eq_label.set_margin_left(6)
self.eq_label.set_margin_right(6)
hbox_4rd_line.pack_start(self.eq_label, False, False, 0)
self.eq_textbox = Gtk.ComboBoxText()
self.eq_bands_dict = loadEqPresetFile()
for key in self.eq_bands_dict.keys():
self.eq_textbox.append(key, key)
self.eq_textbox.set_valign(Gtk.Align.CENTER)
self.eq_textbox.connect("changed", self.eq_changed_cb)
hbox_4rd_line.pack_start(self.eq_textbox, False, False, 0)
self.eq_slider = [Gtk.VScale() for i in range(10)]
for i in range(10):
self.eq_slider[i].set_draw_value(True)
self.eq_slider[i].set_value_pos(Gtk.PositionType.BOTTOM)
self.eq_slider[i].set_size_request(18,200)
self.eq_slider[i].set_range(-24, +12)
self.eq_slider[i].set_inverted(True)
hbox_4rd_line.pack_start(self.eq_slider[i], True, True, 0)
hbox_5th_line = Gtk.HBox()
vbox.pack_start(hbox_5th_line , False, False, 0)
# seek to given position
self.label_wm = Gtk.Label(label='watermark msg')
self.label_wm.set_margin_left(6)
self.label_wm.set_margin_right(6)
hbox_5th_line.pack_start(self.label_wm, False, False, 0)
self.wm_entry = Gtk.Entry()
self.wm_entry.set_text("www.naver.com")
hbox_5th_line.add(self.wm_entry)
self.wmButtonImage = Gtk.Image()
self.wmButtonImage.set_from_stock("gtk-go-down", Gtk.IconSize.BUTTON)
self.wmButton = Gtk.Button.new()
self.wmButton.add(self.wmButtonImage)
self.wmButton.connect("clicked", self.wmToggled)
hbox_5th_line.pack_start(self.wmButton, False, False, 0)
#hbox_1st_line.add(self.seekButton)
self.movie_window = Gtk.DrawingArea()
vbox.add(self.movie_window)
window.show_all()
self.player = Gst.ElementFactory.make("playbin", "player")
vsink = Gst.ElementFactory.make("autovideosink", "vsink")
self.player.set_property("video-sink", vsink)
self.pyaudio = None
asink = self._get_audiosink_bin()
self.player.set_property("audio-sink", asink)
bus = self.player.get_bus()
bus.add_signal_watch()
bus.enable_sync_message_emission()
bus.connect("message", self.on_message)
bus.connect("sync-message::element", self.on_sync_message)
def __exit__(self, exc_type, exc_value, traceback):
        # Close the wav dump(s) opened lazily in handoff_cb, if any.
        # self.before_wav.close()
        if getattr(self, 'after_wav', None) is not None:
            self.after_wav.close()
            print("after wav closed")
def _init_audio_buffer(self):
self.audio_dump_cnt = 0
self.src = UT.RIngBuffer(44100 * 30)
self.sink = UT.RIngBuffer(44100 * 30)
self.thread = Tx.Start(self.src, self.sink) # TODO: 1.2 second
def _get_audiosink_bin(self):
self.asink_bin = Gst.Bin.new('asinkbin')
self.eq = Gst.ElementFactory.make("equalizer-10bands", "eq")
# g_object_set(G_OBJECT(equalizer), "band1", (gdouble) - 24.0, NULL);
# g_object_set(G_OBJECT(equalizer), "band2", (gdouble) - 24.0, NULL);
self.vol = Gst.ElementFactory.make("volume", "vol")
self.asink = Gst.ElementFactory.make("fakesink", "asink")
self.asink.set_property("signal-handoffs", 1)
sigid = self.asink.connect('handoff', self.handoff_cb)
print("sigid:", sigid)
self.asink_bin.add(self.eq)
self.asink_bin.add(self.vol)
self.asink_bin.add(self.asink)
self.eq.link(self.vol)
self.vol.link(self.asink)
gp = Gst.GhostPad.new('sink', self.eq.get_static_pad('sink'))
self.asink_bin.add_pad(gp) # Only avaiable after bin.add(eq)
return self.asink_bin
def eq_changed_cb(self, combobox):
print("****", sys._getframe().f_code.co_name, combobox.get_active_id())
bands = self.eq_bands_dict[combobox.get_active_id()]
for i in range(10):
self.eq_slider[i].set_value(bands[i])
self.eq.set_property("band%d" % i, bands[i])
print ("eq_changed: ", bands)
def volume_changed_cb(self, gst_range):
print("****", sys._getframe().f_code.co_name, " - volume: ", int(gst_range.get_value()))
self.vol.set_property("volume", gst_range.get_value() / 100.0)
def handoff_cb(self, element, buffer, pad):
self.audio_dump_cnt += 1
if self.pyaudio == None:
in_format, in_rate, in_channels, in_type = getAudioInfo(pad.get_current_caps())
print(pad.get_current_caps().to_string())
print ("audio format: %d, rate: %d, ch: %d, type: %s " % (in_format, in_rate, in_channels, in_type))
self.pyaudio = pyaudio.PyAudio()
self.stream = self.pyaudio.open(format=self.pyaudio.get_format_from_width(in_format),
channels=in_channels,
rate=in_rate,
output=True)
if in_type.find('f') >= 0:
audio_dtype = [None, np.float, np.float16, None, np.float32]
elif in_type.find('s') >= 0:
audio_dtype = [None, np.int8, np.int16, None, np.int32]
else:
audio_dtype = [None, np.uint8, np.uint16, None, np.uint32]
dt = np.dtype(audio_dtype[in_format])
if in_type.find("le"):
self.audio_dtype = dt.newbyteorder('<')
else:
self.audio_dtype = dt.newbyteorder('>')
print("audio_dtype : ", self.audio_dtype)
# self.before_wav = wave.open("before.wav", "wb")
# self.before_wav.setparams((in_channels, in_format, in_rate, 0, 'NONE', 'not compressed'))
self.after_wav = wave.open("after.wav", "wb")
self.after_wav.setparams((in_channels, in_format, in_rate, 0, 'NONE', 'not compressed'))
(ret, info) = buffer.map(Gst.MapFlags.READ)
if ret == True:
# TODO: wav dump - wave.py only support integer value not floating point.
rawdata = np.frombuffer(info.data, dtype=self.audio_dtype)
# self.before_wav.writeframesraw(np.int32(rawdata * norm_fact['int32']).tobytes())
org_type_name = rawdata.dtype.name
normalized_rawdata = np.float32(rawdata) / norm_fact[org_type_name] # normalize rawdata , -1 to 1
# print("IN ", normalized_rawdata[:10], normalized_rawdata[-10:], normalized_rawdata.dtype, type(normalized_rawdata))
self.src.write(normalized_rawdata)
watermarked_data = self.sink.read(rawdata.size)
watermarked_data *= norm_fact[org_type_name]
watermarked_data = watermarked_data.astype(dtype=self.audio_dtype, copy=False)
# print("OU ", watermarked_data[:30], watermarked_data[-30:], watermarked_data.dtype, type(watermarked_data))
self.stream.write(watermarked_data.tobytes())
# todo: change the formula according to the input format
#self.after_wav.writeframesraw(np.int32(watermarked_data * norm_fact['int32']).tobytes())
#self.after_wav.writeframesraw(np.int16(watermarked_data).tobytes())
#print ("output data: ", ret, buffer.pts, info.size)
def _on_video_realize(self, widget):
# The window handle must be retrieved first in GUI-thread and before
# playing pipeline.
video_window = self.movie_window.get_property('window')
if sys.platform == "win32":
if not video_window.ensure_native():
print("Error - video playback requires a native window")
ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p
ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object]
drawingarea_gpointer = ctypes.pythonapi.PyCapsule_GetPointer(video_window.__gpointer__, None)
gdkdll = ctypes.CDLL("libgdk-3-0.dll")
self._video_window_handle = gdkdll.gdk_win32_window_get_handle(drawingarea_gpointer)
print("111")
#widget.set_window_handle(self._video_window_handle)
else:
self._video_window_handle = video_window.get_xid()
print("222")
def on_message(self, bus, message):
#print("on_message")
t = message.type
if t == Gst.MessageType.EOS:
self.player.set_state(Gst.State.NULL)
self.stopToggled(None)
elif t == Gst.MessageType.ERROR:
self.player.set_state(Gst.State.NULL)
err, debug = message.parse_error()
print("Error: %s" % err, debug)
self.stopToggled(None)
self.updateButtons()
def on_sync_message(self, bus, message):
if message.get_structure().get_name() == 'prepare-window-handle':
imagesink = message.src
imagesink.set_property("force-aspect-ratio", True)
#imagesink.set_window_handle(self.movie_window.get_property('window').get_xid())
#self._on_video_realize(imagesink)
# def on_finished(self, player):
# self.play_status = STOPPED
# self.progress_slider.set_value(0)
# self.progress_label.set_text("0:00")
# self.updateButtons()
def play(self):
filepath = self.entry_target.get_text().strip()#VIDEO_PATH # self.entry.get_text().strip()
if len(filepath) <= 0:
filepath = VIDEO_PATH
if os.path.isfile(filepath):
filepath = os.path.realpath(filepath)
self.player.set_property("uri", "file:///" + filepath)
self.player.set_state(Gst.State.PLAYING)
GObject.timeout_add(1000, self.updateProgressSlider)
print(self.player.get_property("uri"))
print("START")
def stop(self):
self.player.set_state(Gst.State.NULL)
def pause(self):
self.player.set_state(Gst.State.PAUSED)
def seekToggled(self, w):
pos =self.seek_entry.get_text().strip()
if len(pos) <= 0:
self.seek_entry.set_text("")
return
pos = int(pos)
duration_nanosecs = self.player.query_duration(Gst.Format.TIME)[1]
duration = float(duration_nanosecs) / Gst.SECOND
if pos >= duration - 5:
pos = duration - 5
if pos < 0:
pos = 0
print ("pos: ", pos, pos*Gst.SECOND)
pos_ns = pos * Gst.SECOND
self.player.seek_simple(Gst.Format.TIME, Gst.SeekFlags.FLUSH, pos_ns)
def stopToggled(self, w):
self.progress_slider.set_value(0)
self.progress_label.set_text("0:00")
self.stop()
self.play_status = STOPPED
self.updateButtons()
if self.after_wav is not None:
self.after_wav.close()
def playToggled(self, w):
if self.play_status == STOPPED or self.play_status == PAUSED:
self.play()
self.play_status = PLAYING
else:
self.pause()
self.play_status = PAUSED
self.updateButtons()
def wmToggled(self, w):
msg = self.wm_entry.get_text().strip()
msg += '\n'
self.thread.requestWM(msg)
def updateProgressSlider(self):
if self.play_status == STOPPED:
return False # cancel timeout
try:
nanosecs = self.player.query_position(Gst.Format.TIME)[1]
duration_nanosecs = self.player.query_duration(Gst.Format.TIME)[1]
# block seek handler so we don't seek when we set_value()
# self.slider.handler_block_by_func(self.on_slider_change)
duration = float(duration_nanosecs) / Gst.SECOND
position = float(nanosecs) / Gst.SECOND
#print("prog:", duration, position)
self.progress_slider.set_range(0, duration)
self.progress_slider.set_value(position)
self.progress_label.set_text("%d" % (position / 60) + ":%02d" % (position % 60))
# self.slider.handler_unblock_by_func(self.on_slider_change)
except Exception as e:
# pipeline must not be ready and does not know position
print(e)
pass
return True
def updateButtons(self):
if self.play_status == STOPPED or self.play_status == PAUSED:
self.playButtonImage.set_from_stock("gtk-media-play", Gtk.IconSize.BUTTON)
else:
self.playButtonImage.set_from_stock("gtk-media-pause", Gtk.IconSize.BUTTON)
def loadEqPresetFile():
f = open("equalizer_preset.txt", "r")
result = dict()
while True:
line = f.readline()
if not line or len(line) == 0:
break
if line[0] == '[':
begin = 1
end = line.rfind(']')
key = line[begin:end]
bands = []
for i in range(10):
line = f.readline()
begin = line.rfind('=') + 1
bands.append(float(line[begin:]))
result[key] = bands
f.close()
return result
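# Illustrative layout assumed by loadEqPresetFile() for "equalizer_preset.txt".
# The preset name and band values below are hypothetical; the parser only relies on
# a "[...]" header followed by ten "key=float" lines per preset (matching the
# -24..+12 dB range of the sliders above):
#
#   [Rock]
#   band0=6.0
#   band1=4.0
#   ...
#   band9=-2.0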
def getAudioInfo(caps):
structure = caps.get_structure(0)
ret, channels = structure.get_int("channels")
ret, rate = structure.get_int("rate")
type = structure.get_string("format")
if type.find("32") >= 0:
format = 4
elif type.find("24") >= 0:
format = 3
elif type.find("16") >= 0:
format = 2
else:
format = 1
return format, rate, channels, type.lower()
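# Illustrative mapping (hypothetical caps): for caps such as
#   "audio/x-raw, format=(string)S16LE, rate=(int)44100, channels=(int)2"
# getAudioInfo() returns (2, 44100, 2, 's16le'), i.e. 2-byte samples, 44.1 kHz, stereo.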
if __name__ == "__main__":
# call(["gst-launch", "playbin", "uri=\"file:///E:\\\\PycharmProjects\\\\AerialAcousticCommunication\\\\Kalimba.mp3\""])
# time.sleep(1000)
GObject.threads_init()
Gst.init(None)
obj = GTK_Main()
Gtk.main() | [
"[email protected]"
] | |
3df0e25033381a10f7bb65a87f0dc47f3745faf6 | 7e6e460e19f142dc9d8f62133c6a63b656a3d90c | /poli_match/poli_match_app/migrations/0014_auto_20181010_0003.py | 9f417367cb037420f70f63825a141e308d72cc3d | [] | no_license | Nathaniel-Dominguez/poli-match | 6ee1adcf87c32efe2f8860e37b93de357cb83a72 | 1411a1638c5f725dee897c796831e6371b83fcd4 | refs/heads/master | 2020-03-30T21:46:34.184293 | 2018-10-12T00:53:48 | 2018-10-12T00:53:48 | 151,641,633 | 0 | 1 | null | 2018-10-12T00:53:49 | 2018-10-04T22:05:36 | Python | UTF-8 | Python | false | false | 430 | py | # Generated by Django 2.1.1 on 2018-10-10 00:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('poli_match_app', '0013_remove_politician_next_election'),
]
operations = [
migrations.AlterField(
model_name='politician',
name='missed_votes_pct',
field=models.IntegerField(default=0, null=True),
),
]
| [
"[email protected]"
] | |
1cf1b5c4e6a919fa0d61de0e410aa474107ed674 | cebfc35cb3219914de75e22aa76b79f2720aa5ab | /src/python/loadDF.py | 793d989f9ce612031b57a750aa6af44a9c8ce4fa | [] | no_license | darshandpatel/eBird | b79d538ba2f729a71980d4ab9174b039bc4e9cce | df4e279fac59dd43ee56467d25e73151b8b7d895 | refs/heads/master | 2021-05-01T04:40:29.598258 | 2016-12-13T02:37:52 | 2016-12-13T02:37:52 | 74,793,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | import pyspark
from pyspark import SparkConf, SparkContext
from pyspark.sql import column
def customfunction(x):
if x == "?":
x = "0"
return x
from pyspark.sql import SQLContext
conf = (SparkConf()
.setMaster("local")
.setAppName("eBird")
.set("spark.executor.memory", "1g"))
sc = SparkContext(conf = conf)
sqc = SQLContext(sc)
sample = sc.textFile("./sample/part-00000").persist()
sampledf = sample.map(lambda x : x.split(",")).toDF()
#to drop a column
#newsampledf = sampledf.drop(sampledf._1)
#to filter rows
#newsampledf.filter(sampledf._18 != "?").show(10)
#to replace ? with 0
sampledf.replace("?","0").show(10)
| [
"[email protected]"
] | |
6e1624695640887e0510e700b7cf9f286415c365 | a00ed711e3e08b50ad6e91cc07a2cddc4a1de5ea | /tests/charts/test_ingress_web.py | 93b51e44dc8f19e7db93f68dbd881d874a924830 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | ishiis/airflow | 4305794e36b611d01f49e3f2401be3dc49782670 | 292440d54f4db84aaf0c5a98cf5fcf34303f2fa8 | refs/heads/master | 2022-07-30T00:51:28.806940 | 2022-07-14T12:07:11 | 2022-07-14T12:07:11 | 209,801,072 | 1 | 0 | Apache-2.0 | 2019-09-20T13:47:26 | 2019-09-20T13:47:26 | null | UTF-8 | Python | false | false | 6,374 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import jmespath
from parameterized import parameterized
from tests.charts.helm_template_generator import render_chart
class IngressWebTest(unittest.TestCase):
def test_should_pass_validation_with_just_ingress_enabled_v1(self):
render_chart(
values={"ingress": {"web": {"enabled": True}}},
show_only=["templates/webserver/webserver-ingress.yaml"],
) # checks that no validation exception is raised
def test_should_pass_validation_with_just_ingress_enabled_v1beta1(self):
render_chart(
values={"ingress": {"web": {"enabled": True}}},
show_only=["templates/webserver/webserver-ingress.yaml"],
kubernetes_version='1.16.0',
) # checks that no validation exception is raised
def test_should_allow_more_than_one_annotation(self):
docs = render_chart(
values={"ingress": {"web": {"enabled": True, "annotations": {"aa": "bb", "cc": "dd"}}}},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert {"aa": "bb", "cc": "dd"} == jmespath.search("metadata.annotations", docs[0])
def test_should_set_ingress_class_name(self):
docs = render_chart(
values={"ingress": {"web": {"enabled": True, "ingressClassName": "foo"}}},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert "foo" == jmespath.search("spec.ingressClassName", docs[0])
def test_should_ingress_hosts_objs_have_priority_over_host(self):
docs = render_chart(
values={
"ingress": {
"web": {
"enabled": True,
"tls": {"enabled": True, "secretName": "oldsecret"},
"hosts": [
{"name": "*.a-host", "tls": {"enabled": True, "secretName": "newsecret1"}},
{"name": "b-host", "tls": {"enabled": True, "secretName": "newsecret2"}},
{"name": "c-host", "tls": {"enabled": True, "secretName": "newsecret1"}},
{"name": "d-host", "tls": {"enabled": False, "secretName": ""}},
{"name": "e-host"},
],
"host": "old-host",
},
}
},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert ["*.a-host", "b-host", "c-host", "d-host", "e-host"] == jmespath.search(
"spec.rules[*].host", docs[0]
)
assert [
{"hosts": ["*.a-host"], "secretName": "newsecret1"},
{"hosts": ["b-host"], "secretName": "newsecret2"},
{"hosts": ["c-host"], "secretName": "newsecret1"},
] == jmespath.search("spec.tls[*]", docs[0])
def test_should_ingress_hosts_strs_have_priority_over_host(self):
docs = render_chart(
values={
"ingress": {
"web": {
"enabled": True,
"tls": {"enabled": True, "secretName": "secret"},
"hosts": ["*.a-host", "b-host", "c-host", "d-host"],
"host": "old-host",
},
}
},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert ["*.a-host", "b-host", "c-host", "d-host"] == jmespath.search("spec.rules[*].host", docs[0])
assert [
{"hosts": ["*.a-host", "b-host", "c-host", "d-host"], "secretName": "secret"}
] == jmespath.search("spec.tls[*]", docs[0])
def test_should_ingress_deprecated_host_and_top_level_tls_still_work(self):
docs = render_chart(
values={
"ingress": {
"web": {
"enabled": True,
"tls": {"enabled": True, "secretName": "supersecret"},
"host": "old-host",
},
}
},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert (
["old-host"]
== jmespath.search("spec.rules[*].host", docs[0])
== jmespath.search("spec.tls[0].hosts", docs[0])
)
def test_should_ingress_host_entry_not_exist(self):
docs = render_chart(
values={
"ingress": {
"web": {
"enabled": True,
}
}
},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert not jmespath.search("spec.rules[*].host", docs[0])
@parameterized.expand(
[
(None, None, False),
(None, False, False),
(None, True, True),
(False, None, False),
(True, None, True),
(False, True, True), # We will deploy it if _either_ are true
(True, False, True),
]
)
def test_ingress_created(self, global_value, web_value, expected):
values = {"ingress": {}}
if global_value is not None:
values["ingress"]["enabled"] = global_value
if web_value is not None:
values["ingress"]["web"] = {"enabled": web_value}
if values["ingress"] == {}:
del values["ingress"]
docs = render_chart(values=values, show_only=["templates/webserver/webserver-ingress.yaml"])
assert expected == (1 == len(docs))
| [
"[email protected]"
] | |
3c19c14dce6c202b4ccc0ce9e3ac127de98213eb | 15f3f423d890630ddfc051b0dcbb2feb25375197 | /jupyter_notebook/.ipynb_checkpoints/N_DIGIT-checkpoint.py | c98e195bcba4864890276047119955d1d2822ed4 | [] | no_license | GuangyiT/test_creative_information_program | c7062d46135ee63b26915a020e784979a4a55014 | 9f5e07eb370ed3381f4e9724edf013865b00f3b5 | refs/heads/master | 2022-03-29T19:42:59.615636 | 2020-01-26T03:37:34 | 2020-01-26T03:37:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Return decimal x as a base-N digit vector of length "digits", least-significant digit first (packed from the left)
def baseNumber(N, digits, x):
ret = [0 for _ in range(digits)]
quotient = x
counter = 0
while quotient > 0:
remainder = quotient % N
        quotient //= N  # integer division so the digits stay ints under Python 3
ret[counter] = remainder
counter += 1
return ret;
# base
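# Minimal usage check (illustrative values):
if __name__ == "__main__":
    print(baseNumber(2, 8, 13))  # 13 in base 2, least-significant digit first -> [1, 0, 1, 1, 0, 0, 0, 0]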
| [
"[email protected]"
] | |
ff50e0f31e922964e44be61c5c1611281712ce66 | ae149a7faad6daf432a35d2422e685e849563c44 | /imforensics/util/numpy2matlab.py | bfdc741c67362f962e0b37d5b9d3500f13058abe | [] | no_license | ucb-image-forensics/imforensics | 78e1d090531bc5c5c92fa741353c3d9942164877 | fa7535cb3c0c00004c3d03c958c0886e7dd87951 | refs/heads/master | 2020-04-28T23:31:03.300830 | 2015-04-15T10:21:47 | 2015-04-15T10:21:47 | 33,640,373 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | from __future__ import absolute_import
import numpy as np
import matlab
numpy2matlab_type = {
np.bool_ : matlab.int8,
np.int8 : matlab.int8,
np.int16 : matlab.int16,
np.int32 : matlab.int32,
np.int64 : matlab.int64,
np.uint8 : matlab.uint8,
np.uint16 : matlab.uint16,
np.uint32 : matlab.uint32,
np.uint64 : matlab.uint64,
np.float16 : matlab.single,
np.float32 : matlab.single,
np.float64 : matlab.double
}
def numpy2matlab(np_arr):
np_type = np_arr.dtype.type
ml_arr_klass = numpy2matlab_type.get(np_type, None)
if not ml_arr_klass:
raise ValueError('Cannot convert numpy type {0} to matlab array.'.format(np_type))
ml_arr = ml_arr_klass(np_arr.flatten().tolist())
ml_arr.reshape(np_arr.shape)
return ml_arr
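# Illustrative usage (assumes the MATLAB Engine API for Python provides the
# "matlab" module imported above):
#   ml = numpy2matlab(np.array([[1.0, 2.0], [3.0, 4.0]]))   # -> matlab.double with size (2, 2)
#   numpy2matlab(np.array(["a"]))                            # -> ValueError (unsupported dtype)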
| [
"[email protected]"
] | |
0621cbacb224ab970a3753ac1d351ec2940412cb | 94f858fab9c6330b09d52f45ddcfd0e780edc933 | /app/recipe/tests/test_recipe_api.py | e7492b638608a36c955173f42200c0285ea75e38 | [
"MIT"
] | permissive | seiyoung-lee/recipe-app-api | 2c0378a1d86bea6235f85de767fbe058b73b41ee | b8f1b67fb974c89082ed57467fa16c98d95e22d1 | refs/heads/master | 2023-01-01T11:25:09.958388 | 2020-10-26T20:25:55 | 2020-10-26T20:25:55 | 300,089,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,946 | py | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
import tempfile
import os
from PIL import Image
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPE_URL = reverse('recipe:recipe-list')
def image_upload_url(recipe_id):
"""Return URL for recipe image upload"""
return reverse('recipe:recipe-upload-image', args=[recipe_id])
def detail_url(recipe_id):
"""Return recipe detail URL"""
return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_recipe(user, **params):
"""Create and return a sample recipe"""
defaults = {
'title':'sample recipe',
'time_minutes':10,
'price': 5.00
}
defaults.update(params)
return Recipe.objects.create(user=user, **defaults)
def sample_tag(user, name = "Main Course"):
"""Create and return a sample tag"""
return Tag.objects.create(user=user, name = name)
def sample_ingredient(user, name = "Cinnamon"):
"""Create and return a sample ingredient"""
return Ingredient.objects.create(user=user, name = name)
class PublicRecipeApiTests(TestCase):
"""Test unautharized recipe API access"""
def setUp(self):
self.client = APIClient()
def test_auth_required(self):
"""Test that authentication is required"""
res = self.client.get(RECIPE_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
"""Test authorized recipe API access"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'[email protected]',
'test123'
)
self.client.force_authenticate(self.user)
def test_retrieve_recipes(self):
"""Test retrieving a list of recipes"""
sample_recipe(user=self.user)
sample_recipe(user = self.user)
res = self.client.get(RECIPE_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many = True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
"""Test retrieving recipes for user"""
user2 = get_user_model().objects.create_user(
"[email protected]",
'password123'
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPE_URL)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many= True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
def test_view_recipe_detail(self):
"""Test viewing a recipe detail"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(self.user))
recipe.ingredients.add(sample_ingredient(self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_create_basic_recipe(self):
"""Test creating recipe"""
payload = {
'title': 'Chocolate Cheesecake',
'time_minutes' : 30,
'price': 5.00
}
res = self.client.post(RECIPE_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id = res.data['id'])
for key in payload.keys():
self.assertEqual(payload[key], getattr(recipe,key))
def test_create_recipe_with_tags(self):
"""Test creating a recipe with tags"""
tag1 = sample_tag(user = self.user, name = 'Vegan')
tag2 = sample_tag(user = self.user, name = 'Dessert')
payload = {
'title' : 'Avocado lime cheesecake',
'tags' : [tag1.id, tag2.id],
'time_minutes': 60,
'price' : 20.00
}
res = self.client.post(RECIPE_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id = res.data['id'])
tags = recipe.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_create_recipe_with_ingredients(self):
"""Test creating a recipe with ingredient"""
ingredient1 =sample_ingredient(user = self.user, name = 'Prawns')
ingredient2 =sample_ingredient(user = self.user, name = 'Ginger')
payload = {
'title': 'Thai prawn red curry',
'ingredients' : [ingredient1.id, ingredient2.id],
'time_minutes': 20,
'price': 7.00
}
res = self.client.post(RECIPE_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id = res.data['id'])
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
def test_partial_update_recipe(self):
"""Test updating a recipe with patch"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
new_tag = sample_tag(user= self.user, name = 'Curry')
payload = {'title': 'Chicken tikka', 'tags': [new_tag.id]}
url = detail_url(recipe.id)
self.client.patch(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
def test_full_update(self):
"""Test updating a recipe with put"""
recipe = sample_recipe(user = self.user)
recipe.tags.add(sample_tag(self.user))
payload = {
'title': 'Spaghetti carbonara',
'time_minutes' : 25,
'price': 5.00
}
url = detail_url(recipe.id)
self.client.put(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
self.assertEqual(recipe.time_minutes, payload['time_minutes'])
self.assertEqual(recipe.price, payload['price'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
class RecipeImageUploadTests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'[email protected]',
'test123'
)
self.client.force_authenticate(self.user)
self.recipe = sample_recipe(user = self.user)
def tearDown(self):
self.recipe.image.delete()
def test_upload_image_to_recipe(self):
"""Test uploading an image to recipe"""
url = image_upload_url(self.recipe.id)
with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
img = Image.new('RGB', (10,10))
img.save(ntf, format = 'JPEG')
ntf.seek(0)
res = self.client.post(url, {'image':ntf}, format = 'multipart')
self.recipe.refresh_from_db()
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertIn('image', res.data)
self.assertTrue(os.path.exists(self.recipe.image.path))
def test_upload_image_bad_request(self):
"""Test uploading an invalid image"""
url = image_upload_url(self.recipe.id)
res = self.client.post(url, {'image':'notimage'}, format = 'multipart')
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_filter_recipes_by_tags(self):
"""Test returning recipes with specific tags"""
recipe1 = sample_recipe(user=self.user, title = 'Thai vegetable curry')
recipe2 = sample_recipe(user=self.user, title = 'Aubergine with tahini')
tag1 = sample_tag(user=self.user, name = 'Vegan')
tag2 = sample_tag(user=self.user, name = 'Vegetarian')
recipe1.tags.add(tag1)
recipe2.tags.add(tag2)
recipe3 = sample_recipe(user=self.user, title = 'Fish and Chips')
res = self.client.get(
RECIPE_URL,
{'tags':f'{tag1.id}, {tag2.id}'}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
def test_filter_recipes_by_ingredient(self):
"""Test returning recipes with specific ingredients"""
recipe1 = sample_recipe(user=self.user, title = 'Thai vegetable curry')
recipe2 = sample_recipe(user=self.user, title = 'Aubergine with tahini')
ingredient1 = sample_ingredient(user=self.user, name = 'Beef')
ingredient2 = sample_ingredient(user=self.user, name = 'Pork')
recipe1.ingredients.add(ingredient1)
recipe2.ingredients.add(ingredient2)
recipe3 = sample_recipe(user=self.user, title = 'Fish and Chips')
res = self.client.get(
RECIPE_URL,
{'ingredients':f'{ingredient1.id}, {ingredient2.id}'}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
| [
"[email protected]"
] | |
338ec76574593aed13651ef6310c61f6adf6b402 | ee6acbd5fcd0fcd16230e96a4a539de41a02c97e | /operators/event-streams-topic/python/pulumi_pulumi_kubernetes_crds_operators_event_streams_topic/ibmcloud/v1alpha1/__init__.py | 9458a57666f52f92f605cc86a3b2b97da3b2dd93 | [
"Apache-2.0"
] | permissive | isabella232/pulumi-kubernetes-crds | 777e78137aaf6525a44b61a02dccf91bf0d87a14 | 372c4c0182f6b899af82d6edaad521aa14f22150 | refs/heads/master | 2023-03-15T04:29:16.039753 | 2020-12-30T19:35:54 | 2020-12-30T19:35:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | # coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .Topic import *
from ._inputs import *
from . import outputs
| [
"[email protected]"
] | |
98d6a489ca61a7f75033d94a82b0d5cca36f5b80 | cf35959df7e022a6c064bf7016de54adf9a458ca | /teamtechkenya/settings.py | f515c219063e248e18c479995494aeaba1cfc204 | [] | no_license | benstarke/teamtechkenya | 51ea73572b5da22907aafe8b7385106fe46227da | 4377801f31da3216a76ed787744131e758fc4839 | refs/heads/master | 2023-02-08T23:17:37.513410 | 2020-12-29T01:42:00 | 2020-12-29T01:42:00 | 325,156,107 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,498 | py | """
Django settings for teamtechkenya project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
import django_heroku
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
#SECRET_KEY = '+j%!@t0h(ok!jvp4b*l3)neoe$(93v^^44velt4jar4h5yr__!'
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '+j%!@t0h(ok!jvp4b*l3)neoe$(93v^^44velt4jar4h5yr__!')
# SECURITY WARNING: don't run with debug turned on in production!
#DEBUG = True
DEBUG = os.environ.get('DJANGO_DEBUG', '') != 'False'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'teamtechs',
'storages',
'imagekit',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'teamtechkenya.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'teamtechkenya.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'teamtechkenya',
'USER': 'postgres',
'PASSWORD': 'bb99GG00',
'HOST': 'localhost',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATIC_ROOT = os.path.join(BASE_DIR,'staticfiles')
MEDIA_URL = '/image/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'static/image')
"""
AWS_ACCESS_KEY_ID = 'AKIAIBBGI3HYFGVI7GJQ'
AWS_STORAGE_BUCKET_NAME = 'benstar-bucket'
AWS_SECRET_ACCESS_KEY = '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'
AWS_UPLOAD_USERNAME = "BEN_user_jomusi"
AWS_UPLOAD_REGION = 'us-west-2'
AWS_UPLOAD_GROUP = "BEN_AwesomeGroup"
AWS_DEFAULT_ACL = None
AWS_S3_FILE_OVERWRITE = False
STATIC_LOCATION = 'static'
DEFAULT_FILE_STORAGE = 'teamtechkenya.storage_backends.MediaStorage'
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
# stops IK checking S3 all the time - main reason to use IK v2 for me
#IMAGEKIT_DEFAULT_IMAGE_CACHE_BACKEND = 'imagekit.imagecache.NonValidatingImageCacheBackend'
#IMAGEKIT_DEFAULT_CACHEFILE_STRATEGY = 'imagekit.cachefiles.strategies.Optimistic'
"""
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
# Activate Django-Heroku.
django_heroku.settings(locals())
| [
"[email protected]"
] | |
4f32a9ced516a932e313f38327fc7409b462ff12 | 38888910e312792a5a253237bc679e3158cdd35f | /download_from_s3.py | e603501cd5da5304371125e3c546fb9d9d4909f8 | [] | no_license | rahulagarwalji/Spark | 7fa2e17c4edfcbd17e2d48a92eb7ef9cc256f64b | af807677c12a02346b73978a9adc1f1349be8573 | refs/heads/master | 2020-04-14T00:35:13.177420 | 2018-12-29T20:34:30 | 2018-12-29T20:34:30 | 163,536,863 | 0 | 0 | null | 2018-12-29T20:34:31 | 2018-12-29T19:47:50 | Python | UTF-8 | Python | false | false | 263 | py | bucketName = "Your S3 BucketName"
Key = "Original Name and type of the file you want to upload into s3"
outPutname = "Output file name(The name you want to give to the file after we upload to s3)"
import boto3

s3 = boto3.client('s3')
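# Illustrative call with hypothetical values; boto3's signature is
# upload_file(Filename, Bucket, Key), i.e. local path, bucket name, object key:
#   s3.upload_file("report.pdf", "my-team-bucket", "backups/report.pdf")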
s3.upload_file(Key,bucketName,outPutname) | [
"[email protected]"
] | |
6883c286e48fb50a63222ad311c9084efdbad27f | a0fd9f3f055961e34dd60647c32d32a50f005906 | /tests/test_trending.py | 9448b8cc256e06dd1287d011c6fe94f0d05d1f88 | [] | no_license | EvgeniyGerasimov/MsAppTest | 4efd83b3aa5e296864002743bc9ed2a31b430f6b | bba71bd55157a1d66783d75feb9d0050365d0146 | refs/heads/master | 2020-05-09T23:20:58.748499 | 2019-05-07T12:57:44 | 2019-05-07T12:57:44 | 181,498,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | from data.methods import Methods
from data import request_body
from jsonschema import validate
from data.json_shemas import trending_latest_shema
class TestTrending:
def test_trending_latest(self):
response = Methods.post('/trending/latest/', request_body.TRENDING_LATEST, Methods.headers)
assert 200 == response.status_code
validate(instance=response.json(), schema=trending_latest_shema.TRENDING_LATEST_SHEMA)
| [
"[email protected]"
] | |
c690aa52abea32c62dea45370e45a970be5db3d3 | 172f7535541c0920f48017d170f841b258f4c5e4 | /simulating_annealing.py | cbb61105b26093d024e60cfcea444fcd8e954a0c | [
"MIT"
] | permissive | chinaver2002/2D-Irregular-Packing-Algorithm | ca742050c90e9807f4c510a36a1b4f4bacce1d35 | cc10edff2bc2631fcbcb47acf7bb3215e5c5023c | refs/heads/master | 2023-07-09T05:27:48.482900 | 2021-03-10T14:04:29 | 2021-03-10T14:04:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,475 | py | import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt
from tools.geofunc import GeoFunc
from tools.show import PltFunc
from tools.nfp import NFP
from tools.data import getData
from tools.packing import PackingUtil,NFPAssistant,PolyListProcessor,Poly
from heuristic import TOPOS,BottomLeftFill
import json
from shapely.geometry import Polygon,mapping
from shapely import affinity
import csv
import time
import multiprocessing
import datetime
import random
import copy
def packingLength(poly_list,history_index_list,history_length_list,width,**kw):
polys=PolyListProcessor.getPolysVertices(poly_list)
index_list=PolyListProcessor.getPolyListIndex(poly_list)
length=0
check_index=PolyListProcessor.getIndex(index_list,history_index_list)
if check_index>=0:
length=history_length_list[check_index]
else:
try:
if 'NFPAssistant' in kw:
blf=BottomLeftFill(width,polys,NFPAssistant=kw['NFPAssistant'])
# blf.showAll()
length=blf.contain_length
else:
length=BottomLeftFill(width,polys).contain_length
except:
            print('Self-intersection detected')
length=99999
history_index_list.append(index_list)
history_length_list.append(length)
return length
class SA(object):
'''
    Simulated Annealing + Bottom Left Fill
Reference:....
'''
def __init__(self,poly_list):
        self.min_angle=360 # minimum rotation step allowed (degrees)
        self.width=1500 # width of the packing strip
        self.temp_now=200  # initial temperature 2000
        self.temp_end=1e-5 # final temperature 1e-20
        self.dec_rate=0.7 # cooling rate 0.995
        self.loop_times=5 # inner-loop iterations per temperature
        self.cur_poly_list=poly_list # current sequence
        self.new_poly_list=poly_list # candidate (new) sequence
        self.history_index_list=[] # index sequences evaluated so far
        self.history_length_list=[] # corresponding packing lengths
self.NFPAssistant=NFPAssistant(PolyListProcessor.getPolysVertices(poly_list),get_all_nfp=True)
self.run()
def newPolyList(self):
choose_id = int(random.random() * len(self.new_poly_list))
        '''Swap or rotate one polygon in the sequence; rotation is currently disabled'''
if random.random()<=1:
self.new_poly_list=PolyListProcessor.randomSwap(self.cur_poly_list,choose_id)
else:
self.new_poly_list=PolyListProcessor.randomRotate(self.cur_poly_list,self.min_angle,choose_id)
def run(self):
initial_length=packingLength(self.cur_poly_list,self.history_index_list,self.history_length_list,self.width)
        global_lowest_length_list = [] # lowest length recorded at each temperature; in theory it keeps decreasing
        temp_lowest_length_list= [] # equilibrium length at each temperature
        global_best_list = copy.deepcopy(self.cur_poly_list) # keeps the best sequence seen so far
        global_lowest_length=initial_length # global lowest length
        temp_best_list=copy.deepcopy(self.cur_poly_list) # best sequence at the current temperature
        temp_lowest_length=initial_length # lowest length at the current temperature
        unchange_times=0
        # main search loop
        while self.temp_now>self.temp_end:
            print("Current temperature:",self.temp_now)
            old_lowest_length=global_lowest_length # used to count temperatures without improvement
            cur_length=packingLength(self.cur_poly_list,self.history_index_list,self.history_length_list,self.width,NFPAssistant=self.NFPAssistant)
            # run a fixed number of trial moves at this temperature
for i in range(self.loop_times):
self.newPolyList()
new_length=packingLength(self.new_poly_list,self.history_index_list,self.history_length_list,self.width,NFPAssistant=self.NFPAssistant)
delta_length = new_length-cur_length
                if delta_length < 0: # accept if the new layout is shorter at this temperature
                    temp_best_list = self.cur_poly_list = copy.deepcopy(self.new_poly_list)
                    temp_lowest_length=new_length # record the new, lower length
                    cur_length=new_length
                    if new_length<global_lowest_length: # update the global optimum if it is beaten
                        global_lowest_length=new_length
                        global_best_list=copy.deepcopy(self.new_poly_list)
                elif np.random.random() < np.exp(-delta_length / self.temp_now): # otherwise accept with the Metropolis probability and continue from it
                    self.cur_poly_list=copy.deepcopy(self.new_poly_list)
                    cur_length=new_length
                else:
                    pass # reject the candidate
print("当前温度最低长度:",temp_lowest_length)
print("最低长度:",global_lowest_length)
if old_lowest_length==global_lowest_length:
unchange_times+=1
if unchange_times>15:
break
else:
unchange_times=0
            self.cur_poly_list=copy.deepcopy(temp_best_list) # restart the next temperature from the best sequence of this one
            self.temp_now*=self.dec_rate # cool down (anneal)
            global_lowest_length_list.append(global_lowest_length) # global lowest length per temperature; in theory it keeps decreasing
            temp_lowest_length_list.append(temp_lowest_length) # lowest length at each temperature
        # print('best local sequence at the final temperature:',temp_best_list)
        print('local optimum length at the final temperature:',temp_lowest_length)
        # print('best sequence:',global_best_list)
        print('length of the best sequence:',global_lowest_length)
PolyListProcessor.showPolyList(self.width,global_best_list)
self.showBestResult(temp_lowest_length_list,global_lowest_length_list)
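    # Acceptance rule used in run() above (Metropolis criterion): a candidate that is
    # worse by delta_length d is still accepted with probability exp(-d / temp_now),
    # e.g. d=10 at T=200 -> ~0.95, but d=10 at T=1 -> ~4.5e-5, so uphill moves become
    # rare as the temperature cools.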
def showBestResult(self,list1,list2):
plt.figure(1)
plt.subplot(311)
        plt.plot(list1) # equilibrium length at each temperature
        plt.subplot(312)
        plt.plot(list2) # best length at each temperature
plt.grid()
plt.show()
if __name__=='__main__':
starttime = datetime.datetime.now()
polys = getData(6)
    all_rotation = [0] # rotation disabled
poly_list = PolyListProcessor.getPolyObjectList(polys, all_rotation)
SA(poly_list)
endtime = datetime.datetime.now()
print (endtime - starttime)
| [
"[email protected]"
] | |
7733f55151a6fc96b193508c48486be8f74d5660 | c6d0baf262602dabfafeaf7f2706e75e76c5b0c0 | /DASHBOARD/dashboard/urls.py | 3e0e519e285efdb6831448d22a583cf11f92a91b | [] | no_license | wdchild/insight_DE_project | 0a53420592b60a808bb34aba3edd50db04afb32a | 6f0af4afa85c658435b30d0dc89315bfb592c5c7 | refs/heads/master | 2021-07-18T16:16:13.881057 | 2017-10-23T15:52:32 | 2017-10-23T15:52:32 | 105,242,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | """dashboard URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
a278c7098aa1dde205e8c07713baae7a43f08b19 | 3279e683eb404611da578d62ad7086dcde7d6bed | /src/the_tale/the_tale/linguistics/lexicon/relations.py | ff25b7998c8627c61b303cd66b1d92dda2c322b3 | [
"BSD-2-Clause-Views"
] | permissive | he1mdallr/the-tale | 257ac5745679ead6d95281dca2fddd2381187950 | b8a189703a7d0776b95621f2e00071b41459ae6f | refs/heads/master | 2021-01-19T00:18:55.775838 | 2017-03-25T11:15:38 | 2017-03-25T11:15:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,057 | py | # coding: utf-8
from rels import Column
from rels.django import DjangoEnum
from utg import words as utg_words
from utg import constructors as utg_constructors
from utg.relations import WORD_TYPE
from the_tale.linguistics import relations
def preprocess_s(row):
return [var if isinstance(var, tuple) else (var, '') for var in row]
def s(*substitutions):
return [preprocess_s(row[i:] + row[:i]) for i, row in enumerate(substitutions)]
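# Worked example (illustrative): s(['a', 'b'], ['c', 'd']) rotates each row by its
# index and pads every entry to a (word, properties) pair:
#   [[('a', ''), ('b', '')], [('d', ''), ('c', '')]]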
class VARIABLE_VERIFICATOR(DjangoEnum):
utg_type = Column(unique=False, no_index=True, single_type=False)
substitutions = Column(unique=False, no_index=True)
records = ( ('PERSON', 0, 'любой персонаж', WORD_TYPE.NOUN, s(['герой', 'привидение', 'героиня', ('рыцарь', 'мн')],
['призрак', 'чудовище', 'русалка', ('боец', 'мн')],
['жираф', 'чучело', 'зебра', ('слон', 'мн')],
['гусь', 'пугало', 'свинья', ('волк', 'мн')] )),
('NUMBER', 1, 'число', WORD_TYPE.INTEGER, s([1, 2, 5],
[21, 23, 25],
[1001, 1054, 1013])),
('PLACE', 2, 'место', WORD_TYPE.NOUN, s(['Минск', 'Простоквашино', 'Вилейка', 'Барановичи'],
['Тагил', 'Чугуево', 'Рига', 'Афины'],
['Магадан', 'Бородино', 'Уфа', 'Чебоксары'])),
    # TODO: in the next big rework, add an animate artifact to each word set (most likely of masculine gender)
('ITEM', 4, 'любой предмет', WORD_TYPE.NOUN, s(['нож', 'ядро', 'пепельница', 'ножницы'],
['кинжал', 'окно', 'мечта', 'макароны'],
['меч', 'варенье', 'чашка', 'дрова'])),
('TEXT', 5, 'любой текст', WORD_TYPE.TEXT, s(['любой текст'],
['текст текст текст'],
['какой-то текст'])),
('MODIFIER', 6, 'модификатор города', WORD_TYPE.NOUN, s(['форт', 'захолустье', 'святыня', ('мемориал', 'мн')],
['замок', 'пристанище', 'земля', ('колония', 'мн')])),
('RACE', 7, 'раса', WORD_TYPE.NOUN, s(['человек', 'эльф', 'орк', 'гоблин', 'дварф'],
['человек', 'эльф', 'орк', 'гоблин', 'дварф'],
['человек', 'эльф', 'орк', 'гоблин', 'дварф'])),
('DATE', 8, 'дата в мире игры', WORD_TYPE.TEXT, s(['18 сухого месяца 183 года'])),)
_construct_utg_name_form = lambda v: (v.utg_name_form, v.linguistics_restrictions())
_construct_number = lambda v: (utg_constructors.construct_integer(int(v)), [])
_construct_text = lambda v: (utg_words.WordForm(utg_words.Word(type=WORD_TYPE.TEXT, forms=(v,))), [])
class VARIABLE_TYPE(DjangoEnum):
verificator = Column(unique=False, no_index=True)
constructor = Column(unique=False, no_index=True)
restrictions = Column(unique=False, no_index=True)
records = ( ('NUMBER', 1, 'число', VARIABLE_VERIFICATOR.NUMBER, _construct_number, ()),
('PLACE', 2, 'город', VARIABLE_VERIFICATOR.PLACE, _construct_utg_name_form, (relations.TEMPLATE_RESTRICTION_GROUP.CITY_MODIFIER,
relations.TEMPLATE_RESTRICTION_GROUP.HABIT_HONOR,
relations.TEMPLATE_RESTRICTION_GROUP.HABIT_PEACEFULNESS,
relations.TEMPLATE_RESTRICTION_GROUP.TERRAIN,
relations.TEMPLATE_RESTRICTION_GROUP.META_TERRAIN,
relations.TEMPLATE_RESTRICTION_GROUP.META_HEIGHT,
relations.TEMPLATE_RESTRICTION_GROUP.META_VEGETATION,
relations.TEMPLATE_RESTRICTION_GROUP.BUILDING_TYPE,
relations.TEMPLATE_RESTRICTION_GROUP.RACE,
relations.TEMPLATE_RESTRICTION_GROUP.PLURAL_FORM)),
('PERSON', 3, 'NPC', VARIABLE_VERIFICATOR.PERSON, _construct_utg_name_form, (relations.TEMPLATE_RESTRICTION_GROUP.PERSON_TYPE,
relations.TEMPLATE_RESTRICTION_GROUP.GENDER,
relations.TEMPLATE_RESTRICTION_GROUP.RACE,
relations.TEMPLATE_RESTRICTION_GROUP.PLURAL_FORM,
relations.TEMPLATE_RESTRICTION_GROUP.PERSON_PERSONALITY_COSMETIC,
relations.TEMPLATE_RESTRICTION_GROUP.PERSON_PERSONALITY_PRACTICAL)),
('ARTIFACT', 4, 'артефакт', VARIABLE_VERIFICATOR.ITEM, _construct_utg_name_form, (relations.TEMPLATE_RESTRICTION_GROUP.ARTIFACT_TYPE,
relations.TEMPLATE_RESTRICTION_GROUP.ARTIFACT_POWER_TYPE,
relations.TEMPLATE_RESTRICTION_GROUP.ARTIFACT_RARITY,
relations.TEMPLATE_RESTRICTION_GROUP.ARTIFACT_EFFECT,
relations.TEMPLATE_RESTRICTION_GROUP.ARTIFACT,
relations.TEMPLATE_RESTRICTION_GROUP.PLURAL_FORM)),
('MOB', 5, 'монстр', VARIABLE_VERIFICATOR.PERSON, _construct_utg_name_form, (relations.TEMPLATE_RESTRICTION_GROUP.MOB_TYPE,
relations.TEMPLATE_RESTRICTION_GROUP.MOB,
relations.TEMPLATE_RESTRICTION_GROUP.ARCHETYPE,
relations.TEMPLATE_RESTRICTION_GROUP.ACTION_TYPE,
relations.TEMPLATE_RESTRICTION_GROUP.COMMUNICATION_VERBAL,
relations.TEMPLATE_RESTRICTION_GROUP.COMMUNICATION_GESTURES,
relations.TEMPLATE_RESTRICTION_GROUP.COMMUNICATION_TELEPATHIC,
relations.TEMPLATE_RESTRICTION_GROUP.INTELLECT_LEVEL,
relations.TEMPLATE_RESTRICTION_GROUP.PLURAL_FORM)),
('TEXT', 6, 'текст', VARIABLE_VERIFICATOR.TEXT, _construct_text, ()),
('ACTOR', 7, 'герой, монстр или спутник', VARIABLE_VERIFICATOR.PERSON, _construct_utg_name_form, (relations.TEMPLATE_RESTRICTION_GROUP.GENDER,
relations.TEMPLATE_RESTRICTION_GROUP.RACE,
relations.TEMPLATE_RESTRICTION_GROUP.HABIT_HONOR,
relations.TEMPLATE_RESTRICTION_GROUP.HABIT_PEACEFULNESS,
relations.TEMPLATE_RESTRICTION_GROUP.MOB,
relations.TEMPLATE_RESTRICTION_GROUP.MOB_TYPE,
relations.TEMPLATE_RESTRICTION_GROUP.COMPANION,
relations.TEMPLATE_RESTRICTION_GROUP.COMPANION_DEDICATION,
relations.TEMPLATE_RESTRICTION_GROUP.COMPANION_ABILITY,
relations.TEMPLATE_RESTRICTION_GROUP.ARCHETYPE,
relations.TEMPLATE_RESTRICTION_GROUP.TERRAIN,
relations.TEMPLATE_RESTRICTION_GROUP.META_TERRAIN,
relations.TEMPLATE_RESTRICTION_GROUP.META_HEIGHT,
relations.TEMPLATE_RESTRICTION_GROUP.META_VEGETATION,
relations.TEMPLATE_RESTRICTION_GROUP.ACTION_TYPE,
relations.TEMPLATE_RESTRICTION_GROUP.COMMUNICATION_VERBAL,
relations.TEMPLATE_RESTRICTION_GROUP.COMMUNICATION_GESTURES,
relations.TEMPLATE_RESTRICTION_GROUP.COMMUNICATION_TELEPATHIC,
relations.TEMPLATE_RESTRICTION_GROUP.INTELLECT_LEVEL,
relations.TEMPLATE_RESTRICTION_GROUP.ACTOR,
relations.TEMPLATE_RESTRICTION_GROUP.PLURAL_FORM,
relations.TEMPLATE_RESTRICTION_GROUP.COMPANION_EXISTENCE)),
('MODIFIER', 8, 'модификатор города', VARIABLE_VERIFICATOR.MODIFIER, _construct_utg_name_form, (relations.TEMPLATE_RESTRICTION_GROUP.CITY_MODIFIER,
relations.TEMPLATE_RESTRICTION_GROUP.PLURAL_FORM)),
('RACE', 9, 'раса', VARIABLE_VERIFICATOR.RACE, _construct_utg_name_form, (relations.TEMPLATE_RESTRICTION_GROUP.RACE,
relations.TEMPLATE_RESTRICTION_GROUP.PLURAL_FORM)),
('DATE', 10, 'дата', VARIABLE_VERIFICATOR.DATE, _construct_utg_name_form, (relations.TEMPLATE_RESTRICTION_GROUP.REAL_FEAST,)) )
class VARIABLE(DjangoEnum):
type = Column(unique=False, no_index=True)
records = ( ('HERO', 'hero', 'герой', VARIABLE_TYPE.ACTOR),
('LEVEL', 'level', 'уровень', VARIABLE_TYPE.NUMBER),
('ANTAGONIST_POSITION', 'antagonist_position', 'позиция антагониста', VARIABLE_TYPE.PLACE),
('RECEIVER_POSITION', 'receiver_position', 'позиция получателя задания', VARIABLE_TYPE.PLACE),
('ANTAGONIST', 'antagonist', 'антагонист', VARIABLE_TYPE.PERSON),
('RECEIVER', 'receiver', 'получатель задания', VARIABLE_TYPE.PERSON),
('ARTIFACT', 'artifact', 'артефакт', VARIABLE_TYPE.ARTIFACT),
('COINS', 'coins', 'монеты', VARIABLE_TYPE.NUMBER),
('INITIATOR', 'initiator', 'инициатор задания', VARIABLE_TYPE.PERSON),
('INITIATOR_POSITION', 'initiator_position', 'позиция инициатора задания', VARIABLE_TYPE.PLACE),
('ITEM', 'item', 'предмет', VARIABLE_TYPE.ARTIFACT),
('UNEQUIPPED', 'unequipped', 'снимаемый предмет', VARIABLE_TYPE.ARTIFACT),
('EQUIPPED', 'equipped', 'экипируемый предмет', VARIABLE_TYPE.ARTIFACT),
('DESTINATION', 'destination', 'пункт назначения', VARIABLE_TYPE.PLACE),
('CURRENT_DESTINATION', 'current_destination', 'текущий подпункт назначения', VARIABLE_TYPE.PLACE),
('PLACE', 'place', 'город', VARIABLE_TYPE.PLACE),
('KILLER', 'killer', 'победитель в pvp', VARIABLE_TYPE.ACTOR),
('VICTIM', 'victim', 'проигравший в pvp', VARIABLE_TYPE.ACTOR),
('DUELIST_1', 'duelist_1', '1-ый участник pvp', VARIABLE_TYPE.ACTOR),
('DUELIST_2', 'duelist_2', '2-ый участник pvp', VARIABLE_TYPE.ACTOR),
('DROPPED_ITEM', 'dropped_item', 'выпавший предмет', VARIABLE_TYPE.ARTIFACT),
('EXPERIENCE', 'experience', 'опыт', VARIABLE_TYPE.NUMBER),
('HEALTH', 'health', 'здоровье', VARIABLE_TYPE.NUMBER),
('MOB', 'mob', 'монстр', VARIABLE_TYPE.MOB),
('ENERGY', 'energy', 'энергия', VARIABLE_TYPE.NUMBER),
('SELL_PRICE', 'sell_price', 'цена продажи', VARIABLE_TYPE.NUMBER),
('OLD_ARTIFACT', 'old_artifact', 'старый артефакт', VARIABLE_TYPE.ARTIFACT),
('PERSON', 'person', 'мастер', VARIABLE_TYPE.PERSON),
('NEW_NAME', 'new_name', 'новое название города', VARIABLE_TYPE.PLACE),
('OLD_NAME', 'old_name', 'старое название города', VARIABLE_TYPE.PLACE),
('NEW_MODIFIER', 'new_modifier', 'новый модификатор города', VARIABLE_TYPE.MODIFIER),
('OLD_MODIFIER', 'old_modifier', 'старый модификатор города', VARIABLE_TYPE.MODIFIER),
('OLD_RACE', 'old_race', 'старая раса', VARIABLE_TYPE.RACE),
('NEW_RACE', 'new_race', 'новая раса', VARIABLE_TYPE.RACE),
('PLACE_1', 'place_1', '1-ый город', VARIABLE_TYPE.PLACE),
('PLACE_2', 'place_2', '2-ой город', VARIABLE_TYPE.PLACE),
('RESOURCE_1', 'resource_1', '1-ый ресурс', VARIABLE_TYPE.TEXT),
('RESOURCE_2', 'resource_2', '2-ой ресурс', VARIABLE_TYPE.TEXT),
('TEXT', 'text', 'любой текст', VARIABLE_TYPE.TEXT),
('EFFECTIVENESS', 'effectiveness', 'эффективность', VARIABLE_TYPE.NUMBER),
('ATTACKER', 'attacker', 'атакующий', VARIABLE_TYPE.ACTOR),
('DAMAGE', 'damage', 'урон', VARIABLE_TYPE.NUMBER),
('DEFENDER', 'defender', 'защитник', VARIABLE_TYPE.ACTOR),
('ACTOR', 'actor', 'актор (герой или монстр)', VARIABLE_TYPE.ACTOR),
('CONVERSION', 'conversion', 'информация о конверсии параметров', VARIABLE_TYPE.TEXT),
('COMPANION', 'companion', 'спутник', VARIABLE_TYPE.ACTOR),
('COMPANION_OWNER', 'companion_owner', 'владелец спутника', VARIABLE_TYPE.ACTOR),
('ATTACKER_DAMAGE', 'attacker_damage', 'урон по атакующему', VARIABLE_TYPE.NUMBER),
('DATE', 'date', 'дата в мире игры', VARIABLE_TYPE.DATE), )
| [
"[email protected]"
] | |
1551ab1e2a47ce675db8e20c4ba4a4daf9d2963b | 721df1d44078b81ec752f57c2b3d6ff45aa657f7 | /usr/share/pyshared/passlib/ext/django/utils.py | f44f95d9e6d027df28937b20c5d45366e1eac8ca | [] | no_license | Mashpy/ajenti_track | f1fe9c7f77cfa48063e546665ea16a7a2b3f3566 | 4bb45c15d4b4c14928234ba82b61a9243c5914a3 | refs/heads/master | 2022-12-24T03:50:42.232380 | 2016-09-10T07:55:48 | 2016-09-10T07:55:48 | 67,797,197 | 0 | 2 | null | 2022-12-13T05:13:18 | 2016-09-09T12:30:55 | C | UTF-8 | Python | false | false | 8,292 | py | """passlib.ext.django.utils - helper functions for patching Django hashing
.. warning::
This code is experimental and subject to change,
and not officially documented in Passlib just yet
(though it should work).
"""
#===================================================================
#imports
#===================================================================
#site
from warnings import warn
#pkg
from passlib.utils import is_crypt_context, bytes
#local
__all__ = [
"get_category",
"set_django_password_context",
]
#===================================================================
#lazy imports
#===================================================================
_has_django0 = None # old 0.9 django - lacks unusable_password support
_dam = None #django.contrib.auth.models reference
def _import_django():
global _dam, _has_django0
if _dam is None:
import django.contrib.auth.models as _dam
from django import VERSION
_has_django0 = VERSION < (1,0)
return _dam
#===================================================================
#constants
#===================================================================
#: base context mirroring django's setup
STOCK_CTX = """
[passlib]
schemes =
django_salted_sha1, django_salted_md5,
django_des_crypt, hex_md5,
django_disabled
default = django_salted_sha1
deprecated = hex_md5
"""
#: default context used by app
DEFAULT_CTX = """
[passlib]
schemes =
sha512_crypt,
pbkdf2_sha256,
django_salted_sha1, django_salted_md5,
django_des_crypt, hex_md5,
django_disabled
default = sha512_crypt
deprecated =
pbkdf2_sha256,
django_salted_sha1, django_salted_md5,
django_des_crypt, hex_md5
all__vary_rounds = 5%%
sha512_crypt__default_rounds = 15000
staff__sha512_crypt__default_rounds = 25000
superuser__sha512_crypt__default_rounds = 35000
"""
#===================================================================
# helpers
#===================================================================
def get_category(user):
"""default get_category() implementation used by set_django_password_context
this is the function used if ``settings.PASSLIB_GET_CONTEXT`` is not
specified.
it maps superusers to the ``"superuser"`` category,
staff to the ``"staff"`` category,
and all others to the default category.
"""
if user.is_superuser:
return "superuser"
if user.is_staff:
return "staff"
return None
def um(func):
"unwrap method (eg User.set_password -> orig func)"
return func.im_func
#===================================================================
# monkeypatch framework
#===================================================================
# NOTE: this moneypatcher was written to be useful
# outside of this module, and re-invokable,
# which is why it tries so hard to maintain
# sanity about it's patch state.
_django_patch_state = None #dict holding refs to undo patch
def set_django_password_context(context=None, get_category=get_category):
"""monkeypatches :mod:`!django.contrib.auth` to use specified password context.
:arg context:
Passlib context to use for Django password hashing.
If ``None``, restores original Django functions.
In order to support existing hashes,
any context specified should include
all the hashes in :data:`django_context`
in addition to custom hashes.
:param get_category:
Optional function to use when mapping Django user ->
CryptContext category.
If a function, should have syntax ``catfunc(user) -> category|None``.
If ``None``, no function is used.
By default, uses a function which returns ``"superuser"``
for superusers, and ``"staff"`` for staff.
This function monkeypatches the following parts of Django:
* :func:`!django.contrib.auth.models.check_password`
* :meth:`!django.contrib.auth.models.User.check_password`
* :meth:`!django.contrib.auth.models.User.set_password`
It also stores the provided context in
:data:`!django.contrib.auth.models.User.password_context`,
for easy access.
"""
global _django_patch_state, _dam, _has_django0
_import_django()
state = _django_patch_state
User = _dam.User
# issue warning if something else monkeypatched User
# while our patch was applied.
if state is not None:
if um(User.set_password) is not state['user_set_password']:
warn("another library has patched "
"django.contrib.auth.models:User.set_password")
if um(User.check_password) is not state['user_check_password']:
warn("another library has patched"
"django.contrib.auth.models:User.check_password")
if _dam.check_password is not state['models_check_password']:
warn("another library has patched"
"django.contrib.auth.models:check_password")
#check if we should just restore original state
if context is None:
if state is not None:
del User.password_context
_dam.check_password = state['orig_models_check_password']
User.set_password = state['orig_user_set_password']
User.check_password = state['orig_user_check_password']
_django_patch_state = None
return
#validate inputs
if not is_crypt_context(context):
raise TypeError("context must be CryptContext instance or None: %r" %
(type(context),))
#backup original state if this is first call
if state is None:
_django_patch_state = state = dict(
orig_user_check_password = um(User.check_password),
orig_user_set_password = um(User.set_password),
orig_models_check_password = _dam.check_password,
)
#prepare replacements
if _has_django0:
UNUSABLE_PASSWORD = "!"
else:
UNUSABLE_PASSWORD = _dam.UNUSABLE_PASSWORD
def set_password(user, raw_password):
"passlib replacement for User.set_password()"
if raw_password is None:
if _has_django0:
# django 0.9
user.password = UNUSABLE_PASSWORD
else:
user.set_unusable_password()
else:
cat = get_category(user) if get_category else None
user.password = context.encrypt(raw_password, category=cat)
def check_password(user, raw_password):
"passlib replacement for User.check_password()"
if raw_password is None:
return False
hash = user.password
if not hash or hash == UNUSABLE_PASSWORD:
return False
cat = get_category(user) if get_category else None
ok, new_hash = context.verify_and_update(raw_password, hash,
category=cat)
if ok and new_hash is not None:
user.password = new_hash
user.save()
return ok
def raw_check_password(raw_password, enc_password):
"passlib replacement for check_password()"
if not enc_password or enc_password == UNUSABLE_PASSWORD:
raise ValueError("no password hash specified")
return context.verify(raw_password, enc_password)
#set new state
User.password_context = context
User.set_password = state['user_set_password'] = set_password
User.check_password = state['user_check_password'] = check_password
_dam.check_password = state['models_check_password'] = raw_check_password
state['context' ] = context
state['get_category'] = get_category
##def get_django_password_context():
## """return current django password context
##
## This returns the current :class:`~passlib.context.CryptContext` instance
## set by :func:`set_django_password_context`.
## If not context has been set, returns ``None``.
## """
## global _django_patch_state
## if _django_patch_state:
## return _django_patch_state['context']
## else:
## return None
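## Example usage (an illustrative sketch only; the CryptContext below is an
## assumed configuration, not something defined in this module):
##
##     from passlib.context import CryptContext
##
##     ctx = CryptContext(schemes=["sha512_crypt", "md5_crypt"])
##     set_django_password_context(ctx)    # patch Django's password handling
##     ...
##     set_django_password_context(None)   # restore Django's original behavior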
#===================================================================
#eof
#===================================================================
| [
"[email protected]"
] | |
736eeaec4f81f0588d349ab5d03f0d73f1186438 | 1f290bd29534a719ed94d30eea6a9bff241908af | /H-Index II.py | 59f699c095678803f098759831f2469c48fdc3fa | [] | no_license | nan0445/Leetcode-Python | 3a838a9178cd86220ace85da2d6a8b084975552d | bccd0f6ebb00e9569093f8ec18ebf0e94035dce6 | refs/heads/master | 2020-03-18T18:19:19.010566 | 2018-08-11T02:17:36 | 2018-08-11T02:17:36 | 135,084,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | class Solution:
def hIndex(self, citations):
"""
:type citations: List[int]
:rtype: int
"""
if not citations: return 0
n = len(citations)
l, r = 0, n - 1
res = 0
while l<r:
mid = (l + r) // 2
if citations[mid] > n - mid - 1: r = mid
elif citations[mid] < n - mid - 1: l = mid + 1
else:
res = max(res, n - mid - 1)
r = mid
#print (l, r)
return max(res, n - l - 1 if citations[l] <= n - l - 1 else n - l)
| [
"[email protected]"
] | |
5d8b3e9db0e0388c172e5f0d7329cb8741afabb9 | e56f6de4a6ef3970e146870ec1743bf6aa7c0687 | /randomWalk3D.py | ac9702b59f2c094081533e94e1d3800d4ca79aa6 | [] | no_license | CodeProgress/DataAnalysis | 0bbcb0155351a9c9893079436739afa67653e641 | ce98b3c10fb719a047ffeedc34b16858687b7e1f | refs/heads/master | 2021-12-31T19:33:38.386511 | 2021-12-30T18:04:38 | 2021-12-30T18:04:38 | 12,932,358 | 0 | 0 | null | 2014-01-31T17:14:52 | 2013-09-18T19:21:11 | Python | UTF-8 | Python | false | false | 2,118 | py | import pylab
import random
import numpy
from mpl_toolkits.mplot3d import Axes3D
# adapted from stackoverflow.com/questions/34920680/plotting-3d-random-walk-in-py
# Random next step functions:
def random_single_step_in_one_direction(current_coordinate):
next_random_coordinate = current_coordinate[:]
axis_of_next_step = random.randint(0, 2)
next_random_coordinate[axis_of_next_step] += random.choice([-1,1])
return next_random_coordinate
def random_continuous_step_in_all_directions(current_coordinate):
next_random_coordinate = current_coordinate[:]
next_random_coordinate[0] += random.uniform(-1, 1)
next_random_coordinate[1] += random.uniform(-1, 1)
next_random_coordinate[2] += random.uniform(-1, 1)
return next_random_coordinate
# Plotting functions:
def get_3d_walk_coordinates(next_random_3d_step_function, num_trials=1000):
"""returns a list of [x,y,z] coordinates along the random path"""
current_coordinate = [0, 0, 0]
xyz = [current_coordinate[:]]
for i in xrange(num_trials):
current_coordinate = next_random_3d_step_function(current_coordinate)
xyz.append(current_coordinate)
return xyz
def plot_3d_walk(next_random_3d_step_function, num_trials=1000):
figure = pylab.figure()
ax = figure.gca(projection='3d')
list_of_coordinates = get_3d_walk_coordinates(next_random_3d_step_function, num_trials)
x, y, z = zip(*list_of_coordinates)
ax.plot(x, y, z)
ax.scatter(0, 0, 0, marker='s', color='green') # start point
ax.scatter(x[-1], y[-1], z[-1], marker='8', color='red') # end point
ax.plot([0, x[-1]], [0, y[-1]], [0, z[-1]], color='black') # line connecting start to end
start_point = numpy.array((0, 0, 0))
end_point = numpy.array((x[-1], y[-1], z[-1]))
distance_from_start_to_end = numpy.linalg.norm(start_point-end_point)
print "Distance from start to end after {} random steps is: {}".format(
num_trials, distance_from_start_to_end)
pylab.show()
plot_3d_walk(random_single_step_in_one_direction)
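# The continuous-step variant defined above can be plotted the same way, e.g.:
# plot_3d_walk(random_continuous_step_in_all_directions, num_trials=500)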
| [
"[email protected]"
] | |
1886fb79eeea3bfc1b1152d1c203a4202a8e77b2 | e3fcacbd159a4df2ca398251ee4f3310860a8ec2 | /simple_dhcp_parser/add_data.py | 2c719429a9abe60e3aff2a68b6088ebf342a7539 | [] | no_license | DmitriyPanteleev/my-network-automation | 51f1270ff82008e7f1d484150a13db7551c1ff62 | 21c6b92942c8224b10214f7d4e3f2705db866c1c | refs/heads/master | 2023-02-10T10:22:25.153808 | 2021-01-08T10:25:18 | 2021-01-08T10:25:18 | 299,320,011 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,280 | py | #!/usr/bin/env python
import sqlite3
import glob
import os
import re
def add_dhcpdb(db_filename, dhcp_snoop_files):
db_exists = os.path.exists(db_filename)
if db_exists:
        # Prepare DHCP data to insert
data_filename = 'dhcp_snooping.txt'
regex1 = re.compile('(\S+) +(\S+) +\d+ +\S+ +(\d+) +(\S+)')
regex2 = re.compile('(sw\d+)')
result_dhcp = []
for dhcp_snoop_file in dhcp_snoop_files:
with open(dhcp_snoop_file) as data:
sw_name = regex2.search(dhcp_snoop_file)
for line in data:
match = regex1.search(line)
if match:
ext_match = list(match.groups())
ext_match.append(list(sw_name.groups())[0])
result_dhcp.append(tuple(ext_match))
# Inserting data
conn = sqlite3.connect(db_filename)
print('Inserting DHCP Snooping data')
for row in result_dhcp:
try:
with conn:
query = '''insert into dhcp (mac, ip, vlan, interface, switch) values (?, ?, ?, ?, ?)'''
conn.execute(query, row)
except sqlite3.IntegrityError as e:
                print('Error occurred: ', e)
conn.close()
else:
        print('Database does not exist. Please create it before adding data.')
def add_switchesdb(db_filename, switches_file):
db_exists = os.path.exists(db_filename)
if db_exists:
        # Prepare switches data to insert
result_sw = []
regex3 = re.compile('(sw\d+): ([A-Za-z0-9-, ]+)')
with open(switches_file) as data:
for line in data:
match = regex3.search(line)
if match:
result_sw.append(match.groups())
# Inserting data
conn = sqlite3.connect(db_filename)
print('Inserting Switch data')
for row in result_sw:
try:
with conn:
query = '''insert into switches (hostname, location) values (?, ?)'''
conn.execute(query, row)
except sqlite3.IntegrityError as e:
                print('Error occurred: ', e)
conn.close()
else:
        print('Database does not exist. Please create it before adding data.')
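# Note: both helpers above assume dhcp_snooping.db already exists with the two
# tables referenced in the insert statements. An illustrative schema (column
# types are an assumption, not taken from this repository) could be:
#   create table dhcp (mac text, ip text, vlan text, interface text, switch text);
#   create table switches (hostname text primary key, location text);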
if __name__ == '__main__':
dhcp_snoop_files = glob.glob('sw*_dhcp_snooping.txt')
add_switchesdb('dhcp_snooping.db', 'switches.yml')
add_dhcpdb('dhcp_snooping.db', dhcp_snoop_files)
| [
"[email protected]"
] | |
2b761147307f51614108674d9e661f853cff2829 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_083/ch36_2020_04_06_19_28_04_107585.py | 23d50e7f2390a698610ad478d5694904adc497aa | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | def fatorial(x):
y=1
f=1
    for i in range(x):
y=y*f
f+=1
return y | [
"[email protected]"
] | |
eeeb19816a0b7b18317d30186d28f317cad6e170 | 37d7f7fdf6a1d2e1e0c4fc2730ad696a06c00350 | /tethysapp/gfs/ajax.py | 16ec9030e8e4a46b3d6b948f4f9052b5dea234c6 | [
"BSD-3-Clause"
] | permissive | rileyhales/gfs | d2f32f254ef306c8a8ffb79945efd32043f9007f | 01fbe596bc1423b5a54485d17f5ca1b664cca5df | refs/heads/master | 2021-07-10T03:30:56.575041 | 2020-08-13T20:25:24 | 2020-08-13T20:25:24 | 188,119,025 | 1 | 3 | BSD-3-Clause | 2019-10-14T21:22:23 | 2019-05-22T21:55:41 | Python | UTF-8 | Python | false | false | 3,409 | py | import ast
import subprocess
import os
import zipfile
import shutil
import json
from django.http import JsonResponse
from .charts import newchart
from .options import variable_levels
from .app import Gfs as App
def getchart(request):
"""
Used to make a timeseries of a variable at a user drawn point
Dependencies: gldas_variables (options), pointchart (tools), ast, makestatplots (tools)
"""
data = ast.literal_eval(request.body.decode('utf-8'))
data['instance_id'] = request.META['HTTP_COOKIE'].split('instance_id=')[1][0:9]
return JsonResponse(newchart(data))
def get_levels_for_variable(request):
data = ast.literal_eval(request.body.decode('utf-8'))
variable = data['variable']
levels = variable_levels()[variable]
return JsonResponse({'levels': levels})
def uploadshapefile(request):
files = request.FILES.getlist('files')
instance_id = request.META['HTTP_COOKIE'].split('instance_id=')[1][0:9]
user_workspace = os.path.join(os.path.dirname(__file__), 'workspaces', 'user_workspaces', instance_id)
if not os.path.exists(user_workspace):
os.mkdir(user_workspace)
# write the new files to the directory
for n, file in enumerate(files):
with open(os.path.join(user_workspace, file.name), 'wb') as dst:
for chunk in files[n].chunks():
dst.write(chunk)
    # get the app's geoserver engine, wfs endpoint, and the user's upload store
gs_eng = App.get_spatial_dataset_service(name='geoserver', as_engine=True)
gs_wfs = App.get_spatial_dataset_service(name='geoserver', as_wfs=True)
gs_store = 'user-uploads:' + instance_id
shp = [i for i in os.listdir(user_workspace) if i.endswith('.shp')][0].split('.')[0]
shppath = os.path.join(user_workspace, shp)
gs_eng.create_shapefile_resource(
store_id=gs_store,
shapefile_base=shppath,
overwrite=True
)
# rename the files and create a zip archive
files = os.listdir(user_workspace)
zippath = os.path.join(user_workspace, instance_id + '.zip')
archive = zipfile.ZipFile(zippath, mode='w')
for file in files:
archive.write(os.path.join(user_workspace, file), arcname=file)
archive.close()
# upload the archive to geoserver
shellpath = os.path.join(App.get_app_workspace().path, 'upload_shapefile.sh')
v1 = gs_eng.username
v2 = gs_eng.password
v3 = zippath
v4 = gs_eng.endpoint
v5 = App.package
v6 = shp
subprocess.call(['bash', shellpath, v1, v2, v3, v4, v5, v6])
return JsonResponse({'gsurl': gs_wfs, 'gsworksp': v5, 'shpname': v6})
def uploadgeojson(request):
files = request.FILES.getlist('files')
instance_id = request.META['HTTP_COOKIE'].split('instance_id=')[1][0:9]
user_workspace = os.path.join(os.path.dirname(__file__), 'workspaces', 'user_workspaces', instance_id)
if not os.path.exists(user_workspace):
os.mkdir(user_workspace)
gj_file_path = os.path.join(user_workspace, 'usergj.geojson')
# write the new files to the directory
for n, file in enumerate(files):
with open(gj_file_path, 'wb') as dst:
for chunk in files[n].chunks():
dst.write(chunk)
try:
with open(gj_file_path, 'r') as gj:
return JsonResponse(json.loads(gj.read()))
except Exception as e:
print(e)
return JsonResponse({'status': 'failed'})
| [
"[email protected]"
] | |
47a24c6afe341ca93d841b75a60d8150f7fb83c9 | 6064f76c2afa9157bde80c4755247bc2461e3413 | /backend/predict.py | 8a6014cdb0d3de7624a316d5211adf9c7fb37231 | [
"Apache-2.0"
] | permissive | chenyicai-0611/YOLOX-Flask-deployment | cbde3e7091339b958dbcd8f00e480982d74cfd1b | 14b0679eadbd1ffac60a4355c9e7db948d595946 | refs/heads/master | 2023-07-18T22:12:46.723193 | 2021-09-20T09:25:03 | 2021-09-20T09:25:03 | 408,381,058 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,908 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
#import argparse
import os
import time
from loguru import logger
import cv2
import torch
import json
from pathlib import Path
from yolox.data.data_augment import ValTransform
from yolox.data.datasets import COCO_CLASSES
from yolox.exp import get_exp
from yolox.utils import fuse_model, get_model_info, postprocess, vis
from backend.flask_id2name import id2name
IMAGE_EXT = [".jpg", ".jpeg", ".webp", ".bmp", ".png"]
class DetectedInfo:
boxes_detected = []
def get_image_list(path):
image_names = []
for maindir, subdir, file_name_list in os.walk(path):
for filename in file_name_list:
apath = os.path.join(maindir, filename)
ext = os.path.splitext(apath)[1]
if ext in IMAGE_EXT:
image_names.append(apath)
return image_names
class Predictor(object):
def __init__(
self,
model,
exp,
cls_names=COCO_CLASSES,
trt_file=None,
decoder=None,
device="cpu",
legacy=False,
):
self.model = model
self.cls_names = cls_names
self.decoder = decoder
self.num_classes = exp.num_classes
self.confthre = exp.test_conf
self.nmsthre = exp.nmsthre
self.test_size = exp.test_size
self.device = device
self.preproc = ValTransform(legacy=legacy)
if trt_file is not None:
from torch2trt import TRTModule
model_trt = TRTModule()
model_trt.load_state_dict(torch.load(trt_file))
x = torch.ones(1, 3, exp.test_size[0], exp.test_size[1]).cuda()
self.model(x)
self.model = model_trt
def inference(self, img):
img_info = {"id": 0}
if isinstance(img, str):
img_info["file_name"] = os.path.basename(img)
img = cv2.imread(img)
else:
img_info["file_name"] = None
height, width = img.shape[:2]
img_info["height"] = height
img_info["width"] = width
img_info["raw_img"] = img
ratio = min(self.test_size[0] / img.shape[0], self.test_size[1] / img.shape[1])
img_info["ratio"] = ratio
img, _ = self.preproc(img, None, self.test_size)
img = torch.from_numpy(img).unsqueeze(0)
if self.device == "gpu":
img = img.cuda()
with torch.no_grad():
t0 = time.time()
outputs = self.model(img)
if self.decoder is not None:
outputs = self.decoder(outputs, dtype=outputs.type())
outputs = postprocess(
outputs, self.num_classes, self.confthre, self.nmsthre
)
logger.info("Infer time: {:.4f}s".format(time.time() - t0))
return outputs, img_info
def visual(self, output, img_info, cls_conf=0.35):
ratio = img_info["ratio"]
img = img_info["raw_img"]
if output is None:
return img
output = output.cpu()
bboxes = output[:, 0:4]
# preprocessing: resize
bboxes /= ratio
cls = output[:, 6]
scores = output[:, 4] * output[:, 5]
        DetectedInfo.boxes_detected = [] # detection results
for i in range(len(bboxes)):
box = bboxes[i]
cls_id = int(cls[i])
score = scores[i]
if score < cls_conf:
continue
x0 = max(int(box[0]), 0)
y0 = max(int(box[1]), 0)
x1 = max(int(box[2]), 0)
y1 = max(int(box[3]), 0)
DetectedInfo.boxes_detected.append({"name": id2name[cls_id],
"conf": str(score.item()),
"bbox": [x0, y0, x1, y1]
})
print('boxes_detected = ', DetectedInfo.boxes_detected)
vis_res = vis(img, bboxes, scores, cls, cls_conf, self.cls_names)
return vis_res
def image_demo(predictor, vis_folder, path, current_time, save_result):
if os.path.isdir(path):
files = get_image_list(path)
else:
files = [path]
files.sort()
for image_name in files:
outputs, img_info = predictor.inference(image_name)
result_image = predictor.visual(outputs[0], img_info, predictor.confthre)
if save_result:
save_folder = os.path.join(
vis_folder, time.strftime("%Y_%m_%d_%H_%M_%S", current_time)
)
os.makedirs(save_folder, exist_ok=True)
save_file_name = os.path.join(save_folder, os.path.basename(image_name))
logger.info("Saving detection result in {}".format(save_file_name))
cv2.imwrite(save_file_name, result_image)
ch = cv2.waitKey(0)
if ch == 27 or ch == ord("q") or ch == ord("Q"):
break
def imageflow_demo(predictor, vis_folder, current_time, args):
cap = cv2.VideoCapture(args['path'] if args['demo'] == "video" else args['camid'])
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # float
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float
fps = cap.get(cv2.CAP_PROP_FPS)
save_folder = os.path.join(
vis_folder, time.strftime("%Y_%m_%d_%H_%M_%S", current_time)
)
os.makedirs(save_folder, exist_ok=True)
if args.demo == "video":
save_path = os.path.join(save_folder, args.path.split("/")[-1])
else:
save_path = os.path.join(save_folder, "camera.mp4")
logger.info(f"video save_path is {save_path}")
vid_writer = cv2.VideoWriter(
save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (int(width), int(height))
)
while True:
ret_val, frame = cap.read()
if ret_val:
outputs, img_info = predictor.inference(frame)
result_frame = predictor.visual(outputs[0], img_info, predictor.confthre)
            if args['save_result']:
vid_writer.write(result_frame)
ch = cv2.waitKey(1)
if ch == 27 or ch == ord("q") or ch == ord("Q"):
break
else:
break
def preprocess_and_load():
    # read the Flask configuration
with open('./backend/flask_config.json', 'r', encoding='utf8') as fp:
args = json.load(fp)
print('Flask Config : ', args)
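    # Illustrative flask_config.json shape (keys taken from the lookups in this
    # module; the example values below are placeholders, not from the repo):
    #   {"demo": "image", "source": "./backend", "path": "", "camid": 0,
    #    "exp_file": "./exps/example_exp.py", "name": null, "ckpt": "./model.pth",
    #    "experiment_name": "", "save_result": true, "device": "cpu",
    #    "conf": 0.25, "nms": 0.45, "tsize": 640, "fuse": false, "trt": false,
    #    "legacy": false}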
exp = get_exp(args['exp_file'], args['name'])
if not args['experiment_name']:
args['experiment_name'] = exp.exp_name
file_name = os.path.join(exp.output_dir, args['experiment_name'])
os.makedirs(file_name, exist_ok=True)
vis_folder = None
if args['save_result']:
vis_folder = os.path.join(file_name, "vis_res")
os.makedirs(vis_folder, exist_ok=True)
args['vis_folder'] = vis_folder
if args['trt']:
args['device'] = "gpu"
#logger.info("Args: {}".format(args))
if args['conf'] is not None:
exp.test_conf = args['conf']
if args['nms'] is not None:
exp.nmsthre = args['nms']
if args['tsize'] is not None:
exp.test_size = (args['tsize'], args['tsize'])
model = exp.get_model()
logger.info("Model Summary: {}".format(get_model_info(model, exp.test_size)))
if args['device'] == "gpu":
model.cuda()
model.eval()
ckpt_file = args['ckpt']
logger.info("loading checkpoint")
ckpt = torch.load(ckpt_file, map_location="cpu")
# load the model state dict
model.load_state_dict(ckpt["model"])
logger.info("loaded checkpoint done.")
if args['fuse']:
logger.info("\tFusing model...")
model = fuse_model(model)
if args['trt']:
assert not args['fuse'], "TensorRT model is not support model fusing!"
trt_file = os.path.join(file_name, "model_trt.pth")
assert os.path.exists(
trt_file
), "TensorRT model is not found!\n Run python3 tools/trt.py first!"
model.head.decode_in_inference = False
decoder = model.head.decode_outputs
logger.info("Using TensorRT to inference")
else:
trt_file = None
decoder = None
return model, exp, trt_file, decoder, args
def predict_and_postprocess(model, exp, trt_file, decoder, args):
print('start to predict the uploaded image ......')
predictor = Predictor(model, exp, COCO_CLASSES, trt_file, decoder, args['device'], args['legacy'])
current_time = time.localtime()
if args['demo'] == "image":
img_path = str(Path(args['source']) / Path("img4predict.jpg")) # 读取路径
image_demo(predictor, args['vis_folder'], img_path, current_time, args['save_result'])
elif args['demo'] == "video" or args['demo'] == "webcam":
imageflow_demo(predictor, args['vis_folder'], current_time, args)
if __name__ == "__main__":
model, exp, trt_file, decoder, args = preprocess_and_load()
predict_and_postprocess(model, exp, trt_file, decoder, args) | [
"[email protected]"
] | |
96a28ef7635dd6dceeee5476301269b160731364 | 4da91b25e97a96cb64df9c7a79749e233d6f872b | /exe1/configs.py | a800691bbd58ff547605b0cae25e942811829811 | [] | no_license | Forsworns/DataScienceExe | b38950b0a9216c7689c3e438f3f9df8b4123526a | e740208baf892a503aa8521c5a75ec6ff9b4f319 | refs/heads/master | 2020-04-28T05:46:00.411964 | 2019-06-14T15:41:13 | 2019-06-14T15:41:13 | 175,032,336 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | import numpy as np
# file type
MODEL = "models"
RESULT = "results"
# models
BASELINE = "SVC_baseline"
COMPARE = "SVC_compare"
GA = "genetic_algorithm"
B_VT = "backward_variance_threshold"
F_UF = "forward_univariable_feature"
B_SFM = "backward_select_from_model"
AUC = "AUC_ROC"
# test train split
TEST_SIZE = 0.4
# stored testing set and training set
X_TRAIN = "data/train_x.npy"
Y_TRAIN = "data/train_y.npy"
X_TEST = "data/test_x.npy"
Y_TEST = "data/test_y.npy"
# paras for SVM base
DECI_FUNCS = ['ovo', 'ovr']
KERNELS = ['linear', 'poly', 'rbf', 'sigmoid']
KERNELS_MAP = {'linear':0, 'poly':1, 'rbf':2, 'sigmoid':3}
CS = [0.01, 0.04, 0.07, 0.1, 0.3, 0.5, 1, 2]
COLORS = np.array(['#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
]) | [
"[email protected]"
] | |
34683804eaaf07ba99bc95b0ff05393eefa39a69 | 9cb1fbbe0648bf95791097be473f0f8d79bcff8b | /python/02-optimize/solutionNumber.py | 1455f6fac21d0431e19b696645dc58dd67f1f3e6 | [] | no_license | yushichenchen/ai | ae4b06e616ccb9e648debeb2df07331ca3bfe9ad | a16d11739add623635623f498e73fdb8e946ee68 | refs/heads/master | 2020-08-01T10:23:04.694490 | 2019-09-24T01:27:59 | 2019-09-24T01:27:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | from py6.ai import hillClimbing # 引入解答類別
from py6.ai.solution import Solution
import random
class SolutionNumber(Solution):
    def neighbor(self): # neighbor function for a single-variable solution
        x = self.v
        dx = self.step # x: current solution value, dx: step size
        xnew = x+dx if random.random() > 0.5 else x-dx # randomly step left or right
        return SolutionNumber(xnew) # build and return the new solution
    def energy(self): # energy function
        x = self.v # x: current solution value
        return abs(x*x-4) # the energy is |x^2 - 4|
    def str(self): # convert the solution to a string for printing
return "energy({:s})={:f}".format(str(self.v), self.energy())
| [
"[email protected]"
] | |
fb8ee56077b4d787242d44096608f92e32ed4620 | 999b71bff0f28584fbaefb7f7e5c4c7b45fda8fc | /guess_the_number.py | 37166c031b1bf75f86a742dc9ca58cb64563112e | [] | no_license | fg2srt4/simple_projects | 28526285368ed688864e9c55a6633fed48a072c3 | b87abc1287d78b0139ec2264932d93f8433b8d52 | refs/heads/master | 2020-06-03T21:48:45.752899 | 2019-06-13T10:50:13 | 2019-06-13T10:50:13 | 191,744,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,409 | py | import random
# step 1, generate the random number
def answer():
answer = random.randint(0, 100)
return answer
# step 2, get user's name
def intro():
print("Hello, welcome to the number guessing game.")
print("What is your name? ")
name = str(input('Name: '))
return name
# function to get user's number guess and ensure it is an integer
def guess():
guess = input("Guess: ")
try:
val = int(guess)
return val
except ValueError:
print("You can only enter numbers in this program.")
print("Rebooting...\n")
main()
# main function, checks users guess agains the answer, asks again if wrong
def main():
name = intro()
answer1 = answer()
print("Alright %s, I am thinking of a number, between 0 and 100" % name)
print("What number am I thinking of?")
guess1 = guess()
while guess1 != answer1:
if guess1 > 100 or guess1 < 0:
print("Sorry, I said between 0 and 100")
guess1 = guess()
elif guess1 > answer1:
print("Sorry, you've guessed too high")
guess1 = guess()
elif guess1 < answer1:
print("Sorry, you've guessed too low")
guess1 = guess()
print("Exactly! You guessed the right number")
main()
| [
"[email protected]"
] | |
351ec4d03245adece117f9bd9e1afb304357a4bb | 9947f5315175584c049d3690da3bd3b695c959a2 | /ch-08-recursion-and-dynamic-programming/12-eight-queens.py | 5c3b43beebfce00f462bb972267c7554a6c5dc0d | [] | no_license | GeorgeUofT/ctci-questions | 3c32a2af59f980ee952386e3784fa6cb1e88ea56 | 99f65e56592b2e709984c85401a2faf8d01e620e | refs/heads/master | 2021-05-04T05:51:02.011012 | 2018-02-05T19:41:29 | 2018-02-05T19:41:29 | 120,345,490 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | # Find all arrangements of eight queens on a chess board that cannot attack
# each other.
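# A minimal backtracking sketch (added illustration -- the original file only
# contains the problem statement above). Queens are placed one per row;
# cols[r] holds the column of the queen already placed in row r.
def solve_eight_queens(n=8):
    results = []
    def is_valid(cols, row, col):
        for r, c in enumerate(cols):
            # same column, or same diagonal, as an earlier queen
            if c == col or abs(c - col) == abs(r - row):
                return False
        return True
    def place(row, cols):
        if row == n:
            results.append(list(cols))
            return
        for col in range(n):
            if is_valid(cols, row, col):
                cols.append(col)
                place(row + 1, cols)
                cols.pop()
    place(0, [])
    return results
if __name__ == '__main__':
    print(len(solve_eight_queens()))  # 92 arrangements on a standard 8x8 board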
| [
"[email protected]"
] | |
11c39843ae18bd1afb9095e44a9148f9b8f92fe9 | 034213d82aaea5e0f050619f7de9fc2ec07bf733 | /mtfl_train_validation.py | d55cdc55ee29814350da7198a9d3399c5fcb5a26 | [] | no_license | pinareceaktan/multi-task-feature-learning | 79ca7e3ea6577e6f49a0544900def14814d08a70 | aa58db086fcd451f09b43d591633f07a0bdf0a12 | refs/heads/master | 2020-03-27T11:00:12.241060 | 2018-08-28T15:21:43 | 2018-08-28T15:21:43 | 146,458,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,469 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import os
import tensorflow as tf
import mtfl
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', os.path.join(os.getcwd(), 'mtfl_train_log'),
""" Directory where to write event logs and checkpoint""")
tf.app.flags.DEFINE_integer("max_steps", 100000,
""" Number of batches to run""")
tf.app.flags.DEFINE_boolean("log_device_placement", False,
""" Whether to log device placement""")
tf.app.flags.DEFINE_integer("log_frequency", 5,
""" How often to log results to the console""")
tf.app.flags.DEFINE_integer("validation_frequency", 20,
""" How often do you want to evaluate validation set""")
tf.app.flags.DEFINE_string("data_set", "MTFL",
""" Which data set to use""")
tf.app.flags.DEFINE_string("bin_path", os.path.join(os.getcwd(), 'data/MTFL-batches-bin'),
""" Where to create bins,
will not be used if bins are already exists""")
tf.app.flags.DEFINE_string("log_dir", os.path.join(os.getcwd(), "logs"),
""" Where to put non train related logs""")
tf.app.flags.DEFINE_integer("bin_image_size", 40,
""" Image size to put in bins remember it is raw""")
tf.app.flags.DEFINE_string("task", "1", """ 1 for gender classification, 2 for smile,
3 for glasses, 4 for head pose, 5 for landmarks """)
# Think of train just as main:
# Sessiona ne verirsen onu bekleyebilirsin
def train():
""" Train MTFL for a number of steps."""
# Get images and labels for MTFL
# image size: 128x40x40x1
# label size: 128x10 veya 128x1
train_images, train_labels = mtfl.train_inputs()
val_images, val_labels = mtfl.validation_inputs()
with tf.Graph().as_default():
# Create place holder for that you will feed in to the session
images = tf.placeholder(tf.float32, shape=(128, 40, 40, 1))
labels = tf.placeholder(tf.int32, shape=(128, 1))
global_step = tf.train.get_or_create_global_step()
# Build a graph that computes the logits predictions from the
# inference model.
logits = mtfl.inference(images)
# Calculate loss.
loss = mtfl.classification_loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = mtfl.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
self._start_time = time.time()
def before_run(self, run_context):
self._step += 1
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
if self._step % FLAGS.log_frequency == 0:
current_time = time.time()
duration = current_time - self._start_time
self._start_time = current_time
loss_value = run_values.results
examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration
sec_per_batch = float(duration / FLAGS.log_frequency)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print(format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch))
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op, {images: train_images, labels: train_labels})
# Open for Debug
# coord = tf.train.Coordinator()
# threads = tf.train.start_queue_runners(sess=mon_sess, coord=coord)
# print(mon_sess.run(eben))
# print(eben.dtype)
def main(argv=None):
# Make bins outta data
mtfl.maybe_make_bins()
# Create check out folders for train
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
def debug():
"""
This code block shows what is inside of a tensor.
This one is golden :)
"""
with tf.Graph().as_default():
images, labels = mtfl.distorted_inputs()
image = images[0]
mean, variance = tf.nn.moments(image, axes=[0])
with tf.Session() as sess:
tf.global_variables_initializer().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
print(image.shape)
print(sess.run([mean, variance]))
if __name__ == '__main__':
tf.app.run()
| [
"[email protected]"
] | |
fdb1e050229baa7099131fd35dee8b48df550969 | 2fa4f67427385b0c23726492f18f481eb7843c3b | /doc/conf.py | e4b5edd231ffab0233aacda9703034b4f3bc6cef | [] | no_license | Kristoffernegendahl/python-geometry | 81fbe698506053b38320d327e66bfd07e5f49b14 | c52a6631bb139e401929484e0a55c4e26ff0950f | refs/heads/master | 2021-01-18T04:57:54.530085 | 2012-05-30T06:57:11 | 2012-05-30T06:57:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,957 | py | # -*- coding: utf-8 -*-
#
# .. documentation build configuration file, created by
# sphinx-quickstart on Wed May 30 07:42:49 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.append(os.path.split(os.getcwd())[0])
print sys.path
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'..'
copyright = u'2012, Author'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', '.tex', u'.. Documentation',
u'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', '', u'.. Documentation',
[u'Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', '', u'.. Documentation',
u'Author', '', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'..'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2012, Author'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| [
"[email protected]"
] | |
f65fc68cd94ac9f7c2a13ae383648493d8c9d035 | f8972963bc77887221f900209b417915c7920747 | /venv/lib/python3.6/site-packages/pip-10.0.1-py3.6.egg/pip/_internal/utils/outdated.py | cb7414bff76277d16b6f2c8d8a9dead9decbc684 | [] | no_license | patilmanojk/UpGradProHackathonTeamTechPals | 3b35a9eaa9fb84ccd3579bd938c240169412da34 | 6bedd9e997089c5ca47709e072f6651b89faafa3 | refs/heads/master | 2020-07-28T05:51:40.672852 | 2019-09-26T19:24:45 | 2019-09-26T19:24:45 | 209,329,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,949 | py | from __future__ import absolute_import
import datetime
import json
import logging
import os.path
import sys
from pip._internal.compat import WINDOWS
from pip._internal.index import PackageFinder
from pip._internal.locations import USER_CACHE_DIR, running_under_virtualenv
from pip._internal.utils.filesystem import check_path_owner
from pip._internal.utils.misc import ensure_dir, get_installed_version
from pip._vendor import lockfile
from pip._vendor.packaging import version as packaging_version
SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
logger = logging.getLogger(__name__)
class VirtualenvSelfCheckState(object):
def __init__(self):
self.statefile_path = os.path.join(sys.prefix, "pip-selfcheck.json")
# Load the existing state
try:
with open(self.statefile_path) as statefile:
self.state = json.load(statefile)
except (IOError, ValueError):
self.state = {}
def save(self, pypi_version, current_time):
# Attempt to write out our version check file
with open(self.statefile_path, "w") as statefile:
json.dump(
{
"last_check": current_time.strftime(SELFCHECK_DATE_FMT),
"pypi_version": pypi_version,
},
statefile,
sort_keys=True,
separators=(",", ":")
)
class GlobalSelfCheckState(object):
def __init__(self):
self.statefile_path = os.path.join(USER_CACHE_DIR, "selfcheck.json")
# Load the existing state
try:
with open(self.statefile_path) as statefile:
self.state = json.load(statefile)[sys.prefix]
except (IOError, ValueError, KeyError):
self.state = {}
def save(self, pypi_version, current_time):
# Check to make sure that we own the directory
if not check_path_owner(os.path.dirname(self.statefile_path)):
return
# Now that we've ensured the directory is owned by this user, we'll go
# ahead and make sure that all our directories are created.
ensure_dir(os.path.dirname(self.statefile_path))
# Attempt to write out our version check file
with lockfile.LockFile(self.statefile_path):
if os.path.exists(self.statefile_path):
with open(self.statefile_path) as statefile:
state = json.load(statefile)
else:
state = {}
state[sys.prefix] = {
"last_check": current_time.strftime(SELFCHECK_DATE_FMT),
"pypi_version": pypi_version,
}
with open(self.statefile_path, "w") as statefile:
json.dump(state, statefile, sort_keys=True,
separators=(",", ":"))
def load_selfcheck_statefile():
if running_under_virtualenv():
return VirtualenvSelfCheckState()
else:
return GlobalSelfCheckState()
def pip_version_check(session, options):
"""Check for an update for pip.
Limit the frequency of checks to once per week. State is stored either in
the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
of the pip script path.
"""
installed_version = get_installed_version("pip")
if not installed_version:
return
pip_version = packaging_version.parse(installed_version)
pypi_version = None
try:
state = load_selfcheck_statefile()
current_time = datetime.datetime.utcnow()
# Determine if we need to refresh the state
if "last_check" in state.state and "pypi_version" in state.state:
last_check = datetime.datetime.strptime(
state.state["last_check"],
SELFCHECK_DATE_FMT
)
if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60:
pypi_version = state.state["pypi_version"]
# Refresh the version if we need to or just see if we need to warn
if pypi_version is None:
# Lets use PackageFinder to see what the latest pip version is
finder = PackageFinder(
find_links=options.find_links,
index_urls=[options.index_url] + options.extra_index_urls,
allow_all_prereleases=False, # Explicitly set to False
trusted_hosts=options.trusted_hosts,
process_dependency_links=options.process_dependency_links,
session=session,
)
all_candidates = finder.find_all_candidates("pip")
if not all_candidates:
return
pypi_version = str(
max(all_candidates, key=lambda c: c.version).version
)
# save that we've performed a check
state.save(pypi_version, current_time)
remote_version = packaging_version.parse(pypi_version)
# Determine if our pypi_version is older
if (pip_version < remote_version and
pip_version.base_version != remote_version.base_version):
# Advise "python -m pip" on Windows to avoid issues
# with overwriting pip.exe.
if WINDOWS:
pip_cmd = "python -m pip"
else:
pip_cmd = "pip"
logger.warning(
"You are using pip version %s, however version %s is "
"available.\nYou should consider upgrading via the "
"'%s install --upgrade pip' command.",
pip_version, pypi_version, pip_cmd
)
except Exception:
logger.debug(
"There was an error checking the latest version of pip",
exc_info=True,
)
| [
"[email protected]"
] | |
401386e312f7415fd6e410ea079cd4611ff15519 | f88ce8ed603fba2428706332bb2263bf3832a6f7 | /models/base_model.py | 01bbeca20ab96f55f2fd8461da23ce1b9c5b024e | [] | no_license | MahdiehNejati/MetaLearning-TF2.0 | 3b01c8dca60101ca4ef652a798aa8c504e73c854 | 87c4d5a09ab971c6520655aea99f8688c2964994 | refs/heads/master | 2023-03-04T13:54:51.318994 | 2021-02-18T17:25:59 | 2021-02-18T17:25:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,834 | py | import os
import sys
from abc import abstractmethod
import json
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import settings
from utils import combine_first_two_axes, keep_keys_with_greater_than_equal_k_items
class SetupCaller(type):
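    # After __init__ finishes, call setup() on the new instance and dump the
    # constructor args/kwargs to <root>/<config_info>/config.json (database and
    # network_cls are replaced by their string identifiers before serializing).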
def __call__(cls, *args, **kwargs):
obj = type.__call__(cls, *args, **kwargs)
obj.setup()
config_json_path = os.path.join(obj.get_root(), obj.get_config_info(), 'config.json')
kwargs['database'] = kwargs['database'].get_config_info()
kwargs['network_cls'] = kwargs['network_cls'].name
config_dict = {'args': args, 'kwargs': kwargs}
with open(config_json_path, 'w') as config_json_file:
json.dump(config_dict, config_json_file)
return obj
class BaseModel(metaclass=SetupCaller):
def __init__(
self,
database,
data_loader_cls,
network_cls,
n,
k_ml,
k_val_ml,
k_val,
k_val_val,
k_test,
k_val_test,
meta_batch_size,
meta_learning_rate,
save_after_iterations,
report_validation_frequency,
log_train_images_after_iteration, # Set to -1 if you do not want to log train images.
num_tasks_val,
val_seed=-1, # The seed for validation dataset. -1 means change the samples for each report.
experiment_name=None,
val_database=None,
test_database=None,
):
self.database = database
self.val_database = val_database if val_database is not None else self.database
self.test_database = test_database if test_database is not None else self.database
self.n = n
self.k_ml = k_ml
self.k_val_ml = k_val_ml
self.k_val = k_val if k_val is not None else self.k_ml
self.k_val_val = k_val_val
self.k_test = k_test
self.k_val_test = k_val_test
self.meta_batch_size = meta_batch_size
self.num_tasks_val = num_tasks_val
self.val_seed = val_seed
self.data_loader = self.init_data_loader(data_loader_cls)
self.experiment_name = experiment_name
self.meta_learning_rate = meta_learning_rate
self.save_after_iterations = save_after_iterations
self.log_train_images_after_iteration = log_train_images_after_iteration
self.report_validation_frequency = report_validation_frequency
self._root = self.get_root()
self.train_log_dir = None
self.train_summary_writer = None
self.val_log_dir = None
self.val_summary_writer = None
self.checkpoint_dir = None
self.network_cls = network_cls
self.model = self.initialize_network()
self.optimizer = tf.keras.optimizers.Adam(learning_rate=meta_learning_rate)
self.val_accuracy_metric = tf.metrics.Mean()
self.val_loss_metric = tf.metrics.Mean()
def setup(self):
"""Setup is called right after init. This is to make sure that all the required fields are assigned.
For example, num_steps in ml is in get_config_info(), however, it is not set in __init__ of the base model
because it is a field for maml."""
self.train_log_dir = os.path.join(self._root, self.get_config_info(), 'logs/train/')
self.val_log_dir = os.path.join(self._root, self.get_config_info(), 'logs/val/')
self.checkpoint_dir = os.path.join(self._root, self.get_config_info(), 'saved_models/')
def init_data_loader(self, data_loader_cls):
return data_loader_cls(
database=self.database,
val_database=self.val_database,
test_database=self.test_database,
n=self.n,
k_ml=self.k_ml,
k_val_ml=self.k_val_ml,
k_val=self.k_val,
k_val_val=self.k_val_val,
k_test=self.k_test,
k_val_test=self.k_val_test,
meta_batch_size=self.meta_batch_size,
num_tasks_val=self.num_tasks_val,
val_seed=self.val_seed
)
def get_root(self):
return os.path.dirname(sys.argv[0])
def get_config_info(self):
config_info = self.get_config_str()
if self.experiment_name is not None:
config_info += '_' + self.experiment_name
return config_info
def post_process_outer_gradients(self, outer_gradients):
return outer_gradients
def log_images(self, summary_writer, train_ds, val_ds, step):
with tf.device('cpu:0'):
with summary_writer.as_default():
tf.summary.image(
'train',
train_ds,
step=step,
max_outputs=self.n * (self.k_ml + self.k_val_ml)
)
tf.summary.image(
'validation',
val_ds,
step=step,
max_outputs=self.n * (self.k_ml + self.k_val_ml)
)
def save_model(self, iterations):
self.model.save_weights(os.path.join(self.checkpoint_dir, f'model.ckpt-{iterations}'))
def load_model(self, iterations=None):
iteration_count = 0
if iterations is not None:
checkpoint_path = os.path.join(self.checkpoint_dir, f'model.ckpt-{iterations}')
iteration_count = iterations
else:
checkpoint_path = tf.train.latest_checkpoint(self.checkpoint_dir)
if checkpoint_path is not None:
try:
self.model.load_weights(checkpoint_path)
iteration_count = int(checkpoint_path[checkpoint_path.rindex('-') + 1:])
print(f'==================\nResuming Training\n======={iteration_count}=======\n==================')
except Exception as e:
print('Could not load the previous checkpoint!')
print(e)
exit()
else:
print('No previous checkpoint found!')
return iteration_count
def log_histograms(self, step):
with tf.device('cpu:0'):
with self.train_summary_writer.as_default():
for var in self.model.variables:
tf.summary.histogram(var.name, var, step=step)
# for k in range(len(self.updated_models)):
# var_count = 0
# if hasattr(self.updated_models[k], 'meta_trainable_variables'):
# for var in self.updated_models[k].meta_trainable_variables:
# var_count += 1
# tf.summary.histogram(f'updated_model_{k}_' + str(var_count), var, step=iteration_count)
def get_train_dataset(self):
return self.data_loader.get_train_dataset()
def get_val_dataset(self):
return self.data_loader.get_val_dataset()
def get_test_dataset(self, num_tasks, seed=-1):
return self.data_loader.get_test_dataset(num_tasks, seed)
def train(self, iterations=5):
self.train_summary_writer = tf.summary.create_file_writer(self.train_log_dir)
self.val_summary_writer = tf.summary.create_file_writer(self.val_log_dir)
train_dataset = self.get_train_dataset()
iteration_count = self.load_model()
epoch_count = iteration_count // tf.data.experimental.cardinality(train_dataset)
pbar = tqdm(train_dataset)
train_accuracy_metric = tf.metrics.Mean()
train_accuracy_metric.reset_states()
train_loss_metric = tf.metrics.Mean()
train_loss_metric.reset_states()
should_continue = iteration_count < iterations
while should_continue:
for (train_ds, val_ds), (train_labels, val_labels) in train_dataset:
train_acc, train_loss = self.meta_train_loop(train_ds, val_ds, train_labels, val_labels)
train_accuracy_metric.update_state(train_acc)
train_loss_metric.update_state(train_loss)
iteration_count += 1
if (
self.log_train_images_after_iteration != -1 and
iteration_count % self.log_train_images_after_iteration == 0
):
self.log_images(
self.train_summary_writer,
combine_first_two_axes(train_ds[0, ...]),
combine_first_two_axes(val_ds[0, ...]),
step=iteration_count
)
self.log_histograms(step=iteration_count)
if iteration_count != 0 and iteration_count % self.save_after_iterations == 0:
self.save_model(iteration_count)
if iteration_count % self.report_validation_frequency == 0:
self.report_validation_loss_and_accuracy(iteration_count)
if iteration_count != 0:
print('Train Loss: {}'.format(train_loss_metric.result().numpy()))
print('Train Accuracy: {}'.format(train_accuracy_metric.result().numpy()))
with self.train_summary_writer.as_default():
tf.summary.scalar('Loss', train_loss_metric.result(), step=iteration_count)
tf.summary.scalar('Accuracy', train_accuracy_metric.result(), step=iteration_count)
train_accuracy_metric.reset_states()
train_loss_metric.reset_states()
pbar.set_description_str('Epoch{}, Iteration{}: Train Loss: {}, Train Accuracy: {}'.format(
epoch_count,
iteration_count,
train_loss_metric.result().numpy(),
train_accuracy_metric.result().numpy()
))
pbar.update(1)
if iteration_count >= iterations:
should_continue = False
break
epoch_count += 1
def log_metric(self, summary_writer, name, metric, step):
with summary_writer.as_default():
tf.summary.scalar(name, metric.result(), step=step)
@tf.function
def meta_train_loop(self, train_ds, val_ds, train_labels, val_labels):
with tf.GradientTape(persistent=False) as outer_tape:
tasks_final_losses = list()
tasks_final_accs = list()
for i in range(self.meta_batch_size):
task_final_acc, task_final_loss = self.get_losses_of_tasks_batch(method='train')(
(train_ds[i, ...], val_ds[i, ...], train_labels[i, ...], val_labels[i, ...])
)
tasks_final_losses.append(task_final_loss)
tasks_final_accs.append(task_final_acc)
final_acc = tf.reduce_mean(tasks_final_accs)
# self.train_accuracy_metric.update_state(final_acc)
final_loss = tf.reduce_mean(tasks_final_losses)
# self.train_loss_metric.update_state(final_loss)
outer_gradients = outer_tape.gradient(final_loss, self.model.trainable_variables)
self.post_process_outer_gradients(outer_gradients)
self.optimizer.apply_gradients(zip(outer_gradients, self.model.trainable_variables))
return final_acc, final_loss
def evaluate(self, iterations, num_tasks, iterations_to_load_from=None, seed=-1, use_val_batch_statistics=True):
"""If you set use val batch statistics to true, then the batch information from all the test samples will be
used for batch normalization layers (like MAML experiments), otherwise batch normalization layers use the
average and variance which they learned during the updates."""
# TODO add ability to set batch norm momentum if use_val_batch_statistics=False
self.test_dataset = self.get_test_dataset(num_tasks=num_tasks, seed=seed)
self.load_model(iterations=iterations_to_load_from)
accs = list()
losses = list()
losses_func = self.get_losses_of_tasks_batch(
method='test',
iterations=iterations,
use_val_batch_statistics=use_val_batch_statistics
)
counter = 0
for (train_ds, val_ds), (train_labels, val_labels) in self.test_dataset:
remainder_num = num_tasks // 20
if remainder_num == 0:
remainder_num = 1
if counter % remainder_num == 0:
print(f'{counter} / {num_tasks} are evaluated.')
counter += 1
tasks_final_accuracy, tasks_final_losses = tf.map_fn(
losses_func,
elems=(
train_ds,
val_ds,
train_labels,
val_labels,
),
dtype=(tf.float32, tf.float32),
parallel_iterations=1
)
final_loss = tf.reduce_mean(tasks_final_losses)
final_acc = tf.reduce_mean(tasks_final_accuracy)
losses.append(final_loss)
accs.append(final_acc)
final_acc_mean = np.mean(accs)
final_acc_std = np.std(accs)
print(f'loss mean: {np.mean(losses)}')
print(f'loss std: {np.std(losses)}')
print(f'accuracy mean: {final_acc_mean}')
print(f'accuracy std: {final_acc_std}')
# Free the seed :D
if seed != -1:
np.random.seed(None)
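        # 1.96 is the z-score for a 95% confidence interval over the num_tasks sampled tasks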
confidence_interval = 1.96 * final_acc_std / np.sqrt(num_tasks)
print(
f'final acc: {final_acc_mean} +- {confidence_interval}'
)
print(
f'final acc: {final_acc_mean * 100:0.2f} +- {confidence_interval * 100:0.2f}'
)
return np.mean(accs)
def report_validation_loss_and_accuracy(self, epoch_count):
self.val_loss_metric.reset_states()
self.val_accuracy_metric.reset_states()
val_counter = 0
loss_func = self.get_losses_of_tasks_batch(method='val')
val_dataset = self.get_val_dataset()
for (train_ds, val_ds), (train_labels, val_labels) in val_dataset:
val_counter += 1
# TODO fix validation logging
if settings.DEBUG:
if val_counter % 5 == 0:
step = epoch_count * val_dataset.steps_per_epoch + val_counter
# pick the first task in meta batch
log_train_ds = combine_first_two_axes(train_ds[0, ...])
log_val_ds = combine_first_two_axes(val_ds[0, ...])
self.log_images(self.val_summary_writer, log_train_ds, log_val_ds, step)
tasks_final_accuracy, tasks_final_losses = tf.map_fn(
loss_func,
elems=(
train_ds,
val_ds,
train_labels,
val_labels,
),
dtype=(tf.float32, tf.float32),
parallel_iterations=1
)
final_loss = tf.reduce_mean(tasks_final_losses)
final_acc = tf.reduce_mean(tasks_final_accuracy)
self.val_loss_metric.update_state(final_loss)
self.val_accuracy_metric.update_state(final_acc)
self.log_metric(self.val_summary_writer, 'Loss', self.val_loss_metric, step=epoch_count)
self.log_metric(self.val_summary_writer, 'Accuracy', self.val_accuracy_metric, step=epoch_count)
print('Validation Loss: {}'.format(self.val_loss_metric.result().numpy()))
print('Validation Accuracy: {}'.format(self.val_accuracy_metric.result().numpy()))
@abstractmethod
def get_losses_of_tasks_batch(self, method='train', **kwargs):
pass
@abstractmethod
def initialize_network(self):
pass
@abstractmethod
def get_config_str(self):
pass
| [
"[email protected]"
] |