# Python source files sampled from a GitHub code dataset (metadata per file: repo, path, license).
# ==== buzzfeed/serializers.py (pruksmhc/buzzfeedAPI, no license) ====
from rest_framework import serializers
from buzzfeed.models import BuzzfeedSearch
class BuzzfeedSerializer(serializers.Serializer):
    json = serializers.CharField(required=False, allow_blank=True, max_length=100)
    date = serializers.CharField(required=False, allow_blank=True, max_length=100)
user = serializers.CharField(required=False, allow_blank=True, max_length=100)
    # These fields define what gets serialized/deserialized.
def create(self, validated_data):
        # Define how instances are created.
return BuzzfeedSearch.objects.create(**validated_data)
# ==== publicacion/urls.py (jcomorera/AppDjango, no license) ====
from django.urls import path
from django.conf.urls.static import static
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from . import views
urlpatterns = [
path('', views.base, name='list'),
path('detail/<int:id>/', views.publicacion_detalle, name='detail'),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += staticfiles_urlpatterns()
# ==== verification/scripts/misc/rename_method_groups.py (fmidev/lagrangian-convolutional-neural-network, MIT) ====
"""
Small script to rename the method group in prediction
hdf5 file. Hardcoded configuration.
Bent Harnist, FMI - 23.02.2022
"""
import h5py
from tqdm import tqdm
# HARD CODED CONFIGURATION
fname = "prediction_db_lk_default.hdf5"
path_template = "{time}/{method}"
old_name = "extrapolation"
new_name = "extrapolation_lk_default"
with h5py.File(fname, 'a') as f:
    for t in tqdm(f.keys()):
        old_path = path_template.format(time=t, method=old_name)
        new_path = path_template.format(time=t, method=new_name)
        f.move(source=old_path, dest=new_path)
print(f"\n Success!\n Method group in {fname} renamed from {old_name} to {new_name}.")
# ==== dp_easy/house_robber_using_dp.py (99rishita/Leetcode-Problems, no license) ====
class Solution:
def rob(self, nums: List[int]) -> int:
n = len(nums)
if n == 0:
return 0
if n == 1:
return nums[0]
if n == 2:
return max(nums[0], nums[1])
dp = [0]*(n)
dp[0] = nums[0]
dp[1] = max(nums[0], nums[1])
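        # dp[i] is the best loot using houses 0..i: skip house i, or rob it and add dp[i-2].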
for i in range(2, n):
dp[i] = max(dp[i-1], dp[i-2] + nums[i])
        return dp[n-1]
# ==== LeetCode/139-Word Break/wordBreak.py (gavinz0228/AlgoPractice, no license) ====
class Solution:
def wordBreak(self, s: str, wordDict: List[str]) -> bool:
des = len(s) - 1
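        # BFS over reachable start indices: a dictionary-word prefix at sp extends reach to sp + len(w).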
visited = set()
nex_start = [0]
while nex_start:
new_nex_start = []
for sp in nex_start:
for w in wordDict:
if s[sp:].startswith(w):
p = sp + len(w)
if p not in visited:
visited.add(p)
new_nex_start.append(p)
nex_start = new_nex_start
        return des + 1 in visited
# ==== maskrcnn_benchmark/structures/bounding_box.py (krumo/Domain-Adaptive-Faster-RCNN-PyTorch, MIT) ====
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class BoxList(object):
"""
This class represents a set of bounding boxes.
The bounding boxes are represented as a Nx4 Tensor.
In order to uniquely determine the bounding boxes with respect
to an image, we also store the corresponding image dimensions.
They can contain extra information that is specific to each bounding box, such as
labels.
"""
def __init__(self, bbox, image_size, mode="xyxy"):
device = bbox.device if isinstance(bbox, torch.Tensor) else torch.device("cpu")
bbox = torch.as_tensor(bbox, dtype=torch.float32, device=device)
if bbox.ndimension() != 2:
raise ValueError(
"bbox should have 2 dimensions, got {}".format(bbox.ndimension())
)
if bbox.size(-1) != 4:
raise ValueError(
"last dimenion of bbox should have a "
"size of 4, got {}".format(bbox.size(-1))
)
if mode not in ("xyxy", "xywh"):
raise ValueError("mode should be 'xyxy' or 'xywh'")
self.bbox = bbox
self.size = image_size # (image_width, image_height)
self.mode = mode
self.extra_fields = {}
def add_field(self, field, field_data):
self.extra_fields[field] = field_data
def get_field(self, field):
return self.extra_fields[field]
def has_field(self, field):
return field in self.extra_fields
def fields(self):
return list(self.extra_fields.keys())
def _copy_extra_fields(self, bbox):
for k, v in bbox.extra_fields.items():
self.extra_fields[k] = v
def convert(self, mode):
if mode not in ("xyxy", "xywh"):
raise ValueError("mode should be 'xyxy' or 'xywh'")
if mode == self.mode:
return self
# we only have two modes, so don't need to check
# self.mode
xmin, ymin, xmax, ymax = self._split_into_xyxy()
if mode == "xyxy":
bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
bbox = BoxList(bbox, self.size, mode=mode)
else:
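            # Box coordinates are pixel-inclusive, so width/height conversion needs a one-pixel correction.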
TO_REMOVE = 1
bbox = torch.cat(
(xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE), dim=-1
)
bbox = BoxList(bbox, self.size, mode=mode)
bbox._copy_extra_fields(self)
return bbox
def _split_into_xyxy(self):
if self.mode == "xyxy":
xmin, ymin, xmax, ymax = self.bbox.split(1, dim=-1)
return xmin, ymin, xmax, ymax
elif self.mode == "xywh":
TO_REMOVE = 1
xmin, ymin, w, h = self.bbox.split(1, dim=-1)
return (
xmin,
ymin,
xmin + (w - TO_REMOVE).clamp(min=0),
ymin + (h - TO_REMOVE).clamp(min=0),
)
else:
raise RuntimeError("Should not be here")
def resize(self, size, *args, **kwargs):
"""
Returns a resized copy of this bounding box
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
"""
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
if ratios[0] == ratios[1]:
ratio = ratios[0]
scaled_box = self.bbox * ratio
bbox = BoxList(scaled_box, size, mode=self.mode)
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.resize(size, *args, **kwargs)
bbox.add_field(k, v)
return bbox
ratio_width, ratio_height = ratios
xmin, ymin, xmax, ymax = self._split_into_xyxy()
scaled_xmin = xmin * ratio_width
scaled_xmax = xmax * ratio_width
scaled_ymin = ymin * ratio_height
scaled_ymax = ymax * ratio_height
scaled_box = torch.cat(
(scaled_xmin, scaled_ymin, scaled_xmax, scaled_ymax), dim=-1
)
bbox = BoxList(scaled_box, size, mode="xyxy")
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.resize(size, *args, **kwargs)
bbox.add_field(k, v)
return bbox.convert(self.mode)
def transpose(self, method):
"""
Transpose bounding box (flip or rotate in 90 degree steps)
:param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
:py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
:py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,
:py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`.
"""
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
image_width, image_height = self.size
xmin, ymin, xmax, ymax = self._split_into_xyxy()
if method == FLIP_LEFT_RIGHT:
TO_REMOVE = 1
transposed_xmin = image_width - xmax - TO_REMOVE
transposed_xmax = image_width - xmin - TO_REMOVE
transposed_ymin = ymin
transposed_ymax = ymax
elif method == FLIP_TOP_BOTTOM:
transposed_xmin = xmin
transposed_xmax = xmax
transposed_ymin = image_height - ymax
transposed_ymax = image_height - ymin
transposed_boxes = torch.cat(
(transposed_xmin, transposed_ymin, transposed_xmax, transposed_ymax), dim=-1
)
bbox = BoxList(transposed_boxes, self.size, mode="xyxy")
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.transpose(method)
bbox.add_field(k, v)
return bbox.convert(self.mode)
def crop(self, box):
"""
        Crops a rectangular region from this bounding box. The box is a
4-tuple defining the left, upper, right, and lower pixel
coordinate.
"""
xmin, ymin, xmax, ymax = self._split_into_xyxy()
w, h = box[2] - box[0], box[3] - box[1]
cropped_xmin = (xmin - box[0]).clamp(min=0, max=w)
cropped_ymin = (ymin - box[1]).clamp(min=0, max=h)
cropped_xmax = (xmax - box[0]).clamp(min=0, max=w)
cropped_ymax = (ymax - box[1]).clamp(min=0, max=h)
# TODO should I filter empty boxes here?
if False:
is_empty = (cropped_xmin == cropped_xmax) | (cropped_ymin == cropped_ymax)
cropped_box = torch.cat(
(cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1
)
bbox = BoxList(cropped_box, (w, h), mode="xyxy")
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.crop(box)
bbox.add_field(k, v)
return bbox.convert(self.mode)
# Tensor-like methods
def to(self, device):
bbox = BoxList(self.bbox.to(device), self.size, self.mode)
for k, v in self.extra_fields.items():
if hasattr(v, "to"):
v = v.to(device)
bbox.add_field(k, v)
return bbox
def __getitem__(self, item):
bbox = BoxList(self.bbox[item], self.size, self.mode)
for k, v in self.extra_fields.items():
bbox.add_field(k, v[item])
return bbox
def __len__(self):
return self.bbox.shape[0]
def clip_to_image(self, remove_empty=True):
TO_REMOVE = 1
self.bbox[:, 0].clamp_(min=0, max=self.size[0] - TO_REMOVE)
self.bbox[:, 1].clamp_(min=0, max=self.size[1] - TO_REMOVE)
self.bbox[:, 2].clamp_(min=0, max=self.size[0] - TO_REMOVE)
self.bbox[:, 3].clamp_(min=0, max=self.size[1] - TO_REMOVE)
if remove_empty:
box = self.bbox
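            # A box is empty when its width or height is non-positive.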
keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0])
return self[keep]
return self
def area(self):
box = self.bbox
if self.mode == "xyxy":
TO_REMOVE = 1
area = (box[:, 2] - box[:, 0] + TO_REMOVE) * (box[:, 3] - box[:, 1] + TO_REMOVE)
elif self.mode == "xywh":
area = box[:, 2] * box[:, 3]
else:
raise RuntimeError("Should not be here")
return area
def copy_with_fields(self, fields, skip_missing=False):
bbox = BoxList(self.bbox, self.size, self.mode)
if not isinstance(fields, (list, tuple)):
fields = [fields]
for field in fields:
if self.has_field(field):
bbox.add_field(field, self.get_field(field))
elif not skip_missing:
raise KeyError("Field '{}' not found in {}".format(field, self))
return bbox
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_boxes={}, ".format(len(self))
s += "image_width={}, ".format(self.size[0])
s += "image_height={}, ".format(self.size[1])
s += "mode={})".format(self.mode)
return s
if __name__ == "__main__":
bbox = BoxList([[0, 0, 10, 10], [0, 0, 5, 5]], (10, 10))
s_bbox = bbox.resize((5, 5))
print(s_bbox)
print(s_bbox.bbox)
t_bbox = bbox.transpose(0)
print(t_bbox)
print(t_bbox.bbox)
# ==== muscle/transcriptID.py (ShinHyeok/Transcriptome, no license) ====
import sys
filename_tbl = sys.argv[1]
filename_tbl2 = sys.argv[2]
def diction(name):
    with open(name, 'r') as f:
        return [line for line in f]
f_tbl = diction(filename_tbl)
f_tbl2 = diction(filename_tbl2)
f_p = open('mart_result.txt','w')
for line in f_tbl:
    Pname = line.split()[2]
    Tid = line.split()[3]
    for line2 in f_tbl2:
        Gname = line2.split()[0]
        Pname2 = line2.split()[1]
        Tname = line2.split()[2]
        Seq = line2.split()[3]
        if Pname == Pname2:
            f_p.write(Tname + '\t' + Pname + '\t' + Tid + '\t' + Gname + '\t' + Seq + '\n')
f_p.close()
# ==== tests/integration/goldens/logging/samples/generated_samples/logging_generated_logging_v2_metrics_service_v2_delete_log_metric_async.py (anukaal/gapic-generator-python, Apache-2.0) ====
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteLogMetric
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-logging
# [START logging_generated_logging_v2_MetricsServiceV2_DeleteLogMetric_async]
from google.cloud import logging_v2
async def sample_delete_log_metric():
"""Snippet for delete_log_metric"""
# Create a client
client = logging_v2.MetricsServiceV2AsyncClient()
# Initialize request argument(s)
request = logging_v2.DeleteLogMetricRequest(
metric_name="projects/{project}/metrics/{metric}",
)
# Make the request
response = await client.delete_log_metric(request=request)
# [END logging_generated_logging_v2_MetricsServiceV2_DeleteLogMetric_async]
# ==== HLTrigger/Configuration/python/HLT_75e33/modules/ticlTrackstersTrk_cfi.py (p2l1pfp/cmssw, Apache-2.0) ====
import FWCore.ParameterSet.Config as cms
ticlTrackstersTrk = cms.EDProducer("TrackstersProducer",
detector = cms.string('HGCAL'),
filtered_mask = cms.InputTag("filteredLayerClustersTrk","Trk"),
itername = cms.string('Trk'),
layer_clusters = cms.InputTag("hgcalLayerClusters"),
layer_clusters_hfnose_tiles = cms.InputTag("ticlLayerTileHFNose"),
layer_clusters_tiles = cms.InputTag("ticlLayerTileProducer"),
mightGet = cms.optional.untracked.vstring,
original_mask = cms.InputTag("ticlTrackstersEM"),
patternRecognitionBy = cms.string('CA'),
pluginPatternRecognitionByCA = cms.PSet(
algo_verbosity = cms.int32(2),
eid_input_name = cms.string('input'),
eid_min_cluster_energy = cms.double(1),
eid_n_clusters = cms.int32(10),
eid_n_layers = cms.int32(50),
eid_output_name_energy = cms.string('output/regressed_energy'),
eid_output_name_id = cms.string('output/id_probabilities'),
energy_em_over_total_threshold = cms.double(-1),
etaLimitIncreaseWindow = cms.double(2.1),
filter_on_categories = cms.vint32(2, 4),
max_delta_time = cms.double(-1.0),
max_longitudinal_sigmaPCA = cms.double(9999),
max_missing_layers_in_trackster = cms.int32(9999),
max_out_in_hops = cms.int32(10),
min_cos_pointing = cms.double(0.798),
min_cos_theta = cms.double(0.866),
min_layers_per_trackster = cms.int32(10),
oneTracksterPerTrackSeed = cms.bool(True),
out_in_dfs = cms.bool(True),
pid_threshold = cms.double(0),
promoteEmptyRegionToTrackster = cms.bool(True),
root_doublet_max_distance_from_seed_squared = cms.double(9999),
shower_start_max_layer = cms.int32(9999),
siblings_maxRSquared = cms.vdouble(0.0006, 0.0006, 0.0006),
skip_layers = cms.int32(3),
type = cms.string('CA')
),
pluginPatternRecognitionByCLUE3D = cms.PSet(
algo_verbosity = cms.int32(0),
criticalDensity = cms.double(4),
criticalEtaPhiDistance = cms.double(0.035),
densityEtaPhiDistanceSqr = cms.double(0.0008),
densityOnSameLayer = cms.bool(False),
densitySiblingLayers = cms.int32(3),
eid_input_name = cms.string('input'),
eid_min_cluster_energy = cms.double(1),
eid_n_clusters = cms.int32(10),
eid_n_layers = cms.int32(50),
eid_output_name_energy = cms.string('output/regressed_energy'),
eid_output_name_id = cms.string('output/id_probabilities'),
minNumLayerCluster = cms.int32(5),
outlierMultiplier = cms.double(2),
type = cms.string('CLUE3D')
),
pluginPatternRecognitionByFastJet = cms.PSet(
algo_verbosity = cms.int32(0),
antikt_radius = cms.double(0.09),
eid_input_name = cms.string('input'),
eid_min_cluster_energy = cms.double(1),
eid_n_clusters = cms.int32(10),
eid_n_layers = cms.int32(50),
eid_output_name_energy = cms.string('output/regressed_energy'),
eid_output_name_id = cms.string('output/id_probabilities'),
minNumLayerCluster = cms.int32(5),
type = cms.string('FastJet')
),
seeding_regions = cms.InputTag("ticlSeedingTrk"),
time_layerclusters = cms.InputTag("hgcalLayerClusters","timeLayerCluster")
)
# ==== pgatour_stats/spiders/stats.py (zachgoll/pga-tour-stats, no license) ====
# -*- coding: utf-8 -*-
import scrapy
class StatsSpider(scrapy.Spider):
name = 'stats'
allowed_domains = ['pgatour.com']
def start_requests(self):
stat_list = ['102', '103', '02437', '077', '329', '328', '327', '326', '419',
'111', '130', '364', '119', '398', '399', '400', '484', '403']
years = ['02', '03', '04', '05', '06', '07', '08', '09', '10',
'11', '12', '13', '14', '15', '16']
urls = ["http://pgatour.com/stats/stat." + key +
".20" + year + ".html" for year in years for key in stat_list]
for url in urls:
yield scrapy.Request(url = url, callback=self.parse)
def parse(self, response):
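        # Stat pages are named stat.<key>.20<yy>.html, so the year is the second-to-last dotted token of the URL.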
current_year = response.url.split('.')[-2]
stat_title = response.xpath('//h3/text()')[-1].extract()
average_stat = response.xpath('//*[contains(concat( " ", @class, " " ), \
concat( " ", "hidden-medium", " " )) \
and (((count(preceding-sibling::*) + 1) = 5) \
and parent::*)]/text()').extract()
yield {
'Year': current_year,
'Stat': stat_title,
'First': average_stat[1],
'Top 10': average_stat[10],
'Top 25': average_stat[25],
'Top 100': average_stat[100],
'Last': average_stat[-1]
}
# ==== search_info/urls.py (showzvan/mysite, no license) ====
# -*- coding: utf-8 -*-
from django.urls import path
from . import views
app_name = 'search_info'
urlpatterns = [
path('search/',views.search,name='search'),
path('search_school/',views.searchSchool,name='search_school'),
path('search_major/',views.searchMajor,name='search_major'),
path('search_center/',views.searchCenter,name='search_center'),
path('search_news/',views.searchNews,name='search_news'),
]
# ==== tests/test_Game.py (eczeno/tetrisish, no license) ====
#!/home/zeno/Desktop/tetris/.tetris/bin/python
import pytest
from tetris.tetris.tetris import Game
@pytest.fixture
def game():
return Game()
def test_init(game):
assert len(game.grid) == 20
for row in game.grid:
assert len(row) == 10
def test_is_valid_space(game):
    assert game.is_valid_space()
# ==== test/api_test.py (Cornode/cornode.lib.py, MIT) ====
# coding=utf-8
from __future__ import absolute_import, division, print_function, \
unicode_literals
from unittest import TestCase
from cornode import InvalidCommand, Strictcornode
from cornode.adapter import MockAdapter
from cornode.commands import CustomCommand
from cornode.commands.core.get_node_info import GetNodeInfoCommand
class CustomCommandTestCase(TestCase):
def setUp(self):
super(CustomCommandTestCase, self).setUp()
self.name = 'helloWorld'
self.adapter = MockAdapter()
self.command = CustomCommand(self.adapter, self.name)
def test_call(self):
"""
Sending a custom command.
"""
expected_response = {'message': 'Hello, cornode!'}
self.adapter.seed_response('helloWorld', expected_response)
response = self.command()
self.assertEqual(response, expected_response)
self.assertTrue(self.command.called)
self.assertListEqual(
self.adapter.requests,
[{'command': 'helloWorld'}],
)
def test_call_with_parameters(self):
"""
Sending a custom command with parameters.
"""
expected_response = {'message': 'Hello, cornode!'}
self.adapter.seed_response('helloWorld', expected_response)
response = self.command(foo='bar', baz='luhrmann')
self.assertEqual(response, expected_response)
self.assertTrue(self.command.called)
self.assertListEqual(
self.adapter.requests,
[{'command': 'helloWorld', 'foo': 'bar', 'baz': 'luhrmann'}],
)
def test_call_error_already_called(self):
"""
A command can only be called once.
"""
self.adapter.seed_response('helloWorld', {})
self.command()
with self.assertRaises(RuntimeError):
self.command(extra='params')
self.assertDictEqual(self.command.request, {'command': 'helloWorld'})
def test_call_reset(self):
"""
Resetting a command allows it to be called more than once.
"""
self.adapter.seed_response('helloWorld', {'message': 'Hello, cornode!'})
self.command()
self.command.reset()
self.assertFalse(self.command.called)
self.assertIsNone(self.command.request)
self.assertIsNone(self.command.response)
expected_response = {'message': 'Welcome back!'}
self.adapter.seed_response('helloWorld', expected_response)
response = self.command(foo='bar')
self.assertDictEqual(response, expected_response)
self.assertDictEqual(self.command.response, expected_response)
self.assertDictEqual(
self.command.request,
{
'command': 'helloWorld',
'foo': 'bar',
},
)
class cornodeApiTestCase(TestCase):
def test_init_with_uri(self):
"""
Passing a URI to the initializer instead of an adapter instance.
"""
api = Strictcornode('mock://')
self.assertIsInstance(api.adapter, MockAdapter)
def test_registered_command(self):
"""
Preparing a documented command.
"""
api = Strictcornode(MockAdapter())
# We just need to make sure the correct command type is
# instantiated; individual commands have their own unit tests.
command = api.getNodeInfo
self.assertIsInstance(command, GetNodeInfoCommand)
def test_unregistered_command(self):
"""
Attempting to create an unsupported command.
"""
api = Strictcornode(MockAdapter())
with self.assertRaises(InvalidCommand):
# noinspection PyStatementEffect
api.helloWorld
def test_create_command(self):
"""
Preparing an experimental/undocumented command.
"""
api = Strictcornode(MockAdapter())
custom_command = api.create_command('helloWorld')
self.assertIsInstance(custom_command, CustomCommand)
self.assertEqual(custom_command.command, 'helloWorld')
# ==== app/migrations/0003_auto_20200818_2130.py (rohanaj/serviceapp_in_django, no license) ====
# Generated by Django 2.1 on 2020-08-18 16:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0002_auto_20200818_2126'),
]
operations = [
migrations.AlterField(
model_name='status',
name='status',
field=models.CharField(max_length=30, null=True),
),
]
# ==== 1049_LastStonesWeightII/1.py (H-Cong/LeetCode, no license) ====
class Solution:
def lastStoneWeightII(self, stones: List[int]) -> int:
        dp = {0}
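        # dp holds every signed sum (each stone taken as + or -) reachable with the stones seen so far.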
for a in stones:
dp = {a + x for x in dp} | {a - x for x in dp}
return min(abs(x) for x in dp)
# TC: O(ns)
# SC: O(s)
# s is the sum of stones
# ref: https://leetcode.com/problems/last-stone-weight-ii/discuss/296350/
# ==== models/post.py (kjannette/python-terminal-blog, no license) ====
import uuid
from database import Database
import datetime
class Post(object):
    def __init__(self, blog_id, title, content, author, date=None, id=None):
self.blog_id = blog_id
self.title = title
self.content = content
self.author = author
        self.created_date = date if date is not None else datetime.datetime.utcnow()  # avoid default evaluated once at import
self.id = uuid.uuid4().hex if id is None else id
def save_to_mongo(self):
Database.insert(collection='posts',
data=self.json())
def json(self):
return {
'id': self.id,
'blog_id': self.blog_id,
'author': self.author,
'content': self.content,
'title': self.title,
'created_date': self.created_date
}
@classmethod
def from_mongo(cls, id):
post_data = Database.find_one(collection='posts', query={'id': id})
return cls(blog_id=post_data['blog_id'],
title=post_data['title'],
content=post_data['content'],
author=post_data['author'],
date=post_data['created_date'],
id=post_data['id'])
@staticmethod
def from_blog(id):
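        # Returns raw post documents for a blog (not Post instances).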
        return [post for post in Database.find(collection='posts', query={'blog_id': id})]
# ==== pkg/cm-vnc/DEBIAN/postinst (cloudcache/cc1, Apache-2.0) ====
#!/usr/bin/python
import sys
import subprocess
if __name__ == '__main__':
if 'configure' in sys.argv:
print "CM-VNC: Removing old .pyc files"
subprocess.call("find /usr/lib/cc1/cm-vnc -name '*.pyc' -exec rm {} \\;", shell=True)
subprocess.call("find /etc/cc1/cm-vnc -name '*.pyc' -exec rm {} \\;", shell=True)
log = open('/var/log/cc1/clm_install.log', 'a')
print "CM-VNC: Creating cc1 user"
r = subprocess.call('cc1_setup_user create', shell=True, stdout=log, stderr=log)
print "CM-VNC: Changing permissions"
subprocess.call("chown -R cc1:cc1 /var/log/cc1/", shell=True, stdout=log, stderr=log)
subprocess.call("chown -R cc1:cc1 /var/lib/cc1/", shell=True, stdout=log, stderr=log)
subprocess.call("chown -R cc1:cc1 /etc/cc1/cm-vnc/", shell=True, stdout=log, stderr=log)
subprocess.call("chmod a+x /usr/lib/cc1/cm-vnc/proxy.py", shell=True, stdout=log, stderr=log)
print "CM-VNC: Creating default config"
r = subprocess.call('cc1_cm_vnc_setup_config configure', shell=True, stdout=log, stderr=log)
log.close()
sys.exit(0)
else:
print "Use cc1 tools (cc1_...) to reconfigure services!"
sys.exit(1)
# ==== odev.py (metogpc/TopAtisiAlgoritmasi, no license) ====
def top_tipi(x):
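    # Bounce (restitution) coefficient per ball type; B/F/V are assumed to mean basketball/football/volleyball.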
if x=='B' or x=='b':
return 0.7
elif x=='F' or x=='f':
return 0.75
elif x=='V' or x=='v':
return 0.9
else:
print(" HATALI GIRIS YAPTINIZ!!")
return 0
def top_hareketi(yukseklik , ziplama , katsayi):
if katsayi==0:
print("")
else:
ziplama = 0
yukseklik *= 100
toplam_mesafe = yukseklik
x = True
while x:
yukseklik = yukseklik * katsayi
toplam_mesafe += 2 * yukseklik
ziplama += 1
if yukseklik < 10:
x = False
print("---------------------------")
print("Toplam mesafe " , toplam_mesafe/100)
print("ziplama sayisi" , ziplama)
while True:
    yukseklik = int(input("Enter a height (m): "))
    top = input("Enter the ball type (B/F/V): ")[0]
ziplama=0
top_hareketi(yukseklik , ziplama , top_tipi(top))
# ==== pyMD/builder/velocities.py (king-michael/python_MD_engine, MIT) ====
import numpy as np
class Velocities:
constants = {
'LJ': {'mvv2e': 1.0, 'boltz' : 1.0},
'real': {'mvv2e': 2390.0573615334906, 'boltz' : 0.0019872067},
'metal': {'mvv2e': 0.00010364269, 'boltz' : 8.617343e-05},
}
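    # mvv2e converts mass*velocity^2 to energy; boltz is k_B. The values appear to follow LAMMPS-style unit systems.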
def __init__(self, n_atoms, masses, temperature, units='LJ'):
self.n_atoms = n_atoms
self.masses = masses
self.temperature = temperature
        self.velocities = self.create_velocities(n_atoms, masses, temperature, units=units)
@classmethod
    def calculate_temperature(cls, velocities, masses, extra_dof=0, units='LJ'):
        dof = velocities.size - extra_dof
        mvv2e = cls.constants[units]['mvv2e']
        boltz = cls.constants[units]['boltz']
if dof > 0:
tfactor = mvv2e / (dof * boltz)
else:
tfactor = 0.0
temperature = np.sum(np.power(velocities, 2).T * masses)
temperature*= tfactor
return temperature
@classmethod
    def create_velocities(cls,
n_atoms: int,
masses: np.ndarray,
temperature: float,
mode : str = 'gaussian',
extra_dof=0,
units='LJ'):
"""
Parameters
----------
n_atoms : int
masses : np.ndarray
Mass array of shape (n_atoms)
temperature : float
Temperature
mode : str, optional
mode can be 'normal' or 'gaussian'
Returns
-------
"""
assert len(masses) == n_atoms, "Number of masses does not match the number of atoms"
velocities = np.zeros((3, n_atoms), dtype=np.float64)
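        # Draw raw velocities, scale by 1/sqrt(m), then rescale so the kinetic temperature matches the target.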
if mode == 'gaussian':
velocities[:] = np.random.normal(size=velocities.size).reshape((3, -1, ))
elif mode == 'normal':
velocities[:] = np.random.random(velocities.size).reshape(3, -1) -0.5
else:
raise UserWarning('unknown "mode". Use "normal" or "gaussian"')
velocities *= 1.0 / np.sqrt(masses)
        t = cls.calculate_temperature(velocities.T, masses, extra_dof=extra_dof, units=units)
Velocities.rescale(velocities.T,t, temperature)
return velocities.T
@staticmethod
def rescale(velocities: np.ndarray, t_old: float, t_new: float):
factor = np.sqrt(t_new / t_old)
velocities *= factor
return velocities
create_velocities = Velocities.create_velocities
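
# Example usage (a sketch; assumes LJ units and unit masses):
#   vel = create_velocities(n_atoms=100, masses=np.ones(100), temperature=1.0)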
# ==== xrpl/core/binarycodec/types/uint16.py (brettmollin/xrpl-py, ISC) ====
"""Class for serializing and deserializing a 16-bit UInt.
See `UInt Fields <https://xrpl.org/serialization.html#uint-fields>`_
"""
from __future__ import annotations
from typing import Optional, Type
from typing_extensions import Final
from xrpl.core.binarycodec.binary_wrappers.binary_parser import BinaryParser
from xrpl.core.binarycodec.exceptions import XRPLBinaryCodecException
from xrpl.core.binarycodec.types.uint import UInt
_WIDTH: Final[int] = 2 # 16 / 8
class UInt16(UInt):
"""Class for serializing and deserializing a 16-bit UInt.
See `UInt Fields <https://xrpl.org/serialization.html#uint-fields>`_
"""
def __init__(self: UInt16, buffer: bytes = bytes(_WIDTH)) -> None:
"""Construct a new UInt16 type from a ``bytes`` value."""
super().__init__(buffer)
@classmethod
def from_parser(
cls: Type[UInt16], parser: BinaryParser, _length_hint: Optional[int] = None
) -> UInt16:
"""
Construct a new UInt16 type from a BinaryParser.
Args:
parser: The BinaryParser to construct a UInt16 from.
Returns:
The UInt16 constructed from parser.
"""
return cls(parser.read(_WIDTH))
@classmethod
def from_value(cls: Type[UInt16], value: int) -> UInt16:
"""
Construct a new UInt16 type from a number.
Args:
            value: The value to construct a UInt16 from.
Returns:
The UInt16 constructed from value.
Raises:
XRPLBinaryCodecException: If a UInt16 can't be constructed from value.
"""
        if not isinstance(value, int):
            raise XRPLBinaryCodecException(
                "Invalid type to construct a UInt16: expected int, "
                f"received {value.__class__.__name__}."
            )
        value_bytes = value.to_bytes(_WIDTH, byteorder="big", signed=False)
        return cls(value_bytes)
# ==== Laba1.py (sburov/Python_Lab, no license) ====
class Radiator:
radiators_class = 'Heating'
def __init__(self, thremal_power, color, manufacturer, wheelbase):
self.thremal_power = thremal_power
self.color = color
self.manufacturer = manufacturer
self.wheelbase = wheelbase
def __del__(self):
return
def __str__(self):
return "\tthremal_power: {}\n \tcolor: {}\n \tmanufacturer: {}\n \twheelbase: {}".format(self.thremal_power, self.color, self.manufacturer, self.wheelbase)
@staticmethod
def RadiatorsClass():
return Radiator.radiators_class
if __name__ == "__main__":
radiator1 = Radiator("158 Watt", "White", "China", "80 Millimeters")
print("The first radiator:\n", radiator1.__str__())
radiator2 = Radiator("186 Watt", "Brown", "USA", "96 Millimeters")
print("The second radiator:\n", radiator2.__str__())
radiator3 = Radiator("198 Watt", "Silver", "Canada", "80 Millimeters")
print("The third radiator:\n", radiator3.__str__())
print("Class of radiators: ", radiator1.RadiatorsClass())
# ==== GAN/gan.py (pgilitwala/computer_vision, no license) ====
# -*- coding: utf-8 -*-
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
batchSize = 64
imageSize = 64
transform = transforms.Compose([transforms.Resize(imageSize),  # Scale was renamed Resize in torchvision
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),]) # We create a list of transformations (scaling, tensor conversion, normalization) to apply to the input images.
dataset = dset.CIFAR10(root="./data", download=True, transform=transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size = batchSize, shuffle = True, num_workers = 2) # We use dataLoader to get the images of the training set batch by batch
def weights_init(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find("BatchNorm") != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class G(nn.Module):
def __init__(self):
super(G, self).__init__()
self.main = nn.Sequential(
nn.ConvTranspose2d(100, 512, 4, 1, 0, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(True),
nn.ConvTranspose2d(512, 256, 4, 2, 1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(True),
nn.ConvTranspose2d(256, 128, 4, 2, 1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(True),
nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(True),
nn.ConvTranspose2d(64, 3, 4, 2, 1, bias=False),
nn.Tanh()
)
def forward(self, input):
output = self.main(input)
return output
netG = G()
netG.apply(weights_init)
class D(nn.Module):
def __init__(self):
super(D, self).__init__()
self.main = nn.Sequential(
nn.Conv2d(3, 64, 4,2,1,bias=False),
nn.LeakyReLU(0.2, True),
nn.Conv2d(64, 128, 4, 2, 1, bias=False),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(128, 256, 4, 2, 1, bias=False),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(256, 512, 4, 2, 1, bias=False),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(512, 1, 4, 1, 0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
output = self.main(input)
return output.view(-1)
netD = D()
netD.apply(weights_init)
criterion = nn.BCELoss()
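# Adam with beta1 = 0.5 (rather than the 0.9 default) follows the DCGAN paper and helps stabilize GAN training.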
optimizerD = optim.Adam(netD.parameters(), lr=0.0002, betas=(0.5, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=0.0002, betas=(0.5, 0.999))
for epoch in range(25):
for i, data in enumerate(dataloader, 0):
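        # --- Train the discriminator on a real batch and a generated (fake) batch ---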
netD.zero_grad()
real, _ = data
inputv = Variable(real)
target = Variable(torch.ones(inputv.size()[0]))
output = netD(inputv)
errD_real = criterion(output, target)
noise = Variable(torch.randn(inputv.size()[0], 100, 1, 1))
fake = netG(noise)
target = Variable(torch.zeros(inputv.size()[0]))
output = netD(fake.detach())
errD_fake = criterion(output, target)
errD = errD_fake + errD_real
errD.backward()
optimizerD.step()
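        # --- Train the generator: push D(G(noise)) toward the "real" label ---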
netG.zero_grad()
target = Variable(torch.ones(inputv.size()[0]))
output = netD(fake)
errG = criterion(output, target)
errG.backward()
optimizerG.step()
        print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f' % (epoch, 25, i, len(dataloader), errD.item(), errG.item())) # We print the losses of the discriminator (Loss_D) and the generator (Loss_G).
if i%100 == 0:
vutils.save_image(real, '%s/real_samples.png' % "./results", normalize = True) # We save the real images of the minibatch.
fake = netG(noise) # We get our fake generated images.
vutils.save_image(fake.data, '%s/fake_samples_epoch_%03d.png' % ("./results", epoch), normalize = True) # We also save the fake generated images of the minibatch.
# ==== workspace/migrations/0018_auto_20200425_2100.py (shumnyj/TeacherWorkSpace, no license) ====
# Generated by Django 2.2.5 on 2020-04-25 18:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workspace', '0017_auto_20200424_2042'),
]
operations = [
migrations.AlterField(
model_name='controlentity',
name='date_created',
field=models.DateField(auto_now_add=True),
),
migrations.AlterField(
model_name='notification',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
]
# ==== deep/web/creds.py (MoysheBenRabi/lod_2020, no license) ====
"""
@file creds.py
@brief read credentials in a single place
@details Copyright (c)
@author Nikolay Vovk
@since $Id: $
"""
import requests
import json
from urllib.parse import quote_plus
from tornado.web import RequestHandler, Application, StaticFileHandler
from tornado_http_auth import DigestAuthMixin, BasicAuthMixin, auth_required
import web.logs as logs
DBG = logs.DBG
ERR = logs.ERR
import web.db as db
import web.config as config
cfg = config.read()
credentials = cfg.get('credentials', {
'user1': 'pass1',
'user2': 'pass2',
})
# ==== 88.py (Devikd/devi, no license) ====
xx1, yy1 = map(int, input().split(' '))
if xx1 > yy1:
great = xx1
else:
great = yy1
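# Brute-force LCM: count upward from the larger input until both inputs divide it.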
while True:
    if (great % xx1 == 0) and (great % yy1 == 0):
        lcm = great
        break
    great += 1
print(lcm)
# ==== source/drivers/power_method.py (adityaramesh/svhn_experiments, no license) ====
import math
import numpy as np
from numpy.linalg import norm
def max_eig(A):
assert(len(A.shape) == 2)
v = np.random.rand(A.shape[1])
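    # Power iteration: repeatedly apply A and renormalize; v aligns with the dominant eigenvector.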
for _ in range(1, 20):
u = np.copy(v)
v = A.dot(v)
        print(math.acos(np.clip(u.dot(v) / (norm(u) * norm(v)), -1.0, 1.0)))  # angle between iterates; clip guards the acos domain
v = v / norm(v)
print(v)
return v
def main():
A = np.array([[2, -12], [1, -5]])
max_eig(A)
print(np.linalg.eig(A))
if __name__ == "__main__":
main()
# ==== venv/Lib/site-packages/tqdm/std.py (naveens33/ctreport-selenium, MIT) ====
"""
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
from __future__ import absolute_import, division
# compatibility functions and utilities
from .utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet, _basestring, _OrderedDict, \
Comparable, _is_ascii, FormatReplace, disp_len, disp_trim, \
SimpleTextIOWrapper, CallbackIOWrapper
from ._monitor import TMonitor
# native libraries
from contextlib import contextmanager
import sys
from numbers import Number
from time import time
# For parallelism safety
import threading as th
from warnings import warn
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange',
'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning',
'TqdmExperimentalWarning', 'TqdmDeprecationWarning',
'TqdmMonitorWarning']
class TqdmTypeError(TypeError):
pass
class TqdmKeyError(KeyError):
pass
class TqdmWarning(Warning):
"""base class for all tqdm warnings.
Used for non-external-code-breaking errors, such as garbled printing.
"""
def __init__(self, msg, fp_write=None, *a, **k):
if fp_write is not None:
fp_write("\n" + self.__class__.__name__ + ": " +
str(msg).rstrip() + '\n')
else:
super(TqdmWarning, self).__init__(msg, *a, **k)
class TqdmExperimentalWarning(TqdmWarning, FutureWarning):
"""beta feature, unstable API and behaviour"""
pass
class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning):
# not suppressed if raised
pass
class TqdmMonitorWarning(TqdmWarning, RuntimeWarning):
"""tqdm monitor errors which do not affect external functionality"""
pass
class TqdmDefaultWriteLock(object):
"""
Provide a default write lock for thread and multiprocessing safety.
Works only on platforms supporting `fork` (so Windows is excluded).
You must initialise a `tqdm` or `TqdmDefaultWriteLock` instance
before forking in order for the write lock to work.
On Windows, you need to supply the lock from the parent to the children as
an argument to joblib or the parallelism lib you use.
"""
def __init__(self):
# Create global parallelism locks to avoid racing issues with parallel
# bars works only if fork available (Linux/MacOSX, but not Windows)
self.create_mp_lock()
self.create_th_lock()
cls = type(self)
self.locks = [lk for lk in [cls.mp_lock, cls.th_lock] if lk is not None]
def acquire(self, *a, **k):
for lock in self.locks:
lock.acquire(*a, **k)
def release(self):
for lock in self.locks[::-1]: # Release in inverse order of acquisition
lock.release()
def __enter__(self):
self.acquire()
def __exit__(self, *exc):
self.release()
@classmethod
def create_mp_lock(cls):
if not hasattr(cls, 'mp_lock'):
try:
from multiprocessing import RLock
cls.mp_lock = RLock() # multiprocessing lock
except ImportError: # pragma: no cover
cls.mp_lock = None
except OSError: # pragma: no cover
cls.mp_lock = None
@classmethod
def create_th_lock(cls):
if not hasattr(cls, 'th_lock'):
try:
cls.th_lock = th.RLock() # thread lock
except OSError: # pragma: no cover
cls.th_lock = None
# Create a thread lock before instantiation so that no setup needs to be done
# before running in a multithreaded environment.
# Do not create the multiprocessing lock because it sets the multiprocessing
# context and does not allow the user to use 'spawn' or 'forkserver' methods.
TqdmDefaultWriteLock.create_th_lock()
class Bar(object):
"""
`str.format`-able bar with format specifiers: `[width][type]`
- `width`
+ unspecified (default): use `self.default_len`
+ `int >= 0`: overrides `self.default_len`
+ `int < 0`: subtract from `self.default_len`
- `type`
+ `a`: ascii (`charset=self.ASCII` override)
+ `u`: unicode (`charset=self.UTF` override)
+ `b`: blank (`charset=" "` override)
"""
ASCII = " 123456789#"
UTF = u" " + u''.join(map(_unich, range(0x258F, 0x2587, -1)))
BLANK = " "
def __init__(self, frac, default_len=10, charset=UTF):
if not (0 <= frac <= 1):
warn("clamping frac to range [0, 1]", TqdmWarning, stacklevel=2)
frac = max(0, min(1, frac))
assert default_len > 0
self.frac = frac
self.default_len = default_len
self.charset = charset
def __format__(self, format_spec):
if format_spec:
_type = format_spec[-1].lower()
try:
charset = dict(a=self.ASCII, u=self.UTF, b=self.BLANK)[_type]
except KeyError:
charset = self.charset
else:
format_spec = format_spec[:-1]
if format_spec:
N_BARS = int(format_spec)
if N_BARS < 0:
N_BARS += self.default_len
else:
N_BARS = self.default_len
else:
charset = self.charset
N_BARS = self.default_len
nsyms = len(charset) - 1
bar_length, frac_bar_length = divmod(
int(self.frac * N_BARS * nsyms), nsyms)
bar = charset[-1] * bar_length
frac_bar = charset[frac_bar_length]
# whitespace padding
if bar_length < N_BARS:
return bar + frac_bar + \
charset[0] * (N_BARS - bar_length - 1)
return bar
class tqdm(Comparable):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
monitor_interval = 10 # set to 0 to disable the thread
monitor = None
@staticmethod
def format_sizeof(num, suffix='', divisor=1000):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
divisor : float, optional
Divisor between prefixes [default: 1000].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.5:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= divisor
return '{0:3.1f}Y'.format(num) + suffix
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
@staticmethod
def format_num(n):
"""
Intelligent scientific notation (.3g).
Parameters
----------
n : int or float or Numeric
A Number.
Returns
-------
out : str
Formatted number.
"""
f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')
n = str(n)
return f if len(f) < len(n) else n
@staticmethod
def ema(x, mu=None, alpha=0.3):
"""
Exponential moving average: smoothing to give progressively lower
weights to older values.
Parameters
----------
x : float
New value to include in EMA.
mu : float, optional
Previous EMA value.
alpha : float, optional
Smoothing factor in range [0, 1], [default: 0.3].
Increase to give more weight to recent values.
Ranges from 0 (yields mu) to 1 (yields x).
"""
return x if mu is None else (alpha * x) + (1 - alpha) * mu
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
def fp_write(s):
fp.write(_unicode(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = len(s)
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
unit='it', unit_scale=False, rate=None, bar_format=None,
postfix=None, unit_divisor=1000, **extra_kwargs):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int or float
Number of finished iterations.
total : int or float
The expected total number of iterations. If meaningless (None),
only basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes `{bar}` to stay within this bound
[default: None]. If `0`, will not print any bar (only stats).
The fallback is `{bar:10}`.
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
ascii : bool, optional or str, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
" 123456789#".
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, elapsed, elapsed_s, ncols, desc, unit,
rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, postfix, unit_divisor,
remaining, remaining_s.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
and will if possible be set to postfix = ', ' + postfix.
However other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n >= (total + 0.5): # allow float imprecision (#849)
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
if total:
total *= unit_scale
n *= unit_scale
if rate:
rate *= unit_scale # by default rate = 1 / self.avg_time
unit_scale = False
elapsed_str = tqdm.format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
'{0:5.2f}'.format(rate))
if rate else '?') + unit + '/s'
rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else
'{0:5.2f}'.format(inv_rate))
if inv_rate else '?') + 's/' + unit
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = format_sizeof(total, divisor=unit_divisor) \
if total is not None else '?'
else:
n_fmt = str(n)
total_fmt = str(total) if total is not None else '?'
try:
postfix = ', ' + postfix if postfix else ''
except TypeError:
pass
remaining = (total - n) / rate if rate and total else 0
remaining_str = tqdm.format_interval(remaining) if rate else '?'
# format the stats displayed to the left and right sides of the bar
if prefix:
            # old prefix setup workaround
bool_prefix_colon_already = (prefix[-2:] == ": ")
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ''
r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)
# Custom bar formatting
# Populate a dict with all available progress indicators
format_dict = dict(
# slight extension of self.format_dict
n=n, n_fmt=n_fmt, total=total, total_fmt=total_fmt,
elapsed=elapsed_str, elapsed_s=elapsed,
ncols=ncols, desc=prefix or '', unit=unit,
rate=inv_rate if inv_rate and inv_rate > 1 else rate,
rate_fmt=rate_fmt, rate_noinv=rate,
rate_noinv_fmt=rate_noinv_fmt, rate_inv=inv_rate,
rate_inv_fmt=rate_inv_fmt,
postfix=postfix, unit_divisor=unit_divisor,
# plus more useful definitions
remaining=remaining_str, remaining_s=remaining,
l_bar=l_bar, r_bar=r_bar,
**extra_kwargs)
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
l_bar += '{0:3.0f}%|'.format(percentage)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
format_dict.update(l_bar=l_bar)
if bar_format:
format_dict.update(percentage=percentage)
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", '')
else:
bar_format = "{l_bar}{bar}{r_bar}"
full_bar = FormatReplace()
try:
nobar = bar_format.format(bar=full_bar, **format_dict)
except UnicodeEncodeError:
bar_format = _unicode(bar_format)
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
# no {bar}, we can just format and return
return nobar
# Formatting progress bar space available for bar's display
full_bar = Bar(
frac,
max(1, ncols - disp_len(nobar))
if ncols else 10,
charset=Bar.ASCII if ascii is True else ascii or Bar.UTF)
if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):
bar_format = _unicode(bar_format)
res = bar_format.format(bar=full_bar, **format_dict)
return disp_trim(res, ncols) if ncols else res
elif bar_format:
# user-specified bar_format but no total
l_bar += '|'
format_dict.update(l_bar=l_bar, percentage=0)
full_bar = FormatReplace()
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
return nobar
full_bar = Bar(
0,
max(1, ncols - disp_len(nobar))
if ncols else 10,
charset=Bar.BLANK)
res = bar_format.format(bar=full_bar, **format_dict)
return disp_trim(res, ncols) if ncols else res
else:
# no total: no progressbar, ETA, just progress stats
return ((prefix + ": ") if prefix else '') + \
'{0}{1} [{2}, {3}{4}]'.format(
n_fmt, unit, elapsed_str, rate_fmt, postfix)
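    # Example (a sketch): with the bar suppressed (ncols=0),
    #   tqdm.format_meter(n=50, total=100, elapsed=2.5, ncols=0)
    # returns something like ' 50% 50/100 [00:02<00:02, 20.00it/s]'.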
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Construct the lock if it does not exist
with cls.get_lock():
# Add to the list of instances
if not hasattr(cls, '_instances'):
cls._instances = WeakSet()
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not
cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
warn("tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning, stacklevel=2)
cls.monitor_interval = 0
# Return the instance
return instance
@classmethod
def _get_free_pos(cls, instance=None):
"""Skips specified instance."""
positions = set(abs(inst.pos) for inst in cls._instances
if inst is not instance and hasattr(inst, "pos"))
return min(set(range(len(positions) + 1)).difference(positions))
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition other bars
so that newer bars won't overlap previous bars
"""
with cls._lock:
try:
cls._instances.remove(instance)
except KeyError:
# if not instance.gui: # pragma: no cover
# raise
pass # py2: maybe magically removed already
# else:
if not instance.gui:
for inst in cls._instances:
# negative `pos` means fixed
if hasattr(inst, "pos") and inst.pos > abs(instance.pos):
inst.clear(nolock=True)
inst.pos -= 1
# TODO: check this doesn't overwrite another fixed bar
# Kill monitor if no instances are left
if not cls._instances and cls.monitor:
try:
cls.monitor.exit()
del cls.monitor
except AttributeError: # pragma: nocover
pass
else:
cls.monitor = None
@classmethod
def write(cls, s, file=None, end="\n", nolock=False):
"""Print a message via tqdm (without overlap with bars)."""
fp = file if file is not None else sys.stdout
with cls.external_write_mode(file=file, nolock=nolock):
# Write the message
fp.write(s)
fp.write(end)
@classmethod
@contextmanager
def external_write_mode(cls, file=None, nolock=False):
"""
Disable tqdm within context and refresh tqdm when exits.
Useful when writing to standard output stream
"""
fp = file if file is not None else sys.stdout
if not nolock:
cls.get_lock().acquire()
# Clear all bars
inst_cleared = []
for inst in getattr(cls, '_instances', []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if hasattr(inst, "start_t") and (inst.fp == fp or all(
f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):
inst.clear(nolock=True)
inst_cleared.append(inst)
yield
# Force refresh display of bars we cleared
for inst in inst_cleared:
inst.refresh(nolock=True)
if not nolock:
cls._lock.release()
@classmethod
def set_lock(cls, lock):
"""Set the global lock."""
cls._lock = lock
@classmethod
def get_lock(cls):
"""Get the global lock. Construct it if it does not exist."""
if not hasattr(cls, '_lock'):
cls._lock = TqdmDefaultWriteLock()
return cls._lock
@classmethod
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.(generic.)DataFrameGroupBy
| groupby.(generic.)SeriesGroupBy
).progress_apply
        A new instance will be created every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm
>>> from tqdm.gui import tqdm as tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
try:
from pandas import Panel
except ImportError: # TODO: pandas>0.25.2
Panel = None
try: # pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try: # pandas>=0.25.0
from pandas.core.groupby.generic import DataFrameGroupBy, \
SeriesGroupBy # , NDFrameGroupBy
except ImportError:
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import DataFrameGroupBy, \
SeriesGroupBy
except ImportError:
from pandas.core.groupby import DataFrameGroupBy, \
SeriesGroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import GroupBy
except ImportError:
from pandas.core.groupby import GroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import PanelGroupBy
except ImportError:
try:
from pandas.core.groupby import PanelGroupBy
except ImportError: # pandas>=0.25.0
PanelGroupBy = None
deprecated_t = [tkwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, 'ngroups', None))
if total is None: # not grouped
if df_function == 'applymap':
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or \
not isinstance(df, _Rolling_and_Expanding):
# DataFrame or Panel
axis = kwargs.get('axis', 0)
if axis == 'index':
axis = 0
elif axis == 'columns':
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally" +
" not supported by" +
" `(DataFrame|Series|GroupBy).progress_apply`." +
" Use keyword arguments instead.",
fp_write=getattr(t.fp, 'write', sys.stderr.write))
try:
func = df._is_builtin_func(func)
except TypeError:
pass
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
if Panel is not None:
Panel.progress_apply = inner_generator()
if PanelGroupBy is not None:
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
def __init__(self, iterable=None, desc=None, total=None, leave=True,
file=None, ncols=None, mininterval=0.1, maxinterval=10.0,
miniters=None, ascii=None, disable=False, unit='it',
unit_scale=False, dynamic_ncols=False, smoothing=0.3,
bar_format=None, initial=0, position=None, postfix=None,
unit_divisor=1000, write_bytes=None, lock_args=None,
gui=False, **kwargs):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int or float, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. If float("inf") or as a last
resort, only basic progress statistics are displayed
(no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive number,
e.g. 9e9.
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
If `None`, will leave only if `position` is `0`.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods. For encoding, see `write_bytes`.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval [default: 0.1] seconds.
maxinterval : float, optional
Maximum progress display update interval [default: 10] seconds.
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int or float, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool or str, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters " 123456789#".
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False]. If any other non-zero
number, will scale `total` and `n`.
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, elapsed, elapsed_s, ncols, desc, unit,
rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, postfix, unit_divisor,
remaining, remaining_s.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
initial : int or float, optional
The initial counter value. Useful when restarting a progress
bar [default: 0]. If using float, consider specifying `{n:.3f}`
or similar in `bar_format`, or specifying `unit_scale`.
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict or *, optional
Specify additional stats to display at the end of the bar.
Calls `set_postfix(**postfix)` if possible (dict).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
write_bytes : bool, optional
If (default: None) and `file` is unspecified,
bytes will be written in Python 2. If `True` will also write
bytes. In all other cases will default to unicode.
lock_args : tuple, optional
Passed to `refresh` for intermediate output
(initialisation, iterating, and updating).
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm.gui.tqdm(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if write_bytes is None:
write_bytes = file is None and sys.version_info < (3,)
if file is None:
file = sys.stderr
if write_bytes:
# Despite coercing unicode into bytes, py2 sys.std* streams
# should have bytes written to them.
file = SimpleTextIOWrapper(
file, encoding=getattr(file, 'encoding', None) or 'utf-8')
if disable is None and hasattr(file, "isatty") and not file.isatty():
disable = True
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if total == float("inf"):
# Infinite iterations, behave same as unknown
total = None
if disable:
self.iterable = iterable
self.disable = disable
with self._lock:
self.pos = self._get_free_pos(self)
self._instances.remove(self)
self.n = initial
self.total = total
return
if kwargs:
self.disable = True
with self._lock:
self.pos = self._get_free_pos(self)
self._instances.remove(self)
raise (
TqdmDeprecationWarning(
"`nested` is deprecated and automated.\n"
"Use `position` instead for manual control.\n",
fp_write=getattr(file, 'write', sys.stderr.write))
if "nested" in kwargs else
TqdmKeyError("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
if dynamic_ncols:
ncols = dynamic_ncols(file)
else:
_dynamic_ncols = _environ_cols_wrapper()
if _dynamic_ncols:
ncols = _dynamic_ncols(file)
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ((ascii is True) or _is_ascii(ascii)):
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc or ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.unit_divisor = unit_divisor
self.lock_args = lock_args
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
self.postfix = None
if postfix:
try:
self.set_postfix(refresh=False, **postfix)
except TypeError:
self.postfix = postfix
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
with self._lock:
if position is None:
self.pos = self._get_free_pos(self)
else: # mark fixed positions as negative
self.pos = -position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
self.refresh(lock_args=self.lock_args)
# Init the time counter
self.last_print_t = self._time()
# NB: Avoid race conditions by setting start_t at the very end of init
self.start_t = self.last_print_t
def __bool__(self):
if self.total is not None:
return self.total > 0
if self.iterable is None:
raise TypeError('bool() undefined when iterable == total == None')
return bool(self.iterable)
def __nonzero__(self):
return self.__bool__()
def __len__(self):
return self.total if self.iterable is None else \
(self.iterable.shape[0] if hasattr(self.iterable, "shape")
else len(self.iterable) if hasattr(self.iterable, "__len__")
else getattr(self, "total", None))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
try:
self.close()
except AttributeError:
# maybe eager thread cleanup upon external error
if (exc_type, exc_value, traceback) == (None, None, None):
raise
warn("AttributeError ignored", TqdmWarning, stacklevel=2)
def __del__(self):
self.close()
def __repr__(self):
return self.format_meter(**self.format_dict)
@property
def _comparable(self):
return abs(getattr(self, "pos", 1 << 31))
def __hash__(self):
return id(self)
def __iter__(self):
"""Backward-compatibility to use: for x in tqdm(iterable)"""
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
return
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
smoothing = self.smoothing
avg_time = self.avg_time
time = self._time
if not hasattr(self, 'sp'):
raise TqdmDeprecationWarning(
"Please use `tqdm.gui.tqdm(...)` instead of"
" `tqdm(..., gui=True)`\n",
fp_write=getattr(self.fp, 'write', sys.stderr.write))
for obj in iterable:
yield obj
# Update and possibly print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check counter first to avoid calls to time()
if n - last_print_n >= self.miniters:
miniters = self.miniters # watch monitoring thread changes
delta_t = time() - last_print_t
if delta_t >= mininterval:
cur_t = time()
delta_it = n - last_print_n
# EMA (not just overall average)
if smoothing and delta_t and delta_it:
rate = delta_t / delta_it
avg_time = self.ema(rate, avg_time, smoothing)
self.avg_time = avg_time
self.n = n
self.refresh(lock_args=self.lock_args)
# If no `miniters` was specified, adjust automatically
# to the max iteration rate seen so far between 2 prints
if dynamic_miniters:
if maxinterval and delta_t >= maxinterval:
# Adjust miniters to time interval by rule of 3
if mininterval:
# Set miniters to correspond to mininterval
miniters = delta_it * mininterval / delta_t
else:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif smoothing:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
rate = delta_it
if mininterval and delta_t:
rate *= mininterval / delta_t
miniters = self.ema(rate, miniters, smoothing)
else:
# Maximum nb of iterations between 2 prints
miniters = max(miniters, delta_it)
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
self.miniters = miniters
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.miniters = miniters
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int or float, optional
Increment to add to the internal counter of iterations
[default: 1]. If using float, consider specifying `{n:.3f}`
or similar in `bar_format`, or specifying `unit_scale`.
"""
# N.B.: see __iter__() for more comments.
if self.disable:
return
if n < 0:
self.last_print_n += n # for auto-refresh logic to work
self.n += n
# check counter first to reduce calls to time()
if self.n - self.last_print_n >= self.miniters:
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # >= n
# elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t and delta_it:
rate = delta_t / delta_it
self.avg_time = self.ema(
rate, self.avg_time, self.smoothing)
if not hasattr(self, "sp"):
raise TqdmDeprecationWarning(
"Please use `tqdm.gui.tqdm(...)`"
" instead of `tqdm(..., gui=True)`\n",
fp_write=getattr(self.fp, 'write', sys.stderr.write))
self.refresh(lock_args=self.lock_args)
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far between two prints.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t >= self.maxinterval:
if self.mininterval:
self.miniters = delta_it * self.mininterval \
/ delta_t
else:
self.miniters = delta_it * self.maxinterval \
/ delta_t
elif self.smoothing:
self.miniters = self.smoothing * delta_it * \
(self.mininterval / delta_t
if self.mininterval and delta_t
else 1) + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = max(self.miniters, delta_it)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
"""Cleanup and (if leave=False) close the progressbar."""
if self.disable:
return
# Prevent multiple closures
self.disable = True
# decrement instance pos and remove from internal set
pos = abs(self.pos)
self._decr_instances(self)
# GUI mode
if not hasattr(self, "sp"):
return
# annoyingly, _supports_unicode isn't good enough
def fp_write(s):
self.fp.write(_unicode(s))
try:
fp_write('')
except ValueError as e:
if 'closed' in str(e):
return
raise # pragma: no cover
leave = pos == 0 if self.leave is None else self.leave
with self._lock:
if leave:
# stats for overall rate (no weighted average)
self.avg_time = None
self.display(pos=0)
fp_write('\n')
else:
self.display(msg='', pos=pos)
if not pos:
fp_write('\r')
def clear(self, nolock=False):
"""Clear current bar display."""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp('')
self.fp.write('\r') # place cursor back at the beginning of line
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def refresh(self, nolock=False, lock_args=None):
"""
Force refresh the display of this bar.
Parameters
----------
nolock : bool, optional
If `True`, does not lock.
If [default: `False`]: calls `acquire()` on internal lock.
lock_args : tuple, optional
Passed to internal lock's `acquire()`.
If specified, will only `display()` if `acquire()` returns `True`.
"""
if self.disable:
return
if not nolock:
if lock_args:
if not self._lock.acquire(*lock_args):
return False
else:
self._lock.acquire()
self.display()
if not nolock:
self._lock.release()
return True
def unpause(self):
"""Restart tqdm timer from last print time."""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
def reset(self, total=None):
"""
Resets to 0 iterations for repeated use.
Consider combining with `leave=True`.
Parameters
----------
total : int or float, optional. Total to use for the new bar.
"""
self.last_print_n = self.n = 0
self.last_print_t = self.start_t = self._time()
if total is not None:
self.total = total
self.refresh()
def set_description(self, desc=None, refresh=True):
"""
Set/modify description of the progress bar.
Parameters
----------
desc : str, optional
refresh : bool, optional
Forces refresh [default: True].
"""
self.desc = desc + ': ' if desc else ''
if refresh:
self.refresh()
def set_description_str(self, desc=None, refresh=True):
"""Set/modify description without ': ' appended."""
self.desc = desc or ''
if refresh:
self.refresh()
def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
"""
Set/modify postfix (additional stats)
with automatic formatting based on datatype.
Parameters
----------
ordered_dict : dict or OrderedDict, optional
refresh : bool, optional
Forces refresh [default: True].
kwargs : dict, optional
"""
# Sort in alphabetical order to be more deterministic
postfix = _OrderedDict([] if ordered_dict is None else ordered_dict)
for key in sorted(kwargs.keys()):
postfix[key] = kwargs[key]
# Preprocess stats according to datatype
for key in postfix.keys():
# Number: limit the length of the string
if isinstance(postfix[key], Number):
postfix[key] = self.format_num(postfix[key])
# Else for any other type, try to get the string conversion
elif not isinstance(postfix[key], _basestring):
postfix[key] = str(postfix[key])
# Else if it's a string, don't need to preprocess anything
# Stitch together to get the final postfix
self.postfix = ', '.join(key + '=' + postfix[key].strip()
for key in postfix.keys())
if refresh:
self.refresh()
def set_postfix_str(self, s='', refresh=True):
"""
Postfix without dictionary expansion, similar to prefix handling.
"""
self.postfix = str(s)
if refresh:
self.refresh()
def moveto(self, n):
# TODO: private method
self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
self.fp.flush()
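    # moveto(n) shifts the cursor n lines down for positive n ('\n' * n) or
    # abs(n) lines up for negative n (terminal move-up escapes); display() and
    # clear() use it to reach this bar's row when several bars are stacked.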
@property
def format_dict(self):
"""Public API for read-only member access."""
return dict(
n=self.n, total=self.total,
elapsed=self._time() - self.start_t
if hasattr(self, 'start_t') else 0,
ncols=self.dynamic_ncols(self.fp)
if self.dynamic_ncols else self.ncols,
prefix=self.desc, ascii=self.ascii, unit=self.unit,
unit_scale=self.unit_scale,
rate=1 / self.avg_time if self.avg_time else None,
bar_format=self.bar_format, postfix=self.postfix,
unit_divisor=self.unit_divisor)
def display(self, msg=None, pos=None):
"""
Use `self.sp` to display `msg` in the specified `pos`.
Consider overloading this function when inheriting to use e.g.:
`self.some_frontend(**self.format_dict)` instead of `self.sp`.
Parameters
----------
msg : str, optional. What to display (default: `repr(self)`).
pos : int, optional. Position to `moveto`
(default: `abs(self.pos)`).
"""
if pos is None:
pos = abs(self.pos)
if pos:
self.moveto(pos)
self.sp(self.__repr__() if msg is None else msg)
if pos:
self.moveto(-pos)
@classmethod
@contextmanager
def wrapattr(tclass, stream, method, total=None, bytes=True, **tkwargs):
"""
stream : file-like object.
method : str, "read" or "write". The result of `read()` and
the first argument of `write()` should have a `len()`.
>>> with tqdm.wrapattr(file_obj, "read", total=file_obj.size) as fobj:
... while True:
... chunk = fobj.read(chunk_size)
... if not chunk:
... break
"""
with tclass(total=total, **tkwargs) as t:
if bytes:
t.unit = "B"
t.unit_scale = True
t.unit_divisor = 1024
yield CallbackIOWrapper(t.update, stream, method)
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
| [
"[email protected]"
] | |
871063080f473020dc6912e524d1273d901ce35c | bb7d547f547d5f903a0ce574841be5a45c0e1352 | /HW08/tasks.py | 0d995d547c7ab347c7672695dc281cef5502eca3 | [] | no_license | itsanti/uii_py_dev | bfedf6a52739dd00af86612f896826e840c3935b | 74f16c8ced10bdd4572fa212bae5ad38743bde67 | refs/heads/master | 2020-11-25T14:33:05.095744 | 2019-12-30T15:48:59 | 2019-12-30T15:48:59 | 228,716,679 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,406 | py | from time import time
from functools import wraps
from utils.tools import bytes2human
import os
import sys
import psutil
# 1. Write a decorator that measures the execution time of the decorated function.
# @wraps is used so that the decorated function keeps its original __name__
def timeit(f):
@wraps(f)
def wrapper(*args, **kwargs):
start = time()
return f(*args, **kwargs), time() - start
return wrapper
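# timeit returns a (result, elapsed_seconds) tuple, e.g. (a sketch):
#     total, sec = timeit(sum)([1, 2, 3])   # total == 6, sec ~= 0.0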
# 2. Compare the time it takes to build a list of the natural numbers
# from 1 to 1000000 via a comprehension vs. a loop (each wrapped in a function).
@timeit
def create_from_generator(a, b):
return [x for x in range(a, b + 1)]
@timeit
def create_from_list(a, b):
result = []
for i in range(a, b + 1):
result.append(i)
return result
N = 1000000
print(f'N = {N}')
result, sec = create_from_generator(1, N)
print(f'{create_from_generator.__name__}: list_len={len(result)}, gen_time={sec:.4f} sec.')
result, sec = create_from_list(1, N)
print(f'{create_from_list.__name__:>21}: list_len={len(result)}, gen_time={sec:.4f} sec.')
# 3. Write a decorator that measures the amount of RAM consumed by the decorated function.
def get_memory(f):
@wraps(f)
def wrapper(*args, **kwargs):
proc = psutil.Process(os.getpid())
start = proc.memory_info().rss
return f(*args, **kwargs), bytes2human(proc.memory_info().rss - start)
return wrapper
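# A quick sanity check of get_memory (a sketch; the RSS delta is only a rough
# per-call estimate and varies by platform/allocator):
#     alloc = get_memory(lambda: list(range(1_000_000)))
#     _, used = alloc()   # e.g. used == '38.5M' on 64-bit CPython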
create_from_generator = get_memory(create_from_generator)
result, memory = create_from_generator(1, N)
print(f'{create_from_generator.__name__}: list_len={len(result[0])}, memory={memory}')
create_from_list = get_memory(create_from_list)
result, memory = create_from_list(1, N)
print(f'{create_from_list.__name__:>21}: list_len={len(result[0])}, memory={memory}')
@get_memory
def create_from_func_generator(a, b):
for x in range(a, b + 1):
yield x
result, memory = create_from_func_generator(1, N)
print(f'{create_from_func_generator.__name__}: memory={memory}, result_memory={bytes2human(sys.getsizeof(result))}')
'''OUT
N = 1000000
create_from_generator: list_len=1000000, gen_time=0.0550 sec.
create_from_list: list_len=1000000, gen_time=0.0910 sec.
create_from_generator: list_len=1000000, memory=38.5M
create_from_list: list_len=1000000, memory=38.4M
N = 100000000
create_from_generator: list_len=100000000, gen_time=6.3599 sec.
create_from_list: list_len=100000000, gen_time=10.3696 sec.
create_from_generator: list_len=100000000, memory=3.8G
create_from_list: list_len=100000000, memory=3.8G
create_from_func_generator: memory=0B, result_memory=120B
Conclusions:
1. Building the list with a list comprehension is faster.
2. Materializing a list of 1000000 elements in memory consumes ~38 MB of RAM,
while using a generator object cuts the required memory to ~120 bytes (the generator object itself)
'''
| [
"[email protected]"
] | |
61c4e91d406b03f4ffd25ceca3c172ae005c1448 | 7533d42ff0e5e6cf49ffc6aebc5e6ff8fc00d052 | /All_Programs/JiangXi/jiangxi.py | 88ff8c1cea57c1bf2befec6c78156ef0beeb239d | [] | no_license | oxw-118/shuiwu | 0aee439e253f4e55a3ae5a565052740fc80bee22 | 19d1a08c5407df13dd1f3715c63774626372a4ac | refs/heads/master | 2020-05-03T12:43:11.495555 | 2019-03-07T10:06:04 | 2019-03-07T10:06:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,646 | py | # coding=utf-8
import PackageTool
import re
import os
import traceback
import gevent
import gevent.monkey
import datetime
from lxml import html
import urlparse
import logging
from xml.etree import ElementTree
from tax import MyException, MySQL, MainFunc
from tax.config import config_one, config_two
from tax.MainFunc import requests
class ShuiWu(MainFunc.MainFunc):
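    """Spider for tax-arrears notices published by Jiangxi tax bureaus.

    do_guoshui pulls the full announcement list from the site's XML
    interface, deal_node keeps entries newer than oldest_time whose titles
    match arrears/abnormal-taxpayer keywords, and look_for_a downloads any
    linked .doc/.xls/.docx/.xlsx attachments.
    """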
def __init__(self,province,region,dishui_type=None):
super(ShuiWu,self).__init__()
self.pinyin='Jiang_Xi'
self.region=region
self.data={
'col':'1',
'appid':'1',
'webid':'2',
'path':'/',
'columnid':config_one[province][region]['columnid'],
'sourceContentType':'1',
'unitid':config_one[province][region]['unitid'],
'webname':config_one[province][region]['webname'],
'permissiontype':'0'
}
self.url=config_one[province][region]['domain']
self.province=province
result=urlparse.urlparse(self.url)
print result
self.href_ahead=result.scheme+'://'+result.netloc
self.oldest_time='2015-01-01'
def do_guoshui(self):
root = self.get_root()
lst_node = root.getiterator("totalrecord")[0].text
params={
'startrecord':'0',
'endrecord':str(int(lst_node)-1),
'perpage':str(lst_node)
}
root = self.get_root(params=params)
results = root.getiterator("record")
for node in results:
self.deal_node(node)
def get_root(self,params=None):
print self.url
if params:
a=requests.post(self.url,data=self.data,params=params)
else:
a=requests.post(self.url,data=self.data)
root = ElementTree.fromstring(a.text)
return root
def deal_node(self,node):
self.title_time=re.findall("\d{4}-\d{2}-\d{2}",node.text)[0]
if self.title_time>self.oldest_time:
try:
self.title=re.findall("title='(.*?)'",node.text)[0]
            except:
                self.title=re.findall('title="(.*?)"',node.text)[0]
re_words=u'(?:欠税公告|欠税.*公告|欠缴税款|非正常户|欠费公告|关于清缴欠税的通告|非正户)'
if re.search(u'%s' %(re_words,),self.title):
if not self.is_exist():
href=re.findall("href='(.*?)'",node.text)[0]
self.filename=href.split('/')[-1]
real_href=self.href_ahead+href
logging.info('-------'+real_href)
self.look_for_a(real_href)
def look_for_a(self,url):
count=0
tree=self.get_tree(url)
all_a=tree.xpath('//a/@href')
for a in all_a:
if a.endswith('.doc') or a.endswith('.xls') or a.endswith('.xlsx') or a.endswith('.docx'):
count+=1
self.doc_url=self.href_ahead+a
try:
self.filename=re.findall('filename=(.*)',a)[0]
except:
self.filename=a.split('/')[-1]
self.download()
if count==0:
self.doc_url = url
self.download()
if __name__ == '__main__':
gevent.monkey.patch_all()
tasks=[]
today=datetime.date.today()
file_name = os.path.basename(__file__).split('.')[0]
MainFunc.MainFunc.write_log(file_name)
print today
#
m='江西省'
for n in config_one[m].keys():
shuiwu=ShuiWu(m,n)
cc=shuiwu.do_guoshui
tasks.append(gevent.spawn(cc))
gevent.joinall(tasks)
| [
"[email protected]"
] | |
fd634b7303d63082eaff4aa3cbd3f25501db004c | cc805febad6425cf16d9a4dcda532ff2a24ecfec | /gDrive (03.08.19)/radar_boot_gsheets.py | 64337a117618481c87d22a55c95a487c9427cf0e | [] | no_license | frosty939/lw | 5e83c4c5ed384fa435a470cc06d623730f1ced80 | a57d50266858920613354c244e645417c54ddefa | refs/heads/master | 2020-05-04T14:43:23.633460 | 2019-09-20T08:47:49 | 2019-09-20T08:47:49 | 179,207,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,097 | py | # importing the requests library
import csv
import time
import gspread
import re
import sys
from oauth2client.service_account import ServiceAccountCredentials
nagios_stat_file = sys.argv[1]
google_worksheet_name = "Sheet1"
google_workbook_name = "/boot Alert Tickets"
scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name("creds.json", scope)
gc = gspread.authorize(credentials)
wks = gc.open(google_workbook_name)
sps = wks.worksheet(google_worksheet_name)
stats={}
adding_notes = False
complete = False
row_pos=2
api_calls = 0
time_start = round(time.time())
# creating a dictionary with the account and Nagios stats info
with open(nagios_stat_file) as fp:
for line in fp.readlines():
if ( not adding_notes ):
line = line.replace("\n","")
if (re.search("Account",line) != None):
stats['account']=line[line.find(":")+2:]
if (re.search("UID",line)!= None):
stats['uid']=line[line.find(":")+2:]
if (re.search("Priority",line)!= None):
stats['priority']=line[line.find(":")+2:]
if (re.search("Host",line)!= None):
stats['host']=line[line.find(":")+2:]
if (re.search("IP",line)!= None):
stats['ip']=line[line.find(":")+2:]
if ( re.search("Notes",line)!= None and not (re.search("Radar",line)!= None)):
adding_notes = True
stats['notes'] = "=" + line
if ( re.search("Radar",line) != None):
col_pos=1
for info in stats:
sps.update_cell(row_pos,col_pos,str(stats[info]))
api_calls+=1
col_pos += 1
stats[info]=""
time.sleep(1)
row_pos += 1
adding_notes = False
if (adding_notes or re.search("Bad",line)):
stats['notes'] += line
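    # Final flush (a note on behavior): anything still buffered in stats when
    # the file ends is written out here, reusing the row_pos/col_pos left over
    # from the last "Radar" block - so it assumes at least one block was seen.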
for info in stats:
sps.update_cell(row_pos,col_pos,str(stats[info]))
col_pos += 1
stats[info]=""
time.sleep(1) | [
"[email protected]"
] | |
ed2d3c54de34d9b50d8652f91880c29d14747cd6 | 0ad58e0d707651254a2ef7dfd54c15d4eb0603b1 | /algorithms/greedy_algorithms/huffman_encoding.py | ba44f33b9d6b20e9f647ded4cc4b777ec75ce26c | [
"MIT"
] | permissive | weirdname404/courses | d484bf58e1b2b84bc0592f7ec06af77d717e55f3 | 611443422cc6acc1af563d9d7d07181e9984ddab | refs/heads/master | 2020-05-29T09:26:37.846651 | 2019-08-12T17:06:09 | 2019-08-12T17:06:09 | 189,062,872 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,915 | py | import heapq
from collections import Counter, namedtuple
"""
This program implements Huffman coding using Python's heapq (binary heap) module.
"""
class Node(namedtuple("Node", ["left", "right"])):
def walk(self, code, acc):
self.left.walk(code, acc + '0')
self.right.walk(code, acc + '1')
class Leaf(namedtuple("Leaf", ["char"])):
def walk(self, code, acc):
code[self.char] = acc or '0'
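# Note: `acc or '0'` covers the degenerate single-symbol alphabet, where the
# root of the Huffman tree is itself a Leaf and would otherwise receive the
# empty code "".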
def huffman_encode(s):
code = {}
h = []
for ch, freq in Counter(s).items():
h.append((freq, len(h), Leaf(ch)))
heapq.heapify(h)
count = len(h)
while len(h) > 1:
freq1, _c1, left = heapq.heappop(h)
freq2, _c2, right = heapq.heappop(h)
heapq.heappush(h, (freq1 + freq2, count, Node(left, right)))
count += 1
if h:
[(_freq, _count, root)] = h
root.walk(code, "")
return code
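# Worked example (a sketch; exact codes depend on Counter's first-occurrence
# order and the heap's insertion-count tie-breaker): for "abracadabra",
# 'a' (freq 5) gets the 1-bit code '0' while b, r, c, d get 3-bit codes,
# so the 11-character string encodes into 5*1 + 6*3 = 23 bits.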
def huffman_decode(encoded, code):
key = ''
decoded = ''
decode_code = {key: ch for ch, key in code.items()}
for s in encoded:
key += s
if key in decode_code:
decoded += decode_code[key]
key = ''
return decoded
def main():
s = input()
code = huffman_encode(s)
encoded = "".join(code[ch] for ch in s)
print(len(s), len(encoded))
for key in sorted(code):
print(f'{code[key]}: {key}')
print(encoded)
def test(n_iter):
import random as r
import string
import sys
for _ in range(n_iter):
length = r.randint(0, 32)
s = "".join(r.choice(string.ascii_letters) for _ in range(length))
code = huffman_encode(s)
encoded = "".join(code[ch] for ch in s)
try:
decoded = huffman_decode(encoded, code)
assert s == decoded
except AssertionError:
sys.exit(f'{s}, {decoded}')
print('ALL OK')
if __name__ == "__main__":
# test(1000)
main()
| [
"[email protected]"
] | |
5f8e4ddc7c7a402821aa65d4a9e77ec66e0e6a7b | 67aaed7e8490561c157b1fb8dd3c7344e94a4d06 | /home/models.py | 93394331cb7b8da949f49ac85f16f6837c966093 | [] | no_license | VEDANSHKUMAR/The_Readers_Heart | b554dcffc072bcaaf5a130f6ac5a2f972e87a400 | 86bfd1e47b38b6c42054747424856c9759e4b2f3 | refs/heads/main | 2023-06-14T20:26:52.162547 | 2021-07-12T17:28:49 | 2021-07-12T17:28:49 | 385,202,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | from django.db import models
# Contact model for the Home app: stores messages submitted through the site's contact form.
class Contact(models.Model):
    sno = models.AutoField(primary_key=True)
    name = models.CharField(max_length=50)
    email = models.CharField(max_length=100)
    phone = models.CharField(max_length=13)
    content = models.TextField()
    timeStamp = models.DateTimeField(auto_now_add=True, blank=True)
def __str__(self):
return 'Message from ' + self.name + ' - ' + self.email
| [
"[email protected]"
] | |
6dddc001f815f7c76070f5808ecebae5477f4e9a | 44b0079ba7a3a21be1ec77cbc5f316cb4ad79f29 | /Examples/NumPyExample.py | 66d6e036e3aaf358e96c5e9e9fe2389409bacfd0 | [
"MIT"
] | permissive | WzqProgrammer/DeepLearning | 2faa6ea481f407b2dab9993ec97dba346c86808c | e619696c18e29ecf04fb94654f0baaf6ca965a41 | refs/heads/master | 2020-07-02T17:39:24.588972 | 2019-09-13T00:42:39 | 2019-09-13T00:42:39 | 194,270,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | import numpy as np
import matplotlib.pyplot as plt
# generate the data
x = np.arange(0, 6, 0.1)  # from 0 up to (but not including) 6, in steps of 0.1
y1 = np.sin(x)
y2 = np.cos(x)
# draw the figure
plt.plot(x, y1, label="sin")
plt.plot(x, y2, linestyle="--", label="cos")  # draw cos with a dashed line
plt.xlabel("x")  # x-axis label
plt.ylabel("y")  # y-axis label
plt.title('sin & cos')  # title
plt.legend()
plt.show() | [
"[email protected]"
] | |
f6ea4462080e5a65473a6fc0f8c050a070bd7f56 | 1207d50126d4d59966573927c5eadd94db6aeb59 | /svggen/library/legacy/Simulation.py | d8321b335dfcb676d698b0a9b6bad553578b8989 | [] | no_license | christianwarloe/robotBuilder | aee03c189972f1d305c6e13d106b362b5d26d187 | 3f8fbc267ac7b9bbae534d1208278541a7b5eaa5 | refs/heads/master | 2021-06-13T02:42:24.834816 | 2017-04-07T01:01:52 | 2017-04-07T01:01:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | from svggen.api.component import Component
from svggen.api.ports.SimulationPort import SimulationPort
class Simulation(Component):
def assemble(self):
self.addInterface("sim", SimulationPort(self))
if __name__ == "__main__":
h = Simulation()
#h._make_test()
| [
"[email protected]"
] | |
d696a9a539a215e0a30f4815358fe6bde131f4a5 | 8486a8342312585227ced1505ef0ed09d5edb9fa | /2_Python/Week7/ajax/apps/notes/migrations/0001_initial.py | bb968b55cd31170a57e324a2ee8bca5c8c939d5e | [] | no_license | nickchic/Coding_Dojo | acef3a8d33b4ab99f6b16738589c03699b58ea94 | 99c2047fe33862aa62b0c61875e5573b3149aaa4 | refs/heads/master | 2021-01-02T08:59:27.423365 | 2017-11-08T20:33:12 | 2017-11-08T20:33:12 | 99,114,282 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-10-12 20:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Note',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
| [
"[email protected]"
] | |
3ff991fc53a02cfd81dd20519bbfc82a7466974b | 1838f5da624e0da625ff0c00a642bbff10f9bde6 | /guild/steps_main.py | dac3b109450ce28e5eae0512466f9afb922846c2 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | Jim-Holmstroem/guildai | 75b57785654b98902aefcb50ace0f227ad95fd18 | ed80e307fca20c0d607c600180cdb0854f42021d | refs/heads/master | 2020-12-31T19:48:39.996994 | 2020-02-06T18:02:20 | 2020-02-06T18:02:20 | 239,023,692 | 0 | 0 | Apache-2.0 | 2020-02-07T21:13:39 | 2020-02-07T21:13:39 | null | UTF-8 | Python | false | false | 13,077 | py | # Copyright 2017-2020 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import logging
import os
import re
import subprocess
import sys
import click
import six
import guild.log
from guild import exit_code
from guild import flag_util
from guild import op_util
from guild import opref as opreflib
from guild import run as runlib
from guild import run_check
from guild import util
log = None # initialized in _init_logging
STEP_USED_PARAMS = (
"flags",
"gpus",
"label",
"max_trials",
"needed",
"no_gpus",
"opspec",
"opt_flags",
"optimizer",
"random_seed",
"stop_after",
)
###################################################################
# State
###################################################################
class Step(object):
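    """One parsed step of a stepped (pipeline) run.

    Wraps a single entry of the parent run's `steps` data: the resolved op
    spec, step-scoped flag values, optional `expect` checks, and run options
    (label, gpus, optimizer, max trials, etc.) passed through to `guild run`.
    """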
def __init__(self, data, parent_flags, parent_opref):
data = _coerce_step_data(data)
params = _parse_run(data)
assert params["opspec"], params
opspec_param = params["opspec"]
self.op_spec = _apply_default_model(opspec_param, parent_opref)
self.name = data.get("name") or opspec_param
self.batch_files, flag_args = _split_batch_files(params["flags"])
self.flags = _init_step_flags(flag_args, parent_flags, self)
self.checks = _init_checks(data)
self.isolate_runs = bool(data.get("isolate-runs", True))
self.label = _resolve_param(params, "label", parent_flags)
self.gpus = _resolve_param(params, "gpus", parent_flags)
self.no_gpus = params["no_gpus"]
self.stop_after = params["stop_after"]
self.needed = params["needed"]
self.optimizer = params["optimizer"]
self.opt_flags = params["opt_flags"]
self.max_trials = params["max_trials"]
self.random_seed = params["random_seed"]
def __str__(self):
return self.name or self.op_spec
def _coerce_step_data(data):
if isinstance(data, six.string_types):
data = {"run": data}
elif isinstance(data, dict):
data = dict(data)
else:
_error("invalid step data: %r" % data)
if "flags" in data:
data["flags"] = _coerce_flags_data(data["flags"])
return data
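# e.g. (a sketch): _coerce_step_data("train") => {"run": "train"}; dict data
# is shallow-copied, with any "flags" entry normalized to a list of NAME=VAL
# strings.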
def _coerce_flags_data(data):
if isinstance(data, list):
return data
elif isinstance(data, dict):
return flag_util.format_flags(data)
else:
_error("invalid flags value %r" % data)
def _parse_run(data):
from guild.commands.run import run as run_cmd
run_spec = data.get("run", "").strip()
if not run_spec:
_error("invalid step %r: must define run" % data)
args = util.shlex_split(run_spec)
try:
ctx = run_cmd.make_context("run", args)
except click.exceptions.ClickException as e:
_error("invalid run spec %r: %s" % (run_spec, e))
else:
_apply_data_params(data, ctx, run_spec)
return ctx.params
def _apply_data_params(data, ctx, run_spec):
"""Apply applicable data to params.
Warns if params contains unused values.
"""
defaults = {p.name: p.default for p in ctx.command.params}
for name, val in sorted(ctx.params.items()):
if name in STEP_USED_PARAMS:
data_name = name.replace("_", "-")
try:
data_val = data[data_name]
except KeyError:
pass
else:
if data_val != defaults[name]:
ctx.params[name] = data_val
else:
if val != defaults[name]:
log.warning("run parameter %s used in %r ignored", name, run_spec)
def _init_step_flags(flag_args, parent_flag_vals, step):
flag_vals = _parse_flag_assigns(flag_args)
_apply_parent_flags(parent_flag_vals, step, flag_vals)
resolved = _resolve_flag_vals(flag_vals, parent_flag_vals)
return _remove_undefined_flags(resolved)
def _parse_flag_assigns(assigns):
assert isinstance(assigns, list), assigns
try:
return op_util.parse_flag_assigns(assigns)
except op_util.ArgValueError as e:
_error("invalid argument '%s' - expected NAME=VAL" % e.arg)
def _apply_parent_flags(parent_flag_vals, step, flag_vals):
prefixes = [
step.op_spec + ":",
step.name + ":",
]
flag_vals.update(_prefixed_flag_vals(prefixes, parent_flag_vals))
def _prefixed_flag_vals(prefixes, flag_vals):
"""Returns a dict of prefixed flag values.
Prefixes are stripped from matching flag names.
The value for the first matching prefix from prefixes is used.
"""
prefixed = {}
for prefix in prefixes:
for full_name in flag_vals:
if full_name.startswith(prefix):
prefixed_name = full_name[len(prefix) :]
prefixed.setdefault(prefixed_name, flag_vals[full_name])
return prefixed
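# e.g. (a sketch): _prefixed_flag_vals(["train:"], {"train:lr": 0.1, "x": 1})
# => {"lr": 0.1} - the prefix is stripped and non-matching names are dropped.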
def _apply_default_model(step_opspec, parent_opref):
step_opref = opreflib.OpRef.for_string(step_opspec)
if not step_opref.model_name:
step_opref = opreflib.OpRef(
step_opref.pkg_type,
step_opref.pkg_name,
step_opref.pkg_version,
parent_opref.model_name,
step_opref.op_name,
)
return step_opref.to_opspec()
def _split_batch_files(flag_args):
return op_util.split_batch_files(flag_args)
def _resolve_flag_vals(flags, parent_flags):
return {name: util.resolve_refs(val, parent_flags) for name, val in flags.items()}
def _remove_undefined_flags(flag_vals):
return {name: val for name, val in flag_vals.items() if val is not None}
def _resolve_param(params, name, flags):
resolved = util.resolve_refs(params[name], flags)
if resolved is None:
return resolved
return str(resolved)
def _init_checks(data):
expect = data.get("expect") or []
if not isinstance(expect, list):
expect = [expect]
checks = []
for check_data in expect:
try:
check = run_check.init_check(check_data)
except ValueError as e:
log.warning("invalid check %r: %e", data, e)
else:
checks.append(check)
return checks
###################################################################
# Main
###################################################################
def main():
_init_logging()
_run_steps()
def _init_logging():
level = int(os.getenv("LOG_LEVEL", logging.WARN))
format = os.getenv("LOG_FORMAT", "%(levelname)s: [%(name)s] %(message)s")
guild.log.init_logging(level, {"_": format})
globals()["log"] = logging.getLogger("guild")
def _run_steps():
run = _init_run()
steps = _init_steps(run)
if not steps:
log.warning("no steps defined for run %s", run.id)
return
for step in steps:
step_run = _run_step(step, run)
_maybe_check_step_run(step, step_run)
# =================================================================
# Init
# =================================================================
def _init_run():
run_id, run_dir = _run_environ()
return runlib.Run(run_id, run_dir)
def _run_environ():
try:
return os.environ["RUN_ID"], os.environ["RUN_DIR"]
except KeyError as e:
_internal_error("missing required env %s" % e.args[0])
def _init_steps(run):
data = run.get("steps")
if not data:
return []
if not isinstance(data, list):
_error("invalid steps data %r: expected list" % data)
flags = run.get("flags")
opref = run.opref
return [Step(step_data, flags, opref) for step_data in data]
# =================================================================
# Run step
# =================================================================
def _run_step(step, parent_run):
step_run = _init_step_run(parent_run)
cmd = _init_step_cmd(step, step_run.path)
_link_to_step_run(step, step_run.path, parent_run.path)
env = dict(os.environ)
env["NO_WARN_RUNDIR"] = "1"
if step.isolate_runs:
env["GUILD_RUNS_PARENT"] = parent_run.id
cwd = os.getenv("CMD_DIR")
log.info("running %s: %s", step, _format_step_cmd(cmd))
log.debug("step cwd %s", cwd)
log.debug("step command: %s", cmd)
log.debug("step env: %s", env)
returncode = subprocess.call(cmd, env=env, cwd=cwd)
if returncode != 0:
sys.exit(returncode)
return step_run
def _init_step_run(parent_run):
"""Returns the run dir for a step run.
Directory is based on a new, unique run ID but is not created.
"""
runs_dir = os.path.dirname(parent_run.path)
step_run_id = runlib.mkid()
step_run_dir = os.path.join(runs_dir, step_run_id)
return runlib.Run(step_run_id, step_run_dir)
def _init_step_cmd(step, step_run_dir):
base_args = [
sys.executable,
"-um",
"guild.main_bootstrap",
"run",
"-y",
"--force-flags",
"--run-dir",
step_run_dir,
step.op_spec,
]
step_options = _step_options(step)
batch_file_args = _step_batch_file_args(step)
flag_args = _step_flag_args(step)
return base_args + step_options + batch_file_args + flag_args
def _step_options(step):
opts = []
if step.label:
opts.extend(["--label", step.label])
if step.gpus is not None:
opts.extend(["--gpus", step.gpus])
elif step.no_gpus:
opts.append("--no-gpus")
if step.stop_after:
opts.extend(["--stop-after", str(step.stop_after)])
if step.needed:
opts.append("--needed")
if step.optimizer:
opts.extend(["--optimizer", step.optimizer])
for flag in step.opt_flags:
opts.extend(["--opt-flag", flag])
if step.max_trials:
opts.extend(["--max-trials", str(step.max_trials)])
if step.random_seed is not None:
opts.extend(["--random-seed", str(step.random_seed)])
return opts
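# e.g. (a sketch): a step with label="prep" and needed=True contributes
# ["--label", "prep", "--needed"] to the spawned `guild run` command line.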
def _step_batch_file_args(step):
return ["@%s" % file for file in step.batch_files]
def _step_flag_args(step):
return flag_util.format_flags(step.flags)
def _link_to_step_run(step, step_run_dir, parent_run_dir):
link_name = _step_link_name(step)
link_path_base = os.path.join(parent_run_dir, link_name)
link_path = _ensure_unique_link(link_path_base)
os.symlink(step_run_dir, link_path)
def _step_link_name(step):
return re.sub(r"[ :/\\]", "_", str(step))
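# e.g. _step_link_name for a step shown as "train model/op" yields
# "train_model_op" - spaces, colons and slashes are replaced with "_" so the
# step name is safe to use as a symlink name.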
def _ensure_unique_link(path_base):
v = 2
path = path_base
while True:
assert v < 1e6
if not os.path.lexists(path):
return path
path = "%s_%i" % (path_base, v)
v += 1
def _format_step_cmd(cmd):
# Show only opspec onward - assert front matter to catch changes
# to cmd.
assert cmd[0:7] == [
sys.executable,
"-um",
"guild.main_bootstrap",
"run",
"-y",
"--force-flags",
"--run-dir",
], cmd
return " ".join([arg for arg in cmd[8:]])
def _maybe_check_step_run(step, run):
if not step.checks:
return
if _run_skipped(run):
log.info("skipping checks for %s", step.name)
return
checks_passed = _check_step_run(step, run)
if not checks_passed:
_error("stopping because a check failed", exit_code.TEST_FAILED)
def _run_skipped(run):
"""Returns True if run was skipped.
    We infer that a run was skipped if its directory doesn't
exist. The rationale relies on the assertion that the step run
creates the specified run directory only when the run is not
skipped.
"""
return not os.path.exists(run.path)
def _check_step_run(step, run):
if not step.checks:
return True
passed = 0
failed = 0
for check in step.checks:
try:
check.check_run(run)
except run_check.Failed as e:
log.error("check failed: %s", e)
failed += 1
else:
passed += 1
log.info("%i of %i checks passed", passed, passed + failed)
if failed > 0:
log.error("%i check(s) failed - see above for details", failed)
return failed == 0
###################################################################
# Error messages
###################################################################
def _internal_error(msg):
sys.stderr.write("guild.steps_main: %s\n" % msg)
sys.exit(exit_code.INTERNAL_ERROR)
def _error(msg, exit_code=exit_code.DEFAULT):
sys.stderr.write("guild: %s\n" % msg)
sys.exit(exit_code)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
b0e68147b2ea49e70419c08976b17d13dde1f95e | 3c71ce7e0dce9d752a6bcb1f74972f475ba3c314 | /Day12_3.py | 9735f0a49b583f87fd6183798705959fe2958965 | [] | no_license | L-xbin2020/PytorchStudy | 7a45026848e834930f4e4276399ef3bda219a2fb | f09aafe003ad26c0edd17b9901804b266424a0e4 | refs/heads/master | 2023-06-12T22:24:38.799497 | 2021-07-03T13:35:31 | 2021-07-03T13:35:31 | 379,948,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,733 | py | import torch
num_class = 4
input_size = 4
hidden_size = 8
embedding_size = 10
num_layers = 2
batch_size = 1
seq_len = 5
idx2char = ['e', 'h', 'l', 'o']
x_data = [1, 0, 2, 2, 3]
y_data = [3, 1, 2, 3, 2]
one_hot_lookup = [[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]]
x_one_hot = [one_hot_lookup[x] for x in x_data]
# inputs = torch.Tensor(x_one_hot).view(-1, batch_size, input_size)
# labels = torch.LongTensor(y_data).view(-1, 1)
inputs = torch.LongTensor(x_data)
labels = torch.LongTensor(y_data)
print(inputs)
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.emb = torch.nn.Embedding(input_size, embedding_size)
self.rnn = torch.nn.RNN(input_size=embedding_size,
hidden_size=hidden_size,
num_layers=num_layers,
batch_first=True)
self.fc = torch.nn.Linear(hidden_size, num_class)
def forward(self, x):
hidden = torch.zeros(num_layers, x.size(0), hidden_size)
x = self.emb(x) # (batch, seqLen, embeddingSize)
x, _ = self.rnn(x, hidden)
x = self.fc(x)
return x.view(-1, num_class)
net = Model()
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.05)
for epoch in range(15):
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
_, idx = outputs.max(dim=1)
idx = idx.data.numpy()
print('Prdicted:',''.join([idx2char[x] for x in idx]), end="")
print(', Epoch [%d / 15] loss = %.3f' % (epoch + 1, loss.item()))
| [
"[email protected]"
] | |
055a80f3c4715a2e12bb8a911d2314285ffc1eac | d30f5ebe8acbe707cd712c9123f3d085af396b16 | /Cat.py | 7888d36ee2d56e36ceceffcff9cfb64f8bc973b5 | [] | no_license | GYIChen/Cat | d1210d4bac37ec2e538c5461001473235fa5e3bf | 4d930a8fb1249c98cc44d0c7539306cff3abc351 | refs/heads/master | 2020-12-01T19:12:17.921074 | 2019-12-29T14:19:52 | 2019-12-29T14:19:52 | 230,738,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,443 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 29 09:09:27 2018
@author: 56576
"""
import os
from PIL import Image
import numpy as np
from keras.utils import np_utils #用于将Label向量化
from keras.models import Sequential
from keras.layers.core import Dense,Dropout,Activation,Flatten
from keras.optimizers import SGD,RMSprop,Adam
from keras.layers import Conv2D,MaxPool2D
from keras.models import load_model
import PicPreProcess as ppp
train_path = ppp.train_path
test_path = ppp.test_path
#test_path='D:/Cat/NewCat/'
file_dict = ppp.file_path
input_width = ppp.input_width
input_height = ppp.input_height
model_path = "D:/Cat1.1/model9.h5"
#ppp.Init()
#----------------训练集-----------------------
#将目录下的照片转化为一个列表
images1 = os.listdir(train_path)
#返回一个numpy数组类型的图片
def GetImages1(filename):
#打开路径下的文件 并转化为RGB
img = Image.open(train_path+filename).convert('RGB')
return np.array(img)
#x_train和y_train都是列表 分别保存了训练集的输入和输出
x_train = []
y_train = []
#将转化为数组的图片加入训练的列表
for i in images1:
x_train.append(GetImages1(i))
x_train = np.array(x_train)
#split('_')是将文件名以'_'分割 这样可以提取预处理完毕后的数据的标签(即分割后的数组的第一个部分)
for filename in images1:
y_train.append(int(filename.split('_')[0]))
#将标签也加入数组
y_train = np.array(y_train)
# ---------------测试集-----------------------
#处理过程同上
images2 = os.listdir(test_path)
x_test = []
y_test = []
def GetImages2(filename):
img = Image.open(test_path+filename).convert('RGB')
return np.array(img)
for i in images2:
x_test.append(GetImages2(i))
x_test = np.array(x_test)
for filename in images2:
y_test.append(int(filename.split('_')[0]))
y_test = np.array(y_test)
#将训练和测试的标签都由标签数字转化为向量 来进行交叉熵的优化
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
#将输入转化为浮点数 并位于0-1来提高准确率
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
#-------------------神经网络------------------------
#卷积层1 深度为32 输入为预处理时生成的100x100的RGB图片
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(input_width, input_height, 3)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2),padding='same'))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2),padding='same'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(6, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
#model = load_model(file_dict+'model0.h5')
#for i in range (2,10):
# model.fit(x_train,y_train,batch_size=15,epochs=1)
# print (i)
# model.save(file_dict+'model'+str(i)+'.h5')
# print ('\n')
del model
model = load_model(model_path)
loss,acc = model.evaluate(x_test,y_test,batch_size=1)
print ("loss:",loss)
print ("acc:",acc) | [
"[email protected]"
] | |
474884d2663ea739fd328bb46491680706af870b | 8d246b93b9a61399c17686ff340e3cc52c0a888b | /matrix.py | 3149c2d6d3d7331053d73ebaba9b0032432bff06 | [] | no_license | natansalda/isdcnd-matrix-class | 965f77241d9720250485b010885e3d4c762b8a1e | 1803fcb0127e2b955b9670e9ee4948c34b749f8b | refs/heads/master | 2020-03-14T16:12:33.865082 | 2018-05-01T08:55:54 | 2018-05-01T08:55:54 | 131,693,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,227 | py | import math
from math import sqrt
import numbers
def zeroes(height, width):
"""
Creates a matrix of zeroes.
"""
g = [[0.0 for _ in range(width)] for __ in range(height)]
return Matrix(g)
def identity(n):
"""
Creates a n x n identity matrix.
"""
I = zeroes(n, n)
for i in range(n):
I.g[i][i] = 1.0
return I
class Matrix(object):
# Constructor
def __init__(self, grid):
self.g = grid
self.h = len(grid)
self.w = len(grid[0])
#
# Primary matrix math methods
#############################
def determinant(self):
"""
Calculates the determinant of a 1x1 or 2x2 matrix.
"""
        if not self.is_square():
            raise ValueError("Cannot calculate determinant of non-square matrix.")
        if self.h > 2:
            raise NotImplementedError("Calculating determinant not implemented for matrices larger than 2x2.")
        # TODO - your code here
        determinant = 0
        # if we have a 1x1 Matrix
        if self.h == 1:
            determinant = self.g[0][0]
        # if we have a 2x2 Matrix
        elif self.h == 2:
            determinant = self.g[0][0] * self.g[1][1] - self.g[0][1] * self.g[1][0]
        return determinant
def trace(self):
"""
Calculates the trace of a matrix (sum of diagonal entries).
"""
        if not self.is_square():
            raise ValueError("Cannot calculate the trace of a non-square matrix.")
# TODO - your code here
# initialize trace variable
trace = 0
# loop through the columns in matrix
for i in range(self.w):
trace += self.g[i][i]
return trace
print("the trace of matrix: " + self + "is: " + trace)
def inverse(self):
"""
Calculates the inverse of a 1x1 or 2x2 Matrix.
"""
        if not self.is_square():
            raise ValueError("Non-square Matrix does not have an inverse.")
        if self.h > 2:
            raise NotImplementedError("inversion not implemented for matrices larger than 2x2.")
        # TODO - your code here
        # if we have a 1x1 Matrix
        if self.w == 1:
            return Matrix([[1 / self.g[0][0]]])
        # if we have a 2x2 Matrix
        if self.w == 2:
            det = self.determinant()
            inversedGrid = zeroes(self.w, self.h)
            inversedGrid[0][0] = self.g[1][1] * (1 / det)
            inversedGrid[0][1] = -self.g[0][1] * (1 / det)
            inversedGrid[1][0] = -self.g[1][0] * (1 / det)
            inversedGrid[1][1] = self.g[0][0] * (1 / det)
            return inversedGrid
def T(self):
"""
Returns a transposed copy of this Matrix.
"""
# TODO - your code here
number_of_rows = self.w
number_of_columns = self.h
transposed_matrix = zeroes(number_of_rows, number_of_columns)
for i in range(self.h):
for j in range(self.w):
original_value = self.g[i][j]
transposed_matrix[j][i] = original_value
return transposed_matrix
print("the transposed copy of this Matrix: " + self + "is: " + transposed_matrix)
def is_square(self):
return self.h == self.w
#
# Begin Operator Overloading
############################
def __getitem__(self,idx):
"""
Defines the behavior of using square brackets [] on instances
of this class.
Example:
> my_matrix = Matrix([ [1, 2], [3, 4] ])
> my_matrix[0]
[1, 2]
> my_matrix[0][0]
1
"""
return self.g[idx]
def __repr__(self):
"""
Defines the behavior of calling print on an instance of this class.
"""
s = ""
for row in self.g:
s += " ".join(["{} ".format(x) for x in row])
s += "\n"
return s
def __add__(self,other):
"""
Defines the behavior of the + operator
"""
if self.h != other.h or self.w != other.w:
            raise ValueError("Matrices can only be added if the dimensions are the same")
#
# TODO - your code here
#
added_grid = []
for i in range(self.h):
new_row = []
for j in range(self.w):
first_value = self.g[i][j]
second_value = other.g[i][j]
new_value = first_value + second_value
new_row.append(new_value)
added_grid.append(new_row)
return Matrix(added_grid)
print("The added matrix is: " + Matrix(added_grid))
def __neg__(self):
"""
Defines the behavior of - operator (NOT subtraction)
Example:
> my_matrix = Matrix([ [1, 2], [3, 4] ])
> negative = -my_matrix
> print(negative)
-1.0 -2.0
-3.0 -4.0
"""
#
# TODO - your code here
#
# let's create new matrix using zeroes() function
new_matrix = zeroes(self.h, self.w)
for i in range(self.h):
for j in range(self.w):
new_matrix[i][j] = self.g[i][j]*-1.0
return new_matrix
print("The new matrix is: " + new_matrix)
def __sub__(self, other):
"""
Defines the behavior of - operator (as subtraction)
"""
#
# TODO - your code here
#
sub_matrix = self + (-other)
return sub_matrix
print("The new matrix is: " + sub_matrix)
def __mul__(self, other):
"""
Defines the behavior of * operator (matrix multiplication)
"""
#
# TODO - your code here
#
# let's create new matrix using zeroes() function
grid = zeroes(self.h, other.w)
for x in range(self.h):
for y in range(other.w):
for z in range(other.h):
grid[x][y] += self.g[x][z] * other.g[z][y]
return grid
print("The new matrix is: " + grid)
def __rmul__(self, other):
"""
Called when the thing on the left of the * is not a matrix.
Example:
> identity = Matrix([ [1,0], [0,1] ])
> doubled = 2 * identity
> print(doubled)
2.0 0.0
0.0 2.0
"""
if isinstance(other, numbers.Number):
#
# TODO - your code here
#
            # build a new matrix instead of mutating self in place
            multiplied = zeroes(self.h, self.w)
            for i in range(self.h):
                for j in range(self.w):
                    multiplied[i][j] = self.g[i][j] * other
            return multiplied
| [
"[email protected]"
] | |
73a48ea26e3be2cbe1878f84f00111545c04c9f2 | c299590232a59121423770328246883e7ff66fde | /Prep classes/day1.py | 3805d83517b86707c258afc32f4d758d6c2d6207 | [] | no_license | Gillt1/Python_Class | 8cd6481ea65e4d56c98381cdd7540834bc469265 | 5e256a19f3e6cbe71f697938e0538524eecc1aeb | refs/heads/master | 2023-08-11T02:25:59.117141 | 2021-10-05T14:05:43 | 2021-10-05T14:05:43 | 398,307,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | # print("My name is", "Python.")
# print('Monty Python.')
#
# print(0o123)
# print(0x123)
# print(0b1011)
#
# print(9%6%2)
#
# kilometers = 12.25
# miles = 7.38
#
# miles_to_kilometers = miles*1.61
# kilometers_to_miles = kilometers/1.61
#
# print(miles, "miles is", round(miles_to_kilometers, 2), "kilometers")
# print(kilometers, "kilometers is", round(kilometers_to_miles, 2), "miles")
# x = input("enter test data here:", )
# x = float(x)
# y = 3*x**3 - 2*x**2 + 3*x -1 # write your code here
# print("y =", y)
| [
"[email protected]"
] | |
1da13a7abe102f60f46225cf67bc38ecbb4d1614 | 2db13a0dfa223b83813e1e36218b88a6c0ddf70d | /factory/build_dir.py | 47f8bdacd9505f393736d2f56f78b247640475e1 | [] | no_license | iblaauw/Factory | 29e8ca8c3a43f07b03c2026a204dee8e31566446 | 2ec009d8bf11ab4d26996aba589c89027337946f | refs/heads/master | 2021-01-01T20:21:03.367232 | 2017-09-23T18:42:53 | 2017-09-23T18:42:53 | 98,820,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | import os
from .utils import ensure_dir_exists
known_files = set() # This set of files will not be placed in the build directory, but left at their normal path
build_dir = "build"
def translate_file(path):
if path in known_files:
return path
current_dir = os.getcwd()
abspath = os.path.abspath(path)
if not abspath.startswith(current_dir):
return path
subpath = abspath[len(current_dir):]
if subpath[0] == '/':
subpath = subpath[1:]
build_path = os.path.join(build_dir, subpath)
ensure_dir_exists(build_path)
return build_path
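# Illustrative example (editor's addition; paths are hypothetical):
# with os.getcwd() == "/home/user/proj" and build_dir == "build",
#   translate_file("src/main.c")  -> "build/src/main.c"
#   translate_file("/etc/hosts")  -> "/etc/hosts"   (outside the cwd)
#   translate_file(p) for any p in known_files -> p, unchanged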
def set_build_dir(new_dir):
global build_dir
build_dir = new_dir
def get_build_dir():
return build_dir
def ResetFiles():
known_files.clear()
set_build_dir("build")
| [
"[email protected]"
] | |
239d6da3847f27608fc18ae31fae85e8ecae219c | 428989cb9837b6fedeb95e4fcc0a89f705542b24 | /erle/ros_catkin_ws/build_isolated/sensor_msgs/catkin_generated/pkg.develspace.context.pc.py | 962d116bff1103c9be9555c3ddaba18e3740f052 | [] | no_license | swift-nav/ros_rover | 70406572cfcf413ce13cf6e6b47a43d5298d64fc | 308f10114b35c70b933ee2a47be342e6c2f2887a | refs/heads/master | 2020-04-14T22:51:38.911378 | 2016-07-08T21:44:22 | 2016-07-08T21:44:22 | 60,873,336 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/erle/ros_catkin_ws/devel_isolated/sensor_msgs/include;/home/erle/ros_catkin_ws/src/common_msgs/sensor_msgs/include;/usr/include".split(';') if "/home/erle/ros_catkin_ws/devel_isolated/sensor_msgs/include;/home/erle/ros_catkin_ws/src/common_msgs/sensor_msgs/include;/usr/include" != "" else []
PROJECT_CATKIN_DEPENDS = "geometry_msgs;message_runtime;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "sensor_msgs"
PROJECT_SPACE_DIR = "/home/erle/ros_catkin_ws/devel_isolated/sensor_msgs"
PROJECT_VERSION = "1.11.8"
| [
"[email protected]"
] | |
6e8a3eca7927a9085efd586269d0b7f6a359ebe1 | c2dfd9743d777e1f3102c5cf4bca5c4efd52e45b | /Assignments/EvenandOdd.py | 068fa844776754d701b3c2e5a4c11451a35bb2f3 | [] | no_license | bharath210/PythonLearning | 8dc2212ff5604b687834366e0c33ede0cb6cf906 | f5de2d527593886ff570808673ed4e989484126b | refs/heads/master | 2022-12-03T05:28:53.708200 | 2020-08-12T15:09:00 | 2020-08-12T15:09:00 | 287,031,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | nums = []
n = int(input("number of values"))
for i in range(n):
val = int(input('enter value'))
nums.append(val)
def evenOddCount(vals):
even = 0
odd = 0
for i in vals:
if(i % 2 == 0):
even += 1
else:
odd += 1
return even,odd
even,odd = evenOddCount(nums)
print("Even : {} Odd : {} ".format(even,odd)) | [
"[email protected]"
] | |
0c1cdfc5c5aa0a537f3efafe3c64ae8d4370e2fb | 798cfdb26135a9d6228c03ce7a63ed024d7f69c5 | /scripts/fetch_aws_data.py | 9b3ceb3b17d1d4d673a55a1e89a83831ecb9446d | [
"MIT"
] | permissive | nunojesus/HippoIgnited | 0548fc4f54cbc8f415ba46b3627c985cdde74476 | 79316965f41f9cd7f1024de5eb34942cba285358 | refs/heads/master | 2020-03-20T21:24:44.211638 | 2018-06-18T10:44:30 | 2018-06-18T10:44:30 | 137,739,418 | 1 | 0 | null | 2018-06-18T10:40:06 | 2018-06-18T10:40:05 | null | UTF-8 | Python | false | false | 5,512 | py | #!/usr/bin/env python3
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2016, Dilawar singh <[email protected]>"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawra Singh"
__email__ = "[email protected]"
__status__ = "Development/Production"
import sys
import os
import datetime
from db_connect import db_
from global_data import *
import itertools
import networkx as nx
def spec_short( spec ):
return ''.join( [ x.strip()[0] for x in spec.split( ) ] )
def getSpecialization( cur, piOrHost ):
cur.execute( "SELECT specialization FROM faculty WHERE email='%s'" % piOrHost )
a = cur.fetchone( )
return a['specialization']
def init( cur ):
"""
Create a temporaty table for scheduling AWS
"""
global db_
cur.execute( 'DROP TABLE IF EXISTS aws_temp_schedule' )
cur.execute(
'''
CREATE TABLE IF NOT EXISTS aws_temp_schedule
( speaker VARCHAR(40) PRIMARY KEY, date DATE NOT NULL )
'''
)
db_.commit( )
cur.execute(
"""
SELECT * FROM logins WHERE eligible_for_aws='YES' AND status='ACTIVE'
ORDER BY login
"""
)
for a in cur.fetchall( ):
speakers_[ a['login'].lower() ] = a
spec = a['specialization']
if spec is None:
pi = a['pi_or_host']
if pi is None:
continue
spec = getSpecialization( cur, pi )
spec = spec or 'UNSPECIFIED'
specialization_[ a['login'] ] = spec
cur.execute( """SELECT * FROM holidays ORDER BY date""")
for a in cur.fetchall( ):
if a[ 'schedule_talk_or_aws' ] == 'NO':
holidays_[ a['date'] ] = a
def get_data( ):
global db_
try:
cur = db_.cursor( dictionary = True )
except Exception as e:
        print(
            '''If the error complains about the "dictionary" keyword, install
            https://pypi.python.org/pypi/mysql-connector-python-rf/2.2.2
            using easy_install'''
        )
quit( )
init( cur )
# Entries in this table are usually in future.
cur.execute( 'SELECT * FROM upcoming_aws' )
for a in cur.fetchall( ):
aws_[ a[ 'speaker' ] ].append( a )
upcoming_aws_[ a['speaker'].lower( ) ] = a['date']
        # Track how many slots are already occupied on this date.
upcoming_aws_slots_[ a['date'] ].append( a['speaker'] )
# Now get all the previous AWSs happened so far.
cur.execute( 'SELECT * FROM annual_work_seminars' )
for a in cur.fetchall( ):
aws_[ a[ 'speaker' ].lower() ].append( a )
for a in aws_:
# Sort a list in place.
aws_[a].sort( key = lambda x : x['date'] )
# print( a, [ x['date'] for x in aws_[a] ] )
# Select all aws scheduling requests which have been approved.
cur.execute( "SELECT * FROM aws_scheduling_request WHERE status='APPROVED'" )
for a in cur.fetchall( ):
aws_scheduling_requests_[ a[ 'speaker' ].lower( ) ] = a
    # Now prepare the output file.
speakers = speaker_data( )
slots = slots_data( )
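    # Graph layout (editor's note): this looks like the setup for a
    # bipartite assignment / min-cost-flow formulation -- source -> speaker
    # edges and (date, slot) -> sink edges each have capacity 1; the
    # speaker-to-slot edges are left to be added by a downstream solver.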
graph = nx.DiGraph( )
graph.add_node( 'source' )
graph.add_node( 'sink' )
for s in speakers:
graph.add_node( s, **speakers[s] )
graph.add_edge( 'source', s, weight = 0, capacity = 1, cost = 0 )
for date, i in slots:
graph.add_node( (date,i), date = '%s' % date, index = i )
graph.add_edge( (date,i), 'sink', weight=0, capacity = 1, cost = 0 )
return graph
def slots_data( ):
today = datetime.date.today( )
monday0 = today + datetime.timedelta( days = 7 - today.weekday( ) )
validSlots = [ ]
for dayi in range( 60 ):
monday = monday0 + datetime.timedelta( days = 7 * dayi )
if monday in holidays_:
print( 'Monday %s is holiday' % monday )
continue
nSlots = 3
if monday in upcoming_aws_slots_:
nAWS = len( upcoming_aws_slots_[monday] )
print( '%d AWSs are scheduled on this date %s' % (nAWS, monday ))
nSlots -= nAWS
for sloti in range(0, nSlots ):
validSlots.append( (monday,sloti) )
return validSlots
def speaker_data( ):
speakers = { }
keys = tuple( 'login,pi_or_host,specialization,nAWS,last_aws_on'.split( ','))
for l in speakers_:
if l in upcoming_aws_:
print( '-> Name is in upcoming AWS. Ignoring' )
continue
piOrHost = speakers_[l].get('pi_or_host', 'UNKNOWN')
vals = [ ]
vals.append(l)
vals.append( '%s' % piOrHost )
spec = spec_short( specialization_.get( l, 'UNKNOWN' ) )
vals.append( spec )
nAws = len( aws_.get( l, [] ) )
vals.append( '%d' % nAws )
vals.append( '%s' % lastAwsDate( l ) )
d = dict( zip(keys, vals) )
speakers[ l ] = d
return speakers
def lastAwsDate( speaker ):
if speaker in aws_:
awss = [ aws['date'] for aws in aws_[ speaker ] ]
return sorted( awss )[-1]
else:
# joined date.
return speakers_[speaker][ 'joined_on' ]
def main( ):
global db_
data = get_data( )
db_.close( )
    outfile = '_aws_data.graphml'  # matches nx.write_graphml below
if len( sys.argv ) > 1:
outfile = sys.argv[1]
nx.write_graphml( data, outfile )
print( 'Wrote graphml to %s' % outfile )
if __name__ == '__main__':
main( )
| [
"[email protected]"
] | |
787c678c43e432a2b35b976043067acbfd52115f | 75402b6c851a12ae41359fdd83e89d2160c308af | /zentral/contrib/mdm/forms.py | 7aa53ae26b0622c2703c47135a545d469bfa1415 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-commercial-license"
] | permissive | neocode12/zentral | 7b05aeeb823a5a3d7d268cc2b01e0bf1a5e4be71 | 9ecc8d8334148627fcccaa875f100adacd7a018b | refs/heads/main | 2023-04-09T12:06:45.355559 | 2023-03-15T14:05:05 | 2023-03-15T14:05:05 | 327,651,549 | 0 | 0 | Apache-2.0 | 2021-01-07T15:30:00 | 2021-01-07T15:30:00 | null | UTF-8 | Python | false | false | 29,637 | py | import base64
import hashlib
import json
import logging
import plistlib
from dateutil import parser
from django import forms
from django.db import transaction
from django.db.models import Q
from realms.utils import build_password_hash_dict
from .app_manifest import build_enterprise_app_manifest
from .apps_books import AppsBooksClient
from .crypto import load_push_certificate_and_key
from .declarations import update_blueprint_declaration_items
from .dep import decrypt_dep_token
from .dep_client import DEPClient
from .models import (Artifact, ArtifactType, ArtifactVersion, BlueprintArtifact, Channel,
DEPDevice, DEPOrganization, DEPEnrollment, DEPToken, DEPVirtualServer,
EnrolledDevice, EnterpriseApp, Platform,
SCEPConfig,
OTAEnrollment, UserEnrollment, PushCertificate,
Profile, Location, LocationAsset, StoreApp)
logger = logging.getLogger("zentral.contrib.mdm.forms")
class OTAEnrollmentForm(forms.ModelForm):
class Meta:
model = OTAEnrollment
fields = ("name", "realm", "push_certificate",
"scep_config", "scep_verification",
"blueprint")
class UserEnrollmentForm(forms.ModelForm):
class Meta:
model = UserEnrollment
fields = ("name", "realm", "push_certificate",
"scep_config", "scep_verification",
"blueprint")
class UserEnrollmentEnrollForm(forms.Form):
managed_apple_id = forms.EmailField(label="Email", required=True)
class PushCertificateForm(forms.ModelForm):
certificate_file = forms.FileField(required=True)
key_file = forms.FileField(required=True)
key_password = forms.CharField(widget=forms.PasswordInput, required=False)
class Meta:
model = PushCertificate
fields = ("name",)
def clean(self):
cleaned_data = super().clean()
certificate_file = cleaned_data.pop("certificate_file", None)
key_file = cleaned_data.pop("key_file", None)
key_password = cleaned_data.pop("key_password", None)
if certificate_file and key_file:
try:
push_certificate_d = load_push_certificate_and_key(
certificate_file.read(),
key_file.read(), key_password
)
except ValueError as e:
raise forms.ValidationError(str(e))
except Exception:
raise forms.ValidationError("Could not load certificate or key file")
if self.instance.topic:
if push_certificate_d["topic"] != self.instance.topic:
raise forms.ValidationError("The new certificate has a different topic")
else:
if PushCertificate.objects.filter(topic=push_certificate_d["topic"]):
raise forms.ValidationError("A difference certificate with the same topic already exists")
cleaned_data["push_certificate_d"] = push_certificate_d
return cleaned_data
def save(self):
push_certificate_d = self.cleaned_data.pop("push_certificate_d")
self.instance.name = self.cleaned_data["name"]
for k, v in push_certificate_d.items():
if k == "private_key":
self.instance.set_private_key(v)
else:
setattr(self.instance, k, v)
self.instance.save()
return self.instance
class EnrolledDeviceSearchForm(forms.Form):
q = forms.CharField(required=False,
widget=forms.TextInput(attrs={"placeholder": "Serial number, UDID",
"autofocus": True}))
def get_queryset(self):
qs = EnrolledDevice.objects.all().order_by("-updated_at")
q = self.cleaned_data.get("q")
if q:
qs = qs.filter(Q(serial_number__icontains=q) | Q(udid__icontains=q))
return qs
class EncryptedDEPTokenForm(forms.ModelForm):
encrypted_token = forms.FileField(label="Server token", required=False)
class Meta:
model = DEPToken
fields = []
def clean(self):
encrypted_token = self.cleaned_data["encrypted_token"]
if encrypted_token:
payload = encrypted_token.read()
try:
data = decrypt_dep_token(self.instance, payload)
kwargs = {k: data.get(k) for k in ("consumer_key", "consumer_secret",
"access_token", "access_secret")}
account_d = DEPClient(**kwargs).get_account()
except Exception:
self.add_error("encrypted_token", "Could not read or use encrypted token")
else:
self.cleaned_data["decrypted_dep_token"] = data
self.cleaned_data["account"] = account_d
else:
self.add_error("encrypted_token", "This field is mandatory")
return self.cleaned_data
def save(self):
# token
dep_token = super().save()
for k, v in self.cleaned_data["decrypted_dep_token"].items():
if k == "access_secret":
dep_token.set_access_secret(v)
elif k == "consumer_secret":
dep_token.set_consumer_secret(v)
else:
setattr(dep_token, k, v)
dep_token.save()
account_d = self.cleaned_data["account"]
# organization
organization, _ = DEPOrganization.objects.update_or_create(
identifier=account_d.pop("org_id"),
defaults={"name": account_d.pop("org_name"),
"admin_id": account_d.pop("admin_id"),
"email": account_d.pop("org_email"),
"phone": account_d.pop("org_phone"),
"address": account_d.pop("org_address"),
"type": account_d.pop("org_type"),
"version": account_d.pop("org_version")}
)
# virtual server
account_d = self.cleaned_data["account"]
server_uuid = account_d.pop("server_uuid")
defaults = {"name": account_d["server_name"],
"organization": organization,
"token": dep_token}
try:
virtual_server = DEPVirtualServer.objects.get(uuid=server_uuid)
except DEPVirtualServer.DoesNotExist:
DEPVirtualServer.objects.create(uuid=server_uuid, **defaults)
else:
# we do not use update_or_create to be able to remove the old dep token
old_token = virtual_server.token
for attr, val in defaults.items():
setattr(virtual_server, attr, val)
virtual_server.save()
if old_token and old_token != dep_token:
old_token.delete()
return dep_token
class CreateDEPEnrollmentForm(forms.ModelForm):
admin_password = forms.CharField(required=False, widget=forms.PasswordInput)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
field_order = [
"push_certificate", "scep_config", "scep_verification",
"blueprint",
"virtual_server", "name",
"allow_pairing", "is_supervised", "is_mandatory", "is_mdm_removable", "is_multi_user",
"await_device_configured", "auto_advance_setup", "include_tls_certificates",
"support_phone_number", "support_email_address",
"org_magic", "department", "language", "region"
]
for pane, initial in self.Meta.model.SKIPPABLE_SETUP_PANES:
if self.instance.pk:
initial = pane in self.instance.skip_setup_items
self.fields[pane] = forms.BooleanField(label="Skip {} pane".format(pane), initial=initial, required=False)
field_order.append(pane)
field_order.extend(["realm", "use_realm_user", "realm_user_is_admin",
"admin_full_name", "admin_short_name", "admin_password"])
self.order_fields(field_order)
self.fields["language"].choices = sorted(self.fields["language"].choices, key=lambda t: (t[1], t[0]))
self.fields["region"].choices = sorted(self.fields["region"].choices, key=lambda t: (t[1], t[0]))
class Meta:
model = DEPEnrollment
fields = "__all__"
def clean_is_mdm_removable(self):
is_mdm_removable = self.cleaned_data.get("is_mdm_removable")
is_supervised = self.cleaned_data.get("is_supervised")
if not is_mdm_removable and not is_supervised:
raise forms.ValidationError("Can only be set to False if 'Is supervised' is set to True")
return is_mdm_removable
def clean_use_realm_user(self):
realm = self.cleaned_data.get("realm")
use_realm_user = self.cleaned_data.get("use_realm_user")
if use_realm_user and not realm:
raise forms.ValidationError("This option is only valid if a 'realm' is selected")
return use_realm_user
def clean_realm_user_is_admin(self):
use_realm_user = self.cleaned_data.get("use_realm_user")
realm_user_is_admin = self.cleaned_data.get("realm_user_is_admin")
if realm_user_is_admin and not use_realm_user:
raise forms.ValidationError("This option is only valid if the 'use realm user' option is ticked too")
return realm_user_is_admin
def clean_admin_password(self):
password = self.cleaned_data.get("admin_password")
if password:
self.cleaned_data["admin_password_hash"] = build_password_hash_dict(password)
def admin_info_incomplete(self):
return len([attr for attr in (
self.cleaned_data.get(i)
for i in ("admin_full_name", "admin_short_name", "admin_password_hash")
) if attr]) in (1, 2)
def clean(self):
super().clean()
skip_setup_items = []
for pane, initial in self.Meta.model.SKIPPABLE_SETUP_PANES:
if self.cleaned_data.get(pane, False):
skip_setup_items.append(pane)
if self.admin_info_incomplete():
raise forms.ValidationError("Admin information incomplete")
self.cleaned_data['skip_setup_items'] = skip_setup_items
def save(self, *args, **kwargs):
commit = kwargs.pop("commit", True)
kwargs["commit"] = False
dep_profile = super().save(**kwargs)
dep_profile.skip_setup_items = self.cleaned_data["skip_setup_items"]
dep_profile.admin_password_hash = self.cleaned_data.get("admin_password_hash")
if commit:
dep_profile.save()
return dep_profile
class UpdateDEPEnrollmentForm(CreateDEPEnrollmentForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["language"].choices = sorted(self.fields["language"].choices, key=lambda t: (t[1], t[0]))
self.fields["region"].choices = sorted(self.fields["region"].choices, key=lambda t: (t[1], t[0]))
class Meta:
model = DEPEnrollment
exclude = ("virtual_server",)
def clean_admin_password(self):
password = self.cleaned_data.get("admin_password")
if password:
self.cleaned_data["admin_password_hash"] = build_password_hash_dict(password)
else:
self.cleaned_data["admin_password_hash"] = self.instance.admin_password_hash
class AssignDEPDeviceEnrollmentForm(forms.ModelForm):
class Meta:
model = DEPDevice
fields = ("enrollment",)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.instance.pk:
profile_f = self.fields["enrollment"]
profile_f.queryset = profile_f.queryset.filter(virtual_server=self.instance.virtual_server)
class UploadEnterpriseAppForm(forms.Form):
package = forms.FileField(required=True,
help_text="macOS distribution package (.pkg)")
def clean(self):
package = self.cleaned_data.get("package")
if package:
try:
title, product_id, product_version, manifest, bundles = build_enterprise_app_manifest(package)
except Exception as e:
raise forms.ValidationError(f"Invalid package: {e}")
if title is None:
title = product_id
name = f"{title} - {product_version}"
if (
Artifact.objects.exclude(
artifactversion__enterprise_app__product_id=product_id,
artifactversion__enterprise_app__product_version=product_version
).filter(name=name).count()
):
raise forms.ValidationError(
"An artifact with the same name but a different product already exists"
)
self.cleaned_data["name"] = name
self.cleaned_data["filename"] = package.name
self.cleaned_data["product_id"] = product_id
self.cleaned_data["product_version"] = product_version
self.cleaned_data["manifest"] = manifest
self.cleaned_data["bundles"] = bundles
return self.cleaned_data
def save(self):
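        # Versioning logic: reuse the Artifact for a known
        # (product_id, product_version) pair, create a new ArtifactVersion
        # only when the manifest actually changed, and report the operation
        # ("created", "updated" or None) back to the caller.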
cleaned_data = self.cleaned_data
product_id = cleaned_data["product_id"]
product_version = cleaned_data["product_version"]
name = cleaned_data.pop("name")
operation = None
enterprise_apps = (EnterpriseApp.objects.select_for_update()
.filter(product_id=product_id,
product_version=product_version)
.select_related("artifact_version__artifact"))
enterprise_app = enterprise_apps.order_by("-artifact_version__version").first()
if enterprise_app is None:
operation = "created"
artifact = Artifact.objects.create(name=name,
type=ArtifactType.EnterpriseApp.name,
channel=Channel.Device.name,
platforms=[Platform.macOS.name])
artifact_version = ArtifactVersion.objects.create(artifact=artifact, version=1)
EnterpriseApp.objects.create(artifact_version=artifact_version, **cleaned_data)
else:
artifact = enterprise_app.artifact_version.artifact
if enterprise_app.manifest != cleaned_data["manifest"]:
operation = "updated"
artifact_version = ArtifactVersion.objects.create(artifact=artifact,
version=enterprise_app.artifact_version.version + 1)
EnterpriseApp.objects.create(artifact_version=artifact_version, **cleaned_data)
artifact.name = name
artifact.trashed_at = None
artifact.save()
return artifact, operation
class UploadProfileForm(forms.Form):
source_file = forms.FileField(required=True,
help_text="configuration profile file (.mobileconfig)")
def clean(self):
source_file = self.cleaned_data.get("source_file")
if source_file:
try:
payload = plistlib.load(source_file)
except Exception:
self.add_error("source_file", "This file is not a plist.")
return self.cleaned_data
# identifier
try:
self.cleaned_data["payload_identifier"] = payload_identifier = payload["PayloadIdentifier"]
except KeyError:
self.add_error("source_file", "Missing PayloadIdentifier.")
return self.cleaned_data
# existing profile
self.cleaned_data["profile"] = profile = (
Profile.objects.select_for_update()
.filter(payload_identifier=payload_identifier)
.select_related("artifact_version__artifact")
.first()
)
# channel
payload_scope = payload.get("PayloadScope", "User")
if payload_scope == "System":
channel = Channel.Device.name
elif payload_scope == "User":
channel = Channel.User.name
else:
self.add_error("source_file", f"Unknown PayloadScope: {payload_scope}.")
return self.cleaned_data
if profile and channel != profile.artifact_version.artifact.channel:
self.add_error(
"source_file",
"Existing profile with same payload identifier has a different channel."
)
return self.cleaned_data
self.cleaned_data["channel"] = channel
# uuid
try:
self.cleaned_data["payload_uuid"] = payload["PayloadUUID"]
except KeyError:
self.add_error("source_file", "Missing PayloadUUID.")
return self.cleaned_data
# other keys
for payload_key, obj_key in (("PayloadDisplayName", "payload_display_name"),
("PayloadDescription", "payload_description")):
self.cleaned_data[obj_key] = payload.get(payload_key) or ""
# name check
name = self.cleaned_data.get("payload_display_name") or self.cleaned_data["payload_uuid"]
if (
Artifact.objects.exclude(
artifactversion__profile__payload_identifier=self.cleaned_data["payload_identifier"]
).filter(name=name).count()
):
self.add_error(
"source_file",
"An artifact with the same name but a different payload identifier already exists."
)
return self.cleaned_data
self.cleaned_data["name"] = name
# source
source_file = self.cleaned_data.pop("source_file")
source_file.seek(0)
self.cleaned_data["source"] = source_file.read()
self.cleaned_data["filename"] = source_file.name
return self.cleaned_data
def save(self):
cleaned_data = self.cleaned_data
name = cleaned_data.pop("name")
channel = cleaned_data.pop("channel")
profile = cleaned_data.pop("profile")
operation = None
if profile is None:
operation = "created"
artifact = Artifact.objects.create(name=name,
type=ArtifactType.Profile.name,
channel=channel,
platforms=Platform.all_values())
artifact_version = ArtifactVersion.objects.create(artifact=artifact, version=1)
Profile.objects.create(artifact_version=artifact_version, **cleaned_data)
else:
artifact = profile.artifact_version.artifact
if profile.source.tobytes() != cleaned_data["source"]:
operation = "updated"
artifact_version = ArtifactVersion.objects.create(artifact=artifact,
version=profile.artifact_version.version + 1)
Profile.objects.create(artifact_version=artifact_version, **cleaned_data)
artifact.name = name
artifact.channel = channel
artifact.trashed_at = None
artifact.save()
elif artifact.trashed_at:
artifact.trashed_at = None
artifact.save()
for blueprint_artifact in artifact.blueprintartifact_set.select_related("blueprint").all():
update_blueprint_declaration_items(blueprint_artifact.blueprint, commit=True)
return artifact, operation
class PlatformsWidget(forms.CheckboxSelectMultiple):
def __init__(self, attrs=None, choices=()):
super().__init__(attrs, choices=Platform.choices())
def format_value(self, value):
if isinstance(value, str) and value:
value = [v.strip() for v in value.split(",")]
return super().format_value(value)
class UpdateArtifactForm(forms.ModelForm):
class Meta:
model = Artifact
fields = ("platforms",)
widgets = {"platforms": PlatformsWidget}
class BlueprintArtifactForm(forms.ModelForm):
class Meta:
model = BlueprintArtifact
fields = ("blueprint", "priority", "install_before_setup_assistant", "auto_update")
def __init__(self, *args, **kwargs):
self.artifact = kwargs.pop("artifact")
super().__init__(*args, **kwargs)
current_blueprint_pk = None
if self.instance.pk:
current_blueprint_pk = self.instance.blueprint.pk
        excluded_bpa_pk = [
            bpa.blueprint.pk for bpa in self.artifact.blueprintartifact_set.all()
            if bpa.blueprint.pk != current_blueprint_pk
        ]
        bpqs = self.fields["blueprint"].queryset
        bpqs = bpqs.exclude(pk__in=excluded_bpa_pk)
self.fields["blueprint"].queryset = bpqs
def save(self, *args, **kwargs):
self.instance.artifact = self.artifact
return super().save(*args, **kwargs)
class SCEPConfigForm(forms.ModelForm):
class Meta:
model = SCEPConfig
fields = "__all__"
class LocationForm(forms.ModelForm):
server_token_file = forms.FileField(
required=True,
help_text="Server token (*.vpptoken), downloaded from the Apple business manager."
)
class Meta:
model = Location
fields = []
def clean(self):
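        # Validation pipeline: decode the base64/JSON server token, reject
        # duplicates by SHA-1 hash, then query Apple's Apps and Books API
        # for the organization name and client config before accepting it.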
server_token_file = self.cleaned_data["server_token_file"]
if not server_token_file:
return
raw_server_token = server_token_file.read()
server_token = raw_server_token.decode("utf-8")
# base64 + json test
try:
server_token_json = json.loads(base64.b64decode(raw_server_token))
except ValueError:
self.add_error("server_token_file", "Not a valid server token")
return
# token hash
server_token_hash = hashlib.sha1(raw_server_token).hexdigest()
test_qs = Location.objects.filter(server_token_hash=server_token_hash)
if self.instance.pk:
test_qs = test_qs.exclude(pk=self.instance.pk)
if test_qs.count():
self.add_error("server_token_file", "A location with the same server token already exists.")
return
self.cleaned_data["server_token_hash"] = server_token_hash
try:
self.cleaned_data["organization_name"] = server_token_json["orgName"]
except Exception:
self.add_error("server_token_file", "Could not get organization name.")
return
ab_client = AppsBooksClient(server_token)
try:
config = ab_client.get_client_config()
except Exception:
msg = "Could not get client information"
logger.exception(msg)
self.add_error("server_token_file", msg)
return
for config_attr, model_attr in (("countryISO2ACode", "country_code"),
("uId", "library_uid"),
("locationName", "name"),
("defaultPlatform", "platform"),
("websiteURL", "website_url")):
val = config.get(config_attr)
if not isinstance(val, str):
self.add_error("server_token_file", f"Missing or bad {config_attr}.")
else:
self.cleaned_data[model_attr] = val
try:
self.cleaned_data["server_token_expiration_date"] = parser.parse(config["tokenExpirationDate"])
except KeyError:
self.add_error("server_token_file", "Missing tokenExpirationDate.")
return
except Exception:
msg = "Could not parse server token expiration date."
logger.exception(msg)
self.add_error("server_token_file", msg)
return
self.cleaned_data["server_token"] = server_token
return self.cleaned_data
def save(self):
location = super().save(commit=False)
for attr in ("server_token_hash",
"server_token_expiration_date",
"organization_name",
"country_code",
"library_uid",
"name",
"platform",
"website_url"):
setattr(location, attr, self.cleaned_data[attr])
notification_auth_token = location.set_notification_auth_token()
location.save()
location.set_server_token(self.cleaned_data["server_token"])
location.save()
def update_client_config():
ab_client = AppsBooksClient.from_location(location)
ab_client.update_client_config(notification_auth_token)
# TODO: retry
transaction.on_commit(update_client_config)
return location
class StoreAppForm(forms.ModelForm):
configuration = forms.CharField(
required=False,
widget=forms.Textarea(attrs={"rows": 10}),
help_text="The property list representation of the managed app configuration."
)
class Meta:
model = StoreApp
fields = (
"associated_domains", "associated_domains_enable_direct_downloads",
"removable",
"vpn_uuid",
"content_filter_uuid",
"dns_proxy_uuid",
"remove_on_unenroll",
"prevent_backup"
)
widgets = {
"vpn_uuid": forms.TextInput,
"content_filter_uuid": forms.TextInput,
"dns_proxy_uuid": forms.TextInput
}
def clean_configuration(self):
configuration = self.cleaned_data.pop("configuration")
if configuration:
if configuration.startswith("<dict>"):
# to make it easier for the users
configuration = f'<plist version="1.0">{configuration}</plist>'
try:
loaded_configuration = plistlib.loads(configuration.encode("utf-8"))
except Exception:
raise forms.ValidationError("Invalid property list")
if not isinstance(loaded_configuration, dict):
raise forms.ValidationError("Not a dictionary")
configuration = plistlib.dumps(loaded_configuration)
return configuration
def clean(self):
super().clean()
# update configuration
configuration = self.cleaned_data.get("configuration")
if configuration:
self.instance.configuration = configuration
else:
self.instance.configuration = None
class LocationAssetChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return str(obj.location)
class CreateAssetArtifactForm(StoreAppForm):
location_asset = LocationAssetChoiceField(label="Location", queryset=LocationAsset.objects.none(), required=True)
name = forms.CharField(required=True)
field_order = (
"location_asset",
"name",
"removable",
"remove_on_unenroll",
"prevent_backup",
"configuration",
"associated_domains",
"associated_domains_enable_direct_downloads",
"vpn_uuid",
"content_filter_uuid",
"dns_proxy_uuid"
"configuration",
)
class Meta(StoreAppForm.Meta):
fields = (
"location_asset",
"associated_domains", "associated_domains_enable_direct_downloads",
"removable",
"vpn_uuid",
"content_filter_uuid",
"dns_proxy_uuid",
"remove_on_unenroll",
"prevent_backup"
)
def clean_name(self):
name = self.cleaned_data.get("name")
if Artifact.objects.filter(name=name).count():
raise forms.ValidationError("An artifact with this name already exists")
return name
def __init__(self, *args, **kwargs):
self.asset = kwargs.pop("asset")
super().__init__(*args, **kwargs)
# location qs
self.fields["location_asset"].queryset = self.asset.locationasset_set.all()
# default name
name = self.asset.name
for i in range(1, 11):
if Artifact.objects.filter(name=name).count() == 0:
break
name = f"{self.asset.name} ({i})"
else:
logger.error("Could not find unique name for asset %s", self.asset.pk)
name = self.asset.name
self.fields["name"].initial = name
def save(self):
artifact = Artifact.objects.create(
name=self.cleaned_data["name"],
type=ArtifactType.StoreApp.name,
channel=Channel.Device.name,
platforms=self.asset.supported_platforms,
)
artifact_version = ArtifactVersion.objects.create(
artifact=artifact,
)
store_app = super().save(commit=False)
store_app.artifact_version = artifact_version
store_app.save()
return store_app
| [
"[email protected]"
] | |
9fbef7fb8b455016bf7a16b47c1eb1263256544f | 011157c49983db38489f26f51db7fe22f8519afc | /problems/977.py | 035f621ca2688c4bba3cd595ee1a4ef4aab09562 | [] | no_license | chasecolford/Leetcode | c0054774d99e7294419039f580c1590495f950b3 | dded74e0c6e7a6c8c8df58bed3640864d0ae3b91 | refs/heads/master | 2023-08-04T11:33:18.003570 | 2021-09-10T21:06:55 | 2021-09-10T21:06:55 | 283,154,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | """
Given an array of integers A sorted in non-decreasing order, return an array of the squares of each number, also in sorted non-decreasing order.
Example 1:
Input: [-4,-1,0,3,10]
Output: [0,1,9,16,100]
Example 2:
Input: [-7,-3,2,3,11]
Output: [4,9,9,49,121]
Note:
1 <= A.length <= 10000
-10000 <= A[i] <= 10000
A is sorted in non-decreasing order.
"""
class Solution(object):
def sortedSquares(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
temp = []
for i in A:
temp.append(i**2)
return sorted(temp) | [
"[email protected]"
] | |
67e7e62ac85175645e06c74f6e4061d903339161 | 26d065c72e4f8dccaf47c67cedd77228fcf12127 | /gps/gps.py | 6d7e924fd0b0824081c62d1d360f805312e1a548 | [] | no_license | lucaskotres/Expert-Systems | a22ebbfa8aec71b15d3a34c47054a56adb7915dd | 2ef749879a7b44554eee5a07d3dc4f2af3863ae1 | refs/heads/master | 2022-12-01T12:59:58.695572 | 2020-08-24T01:07:41 | 2020-08-24T01:07:41 | 289,792,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,592 | py |
def gps(initial_states, goal_states, operators):
    prefix = 'Executing '
for operator in operators:
operator['add'].append(prefix + operator['action'])
final_states = achieve_all(initial_states, operators, goal_states, [])
if not final_states:
return None
return [state for state in final_states if state.startswith(prefix)]
def achieve_all(states, ops, goals, goal_stack):
for goal in goals:
states = achieve(states, ops, goal, goal_stack)
if not states:
return None
for goal in goals:
if goal not in states:
return None
return states
def achieve(states, operators, goal, goal_stack):
debug(len(goal_stack), 'Achieving: %s' % goal)
if goal in states:
return states
if goal in goal_stack:
return None
for op in operators:
if goal not in op['add']:
continue
result = apply_operator(op, states, operators, goal, goal_stack)
if result:
return result
def apply_operator(operator, states, ops, goal, goal_stack):
debug(len(goal_stack), 'Consider: %s' % operator['action'])
result = achieve_all(states, ops, operator['preconds'], [goal] + goal_stack)
if not result:
return None
debug(len(goal_stack), 'Action: %s' % operator['action'])
add_list, delete_list = operator['add'], operator['delete']
return [state for state in result if state not in delete_list] + add_list
import logging
def debug(level, msg):
logging.debug(' %s %s' % (level * ' ', msg)) | [
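# Minimal usage sketch (editor's addition; the toy domain below is
# invented for illustration and is not part of the original module):
if __name__ == '__main__':
    ops = [{'action': 'drive to school',
            'preconds': ['car works'],
            'add': ['at school'],
            'delete': ['at home']}]
    # Expected output: ['Executing drive to school']
    print(gps(['at home', 'car works'], ['at school'], ops))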
"[email protected]"
] | |
620786b8f15687edf3669042c8154777773f288f | 9bdc6b5b88ed3d9bf631210e5232afce10d9f51b | /maze.py | 3b1b56f91b25c5480689927cb10df60f53d1f746 | [] | no_license | ghe/maze | 7b65a40381cff86e2a8d108b3f80caaad5a0ae3f | 2e52b69e328059962f4777c2a5f12b3dfa9ae0a8 | refs/heads/master | 2016-09-05T15:42:31.528763 | 2014-10-29T03:14:10 | 2014-10-29T03:14:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,829 | py | import random
class Room(object):
directions = ["North", "South", "East", "West", "Up", "Down"]
next_id = 65
def __init__(self):
self._hallways = {}
self._name = chr(Room.next_id)
Room.next_id += 1
def __str__(self):
s = "%s: " % (self.name,)
for d in self._hallways:
s += "%s -> %s, " % (d, self.adjacent(d).name)
return s
@property
def name(self):
return self._name
def connect(self, direction, room):
if direction in Room.directions:
if not direction in self._hallways:
self._hallways[direction] = room
return True
return False
def connected_dirs(self):
return list(set(Room.directions).intersection(self._hallways))
def free_dirs(self):
return list(set(Room.directions).difference(self._hallways))
def can_connect(self):
        return len(self.free_dirs()) > 0
def rand_connect(self, room):
free = self.free_dirs()
if len(free) > 0:
freedir = random.choice(free)
return self.connect(freedir, room)
return False
def adjacent(self, direction):
if direction in Room.directions:
if direction in self._hallways:
return self._hallways[direction]
return None
rooms = [Room() for _ in range(4)]
def bidir_connect(room_a, room_b):
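    # Editor's note: links the two rooms in both directions, each on a
    # randomly chosen free wall; if the second link fails, the first one
    # is not rolled back, so a one-way hallway can remain.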
    if room_a.can_connect():
if room_b.rand_connect(room_a):
return room_a.rand_connect(room_b)
return False
#connect all rooms in a chain
last_room = None
for room in rooms:
if last_room:
bidir_connect(last_room, room)
last_room = room
#add some more connections
for room in rooms:
bidir_connect(room, random.choice(rooms))
for room in rooms:
print(room)
| [
"[email protected]"
] | |
33e8f7f92f2887a69a0c5b50443e0f1c5b971d3a | 83c5f79fb96c02d1b10c5b55cd843a3941dbc3ab | /oldPythonScripts/reading_in_files_fuzz_attempt2.py | 06038869bffbc653a9d02ba38a8017ba97cb26c7 | [] | no_license | mnfienen/fuzztest | be9bcaea79298dffd469770f3c3373c7328c7944 | 7067cfffed2a175bfaeb29bfe400021037e35e5a | refs/heads/master | 2016-09-01T18:54:46.539895 | 2012-11-01T16:29:22 | 2012-11-01T16:29:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,035 | py | import numpy as np
import csv
import shapefile as shp
import os
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
def parse_fields(allfields):
# make an empty list
fields_list = []
# loop over the input fields list and pull out the names
for cf in allfields:
fields_list.append(cf[0].lower())
# give back a list of just the names
return np.array(fields_list)
#reader_input = csv.reader(open('glfwc_export.txt','rb'),delimiter = ',',quotechar='"')
# make a path that doesn't care if it's on windoze or a superior operating system like linux mac or anyone else
inpath = os.path.join('glfwc_experimental_shapefile','GLIFWC_points')
inpath2 = os.path.join('Fish_experimental_test_sites','wdnr_sites')
# make a shapefile object from the shapefile root name
inshape = shp.Reader(inpath)
inshape2 = shp.Reader(inpath2)
# we can pull out various object properties
allfields = inshape.fields
allrecords = inshape.records()
allfields2 = inshape2.fields
allrecords2 = inshape2.records()
# yank out the FIELD NAMES ONLY from the crufty fields list
# call parse_fields function
justfields = parse_fields(allfields)
#print justfields
# print to test the squeezingly wheritude
np.set_printoptions(precision=2, suppress=True)
#print allrecords[0][np.squeeze(np.where(justfields=='site_descr'.lower()))-1]
# pull all the Comments
# first make an empty list for results
comments_only = []
for cr in allrecords:
comments_only.append(cr[np.squeeze(np.where(justfields=='comments'.lower()))-1])
# use fuzzy ratio to compare nested string lists
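# (fuzz.ratio returns an integer similarity score between 0 and 100;
#  e.g. fuzz.ratio('lake', 'lakes') is roughly 89)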
def fuzz_it(fieldNames1, fieldNames2):
    # Assumed intent, reconstructed from the original draft loops: score
    # every (description, name, waterbody) tuple from the first file
    # against every tuple from the second file on the description field.
    for value in fieldNames1:
        for subvalue in fieldNames2:
            score = fuzz.ratio(value[0], subvalue[0])
            print value[0], '|', subvalue[0], '|', score
def first_file_fields(allrecords):
# pull all site descriptions
site_description_only = []
for cr in allrecords:
site_description_only.append(cr[np.squeeze(np.where(justfields=='site_descr'.lower()))-1])
# pull all site names
site_name_only = []
for cr in allrecords:
site_name_only.append(cr[np.squeeze(np.where(justfields=='site_name'.lower()))-1])
# pull all waterbodies
waterbody_only = []
for cr in allrecords:
waterbody_only.append(cr[np.squeeze(np.where(justfields=='waterbody_'.lower()))-1])
# nest up the field lists into a nice list of lists
all_sites = zip(site_description_only, site_name_only, waterbody_only)
for i in all_sites:
print i
#print len(all_sites),"ALL relevant fields"
#print all_sites, type(all_sites)
global fieldNames1
fieldNames1 = all_sites
def second_file_fields(allrecords2):
# pull all site descriptions
site_description_only = []
for cr in allrecords2:
site_description_only.append(cr[np.squeeze(np.where(justfields=='site_descr'.lower()))-1])
# pull all site names
site_name_only = []
for cr in allrecords2:
site_name_only.append(cr[np.squeeze(np.where(justfields=='site_name'.lower()))-1])
# pull all waterbodies
waterbody_only = []
for cr in allrecords2:
waterbody_only.append(cr[np.squeeze(np.where(justfields=='waterbody_'.lower()))-1])
# nest up the field lists into a nice list of lists
all_sites = zip(site_description_only, site_name_only, waterbody_only)
global fieldNames2
fieldNames2 = all_sites
fuzz_it(fieldNames1, fieldNames2)
first_file_fields(allrecords)
second_file_fields(allrecords2)
| [
"[email protected]"
] | |
73010681591a7f85d82ed79e2c13ddcc3221d9e9 | 7543bd745b3637138cb5d14ee1fe7d9791f88385 | /week 6/problem set 6: readability/readability.py | 6cba57027c7a1d45a11c2ca971d60a52424bff48 | [] | no_license | ModruKinstealer/CS50ProblemSets | 43a72e75575c6b32805a4556adbd1ef3b4837627 | 888b0d186f657eb6308ce14b2630864f6b4cdb31 | refs/heads/master | 2022-10-11T13:20:44.222634 | 2022-09-20T00:25:48 | 2022-09-20T00:25:48 | 171,741,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | # TODO
from cs50 import get_string
from sys import exit
text = get_string("Text: ")
letters = 0
words = len(text.split())
sentences = text.count("?") + text.count(".") + text.count("!")
for letter in text:
if letter.isalpha():
letters += 1
# Coleman-Liau index is computed as 0.0588 * L - 0.296 * S - 15.8
# L is the average number of letters per 100 words in the text
# S is the average number of sentences per 100 words in the text.
L = (letters / words) * 100
S = (sentences / words) * 100
index = 0.0588 * L - 0.296 * S - 15.8
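# Worked example (editor's addition): for the text "One fish. Two fish."
# letters = 14, words = 4, sentences = 2, so L = 350.0 and S = 50.0, and
# index = 0.0588*350 - 0.296*50 - 15.8 = -10.02, i.e. "Before Grade 1".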
if index < 1:
print("Before Grade 1")
elif index > 16:
print("Grade 16+")
else:
print(f"Grade {round(index)}")
| [
"[email protected]"
] | |
060d93de1b98a3827caea4b362264c3f865e5bf1 | bcf39edb9d991527ea79df8a6d52cc1253c4178b | /RandomForest/load_salaryprediction.py | 4ec7f11459d0dddee57c364fd835b02d979e2e55 | [] | no_license | dmt3182/DMT_MASTER_REPO | 73c8ff184d182b2ae0ac3c0e40e93a51dd9ca567 | c5484c927e0b26f622ee73ee897524ed16901055 | refs/heads/main | 2023-04-29T07:32:59.686018 | 2021-05-16T11:11:02 | 2021-05-16T11:11:02 | 355,144,838 | 0 | 0 | null | 2021-05-01T16:17:57 | 2021-04-06T10:16:53 | Python | UTF-8 | Python | false | false | 1,372 | py | import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import export_graphviz
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
import joblib
data = pd.read_csv(r'E:\1.DEEPAK Data Science\GITREPO\DMT_MASTER_REPO\RandomForest\Salary.csv')
X = data['YearsExperience'].values.reshape(-1,1)
Y = data['Salary']
X_train, X_test, Y_train,Y_test = train_test_split( X,Y,test_size = 0.2,random_state = 42 )
joblib_model = joblib.load('savemodel.pkl')
# # Predict
y_pred = joblib_model.predict(X_test)
print (y_pred)
# R^2 of the model on the held-out set (score expects features, targets)
s = joblib_model.score(X_test, Y_test)
print ('Score :', s)
df=pd.DataFrame({'Actual':Y_test, 'Predicted':y_pred})
print (df)
print('MAE :' , mean_absolute_error(Y_test,y_pred))
print('MSE :' , mean_squared_error(Y_test,y_pred))
print('R2 :' , r2_score(Y_test,y_pred))
# Feature importance
importance = joblib_model.feature_importances_
# summarize feature importance
for i,v in enumerate(importance):
print('Feature: %0d, Score: %.5f' % (i,v))
# # plot feature importance
# plt.bar([x for x in range(len(importance))], importance)
# plt.show()
| [
"[email protected]"
] | |
f390bf894bb179ecd6b1f1424fcc5d1d824ce593 | a5be28057bb55e0ebc4a25fce90143ad50c77f5c | /Data processing, runs generator and utility file/FunctionApproximator.py | af38ac61c7d9148c02e260052c7c0c2071fec486 | [] | no_license | vijetadeshpande/meta-environment | fa76d1c9dbb220c376a2a8f7b3b8b50c0b9a0b41 | 87eb12826a11e57e3ef09baf616959dae1c38fc8 | refs/heads/master | 2023-02-04T14:59:03.826016 | 2020-12-14T19:08:33 | 2020-12-14T19:08:33 | 263,530,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,626 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 24 02:45:04 2020
@author: vijetadeshpande
"""
import torch
import torch.nn as nn
import sys
sys.path.insert(1, r'/Users/vijetadeshpande/Documents/GitHub/meta-environment/RNN GRU')
sys.path.insert(1, r'/Users/vijetadeshpande/Documents/GitHub/meta-environment/Transformer')
sys.path.insert(1, r'/Users/vijetadeshpande/Documents/GitHub/meta-environment/RNN LSTM')
sys.path.insert(1, r'/Users/vijetadeshpande/Documents/GitHub/meta-environment/RNN Vanilla')
from GRUModel import Model as GRUModel
from GRUEvaluate import evaluate as GRUEval
from TransformerEncoder import Encoder as TEnc
from TransformerDecoder import Decoder as TDec
from TransformerModel import Model as TModel
from TransformerEvaluate import evaluate as TEval
from VanillaModel import Model as VanillaModel
from VanillaEvaluate import evaluate as VanillaEval
from LSTMModel import Model as LSTMModel
from LSTMEvaluate import evaluate as LSTMEval
class GRUApproximator(nn.Module):
def __init__(self, filepath):
super().__init__()
# here we make a computational graph suited optimal RNN model
# then we load the .pt file and we are all set
# create model
INPUT_DIM, OUTPUT_DIM = 8, 3
HID_DIM, N_LAYERS, DROPOUT = 24, 2, 0
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = GRUModel(INPUT_DIM, HID_DIM, OUTPUT_DIM, N_LAYERS, DROPOUT, DEVICE)
model = model.to(DEVICE)
# load file
model.load_state_dict(torch.load(filepath))
model.eval()
# set sttribute
self.model = model
#
self.criterion = nn.MSELoss().to(DEVICE) #nn.SmoothL1Loss().to(DEVICE)
return
def forward(self, input_data, z_path, DEVICE):
# check shape
# input_tensor = [BATCH, SEQ_LEN, INPUT_DIM]
#EXAMPLES, SRC_LEN, INPUT_DIM = input_tensor[0].shape
# predict
prediction = GRUEval(self.model, input_data, self.criterion, DEVICE, z_path)
return prediction
class TransformerApproximator(nn.Module):
def __init__(self, filepath):
super().__init__()
# create model
INPUT_DIM, OUTPUT_DIM = 8, 3
N_HEADS_ENC, N_HEADS_DEC, N_LAYERS, DROPOUT = 8, 3, 1, 0.05
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# initialize encoder, decoder and seq2seq model classes
encoder = TEnc(INPUT_DIM, OUTPUT_DIM, N_HEADS_ENC, N_LAYERS, DROPOUT)
decoder = TDec(OUTPUT_DIM, N_HEADS_DEC, N_LAYERS, DROPOUT)
model = TModel(encoder, decoder)
model = model.to(DEVICE)
# load parameters
model.load_state_dict(torch.load(filepath))
model.eval()
# set attribute
self.model = model
self.criterion = nn.MSELoss().to(DEVICE)
return
def forward(self, input_data, z_path, DEVICE):
        # predict
prediction = TEval(self.model, input_data, self.criterion, DEVICE, z_path)
return prediction
class VanillaApproximator(nn.Module):
def __init__(self, filepath):
super().__init__()
# here we make a computational graph suited optimal RNN model
# then we load the .pt file and we are all set
# create model
INPUT_DIM, OUTPUT_DIM = 8, 3
HID_DIM, N_LAYERS, DROPOUT = 32, 2, 0
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = VanillaModel(INPUT_DIM, HID_DIM, OUTPUT_DIM, N_LAYERS, DROPOUT, DEVICE)
model = model.to(DEVICE)
# load file
model.load_state_dict(torch.load(filepath))
model.eval()
# set sttribute
self.model = model
#
self.criterion = nn.MSELoss().to(DEVICE) #nn.SmoothL1Loss().to(DEVICE)
return
def forward(self, input_data, z_path, DEVICE):
# check shape
# input_tensor = [BATCH, SEQ_LEN, INPUT_DIM]
#EXAMPLES, SRC_LEN, INPUT_DIM = input_tensor[0].shape
# predict
prediction = VanillaEval(self.model, input_data, self.criterion, DEVICE, z_path)
return prediction
class LSTMApproximator(nn.Module):
def __init__(self, filepath):
super().__init__()
# here we make a computational graph suited optimal RNN model
# then we load the .pt file and we are all set
# create model
INPUT_DIM, OUTPUT_DIM = 8, 3
HID_DIM, N_LAYERS, DROPOUT = 16, 2, 0
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = LSTMModel(INPUT_DIM, HID_DIM, OUTPUT_DIM, N_LAYERS, DROPOUT, DEVICE)
model = model.to(DEVICE)
# load file
model.load_state_dict(torch.load(filepath))
model.eval()
# set sttribute
self.model = model
#
self.criterion = nn.MSELoss().to(DEVICE) #nn.SmoothL1Loss().to(DEVICE)
def forward(self, input_data, z_path, DEVICE):
# check shape
# input_tensor = [BATCH, SEQ_LEN, INPUT_DIM]
#EXAMPLES, SRC_LEN, INPUT_DIM = input_tensor[0].shape
# predict
prediction = LSTMEval(self.model, input_data, self.criterion, DEVICE, z_path)
return prediction
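# Minimal usage sketch (illustrative; the checkpoint path, input batch and
# output directory are assumptions, not part of this module):
#   approximator = GRUApproximator("checkpoints/gru_best.pt")
#   prediction = approximator(input_data, z_path="results/",
#                             DEVICE=torch.device("cpu"))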
| [
"[email protected]"
] | |
a926f0874a57cd89c4d221f631eb494992969736 | 27e27dbf548b1d2688a0501cb4b5c58ae6745ffd | /Veranstaltungen/apps.py | 66c54886abf50ff8dc86d9bd4ae70a621ee56859 | [
"MIT"
] | permissive | valuehack/scholarium.at | e751f81583a24eb687ee4a5babb466ecdc902652 | b81220ab18ae6db057132ae2cc43afc22545e5e6 | refs/heads/master | 2022-12-13T05:56:52.002885 | 2018-11-23T13:22:13 | 2018-11-23T13:22:13 | 84,197,454 | 1 | 4 | MIT | 2022-12-08T00:39:44 | 2017-03-07T12:38:23 | Python | UTF-8 | Python | false | false | 105 | py | from django.apps import AppConfig
class VeranstaltungenConfig(AppConfig):
name = 'Veranstaltungen'
| [
"[email protected]"
] | |
74f430010ddc429b42ea3e7cb62b1e5722297a7b | ded4ebb64405d1127b6aa6b89c3cc46488f99810 | /Selenium/Implicit_Wait.py | c7419162010f46653105fd97cf6026b8a73e76e7 | [] | no_license | Mushtaq-Hussain/Best_Practice_of_AutomationTest | 0bbbcf81541dfa01edabd5d8f15651180b91943b | 5513f02394cb7e3fadcdfd17da0ce3eb302042c4 | refs/heads/main | 2023-02-25T01:24:16.379602 | 2021-01-21T16:11:33 | 2021-01-21T16:11:33 | 331,680,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | from selenium import webdriver
from selenium.webdriver.common.by import By
import time
class ImplicitWait():
def test(self):
driver = webdriver.Chrome(executable_path=r"C:\Users\mushtaq.hussain\PycharmProjects\Selenium\driver\chromedriver.exe")
driver.maximize_window()
driver.get("https://learn.letskodeit.com/p/practice")
driver.implicitly_wait(10)
loginLink = driver.find_element(By.XPATH, "//div[@id='navbar']//a[@href='/sign_in']")
loginLink.click()
emailField = driver.find_element(By.ID, "user_email")
emailField.send_keys("test")
ff = ImplicitWait()
ff.test() | [
"[email protected]"
] | |
2e0f274be0cf1698b720e80ef936afa5f41ae5b9 | 5b9e88b380e671be712a733a511932314114bfaa | /switch.py | 3093e9ff40ffa4a76f8c74cda14b4335989380d4 | [] | no_license | jasoneka/code_carrots | fd32e487e340c8efc41dd1cd6747fd9006608f83 | c88d9855af7b7da5918425c162c9823d33625792 | refs/heads/master | 2021-01-10T10:36:59.885174 | 2016-01-03T22:12:55 | 2016-01-03T22:12:55 | 48,961,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py |
# This class provides the functionality we want. You only need to look at
# this if you want to know how this works. It only needs to be defined
# once, no need to muck around with its internals.
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # PEP 479 (Python 3.7+): raising StopIteration inside a generator is a RuntimeError
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
| [
"[email protected]"
] | |
bb5fb0d3300035b2515c8a8f07d0d2e77578c6e6 | d85cd9218f13b4d42ef014daae0498766c1b6f18 | /user_enterpoint.py | 21c1e6112f9078d325b3d1a359ea5a067968f4d9 | [] | no_license | zhongsheng-sudo/CrazyAss | 90067f8dc0f318b9aa6ab6d46c566f58a3d66cc7 | a091485d8bed2a43ad93760fed557c3925cb7e3c | refs/heads/master | 2020-08-11T13:12:39.480678 | 2019-10-12T05:17:37 | 2019-10-12T05:17:37 | 214,570,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,030 | py | #_author: hasee
#date: 2019/10/7
import getpass,os
from django.contrib.auth import authenticate
import subprocess
import hashlib,time
class UserPortal(object):
"""用户命令行端交互入口"""
def __init__(self):
self.user = None
def user_auth(self):
"""完成用户交互"""
retry_count = 0
while retry_count < 3:
username = input("Username:").strip()
if len(username) == 0:continue
password = getpass.getpass("Password:").strip()
if len(password)== 0:
print("Password cannot be null.")
continue
user = authenticate(username= username, password = password)
if user:
self.user = user
#print("welcome login...")
return
else:
print("Invalid username or password!")
retry_count += 1
else:
exit("Too many attempts.")
def interactive(self):
"""交互函数"""
self.user_auth()
if self.user:
exit_flag = False
while not exit_flag:
for index,host_group in enumerate(self.user.host_groups.all()):
print("%s. %s[%s]" %(index,host_group.name, host_group.bind_hosts.all().count()))
print("%s. Ungrouped Hosts[%s]"%(index+1, self.user.bind_hosts.select_related().count()) )
user_input = input("Choose Group:").strip()
if len(user_input) == 0:continue
if user_input.isdigit():
user_input = int(user_input)
if user_input >= 0 and user_input < self.user.host_groups.all().count() :
selected_hostgroup = self.user.host_groups.all()[user_input]
                elif user_input == self.user.host_groups.all().count() :  # the group of ungrouped hosts was selected
                    selected_hostgroup = self.user  # this works because self.user also has a bind_hosts relation pointing at the same table as HostGroup.bind_hosts
else:
print("invalid host group")
continue
while True:
for index,bind_host in enumerate(selected_hostgroup.bind_hosts.all()):
print("%s. %s(%s user:%s)" % (index,
bind_host.host.hostname,
bind_host.host.ip_addr,
bind_host.host_user.username))
user_input2 = input("Choose Host:").strip()
if len(user_input2) == 0: continue
if user_input2.isdigit():
user_input2 = int(user_input2)
if user_input2 >= 0 and user_input2 < selected_hostgroup.bind_hosts.all().count() :
selected_bindhost = selected_hostgroup.bind_hosts.all()[user_input2]
print("logging host",selected_bindhost)
md5_str = hashlib.md5(str(time.time()).encode()).hexdigest()
login_cmd ='sshpass -p {password} /usr/local/openssh8/bin/ssh {user}@{ip_addr} -o "StrictHostKeyChecking no" -Z {md5_str}'.format(password=selected_bindhost.host_user.password,
user=selected_bindhost.host_user.username,
ip_addr=selected_bindhost.host.ip_addr,
md5_str = md5_str)
print(login_cmd)
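                            # -Z is not a stock OpenSSH flag; it appears to be an
                            # option of the custom openssh8 build invoked above,
                            # used to tag the session with the md5 identifier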
session_tracker_script=settings.SESSION_TRACKER_SCRIPT
tracker_obj=subprocess.Popen("%s %s"%(session_tracker_script,md5_str),shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
models.SessionLog.objects.create(user=self.user,bind_host=selected_bindhost,session_tag=md5_str)
ssh_instance = subprocess.run(login_cmd,shell=True)
print("------------logout---------")
print("out",tracker_obj.stdout.read())
print("err",tracker_obj.stderr.read().decode())
if user_input2 == "b":
break
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CrazyAss.settings")
import django
django.setup()
from django.conf import settings
from audit import models
portal = UserPortal()
portal.interactive()
| [
"[email protected]"
] | |
4e24f321f536b7e455988e96ee9e8a930f4d65d0 | 7d99585ae8003adec03d360d0587fe2229175514 | /dataset.py | 6a1fcc58d5be228b78c1f1dd9cd600668ed08d62 | [] | no_license | serginogues/convolutional_autoencoder | 6634a8c11cd349479bfa4838cfa3390e14417a57 | ef469d3e3698cb08536af627d839aecf0d8a54ec | refs/heads/main | 2023-09-05T08:42:01.294182 | 2021-11-16T20:05:58 | 2021-11-16T20:05:58 | 424,561,749 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,276 | py | """
Dataset class to work with CIFAR-10
torchvision.transforms.ToTensor:
Converts a PIL Image or numpy.ndarray (H x W x C) in the range [0, 255]
to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
"""
import torch
from torchvision import datasets
from torch.utils.data.dataset import random_split
from torch.utils.data import DataLoader, ConcatDataset
from torchvision import transforms
import matplotlib.pyplot as plt
import numpy as np
from config import *
def load_CIFAR10(standarize=False):
"""
Load CIFAR10 dataset
    :param standarize: if True, data is standardized to mean = 0 and standard deviation = 1
:return: train_loader, test_loader, valid_loader, classes
"""
print("...loading train and test datasets")
# transforms.ToTensor() already scales from PIL Images with range [0, 255] to Tensors with range [0, 1]
train_dataset = datasets.CIFAR10(root=DATA_PATH, train=True, download=True, transform=transforms.ToTensor())
test_dataset = datasets.CIFAR10(root=DATA_PATH, train=False, download=True, transform=transforms.ToTensor())
classes = train_dataset.classes
if standarize:
print("...computing mean and std for standarization")
train_transform = get_transform(train_dataset)
test_transform = get_transform(test_dataset)
print("...re-loading train and test normalised datasets")
train_dataset = datasets.CIFAR10(root=DATA_PATH, train=True, download=True, transform=train_transform)
test_dataset = datasets.CIFAR10(root=DATA_PATH, train=False, download=True, transform=test_transform)
print("...concatenating and splitting")
concat_dataset = ConcatDataset([train_dataset, test_dataset])
len_ = len(concat_dataset)
train_set, test_set, valid_set = random_split(concat_dataset, [round(len_ * TRAIN_SIZE), round(len_ * TEST_SIZE), round(len_ * VALIDATION_SIZE)])
print("")
print("Train samples:", len(train_set))
print("Test samples:", len(test_set))
print("Validation samples:", len(valid_set))
total_samp = len(train_set) + len(test_set) + len(valid_set)
print("Sample distribution: " + str(round((len(train_set) / total_samp) * 100))
+ "% train, " + str(round((len(test_set) / total_samp) * 100)) + "% test, "
+ str(round((len(valid_set) / total_samp) * 100)) + "% validation")
train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, num_workers=0, shuffle=True, drop_last=True)
test_loader = DataLoader(test_set, batch_size=BATCH_SIZE, num_workers=0, shuffle=False)
valid_loader = DataLoader(valid_set, batch_size=BATCH_SIZE, num_workers=0, shuffle=False)
if standarize:
# print mean and std
print("Mean = ", next(iter(train_loader))[0].mean())
print("Std = ", next(iter(train_loader))[0].std())
return train_loader, test_loader, valid_loader, classes
def get_transform(dataset):
"""
- ToTensor: transform PIL images to Tensors.
Turns the data into a 32-bit floating-point per channel, scaling the values down from 0.0 to 1.0
- Normalize: normalize data across the 3 rgb channels
compute mean and st deviation of each RGB channel and normalize values by doing:
v'[c] = (v[c] - mean[c])/stdev[c] where c is the channel index
https://www.manning.com/books/deep-learning-with-pytorch
'Keeping the data in the same range (0-1 or -1-1) means it’s more likely that neurons have nonzero gradients
thus, faster learning. Also, normalizing each channel so that it has the same distribution will ensure that
channel information can be mixed and updated through gradient descent using the same learning rate.'
The values of mean and stdev must be computed offline.'
"""
# we can work with the whole dataset because is small (60k samples)
mean, std = get_mean_std(dataset)
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((mean[0].item(), mean[1].item(), mean[2].item()),
(std[0].item(), std[1].item(), std[2].item()))
])
return transform
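# Worked example (illustrative): with a channel mean of 0.5 and a standard
# deviation of 0.25, a scaled pixel value of 0.75 is normalized to
# (0.75 - 0.5) / 0.25 = 1.0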
def get_mean_std(dataset):
"""
:param dataset: a dataset where samples are tensors (not PIL images!)
:return:
"""
all_samples = torch.stack([img_t for img_t, _ in dataset], dim=3)
# here view(3, -1) keeps the first dimension and merges the rest into 1024 elements. So 3 x 1024 vector
reshaped = all_samples.view(3, -1)
mean = reshaped.mean(dim=1)
std = reshaped.std(dim=1)
return mean, std
def plot_img_dataloader(dataloader):
Xs, Ys = iter(dataloader).next()
images = Xs.numpy()
images = images / 2 + 0.5
plt.imshow(np.transpose(images[0], (1, 2, 0)))
plt.show()
def plot_img_dataset(dataset, idx=120):
"""
print label and plot image
:param idx: sample index
:param dataset: torch.utils.data.Dataset object
"""
# show samples
img, label = dataset[idx]
print("image label:", dataset.classes[label])
# since we already used the transform, type(img) = torch.Tensor
print(img.shape)
# plot with original axis before converting PIL Image to Tensor, otherwise an Exception arises
# C × H × W to H × W × C
plt.imshow(img.permute(1, 2, 0))
plt.show() | [
"[email protected]"
] | |
5310f6e313d1735a1628ce83e8b4b91c644ccf1e | 0c7ff0ec35ba2bb38f99ef6ecb261ec33466dd52 | /Day5/e1.py | 856fb2751777b59a9b6e020c6e2d67feceed42ca | [] | no_license | TheKinshu/100-Days-Python | 15cbacc608ee349cc9733a7032e10a359bebb731 | 293ad6b3e5f5208da84efbc5b2d2d395a5a53421 | refs/heads/master | 2023-04-18T08:21:30.361800 | 2021-05-02T18:48:39 | 2021-05-02T18:48:39 | 351,582,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # 🚨 Don't change the code below 👇
student_heights = input("Input a list of student heights ").split()
for n in range(0, len(student_heights)):
student_heights[n] = int(student_heights[n])
# 🚨 Don't change the code above 👆
average = sum(student_heights)/len(student_heights)
average = int(round(average, 0))
print(average)
#Write your code below this row 👇
| [
"[email protected]"
] | |
d685fdf5a33893a73983249d78950dde67df297a | 78e09e343e91e3706120983b477ea452f83bf29e | /metadata-ingestion/src/datahub/ingestion/source/nifi.py | d4c3c0d1278e0f382e4b10de5bf960bab1be343f | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"MIT"
] | permissive | sandertan/datahub | 7cda1dba7d8d0a5fda6501b8f53056ffb647edba | 46810e0df934d6cbf589895d3a58b84e2715749e | refs/heads/master | 2023-03-16T15:56:18.636988 | 2023-02-14T19:32:03 | 2023-02-14T19:32:03 | 146,737,346 | 0 | 0 | null | 2018-09-06T10:07:51 | 2018-08-30T11:03:24 | null | UTF-8 | Python | false | false | 43,251 | py | import json
import logging
import ssl
import time
from dataclasses import dataclass, field
from datetime import datetime, timedelta, timezone
from enum import Enum
from typing import Callable, Dict, Iterable, List, Optional, Tuple
from urllib.parse import urljoin
import requests
from dateutil import parser
from packaging import version
from pydantic.fields import Field
from requests.adapters import HTTPAdapter
import datahub.emitter.mce_builder as builder
from datahub.configuration.common import AllowDenyPattern
from datahub.configuration.source_common import EnvBasedSourceConfigBase
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.api.decorators import (
SupportStatus,
config_class,
platform_name,
support_status,
)
from datahub.ingestion.api.source import Source, SourceReport
from datahub.ingestion.api.workunit import MetadataWorkUnit
from datahub.metadata.schema_classes import (
DataFlowInfoClass,
DataJobInfoClass,
DataJobInputOutputClass,
DataPlatformInstanceClass,
DatasetPropertiesClass,
)
logger = logging.getLogger(__name__)
NIFI = "nifi"
# Python requests does not support passing a password for the key file.
# The same can be achieved by mounting an ssl context,
# as described here - https://github.com/psf/requests/issues/2519
# and here - https://github.com/psf/requests/issues/1573
class SSLAdapter(HTTPAdapter):
def __init__(self, certfile, keyfile, password=None):
self.context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.context.load_cert_chain(
certfile=certfile, keyfile=keyfile, password=password
)
super().__init__()
def init_poolmanager(self, *args, **kwargs):
kwargs["ssl_context"] = self.context
return super().init_poolmanager(*args, **kwargs)
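# Illustrative mount (an assumption mirroring how this source mounts the
# adapter in NifiSource below; the host and file names are placeholders):
#   session = requests.Session()
#   session.mount("https://nifi.example.com/nifi-api/",
#                 SSLAdapter("client.pem", "client.key", password="secret"))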
class NifiAuthType(Enum):
NO_AUTH = "NO_AUTH"
SINGLE_USER = "SINGLE_USER"
CLIENT_CERT = "CLIENT_CERT"
class NifiSourceConfig(EnvBasedSourceConfigBase):
site_url: str = Field(description="URI to connect")
auth: NifiAuthType = Field(
default=NifiAuthType.NO_AUTH,
description="Nifi authentication. must be one of : NO_AUTH, SINGLE_USER, CLIENT_CERT",
)
provenance_days: int = Field(
default=7,
description="time window to analyze provenance events for external datasets",
) # Fetch provenance events for past 1 week
process_group_pattern: AllowDenyPattern = Field(
default=AllowDenyPattern.allow_all(),
description="regex patterns for filtering process groups",
)
# Required for nifi deployments using Remote Process Groups
site_name: str = Field(
default="default",
description="Site name to identify this site with, useful when using input and output ports receiving remote connections",
)
site_url_to_site_name: Dict[str, str] = Field(
default={},
description="Lookup to find site_name for site_url, required if using remote process groups in nifi flow",
)
# Required to be set if auth is of type SINGLE_USER
username: Optional[str] = Field(
default=None, description='Nifi username, must be set for auth = "SINGLE_USER"'
)
password: Optional[str] = Field(
default=None, description='Nifi password, must be set for auth = "SINGLE_USER"'
)
# Required to be set if auth is of type CLIENT_CERT
client_cert_file: Optional[str] = Field(
default=None,
description='Path to PEM file containing the public certificates for the user/client identity, must be set for auth = "CLIENT_CERT"',
)
client_key_file: Optional[str] = Field(
default=None, description="Path to PEM file containing the client’s secret key"
)
client_key_password: Optional[str] = Field(
default=None, description="The password to decrypt the client_key_file"
)
# Required to be set if nifi server certificate is not signed by
# root CA trusted by client system, e.g. self-signed certificates
ca_file: Optional[str] = Field(
default=None,
description="Path to PEM file containing certs for the root CA(s) for the NiFi",
)
TOKEN_ENDPOINT = "/nifi-api/access/token"
ABOUT_ENDPOINT = "/nifi-api/flow/about"
CLUSTER_ENDPOINT = "/nifi-api/flow/cluster/summary"
PG_ENDPOINT = "/nifi-api/flow/process-groups/"
PROVENANCE_ENDPOINT = "/nifi-api/provenance/"
class NifiType(Enum):
PROCESSOR = "PROCESSOR"
FUNNEL = "FUNNEL"
INPUT_PORT = "INPUT_PORT"
OUTPUT_PORT = "OUTPUT_PORT"
REMOTE_INPUT_PORT = "REMOTE_INPUT_PORT"
REMOTE_OUTPUT_PORT = "REMOTE_OUTPUT_PORT"
class NifiEventType:
CREATE = "CREATE"
FETCH = "FETCH"
SEND = "SEND"
RECEIVE = "RECEIVE"
class NifiProcessorType:
ListS3 = "org.apache.nifi.processors.aws.s3.ListS3"
FetchS3Object = "org.apache.nifi.processors.aws.s3.FetchS3Object"
PutS3Object = "org.apache.nifi.processors.aws.s3.PutS3Object"
ListSFTP = "org.apache.nifi.processors.standard.ListSFTP"
FetchSFTP = "org.apache.nifi.processors.standard.FetchSFTP"
GetSFTP = "org.apache.nifi.processors.standard.GetSFTP"
PutSFTP = "org.apache.nifi.processors.standard.PutSFTP"
# To support new processor type,
# 1. add an entry in KNOWN_INGRESS_EGRESS_PROCESORS
# 2. Implement provenance event analyzer to find external dataset and
# map it in provenance_event_to_lineage_map
class NifiProcessorProvenanceEventAnalyzer:
env: str
KNOWN_INGRESS_EGRESS_PROCESORS = {
NifiProcessorType.ListS3: NifiEventType.CREATE,
NifiProcessorType.FetchS3Object: NifiEventType.FETCH,
NifiProcessorType.PutS3Object: NifiEventType.SEND,
NifiProcessorType.ListSFTP: NifiEventType.CREATE,
NifiProcessorType.FetchSFTP: NifiEventType.FETCH,
NifiProcessorType.GetSFTP: NifiEventType.RECEIVE,
NifiProcessorType.PutSFTP: NifiEventType.SEND,
}
def __init__(self) -> None:
# Map of Nifi processor type to the provenance event analyzer to find lineage
self.provenance_event_to_lineage_map: Dict[
str, Callable[[Dict], ExternalDataset]
] = {
NifiProcessorType.ListS3: self.process_s3_provenance_event,
NifiProcessorType.FetchS3Object: self.process_s3_provenance_event,
NifiProcessorType.PutS3Object: self.process_s3_provenance_event,
NifiProcessorType.ListSFTP: self.process_sftp_provenance_event,
NifiProcessorType.FetchSFTP: self.process_sftp_provenance_event,
NifiProcessorType.GetSFTP: self.process_sftp_provenance_event,
NifiProcessorType.PutSFTP: self.process_sftp_provenance_event,
}
def process_s3_provenance_event(self, event):
attributes = event.get("attributes", [])
s3_bucket = get_attribute_value(attributes, "s3.bucket")
s3_key = get_attribute_value(attributes, "s3.key")
if not s3_key:
s3_key = get_attribute_value(attributes, "filename")
s3_url = f"s3://{s3_bucket}/{s3_key}"
s3_url = s3_url[: s3_url.rindex("/")]
dataset_name = s3_url.replace("s3://", "").replace("/", ".")
platform = "s3"
dataset_urn = builder.make_dataset_urn(platform, dataset_name, self.env)
return ExternalDataset(
platform,
dataset_name,
dict(s3_uri=s3_url),
dataset_urn,
)
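    # Worked example (illustrative): attributes s3.bucket="my-bucket" and
    # s3.key="raw/2021/file.csv" give s3_url "s3://my-bucket/raw/2021" (the
    # trailing filename is trimmed) and dataset_name "my-bucket.raw.2021"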
def process_sftp_provenance_event(self, event):
attributes = event.get("attributes", [])
remote_host = get_attribute_value(attributes, "sftp.remote.host")
path = get_attribute_value(attributes, "path")
filename = get_attribute_value(attributes, "filename")
absolute_path = f"sftp://{remote_host}/{path}/{filename}"
if remote_host is None or path is None or filename is None:
absolute_path = event.get("transitUri")
absolute_path = absolute_path.replace("/./", "/")
if absolute_path.endswith("/."):
absolute_path = absolute_path[:-2]
absolute_path = absolute_path[: absolute_path.rindex("/")]
dataset_name = absolute_path.replace("sftp://", "").replace("/", ".")
platform = "file"
dataset_urn = builder.make_dataset_urn(platform, dataset_name, self.env)
return ExternalDataset(
platform,
dataset_name,
dict(uri=absolute_path),
dataset_urn,
)
@dataclass
class ExternalDataset:
platform: str
dataset_name: str
dataset_properties: Dict[str, str]
dataset_urn: str
@dataclass
class NifiComponent:
id: str
name: str
type: str
parent_group_id: str
nifi_type: NifiType
comments: Optional[str] = None
status: Optional[str] = None
# present only for nifi remote ports and processors
inlets: Dict[str, ExternalDataset] = field(default_factory=dict)
outlets: Dict[str, ExternalDataset] = field(default_factory=dict)
# present only for processors
config: Optional[Dict] = None
# present only for nifi remote ports
target_uris: Optional[str] = None
parent_rpg_id: Optional[str] = None
# Last successful event time
last_event_time: Optional[str] = None
@dataclass
class NifiProcessGroup:
id: str
name: str
parent_group_id: Optional[str]
@dataclass
class NifiRemoteProcessGroup:
id: str
name: str
parent_group_id: str
remote_ports: Dict[str, NifiComponent]
@dataclass
class NifiFlow:
version: Optional[str]
clustered: Optional[bool]
root_process_group: NifiProcessGroup
components: Dict[str, NifiComponent] = field(default_factory=dict)
remotely_accessible_ports: Dict[str, NifiComponent] = field(default_factory=dict)
connections: List[Tuple[str, str]] = field(default_factory=list)
processGroups: Dict[str, NifiProcessGroup] = field(default_factory=dict)
remoteProcessGroups: Dict[str, NifiRemoteProcessGroup] = field(default_factory=dict)
remote_ports: Dict[str, NifiComponent] = field(default_factory=dict)
def get_attribute_value(attr_lst: List[dict], attr_name: str) -> Optional[str]:
match = [entry for entry in attr_lst if entry["name"] == attr_name]
if len(match) > 0:
return match[0]["value"]
return None
@dataclass
class NifiSourceReport(SourceReport):
filtered: List[str] = field(default_factory=list)
def report_dropped(self, ent_name: str) -> None:
self.filtered.append(ent_name)
# allowRemoteAccess
@platform_name("NiFi", id="nifi")
@config_class(NifiSourceConfig)
@support_status(SupportStatus.CERTIFIED)
class NifiSource(Source):
"""
This plugin extracts the following:
- NiFi flow as `DataFlow` entity
- Ingress, egress processors, remote input and output ports as `DataJob` entity
- Input and output ports receiving remote connections as `Dataset` entity
- Lineage information between external datasets and ingress/egress processors by analyzing provenance events
Current limitations:
- Limited ingress/egress processors are supported
- S3: `ListS3`, `FetchS3Object`, `PutS3Object`
- SFTP: `ListSFTP`, `FetchSFTP`, `GetSFTP`, `PutSFTP`
"""
config: NifiSourceConfig
report: NifiSourceReport
def __init__(self, config: NifiSourceConfig, ctx: PipelineContext) -> None:
super().__init__(ctx)
self.config = config
self.report = NifiSourceReport()
self.session = requests.Session()
if self.config.ca_file is not None:
self.session.verify = self.config.ca_file
if self.config.site_url_to_site_name is None:
self.config.site_url_to_site_name = {}
if (
urljoin(self.config.site_url, "/nifi/")
not in self.config.site_url_to_site_name
):
self.config.site_url_to_site_name[
urljoin(self.config.site_url, "/nifi/")
] = self.config.site_name
if self.config.auth is NifiAuthType.CLIENT_CERT:
logger.debug("Setting client certificates in requests ssl context")
assert (
self.config.client_cert_file is not None
), "Config client_cert_file is required for CLIENT_CERT auth"
self.session.mount(
urljoin(self.config.site_url, "/nifi-api/"),
SSLAdapter(
certfile=self.config.client_cert_file,
keyfile=self.config.client_key_file,
password=self.config.client_key_password,
),
)
if self.config.auth is NifiAuthType.SINGLE_USER:
assert (
self.config.username is not None
), "Config username is required for SINGLE_USER auth"
assert (
self.config.password is not None
), "Config password is required for SINGLE_USER auth"
token_response = self.session.post(
url=urljoin(self.config.site_url, TOKEN_ENDPOINT),
data={
"username": self.config.username,
"password": self.config.password,
},
)
if not token_response.ok:
logger.error("Failed to get token")
self.report.report_failure(self.config.site_url, "Failed to get token")
self.session.headers.update(
{
"Authorization": "Bearer " + token_response.text,
# "Accept": "application/json",
"Content-Type": "application/json",
}
)
else:
self.session.headers.update(
{
# "Accept": "application/json",
"Content-Type": "application/json",
}
)
@classmethod
def create(cls, config_dict: dict, ctx: PipelineContext) -> "Source":
config = NifiSourceConfig.parse_obj(config_dict)
return cls(config, ctx)
def get_report(self) -> SourceReport:
return self.report
def update_flow(self, pg_flow_dto: Dict) -> None: # noqa: C901
breadcrumb_dto = pg_flow_dto.get("breadcrumb", {}).get("breadcrumb", {})
nifi_pg = NifiProcessGroup(
breadcrumb_dto.get("id"),
breadcrumb_dto.get("name"),
pg_flow_dto.get("parentGroupId"),
)
self.nifi_flow.processGroups[nifi_pg.id] = nifi_pg
if not self.config.process_group_pattern.allowed(nifi_pg.name):
self.report.report_dropped(f"{nifi_pg.name}.*")
return
flow_dto = pg_flow_dto.get("flow", {})
for processor in flow_dto.get("processors", []):
component = processor.get("component")
self.nifi_flow.components[component.get("id")] = NifiComponent(
component.get("id"),
component.get("name"),
component.get("type"),
component.get("parentGroupId"),
NifiType.PROCESSOR,
config=component.get("config"),
comments=component.get("config", {}).get("comments"),
status=component.get("status", {}).get("runStatus"),
)
for funnel in flow_dto.get("funnels", []):
component = funnel.get("component")
self.nifi_flow.components[component.get("id")] = NifiComponent(
component.get("id"),
component.get("name"),
component.get("type"),
component.get("parentGroupId"),
NifiType.FUNNEL,
comments=component.get("comments"),
status=component.get("status", {}).get("runStatus"),
)
logger.debug(f"Adding funnel {component.get('id')}")
for connection in flow_dto.get("connections", []):
# Exclude self - recursive relationships
if connection.get("sourceId") != connection.get("destinationId"):
self.nifi_flow.connections.append(
(connection.get("sourceId"), connection.get("destinationId"))
)
for inputPort in flow_dto.get("inputPorts", []):
component = inputPort.get("component")
if inputPort.get("allowRemoteAccess"):
self.nifi_flow.remotely_accessible_ports[
component.get("id")
] = NifiComponent(
component.get("id"),
component.get("name"),
component.get("type"),
component.get("parentGroupId"),
NifiType.INPUT_PORT,
comments=component.get("comments"),
status=component.get("status", {}).get("runStatus"),
)
logger.debug(f"Adding remotely accessible port {component.get('id')}")
else:
self.nifi_flow.components[component.get("id")] = NifiComponent(
component.get("id"),
component.get("name"),
component.get("type"),
component.get("parentGroupId"),
NifiType.INPUT_PORT,
comments=component.get("comments"),
status=component.get("status", {}).get("runStatus"),
)
logger.debug(f"Adding port {component.get('id')}")
for outputPort in flow_dto.get("outputPorts", []):
component = outputPort.get("component")
if outputPort.get("allowRemoteAccess"):
self.nifi_flow.remotely_accessible_ports[
component.get("id")
] = NifiComponent(
component.get("id"),
component.get("name"),
component.get("type"),
component.get("parentGroupId"),
NifiType.OUTPUT_PORT,
comments=component.get("comments"),
status=component.get("status", {}).get("runStatus"),
)
logger.debug(f"Adding remotely accessible port {component.get('id')}")
else:
self.nifi_flow.components[component.get("id")] = NifiComponent(
component.get("id"),
component.get("name"),
component.get("type"),
component.get("parentGroupId"),
NifiType.OUTPUT_PORT,
comments=component.get("comments"),
status=component.get("status", {}).get("runStatus"),
)
logger.debug(f"Adding report port {component.get('id')}")
for rpg in flow_dto.get("remoteProcessGroups", []):
rpg_component = rpg.get("component", {})
remote_ports = {}
contents = rpg_component.get("contents", {})
for component in contents.get("outputPorts", []):
if component.get("connected", False):
remote_ports[component.get("id")] = NifiComponent(
component.get("id"),
component.get("name"),
component.get("type"),
rpg_component.get("parentGroupId"),
NifiType.REMOTE_OUTPUT_PORT,
target_uris=rpg_component.get("targetUris"),
parent_rpg_id=rpg_component.get("id"),
comments=component.get("comments"),
status=component.get("status", {}).get("runStatus"),
)
logger.debug(f"Adding remote output port {component.get('id')}")
for component in contents.get("inputPorts", []):
if component.get("connected", False):
remote_ports[component.get("id")] = NifiComponent(
component.get("id"),
component.get("name"),
component.get("type"),
rpg_component.get("parentGroupId"),
NifiType.REMOTE_INPUT_PORT,
target_uris=rpg_component.get("targetUris"),
parent_rpg_id=rpg_component.get("id"),
comments=component.get("comments"),
status=component.get("status", {}).get("runStatus"),
)
logger.debug(f"Adding remote input port {component.get('id')}")
nifi_rpg = NifiRemoteProcessGroup(
rpg_component.get("id"),
rpg_component.get("name"),
component.get("parentGroupId"),
remote_ports,
)
logger.debug(f"Adding remote process group {rpg_component.get('id')}")
self.nifi_flow.components.update(remote_ports)
self.nifi_flow.remoteProcessGroups[nifi_rpg.id] = nifi_rpg
for pg in flow_dto.get("processGroups", []):
pg_response = self.session.get(
url=urljoin(self.config.site_url, PG_ENDPOINT) + pg.get("id")
)
if not pg_response.ok:
self.report_warning(
self.config.site_url,
"Failed to get process group flow " + pg.get("id"),
)
continue
pg_flow_dto = pg_response.json().get("processGroupFlow", {})
self.update_flow(pg_flow_dto)
def update_flow_keep_only_ingress_egress(self):
components_to_del: List[NifiComponent] = []
for component in self.nifi_flow.components.values():
if (
component.nifi_type is NifiType.PROCESSOR
and component.type
not in NifiProcessorProvenanceEventAnalyzer.KNOWN_INGRESS_EGRESS_PROCESORS.keys()
) or component.nifi_type not in [
NifiType.PROCESSOR,
NifiType.REMOTE_INPUT_PORT,
NifiType.REMOTE_OUTPUT_PORT,
]:
components_to_del.append(component)
incoming = list(
filter(lambda x: x[1] == component.id, self.nifi_flow.connections)
)
outgoing = list(
filter(lambda x: x[0] == component.id, self.nifi_flow.connections)
)
# Create new connections from incoming to outgoing
for i in incoming:
for j in outgoing:
self.nifi_flow.connections.append((i[0], j[1]))
# Remove older connections, as we already created
# new connections bypassing component to be deleted
for i in incoming:
self.nifi_flow.connections.remove(i)
for j in outgoing:
self.nifi_flow.connections.remove(j)
for c in components_to_del:
if c.nifi_type is NifiType.PROCESSOR and (
c.name.startswith("Get")
or c.name.startswith("List")
or c.name.startswith("Fetch")
or c.name.startswith("Put")
):
self.report_warning(
self.config.site_url,
f"Dropping NiFi Processor of type {c.type}, id {c.id}, name {c.name} from lineage view. \
This is likely an Ingress or Egress node which may be reading to/writing from external datasets \
However not currently supported in datahub",
)
else:
logger.debug(
f"Dropping NiFi Component of type {c.type}, id {c.id}, name {c.name} from lineage view."
)
del self.nifi_flow.components[c.id]
def create_nifi_flow(self):
about_response = self.session.get(
url=urljoin(self.config.site_url, ABOUT_ENDPOINT)
)
nifi_version: Optional[str] = None
if about_response.ok:
nifi_version = about_response.json().get("about", {}).get("version")
else:
logger.warning("Failed to fetch version for nifi")
cluster_response = self.session.get(
url=urljoin(self.config.site_url, CLUSTER_ENDPOINT)
)
clustered: Optional[bool] = None
if cluster_response.ok:
clustered = (
cluster_response.json().get("clusterSummary", {}).get("clustered")
)
else:
logger.warning("Failed to fetch cluster summary for flow")
pg_response = self.session.get(
url=urljoin(self.config.site_url, PG_ENDPOINT) + "root"
)
if not pg_response.ok:
logger.error("Failed to get root process group flow")
self.report.report_failure(
self.config.site_url, "Failed to get of root process group flow"
)
pg_flow_dto = pg_response.json().get("processGroupFlow", {})
breadcrumb_dto = pg_flow_dto.get("breadcrumb", {}).get("breadcrumb", {})
self.nifi_flow = NifiFlow(
version=nifi_version,
clustered=clustered,
root_process_group=NifiProcessGroup(
breadcrumb_dto.get("id"),
breadcrumb_dto.get("name"),
pg_flow_dto.get("parentGroupId"),
),
)
self.update_flow(pg_flow_dto)
self.update_flow_keep_only_ingress_egress()
def fetch_provenance_events(
self,
processor: NifiComponent,
eventType: str,
startDate: datetime,
endDate: Optional[datetime] = None,
) -> Iterable[Dict]:
logger.debug(
f"Fetching {eventType} provenance events for {processor.id}\
of processor type {processor.type}, Start date: {startDate}, End date: {endDate}"
)
older_version: bool = self.nifi_flow.version is not None and version.parse(
self.nifi_flow.version
) < version.parse("1.13.0")
if older_version:
searchTerms = {
"ProcessorID": processor.id,
"EventType": eventType,
}
else:
searchTerms = {
"ProcessorID": {"value": processor.id}, # type: ignore
"EventType": {"value": eventType}, # type: ignore
}
payload = json.dumps(
{
"provenance": {
"request": {
"maxResults": 1000,
"summarize": False,
"searchTerms": searchTerms,
"startDate": startDate.strftime("%m/%d/%Y %H:%M:%S %Z"),
"endDate": (
endDate.strftime("%m/%d/%Y %H:%M:%S %Z")
if endDate
else None
),
}
}
}
)
logger.debug(payload)
provenance_response = self.session.post(
url=urljoin(self.config.site_url, PROVENANCE_ENDPOINT), data=payload
)
if provenance_response.ok:
provenance = provenance_response.json().get("provenance", {})
provenance_uri = provenance.get("uri")
provenance_response = self.session.get(provenance_uri)
if provenance_response.ok:
provenance = provenance_response.json().get("provenance", {})
attempts = 5 # wait for at most 5 attempts 5*1= 5 seconds
while (not provenance.get("finished", False)) and attempts > 0:
logger.warning(
f"Provenance query not completed, attempts left : {attempts}"
)
# wait until the uri returns percentcomplete 100
time.sleep(1)
provenance_response = self.session.get(provenance_uri)
attempts -= 1
if provenance_response.ok:
provenance = provenance_response.json().get("provenance", {})
events = provenance.get("results", {}).get("provenanceEvents", [])
last_event_time: Optional[datetime] = None
oldest_event_time: Optional[datetime] = None
for event in events:
event_time = parser.parse(event.get("eventTime"))
# datetime.strptime(
# event.get("eventTime"), "%m/%d/%Y %H:%M:%S.%f %Z"
# )
if not last_event_time or event_time > last_event_time:
last_event_time = event_time
if not oldest_event_time or event_time < oldest_event_time:
oldest_event_time = event_time
yield event
processor.last_event_time = str(last_event_time)
self.delete_provenance(provenance_uri)
total = provenance.get("results", {}).get("total")
totalCount = provenance.get("results", {}).get("totalCount")
if total != str(totalCount):
yield from self.fetch_provenance_events(
processor, eventType, startDate, oldest_event_time
)
else:
self.report_warning(
self.config.site_url,
f"provenance events could not be fetched for processor \
{processor.id} of type {processor.name}",
)
logger.warning(provenance_response.text)
return
def report_warning(self, key: str, reason: str) -> None:
logger.warning(f"{key}: {reason}")
self.report.report_warning(key, reason)
def delete_provenance(self, provenance_uri):
delete_response = self.session.delete(provenance_uri)
if not delete_response.ok:
logger.error("failed to delete provenance ", provenance_uri)
def construct_workunits(self) -> Iterable[MetadataWorkUnit]: # noqa: C901
rootpg = self.nifi_flow.root_process_group
flow_name = rootpg.name # self.config.site_name
flow_urn = builder.make_data_flow_urn(NIFI, rootpg.id, self.config.env)
flow_properties = {}
if self.nifi_flow.clustered is not None:
flow_properties["clustered"] = str(self.nifi_flow.clustered)
if self.nifi_flow.version is not None:
flow_properties["version"] = str(self.nifi_flow.version)
yield from self.construct_flow_workunits(
flow_urn, flow_name, self.make_external_url(rootpg.id), flow_properties
)
for component in self.nifi_flow.components.values():
job_name = component.name
job_urn = builder.make_data_job_urn_with_flow(flow_urn, component.id)
incoming = list(
filter(lambda x: x[1] == component.id, self.nifi_flow.connections)
)
outgoing = list(
filter(lambda x: x[0] == component.id, self.nifi_flow.connections)
)
inputJobs = []
jobProperties = None
if component.nifi_type is NifiType.PROCESSOR:
jobProperties = {
k: str(v)
for k, v in component.config.items() # type: ignore
if k
in [
"schedulingPeriod",
"schedulingStrategy",
"executionNode",
"concurrentlySchedulableTaskCount",
]
}
jobProperties["properties"] = json.dumps(
component.config.get("properties") # type: ignore
)
if component.last_event_time is not None:
jobProperties["last_event_time"] = component.last_event_time
for dataset in component.inlets.values():
yield from self.construct_dataset_workunits(
dataset.platform,
dataset.dataset_name,
dataset.dataset_urn,
datasetProperties=dataset.dataset_properties,
)
for dataset in component.outlets.values():
yield from self.construct_dataset_workunits(
dataset.platform,
dataset.dataset_name,
dataset.dataset_urn,
datasetProperties=dataset.dataset_properties,
)
for edge in incoming:
incoming_from = edge[0]
if incoming_from in self.nifi_flow.remotely_accessible_ports.keys():
dataset_name = f"{self.config.site_name}.{self.nifi_flow.remotely_accessible_ports[incoming_from].name}"
dataset_urn = builder.make_dataset_urn(
NIFI, dataset_name, self.config.env
)
component.inlets[dataset_urn] = ExternalDataset(
NIFI,
dataset_name,
dict(nifi_uri=self.config.site_url),
dataset_urn,
)
else:
inputJobs.append(
builder.make_data_job_urn_with_flow(flow_urn, incoming_from)
)
for edge in outgoing:
outgoing_to = edge[1]
if outgoing_to in self.nifi_flow.remotely_accessible_ports.keys():
dataset_name = f"{self.config.site_name}.{self.nifi_flow.remotely_accessible_ports[outgoing_to].name}"
dataset_urn = builder.make_dataset_urn(
NIFI, dataset_name, self.config.env
)
component.outlets[dataset_urn] = ExternalDataset(
NIFI,
dataset_name,
dict(nifi_uri=self.config.site_url),
dataset_urn,
)
if component.nifi_type is NifiType.REMOTE_INPUT_PORT:
# TODO - if target_uris is not set, but http proxy is used in RPG
site_urls = component.target_uris.split(",") # type: ignore
for site_url in site_urls:
if site_url not in self.config.site_url_to_site_name:
self.report_warning(
site_url,
f"Site with url {site_url} is being used in flow but\
corresponding site name is not configured via site_url_to_site_name.\
This may result in broken lineage.",
)
else:
site_name = self.config.site_url_to_site_name[site_url]
dataset_name = f"{site_name}.{component.name}"
dataset_urn = builder.make_dataset_urn(
NIFI, dataset_name, self.config.env
)
component.outlets[dataset_urn] = ExternalDataset(
NIFI, dataset_name, dict(nifi_uri=site_url), dataset_urn
)
break
if component.nifi_type is NifiType.REMOTE_OUTPUT_PORT:
site_urls = component.target_uris.split(",") # type: ignore
for site_url in site_urls:
if site_url not in self.config.site_url_to_site_name:
self.report_warning(
self.config.site_url,
f"Site with url {site_url} is being used in flow but\
corresponding site name is not configured via site_url_to_site_name.\
This may result in broken lineage.",
)
else:
site_name = self.config.site_url_to_site_name[site_url]
dataset_name = f"{site_name}.{component.name}"
dataset_urn = builder.make_dataset_urn(
NIFI, dataset_name, self.config.env
)
component.inlets[dataset_urn] = ExternalDataset(
NIFI, dataset_name, dict(nifi_uri=site_url), dataset_urn
)
break
yield from self.construct_job_workunits(
job_urn,
job_name,
external_url=self.make_external_url(
component.parent_group_id, component.id, component.parent_rpg_id
),
job_type=NIFI.upper() + "_" + component.nifi_type.value,
description=component.comments,
job_properties=jobProperties,
inlets=list(component.inlets.keys()),
outlets=list(component.outlets.keys()),
inputJobs=inputJobs,
status=component.status,
)
for port in self.nifi_flow.remotely_accessible_ports.values():
dataset_name = f"{self.config.site_name}.{port.name}"
dataset_platform = NIFI
yield from self.construct_dataset_workunits(
dataset_platform,
dataset_name,
external_url=self.make_external_url(port.parent_group_id, port.id),
)
def process_provenance_events(self):
startDate = datetime.now(timezone.utc) - timedelta(
days=self.config.provenance_days
)
eventAnalyzer = NifiProcessorProvenanceEventAnalyzer()
eventAnalyzer.env = self.config.env
for component in self.nifi_flow.components.values():
if component.nifi_type is NifiType.PROCESSOR:
eventType = eventAnalyzer.KNOWN_INGRESS_EGRESS_PROCESORS[component.type]
events = self.fetch_provenance_events(component, eventType, startDate)
for event in events:
dataset = eventAnalyzer.provenance_event_to_lineage_map[
component.type
](event)
if eventType in [
NifiEventType.CREATE,
NifiEventType.FETCH,
NifiEventType.RECEIVE,
]:
component.inlets[dataset.dataset_urn] = dataset
else:
component.outlets[dataset.dataset_urn] = dataset
def get_workunits(self) -> Iterable[MetadataWorkUnit]:
# Creates nifi_flow by invoking /flow rest api and saves as self.nifi_flow
self.create_nifi_flow()
# Updates inlets and outlets of nifi_flow.components by invoking /provenance rest api
self.process_provenance_events()
# Reads and translates entities from self.nifi_flow into mcps
yield from self.construct_workunits()
def make_external_url(
self,
parent_group_id: str,
component_id: Optional[str] = "",
parent_rpg_id: Optional[str] = None,
) -> str:
if parent_rpg_id is not None:
component_id = parent_rpg_id
return urljoin(
self.config.site_url,
f"/nifi/?processGroupId={parent_group_id}&componentIds={component_id}",
)
def construct_flow_workunits(
self,
flow_urn: str,
flow_name: str,
external_url: str,
flow_properties: Optional[Dict[str, str]] = None,
) -> Iterable[MetadataWorkUnit]:
mcp = MetadataChangeProposalWrapper(
entityUrn=flow_urn,
aspect=DataFlowInfoClass(
name=flow_name,
customProperties=flow_properties,
externalUrl=external_url,
),
)
for proposal in [mcp]:
wu = MetadataWorkUnit(
id=f"{NIFI}.{flow_name}.{proposal.aspectName}", mcp=proposal
)
self.report.report_workunit(wu)
yield wu
def construct_job_workunits(
self,
job_urn: str,
job_name: str,
external_url: str,
job_type: str,
description: Optional[str],
job_properties: Optional[Dict[str, str]] = None,
inlets: List[str] = [],
outlets: List[str] = [],
inputJobs: List[str] = [],
status: Optional[str] = None,
) -> Iterable[MetadataWorkUnit]:
if job_properties:
job_properties = {k: v for k, v in job_properties.items() if v is not None}
mcp = MetadataChangeProposalWrapper(
entityUrn=job_urn,
aspect=DataJobInfoClass(
name=job_name,
type=job_type,
description=description,
customProperties=job_properties,
externalUrl=external_url,
status=status,
),
)
wu = MetadataWorkUnit(
id=f"{NIFI}.{job_name}.{mcp.aspectName}",
mcp=mcp,
)
self.report.report_workunit(wu)
yield wu
inlets.sort()
outlets.sort()
inputJobs.sort()
mcp = MetadataChangeProposalWrapper(
entityUrn=job_urn,
aspect=DataJobInputOutputClass(
inputDatasets=inlets, outputDatasets=outlets, inputDatajobs=inputJobs
),
)
wu = MetadataWorkUnit(
id=f"{NIFI}.{job_name}.{mcp.aspectName}",
mcp=mcp,
)
self.report.report_workunit(wu)
yield wu
def construct_dataset_workunits(
self,
dataset_platform: str,
dataset_name: str,
dataset_urn: Optional[str] = None,
external_url: Optional[str] = None,
datasetProperties: Optional[Dict[str, str]] = None,
) -> Iterable[MetadataWorkUnit]:
if not dataset_urn:
dataset_urn = builder.make_dataset_urn(
dataset_platform, dataset_name, self.config.env
)
mcp = MetadataChangeProposalWrapper(
entityUrn=dataset_urn,
aspect=DataPlatformInstanceClass(
platform=builder.make_data_platform_urn(dataset_platform)
),
)
platform = (
dataset_platform[dataset_platform.rindex(":") + 1 :]
if dataset_platform.startswith("urn:")
else dataset_platform
)
wu = MetadataWorkUnit(id=f"{platform}.{dataset_name}.{mcp.aspectName}", mcp=mcp)
self.report.report_workunit(wu)
yield wu
mcp = MetadataChangeProposalWrapper(
entityUrn=dataset_urn,
aspect=DatasetPropertiesClass(
externalUrl=external_url, customProperties=datasetProperties
),
)
wu = MetadataWorkUnit(id=f"{platform}.{dataset_name}.{mcp.aspectName}", mcp=mcp)
self.report.report_workunit(wu)
yield wu
| [
"[email protected]"
] | |
4f5e5904ce00031faa7806ac96e6a40d08997a71 | 12b857508d2019a3e98c51a3a74317ae1bdaffba | /app/core/tests/test_commands.py | 94e26ec57fcb12bb67d5b0433e020fe8b9406d2d | [] | no_license | robwa10/recipe-api | 78d19f560d8d9e13a438485dab189b71d26edd7c | a180bbde0990befd9c8b2c1a3db2d6af44170ef5 | refs/heads/main | 2023-07-20T05:10:48.064301 | 2021-09-08T15:07:52 | 2021-09-08T15:07:52 | 358,300,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
    def test_wait_for_db_ready(self):
        """Test waiting for db when db is available"""
        with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
            gi.return_value = True
            call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
@patch('time.sleep', return_value=True)
def test_wait_for_db(self, ts):
"""Test waiting for db"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
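            # The first five connection lookups raise OperationalError and the
            # sixth returns True, so wait_for_db should retry six times in total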
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
| [
"[email protected]"
] | |
ac2b95962eb2b69aa3a49d68911172431d9d1a85 | 1af217d712239d33cba828ad053a77085d84e92a | /Lovely Loveseats.py | c6411338ed4585dae9dfd7c2f4daa99033814633 | [] | no_license | henryprosser/Codecademy-Computer-Science-Projects | b4769c2d235537aa025a32505c8cae72a4090c20 | 3b8b99dd435d4e833858d56ef5faced039abca06 | refs/heads/main | 2023-02-08T09:35:51.370218 | 2020-12-13T13:04:21 | 2020-12-13T13:04:21 | 321,066,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | ### Lovely Loveseats Items ###
lovely_loveseat_description = "Lovely Loveseat. Tufted polyester blend on wood. 32 inches high x 40 inches wide x 30 inches deep. Red or white."
lovely_loveseat_price = 254.00
stylish_settee_description = "Stylish Settee. Faux leather on birch. 29.50 inches high x 54.75 inches wide x 28 inches deep. Black."
stylish_settee_price = 180.50
luxurious_lamp_description = "Luxurious Lamp. Glass and iron. 36 inches tall. Brown with cream shade."
luxurious_lamp_price = 52.15
# Sales Tax
sales_tax = 0.088
customer_one_total = 0
customer_one_itemization = ""
customer_one_total += lovely_loveseat_price
customer_one_itemization += lovely_loveseat_description
customer_one_total += luxurious_lamp_price
customer_one_itemization += luxurious_lamp_description
customer_one_tax = customer_one_total * sales_tax
customer_one_total += customer_one_tax
# Print out Customer One Items
print("Customer One Items: ")
print(customer_one_itemization)
# Print out Customer One Total
print("Customer One Total: ")
print(customer_one_total) | [
"[email protected]"
] | |
d523fb173a6b5d2bbc2d1621bb82076c316b6427 | da9a01faf674fa064f067ff58a53743f6a77c534 | /wallboard-import.py | b0480174094a79130302208550967748e4f91a1a | [
"MIT-0"
] | permissive | MysteriousSonOfGod/aws-serverless-connect-wallboard | 9cb491f45c5e2d42b4110711d4a32cc9fb7182ba | 423bae868bd3326dc56ce7b372ce68426f6a1185 | refs/heads/master | 2022-02-28T04:07:03.613492 | 2019-09-25T02:30:10 | 2019-09-25T02:30:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,142 | py | #!/usr/bin/python
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import yaml
import sys
import signal
import os
import time
import boto3
from botocore.exceptions import ClientError
#
# Function definitions
#
def Interrupt(signal, frame):
print("\n")
sys.exit(0)
def UpdateSettings(Config, Settings):
if "Defaults" in Config:
Defaults = Config["Defaults"]
if "TextColour" in Defaults: Settings["TextColour"] = {"S":Defaults["TextColour"]}
if "TextColor" in Defaults: Settings["TextColour"] = {"S":Defaults["TextColor"]}
if "BackgroundColour" in Defaults: Settings["BackgroundColour"] = {"S":Defaults["BackgroundColour"]}
if "BackgroundColor" in Defaults: Settings["BackgroundColour"] = {"S":Defaults["BackgroundColor"]}
if "TextSize" in Defaults: Settings["TextSize"] = {"S":str(Defaults["TextSize"])}
if "Font" in Defaults: Settings["Font"] = {"S":Defaults["Font"]}
if "WarningBackgroundColour" in Defaults: Settings["WarningBackgroundColour"] = {"S":Defaults["WarningBackgroundColour"]}
if "WarningBackgroundColor" in Defaults: Settings["WarningBackgroundColour"] = {"S":Defaults["WarningBackgroundColor"]}
if "AlertBackgroundColour" in Defaults: Settings["AlertBackgroundColour"] = {"S":Defaults["AlertBackgroundColour"]}
if "AlertBackgroundColor" in Defaults: Settings["AlertBackgroundColour"] = {"S":Defaults["AlertBackgroundColor"]}
def GetCalculations(CalculationsConfig):
Calculations = []
for Calc in CalculationsConfig:
if "Formula" not in Calc:
print("Missing formula calculation in "+Calc["ReferenceName"])
sys.exit(1)
Calculations.append({"Name":{"S":str(Calc["Calculation"])}, "Formula":{"S":Calc["Formula"]}})
return(Calculations)
def GetThresholds(ThresholdConfig):
Thresholds = []
for Threshold in ThresholdConfig:
if "Reference" not in Threshold:
print("Missing reference in threshold "+Threshold["ReferenceName"])
sys.exit(1)
if "WarnBelow" not in Threshold and "AlertBelow" not in Threshold and \
"WarnAbove" not in Threshold and "AlertAbove" not in Threshold:
print("No actual threshold set in threshold "+Threshold["ReferenceName"])
sys.exit(1)
Item = {}
Item["Name"] = {"S":str(Threshold["Threshold"])} # Stringify just in case this is a numeric
Item["Reference"] = {"S":Threshold["Reference"]}
if "WarnBelow" in Threshold: Item["WarnBelow"] = {"S":str(Threshold["WarnBelow"])}
if "AlertBelow" in Threshold: Item["AlertBelow"] = {"S":str(Threshold["AlertBelow"])}
if "WarnAbove" in Threshold: Item["WarnAbove"] = {"S":str(Threshold["WarnAbove"])}
if "AlertAbove" in Threshold: Item["AlertAbove"] = {"S":str(Threshold["AlertAbove"])}
Thresholds.append(Item)
return(Thresholds)
def GetAgentStates(AgentConfig):
StateColours = []
for Item in AgentConfig:
State = {}
State["StateName"] = {"S":Item["State"].lower()}
if "Colour" in Item: State["BackgroundColour"] = {"S":Item["Colour"].lower()}
if "Color" in Item: State["BackgroundColour"] = {"S":Item["Color"].lower()}
StateColours.append(State)
return(StateColours)
def GetDataSources(SourceConfig):
Sources = []
for Item in SourceConfig:
SourceInfo = {}
SourceInfo["Name"] = {"S":Item["Source"]}
SourceInfo["Reference"] = {"S":Item["Reference"]}
Sources.append(SourceInfo)
return(Sources)
def GetCells(RowConfig):
Cells = []
Columns = 0
Rows = 0
for Row in RowConfig:
if "Row" not in Row:
print("Missing row number")
sys.exit(1)
if "Cells" not in Row:
print("Missing cell definitions on row "+str(Row["Row"]))
sys.exit(1)
#
# We capture the maximum number of columns because it makes
# our lives easier to know this during the render function
#
if len(Row["Cells"]) > Columns: Columns = len(Row["Cells"])
for Cell in Row["Cells"]:
if "Cell" not in Cell:
print("Missing cell number on row "+str(Row["Row"]))
sys.exit(1)
if int(Row["Row"]) > Rows: Rows = int(Row["Row"])
Item = {}
Item["Address"] = {"S":"R"+str(Row["Row"])+"C"+str(Cell["Cell"])}
if "Text" in Cell: Item["Text"] = {"S":Cell["Text"]}
if "Reference" in Cell: Item["Reference"] = {"S":Cell["Reference"]}
if "TextColour" in Cell: Item["TextColour"] = {"S":Cell["TextColour"]}
if "TextColor" in Cell: Item["TextColour"] = {"S":Cell["TextColor"]}
if "BackgroundColour" in Cell: Item["BackgroundColour"] = {"S":Cell["BackgroundColour"]}
if "BackgroundColor" in Cell: Item["BackgroundColour"] = {"S":Cell["BackgroundColor"]}
if "TextSize" in Cell: Item["TextSize"] = {"S":str(Cell["TextSize"])}
if "ThresholdReference" in Cell: Item["ThresholdReference"] = {"S":Cell["ThresholdReference"]}
if "Rows" in Cell: Item["Rows"] = {"S":str(Cell["Rows"])}
if "Cells" in Cell: Item["Cells"] = {"S":str(Cell["Cells"])}
Cells.append(Item)
return(Cells,Rows,Columns)
def SaveToDynamoDB(WallboardName,Records,RecordType):
Count = 0
Dynamo = boto3.client("dynamodb")
for Item in Records:
Item["Identifier"] = {"S":WallboardName}
if RecordType != "Settings":
Item["RecordType"] = {"S":RecordType+str(Count)}
Count += 1
else:
Item["RecordType"] = {"S":RecordType}
try:
Dynamo.put_item(TableName=DDBTableName, Item=Item)
except ClientError as e:
print("DynamoDB error: "+e.response["Error"]["Message"])
def CreateDDBTable():
Dynamo = boto3.client("dynamodb")
try:
Response = Dynamo.describe_table(TableName=DDBTableName)
except:
Table = Dynamo.create_table(TableName=DDBTableName,
KeySchema=[{"AttributeName":"Identifier", "KeyType":"HASH"}, {"AttributeName":"RecordType", "KeyType":"RANGE"}],
AttributeDefinitions=[{"AttributeName":"Identifier", "AttributeType":"S"}, {"AttributeName":"RecordType", "AttributeType":"S"}],
ProvisionedThroughput={"ReadCapacityUnits":5, "WriteCapacityUnits":5})
Table = Dynamo.describe_table(TableName=DDBTableName)
while Table["Table"]["TableStatus"] != "ACTIVE":
print("Waiting for table creation. State: %s" % (Table["Table"]["TableStatus"]))
time.sleep(10)
Table = Dynamo.describe_table(TableName=DDBTableName)
#
# Mainline code
#
# Basic setup and argument check
#
signal.signal(signal.SIGINT, Interrupt)
if len(sys.argv) != 2:
print("Usage: wallboard-import.py wallboarddefinition.yaml")
sys.exit(1)
#
# Read the YAML file
#
with open(sys.argv[1]) as Input:
try:
Config = yaml.load(Input)
except yaml.YAMLError as e:
print(e)
sys.exit(1)
#
# Set up variables and defaults
#
Settings = {}
Calculations = []
Thresholds = []
AgentStates = {}
Cells = []
DataSources = []
MaxColumns = 0
MaxRows = 0
DDBTableName = "ConnectWallboard"
if "WallboardTable" in os.environ: DDBTableName = os.environ["WallboardTable"]
CreateDDBTable()
Settings["WarningBackgroundColour"] = {"S":"Yellow"}
Settings["AlertBackgroundColour"] = {"S":"Red"}
#
# Input validation
#
if "Identifier" not in Config:
print("Missing Identifier tag")
sys.exit(1)
if "Rows" not in Config:
print("Missing row definitions")
sys.exit(1)
#
# Somewhat validated now - let's parse the input
#
UpdateSettings(Config, Settings)
if "Calculations" in Config: Calculations = GetCalculations(Config["Calculations"])
if "Thresholds" in Config: Thresholds = GetThresholds(Config["Thresholds"])
if "AgentStates" in Config: AgentStates = GetAgentStates(Config["AgentStates"])
if "Sources" in Config: DataSources = GetDataSources(Config["Sources"])
(Cells, MaxRows, MaxColumns) = GetCells(Config["Rows"])
if MaxRows == 0:
print("No rows were found")
sys.exit(1)
if MaxColumns == 0:
print("No cells were found")
sys.exit(1)
Settings["Columns"] = {"S":str(MaxColumns)}
Settings["Rows"] = {"S":str(MaxRows)}
SaveToDynamoDB(Config["Identifier"], [Settings], "Settings")
SaveToDynamoDB(Config["Identifier"], Thresholds, "Threshold")
SaveToDynamoDB(Config["Identifier"], Calculations, "Calculation")
SaveToDynamoDB(Config["Identifier"], Cells, "Cell")
SaveToDynamoDB(Config["Identifier"], AgentStates, "AgentState")
SaveToDynamoDB(Config["Identifier"], DataSources, "DataSource")
| [
"[email protected]"
] | |
f9e9b2943ccb15b94b8b42c726468487b409d19b | 14270abc8fd50fa175bddd20026375c341529665 | /code.py | dac36207a33e958de205081ea63a4a10a8f5d50b | [] | no_license | jiyunyang/final-exam-code | 78fad55720dfc9a0c613008f9993a52ac20f883b | ba91ce47ce81d3b33edac976b33fa0c96f773ea7 | refs/heads/master | 2020-04-12T15:56:45.567013 | 2018-12-20T15:45:16 | 2018-12-20T15:46:08 | 162,597,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,363 | py | import pandas as pd
import numpy as np
# 데이터가 있는 엑셀 파일 읽어오는 코드
df = pd.read_excel('movie.xlsx',sheet_name='sheet0')
print(df)
# 컬럼명 확인
df.columns
# 데이터 입력하기 전에 어떤 항목이 있고, 그것을 어떻게 입력을 해야되는지 알려주는 것
print("*********************************************************************************************************")
print("[영화명, 감독, 개봉일, 영화형태, 국적, 전국스크린수, 전국매출액, 전국관객수, 장르, 등급]")
print("중 1개 이상 검색어를 입력하세요.\n")
print("# 영화형태 :〔 단편, 옴니버스, 장편 〕중 검색하세요.")
print("# 장 르 :〔 SF, 가족, 공연, 공포, 다큐멘터리, 드라마, 멜로/로맨스, 뮤지컬, 미스터리, 범죄, 사극,")
print(" 서부극, 성인물, 스릴러, 애니메이션, 액션, 어드벤처, 전쟁, 코미디, 판타지 〕중 검색하세요.")
print("# 등 급 :〔 12세관람가, 12세이상관람가, 15세관람가, 15세이상관람가, 18세관람가, 기타, 전체관람가,")
print(" 제한상영가, 청소년관람불가 〕 중 검색하세요.")
print("*********************************************************************************************************")
# 영화명을 movie_name_list를 생성하여 거기에 데이터를 받은 후 저장하는 코드
movie_name_list = []
movie_name_list = df['영화명']
# 영화형태을 movie_type_list를 생성하여 거기에 데이터를 받은 후 저장하는 코드
movie_type_list = []
movie_type_list = df['영화형태']
# 국적을 movie_country_list를 생성하여 거기에 데이터를 받은 후 저장하는 코드
movie_country_list = []
movie_country_list = df['국적']
# 장르를 genre_list를 생성하여 거기에 데이터를 받은 후 저장하는 코드
genre_list = []
genre_list = df['장르']
# 등급을 movie_age_list를 생성하여 거기에 데이터를 받은 후 저장하는 코드
movie_age_list = []
movie_type_list = df['등급']
# 영화명을 입력 받아서 1차원 행렬을 다차원 행렬로 변환해주는 코드
def call_movie_name() :
a_list =[]
a = ''
a = input("영화제목을 입력하세요 :")
a_list = a.split()
A = np.array(a_list)
# print("A =", A)
# print("A.ndim =",A.ndim)
# print("A.shape =",A.shape)
print("\n")
no1 = A.ndim
no2 = A.shape[0]
A.shape = no2, no1
# 검색어를 입력 받아 영화명 컬럼에서 일치하는 값을 찾는 코드
for X1 in range(0, len(a_list)):
for Y1 in range(0, 12752):
answer = df.at[Y1, '영화명']
if A[X1] == answer:
print(df.loc[[Y1]])
return
# 영화형태를 입력 받아서 1차원 행렬을 다차원 행렬로 변환해주는 코드
def call_movie_type() :
b_list =[]
b = ''
b = input("영화형태를 입력하세요 :")
b_list = b.split()
B = np.array(b_list)
# print("B.ndim =",B.ndim)
# print("B.shape =",B.shape)
# print("B =",B)
print("\n")
no3 = B.ndim
no4 = B.shape[0]
B.shape = no4, no3
# 검색어를 입력 받아 영화형태 컬럼에서 일치하는 값을 찾는 코드
for X2 in range(0, len(b_list)):
for Y2 in range(0, 12752):
answer = df.at[Y2, '영화형태']
if B[X2] == answer:
print(df.loc[[Y2]])
return
# 국적을 입력 받아서 1차원 행렬을 다차원 행렬로 변환해주는 코드
def call_movie_country() :
c_list =[]
c = ''
c = input("국적을 입력하세요 :")
c_list = c.split()
C = np.array(c_list)
print("\n")
no5 = C.ndim
no6 = C.shape[0]
C.shape = no6, no5
# 검색어를 입력 받아 국적 컬럼에서 일치하는 값을 찾는 코드
for X3 in range(0, len(c_list)):
for Y3 in range(0, 12752):
answer = df.at[Y3, '국적']
if C[X3] == answer:
print(df.loc[[Y3]])
return
# 장르를 입력 받아서 1차원 행렬을 다차원 행렬로 변환해주는 코드
def call_genre() :
d_list = []
d = ''
d = input("장르를 입력하세요 :")
d_list = d.split()
D = np.array(d_list)
print("\n")
no7 = D.ndim
no8 = D.shape[0]
D.shape = no8, no7
# 검색어를 입력 받아 장르 컬럼에서 일치하는 값을 찾는 코드
g = 0
flag = g
for X4 in range(0,len(d_list)) :
for Y4 in range(0,12752):
answer = df.at[Y4, '장르']
if D[X4] == answer :
print(df.loc[[Y4]])
if flag == g :
print("없음")
return
# 등급을 입력 받아서 1차원 행렬을 다차원 행렬로 변환해주는 코드
def call_movie_age() :
e_list =[]
e = ''
e = input("등급을 입력하세요 :")
e_list = e.split()
E = np.array(e_list)
# print("E.ndim =",E.ndim)
# print("E.shape =",E.shape)
# print("E =",E)
print("\n")
no9 = E.ndim
no10 = E.shape[0]
E.shape = no10, no9
# 검색어를 입력 받아 등급 컬럼에서 일치하는 값을 찾는 코드
for X5 in range(0, 12753):
# E[X5]
# print(E[X5])
for Y5 in range(0, 12753):
if E[X5] == movie_age_list[Y5]:
print("있음")
else:
Y5 = Y5 + 1
return
# 여러 가지 입력 값 중 어떤 것을 찾을건지 물어보는 코드
question = []
question = input("어떤 것을 찾고 싶으신가요?"
"영화명, 영화형태, 국적, 장르, 등급 중 입력하세요. : ")
question_index = ["영화명", "영화형태", "국적", "장르", "등급"]
for r in question_index :
if question == "영화명" :
call_movie_name()
elif question == "영화형태" :
call_movie_type()
elif question == "국적" :
call_movie_country()
elif question == "장르" :
call_genre()
elif question == "등급" :
call_movie_age()
else :
print("찾고 싶은 것을 다시 입력하세요.")
question = input("어떤 것을 찾고 싶으신가요?"
"영화명, 영화형태, 국적, 장르, 등급 중 입력하세요. : ") | [
"[email protected]"
] | |
28d16a95a7654b735dd314ccf8811baeb980f1c9 | 9328148bacec19b5bb726828ba36ee6844be4b1e | /thrump/ifeng/ifeng/items.py | 68636e644188eb2ccedc22a400a8cb6d68f95fed | [] | no_license | chenzuoli/scrapy | f6e83572681956546cc062ff0867d9debd8edc40 | 94a218274831cf8a556768f143ec193867f5dfa8 | refs/heads/master | 2020-04-16T03:38:19.494868 | 2019-03-07T00:30:24 | 2019-03-07T00:30:24 | 165,238,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class IfengItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
html = scrapy.Field()
| [
"[email protected]"
] | |
5c7a418721b647d34b668e03b1eb4c253e2ee006 | 2c143ba64032f65c7f7bf1cbd567a1dcf13d5bb1 | /SQL/第二高的薪水.py | 5297fdc8e37bd996231a0dd8276a6f6ccab45462 | [] | no_license | tx991020/MyLeetcode | 5b6121d32260fb30b12cc8146e44e6c6da03ad89 | cfe4f087dfeb258caebbc29fc366570ac170a68c | refs/heads/master | 2020-04-09T21:43:41.403553 | 2019-03-27T18:54:35 | 2019-03-27T18:54:35 | 160,611,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | '''
编写一个 SQL 查询,获取 Employee 表中第二高的薪水(Salary) 。
+----+--------+
| Id | Salary |
+----+--------+
| 1 | 100 |
| 2 | 200 |
| 3 | 300 |
+----+--------+
例如上述 Employee 表,SQL查询应该返回 200 作为第二高的薪水。如果不存在第二高的薪水,那么查询应返回 null。
+---------------------+
| SecondHighestSalary |
+---------------------+
| 200 |
+---------------------+
'''
'''
select IFNULL((select Distinct Salary from Employee order by Salary DESC limit 1,1),null)
as SecondHighestSalary;
''' | [
"[email protected]"
] | |
c979902db626a94f569e02694bd867a213a599ea | d43184572d1b6c76df8efe9eac69ddf386af4e4e | /HelloWorldFlask/venv/bin/pip3 | 0c25b303e45de737ec6117990b23fa563b7a1740 | [] | no_license | Alanhliu/flask_demo | b7aa9a94e908b20a42afd50e396f3905c721fb16 | 2aa3b31ac73e27caf59e22135b50b7842c8fb78f | refs/heads/master | 2022-12-11T18:15:30.809735 | 2018-09-07T07:14:09 | 2018-09-07T07:14:09 | 145,682,864 | 0 | 0 | null | 2022-12-08T02:47:47 | 2018-08-22T08:56:26 | Python | UTF-8 | Python | false | false | 261 | #!/Users/siasun/PycharmProjects/HelloWorldFlask/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
f303f581cf4290e32862c3144f0b6ad34a657e84 | afcf65a67664617786dcd5cf60240722a141ecff | /schoolsystem/settings.py | aa1b55a0141d0ab60cf2012b465eb56e23f0cc02 | [] | no_license | Veronicahwambui/SchoolSystem | 381c9a1e88ecd31ae424476628b5383e73c75ab0 | 4cb3e81b1bc1670e60e047c2a29e382f78a1776b | refs/heads/master | 2023-08-15T10:30:49.846334 | 2021-10-11T09:47:18 | 2021-10-11T09:47:18 | 380,294,113 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,940 | py | """
Django settings for schoolsystem project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
import django_heroku
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-epe)8!(6$uck4tf4j_%vs=1iplq6f_0d5=*sp#(g)fs%b)cdc%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['0.0.0.0', 'localhost', '127.0.0.1', '.herokuschoolsystem.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'student',
'Trainer',
'Course',
'Event',
'core',
'api',
'rest_framework',
'whitenoise.runserver_nostatic',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'schoolsystem.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'schoolsystem.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'schoolsystem',
'USER' : 'njengavero',
'PASSWORD': 'njenga3881',
'HOST': 'localhost',
'PORT':''
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=[os.path.join(BASE_DIR,'static')]
MEDIA_ROOT="Images/"
# STATIC_ROOT=os.path.join(BASE_DIR,'static/Images')
STATIC_ROOT=os.path.join(BASE_DIR,'media')
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
django_heroku.settings(locals())
| [
"[email protected]"
] | |
39a61eea065ceb59bb7a08646c76413e1dcfde6a | 2b7afa059380665ecd1c112a502d6a27021ad257 | /djangoApp/djangoApp/settings.py | ecec553c4b85ad90138980742a56a1ed0a40260d | [] | no_license | huhugon/spring_maven | f32323e3599b0c0f7cc08f1b703cc23429f97281 | 7d7c03947b0499eb2a06bad05fb3ea97c1c04e5a | refs/heads/master | 2021-01-01T05:47:22.929270 | 2015-07-19T04:18:08 | 2015-07-19T04:18:08 | 39,322,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,739 | py | """
Django settings for djangoApp project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ul&*tonmq2&*npoh#py98hha$q@018=buacj@(l$#fge-ga7p6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'photo'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'djangoApp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangoApp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'django',
'USER': 'calvin',
'PASSWORD': '!imsi00',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
7182ed9ce35f16120f1079795d73ee4d7bf32f25 | 62eaa871e4e825a0a8c3a014b5d08fcf976aedef | /nbs/renum.py | 93448e4776c14d527ddd247487d02254b78f4fdf | [
"Apache-2.0"
] | permissive | timeseriesAI/tsai | f1006b37062a328edabb2fae3e8361dcda0fc68b | 06ab2a9c6870b311fa0efe4cb3fc4df0009d1965 | refs/heads/main | 2023-07-19T22:11:06.425058 | 2023-07-13T07:06:16 | 2023-07-13T07:06:16 | 211,822,289 | 3,526 | 458 | Apache-2.0 | 2023-06-15T13:57:12 | 2019-09-30T09:18:31 | Jupyter Notebook | UTF-8 | Python | false | false | 404 | py | #!/usr/bin/env python
"Rename ipynb files starting at 01"
import re
import os
from pathlib import Path
dir_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(dir_path)
i = 1
for p in sorted(list(Path().glob('*.ipynb'))):
pre = re.findall(r'^(\d+)[a-z]?[A-Z]?_(.*)', str(p))
if not pre:
continue
new = f'{i:03d}_{pre[0][1]}'
print(p, new)
os.rename(p, new)
i+=1 | [
"[email protected]"
] | |
066a78af26ccc2d3e5cfcf6e21edabd2bde00b1b | 51d525529e7ff8add891590067c74a3edd5dbc57 | /turtle.py | 77217489c4bce254023e26dffe44dd6db8702475 | [
"MIT"
] | permissive | btsai-dev/coco-viewer | 034c6df85b11c7d15f388a2035a73ec4470daf05 | 57bab068dd4116bedf361689e184296e77600360 | refs/heads/main | 2023-04-30T05:29:12.046581 | 2021-05-02T21:22:59 | 2021-05-02T21:22:59 | 338,417,183 | 0 | 1 | MIT | 2021-02-12T19:46:14 | 2021-02-12T19:46:14 | null | UTF-8 | Python | false | false | 143,098 | py | #
# turtle.py: a Tkinter based turtle graphics module for Python
# Version 1.1b - 4. 5. 2009
#
# Copyright (C) 2006 - 2010 Gregor Lingl
# email: [email protected]
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
"""
Turtle graphics is a popular way for introducing programming to
kids. It was part of the original Logo programming language developed
by Wally Feurzeig and Seymour Papert in 1966.
Imagine a robotic turtle starting at (0, 0) in the x-y plane. Give it
the command turtle.forward(15), and it moves (on-screen!) 15 pixels in
the direction it is facing, drawing a line as it moves. Give it the
command turtle.left(25), and it rotates in-place 25 degrees counterclockwise.
By combining these and similar commands, intricate shapes and
pictures can easily be drawn.
----- turtle.py
This module is an extended reimplementation of turtle.py from the
Python standard distribution up to Python 2.5. (See: http://www.python.org)
It tries to keep the merits of turtle.py and to be (nearly) 100%
compatible with it. This means in the first place to enable the
learning programmer to use all the commands, classes and methods
interactively when using the module from within IDLE run with
the -n switch.
Roughly it has the following features added:
- Better animation of the turtle movements, especially of turning the
turtle. So the turtles can more easily be used as a visual feedback
instrument by the (beginning) programmer.
- Different turtle shapes, gif-images as turtle shapes, user defined
and user controllable turtle shapes, among them compound
(multicolored) shapes. Turtle shapes can be stretched and tilted, which
makes turtles very versatile geometrical objects.
- Fine control over turtle movement and screen updates via delay(),
and enhanced tracer() and speed() methods.
- Aliases for the most commonly used commands, like fd for forward etc.,
following the early Logo traditions. This reduces the boring work of
typing long sequences of commands, which often occur in a natural way
when kids try to program fancy pictures on their first encounter with
turtle graphics.
- Turtles now have an undo()-method with configurable undo-buffer.
- Some simple commands/methods for creating event driven programs
(mouse-, key-, timer-events). Especially useful for programming games.
- A scrollable Canvas class. The default scrollable Canvas can be
extended interactively as needed while playing around with the turtle(s).
- A TurtleScreen class with methods controlling background color or
background image, window and canvas size and other properties of the
TurtleScreen.
- There is a method, setworldcoordinates(), to install a user defined
coordinate-system for the TurtleScreen.
- The implementation uses a 2-vector class named Vec2D, derived from tuple.
This class is public, so it can be imported by the application programmer,
which makes certain types of computations very natural and compact.
- Appearance of the TurtleScreen and the Turtles at startup/import can be
configured by means of a turtle.cfg configuration file.
The default configuration mimics the appearance of the old turtle module.
- If configured appropriately the module reads in docstrings from a docstring
dictionary in some different language, supplied separately and replaces
the English ones by those read in. There is a utility function
write_docstringdict() to write a dictionary with the original (English)
docstrings to disc, so it can serve as a template for translations.
Behind the scenes there are some features included with possible
extensions in mind. These will be commented and documented elsewhere.
"""
_ver = "turtle 1.1b - for Python 3.1 - 4. 5. 2009"
# print(_ver)
import tkinter as TK
import types
import math
import time
import os
import inspect
from os.path import isfile, split, join
from copy import deepcopy
from tkinter import simpledialog
_tg_classes = ['ScrolledCanvas', 'TurtleScreen', 'Screen',
'RawTurtle', 'Turtle', 'RawPen', 'Pen', 'Shape', 'Vec2D']
_tg_screen_functions = ['addshape', 'bgcolor', 'bgpic', 'bye',
'clearscreen', 'colormode', 'delay', 'exitonclick', 'getcanvas',
'getshapes', 'listen', 'mainloop', 'mode', 'numinput',
'onkey', 'onkeypress', 'onkeyrelease', 'onscreenclick', 'ontimer',
'register_shape', 'resetscreen', 'screensize', 'setup',
'setworldcoordinates', 'textinput', 'title', 'tracer', 'turtles', 'update',
'window_height', 'window_width']
_tg_turtle_functions = ['back', 'backward', 'begin_fill', 'begin_poly', 'bk',
'circle', 'clear', 'clearstamp', 'clearstamps', 'clone', 'color',
'degrees', 'distance', 'dot', 'down', 'end_fill', 'end_poly', 'fd',
'fillcolor', 'filling', 'forward', 'get_poly', 'getpen', 'getscreen', 'get_shapepoly',
'getturtle', 'goto', 'heading', 'hideturtle', 'home', 'ht', 'isdown',
'isvisible', 'left', 'lt', 'onclick', 'ondrag', 'onrelease', 'pd',
'pen', 'pencolor', 'pendown', 'pensize', 'penup', 'pos', 'position',
'pu', 'radians', 'right', 'reset', 'resizemode', 'rt',
'seth', 'setheading', 'setpos', 'setposition', 'settiltangle',
'setundobuffer', 'setx', 'sety', 'shape', 'shapesize', 'shapetransform', 'shearfactor', 'showturtle',
'speed', 'st', 'stamp', 'tilt', 'tiltangle', 'towards',
'turtlesize', 'undo', 'undobufferentries', 'up', 'width',
'write', 'xcor', 'ycor']
_tg_utilities = ['write_docstringdict', 'done']
__all__ = (_tg_classes + _tg_screen_functions + _tg_turtle_functions +
_tg_utilities) # + _math_functions)
_alias_list = ['addshape', 'backward', 'bk', 'fd', 'ht', 'lt', 'pd', 'pos',
'pu', 'rt', 'seth', 'setpos', 'setposition', 'st',
'turtlesize', 'up', 'width']
_CFG = {"width" : 0.5, # Screen
"height" : 0.75,
"canvwidth" : 400,
"canvheight": 300,
"leftright": None,
"topbottom": None,
"mode": "standard", # TurtleScreen
"colormode": 1.0,
"delay": 10,
"undobuffersize": 1000, # RawTurtle
"shape": "classic",
"pencolor" : "black",
"fillcolor" : "black",
"resizemode" : "noresize",
"visible" : True,
"language": "english", # docstrings
"exampleturtle": "turtle",
"examplescreen": "screen",
"title": "Python Turtle Graphics",
"using_IDLE": False
}
def config_dict(filename):
"""Convert content of config-file into dictionary."""
with open(filename, "r") as f:
cfglines = f.readlines()
cfgdict = {}
for line in cfglines:
line = line.strip()
if not line or line.startswith("#"):
continue
try:
key, value = line.split("=")
except:
print("Bad line in config-file %s:\n%s" % (filename,line))
continue
key = key.strip()
value = value.strip()
if value in ["True", "False", "None", "''", '""']:
value = eval(value)
else:
try:
if "." in value:
value = float(value)
else:
value = int(value)
except:
pass # value need not be converted
cfgdict[key] = value
return cfgdict
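# A minimal sketch of the "key = value" format config_dict() accepts.
# The keys below are real _CFG keys; the values are illustrative only:
#
#   # turtle.cfg
#   width = 800
#   height = 600
#   colormode = 255
#   shape = turtle
#   using_IDLE = False
#
# This file would parse to {'width': 800, 'height': 600, 'colormode': 255,
# 'shape': 'turtle', 'using_IDLE': False}.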
def readconfig(cfgdict):
"""Read config-files, change configuration-dict accordingly.
If there is a turtle.cfg file in the current working directory,
read it from there. If this contains an importconfig-value,
    say 'myway', construct filename turtle_myway.cfg else use
turtle.cfg and read it from the import-directory, where
turtle.py is located.
    Update configuration dictionary first according to config-file
in the import directory, then according to config-file in the
current working directory.
If no config-file is found, the default configuration is used.
"""
default_cfg = "turtle.cfg"
cfgdict1 = {}
cfgdict2 = {}
if isfile(default_cfg):
cfgdict1 = config_dict(default_cfg)
if "importconfig" in cfgdict1:
default_cfg = "turtle_%s.cfg" % cfgdict1["importconfig"]
try:
head, tail = split(__file__)
cfg_file2 = join(head, default_cfg)
except:
cfg_file2 = ""
if isfile(cfg_file2):
cfgdict2 = config_dict(cfg_file2)
_CFG.update(cfgdict2)
_CFG.update(cfgdict1)
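# Resulting precedence (a sketch, assuming both config files exist): the
# file in turtle.py's import directory - turtle.cfg or
# turtle_<importconfig>.cfg - is applied first, then the turtle.cfg in the
# current working directory, so values from the working directory win.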
try:
readconfig(_CFG)
except:
print ("No configfile read, reason unknown")
class Vec2D(tuple):
"""A 2 dimensional vector class, used as a helper class
for implementing turtle graphics.
May be useful for turtle graphics programs also.
Derived from tuple, so a vector is a tuple!
Provides (for a, b vectors, k number):
a+b vector addition
a-b vector subtraction
a*b inner product
k*a and a*k multiplication with scalar
|a| absolute value of a
a.rotate(angle) rotation
"""
def __new__(cls, x, y):
return tuple.__new__(cls, (x, y))
def __add__(self, other):
return Vec2D(self[0]+other[0], self[1]+other[1])
def __mul__(self, other):
if isinstance(other, Vec2D):
return self[0]*other[0]+self[1]*other[1]
return Vec2D(self[0]*other, self[1]*other)
def __rmul__(self, other):
if isinstance(other, int) or isinstance(other, float):
return Vec2D(self[0]*other, self[1]*other)
def __sub__(self, other):
return Vec2D(self[0]-other[0], self[1]-other[1])
def __neg__(self):
return Vec2D(-self[0], -self[1])
def __abs__(self):
return (self[0]**2 + self[1]**2)**0.5
def rotate(self, angle):
"""rotate self counterclockwise by angle
"""
perp = Vec2D(-self[1], self[0])
angle = angle * math.pi / 180.0
c, s = math.cos(angle), math.sin(angle)
return Vec2D(self[0]*c+perp[0]*s, self[1]*c+perp[1]*s)
def __getnewargs__(self):
return (self[0], self[1])
def __repr__(self):
return "(%.2f,%.2f)" % self
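# A short illustration of the operations listed in the Vec2D docstring;
# outputs reflect the two-decimal __repr__ defined above:
#
#   >>> a = Vec2D(3, 4)
#   >>> a + Vec2D(1, 1)
#   (4.00,5.00)
#   >>> abs(a)
#   5.0
#   >>> 2 * a
#   (6.00,8.00)
#   >>> a * Vec2D(1, 0)         # inner product
#   3
#   >>> Vec2D(1, 0).rotate(90)  # counterclockwise
#   (0.00,1.00)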
##############################################################################
### From here up to line : Tkinter - Interface for turtle.py ###
### May be replaced by an interface to some different graphics toolkit ###
##############################################################################
## helper functions for Scrolled Canvas, to forward Canvas-methods
## to ScrolledCanvas class
def __methodDict(cls, _dict):
"""helper function for Scrolled Canvas"""
baseList = list(cls.__bases__)
baseList.reverse()
for _super in baseList:
__methodDict(_super, _dict)
for key, value in cls.__dict__.items():
if type(value) == types.FunctionType:
_dict[key] = value
def __methods(cls):
"""helper function for Scrolled Canvas"""
_dict = {}
__methodDict(cls, _dict)
return _dict.keys()
__stringBody = (
'def %(method)s(self, *args, **kw): return ' +
'self.%(attribute)s.%(method)s(*args, **kw)')
def __forwardmethods(fromClass, toClass, toPart, exclude = ()):
### MANY CHANGES ###
_dict_1 = {}
__methodDict(toClass, _dict_1)
_dict = {}
mfc = __methods(fromClass)
for ex in _dict_1.keys():
if ex[:1] == '_' or ex[-1:] == '_' or ex in exclude or ex in mfc:
pass
else:
_dict[ex] = _dict_1[ex]
for method, func in _dict.items():
d = {'method': method, 'func': func}
if isinstance(toPart, str):
execString = \
__stringBody % {'method' : method, 'attribute' : toPart}
exec(execString, d)
setattr(fromClass, method, d[method]) ### NEWU!
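# For each forwarded method, __forwardmethods generates and exec()s a stub
# of the following form (a sketch for a method 'coords' forwarded to the
# '_canvas' attribute), then attaches it to fromClass via setattr():
#
#   def coords(self, *args, **kw):
#       return self._canvas.coords(*args, **kw)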
class ScrolledCanvas(TK.Frame):
"""Modeled after the scrolled canvas class from Grayons's Tkinter book.
Used as the default canvas, which pops up automatically when
using turtle graphics functions or the Turtle class.
"""
def __init__(self, master, width=500, height=350,
canvwidth=600, canvheight=500):
TK.Frame.__init__(self, master, width=width, height=height)
self._rootwindow = self.winfo_toplevel()
self.width, self.height = width, height
self.canvwidth, self.canvheight = canvwidth, canvheight
self.bg = "white"
self._canvas = TK.Canvas(master, width=width, height=height,
bg=self.bg, relief=TK.SUNKEN, borderwidth=2)
self.hscroll = TK.Scrollbar(master, command=self._canvas.xview,
orient=TK.HORIZONTAL)
self.vscroll = TK.Scrollbar(master, command=self._canvas.yview)
self._canvas.configure(xscrollcommand=self.hscroll.set,
yscrollcommand=self.vscroll.set)
self.rowconfigure(0, weight=1, minsize=0)
self.columnconfigure(0, weight=1, minsize=0)
self._canvas.grid(padx=1, in_ = self, pady=1, row=0,
column=0, rowspan=1, columnspan=1, sticky='news')
self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,
column=1, rowspan=1, columnspan=1, sticky='news')
self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,
column=0, rowspan=1, columnspan=1, sticky='news')
self.reset()
self._rootwindow.bind('<Configure>', self.onResize)
def reset(self, canvwidth=None, canvheight=None, bg = None):
"""Adjust canvas and scrollbars according to given canvas size."""
if canvwidth:
self.canvwidth = canvwidth
if canvheight:
self.canvheight = canvheight
if bg:
self.bg = bg
self._canvas.config(bg=bg,
scrollregion=(-self.canvwidth//2, -self.canvheight//2,
self.canvwidth//2, self.canvheight//2))
self._canvas.xview_moveto(0.5*(self.canvwidth - self.width + 30) /
self.canvwidth)
self._canvas.yview_moveto(0.5*(self.canvheight- self.height + 30) /
self.canvheight)
self.adjustScrolls()
def adjustScrolls(self):
""" Adjust scrollbars according to window- and canvas-size.
"""
cwidth = self._canvas.winfo_width()
cheight = self._canvas.winfo_height()
self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth)
self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight)
if cwidth < self.canvwidth or cheight < self.canvheight:
self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,
column=0, rowspan=1, columnspan=1, sticky='news')
self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,
column=1, rowspan=1, columnspan=1, sticky='news')
else:
self.hscroll.grid_forget()
self.vscroll.grid_forget()
def onResize(self, event):
"""self-explanatory"""
self.adjustScrolls()
def bbox(self, *args):
""" 'forward' method, which canvas itself has inherited...
"""
return self._canvas.bbox(*args)
def cget(self, *args, **kwargs):
""" 'forward' method, which canvas itself has inherited...
"""
return self._canvas.cget(*args, **kwargs)
def config(self, *args, **kwargs):
""" 'forward' method, which canvas itself has inherited...
"""
self._canvas.config(*args, **kwargs)
def bind(self, *args, **kwargs):
""" 'forward' method, which canvas itself has inherited...
"""
self._canvas.bind(*args, **kwargs)
def unbind(self, *args, **kwargs):
""" 'forward' method, which canvas itself has inherited...
"""
self._canvas.unbind(*args, **kwargs)
def focus_force(self):
""" 'forward' method, which canvas itself has inherited...
"""
self._canvas.focus_force()
__forwardmethods(ScrolledCanvas, TK.Canvas, '_canvas')
class _Root(TK.Tk):
"""Root class for Screen based on Tkinter."""
def __init__(self):
TK.Tk.__init__(self)
def setupcanvas(self, width, height, cwidth, cheight):
self._canvas = ScrolledCanvas(self, width, height, cwidth, cheight)
self._canvas.pack(expand=1, fill="both")
def _getcanvas(self):
return self._canvas
def set_geometry(self, width, height, startx, starty):
self.geometry("%dx%d%+d%+d"%(width, height, startx, starty))
def ondestroy(self, destroy):
self.wm_protocol("WM_DELETE_WINDOW", destroy)
def win_width(self):
return self.winfo_screenwidth()
def win_height(self):
return self.winfo_screenheight()
Canvas = TK.Canvas
class TurtleScreenBase(object):
"""Provide the basic graphics functionality.
Interface between Tkinter and turtle.py.
To port turtle.py to some different graphics toolkit
a corresponding TurtleScreenBase class has to be implemented.
"""
@staticmethod
def _blankimage():
"""return a blank image object
"""
img = TK.PhotoImage(width=1, height=1)
img.blank()
return img
@staticmethod
def _image(filename):
"""return an image object containing the
imagedata from a gif-file named filename.
"""
return TK.PhotoImage(file=filename)
def __init__(self, cv):
self.cv = cv
if isinstance(cv, ScrolledCanvas):
w = self.cv.canvwidth
h = self.cv.canvheight
else: # expected: ordinary TK.Canvas
w = int(self.cv.cget("width"))
h = int(self.cv.cget("height"))
self.cv.config(scrollregion = (-w//2, -h//2, w//2, h//2 ))
self.canvwidth = w
self.canvheight = h
self.xscale = self.yscale = 1.0
def _createpoly(self):
"""Create an invisible polygon item on canvas self.cv)
"""
return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill="", outline="")
def _drawpoly(self, polyitem, coordlist, fill=None,
outline=None, width=None, top=False):
"""Configure polygonitem polyitem according to provided
arguments:
coordlist is sequence of coordinates
fill is filling color
outline is outline color
top is a boolean value, which specifies if polyitem
will be put on top of the canvas' displaylist so it
will not be covered by other items.
"""
cl = []
for x, y in coordlist:
cl.append(x * self.xscale)
cl.append(-y * self.yscale)
self.cv.coords(polyitem, *cl)
if fill is not None:
self.cv.itemconfigure(polyitem, fill=fill)
if outline is not None:
self.cv.itemconfigure(polyitem, outline=outline)
if width is not None:
self.cv.itemconfigure(polyitem, width=width)
if top:
self.cv.tag_raise(polyitem)
def _createline(self):
"""Create an invisible line item on canvas self.cv)
"""
return self.cv.create_line(0, 0, 0, 0, fill="", width=2,
capstyle = TK.ROUND)
def _drawline(self, lineitem, coordlist=None,
fill=None, width=None, top=False):
"""Configure lineitem according to provided arguments:
coordlist is sequence of coordinates
fill is drawing color
width is width of drawn line.
top is a boolean value, which specifies if polyitem
will be put on top of the canvas' displaylist so it
will not be covered by other items.
"""
if coordlist is not None:
cl = []
for x, y in coordlist:
cl.append(x * self.xscale)
cl.append(-y * self.yscale)
self.cv.coords(lineitem, *cl)
if fill is not None:
self.cv.itemconfigure(lineitem, fill=fill)
if width is not None:
self.cv.itemconfigure(lineitem, width=width)
if top:
self.cv.tag_raise(lineitem)
def _delete(self, item):
"""Delete graphics item from canvas.
If item is"all" delete all graphics items.
"""
self.cv.delete(item)
def _update(self):
"""Redraw graphics items on canvas
"""
self.cv.update()
def _delay(self, delay):
"""Delay subsequent canvas actions for delay ms."""
self.cv.after(delay)
def _iscolorstring(self, color):
"""Check if the string color is a legal Tkinter color string.
"""
try:
rgb = self.cv.winfo_rgb(color)
ok = True
except TK.TclError:
ok = False
return ok
def _bgcolor(self, color=None):
"""Set canvas' backgroundcolor if color is not None,
else return backgroundcolor."""
if color is not None:
self.cv.config(bg = color)
self._update()
else:
return self.cv.cget("bg")
def _write(self, pos, txt, align, font, pencolor):
"""Write txt at pos in canvas with specified font
and color.
Return text item and x-coord of right bottom corner
of text's bounding box."""
x, y = pos
x = x * self.xscale
y = y * self.yscale
anchor = {"left":"sw", "center":"s", "right":"se" }
item = self.cv.create_text(x-1, -y, text = txt, anchor = anchor[align],
fill = pencolor, font = font)
x0, y0, x1, y1 = self.cv.bbox(item)
self.cv.update()
return item, x1-1
## def _dot(self, pos, size, color):
## """may be implemented for some other graphics toolkit"""
def _onclick(self, item, fun, num=1, add=None):
"""Bind fun to mouse-click event on turtle.
fun must be a function with two arguments, the coordinates
of the clicked point on the canvas.
num, the number of the mouse-button defaults to 1
"""
if fun is None:
self.cv.tag_unbind(item, "<Button-%s>" % num)
else:
def eventfun(event):
x, y = (self.cv.canvasx(event.x)/self.xscale,
-self.cv.canvasy(event.y)/self.yscale)
fun(x, y)
self.cv.tag_bind(item, "<Button-%s>" % num, eventfun, add)
def _onrelease(self, item, fun, num=1, add=None):
"""Bind fun to mouse-button-release event on turtle.
fun must be a function with two arguments, the coordinates
of the point on the canvas where mouse button is released.
num, the number of the mouse-button defaults to 1
If a turtle is clicked, first _onclick-event will be performed,
        then _onscreenclick-event.
"""
if fun is None:
self.cv.tag_unbind(item, "<Button%s-ButtonRelease>" % num)
else:
def eventfun(event):
x, y = (self.cv.canvasx(event.x)/self.xscale,
-self.cv.canvasy(event.y)/self.yscale)
fun(x, y)
self.cv.tag_bind(item, "<Button%s-ButtonRelease>" % num,
eventfun, add)
def _ondrag(self, item, fun, num=1, add=None):
"""Bind fun to mouse-move-event (with pressed mouse button) on turtle.
fun must be a function with two arguments, the coordinates of the
actual mouse position on the canvas.
num, the number of the mouse-button defaults to 1
Every sequence of mouse-move-events on a turtle is preceded by a
mouse-click event on that turtle.
"""
if fun is None:
self.cv.tag_unbind(item, "<Button%s-Motion>" % num)
else:
def eventfun(event):
try:
x, y = (self.cv.canvasx(event.x)/self.xscale,
-self.cv.canvasy(event.y)/self.yscale)
fun(x, y)
except:
pass
self.cv.tag_bind(item, "<Button%s-Motion>" % num, eventfun, add)
def _onscreenclick(self, fun, num=1, add=None):
"""Bind fun to mouse-click event on canvas.
fun must be a function with two arguments, the coordinates
of the clicked point on the canvas.
num, the number of the mouse-button defaults to 1
If a turtle is clicked, first _onclick-event will be performed,
        then _onscreenclick-event.
"""
if fun is None:
self.cv.unbind("<Button-%s>" % num)
else:
def eventfun(event):
x, y = (self.cv.canvasx(event.x)/self.xscale,
-self.cv.canvasy(event.y)/self.yscale)
fun(x, y)
self.cv.bind("<Button-%s>" % num, eventfun, add)
def _onkeyrelease(self, fun, key):
"""Bind fun to key-release event of key.
Canvas must have focus. See method listen
"""
if fun is None:
self.cv.unbind("<KeyRelease-%s>" % key, None)
else:
def eventfun(event):
fun()
self.cv.bind("<KeyRelease-%s>" % key, eventfun)
def _onkeypress(self, fun, key=None):
"""If key is given, bind fun to key-press event of key.
Otherwise bind fun to any key-press.
Canvas must have focus. See method listen.
"""
if fun is None:
if key is None:
self.cv.unbind("<KeyPress>", None)
else:
self.cv.unbind("<KeyPress-%s>" % key, None)
else:
def eventfun(event):
fun()
if key is None:
self.cv.bind("<KeyPress>", eventfun)
else:
self.cv.bind("<KeyPress-%s>" % key, eventfun)
def _listen(self):
"""Set focus on canvas (in order to collect key-events)
"""
self.cv.focus_force()
def _ontimer(self, fun, t):
"""Install a timer, which calls fun after t milliseconds.
"""
if t == 0:
self.cv.after_idle(fun)
else:
self.cv.after(t, fun)
def _createimage(self, image):
"""Create and return image item on canvas.
"""
return self.cv.create_image(0, 0, image=image)
def _drawimage(self, item, pos, image):
"""Configure image item as to draw image object
at position (x,y) on canvas)
"""
x, y = pos
self.cv.coords(item, (x * self.xscale, -y * self.yscale))
self.cv.itemconfig(item, image=image)
def _setbgpic(self, item, image):
"""Configure image item as to draw image object
at center of canvas. Set item to the first item
in the displaylist, so it will be drawn below
any other item ."""
self.cv.itemconfig(item, image=image)
self.cv.tag_lower(item)
def _type(self, item):
"""Return 'line' or 'polygon' or 'image' depending on
type of item.
"""
return self.cv.type(item)
def _pointlist(self, item):
"""returns list of coordinate-pairs of points of item
Example (for insiders):
>>> from turtle import *
>>> getscreen()._pointlist(getturtle().turtle._item)
[(0.0, 9.9999999999999982), (0.0, -9.9999999999999982),
(9.9999999999999982, 0.0)]
>>> """
cl = self.cv.coords(item)
pl = [(cl[i], -cl[i+1]) for i in range(0, len(cl), 2)]
return pl
def _setscrollregion(self, srx1, sry1, srx2, sry2):
self.cv.config(scrollregion=(srx1, sry1, srx2, sry2))
def _rescale(self, xscalefactor, yscalefactor):
items = self.cv.find_all()
for item in items:
coordinates = list(self.cv.coords(item))
newcoordlist = []
while coordinates:
x, y = coordinates[:2]
newcoordlist.append(x * xscalefactor)
newcoordlist.append(y * yscalefactor)
coordinates = coordinates[2:]
self.cv.coords(item, *newcoordlist)
def _resize(self, canvwidth=None, canvheight=None, bg=None):
"""Resize the canvas the turtles are drawing on. Does
not alter the drawing window.
"""
# needs amendment
if not isinstance(self.cv, ScrolledCanvas):
return self.canvwidth, self.canvheight
if canvwidth is canvheight is bg is None:
return self.cv.canvwidth, self.cv.canvheight
if canvwidth is not None:
self.canvwidth = canvwidth
if canvheight is not None:
self.canvheight = canvheight
self.cv.reset(canvwidth, canvheight, bg)
def _window_size(self):
""" Return the width and height of the turtle window.
"""
width = self.cv.winfo_width()
if width <= 1: # the window isn't managed by a geometry manager
width = self.cv['width']
height = self.cv.winfo_height()
if height <= 1: # the window isn't managed by a geometry manager
height = self.cv['height']
return width, height
def mainloop(self):
"""Starts event loop - calling Tkinter's mainloop function.
No argument.
Must be last statement in a turtle graphics program.
Must NOT be used if a script is run from within IDLE in -n mode
(No subprocess) - for interactive use of turtle graphics.
Example (for a TurtleScreen instance named screen):
>>> screen.mainloop()
"""
TK.mainloop()
def textinput(self, title, prompt):
"""Pop up a dialog window for input of a string.
Arguments: title is the title of the dialog window,
prompt is a text mostly describing what information to input.
Return the string input
If the dialog is canceled, return None.
Example (for a TurtleScreen instance named screen):
>>> screen.textinput("NIM", "Name of first player:")
"""
return simpledialog.askstring(title, prompt)
def numinput(self, title, prompt, default=None, minval=None, maxval=None):
"""Pop up a dialog window for input of a number.
Arguments: title is the title of the dialog window,
prompt is a text mostly describing what numerical information to input.
default: default value
        minval: minimum value for input
maxval: maximum value for input
The number input must be in the range minval .. maxval if these are
given. If not, a hint is issued and the dialog remains open for
correction. Return the number input.
If the dialog is canceled, return None.
Example (for a TurtleScreen instance named screen):
>>> screen.numinput("Poker", "Your stakes:", 1000, minval=10, maxval=10000)
"""
return simpledialog.askfloat(title, prompt, initialvalue=default,
minvalue=minval, maxvalue=maxval)
##############################################################################
### End of Tkinter - interface ###
##############################################################################
class Terminator (Exception):
"""Will be raised in TurtleScreen.update, if _RUNNING becomes False.
    Thus stops execution of turtle graphics script. Main purpose: use in
    the Demo-Viewer turtle.Demo.py.
"""
pass
class TurtleGraphicsError(Exception):
"""Some TurtleGraphics Error
"""
class Shape(object):
"""Data structure modeling shapes.
attribute _type is one of "polygon", "image", "compound"
    attribute _data is - depending on _type - a polygon-tuple,
an image or a list constructed using the addcomponent method.
"""
def __init__(self, type_, data=None):
self._type = type_
if type_ == "polygon":
if isinstance(data, list):
data = tuple(data)
elif type_ == "image":
if isinstance(data, str):
if data.lower().endswith(".gif") and isfile(data):
data = TurtleScreen._image(data)
# else data assumed to be Photoimage
elif type_ == "compound":
data = []
else:
raise TurtleGraphicsError("There is no shape type %s" % type_)
self._data = data
def addcomponent(self, poly, fill, outline=None):
"""Add component to a shape of type compound.
Arguments: poly is a polygon, i. e. a tuple of number pairs.
fill is the fillcolor of the component,
outline is the outline color of the component.
        call (for a Shape object named s):
-- s.addcomponent(((0,0), (10,10), (-10,10)), "red", "blue")
Example:
>>> poly = ((0,0),(10,-5),(0,10),(-10,-5))
>>> s = Shape("compound")
>>> s.addcomponent(poly, "red", "blue")
### .. add more components and then use register_shape()
"""
if self._type != "compound":
raise TurtleGraphicsError("Cannot add component to %s Shape"
% self._type)
if outline is None:
outline = fill
self._data.append([poly, fill, outline])
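# Continuing the addcomponent docstring example above: a sketch of how the
# compound shape s would be put to use (Screen and Turtle are defined
# further down in this module; "myshape" is an arbitrary name):
#
#   >>> screen = Screen()
#   >>> screen.register_shape("myshape", s)
#   >>> t = Turtle()
#   >>> t.shape("myshape")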
class Tbuffer(object):
"""Ring buffer used as undobuffer for RawTurtle objects."""
def __init__(self, bufsize=10):
self.bufsize = bufsize
self.buffer = [[None]] * bufsize
self.ptr = -1
self.cumulate = False
def reset(self, bufsize=None):
if bufsize is None:
for i in range(self.bufsize):
self.buffer[i] = [None]
else:
self.bufsize = bufsize
self.buffer = [[None]] * bufsize
self.ptr = -1
def push(self, item):
if self.bufsize > 0:
if not self.cumulate:
self.ptr = (self.ptr + 1) % self.bufsize
self.buffer[self.ptr] = item
else:
self.buffer[self.ptr].append(item)
def pop(self):
if self.bufsize > 0:
item = self.buffer[self.ptr]
if item is None:
return None
else:
self.buffer[self.ptr] = [None]
self.ptr = (self.ptr - 1) % self.bufsize
return (item)
def nr_of_items(self):
return self.bufsize - self.buffer.count([None])
def __repr__(self):
return str(self.buffer) + " " + str(self.ptr)
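# A sketch of the ring-buffer behavior (bufsize 3 chosen for brevity):
#
#   >>> b = Tbuffer(3)
#   >>> for c in "abc": b.push(c)
#   >>> b.pop()
#   'c'
#   >>> b.push("d"); b.push("e"); b.push("f"); b.push("g")  # wraps around
#   >>> b.nr_of_items()
#   3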
class TurtleScreen(TurtleScreenBase):
"""Provides screen oriented methods like setbg etc.
Only relies upon the methods of TurtleScreenBase and NOT
upon components of the underlying graphics toolkit -
which is Tkinter in this case.
"""
_RUNNING = True
def __init__(self, cv, mode=_CFG["mode"],
colormode=_CFG["colormode"], delay=_CFG["delay"]):
self._shapes = {
"arrow" : Shape("polygon", ((-10,0), (10,0), (0,10))),
"turtle" : Shape("polygon", ((0,16), (-2,14), (-1,10), (-4,7),
(-7,9), (-9,8), (-6,5), (-7,1), (-5,-3), (-8,-6),
(-6,-8), (-4,-5), (0,-7), (4,-5), (6,-8), (8,-6),
(5,-3), (7,1), (6,5), (9,8), (7,9), (4,7), (1,10),
(2,14))),
"circle" : Shape("polygon", ((10,0), (9.51,3.09), (8.09,5.88),
(5.88,8.09), (3.09,9.51), (0,10), (-3.09,9.51),
(-5.88,8.09), (-8.09,5.88), (-9.51,3.09), (-10,0),
(-9.51,-3.09), (-8.09,-5.88), (-5.88,-8.09),
(-3.09,-9.51), (-0.00,-10.00), (3.09,-9.51),
(5.88,-8.09), (8.09,-5.88), (9.51,-3.09))),
"square" : Shape("polygon", ((10,-10), (10,10), (-10,10),
(-10,-10))),
"triangle" : Shape("polygon", ((10,-5.77), (0,11.55),
(-10,-5.77))),
"classic": Shape("polygon", ((0,0),(-5,-9),(0,-7),(5,-9))),
"blank" : Shape("image", self._blankimage())
}
self._bgpics = {"nopic" : ""}
TurtleScreenBase.__init__(self, cv)
self._mode = mode
self._delayvalue = delay
self._colormode = _CFG["colormode"]
self._keys = []
self.clear()
def clear(self):
"""Delete all drawings and all turtles from the TurtleScreen.
No argument.
Reset empty TurtleScreen to its initial state: white background,
no backgroundimage, no eventbindings and tracing on.
Example (for a TurtleScreen instance named screen):
screen.clear()
Note: this method is not available as function.
"""
self._delayvalue = _CFG["delay"]
self._colormode = _CFG["colormode"]
self._delete("all")
self._bgpic = self._createimage("")
self._bgpicname = "nopic"
self._tracing = 1
self._updatecounter = 0
self._turtles = []
self.bgcolor("white")
for btn in 1, 2, 3:
self.onclick(None, btn)
self.onkeypress(None)
for key in self._keys[:]:
self.onkey(None, key)
self.onkeypress(None, key)
Turtle._pen = None
def mode(self, mode=None):
"""Set turtle-mode ('standard', 'logo' or 'world') and perform reset.
Optional argument:
        mode -- one of the strings 'standard', 'logo' or 'world'
Mode 'standard' is compatible with turtle.py.
Mode 'logo' is compatible with most Logo-Turtle-Graphics.
        Mode 'world' uses user-defined 'worldcoordinates'. *Attention*: in
this mode angles appear distorted if x/y unit-ratio doesn't equal 1.
If mode is not given, return the current mode.
Mode Initial turtle heading positive angles
------------|-------------------------|-------------------
'standard' to the right (east) counterclockwise
'logo' upward (north) clockwise
Examples:
>>> mode('logo') # resets turtle heading to north
>>> mode()
'logo'
"""
if mode is None:
return self._mode
mode = mode.lower()
if mode not in ["standard", "logo", "world"]:
raise TurtleGraphicsError("No turtle-graphics-mode %s" % mode)
self._mode = mode
if mode in ["standard", "logo"]:
self._setscrollregion(-self.canvwidth//2, -self.canvheight//2,
self.canvwidth//2, self.canvheight//2)
self.xscale = self.yscale = 1.0
self.reset()
def setworldcoordinates(self, llx, lly, urx, ury):
"""Set up a user defined coordinate-system.
Arguments:
llx -- a number, x-coordinate of lower left corner of canvas
lly -- a number, y-coordinate of lower left corner of canvas
urx -- a number, x-coordinate of upper right corner of canvas
ury -- a number, y-coordinate of upper right corner of canvas
        Set up a user coordinate-system and switch to mode 'world' if necessary.
This performs a screen.reset. If mode 'world' is already active,
all drawings are redrawn according to the new coordinates.
        But ATTENTION: in user-defined coordinate systems angles may appear
distorted. (see Screen.mode())
Example (for a TurtleScreen instance named screen):
>>> screen.setworldcoordinates(-10,-0.5,50,1.5)
>>> for _ in range(36):
left(10)
forward(0.5)
"""
if self.mode() != "world":
self.mode("world")
xspan = float(urx - llx)
yspan = float(ury - lly)
wx, wy = self._window_size()
self.screensize(wx-20, wy-20)
oldxscale, oldyscale = self.xscale, self.yscale
self.xscale = self.canvwidth / xspan
self.yscale = self.canvheight / yspan
srx1 = llx * self.xscale
sry1 = -ury * self.yscale
srx2 = self.canvwidth + srx1
sry2 = self.canvheight + sry1
self._setscrollregion(srx1, sry1, srx2, sry2)
self._rescale(self.xscale/oldxscale, self.yscale/oldyscale)
self.update()
def register_shape(self, name, shape=None):
"""Adds a turtle shape to TurtleScreen's shapelist.
Arguments:
(1) name is the name of a gif-file and shape is None.
Installs the corresponding image shape.
!! Image-shapes DO NOT rotate when turning the turtle,
!! so they do not display the heading of the turtle!
(2) name is an arbitrary string and shape is a tuple
of pairs of coordinates. Installs the corresponding
polygon shape
(3) name is an arbitrary string and shape is a
(compound) Shape object. Installs the corresponding
compound shape.
To use a shape, you have to issue the command shape(shapename).
call: register_shape("turtle.gif")
--or: register_shape("tri", ((0,0), (10,10), (-10,10)))
Example (for a TurtleScreen instance named screen):
>>> screen.register_shape("triangle", ((5,-3),(0,5),(-5,-3)))
"""
if shape is None:
# image
if name.lower().endswith(".gif"):
shape = Shape("image", self._image(name))
else:
raise TurtleGraphicsError("Bad arguments for register_shape.\n"
+ "Use help(register_shape)" )
elif isinstance(shape, tuple):
shape = Shape("polygon", shape)
## else shape assumed to be Shape-instance
self._shapes[name] = shape
def _colorstr(self, color):
"""Return color string corresponding to args.
Argument may be a string or a tuple of three
numbers corresponding to actual colormode,
i.e. in the range 0<=n<=colormode.
If the argument doesn't represent a color,
an error is raised.
"""
if len(color) == 1:
color = color[0]
if isinstance(color, str):
if self._iscolorstring(color) or color == "":
return color
else:
raise TurtleGraphicsError("bad color string: %s" % str(color))
try:
r, g, b = color
except:
raise TurtleGraphicsError("bad color arguments: %s" % str(color))
if self._colormode == 1.0:
r, g, b = [round(255.0*x) for x in (r, g, b)]
if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):
raise TurtleGraphicsError("bad color sequence: %s" % str(color))
return "#%02x%02x%02x" % (r, g, b)
def _color(self, cstr):
if not cstr.startswith("#"):
return cstr
if len(cstr) == 7:
cl = [int(cstr[i:i+2], 16) for i in (1, 3, 5)]
elif len(cstr) == 4:
cl = [16*int(cstr[h], 16) for h in cstr[1:]]
else:
raise TurtleGraphicsError("bad colorstring: %s" % cstr)
return tuple([c * self._colormode/255 for c in cl])
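    # Taken together, _colorstr() and _color() convert between user color
    # specifications and Tk color strings. A sketch, assuming a TurtleScreen
    # instance named screen in the default colormode 1.0:
    #
    #   >>> screen._colorstr((1.0, 0.5, 0.0))
    #   '#ff8000'
    #   >>> screen._color('#ff8000')
    #   (1.0, 0.5019607843137255, 0.0)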
def colormode(self, cmode=None):
"""Return the colormode or set it to 1.0 or 255.
Optional argument:
cmode -- one of the values 1.0 or 255
r, g, b values of colortriples have to be in range 0..cmode.
Example (for a TurtleScreen instance named screen):
>>> screen.colormode()
1.0
>>> screen.colormode(255)
>>> turtle.pencolor(240,160,80)
"""
if cmode is None:
return self._colormode
if cmode == 1.0:
self._colormode = float(cmode)
elif cmode == 255:
self._colormode = int(cmode)
def reset(self):
"""Reset all Turtles on the Screen to their initial state.
No argument.
Example (for a TurtleScreen instance named screen):
>>> screen.reset()
"""
for turtle in self._turtles:
turtle._setmode(self._mode)
turtle.reset()
def turtles(self):
"""Return the list of turtles on the screen.
Example (for a TurtleScreen instance named screen):
>>> screen.turtles()
[<turtle.Turtle object at 0x00E11FB0>]
"""
return self._turtles
def bgcolor(self, *args):
"""Set or return backgroundcolor of the TurtleScreen.
Arguments (if given): a color string or three numbers
in the range 0..colormode or a 3-tuple of such numbers.
Example (for a TurtleScreen instance named screen):
>>> screen.bgcolor("orange")
>>> screen.bgcolor()
'orange'
>>> screen.bgcolor(0.5,0,0.5)
>>> screen.bgcolor()
'#800080'
"""
if args:
color = self._colorstr(args)
else:
color = None
color = self._bgcolor(color)
if color is not None:
color = self._color(color)
return color
def tracer(self, n=None, delay=None):
"""Turns turtle animation on/off and set delay for update drawings.
Optional arguments:
n -- nonnegative integer
delay -- nonnegative integer
If n is given, only each n-th regular screen update is really performed.
(Can be used to accelerate the drawing of complex graphics.)
Second arguments sets delay value (see RawTurtle.delay())
Example (for a TurtleScreen instance named screen):
>>> screen.tracer(8, 25)
>>> dist = 2
>>> for i in range(200):
fd(dist)
rt(90)
dist += 2
"""
if n is None:
return self._tracing
self._tracing = int(n)
self._updatecounter = 0
if delay is not None:
self._delayvalue = int(delay)
if self._tracing:
self.update()
def delay(self, delay=None):
""" Return or set the drawing delay in milliseconds.
Optional argument:
delay -- positive integer
Example (for a TurtleScreen instance named screen):
>>> screen.delay(15)
>>> screen.delay()
15
"""
if delay is None:
return self._delayvalue
self._delayvalue = int(delay)
def _incrementudc(self):
"Increment upadate counter."""
if not TurtleScreen._RUNNING:
TurtleScreen._RUNNNING = True
raise Terminator
if self._tracing > 0:
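# Count regular updates modulo the tracing value; a real redraw is
# performed only when the counter wraps back to 0 (see _update_data).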
self._updatecounter += 1
self._updatecounter %= self._tracing
def update(self):
"""Perform a TurtleScreen update.
"""
tracing = self._tracing
self._tracing = True
for t in self.turtles():
t._update_data()
t._drawturtle()
self._tracing = tracing
self._update()
def window_width(self):
""" Return the width of the turtle window.
Example (for a TurtleScreen instance named screen):
>>> screen.window_width()
640
"""
return self._window_size()[0]
def window_height(self):
""" Return the height of the turtle window.
Example (for a TurtleScreen instance named screen):
>>> screen.window_height()
480
"""
return self._window_size()[1]
def getcanvas(self):
"""Return the Canvas of this TurtleScreen.
No argument.
Example (for a Screen instance named screen):
>>> cv = screen.getcanvas()
>>> cv
<turtle.ScrolledCanvas instance at 0x010742D8>
"""
return self.cv
def getshapes(self):
"""Return a list of names of all currently available turtle shapes.
No argument.
Example (for a TurtleScreen instance named screen):
>>> screen.getshapes()
['arrow', 'blank', 'circle', ... , 'turtle']
"""
return sorted(self._shapes.keys())
def onclick(self, fun, btn=1, add=None):
"""Bind fun to mouse-click event on canvas.
Arguments:
fun -- a function with two arguments, the coordinates of the
clicked point on the canvas.
num -- the number of the mouse-button, defaults to 1
Example (for a TurtleScreen instance named screen
and a Turtle instance named turtle):
>>> screen.onclick(turtle.goto)
### Subsequently clicking into the TurtleScreen will
### make the turtle move to the clicked point.
>>> screen.onclick(None)
### event-binding will be removed
"""
self._onscreenclick(fun, btn, add)
def onkey(self, fun, key):
"""Bind fun to key-release event of key.
Arguments:
fun -- a function with no arguments
key -- a string: key (e.g. "a") or key-symbol (e.g. "space")
In order to be able to register key-events, TurtleScreen
must have focus. (See method listen.)
Example (for a TurtleScreen instance named screen
and a Turtle instance named turtle):
>>> def f():
fd(50)
lt(60)
>>> screen.onkey(f, "Up")
>>> screen.listen()
### Subsequently the turtle can be moved by
### repeatedly pressing the up-arrow key,
### consequently drawing a hexagon
"""
if fun is None:
if key in self._keys:
self._keys.remove(key)
elif key not in self._keys:
self._keys.append(key)
self._onkeyrelease(fun, key)
def onkeypress(self, fun, key=None):
"""Bind fun to key-press event of key if key is given,
or to any key-press-event if no key is given.
Arguments:
fun -- a function with no arguments
key -- a string: key (e.g. "a") or key-symbol (e.g. "space")
In order to be able to register key-events, TurtleScreen
must have focus. (See method listen.)
Example (for a TurtleScreen instance named screen
and a Turtle instance named turtle):
>>> def f():
fd(50)
>>> screen.onkey(f, "Up")
>>> screen.listen()
### Subsequently the turtle can be moved by
### repeatedly pressing the up-arrow key,
### or by keeping pressed the up-arrow key.
### consequently drawing a hexagon.
"""
if fun is None:
if key in self._keys:
self._keys.remove(key)
elif key is not None and key not in self._keys:
self._keys.append(key)
self._onkeypress(fun, key)
def listen(self, xdummy=None, ydummy=None):
"""Set focus on TurtleScreen (in order to collect key-events)
No arguments.
Dummy arguments are provided in order
to be able to pass listen to the onclick method.
Example (for a TurtleScreen instance named screen):
>>> screen.listen()
"""
self._listen()
def ontimer(self, fun, t=0):
"""Install a timer, which calls fun after t milliseconds.
Arguments:
fun -- a function with no arguments.
t -- a number >= 0
Example (for a TurtleScreen instance named screen):
>>> running = True
>>> def f():
if running:
fd(50)
lt(60)
screen.ontimer(f, 250)
>>> f() ### makes the turtle marching around
>>> running = False
"""
self._ontimer(fun, t)
def bgpic(self, picname=None):
"""Set background image or return name of current backgroundimage.
Optional argument:
picname -- a string, name of a gif-file or "nopic".
If picname is a filename, set the corresponding image as background.
If picname is "nopic", delete backgroundimage, if present.
If picname is None, return the filename of the current backgroundimage.
Example (for a TurtleScreen instance named screen):
>>> screen.bgpic()
'nopic'
>>> screen.bgpic("landscape.gif")
>>> screen.bgpic()
'landscape.gif'
"""
if picname is None:
return self._bgpicname
if picname not in self._bgpics:
self._bgpics[picname] = self._image(picname)
self._setbgpic(self._bgpic, self._bgpics[picname])
self._bgpicname = picname
def screensize(self, canvwidth=None, canvheight=None, bg=None):
"""Resize the canvas the turtles are drawing on.
Optional arguments:
canvwidth -- positive integer, new width of canvas in pixels
canvheight -- positive integer, new height of canvas in pixels
bg -- colorstring or color-tuple, new backgroundcolor
If no arguments are given, return current (canvaswidth, canvasheight)
Do not alter the drawing window. To observe hidden parts of
the canvas use the scrollbars. (Can make visible those parts
of a drawing, which were outside the canvas before!)
Example (for a Turtle instance named turtle):
>>> turtle.screensize(2000,1500)
### e. g. to search for an erroneously escaped turtle ;-)
"""
return self._resize(canvwidth, canvheight, bg)
onscreenclick = onclick
resetscreen = reset
clearscreen = clear
addshape = register_shape
onkeyrelease = onkey
class TNavigator(object):
"""Navigation part of the RawTurtle.
Implements methods for turtle movement.
"""
START_ORIENTATION = {
"standard": Vec2D(1.0, 0.0),
"world" : Vec2D(1.0, 0.0),
"logo" : Vec2D(0.0, 1.0) }
DEFAULT_MODE = "standard"
DEFAULT_ANGLEOFFSET = 0
DEFAULT_ANGLEORIENT = 1
def __init__(self, mode=DEFAULT_MODE):
self._angleOffset = self.DEFAULT_ANGLEOFFSET
self._angleOrient = self.DEFAULT_ANGLEORIENT
self._mode = mode
self.undobuffer = None
self.degrees()
self._mode = None
self._setmode(mode)
TNavigator.reset(self)
def reset(self):
"""reset turtle to its initial values
Will be overwritten by parent class
"""
self._position = Vec2D(0.0, 0.0)
self._orient = TNavigator.START_ORIENTATION[self._mode]
def _setmode(self, mode=None):
"""Set turtle-mode to 'standard', 'world' or 'logo'.
"""
if mode is None:
return self._mode
if mode not in ["standard", "logo", "world"]:
return
self._mode = mode
if mode in ["standard", "world"]:
self._angleOffset = 0
self._angleOrient = 1
else: # mode == "logo":
self._angleOffset = self._fullcircle/4.
self._angleOrient = -1
def _setDegreesPerAU(self, fullcircle):
"""Helper function for degrees() and radians()"""
self._fullcircle = fullcircle
self._degreesPerAU = 360/fullcircle
if self._mode == "standard":
self._angleOffset = 0
else:
self._angleOffset = fullcircle/4.
def degrees(self, fullcircle=360.0):
""" Set angle measurement units to degrees.
Optional argument:
fullcircle - a number
Set angle measurement units, i. e. set number
of 'degrees' for a full circle. Default value is
360 degrees.
Example (for a Turtle instance named turtle):
>>> turtle.left(90)
>>> turtle.heading()
90
Change angle measurement unit to grad (also known as gon,
grade, or gradian and equals 1/100-th of the right angle.)
>>> turtle.degrees(400.0)
>>> turtle.heading()
100
"""
self._setDegreesPerAU(fullcircle)
def radians(self):
""" Set the angle measurement units to radians.
No arguments.
Example (for a Turtle instance named turtle):
>>> turtle.heading()
90
>>> turtle.radians()
>>> turtle.heading()
1.5707963267948966
"""
self._setDegreesPerAU(2*math.pi)
def _go(self, distance):
"""move turtle forward by specified distance"""
ende = self._position + self._orient * distance
self._goto(ende)
def _rotate(self, angle):
"""Turn turtle counterclockwise by specified angle if angle > 0."""
angle *= self._degreesPerAU
self._orient = self._orient.rotate(angle)
def _goto(self, end):
"""move turtle to position end."""
self._position = end
def forward(self, distance):
"""Move the turtle forward by the specified distance.
Aliases: forward | fd
Argument:
distance -- a number (integer or float)
Move the turtle forward by the specified distance, in the direction
the turtle is headed.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 0.00)
>>> turtle.forward(25)
>>> turtle.position()
(25.00,0.00)
>>> turtle.forward(-75)
>>> turtle.position()
(-50.00,0.00)
"""
self._go(distance)
def back(self, distance):
"""Move the turtle backward by distance.
Aliases: back | backward | bk
Argument:
distance -- a number
Move the turtle backward by distance, opposite to the direction the
turtle is headed. Do not change the turtle's heading.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 0.00)
>>> turtle.backward(30)
>>> turtle.position()
(-30.00, 0.00)
"""
self._go(-distance)
def right(self, angle):
"""Turn turtle right by angle units.
Aliases: right | rt
Argument:
angle -- a number (integer or float)
Turn turtle right by angle units. (Units are by default degrees,
but can be set via the degrees() and radians() functions.)
Angle orientation depends on mode. (See this.)
Example (for a Turtle instance named turtle):
>>> turtle.heading()
22.0
>>> turtle.right(45)
>>> turtle.heading()
337.0
"""
self._rotate(-angle)
def left(self, angle):
"""Turn turtle left by angle units.
Aliases: left | lt
Argument:
angle -- a number (integer or float)
Turn turtle left by angle units. (Units are by default degrees,
but can be set via the degrees() and radians() functions.)
Angle orientation depends on mode. (See this.)
Example (for a Turtle instance named turtle):
>>> turtle.heading()
22.0
>>> turtle.left(45)
>>> turtle.heading()
67.0
"""
self._rotate(angle)
def pos(self):
"""Return the turtle's current location (x,y), as a Vec2D-vector.
Aliases: pos | position
No arguments.
Example (for a Turtle instance named turtle):
>>> turtle.pos()
(0.00, 240.00)
"""
return self._position
def xcor(self):
""" Return the turtle's x coordinate.
No arguments.
Example (for a Turtle instance named turtle):
>>> reset()
>>> turtle.left(60)
>>> turtle.forward(100)
>>> print(turtle.xcor())
50.0
"""
return self._position[0]
def ycor(self):
""" Return the turtle's y coordinate
No arguments.
Example (for a Turtle instance named turtle):
>>> reset()
>>> turtle.left(60)
>>> turtle.forward(100)
>>> print(turtle.ycor())
86.6025403784
"""
return self._position[1]
def goto(self, x, y=None):
"""Move turtle to an absolute position.
Aliases: setpos | setposition | goto
Arguments:
x -- a number or a pair/vector of numbers
y -- a number or None
call: goto(x, y) # two coordinates
--or: goto((x, y)) # a pair (tuple) of coordinates
--or: goto(vec) # e.g. as returned by pos()
Move turtle to an absolute position. If the pen is down,
a line will be drawn. The turtle's orientation does not change.
Example (for a Turtle instance named turtle):
>>> tp = turtle.pos()
>>> tp
(0.00, 0.00)
>>> turtle.setpos(60,30)
>>> turtle.pos()
(60.00,30.00)
>>> turtle.setpos((20,80))
>>> turtle.pos()
(20.00,80.00)
>>> turtle.setpos(tp)
>>> turtle.pos()
(0.00,0.00)
"""
if y is None:
self._goto(Vec2D(*x))
else:
self._goto(Vec2D(x, y))
def home(self):
"""Move turtle to the origin - coordinates (0,0).
No arguments.
Move turtle to the origin - coordinates (0,0) and set its
heading to its start-orientation (which depends on mode).
Example (for a Turtle instance named turtle):
>>> turtle.home()
"""
self.goto(0, 0)
self.setheading(0)
def setx(self, x):
"""Set the turtle's first coordinate to x
Argument:
x -- a number (integer or float)
Set the turtle's first coordinate to x, leave second coordinate
unchanged.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 240.00)
>>> turtle.setx(10)
>>> turtle.position()
(10.00, 240.00)
"""
self._goto(Vec2D(x, self._position[1]))
def sety(self, y):
"""Set the turtle's second coordinate to y
Argument:
y -- a number (integer or float)
Set the turtle's second coordinate to y, first coordinate remains
unchanged.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 40.00)
>>> turtle.sety(-10)
>>> turtle.position()
(0.00, -10.00)
"""
self._goto(Vec2D(self._position[0], y))
def distance(self, x, y=None):
"""Return the distance from the turtle to (x,y) in turtle step units.
Arguments:
x -- a number or a pair/vector of numbers or a turtle instance
y -- a number or None
call: distance(x, y) # two coordinates
--or: distance((x, y)) # a pair (tuple) of coordinates
--or: distance(vec) # e.g. as returned by pos()
--or: distance(mypen) # where mypen is another turtle
Example (for a Turtle instance named turtle):
>>> turtle.pos()
(0.00, 0.00)
>>> turtle.distance(30,40)
50.0
>>> pen = Turtle()
>>> pen.forward(77)
>>> turtle.distance(pen)
77.0
"""
if y is not None:
pos = Vec2D(x, y)
if isinstance(x, Vec2D):
pos = x
elif isinstance(x, tuple):
pos = Vec2D(*x)
elif isinstance(x, TNavigator):
pos = x._position
return abs(pos - self._position)
def towards(self, x, y=None):
"""Return the angle of the line from the turtle's position to (x, y).
Arguments:
x -- a number or a pair/vector of numbers or a turtle instance
y -- a number or None
call: towards(x, y) # two coordinates
--or: towards((x, y)) # a pair (tuple) of coordinates
--or: towards(vec) # e.g. as returned by pos()
--or: towards(mypen) # where mypen is another turtle
Return the angle, between the line from turtle-position to position
specified by x, y and the turtle's start orientation. (Depends on
modes - "standard" or "logo")
Example (for a Turtle instance named turtle):
>>> turtle.pos()
(10.00, 10.00)
>>> turtle.towards(0,0)
225.0
"""
if y is not None:
pos = Vec2D(x, y)
if isinstance(x, Vec2D):
pos = x
elif isinstance(x, tuple):
pos = Vec2D(*x)
elif isinstance(x, TNavigator):
pos = x._position
x, y = pos - self._position
result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0
result /= self._degreesPerAU
return (self._angleOffset + self._angleOrient*result) % self._fullcircle
def heading(self):
""" Return the turtle's current heading.
No arguments.
Example (for a Turtle instance named turtle):
>>> turtle.left(67)
>>> turtle.heading()
67.0
"""
x, y = self._orient
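# atan2 yields the heading in radians; convert to degrees and round to
# 10 decimals to suppress floating-point noise, then map the result into
# the current angle unit and mode (standard vs. logo).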
result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0
result /= self._degreesPerAU
return (self._angleOffset + self._angleOrient*result) % self._fullcircle
def setheading(self, to_angle):
"""Set the orientation of the turtle to to_angle.
Aliases: setheading | seth
Argument:
to_angle -- a number (integer or float)
Set the orientation of the turtle to to_angle.
Here are some common directions in degrees:
standard mode:       logo mode:
--------------------|--------------------
  0 - east             0 - north
 90 - north           90 - east
180 - west           180 - south
270 - south          270 - west
Example (for a Turtle instance named turtle):
>>> turtle.setheading(90)
>>> turtle.heading()
90
"""
angle = (to_angle - self.heading())*self._angleOrient
full = self._fullcircle
angle = (angle+full/2.)%full - full/2.
self._rotate(angle)
def circle(self, radius, extent = None, steps = None):
""" Draw a circle with given radius.
Arguments:
radius -- a number
extent (optional) -- a number
steps (optional) -- an integer
Draw a circle with given radius. The center is radius units left
of the turtle; extent - an angle - determines which part of the
circle is drawn. If extent is not given, draw the entire circle.
If extent is not a full circle, one endpoint of the arc is the
current pen position. Draw the arc in counterclockwise direction
if radius is positive, otherwise in clockwise direction. Finally
the direction of the turtle is changed by the amount of extent.
As the circle is approximated by an inscribed regular polygon,
steps determines the number of steps to use. If not given,
it will be calculated automatically. May be used to draw regular
polygons.
call: circle(radius) # full circle
--or: circle(radius, extent) # arc
--or: circle(radius, extent, steps)
--or: circle(radius, steps=6) # 6-sided polygon
Example (for a Turtle instance named turtle):
>>> turtle.circle(50)
>>> turtle.circle(120, 180) # semicircle
"""
if self.undobuffer:
self.undobuffer.push(["seq"])
self.undobuffer.cumulate = True
speed = self.speed()
if extent is None:
extent = self._fullcircle
if steps is None:
frac = abs(extent)/self._fullcircle
steps = 1+int(min(11+abs(radius)/6.0, 59.0)*frac)
w = 1.0 * extent / steps
w2 = 0.5 * w
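# Each polygon segment is a chord of the circle: l = 2*r*sin(w2), with
# w2 converted from the current angle unit to radians via _degreesPerAU.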
l = 2.0 * radius * math.sin(w2*math.pi/180.0*self._degreesPerAU)
if radius < 0:
l, w, w2 = -l, -w, -w2
tr = self._tracer()
dl = self._delay()
if speed == 0:
self._tracer(0, 0)
else:
self.speed(0)
self._rotate(w2)
for i in range(steps):
self.speed(speed)
self._go(l)
self.speed(0)
self._rotate(w)
self._rotate(-w2)
if speed == 0:
self._tracer(tr, dl)
self.speed(speed)
if self.undobuffer:
self.undobuffer.cumulate = False
## three dummy methods to be implemented by child class:
def speed(self, s=0):
"""dummy method - to be overwritten by child class"""
def _tracer(self, a=None, b=None):
"""dummy method - to be overwritten by child class"""
def _delay(self, n=None):
"""dummy method - to be overwritten by child class"""
fd = forward
bk = back
backward = back
rt = right
lt = left
position = pos
setpos = goto
setposition = goto
seth = setheading
class TPen(object):
"""Drawing part of the RawTurtle.
Implements drawing properties.
"""
def __init__(self, resizemode=_CFG["resizemode"]):
self._resizemode = resizemode # or "user" or "noresize"
self.undobuffer = None
TPen._reset(self)
def _reset(self, pencolor=_CFG["pencolor"],
fillcolor=_CFG["fillcolor"]):
self._pensize = 1
self._shown = True
self._pencolor = pencolor
self._fillcolor = fillcolor
self._drawing = True
self._speed = 3
self._stretchfactor = (1., 1.)
self._shearfactor = 0.
self._tilt = 0.
self._shapetrafo = (1., 0., 0., 1.)
self._outlinewidth = 1
def resizemode(self, rmode=None):
"""Set resizemode to one of the values: "auto", "user", "noresize".
(Optional) Argument:
rmode -- one of the strings "auto", "user", "noresize"
Different resizemodes have the following effects:
- "auto" adapts the appearance of the turtle
corresponding to the value of pensize.
- "user" adapts the appearance of the turtle according to the
values of stretchfactor and outlinewidth (outline),
which are set by shapesize()
- "noresize" no adaption of the turtle's appearance takes place.
If no argument is given, return current resizemode.
resizemode("user") is called by a call of shapesize with arguments.
Examples (for a Turtle instance named turtle):
>>> turtle.resizemode("noresize")
>>> turtle.resizemode()
'noresize'
"""
if rmode is None:
return self._resizemode
rmode = rmode.lower()
if rmode in ["auto", "user", "noresize"]:
self.pen(resizemode=rmode)
def pensize(self, width=None):
"""Set or return the line thickness.
Aliases: pensize | width
Argument:
width -- positive number
Set the line thickness to width or return it. If resizemode is set
to "auto" and turtleshape is a polygon, that polygon is drawn with
the same line thickness. If no argument is given, current pensize
is returned.
Example (for a Turtle instance named turtle):
>>> turtle.pensize()
1
>>> turtle.pensize(10) # from here on lines of width 10 are drawn
"""
if width is None:
return self._pensize
self.pen(pensize=width)
def penup(self):
"""Pull the pen up -- no drawing when moving.
Aliases: penup | pu | up
No argument
Example (for a Turtle instance named turtle):
>>> turtle.penup()
"""
if not self._drawing:
return
self.pen(pendown=False)
def pendown(self):
"""Pull the pen down -- drawing when moving.
Aliases: pendown | pd | down
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.pendown()
"""
if self._drawing:
return
self.pen(pendown=True)
def isdown(self):
"""Return True if pen is down, False if it's up.
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.penup()
>>> turtle.isdown()
False
>>> turtle.pendown()
>>> turtle.isdown()
True
"""
return self._drawing
def speed(self, speed=None):
""" Return or set the turtle's speed.
Optional argument:
speed -- an integer in the range 0..10 or a speedstring (see below)
Set the turtle's speed to an integer value in the range 0 .. 10.
If no argument is given: return current speed.
If input is a number greater than 10 or smaller than 0.5,
speed is set to 0.
Speedstrings are mapped to speedvalues in the following way:
'fastest' : 0
'fast' : 10
'normal' : 6
'slow' : 3
'slowest' : 1
speeds from 1 to 10 enforce increasingly faster animation of
line drawing and turtle turning.
Attention:
speed = 0 : *no* animation takes place. forward/back makes turtle jump
and likewise left/right make the turtle turn instantly.
Example (for a Turtle instance named turtle):
>>> turtle.speed(3)
"""
speeds = {'fastest':0, 'fast':10, 'normal':6, 'slow':3, 'slowest':1 }
if speed is None:
return self._speed
if speed in speeds:
speed = speeds[speed]
elif 0.5 < speed < 10.5:
speed = int(round(speed))
else:
speed = 0
self.pen(speed=speed)
def color(self, *args):
"""Return or set the pencolor and fillcolor.
Arguments:
Several input formats are allowed.
They use 0, 1, 2, or 3 arguments as follows:
color()
Return the current pencolor and the current fillcolor
as a pair of color specification strings as are returned
by pencolor and fillcolor.
color(colorstring), color((r,g,b)), color(r,g,b)
inputs as in pencolor, set both, fillcolor and pencolor,
to the given value.
color(colorstring1, colorstring2),
color((r1,g1,b1), (r2,g2,b2))
equivalent to pencolor(colorstring1) and fillcolor(colorstring2)
and analogously, if the other input format is used.
If turtleshape is a polygon, outline and interior of that polygon
are drawn with the newly set colors.
For more info see: pencolor, fillcolor
Example (for a Turtle instance named turtle):
>>> turtle.color('red', 'green')
>>> turtle.color()
('red', 'green')
>>> colormode(255)
>>> color((40, 80, 120), (160, 200, 240))
>>> color()
('#285078', '#a0c8f0')
"""
if args:
l = len(args)
if l == 1:
pcolor = fcolor = args[0]
elif l == 2:
pcolor, fcolor = args
elif l == 3:
pcolor = fcolor = args
pcolor = self._colorstr(pcolor)
fcolor = self._colorstr(fcolor)
self.pen(pencolor=pcolor, fillcolor=fcolor)
else:
return self._color(self._pencolor), self._color(self._fillcolor)
def pencolor(self, *args):
""" Return or set the pencolor.
Arguments:
Four input formats are allowed:
- pencolor()
Return the current pencolor as color specification string,
possibly in hex-number format (see example).
May be used as input to another color/pencolor/fillcolor call.
- pencolor(colorstring)
s is a Tk color specification string, such as "red" or "yellow"
- pencolor((r, g, b))
*a tuple* of r, g, and b, which represent an RGB color,
and each of r, g, and b are in the range 0..colormode,
where colormode is either 1.0 or 255
- pencolor(r, g, b)
r, g, and b represent an RGB color, and each of r, g, and b
are in the range 0..colormode
If turtleshape is a polygon, the outline of that polygon is drawn
with the newly set pencolor.
Example (for a Turtle instance named turtle):
>>> turtle.pencolor('brown')
>>> tup = (0.2, 0.8, 0.55)
>>> turtle.pencolor(tup)
>>> turtle.pencolor()
'#33cc8c'
"""
if args:
color = self._colorstr(args)
if color == self._pencolor:
return
self.pen(pencolor=color)
else:
return self._color(self._pencolor)
def fillcolor(self, *args):
""" Return or set the fillcolor.
Arguments:
Four input formats are allowed:
- fillcolor()
Return the current fillcolor as color specification string,
possibly in hex-number format (see example).
May be used as input to another color/pencolor/fillcolor call.
- fillcolor(colorstring)
s is a Tk color specification string, such as "red" or "yellow"
- fillcolor((r, g, b))
*a tuple* of r, g, and b, which represent an RGB color,
and each of r, g, and b are in the range 0..colormode,
where colormode is either 1.0 or 255
- fillcolor(r, g, b)
r, g, and b represent an RGB color, and each of r, g, and b
are in the range 0..colormode
If turtleshape is a polygon, the interior of that polygon is drawn
with the newly set fillcolor.
Example (for a Turtle instance named turtle):
>>> turtle.fillcolor('violet')
>>> col = turtle.pencolor()
>>> turtle.fillcolor(col)
>>> turtle.fillcolor(0, .5, 0)
"""
if args:
color = self._colorstr(args)
if color == self._fillcolor:
return
self.pen(fillcolor=color)
else:
return self._color(self._fillcolor)
def showturtle(self):
"""Makes the turtle visible.
Aliases: showturtle | st
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.hideturtle()
>>> turtle.showturtle()
"""
self.pen(shown=True)
def hideturtle(self):
"""Makes the turtle invisible.
Aliases: hideturtle | ht
No argument.
It's a good idea to do this while you're in the
middle of a complicated drawing, because hiding
the turtle speeds up the drawing observably.
Example (for a Turtle instance named turtle):
>>> turtle.hideturtle()
"""
self.pen(shown=False)
def isvisible(self):
"""Return True if the Turtle is shown, False if it's hidden.
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.hideturtle()
>>> print(turtle.isvisible())
False
"""
return self._shown
def pen(self, pen=None, **pendict):
"""Return or set the pen's attributes.
Arguments:
pen -- a dictionary with some or all of the below listed keys.
**pendict -- one or more keyword-arguments with the below
listed keys as keywords.
Return or set the pen's attributes in a 'pen-dictionary'
with the following key/value pairs:
"shown" : True/False
"pendown" : True/False
"pencolor" : color-string or color-tuple
"fillcolor" : color-string or color-tuple
"pensize" : positive number
"speed" : number in range 0..10
"resizemode" : "auto" or "user" or "noresize"
"stretchfactor": (positive number, positive number)
"shearfactor": number
"outline" : positive number
"tilt" : number
This dictionary can be used as argument for a subsequent
pen()-call to restore the former pen-state. Moreover one
or more of these attributes can be provided as keyword-arguments.
This can be used to set several pen attributes in one statement.
Examples (for a Turtle instance named turtle):
>>> turtle.pen(fillcolor="black", pencolor="red", pensize=10)
>>> turtle.pen()
{'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
'pencolor': 'red', 'pendown': True, 'fillcolor': 'black',
'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
>>> penstate=turtle.pen()
>>> turtle.color("yellow","")
>>> turtle.penup()
>>> turtle.pen()
{'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
'pencolor': 'yellow', 'pendown': False, 'fillcolor': '',
'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
>>> p.pen(penstate, fillcolor="green")
>>> p.pen()
{'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
'pencolor': 'red', 'pendown': True, 'fillcolor': 'green',
'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
"""
_pd = {"shown" : self._shown,
"pendown" : self._drawing,
"pencolor" : self._pencolor,
"fillcolor" : self._fillcolor,
"pensize" : self._pensize,
"speed" : self._speed,
"resizemode" : self._resizemode,
"stretchfactor" : self._stretchfactor,
"shearfactor" : self._shearfactor,
"outline" : self._outlinewidth,
"tilt" : self._tilt
}
if not (pen or pendict):
return _pd
if isinstance(pen, dict):
p = pen
else:
p = {}
p.update(pendict)
_p_buf = {}
for key in p:
_p_buf[key] = _pd[key]
if self.undobuffer:
self.undobuffer.push(("pen", _p_buf))
newLine = False
if "pendown" in p:
if self._drawing != p["pendown"]:
newLine = True
if "pencolor" in p:
if isinstance(p["pencolor"], tuple):
p["pencolor"] = self._colorstr((p["pencolor"],))
if self._pencolor != p["pencolor"]:
newLine = True
if "pensize" in p:
if self._pensize != p["pensize"]:
newLine = True
if newLine:
self._newLine()
if "pendown" in p:
self._drawing = p["pendown"]
if "pencolor" in p:
self._pencolor = p["pencolor"]
if "pensize" in p:
self._pensize = p["pensize"]
if "fillcolor" in p:
if isinstance(p["fillcolor"], tuple):
p["fillcolor"] = self._colorstr((p["fillcolor"],))
self._fillcolor = p["fillcolor"]
if "speed" in p:
self._speed = p["speed"]
if "resizemode" in p:
self._resizemode = p["resizemode"]
if "stretchfactor" in p:
sf = p["stretchfactor"]
if isinstance(sf, (int, float)):
sf = (sf, sf)
self._stretchfactor = sf
if "shearfactor" in p:
self._shearfactor = p["shearfactor"]
if "outline" in p:
self._outlinewidth = p["outline"]
if "shown" in p:
self._shown = p["shown"]
if "tilt" in p:
self._tilt = p["tilt"]
if "stretchfactor" in p or "tilt" in p or "shearfactor" in p:
scx, scy = self._stretchfactor
shf = self._shearfactor
sa, ca = math.sin(self._tilt), math.cos(self._tilt)
self._shapetrafo = ( scx*ca, scy*(shf*ca + sa),
-scx*sa, scy*(ca - shf*sa))
self._update()
## three dummy methods to be implemented by child class:
def _newLine(self, usePos = True):
"""dummy method - to be overwritten by child class"""
def _update(self, count=True, forced=False):
"""dummy method - to be overwritten by child class"""
def _color(self, args):
"""dummy method - to be overwritten by child class"""
def _colorstr(self, args):
"""dummy method - to be overwritten by child class"""
width = pensize
up = penup
pu = penup
pd = pendown
down = pendown
st = showturtle
ht = hideturtle
class _TurtleImage(object):
"""Helper class: Datatype to store Turtle attributes
"""
def __init__(self, screen, shapeIndex):
self.screen = screen
self._type = None
self._setshape(shapeIndex)
def _setshape(self, shapeIndex):
screen = self.screen
self.shapeIndex = shapeIndex
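# Reuse the existing canvas item when old and new shape are both
# "polygon" or both "image"; otherwise delete the old item(s) and
# create fresh ones below.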
if self._type == "polygon" == screen._shapes[shapeIndex]._type:
return
if self._type == "image" == screen._shapes[shapeIndex]._type:
return
if self._type in ["image", "polygon"]:
screen._delete(self._item)
elif self._type == "compound":
for item in self._item:
screen._delete(item)
self._type = screen._shapes[shapeIndex]._type
if self._type == "polygon":
self._item = screen._createpoly()
elif self._type == "image":
self._item = screen._createimage(screen._shapes["blank"]._data)
elif self._type == "compound":
self._item = [screen._createpoly() for item in
screen._shapes[shapeIndex]._data]
class RawTurtle(TPen, TNavigator):
"""Animation part of the RawTurtle.
Puts RawTurtle upon a TurtleScreen and provides tools for
its animation.
"""
screens = []
def __init__(self, canvas=None,
shape=_CFG["shape"],
undobuffersize=_CFG["undobuffersize"],
visible=_CFG["visible"]):
if isinstance(canvas, _Screen):
self.screen = canvas
elif isinstance(canvas, TurtleScreen):
if canvas not in RawTurtle.screens:
RawTurtle.screens.append(canvas)
self.screen = canvas
elif isinstance(canvas, (ScrolledCanvas, Canvas)):
for screen in RawTurtle.screens:
if screen.cv == canvas:
self.screen = screen
break
else:
self.screen = TurtleScreen(canvas)
RawTurtle.screens.append(self.screen)
else:
raise TurtleGraphicsError("bad cavas argument %s" % canvas)
screen = self.screen
TNavigator.__init__(self, screen.mode())
TPen.__init__(self)
screen._turtles.append(self)
self.drawingLineItem = screen._createline()
self.turtle = _TurtleImage(screen, shape)
self._poly = None
self._creatingPoly = False
self._fillitem = self._fillpath = None
self._shown = visible
self._hidden_from_screen = False
self.currentLineItem = screen._createline()
self.currentLine = [self._position]
self.items = [self.currentLineItem]
self.stampItems = []
self._undobuffersize = undobuffersize
self.undobuffer = Tbuffer(undobuffersize)
self._update()
def reset(self):
"""Delete the turtle's drawings and restore its default values.
No argument.
Delete the turtle's drawings from the screen, re-center the turtle
and set variables to the default values.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00,-22.00)
>>> turtle.heading()
100.0
>>> turtle.reset()
>>> turtle.position()
(0.00,0.00)
>>> turtle.heading()
0.0
"""
TNavigator.reset(self)
TPen._reset(self)
self._clear()
self._drawturtle()
self._update()
def setundobuffer(self, size):
"""Set or disable undobuffer.
Argument:
size -- an integer or None
If size is an integer, an empty undobuffer of given size is installed.
Size gives the maximum number of turtle-actions that can be undone
by the undo() function.
If size is None, no undobuffer is present.
Example (for a Turtle instance named turtle):
>>> turtle.setundobuffer(42)
"""
if size is None:
self.undobuffer = None
else:
self.undobuffer = Tbuffer(size)
def undobufferentries(self):
"""Return count of entries in the undobuffer.
No argument.
Example (for a Turtle instance named turtle):
>>> while undobufferentries():
undo()
"""
if self.undobuffer is None:
return 0
return self.undobuffer.nr_of_items()
def _clear(self):
"""Delete all of pen's drawings"""
self._fillitem = self._fillpath = None
for item in self.items:
self.screen._delete(item)
self.currentLineItem = self.screen._createline()
self.currentLine = []
if self._drawing:
self.currentLine.append(self._position)
self.items = [self.currentLineItem]
self.clearstamps()
self.setundobuffer(self._undobuffersize)
def clear(self):
"""Delete the turtle's drawings from the screen. Do not move turtle.
No arguments.
Delete the turtle's drawings from the screen. Do not move turtle.
State and position of the turtle as well as drawings of other
turtles are not affected.
Examples (for a Turtle instance named turtle):
>>> turtle.clear()
"""
self._clear()
self._update()
def _update_data(self):
self.screen._incrementudc()
if self.screen._updatecounter != 0:
return
if len(self.currentLine)>1:
self.screen._drawline(self.currentLineItem, self.currentLine,
self._pencolor, self._pensize)
def _update(self):
"""Perform a Turtle-data update.
"""
screen = self.screen
if screen._tracing == 0:
return
elif screen._tracing == 1:
self._update_data()
self._drawturtle()
screen._update() # TurtleScreenBase
screen._delay(screen._delayvalue) # TurtleScreenBase
else:
self._update_data()
if screen._updatecounter == 0:
for t in screen.turtles():
t._drawturtle()
screen._update()
def _tracer(self, flag=None, delay=None):
"""Turns turtle animation on/off and set delay for update drawings.
Optional arguments:
n -- nonnegative integer
delay -- nonnegative integer
If n is given, only each n-th regular screen update is really performed.
(Can be used to accelerate the drawing of complex graphics.)
Second arguments sets delay value (see RawTurtle.delay())
Example (for a Turtle instance named turtle):
>>> turtle.tracer(8, 25)
>>> dist = 2
>>> for i in range(200):
turtle.fd(dist)
turtle.rt(90)
dist += 2
"""
return self.screen.tracer(flag, delay)
def _color(self, args):
return self.screen._color(args)
def _colorstr(self, args):
return self.screen._colorstr(args)
def _cc(self, args):
"""Convert colortriples to hexstrings.
"""
if isinstance(args, str):
return args
try:
r, g, b = args
except:
raise TurtleGraphicsError("bad color arguments: %s" % str(args))
if self.screen._colormode == 1.0:
r, g, b = [round(255.0*x) for x in (r, g, b)]
if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):
raise TurtleGraphicsError("bad color sequence: %s" % str(args))
return "#%02x%02x%02x" % (r, g, b)
def clone(self):
"""Create and return a clone of the turtle.
No argument.
Create and return a clone of the turtle with same position, heading
and turtle properties.
Example (for a Turtle instance named mick):
mick = Turtle()
joe = mick.clone()
"""
screen = self.screen
self._newLine(self._drawing)
turtle = self.turtle
self.screen = None
self.turtle = None # to make self deepcopy-able
q = deepcopy(self)
self.screen = screen
self.turtle = turtle
q.screen = screen
q.turtle = _TurtleImage(screen, self.turtle.shapeIndex)
screen._turtles.append(q)
ttype = screen._shapes[self.turtle.shapeIndex]._type
if ttype == "polygon":
q.turtle._item = screen._createpoly()
elif ttype == "image":
q.turtle._item = screen._createimage(screen._shapes["blank"]._data)
elif ttype == "compound":
q.turtle._item = [screen._createpoly() for item in
screen._shapes[self.turtle.shapeIndex]._data]
q.currentLineItem = screen._createline()
q._update()
return q
def shape(self, name=None):
"""Set turtle shape to shape with given name / return current shapename.
Optional argument:
name -- a string, which is a valid shapename
Set turtle shape to shape with given name or, if name is not given,
return name of current shape.
Shape with name must exist in the TurtleScreen's shape dictionary.
Initially there are the following polygon shapes:
'arrow', 'turtle', 'circle', 'square', 'triangle', 'classic'.
To learn about how to deal with shapes see Screen-method register_shape.
Example (for a Turtle instance named turtle):
>>> turtle.shape()
'arrow'
>>> turtle.shape("turtle")
>>> turtle.shape()
'turtle'
"""
if name is None:
return self.turtle.shapeIndex
if not name in self.screen.getshapes():
raise TurtleGraphicsError("There is no shape named %s" % name)
self.turtle._setshape(name)
self._update()
def shapesize(self, stretch_wid=None, stretch_len=None, outline=None):
"""Set/return turtle's stretchfactors/outline. Set resizemode to "user".
Optional arguments:
stretch_wid : positive number
stretch_len : positive number
outline : positive number
Return or set the pen's attributes x/y-stretchfactors and/or outline.
Set resizemode to "user".
If and only if resizemode is set to "user", the turtle will be displayed
stretched according to its stretchfactors:
stretch_wid is stretchfactor perpendicular to orientation
stretch_len is stretchfactor in direction of turtles orientation.
outline determines the width of the shape's outline.
Examples (for a Turtle instance named turtle):
>>> turtle.resizemode("user")
>>> turtle.shapesize(5, 5, 12)
>>> turtle.shapesize(outline=8)
"""
if stretch_wid is stretch_len is outline is None:
stretch_wid, stretch_len = self._stretchfactor
return stretch_wid, stretch_len, self._outlinewidth
if stretch_wid == 0 or stretch_len == 0:
raise TurtleGraphicsError("stretch_wid/stretch_len must not be zero")
if stretch_wid is not None:
if stretch_len is None:
stretchfactor = stretch_wid, stretch_wid
else:
stretchfactor = stretch_wid, stretch_len
elif stretch_len is not None:
stretchfactor = self._stretchfactor[0], stretch_len
else:
stretchfactor = self._stretchfactor
if outline is None:
outline = self._outlinewidth
self.pen(resizemode="user",
stretchfactor=stretchfactor, outline=outline)
def shearfactor(self, shear=None):
"""Set or return the current shearfactor.
Optional argument: shear -- number, tangent of the shear angle
Shear the turtleshape according to the given shearfactor shear,
which is the tangent of the shear angle. DO NOT change the
turtle's heading (direction of movement).
If shear is not given: return the current shearfactor, i. e. the
tangent of the shear angle, by which lines parallel to the
heading of the turtle are sheared.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.shearfactor(0.5)
>>> turtle.shearfactor()
0.5
"""
if shear is None:
return self._shearfactor
self.pen(resizemode="user", shearfactor=shear)
def settiltangle(self, angle):
"""Rotate the turtleshape to point in the specified direction
Argument: angle -- number
Rotate the turtleshape to point in the direction specified by angle,
regardless of its current tilt-angle. DO NOT change the turtle's
heading (direction of movement).
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.settiltangle(45)
>>> stamp()
>>> turtle.fd(50)
>>> turtle.settiltangle(-45)
>>> stamp()
>>> turtle.fd(50)
"""
tilt = -angle * self._degreesPerAU * self._angleOrient
tilt = (tilt * math.pi / 180.0) % (2*math.pi)
self.pen(resizemode="user", tilt=tilt)
def tiltangle(self, angle=None):
"""Set or return the current tilt-angle.
Optional argument: angle -- number
Rotate the turtleshape to point in the direction specified by angle,
regardless of its current tilt-angle. DO NOT change the turtle's
heading (direction of movement).
If angle is not given: return the current tilt-angle, i. e. the angle
between the orientation of the turtleshape and the heading of the
turtle (its direction of movement).
Deprecated since Python 3.1
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.tilt(45)
>>> turtle.tiltangle()
"""
if angle is None:
tilt = -self._tilt * (180.0/math.pi) * self._angleOrient
return (tilt / self._degreesPerAU) % self._fullcircle
else:
self.settiltangle(angle)
def tilt(self, angle):
"""Rotate the turtleshape by angle.
Argument:
angle - a number
Rotate the turtleshape by angle from its current tilt-angle,
but do NOT change the turtle's heading (direction of movement).
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.tilt(30)
>>> turtle.fd(50)
>>> turtle.tilt(30)
>>> turtle.fd(50)
"""
self.settiltangle(angle + self.tiltangle())
def shapetransform(self, t11=None, t12=None, t21=None, t22=None):
"""Set or return the current transformation matrix of the turtle shape.
Optional arguments: t11, t12, t21, t22 -- numbers.
If none of the matrix elements are given, return the transformation
matrix.
Otherwise set the given elements and transform the turtleshape
according to the matrix consisting of first row t11, t12 and
second row t21, t22.
Modify stretchfactor, shearfactor and tiltangle according to the
given matrix.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("square")
>>> turtle.shapesize(4,2)
>>> turtle.shearfactor(-0.5)
>>> turtle.shapetransform()
(4.0, -1.0, -0.0, 2.0)
"""
if t11 is t12 is t21 is t22 is None:
return self._shapetrafo
m11, m12, m21, m22 = self._shapetrafo
if t11 is not None: m11 = t11
if t12 is not None: m12 = t12
if t21 is not None: m21 = t21
if t22 is not None: m22 = t22
if m11 * m22 - m12 * m21 == 0:
raise TurtleGraphicsError("Bad shape transform matrix: must not be singular")
self._shapetrafo = (m11, m12, m21, m22)
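# Decompose the matrix into rotation * (stretch/shear): alfa is the
# rotation (tilt) angle; undoing it leaves a matrix whose diagonal gives
# the stretchfactors and whose ratio a12/a22 gives the shearfactor.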
alfa = math.atan2(-m21, m11) % (2 * math.pi)
sa, ca = math.sin(alfa), math.cos(alfa)
a11, a12, a21, a22 = (ca*m11 - sa*m21, ca*m12 - sa*m22,
sa*m11 + ca*m21, sa*m12 + ca*m22)
self._stretchfactor = a11, a22
self._shearfactor = a12/a22
self._tilt = alfa
self._update()
def _polytrafo(self, poly):
"""Computes transformed polygon shapes from a shape
according to current position and heading.
"""
screen = self.screen
p0, p1 = self._position
e0, e1 = self._orient
e = Vec2D(e0, e1 * screen.yscale / screen.xscale)
e0, e1 = (1.0 / abs(e)) * e
return [(p0+(e1*x+e0*y)/screen.xscale, p1+(-e0*x+e1*y)/screen.yscale)
for (x, y) in poly]
def get_shapepoly(self):
"""Return the current shape polygon as tuple of coordinate pairs.
No argument.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("square")
>>> turtle.shapetransform(4, -1, 0, 2)
>>> turtle.get_shapepoly()
((50, -20), (30, 20), (-50, 20), (-30, -20))
"""
shape = self.screen._shapes[self.turtle.shapeIndex]
if shape._type == "polygon":
return self._getshapepoly(shape._data, shape._type == "compound")
# else return None
def _getshapepoly(self, polygon, compound=False):
"""Calculate transformed shape polygon according to resizemode
and shapetransform.
"""
if self._resizemode == "user" or compound:
t11, t12, t21, t22 = self._shapetrafo
elif self._resizemode == "auto":
l = max(1, self._pensize/5.0)
t11, t12, t21, t22 = l, 0, 0, l
elif self._resizemode == "noresize":
return polygon
return tuple([(t11*x + t12*y, t21*x + t22*y) for (x, y) in polygon])
def _drawturtle(self):
"""Manages the correct rendering of the turtle with respect to
its shape, resizemode, stretch and tilt etc."""
screen = self.screen
shape = screen._shapes[self.turtle.shapeIndex]
ttype = shape._type
titem = self.turtle._item
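# Draw the turtle only if it is shown and a real screen update is due;
# otherwise blank out its canvas items (once) and mark it hidden.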
if self._shown and screen._updatecounter == 0 and screen._tracing > 0:
self._hidden_from_screen = False
tshape = shape._data
if ttype == "polygon":
if self._resizemode == "noresize": w = 1
elif self._resizemode == "auto": w = self._pensize
else: w = self._outlinewidth
shape = self._polytrafo(self._getshapepoly(tshape))
fc, oc = self._fillcolor, self._pencolor
screen._drawpoly(titem, shape, fill=fc, outline=oc,
width=w, top=True)
elif ttype == "image":
screen._drawimage(titem, self._position, tshape)
elif ttype == "compound":
for item, (poly, fc, oc) in zip(titem, tshape):
poly = self._polytrafo(self._getshapepoly(poly, True))
screen._drawpoly(item, poly, fill=self._cc(fc),
outline=self._cc(oc), width=self._outlinewidth, top=True)
else:
if self._hidden_from_screen:
return
if ttype == "polygon":
screen._drawpoly(titem, ((0, 0), (0, 0), (0, 0)), "", "")
elif ttype == "image":
screen._drawimage(titem, self._position,
screen._shapes["blank"]._data)
elif ttype == "compound":
for item in titem:
screen._drawpoly(item, ((0, 0), (0, 0), (0, 0)), "", "")
self._hidden_from_screen = True
############################## stamp stuff ###############################
def stamp(self):
"""Stamp a copy of the turtleshape onto the canvas and return its id.
No argument.
Stamp a copy of the turtle shape onto the canvas at the current
turtle position. Return a stamp_id for that stamp, which can be
used to delete it by calling clearstamp(stamp_id).
Example (for a Turtle instance named turtle):
>>> turtle.color("blue")
>>> turtle.stamp()
13
>>> turtle.fd(50)
"""
screen = self.screen
shape = screen._shapes[self.turtle.shapeIndex]
ttype = shape._type
tshape = shape._data
if ttype == "polygon":
stitem = screen._createpoly()
if self._resizemode == "noresize": w = 1
elif self._resizemode == "auto": w = self._pensize
else: w = self._outlinewidth
shape = self._polytrafo(self._getshapepoly(tshape))
fc, oc = self._fillcolor, self._pencolor
screen._drawpoly(stitem, shape, fill=fc, outline=oc,
width=w, top=True)
elif ttype == "image":
stitem = screen._createimage("")
screen._drawimage(stitem, self._position, tshape)
elif ttype == "compound":
stitem = []
for element in tshape:
item = screen._createpoly()
stitem.append(item)
stitem = tuple(stitem)
for item, (poly, fc, oc) in zip(stitem, tshape):
poly = self._polytrafo(self._getshapepoly(poly, True))
screen._drawpoly(item, poly, fill=self._cc(fc),
outline=self._cc(oc), width=self._outlinewidth, top=True)
self.stampItems.append(stitem)
self.undobuffer.push(("stamp", stitem))
return stitem
def _clearstamp(self, stampid):
"""does the work for clearstamp() and clearstamps()
"""
if stampid in self.stampItems:
if isinstance(stampid, tuple):
for subitem in stampid:
self.screen._delete(subitem)
else:
self.screen._delete(stampid)
self.stampItems.remove(stampid)
# Delete stampitem from undobuffer if necessary
# if clearstamp is called directly.
item = ("stamp", stampid)
buf = self.undobuffer
if item not in buf.buffer:
return
index = buf.buffer.index(item)
buf.buffer.remove(item)
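# Keep the ring buffer consistent: removing an entry at or before the
# pointer shifts later entries left, so step the pointer back and pad
# with a dummy entry to preserve the buffer's fixed size.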
if index <= buf.ptr:
buf.ptr = (buf.ptr - 1) % buf.bufsize
buf.buffer.insert((buf.ptr+1)%buf.bufsize, [None])
def clearstamp(self, stampid):
"""Delete stamp with given stampid
Argument:
stampid - an integer, must be return value of previous stamp() call.
Example (for a Turtle instance named turtle):
>>> turtle.color("blue")
>>> astamp = turtle.stamp()
>>> turtle.fd(50)
>>> turtle.clearstamp(astamp)
"""
self._clearstamp(stampid)
self._update()
def clearstamps(self, n=None):
"""Delete all or first/last n of turtle's stamps.
Optional argument:
n -- an integer
If n is None, delete all of pen's stamps,
else if n > 0 delete first n stamps
else if n < 0 delete last n stamps.
Example (for a Turtle instance named turtle):
>>> for i in range(8):
turtle.stamp(); turtle.fd(30)
...
>>> turtle.clearstamps(2)
>>> turtle.clearstamps(-2)
>>> turtle.clearstamps()
"""
if n is None:
toDelete = self.stampItems[:]
elif n >= 0:
toDelete = self.stampItems[:n]
else:
toDelete = self.stampItems[n:]
for item in toDelete:
self._clearstamp(item)
self._update()
def _goto(self, end):
"""Move the pen to the point end, thereby drawing a line
if pen is down. All other methods for turtle movement depend
on this one.
"""
## Version with undo-stuff
go_modes = ( self._drawing,
self._pencolor,
self._pensize,
isinstance(self._fillpath, list))
screen = self.screen
undo_entry = ("go", self._position, end, go_modes,
(self.currentLineItem,
self.currentLine[:],
screen._pointlist(self.currentLineItem),
self.items[:])
)
if self.undobuffer:
self.undobuffer.push(undo_entry)
start = self._position
if self._speed and screen._tracing == 1:
diff = (end-start)
diffsq = (diff[0]*screen.xscale)**2 + (diff[1]*screen.yscale)**2
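# Number of animation hops grows with the pixel distance and shrinks
# (roughly exponentially) with speed, so fast turtles need fewer frames.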
nhops = 1+int((diffsq**0.5)/(3*(1.1**self._speed)*self._speed))
delta = diff * (1.0/nhops)
for n in range(1, nhops):
if n == 1:
top = True
else:
top = False
self._position = start + delta * n
if self._drawing:
screen._drawline(self.drawingLineItem,
(start, self._position),
self._pencolor, self._pensize, top)
self._update()
if self._drawing:
screen._drawline(self.drawingLineItem, ((0, 0), (0, 0)),
fill="", width=self._pensize)
# Turtle now at end,
if self._drawing: # now update currentLine
self.currentLine.append(end)
if isinstance(self._fillpath, list):
self._fillpath.append(end)
###### inheritance!!!
self._position = end
if self._creatingPoly:
self._poly.append(end)
if len(self.currentLine) > 42: # 42! answer to the ultimate question
# of life, the universe and everything
self._newLine()
self._update() # count=True
def _undogoto(self, entry):
"""Reverse a _goto. Used for undo()
"""
old, new, go_modes, coodata = entry
drawing, pc, ps, filling = go_modes
cLI, cL, pl, items = coodata
screen = self.screen
if abs(self._position - new) > 0.5:
print ("undogoto: HALLO-DA-STIMMT-WAS-NICHT!")
# restore former situation
self.currentLineItem = cLI
self.currentLine = cL
if pl == [(0, 0), (0, 0)]:
usepc = ""
else:
usepc = pc
screen._drawline(cLI, pl, fill=usepc, width=ps)
todelete = [i for i in self.items if (i not in items) and
(screen._type(i) == "line")]
for i in todelete:
screen._delete(i)
self.items.remove(i)
start = old
if self._speed and screen._tracing == 1:
diff = old - new
diffsq = (diff[0]*screen.xscale)**2 + (diff[1]*screen.yscale)**2
nhops = 1+int((diffsq**0.5)/(3*(1.1**self._speed)*self._speed))
delta = diff * (1.0/nhops)
for n in range(1, nhops):
if n == 1:
top = True
else:
top = False
self._position = new + delta * n
if drawing:
screen._drawline(self.drawingLineItem,
(start, self._position),
pc, ps, top)
self._update()
if drawing:
screen._drawline(self.drawingLineItem, ((0, 0), (0, 0)),
fill="", width=ps)
# Turtle now at position old,
self._position = old
## if undo is done during creating a polygon, the last vertex
## will be deleted. if the polygon is entirely deleted,
## creatingPoly will be set to False.
## Polygons created before the last one will not be affected by undo()
if self._creatingPoly:
if len(self._poly) > 0:
self._poly.pop()
if self._poly == []:
self._creatingPoly = False
self._poly = None
if filling:
if self._fillpath == []:
self._fillpath = None
print("Unwahrscheinlich in _undogoto!")
elif self._fillpath is not None:
self._fillpath.pop()
self._update() # count=True
def _rotate(self, angle):
"""Turns pen clockwise by angle.
"""
if self.undobuffer:
self.undobuffer.push(("rot", angle, self._degreesPerAU))
angle *= self._degreesPerAU
neworient = self._orient.rotate(angle)
tracing = self.screen._tracing
if tracing == 1 and self._speed > 0:
anglevel = 3.0 * self._speed
steps = 1 + int(abs(angle)/anglevel)
delta = 1.0*angle/steps
for _ in range(steps):
self._orient = self._orient.rotate(delta)
self._update()
self._orient = neworient
self._update()
def _newLine(self, usePos=True):
"""Closes current line item and starts a new one.
Remark: if the current line becomes too long, animation
performance (via _drawline) slows down considerably.
"""
if len(self.currentLine) > 1:
self.screen._drawline(self.currentLineItem, self.currentLine,
self._pencolor, self._pensize)
self.currentLineItem = self.screen._createline()
self.items.append(self.currentLineItem)
else:
self.screen._drawline(self.currentLineItem, top=True)
self.currentLine = []
if usePos:
self.currentLine = [self._position]
def filling(self):
"""Return fillstate (True if filling, False else).
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.begin_fill()
>>> if turtle.filling():
turtle.pensize(5)
else:
turtle.pensize(3)
"""
return isinstance(self._fillpath, list)
def begin_fill(self):
"""Called just before drawing a shape to be filled.
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.color("black", "red")
>>> turtle.begin_fill()
>>> turtle.circle(60)
>>> turtle.end_fill()
"""
if not self.filling():
self._fillitem = self.screen._createpoly()
self.items.append(self._fillitem)
self._fillpath = [self._position]
self._newLine()
if self.undobuffer:
self.undobuffer.push(("beginfill", self._fillitem))
self._update()
def end_fill(self):
"""Fill the shape drawn after the call begin_fill().
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.color("black", "red")
>>> turtle.begin_fill()
>>> turtle.circle(60)
>>> turtle.end_fill()
"""
if self.filling():
if len(self._fillpath) > 2:
self.screen._drawpoly(self._fillitem, self._fillpath,
fill=self._fillcolor)
if self.undobuffer:
self.undobuffer.push(("dofill", self._fillitem))
self._fillitem = self._fillpath = None
self._update()
def dot(self, size=None, *color):
"""Draw a dot with diameter size, using color.
Optional arguments:
size -- an integer >= 1 (if given)
color -- a colorstring or a numeric color tuple
Draw a circular dot with diameter size, using color.
If size is not given, the maximum of pensize+4 and 2*pensize is used.
Example (for a Turtle instance named turtle):
>>> turtle.dot()
>>> turtle.fd(50); turtle.dot(20, "blue"); turtle.fd(50)
"""
if not color:
if isinstance(size, (str, tuple)):
color = self._colorstr(size)
size = self._pensize + max(self._pensize, 4)
else:
color = self._pencolor
if not size:
size = self._pensize + max(self._pensize, 4)
else:
if size is None:
size = self._pensize + max(self._pensize, 4)
color = self._colorstr(color)
if hasattr(self.screen, "_dot"):
item = self.screen._dot(self._position, size, color)
self.items.append(item)
if self.undobuffer:
self.undobuffer.push(("dot", item))
else:
pen = self.pen()
if self.undobuffer:
self.undobuffer.push(["seq"])
self.undobuffer.cumulate = True
try:
if self.resizemode() == 'auto':
self.ht()
self.pendown()
self.pensize(size)
self.pencolor(color)
self.forward(0)
finally:
self.pen(pen)
if self.undobuffer:
self.undobuffer.cumulate = False
def _write(self, txt, align, font):
"""Performs the writing for write()
"""
item, end = self.screen._write(self._position, txt, align, font,
self._pencolor)
self.items.append(item)
if self.undobuffer:
self.undobuffer.push(("wri", item))
return end
def write(self, arg, move=False, align="left", font=("Arial", 8, "normal")):
"""Write text at the current turtle position.
Arguments:
arg -- info, which is to be written to the TurtleScreen
move (optional) -- True/False
align (optional) -- one of the strings "left", "center" or "right"
font (optional) -- a triple (fontname, fontsize, fonttype)
Write text - the string representation of arg - at the current
turtle position according to align ("left", "center" or "right")
and with the given font.
If move is True, the pen is moved to the bottom-right corner
of the text. By default, move is False.
Example (for a Turtle instance named turtle):
>>> turtle.write('Home = ', True, align="center")
>>> turtle.write((0,0), True)
"""
if self.undobuffer:
self.undobuffer.push(["seq"])
self.undobuffer.cumulate = True
end = self._write(str(arg), align.lower(), font)
if move:
x, y = self.pos()
self.setpos(end, y)
if self.undobuffer:
self.undobuffer.cumulate = False
def begin_poly(self):
"""Start recording the vertices of a polygon.
No argument.
Start recording the vertices of a polygon. Current turtle position
is first point of polygon.
Example (for a Turtle instance named turtle):
>>> turtle.begin_poly()
"""
self._poly = [self._position]
self._creatingPoly = True
def end_poly(self):
"""Stop recording the vertices of a polygon.
No argument.
Stop recording the vertices of a polygon. Current turtle position is
last point of polygon. This will be connected with the first point.
Example (for a Turtle instance named turtle):
>>> turtle.end_poly()
"""
self._creatingPoly = False
def get_poly(self):
"""Return the lastly recorded polygon.
No argument.
Example (for a Turtle instance named turtle):
>>> p = turtle.get_poly()
>>> turtle.register_shape("myFavouriteShape", p)
"""
## check if there is any poly?
if self._poly is not None:
return tuple(self._poly)
def getscreen(self):
"""Return the TurtleScreen object, the turtle is drawing on.
No argument.
Return the TurtleScreen object, the turtle is drawing on.
So TurtleScreen-methods can be called for that object.
Example (for a Turtle instance named turtle):
>>> ts = turtle.getscreen()
>>> ts
<turtle.TurtleScreen object at 0x0106B770>
>>> ts.bgcolor("pink")
"""
return self.screen
def getturtle(self):
"""Return the Turtleobject itself.
No argument.
Only reasonable use: as a function to return the 'anonymous turtle':
Example:
>>> pet = getturtle()
>>> pet.fd(50)
>>> pet
<turtle.Turtle object at 0x0187D810>
>>> turtles()
[<turtle.Turtle object at 0x0187D810>]
"""
return self
getpen = getturtle
################################################################
### screen oriented methods recurring to methods of TurtleScreen
################################################################
def _delay(self, delay=None):
"""Set delay value which determines speed of turtle animation.
"""
return self.screen.delay(delay)
def onclick(self, fun, btn=1, add=None):
"""Bind fun to mouse-click event on this turtle on canvas.
Arguments:
fun -- a function with two arguments, to which will be assigned
the coordinates of the clicked point on the canvas.
num -- number of the mouse-button defaults to 1 (left mouse button).
add -- True or False. If True, new binding will be added, otherwise
it will replace a former binding.
Example for the anonymous turtle, i. e. the procedural way:
>>> def turn(x, y):
left(360)
>>> onclick(turn) # Now clicking into the turtle will turn it.
>>> onclick(None) # event-binding will be removed
"""
self.screen._onclick(self.turtle._item, fun, btn, add)
self._update()
def onrelease(self, fun, btn=1, add=None):
"""Bind fun to mouse-button-release event on this turtle on canvas.
Arguments:
fun -- a function with two arguments, to which will be assigned
the coordinates of the clicked point on the canvas.
num -- number of the mouse-button defaults to 1 (left mouse button).
Example (for a MyTurtle instance named joe):
>>> class MyTurtle(Turtle):
def glow(self,x,y):
self.fillcolor("red")
def unglow(self,x,y):
self.fillcolor("")
>>> joe = MyTurtle()
>>> joe.onclick(joe.glow)
>>> joe.onrelease(joe.unglow)
### clicking on joe turns fillcolor red,
### unclicking turns it to transparent.
"""
self.screen._onrelease(self.turtle._item, fun, btn, add)
self._update()
def ondrag(self, fun, btn=1, add=None):
"""Bind fun to mouse-move event on this turtle on canvas.
Arguments:
fun -- a function with two arguments, to which will be assigned
the coordinates of the clicked point on the canvas.
num -- number of the mouse-button defaults to 1 (left mouse button).
Every sequence of mouse-move-events on a turtle is preceded by a
mouse-click event on that turtle.
Example (for a Turtle instance named turtle):
>>> turtle.ondrag(turtle.goto)
### Subsequently clicking and dragging a Turtle will
### move it across the screen thereby producing handdrawings
### (if pen is down).
"""
self.screen._ondrag(self.turtle._item, fun, btn, add)
def _undo(self, action, data):
"""Does the main part of the work for undo()
"""
if self.undobuffer is None:
return
if action == "rot":
angle, degPAU = data
self._rotate(-angle*degPAU/self._degreesPerAU)
dummy = self.undobuffer.pop()
elif action == "stamp":
stitem = data[0]
self.clearstamp(stitem)
elif action == "go":
self._undogoto(data)
elif action in ["wri", "dot"]:
item = data[0]
self.screen._delete(item)
self.items.remove(item)
elif action == "dofill":
item = data[0]
self.screen._drawpoly(item, ((0, 0),(0, 0),(0, 0)),
fill="", outline="")
elif action == "beginfill":
item = data[0]
self._fillitem = self._fillpath = None
if item in self.items:
self.screen._delete(item)
self.items.remove(item)
elif action == "pen":
TPen.pen(self, data[0])
self.undobuffer.pop()
def undo(self):
"""undo (repeatedly) the last turtle action.
No argument.
undo (repeatedly) the last turtle action.
Number of available undo actions is determined by the size of
the undobuffer.
Example (for a Turtle instance named turtle):
>>> for i in range(4):
turtle.fd(50); turtle.lt(80)
>>> for i in range(8):
turtle.undo()
"""
if self.undobuffer is None:
return
item = self.undobuffer.pop()
action = item[0]
data = item[1:]
if action == "seq":
while data:
item = data.pop()
self._undo(item[0], item[1:])
else:
self._undo(action, data)
turtlesize = shapesize
RawPen = RawTurtle
### Screen - Singleton ########################
def Screen():
"""Return the singleton screen object.
If none exists at the moment, create a new one and return it,
else return the existing one."""
if Turtle._screen is None:
Turtle._screen = _Screen()
return Turtle._screen
class _Screen(TurtleScreen):
_root = None
_canvas = None
_title = _CFG["title"]
def __init__(self):
# XXX there is no need for this code to be conditional,
# as there will be only a single _Screen instance, anyway
# XXX actually, the turtle demo is injecting root window,
# so perhaps the conditional creation of a root should be
# preserved (perhaps by passing it as an optional parameter)
if _Screen._root is None:
_Screen._root = self._root = _Root()
self._root.title(_Screen._title)
self._root.ondestroy(self._destroy)
if _Screen._canvas is None:
width = _CFG["width"]
height = _CFG["height"]
canvwidth = _CFG["canvwidth"]
canvheight = _CFG["canvheight"]
leftright = _CFG["leftright"]
topbottom = _CFG["topbottom"]
self._root.setupcanvas(width, height, canvwidth, canvheight)
_Screen._canvas = self._root._getcanvas()
TurtleScreen.__init__(self, _Screen._canvas)
self.setup(width, height, leftright, topbottom)
def setup(self, width=_CFG["width"], height=_CFG["height"],
startx=_CFG["leftright"], starty=_CFG["topbottom"]):
""" Set the size and position of the main window.
Arguments:
width: as integer a size in pixels, as float a fraction of the screen.
Default is 50% of screen.
height: as integer the height in pixels, as float a fraction of the
screen. Default is 75% of screen.
startx: if positive, starting position in pixels from the left
edge of the screen, if negative from the right edge
Default, startx=None is to center window horizontally.
starty: if positive, starting position in pixels from the top
edge of the screen, if negative from the bottom edge
Default, starty=None is to center window vertically.
Examples (for a Screen instance named screen):
>>> screen.setup (width=200, height=200, startx=0, starty=0)
sets window to 200x200 pixels, in upper left of screen
>>> screen.setup(width=.75, height=0.5, startx=None, starty=None)
sets window to 75% of screen by 50% of screen and centers
"""
if not hasattr(self._root, "set_geometry"):
return
sw = self._root.win_width()
sh = self._root.win_height()
if isinstance(width, float) and 0 <= width <= 1:
width = sw*width
if startx is None:
startx = (sw - width) / 2
if isinstance(height, float) and 0 <= height <= 1:
height = sh*height
if starty is None:
starty = (sh - height) / 2
self._root.set_geometry(width, height, startx, starty)
self.update()
def title(self, titlestring):
"""Set title of turtle-window
Argument:
titlestring -- a string, to appear in the titlebar of the
turtle graphics window.
This is a method of Screen-class. Not available for TurtleScreen-
objects.
Example (for a Screen instance named screen):
>>> screen.title("Welcome to the turtle-zoo!")
"""
if _Screen._root is not None:
_Screen._root.title(titlestring)
_Screen._title = titlestring
def _destroy(self):
root = self._root
if root is _Screen._root:
Turtle._pen = None
Turtle._screen = None
_Screen._root = None
_Screen._canvas = None
TurtleScreen._RUNNING = True
root.destroy()
def bye(self):
"""Shut the turtlegraphics window.
Example (for a TurtleScreen instance named screen):
>>> screen.bye()
"""
self._destroy()
def exitonclick(self):
"""Go into mainloop until the mouse is clicked.
No arguments.
Bind bye() method to mouseclick on TurtleScreen.
If "using_IDLE" - value in configuration dictionary is False
(default value), enter mainloop.
If IDLE with -n switch (no subprocess) is used, this value should be
set to True in turtle.cfg. In this case IDLE's mainloop
is active also for the client script.
This is a method of the Screen-class and not available for
TurtleScreen instances.
Example (for a Screen instance named screen):
>>> screen.exitonclick()
"""
def exitGracefully(x, y):
"""Screen.bye() with two dummy-parameters"""
self.bye()
self.onclick(exitGracefully)
if _CFG["using_IDLE"]:
return
try:
mainloop()
except AttributeError:
exit(0)
class Turtle(RawTurtle):
"""RawTurtle auto-crating (scrolled) canvas.
When a Turtle object is created or a function derived from some
Turtle method is called a TurtleScreen object is automatically created.
"""
_pen = None
_screen = None
def __init__(self,
shape=_CFG["shape"],
undobuffersize=_CFG["undobuffersize"],
visible=_CFG["visible"]):
if Turtle._screen is None:
Turtle._screen = Screen()
RawTurtle.__init__(self, Turtle._screen,
shape=shape,
undobuffersize=undobuffersize,
visible=visible)
Pen = Turtle
def _getpen():
"""Create the 'anonymous' turtle if not already present."""
if Turtle._pen is None:
Turtle._pen = Turtle()
return Turtle._pen
def _getscreen():
"""Create a TurtleScreen if not already present."""
if Turtle._screen is None:
Turtle._screen = Screen()
return Turtle._screen
def write_docstringdict(filename="turtle_docstringdict"):
"""Create and write docstring-dictionary to file.
Optional argument:
filename -- a string, used as filename
default value is turtle_docstringdict
    Has to be called explicitly, (not used by the turtle-graphics classes)
    The docstring dictionary will be written to the Python script <filename>.py
It is intended to serve as a template for translation of the docstrings
into different languages.
"""
docsdict = {}
for methodname in _tg_screen_functions:
key = "_Screen."+methodname
docsdict[key] = eval(key).__doc__
for methodname in _tg_turtle_functions:
key = "Turtle."+methodname
docsdict[key] = eval(key).__doc__
f = open("%s.py" % filename,"w")
keys = sorted([x for x in docsdict.keys()
if x.split('.')[1] not in _alias_list])
f.write('docsdict = {\n\n')
for key in keys[:-1]:
f.write('%s :\n' % repr(key))
f.write(' """%s\n""",\n\n' % docsdict[key])
key = keys[-1]
f.write('%s :\n' % repr(key))
f.write(' """%s\n"""\n\n' % docsdict[key])
f.write("}\n")
f.close()
def read_docstrings(lang):
"""Read in docstrings from lang-specific docstring dictionary.
Transfer docstrings, translated to lang, from a dictionary-file
to the methods of classes Screen and Turtle and - in revised form -
to the corresponding functions.
"""
modname = "turtle_docstringdict_%(language)s" % {'language':lang.lower()}
module = __import__(modname)
docsdict = module.docsdict
for key in docsdict:
try:
# eval(key).im_func.__doc__ = docsdict[key]
eval(key).__doc__ = docsdict[key]
except:
print("Bad docstring-entry: %s" % key)
_LANGUAGE = _CFG["language"]
try:
if _LANGUAGE != "english":
read_docstrings(_LANGUAGE)
except ImportError:
print("Cannot find docsdict for", _LANGUAGE)
except:
print ("Unknown Error when trying to import %s-docstring-dictionary" %
_LANGUAGE)
def getmethparlist(ob):
"""Get strings describing the arguments for the given object
Returns a pair of strings representing function parameter lists
including parenthesis. The first string is suitable for use in
function definition and the second is suitable for use in function
call. The "self" parameter is not included.
"""
defText = callText = ""
# bit of a hack for methods - turn it into a function
# but we drop the "self" param.
# Try and build one for Python defined functions
args, varargs, varkw = inspect.getargs(ob.__code__)
items2 = args[1:]
realArgs = args[1:]
defaults = ob.__defaults__ or []
defaults = ["=%r" % (value,) for value in defaults]
defaults = [""] * (len(realArgs)-len(defaults)) + defaults
items1 = [arg + dflt for arg, dflt in zip(realArgs, defaults)]
if varargs is not None:
items1.append("*" + varargs)
items2.append("*" + varargs)
if varkw is not None:
items1.append("**" + varkw)
items2.append("**" + varkw)
defText = ", ".join(items1)
defText = "(%s)" % defText
callText = ", ".join(items2)
callText = "(%s)" % callText
return defText, callText
def _turtle_docrevise(docstr):
"""To reduce docstrings from RawTurtle class for functions
"""
import re
if docstr is None:
return None
turtlename = _CFG["exampleturtle"]
newdocstr = docstr.replace("%s." % turtlename,"")
parexp = re.compile(r' \(.+ %s\):' % turtlename)
newdocstr = parexp.sub(":", newdocstr)
return newdocstr
def _screen_docrevise(docstr):
"""To reduce docstrings from TurtleScreen class for functions
"""
import re
if docstr is None:
return None
screenname = _CFG["examplescreen"]
newdocstr = docstr.replace("%s." % screenname,"")
parexp = re.compile(r' \(.+ %s\):' % screenname)
newdocstr = parexp.sub(":", newdocstr)
return newdocstr
## The following mechanism makes all methods of RawTurtle and Turtle available
## as functions. So we can enhance, change, add, delete methods to these
## classes and do not need to change anything here.
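## Illustration (hypothetical): for methodname == "forward" the exec'd source
## is roughly:  def forward(distance): return _getpen().forward(distance)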
for methodname in _tg_screen_functions:
pl1, pl2 = getmethparlist(eval('_Screen.' + methodname))
if pl1 == "":
print(">>>>>>", pl1, pl2)
continue
defstr = ("def %(key)s%(pl1)s: return _getscreen().%(key)s%(pl2)s" %
{'key':methodname, 'pl1':pl1, 'pl2':pl2})
exec(defstr)
eval(methodname).__doc__ = _screen_docrevise(eval('_Screen.'+methodname).__doc__)
for methodname in _tg_turtle_functions:
pl1, pl2 = getmethparlist(eval('Turtle.' + methodname))
if pl1 == "":
print(">>>>>>", pl1, pl2)
continue
defstr = ("def %(key)s%(pl1)s: return _getpen().%(key)s%(pl2)s" %
{'key':methodname, 'pl1':pl1, 'pl2':pl2})
exec(defstr)
eval(methodname).__doc__ = _turtle_docrevise(eval('Turtle.'+methodname).__doc__)
done = mainloop
if __name__ == "__main__":
def switchpen():
if isdown():
pu()
else:
pd()
def demo1():
"""Demo of old turtle.py - module"""
reset()
tracer(True)
up()
backward(100)
down()
# draw 3 squares; the last filled
width(3)
for i in range(3):
if i == 2:
begin_fill()
for _ in range(4):
forward(20)
left(90)
if i == 2:
color("maroon")
end_fill()
up()
forward(30)
down()
width(1)
color("black")
# move out of the way
tracer(False)
up()
right(90)
forward(100)
right(90)
forward(100)
right(180)
down()
# some text
write("startstart", 1)
write("start", 1)
color("red")
# staircase
for i in range(5):
forward(20)
left(90)
forward(20)
right(90)
# filled staircase
tracer(True)
begin_fill()
for i in range(5):
forward(20)
left(90)
forward(20)
right(90)
end_fill()
# more text
def demo2():
"""Demo of some new features."""
speed(1)
st()
pensize(3)
setheading(towards(0, 0))
radius = distance(0, 0)/2.0
rt(90)
for _ in range(18):
switchpen()
circle(radius, 10)
write("wait a moment...")
while undobufferentries():
undo()
reset()
lt(90)
colormode(255)
laenge = 10
pencolor("green")
pensize(3)
lt(180)
for i in range(-2, 16):
if i > 0:
begin_fill()
fillcolor(255-15*i, 0, 15*i)
for _ in range(3):
fd(laenge)
lt(120)
end_fill()
laenge += 10
lt(15)
speed((speed()+1)%12)
#end_fill()
lt(120)
pu()
fd(70)
rt(30)
pd()
color("red","yellow")
speed(0)
begin_fill()
for _ in range(4):
circle(50, 90)
rt(90)
fd(30)
rt(90)
end_fill()
lt(90)
pu()
fd(30)
pd()
shape("turtle")
tri = getturtle()
tri.resizemode("auto")
turtle = Turtle()
turtle.resizemode("auto")
turtle.shape("turtle")
turtle.reset()
turtle.left(90)
turtle.speed(0)
turtle.up()
turtle.goto(280, 40)
turtle.lt(30)
turtle.down()
turtle.speed(6)
turtle.color("blue","orange")
turtle.pensize(2)
tri.speed(6)
setheading(towards(turtle))
count = 1
while tri.distance(turtle) > 4:
turtle.fd(3.5)
turtle.lt(0.6)
tri.setheading(tri.towards(turtle))
tri.fd(4)
if count % 20 == 0:
turtle.stamp()
tri.stamp()
switchpen()
count += 1
tri.write("CAUGHT! ", font=("Arial", 16, "bold"), align="right")
tri.pencolor("black")
tri.pencolor("red")
def baba(xdummy, ydummy):
clearscreen()
bye()
time.sleep(2)
while undobufferentries():
tri.undo()
turtle.undo()
tri.fd(50)
tri.write(" Click me!", font = ("Courier", 12, "bold") )
tri.onclick(baba, 1)
demo1()
demo2()
exitonclick()
| [
"[email protected]"
] | |
6c8c336cb02605b572c852d0c5d06f95343dd29c | c5dd04970499e8a22240397836eccdf6d4d59b2b | /myproject/views.py | ba9a92b766a262c1e999b7b24fd5f52a8b3721d7 | [] | no_license | WaryWolf/findacrafter-web | c4de5379d85094ae6a7bd27146b692dcfdb10094 | 1f9ee87a6a08aa36dc90d1c295b47ce77d4113dd | refs/heads/master | 2021-01-25T03:48:34.227824 | 2014-11-10T00:43:39 | 2014-11-10T00:43:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,099 | py | from pyramid.i18n import TranslationStringFactory
from pyramid.response import Response
from pyramid.view import view_config
from pyramid.renderers import render_to_response
from db import Armory
import time
def faq(request):
#print request.client_addr
return render_to_response( 'templates/faq.jinja2',
{ 'a': 1 },
request = request )
def recipes(request):
text = request.params.get('term', '')
db = Armory()
recs = [str(element) for (element,) in db.getAllRecipes()]
db.close()
return [x for x in recs if text.lower() in x.lower()]
def error(errstr):
return Response("Error: " + errstr)
def search(request):
db = Armory()
realms = db.getRealms()
db.close()
return render_to_response( 'templates/search-new.jinja2',
{ 'realms': realms },
request = request )
def results(request):
servid = request.matchdict['server']
faction = request.matchdict['faction']
recipe = request.matchdict['recipe']
db = Armory()
if not db.doesRealmExist2(servid):
return error("Invalid Realm")
if faction not in ['A', 'H', 'B']:
return error("Invalid Faction")
recipeids = db.getRecipeID(recipe)
if len(recipeids) == 0:
return error("That search matched 0 recipes :(")
if len(recipeids) > 1:
#return error() #make this redirect to a "did you mean...?" page
return render_to_response( 'templates/results-new.jinja2',
{ 'toobroad': True,
'res': recipeids,
'resnum': len(recipeids),
'servid': servid,
'faction': faction },
request = request )
recipeid = recipeids[0][0]
recipename = recipeids[0][1]
if faction == 'B':
faction = '%'
connects = db.getRealmConnections(servid)
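    # a realm with no connected-realm group is treated as a group of one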
if len(connects) == 0:
connects = [(db.getRealmName(servid)[0],servid)]
res = []
totalcount = 0
showcount = 0
for realm in connects:
realmid = realm[1]
realmname = realm[0]
realmcrafters = db.getRealmCrafters(realmid, recipeid, faction)
for crafter in realmcrafters:
totalcount += 1
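            # cap the displayed rows at 100, but keep counting the total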
if showcount == 100:
continue
res.append((crafter[0], time.ctime(int(crafter[1])), time.ctime(int(crafter[2])), crafter[3], realmname))
showcount += 1
db.close()
if len(res) == 0:
return error("We couldn't find any crafters for that item on your server. Sorry!")
return render_to_response( 'templates/results-new.jinja2',
{ 'res': res,
'recipeid': recipeid,
'recipename': recipename,
'count': totalcount },
request = request )
| [
"[email protected]"
] | |
020f6ba95c246f4001622880ac00796c24b6e62e | ba2829830b35fa51917e86fabf274196fc0a04f1 | /Alien_Invasion/bullet.py | 0275bf9dc343e9e82b658d0841b67ffccf9c796a | [] | no_license | Welpyfish/python | 5dca9583928d94479ac9dbf9a9b076d3ce36896b | d3caf0c16abc5be5a9011dd61639e86188fa507d | refs/heads/master | 2021-12-25T06:01:24.946401 | 2021-12-20T14:43:02 | 2021-12-20T14:43:02 | 253,290,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | import pygame
from pygame.sprite import Sprite
class Bullet(Sprite):
"""A class to manage bullets fired from the ship"""
def __init__(self, ai_game):
"""Create a bullet object at the ship's current position."""
super().__init__()
self.screen = ai_game.screen
self.settings = ai_game.settings
self.color = self.settings.bullet_color
# Create a bullet rect at (0, 0) and then set correct position.
self.rect = pygame.Rect(0, 0, self.settings.bullet_width, self.settings.bullet_height)
self.rect.midtop = ai_game.ship.rect.midtop
# Store the bullet's position as a decimal value.
self.y = float(self.rect.y)
def update(self):
"""Move the bullet up the screen."""
# Update the decimal position of the bullet.
self.y -= self.settings.bullet_speed
# Update the rect position
self.rect.y = self.y
def draw_bullet(self):
"""Draw the bullet to the screen."""
pygame.draw.rect(self.screen, self.color, self.rect)
| [
"[email protected]"
] | |
81a1159b32031f1e45daa05415840674d3b923dc | b8ebac1c26f0bac56db5ce0331c6dd476d8c76eb | /0029.DivideTwoIntegers.py | f986aade3fc7a94bab79e073713cec2dbc06b702 | [] | no_license | iLtc/LeetCodePython | 7fbfc16292c85af95c00f9cc7531f17a29c8b957 | 67fed821e1488d164b6e5218aee803ac9fa93b93 | refs/heads/master | 2023-02-21T22:18:45.342863 | 2021-01-24T03:42:36 | 2021-01-24T03:42:36 | 323,800,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | class Solution:
def divide(self, dividend: int, divisor: int) -> int:
if dividend == -2 ** 31 and divisor == -1:
return 2 ** 31 - 1
negative = False
if dividend < 0:
negative = not negative
dividend = - dividend
if divisor < 0:
negative = not negative
divisor = - divisor
memory = {}
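        # memory maps multiplier -> multiplier * divisor, built by doubling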
count = 1
total = divisor
while total <= dividend:
memory[count] = total
total += total
count += count
result = 0
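        # greedily subtract the largest precomputed multiples (binary long
        # division); note: reversed() over dict views needs Python 3.8+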
for count, total in reversed(memory.items()):
if total <= dividend:
result += count
dividend -= total
return - result if negative else result
| [
"[email protected]"
] | |
e7b790cd9c0e154da4b7517b407c40060421a495 | 98f184d758f99ce6490a0bfc0cc5a8a016ffa07b | /HackerRank/HackerRank_Diagonal_Difference.py | d6f07c827f45d6137bc1a4b9c0ab872c9b751551 | [] | no_license | leehb0531/problemsolving | 115dfe4c844c4d9d009971f4f38c1de103e28792 | eb230677fb431261ce171926ff17f917bdf83d68 | refs/heads/master | 2023-02-06T13:57:05.274004 | 2020-12-24T22:05:34 | 2020-12-24T22:05:34 | 279,533,723 | 0 | 1 | null | 2020-08-13T04:02:39 | 2020-07-14T08:56:27 | Python | UTF-8 | Python | false | false | 768 | py | #!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'diagonalDifference' function below.
#
# The function is expected to return an INTEGER.
# The function accepts 2D_INTEGER_ARRAY arr as parameter.
#
def diagonalDifference(arr):
result = 0
length = len(arr)-1
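    # arr[i][i] walks the main diagonal, arr[i][length] the anti-diagonal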
for i in range(len(arr)):
result += arr[i][i]
result -= arr[i][length]
length -= 1
return abs(result)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
arr = []
for _ in range(n):
arr.append(list(map(int, input().rstrip().split())))
result = diagonalDifference(arr)
fptr.write(str(result) + '\n')
fptr.close()
| [
"[email protected]"
] | |
d4c76811ae02e5656c6798a6a0d1db42b0e691d3 | dc88075fc9ea51d279d288993c61bda3b7099c10 | /eureka/lib/hiloerr.py | eed648f0414ac2af84714a0197fdacc426564e8f | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | AarynnCarter/Eureka | 941456a80408fcc4cccdefebd105f655374faa05 | bb17db92cfddf694ba4c509e8296a5b7b3c619f3 | refs/heads/main | 2023-06-02T12:55:20.551727 | 2021-06-25T23:50:13 | 2021-06-25T23:50:13 | 379,359,328 | 0 | 0 | MIT | 2021-06-22T18:03:58 | 2021-06-22T18:03:58 | null | UTF-8 | Python | false | false | 3,877 | py | """
NAME:
HILOERR
PURPOSE:
This function computes two-sided error bars for a set of
values. It treats Data as a representative set of
measurements of a quantity, and returns the distance in
parameter space from Value (default: MEDIAN(Data)) to the low
and high bounds of the given CONFidence interval centered on
Value. CONF defaults to the Gaussian 1-sigma confidence.
CATEGORY:
Statistics.
CALLING SEQUENCE:
Result = HILOERR(Data, Value)
INPUTS:
Data: An array of any non-complex numerical type and size
containing representative measurements of a quantity.
Bootstrap Monte-Carlo results are often used here.
Value: The nominal value of the dataset. Optional. If not
defined, Value is set to MEDIAN(Data) IN THE CALLER.
KEYWORD PARAMETERS:
CONF: Half the confidence interval desired. I.e., the
distance in probability space from Value to the lower
and upper error bounds. Optional. If not set, CONF
is set to erf(1d/sqrt(2d)) / 2d = 0.34134475 IN THE
CALLER.
IVAL: (returned) The index of the nominal value. This is an
interpolated quantity, so it may not be an integer.
ILO: (returned) The index of the lower error bound. This
is an interpolated quantity, so it may not be an integer.
IHI: (returned) The index of the upper error bound. This
is an interpolated quantity, so it may not be an
integer.
OUTPUTS:
This function returns a 2-element array giving the distances
in parameter space from Value to the lower and upper bounds of
the confidence (error) interval.
PROCEDURE:
The function sorts the data, finds (by spline interpolation)
the index of the nominal value, counts up and down an
appropriate number of points to find the indices of the lower
and upper confidence interval bounds, and interpolates to find
the corresponding parameter values.
EXAMPLE:
data = randomn(seed, 1000000, /double) * 2d + 5d
value = median(data)
print, value, hiloerr(data, value)
MODIFICATION HISTORY:
Written by: Joseph Harrington, Cornell. 2006-04-25
[email protected]
Removed nonunique values from data when finding ival
Kevin Stevenson, UCF 2008-06-04
[email protected]
Rewrote in python
Kevin Stevenson, UCF 2008-07-08
[email protected]
Added axis keyword
Kevin Stevenson, UChicago 2014-01-08
"""
def hiloerr(data, value = None, conf = 0.34134475):
import numpy as np
    if value is None:
value = np.median(data)
sdat = np.unique(data) # sorted, unique values
ndat = len(sdat) # number of points
idat = np.arange(ndat)
ival = np.interp([value], sdat, idat)
ilo = ival - conf * ndat # interpolated index of low value
ihi = ival + conf * ndat # interpolated index of high value
loval = np.interp(ilo, idat, sdat)
hival = np.interp(ihi, idat, sdat)
loerr = loval - value
hierr = hival - value
return loerr[0], hierr[0]
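# Illustrative check (hypothetical): for ~1e6 samples drawn from N(5, 2),
# hiloerr(data) should return approximately (-2.0, +2.0).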
def hiloerr2D(data, value = None, conf = 0.34134475,axis=0):
'''
    '''
    Two-sided error bars for a 2D array, computed along the given axis:
    returns one (loerr, hierr) array pair across the other axis.
    '''
axis1 = axis
axis2 = (axis+1)%2
    if value is None:
value = np.median(data,axis=axis1)
sdat = np.sort(data,axis=axis1) # sorted values
ndat = np.shape(sdat) # number of points
if axis1 < axis2:
sdat = sdat.T
idat = np.arange(ndat[axis1])
loval = np.zeros(ndat[axis2])
hival = np.zeros(ndat[axis2])
for i in range(ndat[axis2]):
ival = np.interp(value[i], sdat[i], idat)
ilo = ival - conf * ndat[axis1] # interpolated index of low value
ihi = ival + conf * ndat[axis1] # interpolated index of high value
loval[i] = np.interp(ilo, idat, sdat[i])
hival[i] = np.interp(ihi, idat, sdat[i])
loerr = loval - value
hierr = hival - value
return loerr, hierr
| [
"[email protected]"
] | |
aac4544b92197a517401bbcf3d375390df0f3689 | 8001e6a88312e9b2966909a817031027efdf9c3c | /scripts/setsep.py | 48ec2e97a316e1b51dd8fff444a331fd3342e5b1 | [
"MIT"
] | permissive | rhong3/Neutrophil | e7e67f6773e1d943b0872afe272f1609b0948d0a | 97efb7cc01dc7b1bb06e29a824352d493bb4add5 | refs/heads/master | 2021-07-05T00:53:26.606819 | 2020-07-31T15:16:57 | 2020-07-31T15:16:57 | 140,477,754 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,248 | py | """
Prepare sample split
Created on 05/07/2020
@author: RH
"""
import pandas as pd
def set_sep(path, cut=0.3):
trlist = []
telist = []
valist = []
dic = pd.read_csv('../../sampled_label_ready.csv', header=0)
dic = dic.dropna()
unq = list(dic.slide.unique())
validation = unq[:int(len(unq) * cut / 2)]
valist.append(dic[dic['slide'].isin(validation)])
test = unq[int(len(unq) * cut / 2):int(len(unq) * cut)]
telist.append(dic[dic['slide'].isin(test)])
train = unq[int(len(unq) * cut):]
trlist.append(dic[dic['slide'].isin(train)])
test = pd.concat(telist)
train = pd.concat(trlist)
validation = pd.concat(valist)
tepd = pd.DataFrame(test, columns=['slide', 'path', 'label'])
tepd = tepd[['slide', 'path', 'label']]
vapd = pd.DataFrame(validation.sample(frac=1), columns=['slide', 'path', 'label'])
vapd = vapd[['slide', 'path', 'label']]
trpd = pd.DataFrame(train.sample(frac=1), columns=['slide', 'path', 'label'])
trpd = trpd[['slide', 'path', 'label']]
tepd.to_csv(path + '/te_sample.csv', header=True, index=False)
trpd.to_csv(path + '/tr_sample.csv', header=True, index=False)
vapd.to_csv(path + '/va_sample.csv', header=True, index=False)
| [
"[email protected]"
] | |
4c4a49028f4ef33794a21b4e930dab8ace2ba98a | 3e24611b7315b5ad588b2128570f1341b9c968e8 | /pacbiolib/thirdparty/pythonpkgs/numpy/numpy_1.9.2/lib/python2.7/site-packages/numpy/distutils/tests/test_misc_util.py | 1d94cce4f9a446602ea4d652551dc01c0b278231 | [
"BSD-2-Clause"
] | permissive | bioCKO/lpp_Script | dc327be88c7d12243e25557f7da68d963917aa90 | 0cb2eedb48d4afa25abc2ed7231eb1fdd9baecc2 | refs/heads/master | 2022-02-27T12:35:05.979231 | 2019-08-27T05:56:33 | 2019-08-27T05:56:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,092 | py | #! python
from __future__ import division, absolute_import, print_function
from numpy.testing import *
from numpy.distutils.misc_util import appendpath, minrelpath, \
gpaths, get_shared_lib_extension
from os.path import join, sep, dirname
ajoin = lambda *paths: join(*((sep,)+paths))
class TestAppendpath(TestCase):
def test_1(self):
assert_equal(appendpath('prefix', 'name'), join('prefix', 'name'))
assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name'))
assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name'))
assert_equal(appendpath('prefix', '/name'), join('prefix', 'name'))
def test_2(self):
assert_equal(appendpath('prefix/sub', 'name'),
join('prefix', 'sub', 'name'))
assert_equal(appendpath('prefix/sub', 'sup/name'),
join('prefix', 'sub', 'sup', 'name'))
assert_equal(appendpath('/prefix/sub', '/prefix/name'),
ajoin('prefix', 'sub', 'name'))
def test_3(self):
assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'),
ajoin('prefix', 'sub', 'sup', 'name'))
assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'),
ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name'))
assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'),
ajoin('prefix', 'sub', 'sub2', 'sup', 'name'))
class TestMinrelpath(TestCase):
def test_1(self):
n = lambda path: path.replace('/', sep)
assert_equal(minrelpath(n('aa/bb')), n('aa/bb'))
assert_equal(minrelpath('..'), '..')
assert_equal(minrelpath(n('aa/..')), '')
assert_equal(minrelpath(n('aa/../bb')), 'bb')
assert_equal(minrelpath(n('aa/bb/..')), 'aa')
assert_equal(minrelpath(n('aa/bb/../..')), '')
assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd'))
assert_equal(minrelpath(n('.././..')), n('../..'))
assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd'))
class TestGpaths(TestCase):
def test_gpaths(self):
local_path = minrelpath(join(dirname(__file__), '..'))
ls = gpaths('command/*.py', local_path)
assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls))
f = gpaths('system_info.py', local_path)
assert_(join(local_path, 'system_info.py')==f[0], repr(f))
class TestSharedExtension(TestCase):
def test_get_shared_lib_extension(self):
import sys
ext = get_shared_lib_extension(is_python_ext=False)
if sys.platform.startswith('linux'):
assert_equal(ext, '.so')
elif sys.platform.startswith('gnukfreebsd'):
assert_equal(ext, '.so')
elif sys.platform.startswith('darwin'):
assert_equal(ext, '.dylib')
elif sys.platform.startswith('win'):
assert_equal(ext, '.dll')
# just check for no crash
assert_(get_shared_lib_extension(is_python_ext=True))
if __name__ == "__main__":
run_module_suite()
| [
"[email protected]"
] | |
0016dfecabc8f4703006c562cb1830759b8d09f0 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnuninvit.py | b685fb2e70cc37ddf8c39aef951b7792608f3af0 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 335 | py | ii = [('BentJDO2.py', 1), ('CoolWHM2.py', 1), ('GodwWSL2.py', 1), ('SadlMLP.py', 1), ('CarlTFR.py', 2), ('CoolWHM.py', 1), ('LandWPA.py', 1), ('AinsWRR.py', 1), ('SoutRD2.py', 1), ('FitzRNS4.py', 2), ('CoolWHM3.py', 2), ('FitzRNS.py', 2), ('BentJRP.py', 1), ('LyttELD3.py', 1), ('StorJCC.py', 1), ('ClarGE3.py', 3), ('FitzRNS2.py', 4)] | [
"[email protected]"
] | |
a6d280517aa13ff2286782889883671f512780fc | 795c2d7e2188f2ecb3e72bbb4053726856009c0d | /cosmic/mid_point_norm.py | dc35014fc1e912c5516995bc63fffda6fb829d4d | [
"Apache-2.0"
] | permissive | markmuetz/cosmic | 3a4ef310cb9cb92b81ff57b74bb1511841f790a5 | f215c499bfc8f1d717dea6aa78a58632a4e89113 | refs/heads/master | 2023-08-01T10:55:52.596575 | 2021-09-20T19:26:33 | 2021-09-20T19:26:33 | 217,045,140 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,328 | py | import numpy as np
from numpy import ma
from matplotlib import cbook
from matplotlib.colors import Normalize
class MidPointNorm(Normalize):
# See https://stackoverflow.com/a/7746125/54557
def __init__(self, midpoint=0, vmin=None, vmax=None, clip=False):
Normalize.__init__(self, vmin, vmax, clip)
self.midpoint = midpoint
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
self.autoscale_None(result)
vmin, vmax, midpoint = self.vmin, self.vmax, self.midpoint
if not (vmin < midpoint < vmax):
raise ValueError("midpoint must be between maxvalue and minvalue.")
elif vmin == vmax:
result.fill(0) # Or should it be all masked? Or 0.5?
elif vmin > vmax:
raise ValueError("maxvalue must be bigger than minvalue")
else:
vmin = float(vmin)
vmax = float(vmax)
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
# ma division is very slow; we can take a shortcut
resdat = result.data
# First scale to -1 to 1 range, than to from 0 to 1.
resdat -= midpoint
resdat[resdat > 0] /= abs(vmax - midpoint)
resdat[resdat < 0] /= abs(vmin - midpoint)
resdat /= 2.
resdat += 0.5
result = ma.array(resdat, mask=result.mask, copy=False)
if is_scalar:
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax, midpoint = self.vmin, self.vmax, self.midpoint
if cbook.iterable(value):
val = ma.asarray(value)
val = 2 * (val-0.5)
val[val > 0] *= abs(vmax - midpoint)
val[val < 0] *= abs(vmin - midpoint)
val += midpoint
return val
else:
val = 2 * (value - 0.5)
if val < 0:
return val * abs(vmin - midpoint) + midpoint
else:
return val * abs(vmax - midpoint) + midpoint
| [
"[email protected]"
] | |
b3ebe561bbaf02cde6bf104ccd343e4934fb3c27 | f6af8b2ebeae16ff443e08b90508d88461bfc521 | /to-do.py | 946031eaff78d7931a072f02d077eb67c22d3b74 | [] | no_license | mitch-io/JetBrains | 0a8d92c9a30460c64b89bc3a1b848558acb02c0e | d75aea34be05e574a3e2b99845f16921e2048be7 | refs/heads/master | 2022-12-31T12:01:54.656062 | 2020-10-23T04:04:57 | 2020-10-23T04:04:57 | 278,274,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,824 | py | # All imports
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Date
from sqlalchemy.orm import sessionmaker
from datetime import datetime, timedelta
# create engine
engine = create_engine('sqlite:///todo.db?check_same_thread=False')
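# check_same_thread=False relaxes SQLite's same-thread check so the one
# connection can be reused by the sessions created below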
# describe table
Base = declarative_base()
class Tasks(Base):
__tablename__ = 'task'
id = Column(Integer, primary_key=True)
task = Column(String)
deadline = Column(Date, default=datetime.today())
def __repr__(self):
return (f'{self.id}. {self.task}')
# create table 'task' in db 'todo.db'
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
# method to get today's tasks
def read_today():
# create session
read_session = Session()
    today = datetime.today()
    today_tasks = read_session.query(Tasks).filter(Tasks.deadline == today.date()).all()
print('Today ' + today.strftime("%#d %b") + ':')
if len(today_tasks) == 0:
print('Nothing to do!')
else:
for task in today_tasks:
print(task)
print()
read_session.commit()
# method to get week's tasks
def read_week():
# create session
read_session = Session()
today = datetime.today()
end_of_the_week = today + timedelta(days=6)
week_tasks = read_session.query(Tasks).filter(Tasks.deadline <= end_of_the_week, Tasks.deadline >= today.date()).order_by(Tasks.deadline)
for x in range(0, 7):
current_day = today + timedelta(days=x)
print('{}:'.format(current_day.strftime('%A %d %b')))
count = 1
there_was_a_task = False
for task in week_tasks:
if task.deadline == current_day.date():
print('{}. {}'.format(count, task.task))
print()
there_was_a_task = True
count += 1
if there_was_a_task != True:
print('Nothing to do!')
print()
read_session.commit()
def read_all():
# create session
read_session = Session()
all_tasks = read_session.query(Tasks).filter().order_by(Tasks.deadline)
print('All tasks:')
for task in all_tasks:
print(str(task.id) + '. ' + task.task + '. ' + task.deadline.strftime("%#d %b"))
print()
read_session.commit()
def read_missed():
# create session
read_session = Session()
today = datetime.today()
missed_tasks = read_session.query(Tasks).filter(Tasks.deadline < today.date()).order_by(Tasks.deadline)
print('Missed tasks:')
for task in missed_tasks:
print(str(task.id) + '. ' + task.task + '. ' + task.deadline.strftime("%#d %b"))
print()
read_session.commit()
# method to update db
def add_task(task_name, task_deadline):
update_session = Session()
new_task = Tasks(task=task_name, deadline=task_deadline)
update_session.add(new_task)
update_session.commit()
print('The task has been added!')
# method to update db
def delete_task():
# create session
update_session = Session()
all_tasks = update_session.query(Tasks).filter().order_by(Tasks.deadline)
todo = False
to_delete = 0
for task in all_tasks:
todo = True
if todo == False:
print('Nothing to do!')
else:
print('Choose the number of the task you want to delete:')
for task in all_tasks:
print(str(task.id) + '. ' + task.task + '. ' + task.deadline.strftime("%#d %b"))
to_delete = int(input())
specific_row = all_tasks[to_delete-1]
update_session.delete(specific_row)
print('The task has been deleted!')
print()
update_session.commit()
#---- main program execution starts here----
while True:
print("1) Today's tasks")
print("2) Week's tasks")
print("3) All tasks")
print("4) Missed tasks")
print("5) Add task")
print("6) Delete task")
print("0) Exit")
choice = int(input())
if choice == 1:
print()
read_today()
elif choice == 2:
print()
read_week()
elif choice == 3:
print()
read_all()
elif choice == 4:
print()
read_missed()
elif choice == 5:
print('Enter task')
task_name = input()
print('Enter deadline')
deadline_str = input()
deadline_format = datetime.strptime(deadline_str, "%Y-%m-%d")
        add_task(task_name, task_deadline=deadline_format.date())
elif choice == 6:
print()
delete_task()
elif choice == 0:
print()
print('Bye!')
break
| [
"[email protected]"
] | |
9b3eeaf3a7baffc0bdfdf4bec62ad338a74ae4c4 | 2116d41bc28555bf6cb82f5a845a34de6c68d234 | /05-natural-language-processing-fundamentals-in-python/4-building-a-fake-news-classifier/03-CountVectorizer-for-text-classification.py | c7e89dbf27b49570c54be1d4c9b2bde2f32c4711 | [] | no_license | tfuzi/DataCamp | 7084f47ec13f91b307b98d9ce3496f1119120a27 | dfea4eb56d9171ef9ee2553c4f169600cbddca9e | refs/heads/master | 2023-08-09T03:10:28.251624 | 2019-02-22T21:22:06 | 2019-02-22T21:22:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,150 | py | """
CountVectorizer for text classification
It's time to begin building your text classifier! The data has been loaded into a DataFrame called df. Explore it in the IPython Shell to investigate what columns you can use. The .head() method is particularly informative.
In this exercise, you'll use pandas alongside scikit-learn to create a sparse text vectorizer you can use to train and test a simple supervised model. To begin, you'll set up a CountVectorizer and investigate some of its features.
INSTRUCTIONS
100XP
Possible Answers
Import CountVectorizer from sklearn.feature_extraction.text and train_test_split from sklearn.model_selection.
Create a Series y to use for the labels by assigning the .label attribute of df to y.
Using df["text"] (features) and y (labels), create training and test sets using train_test_split(). Use a test_size of 0.33 and a random_state of 53.
Create a CountVectorizer object called count_vectorizer. Ensure you specify the keyword argument stop_words="english" so that stop words are removed.
Fit and transform the training data X_train using the .fit_transform() method. Do the same with the test data X_test, except using the .transform() method.
Print the first 10 features of the count_vectorizer using its .get_feature_names() method.
"""
# Import the necessary modules
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
# Print the head of df
print(df.head())
# Create a series to store the labels: y
y = df.label
# Create training and test sets
X_train, X_test, y_train, y_test = train_test_split(df["text"], y, test_size=0.33, random_state=53)
# Initialize a CountVectorizer object: count_vectorizer
count_vectorizer = CountVectorizer(stop_words='english')
# Transform the training data using only the 'text' column values: count_train
count_train = count_vectorizer.fit_transform(X_train.values)
# Transform the test data using only the 'text' column values: count_test
count_test = count_vectorizer.transform(X_test.values)
# Print the first 10 features of the count_vectorizer
print(count_vectorizer.get_feature_names()[:10])
| [
"[email protected]"
] | |
2108350f34a6977f7d3ca23b432fe55f8291869c | bdde256489bdb400cb3030f0127d67d6dfa632d0 | /Keras/2020-11-10/keras08_val.py | 87e227e340fb7d57f9743c6dd0b38d19129cc70f | [] | no_license | Vaan525/Bit_seoul | 04c9555811b18a3598eecb79fa59fd552000e9ed | 60acb552802bce85aed1d04db7a8acb0370fa3d9 | refs/heads/master | 2023-01-06T15:17:33.436178 | 2020-11-10T09:42:33 | 2020-11-10T09:42:33 | 311,254,620 | 0 | 0 | null | 2020-11-10T09:42:35 | 2020-11-09T07:18:33 | Python | UTF-8 | Python | false | false | 1,395 | py |
import numpy as np
# 1. Data
x_train = np.array([1,2,3,4,5,6,7,8,9,10]) # training data
y_train = np.array([1,2,3,4,5,6,7,8,9,10])
x_val = np.array([11,12,13,14,15]) # validation data
y_val = np.array([11,12,13,14,15])
#x_pred = np.array([16,17,18]) # data to predict
x_test = np.array([16,17,18,19,20])
y_test = np.array([16,17,18,19,20])
# train, test, val = 6 : 2 : 2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# 2. Build the model
model = Sequential()
model.add(Dense(20, input_dim=1))
model.add(Dense(70))
model.add(Dense(200))
model.add(Dense(70))
model.add(Dense(20))
model.add(Dense(1))
# 3. Compile, train
model.compile(loss='mse', optimizer='adam',
metrics=['mse'])
model.fit(x_train, y_train, epochs=100,
validation_data=(x_val, y_val))
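# fit() reports loss/metrics on (x_val, y_val) after every epoch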
# 4. Evaluate, predict
# loss, acc = model.evaluate(x, y)
loss, mse = model.evaluate(x_test, y_test, batch_size=1)
print("loss : ", loss)
print("acc : ", mse)
# 4. 예측
y_predict = model.predict(x_test)
print("결과물 : \n : ", y_predict)
# RMSE
from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_predict):
return np.sqrt(mean_squared_error(y_test, y_predict))
print("RMSE : ", RMSE(y_test, y_predict))
# R2
from sklearn.metrics import r2_score
r2 = r2_score(y_test, y_predict)
print("R2 : ", r2)
| [
"[email protected]"
] | |
378a723595ccae9179bb45c15e7c834c5b569232 | fd23009998b2a3859094f7fd937601b60f9f286b | /membershipfunction.py | 822f24cf02524b8a86ed60cf392ab57e5f8b0c53 | [] | no_license | ravids/ryerson-mrp | d92164f956474973e7d4be4b1a724ae18bb6ae33 | b6867c7a8e189006e4753ef2b40ddb67c911dc3e | refs/heads/master | 2022-12-02T01:57:38.854542 | 2020-08-14T21:52:05 | 2020-08-14T21:52:05 | 284,830,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 31 15:41:58 2014
@author: tim.meggs
"""
from skfuzzy import gaussmf, gbellmf, sigmf
class MemFuncs:
'Common base class for all employees'
funcDict = {'gaussmf': gaussmf, 'gbellmf': gbellmf, 'sigmf': sigmf}
def __init__(self, MFList):
self.MFList = MFList
def evaluateMF(self, rowInput):
if len(rowInput) != len(self.MFList):
print("Number of variables does not match number of rule sets: " + " Expected " + str(len(rowInput)) + ", but only " + str(len(self.MFList)) + " membership functions defined")
return [[self.funcDict[self.MFList[i][k][0]](rowInput[i],**self.MFList[i][k][1]) for k in range(len(self.MFList[i]))] for i in range(len(rowInput))] | [
"[email protected]"
] | |
195c47dfd62cdc4b7d62af99a21336417ce49bd1 | c071eb46184635818e8349ce9c2a78d6c6e460fc | /system/python_stubs/-745935208/PyQt5/QtGui/QPolygon.py | 612c81e27c75cf5f9c7410d056a20153ffc499ab | [] | no_license | sidbmw/PyCharm-Settings | a71bc594c83829a1522e215155686381b8ac5c6e | 083f9fe945ee5358346e5d86b17130d521d1b954 | refs/heads/master | 2020-04-05T14:24:03.216082 | 2018-12-28T02:29:29 | 2018-12-28T02:29:29 | 156,927,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,062 | py | # encoding: utf-8
# module PyQt5.QtGui
# from C:\Users\siddh\AppData\Local\Programs\Python\Python37\lib\site-packages\PyQt5\QtGui.pyd
# by generator 1.146
# no doc
# imports
import PyQt5.QtCore as __PyQt5_QtCore
import sip as __sip
class QPolygon(__sip.simplewrapper):
"""
QPolygon()
QPolygon(QPolygon)
QPolygon(Iterable[QPoint])
QPolygon(QRect, closed: bool = False)
QPolygon(int)
QPolygon(List[int])
QPolygon(Any)
"""
def append(self, QPoint): # real signature unknown; restored from __doc__
""" append(self, QPoint) """
pass
def at(self, p_int): # real signature unknown; restored from __doc__
""" at(self, int) -> QPoint """
pass
def boundingRect(self): # real signature unknown; restored from __doc__
""" boundingRect(self) -> QRect """
pass
def clear(self): # real signature unknown; restored from __doc__
""" clear(self) """
pass
def contains(self, QPoint): # real signature unknown; restored from __doc__
""" contains(self, QPoint) -> bool """
return False
def containsPoint(self, QPoint, Qt_FillRule): # real signature unknown; restored from __doc__
""" containsPoint(self, QPoint, Qt.FillRule) -> bool """
return False
def count(self, QPoint=None): # real signature unknown; restored from __doc__ with multiple overloads
"""
count(self, QPoint) -> int
count(self) -> int
"""
return 0
def data(self): # real signature unknown; restored from __doc__
""" data(self) -> sip.voidptr """
pass
def fill(self, QPoint, size=-1): # real signature unknown; restored from __doc__
""" fill(self, QPoint, size: int = -1) """
pass
def first(self): # real signature unknown; restored from __doc__
""" first(self) -> QPoint """
pass
def indexOf(self, QPoint, from_=0): # real signature unknown; restored from __doc__
""" indexOf(self, QPoint, from_: int = 0) -> int """
return 0
def insert(self, p_int, QPoint): # real signature unknown; restored from __doc__
""" insert(self, int, QPoint) """
pass
def intersected(self, QPolygon): # real signature unknown; restored from __doc__
""" intersected(self, QPolygon) -> QPolygon """
return QPolygon
def intersects(self, QPolygon): # real signature unknown; restored from __doc__
""" intersects(self, QPolygon) -> bool """
return False
def isEmpty(self): # real signature unknown; restored from __doc__
""" isEmpty(self) -> bool """
return False
def last(self): # real signature unknown; restored from __doc__
""" last(self) -> QPoint """
pass
def lastIndexOf(self, QPoint, from_=-1): # real signature unknown; restored from __doc__
""" lastIndexOf(self, QPoint, from_: int = -1) -> int """
return 0
def mid(self, p_int, length=-1): # real signature unknown; restored from __doc__
""" mid(self, int, length: int = -1) -> QPolygon """
return QPolygon
def point(self, p_int): # real signature unknown; restored from __doc__
""" point(self, int) -> QPoint """
pass
def prepend(self, QPoint): # real signature unknown; restored from __doc__
""" prepend(self, QPoint) """
pass
def putPoints(self, p_int, p_int_1, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
putPoints(self, int, int, int, *)
putPoints(self, int, int, QPolygon, from_: int = 0)
"""
pass
def remove(self, p_int, p_int_1=None): # real signature unknown; restored from __doc__ with multiple overloads
"""
remove(self, int)
remove(self, int, int)
"""
pass
def replace(self, p_int, QPoint): # real signature unknown; restored from __doc__
""" replace(self, int, QPoint) """
pass
def setPoint(self, p_int, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
setPoint(self, int, QPoint)
setPoint(self, int, int, int)
"""
pass
def setPoints(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
setPoints(self, List[int])
setPoints(self, int, int, *)
"""
pass
def size(self): # real signature unknown; restored from __doc__
""" size(self) -> int """
return 0
def subtracted(self, QPolygon): # real signature unknown; restored from __doc__
""" subtracted(self, QPolygon) -> QPolygon """
return QPolygon
def swap(self, QPolygon): # real signature unknown; restored from __doc__
""" swap(self, QPolygon) """
pass
def translate(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
translate(self, int, int)
translate(self, QPoint)
"""
pass
def translated(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
translated(self, int, int) -> QPolygon
translated(self, QPoint) -> QPolygon
"""
return QPolygon
def united(self, QPolygon): # real signature unknown; restored from __doc__
""" united(self, QPolygon) -> QPolygon """
return QPolygon
def value(self, p_int, QPoint=None): # real signature unknown; restored from __doc__ with multiple overloads
"""
value(self, int) -> QPoint
value(self, int, QPoint) -> QPoint
"""
pass
def __add__(self, *args, **kwargs): # real signature unknown
""" Return self+value. """
pass
def __contains__(self, *args, **kwargs): # real signature unknown
""" Return key in self. """
pass
def __delitem__(self, *args, **kwargs): # real signature unknown
""" Delete self[key]. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __getitem__(self, *args, **kwargs): # real signature unknown
""" Return self[key]. """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __iadd__(self, *args, **kwargs): # real signature unknown
""" Implement self+=value. """
pass
def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
pass
def __len__(self, *args, **kwargs): # real signature unknown
""" Return len(self). """
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lshift__(self, *args, **kwargs): # real signature unknown
""" Return self<<value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
def __mul__(self, *args, **kwargs): # real signature unknown
""" Return self*value. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __rlshift__(self, *args, **kwargs): # real signature unknown
""" Return value<<self. """
pass
def __rmul__(self, *args, **kwargs): # real signature unknown
""" Return value*self. """
pass
def __setitem__(self, *args, **kwargs): # real signature unknown
""" Set self[key] to value. """
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__hash__ = None
| [
"[email protected]"
] | |
bbb7f1c83f7359a38d4466e00142b058246b28e4 | 35434cd441fd21073299b491336921d5b0921e02 | /common/migrations/0002_auto_20190622_1347.py | dd6397c361de387c614d9215c7cc0c34d65c3e16 | [] | no_license | liyustar/cube | 2948ad82bf979951424a457a6fdffb4192db5101 | 1c39eb8596175f5d0c56bad1b995f7e05dfbe5d0 | refs/heads/master | 2022-12-15T07:11:07.371030 | 2019-06-23T10:20:28 | 2019-06-23T10:20:28 | 193,177,852 | 0 | 0 | null | 2022-12-08T05:48:12 | 2019-06-22T01:29:05 | Python | UTF-8 | Python | false | false | 1,097 | py | # Generated by Django 2.2.2 on 2019-06-22 13:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('common', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='paramconfig',
name='creator',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='paramconfig_set', related_query_name='paramconfig', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='paramconfig',
name='key',
field=models.CharField(max_length=100, unique=True),
),
migrations.AddField(
model_name='paramconfig',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
b45fbda2e9e72240e143a19f2b3b9996bdab660d | cc3385ad008196cb07c66af5efcc84ce0ea00398 | /sentiment_analysis/pcfgcp.py | efc679743a19326515f834eaa81ec93c29717a36 | [] | no_license | mjeffries-pivotal/cloudnativeroadshow | 8d8d6b6777185a107b6c4ce2ceb7e82bb7d6b092 | 88e388644cdff7ed24783b593b69e4c6f59e7d57 | refs/heads/master | 2021-01-18T16:39:21.703059 | 2017-03-30T22:11:13 | 2017-03-30T22:11:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,703 | py | import json
import os
from operator import itemgetter
import base64
from google.cloud import language
#from google.cloud import vision
#from google.cloud.vision.image import Image
from google.oauth2.service_account import Credentials
"""Base class for accessing Google Cloud Platform services from Python apps
deployed to PCF. This class implements the authentication part.
Here are the various service names, as defined in
https://github.com/GoogleCloudPlatform/gcp-service-broker/blob/master/brokerapi/brokers/models/service_broker.go
const StorageName = "google-storage"
const BigqueryName = "google-bigquery"
const BigtableName = "google-bigtable"
const CloudsqlName = "google-cloudsql"
const PubsubName = "google-pubsub"
const MlName = "google-ml-apis"
"""
class PcfGcp:
def __init__(self):
self.VCAP_SERVICES = None
self.clients = {
'google-storage': None
, 'google-bigquery': None
, 'google-bigtable': None
, 'google-cloudsql': None
, 'google-pubsub': None
, 'language': None
, 'vision': None
}
self.projectId = None
self.bucketName = None # Storage
def className(self):
return self.__class__.__name__
def setClient(self, name, val):
self.clients[name] = val
def get_service_instance_dict(self, serviceName): # 'google-storage', etc.
vcapStr = os.environ.get('VCAP_SERVICES')
if vcapStr is None:
raise Exception('VCAP_SERVICES not found in environment variables (necessary for credentials)')
vcap = json.loads(vcapStr)
svcs = None
try:
svcs = vcap[serviceName][0]
except:
raise Exception('No instance of ' + serviceName + ' available')
return svcs
"""serviceName is one of the keys in clients
"""
def get_google_cloud_credentials(self, serviceName):
"""Returns oauth2 credentials of type
google.oauth2.service_account.Credentials
"""
service_info = self.get_service_instance_dict(serviceName)
        pkey_data = base64.b64decode(service_info['credentials']['PrivateKeyData'])
pkey_dict = json.loads(pkey_data)
self.credentials = Credentials.from_service_account_info(pkey_dict)
# Get additional fields
self.projectId = service_info['credentials']['ProjectId']
if 'bucket_name' in service_info['credentials']:
self.bucketName = service_info['credentials']['bucket_name']
return self.credentials
"""This can't be generic since the Client varies across services"""
def getClient(self, serviceName):
if self.clients[serviceName] is None:
self.clients[serviceName] = language.Client(self.get_google_cloud_credentials(serviceName))
return self.clients[serviceName]
"""Ref. https://cloud.google.com/natural-language/docs/sentiment-tutorial
score ranges from -1.0 to 1.0
magnitude ranges from 0.0 to Infinite (depends on length of document)
"""
def getLanguage(self):
if self.clients['language'] is None:
self.clients['language'] = language.Client(self.get_google_cloud_credentials('google-ml-apis'))
#print 'projectId: %s' % self.projectId
return self.clients['language']
"""Ref. https://cloud.google.com/vision/docs/reference/libraries#client-libraries-install-python"""
# def getVision(self):
# if self.clients['vision'] is None:
# self.clients['vision'] = vision.Client(project=self.projectId,
# credentials=self.get_google_cloud_credentials('google-ml-apis'))
# return self.clients['vision']
def getStorage(self):
pass
def getBigQuery(self):
pass
def getBigtable(self):
pass
def getCloudSql(self):
pass
def getPubSub(self):
pass
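
# A minimal usage sketch (hypothetical, not part of the original file; it assumes
# the app is bound to a "google-ml-apis" service instance so that VCAP_SERVICES
# carries its credentials):
#
#   pcf = PcfGcp()
#   nl = pcf.getLanguage()
#   doc = nl.document_from_text('PCF makes deploys easy.')
#   print(doc.analyze_sentiment())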

# ==== /src/loss/loss_functions.py (repo: TimingSpace/PADVO2) ====

from torch import nn
from torch.autograd import Variable
import torch
import numpy as np
celoss = nn.CrossEntropyLoss()
# predict_result b c h w
# ground truth b c 1 1
# attention b 1 h w
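# The exp(attention) factor re-weights the per-pixel squared error (larger
# attention amplifies the error at that location), while the 0.1 * attention^2
# term keeps the learned attention map from growing without bound.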
def SingleShotAttentionLoss(predict_result,ground_truth,attention,mask=[0,1,2,3,4,5]):
diff = ground_truth[:,mask]-predict_result[:,mask]
diff_s = diff.pow(2)
#loss = diff_s*torch.exp(attention)+0.1*torch.abs(attention)
loss = diff_s*torch.exp(attention)+0.1*attention.pow(2)
loss = loss/(loss.size()[0]*loss.size()[2]*loss.size()[3])
loss = loss.sum()
return loss
def WeightedMSELoss(predict_result,ground_truth,weights=torch.FloatTensor([1,1,1,100,100,100])):
diff = ground_truth-predict_result
diff_s = diff.pow(2)
loss = diff_s*weights
loss = loss/loss.size()[0]
loss = loss.sum()
return loss
# ground vehicle loss
def GVLoss(predict_result,ground_truth):
diff = ground_truth[:,[2,4]]-predict_result
#diff = ground_truth-predict_result
diff_s = diff.pow(2)
loss = diff_s
loss = loss/loss.size()[0]
loss = loss.sum()
return loss
def MSELoss(predict_result,ground_truth,mask=[0,1,2,3,4,5]):
diff = ground_truth[:,mask]-predict_result[:,mask]
#diff = ground_truth-predict_result
diff_s = diff.pow(2)
loss = diff_s
loss = loss/loss.size()[0]
loss = loss.sum()
return loss
def ReliabilityMetric(predict_result,ground_truth,attention):
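    # Compares two spatial distributions - the normalized per-location squared
    # error and a transform of the attention map - via KL divergence, so a low
    # value means the attention concentrates where the network actually errs.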
ground_truth = ground_truth.view(ground_truth.size(0),6,1,1)
diff = ground_truth-predict_result
diff_s = diff.pow(2)
loss = diff_s.sum(1).view(ground_truth.size(0),-1)
loss /= torch.sum(loss)
attention = attention.view(ground_truth.size(0),-1)
attention_exp = -attention*torch.exp(-attention)
attention_exp/=torch.sum(attention_exp)
div= torch.nn.functional.kl_div(torch.log(attention_exp),loss)
return div
def PatchLoss(predict_result,ground_truth):
ground_truth = ground_truth.view(ground_truth.size(0),6,1,1)
diff = ground_truth-predict_result
diff_s = diff.pow(2)
loss = diff_s.sum(1)
return loss
def SingleShotLoss(predict_result,ground_truth,mask=[0,1,2,3,4,5]):
diff = ground_truth[:,mask]-predict_result[:,mask]
diff_s = diff.pow(2)
loss = diff_s
loss = loss/(loss.size()[0]*loss.size()[2]*loss.size()[3])
loss = loss.sum()
return loss
def GroupWithATTLoss(f_12,f_g_12,att_12,b_21,b_g_21,att_21,f_23,f_g_23,att_23,b_32,b_g_32,att_32,f_13,f_g_13,att_13,b_31,b_g_31,att_31,mask=[0,1,2,3,4,5]):
f_g_12 = f_g_12.view(f_g_12.size(0),6,1,1)
#f_g_13 = f_g_13.view(f_g_12.size(0),6,1,1)
#f_g_23 = f_g_23.view(f_g_12.size(0),6,1,1)
b_g_21 = b_g_21.view(f_g_12.size(0),6,1,1)
#b_g_32 = b_g_32.view(f_g_12.size(0),6,1,1)
#b_g_31 = b_g_31.view(f_g_12.size(0),6,1,1)
f_12_loss = SingleShotAttentionLoss(f_12,f_g_12,att_12,mask)
#f_23_loss = SingleShotAttentionLoss(f_23,f_g_23,att_23)
#f_13_loss = SingleShotAttentionLoss(f_13,f_g_13,att_13)
b_21_loss = SingleShotAttentionLoss(b_21,b_g_21,att_21,mask)
#b_32_loss = SingleShotAttentionLoss(b_32,b_g_32,att_32)
#b_31_loss = SingleShotAttentionLoss(b_31,b_g_31,att_31)
loss = f_12_loss+b_21_loss
return loss
def ScaleLoss(f_12,g_12):
scale_g = torch.sqrt(torch.sum(g_12[:,0:3]*g_12[:,0:3],1).view(g_12.shape[0],1))
diff = scale_g - f_12
diff_s = diff.pow(2)
loss = diff_s
loss = loss.mean()
return loss
def GroupScaleLoss(f_12,f_g_12,b_21,b_g_21):
f_12 = f_12.mean(3).mean(2)
b_21 = b_21.mean(3).mean(2)
f_12_loss = ScaleLoss(f_12,f_g_12)
b_21_loss = ScaleLoss(b_21,b_g_21)
loss = f_12_loss+b_21_loss
return loss
def GroupGVLoss(f_12,f_g_12,b_21,b_g_21):
f_12_loss = GVLoss(f_12,f_g_12)
b_21_loss = GVLoss(b_21,b_g_21)
loss = f_12_loss+b_21_loss
return loss
def GroupLoss(f_12,f_g_12,b_21,b_g_21,f_23,f_g_23,b_32,b_g_32,f_13,f_g_13,b_31,b_g_31,mask=[0,1,2,3,4,5]):
f_12 = f_12.mean(3).mean(2)
#f_13 = f_13.mean(3).mean(2)
#f_23 = f_23.mean(3).mean(2)
b_21 = b_21.mean(3).mean(2)
#b_32 = b_32.mean(3).mean(2)
#b_31 = b_31.mean(3).mean(2)
f_12_loss = MSELoss(f_12,f_g_12,mask)
#f_23_loss = MSELoss(f_23,f_g_23)
#f_13_loss = MSELoss(f_13,f_g_13)
b_21_loss = MSELoss(b_21,b_g_21,mask)
#b_32_loss = MSELoss(b_32,b_g_32)
#b_31_loss = MSELoss(b_31,b_g_31)
loss = f_12_loss+b_21_loss
return loss
def disc_loss(result,groundtruth):
r = result[0]
t = result[1]
#print(r.shape,t.shape,groundtruth.shape)
r = r.mean((2,3))
r_diff = groundtruth[:,4]-r
#diff = ground_truth-predict_result
diff_s = r_diff.pow(2)
loss = diff_s
loss = loss/loss.size()[0]
loss_r = loss.sum()
#print(r.shape,t.shape,groundtruth.shape)
#print(loss_r)
z = groundtruth[:,2]
#z += 0.05
#z *= 10
z += 0.4
z = z.long()
#print(z)
loss_t = celoss(t,z)
#print(loss_t)
return loss_t
def GroupWithSSLoss(f_12,f_g_12,b_21,b_g_21,f_23,f_g_23,b_32,b_g_32,f_13,f_g_13,b_31,b_g_31,mask=[0,1,2,3,4,5]):
f_g_12 = f_g_12.view(f_g_12.size(0),6,1,1)
#f_g_13 = f_g_13.view(f_g_12.size(0),6,1,1)
#f_g_23 = f_g_23.view(f_g_12.size(0),6,1,1)
b_g_21 = b_g_21.view(f_g_12.size(0),6,1,1)
#b_g_32 = b_g_32.view(f_g_12.size(0),6,1,1)
#b_g_31 = b_g_31.view(f_g_12.size(0),6,1,1)
f_12_loss = SingleShotLoss(f_12,f_g_12,mask)
#f_23_loss = SingleShotLoss(f_23,f_g_23)
#f_13_loss = SingleShotLoss(f_13,f_g_13)
b_21_loss = SingleShotLoss(b_21,b_g_21,mask)
#b_32_loss = SingleShotLoss(b_32,b_g_32)
#b_31_loss = SingleShotLoss(b_31,b_g_31)
loss = f_12_loss+b_21_loss
return loss
def GroupWithMSELoss(f_12,f_g_12,b_21,b_g_21,f_23,f_g_23,b_32,b_g_32,f_13,f_g_13,b_31,b_g_31,weights=torch.FloatTensor([1,1,1,100,100,100])):
f_12_loss = WeightedMSELoss(f_12,f_g_12,weights)
f_23_loss = WeightedMSELoss(f_23,f_g_23,weights)
f_13_loss = WeightedMSELoss(f_13,f_g_13,weights)
b_21_loss = WeightedMSELoss(b_21,b_g_21,weights)
b_32_loss = WeightedMSELoss(b_32,b_g_32,weights)
b_31_loss = WeightedMSELoss(b_31,b_g_31,weights)
#loss = forward_mse_loss+backward_mse_loss+cycle_loss
loss = f_12_loss+b_21_loss
#loss = f_12_loss
#loss = f_12_loss+f_23_loss+0.3*f_13_loss+b_21_loss+b_32_loss+0.3*b_31_loss
#loss = backward_mse_loss
#loss = forward_mse_loss
return loss
def FullSequenceLoss(predict,groundtruth):
diff = groundtruth - predict
diff=diff*diff
diff = diff/diff.shape[0]
loss = np.sum(diff)
return loss
if __name__ == '__main__':
predict_result = torch.autograd.Variable(torch.FloatTensor(4,6,30,100).zero_())
ground_truth = torch.autograd.Variable(torch.FloatTensor(4,6).zero_(),requires_grad=True)
#loss = MSELoss(predict_result,ground_truth)
loss = GroupLoss(predict_result,ground_truth,predict_result,ground_truth,predict_result,ground_truth,predict_result,ground_truth,predict_result,ground_truth,predict_result,ground_truth)
print(loss)
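    # A minimal sketch of the attention-weighted variant (shapes follow the
    # comments at the top of this file; this demo is not part of the original
    # experiments):
    attention = torch.autograd.Variable(torch.FloatTensor(4, 1, 30, 100).zero_())
    att_loss = SingleShotAttentionLoss(predict_result, ground_truth.view(4, 6, 1, 1), attention)
    print(att_loss)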

# ==== /generate_model.py (repo: euCanSHare/model_generation, license: Apache-2.0) ====

import sys, os, logging, random, pickle, datetime, joblib
import pandas as pd
import numpy as np
#Preprocessing
from sklearn.preprocessing import MinMaxScaler, RobustScaler  # public import path instead of the private _data module
#Feature selection and dimensionality reduction
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.decomposition import PCA, NMF
#from mlxtend.feature_selection import SequentialFeatureSelector as SFS
#Machine Learning Models
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import tree
#Model selection and Validation
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import Pipeline
#Performance Metrics
from sklearn.metrics import classification_report, accuracy_score, make_scorer
from sklearn.metrics import auc
from sklearn.metrics import plot_roc_curve
from sklearn.inspection import permutation_importance
#plot library
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
plt.style.use("seaborn")
#dummy dataset
from sklearn.datasets import load_breast_cancer
#pdf template
import pdfkit
from pdf.summary_template import *
# set logging config
logging.basicConfig(
filename='machine_learning_report_log.log',
level=logging.INFO,
format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
)
#VRE logger
from utils import logger
def save_pdf(path):
options = {
'disable-smart-shrinking': '',
'quiet': '',
'margin-top': '0.1in',
'margin-right': '0in',
'margin-bottom': '0in',
'margin-left': '0in',
}
pdfkit.from_string(body,path, options = options,css = os.path.join(os.getcwd(),'pdf','style.css'))
def generate_random_color():
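    # sample a random RGBA tuple, then damp the green channel, tie blue to
    # 1 - red and boost alpha, which keeps the generated palette high-contrast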
c = tuple(np.random.randint(256, size=4)/255)
c_eush = (c[0],c[1]/5,1-c[0],np.min([c[3]*3, 1.0]))
return c_eush
def run(file_dataset = None,
classifier = 'logistic_regression',
max_features = 10,
n_folds = 5,
output_file = 'default_summary.pdf'):
logging.info('Running generate_model.py')
logging.info('Current working directory: {}'.format(os.getcwd()))
logging.info('classifier {}'.format(classifier))
logging.info('max_features {}'.format(max_features))
logging.info('n_folds {}'.format(n_folds))
logger.info('Running generate_model.py')
logger.info('Current working directory: {}'.format(os.getcwd()))
logger.info('classifier {}'.format(classifier))
logger.info('max_features {}'.format(max_features))
logger.info('n_folds {}'.format(n_folds))
classifiers = {
'logistic_regression': LogisticRegression(max_iter=2000,random_state = 42)
}
execution_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
results = pd.DataFrame(columns=('file_in',
'n_features',
'mean_acc',
'std_acc',
'mean_f1',
'mean_pre',
'mean_rec',
'clf'))
if file_dataset is not None:
        logging.info('file_dataset loaded correctly: {}'.format(file_dataset))
df = pd.read_csv(file_dataset)
data = np.array(df)
ids = data[:,0]
y = data[:,1]
X = data[:,2:]
features_names = list(df.columns)[2:]
else:
logging.info('file_dataset {} not found, loading dummy dataset'.format(file_dataset))
logger.info('file_dataset {} not found, loading dummy dataset'.format(file_dataset))
X, y = load_breast_cancer(return_X_y=True)
X = np.array(X)
y = np.array(y)
features_names = list(load_breast_cancer(return_X_y=False)['feature_names'])
"""Battery of classifiers with model-agnostic feature selection"""
    for n_features in range(1, max_features + 1):  # include max_features itself; the old range(1, max_features) skipped it
logging.info('Performing analysis with the {} best features'.format(n_features))
logger.info('Performing analysis with the {} best features'.format(n_features))
# Variables for average classification report
originalclass = []
predictedclass = []
        # Make our custom scorer (it also records every fold's predictions)
def classification_report_with_accuracy_score(y_true, y_pred):
originalclass.extend(y_true)
predictedclass.extend(y_pred)
return accuracy_score(y_true, y_pred) # return accuracy score
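        # cross_val_score only keeps the scalar returned by the scorer, so the
        # lists above accumulate every fold's y_true/y_pred; they feed the single
        # pooled classification report built right after the CV call below.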
cv = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=42)
if n_features > X.shape[1]:
continue
pipe = Pipeline([
('scaler', MinMaxScaler()),
('reduce_dim', SelectKBest(chi2, k = n_features)),
('classification', classifiers[classifier])
])
# Nested CV with parameter optimization
folds_score = cross_val_score(pipe, X=X, y=y, cv=cv, scoring=make_scorer(classification_report_with_accuracy_score))
# Average values in classification report for all folds in a K-fold Cross-validation
cl_report = classification_report(originalclass, predictedclass, output_dict = True)
#joblib.dump(sfs1, './results/'+execution_time+'/sfs'+str(sfs_k)+'_'+clf[1]+file_dataset+name_feids+'_sfsobj.model')
#joblib.dump(clf[0], './results/'+execution_time+'/sfs'+str(sfs_k)+'_'+clf[1]+file_dataset+name_feids+'_clfobj.model')
        # NOTE: DataFrame.append was removed in pandas 2.0; use pd.concat on newer pandas
        results = results.append(pd.Series({'file_in':file_dataset,
'n_features':str(n_features),
'std_acc':np.std(folds_score),
'mean_acc':cl_report['accuracy'],
'mean_f1':cl_report['macro avg']['f1-score'],
'mean_pre':cl_report['macro avg']['precision'],
'mean_rec':cl_report['macro avg']['recall'],
'clf':classifier}),
ignore_index = True
)
    # rows were appended in ascending n_features order, so iloc[0] already
    # selects the smallest feature count among the ties at maximum accuracy
    best_n_features = int(results[results.mean_acc == results.mean_acc.max()].iloc[0].n_features)
logging.info('Optimal number of features found: {}'.format(best_n_features))
logger.info('Optimal number of features found: {}'.format(best_n_features))
pipe = Pipeline([
('scaler', MinMaxScaler()),
('reduce_dim', SelectKBest(chi2, k = best_n_features)),
('classification', classifiers[classifier])
])
logging.info('Generating ROC auc plot and computing feature importances...')
logger.info('Generating ROC auc plot and computing feature importances...')
tprs = []
aucs = []
importances_mean = []
importances = []
mean_fpr = np.linspace(0, 1, 100)
fig, ax = plt.subplots()
for i, (train, test) in enumerate(cv.split(X, y)):
pipe.fit(X[train], y[train])
viz = plot_roc_curve(pipe, X[test], y[test],
name='ROC fold {}'.format(i),
alpha=0.3, lw=1, ax=ax)
interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucs.append(viz.roc_auc)
perm_imp = permutation_importance(pipe, X[test], y[test], n_repeats=10,
random_state=42, n_jobs=-1)
importances_mean.append(perm_imp.importances_mean)
importances.append(perm_imp.importances) # importances per feature per subject
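        # perm_imp.importances has shape (n_features, n_repeats); keeping the full
        # array per fold lets the boxplot further below show each feature's spread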
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05])
#,title="ROC for CV{} with {} optimal selected features".format(n_folds,best_n_features))
ax.legend(loc="lower right")
fig.set_size_inches(6, 6)
fig.tight_layout()
#plt.show()
path_roc = os.path.join(os.getcwd(),'pdf','figures',"roc-curve.png")
logging.info('Saving ROC AUC plot in {}...'.format(path_roc))
logger.info('Saving ROC AUC plot in {}...'.format(path_roc))
fig.savefig(path_roc)
logging.info('Generating feature importances plot...')
logger.info('Generating feature importances plot...')
mean_relevances = np.mean(np.array(importances_mean),axis = 0)
sorted_idx = np.squeeze(np.flip(mean_relevances.argsort()))[:best_n_features]
importances = np.mean(importances,axis = 2).T
names = [features_names[ind] for ind in sorted_idx]
fig, ax = plt.subplots()
bplot = ax.boxplot(importances[sorted_idx].T,
vert=True, labels=list(range(len(names))),patch_artist=True)
#ax.set_title("Selected features importance and variance across folds")
ax.set_ylabel('Importance and variance across folds')
ax.set_xlabel('Feature Number')
legend_handles = []
for feat in range(len(names)):
name = names[feat]
color = generate_random_color()
bplot['boxes'][feat].set_facecolor(color)
legend_handles.append(mpatches.Patch( label=name,color=color))
ax.legend(loc="upper right",handles=legend_handles)
fig.set_size_inches(6, 6)
fig.tight_layout()
#plt.show()
path_rel = os.path.join(os.getcwd(),'pdf','figures',"feat-rel.png")
logging.info('Saving feature importances plot in {}...'.format(path_rel))
logger.info('Saving feature importances plot in {}...'.format(path_rel))
fig.savefig(path_rel)
try:
logging.info('Generating pdf summary...')
logger.info('Generating pdf summary...')
#TO-DO passing a dictionary to replace key information in the html string
save_pdf(output_file)
except Exception:
logging.info(sys.exc_info()[1])
logger.info(sys.exc_info()[1])
logging.info('Pdf summary generated in {}...'.format(output_file))
logger.info('Pdf summary generated in {}...'.format(output_file))
run()
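
# NOTE: run() above executes with its defaults whenever this module runs or is
# imported; with file_dataset=None it falls back to scikit-learn's bundled
# breast-cancer data (see the else branch inside run()).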

# ==== /Conv3DModel.py (repo: HYUNJI09/PD_FP-CIT_classification) ====

import os
import sys
import operator
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import cv2
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit, KFold
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix
from sklearn.utils import class_weight
import tensorflow as tf
from Resnet3D import Resnet3D
from conv3x3_3D import CNN3D
from DenseNet3D import DenseNet3D
from Inception3DModel import Inception3D
from model import *
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
from keras import backend as K
print(K.tensorflow_backend._get_available_gpus())
# keras modules
from keras.callbacks import EarlyStopping
from keras.utils.np_utils import to_categorical
from keras.utils import print_summary
from keras.utils import plot_model
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
# keras modules to design a model_name
from keras import backend as K
from keras.models import Model
from keras.layers.convolutional import Conv3D, ZeroPadding3D
from keras.layers.convolutional import MaxPooling3D, AveragePooling3D
from keras.layers import GlobalAveragePooling3D
from keras.layers import Dropout, Input
from keras.layers import Flatten, add, concatenate
from keras.layers import Dense, Concatenate, Lambda, Add
from keras.layers.normalization import BatchNormalization # batch normalization for managing internal covariate shift
from keras.layers import Activation
from keras.utils import plot_model
from keras.optimizers import Adam
# customized modules
from img_utils import thresholding_matrix
# from model import create_3d_cnn_model
from preprocess import StandardScalingData
from ImageDataIO import ImageDataIO
from set_directory import set_directory
from data_load_utils import save_pred_label
from custom_exception import *
from Interpolation3D import ImageInterpolator
from retrieve import make_col_list_from_excel
from img_utils import draw_img_on_grid, draw_img_on_grid_v2
from npy_utils import npy_mat_saver
from Logger import Logger
from TimeLogger import TimeLogger
from CustomModelSavingScheduler import CustomModelSavingScheduler
from LossLoggerScheduler import LossLoggerScheduler
from CustomEarlyStopping import CustomEarlyStopping
from CustomLearningRateScheduler import CustomLearningRateScheduler
class Conv3DModel():
# Model parameter & experimental configuration
def __init__(self, lr_rate=0.00005, num_classes=None, input_shape = None , channel_size=None, num_epochs=100,
root_dir="./", model_save_dir="models", result_save_dir="results", experiment_name=None,
batch_size=64, cv_file=None, numAdditionalFeatures=None, model_reload_path=None, tk_plot=None):
self._lr_rate = lr_rate
self._num_classes=num_classes
self._num_epochs = num_epochs
self._input_shape = input_shape
self._channel_size = channel_size
# root dir
self._root_dir = root_dir
set_directory(self._root_dir)
self._experiment_name = experiment_name
if self._experiment_name is not None:
self._root_dir = os.path.normpath(os.path.join(self._root_dir, self._experiment_name))
set_directory(self._root_dir)
self._model_save_dir = model_save_dir
self._result_save_dir = result_save_dir
self._train_batch_size = batch_size
self._test_batch_size = batch_size
self._cv_file = cv_file
self._numAdditionalFeatures = numAdditionalFeatures
self._model = None
self._model_reload_path = model_reload_path
set_directory(self._root_dir)
if not os.path.isdir(os.path.join(self._root_dir, self._model_save_dir)):
os.mkdir(os.path.join(self._root_dir, self._model_save_dir))
if not os.path.isdir(os.path.join(self._root_dir, self._result_save_dir)):
os.mkdir(os.path.join(self._root_dir, self._result_save_dir))
self._tk_plot = tk_plot
        # load the model
input_shape = (self._input_shape[0], self._input_shape[1], self._input_shape[2], self._channel_size)
self._model = self.loadModel(input_shape=input_shape, load_path=self._model_reload_path)
def updateInput(self, datas=None, labels=None, data_filenames=None, label_name=None, class_name=None):
if datas is not None:
self._3D_data = datas
if labels is not None:
self._label = labels
self._num_classes = len(np.unique(labels))
if label_name is not None:
self._label_name = label_name
if data_filenames is not None:
self._3D_data_filename = data_filenames
if class_name is not None:
self._class_name = class_name
return
# load 3D image
def load3DImage(self, data_dir, extension, dataDim, instanceIsOneFile, data_dir_child, view=None, bounding_box=None):
"""
:param data_dir:
:param extension:
:param is2D:
:param view:
:param data_dir_child: "labeled" or "sample", argument "labeled" means data_dir is the parent directory including labeled directory of sample files
argument "sample" means data_dir consists of a set of sample datas(files) directly
:return:
"""
        # resolve the per-class sub-directories (or treat data_dir itself as the sample dir)
if data_dir_child == "labeled":
child_dirs = os.listdir(data_dir)
self._num_classes = len(child_dirs)
child_dirs.sort()
data_dir_child_path = [os.path.join(data_dir, child) for child in child_dirs]
print("data_dir_child_path", data_dir_child_path)
elif data_dir_child == "sample":
data_dir_child_path = [data_dir]
print("data_dir_child_path", data_dir_child_path)
        elif data_dir_child is None:
data_dir_child_path = [data_dir]
print("load3DImage -> data_dir_child is None")
print("data_dir_child_path", data_dir_child_path)
#CustomException("Argument data_dir_child need to be defined!")
self._3D_data = []
self._3D_data_filename = []
self._label = []
self._label_name = []
self._class_name = []
for ind, class_dir in enumerate(data_dir_child_path):
print("debug load3DImage, read class_dir data", ind, class_dir)
#img_data_path_list = [os.path.join(class_dir, img_data) for img_data in os.listdir(class_dir)]
#print("debug", img_data_path_list)
self._class_name.append(os.path.basename(class_dir))
idio = ImageDataIO(extension, dataDim=dataDim, instanceIsOneFile=instanceIsOneFile, modeling="3D", view=view)
_data, _filename = idio.read_files_from_dir(class_dir)
_data = idio._resizing_channel(_data, resize_size=None, channel_size=self._channel_size)
if bounding_box is not None:
_data = idio.convertUint8(_data, forNP=False)
_data = idio._cropping(_data, bounding_box)
_data_len = len(_data)
# train_bapl1 = np.array([np.array(train_bapl1_) for train_bapl1_ in train_bapl1])
_data = idio.convert_PIL_to_numpy(_data)
print(os.path.basename(class_dir), "class", _data.shape)
self._3D_data.append(_data)
self._3D_data_filename.append(_filename)
self._label.append([ind]*_data_len)
self._label_name.append([os.path.basename(class_dir)]*_data_len)
self._3D_data = np.concatenate(self._3D_data)
self._3D_data_filename = np.concatenate(self._3D_data_filename)
self._label = np.concatenate(self._label)
self._label_name = np.concatenate(self._label_name)
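
        # resample every volume to the model's fixed input grid before adding
        # the trailing channel axis below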
imgip = ImageInterpolator(is2D=False, num_channel=1, target_size=self._input_shape)
self._3D_data = [imgip.interpolateImage(img) for img in self._3D_data]
        if self._3D_data[0].shape[-1] != 1 and self._3D_data[0].shape[-1] != 3:  # the original `or` was always True
self._3D_data = np.array(self._3D_data)[:, :, :, :, None]
def extract_index_from_excel(self, excel_path, task="cv", fold=None):
if task=="CV" or task=="cv":
train_excel_path = "cv_file_train.xlsx"
train_excel_path = os.path.join(excel_path, train_excel_path)
test_excel_path = "cv_file_test.xlsx"
test_excel_path = os.path.join(excel_path, test_excel_path)
pd_train_df = pd.read_excel(train_excel_path)
pd_test_df = pd.read_excel(test_excel_path)
return pd_train_df["train_"+str(fold)], pd_test_df["test_"+str(fold)]
else :
raise CustomException("[!] not implemented yet")
return
def loadModel(self, input_shape, load_path=None):
# if self._model is not None:
# self._model = load_model(os.path.join(self._root_dir, self._model_save_dir, load_path, "model_name.h5"))
if load_path is None: # new experience
if self._num_classes is None:
CustomException("num_classes argument is mandatory when load_path is None")
# self._model = create_3d_densenet_model(self._lr_rate, self._num_classes, input_shape = input_shape)
#self._model = create_3d_inception_model(self._lr_rate, self._num_classes, input_shape = input_shape)
self._model = create_PDNet_model(self._lr_rate, self._num_classes, input_shape = input_shape)
else: # reload(;retrain) or test phase
print("[!] load_path is given, loading model")
print("load_path : ", load_path)
self._model = load_model(load_path)
if self._model is None:
            raise Cust_ModelNotFound("model was still None after loadModel()")
else :
return self._model
def loadFeaturesCombinedModel(self, load_path = None, input_shape=None):
if load_path is None and input_shape is None:
raise CustomException("either load_path or input_shape have to be decided!")
def create_feature_combined_model(lr_rate, num_classes, numAdditionalFeatures):
# resnet3D
# res3d_model = Resnet3D(numClasses=num_classes)
# model_name = res3d_model.resnet(input_shape=(64, 64, 64, 1), DropoutRate=0.2, zero_pad_1st=False)
# _cnn_model = res3d_model.resnet(input_shape=input_shape, DropoutRate=0.2, zero_pad_1st=False, feature_output=True)
# 3x3 conv 3D
# conv3dmodel = CNN3D(numClasses=num_classes)
# model_name = conv3dmodel.cnn3d(input_shape)
# DenseNet3D
# blocks = [6, 12, 24, 16]
# densenet3d = DenseNet3D(blocks, input_shape)
# densenet3d_model = densenet3d.densenet(num_classes, init_num_filters=64, init_filter_size=7, pooling="avg")
# Inception3D
inception3d = Inception3D(input_shape)
inception3d_model = inception3d.inception3d(num_classes)
cnn_model_input = Input(shape=input_shape)
# gap_output = _cnn_model.get_layer("GAP")
            # NOTE: `conv3dmodel` is only defined in the commented-out CNN3D block
            # above, so this call fails as written; re-enable the matching backbone
            # (or wire up inception3d_model) before using this combined model.
            gap_output = conv3dmodel(cnn_model_input, num_classes)
print("gap_output", gap_output)
# gap_output = densenet3d_model(num_classes)
            # additional branch that consumes the extra (non-imaging) features
features_input = Input(shape=(numAdditionalFeatures,))
features_output = Dense(numAdditionalFeatures)(features_input)
# equivalent to added = keras.layers.add([x1, x2])
concat = Concatenate(axis=-1)([gap_output, features_output])
out = Dense(self._num_classes, activation="softmax")(concat)
model = Model(inputs=[cnn_model_input, features_input], outputs=out)
optimizer = Adam(lr=lr_rate)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
return model
if load_path is None: # new experience
self._model = create_feature_combined_model(self._lr_rate, self._num_classes, self._numAdditionalFeatures)
else: # reload(;retrain) or test phase
self._model = load_model(load_path)
if self._model is None:
            raise Cust_ModelNotFound("model was still None after loadModel()")
else :
return self._model
def loadExcelFeaturesModel(self, load_path = None):
def create_excel_feature_model(lr_rate, num_classes, numAdditionalFeatures):
            # model branch that consumes the extra (tabular) features
features_input = Input(shape=(numAdditionalFeatures,))
features_output = Dense(numAdditionalFeatures)(features_input)
# equivalent to added = keras.layers.add([x1, x2])
out = Dense(num_classes, activation="softmax")(features_output)
model = Model(inputs=features_input, outputs=out)
optimizer = Adam(lr=lr_rate)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
return model
if load_path is None: # new experience
self._model = create_excel_feature_model(self._lr_rate, self._num_classes, self._numAdditionalFeatures)
else: # reload(;retrain) or test phase
self._model = load_model(load_path)
if self._model is None:
            raise Cust_ModelNotFound("model was still None after loadModel()")
else :
return self._model
def loadAdditionalFeaturesForCombinedModel(self, excel_filepath, id_column_name, columns=[], filename_parser = None):
"""
:param excel_filepath: excel file saving sample's features labeled with filename
:param id_column_name: column name indicating id on the excel file
:param columns: column names to be used as features
:param filename_parser: parser to match filename to the id column name on the excel file
:return:
"""
"""
self._3D_data_filename
'FBB_BRAIN_ST_ANONYMIZED_190502134706_CN_ID0350.nii'
'FBB_BRAIN_ST_ANONYMIZED_190502134706_CN_ID0351.nii']
"""
if filename_parser is None:
filename_parser = lambda x : x
if len(columns) == 0 or id_column_name is None:
raise CustomException("columns must be used")
# print(self._3D_data_filename)
#pd_feature_list = pd.read_excel(excel_filepath)
#make_col_list_from_excel(excel_filepath, ["PatientID(new)", "BAPL score"])
col_dict = make_col_list_from_excel(excel_filepath, columns)
print(col_dict)
features_sorted_by_filename = []
for _filename in self._3D_data_filename:
_filename = os.path.splitext(_filename)[0]
parsed_filename = filename_parser(_filename)
parsed_filename_item_index = col_dict[id_column_name].index(parsed_filename)
parsed_filename_feature = []
for _col_ind, _col in enumerate(columns):
if _col == id_column_name:
continue
parsed_filename_item_value = col_dict[_col][parsed_filename_item_index]
parsed_filename_feature.append(parsed_filename_item_value)
features_sorted_by_filename.append(np.array(parsed_filename_feature))
self._features_sorted_by_filename = np.array(features_sorted_by_filename)
return self._features_sorted_by_filename
def _preprocess(self, data, isTrain=False, experiment_name=None, save_path=None):
if save_path is None:
if experiment_name is not None:
if not os.path.isdir(os.path.join(self._root_dir, self._model_save_dir, experiment_name)):
os.mkdir(os.path.join(self._root_dir, self._model_save_dir, experiment_name))
save_path = os.path.join(self._root_dir, self._model_save_dir, experiment_name, "standardscaler.pkl")
else :
save_path = os.path.join(self._root_dir, self._model_save_dir, "standardscaler.pkl")
_, preprocessed_data = StandardScalingData(data, save_path=save_path, train=isTrain, keep_dim=True)
return preprocessed_data
def train_model(self, X_train, y_train, X_val=None, y_val=None, class_weight=None,
epochs=100, print_step=10, tk_plot=None):
steps = int(len(X_train) / self._train_batch_size)
history = []
for epoch in range(epochs):
# Training model
#######################################################################3
print("Epoch:", epoch)
history_in_epoch = []
for _ in range(steps):
# Select a random batch of images
idx = np.random.randint(0, len(X_train), self._train_batch_size)
X_train_batch, y_train_batch = X_train[idx], y_train[idx]
# self._model.trainable = True
train_loss_acc = self._model.train_on_batch(X_train_batch, y_train_batch, class_weight=class_weight)
# train_loss_acc = self._model.train_on_batch(X_train_batch, y_train_batch)
if X_val is not None:
val_loss_acc = self._model.test_on_batch(X_val, y_val)
# records
# record = (epoch, train_loss_acc[0], 100 * train_loss_acc[1], val_loss_acc[0], 100 * val_loss_acc[1])
# report = "%5d [D loss: %.3f, acc.: %.2f%%]" % record
history_in_epoch.append(
[train_loss_acc[0], 100 * train_loss_acc[1], val_loss_acc[0], 100 * val_loss_acc[1]])
else:
# records
# record = (epoch, train_loss_acc[0], 100 * train_loss_acc[1])
# report = "%5d [D loss: %.3f, acc.: %.2f%%]" % record
history_in_epoch.append([train_loss_acc[0], 100 * train_loss_acc[1]])
mean_loss_acc = np.array(history_in_epoch).mean(axis=0)
if X_val is not None:
report = "%5d [train loss: %.3f, train acc.: %.2f%%, val loss: %.3f, val acc.: %.2f%%]" % (
epoch, mean_loss_acc[0], mean_loss_acc[1],
mean_loss_acc[2], mean_loss_acc[3])
else:
report = "%5d [train loss: %.3f, train acc.: %.2f%%]" % (epoch, mean_loss_acc[0], mean_loss_acc[1])
if epoch % print_step == 0:
print(report)
history.append(mean_loss_acc)
if tk_plot is not None:
if X_val is not None:
history_np = np.array(history)
tk_plot.init_trend_plot(history_np[:, 0], history_np[:, 2], history_np[:, 1], history_np[:, 3])
else:
history_np = np.array(history)
                    tk_plot.init_trend_plot(history_np[:, 0], history_np[:, 1])  # without X_val only [loss, acc] columns exist
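        # each history row holds [train_loss, train_acc(%), val_loss, val_acc(%)];
        # only the first two columns exist when X_val is None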
return history
    # train and evaluate the model : [model validation]
def train_eval(self, train_data, train_label, val_data, val_label, experiment_name, save_model_name=None):
if not os.path.isdir(os.path.join(self._root_dir, self._model_save_dir, experiment_name)):
os.mkdir(os.path.join(self._root_dir, self._model_save_dir, experiment_name))
if not os.path.isdir(os.path.join(self._root_dir, self._result_save_dir, experiment_name)):
os.mkdir(os.path.join(self._root_dir, self._result_save_dir, experiment_name))
# train_data = self._preprocess(train_data, isTrain=True, experiment_name=experiment_name)
# val_data = self._preprocess(val_data, isTrain=False, experiment_name=experiment_name)
categorical_train_label = to_categorical(train_label, self._num_classes)
categorical_val_label = to_categorical(val_label, self._num_classes)
# datagen_train = ImageDataGenerator()
# datagen_test = ImageDataGenerator()
# # ImageDataGenerator for Outer Loop
# generator_train = datagen_train.flow(x=train_data, y=categorical_train_label, batch_size=self._train_batch_size, shuffle=True)
# generator_test = datagen_test.flow(x=val_data, y=categorical_eval_label, batch_size=self._test_batch_size, shuffle=False)
save_filepath = os.path.join(self._root_dir, self._result_save_dir, experiment_name,
"final_selected_model_structure.txt")
def _model_summary_log(line, save_filepath):
with open(save_filepath, "a") as f:
f.write(line+"\n")
return
print_summary(self._model, line_length=None, positions=None,
print_fn=lambda line: _model_summary_log(line, save_filepath))
# model_plot_savepath = os.path.join(self._root_dir, self._result_save_dir,
# "final_selected_model_structure.png")
#plot_model(self._model, model_plot_savepath)
# Early Stopping
#earlystopping = EarlyStopping(monitor='val_loss', patience=10)
# tb_hist = keras.callbacks.TensorBoard(log_dir='./results/outer_loop/CV'+str(ind+1)+'graph', histogram_freq=0, write_graph=True, write_images=True)
class_weights = class_weight.compute_class_weight('balanced', np.unique(train_label), train_label)
class_weight_dict = dict(enumerate(class_weights))
print("class_weight_dict", class_weight_dict)
log_save_path = os.path.join(self._root_dir, self._result_save_dir, experiment_name, "logs")
if not os.path.isdir(log_save_path):
os.mkdir(log_save_path)
lr_log_save_path = os.path.join(self._root_dir, self._result_save_dir, experiment_name, "lr_logs")
if not os.path.isdir(lr_log_save_path):
os.mkdir(lr_log_save_path)
        async_stopping_txtpath = os.path.join(log_save_path, r'async_txt.txt')
customlogger = Logger(log_save_path)
if self._tk_plot is None :
history = self._model.fit(train_data, categorical_train_label, batch_size=self._train_batch_size, epochs=self._num_epochs, shuffle=True,
validation_data=(val_data, categorical_val_label), class_weight=class_weight_dict,
callbacks=[
LossLoggerScheduler(customlogger),
                                                 CustomEarlyStopping(patience=50, txtpath_to_check=async_stopping_txtpath, model_save_path=None),
CustomLearningRateScheduler(window_size=10, log_save_path=lr_log_save_path, stzcr_threshold=0.05)
])
train_acc_list = history.history['acc']
train_loss_list = history.history['loss']
val_acc_list = history.history['val_acc']
val_loss_list = history.history['val_loss']
else :
history = self.train_model(train_data, categorical_train_label, val_data, categorical_val_label,
class_weight=class_weight_dict,
epochs=self._num_epochs, print_step=10, tk_plot=self._tk_plot)
history = np.array(history)
train_loss_list = history[:, 0]
train_acc_list = history[:, 1]
val_loss_list = history[:, 2]
val_acc_list = history[:, 3]
trend_dict = dict()
trend_dict["train_acc_list"] = train_acc_list
trend_dict["train_loss_list"] = train_loss_list
trend_dict["test_acc_list"] = val_acc_list
trend_dict["test_loss_list"] = val_loss_list
trend_df = pd.DataFrame(trend_dict)
trend_df.to_excel(os.path.join(self._root_dir, self._model_save_dir, experiment_name, "trend_log.xlsx"))
# save history log for checking trend info later
trend_save_path = os.path.join(self._root_dir, self._result_save_dir, experiment_name, "trend_figure.png")
self._plot_trend(trend_dict, save_path=trend_save_path)
# accuracy = history.history['val_acc'][-1]
# val_loss = history.history['val_loss'][-1]
print("Accuracy: {0:.4%}".format(val_acc_list[-1]))
print("Val Loss: {0:.4}".format(val_loss_list[-1]))
        # name of the model file to save (overrides any save_model_name the caller passed in)
# save_model_name = self._experiment_name + "_" + "_".join((str(_dim) for _dim in self._input_shape))
save_model_name = self._root_dir + "_" + "_".join((str(_dim) for _dim in self._input_shape))
self.saveModel(experiment_name = experiment_name, save_model_name = save_model_name)
return train_acc_list[-1], train_loss_list[-1], val_acc_list[-1], val_loss_list[-1]
    # train and evaluate the model with additional features : [model validation]
def train_eval_add_info_ver(self, train_data, train_label, val_data, val_label, experiment_name):
if not os.path.isdir(os.path.join(self._root_dir, self._model_save_dir, experiment_name)):
os.mkdir(os.path.join(self._root_dir, self._model_save_dir, experiment_name))
if not os.path.isdir(os.path.join(self._root_dir, self._result_save_dir, experiment_name)):
os.mkdir(os.path.join(self._root_dir, self._result_save_dir, experiment_name))
conv_train_data = self._preprocess(train_data[0], isTrain=True, experiment_name=experiment_name+"_conv")
conv_val_data = self._preprocess(val_data[0], isTrain=False, experiment_name=experiment_name+"_conv")
series_train_data = self._preprocess(train_data[1], isTrain=True, experiment_name=experiment_name + "_series")
series_val_data = self._preprocess(val_data[1], isTrain=False, experiment_name=experiment_name + "_series")
train_data = (conv_train_data, series_train_data)
val_data = (conv_val_data, series_val_data)
categorical_train_label = to_categorical(train_label, self._num_classes)
categorical_val_label = to_categorical(val_label, self._num_classes)
# datagen_train = ImageDataGenerator()
# datagen_test = ImageDataGenerator()
#
# # ImageDataGenerator for Outer Loop
# generator_train = datagen_train.flow(x=train_data, y=categorical_train_label, batch_size=self._train_batch_size, shuffle=True)
# generator_test = datagen_test.flow(x=val_data, y=categorical_eval_label, batch_size=self._test_batch_size, shuffle=False)
#
save_filepath = os.path.join(self._root_dir, self._result_save_dir, experiment_name,
"final_selected_model_structure.txt")
def _model_summary_log(line, save_filepath):
with open(save_filepath, "a") as f:
f.write(line + "\n")
return
print_summary(self._model, line_length=None, positions=None,
print_fn=lambda line: _model_summary_log(line, save_filepath))
# model_plot_savepath = os.path.join(self._root_dir, self._result_save_dir,
# "final_selected_model_structure.png")
# plot_model(self._model, model_plot_savepath)
# Early Stopping
#earlystopping = EarlyStopping(monitor='val_loss', patience=10)
from sklearn.utils import class_weight
class_weights = class_weight.compute_class_weight('balanced', np.unique(train_label), train_label)
class_weight_dict = dict(enumerate(class_weights))
print("class_weight_dict", class_weight_dict) # {0: 4.666666666666667, 1: 0.56}
# tb_hist = keras.callbacks.TensorBoard(log_dir='./results/outer_loop/CV'+str(ind+1)+'graph', histogram_freq=0, write_graph=True, write_images=True)
steps = int(len(train_label) / self._train_batch_size)
train_loss_list = []
train_acc_list = []
test_loss_list = []
test_acc_list = []
for epoch in tqdm(range(self._num_epochs)):
print("# Epoch:", epoch)
for _ in range(steps):
# Select a random batch of images
                idx = np.random.randint(0, len(train_label), self._train_batch_size)  # len(train_data) is 2 here (it is a tuple)
imgs, features, labels = train_data[0][idx], train_data[1][idx], categorical_train_label[idx]
#print("labels shape", labels.shape) # (64,)
# Train & Test the model_name
train_loss = self._model.train_on_batch(x=[imgs, features], y=labels, class_weight=class_weight_dict)
test_loss = self._model.test_on_batch(x=[val_data[0], val_data[1]], y=categorical_val_label)
train_loss_list.append(train_loss[0])
train_acc_list.append(train_loss[1])
test_loss_list.append(test_loss[0])
test_acc_list.append(test_loss[1])
# history = self._model.fit(train_data, categorical_train_label, batch_size=self._train_batch_size,
# epochs=self._num_epochs, shuffle=True,
# validation_data=(val_data, categorical_val_label), callbacks=[earlystopping])
# train_acc_list = history.history['acc']
# train_loss_list = history.history['loss']
# val_acc_list = history.history['val_acc']
# val_loss_list = history.history['val_loss']
trend_dict = dict()
trend_dict["train_acc_list"] = train_acc_list
trend_dict["train_loss_list"] = train_loss_list
trend_dict["test_acc_list"] = test_acc_list
trend_dict["test_loss_list"] = test_loss_list
trend_df = pd.DataFrame(trend_dict)
trend_df.to_excel(os.path.join(self._root_dir, self._model_save_dir, experiment_name, "trend_log.xlsx"))
# save history log for checking trend info later
trend_save_path = os.path.join(self._root_dir, self._result_save_dir, experiment_name, "trend_figure.png")
self._plot_trend(trend_dict, save_path=trend_save_path)
# accuracy = history.history['val_acc'][-1]
# val_loss = history.history['val_loss'][-1]
print("Accuracy: {0:.4%}".format(test_acc_list[-1]))
print("Val Loss: {0:.4}".format(test_loss_list[-1]))
return train_acc_list[-1], train_loss_list[-1], test_acc_list[-1], test_loss_list[-1]
    # the CV output approximates the best expected performance of the selected model
def train_eval_CV(self, num_cv=10):
model_acc = []
model_loss = []
#default_cv_file_path = os.path.join(self._root_dir, self._result_save_dir, "cv_file.xlsx")
default_cv_file_path = None
#default_cv_file_path = True
#print("debug", self._label)
#pd_cv_index_train, pd_cv_index_test = self._create_cv_file(self._3D_data, self._label, num_k = num_cv, save_path=default_cv_file_path)
        kf = KFold(n_splits=num_cv, shuffle=True, random_state=1)  # recent scikit-learn requires shuffle=True when random_state is set
# cv_index_train = dict()
# cv_index_test = dict()
# sss_inner.get_n_splits(data, label)
# for ind, (train_index, test_index) in enumerate(sss_inner.split(data, label)):
for ind, (train_index, test_index) in enumerate(kf.split(self._3D_data, self._label)):
#for ind in range(num_cv):
print("#CV:", ind)
# train_index = pd_cv_index_train["train_"+str(ind)].tolist()
# test_index = pd_cv_index_test["test_" + str(ind)].tolist()
X_train = self._3D_data[train_index]
y_train = self._label[train_index]
X_test = self._3D_data[test_index]
y_test = self._label[test_index]
print("X_train", X_train.shape)
print("y_train", y_train.shape)
print("X_test", X_test.shape)
print("y_test", y_test.shape)
if self._train_batch_size > len(X_train):
self._train_batch_size = len(X_train)
if self._test_batch_size > len(X_test):
self._test_batch_size = len(X_test)
# for additional clinical information
# add_X_train = self._features_sorted_by_filename[train_index]
# X_train = (X_train, add_X_train)
# add_X_test = self._features_sorted_by_filename[test_index]
# X_test = (X_test, add_X_test)
test_filename = self._3D_data_filename[test_index]
# name of model to save
# save_model_name = self._experiment_name + "_" + "_".join((str(_dim) for _dim in self._input_shape)) + ".h5"
save_model_name = self._root_dir + "_" + "_".join((str(_dim) for _dim in self._input_shape)) + ".h5"
_, _, val_acc, val_loss = self.train_eval(X_train, y_train, X_test, y_test,
experiment_name="experiment_CV_"+str(ind), save_model_name=save_model_name)
preds = self.test(X_test, y_test, experiment_name="experiment_CV_"+str(ind), filename_list=test_filename)
#_, _, val_acc, val_loss = self.train_eval_add_info_ver(X_train, y_train, X_test, y_test, experiment_name="experiment_CV_" + str(ind))
#self.test_add_info_ver(X_test, y_test, experiment_name="experiment_CV_" + str(ind), filename_list=test_filename)
print("[!] preds", preds.shape) # (89, 2), softmax_output as a last layer of model_name
# CAM_save_dir = os.path.join(self._root_dir, self._result_save_dir, "CAM_log_"+str(ind))
# if not os.path.isdir(CAM_save_dir):
# os.mkdir(CAM_save_dir)
# self._visualize_3D_CAM(X_test, y_test, preds, test_filename, CAM_save_dir, featuremap_name="mixed0",
# resize_size = (64,95,79), experiment_name_for_preprocess="experiment_CV_"+str(ind))
# self._visualize_3D_CAM(X_test, y_test, preds, test_filename, CAM_save_dir,
# featuremap_name="conv5_block16_concat",
# resize_size=(64, 95, 79), experiment_name_for_preprocess="experiment_CV_" + str(ind))
model_acc.append(val_acc)
model_loss.append(val_loss)
print("Final Acc:", np.mean(model_acc))
print("Final Loss:", np.mean(model_loss))
return np.mean(model_acc), np.mean(model_loss)
    # nested CV would yield both an approximate performance estimate and the optimized model
def train_eval_NCV(self, train_data, train_label, val_data, val_label, num_cv=10, cv_file=None, model_save_dir=None):
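        # TODO: nested cross-validation (outer evaluation loop plus inner
        # model-selection loop) is not implemented yet; this stub only pins
        # down the intended interface.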
model_acc = None
model_loss = None
best_model = None
return model_acc, model_loss, best_model
    # save the model
# def _save_model(self, filename):
# save_path = os.path.join(self._root_dir, self._model_save_dir, filename)
# self._model.save(save_path)
# return
def saveModel(self, experiment_name=None, save_model_name = None):
if save_model_name is None:
save_model_name = "model_name.h5"
if experiment_name :
self._model.save(os.path.join(self._root_dir, self._model_save_dir, experiment_name, save_model_name))
else :
self._model.save(os.path.join(self._root_dir, self._model_save_dir, save_model_name))
    # test the model with labeled data, because this phase is literally a 'test'
def test(self, val_data, val_label, experiment_name, filename_list, preprocess_save_path=None):
if preprocess_save_path is not None:
val_data = self._preprocess(val_data, isTrain=False, experiment_name=experiment_name, save_path=preprocess_save_path)
categorical_test_label = to_categorical(val_label, self._num_classes)
# datagen_test = ImageDataGenerator()
# generator_test = datagen_test.flow(x=self._3D_data, y=categorical_test_label, batch_size=self._test_batch_size, shuffle=False)
preds = self._model.predict(x=val_data, verbose=1) # (N, num_classes)
print("preds", np.array(preds).shape) # (89, 2)
# pred_proba = np.array(pred).max(axis=1) # (N, )
pred_ind_list = np.array(preds).argmax(axis=1) # (N, )
label_ind_list = np.array(categorical_test_label).argmax(axis=1) # (N, )
if not os.path.isdir(os.path.join(self._root_dir, self._result_save_dir, experiment_name)):
os.mkdir(os.path.join(self._root_dir, self._result_save_dir, experiment_name))
# sample_based_analysis result
sample_based_analysis_save_filename = os.path.join(self._root_dir, self._result_save_dir, experiment_name,
"sample_based_analysis.xlsx")
save_pred_label(categorical_test_label, preds, save_filepath=sample_based_analysis_save_filename,
onehot_label = True, filename_list = filename_list) # y_test_ : (N, Num_classes)
conf_m = confusion_matrix(label_ind_list, pred_ind_list)
print("CV #", "confusion matrix")
print(conf_m)
accuracy = accuracy_score(y_true=label_ind_list, y_pred=pred_ind_list)
        val_f1_score = f1_score(y_true=label_ind_list, y_pred=pred_ind_list)  # binary average by default; use average='macro' for >2 classes
# logging the conf_m
result_save_filename = os.path.join(self._root_dir, self._result_save_dir, experiment_name,
"performance_report.txt")
with open(result_save_filename, "w") as f:
f.write("Accuracy : " + str(accuracy) + '\n')
f.write("F1-score : " + str(val_f1_score) + '\n')
for row in conf_m:
f.write("%s\n" % row)
target_names = np.unique(self._label_name)
result = classification_report(label_ind_list, pred_ind_list, target_names=target_names)
print("Test Phase result")
print(result)
with open(result_save_filename, "a") as f:
f.write("%s\n" % result)
return preds
    # test the additional-features model with labeled data
def test_add_info_ver(self, val_data, val_label, filename_list, experiment_name, load_path=None):
if load_path is not None:
self.loadModel(input_shape=(64, 32, 32, 1), load_path=load_path)
conv_test_data = self._preprocess(val_data[0], isTrain=False, experiment_name=experiment_name+"_conv")
feature_test_data = self._preprocess(val_data[1], isTrain=False, experiment_name=experiment_name+"_series")
categorical_test_label = to_categorical(val_label, self._num_classes)
# datagen_test = ImageDataGenerator()
# generator_test = datagen_test.flow(x=self._3D_data, y=categorical_test_label, batch_size=self._test_batch_size, shuffle=False)
#preds = self._model.predict(x=self._3D_data, verbose=1) # (N, num_classes)
preds = self._model.predict_on_batch(x=[conv_test_data, feature_test_data])
print("preds", np.array(preds).shape)
# pred_proba = np.array(pred).max(axis=1) # (N, )
pred_ind_list = np.array(preds).argmax(axis=1) # (N, )
label_ind_list = np.array(categorical_test_label).argmax(axis=1) # (N, )
# sample_based_analysis result
sample_based_analysis_save_filename = os.path.join(self._root_dir, self._result_save_dir, experiment_name,
"sample_based_analysis.xlsx")
save_pred_label(label_ind_list, pred_ind_list, save_filepath=sample_based_analysis_save_filename,
onehot_label=False, filename_list=filename_list) # y_test_ : (N, Num_classes)
conf_m = confusion_matrix(label_ind_list, pred_ind_list)
print("CV #", "confusion matrix")
print(conf_m)
accuracy = accuracy_score(y_true=label_ind_list, y_pred=pred_ind_list)
# logging the conf_m
result_save_filename = os.path.join(self._root_dir, self._result_save_dir, experiment_name, "performance_report.txt")
with open(result_save_filename, "w") as f:
f.write("Accuracy : " + str(accuracy) + '\n')
for row in conf_m:
f.write("%s\n" % row)
#target_names = np.unique(self._label_name)
target_names = self._class_name
result = classification_report(label_ind_list, pred_ind_list, target_names=target_names)
print("Test Phase result")
print(result)
with open(result_save_filename, "a") as f:
f.write("%s\n" % result)
return
# predict data without label, because this phase is literally 'predict' and can be used for deployment
def predict(self, val_data, experiment_name, load_path=None, preprocess_save_path=None):
if load_path is not None and self._model is None:
self.loadModel(input_shape=(64, 64, 64, 1), load_path=load_path)
val_data = self._preprocess(val_data, isTrain=False, experiment_name=experiment_name, save_path=preprocess_save_path)
# datagen_test = ImageDataGenerator()
# generator_test = datagen_test.flow(x=self._3D_data, y=categorical_test_label, batch_size=self._test_batch_size, shuffle=False)
preds = self._model.predict(x=val_data, verbose=1) # (N, num_classes)
print("preds", np.array(preds).shape)
return preds
def visualize_3D_CAM_from_data(self, input_data, extension, featuremap_name, preprocess_save_path,
reload_data = True, data_dir_child="labeled", reload_model_path=None, heatmap_resize_size=None):
"""
:param input_data: when reload_data is True, input_data is a directory. if not, input_data is a tuple of matrice like (X_data, label, filename)
:param extension:
:param CAM_save_dir:
:param featuremap_name:
:param preprocess_save_path: to load a step of preprocess
:param reload_data: if True, this module update self._3D_data from the "input_data" which is labeled. if False, input_data is matrice
:param data_dir_child:
:param reload_model_path: this parameter is used to load model_name when self._model object doesn't exist and user didn't call loadModel() function before this function,
:return: only predictions to provide service (that's why this function doen't requare 'label' parameter.
"""
        if reload_data and data_dir_child == "labeled":
            # NOTE: dataDim and view are expected to exist as module-level globals here
            self.load3DImage(input_data, extension=extension, data_dir_child="labeled", dataDim=dataDim,
                             instanceIsOneFile=True, view=view)
X_data = self._3D_data
label = self._label
filename = self._3D_data_filename
elif reload_data and data_dir_child is None:
            self.load3DImage(input_data, extension=extension, dataDim=dataDim,
                             instanceIsOneFile=True, view=view)
X_data = self._3D_data
label = self._label
filename = self._3D_data_filename
elif reload_data is False:
CustomException("[!] not considered yet! ")
X_data = input_data[0]
label = input_data[1]
filename = input_data[2]
#preds = self.test(X_data, label, experiment_name="experiment_TV", filename_list=filename, preprocess_save_path=preprocess_save_path)
preds = self.predict(X_data, experiment_name=None, load_path=reload_model_path, preprocess_save_path=preprocess_save_path)
CAM_save_dir = os.path.join(self._root_dir, self._result_save_dir, "CAM")
if not os.path.isdir(CAM_save_dir):
os.mkdir(CAM_save_dir)
self._visualize_3D_CAM(X_data, label, preds, filename, CAM_save_dir, featuremap_name=featuremap_name,
resize_size=heatmap_resize_size, preprocess_save_path=preprocess_save_path)
return
"""
# show CAM
- only convolutional feature space
- with clinical variables
"""
# save 3D CAM for train_eval phase
def _visualize_3D_CAM(self, imgs_list, imgs_label, _preds, imgs_filename, CAM_save_dir, featuremap_name,
resize_size=None, experiment_name_for_preprocess=None, preprocess_save_path=None):
print("[!] visualizing 3D CAM")
print("imgs_list", imgs_list.shape)
print("imgs_label", imgs_label.shape)
print("_preds", _preds.shape)
# preprocessing
if experiment_name_for_preprocess :
preprocessed_imgs_list = self._preprocess(imgs_list, isTrain=False, experiment_name=experiment_name_for_preprocess)
elif preprocess_save_path:
preprocessed_imgs_list = self._preprocess(imgs_list, isTrain=False,
save_path=preprocess_save_path)
else :
preprocessed_imgs_list = imgs_list
# def get_output_layer(model, layer_name):
# # # get the symbolic outputs of each "key" layer (we gave them unique names).
# # layer_dict = dict([(layer.name, layer) for layer in model_name.layers])
# # # print("layer dict", layer_dict)
# # layer = layer_dict[layer_name]
# # return layer
# for layer in model.layers:
# if layer.name == layer_name:
# return layer
#get_output = K.function([self._model.layers[0].input], [final_conv_layer.output, self._model.layers[-1].output])
class_heatmap_dict = dict()
for ind2, (img, label, filename) in enumerate(zip(preprocessed_imgs_list, imgs_label, imgs_filename)):
original_img = np.array([img])
#print("original_img shape", original_img.shape) # (1, 64, 32, 32, 1)
#print("original_img min max", original_img.min(), original_img.max())
#[conv_outputs, predictions] = get_output([original_img])
#print("conv_outputs", np.array(conv_outputs).shape) # (1, 16, 8, 8, 128), (1, 4, 4, 4, 256)
#print("predictions", np.array(predictions).shape) # (1, 3)
# print("from get_output()", predictions) # [[0. 1.]] same with softmax outputs
# print("from _preds", _preds[ind2]) # [0.547228 0.45277202], outputs of keras models' predict() function
# print("filename", filename)
#conv_outputs = conv_outputs[0, :, :, :, :] # (D, H, W, C) ; (16, 8, 8, 128)
origin_heatmap_c = []
for _c_ind in range(self._num_classes): # iteration on each class
# Create the class activation map.
# cam = np.zeros(dtype = np.float32, shape = conv_outputs.shape[1:3]) # H, W - 14,14
#cam = np.zeros(dtype=np.float32, shape=conv_outputs.shape[0:2+1])
_class_output = self._model.output[:, _c_ind] # (N, num_classes)
# Get the last layer's feature map
final_conv_layer = self.get_output_layer(self._model, featuremap_name) # (1, D, H, W, C)
# Gradient of the class output with respect to the final conv feature map.
grads = K.gradients(_class_output, final_conv_layer.output)[0]
# a vector of size (512,) holding the mean gradient value for each feature-map channel
pooled_grads = K.mean(grads, axis=(0, 1, 2, 3)) # (C, )
iterate = K.function([self._model.input], [pooled_grads, final_conv_layer.output[0]])
pooled_grads_value, conv_layer_output_value = iterate([original_img])
for i in range(len(pooled_grads_value)): # iterate over the feature-map channels
#for i, w in enumerate(std_last_class_weights[:, _c_ind]):
# cam += w * conv_outputs[i, :, :] # sumarize 16,16 on 512 iter with 512 weight for 1 label class
#cam += np.abs(w) * np.abs(conv_outputs[:, :, :, i]) # sumarize 16,16 on 512 iter with 512 weight for 1 label class
# normalize each of feature map from last convolutional feature bank
#conv_feature_map = conv_outputs[:, :, :, i]
#cam += w * normed_feature_map[:,:,:, i]
conv_layer_output_value[:, :, :, i] *= pooled_grads_value[i]
# cam += np.abs(w) * normed_feature_map[:,:,:, i]
#cam += np.abs(w) * conv_outputs[:, :, :, i]
#cam += w * conv_outputs[:, :, :, i]
heatmap = np.mean(conv_layer_output_value, axis=-1)
heatmap = np.maximum(heatmap, 0)
heatmap /= np.max(heatmap)
origin_heatmap_c.append(heatmap)
origin_heatmap_c = np.array(origin_heatmap_c)
print("origin_heatmap_c", origin_heatmap_c.shape) # (3, 4, 4, 4)
#min_val = min(origin_heatmap_c[0].min(), origin_heatmap_c[1].min(), origin_heatmap_c[2].min())
# min_val = min([origin_heatmap_c[_c_ind].min() for _c_ind in range(self._num_classes)])
# most_wide_range = max((origin_heatmap_c[0].max() - origin_heatmap_c[0].min()),
# (origin_heatmap_c[1].max() - origin_heatmap_c[1].min()),
# (origin_heatmap_c[2].max() - origin_heatmap_c[2].min()))
# most_wide_range = max([origin_heatmap_c[_c_ind].max() - origin_heatmap_c[_c_ind].min() for _c_ind in range(self._num_classes)])
#most_wide_range = max([_c_heatmap.max() - _c_heatmap.min() for _c_heatmap in origin_heatmap_c])
#max_val = max([origin_heatmap_c[_c_ind].max() for _c_ind in range(self._num_classes)])
#most_wide_range = max_val - min_val
#min_val = origin_heatmap_c.min()
#most_wide_range = origin_heatmap_c.max() - min_val
norm_heatmap = []
# norm each heatmap to [0, 1]
for h_ind, hm in enumerate(origin_heatmap_c):
#tmp_hm = np.copy(hm)
# min_val_regional_focus = tmp_hm.min()
# range_val_regional_focus = tmp_hm.max() - tmp_hm.min()
# tmp_hm -= min_val_regional_focus
# tmp_hm = tmp_hm / range_val_regional_focus
# min_val = hm.min()
# most_wide_range = hm.max()-hm.min()
# tmp_hm -= min_val
# tmp_hm /= most_wide_range
# print("heatmap histogram")
# plt.hist(hm.ravel(), bins=256, fc='k', ec='k')
# plt.show()
#hm = cv2.resize(hm, (64, 64))
if resize_size is not None:
imgip = ImageInterpolator(is2D=False, num_channel=1, target_size=resize_size)
hm = imgip.interpolateImage(hm)
#tmp_hm = imgip.interpolateImage(tmp_hm)
# min_val_after_resize = tmp_hm.min()
# most_wide_range_after_resize = tmp_hm.max() - tmp_hm.min()
# tmp_hm -= min_val_after_resize
# tmp_hm /= most_wide_range_after_resize
#print("hm shape", np.array(hm).shape) # (64, 97, 75)
#print("img shape", np.array(img).shape) # (64, 97, 75, 1)
# heatmap = cv2.applyColorMap(np.uint8(255 * hm), cv2.COLORMAP_JET)
# heatmap[np.where(hm < 0.4)] = 0 # thresholding
# norm_heatmap.append(heatmap)
#heatmap = np.array([cv2.cvtColor(cv2.applyColorMap(np.uint8(255*hm_d), cv2.COLORMAP_JET), cv2.COLOR_BGR2RGB) for hm_d in tmp_hm]) # why...?? do i have to do 1-hm?
#print("[!] debug heatmap shape", heatmap.shape)
# for d_ind, heatmap_d in enumerate(heatmap):
# heatmap_d[np.where(hm[d_ind] < 0.4)] = 0
# creating mask
# threshold = tmp_hm.min() + (tmp_hm.max() - tmp_hm.min()) * 0.7 # (D, H, W)
# bool_mask = tmp_hm > threshold # region to show
# int_mask = bool_mask.astype(np.uint8)
# int_color_mask = np.array([int_mask, int_mask, int_mask]) # (3, D, H, W)
# int_color_mask = np.transpose(int_color_mask, axes=(1, 2, 3, 0))
#
# heatmap = heatmap * int_color_mask
#heatmap[np.where(tmp_hm<0.4)] = 0
# print("heatmap index", h_ind)
# print("heatmap shape", heatmap.shape) # heatmap shape (64, 97, 75, 3)
# print("heatmap min, max", heatmap.min(), heatmap.max()) # heatmap min, max 0 255
norm_heatmap.append(hm)
#norm_heatmap.append(tmp_hm)
# visualization
# idio = ImageDataIO(extension="nii", is2D=False, view="axial")
# idio.show_one_img(hm, cmap=plt.get_cmap('jet'))
# idio.show_one_img(heatmap, cmap=plt.get_cmap('jet'))
# plt.imshow(heatmap, cmap=plt.get_cmap('jet'))
# plt.colorbar(ticks=[0, 63, 127, 255], orientation='vertical')
# plt.show()
# plt.imshow(hm)
# plt.show()
# heatmap[np.where(cam < 0.2)] = 0
# img = heatmap*0.5 + original_img
norm_heatmap = np.array(norm_heatmap)
# for elem_hm in norm_heatmap:
# elem_hm[0, 0, 0, :] = 255
# elem_hm[1, 0, 0, :] = 0
for _c_ind in range(self._num_classes):
try:
class_heatmap_dict[str(_c_ind)].append(norm_heatmap[_c_ind])
except KeyError as ke:
class_heatmap_dict[str(_c_ind)]=[]
class_heatmap_dict[str(_c_ind)].append(norm_heatmap[_c_ind])
# for dict_ind in range(len(class_heatmap_dict)):
# for dict_ind_cls_hm in class_heatmap_dict[str(dict_ind)]:
# dict_ind_cls_hm[0, 0, 0, :] = 255
# dict_ind_cls_hm[1, 0, 0, :] = 0
mean_subtracted_heatmap = dict()
# create heatmap for each patient
# ind2: index for patient
for ind2, (img, label, img_filename) in enumerate(zip(imgs_list, imgs_label, imgs_filename)):
imgip = ImageInterpolator(is2D=False, num_channel=1, target_size=resize_size)
resized_img = imgip.interpolateImage(img) # (64, 97, 79, 1)
# 1. descending sort 2. combine! (the number of classes - 1)*
# sorted_class_heatmap = sorted(class_heatmap_dict.items(), key=operator.itemgetter(1), reverse = True)
class_heatmap_to_sort = [(class_heatmap_dict[str(_c_ind)][ind2], _preds[ind2][_c_ind]) for _c_ind in range(self._num_classes)] # [(class_heatmap for img, pred), (), ()]
sorted_class_heatmap = sorted(class_heatmap_to_sort, key=lambda x: x[1], reverse=True)  # sort by predicted probability, descending
#img_cbined = np.zeros((resize_size[0], resize_size[1], resize_size[2], 3), dtype="float32") # resize_size ; (D, H, W)
img_cbined = np.zeros(resize_size, dtype="float32") # resize_size ; (D, H, W)
for _c_ind in range(self._num_classes):
if _c_ind == 0:
img_cbined += (self._num_classes-1) * sorted_class_heatmap[_c_ind][0]
else:
img_cbined -= sorted_class_heatmap[_c_ind][0]
# img2 += img_cbined
# img_cbined /= np.max(np.abs(img_cbined))
#img_cbined = img_cbined / np.max(np.abs(img_cbined))
img_cbined = img_cbined / np.max(np.maximum(img_cbined, 0))
img_cbined = np.clip(img_cbined, 0, 1)
img_cbined *= 255
img_cbined = np.uint8(img_cbined)
# if not os.path.isdir(os.path.join(CAM_save_dir, "npy_img_cbined_" + str(label))):
# os.mkdir(os.path.join(CAM_save_dir, "npy_img_cbined_" + str(label)))
# npy_save_path = os.path.join(CAM_save_dir, "npy_img_cbined_" + str(label))
# save_filename = imgs_filename[ind2] + "_heatmap_.npy"
# npy_mat_saver(img_cbined, os.path.join(npy_save_path, save_filename))
if not os.path.isdir(os.path.join(CAM_save_dir, "npy_img_origin_" + str(label))):
os.mkdir(os.path.join(CAM_save_dir, "npy_img_origin_" + str(label)))
npy_save_path = os.path.join(CAM_save_dir, "npy_img_origin_" + str(label))
save_filename = imgs_filename[ind2] + "_origin_.npy"
npy_mat_saver(resized_img, os.path.join(npy_save_path, save_filename))
img_cbined = thresholding_matrix(mat_to_mask=img_cbined, std_mat=None, c_ratio=0.4)
#img_cbined[:, 0, 0] = 0
model_pred = np.argmax(_preds[ind2])
try:
mean_subtracted_heatmap[str(model_pred)].append(img_cbined)
except KeyError as ke:
mean_subtracted_heatmap[str(model_pred)] = []
mean_subtracted_heatmap[str(model_pred)].append(img_cbined)
# Save Class Activation Map according to the each label
if not os.path.isdir(os.path.join(CAM_save_dir, str(label))):
os.mkdir(os.path.join(os.path.join(CAM_save_dir, str(label))))
CAM_save_path = os.path.join(CAM_save_dir, str(label),
img_filename + "_CAM_" + self._class_name[0] + self._class_name[1] + "_combined" + ".png")
# proposed heatmap
img1 = resized_img[:, :, :, 0]
img1 = img1.astype(np.uint8)
img1[:, 0, 0] = 0
img1[:, 0, 1] = 255
# plt.imshow(img1[32], cmap='gist_gray')
# plt.show()
#
# ind = np.argmax([_img_cbined.max() for _img_cbined in img_cbined]) # (D, H, W)
# plt.imshow(img_cbined[ind], cmap=plt.get_cmap('jet'), alpha=0.5)
# plt.show()
predictions_ = [str(round(item_ * 100, 2)) for item_ in _preds[ind2]]
_title = "CAM: difference" + ", " + "GT:" + self._class_name[label] + ", " + \
" ".join([c_name + ":" + predictions_[ind] for ind, c_name in enumerate(self._class_name)])
# draw_img_on_grid([img1, img_cbined],
# save_path=CAM_save_path, is2D=False, _input_img_alpha=None, _overlap_img_alpha=0.4, _title=_title,
# _input_img_cmap="gist_gray", _overlap_img_cmap=plt.get_cmap("jet"))
draw_img_on_grid_v2([img1, img_cbined],
save_path=CAM_save_path, is2D=False, _input_img_alpha=None, _overlap_img_alpha=0.4, _title=_title,
_input_img_cmap="gist_gray", _overlap_img_cmap=plt.get_cmap("jet"))
# index for CAM
for _c_ind in range(self._num_classes):
predictions_ = [str(round(item_*100, 2)) for item_ in _preds[ind2]]
#print("debug predictions", predictions_) # ['0.0', '1.0', '0.0']
_title = "CAM:"+self._class_name[_c_ind]+", "+"GT:"+self._class_name[label]+", "+\
" ".join([c_name+":"+predictions_[ind] for ind, c_name in enumerate(self._class_name)])
#print("debug", _title)
# Save Class Activation Map according to the each label
if not os.path.isdir(os.path.join(CAM_save_dir, str(label))):
os.mkdir(os.path.join(os.path.join(CAM_save_dir, str(label))))
CAM_save_path = os.path.join(CAM_save_dir, str(label), img_filename+"_CAM_"+self._class_name[_c_ind]+".png")
img1 = resized_img[:, :, :, 0] # (D, H, W)
img1 = img1.astype(np.uint8)
img1[:, 0, 0] = 0
img1[:, 0, 1] = 255
img2 = class_heatmap_dict[str(_c_ind)][ind2]
#print("[!] debug img2 min max", img2.min(), img2.max())
#ind = np.argmax([_img_cbined.max() for _img_cbined in img2]) # (D, H, W)
# draw_img_on_grid([img2],
# save_path=None, is2D=False, _input_img_alpha=None, _overlap_img_alpha=None,
# _title="test", _input_img_cmap=plt.get_cmap("jet"))
if not os.path.isdir(os.path.join(CAM_save_dir, "npy_" + str(label))):
os.mkdir(os.path.join(CAM_save_dir, "npy_" + str(label)))
npy_save_path = os.path.join(CAM_save_dir, "npy_" + str(label))
save_filename = imgs_filename[ind2] + str(_c_ind)+"_heatmap_.npy"
npy_mat_saver(img2, os.path.join(npy_save_path, save_filename))
#img2 = [thresholding_matrix(mat_to_mask=_img2, std_mat=None, c_ratio=0.7) for _img2 in img2]
if not os.path.isdir(os.path.join(CAM_save_dir, "npy_img_origin_" + str(label))):
os.mkdir(os.path.join(CAM_save_dir, "npy_img_origin_" + str(label)))
npy_save_path = os.path.join(CAM_save_dir, "npy_img_origin_" + str(label))
save_filename = imgs_filename[ind2] + "_origin_.npy"
npy_mat_saver(img1, os.path.join(npy_save_path, save_filename))
img2 = thresholding_matrix(mat_to_mask=img2, std_mat=None, c_ratio=0.4)
#img2 = np.array(img2)
#img2[0,:,:,:]=0
#img2[:, 0, 0] = 0
#print("[!] debug img2 min max", img2.min(), img2.max())
# draw_img_on_grid([img2],
# save_path=None, is2D=False, _input_img_alpha=None, _overlap_img_alpha=None,
# _title="test", _input_img_cmap=plt.get_cmap("jet"))
# original heatmap
draw_img_on_grid_v2([img1, img2],
save_path=CAM_save_path, is2D=False, _input_img_alpha=None, _overlap_img_alpha=0.4, _title=_title,
_input_img_cmap="gist_gray", _overlap_img_cmap=plt.get_cmap("jet"))
#img2 = np.zeros((64, 97, _c_ind), dtype="float32")
# print("img debug", np.array(img).shape)
# pred_ind = _preds[ind2].argmax()
# if label == pred_ind :
# # Save Class Activation Map only for a correct prediction
# if not os.path.isdir(os.path.join(CAM_save_dir, "correct_" + str(label))):
# os.mkdir(os.path.join(os.path.join(CAM_save_dir, "correct_" + str(label))))
# CAM_save_path = os.path.join(CAM_save_dir, "correct_" + str(label),
# img_filename + "_CAM_" + self._class_name[_c_ind] + ".png")
# draw_img_on_grid([img1, img2],
# save_path=CAM_save_path, is2D=False, _input_img_alpha=None, _overlap_img_alpha=0.4, _title=_title
# ,_input_img_cmap = "gist_gray", _overlap_img_cmap = plt.get_cmap("jet"))
#
# elif label != pred_ind :
# # Save Class Activation Map only for a incorrect prediction
# if not os.path.isdir(os.path.join(CAM_save_dir, "incorrect_" + str(label))):
# os.mkdir(os.path.join(os.path.join(CAM_save_dir, "incorrect_" + str(label))))
# CAM_save_path = os.path.join(CAM_save_dir, "incorrect_" + str(label),
# img_filename + "_CAM_" + self._class_name[_c_ind] + ".png")
# draw_img_on_grid([img1, img2],
# save_path=CAM_save_path, is2D=False, _input_img_alpha=None, _overlap_img_alpha=0.4, _title=_title,
# _input_img_cmap="gist_gray", _overlap_img_cmap=plt.get_cmap("jet"))
# Save Mean CAM for each of class label
for _c_ind in range(self._num_classes):
mean_cam_c = np.array(class_heatmap_dict[str(_c_ind)]).mean(axis=0)
print("mean_cam_c", mean_cam_c.shape)
_title = "rawdata + Mean CAM for each of class label" + str(_c_ind)
rawdata_mean_CAM_save_path = os.path.join(CAM_save_dir, "rawdata+mean_CAM_"+self._class_name[_c_ind]+".png")
img = imgip.interpolateImage(imgs_list[0])
# print("img debug", np.array(img).shape)
img1 = img[:, :, :, 0]
img1 = img1.astype(np.uint8)
img1[:, 0, 0] = 0
img1[:, 0, 1] = 255
draw_img_on_grid_v2([img1, mean_cam_c],
save_path=rawdata_mean_CAM_save_path, is2D=False, _input_img_alpha=None, _overlap_img_alpha=0.4, _title=_title,
_input_img_cmap="gist_gray", _overlap_img_cmap=plt.get_cmap("jet"))
_title = "Mean CAM for each of class label"+str(_c_ind)
mean_CAM_save_path = os.path.join(CAM_save_dir, "mean_CAM_" + self._class_name[_c_ind] + ".png")
draw_img_on_grid_v2([mean_cam_c],
save_path=mean_CAM_save_path, is2D=False, _input_img_alpha=None, _overlap_img_alpha=0.4, _title=_title,
_input_img_cmap="gist_gray", _overlap_img_cmap=plt.get_cmap("jet"))
# Save the mean subtracted CAM for each model prediction
for _p_ind in range(self._num_classes):
mean_cam_p = np.array(mean_subtracted_heatmap[str(_p_ind)]).mean(axis=0)
print("mean_cam_p", mean_cam_p.shape)
_title = "rawdata + Mean subtracted CAM for each of class label" + str(_p_ind)
rawdata_mean_subtracted_CAM_save_path = os.path.join(CAM_save_dir,
"rawdata+mean_subtracted_CAM_" + self._class_name[_p_ind] + ".png")
img = imgip.interpolateImage(imgs_list[0])
# print("img debug", np.array(img).shape)
img1 = img[:, :, :, 0]
img1 = img1.astype(np.uint8)
img1[:, 0, 0] = 0
img1[:, 0, 1] = 255
draw_img_on_grid_v2([img1, mean_cam_p],
save_path=rawdata_mean_subtracted_CAM_save_path, is2D=False, _input_img_alpha=None, _overlap_img_alpha=0.4,
_title=_title,
_input_img_cmap="gist_gray", _overlap_img_cmap=plt.get_cmap("jet"))
_title = "Mean subtracted CAM for each of class label" + str(_p_ind)
mean_subtracted_CAM_save_path = os.path.join(CAM_save_dir, "mean_subtracted_CAM_" + self._class_name[_p_ind] + ".png")
draw_img_on_grid_v2([mean_cam_p],
save_path=mean_subtracted_CAM_save_path, is2D=False, _input_img_alpha=None, _overlap_img_alpha=0.4, _title=_title,
_input_img_cmap="gist_gray", _overlap_img_cmap=plt.get_cmap("jet"))
del class_heatmap_dict
return
def _create_cv_file(self, data, label, num_k=4, save_path=None):
if save_path is None :
test_size = 1.0 / float(num_k)
#sss_inner = StratifiedShuffleSplit(n_splits=num_k, test_size=test_size, random_state=1) # random_state is generated using np.random
sss_inner = StratifiedShuffleSplit(n_splits=num_k, test_size=test_size, random_state=1)
kf = KFold(n_splits=num_k)
cv_index_train = dict()
cv_index_test = dict()
# sss_inner.get_n_splits(data, label)
#for ind, (train_index, test_index) in enumerate(sss_inner.split(data, label)):
for ind, (train_index, test_index) in enumerate(kf.split(data, label)):
print("train", len(train_index))
print("test", len(test_index))
cv_index_train["train_"+str(ind)] = train_index.tolist()
cv_index_test["test_"+str(ind)] = test_index.tolist()
pd_cv_index_train = pd.DataFrame(cv_index_train)
pd_cv_index_train.to_excel(os.path.join(self._root_dir, self._result_save_dir, "cv_file_train.xlsx"))
pd_cv_index_test = pd.DataFrame(cv_index_test)
pd_cv_index_test.to_excel(os.path.join(self._root_dir, self._result_save_dir, "cv_file_test.xlsx"))
else:
pd_cv_index_train = pd.read_excel(os.path.join(self._root_dir, self._result_save_dir, "cv_file_train.xlsx"))
pd_cv_index_test = pd.read_excel(os.path.join(self._root_dir, self._result_save_dir, "cv_file_test.xlsx"))
return pd_cv_index_train, pd_cv_index_test
def _plot_trend(self, trend_dict, save_path):
# for item in trend_dict.items(): # key value pair i.g. "train_acc": 90.0
# plt.plot(item[1], label=item[0])
# plt.legend('upper right')
# plt.savefig(save_path)
fig, loss_ax = plt.subplots()
acc_ax = loss_ax.twinx()
loss_ax.plot(trend_dict["train_loss_list"], 'y', label='train loss')
loss_ax.plot(trend_dict["test_loss_list"], 'r', label='val loss')
acc_ax.plot(trend_dict["train_acc_list"], 'b', label='train acc')
acc_ax.plot(trend_dict["test_acc_list"], 'g', label='val acc')
loss_ax.set_xlabel('epoch')
loss_ax.set_ylabel('loss')
acc_ax.set_ylabel('accuracy')
loss_ax.legend(loc='upper left')
acc_ax.legend(loc='lower left')
plt.savefig(save_path)
plt.close("all")
return
def _measure_acc(self, pred_ind_list, label_ind_list):
return
def get_output_layer(self, model, layer_name):
# # get the symbolic outputs of each "key" layer (we gave them unique names).
# layer_dict = dict([(layer.name, layer) for layer in model_name.layers])
# # print("layer dict", layer_dict)
# layer = layer_dict[layer_name]
# return layer
for layer in model.layers:
if layer.name == layer_name:
return layer
def calculate_3D_CAM(self, X_data, _model, num_classes, featuremap_name, resize_size=None,
experiment_name_for_preprocess=None, preprocess_save_path=None):
# preprocessing
if experiment_name_for_preprocess:
preprocessed_imgs_list = self._preprocess(X_data, isTrain=False,
experiment_name=experiment_name_for_preprocess)
elif preprocess_save_path:
preprocessed_imgs_list = self._preprocess(X_data, isTrain=False,
save_path=preprocess_save_path)
else:
preprocessed_imgs_list = X_data
cams = [] # (N, C, D, H, W)
for ind, img in enumerate(preprocessed_imgs_list):
original_img = np.array([img])
origin_heatmap_c = []
for _c_ind in range(num_classes): # iteration on each class
_class_output = _model.output[:, _c_ind] # (N, num_classes); use the passed-in model handle
# Get the last layer's feature map
final_conv_layer = self.get_output_layer(_model, featuremap_name) # (1, D, H, W, C)
# Gradient of the class output with respect to the final conv feature map.
grads = K.gradients(_class_output, final_conv_layer.output)[0] # gradients function considers list of gradients
pooled_grads = K.mean(grads, axis=(0, 1, 2, 3)) # (C, )
iterate = K.function([_model.input], [pooled_grads, final_conv_layer.output[0]])
pooled_grads_value, conv_layer_output_value = iterate([original_img])
for i in range(len(pooled_grads_value)):
conv_layer_output_value[:, :, :, i] *= pooled_grads_value[i]
heatmap = np.mean(conv_layer_output_value, axis=-1)
heatmap = np.maximum(heatmap, 0)
heatmap /= np.max(heatmap)
origin_heatmap_c.append(heatmap)
origin_heatmap_c = np.array(origin_heatmap_c)
print("origin_heatmap_c", origin_heatmap_c.shape) # (3, 4, 4, 4)
if resize_size is not None:
resized_heatmap = []
# norm each heatmap to [0, 1]
for h_ind, hm in enumerate(origin_heatmap_c):
imgip = ImageInterpolator(is2D=False, num_channel=1, target_size=resize_size)
hm = imgip.interpolateImage(hm)
resized_heatmap.append(hm)
origin_heatmap_c = np.array(resized_heatmap)
cams.append(origin_heatmap_c)
return np.array(cams)
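# A minimal usage sketch for calculate_3D_CAM (the layer name and resize shape
# below are illustrative assumptions, not values defined in this file):
#
#   cams = conv3dmodel.calculate_3D_CAM(X_data, conv3dmodel._model, num_classes=3,
#                                       featuremap_name="conv3d_last",
#                                       resize_size=(64, 97, 75))
#   # cams.shape == (N, num_classes, D, H, W)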
| [
"[email protected]"
] | |
4e09e9ab402842bb82f4da39ba8ec96246fa05a2 | 9a92cf81495006f9ee6ffcfb60a66bc2ac5eb258 | /main.py | 9ceb3d6c44f78ec9dbf87cbd3ba8fd307fae3eaf | [] | no_license | Haburto/ConwaysGameOfLife | f63dc5c387c737d6f4305702dabdd4c4ff4ea03e | 50ad79dd68049dbc51cb67b0802c1750c70d505d | refs/heads/master | 2022-07-27T22:48:36.711000 | 2020-05-21T18:22:37 | 2020-05-21T18:22:37 | 264,438,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,677 | py | import pygame
import tkinter
from tkinter import messagebox
# I am not sure if my game_init solution is the best solution, but it definitely helps to fight the clutter
# That some of my function calls have had
# It also helps me to only have one part of the code that I need to change for options like colour and size
class GameInitialization(object):
width = 800
height = width
screen_size = (width, height)
rows = 50
columns = rows
axis_length = (rows, columns)
size_between_rows = screen_size[0] // rows
size_between_columns = screen_size[1] // columns
color_window = (100, 100, 100)
color_lines = (60, 60, 60)
color_live_cells = (150, 150, 0)
game_started = False
exit_program = False
reset_initiated = False
mouse_button_pressed = False
set_cell_status_live = True
# TODO: Check if that is the right approach
# It does look and feel so wrong though
def set_window(self, window):
self.window = window
def set_my_grid(self, my_grid):
self.my_grid = my_grid
def set_cell_at_new_mouse_position(self, cell):
self.cell_at_new_mouse_position = cell
# TODO: Think about more UI-related functions
# Do I want a generation counter, live and dead cell counter? etc...
class Cell(object):
def __init__(self, position):
self.position = position
self.live = False
# Check other possible names for the class Grid and the function draw_grid
class Grid(object):
def __init__(self, axis_length):
self.rows = axis_length[0]
self.columns = axis_length[1]
self.grid = self.create_grid() # Careful, grid is used like this: self.grid[y][x]
self.live_cells = set()
self.cells_touched_by_life = list()
self.cells_to_be_born = set()
self.cells_about_to_die = set()
def create_grid(self):
grid = [[Cell((x, y)) for x in range(self.columns)] for y in range(self.rows)]
return grid
def set_cell_live(self, position):
cell = self.grid[position[1]][position[0]]
cell.live = True
self.live_cells.add(cell)
def set_cell_dead(self, position):
cell = self.grid[position[1]][position[0]]
# Maybe check if the cell was already dead
# Because then this method call should not be happening
cell.live = False
# x.remove() will throw an error when the item is not in the set
# Not sure if I want to handle such an error
self.live_cells.discard(cell)
def get_live_cells_set(self):
return self.live_cells
def get_neighbours(self, game_init, cell):
position = cell.position
neighbours = set()
for y_modifier in range(-1, 2):
for x_modifier in range(-1, 2):
if x_modifier == 0 and y_modifier == 0:
continue
if self.is_neighbour_possible(game_init, cell, x_modifier, y_modifier):
neighbours.add(
self.grid[position[1] + y_modifier][position[0] + x_modifier]
)
return neighbours
def is_neighbour_possible(self, game_init, cell, x_modifier, y_modifier):
position = cell.position
# X
if position[0] + x_modifier > game_init.rows - 1:
return False
if position[0] + x_modifier < 0:
return False
# Y
if position[1] + y_modifier > game_init.rows - 1:
return False
if position[1] + y_modifier < 0:
return False
return True
def check_neighbours_status(self, neighbours):
live_counter = 0
for neighbour in neighbours:
if neighbour.live:
live_counter = live_counter + 1
else:
self.cells_touched_by_life.append(neighbour)
return live_counter
def check_survival(self, live_counter):
if live_counter == 2 or live_counter == 3:
return True
else:
return False
def check_reproduction(self):
cells_touched_by_life_no_duplicates = list(set(self.cells_touched_by_life))
for cell in cells_touched_by_life_no_duplicates:
if self.cells_touched_by_life.count(cell) == 3:
self.cells_to_be_born.add(cell)
def manage_live_and_dead_cells(self):
for cell in self.cells_to_be_born:
self.set_cell_live(cell.position)
for cell in self.cells_about_to_die:
self.set_cell_dead(cell.position)
self.cells_touched_by_life.clear()
self.cells_to_be_born.clear()
self.cells_about_to_die.clear()
def check_rules(self, game_init):
for cell in self.live_cells:
neighbours = self.get_neighbours(game_init, cell)
live_counter = self.check_neighbours_status(neighbours)
if not self.check_survival(live_counter):
self.cells_about_to_die.add(cell)
self.check_reproduction()
self.manage_live_and_dead_cells()
def reset_game(self):
current_live_cells = self.live_cells.copy()
for cell in current_live_cells:
self.set_cell_dead(cell.position)
self.cells_touched_by_life = list()
self.cells_to_be_born = set()
self.cells_about_to_die = set()
def draw_live_cells(window, my_grid, game_init):
color_live_cells = game_init.color_live_cells
size_between_rows = game_init.size_between_rows
size_between_columns = game_init.size_between_columns
live_cells_set = my_grid.get_live_cells_set()
for cell in live_cells_set:
x_start = cell.position[0] * size_between_columns + 1
y_start = cell.position[1] * size_between_rows + 1
width = size_between_columns - 1
height = size_between_rows - 1
pygame.draw.rect(window,
color_live_cells,
(
x_start, y_start,
width, height
))
def draw_border(window, game_init):
screen_size = game_init.screen_size
color_lines = game_init.color_lines
# Upper left corner to the upper right corner
pygame.draw.line(window, color_lines, (0, 0), (screen_size[0], 0))
# Upper left corner to the lower left corner
pygame.draw.line(window, color_lines, (0, 0), (0, screen_size[1]))
# Lower right corner to the upper right corner
pygame.draw.line(window, color_lines, (screen_size[0], screen_size[1]), (screen_size[0], 0))
# Lower right corner to the lower left corner
pygame.draw.line(window, color_lines, (screen_size[0], screen_size[1]), (0, screen_size[1]))
def draw_grid(window, game_init):
screen_size = game_init.screen_size
rows = game_init.rows
columns = game_init.columns
size_between_rows = game_init.size_between_rows
size_between_columns = game_init.size_between_columns
color_lines = game_init.color_lines
for row in range(1, rows + 1):
pygame.draw.line(window,
color_lines,
(0, row * size_between_rows),
(screen_size[0], row * size_between_rows))
for column in range(1, columns + 1):
pygame.draw.line(window,
color_lines,
(column * size_between_columns, 0),
(column * size_between_columns, screen_size[1]))
def get_grid_position(game_init, mouse_pos):
position = [0, 0]
corrected_mouse_pos = list()
# TODO: Remove this correction as it is not actually needed
# If the mouse is held and dragged over the border
# X
if mouse_pos[0] < 0:
corrected_mouse_pos.append(0)
elif mouse_pos[0] > game_init.width:
corrected_mouse_pos.append(game_init.width)
else:
corrected_mouse_pos.append(mouse_pos[0])
# Y
if mouse_pos[1] < 0:
corrected_mouse_pos.append(0)
elif mouse_pos[1] > game_init.width:
corrected_mouse_pos.append(game_init.height)
else:
corrected_mouse_pos.append(mouse_pos[1])
# Actual pixel position to grid position calculation
position[0] = corrected_mouse_pos[0] // game_init.size_between_rows
position[1] = corrected_mouse_pos[1] // game_init.size_between_columns
return tuple(position)
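# Worked example with the defaults above (width 800, 50 rows -> 16 px per cell):
# a click at pixel (40, 75) maps to grid cell (40 // 16, 75 // 16) == (2, 4).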
# While pygame is running, one of the four pygame.event.* functions HAS to be called,
# else the OS will think that the game has crashed.
# You should also implement the QUIT event first, so that you can comfortably quit the project
def event_handler(game_init):
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_init.game_started = False
game_init.exit_program = True
break
# TODO: Test if keys has to be outside of the for loop
keys = pygame.key.get_pressed()
if keys[pygame.K_r]:
game_init.reset_initiated = True
game_init.game_started = False
game_init.mouse_button_pressed = False
break
elif keys[pygame.K_SPACE]:
# 'not' reads better than '^= True' for toggling a boolean
game_init.game_started = not game_init.game_started
game_init.mouse_button_pressed = False
# TODO: careful, when clicking rapidly the next spacebar input is sometimes not read
# if the time between the click and the spacebar is too small
if not game_init.game_started:
if event.type == pygame.MOUSEBUTTONUP:
game_init.mouse_button_pressed = False
# I think I do not have to, but should I reset the other flags when mouse_button goes up ?
if event.type == pygame.MOUSEBUTTONDOWN:
grid_position = get_grid_position(game_init, event.dict["pos"])
game_init.mouse_button_pressed = True
process_cell_conversion(game_init, grid_position, True)
if event.type == pygame.MOUSEMOTION and game_init.mouse_button_pressed:
grid_position = get_grid_position(game_init, event.dict["pos"])
process_cell_conversion(game_init, grid_position, False)
# TODO: I probably should think about which functions I want to be methods and vice versa
def process_cell_conversion(game_init, grid_position, mouse_button_down):
if not game_init.mouse_button_pressed:
print("ERROR: process_cell_conversion() called without 'mouse_button_pressed' flag begin true")
return
grid = game_init.my_grid.grid
cell = grid[grid_position[1]][grid_position[0]]
if mouse_button_down:
game_init.set_cell_status_live = not cell.live
game_init.set_cell_at_new_mouse_position(cell)
else:
# Check if the pixel-position is still inside the clicked cell
if game_init.cell_at_new_mouse_position == cell:
return
else:
game_init.set_cell_at_new_mouse_position(cell)
# Compare cell status
# Change if needed
if game_init.set_cell_status_live:
if not cell.live:
game_init.my_grid.set_cell_live(cell.position)
else:
if cell.live:
game_init.my_grid.set_cell_dead(cell.position)
def redraw_window(window, my_grid, game_init):
window.fill(game_init.color_window)
draw_grid(window, game_init)
draw_border(window, game_init)
draw_live_cells(window, my_grid, game_init)
pygame.display.update()
def activate_still_lifes(my_grid):
# Block
my_grid.set_cell_live((3, 3))
my_grid.set_cell_live((3, 4))
my_grid.set_cell_live((4, 3))
my_grid.set_cell_live((4, 4))
# Bee-hive
my_grid.set_cell_live((10, 10))
my_grid.set_cell_live((11, 10))
my_grid.set_cell_live((9, 11))
my_grid.set_cell_live((12, 11))
my_grid.set_cell_live((10, 12))
my_grid.set_cell_live((11, 12))
# Loaf
my_grid.set_cell_live((20, 20))
my_grid.set_cell_live((21, 20))
my_grid.set_cell_live((19, 21))
my_grid.set_cell_live((22, 21))
my_grid.set_cell_live((20, 22))
my_grid.set_cell_live((22, 22))
my_grid.set_cell_live((21, 23))
# Boat
my_grid.set_cell_live((30, 30))
my_grid.set_cell_live((31, 30))
my_grid.set_cell_live((30, 31))
my_grid.set_cell_live((32, 31))
my_grid.set_cell_live((31, 32))
# Tub
my_grid.set_cell_live((40, 40))
my_grid.set_cell_live((39, 41))
my_grid.set_cell_live((41, 41))
my_grid.set_cell_live((40, 42))
def activate_oscillators(my_grid):
# Blinker (period 2)
my_grid.set_cell_live((13, 3))
my_grid.set_cell_live((13, 4))
my_grid.set_cell_live((13, 5))
# Toad (period 2)
my_grid.set_cell_live((20, 10))
my_grid.set_cell_live((21, 10))
my_grid.set_cell_live((22, 10))
my_grid.set_cell_live((19, 11))
my_grid.set_cell_live((20, 11))
my_grid.set_cell_live((21, 11))
# Beacon (period 2)
my_grid.set_cell_live((40, 30))
my_grid.set_cell_live((41, 30))
my_grid.set_cell_live((40, 31))
my_grid.set_cell_live((42, 33))
my_grid.set_cell_live((43, 33))
my_grid.set_cell_live((43, 32))
# TODO: center this window on the actual pygame-window
def welcome_window():
root = tkinter.Tk()
root.attributes("-topmost", True)
root.withdraw() # Hides the main tk window
subject = "Important information"
content = "Thanks for playing this game!" \
"\n\n" \
"How to play?" \
"\n\n" \
"There are 3 rules that decide what happens in the game." \
"\n1. If a cell has 2 or 3 neighbours it will survive (next generation)" \
"\n2. If a cell has less than 2 (underpopulation)." \
"\n or more than 3 (overpopulation) cells nearby it dies." \
"\n3. Any dead cell with exactly 3 neighbours will become live (reproduction)." \
"\n\n" \
"Hotkeys:" \
"\nUse the mouse and click on cells to set them live" \
"\nPress 'spacebar' to start and pause the simulation" \
"\nPress 'r' to reset the game and open this window once again" \
"\n\n" \
"Pressing on 'Ok' will start the game!"
messagebox.showinfo(subject, content)
try:
root.destroy()
except tkinter.TclError:
# destroy() raises TclError if the window was already destroyed
pass
def main():
# TODO: center the pygame window on screen when it starts
pygame.init()
game_init = GameInitialization()
# TODO: Check if this is the right approach
window = pygame.display.set_mode(game_init.screen_size)
game_init.set_window(window)
my_grid = Grid(game_init.axis_length)
game_init.set_my_grid(my_grid)
# TODO: remove this test part, after user-input implementation
# Or maybe implement a way to choose these as presets in the pre-phase
# Still lifes
activate_still_lifes(my_grid)
# Oscillators
activate_oscillators(my_grid)
# Corner test
my_grid.set_cell_live((0, 0))
my_grid.set_cell_live((game_init.columns - 1, 0))
my_grid.set_cell_live((0, game_init.rows - 1))
my_grid.set_cell_live((game_init.columns - 1, game_init.rows - 1))
my_grid.set_cell_live((5, 5))
my_grid.set_cell_live((5, 6))
my_grid.set_cell_live((6, 5))
my_grid.set_cell_live((6, 6))
redraw_window(window, my_grid, game_init)
# Not sure if I should use tkinter or try it with pygame!
welcome_window()
loop_counter = 0
# TODO: Maybe add the program_response_time and cell_activity_delay_factor to game_init
# Also maybe change those ridiculously long names to something shorter
program_response_time = 5
cell_activity_delay_factor = 100
while not game_init.exit_program:
# If pygame.time.wait(x) is not accurate enough, try pygame.time.delay(x)
pygame.time.wait(program_response_time)
event_handler(game_init)
if game_init.game_started:
loop_counter = loop_counter + 1
if loop_counter == cell_activity_delay_factor:
loop_counter = 0
my_grid.check_rules(game_init)
if game_init.reset_initiated:
game_init.reset_initiated = False
my_grid.reset_game()
redraw_window(window, my_grid, game_init)
welcome_window()
continue
redraw_window(window, my_grid, game_init)
pygame.quit()
exit()
main()
| [
"[email protected]"
] | |
8a1fec061394b19970ba9343a8c7a90ec4eb7554 | 037472eacfb1bc8d61692e52fce5e47ab11c6d4d | /pwkit/sherpa.py | bfb5aa332bea9198b883316a32ade380197f4e82 | [
"MIT"
] | permissive | pkgw/pwkit | 2aa5bdde99399183a424ee6d821bd0fc9f5371f6 | ff0ac3d9cc563b441143cf73f282dd4ae98c7a51 | refs/heads/master | 2023-08-03T03:12:47.502647 | 2023-07-21T03:14:13 | 2023-07-21T03:14:13 | 19,895,334 | 24 | 8 | MIT | 2023-08-17T17:05:15 | 2014-05-17T20:15:04 | Python | UTF-8 | Python | false | false | 25,865 | py | # -*- mode: python; coding: utf-8 -*-
# Copyright 2017 Peter Williams <[email protected]> and collaborators.
# Licensed under the MIT License.
"""This module contains helpers for modeling X-ray spectra with the `Sherpa
<http://cxc.harvard.edu/sherpa/>`_ package.
"""
from __future__ import absolute_import, division, print_function
__all__ = '''
PowerLawApecDemModel
make_fixed_temp_multi_apec
expand_rmf_matrix
derive_identity_rmf
derive_identity_arf
get_source_qq_data
get_bkg_qq_data
make_qq_plot
make_multi_qq_plots
make_spectrum_plot
make_multi_spectrum_plots
'''.split()
import numpy as np
from sherpa.astro import ui
from sherpa.astro.xspec import XSAdditiveModel, _xspec
from sherpa.models import Parameter
from sherpa.models.parameter import hugeval
# Some helpful models
DEFAULT_KT_ARRAY = np.logspace(-1.5, 1, 20)
class PowerLawApecDemModel(XSAdditiveModel):
"""A model with contributions from APEC plasmas at a range of
temperatures, scaling with temperature.
Constructor arguments are:
*name*
The Sherpa name of the resulting model instance.
*kt_array* = None
An array of temperatures to use for the plasma models. If left at the
default of None, a hard-coded default is used that spans temperatures of
~0.03 to 10 keV with logarithmic spacing.
The contribution at each temperature scales with kT as a power law. The
model parameters are:
*gfac*
The power-law normalization parameter. The contribution at temperature *kT*
is ``norm * kT**gfac``.
*Abundanc*
The standard APEC abundance parameter.
*redshift*
The standard APEC redshift parameter.
*norm*
The standard overall normalization parameter.
This model is only efficient to compute if *Abundanc* and *redshift* are
frozen.
"""
def __init__(self, name, kt_array=None):
if kt_array is None:
kt_array = DEFAULT_KT_ARRAY
else:
kt_array = np.atleast_1d(np.asfarray(kt_array))
self.gfac = Parameter(name, 'gfac', 0.5, 1e-4, 1e4, 1e-6, 1e6)
self.Abundanc = Parameter(name, 'Abundanc', 1., 0., 5., 0.0, hugeval, frozen=True)
self.redshift = Parameter(name, 'redshift', 0., -0.999, 10., -0.999, hugeval, frozen=True)
self.norm = Parameter(name, 'norm', 1.0, 0.0, 1e24, 0.0, hugeval)
self._kt_array = kt_array
self._cur_cache_key = None
self._cached_vals = None
XSAdditiveModel.__init__(self, name, (self.gfac, self.Abundanc, self.redshift, self.norm))
def _calc(self, params, *args, **kwargs):
gfac, abund, redshift, norm = params
cache_key = (abund, redshift)
if self._cur_cache_key != cache_key:
self._cached_vals = [None] * self._kt_array.size
for i in range(self._kt_array.size):
apec_params = [self._kt_array[i], abund, redshift, 1.]
self._cached_vals[i] = _xspec.xsaped(apec_params, *args, **kwargs)
self._cur_cache_key = cache_key
self._cached_vals = np.array(self._cached_vals).T
scales = norm * self._kt_array**gfac
return (self._cached_vals * scales).sum(axis=1)
ui.add_model(PowerLawApecDemModel)
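# A hypothetical usage sketch (the temperature grid and parameter values below
# are illustrative assumptions, not defaults mandated by this module):
#
#   dem = PowerLawApecDemModel('dem', kt_array=np.logspace(-1, 1, 16))
#   dem.gfac = 1.0
#   dem.redshift = 0.01
#   ui.set_source(dem)
#   ui.fit()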
def make_fixed_temp_multi_apec(kTs, name_template='apec%d', norm=None):
"""Create a model summing multiple APEC components at fixed temperatures.
*kTs*
An iterable of temperatures for the components, in keV.
*name_template* = 'apec%d'
A template to use for the names of each component; it is string-formatted
with the 0-based component number as an argument.
*norm* = None
An initial normalization to be used for every component, or None to use
the Sherpa default.
Returns:
A tuple ``(total_model, sub_models)``, where *total_model* is a Sherpa
model representing the sum of the APEC components and *sub_models* is
a list of the individual models.
This function creates a vector of APEC model components and sums them.
Their *kT* parameters are set and then frozen (using
:func:`sherpa.astro.ui.freeze`), so that upon exit from this function, the
amplitude of each component is the only free parameter.
"""
total_model = None
sub_models = []
for i, kT in enumerate(kTs):
component = ui.xsapec(name_template % i)
component.kT = kT
ui.freeze(component.kT)
if norm is not None:
component.norm = norm
sub_models.append(component)
if total_model is None:
total_model = component
else:
total_model = total_model + component
return total_model, sub_models
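# A minimal usage sketch (the temperatures and starting norm are assumptions):
#
#   total, components = make_fixed_temp_multi_apec([0.5, 1.0, 2.0, 4.0], norm=1e-5)
#   ui.set_source(total)
#   ui.fit()  # only the component norms vary, since each kT is frozen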
def expand_rmf_matrix(rmf):
"""Expand an RMF matrix stored in compressed form.
*rmf*
An RMF object as might be returned by ``sherpa.astro.ui.get_rmf()``.
Returns:
A non-sparse RMF matrix.
The Response Matrix Function (RMF) of an X-ray telescope like Chandra can
be stored in a sparse format as defined in `OGIP Calibration Memo
CAL/GEN/92-002
<https://heasarc.gsfc.nasa.gov/docs/heasarc/caldb/docs/memos/cal_gen_92_002/cal_gen_92_002.html>`_.
For visualization and analysis purposes, it can be useful to de-sparsify
the matrices stored in this way. This function does that, returning a
two-dimensional Numpy array.
"""
n_chan = rmf.e_min.size
n_energy = rmf.n_grp.size
expanded = np.zeros((n_energy, n_chan))
mtx_ofs = 0
grp_ofs = 0
for i in range(n_energy):
for j in range(rmf.n_grp[i]):
f = rmf.f_chan[grp_ofs]
n = rmf.n_chan[grp_ofs]
expanded[i,f:f+n] = rmf.matrix[mtx_ofs:mtx_ofs+n]
mtx_ofs += n
grp_ofs += 1
return expanded
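# A minimal usage sketch (assumes a PHA dataset with a response has already been
# loaded, e.g. via ui.load_pha()):
#
#   rmf = ui.get_rmf()
#   matrix = expand_rmf_matrix(rmf)  # ndarray of shape (n_energy, n_chan)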
def derive_identity_rmf(name, rmf):
"""Create an "identity" RMF that does not mix energies.
*name*
The name of the RMF object to be created; passed to Sherpa.
*rmf*
An existing RMF object on which to base this one.
Returns:
A new RMF1D object that has a response matrix that is as close to
diagonal as we can get in energy space, and that has a constant
sensitivity as a function of detector channel.
In many X-ray observations, the relevant background signal does not behave
like an astrophysical source that is filtered through the telescope's
response functions. However, I have been unable to get current Sherpa
(version 4.9) to behave how I want when working with background models that
are *not* filtered through these response functions. This function
constructs an "identity" RMF response matrix that provides the best
possible approximation of a passthrough "instrumental response": it mixes
energies as little as possible and has a uniform sensitivity as a function
of detector channel.
"""
from sherpa.astro.data import DataRMF
from sherpa.astro.instrument import RMF1D
# The "x" axis of the desired matrix -- the columnar direction; axis 1 --
# is "channels". There are n_chan of them and each maps to a notional
# energy range specified by "e_min" and "e_max".
#
# The "y" axis of the desired matrix -- the row direction; axis 1 -- is
# honest-to-goodness energy. There are tot_n_energy energy bins, each
# occupying a range specified by "energ_lo" and "energ_hi".
#
# We want every channel that maps to a valid output energy to have a
# nonzero entry in the matrix. The relative sizes of n_energy and n_cell
# can vary, as can the bounds of which regions of each axis can be validly
# mapped to each other. So this problem is basically equivalent to that of
# drawing an arbitrary pixelated line on bitmap, without anti-aliasing.
#
# The output matrix is represented in a row-based sparse format.
#
# - There is a integer vector "n_grp" of size "n_energy". It gives the
# number of "groups" needed to fill in each row of the matrix. Let
# "tot_groups = sum(n_grp)". For a given row, "n_grp[row_index]" may
# be zero, indicating that the row is all zeros.
# - There are integer vectors "f_chan" and "n_chan", each of size
# "tot_groups", that define each group. "f_chan" gives the index of
# the first channel column populated by the group; "n_chan" gives the
# number of columns populated by the group. Note that there can
# be multiple groups for a single row, so successive group records
# may fill in different pieces of the same row.
# - Let "tot_cells = sum(n_chan)".
# - There is a vector "matrix" of size "tot_cells" that stores the actual
# matrix data. This is just a concatenation of all the data corresponding
# to each group.
# - Unpopulated matrix entries are zero.
#
# See expand_rmf_matrix() for a sloppy implementation of how to unpack
# this sparse format.
n_chan = rmf.e_min.size
n_energy = rmf.energ_lo.size
c_lo_offset = rmf.e_min[0]
c_lo_slope = (rmf.e_min[-1] - c_lo_offset) / (n_chan - 1)
c_hi_offset = rmf.e_max[0]
c_hi_slope = (rmf.e_max[-1] - c_hi_offset) / (n_chan - 1)
e_lo_offset = rmf.energ_lo[0]
e_lo_slope = (rmf.energ_lo[-1] - e_lo_offset) / (n_energy - 1)
e_hi_offset = rmf.energ_hi[0]
e_hi_slope = (rmf.energ_hi[-1] - e_hi_offset) / (n_energy - 1)
all_e_indices = np.arange(n_energy)
all_e_los = e_lo_slope * all_e_indices + e_lo_offset
start_chans = np.floor((all_e_los - c_lo_offset) / c_lo_slope).astype(int)
all_e_his = e_hi_slope * all_e_indices + e_hi_offset
stop_chans = np.ceil((all_e_his - c_hi_offset) / c_hi_slope).astype(int)
first_e_index_on_channel_grid = 0
while stop_chans[first_e_index_on_channel_grid] < 0:
first_e_index_on_channel_grid += 1
last_e_index_on_channel_grid = n_energy - 1
while start_chans[last_e_index_on_channel_grid] >= n_chan:
last_e_index_on_channel_grid -= 1
n_nonzero_rows = last_e_index_on_channel_grid + 1 - first_e_index_on_channel_grid
e_slice = slice(first_e_index_on_channel_grid, last_e_index_on_channel_grid + 1)
n_grp = np.zeros(n_energy, dtype=int)
n_grp[e_slice] = 1
start_chans = np.maximum(start_chans[e_slice], 0)
stop_chans = np.minimum(stop_chans[e_slice], n_chan - 1)
# We now have a first cut at a row-oriented expression of our "identity"
# RMF. However, it's conservative. Trim down to eliminate overlaps between
# sequences.
for i in range(n_nonzero_rows - 1):
my_end = stop_chans[i]
next_start = start_chans[i+1]
if next_start <= my_end:
stop_chans[i] = max(start_chans[i], next_start - 1)
# Results are funky unless the sums along the vertical axis are constant.
# Ideally the sum along the *horizontal* axis would add up to 1 (since,
# ideally, each row is a probability distribution), but it is not
# generally possible to fulfill both of these constraints simultaneously.
# The latter constraint does not seem to matter in practice so we ignore it.
# Due to the funky encoding of the matrix, we need to build a helper table
# to meet the vertical-sum constraint.
counts = np.zeros(n_chan, dtype=int)
for i in range(n_nonzero_rows):
counts[start_chans[i]:stop_chans[i]+1] += 1
counts[:start_chans.min()] = 1
counts[stop_chans.max()+1:] = 1
assert (counts > 0).all()
# We can now build the matrix.
f_chan = start_chans
rmfnchan = stop_chans + 1 - f_chan
assert (rmfnchan > 0).all()
matrix = np.zeros(rmfnchan.sum())
amounts = 1. / counts
ofs = 0
for i in range(n_nonzero_rows):
f = f_chan[i]
n = rmfnchan[i]
matrix[ofs:ofs+n] = amounts[f:f+n]
ofs += n
# All that's left to do is create the Python objects.
drmf = DataRMF(
name,
rmf.detchans,
rmf.energ_lo,
rmf.energ_hi,
n_grp,
f_chan,
rmfnchan,
matrix,
offset = 0,
e_min = rmf.e_min,
e_max = rmf.e_max,
header = None
)
return RMF1D(drmf, pha=rmf._pha)
def derive_identity_arf(name, arf):
"""Create an "identity" ARF that has uniform sensitivity.
*name*
The name of the ARF object to be created; passed to Sherpa.
*arf*
An existing ARF object on which to base this one.
Returns:
A new ARF1D object that has a uniform spectral response vector.
In many X-ray observations, the relevant background signal does not behave
like an astrophysical source that is filtered through the telescope's
response functions. However, I have been unable to get current Sherpa
(version 4.9) to behave how I want when working with background models that
are *not* filtered through these response functions. This function
constructs an "identity" ARF response function that has uniform sensitivity
as a function of detector channel.
"""
from sherpa.astro.data import DataARF
from sherpa.astro.instrument import ARF1D
darf = DataARF(
name,
arf.energ_lo,
arf.energ_hi,
np.ones(arf.specresp.shape),
arf.bin_lo,
arf.bin_hi,
arf.exposure,
header = None,
)
return ARF1D(darf, pha=arf._pha)
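# A usage sketch for the two "identity" helpers; installing them on a background
# component with set_rmf/set_arf is an assumption about the surrounding analysis,
# not something this module enforces:
#
#   rmf = derive_identity_rmf('bkg_rmf', ui.get_rmf())
#   arf = derive_identity_arf('bkg_arf', ui.get_arf())
#   ui.set_rmf(rmf, bkg_id=1)
#   ui.set_arf(arf, bkg_id=1)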
def get_source_qq_data(id=None):
"""Get data for a quantile-quantile plot of the source data and model.
*id*
The dataset id for which to get the data; defaults if unspecified.
Returns:
An ndarray of shape ``(3, npts)``. The first slice is the energy axis in
keV; the second is the observed values in each bin (counts, or rate, or
rate per keV, etc.); the third is the corresponding model value in each
bin.
The inputs are implicit; the data are obtained from the current state of
the Sherpa ``ui`` module.
"""
sdata = ui.get_data(id=id)
kev = sdata.get_x()
obs_data = sdata.counts
model_data = ui.get_model(id=id)(kev)
return np.vstack((kev, obs_data, model_data))
def get_bkg_qq_data(id=None, bkg_id=None):
"""Get data for a quantile-quantile plot of the background data and model.
*id*
The dataset id for which to get the data; defaults if unspecified.
*bkg_id*
The identifier of the background; defaults if unspecified.
Returns:
An ndarray of shape ``(3, npts)``. The first slice is the energy axis in
keV; the second is the observed values in each bin (counts, or rate, or
rate per keV, etc.); the third is the corresponding model value in each
bin.
The inputs are implicit; the data are obtained from the current state of
the Sherpa ``ui`` module.
"""
bdata = ui.get_bkg(id=id, bkg_id=bkg_id)
kev = bdata.get_x()
obs_data = bdata.counts
model_data = ui.get_bkg_model(id=id, bkg_id=bkg_id)(kev)
return np.vstack((kev, obs_data, model_data))
def make_qq_plot(kev, obs, mdl, unit, key_text):
"""Make a quantile-quantile plot comparing events and a model.
*kev*
A 1D, sorted array of event energy bins measured in keV.
*obs*
A 1D array giving the number or rate of events in each bin.
*mdl*
A 1D array giving the modeled number or rate of events in each bin.
*unit*
Text describing the unit in which *obs* and *mdl* are measured; will
be shown on the plot axes.
*key_text*
Text describing the quantile-quantile comparison quantity; will be
shown on the plot legend.
Returns:
An :class:`omega.RectPlot` instance.
*TODO*: nothing about this is Sherpa-specific. Same goes for some of the
plotting routines in :mod:`pkwit.environments.casa.data`; might be
reasonable to add a submodule for generic X-ray-y plotting routines.
"""
import omega as om
kev = np.asarray(kev)
obs = np.asarray(obs)
mdl = np.asarray(mdl)
c_obs = np.cumsum(obs)
c_mdl = np.cumsum(mdl)
mx = max(c_obs[-1], c_mdl[-1])
p = om.RectPlot()
p.addXY([0, mx], [0, mx], '1:1')
p.addXY(c_mdl, c_obs, key_text)
# HACK: this range of numbers is chosen to give reasonable sampling for my
# sources, which are typically quite soft.
locs = np.array([0, 0.05, 0.08, 0.11, 0.17, 0.3, 0.4, 0.7, 1]) * (kev.size - 2)
c0 = mx * 1.05
c1 = mx * 1.1
for loc in locs:
i0 = int(np.floor(loc))
frac = loc - i0
kevval = (1 - frac) * kev[i0] + frac * kev[i0+1]
mdlval = (1 - frac) * c_mdl[i0] + frac * c_mdl[i0+1]
obsval = (1 - frac) * c_obs[i0] + frac * c_obs[i0+1]
p.addXY([mdlval, mdlval], [c0, c1], '%.2f keV' % kevval, dsn=2)
p.addXY([c0, c1], [obsval, obsval], None, dsn=2)
p.setLabels('Cumulative model ' + unit, 'Cumulative data ' + unit)
p.defaultKeyOverlay.vAlign = 0.3
return p
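# A minimal usage sketch tying the q-q helpers together (the unit string and key
# text are arbitrary choices; .show() assumes an interactive OmegaPlot backend):
#
#   kev, obs, mdl = get_source_qq_data()
#   plot = make_qq_plot(kev, obs, mdl, '(counts)', 'Source')
#   plot.show()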
def make_multi_qq_plots(arrays, key_text):
"""Make a quantile-quantile plot comparing multiple sets of events and models.
*arrays*
An iterable of ``(kev, obs, mdl)`` triples, e.g. as returned by
:func:`get_source_qq_data` or :func:`get_bkg_qq_data`.
*key_text*
Text describing the quantile-quantile comparison quantity; will be
shown on the plot legend.
Returns:
An :class:`omega.RectPlot` instance.
*TODO*: nothing about this is Sherpa-specific. Same goes for some of the
plotting routines in :mod:`pkwit.environments.casa.data`; might be
reasonable to add a submodule for generic X-ray-y plotting routines.
*TODO*: Some gross code duplication here.
"""
import omega as om
p = om.RectPlot()
p.addXY([0, 1.], [0, 1.], '1:1')
for index, array in enumerate(arrays):
kev, obs, mdl = array
c_obs = np.cumsum(obs)
c_mdl = np.cumsum(mdl)
mx = 0.5 * (c_obs[-1] + c_mdl[-1])
c_obs /= mx
c_mdl /= mx
p.addXY(c_mdl, c_obs, '%s #%d' % (key_text, index))
# HACK: this range of numbers is chosen to give reasonable sampling for my
# sources, which are typically quite soft.
#
# Note: this reuses the variables from the last loop iteration.
locs = np.array([0, 0.05, 0.08, 0.11, 0.17, 0.3, 0.4, 0.7, 1]) * (kev.size - 2)
c0 = 1.05
c1 = 1.1
for loc in locs:
i0 = int(np.floor(loc))
frac = loc - i0
kevval = (1 - frac) * kev[i0] + frac * kev[i0+1]
mdlval = (1 - frac) * c_mdl[i0] + frac * c_mdl[i0+1]
obsval = (1 - frac) * c_obs[i0] + frac * c_obs[i0+1]
p.addXY([mdlval, mdlval], [c0, c1], '%.2f keV' % kevval, dsn=2)
p.addXY([c0, c1], [obsval, obsval], None, dsn=2)
p.setLabels('Cumulative rescaled model', 'Cumulative rescaled data')
p.defaultKeyOverlay.vAlign = 0.3
return p
def make_spectrum_plot(model_plot, data_plot, desc, xmin_clamp=0.01,
min_valid_x=None, max_valid_x=None):
"""Make a plot of a spectral model and data.
*model_plot*
A model plot object returned by Sherpa from a call like `ui.get_model_plot()`
or `ui.get_bkg_model_plot()`.
*data_plot*
A data plot object returned by Sherpa from a call like `ui.get_source_plot()`
or `ui.get_bkg_plot()`.
*desc*
Text describing the origin of the data; will be shown in the plot legend
(with "Model" and "Data" appended).
*xmin_clamp*
The smallest "x" (energy axis) value that will be plotted; default is 0.01.
This is needed to allow the plot to be shown on a logarithmic scale if
the energy axes of the model go all the way to 0.
*min_valid_x*
Either None, or the smallest "x" (energy axis) value in which the model and
data are valid; this could correspond to a range specified in the "notice"
command during analysis. If specified, a gray band will be added to the plot
showing the invalidated regions.
*max_valid_x*
Like *min_valid_x* but for the largest "x" (energy axis) value in which the
model and data are valid.
Returns:
A tuple ``(plot, xlow, xhigh)``, where *plot* an OmegaPlot RectPlot
instance, *xlow* is the left edge of the plot bounds, and *xhigh* is the
right edge of the plot bounds.
"""
import omega as om
model_x = np.concatenate((model_plot.xlo, [model_plot.xhi[-1]]))
model_x[0] = max(model_x[0], xmin_clamp)
model_y = np.concatenate((model_plot.y, [0.]))
# Sigh, sometimes Sherpa gives us bad values.
is_bad = ~np.isfinite(model_y)
if is_bad.sum():
from .cli import warn
warn('bad Sherpa model Y value(s) at: %r', np.where(is_bad)[0])
model_y[is_bad] = 0
data_left_edges = data_plot.x - 0.5 * data_plot.xerr
data_left_edges[0] = max(data_left_edges[0], xmin_clamp)
data_hist_x = np.concatenate((data_left_edges, [data_plot.x[-1] + 0.5 * data_plot.xerr[-1]]))
data_hist_y = np.concatenate((data_plot.y, [0.]))
log_bounds_pad_factor = 0.9
xlow = model_x[0] * log_bounds_pad_factor
xhigh = model_x[-1] / log_bounds_pad_factor
p = om.RectPlot()
if min_valid_x is not None:
p.add(om.rect.XBand(1e-3 * xlow, min_valid_x, keyText=None), zheight=-1, dsn=1)
if max_valid_x is not None:
p.add(om.rect.XBand(max_valid_x, xhigh * 1e3, keyText=None), zheight=-1, dsn=1)
csp = om.rect.ContinuousSteppedPainter(keyText=desc + ' Model')
csp.setFloats(model_x, model_y)
p.add(csp)
csp = om.rect.ContinuousSteppedPainter(keyText=None)
csp.setFloats(data_hist_x, data_hist_y)
p.add(csp)
p.addXYErr(data_plot.x, data_plot.y, data_plot.yerr, desc + ' Data', lines=0, dsn=1)
p.setLabels(data_plot.xlabel, data_plot.ylabel)
p.setLinLogAxes(True, False)
p.setBounds(xlow, xhigh)
return p, xlow, xhigh
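# A minimal usage sketch (the dataset id, description and energy bounds are
# assumptions for illustration):
#
#   p, xlow, xhigh = make_spectrum_plot(ui.get_model_plot(id=1),
#                                       ui.get_source_plot(id=1),
#                                       'MyTarget', min_valid_x=0.3, max_valid_x=7.)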
def make_multi_spectrum_plots(model_plot, plotids, data_getter, desc, xmin_clamp=0.01,
min_valid_x=None, max_valid_x=None):
"""Make a plot of multiple spectral models and data.
*model_plot*
A model plot object returned by Sherpa from a call like
``ui.get_model_plot()`` or ``ui.get_bkg_model_plot()``.
*plotids*
An iterable of Sherpa dataset identifiers whose data will be plotted.
*data_getter*
A callable mapping a dataset identifier to a data plot object, e.g.
``lambda id: ui.get_bkg_plot(id)``.
*desc*
Text describing the origin of the data; will be shown in the plot legend
(with "Model" and "Data #<number>" appended).
*xmin_clamp*
The smallest "x" (energy axis) value that will be plotted; default is 0.01.
This is needed to allow the plot to be shown on a logarithmic scale if
the energy axes of the model go all the way to 0.
*min_valid_x*
Either None, or the smallest "x" (energy axis) value in which the model and
data are valid; this could correspond to a range specified in the "notice"
command during analysis. If specified, a gray band will be added to the plot
showing the invalidated regions.
*max_valid_x*
Like *min_valid_x* but for the largest "x" (energy axis) value in which the
model and data are valid.
Returns:
A tuple ``(plot, xlow, xhigh)``, where *plot* an OmegaPlot RectPlot
instance, *xlow* is the left edge of the plot bounds, and *xhigh* is the
right edge of the plot bounds.
TODO: not happy about the code duplication with :func:`make_spectrum_plot`
but here we are.
"""
import omega as om
from omega.stamps import DataThemedStamp, WithYErrorBars
model_x = np.concatenate((model_plot.xlo, [model_plot.xhi[-1]]))
model_x[0] = max(model_x[0], xmin_clamp)
model_y = np.concatenate((model_plot.y, [0.]))
# Sigh, sometimes Sherpa gives us bad values.
is_bad = ~np.isfinite(model_y)
if is_bad.sum():
from .cli import warn
warn('bad Sherpa model Y value(s) at: %r', np.where(is_bad)[0])
model_y[is_bad] = 0
p = om.RectPlot()
data_csps = []
data_lines = []
xlow = xhigh = None
for index, plotid in enumerate(plotids):
data_plot = data_getter(plotid)
data_left_edges = data_plot.x - 0.5 * data_plot.xerr
data_left_edges[0] = max(data_left_edges[0], xmin_clamp)
data_hist_x = np.concatenate((data_left_edges, [data_plot.x[-1] + 0.5 * data_plot.xerr[-1]]))
data_hist_y = np.concatenate((data_plot.y, [0.]))
if xlow is None:
xlow = model_x[0]
xhigh = model_x[-1]
else:
xlow = min(xlow, model_x[0])
xhigh = max(xhigh, model_x[-1])
csp = om.rect.ContinuousSteppedPainter(keyText=None)
csp.setFloats(data_hist_x, data_hist_y)
data_csps.append(csp)
inner_stamp = DataThemedStamp(None)
stamp = WithYErrorBars(inner_stamp)
lines = om.rect.XYDataPainter(
lines = False,
pointStamp = stamp,
keyText = '%s Data #%d' % (desc, index)
)
lines.setFloats(data_plot.x, data_plot.y,
data_plot.y + data_plot.yerr,
data_plot.y - data_plot.yerr)
inner_stamp.setHolder(lines)
data_lines.append(lines)
log_bounds_pad_factor = 0.9
xlow *= log_bounds_pad_factor
xhigh /= log_bounds_pad_factor
if min_valid_x is not None:
p.add(om.rect.XBand(1e-3 * xlow, min_valid_x, keyText=None), zheight=-1, dsn=1)
if max_valid_x is not None:
p.add(om.rect.XBand(max_valid_x, xhigh * 1e3, keyText=None), zheight=-1, dsn=1)
model_csp = om.rect.ContinuousSteppedPainter(keyText=desc + ' Model')
model_csp.setFloats(model_x, model_y)
p.add(model_csp)
for index, (data_csp, lines) in enumerate(zip(data_csps, data_lines)):
p.add(data_csp, dsn=index + 1)
p.add(lines, dsn=index + 1)
p.setLabels(data_plot.xlabel, data_plot.ylabel) # data_plot = last one from the for loop
p.setLinLogAxes(True, False)
    p.setBounds(xlow, xhigh)
return p, xlow, xhigh
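
# Hypothetical usage sketch (an illustrative addition, not part of the original
# module; the dataset IDs, the 'ACIS' label, and the energy bounds below are
# assumptions):
#
#   from sherpa.astro import ui
#   mplot = ui.get_model_plot()
#   plot, xlow, xhigh = make_multi_spectrum_plots(
#       mplot, [1, 2], ui.get_source_plot, 'ACIS',
#       min_valid_x=0.5, max_valid_x=7.0)
#   plot.show()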
| [
"[email protected]"
] | |
3d53d4bb40c4268b088868ff8984f18d7124d394 | 818173671edf15d7c6d775ed003bcd35608233f9 | /waflib/extras/fluid.py | 28fbfd2776e9d6d9868249bacd8c53ced6b340e8 | [] | no_license | zsx/waf | a1e87e079e22443ae3ed98e08cefc705b5f73906 | 66d1c6ede4ceda66a98dbbf9dd473f1d5c5752ba | refs/heads/master | 2021-01-13T12:56:12.379186 | 2010-07-12T17:27:13 | 2010-07-12T17:27:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | #!/usr/bin/python
# encoding: utf-8
# Grygoriy Fuchedzhy 2009
"""
Compile fluid files (fltk graphic library). Use the 'fluid' feature in conjunction with the 'cxx' feature.
"""
from waflib import Task
from waflib.TaskGen import extension
class fluid(Task.Task):
color = 'BLUE'
ext_out = ['.h']
	run_str = '${FLUID} -c -o ${TGT[0].abspath()} -h ${TGT[1].abspath()} ${SRC}'
@extension('.fl')
def fluid(self, node):
"""add the .fl to the source list; the cxx file generated will be compiled when possible"""
cpp = node.change_ext('.cpp')
hpp = node.change_ext('.hpp')
self.create_task('fluid', node, [cpp, hpp])
if 'cxx' in self.features:
self.source.append(cpp)
def configure(conf):
conf.find_program('fluid', var='FLUID')
conf.check_cfg(path='fltk-config', package='', args='--cxxflags --ldflags', uselib_store='FLTK', mandatory=True)
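
# Hypothetical wscript usage sketch (an illustrative addition; the source and
# target names are assumptions, not part of the original tool):
#
#   def configure(conf):
#       conf.load('compiler_cxx fluid')
#
#   def build(bld):
#       bld.program(features='cxx fluid', source='ui.fl main.cpp',
#                   target='app', use='FLTK')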
| [
"tnagy1024@f0382ac9-c320-0410-b3f0-b508d59f5a85"
] | tnagy1024@f0382ac9-c320-0410-b3f0-b508d59f5a85 |
48de20fc8323234c249ebd7b875a5f7ae2bd275c | 8bb63e2a80173a3158eab9a521a7f5e17d16c947 | /test_scripts/mimo_project/test_batch.py | 69e4321927336da6d0df9f90a5a9b62b5cf363b7 | [] | no_license | pascaie/python_test_private | c0541c4b70441d365b14def662a308617bda4b5f | 03af3d4a0599869e466fdae16ab07848b9f0c889 | refs/heads/master | 2020-04-08T03:34:17.139424 | 2018-12-21T01:03:28 | 2018-12-21T01:03:28 | 158,981,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,539 | py | # pictures location: C:\Users\pascaie\Pictures
import os # work with files and the Operating System
from datetime import datetime
folder = r"C:\Users\pascaie\Pictures\pictures_download" # "r" is for "raw string"
location = input("Photo location: ") # asks the user to input a location which will be added to the new filename
files = os.listdir(folder) # creates a list of the elements within the folder
for filename in files:
if not filename.startswith('.'): # do not consider the "."-named files/directories
file = os.path.join(folder, filename) # joins the folder with the file
        extension = os.path.splitext(file)  # split the path into a (root, ext) tuple
# print(extension)
m_time = os.path.getmtime(file) # get the last modification time
# print(m_time) # this returns the seconds passed since January 1st, 1970
real_time = datetime.fromtimestamp(m_time) # get the last modification time in a readable format
# print(real_time)
f_time = datetime.strftime(real_time, "%Y%m%d_%H%M%S") # here you can edit the output format of the date
# print(f_time)
        new_filename = location + "_" + f_time + extension[1]  # compose the new filename
        # OBS: the extension is the second element of the tuple returned by splitext
new_file = os.path.join(folder, new_filename) # get the path and name of the new file
os.rename(file, new_file) # here, the files are effectively renamed with the new names
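        # Illustrative example (values assumed): with location "Paris" and a photo
        # last modified on 2018-11-01 09:30:00, "IMG_0001.jpg" is renamed to
        # "Paris_20181101_093000.jpg".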
| [
"[email protected]"
] | |
ed55f165177b62a572dd5f14ba39bcca8cf7496e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02659/s368852702.py | 9a8d0466876ef1d34613c066aa5e8c5f12f432d7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | a,b = input().split()
a = int(a)
b = int("".join(b.split(".")))
print(a*b//100) | [
"[email protected]"
] | |
417ec970032c0b5a7d2d5f7859da0e1c583efa67 | 8d3adc636d0ea114490f66420123f08fae3483e2 | /tutorial/python_advance/multiple_job.py | c018e41f44603fcb82465dc75e72a66346df0513 | [] | no_license | sengeiou/python_intern | a7658873de7e01c14ee8061a1401b7bee9aff350 | 73caa984d1429dc1882446432075ccf986380308 | refs/heads/master | 2023-02-02T19:00:05.204600 | 2020-12-23T23:41:11 | 2020-12-23T23:41:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | # -*- coding=utf-8 -*-
"""
This file is mainly for practicing the use of Python multithreading.
Author : Cucumber
Date : 10/22/20
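
A minimal threading example of the kind this file is meant to hold (an
illustrative addition, not from the original author):

    import threading

    def worker(n):
        print("worker %d running" % n)

    threads = [threading.Thread(target=worker, args=(i,)) for i in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()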
""" | [
"[email protected]"
] | |
10fcbced63f004253d5e1ff2975551c9f3875eb9 | 8cf978166ba469d789bc125189c67fda56e26f52 | /venv/bin/pylupdate5 | c9bd440d6bd2fd83f63cfc58868cb978ed5bb735 | [] | no_license | soraWontForget/QuirkyVikavolt | 3c5657923c5145b6f1e1509cb4c8c491f1b8b5d7 | 56d5af0f5b7cd4fcac9893e5aacb684c3587515b | refs/heads/master | 2020-05-25T08:30:47.160183 | 2019-05-28T21:16:53 | 2019-05-28T21:16:53 | 187,712,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | #!/Users/sora/Projects/QuirkyVikavolt/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from PyQt5.pylupdate_main import main
if __name__ == '__main__':
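    # Rewrite argv[0] to the bare command name (dropping any "-script.py" or
    # ".exe" wrapper suffix) so that usage and error messages show it cleanly.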
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
b816eb86977a2da6e487ea2ed2972230e405415f | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/GEM/YW_CYBMM_SZSJ_392_1.py | 0526e7bb0d7cacf744ce16914885c815a8094da4 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,079 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_CYBMM_SZSJ_392_1(xtp_test_case):
# YW_CYBMM_SZSJ_392_1
def test_YW_CYBMM_SZSJ_392_1(self):
        title = 'Trading day reverse-best-to-limit price - T+0 buy → T+0 sell (security code not held)'
        # Define the expected outcome of the current test case
        # Expected states: initial, unfilled, partially filled, fully filled, partial-cancel reported, partially cancelled, cancel pending, cancelled, rejected (invalid order), cancel-rejected, internally cancelled
        # xtp_ID and cancel_xtpID default to 0 and need not be changed
case_goal = {
'期望状态': '废单',
'errorID': 11010121,
'errorMSG': queryOrderErrorMsg(11010121),
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
        # Define the order request parameters ------------------------------------------
        # Arguments: security code, market, security type, security status, trading status, side (B = buy, S = sell), expected state, Api
stkparm = QueryStkPriceQty('300116', '2', '2', '2', '0', 'B', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
                '测试错误原因': 'Failed to fetch order parameters: ' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id': 2,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_REVERSE_BEST_LIMIT'],
'price': stkparm['涨停价'],
'quantity': 1000,
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
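            # This order sells 1000 shares of a security the account does not hold,
            # so the service is expected to reject it with error 11010121 (see case_goal).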
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('Execution result: ' + str(rs['用例测试结果']) + ', '
                           + str(rs['用例错误源']) + ', ' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
93d59d9fc54cacf0f27df38051f0f003723c7ae8 | cf287ec28cee69ff1e0060f01e5fd74f9d1f3e87 | /venv/Scripts/pip-script.py | c0723f02d2f371b9cd458ab3529a3a2e19c358eb | [] | no_license | CookyChou/leetcode | 1325b9be4d937401dfd8af278481abd6dae53bb4 | 73768b4fd9bcddc7e207450cff9091342a42f607 | refs/heads/master | 2020-05-07T08:08:16.244991 | 2019-04-09T07:39:06 | 2019-04-09T07:39:06 | 180,311,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | #!D:\leetcode\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"[email protected]"
] | |
8f57897c877404183d9f16f2cee0aa40e810a153 | 8d22d3784377165a57d5bea3896d6b6898e33d12 | /django_portfolio_site/urls.py | e6434e5388f5fcc22375c19ace1a714f729dd61d | [
"MIT"
] | permissive | gbungbung/django-portfolio-site | 0440969a08ff1364a2dc2f26a2e9fa24e2a558a3 | 7810211e157eac3c76bf139e2b65dc4a73d90cbc | refs/heads/master | 2022-04-28T00:42:28.000930 | 2019-10-09T14:42:15 | 2019-10-09T14:42:15 | 211,950,587 | 0 | 0 | MIT | 2022-04-22T22:25:26 | 2019-09-30T20:28:32 | JavaScript | UTF-8 | Python | false | false | 184 | py | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
    path('', include('portfolio.urls')),  # a name kwarg has no effect on include()d patterns
] | [
"[email protected]"
] | |
a63ddf10b5dbe3cdaf65d13bf66221200c99a607 | 3ab9618e43aca5193e3538e35dd9452e4b598fc5 | /__openerp__.py | 9a7c092d9d3fe2ceaa5ae94b19e684e7d8270c6e | [] | no_license | cgsoftware/ImportParArticoli | 60a3cd3561a102a025ac8f00aaa005b5ffc3be5c | 4294bb4787091b9dc680120a1b231b1a87b42f55 | refs/heads/master | 2021-01-01T19:29:49.889526 | 2011-07-26T14:39:46 | 2011-07-26T14:39:46 | 2,107,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,485 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2009 Italian Community (http://www).
# All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'IMPORT SUPPLIER CATALOGUES',
'version': '0.1',
'category': 'Localisation/Italy',
'description': """Importazione Cataloghi Articoli da Fornitori, formati csv/txt per ogni campo articolo chiede un numero colonna del file da importare""",
'author': 'C & G Software sas',
'website': 'http://www.cgsoftware.it',
"depends" : ['base','account','base_vat','product'],
"update_xml" : ['partner.xml','partner_wizard.xml','security/ir.model.access.csv'],
"active": False,
"installable": True
}
| [
"[email protected]"
] |