blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 5 283 | content_id stringlengths 40 40 | detected_licenses sequencelengths 0 41 | license_type stringclasses 2 values | repo_name stringlengths 7 96 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 58 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 12.7k 662M ⌀ | star_events_count int64 0 35.5k | fork_events_count int64 0 20.6k | gha_license_id stringclasses 11 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 43 values | src_encoding stringclasses 9 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 5.88M | extension stringclasses 30 values | content stringlengths 7 5.88M | authors sequencelengths 1 1 | author stringlengths 0 73 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
69e60d082f827c096460c5d45d78477079781d57 | 02561ee089dddf0ac683aa5821028acb1cce2327 | /make_localization_cues/get_sod_model/DSS-pytorch-master/tools/visual.py | edc191c23e52bf9b0d2a7cb6fc52f5037a8e48a4 | [] | no_license | DQDH/Semantic_Image_Segmentation | d50f062527a78b6740db5956ba5b285a2baf1b70 | 91bb5c066e72f1854860b5352d47d088456004e7 | refs/heads/master | 2022-10-31T14:26:31.896743 | 2020-06-18T01:57:54 | 2020-06-18T01:57:54 | 272,595,555 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,268 | py | import torch
import numpy as np
import matplotlib.pyplot as plt
class Viz_visdom(object):
def __init__(self, name, display_id=0):
self.name = name
self.display_id = display_id
self.idx = display_id
self.plot_data = {}
if display_id > 0:
import visdom
self.vis = visdom.Visdom(port=8097)
def plot_current_errors(self, epoch, counter_ratio, errors, idx=0):
if idx not in self.plot_data:
self.plot_data[idx] = {'X': [], 'Y': [], 'legend': list(errors.keys())}
# self.plot_data = {'X': [], 'Y': [], 'legend': list(errors.keys())}
self.plot_data[idx]['X'].append(epoch + counter_ratio)
self.plot_data[idx]['Y'].append([errors[k] for k in self.plot_data[idx]['legend']])
self.vis.line(
X=np.stack([np.array(self.plot_data[idx]['X'])] * len(self.plot_data[idx]['legend']), 1)
if len(errors) > 1 else np.array(self.plot_data[idx]['X']),
Y=np.array(self.plot_data[idx]['Y']) if len(errors) > 1 else np.array(self.plot_data[idx]['Y'])[:, 0],
opts={
'title': self.name + ' loss over time %d' % idx,
'legend': self.plot_data[idx]['legend'],
'xlabel': 'epoch',
'ylabel': 'loss'},
win=self.display_id + idx)
if self.idx < self.display_id + idx:
self.idx = self.display_id + idx
def plot_current_img(self, visuals, c_prev=True):
idx = self.idx + 1
for label, image_numpy in visuals.items():
if c_prev:
self.vis.image(image_numpy, opts=dict(title=label),
win=self.display_id + idx)
else:
image_numpy = image_numpy.swapaxes(0, 2).swapaxes(1, 2)
self.vis.image(image_numpy, opts=dict(title=label),
win=self.display_id + idx)
idx += 1
# reference: https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
def plot_image(inp, fig_size, title=None, swap_channel=False, norm=False):
"""Imshow for Tensor."""
if torch.is_tensor(inp):
inp = inp.numpy().transpose((1, 2, 0)) if swap_channel else inp.numpy()
else:
inp = inp.transpose((1, 2, 0)) if swap_channel else inp
if norm:
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.figure(figsize=fig_size)
if inp.shape[0] == 1:
plt.imshow(inp[0], cmap='gray')
else:
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.0001) # pause a bit so that plots are updated
def make_simple_grid(inp, padding=2, padding_value=1):
inp = torch.stack(inp, dim=0)
nmaps = inp.size(0)
height, width = inp.size(2), int(inp.size(3) + padding)
grid = inp.new(1, height, width * nmaps + padding).fill_(padding_value)
for i in range(nmaps):
grid.narrow(2, i * width + padding, width - padding).copy_(inp[i])
return grid
if __name__ == '__main__':
inp = [torch.randn(1, 5, 5), torch.randn(1, 5, 5)]
out = make_simple_grid(inp)
print(out.size())
    plot_image(out, (4, 4))  # plot_image requires an explicit fig_size
| [
"[email protected]"
] | |
01911c7523c4459f34569d28aa130a9361835017 | 00978681ffb1ece4342532fa0071d6d5427e30f8 | /Tree Hierarchy/main.py | 028c787d69c031c1d8d2b6af9ad2fd434abbb9da | [] | no_license | sounboul/ICD-11-Visualizer | f240f1ef1c9e53548bc58955ebffed2bc1a21aff | 80bba937ef877d0e3139c34c22ea2753a361bd56 | refs/heads/master | 2022-09-04T06:58:52.108896 | 2020-05-28T16:48:30 | 2020-05-28T16:48:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,661 | py | from ICD11 import release_data as database
from pprint import pprint
import networkx as nx
import matplotlib.pyplot as plt
from networkx.drawing.nx_agraph import write_dot, graphviz_layout
master = {}
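# `master` maps each node's human-readable title to the list of its children's
# titles; it is filled layer by layer below and later flattened into
# (parent, child) edges for networkx.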
def get_title(icd11_code): # this returns a string title
return database(str(icd11_code))["title"]["@value"]
def get_title_id(icd11_code): # this returns a string id
return database(str(icd11_code))["@id"][45:]
def get_child_id(icd11_code): # this returns a child list of ICD11 codes
if "child" in database(str(icd11_code)): # reduce time by getting rid of .keys()
temp_list = []
codes = database(str(icd11_code))["child"]
for i in codes:
if i[45:].isdigit(): # stops taking in other/unspecified
temp_list.append(i[45:])
return temp_list
def get_child_name(icd11_code): # this returns a child list of ICD11 names
if "child" in database(str(icd11_code)): # reduce time by getting rid of .keys()
temp_list = []
for i in get_child_id(icd11_code):
temp_list.append(database(str(i))["title"]["@value"])
return temp_list
def get_parent_id(icd11_code):
if "parent" in database(str(icd11_code)): # reduce time by getting rid of .keys()
return database(str(icd11_code))["parent"][0][45:]
def master_dict_initializer(icd11_code):
id_only = {}
for i in get_child_id(icd11_code):
master[get_title(i)] = get_child_name(i)
id_only[get_title_id(i)] = get_child_id(i)
return id_only
def increase_layer(dictionary):
values = list(dictionary.values())
hidden = {}
for i in values:
if i is None:
continue
else:
for q in range(len(i)):
master[get_title(i[q])] = get_child_name(i[q])
hidden[get_title(i[q])] = get_child_id(i[q])
return hidden
def tuple_creator(start_code):
tuple_list = []
for k, v in master.items():
if v is not None:
for i in range(len(v)):
tuple_list.append((k, v[i]))
return tuple_list
def graph_visual(start_code, layers):
dict_ = master_dict_initializer(str(start_code))
for i in range(layers):
new_dict = increase_layer(dict_)
dict_ = new_dict
edges = tuple_creator(str(start_code))
graph = nx.DiGraph()
graph.add_nodes_from(master.keys())
graph.add_edges_from(edges)
write_dot(graph, 'test.dot')
plt.figure(figsize=(50, 50))
pos = graphviz_layout(graph, prog='dot')
nx.draw(graph, pos, with_labels=True, arrows=True)
plt.savefig('Example 4.png')
graph_visual(426429380, 4)
| [
"[email protected]"
] | |
c9e1ff0e4ec36697558947f4a2e18309922cc088 | 6135140b0c48f189c5672858b9d09732f178c829 | /proj3/akeelah/testeditdist.py | 3c8ba45c45948a74bc2cd23102be3fda648ce010 | [] | no_license | jw-develop/cs384-self | 8b23fcd39dd78e9a57876b7ac35452f962083ef2 | 9002d8ca254cc43ba9df7faab2df77167ca007f7 | refs/heads/master | 2020-05-21T01:30:57.870603 | 2019-05-09T19:18:08 | 2019-05-09T19:18:08 | 185,858,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | '''
Created on Oct 20, 2015
@author: thomasvandrunen
'''
import sys
from editdist import edit_distance
source_word = sys.argv[1]
target_word = sys.argv[2]
print(edit_distance(source_word, target_word, 25))
"[email protected]"
] | |
d3cd6c302249bc0a45cafb3e724725c383462017 | 6ce00e75df0425405568e6d3a1b27090f9968555 | /utils/labelme2csv.py | 05fbc6f5c8a6db40c322ec854e526c51d383cc4c | [] | no_license | XiaokangLei/img_utils | b48b7437980705569926c61811b67afdd6f8999a | 94c89bd286e10e678eed7ffdeea446445b4ed1cb | refs/heads/master | 2023-06-23T17:26:44.226676 | 2021-07-26T09:46:34 | 2021-07-26T09:46:34 | 368,769,922 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,546 | py | '''
Author: leixk_ai
Date: 2021-05-19 14:25:58
LastEditTime: 2021-06-08 15:26:01
LastEditors: Please set LastEditors
Description: 将labelme生成的json文件转换成csv格式(x1, y1, x2, y2, x3, y3, x4, y4, label)的数据
FilePath: /img_utils/utils/labelme2csv.py
'''
import os
import json
import shutil
from argparse import ArgumentParser
def main():
# 接收传参
parser = ArgumentParser()
parser.add_argument('--in_dir', type=str, default='E:/fsdownload/fsdownload/bl_field_ann/high', help="labelme标注数据所在路径,图像和对应的json文件在同一路径,文件名前缀相同")
parser.add_argument('--out_dir', type=str, default='E:/fsdownload/fsdownload/bl_field_ann/high', help="数据输出目录")
parser.add_argument("--copy", type=int, default=0, help="是否复制图片到输出目录") # 1代表复制
args = parser.parse_args()
# 创建文件输出路径
if not os.path.isdir(args.out_dir):
os.makedirs(args.out_dir)
# 搜索输入路径下的json文件
json_files = [name for name in os.listdir(args.in_dir) if name.endswith('.json')]
# 遍历所有json文件
for name in json_files:
# 读取json文件信息
print(name)
try:
d = json.loads(open(os.path.join(args.in_dir, name), 'r').read())
except UnicodeDecodeError:
d = json.loads(open(os.path.join(args.in_dir, name), 'r', encoding='gbk').read())
stem = name[:-5]
shapes = d['shapes']
# 判断该图像是否没标,json文件没有shapes数据
if len(shapes) == 0:
print("%s shapes is null, skiped" % name)
continue
# 创建.txt文件,输出标注信息
with open(os.path.join(args.out_dir, stem + '.txt'), 'w', encoding='utf-8') as f:
for box in shapes:
label = box['label']
points = box['points']
if len(points) == 2:
x1, y1 = points[0]
x2, y2 = points[1]
if x1 < x2 and y1 < y2:
points = [(x1, y1), (x2, y1), (x2, y2), (x1, y2)]
elif x1 > x2 and y1 < y2:
points = [(x1, y1), (x1, y2), (x2, y2), (x2, y1)]
elif x1 > x2 and y1 > y2:
points = [(x1, y1), (x2, y1), (x2, y2), (x1, y2)]
else:
points = [(x1, y1), (x1, y2), (x2, y2), (x2, y1)]
elif len(points) == 4:
# xmin = min(points[0][0],points[1][0],points[2][0],points[3][0])
# xmax = max(points[0][0],points[1][0],points[2][0],points[3][0])
# # ymin = min(points[0][1],points[1][1],points[2][1],points[3][1])
# # ymax = max(points[0][1],points[1][1],points[2][1],points[3][1])
# points = [(xmin, points[0][1]), (xmax, points[1][1]), (xmax, points[2][1]), (xmin, points[3][1])]
pass
else:
                print('=> {}: unexpected number of points.'.format(name))
continue
loc = [a for pt in points for a in pt]
loc = list(map(str, loc))
loc.append(label)
f.write(','.join(loc) + '\n')
        # Copy the image to the output directory
if args.copy:
shutil.copyfile(os.path.join(args.in_dir, d['imagePath']), os.path.join(args.out_dir, d['imagePath']))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
fe7101167f9d2763f0fed8f480827e333e0fa2eb | 8d9790a41d2b6422f47b3c199ff271d148ed3757 | /napari/layers/image/experimental/octree_image.py | 94f3a24938c40aec8823fef85386f81f5056753a | [
"BSD-3-Clause"
] | permissive | michalk8/napari | de62056b51ce6643e1a97f3830d56abcd8280d42 | 1d784cf8373495d9591594b6dd6ac479d5566ed1 | refs/heads/master | 2023-01-31T16:53:17.441082 | 2020-12-12T07:20:35 | 2020-12-12T07:20:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,844 | py | """OctreeImage class.
An eventual replacement for Image that combines single-scale and
chunked/tiled multi-scale into one implementation.
"""
import logging
from typing import List, Set
import numpy as np
from ....components.experimental.chunk import ChunkRequest, LayerRef
from ....utils.events import Event
from ..image import Image
from ._octree_multiscale_slice import OctreeMultiscaleSlice, OctreeView
from .octree_chunk import OctreeChunk, OctreeChunkKey
from .octree_intersection import OctreeIntersection
from .octree_level import OctreeLevelInfo
from .octree_util import OctreeDisplayOptions, SliceConfig
LOGGER = logging.getLogger("napari.async.octree")
class OctreeImage(Image):
"""OctreeImage layer.
Experimental variant of Image that renders using an octree. For 2D
images the octree is really just a quadtree. For 3D volumes it will be
a real octree. This class is intended to eventually fully replace the
existing Image class.
Background
----------
The original Image class handled single-scale and multi-scale images,
but they were handled quite differently. And its multi-scale did not
use chunks or tiles. It worked well locally, but was basically unusable
for remote or high latency data.
OctreeImage always uses chunk/tiles. Today those tiles are always
"small". However, as a special case, if an image is smaller than the
max texture size, we could some day allow OctreeImage to set its tile
size equal to that image size.
    At that point "small" images would be drawn with a single texture,
    the same way the old Image class drew them. So it would be very
efficient.
But larger images would have multiple chunks/tiles and multiple levels.
Unlike the original Image class multi-scale, the chunks/tiles mean we
only have to incrementally load more data as the user pans and zooms.
The goal is OctreeImage gets renamed to just Image and it efficiently
    handles images of any size. It may take a while to get there.
Attributes
----------
_view : OctreeView
Describes a view frustum which implies what portion of the OctreeImage
needs to be draw.
_slice : OctreeMultiscaleSlice
When _set_view_slice() is called we create a OctreeMultiscaleSlice()
that's looking at some specific slice of the data.
While the Image._slice was the data that was drawn on the screen,
an OctreeMultiscaleSlice contains a full Octree. The OctreeImage
visuals (VispyTiledImageLayer and TiledImageVisual) draw only
the portion for OctreeImage which is visible in the OctreeView.
_display : OctreeDisplayOptions
Settings for how we draw the octree, such as tile size.
"""
def __init__(self, *args, **kwargs):
self._view: OctreeView = None
self._slice: OctreeMultiscaleSlice = None
self._intersection: OctreeIntersection = None
self._display = OctreeDisplayOptions()
# super().__init__ will call our _set_view_slice() which is kind
        # of annoying since we aren't fully constructed yet.
super().__init__(*args, **kwargs)
# Call after super().__init__
self.events.add(octree_level=Event, tile_size=Event)
        # TODO_OCTREE: this is a hack where we assign OctreeDisplayOptions
        # this event after super().__init__(). Will clean up soon.
self._display.loaded_event = self.events.loaded
def _get_value(self):
"""Override Image._get_value()."""
return (0, (0, 0)) # TODO_OCTREE: need to implement this.
@property
def loaded(self) -> bool:
"""Has the data for this layer been loaded yet.
As far as the visual system is concerned we are always "loaded" in
that we can always be drawn. Because our VispyTiledImageLayer can
always be drawn. Even if no chunk/tiles are loaded yet.
"""
return True
@property
def _empty(self) -> bool:
"""Is this layer completely empty so it can't be drawn.
As with self.loaded, we are never really empty. Our VispyTiledImageLayer
can always be drawn. Even if there is nothing to draw.
"""
return False
def _update_thumbnail(self):
# TODO_OCTREE: replace Image._update_thumbnail with nothing for
# the moment until we decide how to do thumbnail.
pass
@property
def _data_view(self):
"""Viewable image for the current slice. (compatibility)"""
# Override Image._data_view
return np.zeros((64, 64, 3)) # fake: does octree need this?
@property
def display(self) -> OctreeDisplayOptions:
"""The display options for this octree image layer."""
return self._display
@property
def tile_size(self) -> int:
"""Return the edge length of single tile, for example 256.
Return
------
int
The edge length of a single tile.
"""
return self._display.tile_size
@tile_size.setter
def tile_size(self, tile_size: int) -> None:
"""Set new tile_size.
Parameters
----------
tile_size : int
The new tile size.
"""
self._display.tile_size = tile_size
self.events.tile_size()
self._slice = None # For now must explicitly delete it
self.refresh() # Creates a new slice.
@property
def tile_shape(self) -> tuple:
"""Return the shape of a single tile, for example 256x256x3.
Return
------
tuple
The shape of a single tile.
"""
# TODO_OCTREE: Must be an easier way to get this shape based on
# information already stored in Image class?
if self.multiscale:
init_shape = self.data[0].shape
else:
init_shape = self.data.shape
tile_shape = (self.tile_size, self.tile_size)
if self.rgb:
# Add the color dimension (usually 3 or 4)
tile_shape += (init_shape[-1],)
return tile_shape
@property
def slice_config(self) -> SliceConfig:
"""Return information about the current octree.
Return
------
SliceConfig
Configuration information.
"""
if self._slice is None:
return None
return self._slice.slice_config
@property
def octree_level_info(self) -> OctreeLevelInfo:
"""Return information about the current level of the current octree.
Returns
-------
OctreeLevelInfo
Information about the current octree level.
"""
if self._slice is None:
return None
return self._slice.octree_level_info
@property
def data_level(self) -> int:
"""Current level of multiscale.
The base full resolution image is level 0. The highest and coarsest
level usually contains only a single tile.
"""
return self._data_level
@data_level.setter
def data_level(self, level: int) -> None:
"""Set the octree level we should be displaying.
Parameters
----------
level : int
Display this octree level.
"""
if self._data_level == level:
return # It didn't change.
# Quickly check for less than 0. We can't check for a level
# that's too high because the Octree might have extended levels?
if level < 0:
raise ValueError(f"Octree level {level} is negative.")
self._data_level = level
self.events.octree_level()
if self._slice is not None:
# This will raise if the level is too high.
self._slice.octree_level = level
self.events.loaded() # redraw
@property
def num_octree_levels(self) -> int:
"""Return the total number of octree levels.
Return
------
int
The number of octree levels.
"""
return len(self.data) # Multiscale
def _new_empty_slice(self) -> None:
"""Initialize the current slice to an empty image.
        Overrides Image._new_empty_slice() and does nothing because we don't
need an empty slice. We create self._slice when
self._set_view_slice() is called.
The empty slice was needed to satisfy the old VispyImageLayer that
used a single ImageVisual. But OctreeImage is drawn with
VispyTiledImageVisual. It does not need an empty image. It gets
chunks from our self.drawable_chunks property, and it will just draw
nothing if that returns an empty list.
When OctreeImage become the only image class, this can go away.
"""
def get_drawable_chunks(
self, drawn_chunk_set: Set[OctreeChunkKey]
) -> List[OctreeChunk]:
"""Get the chunks in the current slice which are drawable.
The visual calls this and then draws what we send it.
The call to get_intersection() will chose the appropriate level of
the octree to intersect, and then return all the chunks within the
intersection with that level.
These are the "ideal" chunks because they are at the level whose
resolution best matches the current screen resolution.
Drawing chunks at a lower level than this will work fine, but it's
a waste in that those chunks will just be downsampled by the card.
You won't see any "extra" resolution at all. The card can do this
super fast, so the issue not such much speed as it is RAM and VRAM.
For example, suppose we want to draw 40 ideal chunks at level N,
and the chunks are (256, 256, 3) with dtype uint8. That's around
8MB.
If instead we draw lower levels than the ideal, the number of
chunks and storage goes up quickly:
Level (N - 1) is 160 chunks = 32M
Level (N - 2) is 640 chunks = 126M
Level (N - 3) is 2560 chunks = 503M
In the opposite direction, drawing chunks from a higher, the number
of chunks and storage goes down quickly. The only issue there is
visual quality, the imagery might look blurry.
Parameters
-----------
drawn_chunk_set : Set[OctreeChunkKey]
The chunks that are currently being drawn by the visual.
Return
------
List[OctreeChunk]
The drawable chunks.
"""
if self._slice is None or self._view is None:
return [] # There is nothing to draw.
# Get the current intersection and save it off.
self._intersection = self._slice.get_intersection(self._view)
if self._intersection is None:
return [] # No chunks to draw.
# Get the ideal chunks. These are the chunks at the preferred
# resolution. The ones we ideally want to draw once they are in RAM
# and in VRAM. When all loading is done, we will draw all the ideal
# chunks.
ideal_chunks = self._intersection.get_chunks(create=True)
# If we are seting the data level level automatically, then update
# our level to match what was chosen for the intersection.
if self._view.auto_level:
self._data_level = self._intersection.level.info.level_index
# The loader will initiate loads on any ideal chunks which are not
# yet in memory. And it will return the chunks we should draw. The
# chunks might be ideal chunks, if they are in memory, but they
# might be chunks from higher or lower levels in the octree. In
        # general we try to cover the view with the "best available"
# data.
return self._slice.loader.get_drawable_chunks(
drawn_chunk_set, ideal_chunks
)
def _update_draw(
self, scale_factor, corner_pixels, shape_threshold
) -> None:
"""Override Layer._update_draw completely.
The base Layer._update_draw does stuff for the legacy multi-scale
that we don't want. And it calls refresh() which we don't need.
We create our OctreeView() here which has the corners in it.
Parameters
----------
scale_factor : float
Scale factor going from canvas to world coordinates.
corner_pixels : array
Coordinates of the top-left and bottom-right canvas pixels in the
world coordinates.
shape_threshold : tuple
Requested shape of field of view in data coordinates.
"""
# Compute our 2D corners from the incoming n-d corner_pixels
data_corners = self._transforms[1:].simplified.inverse(corner_pixels)
corners = data_corners[:, self._dims_displayed]
        # Update our self._view to capture the state of things right
        # before we are drawn. Our self._view will be used by our
# drawable_chunks() method.
self._view = OctreeView(corners, shape_threshold, self.display)
def get_intersection(self) -> OctreeIntersection:
"""The the interesection between the current view and the octree.
Returns
-------
OctreeIntersection
The intersection between the current view and the octree.
"""
if self._slice is None:
return None
return self._slice.get_intersection(self._view)
def _outside_data_range(self, indices) -> bool:
"""Return True if requested slice is outside of data range.
Return
------
bool
True if requested slice is outside data range.
"""
extent = self._extent_data
not_disp = self._dims_not_displayed
return np.any(
np.less(
[indices[ax] for ax in not_disp],
[extent[0, ax] for ax in not_disp],
)
) or np.any(
np.greater(
[indices[ax] for ax in not_disp],
[extent[1, ax] for ax in not_disp],
)
)
def _set_view_slice(self):
"""Set the view given the indices to slice with.
This replaces Image._set_view_slice() entirely. The hope is eventually
this class OctreeImage becomes Image. And the non-tiled multiscale
logic in Image._set_view_slice goes away entirely.
"""
if self._slice is not None:
# For now bail out so we don't nuke an existing slice which
# contains an existing octree. Soon we'll need to figure out
# if we are really changing slices (and need a new octree).
return
indices = np.array(self._slice_indices)
if self._outside_data_range(indices):
return
# Indices to get at the data we are currently viewing.
indices = self._get_slice_indices()
# TODO_OCTREE: easier way to do this?
base_shape = self.data[0].shape
base_shape_2d = [base_shape[i] for i in self._dims_displayed]
slice_config = SliceConfig(
base_shape_2d, len(self.data), self._display.tile_size
)
# OctreeMultiscaleSlice wants all the levels, but only the dimensions
# of each level that we are currently viewing.
slice_data = [level_data[indices] for level_data in self.data]
        # Create a layer_ref that matches the current indices and slice.
indices = self._get_slice_indices()
layer_ref = LayerRef.create_from_layer(self, indices)
# Create the slice, it will create the actual Octree.
self._slice = OctreeMultiscaleSlice(
slice_data, layer_ref, slice_config, self._raw_to_displayed,
)
def _get_slice_indices(self) -> tuple:
"""Get the slice indices including possible depth for RGB."""
indices = tuple(self._slice_indices)
if self.rgb:
indices += (slice(None),)
return indices
def on_chunk_loaded(self, request: ChunkRequest) -> None:
"""An asynchronous ChunkRequest was loaded.
Override Image.on_chunk_loaded() fully.
Parameters
----------
request : ChunkRequest
This request was loaded.
"""
# Pass it to the slice, it will insert the newly loaded data into
# the OctreeChunk at the right location.
if self._slice.on_chunk_loaded(request):
            self.events.loaded()  # Redraw with the new chunk.
@property
def remote_messages(self) -> dict:
"""Messages we should send to remote clients."""
if self._intersection is None:
return {}
return {
"tile_state": self._intersection.tile_state,
"tile_config": self._intersection.tile_config,
}
| [
"[email protected]"
] | |
4df80986be4944f55373874a0764e8385fc5047d | 647efb68e1e7ec6614f86d57302f2e6b3fea6b0a | /src/constants.py | 0f0172ac017fe79d07e0088c4e769871b2192a30 | [] | no_license | Beavl/NowPension | 1d6cacb467f3a2f470626138a14789ca17f0090e | 5fcfec5aacec89573a77416f79e7855d39fa5d6f | refs/heads/master | 2023-01-30T17:48:45.023913 | 2020-12-09T12:31:56 | 2020-12-09T12:31:56 | 319,949,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | """
Script with the common constants of the module
"""
SEPARATOR=' '
END_LINE='.' | [
"[email protected]"
] | |
967b2f623b1738ca567c502efd894d42447223e1 | c38a2f25d2654dd5d807a7dddd9ae569a7bd5857 | /factorial.py | 4b1c5e01bc203b350e9c0c53376535878d1f302b | [] | no_license | pranjalgupt/Python-lab | 253588e8e6a21b48cc987745b5d59429cda0dd12 | e411d3077c2bfaa1974da2eee9c6834f3e1bbf77 | refs/heads/main | 2023-03-15T01:04:17.817156 | 2021-03-22T10:09:37 | 2021-03-22T10:09:37 | 347,915,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | n = int(input())
p = 1
while n>0:
p*=n
n = n-1
print(p)
| [
"[email protected]"
] | |
da4709175da480d1d754b645e999b98d051d37b1 | 8af8df74ac5ef039481e78ada357a7f6ef31062c | /app/handlers/handle_internal_donation_email.py | d951b28e5d65c14c47649687a1bc0d6cc9d1b5d3 | [] | no_license | KevynKelso/markhor-do | 987816b201f1f7bafda6cf594d18c15a1cbd8562 | b40bea85effa31d91f45b6d3622bba266d5e99d4 | refs/heads/main | 2023-02-15T04:26:27.557682 | 2021-01-05T19:47:36 | 2021-01-05T19:47:36 | 327,102,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,590 | py | from requests import HTTPError
from apis import sendToPymail
def handleInternalDonationEmail(request_data, items_index):
try:
in_memory_slash_honor_of = None
if len(request_data['content']['items'][items_index]['customFields']) == 1:
in_memory_slash_honor_of = (
request_data['content']['items'][items_index]['customFields'][0]['value']
)
data = {
"sendto": ['Lauren Novo', 'Aleigh Raffelson', 'Valerie Maltese'],
"form_name": 'Internal Donation',
"invoice_number": request_data['content']['invoiceNumber'],
"donation_type": request_data['content']['items'][items_index]['name'],
"name": request_data['content']['user']['billingAddressName'],
"email": request_data['content']['user']['email'],
"in_memory_slash_honor_of": in_memory_slash_honor_of,
"total_price": request_data['content']['items'][items_index]['totalPrice'],
"billing_name": request_data['content']['billingAddressName'],
"billing_company_name": request_data['content']['billingAddressCompanyName'],
"billing_address1": request_data['content']['billingAddressAddress1'],
"billing_address2": request_data['content']['billingAddressAddress2'],
"billing_city": request_data['content']['billingAddressCity'],
"billing_state": request_data['content']['billingAddressProvince'],
"billing_country": request_data['content']['billingAddressCountry'],
"billing_zip": request_data['content']['billingAddressPostalCode'],
"billing_phone": request_data['content']['billingAddressPhone'],
"shipping_name": request_data['content']['shippingAddressName'],
"shipping_company_name": request_data['content']['shippingAddressCompanyName'],
"shipping_address1": request_data['content']['shippingAddressAddress1'],
"shipping_address2": request_data['content']['shippingAddressAddress2'],
"shipping_city": request_data['content']['shippingAddressCity'],
"shipping_state": request_data['content']['shippingAddressProvince'],
"shipping_country": request_data['content']['shippingAddressCountry'],
"shipping_zip": request_data['content']['shippingAddressPostalCode'],
"shipping_phone": request_data['content']['shippingAddressPhone']
}
response = sendToPymail(data)
return response
except (TypeError, HTTPError, KeyError):
raise
| [
"[email protected]"
] | |
884517c264aa7b9fe8d3adf58769051fffefc1e0 | 0e2bac9b62d41575632fcd9dfe115a72a5fd9412 | /handler.py | 4034602e8171ca47cf74cf672deea5f378f5c648 | [] | no_license | mayingming/serverlessbot | 7d444aeae22e7e13838cbd3c7796d23f36a52c45 | 5dc35fe1af812010b24a9050cc2dc5d7eac6e7fa | refs/heads/main | 2023-01-07T21:10:57.452415 | 2020-11-04T08:12:43 | 2020-11-04T08:12:43 | 304,585,295 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,056 | py | import json
import tweepy
import config
import csv
import os
import boto3
import botocore
# Send twitter every 5 minutes
def tweet(event, context):
# Authenticate to Twitter
auth = tweepy.OAuthHandler(config.CONSUMER_KEY, config.CONSUMER_SECRET)
auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)
# Create API object
api = tweepy.API(auth)
    # Post the first message and delete it from the message file.
    # If no message is left, report an error.
s3 = boto3.resource("s3")
s3_client = boto3.client('s3')
bucket_name = "tbot-bucket"
bucket = s3.Bucket(bucket_name)
newname = '/tmp/'+config.TEMP_FILE_TO_DELETE
try:
s3_client.download_file(bucket_name, config.TEMP_FILE, newname)
# The object does exist.
with open(newname, 'r') as readfile, open('/tmp/'+config.TEMP_FILE, 'w') as writefile:
reader = csv.reader(readfile)
mlist = list(reader)
writer = csv.writer(writefile)
if len(mlist) > 1:
writer = csv.writer(writefile)
for i, row in enumerate(mlist):
if i == 1:
api.update_status(row[0])
else:
writer.writerow(row)
body = {
"message": "twitter posted",
"input": event
}
response = {
"statusCode": 200,
"body": json.dumps(body)
}
else:
error = {
"message": "No message left, twitter post failed.",
"input": event
}
response = {
"statusCode": 400,
"error": json.dumps(error)
}
readfile.close()
writefile.close()
bucket.upload_file('/tmp/'+config.TEMP_FILE, config.TEMP_FILE)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
# The object does not exist.
with open(config.MESSAGE_FILE, 'r') as readfile, open('/tmp/'+config.TEMP_FILE, 'w') as writefile:
reader = csv.reader(readfile)
mlist = list(reader)
writer = csv.writer(writefile)
for i, row in enumerate(mlist):
if i == 1:
api.update_status(row[0])
else:
writer.writerow(row)
body = {
"message": "twitter posted",
"input": event
}
response = {
"statusCode": 200,
"body": json.dumps(body)
}
readfile.close()
writefile.close()
bucket.upload_file('/tmp/'+config.TEMP_FILE, config.TEMP_FILE)
else:
# Something else has gone wrong.
raise
return response
| [
"[email protected]"
] | |
bfda84e6ba0476ea5cfa870d4a9d4995f1c5e155 | 137cba430172ebb225a411cc59cf01c2c55cc55e | /src/python/modules/TorchScript/torch_multigammaln.py | c550d249e3839fe547df9ee0ec1502b99eded09b | [
"MIT"
] | permissive | microsoft/ADBench | 56bce08eb67d63b0ed5019526ecce710987afad1 | 38cb7931303a830c3700ca36ba9520868327ac87 | refs/heads/master | 2023-06-21T12:38:53.426980 | 2022-11-28T19:14:19 | 2022-11-28T19:14:19 | 38,539,834 | 77 | 30 | MIT | 2023-06-20T09:41:41 | 2015-07-04T16:16:32 | C++ | UTF-8 | Python | false | false | 1,114 | py | # TorchScript adapation
# https://github.com/scipy/scipy/blob/c1372d8aa90a73d8a52f135529293ff4edb98fc8/scipy/special/spfun_stats.py
import numpy as np
# from scipy.special import gammaln as loggam
import torch
import math
@torch.jit.script
def multigammaln(a, d: int):
# Python builtin <built-in function array> is currently not supported in Torchscript:
# https://github.com/pytorch/pytorch/issues/32268
# a = np.asarray(a)
# if not np.isscalar(d) or (np.floor(d) != d):
# raise ValueError("d should be a positive integer (dimension)")
# if np.any(a <= 0.5 * (d - 1)):
# raise ValueError("condition a (%f) > 0.5 * (d-1) (%f) not met"
# % (a, 0.5 * (d-1)))
# res = (d * (d-1) * 0.25) * np.log(np.pi)
# res += np.sum(loggam([(a - (j - 1.)/2) for j in range(1, d+1)]), axis=0)
# Need to check relative performance
res = (d * (d - 1) * 0.25) * math.log(math.pi)
res += torch.sum(
torch.tensor(
[math.lgamma(float(a) - ((j - 1.0) / 2)) for j in range(1, d + 1)]
),
dim=0,
)
return res
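if __name__ == "__main__":
    # Minimal sanity check (a sketch; assumes SciPy is installed) comparing
    # this TorchScript port against scipy.special.multigammaln.
    from scipy.special import multigammaln as scipy_multigammaln
    a, d = torch.tensor(5.0), 3
    print(float(multigammaln(a, d)))   # TorchScript value
    print(scipy_multigammaln(5.0, d))  # SciPy reference; the two should agree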
| [
"[email protected]"
] | |
5f2ec4a1caf26dd0c6854ddb62224c95d850cf7a | 93376fdda5911125ce0a72c3ba6bd0c5b81d2fd0 | /venv/bin/flask | 0fecd1d9aa4ec87b97362e6b31ea8968574e1fbe | [] | no_license | sheffley201/shopping-cart | 508c353c13e2467cb7395a28da71aadc40b85b19 | e63d663df31ec146e9c1c9cea707705eb012f798 | refs/heads/master | 2023-03-24T02:19:27.386383 | 2021-03-26T17:24:48 | 2021-03-26T17:24:48 | 350,412,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | #!/Users/spencerheffley/code/shopping-cart/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
cbac09e3ab3a33ec7effb97c5d4a07b0f8e001f9 | a3fab9eca1c26f35bd30e1b029b64ee1ad4fc8cc | /SocialTennis_proj/wsgi.py | 6d3843f851f60966d0a89d9d701c42e41b0ad2d7 | [] | no_license | dansgithubuser/SocialTennis | cf86110eef72c2818692673177127f7554cf5fc6 | cd84d996bfae1a515dd62ff658002ff5fc501aa6 | refs/heads/master | 2021-06-10T22:46:03.117970 | 2021-04-08T19:04:09 | 2021-04-08T19:04:09 | 171,059,299 | 0 | 1 | null | 2021-06-06T19:06:24 | 2019-02-16T22:38:24 | Python | UTF-8 | Python | false | false | 411 | py | """
WSGI config for SocialTennis_proj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SocialTennis_proj.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
1f3d6d8617a2f3652d4aa9fd89a36a0a52a2dedf | 3b91ec3304ca0c177ef0cea12097474668a1666d | /easy_rmg_model/rmg2arc/sensitivity.py | 595d83b7d90510bd78b67af33f7e06b0edc7c599 | [
"MIT"
] | permissive | Roolthasiva/easy_rmg_model | d0cffb77c96e721f9549f11d191796f4fcb55ac3 | 194321629e41495482e1ccffbfb15958bd90a02a | refs/heads/master | 2023-07-08T20:59:11.716927 | 2021-08-19T18:54:42 | 2021-08-19T18:54:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,497 | py | #!/usr/bin/env python3
# encoding: utf-8
"""
The toolbox for sensitivity analysis related tasks
"""
import os
from typing import Union
import pandas as pd
from easy_rmg_model.common import get_files_by_regex
def find_sensitivity_results(path: str) -> list:
"""
    Find all of the sensitivity analysis results in the given directory,
    based on csv files whose names start with 'sensitivity'.
    Args:
        path (str): The path from where to find sensitivity analysis results.
    Returns:
        list: A list of paths to sensitivity analysis csv files.
"""
if not os.path.isdir(path):
raise ValueError(
            f'Not a valid path ({path}); it needs to be a directory path.')
sensitivities = get_files_by_regex(path, r"^sensitivity.+\.csv$")
return sensitivities
def get_spc_label_from_sensitivity(file: str, N: int = 50) -> list:
"""
    Get the list of the most sensitive species from a single sensitivity analysis
Args:
file (str): a sensitivity analysis csv file
N (int): the upperbound number of species to be extracted
Returns:
list: a list contains species labels
"""
# Open the sensitivity result in DataFrame
df = pd.read_csv(file)
    # Find the most sensitive species
max_sensitivity = []
for header in df.columns:
if 'dG' in header:
label = header.split('dG')[1][1:-1]
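            # e.g. an assumed header of the form "dln[X]/dG[SPC]" yields the
            # label "SPC": take the text after 'dG' and strip the surrounding
            # bracket characters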
max_sensitivity.append((label, abs(df[header]).max()))
sorted_labels = sorted(max_sensitivity, key=lambda tup: tup[1])
label_list = [tup[0] for tup in sorted_labels[:min(len(sorted_labels), N)]]
return label_list
def get_spc_info_from_sensitivities(files: Union[str, list],
N: int = 50) -> dict:
"""
    Get the list of species contained in multiple sensitivity analyses
Args:
files (Union[str, list]): a list contains the paths of sensitivity
analysis csv files
N (int): the upperbound number of species to be extracted in each SA
Returns:
dict: a dictionary contains species information (labels)
"""
if isinstance(files, str):
files = [files]
label_list = []
for sa_file in files:
label_list += get_spc_label_from_sensitivity(sa_file, N)
# remove duplicates
label_list = list(set(label_list))
return {label: {'label': label} for label in label_list}
| [
"[email protected]"
] | |
218fef243d24f0c570240a111af436b200b31a0b | 96c8dad8103ab34636d562a0d0284419dc1c3332 | /dfsbfs/dfs1.py | f95a4bd788b790a283cf8ea026d24130a6881587 | [] | no_license | misombae/algorithm | 67d052b6e29f50da9383e698fa44ec76edfc17d8 | 4cf38a58d736298955814349b259e45dd86ef3dd | refs/heads/master | 2023-06-06T06:36:56.271585 | 2021-07-06T08:41:14 | 2021-07-06T08:41:14 | 312,466,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | def dfs(graph, v, visited):
visited[v] = True
print(v, end=' ')
for i in graph[v]:
if not visited[i]:
dfs(graph, i, visited)
graph = [
[],
[2,3,8],
[1,7],
[1,4,5],
[3,5],
[3,4],
[7],
[2,6,8],
[1,7]
]
visited = [False] * 9
dfs(graph, 1, visited) | [
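# Expected traversal order for this graph starting from node 1: 1 2 7 6 8 3 4 5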
"[email protected]"
] | |
0b32ee51da6520203c238f484f0612348ef855f4 | 07c25b0830a57105fa0e7f9864f740132267aace | /cuboid/Ae20a_Ae40a/rbc_simulation_cuboid2_4ext20_40/src/libs_dataset/cells_dataset.py | 4b821c4d457358e541b9ef9710d45d42eba85c96 | [] | no_license | katkaj/rbc_classification | d0486760e5fe04ba3c7c586e616d1253070f71ed | 21b1c80bc2e61ecdf5fe04fe8c66a93cb3f4c43d | refs/heads/master | 2023-04-05T09:16:07.450794 | 2021-04-15T10:22:19 | 2021-04-15T10:22:19 | 331,882,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,399 | py | import numpy
import torch
from .dats_load import *
class CellsDataset:
'''
create dataset for classification
usage :
1, load data, just calling constructor
@param training_files : list of paths to dats files
@param training_labels : list of class IDs, integer numbers, from range <0, classes_count)
@param testing_files : list of paths to dats files
@param testing_labels : list of class IDs, integer numbers, from range <0, classes_count)
@param classes_count : number of classes
@param window_size : time sequence window size
@param cols : list which colums will be readed from dats files
    @param augmentations_count : count of different augmentations for training data
2, obtain input x, and target output by calling :
x, y = dataset.get_training_batch()
x.shape = (batch_size, len(cols), window_size)
y.shape = (batch_size, classes_count)
note : for classes y, one-hot encoding is used
note : real dataset is too big to hold in RAM (I have only 32G)
    that's why the dataset is created at runtime
'''
def __init__(self, training_files, training_labels, testing_files, testing_labels, classes_count, window_size = 1024, cols = [1, 2, 3, 7, 8, 10, 11, 13, 14, 16, 17, 18, 19, 20, 21, 22, 23, 24], augmentations_count = 32):
self.width = window_size
self.channels = len(cols)
self.input_shape = (self.channels, self.width)
self.classes_count = classes_count
self.output_shape = (self.classes_count, )
self.augmentations_count = augmentations_count
self.training_dats = DatsLoad(training_files, cols = cols)
self.training_labels = training_labels
self.testing_dats = DatsLoad(testing_files, cols = cols)
self.testing_labels = testing_labels
self.training_count = (1 + self.augmentations_count)*self.training_dats.data.shape[0]*self.training_dats.data.shape[1]
self.testing_count = self.testing_dats.data.shape[0]*self.testing_dats.data.shape[1]
print("\n\n\n\n")
print("dataset summary : \n")
print("training_dats shape = ", self.training_dats.data.shape)
print("testing_dats shape = ", self.testing_dats.data.shape)
print("training_count = ", self.get_training_count())
print("testing_count = ", self.get_testing_count())
print("channels_count = ", self.channels)
print("sequence_length = ", self.width)
print("classes_count = ", self.classes_count)
x, y = self.get_training_batch(batch_size=32)
print("batch(32) tensor shape = ", x.shape, y.shape)
print("\n\n\n\n")
def get_training_count(self):
return self.training_count
def get_testing_count(self):
return self.testing_count
def get_training_batch(self, batch_size = 128):
return self._get_batch(self.training_dats.data, self.training_labels, batch_size, agumentation = True)
def get_testing_batch(self, batch_size = 128):
return self._get_batch(self.testing_dats.data, self.testing_labels, batch_size)
def _get_batch(self, x, y, batch_size = 128, agumentation = False):
cells_count = x.shape[0]
time_steps = x.shape[1]
result_x = torch.zeros((batch_size, self.channels, self.width))
result_y = torch.zeros((batch_size, self.classes_count))
for i in range(batch_size):
cell_idx = numpy.random.randint(cells_count)
time_idx = numpy.random.randint(time_steps - self.width)
tmp = x[cell_idx][time_idx:time_idx + self.width]
tmp = tmp.transpose()
class_id = y[cell_idx]
result_x[i] = torch.from_numpy(tmp).float()
result_y[i][class_id] = 1.0
if agumentation:
result_x = self._augmentation(result_x)
return result_x, result_y
def _augmentation(self, x, gaussian_noise_level = 0.001, offset_noise_level = 1.0):
noise = gaussian_noise_level*torch.randn(x.shape)
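        # one uniform offset in [-1, 1) per (sample, channel), broadcast across
        # the time axis via unsqueeze/repeat, i.e. a constant baseline shift
        # for each channel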
offset_noise = 2.0*torch.rand((x.shape[0], x.shape[1])).unsqueeze(2).repeat(1, 1, x.shape[2]) - 1.0
x = x + noise + offset_noise_level*offset_noise
return x
if __name__ == "__main__":
path = "/Users/michal/dataset/cells_dataset/sim26/"
training_files = []
training_files.append(path + "rbc0_data_sim26.dat")
training_files.append(path + "rbc1_data_sim26.dat")
training_files.append(path + "rbc2_data_sim26.dat")
training_files.append(path + "rbc3_data_sim26.dat")
training_labels = []
training_labels.append(0)
training_labels.append(1)
training_labels.append(0)
training_labels.append(1)
testing_files = []
testing_files.append(path + "rbc0_data_sim26.dat")
testing_files.append(path + "rbc1_data_sim26.dat")
testing_files.append(path + "rbc2_data_sim26.dat")
testing_files.append(path + "rbc3_data_sim26.dat")
testing_labels = []
testing_labels.append(0)
testing_labels.append(1)
testing_labels.append(0)
testing_labels.append(1)
dataset = CellsDataset(training_files, training_labels, testing_files, testing_labels, classes_count = 2)
x, y = dataset.get_training_batch()
print(x.shape, y.shape)
| [
"[email protected]"
] | |
d60fd0cd20b5ebb9ad94e8a5ff23d7cf6b3a157d | 10ecee5ad6bbd1b53922ffcd0f88d047d3cad67c | /bullet.py | c7480f03732dbbb446e41923cff8919dbf0a3441 | [] | no_license | AlexeyNarush/Tankgame | 0e52384cfbd5c1be7b454d6b8eba316833708ce3 | a446bcb9335aa69d727207e8406627c5f48eaffc | refs/heads/master | 2021-06-25T22:31:33.650261 | 2021-01-15T11:30:31 | 2021-01-15T11:30:31 | 198,632,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,250 | py | import threading
import pygame
from color import *
import settings
# Main part of the class
class Bullet:
def __init__(self, x, y, tank, enemy, boxes, screen, grid):
dx = 0
dy = 0
if (tank.direct == 1):
dy = tank.reverse
if (tank.direct == 2):
dx = -tank.reverse
if (tank.direct == 3):
dy = -tank.reverse
if (tank.direct == 4):
dx = tank.reverse
x += 25
y += 25
x += dx * 25 + dx
y += dy * 25 + dy
self.X = x
self.Y = y
self.dx = dx
self.dy = dy
self.clock = pygame.time.Clock()
self.thread = threading.Thread(target=self.move, args=(tank, enemy, boxes, screen, grid))
self.thread.start()
# Function that makes bullet move
def move(self, tank, enemy, boxes, screen, grid):
while True:
pygame.draw.circle(screen,BLACK, (self.X, self.Y), 5)
grid.draw(screen)
pygame.display.update(pygame.Rect(self.X - 10, self.Y - 10, 20, 20))
if self.X < 0 or self.Y < 0 or self.X > 600 or self.Y > 600:
return
if (self.collision(enemy)):
if enemy.sheald == 0:
enemy.hp -= 1
threading.Thread(target=enemy.Sheald, args=()).start()
return
for i in boxes:
if (self.collision(i)):
if i.destroy() == True:
pygame.draw.rect(screen, BLACK, (i.X, i.Y, settings.block, settings.block))
boxes.remove(i)
return
self.X += self.dx * 5
self.Y += self.dy * 5
pygame.draw.circle(screen, YELLOW, (self.X, self.Y), 5)
pygame.display.update(pygame.Rect(self.X - 10, self.Y - 10, 20, 20))
self.clock.tick(120)
    # Function that checks whether the bullet hit the tank or a box
def collision(self, object):
if (self.X >= object.X and self.X <= object.X + 50) and (self.Y >= object.Y and self.Y <= object.Y + 50):
return True
return False
| [
"[email protected]"
] | |
6a41cba7b5b408fb7bf3f0a195e14dcd1b736ff7 | 57e71ac3b8766c5330aefbbf97b36d55543b3135 | /user/urls.py | 0aca3a86de749c9e3d9109e750721b45570a45aa | [] | no_license | soha4597/SoftwareEngineering | 804869d1d88b9b3705a3f7c6de65901534e8a975 | efc44204bbaac045cb1c16a375b0e8474c774c31 | refs/heads/master | 2022-05-25T10:25:28.450031 | 2020-04-25T19:06:28 | 2020-04-25T19:06:28 | 257,473,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,955 | py | from django.conf.urls import include, url
from django.contrib.auth import \
views as auth_views
from django.contrib.auth.forms import \
AuthenticationForm
from django.urls import reverse_lazy
from django.views.generic import (
RedirectView, TemplateView)
from .views import (
ActivateAccount, CreateAccount,
DisableAccount, ProfileDetail, ProfileUpdate,
PublicProfileDetail, ResendActivationEmail)
#password_urls = [
# url(r'^$',
# RedirectView.as_view(
# pattern_name='dj-auth:pw_reset_start',
# permanent=False)),
#url(r'^change/$',
# auth_views.password_change,
# {'template_name':
# 'user/password_change_form.html',
# 'post_change_redirect': reverse_lazy(
# 'dj-auth:pw_change_done')},
#name='pw_change'),
#url(r'^change/done/$',
# auth_views.password_change_done,
# {'template_name':
# 'user/password_change_done.html'},
# name='pw_change_done'),
# url(r'^reset/$',
# auth_views.password_reset,
# {'template_name':
# 'user/password_reset_form.html',
# 'email_template_name':
# 'user/password_reset_email.txt',
# 'subject_template_name':
# 'user/password_reset_subject.txt',
# 'post_reset_redirect': reverse_lazy(
# 'dj-auth:pw_reset_sent')},
# name='pw_reset_start'),
# url(r'^reset/sent/$',
# auth_views.password_reset_done,
# {'template_name':
# 'user/password_reset_sent.html'},
# name='pw_reset_sent'),
# url(r'^reset/'
# r'(?P<uidb64>[0-9A-Za-z_\-]+)/'
# r'(?P<token>[0-9A-Za-z]{1,13}'
# r'-[0-9A-Za-z]{1,20})/$',
# auth_views.password_reset_confirm,
# {'template_name':
# 'user/password_reset_confirm.html',
# 'post_reset_redirect': reverse_lazy(
# 'dj-auth:pw_reset_complete')},
# name='pw_reset_confirm'),
# url(r'reset/done/$',
# auth_views.password_reset_complete,
# {'template_name':
# 'user/password_reset_complete.html',
# 'extra_context':
# {'form': AuthenticationForm}},
# name='pw_reset_complete'),
#]
urlpatterns = [
url(r'^$',
RedirectView.as_view(
pattern_name='login',
permanent=False)),
# in case the url is user/, we redirect him immediately to the login page.
url(r'^activate/'
r'(?P<uidb64>[0-9A-Za-z_\-]+)/'
r'(?P<token>[0-9A-Za-z]{1,13}'
r'-[0-9A-Za-z]{1,20})/$',
ActivateAccount.as_view(),
name='activate'),
    # We use the uidb64 and token to make sure that the user actually received an activation email
url(r'^activate/resend/$',
ResendActivationEmail.as_view(),
name='resend_activation'), # This is where the activation email is resent.
url(r'^activate',
RedirectView.as_view(
pattern_name=(
'resend_activation'),
permanent=False)), # The RedirectView is simply a view that redirects to the given url pattern
    # This catches all the urls that start with activate but don't match the pattern above (note the regex doesn't end with $) and redirects them to the Resend Activation view
# We do this because we don't have a page that matches the prefix activate
url(r'^create/$',
CreateAccount.as_view(),
name='create'), #url to create an account
    # We included it in the base form and in login.html directly under the form.
url(r'^create/done/$',
TemplateView.as_view(
template_name=(
'user/user_create_done.html')),
name='create_done'), # TemplateView is used to simply create the template without the need for a view.
# url(r'^disable/$',
# DisableAccount.as_view(),
# name='disable'),
#
url(r'^login/$',
auth_views.LoginView.as_view(
template_name= 'user/login.html'),
        name='login'), # this routes to the view that displays the login page for the user to enter credentials
url(r'^logout/$',
auth_views.LogoutView.as_view(
template_name = 'user/logged_out.html',
extra_context =
{'form': AuthenticationForm}),
        name='logout'), # this routes to the view that displays the logged-out page, where the login form reappears in case the user wants to log in again
        # directly after logging out
# url(r'^password/', include(password_urls)),
url(r'^profile/$',
ProfileDetail.as_view(),
name='profile'), # We display the Profile of the user at the static url: user/profile
url(r'^profile/edit/$',
ProfileUpdate.as_view(),
name='profile_update'), # We allow the user to update the profile
url(r'^(?P<slug>[\w\-]+)/$',
PublicProfileDetail.as_view(),
name='public_profile'), # This link would display the profiles of other users
]
| [
"[email protected]"
] | |
dd1796069eca388c891cdb81582a79e9a4753f55 | 18cd88b68c1d859d17b1191d85003a7a8dc2c0c5 | /Algorithms and data structures (Python)/N. Test №2/E-Sort, even first.py | 4048d765c7b1dc26aa60e8f002f586613a42d10f | [] | no_license | GrigorevEv/Hello-World | f9783b3293958a4ed4e78b7f6727d577da6e0b85 | 2c376c8be1c95a1124fea4ce8de7e196abfa47ad | refs/heads/master | 2021-07-14T09:14:39.064437 | 2021-07-05T08:33:09 | 2021-07-05T08:33:09 | 241,601,746 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | # Сортировка: сначала чётные
# Дан список целых чисел. Отсортировать его так,
# чтобы сначала шли чётные по возрастанию, потом — нечётные во возрастанию.
# Формат входных данных
# Одна строка — список чисел через пробел. Длина списка не превосходит 10000.
# Формат выходных данных
# Отсортированный список чисел через пробел
digits = list(map(int, input().split()))
even_sorted_digits = []
odd_even_sorted_digits = []
sorted_digits = []
digits.sort()
for i in range(len(digits)):
if digits[i] % 2 == 0:
even_sorted_digits.append(digits[i])
else:
odd_even_sorted_digits.append(digits[i])
sorted_digits = even_sorted_digits + odd_even_sorted_digits
for i in range(len(sorted_digits)):
print(sorted_digits[i], end=' ')
| [
"[email protected]"
] | |
5696a36c2f8f7768f1c9549ab216ada16a7a7de9 | b17cc16682b9fecd8584c14d407db1f7d7be8285 | /Pythons/PlayList/playlist.py | f8798c916618fbdb803168f38618619a8dc30c5e | [] | no_license | mosesadelere/SeleniumTesting | 67a825ca809b0800ed4d7c185b090a7e813129b5 | 95916bbca20890600b4da43c6ebd25e42df93546 | refs/heads/master | 2023-01-09T22:35:38.200351 | 2019-11-08T20:16:29 | 2019-11-08T20:16:29 | 207,618,220 | 0 | 0 | null | 2022-12-27T15:37:34 | 2019-09-10T17:10:39 | C# | UTF-8 | Python | false | false | 5,290 | py | import argparse
#import sys
from matplotlib import pyplot
import plistlib
import numpy as np
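# Note: plistlib.readPlist (used below) was deprecated in Python 3.4 and
# removed in 3.9; on newer interpreters the equivalent is
# plistlib.load(open(fileName, 'rb')).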
def findCommonTrack(fileNames):
"""
find common tracks in given playlist files,
and save them to common.txt
"""
# a list of sets of track names
trackNameSets = []
for fileName in fileNames:
# create a new set
trackNames = set()
#read in playlist
plist = plistlib.readPlist(fileName)
tracks = plist['Tracks']
# iterate through the tracks
for trackId, track in tracks.items():
try:
# add the track names to a set
trackNames.add(track['Name'])
except:
#ignore
pass
# add to list
trackNameSets.append(trackNames)
# get the set of common tracks
commonTracks = set.intersection(*trackNameSets)
#write to fileName
if len(commonTracks) > 0:
f = open("common.txt", 'wb')
for val in commonTracks:
s = "%s\n" % val
f.write(s.encode("UTF-8"))
f.close()
print("%d common tracks found. "
"Track names written to common.txt." % len(commonTracks))
else:
print("No common tracks!")
def plotStats(fileName):
"""
    Plot some statistics by reading track information from the playlist.
"""
# read in a playlist
plist = plistlib.readPlist(fileName)
# get the tracks from the playlist
tracks = plist['Tracks']
# create lists of song ratings and track durations
ratings = []
durations = []
# iterate through the tracks
for trackId, track in tracks.items():
try:
ratings.append(track['Album Rating'])
durations.append(track['Total Time'])
except:
pass
# ensure that valid data was collected
if ratings == [] or durations == []:
print("No valid Album Rating/Total Time data in %s." % fileName)
return
# scatter plot
x = np.array(durations, np.int32)
# convert to minutes
x /= 60000.0
y = np.array(ratings, np.int32)
pyplot.subplot(2, 1, 1)
pyplot.plot(x, y, 'o')
pyplot.axis([0, 1.05*np.max(x), -1, 110])
pyplot.xlabel('Track duration')
pyplot.ylabel('Track rating')
#plot histogram
pyplot.subplot(2, 1, 2)
pyplot.hist(x, bins=20)
pyplot.xlabel('Track duration')
pyplot.ylabel('Count')
pyplot.show()
def findDuplicates(fileName):
"""
Find duplicate tracks in given playlist.
"""
print('Finding duplicate tracks in %s...' % fileName)
# read in playlist
plist = plistlib.readPlist(fileName)
#get the tracks from the Tracks dictionary
tracks = plist['Tracks']
# create a track name dictionary
trackNames = {}
# iterate through tracks
for trackId, track in tracks.items():
try:
name = track['Name']
duration = track['Total Time']
# look for existing entries
if name in trackNames:
# if a name and duration match, increment the count
# round the track lenngth to the nearest second
if duration//1000 == trackNames[name][0]//1000:
count = trackNames[name][1]
trackNames[name] = (duration, count+1)
else:
# add dictionary entries as tuple (duration, count)
trackNames[name] = (duration, 1)
except:
pass
# store duplicates as (name, count) tuples
dups = []
for k, v in trackNames.items():
if v[1] > 1:
dups.append((v[1], k))
# save duplicates to a file
if len(dups) > 0:
print("Found %d duplicates. Track names saved to dups.txt" % len(dups))
else:
print("No duplicate tracks found!")
f = open("dups.txt", 'w')
for val in dups:
f.write("[%d] %s\n" % (val[0], val[1]))
f.close()
# Gather our code in a main() function
def main():
# create parser
descStr = """ This program analyses playlist files (.xml) exported from iTunes."""
parser = argparse.ArgumentParser(description = descStr)
# add a mutually exclusive group of arguments
group = parser.add_mutually_exclusive_group()
# add expected arguments
group.add_argument('--common', nargs = '*', dest='plFiles', required=False)
group.add_argument('--stats', dest='plFile', required=False)
group.add_argument('--dup', dest='plFileD', required=False)
#parse args
args = parser.parse_args()
if args.plFiles:
#find common tracks
findCommonTrack(args.plFiles)
elif args.plFile:
# plot stats
plotStats(args.plFile)
elif args.plFileD:
# find duplicate track
        findDuplicates(args.plFileD)
else:
print("These are not the tracks you are looking for.")
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
8bbdf1ea9a66ac4566bc2f4eeb58a4233a72cbef | 5783df40921343441c4b14128d8fd15052cbcc9f | /Assignment4/frozen_test_policy.py | b05e85aa9a179593ea53acad94867874d21399e0 | [] | no_license | dheeraj141/CS7641 | 38fd819026ada35f65f7eda099acde42e90dc04b | c312762ae0890f7e4619188785c6686cf99b5e0c | refs/heads/master | 2020-12-15T11:27:08.259467 | 2020-04-13T02:12:38 | 2020-04-13T02:12:38 | 235,086,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,705 | py |
import numpy as np
import gym
from gym import wrappers
import time
import sys
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from gym.envs.toy_text.frozen_lake import generate_random_map
# using the Bellman equation, we find the action providing the highest value for the given state s.
# V is the list of values of all states
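# i.e. evaluate Q(s, a) = r(s, a) + gamma * V(s') for every action and keep the
# argmax; value iteration below then applies the Bellman update V(s) <- max_a Q(s, a).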
def choose_best_action(env, V, s, gamma):
a_best = None
q_best = float('-inf')
nb_actions = env.action_space.n
for a in range (0, nb_actions):
env.env.s = s # go to state s
s_next, r, done, info = env.step(a) #take the action a
        q = r + gamma * V[s_next] # immediate reward plus discounted value of the next state
if q > q_best:
q_best = q
a_best = a
return a_best
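# Model-based variant (sketch, not used below): FrozenLake also exposes its
# transition table as env.unwrapped.P[s][a] -- a list of
# (prob, next_state, reward, done) tuples -- so the expected Q-value can be
# computed without stepping the live environment. This assumes that attribute
# is available, as it is for gym's toy-text environments.
def expected_q(env, V, s, a, gamma):
    return sum(p * (r + gamma * V[s_next])
               for p, s_next, r, done in env.unwrapped.P[s][a])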
# value iteration algorithm
def compute_value_iteration(env,
gamma=.9, v_delta_threshold=.000001,
V = None, verbose=True):
env.reset()
nb_actions = env.action_space.n
nb_states = env.observation_space.n
# values vector
    if V is None:
V = np.zeros([nb_states])
# policy vector
P = np.zeros([nb_states], dtype=int)
iteration = 0
while True:
v_delta = 0
for s in range (0, nb_states):
v_previous = V[s]
a_best = choose_best_action(env, V, s, gamma) # find an action with the highest future reward
env.env.s = s # go to the state s
s_next, r, done, info = env.step(a_best) #take the best action
V[s] = r + gamma * V[s_next] # update the value of the state
P[s] = a_best # store the best action in the policy vector for the state
v_delta = max(v_delta, np.abs(v_previous - V[s])) # calculate the rate of value improvment for the state
iteration += 1
if v_delta < v_delta_threshold:
if verbose:
print (iteration,' iterations done')
break
return V, P
# compute values for a 4x4 board
#V_4, P_4 = compute_value_iteration()
#print( V_4)
# function for displaying a heatmap
def display_value_iteration(P, env):
nb_states = env.observation_space.n
visited_states = np.zeros(nb_states).astype(bool)
visited_states[0] = 1
    desc = env.unwrapped.desc.ravel().astype(str)
    colors = np.where(desc=='S','y',np.where(desc=='F','b',np.where(desc=='H','r',np.where(desc=='G','g',desc))))
    # labels start out empty and are filled in along the greedy rollout below
    states_labels = np.zeros(nb_states).astype(str)
    states_labels[:] = ''
total_reward = 0
s = env.reset()
#env.render()
done = False
while done != True:
best_a = P[s] # select the best next action from the policy
        # gym FrozenLake actions: 0=left, 1=down, 2=right, 3=up
        states_labels[s] = '<' if best_a==0 else ('v' if best_a==1 else ('>' if best_a==2 else '^'))
#print(s, best_a)
s, rew, done, info = env.step(best_a) #take step using selected action
total_reward = total_reward + rew
visited_states[s] = 1 # mark the state as visited
#env.render()
ax = sns.heatmap(P.reshape(int(np.sqrt(nb_states)),int(np.sqrt(nb_states))),
linewidth=0.5,
annot=states_labels.reshape(int(np.sqrt(nb_states)),int(np.sqrt(nb_states))),
cmap=list(colors),
fmt = '',
cbar=False)
plt.show()
print("Total Reward: ", total_reward)
# display heatmap for a 4x4 board
#display_value_iteration(P_4)
# function for performing policy iteration
def compute_policy_iteration(env,
gamma=.9, v_delta_threshold=.00001,
P = None, verbose=True):
env.reset()
nb_actions = env.action_space.n
nb_states = env.observation_space.n
# values vector
V = np.zeros([nb_states])
# policy vector
if P == None:
P = np.random.choice(nb_actions, size=nb_states)
P = np.zeros([nb_states], dtype=int)
max_iterations = 200000
iteration = 0
for i in range(max_iterations):
# policy evaluation
while True:
v_delta = 0
for s in range (0, nb_states):
v_previous = V[s]
env.env.s = s # go to state s
s_next, r, done, info = env.step(P[s]) #take the action recommended by policy
V[s] = r + gamma * V[s_next] # update value after applying policy
v_delta = max(v_delta, np.abs(v_previous - V[s])) # calculate the rate of value improvment for the state
if v_delta < v_delta_threshold:
break
        print(V.reshape(int(np.sqrt(nb_states)), int(np.sqrt(nb_states))))
# policy improvement
policy_stable = True
for s in range (0, nb_states):
a_old = P[s] # ask policy for action to perform
a_best = choose_best_action(env, V, s, gamma) # find an action with the highest future reward
P[s] = a_best # store the best action in the policy vector for the state
if a_old != a_best:
policy_stable = False
if policy_stable:
break
        print(P.reshape(int(np.sqrt(nb_states)), int(np.sqrt(nb_states))))
iteration += 1
if verbose:
print (iteration,' iterations done')
return V, P
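# Quick sanity check (sketch, not called below): roll a policy out for a few
# episodes and report the mean return; the episode count and step cap are
# arbitrary choices.
def score_policy(env, P, episodes=100, max_steps=100):
    total = 0.0
    for _ in range(episodes):
        s = env.reset()
        for _ in range(max_steps):
            s, r, done, info = env.step(P[s])
            total += r
            if done:
                break
    return total / episodes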
env = gym.make('FrozenLake-v0', is_slippery=False)
Vp_4, Pp_4 = compute_policy_iteration(env)
print(Vp_4)
display_value_iteration(Pp_4, env)
# compute values for a 4x4 board
V_4, P_4 = compute_value_iteration(env)
print( V_4)
display_value_iteration(P_4, env)
V_8, P_8 = compute_value_iteration(env = gym.make('FrozenLake8x8-v0', is_slippery=False))
print(V_8)
display_value_iteration(P_8, env = gym.make('FrozenLake8x8-v0', is_slippery=False))
V_8, P_8 = compute_policy_iteration(env = gym.make('FrozenLake8x8-v0', is_slippery=False))
print(V_8)
display_value_iteration(P_8, env = gym.make('FrozenLake8x8-v0', is_slippery=False))
env = gym.make('FrozenLake-v0')
Vp_4, Pp_4 = compute_policy_iteration(env)
print(Vp_4)
display_value_iteration(Pp_4, env)
# compute values for a 4x4 board
V_4, P_4 = compute_value_iteration(env)
print( V_4)
display_value_iteration(P_4, env)
V_8, P_8 = compute_value_iteration(env = gym.make('FrozenLake8x8-v0'))
print(V_8)
display_value_iteration(P_8, env = gym.make('FrozenLake8x8-v0'))
| [
"[email protected]"
] | |
de595e301bdb831393d86ebe115a6d1a6c36418f | a2e19fd174bedd860297bcc72032dce0f1ea2339 | /graphs/torch_geometric/transforms/target_indegree.py | 72931a2e680b9cf2cbcdde83d215c688111fe7a3 | [
"MIT"
] | permissive | Cyanogenoid/fspool | 29772ea76fa96c35b59e1c0cdb7581b3714028ee | a9f93cc774610c6d96c2c3095a1ab16f53abbefb | refs/heads/master | 2023-08-03T10:58:16.139258 | 2022-11-22T23:16:32 | 2022-11-22T23:16:32 | 190,535,015 | 47 | 8 | MIT | 2023-07-22T07:41:22 | 2019-06-06T07:25:24 | Python | UTF-8 | Python | false | false | 1,579 | py | import torch
from torch_geometric.utils import degree
class TargetIndegree(object):
r"""Saves the globally normalized degree of target nodes (mapped to the
fixed interval :math:`[0, 1]`)
.. math::
\mathbf{u}(i,j) = \frac{\deg(j)}{\max_{v \in \mathcal{V}} \deg(v)}
in its edge attributes.
Args:
cat (bool, optional): Concat pseudo-coordinates to edge attributes
instead of replacing them. (default: :obj:`True`)
.. testsetup::
import torch
from torch_geometric.data import Data
.. testcode::
from torch_geometric.transforms import TargetIndegree
edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
data = Data(edge_index=edge_index)
data = TargetIndegree()(data)
print(data.edge_attr)
.. testoutput::
tensor([[1.0000],
[0.5000],
[0.5000],
[1.0000]])
"""
def __init__(self, cat=True):
self.cat = cat
def __call__(self, data):
col, pseudo = data.edge_index[1], data.edge_attr
deg = degree(col, data.num_nodes)
deg = deg / deg.max()
deg = deg[col]
deg = deg.view(-1, 1)
if pseudo is not None and self.cat:
pseudo = pseudo.view(-1, 1) if pseudo.dim() == 1 else pseudo
data.edge_attr = torch.cat([pseudo, deg.type_as(pseudo)], dim=-1)
else:
data.edge_attr = deg
return data
def __repr__(self):
return '{}(cat={})'.format(self.__class__.__name__, self.cat)
| [
"[email protected]"
] | |
2f9507e0023317b7c1bcad8b2b356ae0211bfdb9 | 474316f5dd548bb184f95c637fdeef2014c639b0 | /lesson104.py | 9e4a8508e4c5f0c5eb4ca1c5ffcccabb290a40a3 | [] | no_license | ipcoo43/openpyxl | c9a3669ddc9bd81402676cdda41d21bbe03b0bc9 | 8cc35f86b280a192c0b473cc88e1affdf39707da | refs/heads/master | 2020-06-03T16:41:39.418059 | 2019-06-13T00:50:49 | 2019-06-13T00:50:49 | 191,653,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,078 | py | import pandas as pd
# fetch the data
df_01 = pd.read_csv('https://goo.gl/VwsTBR', parse_dates=['년/월/일'], thousands=',', index_col='년/월/일') # Samsung Electronics
df_02 = pd.read_csv('https://goo.gl/2udsQq', parse_dates=['년/월/일'], thousands=',', index_col='년/월/일') # SK Hynix
df_03 = pd.read_csv('https://goo.gl/TbBGhJ', parse_dates=['년/월/일'], thousands=',', index_col='년/월/일') # Celltrion
print(df_01.head(1))
print(df_02.head(1))
print(df_03.head(1))
# collect the closing prices ('종가' is the closing-price column)
df_close = pd.DataFrame()
df_close['삼성전자'] = df_01['종가']
df_close['SK하이닉스'] = df_02['종가']
df_close['셀트리온'] = df_03['종가']
print(df_close.head(3))
# save to Excel
df_close.to_excel('./xlsx/종목별종가_104.xlsx', sheet_name='종목별종가')
# save each stock's closing prices to its own sheet
writer = pd.ExcelWriter('./xlsx/종목별종가_시트별_104.xlsx')
df_01['종가'].to_excel(writer,'삼성전자')
df_02['종가'].to_excel(writer,'SK하이닉스')
df_03['종가'].to_excel(writer,'셀트리온')
writer.save() | [
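# Optional follow-up (sketch): rebasing each series to its first value makes the
# three stocks directly comparable; the output file name here is illustrative.
df_norm = df_close / df_close.iloc[0]
df_norm.to_excel('./xlsx/normalized_close_104.xlsx', sheet_name='normalized')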
"[email protected]"
] | |
59191c77f25619736c50028ce314e77aaf3e30e8 | 5ad0a8ab299d852cfccf2c2a2300ace1deffa567 | /profiles/urls.py | a909bf12551cfd7f9fdb6caf5dd0a83538baae1f | [] | no_license | Code-Institute-Submissions/wilsons_express_v1 | ee12bf6f2e4925f09d75a2863e0f1326a0a38a5a | a9f7cfc701c77b8457a23d60f1b77b9b1282843a | refs/heads/master | 2023-05-12T10:31:19.359384 | 2021-06-03T06:46:08 | 2021-06-03T06:46:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.profile, name='profile'),
path('order_history/<order_ref>',
views.order_history, name='order_history')
]
| [
"[email protected]"
] | |
84969d228479d87b46b5c3f1187ed6c1ed997aac | 260aa7c38bfbae2bf7bdc17ab4e178f93bd60ec6 | /week8/hackerrank/H.py | 23844744f951d5a28f555af3e257dd750cdd69fd | [] | no_license | Orik236/Web_Orka236 | 0fe177febe193821f62cf3687865a11799662e13 | 49ddf61de35213be490b4aa08ad041231fe584e7 | refs/heads/master | 2023-01-09T22:22:40.846945 | 2020-04-16T22:47:38 | 2020-04-16T22:47:38 | 247,916,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | if __name__ == '__main__':
n = int(raw_input())
arr = map(int, raw_input().split())
arr.sort()
for i in range(n-1, -1, -1):
if arr[i] != arr[n-1]:
print(arr[i])
break
| [
"[email protected]"
] | |
fd4e92bc1ce559550f5ed0eefe3489856b8f2c3e | bfa9de121fd150f6ae35b09e66a3a422b7d0d724 | /jobbole/spiders/blogjobbole.py | 13c24cada5d7a45e24663fa6ce7b17a1742584cf | [] | no_license | Damaomaomao/jobbole | 566a43780cd85f44ecf3060ba2ffb5d73e086a20 | c3b498131d48b2d61b269e93157e7b1caddf6c19 | refs/heads/master | 2020-03-27T09:48:09.207979 | 2018-08-28T01:25:51 | 2018-08-28T01:25:51 | 146,373,677 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,908 | py | # -*- coding: utf-8 -*-
import scrapy
from urllib import parse
from scrapy import Request
from jobbole.items import JobBoleArticleItem
from jobbole.utils.common import get_md5
from scrapy.loader import ItemLoader
class BlogjobboleSpider(scrapy.Spider):
name = 'blogjobbole'
allowed_domains = ['blog.jobbole.com']
start_urls = ['http://blog.jobbole.com/all-posts/']
def parse(self, response):
"""
1. 获取文章列表页中的文章url并交给scrapy下载后并进行解析
2. 获取下一页的url并交给scrapy进行下载, 下载完成后交给parse
"""
# 解析列表页中的所有文章url并交给scrapy下载后并进行解析
post_nodes = response.css("#archive .floated-thumb .post-thumb a")
for post_node in post_nodes:
image_url = post_node.css("img::attr(src)").extract_first("")
post_url = post_node.css("::attr(href)").extract_first("")
            # meta passes the cover-image URL along so parse_detail can read it from response.meta
            yield Request(url=parse.urljoin(response.url, post_url), meta={"front_image_url": image_url},
callback=self.parse_detail)
        # extract the next page and hand it to scrapy to download
next_url = response.css(".next.page-numbers::attr(href)").extract_first("")
if next_url:
yield Request(url=parse.urljoin(response.url, next_url), callback=self.parse)
def parse_detail(self, response):
        #-----------<<ItemLoader approach>>------------------
item_loader = ItemLoader(item = JobBoleArticleItem(),response=response)
front_image_url = response.meta.get("front_image_url", "")
item_loader.add_css("title", ".entry-header h1::text")
item_loader.add_value("url", response.url)
item_loader.add_value("url_object_id", get_md5(response.url))
item_loader.add_css("create_date", "p.entry-meta-hide-on-mobile::text")
item_loader.add_value("front_image_url", [front_image_url])
item_loader.add_css("praise_nums", ".vote-post-up h10::text")
item_loader.add_css("comment_nums", "a[href='#article-comment'] span::text")
item_loader.add_css("fav_nums", ".bookmark-btn::text")
item_loader.add_css("tags", "p.entry-meta-hide-on-mobile a::text")
#item_loader.add_css("content", "div.entry")
article_item = item_loader.load_item()
yield article_item
        # every loaded value comes back as a list
        # each field can be post-processed in items.py
        #article_item = JobBoleArticleItem() # instantiate the item
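        # For example (sketch), a processor declared on the Field in items.py
        # could collapse each list and strip whitespace; TakeFirst/MapCompose
        # come from scrapy.loader.processors (itemloaders.processors on newer
        # Scrapy releases):
        #   title = scrapy.Field(
        #       input_processor=MapCompose(str.strip),
        #       output_processor=TakeFirst(),
        #   )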
        # #-----------<XPath approach>---------------------------------------
        # front_image_url = response.meta.get("front_image_url", "") # article cover image
# title = response.xpath('//div[@class="entry-header"]/h1/text()').extract_first("")
# create_date = response.xpath("//p[@class='entry-meta-hide-on-mobile']/text()").extract()[0].strip().replace("·","").strip()
# praise_nums = response.xpath("//span[contains(@class, 'vote-post-up')]/h10/text()").extract()[0]
# fav_nums = response.xpath("//span[contains(@class, 'bookmark-btn')]/text()").extract()[0]
# match_re = re.match(".*?(\d+).*", fav_nums)
# if match_re:
# fav_nums = match_re.group(1)
#
# comment_nums = response.xpath("//a[@href='#article-comment']/span/text()").extract()[0]
# match_re = re.match(".*?(\d+).*", comment_nums)
# if match_re:
# comment_nums = match_re.group(1)
#
# content = response.xpath("//div[@class='entry']").extract()[0]
#
# tag_list = response.xpath("//p[@class='entry-meta-hide-on-mobile']/a/text()").extract()
# tag_list = [element for element in tag_list if not element.strip().endswith("评论")]
# tags = ",".join(tag_list)
        # #-----------<CSS selector approach>---------------------------------------
        # front_image_url = response.meta.get("front_image_url", "") # article cover image
# title = response.css(".entry-header h1::text").extract()[0]
# create_date = response.css("p.entry-meta-hide-on-mobile::text").extract()[0].strip().replace("·","").strip()
# praise_nums = response.css(".vote-post-up h10::text").extract()[0]
# fav_nums = response.css(".bookmark-btn::text").extract()[0]
# match_re = re.match(".*?(\d+).*", fav_nums)
# if match_re:
# fav_nums = int(match_re.group(1))
# else:
# fav_nums = 0
#
# comment_nums = response.css("a[href='#article-comment'] span::text").extract()[0]
# match_re = re.match(".*?(\d+).*", comment_nums)
# if match_re:
# comment_nums = int(match_re.group(1))
# else:
# comment_nums = 0
#
# content = response.css("div.entry").extract()[0]
#
# tag_list = response.css("p.entry-meta-hide-on-mobile a::text").extract()
# tag_list = [element for element in tag_list if not element.strip().endswith("评论")]
# tags = ",".join(tag_list)
        #-- fill the extracted values into the item
# article_item["url_object_id"] = get_md5(response.url)
# article_item["title"] = title
# article_item["url"] = response.url
# try:
# create_date = datetime.datetime.strptime(create_date, "%Y/%m/%d").date()
# except Exception as e:
# create_date = datetime.datetime.now().date()
# article_item["create_date"] = create_date
# article_item["front_image_url"] = [front_image_url]
# article_item["praise_nums"] = praise_nums
# article_item["comment_nums"] = comment_nums
# article_item["fav_nums"] = fav_nums
# article_item["tags"] = tags
# article_item["content"] = content
#yield article_item
| [
"[email protected]"
] | |
dcbb3a2a64f363d7f18000a2d5f5d1e90f843698 | 4124ece01d93b56476f43150f3b74a2f6bee58de | /combine_imagees.py | 807f521e1a6ef5c1a7fdaa136e4748854e9490a3 | [] | no_license | codeWorth/Risk | 73eaadf20c3137731a72a4881cbec5ef931a3123 | f15b93b239862418e5fee0bfba2fa601b1a1d310 | refs/heads/master | 2020-04-13T03:13:30.623612 | 2019-01-07T21:32:42 | 2019-01-07T21:32:42 | 162,925,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
import os
folder = "Risk_Game_Masks"
img_names = os.listdir(folder)[1:]
print("Images:", img_names)
im = cv.imread(folder + "/" + img_names[0])
for i in range(1,len(img_names)):
print("Combining", img_names[i])
im_add = cv.imread(folder + "/" + img_names[i])
im[im_add == 255] = 255-i
cv.imwrite("total_imgs.png", im) | [
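# Round-trip check (sketch): region i was stamped with the value 255 - i, so a
# single region can be recovered from the merged image with an equality mask:
# merged = cv.imread("total_imgs.png")
# region_3 = (merged == 255 - 3)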
"[email protected]"
] | |
293750a1df2e071993a368ed11baa5af6cd47c45 | 6c2949fdd8bb3f84d6c5724ac2696f7b2aacb1d5 | /Chap10/zipCompress.py | d664de31fb59f24d8983aa872306232b9910a380 | [] | no_license | KhubiThakkar/Automate_boring_stuff_with_python | e20b4678587cf4d318faa4c5466851268537eb87 | 4437df78fad638e69472f04836f9ef3ae2d80a59 | refs/heads/master | 2022-12-26T21:03:41.045900 | 2020-10-02T11:20:54 | 2020-10-02T11:20:54 | 268,807,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | import zipfile
newZip = zipfile.ZipFile('new.zip','w')
newZip.write('notes.txt',compress_type=zipfile.ZIP_DEFLATED)
newZip.close() | [
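# Reading the archive back (sketch; assumes the new.zip written above):
# with zipfile.ZipFile('new.zip') as z:
#     print(z.namelist())                         # ['notes.txt']
#     print(z.getinfo('notes.txt').compress_size)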
"[email protected]"
] | |
aee25b2c0432faa4054629f062c5552c6b6ca076 | 3d2892dee75f10dd896bcc3164eb7794234f99c7 | /Web/migrations/0003_auto_20170219_1010.py | 52ce4bfc3a8c6c8ad028eacd6f6479261d011fd5 | [] | no_license | character123/NN | 9f3772de00f58d4622943cdc382fe0ec7cad5824 | 74d40f4a5a43034722c84cb9c1d55c6df6d01e5c | refs/heads/master | 2021-01-20T11:40:52.928062 | 2017-03-05T02:53:12 | 2017-03-05T02:53:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-19 02:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Web', '0002_administrator'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(max_length=20, unique=True),
),
]
| [
"[email protected]"
] | |
2c1f69352b450d8c39cc563eeb0e9d94e66c2049 | b17cf12c9bc640843500b67f7a57ba5566db973b | /main.py | 1b1066e4b3ed0da4ff573fb31a49f4e6708b8686 | [
"MIT"
] | permissive | catarinaacsilva/smartcard-application | a3a6453dd8ea40241d6f8df482ceaea2fc21f548 | f5225b6054a101ba6c86ef6a8168481aca732191 | refs/heads/main | 2023-07-03T07:05:06.489618 | 2021-08-19T15:44:17 | 2021-08-19T15:44:17 | 348,493,376 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,726 | py | # coding: utf-8
import base64
from flask import Flask, request, jsonify
from pteid import PortugueseCitizenCard
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
@app.route('/sign', methods=['GET'])
def sign():
data = request.args.get('data')
print(data)
data = base64.urlsafe_b64decode(data).decode('UTF-8')
print(data)
pteid = PortugueseCitizenCard()
if len(pteid.sessions) > 0:
pteid.login(0)
signedData = pteid.sign_data(0, data)
encoded_signedData = base64.urlsafe_b64encode(signedData)
cert = pteid.PTEID_GetCertificate(0)
encoded_cert = base64.urlsafe_b64encode(cert)
pteid.logout(0)
pteid.sessions[0].closeSession()
return jsonify({'signedReceipt': encoded_signedData.decode('UTF-8'), 'cert': encoded_cert.decode('UTF-8')})
return jsonify({'error': 'Card not found'})
@app.route('/verify', methods=['GET'])
def verify():
data = request.args.get('data')
data = base64.urlsafe_b64decode(data).decode('UTF-8')
signedData = request.args.get('signedData')
signedData = base64.urlsafe_b64decode(signedData).decode('UTF-8')
pteid = PortugueseCitizenCard()
if len(pteid.sessions) > 0:
pteid.login(0)
verified = False
decoded = signedData.encode('UTF-8')
decoded_bytes = base64.urlsafe_b64decode(decoded)
if (pteid.verifySignature(pteid.PTEID_GetCertificate(0), data, decoded_bytes)):
verified = True
pteid.logout(0)
pteid.sessions[0].closeSession()
return jsonify({'verify': verified})
return jsonify({'error': 'Card not found'})
if __name__ == '__main__':
app.run(host='localhost', port=8686, debug=False)
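# Example request (sketch; assumes the third-party requests package, and the
# payload must be urlsafe-base64 encoded, as the handlers above expect):
# import base64, requests
# data = base64.urlsafe_b64encode(b'hello').decode()
# print(requests.get('http://localhost:8686/sign', params={'data': data}).json())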
| [
"[email protected]"
] | |
5c50a6c864458a2d13557be69ad99984215c517d | ff0205f9bb32d4ba3722062b5e006091baa0a075 | /Mnist_loader.py | 059e7ea53006c690fe1a5c688ece5cd821f15773 | [
"MIT"
] | permissive | Morbotu/drone-PWS | aa2897c32c21139cba57d13734c640bddd6536d3 | face9cbf30a55783592cce8af59c1c70da982b6a | refs/heads/main | 2023-08-13T23:55:24.361691 | 2021-09-29T19:15:18 | 2021-09-29T19:15:18 | 369,836,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,132 | py |
#### Libraries
# Standard library
import pickle
import gzip
# Third-party libraries
import numpy as np
def load_data():
"""Return the MNIST data as a tuple containing the training data,
the validation data, and the test data.
The ``training_data`` is returned as a tuple with two entries.
The first entry contains the actual training images. This is a
numpy ndarray with 50,000 entries. Each entry is, in turn, a
numpy ndarray with 784 values, representing the 28 * 28 = 784
pixels in a single MNIST image.
The second entry in the ``training_data`` tuple is a numpy ndarray
containing 50,000 entries. Those entries are just the digit
values (0...9) for the corresponding images contained in the first
entry of the tuple.
The ``validation_data`` and ``test_data`` are similar, except
each contains only 10,000 images.
This is a nice data format, but for use in neural networks it's
helpful to modify the format of the ``training_data`` a little.
That's done in the wrapper function ``load_data_wrapper()``, see
below.
"""
f = gzip.open('mnist.pkl.gz', 'rb')
training_data, validation_data, test_data = pickle.load(f,encoding='iso-8859-1')
f.close()
return (training_data, validation_data, test_data)
def load_data_wrapper():
"""Return a tuple containing ``(training_data, validation_data,
test_data)``. Based on ``load_data``, but the format is more
convenient for use in our implementation of neural networks.
In particular, ``training_data`` is a list containing 50,000
2-tuples ``(x, y)``. ``x`` is a 784-dimensional numpy.ndarray
containing the input image. ``y`` is a 10-dimensional
numpy.ndarray representing the unit vector corresponding to the
correct digit for ``x``.
``validation_data`` and ``test_data`` are lists containing 10,000
2-tuples ``(x, y)``. In each case, ``x`` is a 784-dimensional
numpy.ndarry containing the input image, and ``y`` is the
corresponding classification, i.e., the digit values (integers)
corresponding to ``x``.
Obviously, this means we're using slightly different formats for
the training data and the validation / test data. These formats
turn out to be the most convenient for use in our neural network
code."""
tr_d, va_d, te_d = load_data()
training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
training_results = [vectorized_result(y) for y in tr_d[1]]
training_data = list(zip(training_inputs, training_results))
validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
validation_data = list(zip(validation_inputs, va_d[1]))
test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
test_data = list(zip(test_inputs, te_d[1]))
return (training_data, validation_data, test_data)
def vectorized_result(j):
"""Return a 10-dimensional unit vector with a 1.0 in the jth
position and zeroes elsewhere. This is used to convert a digit
(0...9) into a corresponding desired output from the neural
network."""
e = np.zeros((10, 1))
e[j] = 1.0
return e | [
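# Example usage (sketch; assumes mnist.pkl.gz sits in the working directory):
# training_data, validation_data, test_data = load_data_wrapper()
# x, y = training_data[0]   # x.shape == (784, 1), y.shape == (10, 1)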
"[email protected]"
] | |
1ed1fd53c05132b0db20d7e934c8223cee591b0b | 138cc74989e553406675ad147fa68eea68857ddd | /roamer/constant.py | ab357735cc7e3aeec0ba58a239dd8b815e653797 | [
"MIT"
] | permissive | skiningham/roamer | 0e59a109eeb634d9e162f24a7494361ddfc911f3 | 95d15c3f5fcadb15acf256da22ce8b5e891ee113 | refs/heads/master | 2021-01-15T19:17:57.474588 | 2017-08-08T13:18:00 | 2017-08-08T13:18:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | """
App wide constants.
"""
import os
from os.path import expanduser, join, exists
ROAMER_DATA_PATH = os.environ.get('ROAMER_DATA_PATH') or expanduser('~/.roamer-data/')
ENTRIES_JSON_PATH = expanduser(join(ROAMER_DATA_PATH, 'entries.json'))
TRASH_JSON_PATH = expanduser(join(ROAMER_DATA_PATH, 'trash.json'))
TRASH_DIR = expanduser(join(ROAMER_DATA_PATH, 'trash/'))
TEST_DIR = expanduser(join(ROAMER_DATA_PATH, 'tmp/test/mock_dir'))
if not exists(TRASH_DIR):
os.makedirs(TRASH_DIR)
| [
"[email protected]"
] | |
96255d4f2ce8784e80468c3d0f97586ad4b37598 | 351cce7faf6d7b7d5d9ced841a2c43e56a64b382 | /python/pcc/alien_invasion/alien_invasion.py | b0a699b67d067399782cad5ed17e678a8556b933 | [] | no_license | uniqueyehu/snippet | 3bb70c766e4610d7c36db7616477f0982c669612 | 705624670efa299ead7af0dde3103214147ef72f | refs/heads/master | 2021-01-19T18:40:09.794143 | 2018-06-21T09:48:36 | 2018-06-21T09:48:36 | 101,152,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,265 | py | import pygame
from pygame.sprite import Group
from settings import Settings
from ship import Ship
from game_stats import GameStats
from button import Button
import game_functions as gf
def run_game():
    # initialize the game and create a screen object
    pygame.init()
    ai_settings = Settings()
    screen = pygame.display.set_mode(
        (ai_settings.screen_width, ai_settings.screen_height))
    pygame.display.set_caption("Alien Invasion")
    # create the Play button
    play_button = Button(ai_settings, screen, "Play")
    # create an instance for storing game statistics
    stats = GameStats(ai_settings)
    # create a ship
    ship = Ship(ai_settings, screen)
    # create a group for storing bullets
    bullets = Group()
    # create a group of aliens
    aliens = Group()
    # create the alien fleet
    gf.create_fleet(ai_settings, screen, ship, aliens)
    # start the game's main loop
    while True:
        # watch for keyboard and mouse events
gf.check_events(ai_settings, screen, stats, play_button, ship, aliens, bullets)
if stats.game_active:
ship.update()
gf.update_bullets(ai_settings, screen, ship, aliens, bullets)
gf.update_aliens(ai_settings, stats, screen, ship, aliens, bullets)
gf.update_screen(ai_settings, screen, stats, ship, aliens, bullets, play_button)
run_game() | [
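# Note (sketch): the loop in run_game() runs unthrottled; a pygame Clock can
# cap the frame rate if that is ever needed:
#   clock = pygame.time.Clock()   # create once, before the loop
#   clock.tick(60)                # call once per iteration inside the loop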
"[email protected]"
] | |
73792dca5929690baefc1e1de428047a6769f82e | 9fbcb46606ac2ef683d8d4c15a1e5659013874b6 | /LimitSetting/test/amsbLimitConfigBkgds_2016BC.py | 5cc90edd199aa7919a4234d32c3a06930858297b | [] | no_license | Mohammed2/DisappTrks | 9431b1c221909d4e0b00da94f95a71785e1d036b | 954afb1c826afe59b7f86a56e08c06eb62f0d7c2 | refs/heads/master | 2021-01-09T09:38:13.325448 | 2017-02-06T23:14:10 | 2017-02-06T23:14:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,440 | py | #!/usr/bin/env python
# Bkgd configuration file for limit-setting produced with makeANTables.py
backgrounds = {
'Fake2016BC' : {
'N' : '0',
'alpha' : '0.613186900771',
},
'Elec2016BC' : {
'N' : '19',
'alpha' : '0.112543527986',
},
'Muon2016BC' : {
'N' : '25',
'alpha' : '0.056900137794',
},
'Tau2016BC' : {
'N' : '8',
'alpha' : '0.0135152593502',
},
}
background_systematics = {
'Fake2016BC_alpha' : { # error on alpha
'value' : '1.00084739905',
'background' : 'Fake2016BC',
},
'Elec2016BC_alpha' : { # error on alpha
'value' : '1.01452781398',
'background' : 'Elec2016BC',
},
'Muon2016BC_alpha' : { # error on alpha
'value' : '1.00714427592',
'background' : 'Muon2016BC',
},
'Tau2016BC_alpha' : { # error on alpha
'value' : '1.16704422709',
'background' : 'Tau2016BC',
},
'Fake2016BC_syst' : { # error on fake track rate assumption
'value' : str (1.0 + 31.3831354534 / 100.0),
'background' : 'Fake2016BC',
},
'Elec2016BC_energy' : { # error on energy assumption
'value' : str (1.0 + 10.8548021022 / 100.0),
'background' : 'Elec2016BC',
},
'Tau2016BC_energy' : { # error on energy assumption
'value' : str (1.0 + 20.0071895962 / 100.0),
'background' : 'Tau2016BC',
},
}
| [
"[email protected]"
] | |
440b04e023befaf72d7d3678165ee4c37222007b | bf6e3d9831292ccb0b5d80f43eef4d746743e8ad | /motion_planning.py | eac48db5d3eb8cb76049ccf166eb0646e6b7bec8 | [] | no_license | aherreraGH/fcnd-submission | 214a7c6bb2a1de7e5b3e878aeacbef6f39d94bcb | 97ebfbd40bdb2ccf47764c4926849e20b02b7122 | refs/heads/master | 2020-04-12T01:51:28.680051 | 2018-12-19T03:11:14 | 2018-12-19T03:11:14 | 162,230,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,057 | py | import argparse
import time
import msgpack
from enum import Enum, auto
import numpy as np
from planning_utils import a_star, heuristic, create_grid
from udacidrone import Drone
from udacidrone.connection import MavlinkConnection
from udacidrone.messaging import MsgID
from udacidrone.frame_utils import global_to_local
from my_utils import prune_path, adjust_bearing
class States(Enum):
MANUAL = auto()
ARMING = auto()
TAKEOFF = auto()
WAYPOINT = auto()
LANDING = auto()
DISARMING = auto()
PLANNING = auto()
class MotionPlanning(Drone):
def __init__(self, connection):
super().__init__(connection)
self.target_position = np.array([0.0, 0.0, 0.0])
self.waypoints = []
self.in_mission = True
self.check_state = {}
# initial state
self.flight_state = States.MANUAL
# register all your callbacks here
self.register_callback(MsgID.LOCAL_POSITION, self.local_position_callback)
self.register_callback(MsgID.LOCAL_VELOCITY, self.velocity_callback)
self.register_callback(MsgID.STATE, self.state_callback)
def local_position_callback(self):
if self.flight_state == States.TAKEOFF:
if -1.0 * self.local_position[2] > 0.95 * self.target_position[2]:
self.waypoint_transition()
elif self.flight_state == States.WAYPOINT:
if np.linalg.norm(self.target_position[0:2] - self.local_position[0:2]) < 1.0:
if len(self.waypoints) > 0:
self.waypoint_transition()
else:
if np.linalg.norm(self.local_velocity[0:2]) < 1.0:
self.landing_transition()
def velocity_callback(self):
if self.flight_state == States.LANDING:
if self.global_position[2] - self.global_home[2] < 0.1:
if abs(self.local_position[2]) < 0.01:
self.disarming_transition()
def state_callback(self):
if self.in_mission:
if self.flight_state == States.MANUAL:
self.arming_transition()
elif self.flight_state == States.ARMING:
if self.armed:
self.plan_path()
elif self.flight_state == States.PLANNING:
self.takeoff_transition()
elif self.flight_state == States.DISARMING:
if ~self.armed & ~self.guided:
self.manual_transition()
def arming_transition(self):
self.flight_state = States.ARMING
print("arming transition")
self.arm()
self.take_control()
def takeoff_transition(self):
self.flight_state = States.TAKEOFF
print("takeoff transition")
self.takeoff(self.target_position[2])
def waypoint_transition(self):
self.flight_state = States.WAYPOINT
print("waypoint transition")
self.target_position = self.waypoints.pop(0)
print('target position', self.target_position)
self.cmd_position(self.target_position[0], self.target_position[1], self.target_position[2],
self.target_position[3])
def landing_transition(self):
self.flight_state = States.LANDING
print("landing transition")
self.land()
def disarming_transition(self):
self.flight_state = States.DISARMING
print("disarm transition")
self.disarm()
self.release_control()
def manual_transition(self):
self.flight_state = States.MANUAL
print("manual transition")
self.stop()
self.in_mission = False
def send_waypoints(self):
print("Sending waypoints to simulator ...")
data = msgpack.dumps(self.waypoints)
self.connection._master.write(data)
def plan_path(self):
self.flight_state = States.PLANNING
print("Searching for a path ...")
TARGET_ALTITUDE = 5
SAFETY_DISTANCE = 5
self.target_position[2] = TARGET_ALTITUDE
# DONE: read lat0, lon0 from colliders into floating point values
# line below provided by mentor Christopher
with open('colliders.csv') as f:
origin_pos_data = f.readline().split(',')
lat0 = float(origin_pos_data[0].strip().split(' ')[1])
lon0 = float(origin_pos_data[1].strip().split(' ')[1])
# DONE: set home position to (lon0, lat0, 0)
self.set_home_position(lon0, lat0, 0.0)
# DONE: retrieve current global position
current_global_position = [self._longitude, self._latitude, self._altitude]
# DONE: convert to current local position using global_to_local()
current_local_position = global_to_local(current_global_position, self.global_home)
print('global home {0}, position {1}, local position {2}'.format(self.global_home, self.global_position,
self.local_position))
# Read in obstacle map
data = np.loadtxt('colliders.csv', delimiter=',', dtype='Float64', skiprows=2)
print(data[:2])
# Define a grid for a particular altitude and safety margin around obstacles
grid, north_offset, east_offset = create_grid(data, TARGET_ALTITUDE, SAFETY_DISTANCE)
print("North offset = {0}, east offset = {1}".format(north_offset, east_offset))
        # Define starting point on the grid from the drone's current local position
# need to cast to integers for N and E here
start_north = int(current_local_position[0])
start_east = int(current_local_position[1])
print('Grid start N: {0} E: {1}'.format(start_north, start_east))
grid_start = ((start_north + -north_offset), (start_east + -east_offset))
# DONE: convert start position to current position rather than map center
# Set goal as some arbitrary position on the grid
# grid_goal = (-north_offset + 10, -east_offset + 10)
# Set to a grassy area just a bit SW of the original starting point.
goal_lon = -122.397745
goal_lat = 37.793837
goal_alt = 0
# The following was done with help/guidance from student Maruf Aytekin
# DONE: adapt to set goal as latitude / longitude position and convert
# set the global position for the goal
goal_global_position = [goal_lon, goal_lat, goal_alt]
# convert to the local formatted position
goal_local_position = global_to_local(goal_global_position, self.global_home)
# need to cast to an integer the values for N and E
(goal_north, goal_east) = (int(goal_local_position[0]), int(goal_local_position[1]))
# use numpy ceil to get the integer value as at the top of the rounding
# the offset is used to set the goal in the correct NE position based on the grid
grid_goal = (int(np.ceil(goal_north - north_offset)), int(np.ceil(goal_east - east_offset)))
# Run A* to find a path from start to goal
# DONE: add diagonal motions with a cost of sqrt(2) to your A* implementation
# or move to a different search space such as a graph (not done here)
print('Local Start and Goal: ', grid_start, grid_goal)
print('running A*, this takes a while... stand by...')
"""
NOTE: if the simulator craps out due to a timeout, it may be that the simulator has a memory leak somewhere
close the simulator, reopen it, then retry again.
"""
path, _ = a_star(grid, heuristic, grid_start, grid_goal)
print(path)
# DONE: prune path to minimize number of waypoints
# NOT DONE TODO (if you're feeling ambitious): Try a different approach altogether!
print('pruning the paths...')
path = prune_path(path)
print(path)
# Convert path to waypoints
waypoints = [[p[0] + north_offset, p[1] + east_offset, TARGET_ALTITUDE, 0] for p in path]
# Set self.waypoints
print('show the first waypoint: ', waypoints[0])
# Add bearing to waypoints
waypoints = adjust_bearing(waypoints)
print('waypoints with bearing: ', waypoints[1])
self.waypoints = waypoints
# DONE: send waypoints to sim (this is just for visualization of waypoints)
self.send_waypoints()
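    # Note on pruning (sketch): prune_path in my_utils is expected to drop
    # collinear waypoints; one common test gives three successive points
    # homogeneous coordinates [n, e, 1] and checks
    #   abs(np.linalg.det(np.array([p1, p2, p3]))) < epsilon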
def start(self):
self.start_log("Logs", "NavLog.txt")
print("starting connection")
self.connection.start()
# Only required if they do threaded
# while self.in_mission:
# pass
self.stop_log()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--port', type=int, default=5760, help='Port number')
parser.add_argument('--host', type=str, default='127.0.0.1', help="host address, i.e. '127.0.0.1'")
args = parser.parse_args()
conn = MavlinkConnection('tcp:{0}:{1}'.format(args.host, args.port), timeout=60)
drone = MotionPlanning(conn)
time.sleep(1)
drone.start()
| [
"[email protected]"
] | |
ba2717b239459dfb1604420e0c25d61e892727a9 | 9f25ae744df70ce1318833b5b454f03cdc8721ac | /tensorflow_federated/python/research/flars/run_emnist.py | 9448653a421e6eca44920fc22b9b645ea715c53e | [
"Apache-2.0"
] | permissive | zhenzhenclaire/federated | ae4bb49f134cc747c737d197874b30ac55523103 | 6749c9f51f3457c2377c95763afe6484062c7dcf | refs/heads/master | 2020-09-17T13:20:51.574956 | 2019-11-25T19:54:30 | 2019-11-25T19:55:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,950 | py | # Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains and evaluates EMNIST classification model using TFF."""
import collections
import functools
import io
import logging
import os
import pprint
import random
import sys
import time
from absl import app
from absl import flags
import attr
import pandas as pd
import tensorflow as tf
import tensorflow_federated as tff
import tree
from tensorboard.plugins.hparams import api as hp
from tensorflow_federated.python.research.baselines.emnist import models
from tensorflow_federated.python.research.flars import flars_fedavg
from tensorflow_federated.python.research.flars import flars_optimizer
from tensorflow_federated.python.research.simple_fedavg import simple_fedavg
from tensorflow_federated.python.research.utils import checkpoint_utils
from tensorflow_federated.python.research.utils import utils_impl
with utils_impl.record_new_flags() as hparam_flags:
# Metadata
flags.DEFINE_string(
'exp_name', 'emnist', 'Unique name for the experiment, suitable for use '
'in filenames.')
# Training hyperparameters
flags.DEFINE_boolean(
'digit_only_emnist', True,
'Whether to train on the digits only (10 classes) data '
'or the full data (62 classes).')
flags.DEFINE_integer('total_rounds', 500, 'Number of total training rounds.')
flags.DEFINE_integer('rounds_per_eval', 1, 'How often to evaluate')
flags.DEFINE_integer(
'rounds_per_checkpoint', 25,
'How often to emit a state checkpoint. Higher numbers '
'mean more lost work in case of failure, lower numbers '
'mean more overhead per round.')
flags.DEFINE_integer('train_clients_per_round', 2,
'How many clients to sample per round.')
flags.DEFINE_integer('client_epochs_per_round', 1,
'Number of epochs in the client to take per round.')
flags.DEFINE_integer('batch_size', 20, 'Batch size used on the client.')
# Client optimizer configuration (it defines one or more flags per optimizer).
utils_impl.define_optimizer_flags('client')
# Server optimizer configuration (it defines one or more flags per optimizer).
flags.DEFINE_enum('server_optimizer', 'flars', ['sgd', 'flars'],
'Server optimizer')
flags.DEFINE_float('server_learning_rate', 1., 'Server learning rate.')
flags.DEFINE_float(
'server_momentum', 0.9,
'Server momentum. This is also the `beta1` parameter for '
'the Yogi optimizer.')
# Parameter for FLARS.
flags.DEFINE_float('max_ratio', 0.1, 'max_ratio for optimizer FLARS.')
# Parameter for Yogi.
flags.DEFINE_float('initial_accumulator_value', 1e-6,
'initial_accumulator_value for optimizer Yogi.')
# End of hyperparameter flags.
# Root output directories.
flags.DEFINE_string(
'root_output_dir', '/tmp/emnist_fedavg/',
'Root directory for writing experiment output. This will '
'be the destination for metrics CSV files, Tensorboard log '
'directory, and checkpoint files.')
flags.DEFINE_boolean(
'disable_check_exists', True, 'Disable checking the '
'existence of root_output_dir. If False, code will exit '
'without running the experiment if root_output_dir '
'exists.')
FLAGS = flags.FLAGS
CHECKPOINT_PREFIX = 'ckpt_'
def federated_averaging_training_loop(model_fn,
server_optimizer_fn,
client_datasets_fn,
total_rounds=500,
rounds_per_eval=1,
metrics_hook=lambda *args: None):
"""A simple example of training loop for the Federated Averaging algorithm.
Args:
model_fn: A no-arg function that returns a `tff.learning.Model`.
server_optimizer_fn: A no-arg function that returns a
`tf.keras.optimizers.Optimizer`.
client_datasets_fn: A function that takes the round number, and returns a
list of `tf.data.Datset`, one per client.
total_rounds: Number of rounds to train.
rounds_per_eval: How often to call the `metrics_hook` function.
metrics_hook: A function taking arguments (server_state, train_metrics,
round_num) and performs evaluation. Optional.
Returns:
Final `ServerState`.
"""
logging.info('Starting federated_training_loop')
checkpoint_dir = os.path.join(FLAGS.root_output_dir, FLAGS.exp_name)
if FLAGS.server_optimizer != 'flars':
iterative_process = simple_fedavg.build_federated_averaging_process(
model_fn, server_optimizer_fn=server_optimizer_fn)
ServerState = simple_fedavg.ServerState # pylint: disable=invalid-name
else:
iterative_process = flars_fedavg.build_federated_averaging_process(
model_fn, server_optimizer_fn=server_optimizer_fn)
ServerState = flars_fedavg.ServerState # pylint: disable=invalid-name
server_state = ServerState.from_anon_tuple(iterative_process.initialize())
round_num = None
train_metrics = {}
latest_checkpoint_dir = checkpoint_utils.latest_checkpoint(
checkpoint_dir, CHECKPOINT_PREFIX)
logging.info('Looking for checkpoints in [%s/%s]', checkpoint_dir,
CHECKPOINT_PREFIX)
while latest_checkpoint_dir is not None:
# Restart from a previous round.
logging.info('Loading a previous checkpoint')
try:
server_state, metrics_hook.results, round_num = read_checkpoint(
latest_checkpoint_dir, server_state)
break
except OSError as e:
# Likely corrupted checkpoint, possibly job died while writing. Delete the
# checkpoint directory and try again.
logging.error('Exception [%s]', e)
logging.warning('Deleteing likely corrupted checkpoint at [%s]',
latest_checkpoint_dir)
tf.io.gfile.rmtree(latest_checkpoint_dir)
latest_checkpoint_dir = checkpoint_utils.latest_checkpoint(
checkpoint_dir, CHECKPOINT_PREFIX)
if round_num is not None:
logging.info('Restarted from checkpoint round %d', round_num)
else:
# Write the initial checkpoint
logging.info('No previous checkpoints, initializing experiment')
round_num = 0
metrics_hook(server_state, train_metrics, round_num)
write_checkpoint(checkpoint_dir, server_state, metrics_hook.results,
round_num)
while round_num < total_rounds:
round_num += 1
# Reset the executor to clear the cache, and clear the default graph to
# garbage collect tf.Functions that will no longer be used.
tff.framework.set_default_executor(
tff.framework.create_local_executor(max_fanout=25))
tf.compat.v1.reset_default_graph()
round_start_time = time.time()
data_prep_start_time = time.time()
federated_train_data = client_datasets_fn(round_num)
train_metrics['prepare_datasets_secs'] = time.time() - data_prep_start_time
training_start_time = time.time()
anon_tuple_server_state, tff_train_metrics = iterative_process.next(
server_state, federated_train_data)
server_state = ServerState.from_anon_tuple(anon_tuple_server_state)
train_metrics.update(tff_train_metrics._asdict(recursive=True))
train_metrics['training_secs'] = time.time() - training_start_time
logging.info('Round {:2d} elapsed time: {:.2f}s .'.format(
round_num, (time.time() - round_start_time)))
train_metrics['total_round_secs'] = time.time() - round_start_time
if round_num % FLAGS.rounds_per_checkpoint == 0:
write_checkpoint_start_time = time.time()
write_checkpoint(checkpoint_dir, server_state, metrics_hook.results,
round_num)
train_metrics['write_checkpoint_secs'] = (
time.time() - write_checkpoint_start_time)
if round_num % rounds_per_eval == 0:
metrics_hook(server_state, train_metrics, round_num)
metrics_hook(server_state, train_metrics, total_rounds)
write_checkpoint(checkpoint_dir, server_state, metrics_hook.results,
round_num)
return server_state
def _check_not_exists(f, disable_check_exists=False):
"""Checks if file `f` exists."""
if disable_check_exists:
return
if tf.io.gfile.exists(f):
print('{} already exists.\n'
'Please ensure only a single worker is executing each experiment '
'in the grid.\n'
'When re-running a grid, please use a fresh output directory.'.format(
f))
sys.exit(1)
@attr.s(frozen=False)
class MetricsHook(object):
"""A callback for evaluation.
This class holds all the logic for evaluating the FedAvg on EMNIST
classification and writing output for later analysis (to .csv files and
tensorboard). Hyperparameters are also recorded.
This class should be constructed via the `MetricsHook.build` classmethod.
"""
# Derived, conceptually post-init or in constructor, used in methods.
results_file = attr.ib()
summary_writer = attr.ib()
eval_dataset = attr.ib()
model = attr.ib()
results = attr.ib(factory=pd.DataFrame)
@classmethod
def build(cls, exp_name, output_dir, eval_dataset, hparam_dict):
"""Constructs the MetricsHook.
Args:
exp_name: A unique filesystem-friendly name for the experiment.
output_dir: A root output directory used for all experiment runs in a
grid. The MetricsHook will combine this with exp_name to form suitable
output directories for this run.
eval_dataset: Evaluation dataset.
hparam_dict: A dictionary of hyperparameters to be recorded to .csv and
exported to TensorBoard.
Returns:
The `MetricsHook` object.
"""
summary_logdir = os.path.join(output_dir, 'logdir/{}'.format(exp_name))
_check_not_exists(summary_logdir, FLAGS.disable_check_exists)
tf.io.gfile.makedirs(summary_logdir)
summary_writer = tf.compat.v2.summary.create_file_writer(
summary_logdir, name=exp_name)
with summary_writer.as_default():
hp.hparams(hparam_dict)
# Using .bz2 rather than .zip due to
# https://github.com/pandas-dev/pandas/issues/26023
results_file = os.path.join(output_dir, exp_name, 'results.csv.bz2')
# Also write the hparam_dict to a CSV:
hparam_dict['results_file'] = results_file
hparams_file = os.path.join(output_dir, exp_name, 'hparams.csv')
utils_impl.atomic_write_to_csv(pd.Series(hparam_dict), hparams_file)
model = create_compiled_keras_model()
logging.info('Writing ...')
logging.info(' result csv to: %s', results_file)
logging.info(' summaries to: %s', summary_logdir)
return cls(
results_file=results_file,
summary_writer=summary_writer,
eval_dataset=eval_dataset,
model=model)
def __attrs_post_init__(self):
_check_not_exists(self.results_file, FLAGS.disable_check_exists)
def __call__(self, server_state, train_metrics, round_num):
"""A function suitable for passing as an eval hook to the training_loop.
Args:
server_state: A `ServerState`.
train_metrics: A `dict` of training metrics computed in TFF.
round_num: The current round number.
"""
tff.learning.assign_weights_to_keras_model(self.model, server_state.model)
eval_metrics = self.model.evaluate(self.eval_dataset, verbose=0)
metrics = {
'train':
train_metrics,
'eval':
collections.OrderedDict(
zip(['loss', 'sparse_categorical_accuracy'], eval_metrics))
}
flat_metrics = tree.flatten_with_path(metrics)
flat_metrics = [
('/'.join(map(str, path)), item) for path, item in flat_metrics
]
flat_metrics = collections.OrderedDict(flat_metrics)
flat_metrics['round'] = round_num
logging.info('Evaluation at round {:d}:\n{!s}'.format(
round_num, pprint.pformat(flat_metrics)))
# Also write metrics to a tf.summary logdir
with self.summary_writer.as_default():
for name, value in flat_metrics.items():
tf.compat.v2.summary.scalar(name, value, step=round_num)
self.results = self.results.append(flat_metrics, ignore_index=True)
utils_impl.atomic_write_to_csv(self.results, self.results_file)
@attr.s(frozen=False)
class ExperimentState(object):
round_num = attr.ib()
metrics_csv_string = attr.ib()
server_state = attr.ib()
def write_checkpoint(checkpoint_dir, server_state, metrics_dataframe,
round_num):
"""Write the current experiment state to disk."""
# Clean-up old checkpoints if more than 5 exist; but not the initialization
# checkpoint.
checkpoints = sorted(tf.io.gfile.glob(os.path.join(checkpoint_dir, 'ckpt_*')))
for checkpoint in checkpoints[1:-3]:
tf.io.gfile.rmtree(checkpoint)
# We must flatten the pd.Dataframe to a single string, otherwise we don't
# know the nested structure (how many rounds have passed) to unpack
# in `checkpoint_utils.load()` during `read_checkpoint`.
csv_string = io.StringIO()
metrics_dataframe.to_csv(csv_string, header=True)
experiment_state = ExperimentState(
round_num=round_num,
metrics_csv_string=csv_string.getvalue(),
server_state=server_state)
checkpoint_utils.save(
experiment_state,
os.path.join(checkpoint_dir, 'ckpt_{:03d}'.format(round_num)))
def read_checkpoint(filepath, example_server_state):
"""Read a previously saved experiment state to memory."""
experiment_state = ExperimentState(
round_num=0, metrics_csv_string='', server_state=example_server_state)
experiment_state = checkpoint_utils.load(filepath, experiment_state)
metrics_dict = pd.read_csv(
io.BytesIO(experiment_state.metrics_csv_string.numpy()),
header=0,
index_col=0,
engine='c')
return (experiment_state.server_state, metrics_dict,
experiment_state.round_num.numpy())
def create_compiled_keras_model():
"""Create compiled keras model."""
model = models.create_original_fedavg_cnn_model(
only_digits=FLAGS.digit_only_emnist)
model.compile(
loss=tf.keras.losses.sparse_categorical_crossentropy,
optimizer=utils_impl.create_optimizer_from_flags('client'),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return model
def run_experiment():
"""Data preprocessing and experiment execution."""
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data(
only_digits=FLAGS.digit_only_emnist)
example_tuple = collections.namedtuple('Example', ['x', 'y'])
def element_fn(element):
return example_tuple(
x=tf.reshape(element['pixels'], [-1]),
y=tf.reshape(element['label'], [1]))
def preprocess_train_dataset(dataset):
"""Preprocess training dataset."""
return dataset.map(element_fn).apply(
tf.data.experimental.shuffle_and_repeat(
buffer_size=10000,
count=FLAGS.client_epochs_per_round)).batch(FLAGS.batch_size)
def preprocess_test_dataset(dataset):
"""Preprocess testing dataset."""
return dataset.map(element_fn).batch(100, drop_remainder=False)
emnist_train = emnist_train.preprocess(preprocess_train_dataset)
emnist_test = preprocess_test_dataset(
emnist_test.create_tf_dataset_from_all_clients())
example_dataset = emnist_train.create_tf_dataset_for_client(
emnist_train.client_ids[0])
sample_batch = tf.nest.map_structure(lambda x: x.numpy(),
next(iter(example_dataset)))
def model_fn():
keras_model = create_compiled_keras_model()
return tff.learning.from_compiled_keras_model(keras_model, sample_batch)
def client_datasets_fn(round_num):
"""Returns a list of client datasets."""
del round_num # Unused.
sampled_clients = random.sample(
population=emnist_train.client_ids, k=FLAGS.train_clients_per_round)
return [
emnist_train.create_tf_dataset_for_client(client)
for client in sampled_clients
]
tf.io.gfile.makedirs(FLAGS.root_output_dir)
hparam_dict = collections.OrderedDict([
(name, FLAGS[name].value) for name in hparam_flags
])
hparam_dict = utils_impl.remove_unused_flags('client', hparam_dict)
metrics_hook = MetricsHook.build(FLAGS.exp_name, FLAGS.root_output_dir,
emnist_test, hparam_dict)
if FLAGS.server_optimizer == 'sgd':
optimizer_fn = functools.partial(
tf.keras.optimizers.SGD,
learning_rate=FLAGS.server_learning_rate,
momentum=FLAGS.server_momentum)
elif FLAGS.server_optimizer == 'flars':
optimizer_fn = functools.partial(
flars_optimizer.FLARSOptimizer,
learning_rate=FLAGS.server_learning_rate,
momentum=FLAGS.server_momentum,
max_ratio=FLAGS.max_ratio)
else:
raise ValueError('Optimizer %s is not supported.' % FLAGS.server_optimizer)
federated_averaging_training_loop(
model_fn,
optimizer_fn,
client_datasets_fn,
total_rounds=FLAGS.total_rounds,
rounds_per_eval=FLAGS.rounds_per_eval,
metrics_hook=metrics_hook)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Expected no command-line arguments, '
'got: {}'.format(argv))
tf.compat.v1.enable_v2_behavior()
try:
tf.io.gfile.makedirs(os.path.join(FLAGS.root_output_dir, FLAGS.exp_name))
except tf.errors.OpError:
pass
run_experiment()
if __name__ == '__main__':
app.run(main)
| [
"[email protected]"
] | |
c085a12af46eb81e89314a45406944f8eae28655 | c452b223adf390edddf4339dd7360fe6de66b8d8 | /common/models/general/arrangement_param_category.py | af4e79144973fc3d344abf6729f539cd45a73b87 | [] | no_license | dev1andriy/creditbase_server | a321b16976c7cfe87d3ee5cfaf700850ea1b689c | 4ee067e175a1cca3f907a61ebc3d369ecdcebd66 | refs/heads/master | 2023-05-13T17:13:23.486966 | 2020-07-16T07:50:56 | 2020-07-16T07:50:56 | 280,089,440 | 0 | 0 | null | 2021-06-10T23:10:15 | 2020-07-16T07:49:41 | Python | UTF-8 | Python | false | false | 828 | py | from django.db import models
from common.models.abstract import *
from common.models.general import ArrangementCategory
class ArrangementParamCategory(DescribeableModel, HostableModel, TimeStampedModel, InsertableModel, UpdateableModel,
StatusRecordableModel, MakeableModel, CheckableModel):
ArrangementParamCategoryId = models.AutoField(primary_key=True, null=False)
ArrangementCategory = models.ForeignKey(ArrangementCategory, null=True, blank=True, on_delete=models.CASCADE)
objects = models.Manager()
def __str__(self):
return "{} - {}".format(self.ArrangementParamCategoryId, self.Description)
class Meta:
verbose_name = 'ArrangementParamCategory'
verbose_name_plural = 'ArrangementParamCategory'
db_table = 'ArrangementParamCategory'
| [
"[email protected]"
] | |
ec374575740741eacd925cc5ae3600c4e7d3485f | 80d9b0e7562ab21a91fd41211d3158d0f2ffb6f4 | /views.py | 9221e9b08e9467f3dceafe6dd8abda81f447c3e4 | [] | no_license | andrewsdb/app | d5b03d9dd30e2bb0072574a96427b14aad65cfc8 | 50d97894f2ec65053732b953ba9531954d06fbec | refs/heads/master | 2021-01-10T13:46:38.300605 | 2015-11-14T19:47:54 | 2015-11-14T19:47:54 | 46,189,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | from flask import render_template
from app import app
@app.route('/')
@app.route('/index')
def index():
user = { 'nickname': 'Andrew' }
posts = [
{
'author': { 'nickname': 'Andrew' },
'body': 'Beautiful New York!'
},
{
'author': { 'nickname': 'Susan' },
'body': 'Cool!'
},
{
'author': { 'nickname': 'Bobby' },
'body': 'Frank Sinatra the best!:)!'
}
]
return render_template("index.html",
title = 'Home',
user = user,
posts = posts)
@app.route('/lasvegas/')
def currency():
return render_template("lasvegas.html", title = 'Las Vegas')
@app.route('/panama/')
def quantity():
return render_template("panama.html", title = 'Panama City')
| [
"[email protected]"
] | |
e51e9d8e55037af7027c1f21826354efb2f4e844 | 97ccb80711a21f48d4bc931f7e0fe2a4bd3e7b31 | /mysite/settings.py | 925d11ab029f8020b2c8324a55eb7a1b2e5de845 | [] | no_license | van130712/my-first-blog | 6a2fdd0e1dcaa40a5a014f89708a9094012bfce4 | 479c15604c1d86bbd7ab36312652f10749c8f639 | refs/heads/master | 2020-03-07T07:20:24.248663 | 2018-03-29T20:44:39 | 2018-03-29T20:44:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,209 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_0p@f1s^1dv2pkj%%=+i-3io@(vp+ihpiax+^2b!x2p4etim8k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'van130712.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"[email protected]"
] | |
54a16eb5e58dcb11b6d7cf9660c0f23195d77d80 | 1ae4d3632f788f1a5e8f1e919e3f43b5f53d9ecd | /lesson8 (Classes and Objects)/task1/tests.py | cddd379483e0bf414ccf987e4108c929e001695d | [] | no_license | catharsis96/pythonintro2 | fc1f2dc3380efff92918c4bf9f77615b39380c0a | ac896af08e0835372bd1e6901b99f005bf0bb4b8 | refs/heads/master | 2021-04-29T11:37:30.599143 | 2017-01-02T16:23:48 | 2017-01-02T16:23:48 | 77,846,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | from test_helper import run_common_tests
if __name__ == '__main__':
run_common_tests()
| [
"[email protected]"
] | |
86f82641a1c080209a57cf5742d18c46d141af45 | 3459c022f192229e787672c170ac72df666826d2 | /cgi-bin/cUpdateInfo.py | 7e956a1c21988f56bb5cefc750fbdfd4fff680ed | [] | no_license | RebeccaAxelro/TherapyClinicProject | 380950491b014944ed97a32826b8befc6765a8d3 | d81bdd31375c117d168dd2e686e6239b525680de | refs/heads/master | 2020-05-29T23:17:36.919339 | 2019-09-26T20:53:40 | 2019-09-26T20:53:40 | 189,431,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | #!/usr/bin/python3
import mysql.connector
from mysql.connector import errorcode
import cgi
# Connecting to DB
cnx = mysql.connector.connect(user='raxelro1', database='raxelro11', host='localhost', password='rikikiki98')
cursor = cnx.cursor()
# get values from form
form = cgi.FieldStorage()
clientID = form.getvalue('clientID')
address = form.getvalue('address')
phoneNumber = form.getvalue('phoneNumber')
email = form.getvalue('email')
# start of html
print('Content-type:text/html\r\n\r\n')
print('<html>')
print('<body>')
print('<div>')
# Update
query = 'UPDATE client SET address = %s, phoneNumber = %s, email = %s WHERE clientID=%s'
value = (address, phoneNumber, email, clientID)
cursor.execute(query, value)
cnx.commit()
print('<p>IT WORKS</p>')
print('</div>')
print('</body>')
print('</html>')
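# Optional cleanup (a minimal sketch; the short-lived CGI process also
# releases these on exit):
# cursor.close()
# cnx.close()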
| [
"[email protected]"
] | |
79a1e28b6284f2ab644f930fcb922a65d1c1e49e | 0e383dc880cb1494695796b22c2d6e4944606f8d | /ResimAktarma/1_resim_ice_aktarma.py | b3c28db9dfb36da6a9909496c9b3db4e65e13a49 | [] | no_license | mustafaermehan/ImageProcessing | 2bacce9874558c05743e99360c54a7069fa49e0a | ba5e2308a8ff87e9e78adb830244aa34b2b79b31 | refs/heads/master | 2023-04-16T07:01:24.538617 | 2021-04-09T21:25:46 | 2021-04-09T21:25:46 | 356,519,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | import cv2
# load the image (flag 0 = grayscale)
img = cv2.imread("messi5.jpg",0)
# display the image
cv2.imshow("ilk resim",img)
k = cv2.waitKey(0) &0xFF
if k == 27:
cv2.destroyAllWindows()
elif k == ord('s'):
cv2.imwrite("messi_gray.png", img)
cv2.destroyAllWindows() | [
"[email protected]"
] | |
7b572c56260cb84c647162a4b997c9efd4fb1876 | 835a4d70cbde27aa243cc29a444edba2ceece3c9 | /livemark/plugins/rating/__init__.py | 54309eb242f2b4875d2e36db64aa491a32045a61 | [
"MIT"
] | permissive | gassantos/livemark | 288b4ea2f828fe70971ccacdf86a8ed0367aa440 | 81f2fe6b80f867c605066b4f3d461a0b8376fac3 | refs/heads/main | 2023-08-18T04:16:48.653679 | 2021-10-05T15:00:29 | 2021-10-05T15:00:29 | 413,883,159 | 1 | 0 | MIT | 2021-10-05T15:52:12 | 2021-10-05T15:52:12 | null | UTF-8 | Python | false | false | 33 | py | from .plugin import RatingPlugin
| [
"[email protected]"
] | |
8f45b32ff54590216f23d71428a9af02912ec472 | 63ca3ae6d647d56f4b4899717101e96165cadc10 | /facenet_sandberg/validation/src/calculator/threshold_calculator.py | d0781ab220355fbe86279ce80494dd6deaa2dafc | [
"MIT"
] | permissive | beholder-ai/Facial-Recognition-and-Alignment | 2da9f13b8eacf03258e47e32a8443e549e378b36 | 58f74668e5fe9f5e39ed294309345f50b90dc61a | refs/heads/master | 2020-06-01T01:48:10.965259 | 2019-06-06T18:49:15 | 2019-06-06T18:49:15 | 190,583,417 | 0 | 0 | MIT | 2019-06-06T13:09:33 | 2019-06-06T13:09:32 | null | UTF-8 | Python | false | false | 2,849 | py | from parser.pair import Pair
from typing import Callable
from typing import Iterable
from typing import Union
from typing import cast
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from calculator.calculator import Calculator
from calculator.distance_calculator import DistanceCalculator
from metrics.metrics import DistanceMetric
from metrics.metrics import ThresholdMetric
from metrics.metrics import ThresholdMetricException
# pylint: disable=too-few-public-methods
class ThresholdCalculator(Calculator):
# pylint: disable=too-many-arguments
def __init__(self,
distance_metric: Union[str, DistanceMetric],
threshold_metric: Union[str, ThresholdMetric],
threshold_start: float,
threshold_end: float,
threshold_step: float) -> None:
if isinstance(threshold_metric, str):
self._threshold_metric = getattr(ThresholdMetric,
cast(str, threshold_metric))
else:
self._threshold_metric = threshold_metric
self._distance_metric = distance_metric
self._threshold_start = threshold_start
self._threshold_end = threshold_end
self._threshold_step = threshold_step
def calculate(self, pairs: Iterable[Pair]) -> float:
threshold_scorer = self._get_threshold_scorer()
dist = DistanceCalculator(self._distance_metric).calculate(pairs)
labels = [pair.is_match for pair in pairs]
best_score = float('-inf')
best_threshold_index = 0
thresholds = np.arange(self._threshold_start,
self._threshold_end,
self._threshold_step)
for i, threshold in enumerate(thresholds):
predictions = np.less(dist, threshold)
score = threshold_scorer(labels, predictions)
if score > best_score:
best_score = score
best_threshold_index = i
return thresholds[best_threshold_index]
def _get_threshold_scorer(
self) -> Callable[[np.ndarray, np.ndarray], float]:
if self._threshold_metric == ThresholdMetric.ACCURACY:
return accuracy_score
if self._threshold_metric == ThresholdMetric.PRECISION:
return precision_score
if self._threshold_metric == ThresholdMetric.RECALL:
return recall_score
if self._threshold_metric == ThresholdMetric.F1:
return f1_score
metrics = [str(metric) for metric in ThresholdMetric]
err = f"Undefined {ThresholdMetric.__qualname__}. \
Choose from {metrics}"
raise ThresholdMetricException(err)
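# Minimal usage sketch ("EUCLIDEAN" is a placeholder -- use whatever names
# DistanceMetric actually defines in metrics.metrics; "ACCURACY" is real,
# since ThresholdMetric.ACCURACY is referenced above):
#
#   calc = ThresholdCalculator(distance_metric="EUCLIDEAN",
#                              threshold_metric="ACCURACY",
#                              threshold_start=0.0,
#                              threshold_end=4.0,
#                              threshold_step=0.01)
#   best_threshold = calc.calculate(pairs)
#
# Note: calculate() iterates `pairs` twice (distances, then labels), so pass
# a list or another re-iterable sequence rather than a one-shot generator.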
| [
"[email protected]"
] | |
8a557df71b669e1288c1046d61e2f9c5d36d7fb9 | 94c1877436496b76f6147033936b1a87bab8eb0b | /linguistic/params.py | 956b447f15c9b0c241dbe4fd98dabb69caaa42f4 | [
"MIT"
] | permissive | tejasvaidhyadev/IA-for-AI | 787852705915ad781d650b0b963e1df098c8904d | 8e6f21e9489e5e1651bfc8500708bf5fb10625ea | refs/heads/main | 2023-08-04T04:10:18.054185 | 2021-09-14T10:41:28 | 2021-09-14T10:41:28 | 405,497,716 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=4214)
parser.add_argument("--test_mode", type=str, default="False")
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--lr", type=float, default=1e-5)
parser.add_argument("--n_epochs", type=int, default=5)
parser.add_argument("--dummy_run", dest="dummy_run", action="store_true", help="Run the model on one sample for debugging")
parser.add_argument("--device", type=str, default="cuda", help="name of the device to be used for training")
parser.add_argument("--bert_type", type=str, required=True)
params = parser.parse_args()
params.test_mode = params.test_mode.lower() == "true"
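# Example invocation (hypothetical script name and model id):
#   python train.py --bert_type bert-base-uncased --lr 2e-5 --n_epochs 3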
| [
"[email protected]"
] | |
7d3f714af967bd72c5b298559703ad61836d1ef6 | 7f1d7ab26d7451d463a259c5c3afdc2cb7c406a8 | /src/py/server.py | 29d6665a1b0bfe7eef9c29658159b11707e6ba72 | [
"Apache-2.0"
] | permissive | mitre-cyber-academy/2013-networking-400 | 47abe21d73ea8bd4ee99a2f8cf156aabb44a9250 | 7e69e3066ae9728ab135b78bbf1190cbbee18718 | refs/heads/master | 2020-04-09T18:39:14.962220 | 2015-01-30T22:37:53 | 2015-01-30T22:37:53 | 21,870,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,516 | py | import socket
import hashlib
import time
#Create a UDP socket
servSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#Bind to localhost on port 12345
# (change this to the system hostname to make sure the port is accessible
# externally)
servSocket.bind(('localhost', 12345))
#These are the accepted values for the login:
good_key = "96c70ab05a922178e0668ba5402160d7"
goodUName = "Smith"
goodPWord = "IronFan30"
#Continuously listen for connections:
while(True):
#Get data from (a) client
data, addr = servSocket.recvfrom(4096)
#XOR with the good key and see if it looks right
catString = ''.join(chr(ord(a) ^ ord(b)) for a, b in zip(data, good_key))
#Try and split their message
arrVals = catString.split('_')
#Get the current time for a (dummy) return message
tmpTime = "%s" % time.time()
#Hash the time for the bad return
m = hashlib.md5()
m.update(tmpTime)
#Default return value is the bad one
retVal = m.digest()
#First, make sure they have a full message delimited by '_'
if(len(arrVals) == 3):
#Split up the given values
uName, pWord, currTime = arrVals
#And check them against the good ones
if(uName == goodUName and pWord == goodPWord):
#Then, build the 'good' response - a SHA256 hash
# of the given time code and the CD key
sh = hashlib.sha256()
sh.update(currTime + "_" + good_key)
retVal = sh.digest()
print "Successful login"
else:
print "Valid message format, bad data"
else:
print "Invalid message format"
servSocket.sendto(retVal, addr)
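# Sketch of a matching client (assumes knowledge of good_key; Python 2, like
# the server above). zip() truncates to the shorter operand, so only the first
# len(good_key) = 32 bytes are decoded -- keep "user_pass_time" under 32 chars:
#
#   import socket, time
#   key = "96c70ab05a922178e0668ba5402160d7"
#   msg = "Smith_IronFan30_%s" % time.time()
#   enc = ''.join(chr(ord(a) ^ ord(b)) for a, b in zip(msg, key))
#   c = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   c.sendto(enc, ('localhost', 12345))
#   reply, _ = c.recvfrom(4096)  # SHA256 digest on success, MD5 digest otherwise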
| [
"[email protected]"
] | |
26588a5bd6c60329fb9784354d63a7c210882afd | 3232dbec06a45acb9f9b9b5b632206e4dfec1edb | /sandbox/forms.py | 98bac101f317c682bc72e0cd4816e17517bc6bbc | [] | no_license | Krzewskimichal/Python_tutorial | 301dc7a64c3227afaeff3f3b66a20c54c03c6d76 | adc883a7fddb0436fbfe53cd00aca26c1265afb9 | refs/heads/master | 2020-04-28T03:56:48.448864 | 2019-03-26T19:17:19 | 2019-03-26T19:17:19 | 174,958,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,456 | py | from django import forms
from django.forms import ModelForm, TextInput
from sandbox.models import BuiltInFunction, Exams, StringMethods, ListMethods, DictionaryMethods, TupleMethods, \
Keywords, SetMethods, Messenger
class LoginForm(forms.Form):
username = forms.CharField(label='Login', max_length=128)
password = forms.CharField(label='Password', max_length=128, widget=forms.PasswordInput)
class RegisterForm(forms.Form):
username = forms.CharField(label='Login', max_length=128)
password1 = forms.CharField(label='Password', max_length=128, widget=forms.PasswordInput)
password2 = forms.CharField(label='Confirm Password', max_length=128, widget=forms.PasswordInput)
email = forms.EmailField(label='Email', max_length=256)
# -------------------add data--------------------------------
class AddBuiltInFunctionForm(ModelForm):
class Meta:
model = BuiltInFunction
fields = ['name', 'definition']
class AddStringMethodsForm(ModelForm):
class Meta:
model = StringMethods
fields = ['name', 'definition']
class AddListMethodsForm(ModelForm):
class Meta:
model = ListMethods
fields = ['name', 'definition']
class AddDictionaryMethodsForm(ModelForm):
class Meta:
model = DictionaryMethods
fields = ['name', 'definition']
class AddTupleMethodsForm(ModelForm):
class Meta:
model = TupleMethods
fields = ['name', 'definition']
class AddSetMethodsForm(ModelForm):
class Meta:
model = SetMethods
fields = ['name', 'definition']
class AddKeywordsForm(ModelForm):
class Meta:
model = Keywords
fields = ['name', 'definition']
class AddExamForm(ModelForm):
class Meta:
model = Exams
fields = ['task', 'answer', 'lesson']
class DeleteDataForm(forms.Form):
databases = (('BuiltInFunction', BuiltInFunction.__name__),
('DictionaryMethods', DictionaryMethods.__name__),
('ListMethods', ListMethods.__name__),
('Keywords', Keywords.__name__),
('SetMethods', SetMethods.__name__),
('StringMethods', StringMethods.__name__),
('TupleMethods', TupleMethods.__name__))
database = forms.ChoiceField(choices=databases)
class UserWriteMessageForm(ModelForm):
class Meta:
model = Messenger
fields = ['to_user', 'message_title', 'message']
| [
"[email protected]"
] | |
e454e998b10b71eb2c5e4296521d0d1b6049e620 | 6aef2fdd5b98038fc6ecc7551dd76dccf370c4ae | /dam_files/GPOMDP_SVRG_WV_ada_verA_fr_nver.py | 3d7b6f4be390313cd231003fe202c8f7f2f11b2a | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Bobeye/rllab | 29b1cf3f29b748f93af4ac103d1a0eaa40290e7f | 53c0afb73f93c4a78ff21507914d7f7735c21ea9 | refs/heads/master | 2020-05-02T07:18:17.323566 | 2019-03-26T02:34:02 | 2019-03-26T02:34:02 | 177,814,299 | 0 | 0 | NOASSERTION | 2019-03-26T15:14:46 | 2019-03-26T15:14:45 | null | UTF-8 | Python | false | false | 14,358 | py | from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
from rllab.envs.normalized_env import normalize
import numpy as np
import theano
import theano.tensor as TT
from rllab.sampler import parallel_sampler
from lasagne.updates import sgd
import matplotlib.pyplot as plt
from rllab.envs.gym_env import GymEnv
import pandas as pd
from lasagne.updates import get_or_compute_grads
from lasagne import utils
from collections import OrderedDict
max_sub_iter = 30
def unpack(i_g):
i_g_arr = [np.array(x) for x in i_g]
res = i_g_arr[0].reshape(i_g_arr[0].shape[0]*i_g_arr[0].shape[1])
res = np.concatenate((res,i_g_arr[1]))
res = np.concatenate((res,i_g_arr[2][0]))
res = np.concatenate((res,i_g_arr[3]))
return res
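# unpack() flattens the gradient pieces of the four policy parameter arrays
# (hidden weights, hidden biases, output weights, output biases) into one 1-D
# vector, so per-trajectory gradients can be stacked and fed to np.cov below.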
def adam_svrg(loss_or_grads, params, learning_rate=0.001, beta1=0.9,
beta2=0.999, epsilon=1e-8):
all_grads = get_or_compute_grads(loss_or_grads, params)
t_prev = []
updates = []
updates_of = []
grads_adam = []
for m_r in range(max_sub_iter):
t_prev.append(theano.shared(utils.floatX(0.)))
updates.append(OrderedDict())
# grads_adam.append([TT.matrix('eval_grad0'),TT.vector('eval_grad1'),TT.col('eval_grad3'),TT.vector('eval_grad4')])
# norm_adam.append([TT.matrix('eval_grad0'),TT.vector('eval_grad1'),TT.col('eval_grad3'),TT.vector('eval_grad4')])
updates_of.append(OrderedDict())
# Using theano constant to prevent upcasting of float32
one = TT.constant(1)
t = t_prev[-1] + 1
if (m_r==0):
a_t = learning_rate*TT.sqrt(one-beta2**t)/(one-beta1**t)
else:
beta2 = 0.9
a_t = learning_rate/2*TT.sqrt(one-beta2**t)/(one-beta1**t)
i = 0
l = []
h = []
for param, g_t in zip(params, all_grads):
value = param.get_value(borrow=True)
m_prev = theano.shared(np.zeros(value.shape, dtype=value.dtype),
broadcastable=param.broadcastable)
v_prev = theano.shared(np.zeros(value.shape, dtype=value.dtype),
broadcastable=param.broadcastable)
m_t = beta1*m_prev + (one-beta1)*g_t
v_t = beta2*v_prev + (one-beta2)*g_t**2
step = a_t*m_t/(TT.sqrt(v_t) + epsilon)
# eff_step = TT.sum(TT.square(step,None))
h.append(TT.sum(TT.square(step)))
l.append(TT.sum(TT.square(m_t)))
updates[-1][m_prev] = m_t
updates[-1][v_prev] = v_t
updates_of[-1][param] = param - step
i+=1
updates[-1][t_prev[-1]] = t
grads_adam.append(TT.sqrt((h[0]+h[1]+h[2]+h[3])/(l[0]+l[1]+l[2]+l[3])))
return updates_of,grads_adam
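# adam_svrg builds one Adam-style update dict per possible sub-iteration (the
# first with the full learning rate, later ones with half of it and a smaller
# beta2), plus a symbolic ||step|| / ||m_t|| ratio per sub-iteration that the
# training loop reads back as an adaptive step-size signal for its stopping rule.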
load_policy=True
# normalize() makes sure that the actions for the environment lies
# within the range [-1, 1] (only works for environments with continuous actions)
env = normalize(CartpoleEnv())
#env = GymEnv("InvertedPendulum-v1")
# Initialize a neural network policy with a single hidden layer of 8 hidden units
policy = GaussianMLPPolicy(env.spec, hidden_sizes=(8,),learn_std=False)
snap_policy = GaussianMLPPolicy(env.spec, hidden_sizes=(8,),learn_std=False)
back_up_policy = GaussianMLPPolicy(env.spec, hidden_sizes=(8,),learn_std=False)
parallel_sampler.populate_task(env, policy)
# policy.distribution returns a distribution object under rllab.distributions. It contains many utilities for computing
# distribution-related quantities, given the computed dist_info_vars. Below we use dist.log_likelihood_sym to compute
# the symbolic log-likelihood. For this example, the corresponding distribution is an instance of the class
# rllab.distributions.DiagonalGaussian
dist = policy.distribution
snap_dist = snap_policy.distribution
# We will collect 100 trajectories per iteration
N = 100
# Each trajectory will have at most 100 time steps
T = 100
#We will collect M secondary trajectories
M = 10
#Number of sub-iterations
#m_itr = 100
# Number of iterations
#n_itr = np.int(10000/(m_itr*M+N))
# Set the discount factor for the problem
discount = 0.99
# Learning rate for the gradient update
learning_rate = 0.01
s_tot = 10000
observations_var = env.observation_space.new_tensor_variable(
'observations',
# It should have 1 extra dimension since we want to represent a list of observations
extra_dims=1
)
actions_var = env.action_space.new_tensor_variable(
'actions',
extra_dims=1
)
d_rewards_var = TT.vector('d_rewards')
importance_weights_var = TT.vector('importance_weight')
# policy.dist_info_sym returns a dictionary, whose values are symbolic expressions for quantities related to the
# distribution of the actions. For a Gaussian policy, it contains the mean and (log) standard deviation.
dist_info_vars = policy.dist_info_sym(observations_var)
snap_dist_info_vars = snap_policy.dist_info_sym(observations_var)
surr = TT.sum(- dist.log_likelihood_sym_1traj_GPOMDP(actions_var, dist_info_vars) * d_rewards_var)
params = policy.get_params(trainable=True)
snap_params = snap_policy.get_params(trainable=True)
importance_weights = dist.likelihood_ratio_sym_1traj_GPOMDP(actions_var,dist_info_vars,snap_dist_info_vars)
grad = theano.grad(surr, params)
eval_grad1 = TT.matrix('eval_grad0',dtype=grad[0].dtype)
eval_grad2 = TT.vector('eval_grad1',dtype=grad[1].dtype)
eval_grad3 = TT.col('eval_grad3',dtype=grad[2].dtype)
eval_grad4 = TT.vector('eval_grad4',dtype=grad[3].dtype)
surr_on1 = TT.sum(dist.log_likelihood_sym_1traj_GPOMDP(actions_var,snap_dist_info_vars)*d_rewards_var*importance_weights_var)
surr_on2 = TT.sum(-snap_dist.log_likelihood_sym_1traj_GPOMDP(actions_var,dist_info_vars)*d_rewards_var)
grad_imp = theano.grad(surr_on1,snap_params)
update,step =adam_svrg([eval_grad1, eval_grad2, eval_grad3, eval_grad4], params, learning_rate=learning_rate)
f_train = theano.function(
inputs = [observations_var, actions_var, d_rewards_var],
outputs = grad
)
f_update = [theano.function(
inputs = [eval_grad1, eval_grad2, eval_grad3, eval_grad4],
outputs = step[n_sub_iter],
updates = update[n_sub_iter]
) for n_sub_iter in range(max_sub_iter)]
f_importance_weights = theano.function(
inputs = [observations_var, actions_var],
outputs = importance_weights
)
f_update_SVRG = [theano.function(
inputs = [eval_grad1, eval_grad2, eval_grad3, eval_grad4],
outputs = step[n_sub_iter],
updates = update[n_sub_iter]
) for n_sub_iter in range(max_sub_iter)]
f_imp_SVRG = theano.function(
inputs=[observations_var, actions_var, d_rewards_var, importance_weights_var],
outputs=grad_imp,
)
alla = {}
variance_svrg_data={}
variance_sgd_data={}
importance_weights_data={}
rewards_snapshot_data={}
rewards_subiter_data={}
n_sub_iter_data={}
for k in range(10):
if (load_policy):
snap_policy.set_param_values(np.loadtxt('policy_novar.txt'), trainable=True)
policy.set_param_values(np.loadtxt('policy_novar.txt'), trainable=True)
else:
policy.set_param_values(snap_policy.get_param_values(trainable=True), trainable=True)
avg_return = np.zeros(s_tot)
#np.savetxt("policy_novar.txt",snap_policy.get_param_values(trainable=True))
n_sub_iter=[]
rewards_sub_iter=[]
rewards_snapshot=[]
importance_weights=[]
variance_svrg = []
variance_sgd = []
diff_lr = []
alfa_t = []
j=0
while j<s_tot-N:
paths = parallel_sampler.sample_paths_on_trajectories(policy.get_param_values(),N,T,show_bar=False)
paths = paths[:N]
#baseline.fit(paths)
j+=N
observations = [p["observations"] for p in paths]
actions = [p["actions"] for p in paths]
d_rewards = [p["rewards"] for p in paths]
temp = list()
for x in d_rewards:
z=list()
t=1
for y in x:
z.append(y*t)
t*=discount
temp.append(np.array(z))
d_rewards=temp
s_g = f_train(observations[0], actions[0], d_rewards[0])
s_g_fv = [unpack(s_g)]
for ob,ac,rw in zip(observations[1:],actions[1:],d_rewards[1:]):
i_g = f_train(ob, ac, rw)
s_g_fv.append(unpack(i_g))
s_g = [sum(x) for x in zip(s_g,i_g)]
s_g = [x/len(paths) for x in s_g]
stp_snp = f_update[0](s_g[0],s_g[1],s_g[2],s_g[3])
print("step snapshot:", stp_snp)
rewards_snapshot.append(np.array([sum(p["rewards"]) for p in paths]))
avg_return[j-N:j] = np.repeat(np.mean([sum(p["rewards"]) for p in paths]),N)
var_4_fg = np.cov(s_g_fv,rowvar=False)
var_fg = var_4_fg/(N)
print(str(j-1)+' Average Return:', avg_return[j-1])
back_up_policy.set_param_values(policy.get_param_values(trainable=True), trainable=True)
n_sub = 0
while j<s_tot-M:
j += M
sub_paths = parallel_sampler.sample_paths_on_trajectories(policy.get_param_values(),M,T,show_bar=False)
            sub_paths = sub_paths[:M]
#baseline.fit(paths)
sub_observations=[p["observations"] for p in sub_paths]
sub_actions = [p["actions"] for p in sub_paths]
sub_d_rewards = [p["rewards"] for p in sub_paths]
temp = list()
for x in sub_d_rewards:
z=list()
t=1
for y in x:
z.append(y*t)
t*=discount
temp.append(np.array(z))
sub_d_rewards=temp
n_sub+=1
s_g_sgd = f_train(sub_observations[0], sub_actions[0], sub_d_rewards[0])
s_g_fv_sgd = [unpack(s_g_sgd)]
iw_var = f_importance_weights(sub_observations[0], sub_actions[0])
s_g_is = f_imp_SVRG(sub_observations[0], sub_actions[0], sub_d_rewards[0],iw_var)
s_g_fv_is = [unpack(s_g_is)]
w_cum=np.max(iw_var)
importance_weights.append(np.mean(iw_var))
for ob,ac,rw in zip(sub_observations[1:],sub_actions[1:],sub_d_rewards[1:]):
i_g_sgd = f_train(ob, ac, rw)
s_g_fv_sgd.append(unpack(i_g_sgd))
s_g_sgd = [sum(x) for x in zip(s_g_sgd,i_g_sgd)]
iw_var = f_importance_weights(ob, ac)
s_g_is_sgd = f_imp_SVRG(ob, ac, rw,iw_var)
s_g_fv_is.append(unpack(s_g_is_sgd))
s_g_is = [sum(x) for x in zip(s_g_is,s_g_is_sgd)]
importance_weights.append(np.mean(iw_var))
w_cum+=np.max(iw_var)
s_g_is = [x/len(sub_paths) for x in s_g_is]#w_cum
s_g_sgd = [x/len(sub_paths) for x in s_g_sgd]
var_sgd = np.cov(s_g_fv_sgd,rowvar=False)
var_batch = var_sgd/(M)
var_is_sgd = np.cov(s_g_fv_is,rowvar=False)
var_is = var_is_sgd/(M)
m_is = np.mean(s_g_fv_is,axis=0)
m_sgd = np.mean(s_g_fv_sgd,axis=0)
cov= np.outer(s_g_fv_is[0]-m_is,s_g_fv_sgd[0]-m_sgd)
for i in range(M-1):
cov += np.outer(s_g_fv_is[i+1]-m_is,s_g_fv_sgd[i+1]-m_sgd)
for i in range(M):
cov += np.outer(s_g_fv_sgd[i]-m_sgd,s_g_fv_is[i]-m_is)
cov = cov/(M*M)
var_svrg = var_fg + var_is + var_batch + cov
var_dif = var_svrg-var_batch
variance_svrg.append((np.diag(var_svrg).sum()))
variance_sgd.append((np.diag(var_batch).sum()))
rewards_sub_iter.append(np.array([sum(p["rewards"]) for p in sub_paths]))
avg_return[j-M:j] = np.repeat(np.mean([sum(p["rewards"]) for p in sub_paths]),M)
back_up_policy.set_param_values(policy.get_param_values(trainable=True), trainable=True)
g = [sum(x) for x in zip(s_g_is,s_g_sgd,s_g)]
stp = f_update[n_sub](g[0],g[1],g[2],g[3])
print("step:",stp)
diff_lr.append(stp/M-stp_snp/N)
alfa_t.append(stp)
if (stp/M<stp_snp/N or n_sub+1>= max_sub_iter):
break
#print(str(j)+' Average Return:', avg_return[j])
n_sub_iter.append(n_sub)
snap_policy.set_param_values(policy.get_param_values(trainable=True), trainable=True)
plt.plot(avg_return[::10])
plt.show()
rewards_subiter_data["rewardsSubIter"+str(k)]=rewards_sub_iter
rewards_snapshot_data["rewardsSnapshot"+str(k)]= rewards_snapshot
n_sub_iter_data["nSubIter"+str(k)]= n_sub_iter
variance_sgd_data["variancceSgd"+str(k)] = variance_sgd
variance_svrg_data["varianceSvrg"+str(k)]=variance_svrg
importance_weights_data["importanceWeights"+str(k)] = importance_weights
avg_return=np.array(avg_return)
#plt.plot(avg_return)
#plt.show()
alla["avgReturn"+str(k)]=avg_return
alla = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in alla.items() ]))
rewards_subiter_data = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in rewards_subiter_data.items() ]))
rewards_snapshot_data = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in rewards_snapshot_data.items() ]))
n_sub_iter_data = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in n_sub_iter_data.items() ]))
variance_sgd_data = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in variance_sgd_data.items() ]))
variance_svrg_data = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in variance_svrg_data.items() ]))
importance_weights_data = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in importance_weights_data.items() ]))
rewards_subiter_data.to_csv("rewards_subiter_v2.csv",index=False)
rewards_snapshot_data.to_csv("rewards_snapshot_v2.csv",index=False)
n_sub_iter_data.to_csv("n_sub_iter_v2.csv",index=False)
variance_sgd_data.to_csv("variance_sgd_v2.csv",index=False)
variance_svrg_data.to_csv("variance_svrg_v2.csv",index=False)
importance_weights_data.to_csv("importance_weights_v2.csv",index=False)
alla.to_csv("GPOMDP_SVRG_adaptive_m06_ver2.csv",index=False) | [
"[email protected]"
] | |
e5adbda3bfd130759ee55d9236a7a02a81be12f4 | c4b4c822cf145cca03d6234fa9b116e7db4e10f6 | /Djangoshop/Shopapp/views.py | b64073aa0ca3b927c5bdd038c58bb5f163b55d71 | [] | no_license | BeefpasteC/Django_shop | 98bff8a5242b3b86e9b98378846551dc0ac51f2b | 276ce6a8b21997b0854308f883c0a2cccc15bf2b | refs/heads/master | 2022-02-13T11:16:03.012290 | 2019-08-05T14:19:39 | 2019-08-05T14:19:39 | 198,127,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,454 | py | import hashlib
from django.http import HttpResponse
from rest_framework import viewsets
from django.shortcuts import render
from django.core.paginator import Paginator
from django.http import HttpResponseRedirect
from django_filters.rest_framework import DjangoFilterBackend  # import the filter backend
from Shopapp.models import *
from Buyerapp.models import *
from Shopapp.serializers import *
# Password hashing
def set_password(password):
md5 = hashlib.md5()
md5.update(password.encode())
response = md5.hexdigest()
return response
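# Illustration: set_password("123456") returns the 32-character hex digest
# "e10adc3949ba59abbe56e057f20f883e". Plain unsalted MD5 is kept here as in
# the original exercise; production code should prefer Django's
# make_password() or another salted scheme.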
# User registration
def register(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
nickname = request.POST.get('nickname')
if username and password:
seller = Seller()
seller.username = username
seller.password = set_password(password)
seller.nickname = nickname
seller.save()
return HttpResponseRedirect('/shop/login/')
return render(request,'shopapp/register.html')
# Decorator: verify login state from the cookie and session values
def cook_session(fun):
def inner(request,*args,**kwargs):
cookie_data = request.COOKIES.get('username')
session_data = request.session.get('username')
if cookie_data and session_data:
user = Seller.objects.filter(username=cookie_data).first()
if user and session_data == cookie_data:
return fun(request,*args,**kwargs)
return HttpResponseRedirect('/shop/login/')
return inner
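# Every view below that needs a logged-in seller is wrapped with
# @cook_session; requests whose cookie and session values do not match are
# redirected to /shop/login/.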
# Login
def login(request):
    response = render(request,'shopapp/login.html') # if the page was loaded from the login view,
    response.set_cookie("login_from","login_page") # tag the response with an origin cookie
if request.method == "POST":
username = request.POST.get("username")
password = request.POST.get("password")
        if username and password:  # make sure both fields were submitted
user = Seller.objects.filter(username = username).first()
if user :
                web_password = set_password(password)  # check whether the password is correct
                cookies = request.COOKIES.get("login_from")  # check the request came from the login page
if user.password == web_password and cookies == 'login_page':
response = HttpResponseRedirect('/shop/index/')
response.set_cookie('username',username)
                    response.set_cookie('user_id',user.id)  # the user id cookie lets other views look the user up
                    request.session['username'] = username
                    store = Store.objects.filter(user_id=user.id).first()  # check whether the user owns a store
                    if store:
                        response.set_cookie('has_store',store.id)  # found: set the cookie to the store id
                    else:
                        response.set_cookie('has_store', "")  # not found: set the cookie to empty
return response
return response
# Home page
@ cook_session
def index(request):
    '''
    Check whether the current user already has a store
    :param request:
    :return:
    '''
    # look up the current user
user_id = request.COOKIES.get('user_id')
if user_id:
user_id = int(user_id)
else:
user_id = 0
    # check via the user whether a store exists (stores are linked to users by id)
store = Store.objects.filter(user_id=user_id).first()
if store:
is_store = 1
else:
is_store = 0
return render(request,'shopapp/index.html',{'is_store':is_store})
# Store registration
@ cook_session
def resgister_store(request):
type_list = StoreType.objects.all()
if request.method == 'POST':
        post_data = request.POST # read the POST data
store_name = post_data.get('store_name')
store_description = post_data.get('store_description')
store_phone = post_data.get('store_phone')
store_money = post_data.get('store_money')
store_address = post_data.get('store_address')
        user_id = int(request.COOKIES.get('user_id')) # get the user_id from the cookie
        type_list = post_data.getlist('type') # the selected types come from request.POST as a list
        store_logo = request.FILES.get('store_logo') # the logo upload comes from request.FILES
        # save the non-many-to-many data first
store = Store()
store.store_name =store_name
store.store_descripton = store_description
store.store_phone = store_phone
store.store_money = store_money
store.store_address = store_address
store.user_id = user_id
        store.store_logo = store_logo # since Django 1.8 the image field can be saved directly
        store.save() # save, creating the row in the database
        # add the many-to-many field values to the newly created row
        for i in type_list: # loop over the type list to get each type id
            store_type = StoreType.objects.get(id=i) # look up the type record
            store.type.add(store_type) # add it to the type field's many-to-many mapping table
        store.save() # save the data
response = HttpResponseRedirect("/shop/index/")
response.set_cookie("has_store", store.id)
return response
return render(request,'shopapp/resgister_store.html',locals())
# Add a product
@ cook_session
def add_goods(request):
"""
负责添加商品
"""
goodstype_list = GoodsType.objects.all()
if request.method == "POST":
        # read the POST data
goods_name = request.POST.get("goods_name")
goods_price = request.POST.get("goods_price")
goods_number = request.POST.get("goods_number")
goods_description = request.POST.get("goods_description")
goods_date = request.POST.get("goods_date")
goods_safeDate = request.POST.get("goods_safeDate")
goods_store = request.COOKIES.get("has_store")
goods_image = request.FILES.get("goods_image")
goods_type = request.POST.get('goods_type')
        # save the data
goods = Goods()
goods.goods_name = goods_name
goods.goods_price = goods_price
goods.goods_number = goods_number
goods.goods_description = goods_description
goods.goods_date = goods_date
goods.goods_safeDate = goods_safeDate
goods.goods_image = goods_image
goods.goods_type = GoodsType.objects.get(id = int(goods_type))
goods.store_id = Store.objects.get(id= int(goods_store))
goods.save()
return HttpResponseRedirect("/shop/sl/up/")
return render(request,"shopapp/add_goods.html",locals())
# Product list
@ cook_session
def shop_list(request,state):
"""
商品的列表页
:param request:
:return:
"""
if state == 'up': # up 在售 down下架
state_num = 1
else:
state_num = 0
    # read the two query parameters
    keywords = request.GET.get("keywords","") # search keyword
    page_num = request.GET.get("page_num",1) # page number
    # look up the store
store_id = request.COOKIES.get('has_store')
store = Store.objects.get(id=int(store_id))
    if keywords: # if a search keyword was given
        goods_list = store.goods_set.filter(goods_name__contains=keywords,goods_state=state_num) # fuzzy name match
    else: # no keyword: list everything in this state
goods_list = store.goods_set.filter(goods_state=state_num)
    # paginate, 3 items per page
paginator = Paginator(goods_list,3)
page = paginator.page(int(page_num))
page_range = paginator.page_range
    # return the paginated data
return render(request,"shopapp/shop_list.html",{"page":page,"page_range":page_range,"keywords":keywords,'state':state})
# Product detail page
@ cook_session
def goods_summary(request,goods_id):
goods = Goods.objects.filter(id = goods_id).first()
return render(request,'shopapp/goods_summary.html',locals())
# Edit product details
@ cook_session
def update_goods(request,goods_id):
goods_data = Goods.objects.filter(id = goods_id).first()
if request.method == "POST":
        # read the POST data
goods_name = request.POST.get("goods_name")
goods_price = request.POST.get("goods_price")
goods_number = request.POST.get("goods_number")
goods_description = request.POST.get("goods_description")
goods_date = request.POST.get("goods_date")
goods_safeDate = request.POST.get("goods_safeDate")
goods_store = request.POST.get("goods_store")
goods_image = request.FILES.get("goods_image")
        # update the data
goods = Goods.objects.get(id = int(goods_id))
goods.goods_name = goods_name
goods.goods_price = goods_price
goods.goods_number = goods_number
goods.goods_description = goods_description
goods.goods_date = goods_date
goods.goods_safeDate = goods_safeDate
        if goods_image: # only replace the image if a new one was uploaded
goods.goods_image = goods_image
goods.save()
return HttpResponseRedirect('/shop/gs/%s'%goods_id)
return render(request,'shopapp/update_goods.html',locals())
# 404
@ cook_session
def error_404(request):
return render(request,'shopapp/404.html')
# List / delist a product
@ cook_session
def set_goods(request,state):
if state == "up":
state_num = 1
else:
state_num = 0
id = request.GET.get("id") #get获取id
referer = request.META.get("HTTP_REFERER") #返回当前请求的来源地址
if id:
        goods = Goods.objects.filter(id = id).first() # fetch the product with the given id
if state == "delete":
goods.delete()
else:
            goods.goods_state = state_num # change the state
            goods.save() # save
    return HttpResponseRedirect(referer) # redirect back to the referring page
# Product categories
def goods_type(request):
goodstype_list = GoodsType.objects.all()
if request.method == 'POST':
username = request.POST.get('name')
description = request.POST.get('description')
picture = request.FILES.get('picture')
goodstype = GoodsType()
goodstype.name = username
goodstype.description = description
goodstype.picture = picture
goodstype.save()
return HttpResponseRedirect('/shop/gt/')
return render(request,'shopapp/goods_type.html',locals())
def delete_goods_types(request):
id = int(request.GET.get('id'))
goodstype = GoodsType.objects.get(id = id)
goodstype.delete()
return HttpResponseRedirect('/shop/gt/')
# Product type details
def goods_type_summary(request):
id = int(request.GET.get('id'))
goods = GoodsType.objects.filter(id = id).first()
return render(request,'shopapp/goods_type_summary.html',locals())
# List all orders
def order_list(request):
store_id = request.COOKIES.get('has_store')
order_list = OrderDetail.objects.filter(order_id__order_status=2,goods_store=store_id)
return render(request,'shopapp/order_list.html',locals())
# This section exists to practice the query logic of the REST interface
class UserViewSet(viewsets.ModelViewSet):
    '''
    Return the content of the specific query
    '''
    queryset = Goods.objects.all() # the concrete data to return
    serializer_class = UserSerializer # the serializer class to apply
    filter_backends = [DjangoFilterBackend] # which filter backend to use
    filterset_fields = ['goods_name','goods_price'] # the fields that can be filtered on
class TypeViewSet(viewsets.ModelViewSet):
queryset = GoodsType.objects.all()
serializer_class = GoodsTypeSerializer
def ajax_goods_list(request):
return render(request,'shopapp/ajax_goods_list.html')
from CeleryTask.tasks import add
from django.http import JsonResponse
def get_add(request):
add.delay(2,3)
return JsonResponse({'status':200})
# Middleware test views
def small_white_views(request):
    print('I am small_white')
    raise TypeError('the small_white view went wrong')
def small_template_response(request):
def hello():
return HttpResponse('hello world')
rep = HttpResponse('i am rep')
rep.render = hello
return rep
# Caching
from django.views.decorators.cache import cache_page
from django.core.cache import cache
@cache_page(60*15)
# low-level cache interface
# set -- store a value
# get -- fetch a value
# add -- store only when the key does not exist yet
def memcache_views(request):
    store_data = cache.get('store_data')
if store_data:
store_data = store_data
else:
data = Store.objects.all()
cache.set('store_data',data,30)
store_data = data
return render(request,'shopapp/index.html',locals())
| [
"[email protected]"
] | |
f5008b11b7c269198447176e0be1ed0159fb3293 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03643/s854792404.py | 21c5aecd6628fea2e59a9a75708442b6a3221801 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | print("ABC", input(), sep='')
| [
"[email protected]"
] | |
00f48aaa623569a716ff694a42aefda03ced3f3f | cb1c89fa0ff9d6be258f79225432baa0557ea7e4 | /setup.py | 7ad925d18c07d69d8c3d5705bcc41ab8309954cb | [
"MIT"
] | permissive | MehdiKhiatiDS/pyzel | 6ee6cbdbfb531774e6be59602bd7026766384762 | 068efcb93d17b1ff55a1b3ff98f11b763696b531 | refs/heads/master | 2021-07-16T12:45:54.651089 | 2020-01-16T23:51:57 | 2020-01-16T23:51:57 | 234,185,576 | 1 | 0 | MIT | 2021-02-02T22:44:38 | 2020-01-15T22:24:44 | Python | UTF-8 | Python | false | false | 874 | py | """
lambdata - a collection of data science helper functions for lambda school
"""
import setuptools
REQUIRED = [
"numpy",
"pandas"
]
with open("README.md", "r") as fh:
LONG_DESCRIPTION = fh.read()
setuptools.setup(
name="lambda-MehdiKhiatiDS",
version="0.1.1",
author="mehdikhiati",
description="a collection of data science helper functions",
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
url="https://github.com/MehdiKhiatiDS/lambdata",
packages=setuptools.find_packages(),
python_requires=">=3.5",
install_requires=REQUIRED,
classifiers=["Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
)
| [
"[email protected]"
] | |
18cb5655c01d6818570cc0a5be0580119604d7dd | 04a7c28bc4a5cc8b60de89790014e8c55a257aaa | /Tests/test_shopping_cart.py | 4ab0c29c3d7cb2bf4e7fdd1bf41ce2b652bf7858 | [] | no_license | samphillips1879/bangazon-cli | 6752ff95c866a202aadf3bb6974aee6cc5e2f501 | 9aeb75bb63e4eae683c60e92de7039dc432a22d2 | refs/heads/master | 2021-01-11T13:29:22.100642 | 2017-02-09T15:53:52 | 2017-02-09T15:53:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,334 | py | import unittest
import sys
sys.path.append("../")
from models.shopping_cart import ShoppingCart
class TestShoppingCart(unittest.TestCase):
"""
    A test suite for the Shopping Cart feature of Bangazon CLI
    Methods:
        test_current_cart_should_be_ShoppingCart_object
        test_ShoppingCart_should_add_product
        test_ShoppingCart_should_return_cart_total_price
        test_ShoppingCart_should_accept_payment_method
"""
@classmethod
def setUpClass(self):
"""
Method to setup global values needed for all tests
"""
print('Set up class')
# Create an instance of the ShoppingCart that can be used in all tests
self.current_cart = ShoppingCart()
# Create an instance of a product that can be used in all tests
# Product tuple will need alteration
self.product1 = (1, "Widget", 5)
self.product2 = (2, "FooBar", 10)
self.payment_method = (1, "Visa", "1234567812345678")
def test_current_cart_should_be_ShoppingCart_object(self):
"""
Method to test whether the ShoppingCart object id created correctly
"""
self.assertIsInstance(self.current_cart, ShoppingCart)
def test_ShoppingCart_should_add_product(self):
"""
Method to test whether the ShoppingCart object can add a product
"""
current_cart = ShoppingCart()
self.assertEqual(current_cart.get_all_products(), [])
current_cart.add_product(self.product1)
self.assertEqual(current_cart.get_all_products(), [self.product1])
current_cart.add_product(self.product2)
self.assertEqual(current_cart.get_all_products(), [self.product1, self.product2])
def test_ShoppingCart_should_return_cart_total_price(self):
"""
Method to test whether the shopping cart can return the total
"""
total = self.current_cart.get_cart_total()
self.assertEqual(total, 15)
def test_ShoppingCart_should_accept_payment_method(self):
"""
        Method to test whether the shopping cart can accept a payment and be closed
        """
        self.current_cart.accept_payment(self.payment_method)
self.assertEqual(self.current_cart.get_payment_method(), [(1, "Visa", "1234567812345678")])
self.assertTrue(self.current_cart.order_is_closed())
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
2eec319a9772ff9612bfe2709b8c3b2df9b1914c | f7801b1663359e743c6480a8c88673e59e3a7526 | /UIskeleton/settingsUI.py | f67750640d660d4ba947dbe231ba6e0b154237df | [] | no_license | Wireless-Research-Tester/demo-day-1 | 889981b3b6b5e07c6b18c1404c09921a8fffde9b | 941fd84a73f133fff1d95e1c409236789b21e8ce | refs/heads/master | 2022-04-20T01:35:24.212114 | 2020-04-19T14:24:17 | 2020-04-19T14:24:17 | 254,984,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,343 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'settingsUIui.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SettingsWindow(object):
def setupUi(self, SettingsWindow):
SettingsWindow.setObjectName("SettingsWindow")
SettingsWindow.resize(649, 540)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(SettingsWindow.sizePolicy().hasHeightForWidth())
SettingsWindow.setSizePolicy(sizePolicy)
SettingsWindow.setFocusPolicy(QtCore.Qt.NoFocus)
SettingsWindow.setAcceptDrops(False)
SettingsWindow.setLayoutDirection(QtCore.Qt.LeftToRight)
SettingsWindow.setAutoFillBackground(False)
self.verticalLayout = QtWidgets.QVBoxLayout(SettingsWindow)
self.verticalLayout.setObjectName("verticalLayout")
self.main_layout = QtWidgets.QGridLayout()
self.main_layout.setHorizontalSpacing(1)
self.main_layout.setObjectName("main_layout")
self.pos_settings_layout = QtWidgets.QGridLayout()
self.pos_settings_layout.setObjectName("pos_settings_layout")
self.azimuth_rbttn = QtWidgets.QRadioButton(SettingsWindow)
self.azimuth_rbttn.setObjectName("azimuth_rbttn")
self.pos_settings_layout.addWidget(self.azimuth_rbttn, 4, 1, 1, 1)
self.spinBox = QtWidgets.QSpinBox(SettingsWindow)
self.spinBox.setObjectName("spinBox")
self.pos_settings_layout.addWidget(self.spinBox, 3, 1, 1, 1)
self.increment_suffix_label = QtWidgets.QLabel(SettingsWindow)
self.increment_suffix_label.setObjectName("increment_suffix_label")
self.pos_settings_layout.addWidget(self.increment_suffix_label, 3, 2, 1, 1)
self.elevation_rbttn = QtWidgets.QRadioButton(SettingsWindow)
self.elevation_rbttn.setObjectName("elevation_rbttn")
self.pos_settings_layout.addWidget(self.elevation_rbttn, 5, 1, 1, 1)
self.sweep_type_cBox = QtWidgets.QComboBox(SettingsWindow)
self.sweep_type_cBox.setObjectName("sweep_type_cBox")
self.pos_settings_layout.addWidget(self.sweep_type_cBox, 2, 1, 1, 1)
self.sweep_type_label = QtWidgets.QLabel(SettingsWindow)
self.sweep_type_label.setObjectName("sweep_type_label")
self.pos_settings_layout.addWidget(self.sweep_type_label, 2, 0, 1, 1)
self.increment_label = QtWidgets.QLabel(SettingsWindow)
self.increment_label.setObjectName("increment_label")
self.pos_settings_layout.addWidget(self.increment_label, 3, 0, 1, 1)
self.main_layout.addLayout(self.pos_settings_layout, 3, 1, 1, 1)
self.pos_settings_label = QtWidgets.QLabel(SettingsWindow)
self.pos_settings_label.setMaximumSize(QtCore.QSize(16777215, 20))
self.pos_settings_label.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignTop)
self.pos_settings_label.setObjectName("pos_settings_label")
self.main_layout.addWidget(self.pos_settings_label, 2, 0, 1, 1)
self.meas_type_label = QtWidgets.QLabel(SettingsWindow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.meas_type_label.sizePolicy().hasHeightForWidth())
self.meas_type_label.setSizePolicy(sizePolicy)
self.meas_type_label.setMaximumSize(QtCore.QSize(16777215, 20))
self.meas_type_label.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignTop)
self.meas_type_label.setObjectName("meas_type_label")
self.main_layout.addWidget(self.meas_type_label, 0, 0, 1, 1)
self.vna_settings_tabs = QtWidgets.QTabWidget(SettingsWindow)
self.vna_settings_tabs.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.vna_settings_tabs.sizePolicy().hasHeightForWidth())
self.vna_settings_tabs.setSizePolicy(sizePolicy)
self.vna_settings_tabs.setMaximumSize(QtCore.QSize(16777215, 300))
self.vna_settings_tabs.setTabPosition(QtWidgets.QTabWidget.North)
self.vna_settings_tabs.setTabShape(QtWidgets.QTabWidget.Rounded)
self.vna_settings_tabs.setUsesScrollButtons(False)
self.vna_settings_tabs.setMovable(False)
self.vna_settings_tabs.setObjectName("vna_settings_tabs")
self.list_tab = QtWidgets.QWidget()
self.list_tab.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.list_tab.sizePolicy().hasHeightForWidth())
self.list_tab.setSizePolicy(sizePolicy)
self.list_tab.setObjectName("list_tab")
self.list_tab_layout = QtWidgets.QGridLayout(self.list_tab)
self.list_tab_layout.setObjectName("list_tab_layout")
self.freq_list_label = QtWidgets.QLabel(self.list_tab)
self.freq_list_label.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.freq_list_label.sizePolicy().hasHeightForWidth())
self.freq_list_label.setSizePolicy(sizePolicy)
self.freq_list_label.setObjectName("freq_list_label")
self.list_tab_layout.addWidget(self.freq_list_label, 0, 0, 1, 1)
self.freq_list_lineEdit = QtWidgets.QLineEdit(self.list_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.freq_list_lineEdit.sizePolicy().hasHeightForWidth())
self.freq_list_lineEdit.setSizePolicy(sizePolicy)
self.freq_list_lineEdit.setObjectName("freq_list_lineEdit")
self.list_tab_layout.addWidget(self.freq_list_lineEdit, 0, 1, 1, 1)
self.vna_settings_tabs.addTab(self.list_tab, "")
self.sweep_tab = QtWidgets.QWidget()
self.sweep_tab.setObjectName("sweep_tab")
# Sweep tab Layout
self.sweep_tab_layout = QtWidgets.QGridLayout(self.sweep_tab)
self.sweep_tab_layout.setObjectName("sweep_tab_layout")
self.start_label = QtWidgets.QLabel(self.sweep_tab)
self.start_label.setObjectName("start_label")
self.sweep_tab_layout.addWidget(self.start_label, 0, 0, 1, 1)
self.start_freq_lineEdit = QtWidgets.QLineEdit(self.sweep_tab)
self.start_freq_lineEdit.setObjectName("start_freq_lineEdit")
self.sweep_tab_layout.addWidget(self.start_freq_lineEdit, 0, 1, 1, 1)
self.start_label_suffix = QtWidgets.QLabel(self.sweep_tab)
self.start_label_suffix.setObjectName("start_label_suffix")
self.sweep_tab_layout.addWidget(self.start_label_suffix, 0, 2, 1, 1)
self.end_label = QtWidgets.QLabel(self.sweep_tab)
self.end_label.setObjectName("end_label")
self.sweep_tab_layout.addWidget(self.end_label, 1, 0, 1, 1)
self.end_freq_lineEdit = QtWidgets.QLineEdit(self.sweep_tab)
self.end_freq_lineEdit.setObjectName("end_freq_lineEdit")
self.sweep_tab_layout.addWidget(self.end_freq_lineEdit, 1, 1, 1, 1)
self.end_label_suffix = QtWidgets.QLabel(self.sweep_tab)
self.end_label_suffix.setObjectName("end_label_suffix")
self.sweep_tab_layout.addWidget(self.end_label_suffix, 1, 2, 1, 1)
self.num_pts_label = QtWidgets.QLabel(self.sweep_tab)
self.num_pts_label.setObjectName("num_pts_label")
self.sweep_tab_layout.addWidget(self.num_pts_label, 2, 0, 1, 1)
self.num_points_lineEdit = QtWidgets.QLineEdit(self.sweep_tab)
self.num_points_lineEdit.setObjectName("num_points_lineEdit")
self.sweep_tab_layout.addWidget(self.num_points_lineEdit, 2, 1, 1, 1)
self.vna_settings_tabs.addTab(self.sweep_tab, "")
self.main_layout.addWidget(self.vna_settings_tabs, 1, 0, 1, 2)
self.calibration_cbox = QtWidgets.QCheckBox(SettingsWindow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.calibration_cbox.sizePolicy().hasHeightForWidth())
self.calibration_cbox.setSizePolicy(sizePolicy)
self.calibration_cbox.setAcceptDrops(False)
self.calibration_cbox.setLayoutDirection(QtCore.Qt.RightToLeft)
self.calibration_cbox.setObjectName("calibration_cbox")
self.main_layout.addWidget(self.calibration_cbox, 4, 1, 1, 1)
self.select_pbttn = QtWidgets.QPushButton(SettingsWindow)
self.select_pbttn.setMaximumSize(QtCore.QSize(100, 16777215))
self.select_pbttn.setToolTip("")
self.select_pbttn.setLayoutDirection(QtCore.Qt.RightToLeft)
self.select_pbttn.setObjectName("select_pbttn")
# self.select_pbttn.clicked.connect(self.switch)
self.main_layout.addWidget(self.select_pbttn, 5, 1, 1, 1)
self.cancel_pbttn = QtWidgets.QPushButton(SettingsWindow)
self.cancel_pbttn.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cancel_pbttn.sizePolicy().hasHeightForWidth())
self.cancel_pbttn.setSizePolicy(sizePolicy)
self.cancel_pbttn.setLayoutDirection(QtCore.Qt.LeftToRight)
self.cancel_pbttn.setObjectName("cancel_pbttn")
self.main_layout.addWidget(self.cancel_pbttn, 5, 0, 1, 1)
self.verticalLayout.addLayout(self.main_layout)
self.retranslateUi(SettingsWindow)
self.vna_settings_tabs.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(SettingsWindow)
def retranslateUi(self, SettingsWindow):
_translate = QtCore.QCoreApplication.translate
SettingsWindow.setWindowTitle(_translate("SettingsWindow", "Settings Window"))
self.azimuth_rbttn.setText(_translate("SettingsWindow", "Azimuth"))
self.increment_suffix_label.setText(_translate("SettingsWindow", "degrees"))
self.elevation_rbttn.setText(_translate("SettingsWindow", "Elevation"))
self.sweep_type_label.setText(_translate("SettingsWindow", "Sweep Type:"))
self.increment_label.setText(_translate("SettingsWindow", "Increment:"))
self.pos_settings_label.setText(_translate("SettingsWindow", "Positioner Settings"))
self.meas_type_label.setText(_translate("SettingsWindow", "Measurement Type"))
self.freq_list_label.setText(_translate("SettingsWindow", "Enter List of Frequencies:"))
self.vna_settings_tabs.setTabText(self.vna_settings_tabs.indexOf(self.list_tab),
_translate("SettingsWindow", "List"))
self.start_label.setText(_translate("SettingsWindow", "Start:"))
self.start_label_suffix.setText(_translate("SettingsWindow", "Hz"))
self.end_label.setText(_translate("SettingsWindow", "End:"))
self.end_label_suffix.setText(_translate("SettingsWindow", "Hz"))
self.num_pts_label.setText(_translate("SettingsWindow", "Number of Points:"))
self.vna_settings_tabs.setTabText(self.vna_settings_tabs.indexOf(self.sweep_tab),
_translate("SettingsWindow", "Sweep"))
self.calibration_cbox.setText(_translate("SettingsWindow", "Calibration"))
self.select_pbttn.setText(_translate("SettingsWindow", "Select"))
self.cancel_pbttn.setText(_translate("SettingsWindow", "Cancel"))
| [
"[email protected]"
] | |
5db3c4e86d1a6261f1a040a0848cd0b6e9f505f8 | b50a3e5bffcf4bc74e82d1325078795188c9e693 | /appium_native/NativeScript_example.py | 69b3bb0c7c10ec043454f9a3be3421b3c3a203bf | [] | no_license | hyuhyu2001/Performance_Test | 0fe7a7261b4bbfc1441f97c46b2607bc83831490 | 350829c2245120d0d91e5b3042a35521ce7522c4 | refs/heads/master | 2021-01-20T13:54:49.298009 | 2017-09-04T07:18:46 | 2017-09-04T07:18:46 | 90,532,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,426 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: jinzj
@desc: examples of Appium usage against native Android apps
"""
import time
from appium import webdriver
import unittest
class MyTestCase(unittest.TestCase):
    # Test setup: create the driver instance
def setUp(self):
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '4.3'
desired_caps['deviceName'] = '192.168.56.101:5555'
#desired_caps['appPackage'] = 'com.android.calculator2'
#desired_caps['appActivity'] = '.Calculator'
#desired_caps['appPackage'] = 'com.android.customlocale2'
#desired_caps['appActivity'] = '.CustomLocaleActivity'
desired_caps['appPackage'] = 'com.example.zhangjian.minibrowser2'
desired_caps['appActivity'] = '.myapplication.MainActivity'
desired_caps["unicodeKeyboard"] = "True"
desired_caps["resetKeyboard"] = "True"
self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    # Tear down: quit the driver and release resources
def tearDown(self):
self.driver.quit()
    # Test script, following the LOVE principle (Locate, Operate, Verify, Exception)
    '''def testAdd(self):
        #Locate an element
        number8 = self.driver.find_element_by_id("digit8")
        # Operate on the element
        number8.click()
        # Locate an element
        addopertion = self.driver.find_element_by_id("plus")
        # Operate on the element
        addopertion.click()
        # Locate an element
        number5 = self.driver.find_element_by_id("digit5")
        # Operate on the element
        number5.click()
        # Locate an element
        equal = self.driver.find_element_by_id("equal")
        # Operate on the element
        equal.click()
        #Verify the result of the operation
        result = self.driver.find_element_by_class_name("android.widget.EditText")
        value = result.text
        self.assertEqual(u"13", value)
        #Exception: handle the error case'''
def testOtherAPI(self):
'''elements = self.driver.find_elements_by_id("digit8")
elements[0].click()
time.sleep(5)
print(len(elements))'''
time.sleep(3)
#self.driver.press_keycode(8)
#self.driver.press_keycode(7)
input = self.driver.find_element_by_class_name("android.widget.EditText")
input.send_keys("10")
element = self.driver.find_element_by_accessibility_id(u"除")
element.click()
self.driver.press_keycode(12)
equal = self.driver.find_element_by_id("equal")
equal.click()
time.sleep(5)
    # Usage examples for more of the other APIs
def testMoreAPIs(self):
        #get the list of elements
        els = self.driver.find_elements_by_class_name('android.widget.CheckedTextView')
        #usage of the scroll API
        #self.driver.scroll(els[10], els[1])
        #usage of the drag_and_drop API
        #self.driver.drag_and_drop(els[10], els[3])
        #usage of the swipe API
        #self.driver.swipe(100, 750, 100, 100)
        #usage of the tap API
        #self.driver.tap([(100, 750)])
        #usage of the flick (fast swipe) API
        #self.driver.flick(100, 750, 100, 100)
        #usage of the current_activity API
        #print self.driver.current_activity
        #send an app to the background
        #self.driver.background_app(3)
        #usage of the wait_activity API (wait for the given activity to show)
        #print self.driver.wait_activity(".CustomLocaleActivity", 3, 1)
        #check whether an app is installed
        #print self.driver.is_app_installed("com.example.zhangjian.minibrowser2")
        #remove an app
        #self.driver.remove_app("com.example.zhangjian.minibrowser2")
        #install an app
        #self.driver.install_app("/Users/zhangjian/Downloads/app-debug.apk")
        #launch the app
        #self.driver.launch_app()
        #close the app
        #self.driver.close_app()
        #self.driver.launch_app()
        #start an activity
        self.driver.start_activity("com.example.zhangjian.minibrowser2",
                                   ".myapplication.NewActivity")
        time.sleep(3)
        #take a screenshot
self.driver.get_screenshot_as_file("test.png")
time.sleep(5)
if __name__ == '__main__':
suite = unittest.TestSuite()
suite.addTest(MyTestCase('testMoreAPIs'))
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| [
"[email protected]"
] | |
56b73c378dc15cd1cb9b85ce3fbac27d7a98a8d2 | 5b2da553d6730c4a12dd79cc4b131ca5e9caa1e5 | /3.3. FindDistances.py | fff71a47ccb150ba3a09c0a9b149cf0c97ce7806 | [] | no_license | kyle-musser/pyQGIS | 5ddc3d9ba03326c57c80a0ed7db9d1dc08fd7a0e | 3da7a27e472cea89cde5b87ef35b0f0f1ce5a672 | refs/heads/main | 2023-03-14T04:48:23.679180 | 2021-03-02T20:53:21 | 2021-03-02T20:53:21 | 343,826,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,689 | py | from geopy.distance import lonlat, geodesic
import pandas as pd
import os
# Set current directory of files
os.chdir(r"D:/cenBlock")
# Read in the census-block points CSV to a Pandas DF (data made in Stata with the .do file "get_nces_dir.do")
cenBlks = pd.read_csv("points_AlbersXY.csv")
# Get list of unique states to loop through
states = cenBlks.state_abbrev.unique()
# loop through states to make distance matrix for each state one at a time.
for state in states:
print("Calculating Distance Matrix for State: ", state)
st_dat = cenBlks[cenBlks['state_abbrev'] == str(state)] # subset to one state (current state in the loop)
st_dat = st_dat[['GEOID10', 'lat', 'lon']] # only keep lat/lon and ID variables
# using merge to generate all possibilities between origin and destination
# this will make an (N^2 x 6) length matrix with all possible combinations of lat/lon in the state
df = pd.merge(st_dat.assign(key=0), st_dat.assign(key=0), suffixes=('', '_x'), on='key').drop('key', axis=1)
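    # e.g. blocks [A, B] expand to the ordered pairs (A,A), (A,B), (B,A), (B,B):
    # a full cross join on the dummy key, hence the N^2 row count noted above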
# Use geopy to calculate miles between all nces combinations in the dataframe we just made
df['Miles'] = df.apply(
(lambda row: geodesic(lonlat(row['lon'], row['lat']),
lonlat(row['lon_x'], row['lat_x'])).miles), axis=1)
# Now reshape the data to look like the distance matrix we want
df = df.groupby(['GEOID10', 'GEOID10_x'])['Miles'].max().unstack()
# Add in state variable to dataset as first variable
df.insert(0, 'state', [state] * len(df))
# Save each unique state distance matrix
flnm = "stateDistances/" + state + "distMatrix.csv"
df.to_csv(flnm)
| [
"[email protected]"
] | |
a6107f7c2a4bb3af826b9f687c7e29f7e8bb9073 | afd9c05005fba757f764c6bc8b102437b2901750 | /models/store.py | 126dd891e12ed31804a15c82d532a448d0657253 | [
"MIT"
] | permissive | LEMSantos/udemy-flask_rest_api_advanced | 083e29c82784ffa07f6b87dd0e8d9083f0df939e | c93f6643272154900f40b68f8afb33ff0e796d0a | refs/heads/main | 2023-04-23T10:14:04.132884 | 2021-05-15T15:30:10 | 2021-05-15T15:30:10 | 359,181,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | from typing import List
from db import db
class StoreModel(db.Model):
__tablename__ = 'stores'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), nullable=False, unique=True)
items = db.relationship('ItemModel', lazy='dynamic')
@classmethod
def find_by_name(cls, name: str) -> 'StoreModel':
return cls.query.filter_by(name=name).first()
@classmethod
def find_all(cls) -> List['StoreModel']:
return cls.query.all()
def save_to_db(self) -> None:
db.session.add(self)
db.session.commit()
def delete_from_db(self) -> None:
db.session.delete(self)
db.session.commit()
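# A minimal usage sketch (not executed here; assumes `db` is bound to a Flask
# app, the tables exist, and the calls run inside an application context):
#
#     store = StoreModel(name="My Store")
#     store.save_to_db()
#     found = StoreModel.find_by_name("My Store")
#     found.delete_from_db()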
| [
"[email protected]"
] | |
1244cffa917235f8cc158bfee7ded70f12f8a100 | 5b03cd12bd8ed9e3cc14cdba32417c6012d9ba58 | /Chap 1 WRITING YOUR OWN FUNCTIONS.py | 0c6182594504d64f1cd33241e93252d4d80d1daa | [] | no_license | thediaryofmos/Python-Data-Science-Toolbox-Part-1- | f6b4426486219c3ac868073bbaa44d6400ead03a | 15b622ce1764ceef34317cf81ca0e7b302e44118 | refs/heads/master | 2020-03-26T06:57:14.752097 | 2018-08-14T17:26:40 | 2018-08-14T17:26:40 | 144,629,408 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,299 | py | # Define the function shout
def shout():
"""Print a string with three exclamation marks"""
# Concatenate the strings: shout_word
shout_word='congratulations'+'!!!'
# Print shout_word
print(shout_word)
# Call shout
shout()
######
# Define shout with the parameter, word
def shout(word):
"""Print a string with three exclamation marks"""
# Concatenate the strings: shout_word
shout_word = word + '!!!'
# Print shout_word
print(shout_word)
# Call shout with the string 'congratulations'
shout('congratulations')
######
# Define shout with the parameter, word
def shout(word):
"""Return a string with three exclamation marks"""
# Concatenate the strings: shout_word
shout_word=word+'!!!'
# Replace print with return
return(shout_word)
# Pass 'congratulations' to shout: yell
yell=shout('congratulations')
# Print yell
print(yell)
######
# Define shout with parameters word1 and word2
def shout(word1, word2):
"""Concatenate strings with three exclamation marks"""
# Concatenate word1 with '!!!': shout1
shout1=word1+'!!!'
# Concatenate word2 with '!!!': shout2
shout2=word2+'!!!'
# Concatenate shout1 with shout2: new_shout
new_shout=shout1+shout2
# Return new_shout
return new_shout
# Pass 'congratulations' and 'you' to shout(): yell
yell=shout('congratulations','you')
# Print yell
print(yell)
######
# Define shout_all with parameters word1 and word2
def shout_all(word1, word2):
# Concatenate word1 with '!!!': shout1
shout1=word1+'!!!'
# Concatenate word2 with '!!!': shout2
shout2=word2+'!!!'
# Construct a tuple with shout1 and shout2: shout_words
shout_words=(shout1,shout2)
# Return shout_words
return shout_words
# Pass 'congratulations' and 'you' to shout_all(): yell1, yell2
yell1,yell2=shout_all('congratulations','you')
# Print yell1 and yell2
print(yell1)
print(yell2)
######
# Import pandas
import pandas as pd
# Import Twitter data as DataFrame: df
df = pd.read_csv('tweets.csv')
# Initialize an empty dictionary: langs_count
langs_count = {}
# Extract column from DataFrame: col
col = df['lang']
# Iterate over lang column in DataFrame
for entry in col:
# If the language is in langs_count, add 1
if entry in langs_count.keys():
langs_count[entry]+=1
# Else add the language to langs_count, set the value to 1
else:
langs_count[entry]=1
# Print the populated dictionary
print(langs_count)
######
# Define count_entries()
def count_entries(df, col_name):
"""Return a dictionary with counts of
occurrences as value for each key."""
# Initialize an empty dictionary: langs_count
langs_count = {}
# Extract column from DataFrame: col
col = df[col_name]
# Iterate over lang column in DataFrame
for entry in col:
# If the language is in langs_count, add 1
if entry in langs_count.keys():
langs_count[entry]+=1
# Else add the language to langs_count, set the value to 1
else:
langs_count[entry]=1
# Return the langs_count dictionary
return(langs_count)
# Call count_entries() on the tweets DataFrame loaded above: result
result = count_entries(df, 'lang')
# Print the result
print(result)
######
| [
"[email protected]"
] | |
527edb431dd1457f28ca1352dd2e56a613093674 | 43e8af55bd953a3e435810574f0d4db8a00bcd82 | /nut/script/brand.py | 48321e0aea25cc7b349bf5cad31bf840e6c63ae8 | [] | no_license | bopopescu/nut | 3818cc66e543870180dae943944ef026f191a385 | 39c58afe7fa7be185b1a3ac64e8c295d16601bd6 | refs/heads/master | 2022-11-18T05:28:14.209311 | 2017-09-29T10:32:54 | 2017-09-29T10:32:54 | 282,010,209 | 0 | 0 | null | 2020-07-23T17:05:57 | 2020-07-23T17:05:56 | null | UTF-8 | Python | false | false | 660 | py | import os, sys
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(BASE_DIR)
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings.production'
from apps.core.models import Entity, Brand
brands = Brand.objects.filter(icon__isnull=False).order_by('name')
for row in brands:
print row.name
# brands = Entity.objects.raw("select id, brand, count(*) from core_entity where brand !='' and status >= 0 group by brand")
# for row in brands:
# print row.brand.strip()
# b = Brand()
# b.name = row.brand.strip()
# try:
# b.save()
# except Exception, e:
# print e.message
# pass
__author__ = 'edison'
| [
"[email protected]"
] | |
8afc4b175d6daf29d8d76024f3fed50f27bd6e88 | f4fee13ab9d3c474b404ded89459c0e198ccb3d9 | /spotinst_sdk/test/test_deployment_action.py | 6d0e24b769ada670d02215aa24799f7e1d896f9a | [
"MIT"
] | permissive | nironkoren/spotinst-sdk-python | 6f195332f8d0fcaf47e7fbf9ae24a0e1d58c4a1b | f7ddca5a2510b9601a83e9d8f0befb8518cfe3c2 | refs/heads/master | 2021-01-13T21:02:08.739229 | 2020-03-09T10:23:29 | 2020-03-09T10:23:29 | 242,493,287 | 0 | 0 | MIT | 2020-02-23T09:58:01 | 2020-02-23T09:58:00 | null | UTF-8 | Python | false | false | 1,556 | py | import os
import unittest
from spotinst_sdk import SpotinstClient
from spotinst_sdk.spotinst_deployment_action import *
class AwsElastigroupTestCase(unittest.TestCase):
def setUp(self):
self.client = SpotinstClient(
auth_token='dummy-token',
account_id='dummy-account')
self.mock_group_json = self.load_group_json()
def create_formatted_deployment_request(self, deployment):
group_request = DeploymentActionRequest(deployment)
excluded_group_dict = self.client.exclude_missing(
json.loads(group_request.toJSON()))
formatted_group_dict = self.client.convert_json(
excluded_group_dict, self.client.underscore_to_camel)
return formatted_group_dict
@staticmethod
def load_group_json():
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'test_lib/input/deployment_action.json')) as group_json:
return json.load(group_json)
# region B/G Deployment
class AwsElastigroupTestDeploymentAction(AwsElastigroupTestCase):
def runTest(self):
da = DeploymentAction(
action_type="DETACH_NEW",
should_handle_all_batches=True,
draining_timeout=60,
should_decrement_target_capacity=False
)
formatted_group_dict = self.create_formatted_deployment_request(da)
actual_request_json = formatted_group_dict
expected_request_json = self.mock_group_json
self.assertDictEqual(actual_request_json, expected_request_json)
| [
"[email protected]"
] | |
62f1bec1bce5d85beddd91380d21928a6282091f | 96d20f2e1f74f5fbc55832a0cb9cb1d8fd3337f6 | /21-occurence_of_a.py | efb5984aab4c4fa2aef6caae0dddb4e2a7d3f7f4 | [] | no_license | Anupama-Regi/python_lab | e2e5e38fd9233493f7a9d25ab5e8a5eb59e29689 | 3f79776a02f5565a3e908bfcfa56e4a2800bff47 | refs/heads/main | 2023-03-03T09:03:48.055327 | 2021-02-16T17:52:48 | 2021-02-16T17:52:48 | 314,258,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | print("*Program that stores a list of first names. Count the occurrences of 'a' within the list.*")
s=input("Enter list of names : ")
l=list(s.split())
print("List of names : ",l)
c=0
#for i in range(s):
# n=input("Enter the name : ")
# l.append(n)
for i in l:
for j in i:
if j in "Aa":
c=c+1
print("Occurrences of 'a' in list : ",c)
#x=s.count('a')
#print(x) | [
"[email protected]"
] | |
1cad745cb3fc25575b6ba2e453d2efb213f69eaa | 96d7fc7b53c881da09b460e4a6e6ad4faabe8fe7 | /spiking_model.py | 759b1c2c76a663d7c51531c7ed660e3d85f7ee65 | [] | no_license | zcqsata/depression-model-tdcs | cb43759264ab5b167e62eb4d36ac313b2fbf8c42 | cb8da08e2b29667aa71c34ae5b45fdbecbf23521 | refs/heads/master | 2022-12-08T04:41:43.837298 | 2020-09-01T18:34:38 | 2020-09-01T18:34:38 | 292,067,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,916 | py | ############################################################################################
#
# Simulation code for "A computational model of Major Depression: The role of glutamate
# dysfunction on cingulo-frontal network dynamics"
# Ramirez-Mahaluf J.P., Roxin A., Mayberg H.S. and Compte A. Cerebral Cortex, 2015
#
############################################################################################
# This Python code requires the installation of Brian www.briansimulator.org
from brian import *
from numpy.fft import rfft,irfft
from scipy.io import savemat
import numpy
#Network parameters
NE=80 # Excitatory neurons, for fast simulations in the article we use 80
NI=20 # Inhibitory neurons, for fast simulations in the article we use 20
#Biophysical parameters
tauav = 2*ms # tau AMPA decay on vACC, this parameter was used to simulate MDD. Mild MDD (2.5%) = 2.05; Moderate MDD (5%) = 2.1; Severe MDD (7.5%) = 2.15
tauad = 2*ms # tau AMPA decay on dlPFC
taun = 100*ms # tau NMDA decay
taux = 2*ms # tau NMDA rise
taug = 10*ms # tau GABA decay
Vt =-50*mvolt # spike threshold
Vr =-55*mvolt # reset value
Elv =-70*mvolt # resting potential ventral ACC, this parameter was used to simulate SSRI treatment.
El =-70*mvolt # resting potential dlPFC
Ven = 16.129*mV
refE= 2*ms # refractory period pyramidal cell
refI= 1*ms # refractory period inhibitory cell
cmE= 500*pF #capacitance pyramidal cell
cmI= 200*pF #capacitance interneuron
tauE =20*ms #tau pyramidal cell
tauI =10*ms #tau interneuron
alpha =0.5*kHz
S=1 #Connectivity sparseness; S=1, all-to-all connectivity was used in the article; use S<1 for sparse random connectivity
N=100/NE # Factor for rescaling the weights according to the number of neurons
tacs_start_time = 2000*ms
tacs_end_time = 3000*ms
#Connection parameters
wgEEN = 0.001761*(1/S)*N #weight excitatory to excitatory through NMDA
wgEEA = 0.0009454*(1/S)*N #weight excitatory to excitatory through AMPA
wgEIN = 0.0012*(1/S)*N #weight excitatory to inhibitory through NMDA
wgEIA = 0.0004*(1/S)*N #weight excitatory to inhibitory through AMPA
wgIEG = 0.005*(1/S)*N #weight inhibitory to excitatory through GABA
wgIIG = 0.004865*(1/S)*N #weight inhibitory to inhibitory through GABA
wgEIA1 = 0.0004*(1/S)*N #weight vACC excitatory to dlPFC inhibitory through AMPA
wgEIA2 = 0.0004*(1/S)*N #weight dlPFC excitatory to vACC inhibitory through AMPA
#equations excitatory cell vACC
eqsE1 = '''
dV/dt = (-gea*V-gen*V/(1+exp(-V/Ven)/3.57)-gi*(V+70*mV)-(V-Elv))/(tauE) + I/cmE : volt
dgea/dt = -gea/(tauav) : 1
dgi/dt = -gi/(taug) : 1
dspre/dt = -spre/(taun)+alpha*xpre*(1-spre) : 1
dxpre/dt = -xpre/(taux) : 1
gen: 1
I: amp
'''
#equations inhibitory cell vACC
eqsI1 = '''
dV/dt = (-gea*V-gen*V/(1+exp(-V/Ven)/3.57)-gi*(V+70*mV)-(V-El))/(tauI) + I/cmI : volt
dgea/dt = -gea/(tauav) : 1
dgi/dt = -gi/(taug) : 1
dspre/dt = -spre/(taun)+alpha*xpre*(1-spre) : 1
dxpre/dt = -xpre/(taux) : 1
gen: 1
I: amp
'''
#equations excitatory cell dlPFC
eqsE2 = '''
dV/dt = (-gea*V-gen*V/(1+exp(-V/Ven)/3.57)-gi*(V+70*mV)-(V-El))/(tauE) + I/cmE : volt
dgea/dt = -gea/(tauad) : 1
dgi/dt = -gi/(taug) : 1
dspre/dt = -spre/(taun)+alpha*xpre*(1-spre) : 1
dxpre/dt = -xpre/(taux) : 1
gen: 1
I: amp
'''
#equations inhibitory cell dlPFC
eqsI2 = '''
dV/dt = (-gea*V-gen*V/(1+exp(-V/Ven)/3.57)-gi*(V+70*mV)-(V-El))/(tauI) + I/cmI : volt
dgea/dt = -gea/(tauad) : 1
dgi/dt = -gi/(taug) : 1
dspre/dt = -spre/(taun)+alpha*xpre*(1-spre) : 1
dxpre/dt = -xpre/(taux) : 1
gen: 1
I: amp
'''
#Populations of neurons:
Pev = NeuronGroup(NE, model= eqsE1, threshold=Vt, reset= Vr, refractory=refE) #vACC excitatory neurons
Piv = NeuronGroup(NI, model= eqsI1, threshold=Vt, reset= Vr, refractory=refI) #vACC inhibitory neurons
Ped = NeuronGroup(NE, model= eqsE2, threshold=Vt, reset= Vr, refractory=refE) #dlPFC excitatory neurons
Pid = NeuronGroup(NI, model= eqsI2, threshold=Vt, reset= Vr, refractory=refI) #dlPFC inhibitory neurons
#Connection NMDA:
selfnmda_v = IdentityConnection(Pev, Pev, 'xpre', weight=1.0) #NMDA connections, excitatory to excitatory neurons in vACC
selfnmda_d = IdentityConnection(Ped, Ped, 'xpre', weight=1.0) #NMDA connections, excitatory to excitatory neurons in dlPFC
#Connections AMPA and GABA:
Ceeav = Connection(Pev, Pev, 'gea', structure='dense') #AMPA connections, excitatory to excitatory neurons in vACC
Ceiav = Connection(Pev, Piv, 'gea', structure='dense') #AMPA connections, excitatory to inhibitory neurons in vACC
Ciev = Connection(Piv, Pev, 'gi', structure='dense') # GABA connections, inhibitory to excitatory neurons in vACC
Ciiv = Connection(Piv, Piv, 'gi', structure='dense') # GABA connections, inhibitory to inhibitory neurons in vACC
Ceead = Connection(Ped, Ped, 'gea', structure='dense')#AMPA connections, excitatory to excitatory neurons in dlPFC
Ceiad = Connection(Ped, Pid, 'gea', structure='dense') #AMPA connections, excitatory to inhibitory neurons in dlPFC
Cied = Connection(Pid, Ped, 'gi', structure='dense')# GABA connections, inhibitory to excitatory neurons in dlPFC
Ciid = Connection(Pid, Pid, 'gi', structure='dense')# GABA connections, inhibitory to inhibitory neurons in dlPFC
Ceiav1 = Connection(Pev, Pid, 'gea' )#AMPA connections, excitatory neurons in vACC target inhibitory neurons in dlPFC
Ceiad1 = Connection(Ped, Piv, 'gea' )#AMPA connections excitatory neurons in dlPFC target inhibitory neurons in vACC
Ceeav.connect_random(Pev, Pev, S, weight=wgEEA) #AMPA connections, excitatory to excitatory neurons in vACC
Ceiav.connect_random(Pev, Piv, S, weight=wgEIA) #AMPA connections, excitatory to inhibitory neurons in vACC
Ciev.connect_random(Piv, Pev, S, weight=wgIEG) # GABA connections, inhibitory to excitatory neurons in vACC
Ciiv.connect_random(Piv, Piv, S, weight=wgIIG) # GABA connections, inhibitory to inhibitory neurons in vACC (wgIIG, consistent with the dlPFC wiring below)
Ceead.connect_random(Ped, Ped, S, weight=wgEEA) #AMPA connections, excitatory to excitatory neurons in dlPFC
Ceiad.connect_random(Ped, Pid, S, weight=wgEIA) #AMPA connections, excitatory to inhibitory neurons in dlPFC
Cied.connect_random(Pid, Ped, S, weight=wgIEG) # GABA connections, inhibitory to excitatory neurons in dlPFC
Ciid.connect_random(Pid, Pid, S,weight=wgIIG) # GABA connections, excitatory to excitatory neurons in dlPFC
Ceiav1.connect_random(Pev, Pid, S, weight=wgEIA1) #AMPA connections, excitatory neurons in vACC target inhibitory neurons in dlPFC
Ceiad1.connect_random(Ped, Piv, S, weight=wgEIA2) #AMPA connections excitatory neurons in dlPFC target inhibitory neurons in vACC
#NMDA synapses
E_nmda_v = asarray(Pev.spre)
E_nmda_d = asarray(Ped.spre)
E_gen_v = asarray(Pev.gen)
E_gen_d = asarray(Ped.gen)
I_gen_v = asarray(Piv.gen)
I_gen_d = asarray(Pid.gen)
#Calculate NMDA contributions
@network_operation(when='start')
def update_nmda():
E_gen_v[:] = wgEEN/wgEEA * numpy.dot(E_nmda_v,Ceeav.W)
I_gen_v[:] = wgEIN/wgEIA * numpy.dot(E_nmda_v,Ceiav.W)
E_gen_d[:] = wgEEN/wgEEA * numpy.dot(E_nmda_d,Ceead.W)
I_gen_d[:] = wgEIN/wgEIA * numpy.dot(E_nmda_d,Ceiad.W)
#@network_operation(when='start')  # decorator commented out, so inject_current is currently inactive
def inject_current():
if (defaultclock.t>tacs_start_time)&(defaultclock.t <tacs_end_time):
Pev.I = 0.00000000000*amp
Piv.I = 0.000000000000*amp
Ped.I = 0.00000000000*amp
Pid.I = 0.000000000000*amp
#External noise:
extinput1E=PoissonGroup(NE,rates=1800*Hz)
extinput1I=PoissonGroup(NI,rates=1800*Hz)
input1_coE=IdentityConnection(extinput1E,Pev,'gea',weight=0.082708)
input1_coI=IdentityConnection(extinput1I,Piv,'gea',weight=0.081)
extinput2E=PoissonGroup(NE,rates=1800*Hz)
extinput2I=PoissonGroup(NI,rates=1800*Hz)
input2_coE=IdentityConnection(extinput2E,Ped,'gea',weight=0.082708)
input2_coI=IdentityConnection(extinput2I,Pid,'gea',weight=0.081)
#Sadness task, emotional signal to vACC
exttaskinput1_on=4500*ms
exttaskinput1_off=5525*ms
exttaskinput1E=PoissonGroup(100,rates=lambda t: (t>exttaskinput1_on)*(t<exttaskinput1_off)*800*Hz)
#taskinput1_coE=IdentityConnection(exttaskinput1E,Pev,'gea',weight=0.0955)
#exttaskinput2_on=50000*ms
#exttaskinput2_off=50250*ms
#exttaskinput2E=PoissonGroup(100,rates=lambda t: (t>exttaskinput2_on)*(t<exttaskinput2_off)*800*Hz)
#taskinput2_coE=IdentityConnection(exttaskinput2E,Pev,'gea',weight=0.0955)
#exttaskinput3_on=55000*ms
#exttaskinput3_off=55250*ms
#exttaskinput3E=PoissonGroup(80,rates=lambda t: (t>exttaskinput3_on)*(t<exttaskinput3_off)*800*Hz)
#taskinput3_coE=IdentityConnection(exttaskinput3E,Pev,'gea',weight=0.0955)
#Working memory task, cognitive signal to dlPFC
exttaskinput4_on=6000*ms
exttaskinput4_off=6525*ms
exttaskinput4E=PoissonGroup(100,rates=lambda t: (t>exttaskinput4_on)*(t<exttaskinput4_off)*800*Hz)
#taskinput4_coE=IdentityConnection(exttaskinput4E,Ped,'gea',weight=0.0955)
#exttaskinput5_on=65000*ms
#exttaskinput5_off=65250*ms
#exttaskinput5E=PoissonGroup(80,rates=lambda t: (t>exttaskinput5_on)*(t<exttaskinput5_off)*800*Hz)
#taskinput5_coE=IdentityConnection(exttaskinput5E,Ped,'gea',weight=0.0955)
#exttaskinput6_on=70000*ms
#exttaskinput6_off=70250*ms
#exttaskinput6E=PoissonGroup(80,rates=lambda t: (t>exttaskinput6_on)*(t<exttaskinput6_off)*800*Hz)
#taskinput6_coE=IdentityConnection(exttaskinput6E,Ped,'gea',weight=0.0955)
#Deep Brain Stimulation (DBS):
#extinput3I=SpikeGeneratorGroup(1,c_[zeros(2597),linspace(0*ms,19996.9*ms,2597)])
#input3_coI=Connection(extinput3I,Piv,'gea',weight=0.03)
#Save files
Miv = SpikeMonitor(Piv)
Mev = SpikeMonitor(Pev)
Mid = SpikeMonitor(Pid)
Med = SpikeMonitor(Ped)
Mv=PopulationRateMonitor(Pev,bin=0.1*second)
Md=PopulationRateMonitor(Ped,bin=0.1*second)
Mvm=PopulationRateMonitor(Pev,bin=0.5*second)
Mdm=PopulationRateMonitor(Ped,bin=0.5*second)
spikes_Ev = FileSpikeMonitor(Pev,'spikes_E_vACC.dat',record=True)
spikes_Ed = FileSpikeMonitor(Ped,'spikes_E_dlPFC.dat',record=True)
spikes_Iv = FileSpikeMonitor(Piv,'spikes_I_vACC.dat',record=True)
spikes_Id = FileSpikeMonitor(Pid,'spikes_I_dlPFC.dat',record=True)
#run
run(20*second)
#plot
subplot(2,2,1)
raster_plot(Mev, title=' vACC')
subplot(2,2,2)
plot(Mv.times,Mv.rate,Mvm.times,Mvm.rate,'ro:')
title('vACC')
ylabel('firing rate (Hz)')
xlabel('time (s)')
subplot(2,2,3)
raster_plot(Med, title='dlPFC')
subplot(2,2,4)
plot(Md.times,Md.rate,Mdm.times,Mdm.rate,'ro:')
title('dlPFC')
ylabel('firing rate (Hz)')
xlabel('time (s)')
show()
spikes_Ev.close_file()
spikes_Ed.close_file()
spikes_Iv.close_file()
spikes_Id.close_file()
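# A minimal follow-up sketch using the scipy.io.savemat import above: store the
# coarse population rates for offline analysis (the output file name is an
# assumption, not part of the original protocol).
savemat("population_rates.mat",
        {"t_vACC": numpy.asarray(Mv.times), "rate_vACC": numpy.asarray(Mv.rate),
         "t_dlPFC": numpy.asarray(Md.times), "rate_dlPFC": numpy.asarray(Md.rate)})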
| [
"[email protected]"
] | |
a785fa75ca5f54312b34d6bd5aac24470cbef85a | ea9d5e38d55d7e69bcb4ae74bb3dfd3028fba4d3 | /open/Alibaba/scripts/restore_full_accuracy_logs.py | 4f38d42ed16346c08f49ba912ba62b8fd81f07fb | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ltechkorea/inference_results_v1.0 | cbe29d0f32c525b74525c1c215bf66d8385f3fd0 | 48e24f151f2625a579d34f0a721ad3698d173dbb | refs/heads/ltech | 2023-07-04T06:38:49.691181 | 2021-08-09T23:40:48 | 2021-08-10T01:33:22 | 387,295,024 | 0 | 0 | NOASSERTION | 2021-07-31T00:54:06 | 2021-07-19T00:20:19 | C++ | UTF-8 | Python | false | false | 8,293 | py | #! /usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.getcwd())
import re
import subprocess
import shutil
import glob
import argparse
import datetime
import json
from scripts.utils import Tree, SortingCriteria, get_system_type
def run_command(cmd, get_output=False, tee=True, custom_env=None):
"""
Runs a command.
Args:
cmd (str): The command to run.
get_output (bool): If true, run_command will return the stdout output. Default: False.
tee (bool): If true, captures output (if get_output is true) as well as prints output to stdout. Otherwise, does
not print to stdout.
"""
print("Running command: {:}".format(cmd))
if not get_output:
return subprocess.check_call(cmd, shell=True)
else:
output = []
if custom_env is not None:
print("Overriding Environment")
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, env=custom_env)
else:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
for line in iter(p.stdout.readline, b""):
line = line.decode("utf-8")
if tee:
sys.stdout.write(line)
sys.stdout.flush()
output.append(line.rstrip("\n"))
ret = p.wait()
if ret == 0:
return output
else:
raise subprocess.CalledProcessError(ret, cmd)
nightly_system_list = [
"A100", "A100-PCIe", "A100-PCIex2", "A100_MIG_1x1g_5gb", "A100x8", "T4", "T4x8", "T4x20", "Xavier", "XavierNX"
]
def download_artifact(username, api_key, artifacts_dir, artifact_id):
print("Checking artifact {:}...".format(artifact_id))
# Check if it's pushed by nightly.
matches = re.match(r"({:})-[\w-]+-\d+-[\w-]+".format("|".join(nightly_system_list)), artifact_id)
is_L1 = matches is not None
if is_L1:
new_path = os.path.join(artifacts_dir, artifact_id + ".gz")
remote_path = "L1/{:}/{:}".format(matches.group(1), artifact_id)
else:
old_path = os.path.join(artifacts_dir, "full-results_" + artifact_id + ".gz")
new_path = os.path.join(artifacts_dir, artifact_id + ".gz")
remote_path = "full_result_logs/full-results_" + artifact_id
if os.path.exists(new_path):
print("File {:} already exists.".format(new_path))
return new_path
print("Downloading artifact {:}...".format(artifact_id))
command_fmt = "cd {:} && curl -u{:}:{:} -O \"https://urm.nvidia.com/artifactory/sw-mlpinf-generic/{:}.gz\""
command = command_fmt.format(artifacts_dir, username, api_key, remote_path)
run_command(command)
if not is_L1:
# Strip the 'full-results_' prefix
shutil.move(old_path, new_path)
return new_path
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--artifactory_username",
help="Username for Artifactory",
)
parser.add_argument(
"--artifactory_api_key",
help="API key for Artifactory",
)
parser.add_argument(
"--artifacts_dir",
help="Path to directory that stores the artifacts",
default="build/artifacts"
)
parser.add_argument(
"--result_ids",
help="Comma separated list of unique result IDs",
default=None
)
parser.add_argument(
"--metadata_file",
help="File that stores metadata about these results",
default="results_metadata.json"
)
parser.add_argument(
"--division",
help="Division: open/closed",
choices=["open", "closed"],
default="closed"
)
parser.add_argument(
"--systems",
help="Comma separated list of system IDs",
default="*"
)
parser.add_argument(
"--benchmarks",
help="Comma separated list of benchmarks. Use official names (i.e. dlrm-99.9 instead of dlrm)",
default="*"
)
parser.add_argument(
"--scenarios",
help="Comma separated list of scenarios.",
default="*"
)
parser.add_argument(
"--test_ids",
help="Comma separated list of test ids (i.e. TEST01,TEST04-B).",
default="TEST01"
)
return parser.parse_args()
def main():
args = get_args()
metadata = None
if os.path.exists(args.metadata_file):
with open(args.metadata_file) as f:
metadata = json.load(f)
metadata = Tree(starting_val=metadata)
artifact_ids = set() if args.result_ids is None else set(args.result_ids.split(","))
test_ids = [i for i in args.test_ids.split(",") if len(i) > 0]
# Populate the set of all artifacts we should get
for system in args.systems.split(","):
for benchmark in args.benchmarks.split(","):
for scenario in args.scenarios.split(","):
res_id = metadata.get([system, benchmark, scenario, "accuracy", "result_id"], default=None)
if res_id is not None:
artifact_ids.add(res_id)
for test_id in test_ids:
res_id = metadata.get([system, benchmark, scenario, "compliance", test_id, "accuracy", "result_id"], default=None)
if res_id is not None:
artifact_ids.add(res_id)
# Download all
for artifact_id in artifact_ids:
download_artifact(args.artifactory_username, args.artifactory_api_key, args.artifacts_dir, artifact_id)
# Prepare to extract logs into build/full_results
extract_map = {}
for system in args.systems.split(","):
for benchmark in args.benchmarks.split(","):
for scenario in args.scenarios.split(","):
res_id = metadata.get([system, benchmark, scenario, "accuracy", "result_id"], default=None)
if res_id is None:
continue
tarball_path = os.path.join(args.artifacts_dir, res_id + ".gz")
archive_path = "build/full_results/results/{:}/{:}/{:}/accuracy/mlperf_log_accuracy.json ".format(
system,
benchmark,
scenario
)
if tarball_path in extract_map:
extract_map[tarball_path] += archive_path
else:
extract_map[tarball_path] = archive_path
for test_id in test_ids:
res_id = metadata.get([system, benchmark, scenario, "compliance", test_id, "accuracy", "result_id"], default=None)
                    if res_id is not None:
                        # Extract from the tarball that belongs to this compliance
                        # run rather than the accuracy tarball computed above.
                        compliance_tarball = os.path.join(args.artifacts_dir, res_id + ".gz")
                        archive_path = "build/full_results/compliance/{:}/{:}/{:}/{:}/accuracy/mlperf_log_accuracy.json ".format(
                            system,
                            benchmark,
                            scenario,
                            test_id
                        )
                        if compliance_tarball in extract_map:
                            extract_map[compliance_tarball] += archive_path
                        else:
                            extract_map[compliance_tarball] = archive_path
# Actually extract the files
for tarball_path in extract_map:
archive_paths = extract_map[tarball_path]
print("Extracting files {:} from tarball {:}...".format(archive_paths, tarball_path))
cmd = "tar -xvzf {:} {:}".format(tarball_path, archive_paths)
run_command(cmd)
# Move the files to results/ and compliance/
glob_to_logs = os.path.join("build/full_results", "**", "mlperf_log_accuracy.json")
all_logs = glob.glob(glob_to_logs, recursive=True)
for log in all_logs:
dst = log.replace("build/full_results/", "")
print("Moving {:} -> {:}".format(log, dst))
shutil.move(log, dst)
print("Done!")
if __name__ == '__main__':
main()
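# Example invocation (a sketch -- the credentials and filter values below are
# placeholders, not values from this repository):
#   python3 scripts/restore_full_accuracy_logs.py \
#       --artifactory_username <user> --artifactory_api_key <key> \
#       --systems A100x8 --benchmarks dlrm-99.9 --scenarios Offline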
| [
"[email protected]"
] | |
131bd775fbf3660157f4776eed1ec7adfd17b2bf | 4bb2dd97c277feaa7da5fbe08d3cdf979fef27d2 | /Handwritten digit recognition/Handwritten_digit_recognition.py | 406487547b10e3f08baf66c9273b0b4ea142b525 | [] | no_license | mtraino/Projects | 5aec66678aea70701646fa631fa1de7e7fba09f9 | acd4e52c7cf3f353cc749f2f48c83bbe8209bb46 | refs/heads/master | 2021-06-21T00:11:01.867485 | 2021-01-27T16:19:10 | 2021-01-27T16:19:10 | 168,620,227 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,344 | py | """
Matthew Traino
Handwritten digit recognition
Use a neural network for simple hand written number classification.
Python 3.6.7
"""
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
import ipdb
# Loading the data
(training_data, training_labels), (testing_data, testing_labels) = mnist.load_data()
# Setting the seed
np.random.seed(3520)
# Setting variables
num_classes = 10
batch_size = 100
epochs = 100
num_inputs = training_data.shape
learning_rate = 0.02
# Converting the labels from 0-9 to their binary form
training_labels = keras.utils.to_categorical(training_labels)
testing_labels_binary = keras.utils.to_categorical(testing_labels)
# Reshaping the data
training_data.shape = (60000, 784)
testing_data.shape = (10000, 784)
# Creating the neural network
# Adding the layers in a 784-300-300-10 model with relu for the hidden layers' activations and softmax for the output
model = Sequential()
model.add(Dense(units=300, activation='relu', input_dim=784))
model.add(Dense(units=300, activation='relu'))
model.add(Dense(units=num_classes, activation='softmax'))
model.summary()
sgd = keras.optimizers.SGD(lr=learning_rate)
model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['accuracy'])
# Training the model
training = model.fit(training_data, training_labels,
batch_size=batch_size,
epochs=epochs,
verbose=2,
validation_data=(testing_data, testing_labels_binary))
score = model.evaluate(testing_data, testing_labels_binary, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# Plotting accuracy by epochs
Y = np.arange(0, 1, .05) # array of all the numbers between 0 and 1 with .05 increments
X = np.arange(0, epochs, 10) # array of all the numbers between 0 and the number of epochs run incrementing by 10s
plt.title('Accuracy vs Epochs')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.yticks(Y)
plt.xticks(X)
plt.plot(training.history.get('accuracy', training.history.get('acc')))
plt.show()
pred = model.predict(testing_data)
pred = np.argmax(pred, axis=1)
b = confusion_matrix(testing_labels, pred)
print("Confusion matrix")
print(b)
#ipdb.set_trace()
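# A small follow-up sketch: per-class accuracy derived from the confusion
# matrix above (row i of b holds the counts for true digit i).
per_class_acc = b.diagonal() / b.sum(axis=1)
print("Per-class accuracy:", np.round(per_class_acc, 3))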
| [
"[email protected]"
] | |
231e4d07e29b17f02396788300517879e737412d | 8da5e4898e3371da60f9b59683709ff9fe7d2426 | /PostProcesing.py | 41c580d8bcdf487d70e8da6e0090807f5cafa7a0 | [] | no_license | deepakkumar18035/ScriptToAnimation | 29664028b690c84814da75804ac01d3f29ef79b5 | ea81be4837ce21b47742d46e45cf6a3d783984ac | refs/heads/main | 2023-04-18T05:29:27.869645 | 2021-05-05T03:29:54 | 2021-05-05T03:29:54 | 362,127,932 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | import moviepy.video.io.ImageSequenceClip
import moviepy.editor as mpe
import sys
import os
destinFolder = "VideoOut/"
def CleanDestinFolder():
for x in os.listdir(destinFolder):
os.remove(destinFolder + x)
return
def combine_audio(vidname, audname, outname, fps=24):
my_clip = mpe.VideoFileClip(vidname)
audio_background = mpe.AudioFileClip(audname)
final_clip = my_clip.set_audio(audio_background)
final_clip.write_videofile(outname,fps=fps)
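# A minimal usage sketch for the helper above (the paths are placeholders):
#   combine_audio("Final/Out.mp4", "Audio/voice.wav", "Final/OutWithAudio.mp4")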
text = sys.argv[1].split(".")[0]
fps=24
audname = "Audio/"+text+".wav"
out = "Final/Out.mp4"
# Sort the frame files so the clip assembles them in order (os.listdir order is arbitrary)
image_files = [destinFolder+'/'+img for img in sorted(os.listdir(destinFolder)) if img.endswith(".png")]
clip = moviepy.video.io.ImageSequenceClip.ImageSequenceClip(image_files, fps=fps)
if text+'.wav' in os.listdir("Audio/"):
audio_background = mpe.AudioFileClip(audname)
final_clip = clip.set_audio(audio_background)
final_clip.write_videofile(out)
else:
print("No audio generated.")
clip.write_videofile(out)
print("Video generated sucessfully....")
"""
CleanDestinFolder()""" | [
"[email protected]"
] | |
1757b4ada40fc740822f3b724ac68b278d18e210 | 400797adf8ff2a1e72e7d9782e114c1ce29fcf1b | /guoanan/ybsmt/manage.py | 36314349be59a9bf735c1ec682cbf34872ee8cfa | [] | no_license | guoanan2013/guoanan | d42106fc497bb19dc64549f01825b319e24f219c | fa79cea662f685aadaa2ae81d89c6743f04b6beb | refs/heads/master | 2016-09-05T12:47:07.268876 | 2014-04-19T08:01:02 | 2014-04-19T08:01:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ManagementTool.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
4861fd1d218577378ff34c1e71ad307195501e93 | d0ba48b0f15020009d8ccdc0cac913a2f310734d | /Triplet_DataLoader.py | 7928acb3a90914f36250f9915487758c22124d33 | [] | no_license | hangxiu/MetricEmbeddingNet | 4bf1c61ba9d1dfd167621f9b4d4230f527b46f06 | e9ca2efc41a88bb87378635ac62e9e0a1b26cbaa | refs/heads/master | 2022-12-20T05:57:21.797528 | 2020-09-12T17:22:50 | 2020-09-12T17:22:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,714 | py | import torch.utils.data.dataloader as dataloader
import os
import torch
import torchaudio
import numpy as np
from random import sample as Sampler
from random import shuffle as shuffle
import matplotlib.pyplot as plt
import math
import time
PATH = "/home/lucas/PycharmProjects/Papers_with_code/data/AMI/triplet_splits"
class TripletLoader(torch.utils.data.Dataset):
def __init__(self,base_path, anchor_positive_pairs,sample_list,train=True):
self.base_path = base_path
self.anchor_positive_pairs = []
self.sample_list = []
self.train = train
for line in open(os.path.join(self.base_path, sample_list)):
self.sample_list.append((line.split()[0], line.split()[1]))
for line in open(os.path.join(self.base_path,anchor_positive_pairs)):
negative = Sampler([sample[0] for sample in self.sample_list if sample[1] != line.split()[2]],1)
self.anchor_positive_pairs.append((line.split()[0],line.split()[1], negative[0]))
#print(line.split()[0], line.split()[1], negative[0])
shuffle(self.anchor_positive_pairs)
if self.train:
self.anchor_positive_pairs = self.anchor_positive_pairs[0:int(0.8*len(self.anchor_positive_pairs))]
else:
self.anchor_positive_pairs = self.anchor_positive_pairs[int(0.8*len(self.anchor_positive_pairs)):]
def __getitem__(self,index):
anchor, positive, negative = str(self.anchor_positive_pairs[index][0]), str(self.anchor_positive_pairs[index][1]), str(self.anchor_positive_pairs[index][2])
anchor, sample_rate = torchaudio.load(os.path.join(self.base_path,anchor))
positive, sample_rate = torchaudio.load(os.path.join(self.base_path,positive))
negative, sample_rate = torchaudio.load(os.path.join(self.base_path,negative))
anchor_specgram = torchaudio.transforms.Spectrogram(normalized=True, power=1, n_fft=400, hop_length=100)(anchor)
positive_specgram = torchaudio.transforms.Spectrogram(normalized=True, power=1, n_fft=400, hop_length=100)(positive)
#negatives = [sample[0] for sample in self.sample_list if sample[1] != label]
#random_state = np.random.RandomState(29)
#negative = Sampler(negatives,1)
#negative, sample_rate = torchaudio.load(os.path.join(self.base_path,negative[0]))
negative_specgram = torchaudio.transforms.Spectrogram(normalized=True, power=1, n_fft=400, hop_length=100)(negative)
return anchor_specgram,positive_specgram,negative_specgram
def __len__(self):
return len(self.anchor_positive_pairs)
class Selective_Loader(torch.utils.data.Dataset):
def __init__(self, base_path, sample_list, label, train=True, negative=True):
self.base_path = base_path
self.sample_list = sample_list
self.train = train
self.label = label
self.samples = []
self.negative = negative
for line in open(os.path.join(self.base_path, sample_list)):
self.samples.append((line.split()[0], line.split()[1]))
if self.negative:
self.samples = [sample[0] for sample in self.samples if sample[1] != self.label]
print(self.samples)
shuffle(self.samples)
else:
self.samples = [sample[0] for sample in self.samples if sample[1] == self.label]
print(self.samples)
shuffle(self.samples)
def __getitem__(self, index):
sample_name = str(self.samples[index])
track , sample_rate = torchaudio.load(os.path.join(self.base_path, sample_name))
spectrogram = torchaudio.transforms.Spectrogram(normalized=True, power=1, n_fft=400, hop_length=100)(track)
return spectrogram
#test_loader = Negative_Loader(base_path="/home/lucas/PycharmProjects/Papers_with_code/data/AMI/triplet_splits", sample_list="sample_list.txt",label='5', train=True, negative=False)
class Frame_Loader:
def __init__(self,path, frame_list):
self.path = path
self.frame_list = frame_list
self.track, self.sample_rate = torchaudio.load(self.path)
    def __getitem__(self, index):
        start_sample = int(self.frame_list[index][0]*self.sample_rate)
        end_sample = int(self.frame_list[index][1]*self.sample_rate)
        # Slice out the requested frame before computing the spectrogram
        segment = self.track[:, start_sample:end_sample]
        spectrogram = torchaudio.transforms.Spectrogram(normalized=True, power=1, n_fft=400, hop_length=100)(segment)
        return spectrogram
def __len__(self):
return len(self.frame_list)
class Triplet_Time_Loader:
def __init__(self, path, spectrogram=True, train=True):
self.path = path
self.as_spectrogram = spectrogram
self.samples = []
self.samples = [(line.split()[0], line.split()[1], line.split()[2], line.split()[3], line.split()[4]) for line in open(self.path)]
#shuffle(self.samples)
if train:
self.samples = self.samples[0:int(0.8 * len(self.samples))]
print("TRAIN LENGTH", len(self.samples))
else:
self.samples = self.samples[int(0.8 * len(self.samples)):]
print("TEST LENGTH", len(self.samples))
def __getitem__(self, index):
sample, string_label, int_label, start_time, stop_time = self.samples[index][0], self.samples[index][1], int(self.samples[index][2]), int(self.samples[index][3]), int(self.samples[index][4])
track, sample_rate = torchaudio.load(sample)
        # print(torch.mean(track))  # debug output, disabled
        # print(sample_rate)  # debug output, disabled
track = track[0][(start_time):(stop_time)]
if self.as_spectrogram:
track = track.view(1, -1)
            # print(track)  # debug output, disabled
spectrogram = torchaudio.transforms.Spectrogram(normalized=True, power=1, n_fft=400, hop_length=100)(track)
return spectrogram, torch.tensor(int_label), string_label
else:
return track, torch.tensor(int_label), string_label
def __len__(self):
return len(self.samples)
class Spectrogram_Loader(torch.utils.data.Dataset):
def __init__(self, filename, mel=False):
"""
Load spectrograms from given input audio snippet
:param filename: .txt file containing snippet information
:param mel: bool --> return melspectrogram
"""
self.path = filename
self.mel = mel
self.samples = [(line.split()[0], line.split()[1], line.split()[2], line.split()[3], line.split()[4]) for line
in open(self.path)]
print("The length is: {}".format(len(self.samples)))
def __getitem__(self, index):
sample, string_label, int_label, start_time, stop_time = self.samples[index][0], self.samples[index][1], int(
self.samples[index][2]), int(self.samples[index][3]), int(self.samples[index][4])
track, sample_rate = torchaudio.backend.sox_backend.load_wav(sample, normalization=False)
track = track[0][(start_time):(stop_time)]
track = track.view(1, -1)
if self.mel == False:
#print(track.size())
spectrogram = torchaudio.transforms.Spectrogram(normalized=True, power=1, n_fft=400, hop_length=100)(track)
#print(spectrogram.size())
return spectrogram, torch.tensor(int_label), string_label
else:
            # Apply the mel transform to the track (instantiating it alone yields the transform object, not a spectrogram)
            spectrogram = torchaudio.transforms.MelSpectrogram(sample_rate=sample_rate, n_fft=400, hop_length=100, n_mels=128)(track)
            return spectrogram, torch.tensor(int_label), string_label
def __len__(self):
return len(self.samples)
class Triplet_Tensor_Loader:
def __init__(self, path,spectrogram=True, train=True):
self.path = path
self.as_spectrogram = spectrogram
self.samples = []
self.samples = [(line.split()[0], line.split()[1], line.split()[2], line.split()[3], line.split()[4]) for line in open(self.path)]
#shuffle(self.samples)
if train:
self.samples = self.samples[0:int(0.8 * len(self.samples))]
print("TRAIN LENGTH", len(self.samples))
else:
self.samples = self.samples[int(0.8 * len(self.samples)):]
print("TEST LENGTH", len(self.samples))
def __getitem__(self, index):
sample, string_label, int_label, start_time, stop_time = self.samples[index][0], self.samples[index][1], int(self.samples[index][2]), int(self.samples[index][3]), int(self.samples[index][4])
track = torch.load(sample)
track = track[(start_time):(stop_time)]
if self.as_spectrogram:
track = track.view(1, -1)
spectrogram = torchaudio.transforms.Spectrogram(normalized=True, power=1, n_fft=400, hop_length=100)(track)
return spectrogram, torch.tensor(int_label), string_label
else:
return track, torch.tensor(int_label), string_label
def __len__(self):
return len(self.samples)
class Single_Speaker_Loader:
"""
The Single_Speaker_Loader class is used to load samples from a SINGLE SPECIFIED SPEAKER
path: Path to the sample list (txt file)
speaker: Label of desired speaker
"""
def __init__(self,path, speaker):
self.path = path
self.samples = [(line.split()[0], line.split()[1], line.split()[2], line.split()[3], line.split()[4]) for line in open(self.path)]
self.samples = [sample for sample in self.samples if (sample[1] == speaker)]
def __getitem__(self, index):
sample, string_label, int_label, start_time, stop_time = self.samples[index][0], self.samples[index][1], int(
self.samples[index][2]), int(self.samples[index][3]), int(self.samples[index][4])
track, sample_rate = torchaudio.load(sample)
track = track[0][(start_time * sample_rate):(stop_time * sample_rate)]
track = track.view(1, -1)
spectrogram = torchaudio.transforms.Spectrogram(normalized=True, power=1, n_fft=400, hop_length=100)(track)
return spectrogram, torch.tensor(int_label), string_label
def __len__(self):
return len(self.samples)
class Window_Loader(torch.utils.data.Dataset):
def __init__(self, filename,windowed=True, window_length=0.2, overlap=0.01):
self.path = filename
self.samples = []
self.samples = [(line.split()[0], line.split()[1], line.split()[2], line.split()[3], line.split()[4]) for line in open(self.path)]
self.window_length = int(window_length*16000)
self.overlap = int(overlap*16000)
self.windowed = windowed
def __getitem__(self, index):
sample, string_label, int_label, start_time, stop_time = self.samples[index][0], self.samples[index][1], int(self.samples[index][2]), int(self.samples[index][3]), int(self.samples[index][4])
track = torchaudio.backend.sox_backend.load(sample, normalization=True)
track, sample_rate = track[0], track[1]
track = track[0][(start_time):(stop_time)]
if self.windowed:
n_windows = 1 + math.floor((len(track)-self.window_length)/(self.window_length-self.overlap))
n_windows = int(n_windows)
window_tensor = torch.zeros(n_windows, self.window_length)
for i in range(n_windows):
                # Hop size must match the n_windows formula above (window length minus overlap)
                offset = (self.window_length - self.overlap) * i
window_tensor[i, :] = track[offset:offset+self.window_length]
return window_tensor, torch.tensor(int_label), string_label
else:
return track, torch.tensor(int_label), string_label
def __len__(self):
return len(self.samples)
#test = Triplet_Time_Loader(path=os.path.join('/home/lucas/PycharmProjects/Papers_with_code/data/AMI/amicorpus_individual/Extracted_Speech','trimmed_sample_list.txt'))
#print(test.__getitem__(1))
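# A minimal usage sketch (the manifest file name is an assumption; each line must
# contain: <wav path> <string label> <int label> <start sample> <stop sample>):
# loader = torch.utils.data.DataLoader(Spectrogram_Loader("sample_list.txt"),
#                                      batch_size=4, shuffle=True)
# for spectrograms, int_labels, string_labels in loader:
#     print(spectrograms.shape)
#     break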
| [
"[email protected]"
] | |
de208f04509fcd32302469aab73510ac8b2d37a0 | a6ae30df32be713ea300f3c855be34c400b60a2a | /app/analyseData.py | 991a469b07eb6b7eb5687659b63e427c6e6e3046 | [
"MIT"
] | permissive | amelie-fri/munch-api | 894f54e430443876c9ed4c1f43ff8b3cd3c4e646 | cbb205acbb5b1a107862cd8a53197de5317a26e4 | refs/heads/master | 2023-02-12T22:44:16.585945 | 2021-01-04T15:46:53 | 2021-01-04T15:46:53 | 295,173,268 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,362 | py | # script used to analyse how successful the approaches to extracting data are
from dataManager import parentManager
from TeiParser import Family
import os
import pprint
analysisFile = "analyse.txt"
# write to analysis file - analyse.txt
def writeToFile(_text, mode="a", pretty=None):
# Append to file
with open(analysisFile, mode, encoding="utf-8") as _file:
if pretty:
pprint.pprint(_text, _file)
else:
print(_text, file=_file)
# path to "_data/N" folder
path_N = os.path.join("_data", "N")
# create the parent manager
pm_N = parentManager(path_N)
# Initialise file
writeToFile("Data Analysis", mode="w")
writeToFile("File: {}".format(analysisFile))
writeToFile("Data: {}".format(path_N))
# Check all .xml files in directory
allFiles = []
for item in os.listdir(path_N):
if os.path.splitext(item)[-1].lower() == ".xml":
allFiles.append(item)
# Initialise
foundFiles = []
types = {}
dates = {"when": {}, "from": {}, "to": {}}
whenCount = 0
# For each parent
for i, parentFile in enumerate(pm_N.parents):
    # Print progress to command line
percent = str(int((100 * (i + 1) / len(pm_N.parents))))
print(f"\r Progress: {percent}% ", end="\r")
# Initialize parent object
parent = Family(os.path.join(path_N, parentFile))
# append parent filename to foundFiles
foundFiles.append(parent.filename)
# if the parent has a type
if parent.data.type:
# if the type already exists in types
if parent.data.type in types:
# increment type
types[parent.data.type] += 1
else:
# start counting type
types[parent.data.type] = 1
# for each child
for child in parent.children:
# append childname to foundfiles
foundFiles.append(child.filename)
# if child has date dict
if child.date:
# for each key in dates
for item in dates:
# if date key exists in child date
if item in child.date:
# for each date in dates key
for date in child.date[item]:
# pick only the year number
if "-" in date:
date = date.split("-")[0]
# count when
if item == "when":
whenCount += 1
# increment date
if date in dates[item]:
dates[item][date] += 1
# start counting date
else:
dates[item][date] = 1
# Add the count to the dict
dates["whenCount"] = whenCount
# Build/Calculate Info
info = {
"files": len(allFiles),
"found": len(foundFiles),
"notFound": len(allFiles) - len(foundFiles),
"parents": len(pm_N.parents),
"children": len(foundFiles) - len(pm_N.parents),
}
# Write Types
writeToFile("\nTYPES")
writeToFile(types, pretty=True)
# Write Dates
writeToFile("\nDATES")
writeToFile(dates, pretty=True)
# Write Info
writeToFile("\nINFO")
writeToFile(info, pretty=True)
# Which files are not found?
notFound = list(set(allFiles) - set(foundFiles))
# Write Output
writeToFile("\nFiles that are not found")
for i, item in enumerate(notFound):
writeToFile("\t" + item)
| [
"[email protected]"
] | |
d3fe713ff5f60fc69efa875fdd910f323bdca4de | f9e80950ce3db2fc7075329d62fffd736c09f705 | /helpers.py | e01f9832461b7309290ef4b92cbb8d8f30f9dfba | [] | no_license | pjcunningham/kivy-slideshow | 9535a710ae80444978fcd1cbbbad4ce4d634e0fe | 7b7ee0aa59be1835926fde8101c60c88c68325b3 | refs/heads/master | 2021-09-04T00:39:49.667576 | 2018-01-13T12:52:16 | 2018-01-13T12:52:16 | 115,718,229 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | # coding: utf-8
__author__ = 'Paul Cunningham'
__copyright__ = 'Copyright 2017, Paul Cunningham'
__all__ = ('InformationPopup', )
from kivy.uix.popup import Popup
from kivy.properties import StringProperty
from kivy.factory import Factory
from kivy.lang import Builder
from kivy.clock import Clock
Builder.load_string('''
<InformationPopup>:
auto_dismiss: True
size_hint: None, None
size: 400, 200
on_open: root.dismiss_trigger()
title: root.title
Label:
text: root.text
''')
class InformationPopup(Popup):
title = StringProperty('Information')
text = StringProperty('')
def __init__(self, time=1.5, **kwargs):
super(InformationPopup, self).__init__(**kwargs)
self.dismiss_trigger = Clock.create_trigger(self.dismiss, time)
Factory.register('InformationPopup', cls=InformationPopup) | [
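# A minimal usage sketch (assumes a running Kivy app; the popup dismisses itself
# `time` seconds after it opens):
#
#     popup = InformationPopup(title='Saved', text='Slideshow exported', time=2)
#     popup.open()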
"[email protected]"
] | |
fba3d916cd1e71365c605eae8806f8a831fd0736 | 5e8ed29add41ca0a1ac0eacbda447292313874c3 | /algo/neural_nets/models/transformers/common/utils.py | 2f252bd98a5e8af2e8a4c398bd737a204108ced7 | [] | no_license | TharinduDR/Offenseval_2020 | aa4961937bc98158b2e24c28934411df2a853c2d | a08764ae5980223ae8577c9fd6f160d8a450cfd6 | refs/heads/master | 2023-07-20T06:12:48.467268 | 2020-03-03T17:08:30 | 2020-03-03T17:08:30 | 224,734,186 | 2 | 0 | null | 2023-07-06T21:55:29 | 2019-11-28T21:41:28 | Jupyter Notebook | UTF-8 | Python | false | false | 13,750 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERT classification fine-tuning: utilities to work with GLUE tasks """
from __future__ import absolute_import, division, print_function
import csv
import logging
from multiprocessing import Pool, cpu_count
from tqdm.auto import tqdm
from util.logginghandler import TQDMLoggingHandler
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[TQDMLoggingHandler()])
csv.field_size_limit(2147483647)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""
Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
def convert_example_to_feature(
example_row,
pad_token=0,
sequence_a_segment_id=0,
sequence_b_segment_id=1,
cls_token_segment_id=1,
pad_token_segment_id=0,
mask_padding_with_zero=True,
sep_token_extra=False
):
example, max_seq_length, tokenizer, output_mode, cls_token_at_end, cls_token, sep_token, cls_token_segment_id, pad_on_left, pad_token_segment_id, sep_token_extra, multi_label, stride = example_row
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3". " -4" for RoBERTa.
special_tokens_count = 4 if sep_token_extra else 3
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count)
else:
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = 3 if sep_token_extra else 2
if len(tokens_a) > max_seq_length - special_tokens_count:
tokens_a = tokens_a[:(max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = tokens_a + [sep_token]
segment_ids = [sequence_a_segment_id] * len(tokens)
if tokens_b:
tokens += tokens_b + [sep_token]
segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
if cls_token_at_end:
tokens = tokens + [cls_token]
segment_ids = segment_ids + [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
# if output_mode == "classification":
# label_id = label_map[example.label]
# elif output_mode == "regression":
# label_id = float(example.label)
# else:
# raise KeyError(output_mode)
return InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=example.label
)
def convert_example_to_feature_sliding_window(
example_row,
pad_token=0,
sequence_a_segment_id=0,
sequence_b_segment_id=1,
cls_token_segment_id=1,
pad_token_segment_id=0,
mask_padding_with_zero=True,
sep_token_extra=False,
):
example, max_seq_length, tokenizer, output_mode, cls_token_at_end, cls_token, sep_token, cls_token_segment_id, pad_on_left, pad_token_segment_id, sep_token_extra, multi_label, stride = example_row
if stride < 1:
stride = int(max_seq_length * stride)
bucket_size = max_seq_length - (3 if sep_token_extra else 2)
token_sets = []
tokens_a = tokenizer.tokenize(example.text_a)
special_tokens_count = 3 if sep_token_extra else 2
if len(tokens_a) > bucket_size:
token_sets = [tokens_a[i:i + bucket_size] for i in range(0, len(tokens_a), stride)]
else:
token_sets.append(tokens_a)
if example.text_b:
raise ValueError("Sequence pair tasks not implemented for sliding window tokenization.")
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
input_features = []
for tokens_a in token_sets:
tokens = tokens_a + [sep_token]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens = tokens + [cls_token]
segment_ids = segment_ids + [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
# if output_mode == "classification":
# label_id = label_map[example.label]
# elif output_mode == "regression":
# label_id = float(example.label)
# else:
# raise KeyError(output_mode)
input_features.append(
InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=example.label
)
)
return input_features
def convert_examples_to_features(
examples,
max_seq_length,
tokenizer,
output_mode,
cls_token_at_end=False,
sep_token_extra=False,
pad_on_left=False,
cls_token="[CLS]",
sep_token="[SEP]",
pad_token=0,
sequence_a_segment_id=0,
sequence_b_segment_id=1,
cls_token_segment_id=1,
pad_token_segment_id=0,
mask_padding_with_zero=True,
process_count=cpu_count() - 2,
multi_label=False,
silent=False,
use_multiprocessing=True,
sliding_window=False,
flatten=False,
stride=None
):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
examples = [(example, max_seq_length, tokenizer, output_mode, cls_token_at_end, cls_token, sep_token,
cls_token_segment_id, pad_on_left, pad_token_segment_id, sep_token_extra, multi_label, stride) for
example in examples]
if use_multiprocessing:
if sliding_window:
logging.info('sliding_window enabled')
with Pool(process_count) as p:
features = list(tqdm(p.imap(convert_example_to_feature_sliding_window, examples, chunksize=500),
total=len(examples), disable=silent))
if flatten:
features = [feature for feature_set in features for feature in feature_set]
logging.info(f'{len(features)} features created from {len(examples)} samples.')
else:
with Pool(process_count) as p:
features = list(tqdm(p.imap(convert_example_to_feature, examples, chunksize=500), total=len(examples),
disable=silent))
else:
if sliding_window:
logging.info('sliding_window enabled')
features = [convert_example_to_feature_sliding_window(example) for example in
tqdm(examples, disable=silent)]
if flatten:
features = [feature for feature_set in features for feature in feature_set]
logging.info(f'{len(features)} features created from {len(examples)} samples.')
else:
features = [convert_example_to_feature(example) for example in tqdm(examples, disable=silent)]
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
| [
"[email protected]"
] | |
47a668f3b3c71dc906e455e0af643c0e9956d44c | d7c4fb488735495bf180ba8862b7526273cfe6ab | /034.py | ea9f7b56719f971829eff00432cc7a31800a55ec | [] | no_license | vasundhara7/Hangman-Game | 9c3d6938aacfc87c6ca8fe1c01824cfcacec3e9f | 21bf5164c420e07aad1be8121a9a5a758d3d013f | refs/heads/master | 2020-08-29T23:37:56.261315 | 2019-10-29T04:54:03 | 2019-10-29T04:54:03 | 218,203,863 | 0 | 1 | null | 2019-10-29T04:54:04 | 2019-10-29T04:24:56 | null | UTF-8 | Python | false | false | 3,909 | py | def game_category():
intro = True
Bg = p.Surface((1280,720))
Bg.fill(black)
Bg = Bg.convert()
image=p.image.load('hng7.JPG')
screen.blit(image,(0,0))
while intro:
for event in p.event.get():
print(event)
if event.type == p.QUIT:
p.quit()
quit()
myfont = p.font.SysFont("None", 100)
mytext = myfont.render('Categories', True, white)
mytext = mytext.convert_alpha()
screen.blit(mytext,(900//2,180//2))
button_2("Animals",500,240,250,50,dark_red,red,game_difficulty)
button_2("Countries",500,320,250,50,dark_yellow,yellow,game_difficulty)
button_2("Cartoons",500,400,250,50,dark_blue,blue,game_difficulty)
button_2("Harry Potter",500,480,250,50,dark_purple,purple,game_difficulty)
button_2("Marvel",500,560,250,50,dark_green,green,game_difficulty)
button_2("Brands",500,640,250,50,dark_orange,orange,game_difficulty)
p.display.update()
clock.tick(15)
def game_difficulty():
intro = True
Bg = p.Surface((1280,720))
Bg.fill(black)
Bg = Bg.convert()
screen.blit(Bg,(0,0))
image=p.image.load('hng5.JPG')
screen.blit(image,(0,0))
while intro:
for event in p.event.get():
print(event)
if event.type == p.QUIT:
p.quit()
quit()
myfont = p.font.SysFont("None", 70)
mytext = myfont.render('Difficulty', True, white)
mytext = mytext.convert_alpha()
screen.blit(mytext,(1000//2,40//2))
button_3("Easy",500,100,250,50,dark_yellow,yellow,game_loop)
button_3("Medium",500,200,250,50,dark_blue,blue,game_loop)
button_3("Hard",500,300,250,50,dark_red,red,game_loop)
p.display.update()
clock.tick(15)
def button_2(msg,x,y,w,h,ic,ac,action=None):
mouse = p.mouse.get_pos()
click = p.mouse.get_pressed()
if x+w > mouse[0] > x and y+h > mouse[1] > y:
p.draw.rect(screen, ac,(x,y,w,h))
if click[0] == 1 and action != None:
global category
category = msg
action()
else:
p.draw.rect(screen, ic,(x,y,w,h))
smallText = p.font.Font("freesansbold.ttf",20)
textSurf, textRect = text_objects(msg, smallText)
textRect.center = ( (x+(w/2)), (y+(h/2)) )
screen.blit(textSurf, textRect)
def button_3(msg,x,y,w,h,ic,ac,action=None):
mouse = p.mouse.get_pos()
click = p.mouse.get_pressed()
if x+w > mouse[0] > x and y+h > mouse[1] > y:
p.draw.rect(screen, ac,(x,y,w,h))
if click[0] == 1 and action != None:
global difficulty
difficulty = msg
action()
else:
p.draw.rect(screen, ic,(x,y,w,h))
smallText = p.font.Font("freesansbold.ttf",20)
textSurf, textRect = text_objects(msg, smallText)
textRect.center = ( (x+(w/2)), (y+(h/2)) )
screen.blit(textSurf, textRect)
################################################################ DRAWING THE ENTERED ALPHABETS IN ORDER
myfont = p.font.SysFont("None", 34)
m = myfont.render(chr(i), True, (white))
m = m.convert_alpha()
screen.blit(m,(50,dx))
dx+=25
| [
"[email protected]"
] | |
f919b9550febb7ecf8b2528040e9d305db4824f9 | 962c6ebc7926557c2364ba3b160c16ec0dc3d177 | /src/prob_particle_advection.py | e04794cf0592f94e9a9b06799b8138061f70f572 | [] | no_license | behollis/bv-interp-python-etc | 25a0cd015d8c4df6b9b042c553db7eb7e6701e91 | 1de6c20511e58e01a083464d06ab91d0be57530f | refs/heads/master | 2021-01-20T13:47:32.905642 | 2014-03-19T22:37:05 | 2014-03-19T22:37:05 | 34,290,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88,044 | py | #!/usr/bin/python
# This is statement is required by the build system to query build info
if __name__ == '__build__':
raise Exception
'''
Author: Brad Hollister.
Started: 10/7/2012.
Advects particles through a 2D velocity field with a configurable probability
distribution at each grid point, comparing ensemble, quantile, GMM, and
single-Gaussian interpolation schemes.
'''
import netCDF4
import sys, struct
import rpy2.robjects as robjects
import random
import math as pm
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pylab as p
import mpl_toolkits.mplot3d.axes3d as p3
import math
#import gaussian_fit
import sum_of_gaussians_interpolation as sog
from netcdf_reader import *
#from spline_cdf_curve_morphing import *
from mayavi.mlab import *
import mayavi
from peakfinder import *
from quantile_lerp import *
# csv (writeStreamlinePositions) and operator (lerpGMMPair) are used below;
# import them explicitly in case the wildcard imports do not re-export them.
import csv
import operator
q_prev_max_vel_x = 0.0
q_prev_max_vel_y = 0.0
e_prev_max_vel_x = 0.0
e_prev_max_vel_y = 0.0
gmm_prev_max_vel_x = 0.0
gmm_prev_max_vel_y = 0.0
QUANTILES = 100
TOTAL_STEPS = 25
integration_step_size = 0.1
SEED_LAT = 42
SEED_LON = 21
SEED_LEVEL = 0
vclin = []
cf_vclin = []
reused_vel_quantile = 0
DEBUG = False
MODE = 1
FILE_NAME = 'pe_dif_sep2_98.nc'
FILE_NAME_CENTRAL_FORECAST = 'pe_fct_aug25_sep2.nc'
INPUT_DATA_DIR = '../../data/in/ncdf/'
OUTPUT_DATA_DIR = '../../data/out/csv/'
MODE_DIR1 = 'mode1/'
MODE_DIR2 = 'mode2/'
COM = 2
LON = 53
LAT = 90
LEV = 16
MEM = 600
MAX_GMM_COMP = 3
EM_MAX_ITR = 2000
EM_MAX_RESTARTS = 1000
DEPTH = -2.0
INTEGRATION_DIR = 'b'
THRESHOLD_PER = 0.9 #fraction of the highest peak's density that the second-highest peak must reach to count as a distinct mode
g_cc = np.zeros(shape=(LAT,LON))
g_crisp_streamlines = []
g_part_positions_ensemble = [[],[],[],[],[],[],[]]
g_part_positions_quantile = [[],[],[],[],[],[],[]]
g_part_positions_gmm = [[],[],[],[],[],[],[]]
g_part_positions_g = [[],[],[],[],[],[],[]]
g_part_positions_ensemble_b = [[],[],[],[],[],[],[]]
g_part_positions_quantile_b = [[],[],[],[],[],[],[]]
g_part_positions_gmm_b = [[],[],[],[],[],[],[]]
g_part_positions_g_b = [[],[],[],[],[],[],[]]
part_pos_e = [];part_pos_q = [];part_pos_gmm = [];part_pos_g = []
part_pos_e.append([0,0])
part_pos_e[0][0] = SEED_LAT
part_pos_e[0][1] = SEED_LON
part_pos_q.append([0,0])
part_pos_q[0][0] = SEED_LAT
part_pos_q[0][1] = SEED_LON
part_pos_gmm.append([0,0])
part_pos_gmm[0][0] = SEED_LAT
part_pos_gmm[0][1] = SEED_LON
part_pos_g.append([0,0])
part_pos_g[0][0] = SEED_LAT
part_pos_g[0][1] = SEED_LON
part_pos_e_b = [];part_pos_q_b = [];part_pos_gmm_b = [];part_pos_g_b = []
part_pos_e_b.append([0,0])
part_pos_e_b[0][0] = SEED_LAT
part_pos_e_b[0][1] = SEED_LON
part_pos_q_b.append([0,0])
part_pos_q_b[0][0] = SEED_LAT
part_pos_q_b[0][1] = SEED_LON
part_pos_gmm_b.append([0,0])
part_pos_gmm_b[0][0] = SEED_LAT
part_pos_gmm_b[0][1] = SEED_LON
part_pos_g_b.append([0,0])
part_pos_g_b[0][0] = SEED_LAT
part_pos_g_b[0][1] = SEED_LON
r = robjects.r
ZERO_ARRAY = np.zeros(shape=(MEM,1))
# from.. http://doswa.com/2009/01/02/fourth-order-runge-kutta-numerical-integration.html
#http://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_method
def rk4(x, v, a, dt):
"""Returns final (position, velocity) tuple after
time dt has passed.
x: initial position (number-like object)
v: initial velocity (number-like object)
a: acceleration function a(x,v,dt) (must be callable)
dt: timestep (number)"""
x1 = x
v1 = v
a1 = a(x1, v1, 0)
x2 = x + 0.5*v1*dt
v2 = v + 0.5*a1*dt
a2 = a(x2, v2, dt/2.0)
x3 = x + 0.5*v2*dt
v3 = v + 0.5*a2*dt
a3 = a(x3, v3, dt/2.0)
x4 = x + v3*dt
v4 = v + a3*dt
a4 = a(x4, v4, dt)
xf = x + (dt/6.0)*(v1 + 2*v2 + 2*v3 + v4)
vf = v + (dt/6.0)*(a1 + 2*a2 + 2*a3 + a4)
return xf, vf
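# A minimal usage sketch for rk4 (all values assumed for illustration): one
# step of projectile motion under constant gravity, where the acceleration
# callback ignores position, velocity, and time.
def rk4_demo():
    gravity = lambda x, v, dt: -9.81                   # constant acceleration (assumed)
    xf, vf = rk4(x=0.0, v=10.0, a=gravity, dt=0.1)
    print "rk4 demo: position %.4f, velocity %.4f" % (xf, vf)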
def plotParticles(ts_per_gp=[]):
#http://docs.enthought.com/mayavi/mayavi/mlab_figures_decorations.html
f = mayavi.mlab.gcf()
#cam = f.scene.camera
#cam.parallel_scale = 10
f.scene.isometric_view()
grid_verts = np.zeros(shape=(LAT,LON))
grid_lat, grid_lon = np.ogrid[0:LAT,0:LON]
grid_plane = mayavi.mlab.surf(grid_lat, grid_lon, grid_verts, color=(1,1,0),representation='wireframe',line_width=0.1)
ks_plane = mayavi.mlab.surf(grid_lat, grid_lon, ts_per_gp, colormap='gray')
#xmin=0; xmax=LAT; ymin=0; ymax=LON; zmin=-1; zmax=-1
#ext = [xmin, xmax, ymin, ymax,zmin,zmax]
#cc_plane = mayavi.mlab.imshow(g_cc, colormap='Blues', interpolate=True,transparent=True)
#mayavi.mlab.points3d(63, 21, -5, color=(1,0,0),scale_factor=0.1)
mode1 = np.loadtxt( OUTPUT_DATA_DIR + "crisp/" \
+ 'mode_members1_'+str(SEED_LAT)+'_lon'+str(SEED_LON)\
+'_lev'+str(SEED_LEVEL))
mode2 = np.loadtxt( OUTPUT_DATA_DIR + "crisp/" \
+ 'mode_members2_'+str(SEED_LAT)+'_lon'+str(SEED_LON)\
+'_lev'+str(SEED_LEVEL))
col1 = (255./255.,228./255.,196./255.)
col_mode1 = (1.,0.,0.)
col_mode2 = (0.,1.,0.)
for idx in range(0,MEM, 1):
if idx not in mode1 and idx not in mode2:
mayavi.mlab.plot3d(g_crisp_streamlines[idx][0][:], g_crisp_streamlines[idx][1][:], \
g_crisp_streamlines[idx][2][:],tube_radius = None,line_width=0.1, \
color=col1, name='Crisp Member '+str(idx+1))
for idx in range(0,MEM,1):
if idx in mode1:
mayavi.mlab.plot3d(g_crisp_streamlines[idx][0][:], g_crisp_streamlines[idx][1][:], \
g_crisp_streamlines[idx][2][:],tube_radius = None,line_width=0.1, \
color=col_mode1, name='Crisp Member '+str(idx+1))
elif idx in mode2:
mayavi.mlab.plot3d(g_crisp_streamlines[idx][0][:], g_crisp_streamlines[idx][1][:], \
g_crisp_streamlines[idx][2][:],tube_radius = None,line_width=0.1, \
color=col_mode2, name='Crisp Member '+str(idx+1))
#tubes with peak number
'''
mayavi.mlab.plot3d(g_part_positions_ensemble[0][:], g_part_positions_ensemble[1][:], g_part_positions_ensemble[2][:], \
g_part_positions_ensemble[5][:], colormap='Greens',name='Ensemble peaks')
mayavi.mlab.plot3d(g_part_positions_quantile[0][:], g_part_positions_quantile[1][:], g_part_positions_quantile[2][:], \
g_part_positions_quantile[5][:], colormap='Blues',name='Quantile peaks')
mayavi.mlab.plot3d(g_part_positions_gmm[0][:], g_part_positions_gmm[1][:], g_part_positions_gmm[2][:], \
g_part_positions_gmm[5][:], colormap='Reds',name='GMM peaks')
mayavi.mlab.plot3d(g_part_positions_g[0][:], g_part_positions_g[1][:], g_part_positions_g[2][:], \
g_part_positions_g[5][:], colormap='Purples',name='g peaks')
#tubes with speed
mayavi.mlab.plot3d(g_part_positions_ensemble[0][:], g_part_positions_ensemble[1][:], g_part_positions_ensemble[2][:], \
g_part_positions_ensemble[3][:], colormap='Greens',name='Ensemble speed')
mayavi.mlab.plot3d(g_part_positions_quantile[0][:], g_part_positions_quantile[1][:], g_part_positions_quantile[2][:], \
g_part_positions_quantile[3][:], colormap='Blues',name='Quantile speed')
mayavi.mlab.plot3d(g_part_positions_gmm[0][:], g_part_positions_gmm[1][:], g_part_positions_gmm[2][:], \
g_part_positions_gmm[3][:], colormap='Reds',name='GMM speed')
mayavi.mlab.plot3d(g_part_positions_g[0][:], g_part_positions_g[1][:], g_part_positions_g[2][:], \
g_part_positions_g[3][:], colormap='Purples',name='g speed')
#tubes with speed
mayavi.mlab.plot3d(g_part_positions_ensemble[0][:], g_part_positions_ensemble[1][:], g_part_positions_ensemble[2][:], \
g_part_positions_ensemble[6][:], colormap='Greens',name='Ensemble peak separation')
mayavi.mlab.plot3d(g_part_positions_quantile[0][:], g_part_positions_quantile[1][:], g_part_positions_quantile[2][:], \
g_part_positions_quantile[6][:], colormap='Blues',name='Quantile peak separation')
mayavi.mlab.plot3d(g_part_positions_gmm[0][:], g_part_positions_gmm[1][:], g_part_positions_gmm[2][:], \
g_part_positions_gmm[6][:], colormap='Reds',name='GMM peak separation')
mayavi.mlab.plot3d(g_part_positions_g[0][:], g_part_positions_g[1][:], g_part_positions_g[2][:], \
g_part_positions_g[6][:], colormap='Purples',name='g speed separation')
'''
mayavi.mlab.show()
def advectGaussian(step, dir = 'f'):
if step % 50 == 0:
print '********************************' + str(step) + ' out of ' + str(TOTAL_STEPS)
for particle in range(0, len(part_pos_e)):
# get modal velocities @ position, if more than one modal position
# spawn a new particle for each mode idx over one
if dir == 'f':
ppos = [ part_pos_g[particle][0], part_pos_g[particle][1] ]
else:
ppos = [ part_pos_g_b[particle][0], part_pos_g_b[particle][1] ]
#velx, vely, velz = interpVel(ppos)
params = interpFromGaussian(ppos)
velx = params[0][0]
vely = params[1][0]
var_u = params[0][1]
var_v = params[1][1]
if dir == 'f':
part_pos_g[particle][0] += velx*integration_step_size
part_pos_g[particle][1] += vely*integration_step_size
# enqueue for rendering
for part in part_pos_g:
g_part_positions_g[0].append(part[0])
g_part_positions_g[1].append(part[1])
g_part_positions_g[2].append(DEPTH)
g_part_positions_g[3].append(np.sqrt(np.square(velx)+np.square(vely)))
g_part_positions_g[4].append((var_u + var_v) / 2.0)
                g_part_positions_g[5].append(1.0) #single Gaussian per component: exactly one mode (velx_prob/vely_prob are undefined in this function)
g_part_positions_g[6].append(0.0)
else:
part_pos_g_b[particle][0] -= velx*integration_step_size
part_pos_g_b[particle][1] -= vely*integration_step_size
# enqueue for rendering
for part in part_pos_g_b:
g_part_positions_g_b[0].append(part[0])
g_part_positions_g_b[1].append(part[1])
g_part_positions_g_b[2].append(DEPTH)
g_part_positions_g_b[3].append(np.sqrt(np.square(velx)+np.square(vely)))
g_part_positions_g_b[4].append((var_u + var_v) / 2.0)
                g_part_positions_g_b[5].append(1.0) #single Gaussian per component: exactly one mode (velx_prob/vely_prob are undefined in this function)
g_part_positions_g_b[6].append(0.0)
def getMaxPeaks(vel_prob,vel):
    #Return the velocities of the two highest-density peaks, plus a flag that
    #is True when the secondary peak's density is within THRESHOLD_PER of the
    #primary's. Both parallel lists are mutated, so callers must pass copies.
    m = max(vel_prob)
    p = vel_prob.index(m)
    max_1 = vel[p]
    vel_prob.pop(p)
    vel.pop(p) #pop both lists so indices stay aligned when finding the second peak
    m2 = max(vel_prob)
    p = vel_prob.index(m2)
    max_2 = vel[p]
    meet_threshold = False
    if m2 >= THRESHOLD_PER * m: #compare peak densities, not velocity values
        meet_threshold = True
    return max_1, max_2, meet_threshold
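# Small sketch of getMaxPeaks on hypothetical peak lists: pass copies, since
# the function mutates its arguments. With THRESHOLD_PER = 0.9, a secondary
# peak at >= 90% of the primary density flags the distribution as bimodal.
def getMaxPeaks_demo():
    peak_probs = [0.10, 0.40, 0.38]   # peak densities (assumed)
    peak_vels  = [-1.5,  0.2,  1.1]   # matching velocities (assumed)
    v1, v2, bimodal = getMaxPeaks(peak_probs[:], peak_vels[:])
    print "primary %.2f, secondary %.2f, bimodal: %s" % (v1, v2, bimodal)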
def advectGMM(step, dir = 'f'):
if step % 50 == 0:
print '********************************' +str(step) + ' out of ' + str(TOTAL_STEPS)
for particle in range(0, len(part_pos_e)):
global gmm_prev_max_vel_x
global gmm_prev_max_vel_y
# get modal velocities @ position, if more than one modal position
# spawn a new particle for each mode idx over one
if dir == 'f':
ppos = [ part_pos_gmm[particle][0], part_pos_gmm[particle][1] ]
else:
ppos = [ part_pos_gmm_b[particle][0], part_pos_gmm_b[particle][1] ]
#get peaks
#velx, vely, velz = interpVel(ppos)
velx, velx_prob, vely, vely_prob, velz, var_u, var_v, u_params, v_params = interpFromGMM(ppos)
#find highest prob vel
velx_hp = gmm_prev_max_vel_x
vely_hp = gmm_prev_max_vel_y
#find difference in peaks
max_x_1 = 0.0
max_x_2 = 0.0
max_y_1 = 0.0
max_y_2 = 0.0
x_diff = 0.0;y_diff = 0.0
max_peak_diff = 0.0
num_x_peaks = len(velx_prob) #len(u_params)
num_y_peaks = len(vely_prob) #len(v_params)
# take peaks from largest g comps
'''
if len(u_params) > 0:
temp_max = 0
max_idx = 0
for i in range(0,len(u_params)):
if u_params[i] > temp_max:
max_idx = i
#get max peak mean
max_x_1 = u_params[max_idx][0]
u_params[0].pop(max_idx);u_params[1].pop(max_idx);u_params[2].pop(max_idx)
temp_max = 0
max_idx = 0
for i in range(0,len(u_params)):
if u_params[i] > temp_max:
max_idx = i
#get 2nd peak mean
max_x_2 = u_params[max_idx][0]
# take peaks from largest g comps
if len(v_params) > 0:
temp_max = 0
max_idx = 0
for i in range(0,len(v_params)):
if v_params[i] > temp_max:
max_idx = i
#get max peak mean
max_y_1 = v_params[max_idx][0]
v_params[0].pop(max_idx);v_params[1].pop(max_idx);v_params[2].pop(max_idx)
temp_max = 0
max_idx = 0
for i in range(0,len(v_params)):
if v_params[i] > temp_max:
max_idx = i
#get 2nd peak mean
max_y_2 = v_params[max_idx][0]
x_diff = math.fabs(max_x_1 - max_x_2)
y_diff = math.fabs(max_y_1 - max_y_2)
max_peak_diff = max([x_diff,y_diff])
if MODE == 1:
velx_hp = max_x_1
else: #MODE ==2:
velx_hp = max_x_2
if MODE == 1:
vely_hp = max_y_1
else: #MODE ==2:
vely_hp = max_y_2
'''
if num_x_peaks > 1:
velx_prob_copy = velx_prob[:]
velx_copy = velx[:]
max_x_1, max_x_2, sig = getMaxPeaks(velx_prob_copy,velx_copy)
if sig == True:
x_diff = pm.fabs(max_x_1 - max_x_2)
if num_y_peaks > 1:
vely_prob_copy = vely_prob[:]
vely_copy = vely[:]
max_y_1, max_y_2, sig = getMaxPeaks(vely_prob_copy,vely_copy)
if sig == True:
y_diff = pm.fabs(max_y_1 - max_y_2)
if x_diff > y_diff:
max_peak_diff = x_diff
else:
max_peak_diff = y_diff
if num_x_peaks > 0:
m = max(velx_prob)
p = velx_prob.index(m)
if MODE == 1 or num_x_peaks == 1:
velx_hp = velx[p]
elif MODE == 2 and num_x_peaks > 1:
velx_prob.pop(p)
m = max(velx_prob)
p = velx_prob.index(m)
velx_hp = velx[p]
else:
print "WARNING: no max velx returned for GMM lerp @ position " + str(ppos)
if num_y_peaks > 0:
m = max(vely_prob)
p = vely_prob.index(m)
if MODE == 1 or num_y_peaks == 1:
vely_hp = vely[p]
elif MODE == 2 and num_y_peaks > 1:
vely_prob.pop(p)
m = max(vely_prob)
p = vely_prob.index(m)
vely_hp = vely[p]
else:
print "WARNING: no max vely returned for GMM lerp @ position " + str(ppos)
gmm_prev_max_vel_x = velx_hp
gmm_prev_max_vel_y = vely_hp
#if step % 10 == 0 or step == 1:
# print str(step) + " ensemble: pos " + str(ppos) + " u peak: " + str(velx_hp) + " v peak: " + str(vely_hp)
if dir == 'f':
part_pos_gmm[particle][0] += velx_hp*integration_step_size
part_pos_gmm[particle][1] += vely_hp*integration_step_size
# enqueue for rendering
for part in part_pos_gmm:
g_part_positions_gmm[0].append(part[0])
g_part_positions_gmm[1].append(part[1])
g_part_positions_gmm[2].append(DEPTH)
g_part_positions_gmm[3].append(np.sqrt(np.square(velx_hp)+np.square(vely_hp)))
g_part_positions_gmm[4].append((var_u + var_v) / 2.0)
g_part_positions_gmm[5].append((len(velx_prob)+len(vely_prob)) / 2.0)#g_part_positions[5].append(var_v)
g_part_positions_gmm[6].append(max_peak_diff)
else:
part_pos_gmm_b[particle][0] -= velx_hp*integration_step_size
part_pos_gmm_b[particle][1] -= vely_hp*integration_step_size
# enqueue for rendering
for part in part_pos_gmm_b:
g_part_positions_gmm_b[0].append(part[0])
g_part_positions_gmm_b[1].append(part[1])
g_part_positions_gmm_b[2].append(DEPTH)
g_part_positions_gmm_b[3].append(np.sqrt(np.square(velx_hp)+np.square(vely_hp)))
g_part_positions_gmm_b[4].append((var_u + var_v) / 2.0)
g_part_positions_gmm_b[5].append((len(velx_prob)+len(vely_prob)) / 2.0)#g_part_positions_b[5].append(var_v)
g_part_positions_gmm_b[6].append(max_peak_diff)
def advectEnsemble(step, dir = 'f'):
if step % 50 == 0:
print '********************************' + str(step) + ' out of ' + str(TOTAL_STEPS)
for particle in range(0, len(part_pos_e)):
global e_prev_max_vel_x
global e_prev_max_vel_y
# get modal velocities @ position, if more than one modal position
# spawn a new particle for each mode idx over one
if dir == 'f':
ppos = [ part_pos_e[particle][0], part_pos_e[particle][1] ]
else:
ppos = [ part_pos_e_b[particle][0], part_pos_e_b[particle][1] ]
#get peaks
#velx, vely, velz = interpVel(ppos)
velx, velx_prob, vely, vely_prob, velz, var_u, var_v = interpVelFromEnsemble(ppos)
#find highest prob vel
velx_hp = e_prev_max_vel_x
vely_hp = e_prev_max_vel_y
#find difference in peaks
max_x_1 = 0.0
max_x_2 = 0.0
max_y_1 = 0.0
max_y_2 = 0.0
x_diff = 0.0;y_diff = 0.0
max_peak_diff = 0.0
num_x_peaks = len(velx_prob)
num_y_peaks = len(vely_prob)
if num_x_peaks > 1:
velx_prob_copy = velx_prob[:]
velx_copy = velx[:]
max_x_1, max_x_2, sig = getMaxPeaks(velx_prob_copy,velx_copy)
if sig == True:
x_diff = pm.fabs(max_x_1 - max_x_2)
if num_y_peaks > 1:
vely_prob_copy = vely_prob[:]
vely_copy = vely[:]
max_y_1, max_y_2, sig = getMaxPeaks(vely_prob_copy,vely_copy)
if sig == True:
y_diff = pm.fabs(max_y_1 - max_y_2)
if x_diff > y_diff:
max_peak_diff = x_diff
else:
max_peak_diff = y_diff
'''
if num_x_peaks > 0:
m = max(velx_prob)
p = velx_prob.index(m)
velx_hp = velx[p]
if num_y_peaks > 0:
m1 = max(vely_prob)
p1 = vely_prob.index(m1)
vely_hp = vely[p1]
'''
if num_x_peaks > 0:
m = max(velx_prob)
p = velx_prob.index(m)
if MODE == 1 or num_x_peaks == 1:
velx_hp = velx[p]
elif MODE == 2 and num_x_peaks > 1:
velx_prob.pop(p)
m = max(velx_prob)
p = velx_prob.index(m)
velx_hp = velx[p]
else:
print "WARNING: no max velx returned for ensemble lerp @ position " + str(ppos)
if num_y_peaks > 0:
m = max(vely_prob)
p = vely_prob.index(m)
if MODE == 1 or num_y_peaks == 1:
vely_hp = vely[p]
elif MODE == 2 and num_y_peaks > 1:
vely_prob.pop(p)
m = max(vely_prob)
p = vely_prob.index(m)
vely_hp = vely[p]
else:
print "WARNING: no max vely returned for ensemble lerp @ position " + str(ppos)
e_prev_max_vel_x = velx_hp
e_prev_max_vel_y = vely_hp
print "Ensemble u vel: " + str(velx_hp)
print "Ensemble v vel: " + str(vely_hp)
#if step % 10 == 0 or step == 1:
# print str(step) + " ensemble: pos " + str(ppos) + " u peak: " + str(velx_hp) + " v peak: " + str(vely_hp)
if dir == 'f':
part_pos_e[particle][0] += velx_hp*integration_step_size
part_pos_e[particle][1] += vely_hp*integration_step_size
# enqueue for rendering
for part in part_pos_e:
g_part_positions_ensemble[0].append(part[0])
g_part_positions_ensemble[1].append(part[1])
g_part_positions_ensemble[2].append(DEPTH)
g_part_positions_ensemble[3].append(np.sqrt(np.square(velx_hp)+np.square(vely_hp)))
g_part_positions_ensemble[4].append((var_u + var_v) / 2.0)
g_part_positions_ensemble[5].append((len(velx_prob)+len(vely_prob)) / 2.0)#g_part_positions[5].append(var_v)
g_part_positions_ensemble[6].append(max_peak_diff)
else:
part_pos_e_b[particle][0] -= velx_hp*integration_step_size
part_pos_e_b[particle][1] -= vely_hp*integration_step_size
# enqueue for rendering
for part in part_pos_e_b:
g_part_positions_ensemble_b[0].append(part[0])
g_part_positions_ensemble_b[1].append(part[1])
g_part_positions_ensemble_b[2].append(DEPTH)
g_part_positions_ensemble_b[3].append(np.sqrt(np.square(velx_hp)+np.square(vely_hp)))
g_part_positions_ensemble_b[4].append((var_u + var_v) / 2.0)
g_part_positions_ensemble_b[5].append((len(velx_prob)+len(vely_prob)) / 2.0)#g_part_positions_b[5].append(var_v)
g_part_positions_ensemble_b[6].append(max_peak_diff)
def advectQuantile(step, dir = 'f'):
global reused_vel_quantile
if step % 50 == 0:
print str(step) + ' out of ' + str(TOTAL_STEPS)
for particle in range(0, len(part_pos_q)):
global q_prev_max_vel_x
global q_prev_max_vel_y
# get modal velocities @ position, if more than one modal position
# spawn a new particle for each mode idx over one
if dir == 'f':
ppos = [ part_pos_q[particle][0], part_pos_q[particle][1] ]
else:
ppos = [ part_pos_q_b[particle][0], part_pos_q_b[particle][1] ]
#get peaks
#velx, vely, velz = interpVel(ppos)
velx, velx_prob, vely, vely_prob, velz, var_u, var_v = interpFromQuantiles(ppos)
#find highest prob vel
velx_hp = q_prev_max_vel_x
vely_hp = q_prev_max_vel_y
#find difference in peaks
max_x_1 = 0.0
max_x_2 = 0.0
max_y_1 = 0.0
max_y_2 = 0.0
x_diff = 0.0;y_diff = 0.0
max_peak_diff = 0.0
num_x_peaks = len(velx_prob)
num_y_peaks = len(vely_prob)
if num_x_peaks > 1:
velx_prob_copy = velx_prob[:]
velx_copy = velx[:]
max_x_1, max_x_2, sig = getMaxPeaks(velx_prob_copy,velx_copy)
if sig == True:
x_diff = pm.fabs(max_x_1 - max_x_2)
if num_y_peaks > 1:
vely_prob_copy = vely_prob[:]
vely_copy = vely[:]
max_y_1, max_y_2, sig = getMaxPeaks(vely_prob_copy,vely_copy)
if sig == True:
y_diff = pm.fabs(max_y_1 - max_y_2)
if x_diff > y_diff:
max_peak_diff = x_diff
else:
max_peak_diff = y_diff
'''
if num_x_peaks > 0:
m = max(velx_prob)
p = velx_prob.index(m)
velx_hp = velx[p]
if num_y_peaks > 0:
m1 = max(vely_prob)
p1 = vely_prob.index(m1)
vely_hp = vely[p1]
'''
if num_x_peaks > 0:
m = max(velx_prob)
p = velx_prob.index(m)
if MODE == 1 or num_x_peaks == 1:
velx_hp = velx[p]
elif MODE == 2 and num_x_peaks > 1:
velx_prob.pop(p)
m = max(velx_prob)
p = velx_prob.index(m)
velx_hp = velx[p]
else:
print "WARNING: no max velx returned for quantile lerp @ position " + str(ppos)
if num_y_peaks > 0:
m = max(vely_prob)
p = vely_prob.index(m)
if MODE == 1 or num_y_peaks == 1:
vely_hp = vely[p]
elif MODE == 2 and num_y_peaks > 1:
vely_prob.pop(p)
m = max(vely_prob)
p = vely_prob.index(m)
vely_hp = vely[p]
else:
print "WARNING: no max vely returned for quantile lerp @ position " + str(ppos)
print "Quantile u vel: " + str(velx_hp)
print "Quantile v vel: " + str(vely_hp)
'''
if velx_hp <= 0.15 and velx_hp >= -0.15 and velx_hp == q_prev_max_vel_x:
velx_hp = 0
if vely_hp <= 0.15 and vely_hp >= -0.15 and vely_hp == q_prev_max_vel_y:
vely_hp = 0
'''
q_prev_max_vel_x = velx_hp
q_prev_max_vel_y = vely_hp
#if step % 10 == 0 or step == 1:
# print str(step) + " quantile: pos " + str(ppos) + " u peak: " + str(velx_hp) + " v peak: " + str(vely_hp)
if dir == 'f':
part_pos_q[particle][0] += velx_hp*integration_step_size
part_pos_q[particle][1] += vely_hp*integration_step_size
# enqueue for rendering
for part in part_pos_q:
g_part_positions_quantile[0].append(part[0])
g_part_positions_quantile[1].append(part[1])
g_part_positions_quantile[2].append(DEPTH)
g_part_positions_quantile[3].append(np.sqrt(np.square(velx_hp)+np.square(vely_hp)))
g_part_positions_quantile[4].append((var_u + var_v) / 2.0)
g_part_positions_quantile[5].append((len(velx_prob)+len(vely_prob)) / 2.0)
g_part_positions_quantile[6].append(max_peak_diff)
else:
part_pos_q_b[particle][0] -= velx_hp*integration_step_size
part_pos_q_b[particle][1] -= vely_hp*integration_step_size
# enqueue for rendering
for part in part_pos_q_b:
g_part_positions_quantile_b[0].append(part[0])
g_part_positions_quantile_b[1].append(part[1])
g_part_positions_quantile_b[2].append(DEPTH)
g_part_positions_quantile_b[3].append(np.sqrt(np.square(velx_hp)+np.square(vely_hp)))
g_part_positions_quantile_b[4].append((var_u + var_v) / 2.0)
g_part_positions_quantile_b[5].append((len(velx_prob)+len(vely_prob)) / 2.0)
g_part_positions_quantile_b[6].append(max_peak_diff)
def interpVelFromEnsemble(ppos=[0.0,0.0]):
#assume grid points are defined by integer indices
#decompose fract / whole from particle position
ppos_parts = [[0.0,0.0],[0.0,0.0]] #[fract,whole] for each x,y comp
ppos_parts[0][0] = pm.modf(ppos[0])[0];ppos_parts[0][1] = pm.modf(ppos[0])[1]
ppos_parts[1][0] = pm.modf(ppos[1])[0];ppos_parts[1][1] = pm.modf(ppos[1])[1]
#print "ensemble alpha x: " + str( ppos_parts[0][0] )
#print "ensemble alpha y: " + str( ppos_parts[1][0] )
# grid point numbers:
#
# (2)---(3)
# | |
# | |
# (0)---(1)
#find four corner grid point indices, numbered from gpt0 = (bottom, left) TO gpt3 = (top, right)
#calculated from whole parts
    gpt0 = [int(ppos_parts[0][1]), int(ppos_parts[1][1])] #cast to int: these are used as array indices
    gpt1 = [int(ppos_parts[0][1]) + 1, int(ppos_parts[1][1])]
    gpt2 = [int(ppos_parts[0][1]), int(ppos_parts[1][1]) + 1]
    gpt3 = [int(ppos_parts[0][1]) + 1, int(ppos_parts[1][1]) + 1]
    gpt0_dist = np.zeros(shape=(2,MEM))
    gpt1_dist = np.zeros(shape=(2,MEM))
    gpt2_dist = np.zeros(shape=(2,MEM))
    gpt3_dist = np.zeros(shape=(2,MEM))
'''
if DEBUG is True:
print "ensemble interp"
print "gp0";print gpt0[0]; print gpt0[1]
print "gp1";print gpt1[0]; print gpt1[1]
print "gp2";print gpt2[0]; print gpt2[1]
print "gp3";print gpt3[0]; print gpt3[1]
'''
    for idx in range(0,MEM):
gpt0_dist[0][idx] = vclin[idx][gpt0[0]][gpt0[1]][SEED_LEVEL][0]
gpt0_dist[1][idx] = vclin[idx][gpt0[0]][gpt0[1]][SEED_LEVEL][1]
gpt1_dist[0][idx] = vclin[idx][gpt1[0]][gpt1[1]][SEED_LEVEL][0]
gpt1_dist[1][idx] = vclin[idx][gpt1[0]][gpt1[1]][SEED_LEVEL][1]
gpt2_dist[0][idx] = vclin[idx][gpt2[0]][gpt2[1]][SEED_LEVEL][0]
gpt2_dist[1][idx] = vclin[idx][gpt2[0]][gpt2[1]][SEED_LEVEL][1]
gpt3_dist[0][idx] = vclin[idx][gpt3[0]][gpt3[1]][SEED_LEVEL][0]
gpt3_dist[1][idx] = vclin[idx][gpt3[0]][gpt3[1]][SEED_LEVEL][1]
#SAMP = 2000
#lerp ensemble samples
lerp_u_gp0_gp1 = lerp( np.asarray(gpt0_dist[0] ), np.asarray(gpt1_dist[0]), w = ppos_parts[0][0] )
lerp_u_gp2_gp3 = lerp( np.asarray(gpt2_dist[0] ), np.asarray(gpt3_dist[0]), w = ppos_parts[0][0] )
lerp_u = lerp( np.asarray(lerp_u_gp0_gp1), np.asarray(lerp_u_gp2_gp3), w = ppos_parts[1][0] )
lerp_v_gp0_gp1 = lerp( np.asarray(gpt0_dist[1] ), np.asarray(gpt1_dist[1]), w = ppos_parts[0][0] )
lerp_v_gp2_gp3 = lerp( np.asarray(gpt2_dist[1] ), np.asarray(gpt3_dist[1]), w = ppos_parts[0][0] )
lerp_v = lerp( np.asarray(lerp_v_gp0_gp1), np.asarray(lerp_v_gp2_gp3), w = ppos_parts[1][0] )
#x = linspace( lerp_u[0], lerp_u[-1], len(lerp_u) )
#y = linspace( lerp_v[0], lerp_v[-1], len(lerp_v) )
x = linspace( -50, 50, 600 )
y = linspace( -50, 50, 600 )
#find peaks...
try:
k = [ stats.gaussian_kde(lerp_u), stats.gaussian_kde(lerp_v) ]
except:
return ([], [], [], [], 0.0, 0.0, 0.0)
var0 = np.std(k[0](x), axis=None, dtype=None, out=None, ddof=0)
var1 = np.std(k[1](y), axis=None, dtype=None, out=None, ddof=0)
_max_u, _min_u = peakdetect(k[0](x),x,lookahead=2,delta=0)
_max_v, _min_v = peakdetect(k[1](y),y,lookahead=2,delta=0)
xm_u = [p[0] for p in _max_u]
xm_v = [p[0] for p in _max_v]
ym_u = [p[1] for p in _max_u]
ym_v = [p[1] for p in _max_v]
'''
#plot interpolated kde's
plt.figure()
plt.title("ensemble")
p1, = plt.plot(x,k[0](x),'-', color='red')
p2, = plt.plot(y,k[1](y),'-', color='blue')
plt.legend([p2, p1], ["v", "u"])
#plot peaks
plt.hold(True)
plt.plot(xm_u, ym_u, 'x', color='black')
plt.plot(xm_v, ym_v, 'x', color='black')
plt.savefig('../png/e_'+str(ppos)+'.png')
'''
return (xm_u, ym_u, xm_v, ym_v, 0.0, var0, var1)
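# Self-contained sketch of the ensemble strategy above, reduced to 1-D: lerp
# member samples pointwise, fit a KDE, and extract its peaks. The endpoint
# samples and ranges are assumptions for illustration only.
def ensemble_lerp_demo(alpha=0.5):
    a = np.random.normal(-2.0, 0.7, MEM)      # member values at grid point A (assumed)
    b = np.random.normal(+2.0, 0.7, MEM)      # member values at grid point B (assumed)
    mixed = lerp(a, b, w=alpha)               # member-wise linear blend
    xs = linspace(-6, 6, 400)
    kde = stats.gaussian_kde(mixed)
    peaks, _ = peakdetect(kde(xs), xs, lookahead=2, delta=0)
    print "ensemble-lerp peak locations:", [pk[0] for pk in peaks]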
def interpFromQuantiles(ppos=[0.0,0.0]):
#assume grid points are defined by integer indices
#decompose fract / whole from particle position
ppos_parts = [[0.0,0.0],[0.0,0.0]] #[fract,whole] for each x,y comp
ppos_parts[0][0] = pm.modf(ppos[0])[0];ppos_parts[0][1] = pm.modf(ppos[0])[1]
ppos_parts[1][0] = pm.modf(ppos[1])[0];ppos_parts[1][1] = pm.modf(ppos[1])[1]
#print "quantile alpha x: " + str( ppos_parts[0][0] )
#print "quantile alpha y: " + str( ppos_parts[1][0] )
# grid point numbers:
#
# (2)---(3)
# | |
# | |
# (0)---(1)
#find four corner grid point indices, numbered from gpt0 = (bottom, left) TO gpt3 = (top, right)
#calculated from whole parts
    gpt0 = [int(ppos_parts[0][1]), int(ppos_parts[1][1])] #cast to int: these are used as array indices
    gpt1 = [int(ppos_parts[0][1]) + 1, int(ppos_parts[1][1])]
    gpt2 = [int(ppos_parts[0][1]), int(ppos_parts[1][1]) + 1]
    gpt3 = [int(ppos_parts[0][1]) + 1, int(ppos_parts[1][1]) + 1]
    gpt0_dist = np.zeros(shape=(2,MEM))
    gpt1_dist = np.zeros(shape=(2,MEM))
    gpt2_dist = np.zeros(shape=(2,MEM))
    gpt3_dist = np.zeros(shape=(2,MEM))
'''
if DEBUG is True:
print "quantile interp"
print "gp0";print gpt0[0]; print gpt0[1]
print "gp1";print gpt1[0]; print gpt1[1]
print "gp2";print gpt2[0]; print gpt2[1]
print "gp3";print gpt3[0]; print gpt3[1]
'''
    for idx in range(0,MEM):
gpt0_dist[0][idx] = vclin[idx][gpt0[0]][gpt0[1]][SEED_LEVEL][0]
gpt0_dist[1][idx] = vclin[idx][gpt0[0]][gpt0[1]][SEED_LEVEL][1]
gpt1_dist[0][idx] = vclin[idx][gpt1[0]][gpt1[1]][SEED_LEVEL][0]
gpt1_dist[1][idx] = vclin[idx][gpt1[0]][gpt1[1]][SEED_LEVEL][1]
gpt2_dist[0][idx] = vclin[idx][gpt2[0]][gpt2[1]][SEED_LEVEL][0]
gpt2_dist[1][idx] = vclin[idx][gpt2[0]][gpt2[1]][SEED_LEVEL][1]
gpt3_dist[0][idx] = vclin[idx][gpt3[0]][gpt3[1]][SEED_LEVEL][0]
gpt3_dist[1][idx] = vclin[idx][gpt3[0]][gpt3[1]][SEED_LEVEL][1]
quantiles = list(spread(0, 1.0, QUANTILES-1, mode=3))
quantiles.sort()
#find random variable value of quantiles for pdf
q_gpt0_dist_u = [];q_gpt0_dist_v = []
q_gpt1_dist_u = [];q_gpt1_dist_v = []
q_gpt2_dist_u = [];q_gpt2_dist_v = []
q_gpt3_dist_u = [];q_gpt3_dist_v = []
for q in quantiles:
q_gpt0_dist_u.append(r.quantile(robjects.FloatVector(gpt0_dist[0]), q)[0])
q_gpt0_dist_v.append(r.quantile(robjects.FloatVector(gpt0_dist[1]), q)[0])
q_gpt1_dist_u.append(r.quantile(robjects.FloatVector(gpt1_dist[0]), q)[0])
q_gpt1_dist_v.append(r.quantile(robjects.FloatVector(gpt1_dist[1]), q)[0])
q_gpt2_dist_u.append(r.quantile(robjects.FloatVector(gpt2_dist[0]), q)[0])
q_gpt2_dist_v.append(r.quantile(robjects.FloatVector(gpt2_dist[1]), q)[0])
q_gpt3_dist_u.append(r.quantile(robjects.FloatVector(gpt3_dist[0]), q)[0])
q_gpt3_dist_v.append(r.quantile(robjects.FloatVector(gpt3_dist[1]), q)[0])
#create np arrays
#q_gpt0_dist_u_array = np.asarray(q_gpt0_dist_u);q_gpt0_dist_v_array = np.asarray(q_gpt0_dist_v)
#q_gpt1_dist_u_array = np.asarray(q_gpt1_dist_u);q_gpt1_dist_v_array = np.asarray(q_gpt1_dist_v)
#q_gpt2_dist_u_array = np.asarray(q_gpt2_dist_u);q_gpt2_dist_v_array = np.asarray(q_gpt2_dist_v)
#q_gpt3_dist_u_array = np.asarray(q_gpt3_dist_u);q_gpt3_dist_v_array = np.asarray(q_gpt3_dist_v)
#lerp quantiles
#find peaks...
'''
if len(gpt0_dist[0]) < 5 or len(gpt1_dist[0]) < 5 or len(gpt2_dist[0]) < 5 or len(gpt3_dist[0]) < 5 or \
len(gpt0_dist[1]) < 5 or len(gpt1_dist[1]) < 5 or len(gpt2_dist[1]) < 5 or len(gpt3_dist[1]) < 5:
print "return in quantile interp @" + str(ppos)
return ([], [], [], [], 0.0, 0.0, 0.0)
if np.array_equal(gpt0_dist[0], ZERO_ARRAY) or np.array_equal(gpt1_dist[0], ZERO_ARRAY):
print "return in quantile interp @" + str(ppos)
return ([], [], [], [], 0.0, 0.0, 0.0)
'''
try:
k = stats.gaussian_kde(gpt0_dist[0]); l = stats.gaussian_kde(gpt1_dist[0])
except:
return ([], [], [], [], 0.0, 0.0, 0.0)
lerp_u_gp0_gp1_prob = quantileLerp( k, l, np.asarray(q_gpt0_dist_u), np.asarray(q_gpt1_dist_u), alpha = ppos_parts[0][0] )
lerp_u_gp0_gp1_values = lerp(np.asarray(q_gpt0_dist_u), np.asarray(q_gpt1_dist_u), w = ppos_parts[0][0] )
try:
lerp_u_gp2_gp3_prob = quantileLerp( stats.gaussian_kde(gpt2_dist[0]), stats.gaussian_kde(gpt3_dist[0]), np.asarray(q_gpt2_dist_u), np.asarray(q_gpt3_dist_u), alpha = ppos_parts[0][0] )
except:
return ([], [], [], [], 0.0, 0.0, 0.0)
lerp_u_gp2_gp3_values = lerp(np.asarray(q_gpt2_dist_u), np.asarray(q_gpt3_dist_u), w = ppos_parts[0][0] )
'''
plt.figure()
plt.title("gpt0_dist, alpha: " + str(ppos_parts[0][0]))
x = linspace( -5, 2, 600 )
plt.plot(lerp_u_gp0_gp1_values,lerp_u_gp0_gp1_prob,'-', color='black')
plt.show()
'''
NUM_SAMPLES = 1000
samples_numbers = lerp_u_gp0_gp1_prob * NUM_SAMPLES
samples_gp0_gp1_lerp = []
for prob_idx in range(0,len(lerp_u_gp0_gp1_prob)):
#if not math.isnan(samples_numbers[prob_idx]):
# continue
for num in range(0,int(samples_numbers[prob_idx])):
samples_gp0_gp1_lerp.append(lerp_u_gp0_gp1_values[prob_idx])
samples_numbers2 = lerp_u_gp2_gp3_prob * NUM_SAMPLES
samples_gp2_gp3_lerp = []
for prob_idx in range(0,len(lerp_u_gp2_gp3_prob)):
#if not math.isnan(samples_numbers2[prob_idx]):
# continue
for num in range(0,int(samples_numbers2[prob_idx])):
samples_gp2_gp3_lerp.append(lerp_u_gp2_gp3_values[prob_idx])
'''
plt.figure()
plt.title("gpt0_dist resampled, alpha: " + str(ppos_parts[0][0]))
x = linspace( -5, 2, 600 )
plt.plot(x,stats.gaussian_kde(samples_gp0_gp1_lerp)(x),'-', color='black')
plt.show()
'''
'''
plt.figure()
plt.title("lerp_u_gp0_gp1, alpha: " + str(ppos_parts[0][0]))
x = linspace( -10, 10, 600 )
plt.plot(x,stats.gaussian_kde(lerp_u_gp0_gp1)(x),'-', color='black')
plt.show()
plt.figure()
plt.title("lerp_u_gp2_gp3")
x = linspace( -10, 10, 600 )
plt.plot(x,stats.gaussian_kde(lerp_u_gp2_gp3)(x),'-', color='black')
plt.show()
'''
q_lerp_gpt0_gpt1_dist_u = [];q_lerp_gpt2_gpt3_dist_u = []
for q in quantiles:
q_lerp_gpt0_gpt1_dist_u.append(r.quantile(robjects.FloatVector(samples_gp0_gp1_lerp), q)[0])
q_lerp_gpt2_gpt3_dist_u.append(r.quantile(robjects.FloatVector(samples_gp2_gp3_lerp), q)[0])
try:
lerp_u_prob = quantileLerp( stats.gaussian_kde(samples_gp0_gp1_lerp), stats.gaussian_kde(samples_gp2_gp3_lerp), np.asarray(q_lerp_gpt0_gpt1_dist_u), np.asarray(q_lerp_gpt2_gpt3_dist_u), alpha = ppos_parts[1][0] )
lerp_u_prob_2 = quantileLerp( interpolate.interp1d( lerp_u_gp0_gp1_values, lerp_u_gp0_gp1_prob ), \
interpolate.interp1d( lerp_u_gp2_gp3_values, lerp_u_gp2_gp3_prob ), \
np.asarray(q_lerp_gpt0_gpt1_dist_u), \
np.asarray(q_lerp_gpt2_gpt3_dist_u), alpha = ppos_parts[1][0] )
except:
return ([], [], [], [], 0.0, 0.0, 0.0)
lerp_u_values = lerp(np.asarray(q_lerp_gpt0_gpt1_dist_u), np.asarray(q_lerp_gpt2_gpt3_dist_u), w = ppos_parts[1][0] )
samples_numbers3 = lerp_u_prob * NUM_SAMPLES
samples_u_lerp = []
for prob_idx in range(0,len(lerp_u_prob)):
#if not math.isnan(samples_numbers3[prob_idx]):
# continue
for num in range(0,int(samples_numbers3[prob_idx])):
if not math.isnan(lerp_u_values[prob_idx]) and not math.isinf(lerp_u_values[prob_idx]):
samples_u_lerp.append(lerp_u_values[prob_idx])
'''
plt.figure()
x = linspace( -10, 10, 600 )
plt.plot(x,stats.gaussian_kde(samples_lerp_u)(x),'-', color='black')
plt.show()
'''
'''
if np.array_equal(gpt0_dist[1], ZERO_ARRAY) or np.array_equal(gpt1_dist[1], ZERO_ARRAY):
print "return in quantile interp @" + str(ppos)
return ([], [], [], [], 0.0, 0.0, 0.0)
'''
try:
k = stats.gaussian_kde(gpt0_dist[1]); l = stats.gaussian_kde(gpt1_dist[1])
except:
return ([], [], [], [], 0.0, 0.0, 0.0)
lerp_v_gp0_gp1_prob = quantileLerp( k, l, np.asarray(q_gpt0_dist_v), np.asarray(q_gpt1_dist_v), alpha = ppos_parts[0][0] )
lerp_v_gp0_gp1_values = lerp(np.asarray(q_gpt0_dist_v), np.asarray(q_gpt1_dist_v), w = ppos_parts[0][0] )
try:
lerp_v_gp2_gp3_prob = quantileLerp( stats.gaussian_kde(gpt2_dist[1]), stats.gaussian_kde(gpt3_dist[1]), \
np.asarray(q_gpt2_dist_v), np.asarray(q_gpt3_dist_v), alpha = ppos_parts[0][0] )
except:
return ([], [], [], [], 0.0, 0.0, 0.0)
lerp_v_gp2_gp3_values = lerp(np.asarray(q_gpt2_dist_v), np.asarray(q_gpt3_dist_v), w = ppos_parts[0][0] )
samples_numbers4 = lerp_v_gp0_gp1_prob * NUM_SAMPLES
samples_gp0_gp1_lerp_v = []
for prob_idx in range(0,len(lerp_v_gp0_gp1_prob)):
#if not math.isnan(samples_numbers4[prob_idx]):
# continue
for num in range(0,int(samples_numbers4[prob_idx])):
samples_gp0_gp1_lerp_v.append(lerp_v_gp0_gp1_values[prob_idx])
samples_numbers5 = lerp_v_gp2_gp3_prob * NUM_SAMPLES
samples_gp2_gp3_lerp_v = []
for prob_idx in range(0,len(lerp_v_gp2_gp3_prob)):
#if not math.isnan(samples_numbers5[prob_idx]):
# continue
for num in range(0,int(samples_numbers5[prob_idx])):
samples_gp2_gp3_lerp_v.append(lerp_v_gp2_gp3_values[prob_idx])
#samples_gp2_gp3_lerp_v =
#samples_gp2_gp3_lerp_v =
q_lerp_gpt0_gpt1_dist_v = [];q_lerp_gpt2_gpt3_dist_v = []
for q in quantiles:
q_lerp_gpt0_gpt1_dist_v.append(r.quantile(robjects.FloatVector(samples_gp0_gp1_lerp_v), q)[0])
q_lerp_gpt2_gpt3_dist_v.append(r.quantile(robjects.FloatVector(samples_gp2_gp3_lerp_v), q)[0])
try:
lerp_v_prob = quantileLerp( stats.gaussian_kde(samples_gp0_gp1_lerp_v), stats.gaussian_kde(samples_gp2_gp3_lerp_v), \
np.asarray(q_lerp_gpt0_gpt1_dist_v), np.asarray(q_lerp_gpt2_gpt3_dist_v), alpha = ppos_parts[1][0] )
lerp_v_prob_2 = quantileLerp( interpolate.interp1d( lerp_v_gp0_gp1_values, lerp_v_gp0_gp1_prob ), \
interpolate.interp1d( lerp_v_gp2_gp3_values, lerp_v_gp2_gp3_prob ), \
np.asarray(q_lerp_gpt0_gpt1_dist_v), \
np.asarray(q_lerp_gpt2_gpt3_dist_v), alpha = ppos_parts[1][0] )
except:
return ([], [], [], [], 0.0, 0.0, 0.0)
lerp_v_values = lerp(np.asarray(q_lerp_gpt0_gpt1_dist_v), np.asarray(q_lerp_gpt2_gpt3_dist_v), w = ppos_parts[1][0] )
samples_numbers6 = lerp_v_prob * NUM_SAMPLES
samples_v_lerp = []
for prob_idx in range(0,len(lerp_v_prob)):
#if not math.isnan(samples_numbers6[prob_idx]):
# continue
for num in range(0,int(samples_numbers6[prob_idx])):
if not math.isnan(lerp_v_values[prob_idx]) and not math.isinf(lerp_v_values[prob_idx]):
samples_v_lerp.append(lerp_v_values[prob_idx])
x = linspace( -20, 20, 1000 )
y = linspace( -20, 20, 1000 )
#find peaks...
'''
if len(samples_u_lerp) < 20 or len(samples_v_lerp) < 20:
print "return in quantile interp @" + str(ppos)
return ([], [], [], [], 0.0, 0.0, 0.0)
'''
quantile_interp_u = interpolate.interp1d(lerp_u_values,lerp_u_prob_2)
quantile_interp_v = interpolate.interp1d(lerp_v_values,lerp_v_prob_2)
try:
k = [ stats.gaussian_kde(samples_u_lerp), stats.gaussian_kde(samples_v_lerp) ]
k2 = [ quantile_interp_u, quantile_interp_v ]
except:
return ([], [], [], [], 0.0, 0.0, 0.0)
#var0 = np.std(k[0](x), axis=None, dtype=None, out=None, ddof=0)
#var1 = np.std(k[1](y), axis=None, dtype=None, out=None, ddof=0)
x = linspace( min(lerp_u_values), max(lerp_u_values), 1000 )
y = linspace( min(lerp_v_values), max(lerp_v_values), 1000 )
var0 = np.std(k2[0](x), axis=None, dtype=None, out=None, ddof=0)
var1 = np.std(k2[1](y), axis=None, dtype=None, out=None, ddof=0)
#_max_u, _min_u = peakdetect(k[0](x),x,lookahead=5,delta=0)
#_max_v, _min_v = peakdetect(k[1](y),y,lookahead=5,delta=0)
_max_u, _min_u = peakdetect(k2[0](x),x,lookahead=5,delta=0)
_max_v, _min_v = peakdetect(k2[1](y),y,lookahead=5,delta=0)
xm_u = [p[0] for p in _max_u]
xm_v = [p[0] for p in _max_v]
ym_u = [p[1] for p in _max_u]
ym_v = [p[1] for p in _max_v]
'''
#plot interpolated kde's
#if len(_max_u) == 0 or len(_max_v) == 0:
plt.figure()
plt.title("quantile")
p1, = plt.plot(x,k[0](x),'-', color='red')
p2, = plt.plot(y,k[1](y),'-', color='blue')
plt.legend([p2, p1], ["v", "u"])
#plot peaks
plt.hold(True)
plt.plot(xm_u, ym_u, 'x', color='black')
plt.plot(xm_v, ym_v, 'x', color='black')
plt.savefig('../png/q_'+str(ppos)+'.png')
'''
return (xm_u, ym_u, xm_v, ym_v, 0.0, var0, var1)
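# The core idea behind interpFromQuantiles, as a self-contained 1-D sketch:
# interpolate matched quantiles (inverse CDFs) of the two endpoint samples,
# so a translating mode slides between the endpoints instead of fading in
# place. All sample values here are assumptions for illustration.
def quantile_lerp_demo(alpha=0.5):
    a = np.random.normal(-3.0, 1.0, MEM)
    b = np.random.normal(+3.0, 1.0, MEM)
    qs = list(spread(0, 1.0, QUANTILES-1, mode=3))
    qs.sort()
    qa = np.asarray([r.quantile(robjects.FloatVector(a), q)[0] for q in qs])
    qb = np.asarray([r.quantile(robjects.FloatVector(b), q)[0] for q in qs])
    blended = lerp(qa, qb, w=alpha)           # lerp matched quantile values
    print "blended median: %.3f" % blended[len(blended)//2]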
def fitGaussian(gp=[0.,0.]):
#fit single gaussian
m = r.mean(robjects.vectors.FloatVector(gp));var= r.var(robjects.vectors.FloatVector(gp))
return [m[0],var[0]]
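# Sketch of fitGaussian on a synthetic sample drawn through R (values assumed):
# it collapses an ensemble sample to a single [mean, variance] pair.
def fitGaussian_demo():
    sample = list(np.asarray(r.rnorm(MEM, mean=1.0, sd=0.5)))
    mu, var = fitGaussian(sample)
    print "fitted mean %.3f, variance %.3f" % (mu, var)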
def fitGMM(gp, max_gs=2):
#suppress std out number of iterations using r.invisible()
try:
mixmdl = r.invisible(r.normalmixEM(robjects.vectors.FloatVector(gp), k = max_gs, maxit = EM_MAX_ITR, maxrestarts=EM_MAX_RESTARTS))
except:
return [[0.]*max_gs,[0.]*max_gs, [0.]*max_gs ]
mu = [];sd = [];lb = []
for i in mixmdl.iteritems():
if i[0] == 'mu':
mu.append(i[1])
if i[0] == 'sigma':
sd.append(i[1])
if i[0] == 'lambda':
lb.append(i[1])
n_params = []
for idx in range(0,len(mu[0])):
n_params.append([mu[0][idx], sd[0][idx], lb[0][idx]])
return n_params
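# Sketch of fitGMM on a synthetic bimodal sample (values assumed). Like fitGMM
# itself, this needs the R 'mixtools' package providing normalmixEM.
def fitGMM_demo():
    sample = list(np.asarray(r.rnorm(300, mean=-2.0, sd=0.5))) + \
             list(np.asarray(r.rnorm(300, mean=2.0, sd=0.5)))
    for mu, sd, lam in fitGMM(sample, max_gs=2):   # triples of [mu, sigma, lambda]
        print "component: mean %.2f, sd %.2f, weight %.2f" % (mu, sd, lam)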
def lerpGMMPair(norm_params1, norm_params2, alpha, steps=1, num_gs=3):
''' handles equal number of constituent gaussians '''
    norm_params2 = sorted(norm_params2, key=operator.itemgetter(0), reverse=False) #sorted() is not in-place; keep the result
    norm_params1 = sorted(norm_params1, key=operator.itemgetter(0), reverse=False)
if steps != 0:
incr = alpha / steps
else:
incr = alpha
    for step_idx in range(0,steps+1):
        #resort to minimize distances in means of pairings
        norm_params1 = sorted(norm_params1, key=operator.itemgetter(0), reverse=False) #assign the sorted result
        subalpha = float(step_idx) * incr
inter_means = []; inter_stdevs = []; inter_comp_ratios = []
max_comps = len(norm_params1)
if max_comps < len(norm_params2):
max_comps = len(norm_params2)
# interpolate each gaussian
for idx in range(0,max_comps):
cur_mean1 = norm_params1[idx][0]
cur_std1 = norm_params1[idx][1]
cur_ratio1 = norm_params1[idx][2]
cur_mean2 = norm_params2[idx][0]
cur_std2 = norm_params2[idx][1]
cur_ratio2 = norm_params2[idx][2]
inter_means.append(cur_mean1*(1.0-subalpha) + cur_mean2*subalpha)
inter_stdevs.append(cur_std1*(1.0-subalpha) + cur_std2*subalpha)
inter_comp_ratios.append(cur_ratio1*(1.0-subalpha) + cur_ratio2*subalpha)
norm_params1 = []
for j in range(len(inter_means)):
norm_params1.append([inter_means[j], inter_stdevs[j], inter_comp_ratios[j]])
#return interp GMM params
return norm_params1
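# Sketch: blend two two-component GMMs at the midpoint with lerpGMMPair.
# Parameter triples are [mean, stdev, weight]; all values are assumptions.
def lerpGMMPair_demo():
    gmm_a = [[-2.0, 0.5, 0.5], [2.0, 0.5, 0.5]]
    gmm_b = [[-1.0, 0.8, 0.3], [3.0, 0.4, 0.7]]
    mid = lerpGMMPair(np.asarray(gmm_a), np.asarray(gmm_b), alpha=0.5, steps=1, num_gs=2)
    print "interpolated components:", mid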
def interpFromGMM(ppos=[0.0,0.0]):
#assume grid points are defined by integer indices
#decompose fract / whole from particle position
ppos_parts = [[0.0,0.0],[0.0,0.0]] #[fract,whole] for each x,y comp
ppos_parts[0][0] = pm.modf(ppos[0])[0];ppos_parts[0][1] = pm.modf(ppos[0])[1]
ppos_parts[1][0] = pm.modf(ppos[1])[0];ppos_parts[1][1] = pm.modf(ppos[1])[1]
# grid point numbers:
#
# (2)---(3)
# | |
# | |
# (0)---(1)
#find four corner grid point indices, numbered from gpt0 = (bottom, left) TO gpt3 = (top, right)
#calculated from whole parts
    gpt0 = [int(ppos_parts[0][1]), int(ppos_parts[1][1])] #cast to int: these are used as array indices
    gpt1 = [int(ppos_parts[0][1]) + 1, int(ppos_parts[1][1])]
    gpt2 = [int(ppos_parts[0][1]), int(ppos_parts[1][1]) + 1]
    gpt3 = [int(ppos_parts[0][1]) + 1, int(ppos_parts[1][1]) + 1]
    gpt0_dist = np.zeros(shape=(2,MEM))
    gpt1_dist = np.zeros(shape=(2,MEM))
    gpt2_dist = np.zeros(shape=(2,MEM))
    gpt3_dist = np.zeros(shape=(2,MEM))
'''
if DEBUG is True:
print "gp0";print gpt0[0]; print gpt0[1]
print "gp1";print gpt1[0]; print gpt1[1]
print "gp2";print gpt2[0]; print gpt2[1]
print "gp3";print gpt3[0]; print gpt3[1]
'''
for idx in range(0,MEM):
gpt0_dist[0][idx] = vclin[idx][gpt0[0]][gpt0[1]][SEED_LEVEL][0]
gpt0_dist[1][idx] = vclin[idx][gpt0[0]][gpt0[1]][SEED_LEVEL][1]
gpt1_dist[0][idx] = vclin[idx][gpt1[0]][gpt1[1]][SEED_LEVEL][0]
gpt1_dist[1][idx] = vclin[idx][gpt1[0]][gpt1[1]][SEED_LEVEL][1]
gpt2_dist[0][idx] = vclin[idx][gpt2[0]][gpt2[1]][SEED_LEVEL][0]
gpt2_dist[1][idx] = vclin[idx][gpt2[0]][gpt2[1]][SEED_LEVEL][1]
gpt3_dist[0][idx] = vclin[idx][gpt3[0]][gpt3[1]][SEED_LEVEL][0]
gpt3_dist[1][idx] = vclin[idx][gpt3[0]][gpt3[1]][SEED_LEVEL][1]
#check for "bad" distributions
if len(gpt0_dist[0]) < 5 or len(gpt1_dist[0]) < 5 or len(gpt2_dist[0]) < 5 or len(gpt3_dist[0]) < 5 or \
len(gpt0_dist[1]) < 5 or len(gpt1_dist[1]) < 5 or len(gpt2_dist[1]) < 5 or len(gpt3_dist[1]) < 5:
print "return in GMM interp @" + str(ppos)
return ([], [], [], [], 0.0, 0.0, 0.0, [], [])
#get gmm's
    #NOTE: need to check whether the dist is Gaussian-like; if so, don't fit more than one Gaussian to it or the EM algorithm may fail to converge
gp0_parms_u = fitGMM(gp=list(gpt0_dist[0][:]),max_gs=MAX_GMM_COMP);gp0_parms_v = fitGMM(list(gpt0_dist[1][:]),max_gs=MAX_GMM_COMP)
gp1_parms_u = fitGMM(gp=list(gpt1_dist[0][:]),max_gs=MAX_GMM_COMP);gp1_parms_v = fitGMM(list(gpt1_dist[1][:]),max_gs=MAX_GMM_COMP)
gp2_parms_u = fitGMM(gp=list(gpt2_dist[0][:]),max_gs=MAX_GMM_COMP);gp2_parms_v = fitGMM(list(gpt2_dist[1][:]),max_gs=MAX_GMM_COMP)
gp3_parms_u = fitGMM(gp=list(gpt3_dist[0][:]),max_gs=MAX_GMM_COMP);gp3_parms_v = fitGMM(list(gpt3_dist[1][:]),max_gs=MAX_GMM_COMP)
lerp_u_gp0_gp1_params = lerpGMMPair(np.asarray(gp0_parms_u), np.asarray(gp1_parms_u), alpha = ppos_parts[0][0], steps = 1, num_gs = MAX_GMM_COMP )
lerp_u_gp2_gp3_params = lerpGMMPair(np.asarray(gp2_parms_u), np.asarray(gp3_parms_u), alpha = ppos_parts[0][0], steps = 1, num_gs = MAX_GMM_COMP )
lerp_u_params = lerpGMMPair( np.asarray(lerp_u_gp0_gp1_params), np.asarray(lerp_u_gp2_gp3_params), alpha = ppos_parts[1][0], steps = 1, num_gs = MAX_GMM_COMP )
lerp_v_gp0_gp1_params = lerpGMMPair(np.asarray(gp0_parms_v), np.asarray(gp1_parms_v), alpha = ppos_parts[0][0], steps = 1, num_gs = MAX_GMM_COMP )
lerp_v_gp2_gp3_params = lerpGMMPair(np.asarray(gp2_parms_v), np.asarray(gp3_parms_v), alpha = ppos_parts[0][0], steps = 1, num_gs = MAX_GMM_COMP )
lerp_v_params = lerpGMMPair( np.asarray(lerp_v_gp0_gp1_params), np.asarray(lerp_v_gp2_gp3_params), alpha = ppos_parts[1][0], steps = 1, num_gs = MAX_GMM_COMP )
x = linspace( -50, 50, MEM )
y = linspace( -50, 50, MEM )
#return interp GMM
SAMPLES = MEM
total_dist_u = []
for idx in range(0,len(lerp_u_params)):
cur_inter_mean = lerp_u_params[idx][0];cur_inter_stdev = lerp_u_params[idx][1];cur_inter_ratio = lerp_u_params[idx][2]
total_dist_u += list(np.asarray(r.rnorm(int(SAMPLES*cur_inter_ratio), mean=cur_inter_mean, sd = cur_inter_stdev)))
total_dist_v = []
for idx in range(0,len(lerp_v_params)):
cur_inter_mean = lerp_v_params[idx][0];cur_inter_stdev = lerp_v_params[idx][1];cur_inter_ratio = lerp_v_params[idx][2]
total_dist_v += list(np.asarray(r.rnorm(int(SAMPLES*cur_inter_ratio), mean=cur_inter_mean, sd = cur_inter_stdev)))
try:
k = [ stats.gaussian_kde(total_dist_u), stats.gaussian_kde(total_dist_v) ]
except:
return ([], [], [], [], 0.0, 0.0, 0.0, [], [])
var0 = np.std(k[0](x), axis=None, dtype=None, out=None, ddof=0)
var1 = np.std(k[1](y), axis=None, dtype=None, out=None, ddof=0)
_max_u, _min_u = peakdetect(k[0](x),x,lookahead=2,delta=0)
_max_v, _min_v = peakdetect(k[1](y),y,lookahead=2,delta=0)
xm_u = [p[0] for p in _max_u]
xm_v = [p[0] for p in _max_v]
ym_u = [p[1] for p in _max_u]
ym_v = [p[1] for p in _max_v]
return (xm_u, ym_u, xm_v, ym_v, 0.0, var0, var1,lerp_u_params,lerp_v_params)
def interpFromGaussian(ppos=[0.0,0.0]):
#assume grid points are defined by integer indices
#decompose fract / whole from particle position
ppos_parts = [[0.0,0.0],[0.0,0.0]] #[fract,whole] for each x,y comp
ppos_parts[0][0] = pm.modf(ppos[0])[0];ppos_parts[0][1] = pm.modf(ppos[0])[1]
ppos_parts[1][0] = pm.modf(ppos[1])[0];ppos_parts[1][1] = pm.modf(ppos[1])[1]
# grid point numbers:
#
# (2)---(3)
# | |
# | |
# (0)---(1)
#find four corner grid point indices, numbered from gpt0 = (bottom, left) TO gpt3 = (top, right)
#calculated from whole parts
    gpt0 = [int(ppos_parts[0][1]), int(ppos_parts[1][1])] #cast to int: these are used as array indices
    gpt1 = [int(ppos_parts[0][1]) + 1, int(ppos_parts[1][1])]
    gpt2 = [int(ppos_parts[0][1]), int(ppos_parts[1][1]) + 1]
    gpt3 = [int(ppos_parts[0][1]) + 1, int(ppos_parts[1][1]) + 1]
    gpt0_dist = np.zeros(shape=(2,MEM))
    gpt1_dist = np.zeros(shape=(2,MEM))
    gpt2_dist = np.zeros(shape=(2,MEM))
    gpt3_dist = np.zeros(shape=(2,MEM))
'''
if DEBUG is True:
print "gp0";print gpt0[0]; print gpt0[1]
print "gp1";print gpt1[0]; print gpt1[1]
print "gp2";print gpt2[0]; print gpt2[1]
print "gp3";print gpt3[0]; print gpt3[1]
'''
for idx in range(0,MEM):
gpt0_dist[0][idx] = vclin[idx][gpt0[0]][gpt0[1]][SEED_LEVEL][0]
gpt0_dist[1][idx] = vclin[idx][gpt0[0]][gpt0[1]][SEED_LEVEL][1]
gpt1_dist[0][idx] = vclin[idx][gpt1[0]][gpt1[1]][SEED_LEVEL][0]
gpt1_dist[1][idx] = vclin[idx][gpt1[0]][gpt1[1]][SEED_LEVEL][1]
gpt2_dist[0][idx] = vclin[idx][gpt2[0]][gpt2[1]][SEED_LEVEL][0]
gpt2_dist[1][idx] = vclin[idx][gpt2[0]][gpt2[1]][SEED_LEVEL][1]
gpt3_dist[0][idx] = vclin[idx][gpt3[0]][gpt3[1]][SEED_LEVEL][0]
gpt3_dist[1][idx] = vclin[idx][gpt3[0]][gpt3[1]][SEED_LEVEL][1]
    #fit a single Gaussian per velocity component at each corner grid point (no EM needed here)
gp0_parms_u = fitGaussian(gp=list(gpt0_dist[0][:]));gp0_parms_v = fitGaussian(list(gpt0_dist[1][:]))
gp1_parms_u = fitGaussian(gp=list(gpt1_dist[0][:]));gp1_parms_v = fitGaussian(list(gpt1_dist[1][:]))
gp2_parms_u = fitGaussian(gp=list(gpt2_dist[0][:]));gp2_parms_v = fitGaussian(list(gpt2_dist[1][:]))
gp3_parms_u = fitGaussian(gp=list(gpt3_dist[0][:]));gp3_parms_v = fitGaussian(list(gpt3_dist[1][:]))
lerp_u_gp0_gp1_params = lerp(np.asarray(gp0_parms_u), np.asarray(gp1_parms_u), w = ppos_parts[0][0] )
lerp_u_gp2_gp3_params = lerp(np.asarray(gp2_parms_u), np.asarray(gp3_parms_u), w = ppos_parts[0][0] )
lerp_u_params = lerp( np.asarray(lerp_u_gp0_gp1_params), np.asarray(lerp_u_gp2_gp3_params), w = ppos_parts[1][0] )
lerp_v_gp0_gp1_params = lerp(np.asarray(gp0_parms_v), np.asarray(gp1_parms_v), w = ppos_parts[0][0] )
lerp_v_gp2_gp3_params = lerp(np.asarray(gp2_parms_v), np.asarray(gp3_parms_v), w = ppos_parts[0][0] )
lerp_v_params = lerp( np.asarray(lerp_v_gp0_gp1_params), np.asarray(lerp_v_gp2_gp3_params), w = ppos_parts[1][0] )
return [lerp_u_params, lerp_v_params]
def KSTestForLevel(vclin, curr_level=0):
array_of_ts_per_gp = np.zeros(shape=(LAT,LON))
max_ts = 0
curr_gp = np.zeros(shape=(2, MEM), dtype = float, order = 'F')
for curr_lon in range(LON):
for curr_lat in range(LAT):
for curr_realization in range(MEM):
curr_gp[0][curr_realization] = vclin[curr_realization][curr_lat][curr_lon][curr_level][0]
curr_gp[1][curr_realization] = vclin[curr_realization][curr_lat][curr_lon][curr_level][1]
#print "lon " + str(curr_lon)
#print "lat " + str(curr_lat)
#print 'mem ' + str(curr_realization)
#print "vclin values: " + str(curr_gp[0][curr_realization])
#curr_gp[curr_realization][2] = vclin[curr_realization][curr_lat][curr_lon][curr_level][2]
x = linspace(-15, +15, 1000)
u_pass = False;v_pass = False
for idx in range(0,MEM):
if not u_pass and curr_gp[0][idx] != 0.:
u_pass = True
gp_u_kd = stats.gaussian_kde(curr_gp[0][:])
if not v_pass and curr_gp[1][idx] != 0.:
v_pass = True
gp_v_kd = stats.gaussian_kde(curr_gp[1][:])
            mu = np.mean(curr_gp[0][:],axis=0)
            sigma = np.std(curr_gp[0][:],axis=0) #standard deviation (not variance) is needed to z-score for the KS test
            ts = 1.
            if not math.isinf(sigma) and not math.isnan(sigma) and sigma != 0.:
                normed_data = (curr_gp[0][:]-mu)/sigma
                var_std_norm = np.var(normed_data,axis=0) #equals one for a standard normal sample with mean 0
                ts, p_val = stats.kstest(normed_data,'norm')
k2, pvalue = stats.normaltest(curr_gp[0][:], axis=0)
zscore, pvalue_s = stats.skewtest(curr_gp[0][:], axis=0)
vals, counts = stats.mode(curr_gp[0][:], axis=0)
'''
if u_pass and p_val == 0.0 and var_std_norm <= 1.0 :#and pvalue > 0.01 and pvalue_s > 0.01:
plt.figure()
plt.title( str(ts) + "_" + str(curr_level) + "_" + str(curr_lon) + "_" + str(curr_lat) + "_u")
plt.hist(curr_gp[0][:],normed=1,alpha=.3,color='purple')
plt.plot(x,gp_u_kd(x),'-',color='red')
#file = "./png/" + str(ts) + "_" + str(depth) + "_" + str(curr_level) + "_" + str(curr_lon) + "_" + str(curr_lat) + "_u" + ".png"
#plt.savefig(file)
#sendFile(file)
plt.show()
'''
            mu2 = np.mean(curr_gp[1][:],axis=0)
            sigma2 = np.std(curr_gp[1][:],axis=0) #std, not variance, for z-scoring
            ts2 = 1.
            if not math.isinf(sigma2) and not math.isnan(sigma2) and sigma2 != 0.:
                normed_data = (curr_gp[1][:]-mu2)/sigma2
                var_std_norm = np.var(normed_data,axis=0) #equals one for a standard normal sample with mean 0
                ts2, p_val2 = stats.kstest(normed_data,'norm') #assign to ts2 so the u-component statistic is not overwritten
                k2, pvalue = stats.normaltest(curr_gp[1][:], axis=0) #test the v component here, not u
                zscore, pvalue_s = stats.skewtest(curr_gp[1][:], axis=0)
                vals, counts = stats.mode(curr_gp[1][:], axis=0)
'''
if v_pass and p_val == 0.0 and var_std_norm <= 1.0 :#and pvalue > 0.01 and pvalue_s > 0.01:
plt.figure()
plt.title( str(ts) + "_" + str(curr_level) + "_" + str(curr_lon) + "_" + str(curr_lat) + "_v")
plt.hist(curr_gp[0][:],normed=1,alpha=.3,color='purple')
plt.plot(x,gp_u_kd(x),'-',color='red')
#file = "./png/" + str(ts) + "_" + str(depth) + "_" + str(curr_level) + "_" + str(curr_lon) + "_" + str(curr_lat) + "_u" + ".png"
#plt.savefig(file)
#sendFile(file)
plt.show()
'''
avg_u_v_ts = (ts + ts2) / 2.0
#print ts
#print ts2
#print avg_u_v_ts
array_of_ts_per_gp[curr_lat][curr_lon] = avg_u_v_ts
if avg_u_v_ts > max_ts:
max_ts = avg_u_v_ts
return array_of_ts_per_gp, max_ts
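# The per-grid-point normality check used above, as a stand-alone sketch on a
# synthetic sample (values assumed): z-score the data, then KS-test it against
# the standard normal distribution.
def ks_demo():
    sample = np.random.normal(0.5, 2.0, MEM)
    z = (sample - np.mean(sample)) / np.std(sample)
    ts, p_val = stats.kstest(z, 'norm')
    print "KS statistic %.4f, p-value %.4f" % (ts, p_val)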
#from http://stackoverflow.com/questions/8661537/how-to-perform-bilinear-interpolation-in-python
def bilinear_interpolation(x, y, points):
'''Interpolate (x,y) from values associated with four points.
The four points are a list of four triplets: (x, y, value).
The four points can be in any order. They should form a rectangle.
>>> bilinear_interpolation(12, 5.5,
... [(10, 4, 100),
... (20, 4, 200),
... (10, 6, 150),
... (20, 6, 300)])
165.0
'''
# See formula at: http://en.wikipedia.org/wiki/Bilinear_interpolation
#
points = sorted(points) # order points by x, then by y
(x1, y1, q11), (_x1, y2, q12), (x2, _y1, q21), (_x2, _y2, q22) = points
if x1 != _x1 or x2 != _x2 or y1 != _y1 or y2 != _y2:
raise ValueError('points do not form a rectangle')
if not x1 <= x <= x2 or not y1 <= y <= y2:
raise ValueError('(x, y) not within the rectangle')
return (q11 * (x2 - x) * (y2 - y) +
q21 * (x - x1) * (y2 - y) +
q12 * (x2 - x) * (y - y1) +
q22 * (x - x1) * (y - y1)
) / ((x2 - x1) * (y2 - y1) + 0.0)
#http://www.unidata.ucar.edu/software/netcdf/examples/programs/
def writeNetCDF(array):
# the output array to write will be nx x ny
nx = LAT; ny = LON
# open a new netCDF file for writing.
ncfile = netCDF4.Dataset('ks_test_level_0.nc','w')
# create the output data.
data_out = array#arange(nx*ny) # 1d array
data_out.shape = array.shape # reshape to 2d array
# create the x and y dimensions.
ncfile.createDimension('lat',nx)
ncfile.createDimension('lon',ny)
    # create the variable (a 64-bit float in this case)
    # first argument is name of variable, second is datatype, third is
    # a tuple with the names of dimensions.
data = ncfile.createVariable('ks_test_stat',np.dtype('float64').char,('lat','lon'))
# write data to variable.
data[:] = data_out
# close the file.
ncfile.close()
#http://www.unidata.ucar.edu/software/netcdf/examples/programs/
def writeUVelocityNetCDF(array):
# the output array to write will be nx x ny
nx = LAT; ny = LON
# open a new netCDF file for writing.
ncfile = netCDF4.Dataset('vclin_level_0.nc','w')
# create the output data.
data_out = array#arange(nx*ny) # 1d array
data_out.shape = array.shape # reshape to 2d array
# create the x and y dimensions.
ncfile.createDimension('lat',nx)
ncfile.createDimension('lon',ny)
    # create the variable (a 64-bit float in this case)
    # first argument is name of variable, second is datatype, third is
    # a tuple with the names of dimensions.
data = ncfile.createVariable('u',np.dtype('float64').char,('lat','lon'))
#data2 = ncfile.createVariable('v',np.dtype('float64').char,('lat','lon'))
# write data to variable.
data[:] = data_out
# close the file.
ncfile.close()
def readVelNetCDF(file):
# open a the netCDF file for reading.
ncfile = netCDF4.Dataset(file,'r')
    # read the data from the variable named 'ks_test_stat'.
data = ncfile.variables['ks_test_stat'][:]
#nx,ny = data.shape
# check the data.
#data_check = arange(nx*ny) # 1d array
#data_check.shape = (nx,ny) # reshape to 2d array
# close the file.
ncfile.close()
return data
'''
def writeVelocityToCSBinary(data,filename):
writer = csv.writer(open(filename + ".csv", 'w'))
#writes velocities with central forecast...
for curr_level in range(LEV):
for curr_lon in range(LON):
for curr_lat in range(LAT):
for curr_realization in range(MEM):
writer.writerow(data[curr_realization][curr_lat][curr_lon][curr_level][0])
writer.writerow(data[curr_realization][curr_lat][curr_lon][curr_level][1])
def readVelocityToCSBinary(filename):
reader = csv.reader(open(filename + ".csv", 'w'))
#writes velocities with central forecast...
for curr_level in range(LEV):
for curr_lon in range(LON):
for curr_lat in range(LAT):
for curr_realization in range(MEM):
writer.writerow(data[curr_realization][curr_lat][curr_lon][curr_level][0])
writer.writerow(data[curr_realization][curr_lat][curr_lon][curr_level][1])
'''
def writeStreamlinePositions(data,filename):
#change to 'wb' after initial debug...
filename = OUTPUT_DATA_DIR + filename
writer = csv.writer(open(filename + ".csv", 'w'))
#writes velocities with central forecast...
for curr_comp in range(0,len(data),1):
#for curr_pos in range(0,len(data[curr_comp][:]),1):
#print "curr pos " + str(curr_pos)
#print "curr comp" + str(curr_comp)
#print data[curr_comp][curr_pos]
writer.writerow(data[curr_comp][:])
def readStreamlinePositions(data, filename):
#change to 'wb' after initial debug...
filename = OUTPUT_DATA_DIR + filename
reader = csv.reader(open(filename + ".csv", 'r'), delimiter=',')
idx = 0
for row in reader:
#print row
data[idx] = [float(i) for i in row]
idx += 1
def readCellCounts(data,filename):
    #read cell counts
    filename = OUTPUT_DATA_DIR + filename
    reader = csv.reader(open(filename + ".csv", 'r'), delimiter=',')
    lat = 0;lon = 0
    for row in reader:
        #fill the array that was passed in, one count per grid cell
        #print data[lat][lon]
        data[lat][lon] = len(row)-1
        if lon < LON - 1:
            lon += 1
        else:
            lon = 0
            lat += 1
            if lat >= LAT:
                break
def writeParticles(dir = 'f'):
str_integration_values = '_ss' + str(integration_step_size) + '_ts' + str(TOTAL_STEPS) + '_dir_' + str(INTEGRATION_DIR)
mode_dir = ''
if MODE == 1:
mode_dir = MODE_DIR1
elif MODE == 2:
mode_dir = MODE_DIR2
if dir == 'f':
writeStreamlinePositions(g_part_positions_ensemble,mode_dir+'e_lat'+str(SEED_LAT)+'_lon'+str(SEED_LON)+'_lev'+str(SEED_LEVEL)+str_integration_values)
writeStreamlinePositions(g_part_positions_quantile,mode_dir+'q_lat'+str(SEED_LAT)+'_lon'+str(SEED_LON)+'_lev'+str(SEED_LEVEL)+str_integration_values)
writeStreamlinePositions(g_part_positions_gmm,mode_dir+'gmm_lat'+str(SEED_LAT)+'_lon'+str(SEED_LON)+'_lev'+str(SEED_LEVEL)+str_integration_values)
writeStreamlinePositions(g_part_positions_g,mode_dir+'g_lat'+str(SEED_LAT)+'_lon'+str(SEED_LON)+'_lev'+str(SEED_LEVEL)+str_integration_values)
elif dir == 'b':
writeStreamlinePositions(g_part_positions_ensemble_b,mode_dir+'e_lat'+str(SEED_LAT)+'_lon'+str(SEED_LON)+'_lev'+str(SEED_LEVEL)+str_integration_values)
writeStreamlinePositions(g_part_positions_quantile_b,mode_dir+'q_lat'+str(SEED_LAT)+'_lon'+str(SEED_LON)+'_lev'+str(SEED_LEVEL)+str_integration_values)
writeStreamlinePositions(g_part_positions_gmm_b,mode_dir+'gmm_lat'+str(SEED_LAT)+'_lon'+str(SEED_LON)+'_lev'+str(SEED_LEVEL)+str_integration_values)
writeStreamlinePositions(g_part_positions_g_b,mode_dir+'g_lat'+str(SEED_LAT)+'_lon'+str(SEED_LON)+'_lev'+str(SEED_LEVEL)+str_integration_values)
else:
# forward and backward streamlines
# concatenate particle positions
e = [];q = [];gmm = [];g = []
#for each component
for idx in range(0,len(g_part_positions_ensemble_b)):
g_part_positions_ensemble_b[idx].reverse()
g_part_positions_quantile_b[idx].reverse()
g_part_positions_gmm_b[idx].reverse()
g_part_positions_g_b[idx].reverse()
e.append(g_part_positions_ensemble_b[idx] + g_part_positions_ensemble[idx])
q.append(g_part_positions_quantile_b[idx] + g_part_positions_quantile[idx])
gmm.append(g_part_positions_gmm_b[idx] + g_part_positions_gmm[idx])
g.append(g_part_positions_g_b[idx] + g_part_positions_g[idx])
writeStreamlinePositions(e,mode_dir+'e_lat'+str(SEED_LAT)+'_lon'+str(SEED_LON)+'_lev'+str(SEED_LEVEL)+str_integration_values)
writeStreamlinePositions(q,mode_dir+'q_lat'+str(SEED_LAT)+'_lon'+str(SEED_LON)+'_lev'+str(SEED_LEVEL)+str_integration_values)
writeStreamlinePositions(gmm,mode_dir+'gmm_lat'+str(SEED_LAT)+'_lon'+str(SEED_LON)+'_lev'+str(SEED_LEVEL)+str_integration_values)
writeStreamlinePositions(g,mode_dir+'g_lat'+str(SEED_LAT)+'_lon'+str(SEED_LON)+'_lev'+str(SEED_LEVEL)+str_integration_values)
def readParticles():
str_integration_values = '_ss' + str(integration_step_size) + '_ts' + str(TOTAL_STEPS) + '_dir_' + str(INTEGRATION_DIR)
mode_dir = ''
if MODE == 1:
mode_dir = MODE_DIR1
elif MODE == 2:
mode_dir = MODE_DIR2
'''
readStreamlinePositions(g_part_positions_ensemble,mode_dir+'e_lat'+str(SEED_LAT)+'_lon'+str(SEED_LON)+'_lev'+str(SEED_LEVEL)+str_integration_values)
readStreamlinePositions(g_part_positions_quantile,mode_dir+'q_lat'+str(SEED_LAT)+'_lon'+str(SEED_LON)+'_lev'+str(SEED_LEVEL)+str_integration_values)
readStreamlinePositions(g_part_positions_gmm,mode_dir+'gmm_lat'+str(SEED_LAT)+'_lon'+str(SEED_LON)+'_lev'+str(SEED_LEVEL)+str_integration_values)
readStreamlinePositions(g_part_positions_g,mode_dir+'g_lat'+str(SEED_LAT)+'_lon'+str(SEED_LON)+'_lev'+str(SEED_LEVEL)+str_integration_values)
'''
#read crisp sphaghetti plots
#crisp_lat45.0_lon26.0_lev0_mem277_ss0.01_ts100_dir_a.csv
for idx in range(0,MEM):
curr_member_sl = [[],[],[]]
readStreamlinePositions(curr_member_sl,'crisp/crisp_lat'+str(SEED_LAT)+'_lon'+str(SEED_LON)+'_lev'+str(SEED_LEVEL)+'_mem'+str(idx)+str_integration_values)
g_crisp_streamlines.append(curr_member_sl)
#read cell counts
filename = 'crisp/cellcounts_lat'+str(SEED_LAT)+'_lon'+str(SEED_LON)+'_lev'+str(SEED_LEVEL)+str_integration_values
readCellCounts(g_cc, filename)
#def getCmdLineArgs():
# for arg in sys.argv[1:]:
# print arg
def readNetCDF(file):
    # open the netCDF file for reading.
ncfile = netCDF4.Dataset(file,'r')
# read the data in variable named 'data'.
data = ncfile.variables['ks_test_stat'][:]
#nx,ny = data.shape
# check the data.
#data_check = arange(nx*ny) # 1d array
#data_check.shape = (nx,ny) # reshape to 2d array
# close the file.
ncfile.close()
return data
if __name__ == "__main__":
SEED_LAT = float(sys.argv[2])
SEED_LON = float(sys.argv[3])
SEED_LEVEL = int(sys.argv[4])
integration_step_size = float(sys.argv[5])
TOTAL_STEPS = int(sys.argv[6])
INTEGRATION_DIR = str(sys.argv[7]).lower()
MODE = int(sys.argv[8])
level = SEED_LEVEL
#realizations file
pe_dif_sep2_98_file = INPUT_DATA_DIR + FILE_NAME
pe_fct_aug25_sep2_file = INPUT_DATA_DIR + FILE_NAME_CENTRAL_FORECAST
#realizations reader
rreader = NetcdfReader(pe_dif_sep2_98_file)
#central forecasts reader
creader = NetcdfReader(pe_fct_aug25_sep2_file)
vclin8 = creader.readVarArray('vclin', 7)
#deviations from central forecast for all 600 realizations
vclin = rreader.readVarArray('vclin')
vclin = addCentralForecast(vclin, vclin8, level_start=SEED_LEVEL, level_end=SEED_LEVEL)
#vclin = readVelocityFromCSV(filename)
#writeVelocityToCSVBinary(vclin)
ts_per_gp = readNetCDF(INPUT_DATA_DIR +'ks_test_level_0.nc')
#vclin = readNetCDF('vclin_level_0.nc')
gen_streamlines = 'True'
gen_streamlines = sys.argv[1]
if gen_streamlines == 'True':
r.library('mixtools')
print "generating streamlines"
particle = 0
part_pos_q[particle][0] = SEED_LAT; part_pos_q[particle][1] = SEED_LON
part_pos_gmm[particle][0] = SEED_LAT; part_pos_gmm[particle][1] = SEED_LON
part_pos_g[particle][0] = SEED_LAT; part_pos_g[particle][1] = SEED_LON
part_pos_e[particle][0] = SEED_LAT; part_pos_e[particle][1] = SEED_LON
part_pos_q_b[particle][0] = SEED_LAT; part_pos_q_b[particle][1] = SEED_LON
part_pos_gmm_b[particle][0] = SEED_LAT; part_pos_gmm_b[particle][1] = SEED_LON
part_pos_g_b[particle][0] = SEED_LAT; part_pos_g_b[particle][1] = SEED_LON
part_pos_e_b[particle][0] = SEED_LAT; part_pos_e_b[particle][1] = SEED_LON
ppos = [ part_pos_q[particle][0], part_pos_q[particle][1] ]
velx, velx_prob, vely, vely_prob, velz, var_u, var_v = interpFromQuantiles(ppos)
#find highest prob vel
velx_hp = e_prev_max_vel_x
vely_hp = e_prev_max_vel_y
#find difference in peaks
max_x_1 = 0.0
max_x_2 = 0.0
max_y_1 = 0.0
max_y_2 = 0.0
x_diff = 0.0;y_diff = 0.0
max_peak_diff = 0.0
num_x_peaks = len(velx_prob)
num_y_peaks = len(vely_prob)
if num_x_peaks > 1:
velx_prob_copy = velx_prob[:]
velx_copy = velx[:]
max_x_1, max_x_2, sig = getMaxPeaks(velx_prob_copy,velx_copy)
if sig == True:
x_diff = pm.fabs(max_x_1 - max_x_2)
if num_y_peaks > 1:
vely_prob_copy = vely_prob[:]
vely_copy = vely[:]
max_y_1, max_y_2, sig = getMaxPeaks(vely_prob_copy,vely_copy)
if sig == True:
y_diff = pm.fabs(max_y_1 - max_y_2)
if x_diff > y_diff:
max_peak_diff = x_diff
else:
max_peak_diff = y_diff
'''
if num_x_peaks > 0:
m = max(velx_prob)
p = velx_prob.index(m)
velx_hp = velx[p]
if num_y_peaks > 0:
m1 = max(vely_prob)
p1 = vely_prob.index(m1)
vely_hp = vely[p1]
'''
if num_x_peaks > 0:
m = max(velx_prob)
p = velx_prob.index(m)
if MODE == 1 or num_x_peaks == 1:
velx_hp = velx[p]
elif MODE == 2 and num_x_peaks > 1:
velx_prob.pop(p)
m = max(velx_prob)
p = velx_prob.index(m)
velx_hp = velx[p]
if num_y_peaks > 0:
m = max(vely_prob)
p = vely_prob.index(m)
if MODE == 1 or num_y_peaks == 1:
vely_hp = vely[p]
elif MODE == 2 and num_y_peaks > 1:
vely_prob.pop(p)
m = max(vely_prob)
p = vely_prob.index(m)
vely_hp = vely[p]
q_prev_max_vel_x = velx_hp
q_prev_max_vel_y = vely_hp
g_part_positions_quantile[0].append(SEED_LAT)
g_part_positions_quantile[1].append(SEED_LON)
g_part_positions_quantile[2].append(DEPTH)
g_part_positions_quantile[3].append(np.sqrt(np.square(velx_hp)+np.square(vely_hp)))
g_part_positions_quantile[4].append((var_u + var_v) / 2.0)
g_part_positions_quantile[5].append((len(velx_prob)+len(vely_prob)) / 2.0)
g_part_positions_quantile[6].append(max_peak_diff)
#get peaks for ensemble
#velx, vely, velz = interpVel(ppos)
ppos = [ part_pos_e[particle][0], part_pos_e[particle][1] ]
velx, velx_prob, vely, vely_prob, velz, var_u, var_v = interpVelFromEnsemble(ppos)
#find highest prob vel
velx_hp = e_prev_max_vel_x
vely_hp = e_prev_max_vel_y
'''
if len(velx_prob) > 0:
m = max(velx_prob)
p = velx_prob.index(m)
velx_hp = velx[p]
if len(vely_prob) > 0:
m1 = max(vely_prob)
p1 = vely_prob.index(m1)
vely_hp = vely[p1]
'''
#find difference in peaks
max_x_1 = 0.0
max_x_2 = 0.0
max_y_1 = 0.0
max_y_2 = 0.0
x_diff = 0.0;y_diff = 0.0
max_peak_diff = 0.0
num_x_peaks = len(velx_prob)
num_y_peaks = len(vely_prob)
if num_x_peaks > 1:
velx_prob_copy = velx_prob[:]
velx_copy = velx[:]
max_x_1, max_x_2, sig = getMaxPeaks(velx_prob_copy,velx_copy)
if sig == True:
x_diff = pm.fabs(max_x_1 - max_x_2)
if num_y_peaks > 1:
vely_prob_copy = vely_prob[:]
vely_copy = vely[:]
max_y_1, max_y_2, sig = getMaxPeaks(vely_prob_copy,vely_copy)
if sig == True:
y_diff = pm.fabs(max_y_1 - max_y_2)
if x_diff > y_diff:
max_peak_diff = x_diff
else:
max_peak_diff = y_diff
'''
if num_x_peaks > 0:
m = max(velx_prob)
p = velx_prob.index(m)
velx_hp = velx[p]
if num_y_peaks > 0:
m1 = max(vely_prob)
p1 = vely_prob.index(m1)
vely_hp = vely[p1]
'''
if num_x_peaks > 0:
m = max(velx_prob)
p = velx_prob.index(m)
if MODE == 1 or num_x_peaks == 1:
velx_hp = velx[p]
elif MODE == 2 and num_x_peaks > 1:
velx_prob.pop(p)
m = max(velx_prob)
p = velx_prob.index(m)
velx_hp = velx[p]
if num_y_peaks > 0:
m = max(vely_prob)
p = vely_prob.index(m)
if MODE == 1 or num_y_peaks == 1:
vely_hp = vely[p]
elif MODE == 2 and num_y_peaks > 1:
vely_prob.pop(p)
m = max(vely_prob)
p = vely_prob.index(m)
vely_hp = vely[p]
e_prev_max_vel_x = velx_hp
e_prev_max_vel_y = vely_hp
g_part_positions_ensemble[0].append(SEED_LAT)
g_part_positions_ensemble[1].append(SEED_LON)
g_part_positions_ensemble[2].append(DEPTH)
g_part_positions_ensemble[3].append(np.sqrt(np.square(velx_hp)+np.square(vely_hp)))
g_part_positions_ensemble[4].append((var_u + var_v) / 2.0)
g_part_positions_ensemble[5].append((len(velx_prob)+len(vely_prob)) / 2.0)
g_part_positions_ensemble[6].append(max_peak_diff)
#get peaks for gmm
#velx, vely, velz = interpVel(ppos)
ppos = [ part_pos_gmm[particle][0], part_pos_gmm[particle][1] ]
velx, velx_prob, vely, vely_prob, velz, var_u, var_v, u_params, v_params = interpFromGMM(ppos)
#find highest prob vel
velx_hp = gmm_prev_max_vel_x
vely_hp = gmm_prev_max_vel_y
'''
if len(velx_prob) > 0:
m = max(velx_prob)
p = velx_prob.index(m)
velx_hp = velx[p]
if len(vely_prob) > 0:
m1 = max(vely_prob)
p1 = vely_prob.index(m1)
vely_hp = vely[p1]
'''
#find difference in peaks
'''
max_x_1 = 0.0
max_x_2 = 0.0
max_y_1 = 0.0
max_y_2 = 0.0
x_diff = 0.0;y_diff = 0.0
max_peak_diff = 0.0
num_x_peaks = len(u_params)#len(velx_prob)
num_y_peaks = len(v_params)#len(vely_prob)
# take peaks from largest g comps
if len(u_params) > 0:
temp_max = 0
max_idx = 0
for i in range(0,len(u_params)):
if u_params[i] > temp_max:
max_idx = i
#get max peak mean
max_x_1 = u_params[max_idx][0]
u_params[0].pop(max_idx);u_params[1].pop(max_idx);u_params[2].pop(max_idx)
temp_max = 0
max_idx = 0
for i in range(0,len(u_params)):
if u_params[i] > temp_max:
max_idx = i
#get 2nd peak mean
max_x_2 = u_params[max_idx][0]
# take peaks from largest g comps
if len(v_params) > 0:
temp_max = 0
max_idx = 0
for i in range(0,len(v_params)):
if v_params[i] > temp_max:
max_idx = i
#get max peak mean
max_y_1 = v_params[max_idx][0]
v_params[0].pop(max_idx);v_params[1].pop(max_idx);v_params[2].pop(max_idx)
temp_max = 0
max_idx = 0
for i in range(0,len(v_params)):
if v_params[i] > temp_max:
max_idx = i
#get 2nd peak mean
max_y_2 = v_params[max_idx][0]
x_diff = math.fabs(max_x_1 - max_x_2)
y_diff = math.fabs(max_y_1 - max_y_2)
max_peak_diff = max([x_diff,y_diff])
if MODE == 1:
velx_hp = max_x_1
else: #MODE ==2:
velx_hp = max_x_2
if MODE == 1:
vely_hp = max_y_1
else: #MODE ==2:
vely_hp = max_y_2
'''
#find difference in peaks
max_x_1 = 0.0
max_x_2 = 0.0
max_y_1 = 0.0
max_y_2 = 0.0
x_diff = 0.0;y_diff = 0.0
max_peak_diff = 0.0
num_x_peaks = len(velx_prob)
num_y_peaks = len(vely_prob)
if num_x_peaks > 1:
velx_prob_copy = velx_prob[:]
velx_copy = velx[:]
max_x_1, max_x_2, sig = getMaxPeaks(velx_prob_copy,velx_copy)
if sig == True:
x_diff = pm.fabs(max_x_1 - max_x_2)
if num_y_peaks > 1:
vely_prob_copy = vely_prob[:]
vely_copy = vely[:]
max_y_1, max_y_2, sig = getMaxPeaks(vely_prob_copy,vely_copy)
if sig == True:
y_diff = pm.fabs(max_y_1 - max_y_2)
if x_diff > y_diff:
max_peak_diff = x_diff
else:
max_peak_diff = y_diff
'''
if num_x_peaks > 0:
m = max(velx_prob)
p = velx_prob.index(m)
velx_hp = velx[p]
if num_y_peaks > 0:
m1 = max(vely_prob)
p1 = vely_prob.index(m1)
vely_hp = vely[p1]
'''
if num_x_peaks > 0:
m = max(velx_prob)
p = velx_prob.index(m)
if MODE == 1 or num_x_peaks == 1:
velx_hp = velx[p]
elif MODE == 2 and num_x_peaks > 1:
velx_prob.pop(p)
m = max(velx_prob)
p = velx_prob.index(m)
velx_hp = velx[p]
if num_y_peaks > 0:
m = max(vely_prob)
p = vely_prob.index(m)
if MODE == 1 or num_y_peaks == 1:
vely_hp = vely[p]
elif MODE == 2 and num_y_peaks > 1:
vely_prob.pop(p)
m = max(vely_prob)
p = vely_prob.index(m)
vely_hp = vely[p]
gmm_prev_max_vel_x = velx_hp
gmm_prev_max_vel_y = vely_hp
g_part_positions_gmm[0].append(SEED_LAT)
g_part_positions_gmm[1].append(SEED_LON)
g_part_positions_gmm[2].append(DEPTH)
g_part_positions_gmm[3].append(np.sqrt(np.square(velx_hp)+np.square(vely_hp)))
g_part_positions_gmm[4].append((var_u + var_v) / 2.0)
g_part_positions_gmm[5].append((len(velx_prob)+len(vely_prob)) / 2.0)
g_part_positions_gmm[6].append(max_peak_diff)
#get peaks for gaussian
ppos = [ part_pos_g[particle][0], part_pos_g[particle][1] ]
params = interpFromGaussian(ppos)
velx = params[0][0]
vely = params[1][0]
var_u = params[0][1]
var_v = params[1][1]
g_part_positions_g[0].append(SEED_LAT)
g_part_positions_g[1].append(SEED_LON)
g_part_positions_g[2].append(DEPTH)
g_part_positions_g[3].append(np.sqrt(np.square(velx_hp)+np.square(vely_hp)))
g_part_positions_g[4].append((var_u + var_v) / 2.0)
g_part_positions_g[5].append((len(velx_prob)+len(vely_prob)) / 2.0)
g_part_positions_g[6].append(0.0)
if INTEGRATION_DIR == 'f':
for i_step in range(1, TOTAL_STEPS):
advectEnsemble(i_step,dir = 'f')
advectQuantile(i_step, dir = 'f')
advectGMM(i_step, dir = 'f')
advectGaussian(i_step, dir = 'f')
writeParticles(dir = 'f')
elif INTEGRATION_DIR == 'b':
for i_step in range(1, TOTAL_STEPS):
advectEnsemble(i_step,dir = 'b')
advectQuantile(i_step, dir = 'b')
advectGMM(i_step, dir = 'b')
advectGaussian(i_step, dir = 'b')
writeParticles(dir = 'b')
else:
for i_step in range(1, TOTAL_STEPS ):
advectEnsemble(i_step,dir = 'f')
advectQuantile(i_step, dir = 'f')
advectGMM(i_step, dir = 'f')
advectGaussian(i_step, dir = 'f')
for i_step in range(1, TOTAL_STEPS + 1):
advectEnsemble(i_step,dir = 'b')
advectQuantile(i_step, dir = 'b')
advectGMM(i_step, dir = 'b')
advectGaussian(i_step, dir = 'b')
writeParticles(dir = 'a')
print "reused vel for quantile lerp: " + str(reused_vel_quantile)
print "finished!"
else:
print "reading particles"
readParticles()
plotParticles(ts_per_gp)
e141b5109ef148c76ef47552c65a7500e0ccd86e | d63cc8ea5b5e9bf06f0c62032dc1065dda0b5056 | apps/order/models.py | 5b12e73107a9dfb2fa4c81aa786f92e5b61c27a4 | [] | no_license | henya-lee/ecommerce | 4757bfe7f30d8bcc70b4a0ee997ab4f440462863 | 285ea86ed5d3643346fe1ef88d7b3e4e48765a72 | refs/heads/master | 2023-04-11T18:35:42.795797 | 2021-05-17T09:40:25 | 2021-05-17T09:40:25 | 363,979,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py |
from django.db import models
from django.contrib.auth.models import User
from apps.store.models import Product
# Create your models here.
class Order(models.Model):
user = models.ForeignKey(User, related_name='orders', on_delete=models.SET_NULL, blank=True, null=True)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
email = models.CharField(max_length=100)
address = models.CharField(max_length=100)
zipcode = models.CharField(max_length=100)
place = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
paid = models.BooleanField(default=False)
paid_amount = models.FloatField(blank=True, null=True)
def __str__(self):
return '%s' % self.first_name
def get_total_quantity(self):
return sum(int(item.quantity) for item in self.items.all())
class OrderItem(models.Model):
    order = models.ForeignKey(Order, related_name='items', on_delete=models.CASCADE)
product = models.ForeignKey(Product, related_name='items', on_delete=models.DO_NOTHING)
price = models.FloatField()
quantity = models.IntegerField(default=1)
def __str__(self):
        return '%s' % self.id
68a88e898837c2b48769ce8f329b01880312bcd8 | c16074df9284162febda485038e53fdabc734a0d | events/migrations/0027_auto_20161008_1342.py | 5cd603bd42cc53ceba5682fd5e2a9bb8f88a6aa5 | [] | no_license | CharterTechChair/Charter-Website | 42e8a4d5c5a5bc0925f8a55c8be276d926d112c8 | ddf53ec492202c3a8dc678ebabcd0c394e6884cb | refs/heads/master | 2023-02-12T20:02:56.143737 | 2021-01-17T00:40:59 | 2021-01-17T00:40:59 | 32,171,388 | 2 | 1 | null | 2021-01-16T22:30:17 | 2015-03-13T17:46:54 | Python | UTF-8 | Python | false | false | 1,912 | py |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('events', '0026_auto_20161002_0127'),
]
operations = [
migrations.AlterField(
model_name='event',
name='date',
field=models.DateField(default=datetime.datetime(2016, 10, 8, 17, 42, 37, 259364, tzinfo=utc), verbose_name=b'Date of Event'),
preserve_default=True,
),
migrations.AlterField(
model_name='event',
name='junior_signup_start',
field=models.DateField(default=datetime.datetime(2016, 10, 8, 17, 42, 37, 259364, tzinfo=utc), blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='event',
name='prospective_signup_start',
field=models.DateField(default=datetime.datetime(2016, 10, 8, 17, 42, 37, 259364, tzinfo=utc), blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='event',
name='senior_signup_start',
field=models.DateField(default=datetime.datetime(2016, 10, 8, 17, 42, 37, 259364, tzinfo=utc), blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='event',
name='signup_end_time',
field=models.DateField(default=datetime.datetime(2016, 10, 8, 17, 42, 37, 259364, tzinfo=utc)),
preserve_default=True,
),
migrations.AlterField(
model_name='event',
name='sophomore_signup_start',
field=models.DateField(default=datetime.datetime(2016, 10, 8, 17, 42, 37, 259364, tzinfo=utc), blank=True),
preserve_default=True,
),
]
c3714a4e0accb876c1bd652b5af80327436ed625 | adf253ebc9c3bb326a727d87ba2e071ded76d608 | /ci_scripts/test_linux-daemon-gui.py | b29ae50d3a3e638eb5cde43fd9125dc4f71a011a | [
"MIT"
] | permissive | NeblioTeam/neblio | 5e0da815df7f1d69d04090fe5e7fed2445962dce | cebf9fcb1fb4e9935fcfdf459d5185488a2c04e5 | refs/heads/master | 2023-05-01T20:35:36.266611 | 2023-03-02T07:31:07 | 2023-03-02T07:31:07 | 98,357,215 | 143 | 71 | MIT | 2023-04-19T10:07:40 | 2017-07-25T23:06:34 | C++ | false | false | 3,035 | py |
import os
import urllib2
import multiprocessing as mp
import neblio_ci_libs as nci
nci.setup_travis_or_gh_actions_env_vars()
working_dir = os.getcwd()
deploy_dir = os.path.join(os.environ['BUILD_DIR'],'deploy', '')
nci.mkdir_p(deploy_dir)
os.chdir(deploy_dir)
build_target = ''
build_target_alt = ''
if(os.environ['target_v'] == "linux_daemon"):
build_target = 'nebliod'
build_target_alt = 'nebliod'
elif(os.environ['target_v'] == "linux_wallet_test"):
build_target = 'tests-neblio-qt'
build_target_alt = 'tests-neblio-Qt'
os.chdir(os.environ['BUILD_DIR'])
# download test data
nci.call_with_err_code('wget --progress=dot:giga https://assets.nebl.io/testdata/test_data_mainnet_tab.tar.xz -O ./wallet/test/data/test_data_mainnet_tab.tar.xz')
nci.call_with_err_code('wget --progress=dot:giga https://assets.nebl.io/testdata/test_data_testnet_tab.tar.xz -O ./wallet/test/data/test_data_testnet_tab.tar.xz')
nci.call_with_err_code('tar -xJvf ./wallet/test/data/test_data_mainnet_tab.tar.xz -C ./wallet/test/data')
nci.call_with_err_code('tar -xJvf ./wallet/test/data/test_data_testnet_tab.tar.xz -C ./wallet/test/data')
nci.call_with_err_code('rm ./wallet/test/data/*.tar.xz')
os.chdir(deploy_dir)
else:
build_target = 'neblio-qt'
build_target_alt = 'neblio-Qt'
# Install docker
# nci.call_with_err_code('curl -fsSL https://get.docker.com -o get-docker.sh && sudo sh get-docker.sh && rm get-docker.sh')
# move .ccache folder to our deploy directory
nci.mkdir_p(os.path.join(working_dir,'.ccache', ''))
nci.call_with_err_code('mv ' + os.path.join(working_dir,'.ccache', '') + ' ' + os.path.join(deploy_dir,'.ccache', ''))
# Start Docker Container to Build nebliod or neblio-Qt
nci.call_with_err_code('sudo docker run -e BUILD=' + build_target + ' -v ' + os.environ['BUILD_DIR'] + ':/root/vol -t neblioteam/nebliod-build-ccache')
nci.call_with_err_code('sleep 15 && sudo docker kill $(sudo docker ps -q);exit 0')
# move .ccache folder back to ccache dir
nci.call_with_err_code('mv ' + os.path.join(deploy_dir,'.ccache', '') + ' ' + os.path.join(working_dir,'.ccache', ''))
file_name = '$(date +%Y-%m-%d)---' + os.environ['BRANCH'] + '-' + os.environ['COMMIT'][:7] + '---' + build_target_alt + '---ubuntu16.04.tar.gz'
# Check if binary exists before trying to package it.
# If it does not exist we had a build timeout
if(os.path.isfile(build_target)):
nci.call_with_err_code('tar -zcvf "' + file_name + '" ' + build_target)
nci.call_with_err_code('rm -f ' + build_target)
nci.call_with_err_code('echo "Binary package at ' + deploy_dir + file_name + '"')
# set the SOURCE_DIR & SOURCE_PATH env vars, these point to the binary that will be uploaded
nci.call_with_err_code('echo "SOURCE_DIR=' + deploy_dir + '" >> $GITHUB_ENV')
nci.call_with_err_code('echo "SOURCE_PATH=' + deploy_dir + file_name + '" >> $GITHUB_ENV')
# if we are just running tests, delete the deploy package
if(os.environ['target_v'] == "linux_wallet_test"):
    nci.call_with_err_code('rm -f ' + deploy_dir + file_name)
384655f551d569d0d82ac2fef4cb9233e2021e67 | 0eb77a906c13057f3156bcb8e0ccc10feac48f3b | /tests/test_expected_failures.py | 2ac2ea9be78d10daa460573c15add9080b21bb37 | [
"MIT"
] | permissive | VM-development/UnitTesting | 7afbbfbbd8558d8a196146c40b86841f18c28102 | 937e9988e5dac1aa888bf874a1e7b0f2f86422d7 | refs/heads/master | 2020-07-28T22:49:42.377462 | 2019-09-18T09:08:08 | 2019-09-18T09:08:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,294 | py |
from unittest.case import _ExpectedFailure, _UnexpectedSuccess
from unittesting import DeferrableTestCase, expectedFailure
class TestExpectedFailures(DeferrableTestCase):
def test_expected_failure_coroutine(self):
@expectedFailure
def testitem():
yield
1 / 0
try:
yield from testitem()
except _ExpectedFailure:
pass
else:
self.fail('Expected _ExpectedFailure')
def test_expected_failure(self):
@expectedFailure
def testitem():
1 / 0
try:
yield from testitem()
except _ExpectedFailure:
pass
else:
self.fail('Expected _ExpectedFailure')
def test_unexpected_success_coroutine(self):
@expectedFailure
def testitem():
yield
try:
yield from testitem()
except _UnexpectedSuccess:
pass
else:
self.fail('Expected _UnexpectedSuccess')
def test_unexpected_success(self):
@expectedFailure
def testitem():
...
try:
yield from testitem()
except _UnexpectedSuccess:
pass
else:
self.fail('Expected _UnexpectedSuccess')
f859154d5f57224e28ff3c86d52fb20ef3daa727 | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/surface/firebase/test/android/models/describe.py | 2e6269eaff842799d27a35ceee33f6973b77541a | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 2,172 | py |
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The 'gcloud firebase test android models describe' command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.firebase.test import exceptions
from googlecloudsdk.api_lib.firebase.test import util
from googlecloudsdk.calliope import base
DETAILED_HELP = {
'EXAMPLES': """
To see the attributes of the android model 'my-model', run:
$ {command} my-model
""",
}
class Describe(base.DescribeCommand):
"""Describe an Android model."""
@staticmethod
def Args(parser):
"""Method called by Calliope to register flags for this command.
Args:
parser: An argparse parser used to add arguments that follow this
command in the CLI. Positional arguments are allowed.
"""
# Positional arg
parser.add_argument(
'model_id',
help='ID of the model to describe, found using '
'$ {parent_command} list.')
def Run(self, args):
"""Run the 'gcloud firebase test android models describe' command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation (i.e. group and command arguments combined).
Returns:
The Android model we want to show a description of.
"""
catalog = util.GetAndroidCatalog(self.context)
for model in catalog.models:
if model.id == args.model_id:
return model
raise exceptions.ModelNotFoundError(args.model_id)
Describe.detailed_help = DETAILED_HELP
bd713cacf29a8daa5bc11405e1234ffe4d5053e6 | 808c626eb8d3ec779cb9a579740fc3b71637973e | /verification/tests/core/pro_results.py | 979155fdca9b46b9815a877bdd904b7330fb262a | [
"MIT"
] | permissive | hossamfadeel/pito_riscv | 86777518cc2eee2ef1d43fc28a5c47fcbf99e9ac | 94df6f2201798765984017c82d1fdf0355f68d45 | refs/heads/master | 2023-07-10T04:20:03.861621 | 2021-08-07T02:52:59 | 2021-08-07T02:52:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py |
all_tests = ['add','andi','bge','bltu','jalr','lbu','lui','ori','sll','slti','srl','sw',
'xori','addi','auipc', 'bgeu','bne','j','lh','lw','sb','slli','sra','srli',
'test','and','beq','blt','jal','lb','lhu','or','sh','slt','srai','sub','xor']
result = {}
for test in all_tests:
filename = "log_" + test + ".log"
with open(filename, 'r', errors='replace') as f:
#print("processing {:s}".format(test))
lines = f.readlines()
result[test] = "NOT COMPLETE"
for line in lines:
if "O K" in line:
result[test] = "PASS"
break
elif "E R O" in line:
result[test] = "FAIL"
break
print("{:10s}: {:s}".format(test, result[test]))
21da6730408720e864074b332fe8aa9c76b7bd59 | 8aa3862892f0728eba5c31b27f56440962763551 | count_and_say.py | fe237c1ce1357c2465dd7610318e9676b82b8e84 | [] | no_license | WendyBaiYunwei/leetCodeSoln | 3220988407ac839a98a12df179993ae6784637ae | 16e2b3be21f8d0892e3323e57a41a0c7abe499b0 | refs/heads/master | 2022-04-17T12:14:07.189844 | 2020-04-17T06:29:11 | 2020-04-17T06:29:11 | 256,421,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py |
class Solution:
def countAndSay(self, n: int) -> str:
res = [1,1]
if n == 1:
return '1'
if n == 2:
return '11'
for _ in range(1,n-1):
new = []
subList = []
for num in res:
if not subList:
subList.append(num)
else:
if subList[-1] == num:
subList.append(num)
else:
new.append(subList.copy())
subList.clear()
subList.append(num)
new.append(subList.copy())
newRes = []
for subList in new:
count = len(subList)
val = subList[0]
newRes.append(count)
newRes.append(val)
res = newRes
final = ""
for char in newRes:
final+=str(char)
        return final
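# Minimal usage sketch (an illustration, not part of the original solution):
# each term reads the previous one aloud, e.g. "21" -> one 2, one 1 -> "1211".
if __name__ == "__main__":
    s = Solution()
    print(s.countAndSay(1))  # "1"
    print(s.countAndSay(4))  # "1211"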
867eef15b10f56265c0191117aa6083a3190b598 | 5da5c1dcc7358dd0d5d1a1c26a853b633e31e512 | YQ/wsgi.py | e08c8b45a902b3976effbe8d7d6b55d3fef6c42e | [] | no_license | zwj4386/YQ | 94b6d0493a08ccdddcfb7ed006ae215a26edf268 | 83315e70be45a8d04dc9e6606ebddda556dd3927 | refs/heads/master | 2021-05-11T09:19:01.273317 | 2018-01-19T04:06:55 | 2018-01-19T04:06:55 | 118,074,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py |
"""
WSGI config for YQ project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "YQ.settings")
application = get_wsgi_application()
8c49ec26ee9a34f09bae0f21d55ed20b5381019d | 8e411080e358b4f765a6240047483bc45d8ba141 | migrations/versions/5fca91e724f3_addresses_table.py | a8ed6821bf3cde9e7565735d9d6b13cb18923a8a | [] | no_license | bplantico/address_book | 6dab03ce610ec4e441cae988e876b30315bbb96f | bb95b58058d0807201c7b26d731f07d7bb14bbdc | refs/heads/master | 2022-12-11T07:06:59.764008 | 2019-10-07T17:13:34 | 2019-10-07T17:13:34 | 212,234,584 | 0 | 0 | null | 2022-12-08T06:40:39 | 2019-10-02T01:45:40 | Python | UTF-8 | Python | false | false | 1,180 | py |
"""addresses table
Revision ID: 5fca91e724f3
Revises:
Create Date: 2019-10-03 14:25:28.902577
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5fca91e724f3'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('address',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=60), nullable=True),
sa.Column('address', sa.String(length=120), nullable=True),
sa.Column('city', sa.String(length=60), nullable=True),
sa.Column('state', sa.String(length=60), nullable=True),
sa.Column('zip', sa.Integer(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_address_timestamp'), 'address', ['timestamp'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_address_timestamp'), table_name='address')
op.drop_table('address')
# ### end Alembic commands ###
108813c2966f2d28ebb9bd0f2bc7b804dff57b7b | 18dd49bbbb26e9aa7dfcc130c5056a6d71c806dd | Second/venv/Scripts/pip3.6-script.py | 603907a7ffde12bbf64fdc8f52c64b193e04fa1d | [] | no_license | jin234/Python | f20f8f35347bd731c18e4fab81344d9b3bf1c51d | e0b7d27c0d91a157ec50400a94607d58c6e7c465 | refs/heads/master | 2020-04-06T17:23:00.721984 | 2018-11-15T06:56:10 | 2018-11-15T06:56:10 | 157,656,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py |
#!D:\58121003-6\Python02\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3.6'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3.6')()
)
b057ac191b726d52a6f84bef87683ab909713538 | 807f7a447ab6fcda12109b9835a28c85c62bf071 | scripts/add_valid_user.lmns | e38a0e268db81b0c7d2a81fedc4f406f14f71ded | [] | no_license | tosbaa/samba_final | 81d816a09a34af36f9944f7143592a21ce4fa05b | 47ccba1e7822dfac3a3312aca49eda92b2f59737 | refs/heads/master | 2022-01-17T10:41:51.489928 | 2019-07-19T07:22:46 | 2019-07-19T07:22:46 | 197,715,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,329 | lmns |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# 1
# Share a file with a user
# Adds the given user to the Samba share
# 1.0
# samba
# section_name:string,user_name:string
# 3
# Yagiz Kocer
# [email protected]
# Havelsan
# add_valid_user
import configparser
import json
import sys
import subprocess
import re
SAMBA_CONFIG_PARSER = configparser.ConfigParser()
SAMBA_FILE_PATH = "/etc/samba/smb.conf"
SAMBA_CONFIG_PARSER.read(SAMBA_FILE_PATH)
SECTION_NAME = sys.argv[2]
USER_NAME = sys.argv[3]
def get_option_value(section_name, option_name):
""" Takes two str and returns the option as string, exceptions are NoOptionError and NoSectionError"""
return (SAMBA_CONFIG_PARSER.get(section_name, option_name))
def add_user(section_name, user_name):
""" Simply adds given user name to valid users option in config file """
if not SAMBA_CONFIG_PARSER.has_option(section_name, "valid users"):
add_option(section_name, "valid users", user_name)
else:
sed_script = r"sed -r -i '/^\[%s\]/,/^\[/{s/(^valid users = .*)/\1 %s/}' %s" % (
section_name, user_name, SAMBA_FILE_PATH)
subprocess.Popen(sed_script, shell=True)
def user_exist(section_name, user_name):
""" Checks user in config file, doesn't check user exist in the machine """
try:
already_defined_users = [user.strip() for user in get_option_value(
section_name, "valid users").split(" ")]
return user_name in already_defined_users
except configparser.NoOptionError:
return False
def user_exist_in_system(user_name):
    """Check username in etc/passwd"""
    bash_command = "getent passwd | grep -c '^{:s}:'".format(user_name)
    match_count = subprocess.run(bash_command, shell=True, stdout=subprocess.PIPE).stdout
    # grep -c prints the match count as text (e.g. b"1\n"), so convert before comparing
    return int(match_count.decode("utf-8").strip()) > 0
def add_option(section_name, option_name, value):
sed_script = r"sed -i '/\[{:s}\]/a {:s} \= {:s}' {:s}".format(
section_name, option_name, value, SAMBA_FILE_PATH)
subprocess.Popen(sed_script, shell=True)
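# Illustration (hypothetical values): add_option("myshare", "valid users", "alice")
# builds and runs:  sed -i '/\[myshare\]/a valid users \= alice' /etc/samba/smb.conf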
def before():
if not SAMBA_CONFIG_PARSER.has_section(SECTION_NAME):
print("Section name : '{:s}' not exist".format(SECTION_NAME))
exit()
if user_exist(SECTION_NAME, USER_NAME):
print("User : '{:s}' already in valid users".format(USER_NAME))
exit()
if not user_exist_in_system(USER_NAME):
print("User : '{:s}' does not exist".format(USER_NAME))
exit()
print("ok")
def run():
add_user(SECTION_NAME, USER_NAME)
def after():
if not user_exist(SECTION_NAME, USER_NAME):
print("User: '{:s}' can not be added".format(USER_NAME))
exit()
print("ok")
def automate():
before_output = make_bash_call('before')
if before_output != "ok\n":
print(before_output)
exit()
print('before ok')
make_bash_call('run')
after_output = make_bash_call('after')
if after_output != 'ok\n':
print(after_output)
exit()
print('after ok')
def make_bash_call(stage_name):
bash = ['python3.7', __file__, stage_name, sys.argv[2], sys.argv[3]]
output = subprocess.Popen(bash, stdout=subprocess.PIPE).stdout
return output.read().decode('utf-8')
if __name__ == "__main__":
globals()[sys.argv[1]]()
250bea95e339f759048f6e65e8d2ab46e2e0255b | fe5ace1dbd848d01c94395195c4cfa03d7bdbc42 | test_solver.py | 1e1f0e71fce6ab321289c974f4b1a2b4dadf3be0 | [] | no_license | mikemercer87/particle-diffusion-test | dd119ba5a6fb1334c91e86981706b20e38d3a802 | 00afde78d5f000e085982bc906eb3b26f54ffdd7 | refs/heads/master | 2020-07-28T22:49:42.377462 | 2018-11-01T16:07:08 | 2018-11-01T16:07:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py |
from solver import *
def test_j_function():
    d = Diffusion()
    d.j()
    # the attributes live on the Diffusion instance, not on an undefined self
    for i in d.j_array:
        assert i <= d.J_0
ef2ce05740e7b844a4e6ec703eaeddecf5ca482f | f52bce3bbd32e16b340edb9789fb6494e27afa38 | pythings/hourglass_array.py | af6a233d75e1867fe1e66c4e7ffc634e9c4c015d | [] | no_license | hsoni1687/crackDCode | e9332f731862b08864c5f6d34eb77892fe317c89 | 9143e8220fcfc7b99bf48e39d0b2b9b5df0828fc | refs/heads/master | 2020-09-17T05:58:03.402721 | 2021-03-29T14:35:47 | 2021-03-29T14:35:47 | 224,012,244 | 0 | 0 | null | 2020-03-29T14:37:24 | 2019-11-25T18:12:06 | Python | UTF-8 | Python | false | false | 2,786 | py |
"""Given a 2D Array, arr:
1 1 1 0 0 0
0 1 0 0 0 0
1 1 1 0 0 0
0 0 0 0 0 0
0 0 0 0 0 0
0 0 0 0 0 0
We define an hourglass in arr to be a subset of values with indices falling in this pattern in arr's graphical representation:
a b c
d
e f g
There are 16 hourglasses in arr, and an hourglass sum is the sum of an hourglass' values. Calculate the hourglass sum for every hourglass in arr, then print the maximum hourglass sum.
For example, given the 2D array:
-9 -9 -9 1 1 1
0 -9 0 4 3 2
-9 -9 -9 1 2 3
0 0 8 6 6 0
0 0 0 -2 0 0
0 0 1 2 4 0
We calculate the following hourglass values:
-63, -34, -9, 12,
-10, 0, 28, 23,
-27, -11, -2, 10,
9, 17, 25, 18
Our highest hourglass value is from the hourglass:
0 4 3
1
8 6 6
Note: If you have already solved the Java domain's Java 2D Array challenge, you may wish to skip this challenge.
Function Description
Complete the function hourglassSum in the editor below. It should return an integer, the maximum hourglass sum in the array.
hourglassSum has the following parameter(s):
arr: an array of integers
Input Format
Each of the 6 lines of input contains 6 space-separated integers.
Constraints
Output Format
Print the largest (maximum) hourglass sum found in arr.
Sample Input
1 1 1 0 0 0
0 1 0 0 0 0
1 1 1 0 0 0
0 0 2 4 4 0
0 0 0 2 0 0
0 0 1 2 4 0
Sample Output
19
Explanation
arr contains the following hourglasses (diagram omitted).
The hourglass with the maximum sum (19) is:
2 4 4
2
1 2 4"""
#!/bin/python3
import math
import os
import random
import re
import sys
#Complete the hourglassSum function below.
def hourglassSum(arr):
    hourglass_length = 3
    hourglass_height = 3
    no_of_hourglass_by_length = 6-3+1
    no_of_hourglass_by_height = 6-3+1
    max_sum = None
    for start_length in range(no_of_hourglass_by_length):
        for start_height in range(no_of_hourglass_by_height):
            curr_sum = get_hourglass_sum(arr, start_length, start_height, hourglass_length, hourglass_height)
            if max_sum is None or curr_sum > max_sum:
                max_sum = curr_sum
    return max_sum
def get_hourglass_sum(arr, length, height, hourglass_length, hourglass_height):
    sum_length = 0
    for h in range(height, height+hourglass_height):
        # top and bottom rows contribute all three cells, the middle row only its centre
        if h == height or h == height + hourglass_height - 1:
            for l in range(length, length+hourglass_length):
                sum_length = sum_length+arr[h][l]
        else:
            sum_length = sum_length+arr[h][math.floor(hourglass_length/2)+length]
    return sum_length
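# Worked check against the sample in the docstring: the hourglass
#   0 4 3
#     1
#   8 6 6
# sums to (0+4+3) + 1 + (8+6+6) = 28, the documented maximum for that array.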
if __name__ == '__main__':
#fptr = open(os.environ['OUTPUT_PATH'], 'w')
arr =[]# [[1, 1, 1, 0, 0, 0], [0, 1, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
for _ in range(6):
arr.append(list(map(int, input().rstrip().split())))
print(arr)
    result = hourglassSum(arr)
    print(result)
#fptr.write(str(result) + '\n')
#fptr.close()
379bb60b989827755983015f3c8a215044afd5c6 | 6aaed35d75e1867fe1e66c4e7ffc634e9c4c015d | run_CORDOVA.py | c77191d10c12b479613b7df9ad5d2b8e3e366307 | [] | no_license | ancordovag/assignment4 | ca7f5c110d17510dca4d28f1b6d76ecb9eec26ce | 6c8e5a2ca19abdebfec8ab52a0d80632cd53ec0f | refs/heads/main | 2023-06-17T20:26:38.257374 | 2021-07-21T17:24:56 | 2021-07-21T17:24:56 | 379,573,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,153 | py |
import os
import argparse
import yaml
import random
import torch
import time
from utils import Dataset
from model import NLINet
from torch.utils.data import DataLoader
from torch.optim import Adam
from torch import nn as nn
from tqdm import tqdm
from time import sleep
def parse_args():
"""Parse input arguments"""
parser = argparse.ArgumentParser(description='Experiment Args')
parser.add_argument(
'--RUN_MODE', dest='RUN_MODE',
choices=['train', 'val', 'test'],
help='{train, val, test}',
type=str, required=True
)
parser.add_argument(
'--CPU', dest='CPU',
help='use CPU instead of GPU',
action='store_true'
)
parser.add_argument(
'--RESUME', dest='RESUME',
help='resume training',
action='store_true'
)
parser.add_argument(
'--CKPT_E', dest='CKPT_EPOCH',
help='checkpoint epoch',
type=int
)
parser.add_argument(
'--VERSION', dest='VERSION',
help='model version',
type=int
)
parser.add_argument(
'--DEBUG', dest='DEBUG',
help='enter debug mode',
action='store_true'
)
args = parser.parse_args()
return args
class MainExec(object):
def __init__(self, args, configs):
self.args = args
self.cfgs = configs
if self.args.CPU:
self.device = torch.device("cpu")
else:
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu"
) # for failsafe
if self.args.VERSION is None:
self.model_ver = str(random.randint(0, 99999999))
else:
self.model_ver = str(self.args.VERSION)
print("Model version:", self.model_ver)
# Fix seed
self.seed = int(self.model_ver)
torch.manual_seed(self.seed)
torch.cuda.manual_seed_all(self.seed)
random.seed(self.seed)
def train(self):
data = Dataset(self.args)
pretrained_emb = data.pretrained_emb
token_size = data.token_size
label_size = data.label_size
data_size = data.data_size
#print("Token Size = {}".format(token_size))
#print("Label Size = {}".format(label_size))
#print("Pretrained Embed Size = {}".format(pretrained_emb.size))
"""
# TODO: You should declare the model here (and send it to your selected device).
You should define the loss function and optimizer, with learning
rate obtained from the configuration file. You should also use
`torch.utils.data.Dataloader` to load the data from Dataset object.
For more information, see:
https://pytorch.org/docs/stable/data.html#module-torch.utils.data .
"""
pretrained_emb_torch = torch.from_numpy(pretrained_emb)
batch_size = self.cfgs["batch_size"]
lr = self.cfgs["lr"]
net = NLINet(configs=self.cfgs,pretrained_emb=pretrained_emb_torch,token_size=token_size,label_size=label_size)
net = net.to(self.device)
loss_fn = nn.CrossEntropyLoss()
optimizer = Adam(net.parameters(), lr=lr)
dataloader = DataLoader(data,batch_size=batch_size,shuffle=True)
# -----------------------------------------------------------------------
if self.args.RESUME:
print('Resume training...')
start_epoch = self.args.CKPT_EPOCH
path = os.path.join(os.getcwd(),
self.model_ver,
'epoch' + str(start_epoch) + '.pkl')
# Load state dict of the model and optimizer
ckpt = torch.load(path, map_location=self.device)
net.load_state_dict(ckpt['state_dict'])
optimizer.load_state_dict(ckpt['optimizer'])
else:
start_epoch = 0
os.mkdir(os.path.join(os.getcwd(), self.model_ver))
loss_sum = 0
for epoch in range(start_epoch, self.cfgs["epochs"]):
with tqdm(dataloader) as tepoch:
for step, (
premise_iter,
hypothesis_iter,
label_iter
) in enumerate(tepoch):
tepoch.set_description("Epoch {}".format(str(epoch)))
"""
#TODO: Fill the training loop.
"""
optimizer.zero_grad()
outputs = net(premise_iter,hypothesis_iter)
#print("Outputs size: {}".format(outputs.size()))
#print("Label size: {}".format(label_iter.size()))
loss = loss_fn(outputs, label_iter.squeeze(1))
loss_sum += loss.item()
loss.backward()
optimizer.step()
# ---------------------------------------------------
tepoch.set_postfix(loss=loss.item())
sleep(0.1)
print('Average loss: {:.4f}'.format(loss_sum/len(dataloader)))
epoch_finish = epoch + 1
# Save checkpoint
state = {
'state_dict': net.state_dict(),
'optimizer': optimizer.state_dict()
}
torch.save(
state,
os.path.join(os.getcwd(),
self.model_ver,
'epoch' + str(epoch_finish) + '.pkl')
)
loss_sum = 0
def eval(self):
data = Dataset(self.args)
pretrained_emb = data.pretrained_emb
token_size = data.token_size
label_size = data.label_size
data_size = data.data_size
"""
# TODO: You should declare the model here (and send it to your selected device).
Don't forget to set the model to evaluation mode. You should also use
`torch.utils.data.Dataloader` to load the data from Dataset object.
"""
pretrained_emb_torch = torch.from_numpy(pretrained_emb)
batch_size = self.cfgs["batch_size"]
net = NLINet(configs=self.cfgs, pretrained_emb=pretrained_emb_torch, token_size=token_size, label_size=label_size)
net = net.to(self.device)
path = os.path.join(os.getcwd(),
self.model_ver,
'epoch' + str(self.args.CKPT_EPOCH) + '.pkl')
dataloader = DataLoader(data, batch_size=batch_size,shuffle=False)
# Load state dict of the model
ckpt = torch.load(path, map_location=self.device)
net.load_state_dict(ckpt['state_dict'])
net.eval()
"""TODO : Evaluate the model using accuracy as metrics."""
corrects = 0
N = 0
with tqdm(dataloader) as tepoch:
with torch.no_grad():
for step, (
premise_iter,
hypothesis_iter,
label_iter
) in enumerate(tepoch):
outputs = net(premise_iter, hypothesis_iter)
#print("Outputs {}".format(outputs.shape))
best_outs = torch.argmax(outputs,1)
#print("Best outputs {}".format(best_outs))
for l, o in zip(label_iter, best_outs):
if l == o:
corrects += 1
N += 1
accuracy = corrects / N
        print('Accuracy: {:.4f}'.format(accuracy))
# -------------------------------------------------
def overfit(self):
data = Dataset(self.args)
pretrained_emb = data.pretrained_emb
token_size = data.token_size
label_size = data.label_size
data_size = data.data_size
"""
TODO : You should declare the model here (and send it to your selected device).
You should define the loss function and optimizer, with learning
rate obtained from the configuration file. You should also use
`torch.utils.data.Dataloader` to load the data from Dataset object.
Use only a single batch to ensure your model is working correctly.
"""
pretrained_emb_torch = torch.from_numpy(pretrained_emb)
batch_size = 1
lr = self.cfgs["lr"]
net = NLINet(configs=self.cfgs, pretrained_emb=pretrained_emb_torch, token_size=token_size, label_size=label_size)
net = net.to(self.device)
loss_fn = nn.CrossEntropyLoss()
optimizer = Adam(net.parameters(), lr=lr)
dataloader = DataLoader(data, batch_size=batch_size)
# -----------------------------------------------------------------------
start_epoch = 0
"""
TODO : Train using a single batch and observe the loss. Does it converge?.
"""
        loss_sum = 0
        # grab one fixed batch and step on it repeatedly, per the TODO above;
        # the loss should fall towards zero if model/loss/optimizer are wired
        # correctly (the 100-step count here is an arbitrary illustrative choice)
        premise_iter, hypothesis_iter, label_iter = next(iter(dataloader))
        for step in tqdm(range(100)):
            optimizer.zero_grad()
            outputs = net(premise_iter, hypothesis_iter)
            loss = loss_fn(outputs, label_iter.squeeze(1))
            loss_sum += loss.item()
            loss.backward()
            optimizer.step()
        print("Total Loss : {}".format(loss_sum))
# -----------------------------------------------------------------
def run(self, run_mode):
if run_mode == 'train' and self.args.DEBUG:
print('Overfitting a single batch...')
self.overfit()
elif run_mode == 'train':
print('Starting training mode...')
self.train()
elif run_mode == 'val':
print('Starting validation mode...')
self.eval()
elif run_mode == 'test':
print('Starting test mode...')
self.eval()
else:
exit(-1)
if __name__ == "__main__":
args = parse_args()
with open('./config.yml', 'r') as f:
model_config = yaml.safe_load(f)
exec = MainExec(args, model_config)
exec.run(args.RUN_MODE)
479909e738c2439397c85cf7b05aeff54cd8ae06 | c2471dcf74c5fd1ccf56d19ce856cf7e7e396b80 | chap15/2.py | 903e0714820195f4a616810ac257d6c0b7ea92f6 | [] | no_license | oc0de/pythonEpi | eaeef2cf748e6834375be6bc710132b572fc2934 | fb7b9e06bb39023e881de1a3d370807b955b5cc0 | refs/heads/master | 2021-06-18T05:33:19.518652 | 2017-07-07T04:34:52 | 2017-07-07T04:34:52 | 73,049,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py |
class Node(object):
def __init__(self, data, left=None, right=None):
self.data = data
self.left = left
self.right = right
def findNextKey(root, key):
if not root: return
if key > root.data:
return findNextKey(root.right, key)
if key < root.data:
p = findNextKey(root.left, key)
if not p: return root.data
return p
    if key == root.data and root.right:
        # the in-order successor is the leftmost node of the right subtree
        succ = root.right
        while succ.left:
            succ = succ.left
        return succ.data
    return
tree = Node(8, Node(5, Node(3), Node(7)), Node(12, Node(9, None, Node(11, Node(10))), Node(14)))
print findNextKey(tree, 9)
7eb6aa2c7a3a61820192f4a616810ac257d6c0b7 | 2ab5463706f9f70dcb717745127c4dd350f08342 | Src/Decorators/Decorator_to_calculate_functionTime.py | df4fbbca9c3bb326a727d87ba2e071ded76d608a | [] | no_license | ChitharaKarunasekera/Advanced-Python-Concepts | 63c08d2d9971727c675e35a54360d7e4d221f9e2 | 3b25e06ebf5a023f06924c3ae2d73718426c9988 | refs/heads/main | 2023-06-03T09:31:17.155323 | 2021-06-29T05:23:27 | 2021-06-29T05:23:27 | 375,720,971 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py |
# -*- coding: utf-8 -*-
"""
Topic : Decorators in Python
Author : Chinmay Tagare
"""
import time
import math
# Define decorator function
def calculate_time(main_func):
    def inner_func(*args, **kwargs):
        begin = time.time()
        result = main_func(*args, **kwargs)
        end = time.time()
        print('Total time taken = {}'.format(end-begin))
        # pass the wrapped function's return value through
        return result
    return inner_func
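# Optional refinement: applying functools.wraps(main_func) to inner_func
# would also preserve the decorated function's __name__ and docstring.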
@calculate_time
def sqr_func(limit):
sqrs = [x*x for x in range(limit)]
return sqrs
@calculate_time
def factorial_func(limit):
fact_result = math.factorial(limit)
print(fact_result)
return fact_result
sqr_func(1000000)
print('---------')
factorial_func(25)
118208c4fd8ae6100b849572f1987522739c72ef | 36d1af455d542a3321289c974f4b1a2b4dadf3be | CompOp.py | d7b1126de16bccc5df68df2a2b0f66f5a02a90ab | [] | no_license | motomaniak/InterpreterProgram | 778a1d24c1357c2465dd7610318e9676b82b8e84 | 0b4fe1e71cfc4d85388d5a3e60c602c9abf3d8e1 | refs/heads/master | 2021-01-10T14:35:27.242613 | 2017-01-27T21:43:07 | 2017-01-27T21:43:07 | 80,242,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py |
import sys
class CompOp:
def __init__(self, t):
        self.case = 0
        self.t = t
    def parse(self):
        #check which operator is being used by token number and assign a different case for each
        x = int(self.t.peek())
        if x == 25: #!=
            pass
        elif x == 26: #==
            self.case = 1
        elif x == 27: #<
            self.case = 2
        elif x == 28: #>
            self.case = 3
        elif x == 29: #<=
            self.case = 4
        elif x == 30: #>=
            self.case = 5
        else:
            print "Invalid comparison operator."
            sys.exit()
        self.t.getToken()
    def execute(self):
        return self.case
    def Print(self):
        if self.case == 0:
            return '!='
        elif self.case == 1:
            return '=='
        elif self.case == 2:
            return '<'
        elif self.case == 3:
            return '>'
        elif self.case == 4:
            return '<='
        elif self.case == 5:
            return '>='
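# Minimal usage sketch (assumes a tokenizer exposing peek()/getToken(),
# as parse() above expects; token number 26 stands for '==')
class FakeTokenizer:
    def __init__(self, tokens):
        self.tokens = tokens
    def peek(self):
        return self.tokens[0]
    def getToken(self):
        return self.tokens.pop(0)

if __name__ == '__main__':
    op = CompOp(FakeTokenizer([26]))
    op.parse()
    print op.Print() # prints '=='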
824fb823da3b50f2ee3f0f8725e12ef232d652af | 0d97cf653219d2e12a868ff94890f00d6673e864 | Database/gamedb.py | 519372b822dbb1cd25060e5bed308c036971b1fc | [] | no_license | ChitharaKarunasekera/BouncingMath | 03c617391d308c82f239e87408168d6cf6f4194a | fb49ca72db16c315ebae43f58a3acf7dbfb1a8d5 | refs/heads/main | 2023-07-27T22:55:00.990519 | 2021-09-20T15:16:25 | 2021-09-20T15:16:25 | 408,475,200 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,288 | py |
import mysql.connector
from prettytable import PrettyTable
def connect():
"This function is to bulid connection with thw database"
global conDict
global db
global cursor
    #Open database connection with a dictionary
conDict = {'host':'localhost',
'database':'bouncing_math',
'user':'root',
'password':''}
db = mysql.connector.connect(**conDict)
cursor = db.cursor()
return
def insert(name,num,score,perc,lev,time,date):
"Function to insert values to the database"
#SQL query
mySQLText = "INSERT INTO gameinfo(name, correct, totQues,percentage, Level, Time, Date) VALUES (%s,%s,%s,%s,%s,%s,%s)"
myValues = (name, score, num, perc,lev,time,date)
cursor.execute(mySQLText, myValues)
db.commit()
db.close()
return
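# Usage sketch (illustrative values only; column names follow the query above):
#   connect()
#   insert("Alice", 10, 8, 80.0, 1, "02:15", "2021-09-20")
# connect() must be called before each helper, since every helper closes
# the shared connection when it finishes.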
def allInfo():
"Function to display information of all players"
tbl = PrettyTable()
tbl.field_names = ["Name", "Corrct answers", "Total questions", "Percentage"]
#Execute SQL query
cursor.execute("SELECT name, correct, totQues, percentage FROM gameinfo")
data = cursor.fetchall()
for item in data:
tbl.add_row(item)
print("Past player results")
print(tbl)
db.close()
return
def user():
user = []
global uName
#Get record name of user
uName = input("Enter user name: ")
#Execute SQL query using execute() method
cursor.execute("SELECT name FROM gameinfo")
data = cursor.fetchall()
for item in data:
for nameitem in item:
user.append(nameitem)
if uName in user:
use = 1
else:
use = 0
db.close()
return use
def userinfo():
uName = input("Enter user name: ")
tbl = PrettyTable()
tbl.field_names = ["Record No.", "Name", "Corrct answers", "No. of questions", "Percentage","Level","Time","Date"]
#Execute SQL query
shwTxt= "SELECT * FROM gameinfo WHERE name = %s"
getTxt = (uName,)
cursor.execute(shwTxt,getTxt)
data = cursor.fetchall()
for item in data:
tbl.add_row(item)
print("Information of user")
print(tbl)
print("Total number of records: ",cursor.rowcount)
db.close()
return
def method():
    print("----Past game information method----")
    print("1 - All game records")
    print("2 - Player's game information")
print("\n")
methd = input("Select records viewing method: ")
if(methd == "2"):
userinfo()
delete()
elif(methd == "1"):
allInfo()
else:
print("Invalied option")
return
def delete():
connect()
while True:
delt = input("Do you want to delete a record (Y\\N)?")
if (delt == "y" or delt == "Y" ):
try:
delrcd = input("Enter record number: ")
cursor = db.cursor()
cursor.execute("DELETE FROM gameinfo WHERE rcdNo =" + delrcd + "")
db.commit()
print(cursor.rowcount, "Record", delrcd, "Deleted")
db.close()
except:
print("Invalied Record number!")
elif(delt == "n" or delt == "N"):
break
0f0065ea7a5f13dced622e24fa758ba9deeb184e | 0c0f6a6e47120afad8ff01e701a27bc6bb359f6c | settings.py | 22e08e148d689e7565735d9d6b13cb378806bce0 | [] | no_license | gundamjr/Fabrica | dedd0bf9d0a2672afc7339984a5f32e633f2aae5 | 8f7e243e796aa0c46ee404bfc10be7a9372e50b6 | refs/heads/master | 2021-01-01T03:53:54.312742 | 2016-06-08T20:04:32 | 2016-06-08T20:04:32 | 58,213,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,823 | py |
"""
Django settings for ceep project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3=(0nhtdbmf@kjugj_@cuer(2wpuq)i2iumzf6h(#s2o&4gq!g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'page',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'ceep.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ceep.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'Ceep',
'USER': 'postgres',
'PASSWORD': '123456',
'HOST': 'localhost',
'PORT': '5432',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
LOGIN_REDIRECT_URL = '/' | [
"[email protected]"
] | |
8d9e85dc3c307a5bcdedab7d153ca207622be8c9 | 41c605bf3a002a757cb2344cff526d7a7ae56ea9 | /plotly/graph_objs/area/hoverlabel/__init__.py | fc3a516acf6390598c9ea92f63b06b09005b52f1 | [
"MIT"
] | permissive | Jonathan-MW/plotly.py | 9674b90b5de11fd9089e6afefd04b57bc4587829 | 7528c00772f44dee24c0df7e15d70a4852f171a8 | refs/heads/master | 2020-05-30T06:04:13.621478 | 2019-05-31T10:34:15 | 2019-05-31T10:34:15 | 189,571,988 | 2 | 0 | MIT | 2019-05-31T09:59:53 | 2019-05-31T09:59:53 | null | UTF-8 | Python | false | false | 10,981 | py |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['colorsrc']
@colorsrc.setter
def colorsrc(self, val):
self['colorsrc'] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self['family']
@family.setter
def family(self, val):
self['family'] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['familysrc']
@familysrc.setter
def familysrc(self, val):
self['familysrc'] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self['size']
@size.setter
def size(self, val):
self['size'] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['sizesrc']
@sizesrc.setter
def sizesrc(self, val):
self['sizesrc'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'area.hoverlabel'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.area.hoverlabel.Font
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Font
"""
super(Font, self).__init__('font')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.area.hoverlabel.Font
constructor must be a dict or
an instance of plotly.graph_objs.area.hoverlabel.Font"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.area.hoverlabel import (font as v_font)
# Initialize validators
# ---------------------
self._validators['color'] = v_font.ColorValidator()
self._validators['colorsrc'] = v_font.ColorsrcValidator()
self._validators['family'] = v_font.FamilyValidator()
self._validators['familysrc'] = v_font.FamilysrcValidator()
self._validators['size'] = v_font.SizeValidator()
self._validators['sizesrc'] = v_font.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('color', None)
self['color'] = color if color is not None else _v
_v = arg.pop('colorsrc', None)
self['colorsrc'] = colorsrc if colorsrc is not None else _v
_v = arg.pop('family', None)
self['family'] = family if family is not None else _v
_v = arg.pop('familysrc', None)
self['familysrc'] = familysrc if familysrc is not None else _v
_v = arg.pop('size', None)
self['size'] = size if size is not None else _v
_v = arg.pop('sizesrc', None)
self['sizesrc'] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| [
"[email protected]"
] | |
faea1d2d3aff94918153747b463fda5afb5fea96 | 85a32fc66050b5590f6a54774bbb4b88291894ab | /python/closures-and-decorators/standardize-mobile-number-using-decorators/python3.py | 9edf509c54cd1e971677c0957f8a99e5e94b5ce6 | [] | no_license | charlesartbr/hackerrank-python | 59a01330a3a6c2a3889e725d4a29a45d3483fb01 | bbe7c6e2bfed38132f511881487cda3d5977c89d | refs/heads/master | 2022-04-29T07:40:20.244416 | 2022-03-19T14:26:33 | 2022-03-19T14:26:33 | 188,117,284 | 46 | 37 | null | 2022-03-19T14:26:34 | 2019-05-22T21:38:18 | Python | UTF-8 | Python | false | false | 336 | py | def wrapper(f):
def fun(l):
for i in range(len(l)):
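            #Keep only the last 10 digits and reformat as '+91 XXXXX XXXXX'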
p = len(l[i]) - 5
l[i] = '+91 ' + l[i][p-5:p] + ' ' + l[i][p:]
f(l)
return fun
@wrapper
def sort_phone(l):
print(*sorted(l), sep='\n')
if __name__ == '__main__':
l = [input() for _ in range(int(input()))]
sort_phone(l)
| [
"[email protected]"
] | |
212144725dd7bdb891489e66befa687a6cc7d9e4 | 635f8f5581c98dc22c6f5ff7210ae489d2b03197 | /src/editor/__init__.py | 173efc7150bbd80cb4ea3cbcf9f9568172e2b6cb | [] | no_license | moshev/project-viking | 2f03dd4fcf05a1c790c3230b7eff956a76d73081 | 0da83d070bde32687aeacdb7971d70a6848de57c | refs/heads/master | 2016-08-04T02:20:40.302063 | 2015-12-01T20:49:00 | 2015-12-01T20:49:07 | 1,097,047 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, generators, print_function, with_statement
from .hitbox_editor import hitboxeditor_main
from .level_editor import leveleditor_main
| [
"[email protected]"
] | |
307e9a9c5e278b06a5bf2d2f92840be5798d15ff | 1dae87abcaf49f1d995d03c0ce49fbb3b983d74a | /programs/subroutines/Delta1CurrentRamp100-200A.sub.py | 5e686740bcd7a62709180483dd7887e9c8792f82 | [] | no_license | BEC-Trento/BEC1-data | 651cd8e5f15a7d9848f9921b352e0830c08f27dd | f849086891bc68ecf7447f62962f791496d01858 | refs/heads/master | 2023-03-10T19:19:54.833567 | 2023-03-03T22:59:01 | 2023-03-03T22:59:01 | 132,161,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,144 | py | prg_comment = ""
prg_version = "0.5.1"
def program(prg, cmd):
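    #Linear ramp of the Delta 1 supply current from 100 A to 200 A over 40 evenly spaced setpoints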
prg.add(0, "Delta 1 Current", 100.000000)
prg.add(141540, "Delta 1 Current", 102.560000)
prg.add(283080, "Delta 1 Current", 105.130000)
prg.add(424620, "Delta 1 Current", 107.690000)
prg.add(566150, "Delta 1 Current", 110.260000)
prg.add(707690, "Delta 1 Current", 112.820000)
prg.add(849230, "Delta 1 Current", 115.380000)
prg.add(990770, "Delta 1 Current", 117.950000)
prg.add(1132310, "Delta 1 Current", 120.510000)
prg.add(1273850, "Delta 1 Current", 123.080000)
prg.add(1415380, "Delta 1 Current", 125.640000)
prg.add(1556920, "Delta 1 Current", 128.210000)
prg.add(1698460, "Delta 1 Current", 130.770000)
prg.add(1840000, "Delta 1 Current", 133.330000)
prg.add(1981540, "Delta 1 Current", 135.900000)
prg.add(2123080, "Delta 1 Current", 138.460000)
prg.add(2264620, "Delta 1 Current", 141.030000)
prg.add(2406150, "Delta 1 Current", 143.590000)
prg.add(2547690, "Delta 1 Current", 146.150000)
prg.add(2689230, "Delta 1 Current", 148.720000)
prg.add(2830770, "Delta 1 Current", 151.280000)
prg.add(2972310, "Delta 1 Current", 153.850000)
prg.add(3113850, "Delta 1 Current", 156.410000)
prg.add(3255380, "Delta 1 Current", 158.970000)
prg.add(3396920, "Delta 1 Current", 161.540000)
prg.add(3538460, "Delta 1 Current", 164.100000)
prg.add(3680000, "Delta 1 Current", 166.670000)
prg.add(3821540, "Delta 1 Current", 169.230000)
prg.add(3963080, "Delta 1 Current", 171.790000)
prg.add(4104620, "Delta 1 Current", 174.360000)
prg.add(4246150, "Delta 1 Current", 176.920000)
prg.add(4387690, "Delta 1 Current", 179.490000)
prg.add(4529230, "Delta 1 Current", 182.050000)
prg.add(4670770, "Delta 1 Current", 184.620000)
prg.add(4812310, "Delta 1 Current", 187.180000)
prg.add(4953850, "Delta 1 Current", 189.740000)
prg.add(5095380, "Delta 1 Current", 192.310000)
prg.add(5236920, "Delta 1 Current", 194.870000)
prg.add(5378460, "Delta 1 Current", 197.440000)
prg.add(5520000, "Delta 1 Current", 200.000000)
return prg
| [
"[email protected]"
] | |
e664f8382f027c8384062ba1d4e5d656f1e6baf6 | 82e1ba90dd94df783e70bc27e09d89026c145abc | /bin/p/__init__.py | 08c9990f6c4dcc33359b3f681a15910e811e6fb1 | [] | no_license | pebbe/Gabmap | a8fe44c6b23cbb4a1e297dd0dbc4126c539c56ad | 7801f901f1c8bfbf4f62e52c74fcb84652cbf1f9 | refs/heads/master | 2023-06-08T22:58:18.436693 | 2023-05-31T13:40:30 | 2023-05-31T13:40:30 | 1,847,250 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 50 | py |
"""
This does parts of the user hierarchy
"""
| [
"peter@localhost.(none)"
] | peter@localhost.(none) |
e895078315d65068f1ccad0d76516477f7fe6d4e | f0b654037e9d236e42f893d8eddf7393fb73c433 | /copy_file.py | 32f30e4229681c81a0f592ef64e4a5f5f140801a | [] | no_license | greg00m/greg00m | d80f3e4e51b08e6616f763e723c9ea0ac5b4a1c3 | e37792cc5411685cfca1fe0a739a38b110163f61 | refs/heads/master | 2023-05-01T09:40:25.016078 | 2023-04-21T04:23:16 | 2023-04-21T04:23:16 | 142,930,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | #!/usr/bin/python3
file_path = "alice.txt"
try:
    fh = open(file_path, 'r') #open text file for reading
    fh.close()
except Exception:
    print("unable to read file")
try:
    out_file = "%s.out" % (file_path) #derive the output file name
    fh = open(out_file, 'w') #open the output text file for writing
    fh.close()
except Exception:
    print("unable to write to file")
try:
    fh = open(file_path, 'r+') #open file for read/write
    fh.close()
except Exception:
    print("unable to add read to file attribute")
try:
    fh = open(file_path, 'rb') #open a binary file for reading
    fh.close()
except Exception:
    print("unable to open a binary file for reading")
#open("copy.txt","w").write(open("alice.txt","r").read())
| [
"[email protected]"
] | |
140bc0541699a65cdf1095b23526c93f841db5bb | 5ef186f87c1b58e57b8ba443e9a1039bdf044ef0 | /tests/test_gitone.py | d956d384ed2611a5125d7fff9063c2ea37b1db84 | [
"MIT"
] | permissive | litchilin/gitone | d48bb4e1e9030cec1123349687c95d065fd0c9c4 | 9f7ea61a55fecf39663f5721e7e82bcbe41da4d1 | refs/heads/master | 2020-08-03T22:19:11.417293 | 2019-07-04T15:03:05 | 2019-07-04T15:03:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `gitone` package."""
import pytest
from click.testing import CliRunner
from cli import camp_cli
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
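    #Minimal sketch of exercising the CLI; assumes camp_cli (imported above) is a click command
    result = runner.invoke(camp_cli)
    assert result.exit_code == 0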
| [
"[email protected]"
] | |
056832faf9403db5ced2cbaed918067783f17c86 | a3a26ec5d5ac4aa034484f1652ed212ec7464baf | /MyEnv/lib/python3.7/locale.py | ae80cc80f6d8db74f005c52f5c4fa13a269240b6 | [
"MIT"
] | permissive | pratikagarwal2203/manim_simulations | b3aabe8eb0491dba0f0061b46387da4d6ad259c0 | 9d7820b894914afcb40e7f859387d9120110fa2b | refs/heads/master | 2020-05-06T12:30:29.916096 | 2019-04-21T13:18:11 | 2019-04-21T13:18:11 | 180,126,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | /home/pratik/anaconda3/lib/python3.7/locale.py | [
"[email protected]"
] | |
b070204450dcf546ea365cff9ab64ac507811a85 | 4f8cca207c443108a5406d6a1fa4180326f96d9b | /scrapy/med126/med126/items.py | cd8d4dffa8ff2ad6abf491d9f65a051e6c1ff005 | [] | no_license | owenxu10/WebSpider | d1e73ddc90a565dec545594f48b05b308d3babb6 | 6d41172f4d6716589b7c63a3fb604596283822c7 | refs/heads/master | 2021-10-25T22:41:04.095730 | 2019-04-07T22:30:57 | 2019-04-07T22:30:57 | 79,695,240 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class Med126Item(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
number = scrapy.Field()
name = scrapy.Field()
| [
"[email protected]"
] | |
30da647e3ae38a8c0285ab9e395f297040d07d23 | bb150497a05203a718fb3630941231be9e3b6a32 | /inference/benchmark/jetson/utils/utilities.py | b7394c9faa4d10c3d6978d21fc1a4693607452c8 | [
"MIT"
] | permissive | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 5,241 | py | import os
import subprocess
import sys
import time
FNULL = open(os.devnull, "w")
# Class for Utilities (TRT check, Power mode switching)
# https://docs.nvidia.com/jetson/l4t/index.html#page/Tegra%2520Linux%2520Driver%2520Package%2520Development%2520Guide%2Fpower_management_jetson_xavier.html%23wwpID0E0KD0HA
class utilities():
def __init__(self, jetson_devkit, gpu_freq, dla_freq):
self.jetson_devkit = jetson_devkit
self.gpu_freq = gpu_freq
self.dla_freq = dla_freq
def set_power_mode(self, power_mode, jetson_devkit):
power_cmd0 = "nvpmodel"
power_cmd1 = str("-m"+str(power_mode))
subprocess.call("sudo {} {}".format(power_cmd0, power_cmd1), shell=True,
stdout=FNULL)
print("Setting Jetson {} in max performance mode".format(jetson_devkit))
def set_jetson_clocks(self):
clocks_cmd = "jetson_clocks"
subprocess.call("sudo {}".format(clocks_cmd), shell=True,
stdout=FNULL)
print("Jetson clocks are Set")
def set_jetson_fan(self, switch_opt):
fan_cmd = "sh" + " " + "-c" + " " + ""echo" + " " + str(
switch_opt) + " " + ">" + " " + "/sys/devices/pwm-fan/target_pwm""
subprocess.call("sudo {}".format(fan_cmd), shell=True, stdout=FNULL)
def run_set_clocks_withDVFS(self):
if self.jetson_devkit == "tx2":
self.set_user_clock(device="gpu")
self.set_clocks_withDVFS(frequency=self.gpu_freq, device="gpu")
if self.jetson_devkit == "nano":
self.set_user_clock(device="gpu")
self.set_clocks_withDVFS(frequency=self.gpu_freq, device="gpu")
if self.jetson_devkit == "xavier" or self.jetson_devkit == "xavier-nx":
self.set_user_clock(device="gpu")
self.set_clocks_withDVFS(frequency=self.gpu_freq, device="gpu")
self.set_user_clock(device="dla")
self.set_clocks_withDVFS(frequency=self.dla_freq, device="dla")
def set_user_clock(self, device):
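        #Pick the sysfs/bpmp registers that control the clock on each devkit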
if self.jetson_devkit == "tx2":
self.enable_register = "/sys/devices/gpu.0/aelpg_enable"
self.freq_register = "/sys/devices/gpu.0/devfreq/17000000.gp10b"
if self.jetson_devkit == "nano":
self.enable_register = "/sys/devices/gpu.0/aelpg_enable"
self.freq_register = "/sys/devices/gpu.0/devfreq/57000000.gpu"
if self.jetson_devkit == "xavier" or self.jetson_devkit == "xavier-nx":
if device == "gpu":
self.enable_register = "/sys/devices/gpu.0/aelpg_enable"
self.freq_register = "/sys/devices/gpu.0/devfreq/17000000.gv11b"
elif device == "dla":
base_register_dir = "/sys/kernel/debug/bpmp/debug/clk"
self.enable_register = base_register_dir + "/nafll_dla/mrq_rate_locked"
self.freq_register = base_register_dir + "/nafll_dla/rate"
def set_clocks_withDVFS(self, frequency, device):
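        #Read the current clock, request the new one, then read back to confirm the change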
from_freq = self.read_internal_register(register=self.freq_register, device=device)
self.set_frequency(device=device, enable_register=self.enable_register, freq_register=self.freq_register, frequency=frequency, from_freq=from_freq)
time.sleep(1)
to_freq = self.read_internal_register(register=self.freq_register, device=device)
print("{} frequency is set from {} Hz --> to {} Hz".format(device, from_freq, to_freq))
def set_frequency(self, device, enable_register, freq_register, frequency, from_freq):
self.write_internal_register(enable_register, 1)
if device == "gpu":
max_freq_reg = freq_register+"/max_freq"
min_freq_reg = freq_register+"/min_freq"
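            #Write order keeps min_freq <= max_freq valid at every step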
if int(frequency) > int(from_freq):
self.write_internal_register(max_freq_reg, frequency)
self.write_internal_register(min_freq_reg, frequency)
else:
self.write_internal_register(min_freq_reg, frequency)
self.write_internal_register(max_freq_reg, frequency)
elif device =="dla":
self.write_internal_register(freq_register, frequency)
def read_internal_register(self, register, device):
if device == "gpu":
register = register+"/cur_freq"
reg_read = open(register, "r")
reg_value = reg_read.read().rstrip("\n")
reg_read.close()
return reg_value
def write_internal_register(self, register, value):
reg_write = open(register, "w")
reg_write.write("%s" % value)
reg_write.close()
def clear_ram_space(self):
cmd_0 = str("sh" + " " + "-c")
cmd_1 = str(""echo") + " " + "2" + " " + " >" + " " + "/proc/sys/vm/drop_caches""
cmd = cmd_0 + " " + cmd_1
subprocess.call("sudo {}".format(cmd), shell=True)
def close_all_apps(self):
input("Please close all other applications and Press Enter to continue...")
def check_trt(self):
if not os.path.isfile("/usr/src/tensorrt/bin/trtexec"): # Check if TensorRT is installed
print("Exiting. Check if TensorRT is installed \n Use ``dpkg -l | grep nvinfer`` ")
return True
return False
| [
"[email protected]"
] | |
4b7511e6ab31a4735b18cdb5bb56cdebcb1f391a | ecf25af68a32600e462e27f8ae205cac8a35978f | /venv/bin/wheel | 301a42b9b315ad33267ddee83003b8ffb1942f77 | [] | no_license | bennettdrew35/resume_courses_python | 4c36e326ad453c1936b5c5890e5a878fc2304254 | c6b394babe361ba88b31d6f702e4587e5d14c5f1 | refs/heads/master | 2018-12-27T15:10:51.858169 | 2018-10-24T15:36:39 | 2018-10-24T15:36:39 | 119,621,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | #!/home/drew/PycharmProjects/matplotlib/venv/bin/python3.5
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
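    #Strip any '-script.py'/'.exe' suffix so argv[0] matches the console entry point name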
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] |