max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
doc/source/stats_callbacks.py | mhaseeb123/TECA | 34 | 12790251 | from teca import *
import numpy as np
import sys

def get_request_callback(rank, var_names):
    def request(port, md_in, req_in):
        sys.stderr.write('descriptive_stats::request MPI %d\n'%(rank))
        req = teca_metadata(req_in)
        req['arrays'] = var_names
        return [req]
    return request

def get_execute_callback(rank, var_names):
    def execute(port, data_in, req):
        sys.stderr.write('descriptive_stats::execute MPI %d\n'%(rank))
        mesh = as_teca_cartesian_mesh(data_in[0])
        table = teca_table.New()
        table.declare_columns(['step','time'], ['ul','d'])
        table << mesh.get_time_step() << mesh.get_time()
        for var_name in var_names:
            table.declare_columns(['min '+var_name, 'avg '+var_name, \
                'max '+var_name, 'std '+var_name, 'low_q '+var_name, \
                'med '+var_name, 'up_q '+var_name], ['d']*7)
            var = mesh.get_point_arrays().get(var_name).as_array()
            table << float(np.min(var)) << float(np.average(var)) \
                << float(np.max(var)) << float(np.std(var)) \
                << map(float, np.percentile(var, [25.,50.,75.]))
        return table
    return execute
| 2.1875 | 2 |
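The execute callback above reduces each requested variable to seven summary statistics before tabulating them. A minimal NumPy-only sketch of that reduction (no TECA required; `fields` is a hypothetical stand-in for the mesh point arrays):

```python
import numpy as np

def descriptive_stats(fields):
    """Reduce each named array to the same seven statistics the callback tabulates."""
    stats = {}
    for name, values in fields.items():
        var = np.asarray(values, dtype=float)
        low_q, med, up_q = (float(q) for q in np.percentile(var, [25., 50., 75.]))
        stats[name] = {
            'min': float(np.min(var)), 'avg': float(np.average(var)),
            'max': float(np.max(var)), 'std': float(np.std(var)),
            'low_q': low_q, 'med': med, 'up_q': up_q,
        }
    return stats

print(descriptive_stats({'temperature': [280.1, 281.4, 279.8, 283.0]}))
```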
tests/strain_inputs.py | greschd/aiida_strain | 0 | 12790252 | <reponame>greschd/aiida_strain<gh_stars>0
# -*- coding: utf-8 -*-
# © 2017-2019, ETH Zurich, Institut für Theoretische Physik
# Author: <NAME> <<EMAIL>>
"""
Test fixtures providing input for the strain workchains.
"""
# pylint: disable=unused-argument,redefined-outer-name,missing-docstring

import pytest

__all__ = ['strain_kind', 'strain_parameters', 'strain_inputs']


@pytest.fixture(
    params=[
        'three_five.Biaxial001',
        'three_five.Biaxial110',
        'three_five.Biaxial111',
        'three_five.Uniaxial110',
    ]
)
def strain_kind(request):
    return request.param


@pytest.fixture(params=[
    'InAs',
    'InSb',
    'GaSb',
])
def strain_parameters(request):
    return request.param


@pytest.fixture
def strain_inputs(configure, strain_kind, strain_parameters, sample):
    import pymatgen
    from aiida import orm
    structure = orm.StructureData()
    structure.set_pymatgen(pymatgen.Structure.from_file(sample('POSCAR')))
    return dict(
        structure=structure,
        strain_kind=orm.Str(strain_kind),
        strain_parameters=orm.Str(strain_parameters),
        strain_strengths=orm.List(list=[-0.2, -0.1, 0., 0.1, 0.2])
    )
| 1.84375 | 2 |
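Because `strain_kind` and `strain_parameters` are both parametrized fixtures, any test that requests both runs once per combination (4 kinds x 3 materials = 12 cases here). A small self-contained sketch of that fan-out, with the fixture values trimmed for brevity:

```python
import pytest

@pytest.fixture(params=['three_five.Biaxial001', 'three_five.Biaxial110'])
def strain_kind(request):
    return request.param

@pytest.fixture(params=['InAs', 'InSb'])
def strain_parameters(request):
    return request.param

def test_every_combination_runs(strain_kind, strain_parameters):
    # pytest generates 2 x 2 = 4 test items from this single function
    assert strain_kind.startswith('three_five.')
    assert strain_parameters in {'InAs', 'InSb'}
```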
twitter_feels/apps/map/models.py | michaelbrooks/twitter-feels | 1 | 12790253 | from django.core.exceptions import ObjectDoesNotExist
from django.db import models, connection, transaction, IntegrityError, DatabaseError
from django.utils import timezone
import random
from south.db.generic import DatabaseOperations
from twitter_stream.fields import PositiveBigAutoField, PositiveBigAutoForeignKey
from swapper import get_model_name
from datetime import timedelta
import settings
from twitter_feels.libs.twitter_analysis import TweetTimeFrame
import logging
import re
logger = logging.getLogger('map')
class TreeNode(models.Model):
class Meta:
index_together = [
['parent', 'word'],
['created_at', 'parent'],
]
ROOT_NODES = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
word = models.CharField(max_length=150)
created_at = models.DateTimeField(auto_now_add=True, default=None, null=True, blank=True)
def get_child(self, word):
child = list(self.children.filter(word=word)[:1])
if child:
return child[0]
return None
@classmethod
def get_empty_nodes(cls):
"""
Return a queryset for all the tree nodes that have no associated
TweetChunks
"""
return cls.objects.filter(chunks=None).exclude(pk__in=cls.ROOT_NODES)
@classmethod
def follow_chunks(cls, prefix, query_chunks):
"""Returns the node referenced by the given prefix and chunks."""
root = cls.get_root()
if not root:
raise Exception("No root node in tweet tree")
prefix_node = root.get_child(prefix)
if prefix_node is None:
return None
node = prefix_node
for chunk in query_chunks:
node = node.get_child(chunk.lower())
if not node:
return None
return node
@classmethod
def orphan_empty_nodes(cls, batch_size=10000):
most_recent_time = cls.objects.aggregate(most_recent_time=models.Max('created_at'))
if most_recent_time['most_recent_time'] is None:
return 0
most_recent_time = most_recent_time['most_recent_time']
time_cutoff = most_recent_time - settings.NODE_FREEZE_INTERVAL
subset_query = """
SELECT map_treenode.id
FROM map_treenode
LEFT OUTER JOIN map_tweetchunk
ON ( map_treenode.id = map_tweetchunk.node_id )
WHERE (map_tweetchunk.id IS NULL)
AND (map_treenode.parent_id IS NOT NULL)
AND (map_treenode.created_at < %s)
AND NOT (map_treenode.id IN %s)
LIMIT %s
"""
query = """
UPDATE map_treenode
JOIN (
{subset_query}
) subset
ON map_treenode.id = subset.id
SET map_treenode.parent_id = NULL
""".format(subset_query=subset_query)
params = [time_cutoff, cls.ROOT_NODES, batch_size]
cursor = connection.cursor()
return cursor.execute(query, params)
@classmethod
def propagate_orphanage(cls):
"""Makes sure that all children of current orphans are also orphaned."""
future_orphans = cls.objects.filter(parent__parent=None).exclude(parent=None)\
.exclude(pk__in=cls.ROOT_NODES)
return future_orphans.update(parent=None)
@classmethod
def delete_orphans(cls, batch_size=10000):
"""Delete a batch of orphans"""
query = """
DELETE FROM map_treenode
WHERE (parent_id IS NULL)
AND NOT (id IN %s)
ORDER BY id
LIMIT %s
"""
params = [cls.ROOT_NODES, batch_size]
cursor = connection.cursor()
return cursor.execute(query, params)
@classmethod
def cleanup(cls, batch_size=10000, reset=False):
cls.cleanup_empty(batch_size=batch_size, reset=reset)
cls.cleanup_orphans(batch_size=batch_size, reset=reset)
@classmethod
def cleanup_empty(cls, batch_size=10000, reset=False):
# Disconnect TreeNodes without any chunks
logger.info("Orphaning empty tree nodes...")
batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size)
total_orphaned = batch_orphaned
while batch_orphaned == batch_size:
logger.info(" ... orphaned batch of %d", batch_orphaned)
batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size)
total_orphaned += batch_orphaned
if total_orphaned > 0:
logger.info("Orphaned %d empty nodes", total_orphaned)
else:
logger.info("No empty nodes to orphan")
logger.info("Orphaning children of orphans... (should not be needed)")
propagated = cls.propagate_orphanage()
while propagated > 0:
logger.info(" ...orphaned %d new nodes (should be 0!)", propagated)
propagated = cls.propagate_orphanage()
@classmethod
def cleanup_orphans(cls, batch_size=10000, reset=False):
logger.info("Deleting orphans...")
batch_deleted = cls.delete_orphans(batch_size=batch_size)
total_deleted = batch_deleted
while batch_deleted == batch_size:
logger.info(" ... deleted batch of %d", batch_deleted)
batch_deleted = cls.delete_orphans(batch_size=batch_size)
total_deleted += batch_deleted
if reset and settings.DEBUG:
# Prevent apparent memory leaks
# https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory
from django import db
db.reset_queries()
if total_deleted > 0:
logger.info("Deleted %d orphans", total_deleted)
else:
logger.info("No orphans to delete")
def get_top_chunk_countries_for_children(self, limit=10):
"""
Look at the children of this node. Look at
the chunks that refer to them.
Return the top countries for those chunks by frequency.
Returns a list.
"""
# Group by tz_country
query = TweetChunk.objects.values('tz_country')
# Only non-empty tz_countries, words
query = query.exclude(tz_country='')
query = query.exclude(node__word='')
# Only chunks belonging to our children
query = query.filter(node__parent=self)
# Order by count
query = query.order_by('-chunk_count')
# Aggregate fields
query = query.annotate(chunk_count=models.Count('id'))
# Limit
query = query[:limit]
print query.query
return [r['tz_country'] for r in query]
def get_most_popular_child_chunk_in(self, country):
"""
Get the chunk following this node with the most tweets
in the given country.
"""
# Group by chunk
query = TweetChunk.objects.values('node', 'node__word')
# Only with the given country, non-empty words
query = query.exclude(node__word='')
query = query.filter(tz_country=country)
# Only chunks belonging to our children
query = query.filter(node__parent=self)
# Order by count, desc
query = query.order_by('-count')
# Aggregate fields
query = query.annotate(count=models.Count('id'))
print query.query
# Limit
result = query.first()
if result is None:
# .values().first() returns None (rather than raising) when there are no rows
return None
return result['node__word'], result['count']
def get_subquery(self):
"""
Generates SQL like the following:
SELECT
`map_tweetchunk`.`tz_country`,
`map_treenode`.`word`,
COUNT(`map_tweetchunk`.`id`) AS `count`
FROM `map_tweetchunk`
INNER JOIN `map_treenode`
ON (`map_tweetchunk`.`node_id` = `map_treenode`.`id`)
WHERE (
`map_treenode`.`parent_id` = MY_ID_GOES_HERE
)
GROUP BY `map_tweetchunk`.`tz_country`, `map_treenode`.`word`
"""
# Group by country, chunk
query = TweetChunk.objects.values('tz_country', 'node__word')
# Only with the given country, non-empty words
# We'll do this later instead
# query = query.exclude(node__word='')
# query = query.exclude(tz_country='')
# Only chunks belonging to our children
query = query.filter(node__parent=self)
# Aggregate fields
query = query.annotate(count=models.Count('id'))
return query
def get_most_popular_child_chunk_by_country2(self, country_limit=10):
"""
Get tuples of country, word, count for the top 10 countries
following this node.
More specifically:
- Look at all the words that followed this one anywhere
- In every country, find the word following this one that was most commonly used
- For the top result from each country, return the top 10 country-word-counts.
"""
# How much padding to add to counts for the concat/max/split trick
count_padding = 10
# Get the name of the stupid index from South
db = DatabaseOperations(None)
index_name = db.create_index_name('map_tweetchunk', ['tz_country', 'node_id'])
# Find the words following this node for every
# country, and the number of tweets with that word.
# Concatenate the tweet count to the word
subquery = """
SELECT map_tweetchunk.tz_country,
CONCAT(
LPAD(COUNT(DISTINCT map_tweetchunk.id), {padding}, '0'),
'-',
map_treenode.word
) as combo
FROM map_tweetchunk
-- USE INDEX ({index_name})
LEFT OUTER JOIN map_treenode
ON ( map_tweetchunk.node_id = map_treenode.id )
WHERE map_treenode.parent_id = %s
AND map_tweetchunk.tz_country != ''
AND map_treenode.word != ''
GROUP BY map_tweetchunk.tz_country, map_tweetchunk.node_id
""".format(padding=count_padding, index_name=index_name)
# Now select the max of the combo field for each country
# Since we've padded with 0s, alphabetic max is the same as numeric max
maxquery = """
SELECT sub.tz_country,
MAX(sub.combo) as maxcombo
FROM ({subquery}) sub
GROUP BY sub.tz_country
ORDER BY maxcombo DESC
LIMIT %s
""".format(subquery=subquery)
# Now split up the max combo into the count and the word
# The word is substring(maxcombo, padding+2) because
# it is 1-indexed and we added a '-' character in the middle.
splitquery = """
SELECT sub2.tz_country,
SUBSTRING(sub2.maxcombo, {padding} + 2) AS word,
CAST(SUBSTRING(sub2.maxcombo, 1, {padding}) AS UNSIGNED) AS `count`
FROM ({maxquery}) sub2
""".format(maxquery=maxquery, padding=count_padding)
print splitquery
cursor = connection.cursor()
cursor.execute(splitquery, [self.id, country_limit])
return cursor.fetchall()
def get_most_popular_child_chunk_by_country(self, country_limit=10):
"""
Get tuples of country, word, count for the top 10 countries
following this node.
More specifically:
- Look at all the words that followed this one anywhere
- In every country, find the word following this one that was most commonly used
- For the top result from each country, return the top 10 country-word-counts.
"""
# Make sure this is valid
country_limit = int(country_limit)
subquery = self.get_subquery()
# This query finds the maximum number of tweets
# with chunks following this node for every country
# up to the limit (plus 1 to allow for the empty country)
maxquery = """
SELECT
sub.tz_country,
MAX(sub.count) AS max_count
FROM ({subquery}) sub
GROUP BY tz_country
ORDER BY max_count DESC
LIMIT {limit}
""".format(subquery=subquery.query, limit=country_limit + 1)
# Template for the overall query
# It finds the actual chunk for each country
# that had the maximum count.
# Further, filters out empty words and countries.
superquery = """
SELECT
country_node_count.tz_country,
country_node_count.word,
country_node_count.count
FROM
({subquery}) country_node_count
INNER JOIN ({maxquery}) countrymax
ON (countrymax.tz_country = country_node_count.tz_country)
WHERE (
country_node_count.count = countrymax.max_count
AND country_node_count.tz_country != ''
AND country_node_count.word != ''
)
ORDER BY country_node_count.count DESC
LIMIT {limit}
""".format(subquery=subquery.query, maxquery=maxquery, limit=country_limit)
print superquery
cursor = connection.cursor()
cursor.execute(superquery)
return cursor.fetchall()
@classmethod
def get_root(cls):
try:
return cls.objects.get(id=1)
except ObjectDoesNotExist:
return None
class Tz_Country(models.Model):
user_time_zone = models.CharField(max_length=32)
country = models.CharField(max_length=32)
class TweetChunk(models.Model):
class Meta:
index_together = [
['tz_country', 'node'],
]
id = PositiveBigAutoField(primary_key=True)
node = models.ForeignKey(TreeNode, related_name='chunks')
twitter_id = models.BigIntegerField(default=0)
tweet_text = models.CharField(max_length=250, default=None, null=True)
created_at = models.DateTimeField(db_index=True)
tz_country = models.CharField(max_length=32, blank=True)
@classmethod
def get_example_tweet(cls, country_name, node):
"""Returns a Tweet for the given country and node."""
try:
chunks = cls.objects.filter(tz_country=country_name, node=node)
count = chunks.count()
chunk = chunks[random.randint(0, count - 1)]
return chunk.tweet_text
except DatabaseError:
# things could potentially disappear while we're doing these operations
return None
@classmethod
def delete_before(cls, oldest_date, batch_size=10000):
"""Delete a batch of chunks before the given date"""
cursor = connection.cursor()
deleted = cursor.execute("DELETE FROM map_tweetchunk WHERE created_at <= %s ORDER BY id LIMIT %s", [oldest_date, batch_size])
return deleted
@classmethod
def get_earliest_created_at(cls):
"""Get the earliest created_at in any tweet chunk."""
results = cls.objects.aggregate(earliest_created_at=models.Min('created_at'))
return results['earliest_created_at']
@classmethod
def cleanup(cls, batch_size=10000, reset=False):
# Get the most recently finished map time frame
now = MapTimeFrame.objects \
.filter(calculated=True) \
.aggregate(latest_start_time=models.Max('start_time'))
# maybe there aren't any?
if now['latest_start_time'] is None:
return
# Preserve some time prior to that time frame
trailing_edge_date = now['latest_start_time'] - settings.KEEP_DATA_FOR
logger.info("Cleaning chunks from before %s...", trailing_edge_date)
batch_deleted = TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size)
total_deleted = batch_deleted
while batch_deleted == batch_size:
logger.info(" ... deleted batch of %d", batch_deleted)
batch_deleted = TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size)
total_deleted += batch_deleted
if reset and settings.DEBUG:
# Prevent apparent memory leaks
# https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory
from django import db
db.reset_queries()
if total_deleted > 0:
logger.info("Deleted %d tweet chunks", total_deleted)
else:
logger.info("No chunks to delete")
class MapTimeFrame(TweetTimeFrame):
"""
A basic time frame for demo analysis.
1. Extend the BaseTimeFrame class.
2. Indicate how often to run the analysis (same as the time frame duration)
3. Add any fields you need to calculate. You can also store data on separate models,
if your data is not strictly 1:1 with time frames.
4. Implement calculate(tweets). This is where you do your work.
At the end, make sure to call self.mark_done(tweets)
5. Add any additional functions related to your time frames
that will make them easier to work with.
"""
# Analyze every 60 seconds (matches DURATION below)
DURATION = timedelta(seconds=60)
# Simply store the total tweet count in this time frame
tweet_count = models.IntegerField(default=0)
nodes_added = models.IntegerField(default=0)
chunks_added = models.IntegerField(default=0)
node_cache_hits = models.IntegerField(default=0)
node_cache_size = models.IntegerField(default=0)
def check_prefix(self, tweet, roots):
"""Returns a root in the tweet, if it exists"""
for root in roots:
if root.word in tweet.text:
return root
return None
def get_tree_node(self, parent, word, cache=None):
"""
Returns a tree node for the parent and word, and whether or not it is new.
A dictionary can optionally be provided for caching values across calls.
"""
if cache is not None and (parent, word) in cache:
self.node_cache_hits += 1
return cache[(parent, word)], False
else:
# We want to keep trying to grab this node until we get one
while True:
try:
with transaction.atomic():
# Get or create a node with parent and word
node, created = TreeNode.objects.get_or_create(parent=parent, word=word)
if not created:
# If it is an old node, there is a risk that the cleanup
# procedure will delete it while we are working with it.
# Setting created_at makes it impossible for it to be deleted for a brief period.
node.created_at = timezone.now()
node.save()
except IntegrityError:
# it was deleted while we were getting it
continue
# we got one
if cache is not None:
cache[(parent, word)] = node
if created:
self.nodes_added += 1
return node, created
def calculate(self, tweets):
self.tweet_count = len(tweets)
tzcountries = Tz_Country.objects.all()
roots = TreeNode.objects.filter(parent=1)
user_tz_map = dict((r.user_time_zone, r) for r in tzcountries)
user_tz_map[None] = None
node_cache = {}
new_tweet_chunks = []
for tweet in tweets:
root = self.check_prefix(tweet, roots)
if not root:
continue
rh = tweet.text.split(root.word, 1)[1]
rh = rh.lower()
chunks = re.split('[*,.!:"\s;()/@#]+|\'[\W]|\?+', rh)
parent = root
depth = 0
for chunk in chunks:
if chunk == "":
continue
if depth > settings.MAX_DEPTH:
break
# This node is guaranteed safe to work with for a few minutes
# It won't be deleted by cleanup before we create its TweetChunk.
node, created = self.get_tree_node(parent=parent, word=chunk, cache=node_cache)
country = user_tz_map.get(tweet.user_time_zone, None)
if country is None:
country = ''
else:
country = country.country
new_tweet_chunks.append(TweetChunk(
node=node,
twitter_id=tweet.tweet_id,
tweet_text=tweet.text,
created_at=tweet.created_at,
tz_country=country))
parent = node
depth += 1
TweetChunk.objects.bulk_create(new_tweet_chunks)
self.chunks_added += len(new_tweet_chunks)
self.node_cache_size = len(node_cache)
return tweets
def cleanup(self):
if self.id % 3 == 0:
# Then remove obsolete tree nodes
TreeNode.cleanup_empty()
else:
logger.info("Skipping empty treenode cleanup on this frame")
# First delete old tweet chunks
TweetChunk.cleanup()
TreeNode.cleanup_orphans()
@classmethod
def get_stream_memory_cutoff(cls):
"""Need to override this because we have tons of data that depends on tweets."""
baseline_cutoff = super(TweetTimeFrame, cls).get_stream_memory_cutoff()
# Get the earliest tweet referenced in any Tweet Chunk
earliest_created_at = TweetChunk.get_earliest_created_at()
if earliest_created_at is not None:
return min(earliest_created_at, baseline_cutoff)
else:
return baseline_cutoff
@classmethod
def get_most_recent(cls, limit=20):
"""
A handy static method to get the <limit>
most recent frames.
"""
query = cls.get_in_range(calculated=True) \
.order_by('-start_time')
return query[:limit]
| 2.03125 | 2 |
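The raw SQL in `get_most_popular_child_chunk_by_country2` above relies on a pad/concat/max/split trick: zero-padding the count makes the lexicographic `MAX` agree with the numeric maximum, so a single `GROUP BY tz_country` picks the top word per country. A pure-Python illustration of the same idea on hypothetical aggregates:

```python
PAD = 10  # matches the count_padding used above
rows = [  # (country, word, tweet_count) -- illustrative data
    ('US', 'hello', 42), ('US', 'world', 7),
    ('FR', 'bonjour', 13), ('FR', 'salut', 21),
]
best = {}
for country, word, count in rows:
    combo = f"{count:0{PAD}d}-{word}"                  # LPAD(count) || '-' || word
    best[country] = max(best.get(country, ''), combo)  # MAX(combo) per country
for country, combo in best.items():
    count, word = int(combo[:PAD]), combo[PAD + 1:]    # split the winner back apart
    print(country, word, count)                        # US hello 42 / FR salut 21
```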
gimp-plugins/PD-Denoising-pytorch/utils.py | sunlin7/GIMP-ML | 1,077 | 12790254 | <reponame>sunlin7/GIMP-ML
import math
import torch
import torch.nn as nn
import numpy as np
# from skimage.measure.simple_metrics import compare_psnr
from torch.autograd import Variable
import cv2
import scipy.ndimage
import scipy.io as sio
# import matplotlib as mpl
# mpl.use('Agg')
# import matplotlib.pyplot as plt
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
elif classname.find('Linear') != -1:
nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
elif classname.find('BatchNorm') != -1:
# nn.init.uniform(m.weight.data, 1.0, 0.02)
m.weight.data.normal_(mean=0, std=math.sqrt(2./9./64.)).clamp_(-0.025,0.025)
nn.init.constant(m.bias.data, 0.0)
# def batch_PSNR(img, imclean, data_range):
# Img = img.data.cpu().numpy().astype(np.float32)
# Iclean = imclean.data.cpu().numpy().astype(np.float32)
# PSNR = 0
# for i in range(Img.shape[0]):
# PSNR += compare_psnr(Iclean[i,:,:,:], Img[i,:,:,:], data_range=data_range)
# return (PSNR/Img.shape[0])
def data_augmentation(image, mode):
out = np.transpose(image, (1,2,0))
if mode == 0:
# original
out = out
elif mode == 1:
# flip up and down
out = np.flipud(out)
elif mode == 2:
# rotate counterwise 90 degree
out = np.rot90(out)
elif mode == 3:
# rotate 90 degree and flip up and down
out = np.rot90(out)
out = np.flipud(out)
elif mode == 4:
# rotate 180 degree
out = np.rot90(out, k=2)
elif mode == 5:
# rotate 180 degree and flip
out = np.rot90(out, k=2)
out = np.flipud(out)
elif mode == 6:
# rotate 270 degree
out = np.rot90(out, k=3)
elif mode == 7:
# rotate 270 degree and flip
out = np.rot90(out, k=3)
out = np.flipud(out)
return np.transpose(out, (2,0,1))
def visual_va2np(Out, mode=1, ps=0, pss=1, scal=1, rescale=0, w=10, h=10, c=3, refill=0, refill_img=0, refill_ind=[0, 0]):
if mode == 0 or mode == 1 or mode==3:
out_numpy = Out.data.squeeze(0).cpu().numpy()
elif mode == 2:
out_numpy = Out.data.squeeze(1).cpu().numpy()
if out_numpy.shape[0] == 1:
out_numpy = np.tile(out_numpy, (3, 1, 1))
if mode == 0 or mode == 1:
out_numpy = (np.transpose(out_numpy, (1, 2, 0))) * 255.0 * scal
else:
out_numpy = (np.transpose(out_numpy, (1, 2, 0)))
if ps == 1:
out_numpy = reverse_pixelshuffle(out_numpy, pss, refill, refill_img, refill_ind)
if rescale == 1:
out_numpy = cv2.resize(out_numpy, (h, w))
#print(out_numpy.shape)
return out_numpy
def temp_ps_4comb(Out, In):
pass
def np2ts(x, mode=0): #now assume the input only has one channel which is ignored
w, h, c= x.shape
x_ts = x.transpose(2, 0, 1)
x_ts = torch.from_numpy(x_ts).type(torch.FloatTensor)
if mode == 0 or mode == 1:
x_ts = x_ts.unsqueeze(0)
elif mode == 2:
x_ts = x_ts.unsqueeze(1)
return x_ts
def np2ts_4d(x):
x_ts = x.transpose(0, 3, 1, 2)
x_ts = torch.from_numpy(x_ts).type(torch.FloatTensor)
return x_ts
def get_salient_noise_in_maps(lm, thre = 0., chn=3):
'''
Description: To find out the most frequent estimated noise level in the images
----------
[Input]
a multi-channel tensor of noise map
[Output]
A list of noise level value
'''
lm_numpy = lm.data.cpu().numpy()
lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
nl_list = np.zeros((lm_numpy.shape[0], chn,1))
for n in range(lm_numpy.shape[0]):
for c in range(chn):
selected_lm = np.reshape(lm_numpy[n,:,:,c], (lm_numpy.shape[1]*lm_numpy.shape[2], 1))
selected_lm = selected_lm[selected_lm>thre]
if selected_lm.shape[0] == 0:
nl_list[n, c] = 0
else:
hist = np.histogram(selected_lm, density=True)
nl_ind = np.argmax(hist[0])
#print(nl_ind)
#print(hist[0])
#print(hist[1])
nl = ( hist[1][nl_ind] + hist[1][nl_ind+1] ) / 2.
nl_list[n, c] = nl
return nl_list
def get_cdf_noise_in_maps(lm, thre=0.8, chn=3):
'''
Description: To find, per channel, the noise level at which the cumulative distribution of the estimated map crosses the threshold
----------
[Input]
a multi-channel tensor of noise map
[Output]
A list of noise level value
'''
lm_numpy = lm.data.cpu().numpy()
lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
nl_list = np.zeros((lm_numpy.shape[0], chn,1))
for n in range(lm_numpy.shape[0]):
for c in range(chn):
selected_lm = np.reshape(lm_numpy[n,:,:,c], (lm_numpy.shape[1]*lm_numpy.shape[2], 1))
H, x = np.histogram(selected_lm, density=True)
dx = x[1]-x[0]
F = np.cumsum(H)*dx
F_ind = np.where(F>0.9)[0][0]
nl_list[n, c] = x[F_ind]
print(nl_list[n,c])
return nl_list
def get_pdf_in_maps(lm, mark, chn=1):
'''
Description: get the noise estimation pdf (binned histogram) of each channel
----------
[Input]
a multi-channel tensor of noise map and channel dimension
chn: the channel number for gaussian
[Output]
PDF of each sample and each channel
'''
lm_numpy = lm.data.cpu().numpy()
lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
pdf_list = np.zeros((lm_numpy.shape[0], chn, 10))
for n in range(lm_numpy.shape[0]):
for c in range(chn):
selected_lm = np.reshape(lm_numpy[n,:,:,c], (lm_numpy.shape[1]*lm_numpy.shape[2], 1))
H, x = np.histogram(selected_lm, range=(0.,1.), bins=10, density=True)
dx = x[1]-x[0]
F = H * dx
pdf_list[n, c, :] = F
#sio.savemat(mark + str(c) + '.mat',{'F':F})
# plt.bar(range(10), F)
#plt.savefig(mark + str(c) + '.png')
# plt.close()
return pdf_list
def get_pdf_matching_score(F1, F2):
'''
Description: Given two sets of CDF, get the overall matching score for each channel
-----------
[Input] F1, F2
[Output] score for each channel
'''
return np.mean((F1-F2)**2)
def decide_scale_factor(noisy_image, estimation_model, color=1, thre = 0, plot_flag = 1, stopping = 4, mark=''):
'''
Description: Given a noisy image and the noise estimation model, keep multiscaling the image\\
using pixel-shuffle methods, and estimate the pdf and cdf of AWGN channel
Compare the changes of the density function and decide the optimal scaling factor
------------
[Input] noisy_image, estimation_model, plot_flag, stopping
[Output] plot the middle vector
score_seq: the matching score sequence between the two subsequent pdf
opt_scale: the optimal scaling factor
'''
if color == 1:
c = 3
elif color == 0:
c = 1
score_seq = []
Pre_CDF = None
flag = 0
for pss in range(1, stopping+1): #scaling factor from 1 to the limit
noisy_image = pixelshuffle(noisy_image, pss)
INoisy = np2ts(noisy_image, color)
INoisy = Variable(INoisy.cuda(), volatile=True)
EMap = torch.clamp(estimation_model(INoisy), 0., 1.)
EPDF = get_pdf_in_maps(EMap, mark + str(pss), c)[0]
if flag != 0:
score = get_pdf_matching_score(EPDF, Pre_PDF) #TODO: How to match these two
print(score)
score_seq.append(score)
if score <= thre:
print('optimal scale is %d:' % (pss-1))
return (pss-1, score_seq)
Pre_PDF = EPDF
flag = 1
return (stopping, score_seq)
def get_max_noise_in_maps(lm, chn=3):
'''
Description: To find out the maximum level of noise level in the images
----------
[Input]
a multi-channel tensor of noise map
[Output]
A list of noise level value
'''
lm_numpy = lm.data.cpu().numpy()
lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
nl_list = np.zeros((lm_numpy.shape[0], chn, 1))
for n in range(lm_numpy.shape[0]):
for c in range(chn):
nl = np.amax(lm_numpy[n, :, :, c])
nl_list[n, c] = nl
return nl_list
def get_smooth_maps(lm, dilk = 50, gsd = 10):
'''
Description: To return the refined maps after dilation and gaussian blur
[Input] a multi-channel tensor of noise map
[Output] a multi-channel tensor of refined noise map
'''
kernel = np.ones((dilk, dilk))
lm_numpy = lm.data.squeeze(0).cpu().numpy()
lm_numpy = (np.transpose(lm_numpy, (1, 2, 0)))
ref_lm_numpy = lm_numpy.copy() #a refined map
for c in range(lm_numpy.shape[2]):
nmap = lm_numpy[:, :, c]
nmap_dilation = cv2.dilate(nmap, kernel, iterations=1)
ref_lm_numpy[:, :, c] = nmap_dilation
#ref_lm_numpy[:, :, c] = scipy.ndimage.filters.gaussian_filter(nmap_dilation, gsd)
RF_tensor = np2ts(ref_lm_numpy)
RF_tensor = Variable(RF_tensor.cuda(),volatile=True)
return RF_tensor
def zeroing_out_maps(lm, keep=0):
'''
Only Keep one channel and zero out other channels
[Input] a multi-channel tensor of noise map
[Output] a multi-channel tensor of noise map after zeroing out items
'''
lm_numpy = lm.data.squeeze(0).cpu().numpy()
lm_numpy = (np.transpose(lm_numpy, (1, 2, 0)))
ref_lm_numpy = lm_numpy.copy() #a refined map
for c in range(lm_numpy.shape[2]):
if np.isin(c,keep)==0:
ref_lm_numpy[:, :, c] = 0.
print(ref_lm_numpy)
RF_tensor = np2ts(ref_lm_numpy)
RF_tensor = Variable(RF_tensor.cuda(),volatile=True)
return RF_tensor
def level_refine(NM_tensor, ref_mode, chn=3,cFlag=False):
'''
Description: To refine the estimated noise level maps
[Input] the noise map tensor, and a refinement mode
Mode:
[0] Get the most salient (the most frequent estimated noise level)
[1] Get the maximum value of noise level
[2] Gaussian smooth the noise level map to make the regional estimation more smooth
[3] Get the average maximum value of the noise level
[5] Get the CDF thresholded value
[Output] a refined map tensor with four channels
'''
#RF_tensor = NM_tensor.clone() #get a clone version of NM tensor without changing the original one
if ref_mode == 0 or ref_mode == 1 or ref_mode == 4 or ref_mode==5: #if we use a single value for the map
if ref_mode == 0 or ref_mode == 4:
nl_list = get_salient_noise_in_maps(NM_tensor, 0., chn)
if ref_mode == 4: #half the estimation
nl_list = nl_list - nl_list
print(nl_list)
elif ref_mode == 1:
nl_list = get_max_noise_in_maps(NM_tensor, chn)
elif ref_mode == 5:
nl_list = get_cdf_noise_in_maps(NM_tensor, 0.999, chn)
noise_map = np.zeros((NM_tensor.shape[0], chn, NM_tensor.size()[2], NM_tensor.size()[3])) #initialize the noise map before concatenating
for n in range(NM_tensor.shape[0]):
noise_map[n,:,:,:] = np.reshape(np.tile(nl_list[n], NM_tensor.size()[2] * NM_tensor.size()[3]),
(chn, NM_tensor.size()[2], NM_tensor.size()[3]))
RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor)
if torch.cuda.is_available() and not cFlag:
RF_tensor = Variable(RF_tensor.cuda(),volatile=True)
else:
RF_tensor = Variable(RF_tensor,volatile=True)
elif ref_mode == 2:
RF_tensor = get_smooth_maps(NM_tensor, 10, 5)
elif ref_mode == 3:
lb = get_salient_noise_in_maps(NM_tensor)
up = get_max_noise_in_maps(NM_tensor)
nl_list = ( lb + up ) * 0.5
noise_map = np.zeros((1, chn, NM_tensor.size()[2], NM_tensor.size()[3])) #initialize the noise map before concatenating
noise_map[0, :, :, :] = np.reshape(np.tile(nl_list, NM_tensor.size()[2] * NM_tensor.size()[3]),
(chn, NM_tensor.size()[2], NM_tensor.size()[3]))
RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor)
RF_tensor = Variable(RF_tensor.cuda(),volatile=True)
return (RF_tensor, nl_list)
def normalize(a, len_v, min_v, max_v):
'''
normalize the sequence of factors
'''
norm_a = np.reshape(a, (len_v,1))
norm_a = (norm_a - float(min_v)) / float(max_v - min_v)
return norm_a
def generate_training_noisy_image(current_image, s_or_m, limit_set, c, val=0):
noise_level_list = np.zeros((c, 1))
if s_or_m == 0: #single noise type
if val == 0:
for chn in range(c):
noise_level_list[chn] = np.random.uniform(limit_set[0][0], limit_set[0][1])
elif val == 1:
for chn in range(c):
noise_level_list[chn] = 35
noisy_img = generate_noisy(current_image, 0, noise_level_list /255.)
return (noisy_img, noise_level_list)
def generate_ground_truth_noise_map(noise_map, n, noise_level_list, limit_set, c, pn, pw, ph):
for chn in range(c):
noise_level_list[chn] = normalize(noise_level_list[chn], 1, limit_set[0][0], limit_set[0][1]) #normalize the level value
noise_map[n, :, :, :] = np.reshape(np.tile(noise_level_list, pw * ph), (c, pw, ph)) #total number of channels
return noise_map
#Add noise to the original images
def generate_noisy(image, noise_type, noise_level_list=0, sigma_s=20, sigma_c=40):
'''
Description: To generate noisy images of different types
----------
[Input]
image : ndarray of float type: [0,1] just one image, current support gray or color image input (w,h,c)
noise_type: 0,1,2,3
noise_level_list: pre-defined noise level for each channel, without normalization: only information of 3 channels
[0]'AWGN' Multi-channel Gaussian-distributed additive noise
[1]'RVIN' Replaces random pixels with 0 or 1. noise_level: ratio of the occupation of the changed pixels
[2]'Gaussian-Poisson' GP noise approximator, the combination of signal-dependent and signal-independent noise
[Output]
A noisy image
'''
w, h, c = image.shape
#Some unused noise type: Poisson and Uniform
#if noise_type == *:
#vals = len(np.unique(image))
#vals = 2 ** np.ceil(np.log2(vals))
#noisy = np.random.poisson(image * vals) / float(vals)
#if noise_type == *:
#uni = np.random.uniform(-factor,factor,(w, h, c))
#uni = uni.reshape(w, h, c)
#noisy = image + uni
noisy = image.copy()
if noise_type == 0: #MC-AWGN model
gauss = np.zeros((w, h, c))
for chn in range(c):
gauss[:,:,chn] = np.random.normal(0, noise_level_list[chn], (w, h))
noisy = image + gauss
elif noise_type == 1: #MC-RVIN model
for chn in range(c): #process each channel separately
prob_map = np.random.uniform(0.0, 1.0, (w, h))
noise_map = np.random.uniform(0.0, 1.0, (w, h))
noisy_chn = noisy[: , :, chn]
noisy_chn[ prob_map < noise_level_list[chn] ] = noise_map[ prob_map < noise_level_list[chn] ]
elif noise_type == 2:
#sigma_s = np.random.uniform(0.0, 0.16, (3,))
#sigma_c = np.random.uniform(0.0, 0.06, (3,))
sigma_c = [sigma_c]*3
sigma_s = [sigma_s]*3
sigma_s = np.reshape(sigma_s, (1, 1, c)) #reshape the sigma factor to [1,1,c] to multiply with the image
noise_s_map = np.multiply(sigma_s, image) #according to x or temp_x?? (according to clean image or irradience)
#print(noise_s_map) # different from the official code, here we use the original clean image x to compute the variance
noise_s = np.random.randn(w, h, c) * noise_s_map #use the new variance to shift the normal distribution
noisy = image + noise_s
#add signal_independent noise to L
noise_c = np.zeros((w, h, c))
for chn in range(3):
noise_c [:, :, chn] = np.random.normal(0, sigma_c[chn], (w, h))
noisy = noisy + noise_c
return noisy
#generate AWGN-RVIN noise together
def generate_comp_noisy(image, noise_level_list):
'''
Description: To generate mixed AWGN and RVIN noise together
----------
[Input]
image: a float image between [0,1]
noise_level_list: AWGN and RVIN noise level
[Output]
A noisy image
'''
w, h, c = image.shape
noisy = image.copy()
for chn in range(c):
mix_thre = noise_level_list[c+chn] #get the mix ratio of AWGN and RVIN
gau_std = noise_level_list[chn] #get the gaussian std
prob_map = np.random.uniform( 0, 1, (w, h) ) #the prob map
noise_map = np.random.uniform( 0, 1, (w, h) ) #the noisy map
noisy_chn = noisy[: ,: ,chn]
noisy_chn[prob_map < mix_thre ] = noise_map[prob_map < mix_thre ]
gauss = np.random.normal(0, gau_std, (w, h))
noisy_chn[prob_map >= mix_thre ] = noisy_chn[prob_map >= mix_thre ] + gauss[prob_map >= mix_thre]
return noisy
def generate_denoise(image, model, noise_level_list):
'''
Description: Generate Denoised Blur Images
----------
[Input]
image:
model:
noise_level_list:
[Output]
A blur image patch
'''
#input images
ISource = np2ts(image)
ISource = torch.clamp(ISource, 0., 1.)
ISource = Variable(ISource.cuda(),volatile=True)
#input denoise conditions
noise_map = np.zeros((1, 6, image.shape[0], image.shape[1])) #initialize the noise map before concatenating
noise_map[0, :, :, :] = np.reshape(np.tile(noise_level_list, image.shape[0] * image.shape[1]), (6, image.shape[0], image.shape[1]))
NM_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor)
NM_tensor = Variable(NM_tensor.cuda(),volatile=True)
#generate blur images
Res = model(ISource, NM_tensor)
Out = torch.clamp(ISource-Res, 0., 1.)
out_numpy = Out.data.squeeze(0).cpu().numpy()
out_numpy = np.transpose(out_numpy, (1, 2, 0))
return out_numpy
#TODO: two pixel shuffle functions to process the images
def pixelshuffle(image, scale):
'''
Description: Given an image, return a reversible sub-sampling
[Input]: Image ndarray float
[Return]: A mosaic image of shuffled pixels
'''
if scale == 1:
return image
w, h ,c = image.shape
mosaic = np.array([])
for ws in range(scale):
band = np.array([])
for hs in range(scale):
temp = image[ws::scale, hs::scale, :] #get the sub-sampled image
band = np.concatenate((band, temp), axis = 1) if band.size else temp
mosaic = np.concatenate((mosaic, band), axis = 0) if mosaic.size else band
return mosaic
def reverse_pixelshuffle(image, scale, fill=0, fill_image=0, ind=[0,0]):
'''
Description: Given a mosaic image of subsampling, recombine it into a full image
[Input]: Image
[Return]: Recombine it using different portions of pixels
'''
w, h, c = image.shape
real = np.zeros((w, h, c)) #real image
wf = 0
hf = 0
for ws in range(scale):
hf = 0
for hs in range(scale):
temp = real[ws::scale, hs::scale, :]
wc, hc, cc = temp.shape #get the shape of the current sub-image
if fill==1 and ws==ind[0] and hs==ind[1]:
real[ws::scale, hs::scale, :] = fill_image[wf:wf+wc, hf:hf+hc, :]
else:
real[ws::scale, hs::scale, :] = image[wf:wf+wc, hf:hf+hc, :]
hf = hf + hc
wf = wf + wc
return real
def scal2map(level, h, w, min_v=0., max_v=255.):
'''
Change a single normalized noise level value to a map
[Input]: level: a scaler noise level(0-1), h, w
[Return]: a pytorch tensor of the cacatenated noise level map
'''
#get a tensor from the input level
level_tensor = torch.from_numpy(np.reshape(level, (1,1))).type(torch.FloatTensor)
#make the noise level to a map
level_tensor = level_tensor.view(level_tensor.size(0), level_tensor.size(1), 1, 1)
level_tensor = level_tensor.repeat(1, 1, h, w)
return level_tensor
def scal2map_spatial(level1, level2, h, w):
stdN_t1 = scal2map(level1, int(h/2), w)
stdN_t2 = scal2map(level2, h-int(h/2), w)
stdN_tensor = torch.cat([stdN_t1, stdN_t2], dim=2)
return stdN_tensor
| 2.125 | 2 |
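`pixelshuffle`/`reverse_pixelshuffle` above implement a reversible strided sub-sampling. A standalone NumPy sketch of the same round trip (assuming the image dimensions are divisible by the scale):

```python
import numpy as np

def to_mosaic(img, scale):
    rows = []
    for ws in range(scale):
        row = [img[ws::scale, hs::scale, :] for hs in range(scale)]
        rows.append(np.concatenate(row, axis=1))   # tile sub-images horizontally
    return np.concatenate(rows, axis=0)            # then stack the bands vertically

def from_mosaic(mosaic, scale):
    w, h, _ = mosaic.shape
    bw, bh = w // scale, h // scale
    out = np.zeros_like(mosaic)
    for ws in range(scale):
        for hs in range(scale):
            out[ws::scale, hs::scale, :] = mosaic[ws * bw:(ws + 1) * bw, hs * bh:(hs + 1) * bh, :]
    return out

img = np.random.rand(8, 8, 3)
assert np.allclose(from_mosaic(to_mosaic(img, 2), 2), img)   # the shuffle is exactly reversible
```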
accounts/forms.py | bugulin/gymgeek-web | 0 | 12790255 | <filename>accounts/forms.py
from django import forms
from core import material_design
class AboutForm(forms.Form):
    about = forms.CharField(widget=material_design.Textarea, max_length=500, required=False)
| 1.859375 | 2 |
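A hedged usage sketch for the form above (assumes a configured Django project; `material_design.Textarea` only changes the widget, not validation):

```python
form = AboutForm(data={'about': 'Short bio, at most 500 characters.'})
if form.is_valid():
    about_text = form.cleaned_data['about']   # '' when the optional field is left blank
```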
module10-packages/deepcloudlabs/hr.py | deepcloudlabs/dcl160-2021-jul-05 | 0 | 12790256 | <filename>module10-packages/deepcloudlabs/hr.py
class Employee:
    def __init__(self, fullname, email, salary):
        self.fullname = fullname
        self.email = email
        self.salary = salary

    def __str__(self):
        return f"employee (full name: {self.fullname})"
| 2.625 | 3 |
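A quick usage sketch for the `Employee` class above:

```python
jack = Employee("Jack Bauer", "jack@ctu.example", 100_000)
print(jack)          # employee (full name: Jack Bauer)
print(jack.salary)   # 100000
```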
iutest/core/iconutils.py | mgland/iutest | 10 | 12790257 | # Copyright 2019-2020 by <NAME>, MGLAND animation studio. All rights reserved.
# This file is part of IUTest, and is released under the "MIT License Agreement".
# Please see the LICENSE file that should have been included as part of this package.
import os
from iutest.core import pathutils
from iutest.qt import iconFromPath
def _iconDir():
    return os.path.join(pathutils.iutestPackageDir(), "icons")


def iconPath(iconName):
    return os.path.join(_iconDir(), iconName)


def iconPathSet(iconName, suffixes):
    iconDir = _iconDir()
    nameParts = list(iconName.partition("."))
    nameParts.insert(1, None)
    paths = []
    for suffix in suffixes:
        nameParts[1] = suffix
        fileName = "".join(nameParts)
        paths.append(os.path.join(iconDir, fileName))
    return paths


def initSingleClassIcon(obj, objAttributeName, iconFileName):
    path = iconPath(iconFileName)
    setattr(obj, objAttributeName, iconFromPath(path))
| 1.765625 | 2 |
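The interesting detail above is `iconPathSet`, which splices each suffix between the base name and the extension. A standalone sketch of that path-building logic (directory and file names are illustrative):

```python
import os

def icon_path_set(icon_dir, icon_name, suffixes):
    base, dot, ext = icon_name.partition('.')
    return [os.path.join(icon_dir, f"{base}{suffix}{dot}{ext}") for suffix in suffixes]

print(icon_path_set('/opt/iutest/icons', 'run.svg', ['_normal', '_hover']))
# ['/opt/iutest/icons/run_normal.svg', '/opt/iutest/icons/run_hover.svg']
```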
pmovie/scanp4.py | noobermin/lspreader | 0 | 12790258 | #!/usr/bin/env python
'''
Search a p4 for good indices. This imports the file specified by
the modulepath option, reads a function called "f" from it,
and filters the frames using it. Outputs a numpy array of
good trajectories.
Usage:
./search.py [options] <input> <hashd> <output>
Options:
--help -h Print this help.
--modulepath=M Set the path to the file to read "f"
from. [default: ./scanner.py]
'''
from lspreader import read;
from pys import load_pickle;
from lspreader.pmovie import filter_hashes_from_file;
import numpy as np;
import re;
import imp;
if __name__ == "__main__":
    from docopt import docopt;
    opts=docopt(__doc__,help=True);
    fname = opts['--modulepath'];
    m=re.search(r'(^.*)/(\w+)\.py$', fname);
    if not m:
        raise ValueError("module should be well named!");
    path =m.group(1);
    mname=m.group(2);
    fp, path,desc = imp.find_module(mname, [path]);
    try:
        f=imp.load_module(mname, fp, path, desc).f
    finally:
        if fp:
            fp.close();
    hashd = load_pickle(opts['<hashd>']);
    np.save(
        opts['<output>'],
        filter_hashes_from_file(opts['<input>'], f, **hashd));
| 2.90625 | 3 |
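The script above imports a user-supplied module and reads a callable named `f` from it to filter pmovie frames. A hypothetical `scanner.py` is sketched below; the exact signature expected of `f` is defined by `lspreader.pmovie.filter_hashes_from_file`, so treat the field name `'xi'` and the boolean-mask return as assumptions:

```python
# scanner.py (hypothetical example module)
import numpy as np

def f(frame):
    # keep only particles in the right half of the domain (illustrative criterion)
    return frame['xi'] > 0.0
```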
cradmin_legacy/viewhelpers/listfilter/base/abstractfilter.py | appressoas/cradmin_legacy | 0 | 12790259 | <reponame>appressoas/cradmin_legacy<filename>cradmin_legacy/viewhelpers/listfilter/base/abstractfilter.py
from __future__ import unicode_literals
import json
from xml.sax.saxutils import quoteattr
from django.utils.translation import pgettext
from cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlistchild import AbstractFilterListChild
class AbstractFilter(AbstractFilterListChild):
"""
Abstract base class for all filters.
"""
template_name = 'cradmin_legacy/viewhelpers/listfilter/base/abstractfilter.django.html'
def __init__(self, slug=None, label=None, label_is_screenreader_only=None):
"""
Parameters:
slug: You can send the slug as a parameter, or override :meth:`.get_slug`.
label: You can send the label as a parameter, or override :meth:`.get_label`.
label_is_screenreader_only: You can set this as a parameter,
or override :meth:`.get_label_is_screenreader_only`.
"""
self.values = []
self.slug = slug
self.label = label
self.label_is_screenreader_only = label_is_screenreader_only
super(AbstractFilter, self).__init__()
def copy(self):
"""
Returns a copy of this filter.
"""
copy = self.__class__()
copy.set_values(list(self.values))
copy.slug = self.slug
copy.label = self.label
return copy
def get_slug(self):
"""
Get the slug for this filter. The slug is used
in the URL to identify the filter.
If your users can change their language, this
should not be translatable since that would
make an URL unusable by a user with a different language
(if a user shares an URL with another user).
"""
if self.slug:
return self.slug
else:
raise NotImplementedError('You must override get_slug(), or send a slug to __init__().')
def get_label(self):
"""
Get the label of the filter. This is typically set as the
``<label>`` for the filter input field.
A label is optional, but highly recommended. Even if you do not
want to show the label, you should specify one, and hide it
from everyone except for screenreaders with
:meth:`.get_label_is_screenreader_only`.
"""
return self.label
def get_label_is_screenreader_only(self):
"""
If this returns ``True``, the label will be styled to
only make it visible to screenreaders.
This is recommended over simply not setting a label
since that would break accessibility.
Defaults to the value of the ``label_is_screenreader_only`` parameter
(see :class:`.AbstractFilter`). If ``label_is_screenreader_only`` is ``None``,
this defaults to the return value of the ``get_label_is_screenreader_only_by_default()``
method of
:class:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList`.
"""
if self.label_is_screenreader_only is None:
return self.filterlist.get_label_is_screenreader_only_by_default()
else:
return self.label_is_screenreader_only
def set_values(self, values):
"""
Change the current values stored in the filter to the given ``values``.
"""
self.values = values
def clear_values(self):
"""
Clear the current values stored in the filter.
"""
self.values = []
def add_values(self, values):
"""
Add the given list of ``values`` to the values currently stored
in the filter.
"""
self.values += values
def remove_values(self, values):
"""
Remove the given list of ``values`` from the values currently stored
in the filter.
"""
new_values = list(self.values)
values_to_remove = values
for value in new_values:
if value in values_to_remove:
new_values.remove(value)
self.values = new_values
def clean_value(self, value):
"""
Called by :meth:`.clean_values` to clean a single value.
"""
return value
def get_cleaned_values(self):
"""
Clean the values, to prepare them for usage in :meth:`.filter`.
Defaults to returning the values unchanged, but you will typically
want to override this if your filter allows the user to type in a
values, or if you want to convert the values from a string into
something that makes sense for your :meth:`.filter`.
If you want validation, you should handle that by setting some
attribute on ``self``, and handle the error in the template
rendering the filter (and most likely not add anything
to the queryobject in :meth:`.filter`).
"""
return [self.clean_value(value) for value in self.values]
def get_cleaned_value(self):
"""
Returns the first value returned by :meth:`.get_cleaned_values`,
or ``None`` if there is no values. Use this in
:meth:`.filter` if you expect a single value.
"""
clean_values = self.get_cleaned_values()
if len(clean_values) > 0:
return clean_values[0]
else:
return None
def build_set_values_url(self, values):
"""
Get the URL that adds this filter with the given values to the current url.
You should not need to override this, but you will use it in your
template context to render urls for choices if your filter
is a single select filter.
"""
copy = self.copy()
copy.set_values(values)
return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy)
def build_clear_values_url(self):
"""
Get the URL that clears this filter from the current url.
You should not need to override this, but you will use it in your
template context to render urls for choices if your filter
supports "clear".
"""
copy = self.copy()
copy.clear_values()
return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy)
def build_add_values_url(self, values):
"""
Get the URL that adds the given values for this filter to the current url.
This is not the same as :meth:`.build_set_values_url`. This method is for
multiselect filters where the user can add valuess to the filter
(typically via checkboxes).
You should not need to override this, but you will use it in your
template context to render urls for choices if your filter
uses multiselect.
"""
copy = self.copy()
copy.add_values(values)
return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy)
def build_remove_values_url(self, values):
"""
Get the URL that removes the given values for this filter to the current url.
This is not the same as :meth:`.build_clear_values_url`. This method is for
multiselect filters where the user can add/remove valuess to the filter
(typically via checkboxes).
You should not need to override this, but you will use it in your
template context to render urls for choices if your filter
uses multiselect.
"""
copy = self.copy()
copy.remove_values(values)
return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy)
def get_base_css_classes_list(self):
return ['cradmin-legacy-listfilter-filter']
def filter(self, queryobject):
"""
Add the current values to the given ``queryobject``.
This is always called unless the ``filter_string`` is
None or empty string.
Parameters:
queryobject: The type of the queryobject depends on the query backend.
If you are filtering against the Django ORM, this will
typically be a QuerySet, but for other backends such
as ElasticSearch, MongoDB, etc. this can be something
completely different such as a dict.
Returns:
An object of the same type as the given ``queryobject``.
"""
raise NotImplementedError()
def get_dom_id(self):
"""
Get the DOM ID of this filter. The base template adds this to the wrapping DIV,
but you can also use this if you need DOM IDs for components of a filter
(E.g.: Field ID to attach a labels to a form field).
"""
return '{}_{}'.format(self.filterlist.get_dom_id_prefix(), self.get_slug())
def get_label_dom_id(self):
"""
Get the DOM ID of the label for this filter.
"""
return '{}_label'.format(self.get_dom_id())
def get_inputfield_dom_id(self):
"""
Get the DOM id of the input field. If the filter uses multiple input fields,
this will most likely not be used, or it may be used as the ID of the
first field to make the label focus on the first field when it is clicked.
"""
return '{}_input'.format(self.get_dom_id())
def get_target_dom_id(self):
"""
Get the DOM id of the target of the filter.
This is just a shortcut to access
:meth:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList.get_target_dom_id`.
"""
return self.filterlist.get_target_dom_id()
def get_loadingmessage(self):
"""
Get the loading message to show when the filter loads
the updated target element content.
"""
return pgettext('listfilter loading message', 'Loading')
def get_angularjs_options_dict(self):
"""
Get angularjs directive options dict.
You can override this in your filters, but you should
call ``super`` to get the default options.
"""
return {
'loadingmessage': self.get_loadingmessage()
}
def get_angularjs_options_json(self):
"""
Returns a json encoded and HTML attribute quoted version
of :meth:`.get_angularjs_options_dict`.
You use this with the angularjs directive for the filter
to send options into the directive::
<someelement my-filter-directive={{ me.get_angularjs_options_json|safe }}>
Notice that we do not include any ``"`` or ``'`` around the directives
HTML attribute - that is included by this method.
"""
return quoteattr(json.dumps(self.get_angularjs_options_dict()))
| 2.1875 | 2 |
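For orientation, a hedged sketch (not part of cradmin_legacy) of what a minimal concrete subclass of `AbstractFilter` could look like, assuming a Django QuerySet as the queryobject and a hypothetical `is_public` model field:

```python
class IsPublicFilter(AbstractFilter):
    def get_slug(self):
        return 'is_public'

    def get_label(self):
        return 'Public items only'

    def filter(self, queryobject):
        # get_cleaned_value() is the first value from the querystring, or None
        if self.get_cleaned_value() == 'true':
            return queryobject.filter(is_public=True)
        return queryobject
```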
hon/json/json_serializable.py | swquinn/hon | 0 | 12790260 | <reponame>swquinn/hon<filename>hon/json/json_serializable.py
"""
hon.json.json_serializable
~~~~~
"""
class JsonSerializable():
    def to_json(self):
        raise NotImplementedError()
| 1.984375 | 2 |
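A minimal illustrative implementation of the interface above:

```python
class Point(JsonSerializable):
    def __init__(self, x, y):
        self.x, self.y = x, y

    def to_json(self):
        return {'x': self.x, 'y': self.y}

print(Point(1, 2).to_json())   # {'x': 1, 'y': 2}
```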
sentience/core/utils/errors.py | jtdutta1/Sentience | 0 | 12790261 | <gh_stars>0
class Error(Exception):
    def __init__(self):
        self.message = None

    def __repr__(self) -> str:
        return self.message

    def __str__(self) -> str:
        return self.message


class DimensionalityMismatchError(Error):
    def __init__(self, reduce_expand, *args) -> None:
        super().__init__(*args)
        if reduce_expand:
            self.message = "Expected dimensional reduction then expansion but got the reverse."
        else:
            self.message = "Expected dimensional expansion then reduction but got the reverse."


class ActivationNotFoundError(Error):
    def __init__(self, name:str, *args) -> None:
        super().__init__(*args)
        self.message = f"{name} activation not found."


class LayerTypeMismatchError(Error):
    def __init__(self, obj, *args) -> None:
        super().__init__(*args)
        self.message = f"Type of object is not Layer. Expected type Layer, given type {type(obj).__name__}."


class LayerNotFoundError(Error):
    def __init__(self, name:str, *args: object) -> None:
        super().__init__(*args)
        self.message = f"{name} layer not found." | 2.65625 | 3 |
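A usage sketch for the error hierarchy above; every subclass fills in `message`, which both `__str__` and `__repr__` return:

```python
try:
    raise ActivationNotFoundError('swish')
except Error as err:
    print(err)   # swish activation not found.
```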
python/pic_format.py | YeungShaoFeng/libxib | 0 | 12790262 | <reponame>YeungShaoFeng/libxib
def pic_format(file_head: bytes):
    """
    Determine the format of a picture. \n
    :param file_head: The [:8] of the pic's file_head.
    :return: Pic's format if matched, "unknown" if none matched.
    """
    res = "unknown"
    if b'\xff\xd8\xff' in file_head:
        res = 'jpg'
    elif b'\x89PNG\r\n\x1a\n' in file_head:
        res = 'png'
    return res
| 3.203125 | 3 |
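A usage sketch for `pic_format`, sniffing the magic bytes of a local file (the path is hypothetical):

```python
with open('photo.jpg', 'rb') as fh:
    print(pic_format(fh.read(8)))   # 'jpg', 'png' or 'unknown'
```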
common/models.py | stefangeorg/town-car | 0 | 12790263 | <filename>common/models.py
from django.contrib.gis.db import models
class Address(models.Model):
    address1 = models.CharField(max_length=255)
    address2 = models.CharField(max_length=255)
    city = models.CharField(max_length=255)
    zip = models.CharField(max_length=20, blank=True, null=True)
    state = models.CharField(max_length=100, blank=True, null=True)
    points = models.PointField()
| 2.140625 | 2 |
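A hedged sketch of creating an `Address` row with a GeoDjango point (longitude/latitude order; the values are illustrative and a configured GeoDjango project is assumed):

```python
from django.contrib.gis.geos import Point

Address.objects.create(
    address1='1600 Amphitheatre Pkwy',
    address2='',
    city='Mountain View',
    zip='94043',
    state='CA',
    points=Point(-122.0841, 37.4220),   # Point(x=longitude, y=latitude)
)
```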
TalentHunter/jobMatchingApi/urls.py | Hnachik/JobMatchingPfeProjectBack | 0 | 12790264 | <gh_stars>0
from django.urls import include, path
from .views import recruiter, jobseeker
urlpatterns = [
path('jobseeker/', include(([
path('', jobseeker.JobSeekerView.as_view()),
path('resume/', jobseeker.ResumeDetailView.as_view()),
path('resume/<int:id>/', jobseeker.ResumeUpdateView.as_view()),
path('resume/evaluate/', jobseeker.EvaluateResumeListView.as_view()),
path('resume/matched-posts/', jobseeker.MatchedPostsListView.as_view()),
path('resume/matched-posts/<int:id>/', jobseeker.MatchedPostsDetailView.as_view()),
path('resume/add/', jobseeker.ResumeCreateView.as_view()),
path('whistory/add/', jobseeker.WorkHistoryCreateView.as_view()),
path('whistory/<int:id>/', jobseeker.WorkHistoryDetailView.as_view()),
path('whistory/', jobseeker.WorkHistoryListView.as_view()),
path('education/add/', jobseeker.EducationBackgroundCreateView.as_view()),
path('education/<int:id>/', jobseeker.EducationBackgroundDetailView.as_view()),
path('education/', jobseeker.EducationBackgroundListView.as_view()),
], 'jobMatchingApi'), namespace='jobseeker')),
path('recruiter/', include(([
path('', recruiter.RecruiterView.as_view()),
path('jobposts/', recruiter.JobPostListView.as_view()),
path('allposts/', recruiter.JobPostAllView.as_view()),
path('post/evaluate/', recruiter.EvaluatePostListView.as_view()),
path('post/matched-resumes/', recruiter.MatchedResumesListView.as_view()),
path('allposts/<int:id>/', recruiter.JobPostAllDetailView.as_view()),
path('jobposts/<int:id>/', recruiter.JobPostDetailView.as_view()),
path('jobpost/add/', recruiter.JobPostCreateView.as_view()),
path('categories/', recruiter.JobCategoryListView.as_view()),
], 'jobMatchingApi'), namespace='recruiter')),
]
| 1.96875 | 2 |
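An illustrative check of how the nested, namespaced includes above resolve (assumes these patterns are mounted at the project root via ROOT_URLCONF and Django is set up):

```python
from django.urls import resolve

match = resolve('/jobseeker/resume/add/')
print(match.namespace)         # 'jobseeker'
print(match.func.view_class)   # <class '...views.jobseeker.ResumeCreateView'>
```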
vscod/extensions_downloader.py | DivoK/vscod | 4 | 12790265 | import asyncio
import dataclasses
import json
import re
import typing
from pathlib import Path
import aiohttp
from loguru import logger
from .utils import download_url, get_request, get_original_filename
# Format string linking to the download of a vscode extension .vsix file.
MARKETPLACE_DOWNLOAD_LINK = '''
https://marketplace.visualstudio.com/_apis/public/gallery/publishers/{publisher_name}/vsextensions/{extension_name}/{version}/vspackage
'''.strip()
# Format string linking to the marketplace page of some extension.
MARKETPLACE_PAGE_LINK = '''
https://marketplace.visualstudio.com/items?itemName={extension_id}
'''.strip()
# Regex used to extract the exact version of an extension from it's marketplace page.
VERSION_REGEX = re.compile(r'"Version":"(.*?)"')
@dataclasses.dataclass
class ExtensionPath:
"""
Dataclass for storing info regarding a certain VSCode extension.
"""
path: Path # Extension final save path.
extension_id: str # Extension ID.
version: str = 'latest' # Extension version.
def _build_extension_download_url(
extension_name: str, publisher_name: str, version: str
) -> str:
"""
Build the download url for the given parameters.
Just a shortcut for the string formatting.
:param extension_name: Desired extension name.
:type extension_name: str
:param publisher_name: Desired extension publisher's name.
:type publisher_name: str
:param version: Desired extension version.
:type version: str
:return: The formatted download url.
:rtype: str
"""
return MARKETPLACE_DOWNLOAD_LINK.format(
extension_name=extension_name, publisher_name=publisher_name, version=version
)
def _build_extension_download_url_from_ext_path(ext_path: ExtensionPath) -> str:
"""
Build the download url for the given parameters.
:param ext_path: A spec object describing the desired extension.
:type ext_path: ExtensionPath
:return: The formatted download url.
:rtype: str
"""
publisher_name, extension_name = ext_path.extension_id.split('.')
return _build_extension_download_url(extension_name, publisher_name, ext_path.version)
async def _download_extension(
session: aiohttp.ClientSession,
extension_name: str,
publisher_name: str,
version: str,
save_path: Path,
) -> None:
"""
Download an extension according to the given parameters.
When one needs to be a tiny bit more verbose than the `by_id` version.
:param session: An aiohttp session object to use.
:type session: aiohttp.ClientSession
:param extension_name: Desired extension name.
:type extension_name: str
:param publisher_name: Desired extension publisher's name.
:type publisher_name: str
:param version: Desired extension version.
:type version: str
:param save_path: Save path to downloaded the desired extension to.
:type save_path: Path
:return: None.
:rtype: None
"""
logger.info(f'Downloading {extension_name}...')
url = _build_extension_download_url(extension_name, publisher_name, version)
await download_url(session, url, save_path, return_type=bytes)
logger.info(f'Downloaded {extension_name} to {save_path}.')
async def download_extension_by_id(
session: aiohttp.ClientSession, extension_id: str, version: str, save_path: Path
) -> None:
"""
Download an extension according to the given parameters.
:param session: An aiohttp session object to use.
:type session: aiohttp.ClientSession
:param extension_id: Desired extension ID.
:type extension_id: str
:param version: Desired extension version.
:type version: str
:param save_path: Save path to downloaded the desired extension to.
:type save_path: Path
:return: None.
:rtype: None
"""
publisher_name, extension_name = extension_id.split('.')
await _download_extension(session, extension_name, publisher_name, version, save_path)
def _recursive_parse_to_dict(
root_dict: typing.Dict[str, typing.Union[str, typing.Dict]],
) -> typing.List[ExtensionPath]:
"""
Recursively parse the given config data:
If the value of a key is a dict, treat it like a directory and delve one level deeper into the value.
If the value of a key is a string, create a spec object from it and give it it's "path" down the hierarchy.
:param root_dict: The current "root" of our config.
:type root_dict: typing.Dict[str, typing.Union[str, typing.Dict]]
:raises ValueError: A given key had an empty value.
:raises TypeError: A given key was neither a str or a dict.
:return: List of spec objects parsed from the initial config.
:rtype: typing.List[ExtensionPath]
"""
path_list = []
for key, value in root_dict.items():
if isinstance(value, str):
if not value:
raise ValueError(f'Value for key {key} was empty.')
path_list.append(ExtensionPath(Path(key) / f'{value}', value))
elif isinstance(value, dict):
for ext_path in _recursive_parse_to_dict(value):
ext_path.path = Path(key, ext_path.path)
path_list.append(ext_path)
else:
raise TypeError(f'Value for key {key} was neither str or dict.')
return path_list
def parse_extensions_json(
json_data: typing.Union[typing.Dict[str, str], Path],
) -> typing.List[ExtensionPath]:
"""
Decide wether the data provided was a Path or not and act accordingly:
If it's valid json format data, parse it and return a list of specs.
If it's a Path, open it and then do the same thing.
:param json_data: Either a path to a json config file or it's raw data (dict / list).
:type json_data: typing.Union[typing.Dict[str, str], Path]
:return: List of spec objects describing the given extensions.
:rtype: typing.List[ExtensionPath]
"""
if isinstance(json_data, Path):
with json_data.open() as json_file:
json_data = json.load(json_file)['extensions']
return _recursive_parse_to_dict(json_data)
async def get_extension_version(session: aiohttp.ClientSession, extension_id: str) -> str:
"""
Get the latest version of an extension on the marketplace.
:param session: An aiohttp session object to use.
:type session: aiohttp.ClientSession
:param extension_id: Desired marketplace extension to get the version of.
:type extension_id: str
:raises ValueError: Can't find the extension version.
:return: String of the extension's latest version.
:rtype: str
"""
logger.debug(f'Requesting version of extension {extension_id}...')
url = MARKETPLACE_PAGE_LINK.format(extension_id=extension_id)
try:
text: str = await get_request(session, url, return_type=str)
match = re.search(r'"Version":"(.*?)"', text)
if not match:
raise ValueError('Extension marketplace page data doesn\'t contain a version.')
version = match.group(1) # The captured version specifier.
except Exception as error:
logger.debug(error)
logger.warning('Can\'t get extension version, setting version to \'latest\'...')
version = 'latest'
logger.debug(f'Extension {extension_id} is of version {version}.')
return version
async def versionize_extension_paths(
session: aiohttp.ClientSession, extension_paths: typing.List[ExtensionPath]
) -> None:
"""
Add the `version` attributes to the extensions spec objects.
:param session: An aiohttp session object to use.
:type session: aiohttp.ClientSession
:param extension_paths: List of extension spec objects to patch.
:type extension_paths: typing.List[ExtensionPath]
:return: None, this patches the existing objects.
:rtype: None
"""
get_version_tasks = [
get_extension_version(session, ext_path.extension_id) for ext_path in extension_paths
]
versions = await asyncio.gather(*get_version_tasks)
for ext_path, version in zip(extension_paths, versions):
ext_path.version = version
async def patch_extension_paths(
session: aiohttp.ClientSession,
extension_paths: typing.List[ExtensionPath],
*,
versionize: bool = True,
) -> None:
"""
Fix up the extension paths by altering their name.
Basic functionality is to get the real names of extensions.
Can also append the current version number.
:param session: An aiohttp session object to use.
:type session: aiohttp.ClientSession
:param extension_paths: List of extension spec objects to patch.
:type extension_paths: typing.List[ExtensionPath]
:param versionize: Wether to append version names to the paths, defaults to True
:type versionize: bool, optional
:return: None, this patches the existing objects.
:rtype: None
"""
if versionize:
await versionize_extension_paths(session, extension_paths)
real_name_tasks = [
get_original_filename(session, _build_extension_download_url_from_ext_path(ext_path))
for ext_path in extension_paths
]
original_filenames = await asyncio.gather(*real_name_tasks)
for filename, ext_path in zip(original_filenames, extension_paths):
ext_path.path = ext_path.path.with_name(filename)
async def download_extensions_json(
json_data: typing.Union[typing.Dict[str, str], Path],
save_path: Path,
*,
real_name: typing.Optional[bool] = None,
versionize: typing.Optional[bool] = None,
) -> None:
"""
Parse the given json data and download the given VSCode extensions into the save path.
:param json_data: Either a path to a json config file or it's raw data (dict / list).
:type json_data: typing.Union[typing.Dict[str, str], Path]
:param save_path: Save path for all the downloaded VSCode binaries.
:type save_path: Path
:param real_name: Wether to patch the real filenames of the extensions, defaults to None (True)
:type real_name: typing.Optional[bool], optional
:param versionize: Wether to patch the current version of the extensions, has no effect without `real_name`, defaults to None (True)
:type versionize: typing.Optional[bool], optional
:return: None.
:rtype: None
"""
if real_name is None:
real_name = True
if versionize is None:
versionize = True
extension_paths = parse_extensions_json(json_data)
async with aiohttp.ClientSession() as session:
if real_name:
await patch_extension_paths(session, extension_paths, versionize=versionize)
download_extension_tasks = []
for ext_path in extension_paths:
extension_full_save_path = save_path / ext_path.path.with_suffix('.vsix')
extension_full_save_path.parent.mkdir(parents=True, exist_ok=True)
download_extension_tasks.append(
download_extension_by_id(
session, ext_path.extension_id, ext_path.version, extension_full_save_path
)
)
await asyncio.gather(*download_extension_tasks)
| 2.609375 | 3 |
tests/test_code_block.py | ralphbean/markdown-to-confluence | 9 | 12790266 | from textwrap import dedent
def test_code_block(script):
"""
Test code block.
"""
script.set_content(
dedent(
"""
```python
m = {}
m["x"] = 1
```
"""
)
)
assert (
'<ac:structured-macro ac:name="code" ac:schema-version="1">'
'<ac:parameter ac:name="language">python</ac:parameter>'
"<ac:plain-text-body><![CDATA["
"m = {}\n"
'm["x"] = 1\n'
"]]></ac:plain-text-body>"
"</ac:structured-macro>"
) in script.run()
def test_code_block_default_language(script):
"""
Test code block with a default language.
"""
script.set_content(
dedent(
"""
```
cd $HOME
```
"""
)
)
assert (
'<ac:structured-macro ac:name="code" ac:schema-version="1">'
'<ac:parameter ac:name="language">bash</ac:parameter>'
"<ac:plain-text-body><![CDATA["
"cd $HOME\n"
"]]></ac:plain-text-body>"
"</ac:structured-macro>"
) in script.run()
def test_code_block_avoid_escape(script):
"""
Avoid escaping code.
"""
script.set_content(
dedent(
"""
```yaml
'test': '<[{}]>'
```
"""
)
)
assert (
'<ac:structured-macro ac:name="code" ac:schema-version="1">'
'<ac:parameter ac:name="language">yaml</ac:parameter>'
"<ac:plain-text-body><![CDATA["
"'test': '<[{}]>'\n"
"]]></ac:plain-text-body>"
"</ac:structured-macro>"
) in script.run()
def test_code_block_escape(script):
"""
If code contains "]]>" (CDATA end), split it into multiple CDATA sections.
"""
script.set_content(
dedent(
"""
```xml
<![CDATA[TEST]]>
```
"""
)
)
assert (
'<ac:structured-macro ac:name="code" ac:schema-version="1">'
'<ac:parameter ac:name="language">xml</ac:parameter>'
"<ac:plain-text-body><![CDATA[<![CDATA[TEST]]>]]><![CDATA[\n]]></ac:plain-text-body>"
"</ac:structured-macro>"
) in script.run()
| 2.53125 | 3 |
wikiitem/views_show.py | shagun30/djambala-2 | 0 | 12790267 | <reponame>shagun30/djambala-2
# -*- coding: utf-8 -*-
"""
/dms/wikiitem/views_show.py
.. zeigt den Inhalt einer Wiki-Seite an
Django content Management System
<NAME>
<EMAIL>
Die Programme des dms-Systems koennen frei genutzt und den spezifischen
Beduerfnissen entsprechend angepasst werden.
0.01 17.03.2008 Beginn der Arbeit
0.02 18.03.2008 check_wiki_urls
0.03 20.03.2008 wikiitem_diff
0.03 21.03.2008 Urheber der Aenderungen werden angezeigt
0.04 07.05.2008 diff wird jeweils gegenueber der letzten Version angezeigt
"""
import datetime
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from dms.queries import get_role_by_user_path
from dms.queries import get_base_site_url
from dms.queries import get_user_by_id
from dms.utils_form import get_item_vars_show
from dms.utils_form import get_item_vars_edit
from dms.utils_form import get_folderish_vars_show
from dms.utils_base import show_link
from dms.views_comment import item_comment
from dms.wiki.utils import check_wiki_urls
from dms.wiki.queries import get_page_versions
from dms.diff import textDiff
from dms.wiki.utils import get_user_support
from dms_ext.extension import * # dms-Funktionen ueberschreiben
# -----------------------------------------------------
def wikiitem_diff(request,item_container):
""" zeigt die Versionen dieser Seite """
versions = get_page_versions(item_container)
diff_list = []
org_text = '<h4>%s</h4>\n' % item_container.item.title + item_container.item.text
curr_text = last_text = ''
for version in versions:
v_text = '<h4>%s</h4>\n' % version.title
v_text += version.text
diff_list.append({ 'version': version.version,
'text_diff': textDiff(v_text, curr_text),
'user': version.owner.get_full_name(),
'modified': version.modified.strftime('%d.%m.%Y %H:%M')
})
if last_text.strip() == '':
last_text = v_text
curr_text = v_text
app_name = 'wikiitem'
my_title = _(u'Versionen der Wiki-Seite')
content = diff_list
parent = item_container.get_parent()
name = item_container.item.name
wiki_page = name[:name.rfind('.html')]
dont_show = { 'no_version': True, 'no_new_items': True}
vars = get_folderish_vars_show(request, item_container, app_name, '',
get_user_support(parent, wiki_page, dont_show))
if parent.item.has_comments:
vars['comments'] = True
vars['text'] = ''
vars['image_url'] = ''
vars['slot_right_info'] = ''
vars['user_support_header'] = _(u'Mögliche Aktionen')
vars['sub_title'] = '%s: <i>%s</i>' % (_(u'Versionen der Wiki-Seite'), vars['title'])
vars['title'] = parent.item.title
vars['site_url'] = get_base_site_url()
vars['versions'] = diff_list
vars['org_text'] = textDiff(org_text, last_text)
vars['user'] = item_container.owner.get_full_name()
vars['modified'] = item_container.last_modified.strftime('%d.%m.%Y %H:%M')
return render_to_response ( 'app/wiki/show_version.html', vars )
# -----------------------------------------------------
def wikiitem_show(request,item_container):
""" zeigt Wiki-Seite """
app_name = 'wikiitem'
parent = item_container.get_parent()
name = item_container.item.name
url = parent.get_absolute_url() + '?wiki_page=' + name[:name.rfind('.html')]
return HttpResponseRedirect(url)
| 1.929688 | 2 |
igmp/igmp2/RouterState.py | pedrofran12/igmp | 3 | 12790268 | from threading import Timer
import logging
from igmp.packet.PacketIGMPHeader import PacketIGMPHeader
from igmp.packet.ReceivedPacket import ReceivedPacket
from igmp.rwlock.RWLock import RWLockWrite
from igmp.utils import TYPE_CHECKING
from . import igmp_globals
from .GroupState import GroupState
from .querier.Querier import Querier
from .nonquerier.NonQuerier import NonQuerier
if TYPE_CHECKING:
from igmp.InterfaceIGMP import InterfaceIGMP
class RouterState(object):
ROUTER_STATE_LOGGER = logging.getLogger('igmp.igmpv2.RouterState')
def __init__(self, interface: 'InterfaceIGMP'):
#logger
logger_extra = dict()
logger_extra['vif'] = interface.vif_index
logger_extra['interfacename'] = interface.interface_name
self.router_state_logger = logging.LoggerAdapter(RouterState.ROUTER_STATE_LOGGER, logger_extra)
# interface of the router connected to the network
self.interface = interface
# state of the router (Querier/NonQuerier)
self.interface_state = Querier
# state of each group
# Key: GroupIPAddress, Value: GroupState object
self.group_state = {}
self.group_state_lock = RWLockWrite()
# send general query
packet = PacketIGMPHeader(type=igmp_globals.MEMBERSHIP_QUERY,
max_resp_time=igmp_globals.QUERY_RESPONSE_INTERVAL * 10)
self.interface.send(packet.bytes())
# set initial general query timer
timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout)
timer.start()
self.general_query_timer = timer
# present timer
self.other_querier_present_timer = None
# Send packet via interface
def send(self, data: bytes, address: str):
self.interface.send(data, address)
############################################
# interface_state methods
############################################
def print_state(self):
return self.interface_state.state_name()
def set_general_query_timer(self):
"""
Set general query timer
"""
self.clear_general_query_timer()
general_query_timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout)
general_query_timer.start()
self.general_query_timer = general_query_timer
def clear_general_query_timer(self):
"""
Stop general query timer
"""
if self.general_query_timer is not None:
self.general_query_timer.cancel()
def set_other_querier_present_timer(self):
"""
Set other querier present timer
"""
self.clear_other_querier_present_timer()
other_querier_present_timer = Timer(igmp_globals.OTHER_QUERIER_PRESENT_INTERVAL, self.other_querier_present_timeout)
other_querier_present_timer.start()
self.other_querier_present_timer = other_querier_present_timer
def clear_other_querier_present_timer(self):
"""
Stop other querier present timer
"""
if self.other_querier_present_timer is not None:
self.other_querier_present_timer.cancel()
def general_query_timeout(self):
"""
General Query timer has expired
"""
self.interface_state.general_query_timeout(self)
def other_querier_present_timeout(self):
"""
Other Querier Present timer has expired
"""
self.interface_state.other_querier_present_timeout(self)
def change_interface_state(self, querier: bool):
"""
Change state regarding querier state machine (Querier/NonQuerier)
"""
if querier:
self.interface_state = Querier
self.router_state_logger.debug('change querier state to -> Querier')
else:
self.interface_state = NonQuerier
self.router_state_logger.debug('change querier state to -> NonQuerier')
############################################
# group state methods
############################################
def get_group_state(self, group_ip):
"""
Get object that monitors a given group (with group_ip IP address)
"""
with self.group_state_lock.genRlock():
if group_ip in self.group_state:
return self.group_state[group_ip]
with self.group_state_lock.genWlock():
if group_ip in self.group_state:
group_state = self.group_state[group_ip]
else:
group_state = GroupState(self, group_ip)
self.group_state[group_ip] = group_state
return group_state
def receive_v1_membership_report(self, packet: ReceivedPacket):
"""
Received IGMP Version 1 Membership Report packet
"""
igmp_group = packet.payload.group_address
self.get_group_state(igmp_group).receive_v1_membership_report()
def receive_v2_membership_report(self, packet: ReceivedPacket):
"""
Received IGMP Membership Report packet
"""
igmp_group = packet.payload.group_address
self.get_group_state(igmp_group).receive_v2_membership_report()
def receive_leave_group(self, packet: ReceivedPacket):
"""
Received IGMP Leave packet
"""
igmp_group = packet.payload.group_address
self.get_group_state(igmp_group).receive_leave_group()
def receive_query(self, packet: ReceivedPacket):
"""
Received IGMP Query packet
"""
self.interface_state.receive_query(self, packet)
igmp_group = packet.payload.group_address
# process group specific query
if igmp_group != "0.0.0.0" and igmp_group in self.group_state:
max_response_time = packet.payload.max_resp_time
self.get_group_state(igmp_group).receive_group_specific_query(max_response_time)
def remove(self):
"""
Remove this IGMP interface
Clear all state
"""
for group in self.group_state.values():
group.remove()
| 2.296875 | 2 |
model/get_raid.py | H3C/hdm-redfish-script | 4 | 12790269 | ###
# Copyright 2021 New H3C Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
# -*- coding: utf-8 -*-
from exception.ToolException import FailException
from utils.client import RedfishClient, RestfulClient
from utils.common import Constant
from utils.model import BaseModule
from utils import globalvar
from utils.predo import GetVersion
class Controller:
def __init__(self):
self.member_id = None
self.manufacturer = None
self.model = None
self.supported_device_protocols = None
self.sas_address = None
self.firmware_version = None
self.maintain_pd_fail_history = None
self.copy_back_state = None
self.jbod_state = None
self.min_stripe_size_bytes = None
self.max_stripe_size_bytes = None
self.memory_size_mib = None
self.supported_raid_levels = None
self.ddrecc_count = None
self.temperature_celsius = None
self.package_version = None
@property
def dict(self):
return {
"MemberId": self.member_id,
"Manufacturer": self.manufacturer,
"Model": self.model,
"SupportedDeviceProtocols": self.supported_device_protocols,
"SASAddress": self.sas_address,
"FirmwareVersion": self.firmware_version,
"MaintainPDFailHistory": self.maintain_pd_fail_history,
"CopyBackState": self.copy_back_state,
"JBODState": self.jbod_state,
"MinStripeSizeBytes": self.min_stripe_size_bytes,
"MaxStripeSizeBytes": self.max_stripe_size_bytes,
"MemorySizeMiB": self.memory_size_mib,
"SupportedRAIDLevels": self.supported_raid_levels,
"DDRECCCount": self.ddrecc_count,
"TemperatureCelsius": self.temperature_celsius,
"PackageVersion": self.package_version
}
def pack_ctrl(self, controller):
self.member_id = controller.get("MemberId", None)
self.manufacturer = controller.get("Manufacturer", None)
self.model = controller.get("Name", None)
self.supported_device_protocols = (
controller.get("SupportedDeviceProtocols", None))
self.firmware_version = controller.get("FirmwareVersion", None)
self.maintain_pd_fail_history = controller.get("MaintainPDFailHistory")
self.copy_back_state = controller.get("CopyBackState", None)
if (controller.get("Oem", None) and
isinstance(controller["Oem"].get("Public", None), dict)):
oem_info = controller["Oem"]["Public"]
self.jbod_state = oem_info.get("JBODState", None)
self.package_version = oem_info.get("PackageVersion", None)
self.min_stripe_size_bytes = oem_info.get("MinStripeSizeBytes",
None)
self.max_stripe_size_bytes = oem_info.get("MaxStripeSizeBytes",
None)
if self.maintain_pd_fail_history is None:
self.maintain_pd_fail_history = oem_info.get(
"MaintainPDFailHistory", None)
if self.copy_back_state is None:
self.copy_back_state = oem_info.get("CopyBackState", None)
if oem_info.get("DDRECCCount", None) is not None:
self.ddrecc_count = oem_info.get("DDRECCCount")
else:
self.ddrecc_count = controller.get("DDRECCCount", None)
self.memory_size_mib = oem_info.get("MemorySizeMiB", None)
if oem_info.get("SupportedRAIDLevels", None) is not None:
self.supported_raid_levels = (
", ".join(oem_info["SupportedRAIDLevels"]))
self.sas_address = oem_info.get("SASAddress", None)
self.temperature_celsius = controller.get("TemperatureCelsius", None)
class Raid:
def __init__(self):
self.name = None
self.location = "mainboard"
self.manufacturer = None
self.serial_number = None
self.state = None
self.health = None
self.controller = []
@property
def dict(self):
return {
"Name": self.name,
"Location": self.location,
"Manufacturer": self.manufacturer,
"SerialNumber": self.serial_number,
"State": self.state,
"Health": self.health,
"Controller": self.controller
}
def pack_raid_resource(self, resp):
self.name = resp.get("Name", None)
raid_ctrls = resp.get("StorageControllers", None)
if isinstance(raid_ctrls, list):
for controller in raid_ctrls:
ctrl = Controller()
ctrl.pack_ctrl(controller)
self.controller.append(ctrl)
self.serial_number = controller.get("SerialNumber", None)
self.manufacturer = controller.get("Manufacturer", None)
if controller.get("Status", None):
self.state = controller["Status"].get("State", None)
self.health = controller["Status"].get("Health", None)
class GetRaid(BaseModule):
def __init__(self):
super().__init__()
self.overall_health = None
self.maximum = None
self.raids = []
@property
def dict(self):
return {
"OverallHealth": self.overall_health,
"Maximum": None,
"Raids": self.raids
}
@GetVersion()
def run(self, args):
is_adapt_b01 = globalvar.IS_ADAPT_B01
if is_adapt_b01:
client = RestfulClient(args)
try:
self._get_b01_raid(client)
finally:
if client.cookie:
client.delete_session()
else:
client = RedfishClient(args)
self._get_raid(client)
if self.suc_list:
return self.suc_list
client = RestfulClient(args)
try:
self._get_health_info(client)
finally:
if client.cookie:
client.delete_session()
return self.suc_list
def _get_health_info(self, client):
status_dict = {
"0": "OK",
"1": "Caution",
"2": "Warning",
"3": "Critical"
}
url = "/api/health_info"
resp = client.send_request("GET", url)
if (isinstance(resp, dict) and
Constant.SUCCESS_0 == resp.get("cc", None)):
raid_health = status_dict.get(str(resp.get("disk", None)), None)
self.overall_health = raid_health
else:
self.err_list.append("Failure: failed to get overall health "
"status information")
raise FailException(*self.err_list)
def _get_raid(self, client):
systems_id = client.get_systems_id()
url = "/redfish/v1/Systems/%s/Storages" % systems_id
resp = client.send_request("GET", url)
if (isinstance(resp, dict) and
resp.get("status_code", None) in Constant.SUC_CODE):
raid_members = resp["resource"].get("Members", None)
if not raid_members:
self.suc_list.append("Success: raid card resource is empty")
return
for member in raid_members:
url = member.get("@odata.id", None)
resp = client.send_request("GET", url)
if (isinstance(resp, dict) and
resp.get("status_code", None) in Constant.SUC_CODE):
raid = Raid()
raid.pack_raid_resource(resp["resource"])
self.raids.append(raid)
else:
self.err_list.append("Failure: failed to get raid card "
"details")
raise FailException(*self.err_list)
else:
self.err_list.append("Failure: failed to get raid card"
" collection information")
raise FailException(*self.err_list)
def _get_b01_raid(self, client):
try:
url = "/api/settings/storageinfo"
resp1 = client.send_request("GET", url)
if isinstance(resp1, dict) and \
Constant.SUCCESS_0 == resp1.get("cc"):
raid_members = resp1.get("adapter")
if not raid_members:
self.suc_list.append(
"Success: raid card resource is empty")
return
raid = Raid()
ctrl = Controller()
name = raid_members.get("type")
raid.name = name
raid.serial_number = raid_members.get("serial")
url = "/api/system/pcie"
resp2 = client.send_request("GET", url)
if isinstance(resp2, dict) and Constant.SUCCESS_0 == \
resp1.get("cc"):
pcie_members = resp2.get("pcie_info", None)
for member in pcie_members:
if member.get("produce_name") == name:
raid.location = member.get("slot", None)
ctrl.member_id = member.get("device_id", None)
ctrl.model = name
ctrl.memory_size_mib = \
raid_members.get("ddr_size", None)
raid.controller.append(ctrl)
self.raids.append(raid)
else:
self.err_list.append("Failure: failed to get raid card"
" collection information")
raise FailException(*self.err_list)
finally:
if client.cookie:
client.delete_session()
| 1.6875 | 2 |
yaml_query/tests/test.py | majidaldo/yaml2table | 1 | 12790270 | <filename>yaml_query/tests/test.py
select_test = \
"""
select * from yaml
"""
def test('test.yaml','from ):
return
| 1.664063 | 2 |
seq2seq/corpus.py | shinoyuki222/torch-light | 310 | 12790271 | import torch
import argparse
import logging
from utils import corpora2idx, normalizeString
from const import *
class Dictionary(object):
def __init__(self):
self.word2idx = {
WORD[BOS]: BOS,
WORD[EOS]: EOS,
WORD[PAD]: PAD,
WORD[UNK]: UNK
}
self.idx = 4
def add(self, word):
if self.word2idx.get(word) is None:
self.word2idx[word] = self.idx
self.idx += 1
def __call__(self, sents, min_count):
words = [word for sent in sents for word in sent]
word_count = {w: 0 for w in set(words)}
for w in words: word_count[w]+=1
ignored_word_count = 0
for word, count in word_count.items():
if count <= min_count:
ignored_word_count += 1
continue
self.add(word)
return ignored_word_count
def __len__(self):
return self.idx
def __str__(self):
return "%s(size = %d)".format(self.__class__.__name__, len(self.idx))
class Corpus(object):
def __init__(self, save_data, max_len=20, min_word_count=1):
self._save_data = save_data
self._max_len = max_len
self._min_word_count = min_word_count
self.src_sents = None
self.tgt_sents = None
self.src_valid_sents = None
self.tgt_valid_sents = None
self.src_dict = Dictionary()
self.tgt_dict = Dictionary()
def parse(self):
def gather_file(file_, max_len):
en_sents, fra_sents, en_cut_count, fra_cut_count = [], [], 0, 0
for sentences in open(file_):
en_, fra_ = [normalizeString(s) for s in sentences.strip().split('\t')]
en_ws = [word for word in en_.strip().split()]
fra_ws = [word for word in fra_.strip().split()]
if len(en_ws) > max_len:
en_cut_count += 1
en_ws = en_ws[:max_len]
en_sents.append([WORD[BOS]] + en_ws + [WORD[EOS]])
if len(fra_ws) > max_len:
fra_cut_count += 1
fra_ws = fra_ws[:max_len]
fra_sents.append([WORD[BOS]] + fra_ws + [WORD[EOS]])
return fra_sents, en_sents, fra_cut_count, en_cut_count
max_len = self._max_len - 2
src_train, tgt_train, fra_cut_count, en_cut_count = gather_file('data/train', max_len)
src_valid, tgt_valid, _, _ = gather_file('data/test', max_len)
print("English data`s length out of range numbers - [{}]".format(en_cut_count))
print("French data`s length out of range numbers - [{}]".format(fra_cut_count))
src_ignore = self.src_dict(src_train, self._min_word_count)
tgt_ignore = self.tgt_dict(tgt_train, self._min_word_count)
if src_ignore != 0:
print("Ignored src word counts - [{}]".format(src_ignore))
if tgt_ignore != 0:
print("Ignored tgt word counts - [{}]".format(tgt_ignore))
self.src_train = src_train
self.tgt_train = tgt_train
self.src_valid = src_valid
self.tgt_valid = tgt_valid
def save(self):
data = {
'max_word_len': self._max_len,
'dict': {
'src': self.src_dict.word2idx,
'src_size': len(self.src_dict),
'tgt': self.tgt_dict.word2idx,
'tgt_size': len(self.tgt_dict)
},
'train': {
'src': corpora2idx(self.src_train, self.src_dict.word2idx),
'tgt': corpora2idx(self.tgt_train, self.tgt_dict.word2idx)
},
'valid': {
'src': corpora2idx(self.src_valid, self.src_dict.word2idx),
'tgt': corpora2idx(self.tgt_valid, self.tgt_dict.word2idx)
}
}
torch.save(data, self._save_data)
print('src corpora length - [{}] | target corpora length - [{}]'.format(len(self.src_dict), len(self.tgt_dict)))
def process(self):
self.parse()
self.save()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='seq2sqe corpora')
parser.add_argument('--save-data', type=str, default='data/seq2seq.pt',
help='path to save processed data')
parser.add_argument('--max-lenth', type=int, default=20,
help='max length of sentence')
parser.add_argument('--min-word-count', type=int, default=1,
help='min corpora count to discard')
args = parser.parse_args()
corpus = Corpus(args.save_data, args.max_lenth, args.min_word_count)
corpus.process()
| 2.59375 | 3 |
app.py | lit26/streamlit-image-label | 16 | 12790272 | import streamlit as st
import os
from streamlit_img_label import st_img_label
from streamlit_img_label.manage import ImageManager, ImageDirManager
def run(img_dir, labels):
st.set_option("deprecation.showfileUploaderEncoding", False)
idm = ImageDirManager(img_dir)
if "files" not in st.session_state:
st.session_state["files"] = idm.get_all_files()
st.session_state["annotation_files"] = idm.get_exist_annotation_files()
st.session_state["image_index"] = 0
else:
idm.set_all_files(st.session_state["files"])
idm.set_annotation_files(st.session_state["annotation_files"])
def refresh():
st.session_state["files"] = idm.get_all_files()
st.session_state["annotation_files"] = idm.get_exist_annotation_files()
st.session_state["image_index"] = 0
def next_image():
image_index = st.session_state["image_index"]
if image_index < len(st.session_state["files"]) - 1:
st.session_state["image_index"] += 1
else:
st.warning('This is the last image.')
def previous_image():
image_index = st.session_state["image_index"]
if image_index > 0:
st.session_state["image_index"] -= 1
else:
st.warning('This is the first image.')
def next_annotate_file():
image_index = st.session_state["image_index"]
next_image_index = idm.get_next_annotation_image(image_index)
if next_image_index:
st.session_state["image_index"] = idm.get_next_annotation_image(image_index)
else:
st.warning("All images are annotated.")
next_image()
def go_to_image():
file_index = st.session_state["files"].index(st.session_state["file"])
st.session_state["image_index"] = file_index
# Sidebar: show status
n_files = len(st.session_state["files"])
n_annotate_files = len(st.session_state["annotation_files"])
st.sidebar.write("Total files:", n_files)
st.sidebar.write("Total annotate files:", n_annotate_files)
st.sidebar.write("Remaining files:", n_files - n_annotate_files)
st.sidebar.selectbox(
"Files",
st.session_state["files"],
index=st.session_state["image_index"],
on_change=go_to_image,
key="file",
)
col1, col2 = st.sidebar.columns(2)
with col1:
st.button(label="Previous image", on_click=previous_image)
with col2:
st.button(label="Next image", on_click=next_image)
st.sidebar.button(label="Next need annotate", on_click=next_annotate_file)
st.sidebar.button(label="Refresh", on_click=refresh)
# Main content: annotate images
img_file_name = idm.get_image(st.session_state["image_index"])
img_path = os.path.join(img_dir, img_file_name)
im = ImageManager(img_path)
img = im.get_img()
resized_img = im.resizing_img()
resized_rects = im.get_resized_rects()
rects = st_img_label(resized_img, box_color="red", rects=resized_rects)
def annotate():
im.save_annotation()
image_annotate_file_name = img_file_name.split(".")[0] + ".xml"
if image_annotate_file_name not in st.session_state["annotation_files"]:
st.session_state["annotation_files"].append(image_annotate_file_name)
next_annotate_file()
if rects:
st.button(label="Save", on_click=annotate)
preview_imgs = im.init_annotation(rects)
for i, prev_img in enumerate(preview_imgs):
prev_img[0].thumbnail((200, 200))
col1, col2 = st.columns(2)
with col1:
col1.image(prev_img[0])
with col2:
default_index = 0
if prev_img[1]:
default_index = labels.index(prev_img[1])
select_label = col2.selectbox(
"Label", labels, key=f"label_{i}", index=default_index
)
im.set_annotation(i, select_label)
if __name__ == "__main__":
custom_labels = ["", "dog", "cat"]
run("img_dir", custom_labels) | 2.578125 | 3 |
m3c/__init__.py | rkingan/m3c | 1 | 12790273 | r"""
Library routines for minimally 3-connected graph generation.
This program requires cython.
"""
import pyximport
pyximport.install(language_level=3)
| 1.75 | 2 |
main.py | felamaslen/neural-net | 0 | 12790274 | <filename>main.py
#!/usr/bin/python3
from environment import Environment
env = Environment()
| 1.742188 | 2 |
python/gpusim_search.py | Mariewelt/gpusimilarity | 61 | 12790275 | from PyQt5 import QtCore, QtNetwork
import random
from gpusim_utils import smiles_to_fingerprint_bin
def parse_args():
import argparse
parser = argparse.ArgumentParser(description="Sample GPUSim Server - "
"run an HTTP server that loads fingerprint data onto GPU and " #noqa
"responds to queries to find most similar fingperints.") #noqa
parser.add_argument('dbname', help=".fsim file containing fingerprint "
"data to be searched")
parser.add_argument('dbkey', default="", help="Key for fsim file")
return parser.parse_args()
def main():
args = parse_args()
app = QtCore.QCoreApplication([])
socket = QtNetwork.QLocalSocket(app)
smiles = input("Smiles: ")
dbcount = 1
dbname = args.dbname
dbkey = args.dbkey
socket.connectToServer('gpusimilarity')
while smiles and smiles.lower() not in ('quit', 'exit'):
return_count = 20
similarity_cutoff = 0
fp_binary, _ = smiles_to_fingerprint_bin(smiles)
fp_qba = QtCore.QByteArray(fp_binary)
output_qba = QtCore.QByteArray()
output_qds = QtCore.QDataStream(output_qba, QtCore.QIODevice.WriteOnly)
output_qds.writeInt(dbcount)
output_qds.writeString(dbname.encode())
output_qds.writeString(dbkey.encode())
request_num = random.randint(0, 2**31)
output_qds.writeInt(request_num)
output_qds.writeInt(return_count)
output_qds.writeFloat(similarity_cutoff)
output_qds << fp_qba
socket.write(output_qba)
socket.flush()
socket.waitForReadyRead(30000)
output_qba = socket.readAll()
smiles = []
scores = []
ids = []
data_reader = QtCore.QDataStream(output_qba)
returned_request = data_reader.readInt()
if request_num != returned_request:
raise RuntimeError("Incorrect result ID returned!")
return_count = data_reader.readInt()
approximate_matches = data_reader.readUInt64()
for i in range(return_count):
smiles.append(data_reader.readString())
for i in range(return_count):
ids.append(data_reader.readString())
for i in range(return_count):
scores.append(data_reader.readFloat())
print("Approximate total matches: {0}, returning {1}".format(
approximate_matches, return_count))
for cid, smi, score in zip(ids, smiles, scores):
print("{0} {1}: {2}".format(cid, smi, score))
smiles = input("Smiles: ")
if __name__ == '__main__':
main()
| 2.796875 | 3 |
models/credentials.py | govle-192-21-2/govle | 0 | 12790276 | <gh_stars>0
from abc import ABC
from dataclasses import dataclass, field
from typing import List
@dataclass
class LearningEnvCredentials(ABC):
pass
@dataclass
class GoogleCredentials(LearningEnvCredentials):
# Associated user ID
user_id: str = ''
# Associated e-mail address
email: str = ''
# OAuth2 access token
token: str = ''
# OAuth2 refresh token
refresh_token: str = ''
# OAuth2 token URI
token_uri: str = ''
# OAuth2 client ID
client_id: str = ''
# OAuth2 client secret
client_secret: str = ''
# OAuth2 scopes
scopes: List[str] = field(default_factory=list)
# OpenID token
id_token: str = ''
# OAuth2 expiry
expiry: str = ''
@dataclass
class MoodleCredentials(LearningEnvCredentials):
# Username
username: str = ''
# Token
password: str = ''
# Moodle server URL
server: str = ''
| 2.65625 | 3 |
aoc/cli/commands/new.py | juanrgon/advent-of-code | 3 | 12790277 | import click
import pendulum
import subprocess
import os
from pathlib import Path
from aoc.script import Script
import aoc.paths
import pendulum
@click.command()
@click.option("-y", "--year", type=str)
@click.option("-d", "--day", type=str)
def new(year: str, day: str):
"""Create new script for AOC"""
if not year:
year = click.prompt(f"Year", default=_get_year())
if not day:
day = click.prompt(f"Day", default=_get_day(year))
script_file = _new_script(year=year, day=day)
print(f"Created script {script_file}!")
if "EDITOR" in os.environ:
subprocess.Popen(
f"$EDITOR {script_file}",
shell=True,
)
def _get_year() -> int:
east = "US/Eastern"
now = pendulum.now(tz=east)
if now.month == 12:
if now.hour == 23:
# if it's right before 12AM in December, use tomorrow as the default date
# because it's almost AOC time
return pendulum.tomorrow(east).year
elif now.hour == 0:
# if it's after 12AM in December, use yestrday as the default date because
# you probably want to do yesteray's date
return pendulum.today(east).year
return int(os.environ.get("AOC_YEAR", 0)) or now.year
def _get_day(year: str) -> str:
year_dir = Path(__file__).parent.parent.parent.parent / str(year)
if not year_dir.exists():
return "1"
else:
return str(max([int(p.stem) for p in year_dir.iterdir()]) + 1)
def _new_script(year: str, day: str, overwrite: bool = False) -> Path:
day = day.zfill(2)
script = Script.from_year_day(year=year, day=day)
script_dir = script.path.parent
if script_dir.parent.exists() and not overwrite:
if pendulum.now() > pendulum.datetime(year=int(year), month=12, day=int(day)):
print("Allow override of solution file, because script date in the future")
else:
raise RuntimeError(f"Script already exists for {year}-{day}!!!")
script_dir.mkdir(parents=True, exist_ok=True)
script.path.touch(exist_ok=True)
script.path.write_text(
(aoc.paths.AOC_PKG / "templates" / "script" / script.path.name).read_text()
)
return script.path
| 2.890625 | 3 |
src/semnav/learning/behavior_net/behavior_rnn.py | kchen92/graphnav | 17 | 12790278 | <filename>src/semnav/learning/behavior_net/behavior_rnn.py
import torch
import torch.nn as nn
import torch.nn.functional as F
class BehaviorRNN(nn.Module):
def __init__(self, rnn_type, hidden_size, num_layers):
super(BehaviorRNN, self).__init__()
self.is_recurrent = True
self.hidden_size = hidden_size
self.n_layers = num_layers
self.rnn_type = rnn_type
if self.rnn_type == 'rnn':
rnn_class = nn.RNN
elif self.rnn_type == 'gru':
rnn_class = nn.GRU
elif self.rnn_type == 'lstm':
rnn_class = nn.LSTM
else:
raise ValueError('Invalid RNN type.')
# Input resolution: 320x240
self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, stride=2, padding=0)
self.conv1_bn = nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=2, padding=0)
self.conv2_bn = nn.BatchNorm2d(64)
self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=0)
self.conv3_bn = nn.BatchNorm2d(128)
self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=0)
self.conv4_bn = nn.BatchNorm2d(256)
self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=0)
self.conv5_bn = nn.BatchNorm2d(512)
self.conv6 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=2, padding=0)
self.conv6_bn = nn.BatchNorm2d(512)
rnn_input_size = 512 * 2 * 3
self.rnn = rnn_class(input_size=rnn_input_size, hidden_size=hidden_size,
num_layers=self.n_layers)
self.fc4 = nn.Linear(hidden_size, 2)
def forward(self, cur_input, cur_hidden):
"""Forward pass a single input (seq_len == 1) through the CNN-RNN.
Args:
cur_input: Input of shape (batch_size x n_channels x height x width). Since we would
like to reuse this code for train and test, we only process one input at a time.
Thus, seq_len = 1 and the input should be (1 x batch_size x input_size).
cur_hidden: Current (previous?) hidden state.
Returns:
output: Hidden state for each output. It has shape (seq_len x batch_size x hidden_size).
Since our seq_len is usually 1, this will generally be of shape
(1 x batch_size x hidden_size).
"""
# CNN encoder
x = cur_input
x = F.relu(self.conv1_bn(self.conv1(x)))
x = F.relu(self.conv2_bn(self.conv2(x)))
x = F.relu(self.conv3_bn(self.conv3(x)))
x = F.relu(self.conv4_bn(self.conv4(x)))
x = F.relu(self.conv5_bn(self.conv5(x)))
x = F.relu(self.conv6_bn(self.conv6(x)))
# x now has size torch.Size([32, 512, 2, 3])
batch_size = x.size(0)
x = torch.reshape(x, (batch_size, -1)) # Flatten
x = torch.unsqueeze(x, dim=0) # Add a seq_len dimension of size 1
# RNN
output, hidden = self.rnn(x, cur_hidden)
# output should have shape torch.Size([1, batch_size, hidden_size])
# hidden should have shape torch.Size([self.n_layers, batch_size, hidden_size])
output = torch.squeeze(output, dim=0)
output = self.fc4(output)
return output, hidden
def initial_hidden(self, batch_size):
"""Initial hidden state. Note that the default hidden state is zeros if not provided.
"""
if (self.rnn_type == 'rnn') or (self.rnn_type == 'gru'):
return torch.zeros(self.n_layers, batch_size, self.hidden_size)
elif self.rnn_type == 'lstm':
return [torch.zeros(self.n_layers, batch_size, self.hidden_size) for _ in range(2)] # 2 because cell state and hidden state
| 2.734375 | 3 |
third_party/upb/docs/render.py | echo80313/grpc | 515 | 12790279 | <reponame>echo80313/grpc<gh_stars>100-1000
#!/usr/bin/env python3
import subprocess
import sys
import shutil
import os
if len(sys.argv) < 2:
print("Must pass a filename argument")
sys.exit(1)
in_filename = sys.argv[1]
out_filename = in_filename.replace(".in.md", ".md")
out_dir = in_filename.replace(".in.md", "")
if in_filename == out_filename:
print("File must end in .in.md")
sys.exit(1)
if os.path.isdir(out_dir):
shutil.rmtree(out_dir)
os.mkdir(out_dir)
file_num = 1
with open(out_filename, "wb") as out_file, open(in_filename, "rb") as in_file:
for line in in_file:
if line.startswith(b"```dot"):
dot_lines = []
while True:
dot_line = next(in_file)
if dot_line == b"```\n":
break
dot_lines.append(dot_line)
dot_input = b"".join(dot_lines)
svg_filename = out_dir + "/" + str(file_num) + ".svg"
svg = subprocess.check_output(['dot', '-Tsvg', '-o', svg_filename], input=dot_input)
out_file.write(b"<div align=center>\n")
out_file.write(b"<img src='%s'/>\n" % (svg_filename.encode('utf-8')))
out_file.write(b"</div>\n")
file_num += 1
else:
out_file.write(line)
| 2.453125 | 2 |
ch2/echoclient.py | cybaek/twisted-network-programming-essentials-2nd-edition-python3 | 0 | 12790280 | <reponame>cybaek/twisted-network-programming-essentials-2nd-edition-python3
from twisted.internet import reactor, protocol
class EchoClient(protocol.Protocol):
def connectionMade(self):
self.transport.write(u'Hello, world!'.encode('utf-8'))
def dataReceived(self, data):
print("Server said: ", data.decode('utf-8'))
self.transport.loseConnection()
class EchoFactory(protocol.ClientFactory):
def buildProtocol(self, addr):
return EchoClient()
def clientConnectionFailed(self, connector, reason):
print("Connection Failed")
reactor.stop()
def clientConnectionLost(self, connector, reason):
print("Client Connection Lost")
reactor.stop()
reactor.connectTCP("localhost", 8000, EchoFactory())
reactor.run()
| 3.15625 | 3 |
login.py | lij321/test | 0 | 12790281 | <gh_stars>0
a = 2
print(a)
b = "hello"
print(b)
c = 123123
print(c)
d = c
print(d)
def fun()
print("*"*5)
fun()
| 2.9375 | 3 |
setup.py | imposeren/pynames | 0 | 12790282 | # coding: utf-8
import setuptools
setuptools.setup(
name = 'Pynames',
version = '0.1.0',
author = '<NAME>',
author_email = '<EMAIL>',
packages = setuptools.find_packages(),
url = 'https://github.com/Tiendil/pynames',
license = 'LICENSE',
description = "characters' name generation library",
long_description = open('README.md').read(),
include_package_data = True, # setuptools-git MUST be installed
test_suite = 'tests',
install_requires = ['unicodecsv'],
# package_data = { '': ['*.json'] }
)
| 1.242188 | 1 |
setup.py | DahlitzFlorian/python-color-changer | 3 | 12790283 | from setuptools import setup
setup(
name='color-changer',
version='1.0.5',
packages=['colorchanger', ],
license='MIT',
description='Reads in an image and swap specified colors ',
long_description=open('README.rst').read(),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/DahlitzFlorian/python-color-changer',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='color changer color-changer opencv numpy',
install_requires=[
'click',
'numpy'
],
) | 1.507813 | 2 |
setup.py | sgielen/trac-plugin-softduedate | 0 | 12790284 | from setuptools import setup, find_packages
setup(
name='TracSoftDueDate', version='1.0',
packages=find_packages(exclude=['*.tests*']),
entry_points = {
'trac.plugins': [
'softduedate = softduedate',
],
},
)
| 1.226563 | 1 |
AktuelFinder/operations/PdfTask.py | maysu1914/AktuelFinder | 1 | 12790285 | import os
from urllib.parse import urlparse
import requests
from PyPDF2 import PdfFileReader
def download_pdf(url):
parse = urlparse(url)
base_url = parse.scheme + '://' + parse.netloc
try:
redirect = requests.get(url, allow_redirects=False)
except requests.exceptions.ConnectionError as e:
print(e, 2)
raise
if redirect.status_code == 302:
url = base_url + redirect.headers['location']
else:
pass
filename = url.split('/')[-1]
if not is_pdf(filename):
return None
if os.path.isfile(filename):
return filename.strip()
else:
print(filename, 'downloading')
request = requests.get(url)
# https://stackoverflow.com/questions/34503412/download-and-save-pdf-file-with-python-requests-module
with open(filename, 'wb') as f:
f.write(request.content)
return filename.strip()
def is_pdf(filename):
if filename[-4:] != '.pdf':
return False
else:
return True
def get_pdf_title(filename):
# http://www.blog.pythonlibrary.org/2018/04/10/extracting-pdf-metadata-and-text-with-python/
with open(filename, 'rb') as f:
pdf = PdfFileReader(f)
info = pdf.getDocumentInfo()
pdf.getNumPages()
title = info.title if info.title else filename
return title.strip()
| 3.46875 | 3 |
cmpds.py | jlinoff/cmpds | 0 | 12790286 | #!/usr/bin/env python
r'''
Compare two datasets to determine whether there is a significant
difference between them for a specific confidence level using the
t-test methodology for unpaired observations.
Please note that this is not, strictly, a t-test because it switches
over to the standard normal distribution (SND) when the number of
effective degrees of freedom (DOF) is larger than 32.
It is really useful for determining whether runtime or memory use has
changed between two different versions of software. The datasets are
completely independent of the program (i.e. the data values are
created by tools like /usr/bin/time) so they can be used in a black
box testing environment.
Each dataset contains a series of numbers to be compared. The numbers
must be greater than 0. That is a reasonable constraint given that
they typically represent something like elapsed time or memory used.
The size of the datasets can be different because we are treating
the samples as unpaired observations (t-test) but the smallest one
must have more than 2 entries. Typically you would like to have
at least 50 entries in each dataset.
You must specify the confidence level that you want to use to
determine whether the datasets differ. Typical confidence levels are 0.90
(90%), 0.95 (95%) and 0.99 (99%). The tool will automatically
determine the associated z-value based on the confidence level and the
number of effective degrees of freedom. No table look ups are
necessary. The methodology used to calculate the z-value is described
in detail here: https://github.com/jlinoff/ztables.
EXAMPLE 1 - two datasets in one file
Here is an example to make sense of it all.
We want to compare two versions of the foobar program to see if the
second version is faster than the first for the same inputs. The
versions are 1.1 and 1.2. The program takes about 2 minutes to run
(120 seconds) and we want to determine whether v1.2 is faster.
The table below shows sample data from 10 runs for each version.
# Run time data collected for v1.1 and v1.2.
#
# Num v1.1 v1.2
# === ======= =======
1 119.041 117.038
2 119.670 119.733
3 120.675 118.346
4 118.628 117.261
5 120.363 118.863
6 118.076 117.545
7 120.539 119.751
8 118.880 119.042
9 120.164 116.203
10 119.134 118.049
For this example we assume that the data is stored in a single file
but normally it is easier to have it exist in two separate files
because, by default, the tool looks at the first token on each line
and collects it if the token is a floating point number. When the data
is not in a single column in a file, you must explicitly specify
which column to collect. In this case, the first dataset is in column
2 and the second dataset is in column 3 of the same file. Blank lines
and lines where the token is not a floating point number are ignored.
Here is what the run looks like:
$ ./cmpds.py -c 0.95 -k 2 3 data.txt
With 95.0% confidence, dataset-2 is smaller than dataset-1 by about 1.1%.
As you can see, dataset-2 (v1.2) is slightly faster.
Note that we use -k to specify the columns because -c is already
reserved for specifying the confidence level.
If you reverse the columns, you will get the opposite result:
$ ./cmpds.py -c 0.95 -k 3 2 data.txt
With 95.0% confidence, dataset-2 is larger than dataset-1 by about 1.1%.
EXAMPLE 2 - datasets in separate files
A more realistic example would be running a program called blackbox-v1
50 times and collecting the timing output to a file and then running
blackbox-v2 and collecting its output. Here is how you might do it:
$ rm -f /tmp/v1.out /tmp/v2.out
$ for((i=1;i<=50;i++)) ; do printf '\nExp %03d\n' $i ; /usr/bin/time -p blackbox-v1 >> /tmp/v1.out ; done
$ for((i=1;i<=50;i++)) ; do printf '\nExp %03d\n' $i ; /usr/bin/time -p blackbox-v2 >> /tmp/v2.out ; done
We can now capture the real run time data by simply grepping out the
data like this:
$ grep -w ^real /tmp/v1.out > /tmp/v1.ds
$ grep -w ^real /tmp/v2.out > /tmp/v2.ds
The above command takes advantage of the fact that the POSIX time format
(-p) outputs the time data on 3 separate lines as shown in this simple
example:
$ /usr/bin/time -p sleep 0.3
real 0.30
user 0.00
sys 0.00
At this point we have the unpaired observations from both runs in two
different files so we can use cmpds.py to figure out whether v2 is
faster than v1 at a 95% confidence level.
$ ./cmpds.py -c 0.95 /tmp/v1.ds /tmp/v2.ds
With 95.0% confidence, dataset-2 is smaller than dataset-1 by about 1.3%.
That tells us that v2 is indeed slightly faster.
'''
# License: MIT Open Source
# Copyright (c) 2016 by <NAME>
# REFERENCES:
# <NAME> (1991). "The Art of Computer Systems Performance Analysis", <NAME>iley and Sons, New York.
import argparse
import datetime
import inspect
import math
import os
import sys

from functools import reduce  # reduce is not a builtin in Python 3
#VERSION='0.1' # Initial load.
VERSION='0.2' # Made the std dev calculation simpler.
# ================================================================
#
# Message utility functions.
#
# ================================================================
def _msg(prefix, frame, msg, ofp=sys.stdout):
'''
Base for printing messages.
'''
lineno = inspect.stack()[frame][2]
now = datetime.datetime.now()
ofp.write('{!s:<26} {} {:>5} - {}\n'.format(now, prefix, lineno, msg))
def info(msg, f=1):
'''
Write an info message to stdout.
'''
_msg('INFO', f+1, msg)
def infov(opts, msg, f=1):
'''
    Write an info message to stdout if verbose mode is enabled.
'''
if opts.verbose > 0:
_msg('INFO', f+1, msg)
def warn(msg, f=1):
'''
Write a warning message to stdout.
'''
_msg('WARNING', f+1, msg)
def err(msg, f=1):
'''
Write an error message to stderr and exit.
'''
_msg('ERROR', f+1, msg, sys.stderr)
sys.exit(1)
# ================================================================
#
# Statistical utility functions.
# See https://github.com/jlinoff/ztables for background.
#
# ================================================================
def gamma(x):
'''
Gamma function.
Uses the Lanczos approximation and natural logarithms.
For integer values of x we can use the exact value of (x-1)!.
gamma(1/2) = 1.77245385091
gamma(3/2) = 0.886226925453
gamma(5/2) = 1.32934038818
gamma(7/2) = 3.32335097045
gamma(4) = 6.0
'''
    if (x - int(x)) == 0:
        # Optimization for integer values: (x-1)!.
        # The initial value of 1.0 also handles gamma(1) == 0! == 1.
        return reduce(lambda a, b: a * b, [float(i) for i in range(1, int(x))], 1.0)
# Lanczos approximation, page 214 of Numerical Recipes in C.
c = [76.18009172947146,
-86.50532032941677,
24.01409824083091,
-1.231739572450155,
0.1208650973866179e-2,
-0.5395239384953e-5,
]
c0 = 1.000000000190015
c1 = 2.5066282746310005
x1 = float(x) + 5.5
x2 = (float(x) + 0.5) * math.log(x1)
x3 = x1 - x2
x4 = c0
x5 = float(x)
for i in range(6):
x5 += 1.0
x4 += c[i] / x5
x6 = math.log((c1 * x4) / float(x))
x7 = -x3 + x6 # ln(gamma(x))
g = math.exp(x7)
return g
def pdf_t(x, dof):
'''
Calculate the probability density function (PDF) at x for a
student-t distribution with dof degrees of freedom.
This is basically the height of the curve at x.
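    For example, pdf_t(0, 10) is approximately 0.389.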
'''
assert dof > 2
x1 = gamma((float(dof) + 1.0) / 2.0)
x2 = math.sqrt(dof * math.pi) * gamma((float(dof) / 2.0))
x3 = 1.0 + (float((x ** 2)) / float(dof))
x4 = float((dof + 1)) / 2.0
x5 = x3 ** -x4
y = (x1 * x5) / x2
return y
def pdf_nd(x, s=1.0, u=0.0):
'''
Calculate the probability density function (PDF) for a normal
distribution.
s = standard deviation (1 for a standard normal distribution)
u = mean (0 for a standard normal distribution)
This is the height of the curve at x.
'''
dx = float(x) - float(u)
dx2 = dx * dx
xden = 2 * (s ** 2)
den = s * math.sqrt(2 * math.pi)
exp = math.e ** ( -dx2 / xden )
y = exp / den
return y
def pdf_snd(x):
'''
Calculate the probability density function (PDF) for a standard
normal distribution.
s = standard deviation (1 for a standard normal distribution)
u = mean (0 for a standard normal distribution)
This is the height of the curve at x.
It is exactly the same as pdf_nd(x, 1, 0) but is somewhat more
efficient.
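    For example, pdf_snd(0) is approximately 0.3989 (1 / sqrt(2 * pi)).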
'''
dx2 = float(x) ** 2
den = math.sqrt(2 * math.pi)
exp = math.e ** - (dx2 / 2)
y = exp / den
return y
def area_under_curve(x1, x2, intervals, fct, *args, **kwargs):
'''
Calculate the approximate area under a curve using trapezoidal
approximation.
It breaks the interval between x1 and x2 into trapezoids whose
width is fixed (proportional to how the interval is sliced). The
height of each rectangle is the pdf function value for x at the
start of the interval. The accumulation of the areas provides an
estimate of the area under the curve.
The greater the number of intervals the better the estimate is at
the cost of performance.
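    For example, area_under_curve(-1.96, 1.96, 10000, pdf_snd) is
    approximately 0.95, the central area of the standard normal
    distribution.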
'''
assert x2 > x1 # just a sanity check
assert intervals > 1 # another sanity check
total_area = 0.0
width = (float(x2) - float(x1)) / float(intervals)
x = float(x1)
py = float(fct(x, *args, **kwargs))
for i in range(intervals):
y = float(fct(x, *args, **kwargs))
rectangle_area = width * y # area of rectangle at x with height y
triangle_area = ((y - py) * width) / 2.0 # adjustment based on height change
total_area += rectangle_area + triangle_area # trapezoid area
x += width # advance to the next edge
py = y # remember the previous height
return total_area
def binary_search_for_z(probability, tolerance, maxtop, minval, iterations, v, fct, *args):
'''
Get the z value that matches the specified percentage.
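    The search narrows in on the z value for which the central (two-sided)
    probability 1 - 2*(1 - P(minval < x < z)) matches the requested
    probability to within the specified tolerance.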
'''
# Binary search to find the closest value.
z = 0.0
adjustment = float(maxtop) / 2.0
top = maxtop
bot = 0.0
diff = tolerance * 2 # start the loop
while diff > tolerance:
mid = bot + ((top - bot) / 2.0)
z = mid - adjustment
q = area_under_curve(minval, z, iterations, fct, *args)
cp = 1.0 - (2.0 * (1.0 - q))
diff = abs(cp - probability)
if v:
info('p={}, cp={}, t={:f}, mt={}, mv={}, i={}, top={}, bot={}, mid={}, z={}, q={}'.format(
probability, cp, tolerance, maxtop, minval, iterations, top, bot, mid, z, q))
if probability < cp:
# It is to the right.
top = mid
elif probability > cp:
# It is to the left.
bot = mid
else:
break
# Sanity checks.
assert top <= maxtop
assert bot >= 0
return z
# ================================================================
#
# t-test implementation
#
# ================================================================
def ttest(a, b, opts):
'''
Analyze unpaired observations to determine whether they are
significantly different.
'''
cl = opts.conf
infov(opts, 'a: {:>3} {}'.format(len(a), a))
infov(opts, 'b: {:>3} {}'.format(len(b), b))
infov(opts, 'confidence level: {:.1f}%'.format(100.*cl))
na = float(len(a))
nb = float(len(b))
infov(opts, 'na: {}'.format(na))
infov(opts, 'nb: {}'.format(nb))
# means
ma = sum(a) / na
mb = sum(b) / nb
infov(opts, 'mean a: {:.3f}'.format(ma))
infov(opts, 'mean b: {:.3f}'.format(mb))
# variances
vara = sum([(xa - ma) ** 2 for xa in a]) / float(na - 1.)
varb = sum([(xb - mb) ** 2 for xb in b]) / float(nb - 1.)
infov(opts, 'variance a: {:.3f}'.format(vara))
infov(opts, 'variance b: {:.3f}'.format(varb))
# standard deviations
stddeva = math.sqrt(vara)
stddevb = math.sqrt(varb)
infov(opts, 'stddev a: {:.3f}'.format(stddeva))
infov(opts, 'stddev b: {:.3f}'.format(stddevb))
# mean difference
md = ma - mb
infov(opts, 'mean diff: {:.3f}'.format(md))
# standard deviation of the mean difference
sa2qna = stddeva**2 / na
sb2qnb = stddevb**2 / nb
sdmd = math.sqrt(sa2qna + sb2qnb)
infov(opts, 'stddev of the mean diff: {:.3f}'.format(sdmd))
# effective degrees of freedom
dof_num = (sa2qna + sb2qnb)**2
dof_dena = (1. / (na + 1.)) * sa2qna**2
dof_denb = (1. / (nb + 1.)) * sb2qnb**2
dof = (dof_num / (dof_dena + dof_denb)) - 2.0
infov(opts, 'effective DOF: {:.2f}'.format(dof))
dofr = int('{:.0f}'.format(dof))
infov(opts, 'effective DOF (rounded): {}'.format(dofr))
# confidence interval for the mean difference
z = 0.0
# allow the user to play with the parameters
t = opts.internal[0]
lb = opts.internal[1]
ub = opts.internal[2]
intervals = int(opts.internal[3])
maxv = 2 * round(abs(lb) + ub + 0.5, 0)
minv = -maxv
infov(opts, 'internal threshold: {:.1f}'.format(t))
infov(opts, 'internal lower bound: {}'.format(lb))
infov(opts, 'internal upper bound: {}'.format(ub))
infov(opts, 'internal intervals: {}'.format(intervals))
infov(opts, 'internal minval: {}'.format(minv))
infov(opts, 'internal maxval: {}'.format(maxv))
v = True if opts.verbose > 1 else False
if dofr > opts.snd_threshold:
# use standard normal distribution (SND)
infov(opts, 'use standard normal distribution (SND)')
z = binary_search_for_z(cl, t, maxv, minv, intervals, v, pdf_snd)
else:
infov(opts, 'use t-{} distribution'.format(dofr))
z = binary_search_for_z(cl, t, maxv, minv, intervals, v, pdf_t, dof)
x = (1. - cl) / 2.
q = cl + x
infov(opts, '{:.3f}-quantile of t-variate with {} degrees of freedom: {:.2f}'.format(q, dofr, z))
cllb = md - z * sdmd
club = md + z * sdmd
    infov(opts, '{:.1f}% confidence interval for difference: [{:.3f} .. {:.3f}]'.format(100.*cl, cllb, club))
crosses_zero = cllb < 0 < club
significant = not crosses_zero
infov(opts, 'crosses zero: {}'.format(crosses_zero))
infov(opts, 'reject the null hypothesis: {}'.format(significant))
# Report the result.
clp = cl * 100.
if significant:
per = 100. * abs(md) / ma
infov(opts, 'percentage: {}'.format(per))
if club < 0:
print('With {:.1f}% confidence, dataset-2 is larger than dataset-1 by about {:,.1f}%.'.format(clp, per))
else:
print('With {:.1f}% confidence, dataset-2 is smaller than dataset-1 by about {:,.1f}%.'.format(clp, per))
else:
print('With {:.1f}% confidence, there is no significant difference between the datasets.'.format(clp))
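# Hedged worked example of the decision rule above (numbers are illustrative):
# with md = 2.0, sdmd = 0.5 and z = 1.96, the 95% confidence interval is
# 2.0 +/- 1.96 * 0.5 = [1.02 .. 2.98]. That interval does not cross zero, so
# the difference would be reported as significant; an interval such as
# [-0.3 .. 0.7] would not be.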
# ================================================================
#
# Options
#
# ================================================================
def getopts():
'''
Get the command line options using argparse.
'''
# Make sure that the confidence level is in the proper range.
def get_conf_level():
class GetConfLevel(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if 0. < values < 1.0:
setattr(args, self.dest, values)
else:
msg = 'argument "{}" out of range (0..1)'.format(self.dest)
parser.error(msg)
return GetConfLevel
# Trick to capitalize the built-in headers.
# Unfortunately I can't get rid of the ":" reliably.
def gettext(s):
lookup = {
'usage: ': 'USAGE:',
'positional arguments': 'POSITIONAL ARGUMENTS',
'optional arguments': 'OPTIONAL ARGUMENTS',
'show this help message and exit': 'Show this help message and exit.\n ',
}
return lookup.get(s, s)
argparse._ = gettext # to capitalize help headers
base = os.path.basename(sys.argv[0])
name = os.path.splitext(base)[0]
usage = '\n {0} [OPTIONS] <DATASET-1> [<DATASET-2>]'.format(base)
desc = 'DESCRIPTION:{0}'.format('\n '.join(__doc__.split('\n')))
epilog = r'''
EXAMPLES:
# Example 1: help
$ {0} -h
# Example 2: No significant difference with 95% confidence.
# The dataset is used.
$ ./gends.py 10 100 120 > ds-10-100-120.txt
$ {0} ds-10-100-120.txt ds-10-100-120.txt
With 95.0% confidence, there is no significant difference between the datasets.
# Example 3: Dataset-2 is slightly smaller (has faster runtime) with 95% confidence.
# Both runs have 50 samples.
# The data is specifically generated to show the difference.
$ ./gends.py 50 110 112 > ds-50-110-112.txt
$ ./gends.py 50 108 112 > ds-50-108-112.txt
$ {0} ds-50-110-112.txt ds-50-108-112.txt
With 95.0% confidence, dataset-2 is smaller than dataset-1 by about 0.8%.
# Example 4: Dataset-2 is slightly smaller (has faster runtime) with 99% confidence.
# Both runs have 50 samples.
    $ {0} -c 0.99 ds-50-110-112.txt ds-50-108-112.txt
With 99.0% confidence, dataset-2 is smaller than dataset-1 by about 0.8%.
# Example 5: Dataset-1 and dataset-2 are in the same file.
$ cat data.txt
# v1.1 v1.2
# ======= =======
1 119.041 117.038
2 119.670 119.733
3 120.675 118.346
4 118.628 117.261
5 120.363 118.863
6 118.076 117.545
7 120.539 119.751
8 118.880 119.042
9 120.164 116.203
10 119.134 118.049
$ {0} --cols 2 3 data.txt
With 95.0% confidence, dataset-2 is smaller than dataset-1 by about 1.1%.
'''.format(base)
afc = argparse.RawTextHelpFormatter
parser = argparse.ArgumentParser(formatter_class=afc,
description=desc[:-2],
usage=usage,
epilog=epilog)
parser.add_argument('-c', '--conf',
type=float,
default=0.95,
action=get_conf_level(),
metavar=('FLOAT'),
help='''The confidence level such that 0 < c < 1.
The default is %(default)s.
''')
parser.add_argument('--internal',
type=float,
nargs=4,
default=[0.00001, -3.4, 3.4, 10000],
metavar=('TOLERANCE', 'LOWER', 'UPPER', 'INTERVALS'),
help='''Factors used for internal computations.
You should never need to change these.
Defaults: %(default)s.
''')
parser.add_argument('-k', '--cols',
nargs=2,
type=int,
default=[1,1],
metavar=('COL1', 'COL2'),
help='''The columns that define each dataset.
The first column is for the first dataset.
The second column is for the second dataset.
If the value in the column is not a floating point
number it is ignored.
The default is column 1 for both datasets.
''')
parser.add_argument('-s', '--snd-threshold',
type=int,
default=32,
metavar=('UINT'),
help='''The standard normal distribution (SND) threshold.
When the number of effective degrees of freedom (DOF)
exceeds this threshold, the SND is used instead of a
t-distribution.
The default is %(default)s.
''')
parser.add_argument('-v', '--verbose',
action='count',
default=0,
help='''Increase the level of verbosity.
Specify -v to see the values that make up the computation.
    Specify -v -v to see internal details about the z value lookup and
values that were discarded during file reads.
''')
parser.add_argument('-V', '--version',
action='version',
version='%(prog)s v{0}'.format(VERSION),
help="""Show program's version number and exit.
""")
# Positional arguments at the end.
parser.add_argument('FILES',
nargs='+',
help='''The files with the run time data.
The data must be organized in columns with one entry per line.
Non-numeric data is ignored which allows you to add comments
and blank spaces.
You can see the ignored data in verbose mode.
    If only one file is specified, it is used for both datasets.
''')
opts = parser.parse_args()
if opts.cols[0] < 1:
        parser.error('column 1 must be greater than 0')
    if opts.cols[1] < 1:
        parser.error('column 2 must be greater than 0')
if len(opts.FILES) > 2:
parser.error('only 1 or 2 files may be specified')
if opts.snd_threshold < 30:
parser.error('it does not make sense to use SND for {} elements'.format(opts.snd_threshold))
return opts
# ================================================================
#
# Read file data.
#
# ================================================================
def read_file(opts, fn, col):
'''
Read column data from the file.
'''
ds = []
try:
with open(fn, 'r') as ifp:
ln = 0
for line in ifp.readlines():
ln += 1
line = line.strip()
tokens = line.split()
if len(tokens) < col:
continue
token = tokens[col-1]
try:
f = float(token)
if f < 0.0001: # avoid divide by 0 errors
if opts.verbose > 1:
info('skipping line {} in {}: number is too small {}'.format(ln, fn, token))
continue
ds.append(f)
except ValueError:
if opts.verbose > 1:
info('skipping line {} in {}: not a number: {}'.format(ln, fn, token))
continue
except IOError:
err('could not read file: {}'.format(fn))
if len(ds) < 3:
err('too few data points at column {}, found {}, need at least 3 in file: {}'.format(col, len(ds), fn))
return ds
# ================================================================
#
# Main
#
# ================================================================
def main():
opts = getopts()
af = opts.FILES[0]
bf = opts.FILES[1] if len(opts.FILES) == 2 else af
ac = opts.cols[0]
bc = opts.cols[1]
infov(opts, 'dataset-1 file: {}'.format(af))
infov(opts, 'dataset-2 file: {}'.format(bf))
infov(opts, 'dataset-1 col: {}'.format(ac))
infov(opts, 'dataset-2 col: {}'.format(bc))
a = read_file(opts, af, ac)
b = read_file(opts, bf, bc)
ttest(a, b, opts)
if __name__ == '__main__':
main()
| 2.75 | 3 |
test/test_extension.py | sekikawattt/mkdocs-linkpatcher-plugin | 0 | 12790287 | <filename>test/test_extension.py
# coding: utf-8
from __future__ import unicode_literals
import re
import unittest
import markdown
from markdown.util import etree
from mkdocs import config, nav
import linkpatcher.plugin as plugin
from linkpatcher.extension import (LinkPatcherTreeProcessor,
LinkPathcerInlineProcessor)
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
class LinkPatcherExtensionTestBase(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self.tree_processor = LinkPatcherTreeProcessor()
self.conf = config.Config(schema=config.DEFAULT_SCHEMA)
self.conf.load_dict({
"pages": [{
'Home': 'index.md'
}, {
'testpage': 'nest/nest.md'
}]
})
self.site_navigation = nav.SiteNavigation(self.conf)
self.site_navigation.file_context.set_current_path('nest/nest.md')
class TestMakeAnchor(LinkPatcherExtensionTestBase):
def setUp(self):
super(TestMakeAnchor, self).setUp()
self.tree_processor.db_value_map = {
"text1": "/test1.html",
"text2": "/test2.html",
"text3": "/test3.html"
}
self.tree_processor.db_keys_re = re.compile("|".join(
self.tree_processor.db_value_map.keys()))
def dotest(self, current_url, link_to, text):
self.site_navigation.url_context.set_current_url(current_url)
self.tree_processor = LinkPatcherTreeProcessor()
self.tree_processor.db_value_map = {}
self.tree_processor.db_value_map[text] = link_to
plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals(
page={}, site_navigation=self.site_navigation)
return self.tree_processor.make_anchor(text)
def test_make_anchor(self):
test_patterns = [{
"expectation":
"""<a class="linkpatcher_link" href="../../index.html#linkpatcher_test">test</a>""",
"params": {
"current_url": "/nest/nest/index.html",
"link_to": '/index.html#linkpatcher_test',
"text": "test"
}
}, {
"expectation":
"""<a class="linkpatcher_link" href="./index.html#linkpatcher_test">あいうえお</a>""",
"params": {
"current_url": "/index.html",
"link_to": '/index.html#linkpatcher_test',
"text": "あいうえお"
}
}]
for pattern in test_patterns:
test_result = self.dotest(**pattern["params"])
try:
self.assertEqual(
etree.tostring(test_result, encoding='unicode'),
pattern['expectation'])
except LookupError:
self.assertEqual(
etree.tostring(test_result, encoding='utf-8'),
pattern['expectation'].encode('utf-8'))
class TestNewElemFromText(LinkPatcherExtensionTestBase):
def setUp(self):
super(TestNewElemFromText, self).setUp()
self.tree_processor.db_value_map = {
"text1": "/test1.html",
"text2": "/test2.html",
"text3": "/test3.html"
}
self.tree_processor.db_keys_re = re.compile("|".join(
self.tree_processor.db_value_map.keys()))
def dotest(self, text):
plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals(
page={}, site_navigation=self.site_navigation)
return self.tree_processor.newelem_from_text(text)
def test_newelem_from_text(self):
text_patterns = [{
"param": "",
"expectation": ["", []]
}, {
"param": "abcabc acd",
"expectation": ["abcabc acd", []]
}, {
"param":
"abcabc text1 acd text2 text3",
"expectation": [
"abcabc ", [
"""<a class="linkpatcher_link" href="./test1.html">text1</a> acd """,
"""<a class="linkpatcher_link" href="./test2.html">text2</a> """,
"""<a class="linkpatcher_link" href="./test3.html">text3</a>"""
]
]
}]
for pattern in text_patterns:
text, elems = self.dotest(pattern["param"])
expectation = pattern["expectation"]
self.assertEqual(text, expectation[0])
self.assertEqual(
list(map(lambda e: etree.tostring(e).decode(), elems)),
expectation[1])
class TestInsertPatchedLink(LinkPatcherExtensionTestBase):
def setUp(self):
super(TestInsertPatchedLink, self).setUp()
self.tree_processor.db_value_map = {
"text1": "/test1.html",
"text2": "/test2.html",
"text3": "/test3.html"
}
self.tree_processor.db_keys_re = re.compile("|".join(
self.tree_processor.db_value_map.keys()))
self.site_navigation.url_context.set_current_url("/index.html")
plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals(
page={}, site_navigation=self.site_navigation)
def test_insert_patchedlink(self):
elemstr = """<p id="0">
<p id="1">
text1 0123
text2 4567
text3 8901
<a id="2">text2</a>
text3 9999
</p>
text1 abcd
text2 efgh
text3 hijk
</p>"""
expectation = """<p id="0">
<p id="1">
<a class="linkpatcher_link" href="./test1.html">text1</a> 0123
<a class="linkpatcher_link" href="./test2.html">text2</a> 4567
<a class="linkpatcher_link" href="./test3.html">text3</a> 8901
<a id="2">text2</a>
<a class="linkpatcher_link" href="./test3.html">text3</a> 9999
</p>
<a class="linkpatcher_link" href="./test1.html">text1</a> abcd
<a class="linkpatcher_link" href="./test2.html">text2</a> efgh
<a class="linkpatcher_link" href="./test3.html">text3</a> hijk
</p>"""
elem = etree.fromstring(elemstr)
self.tree_processor.insert_patchedlink(elem)
self.assertEqual(etree.tostring(elem).decode(), expectation)
class TestRun(LinkPatcherExtensionTestBase):
def setUp(self):
super(TestRun, self).setUp()
self.site_navigation.url_context.set_current_url("/index.html")
plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals(
page={}, site_navigation=self.site_navigation)
plugin.TABLE.insert_multiple([{
"text": "text1",
"link": "/test1.html"
}, {
"text": "text2",
"link": "/test2.html"
}, {
"text": "text3",
"link": "/test3.html"
}])
def test_run(self):
elemstr = """<p>
<p>text1 text2text2
text2
text3.</p>
</p>"""
expectation = """<p>
<p><a class="linkpatcher_link" href="./test1.html">text1</a> text2text2
<a class="linkpatcher_link" href="./test2.html">text2</a>
<a class="linkpatcher_link" href="./test3.html">text3</a>.</p>
</p>"""
elem = etree.fromstring(elemstr)
self.tree_processor.run(elem)
self.assertEqual(
etree.tostring(elem, encoding='utf-8').decode(), expectation)
class LinkPatcherExtensionTest(markdown.Extension):
def extendMarkdown(self, md, md_globals):
md.inlinePatterns['linkpatcher'] = LinkPathcerInlineProcessor(
LinkPathcerInlineProcessor.pattern)
class TestHandleMatch(LinkPatcherExtensionTestBase):
def setUp(self):
super(TestHandleMatch, self).setUp()
self.inline_processor = LinkPathcerInlineProcessor(
LinkPathcerInlineProcessor.pattern)
self.md = markdown.Markdown(extensions=[LinkPatcherExtensionTest()])
page = nav.Page(
title='', path='', url_context=nav.URLContext(), config=self.conf)
plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals(
page=page, site_navigation=self.site_navigation)
def test_handleMatch(self):
test_patterns = [{
"param": ": あ",
"expectation": {
"element": ": あ",
"db": []
}
}, {
"param": ":: あ",
"expectation": {
"element":
"""
<h2 class="linkpatcher" id="linkpatcher_%E3%81%82">あ</h2>
""",
"db": [{
'text': 'あ',
'link': '//#linkpatcher_%s' % quote("あ".encode('utf-8'))
}]
}
}, {
"param": "::: あ",
"expectation": {
"element":
"""
<h3 class="linkpatcher" id="linkpatcher_%E3%81%82">あ</h3>
""",
"db": [{
'text': 'あ',
'link': '//#linkpatcher_%s' % quote("あ".encode('utf-8'))
}]
}
}, {
"param": ":::: あ",
"expectation": {
"element":
"""
<h4 class="linkpatcher" id="linkpatcher_%E3%81%82">あ</h4>
""",
"db": [{
'text': 'あ',
'link': '//#linkpatcher_%s' % quote("あ".encode('utf-8'))
}]
}
}, {
"param": "::::: あ",
"expectation": {
"element":
"""
<h5 class="linkpatcher" id="linkpatcher_%E3%81%82">あ</h5>
""",
"db": [{
'text': 'あ',
'link': '//#linkpatcher_%s' % quote("あ".encode('utf-8'))
}]
}
}, {
"param": ":::::: あ",
"expectation": {
"element":
"""
<h6 class="linkpatcher" id="linkpatcher_%E3%81%82">あ</h6>
""",
"db": [{
'text': 'あ',
'link': '//#linkpatcher_%s' % quote("あ".encode('utf-8'))
}]
}
}, {
"param": ":: !あ",
"expectation": {
"element":
"""
<h2 class="linkpatcher" id="linkpatcher_%E3%81%82">あ</h2>
""",
"db": []
}
}, {
"param": ":: あ, い",
"expectation": {
"element":
"""
<h2 class="linkpatcher" id="linkpatcher_%E3%81%82">あ</h2>
""",
"db": [{
'text': 'あ',
'link': '//#linkpatcher_%s' % quote("あ".encode('utf-8'))
}, {
'text': 'い',
'link': '//#linkpatcher_%s' % quote('あ'.encode('utf-8'))
}]
},
}, {
"param": ":: !あ, い",
"expectation": {
"element":
"""
<h2 class="linkpatcher" id="linkpatcher_%E3%81%82">あ</h2>
""",
"db": [{
'text': 'い',
'link': '//#linkpatcher_%s' % quote('あ'.encode('utf-8'))
}]
},
}]
for test_pattern in test_patterns:
plugin.TABLE.purge()
result = self.md.convert(test_pattern["param"])
self.assertEqual(
result,
"""<p>%s</p>""" % test_pattern["expectation"]['element'])
self.assertEqual(plugin.TABLE.all(),
test_pattern["expectation"]['db'])
if __name__ == '__main__':
unittest.main()
| 2.28125 | 2 |
Extra Exercises/hello.py | luizpavanello/python_udacity | 0 | 12790288 | # This program says hello and asks for my name
print('Hello, world!')
print('What is your name? ') # ask for their name
myName = input()
print(f'It is good to meet you, {myName}!')
print(f'The length of your name is: {len(myName)}')
print('What is your age?') #ask for their age
myAge = input()
print('You will be ' + str(int(myAge) + 1) + ' in a year.')
| 4.0625 | 4 |
User/tests/test_selenium.py | LukaszHoszowski/Django_ProEstate | 1 | 12790289 | <reponame>LukaszHoszowski/Django_ProEstate<filename>User/tests/test_selenium.py
import os
import time
import pytest
@pytest.mark.usefixtures('driver_init')
class TestUrlChrome:
def test_open_url(self, live_server):
self.driver.get(f'{live_server.url}/admin/')
assert 'Zaloguj się | Administracja stroną Django' in self.driver.title
def take_screenshot(driver, name):
time.sleep(1)
os.makedirs(os.path.join('screenshot', os.path.dirname(name)), exist_ok=True)
driver.save_screenshot(os.path.join('screenshot', name))
@pytest.mark.usefixtures('driver_init')
class Screenshot:
def screenshot_admin(self, live_server):
self.driver.get(f'{live_server.url}/admin/')
take_screenshot(self.driver, f'admin/admin_{self.browser}.png')
assert 'Zaloguj się | Administracja stroną Django' in self.driver.title
| 2.1875 | 2 |
src/freetype/src/tools/chktrcmp.py | fenollp/wex | 39 | 12790290 | #!/usr/bin/env python
#
# Check trace components in FreeType 2 source.
# Author: <NAME>, 2009, 2013
#
# This code is explicitly into the public domain.
import sys
import os
import re
SRC_FILE_LIST = []
USED_COMPONENT = {}
KNOWN_COMPONENT = {}
SRC_FILE_DIRS = ["src"]
TRACE_DEF_FILES = ["include/freetype/internal/fttrace.h"]
# --------------------------------------------------------------
# Parse command line options
#
for i in range(1, len(sys.argv)):
if sys.argv[i].startswith("--help"):
        print("Usage: %s [option]" % sys.argv[0])
        print("Search used-but-defined and defined-but-not-used trace_XXX macros")
        print("")
        print("  --help:")
        print("    Show this help")
        print("")
        print("  --src-dirs=dir1:dir2:...")
        print("    Specify the directories of C source files to be checked")
        print("    Default is %s" % ":".join(SRC_FILE_DIRS))
        print("")
        print("  --def-files=file1:file2:...")
        print("    Specify the header files including FT_TRACE_DEF()")
        print("    Default is %s" % ":".join(TRACE_DEF_FILES))
        print("")
exit(0)
if sys.argv[i].startswith("--src-dirs="):
SRC_FILE_DIRS = sys.argv[i].replace("--src-dirs=", "", 1).split(":")
elif sys.argv[i].startswith("--def-files="):
TRACE_DEF_FILES = sys.argv[i].replace("--def-files=", "", 1).split(":")
# --------------------------------------------------------------
# Scan C source and header files using trace macros.
#
c_pathname_pat = re.compile('^.*\.[ch]$', re.IGNORECASE)
trace_use_pat = re.compile('^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+trace_')
for d in SRC_FILE_DIRS:
for (p, dlst, flst) in os.walk(d):
for f in flst:
if c_pathname_pat.match(f) != None:
src_pathname = os.path.join(p, f)
line_num = 0
for src_line in open(src_pathname, 'r'):
line_num = line_num + 1
src_line = src_line.strip()
if trace_use_pat.match(src_line) != None:
component_name = trace_use_pat.sub('', src_line)
if component_name in USED_COMPONENT:
USED_COMPONENT[component_name].append("%s:%d" % (src_pathname, line_num))
else:
USED_COMPONENT[component_name] = ["%s:%d" % (src_pathname, line_num)]
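# Hedged illustration (not part of the original script): a source line such as
#
#   #define FT_COMPONENT  trace_ttload
#
# matches trace_use_pat above; the "#define FT_COMPONENT trace_" prefix is
# stripped, so the component name recorded in USED_COMPONENT would be "ttload"
# (the component name here is only an example).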
# --------------------------------------------------------------
# Scan header file(s) defining trace macros.
#
trace_def_pat_opn = re.compile('^.*FT_TRACE_DEF[ \t]*\([ \t]*')
trace_def_pat_cls = re.compile('[ \t\)].*$')
for f in TRACE_DEF_FILES:
line_num = 0
for hdr_line in open(f, 'r'):
line_num = line_num + 1
hdr_line = hdr_line.strip()
if trace_def_pat_opn.match(hdr_line) != None:
component_name = trace_def_pat_opn.sub('', hdr_line)
component_name = trace_def_pat_cls.sub('', component_name)
if component_name in KNOWN_COMPONENT:
                print("trace component %s is defined twice, see %s and fttrace.h:%d" %
                      (component_name, KNOWN_COMPONENT[component_name], line_num))
else:
KNOWN_COMPONENT[component_name] = "%s:%d" % \
(os.path.basename(f), line_num)
# --------------------------------------------------------------
# Compare the used and defined trace macros.
#
print("# Trace component used in the implementations but not defined in fttrace.h.")
cmpnt = sorted(USED_COMPONENT.keys())
for c in cmpnt:
if c not in KNOWN_COMPONENT:
        print("Trace component %s (used in %s) is not defined." % (c, ", ".join(USED_COMPONENT[c])))
print("# Trace component is defined but not used in the implementations.")
cmpnt = sorted(KNOWN_COMPONENT.keys())
for c in cmpnt:
if c not in USED_COMPONENT:
if c != "any":
            print("Trace component %s (defined in %s) is not used." % (c, KNOWN_COMPONENT[c]))
| 2.234375 | 2 |
pysm/semantic_modeling/assembling/autolabel/maxf1.py | binh-vu/semantic-modeling | 3 | 12790291 | <reponame>binh-vu/semantic-modeling<gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
from typing import Dict, Tuple, List, Union, Optional, Set
from data_structure import Graph, GraphLink
from experiments.evaluation_metrics import DataNodeMode
from semantic_modeling.assembling.autolabel.align_graph import align_graph
# from semantic_modeling.assembling.undirected_graphical_model.model_core import numbering_link_labels
# from semantic_modeling.assembling.undirected_graphical_model.model_extra import get_numbered_link_label
def get_gold_triples(gold_sm: Graph, is_blurring_label: bool) -> Set[Tuple[int, bytes, Union[bytes, int]]]:
gold_triples = set()
for node in gold_sm.iter_class_nodes():
outgoing_links: List[GraphLink] = list(node.iter_outgoing_links())
numbered_links = numbering_link_labels(outgoing_links)
for link in outgoing_links:
dest_node = link.get_target_node()
if dest_node.is_class_node():
dest_label = link.target_id
else:
dest_label = get_numbered_link_label(
"DATA_NODE", numbered_links[link.id]) if is_blurring_label else dest_node.label
triple = (link.source_id, link.label, dest_label)
gold_triples.add(triple)
return gold_triples
def max_f1(gold_sm: Graph, pred_sm: Graph, is_blurring_label: bool, gold_triples: Set[Tuple[int, bytes, Union[bytes, int]]]):
alignment = align_graph(gold_sm, pred_sm, DataNodeMode.IGNORE_LABEL_DATA_NODE if is_blurring_label else DataNodeMode.NO_TOUCH)
bijection = alignment['_bijections'][0]
link2label = {}
# build example from this candidate model
for node in pred_sm.iter_class_nodes():
outgoing_links = list(node.iter_outgoing_links())
numbered_links = numbering_link_labels(outgoing_links)
for link in outgoing_links:
dest_node = link.get_target_node()
if dest_node.is_class_node():
dest_label = bijection.prime2x[link.target_id]
else:
dest_label = get_numbered_link_label(
"DATA_NODE", numbered_links[link.id]) if is_blurring_label else dest_node.label
triple = (bijection.prime2x[link.source_id], link.label, dest_label)
link2label[link.id] = triple in gold_triples
return link2label, bijection.prime2x, alignment['f1']
def max_f1_no_ambiguous(gold_sm: Graph, pred_sm: Graph, is_blurring_label: bool, gold_triples: Set[Tuple[int, bytes, Union[bytes, int]]]):
alignment = align_graph(gold_sm, pred_sm, DataNodeMode.IGNORE_LABEL_DATA_NODE if is_blurring_label else DataNodeMode.NO_TOUCH)
if len(alignment['_bijections']) != 1:
return None, None, None
bijection = alignment['_bijections'][0]
link2label = {}
# build example from this candidate model
for node in pred_sm.iter_class_nodes():
outgoing_links = list(node.iter_outgoing_links())
numbered_links = numbering_link_labels(outgoing_links)
for link in outgoing_links:
dest_node = link.get_target_node()
if dest_node.is_class_node():
dest_label = bijection.prime2x[link.target_id]
else:
dest_label = get_numbered_link_label(
"DATA_NODE", numbered_links[link.id]) if is_blurring_label else dest_node.label
triple = (bijection.prime2x[link.source_id], link.label, dest_label)
link2label[link.id] = triple in gold_triples
return link2label, bijection.prime2x, alignment['f1']
# Copied from model_core and model_extra
def numbering_link_labels(links: List[GraphLink]) -> Dict[int, int]:
accum_numbered_links = {}
numbered_links = {}
for l in links:
if l.label not in accum_numbered_links:
accum_numbered_links[l.label] = 1
else:
accum_numbered_links[l.label] += 1
for l in links:
numbered_links[l.id] = accum_numbered_links[l.label]
accum_numbered_links[l.label] -= 1
return numbered_links
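# Hedged illustration (not part of the original module): given three outgoing
# links labelled [b"name", b"name", b"address"], the map built above assigns
# the first "name" link the number 2, the second "name" link 1, and the
# "address" link 1, i.e. repeated labels are numbered from their total count
# downwards in iteration order.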
def get_numbered_link_label(link_label: str, number: int) -> str:
"""Number a link"""
return "%s:_%d" % (link_label, number) | 2.265625 | 2 |
tools/data_loader.py | ZhengjieFANG/SalienGAN | 0 | 12790292 | import os
import torch
import utils
import torchvision.transforms as T
from torch.utils import data
from PIL import Image
# Define a custom dataset
class MyDataSet(data.Dataset):
def __init__(self,root,transform):
# 所有图片的绝对路径
imgs=os.listdir(root)
self.imgs=[os.path.join(root,k) for k in imgs]
self.transform=transform
def __getitem__(self, index):
img_path = self.imgs[index]
# 1. Load the image
pil_img = Image.open(img_path)
# 2. Resize and normalize the images using torchvision.
img = self.transform(pil_img)
return img
def __len__(self):
return len(self.imgs)
# Define a custom dataset that also loads saliency maps
class DataSetWithSalieny(data.Dataset):
def __init__(self,root,saliency_root,transform,transform_saliency):
# 所有图片的绝对路径
self.imgs=os.listdir(root)
self.imgs=[os.path.join(root,k) for k in self.imgs]
self.root = root
self.saliency_root = saliency_root
self.transform=transform
self.transform_saliency = transform_saliency
def __getitem__(self, index):
img_path = self.imgs[index]
img_name = self.imgs[index].split('/')[-1]
saliency_name = img_name.split('.')[0]+".png"
saliency_path = os.path.join(self.saliency_root, saliency_name)
# 1. Load the image
pil_img = Image.open(img_path)
pil_saliency = Image.open(saliency_path)
# 2. Resize and normalize the images using torchvision.
img = self.transform(pil_img)
saliency_1channel = self.transform_saliency(pil_saliency)
saliency = utils.get_saleincy_2channel(saliency_1channel)
return img, saliency
def __len__(self):
return len(self.imgs)
def get_saliency_dataloader(image_dir,saliency_dir, img_size, batch_size):
compose = [
T.Resize((img_size[0], img_size[1])),
        T.ToTensor(),  # scale to [0, 1]
        T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))  # map from [0, 1] to [-1, 1]
]
transform = T.Compose(compose)
compose_saliency = [
T.Resize((img_size[0], img_size[1])),
T.Grayscale(num_output_channels=1),
        T.ToTensor(),  # scale to [0, 1]
]
transform_saliency = T.Compose(compose_saliency)
dataset = DataSetWithSalieny(image_dir,saliency_dir,transform,transform_saliency)
dataloader = iter(torch.utils.data.DataLoader(dataset,
batch_size,
num_workers = 1))
    return dataloader  # returns an iterator over the dataloader
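# Hedged usage sketch (the paths and sizes below are made up, not part of the
# original code):
#
#   loader = get_saliency_dataloader('data/photos', 'data/saliency',
#                                    img_size=(256, 256), batch_size=8)
#   images, saliency = next(loader)  # images normalized to [-1, 1];
#                                    # saliency built by get_saleincy_2channel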
def get_gray_dataloader(image_dir, img_size, batch_size):
compose = [
T.Resize((img_size[0], img_size[1])),
T.Grayscale(num_output_channels=3),
T.ToTensor(),
        T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))  # map from [0, 1] to [-1, 1]
]
transform = T.Compose(compose)
dataset = MyDataSet(image_dir,transform)
dataloader = iter(torch.utils.data.DataLoader(dataset,
batch_size,
num_workers = 1))
    return dataloader  # returns an iterator over the dataloader
| 2.890625 | 3 |
tests/types/test_nullable.py | rfloriano/preggy | 10 | 12790293 | # -*- coding: utf-8 -*-
# preggy assertions
# https://github.com/heynemann/preggy
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2013 <NAME> <EMAIL>
from preggy import expect
#-----------------------------------------------------------------------------
def test_to_be_null():
expect(None).to_be_null()
try:
expect(None).not_to_be_null()
except AssertionError:
return
assert False, 'Should not have gotten this far'
def test_not_to_be_null():
expect('something').Not.to_be_null()
expect('something').not_to_be_null()
try:
expect('something').to_be_null()
except AssertionError:
return
assert False, 'Should not have gotten this far'
| 2.484375 | 2 |
nodeClass.py | EdwardG5/tempCrossword | 0 | 12790294 | <reponame>EdwardG5/tempCrossword<filename>nodeClass.py<gh_stars>0
class Node:
def __init__(self, parent, letter, word):
self._parent = parent # Pointer to parent
self._letter = letter # Current letter
self.word = word # Bool (Yes or no)
# Pointers to other child nodes
self._pointers = [None for x in range(26)]
if parent: # Current depth (first letter is depth = 1)
self._depth = parent._depth+1
else:
self._depth = 0 # Root node
        self._height = 0  # Longest path existing underneath this node
        # Length of the longest word on a path through this node (including this node)
self._maxLength = self._depth
self._update_parent() # Update height and maxLength of parent
def __repr__(self):
return f"({self._letter},{self.word})"
def __iter__(self):
return iter(self._pointers)
# Raises exception/Returns None on failure
def __getitem__(self, i):
if isinstance(i, int):
return self._pointers[i]
elif isinstance(i, str):
return self._pointers[self._charToInt(i)]
else:
raise TypeError("Node indices must be int or char")
def __setitem__(self, i, value):
if isinstance(i, int):
self._pointers[i] = value
elif isinstance(i, str):
self._pointers[self._charToInt(i)] = value
else:
raise TypeError("Node indices must be int or char")
def __eq__(self, other):
# if other check asserts that other != None
if other:
attrs = ["_letter", "_depth", "word", "_height", "_maxLength"]
if all(getattr(self, attr) == getattr(other, attr) for attr in attrs):
return all(self[i] == other[i] for i in range(26))
return False
def _update_parent(self):
parent = self._parent
if parent:
if self._height+1 > parent._height:
parent._height = self._height+1
parent._update_max_length()
parent._update_parent()
def _update_max_length(self):
self._maxLength = self._depth+self._height
# Calculates an index from a corresponding char (e.g. 'a' = 0)
@staticmethod
def _charToInt(letter):
return ord(letter.lower())-97
# Return word represented by node.
def whichWord(self):
node = self
str = node._letter
while node._parent:
node = node._parent
str += node._letter
return str[::-1]
# Return max word length on node's path
def maxLength(self):
return self._maxLength
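# Hedged usage sketch (not part of the original class): building a tiny trie
# for the single word "cat". Seeding the root with an empty letter is an
# assumption about how callers are expected to use the class.
def _example_trie():
    root = Node(None, '', False)
    root['c'] = Node(root, 'c', False)
    root['c']['a'] = Node(root['c'], 'a', False)
    root['c']['a']['t'] = Node(root['c']['a'], 't', True)
    assert root['c']['a']['t'].whichWord() == 'cat'
    assert root.maxLength() == 3
    return root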
| 3.28125 | 3 |
tests/test_utils_dataset_info_is_valid.py | jic-dtool/dtool_lookup_server | 4 | 12790295 | """Test dtool_lookup_server.utils.dataset_info_is_valid helper function."""
# Minimum data required to register a dataset.
INFO = {
"uuid": "af6727bf-29c7-43dd-b42f-a5d7ede28337",
"type": "dataset",
"uri": "file:///tmp/a_dataset",
"name": "my-dataset",
"readme": {"description": "test dataset"},
"manifest": {
"dtoolcore_version": "3.7.0",
"hash_function": "md5sum_hexdigest",
"items": {}
},
"base_uri": "file:///tmp",
"creator_username": "olssont",
"frozen_at": 1536238185.881941,
"annotations": {"stars": 5},
"tags": ["empty", "dataset"],
}
def test_dataset_info_is_valid_returns_true_on_valid_info():
from dtool_lookup_server.utils import dataset_info_is_valid
info = INFO.copy()
assert dataset_info_is_valid(info)
def test_dataset_info_returns_false_when_key_data_is_missing():
from dtool_lookup_server.utils import dataset_info_is_valid
for key in INFO.keys():
info = INFO.copy()
del info[key]
assert not dataset_info_is_valid(info), key
def test_dataset_info_returns_false_when_type_is_not_dataset():
from dtool_lookup_server.utils import dataset_info_is_valid
info = INFO.copy()
info["type"] = "protodataset"
assert not dataset_info_is_valid(info)
def test_dataset_info_returns_false_if_uuid_looks_invalid():
from dtool_lookup_server.utils import dataset_info_is_valid
info = INFO.copy()
info["uuid"] = "af6727bf-29c7-43dd-b42f"
assert not dataset_info_is_valid(info)
def test_dataset_info_is_valid_returns_false_if_base_uri_ends_with_slash():
from dtool_lookup_server.utils import dataset_info_is_valid
info = INFO.copy()
info["base_uri"] = "file:///tmp/"
assert not dataset_info_is_valid(info)
| 2.296875 | 2 |
site_project/movement/admin.py | clockworkk/Personal-Stat-Tracker | 0 | 12790296 | from django.contrib import admin
from .models import Activity, Fitbit
class ActivityAdmin(admin.ModelAdmin):
fieldsets = [
('Date Information', {'fields': ['entry_date']}),
('Fitbit Data', {'fields': ['steps', 'distance'], 'classes' : ['collapse']}),
]
list_display = ('entry_date' , 'steps', 'distance')
class FitbitAdmin(admin.ModelAdmin):
fieldsets = [
('Date Information', {'fields': ['entry_date']}),
('Fitbit Data', {'fields': ['steps', 'distance', 'active_minutes', 'weight'], 'classes' : ['collapse']}),
]
list_display = ('entry_date', 'steps', 'distance', 'active_minutes', 'weight')
# Register your models here.
admin.site.register(Activity, ActivityAdmin)
admin.site.register(Fitbit, FitbitAdmin)
| 1.75 | 2 |
imdb_rating/dependencies/__init__.py | PeregHer/imdb-rating-predictions | 0 | 12790297 | from .models import Movie
from .spiders import IMDBSpider
__all__ = ["Movie", "IMDBSpider"]
| 1.15625 | 1 |
predict.py | KnightZhang625/TF_ESTIMATOR_STANDARD_PARADIGM | 1 | 12790298 | <gh_stars>1-10
# coding:utf-8
import numpy as np
import tensorflow as tf
from tensorflow.contrib import predictor
from pathlib import Path
from config import config as _cg
def predict():
# find the pb file
subdirs = [x for x in Path(_cg.infer_pb_path).iterdir()
if x.is_dir() and 'temp' not in str(x)]
latest_model = str(sorted(subdirs)[-1])
print(latest_model)
predict_fn = predictor.from_saved_model(latest_model)
return predict_fn
if __name__ == '__main__':
predict_fn = predict()
    data = [[1, 2]]  # even a single example must be passed in batch format
data_batch = [[2, 3], [5, 6], [7, 8]]
features = {'input_x': np.array(data, dtype=np.float32)}
features_batch = {'input_x': np.array(data_batch, dtype=np.float32)}
predictions = predict_fn(features)['result_1']
predictions_batch = predict_fn(features_batch)['result_1']
print(predictions)
print(predictions_batch)
| 2.4375 | 2 |
solutions/two_sum.py | lishengfeng/leetcode | 0 | 12790299 | # Given an array of integers, return indices of the two numbers such that
# they add up to a specific target.
#
# You may assume that each input would have exactly one solution, and you
# may not use the same element twice.
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
possible_values = {}
for index, val in enumerate(nums):
if val in possible_values:
return [possible_values[val], index]
else:
possible_values[target-val] = index | 3.71875 | 4 |
External/astrometry.net/astrometry/python/pyfits/NA_pyfits.py | simonct/CoreAstro | 3 | 12790300 | <filename>External/astrometry.net/astrometry/python/pyfits/NA_pyfits.py<gh_stars>1-10
#!/usr/bin/env python
# $Id: NA_pyfits.py 329 2007-07-06 13:11:54Z jtaylor2 $
"""
A module for reading and writing FITS files and manipulating their contents.
A module for reading and writing Flexible Image Transport System
(FITS) files. This file format was endorsed by the International
Astronomical Union in 1999 and mandated by NASA as the standard format
for storing high energy astrophysics data. For details of the FITS
standard, see the NASA/Science Office of Standards and Technology
publication, NOST 100-2.0.
License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE
For detailed examples of usage, see the I{PyFITS User's Manual} available from
U{http://www.stsci.edu/resources/software_hardware/pyfits/Users_Manual1.pdf}
Epydoc markup used for all docstrings in this module.
@group Header-related Classes: Card, CardList, _Card_with_continue,
Header, _Hierarch
@group HDU Classes: _AllHDU, BinTableHDU, _CorruptedHDU, _ExtensionHDU,
GroupsHDU, ImageHDU, _ImageBaseHDU, PrimaryHDU, TableHDU,
_TableBaseHDU, _TempHDU, _ValidHDU
@group Table-related Classes: ColDefs, Column, FITS_rec, _FormatP,
_FormatX, _VLF
"""
"""
Do you mean: "Profits"?
- Google Search, when asked for "PyFITS"
"""
import re, os, tempfile, exceptions
import operator
import __builtin__
import urllib
import tempfile
import gzip
import zipfile
import numarray as num
import numarray.generic as ndarray
import numarray.strings as chararray
import numarray.records as rec
import numarray.objects as objects
import numarray.memmap as Memmap
from string import maketrans
import copy
import signal
import threading
# Module variables
_blockLen = 2880 # the FITS block size
_python_mode = {'readonly':'rb', 'copyonwrite':'rb', 'update':'rb+', 'append':'ab+'} # open modes
_memmap_mode = {'readonly':'r', 'copyonwrite':'c', 'update':'r+'}
TRUE = True # deprecated
FALSE = False # deprecated
_INDENT = " "
DELAYED = "delayed" # used for lazy instantiation of data
ASCIITNULL = 0 # value for ASCII table cell with value = TNULL
# this can be reset by user.
_isInt = "isinstance(val, (int, long))"
# Functions
def _padLength(stringLen):
"""Bytes needed to pad the input stringLen to the next FITS block."""
return (_blockLen - stringLen%_blockLen) % _blockLen
def _tmpName(input):
"""Create a temporary file name which should not already exist.
Use the directory of the input file and the base name of the mktemp()
output.
"""
dirName = os.path.dirname(input)
if dirName != '':
dirName += '/'
_name = dirName + os.path.basename(tempfile.mktemp())
if not os.path.exists(_name):
return _name
else:
        raise IOError, "%s exists" % _name
class VerifyError(exceptions.Exception):
"""Verify exception class."""
pass
class _ErrList(list):
"""Verification errors list class. It has a nested list structure
constructed by error messages generated by verifications at different
class levels.
"""
def __init__(self, val, unit="Element"):
list.__init__(self, val)
self.unit = unit
def __str__(self, tab=0):
"""Print out nested structure with corresponding indentations.
A tricky use of __str__, since normally __str__ has only one
argument.
"""
result = ""
element = 0
# go through the list twice, first time print out all top level messages
for item in self:
if not isinstance(item, _ErrList):
result += _INDENT*tab+"%s\n" % item
# second time go through the next level items, each of the next level
# must present, even it has nothing.
for item in self:
if isinstance(item, _ErrList):
_dummy = item.__str__(tab=tab+1)
# print out a message only if there is something
if _dummy.strip():
if self.unit:
result += _INDENT*tab+"%s %s:\n" % (self.unit, element)
result += _dummy
element += 1
return result
class _Verify:
"""Shared methods for verification."""
def run_option(self, option="warn", err_text="", fix_text="Fixed.", fix = "pass", fixable=1):
"""Execute the verification with selected option."""
_text = err_text
if not fixable:
option = 'unfixable'
if option in ['warn', 'exception']:
#raise VerifyError, _text
#elif option == 'warn':
pass
# fix the value
elif option == 'unfixable':
_text = "Unfixable error: %s" % _text
else:
exec(fix)
#if option != 'silentfix':
_text += ' ' + fix_text
return _text
def verify (self, option='warn'):
"""Wrapper for _verify."""
_option = option.lower()
if _option not in ['fix', 'silentfix', 'ignore', 'warn', 'exception']:
raise ValueError, 'Option %s not recognized.' % option
if (_option == "ignore"):
return
x = str(self._verify(_option)).rstrip()
if _option in ['fix', 'silentfix'] and x.find('Unfixable') != -1:
raise VerifyError, '\n'+x
if (_option != "silentfix") and x:
print 'Output verification result:'
print x
if _option == 'exception' and x:
raise VerifyError
def _pad(input):
"""Pad balnk space to the input string to be multiple of 80."""
_len = len(input)
if _len == Card.length:
return input
elif _len > Card.length:
strlen = _len % Card.length
if strlen == 0:
return input
else:
return input + ' ' * (Card.length-strlen)
# minimum length is 80
else:
strlen = _len % Card.length
return input + ' ' * (Card.length-strlen)
def _floatFormat(value):
"""Format the floating number to make sure it gets the decimal point."""
valueStr = "%.16G" % value
if "." not in valueStr and "E" not in valueStr:
valueStr += ".0"
return valueStr
class Undefined:
"""Undefined value."""
pass
class Delayed:
"""Delayed file-reading data."""
def __init__(self, hdu=None, field=None):
self.hdu = hdu
self.field = field
# translation table for floating value string
_fix_table = maketrans('de', 'DE')
_fix_table2 = maketrans('dD', 'eE')
class Card(_Verify):
# string length of a card
length = 80
# String for a FITS standard compliant (FSC) keyword.
_keywd_FSC = r'[A-Z0-9_-]* *$'
_keywd_FSC_RE = re.compile(_keywd_FSC)
# A number sub-string, either an integer or a float in fixed or
# scientific notation. One for FSC and one for non-FSC (NFSC) format:
# NFSC allows lower case of DE for exponent, allows space between sign,
# digits, exponent sign, and exponents
_digits_FSC = r'(\.\d+|\d+(\.\d*)?)([DE][+-]?\d+)?'
_digits_NFSC = r'(\.\d+|\d+(\.\d*)?) *([deDE] *[+-]? *\d+)?'
_numr_FSC = r'[+-]?' + _digits_FSC
_numr_NFSC = r'[+-]? *' + _digits_NFSC
# This regex helps delete leading zeros from numbers, otherwise
# Python might evaluate them as octal values.
_number_FSC_RE = re.compile(r'(?P<sign>[+-])?0*(?P<digt>' + _digits_FSC+')')
_number_NFSC_RE = re.compile(r'(?P<sign>[+-])? *0*(?P<digt>' + _digits_NFSC + ')')
# FSC commentary card string which must contain printable ASCII characters.
_ASCII_text = r'[ -~]*$'
_comment_FSC_RE = re.compile(_ASCII_text)
# Checks for a valid value/comment string. It returns a match object
# for a valid value/comment string.
# The valu group will return a match if a FITS string, boolean,
# number, or complex value is found, otherwise it will return
# None, meaning the keyword is undefined. The comment field will
# return a match if the comment separator is found, though the
# comment maybe an empty string.
_value_FSC_RE = re.compile(
r'(?P<valu_field> *'
r'(?P<valu>'
# The <strg> regex is not correct for all cases, but
# it comes pretty darn close. It appears to find the
# end of a string rather well, but will accept
# strings with an odd number of single quotes,
# instead of issuing an error. The FITS standard
# appears vague on this issue and only states that a
# string should not end with two single quotes,
# whereas it should not end with an even number of
# quotes to be precise.
#
# Note that a non-greedy match is done for a string,
# since a greedy match will find a single-quote after
# the comment separator resulting in an incorrect
# match.
r'\'(?P<strg>([ -~]+?|\'\'|)) *?\'(?=$|/| )|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_FSC + ')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_FSC + ') *, *(?P<imag>' + _numr_FSC + ') *\))'
r')? *)'
r'(?P<comm_field>'
r'(?P<sepr>/ *)'
r'(?P<comm>[!-~][ -~]*)?'
r')?$')
_value_NFSC_RE = re.compile(
r'(?P<valu_field> *'
r'(?P<valu>'
r'\'(?P<strg>([ -~]+?|\'\'|)) *?\'(?=$|/| )|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_NFSC + ')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_NFSC + ') *, *(?P<imag>' + _numr_NFSC + ') *\))'
r')? *)'
r'(?P<comm_field>'
r'(?P<sepr>/ *)'
r'(?P<comm>.*)'
r')?$')
# keys of commentary cards
_commentaryKeys = ['', 'COMMENT', 'HISTORY']
def __init__(self, key='', value='', comment=''):
"""Construct a card from key, value, and (optionally) comment.
        Any specified arguments, except defaults, must be compliant with
        the FITS standard.
key: keyword name, default=''.
value: keyword value, default=''.
comment: comment, default=''.
"""
if key != '' or value != '' or comment != '':
self._setkey(key)
self._setvalue(value)
self._setcomment(comment)
# for commentary cards, value can only be strings and there
# is no comment
if self.key in Card._commentaryKeys:
if not isinstance(self.value, str):
raise ValueError, 'Value in a commentary card must be a string'
else:
self.__dict__['_cardimage'] = ' '*80
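    # Hedged usage sketch (not part of the original source):
    #
    #   c = Card('NAXIS', 2, 'number of axes')
    #   str(c)  # 80-character card image: keyword padded to 8 columns, '= ',
    #           # the value right-justified in a 20-column field, then
    #           # ' / number of axes'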
def __repr__(self):
return self._cardimage
def __getattr__(self, name):
""" instanciate specified attribute object."""
if name == '_cardimage':
self.ascardimage()
elif name == 'key':
self._extractKey()
elif name in ['value', 'comment']:
self._extractValueComment(name)
else:
raise AttributeError, name
return getattr(self, name)
def _setkey(self, val):
"""Set the key attribute, surrogate for the __setattr__ key case."""
if isinstance(val, str):
val = val.strip()
if len(val) <= 8:
val = val.upper()
if val == 'END':
raise ValueError, "keyword 'END' not allowed"
self._checkKey(val)
else:
if val[:8].upper() == 'HIERARCH':
val = val[8:].strip()
self.__class__ = _Hierarch
else:
raise ValueError, 'keyword name %s is too long (> 8), use HIERARCH.' % val
else:
raise ValueError, 'keyword name %s is not a string' % val
self.__dict__['key'] = val
def _setvalue(self, val):
"""Set the value attribute."""
if isinstance(val, (str, int, long, float, complex, bool, Undefined)):
if isinstance(val, str):
self._checkText(val)
self.__dict__['_valueModified'] = 1
else:
raise ValueError, 'Illegal value %s' % str(val)
self.__dict__['value'] = val
def _setcomment(self, val):
"""Set the comment attribute."""
if isinstance(val,str):
self._checkText(val)
else:
if val is not None:
raise ValueError, 'comment %s is not a string' % val
self.__dict__['comment'] = val
def __setattr__(self, name, val):
if name == 'key':
raise SyntaxError, 'keyword name cannot be reset.'
elif name == 'value':
self._setvalue(val)
elif name == 'comment':
self._setcomment(val)
else:
raise AttributeError, name
# When an attribute (value or comment) is changed, will reconstructe
# the card image.
self._ascardimage()
def ascardimage(self, option='silentfix'):
"""Generate a (new) card image from the attributes: key, value,
and comment, or from raw string.
option: verification option, default=silentfix.
"""
# Only if the card image already exist (to avoid infinite loop),
# fix it first.
if self.__dict__.has_key('_cardimage'):
self._check(option)
self._ascardimage()
return self.__dict__['_cardimage']
def _ascardimage(self):
"""Generate a (new) card image from the attributes: key, value,
and comment. Core code for ascardimage.
"""
# keyword string
if self.__dict__.has_key('key') or self.__dict__.has_key('_cardimage'):
if isinstance(self, _Hierarch):
keyStr = 'HIERARCH %s ' % self.key
else:
keyStr = '%-8s' % self.key
else:
keyStr = ' '*8
# value string
# check if both value and _cardimage attributes are missing,
# to avoid infinite loops
if not (self.__dict__.has_key('value') or self.__dict__.has_key('_cardimage')):
valStr = ''
# string value should occupies at least 8 columns, unless it is
# a null string
elif isinstance(self.value, str):
if self.value == '':
valStr = "''"
else:
_expValStr = self.value.replace("'","''")
valStr = "'%-8s'" % _expValStr
valStr = '%-20s' % valStr
# must be before int checking since bool is also int
elif isinstance(self.value , bool):
valStr = '%20s' % `self.value`[0]
elif isinstance(self.value , (int, long)):
valStr = '%20d' % self.value
# XXX need to consider platform dependence of the format (e.g. E-009 vs. E-09)
elif isinstance(self.value, float):
if self._valueModified:
valStr = '%20s' % _floatFormat(self.value)
else:
valStr = '%20s' % self._valuestring
elif isinstance(self.value, complex):
if self._valueModified:
_tmp = '(' + _floatFormat(self.value.real) + ', ' + _floatFormat(self.value.imag) + ')'
valStr = '%20s' % _tmp
else:
valStr = '%20s' % self._valuestring
elif isinstance(self.value, Undefined):
valStr = ''
# conserve space for HIERARCH cards
if isinstance(self, _Hierarch):
valStr = valStr.strip()
# comment string
if keyStr.strip() in Card._commentaryKeys: # do NOT use self.key
commentStr = ''
elif self.__dict__.has_key('comment') or self.__dict__.has_key('_cardimage'):
if self.comment in [None, '']:
commentStr = ''
else:
commentStr = ' / ' + self.comment
else:
commentStr = ''
# equal sign string
eqStr = '= '
if keyStr.strip() in Card._commentaryKeys: # not using self.key
eqStr = ''
if self.__dict__.has_key('value'):
valStr = str(self.value)
# put all parts together
output = keyStr + eqStr + valStr + commentStr
# need this in case card-with-continue's value is shortened
if not isinstance(self, _Hierarch):
self.__class__ = Card
else:
# does not support CONTINUE for HIERARCH
if len(keyStr + eqStr + valStr) > Card.length:
raise ValueError, "The keyword %s with its value is too long." % self.key
if len(output) <= Card.length:
output = "%-80s" % output
# longstring case (CONTINUE card)
else:
# try not to use CONTINUE if the string value can fit in one line.
# Instead, just truncate the comment
if isinstance(self.value, str) and len(valStr) > (Card.length-10):
self.__class__ = _Card_with_continue
output = self._breakup_strings()
else:
print 'card is too long, comment is truncated.'
output = output[:Card.length]
self.__dict__['_cardimage'] = output
def _checkText(self, val):
"""Verify val to be printable ASCII text."""
if Card._comment_FSC_RE.match(val) is None:
self.__dict__['_err_text'] = 'Unprintable string %s' % repr(val)
self.__dict__['_fixable'] = 0
raise ValueError, self._err_text
def _checkKey(self, val):
"""Verify the keyword to be FITS standard."""
# use repr (not str) in case of control character
if Card._keywd_FSC_RE.match(val) is None:
self.__dict__['_err_text'] = 'Illegal keyword name %s' % repr(val)
self.__dict__['_fixable'] = 0
raise ValueError, self._err_text
def _extractKey(self):
"""Returns the keyword name parsed from the card image."""
head = self._getKeyString()
if isinstance(self, _Hierarch):
self.__dict__['key'] = head.strip()
else:
self.__dict__['key'] = head.strip().upper()
def _extractValueComment(self, name):
"""Exatrct the keyword value or comment from the card image."""
# for commentary cards, no need to parse further
if self.key in Card._commentaryKeys:
self.__dict__['value'] = self._cardimage[8:].rstrip()
self.__dict__['comment'] = ''
return
valu = self._check(option='parse')
if name == 'value':
if valu is None:
raise ValueError, "Unparsable card, fix it first with .verify('fix')."
if valu.group('bool') != None:
_val = valu.group('bool')=='T'
elif valu.group('strg') != None:
_val = re.sub("''", "'", valu.group('strg'))
elif valu.group('numr') != None:
# Check for numbers with leading 0s.
numr = Card._number_NFSC_RE.match(valu.group('numr'))
_digt = numr.group('digt').translate(_fix_table2, ' ')
if numr.group('sign') == None:
_val = eval(_digt)
else:
_val = eval(numr.group('sign')+_digt)
elif valu.group('cplx') != None:
# Check for numbers with leading 0s.
real = Card._number_NFSC_RE.match(valu.group('real'))
_rdigt = real.group('digt').translate(_fix_table2, ' ')
if real.group('sign') == None:
_val = eval(_rdigt)
else:
_val = eval(real.group('sign')+_rdigt)
imag = Card._number_NFSC_RE.match(valu.group('imag'))
_idigt = imag.group('digt').translate(_fix_table2, ' ')
if imag.group('sign') == None:
_val += eval(_idigt)*1j
else:
_val += eval(imag.group('sign') + _idigt)*1j
else:
_val = UNDEFINED
self.__dict__['value'] = _val
if '_valuestring' not in self.__dict__:
self.__dict__['_valuestring'] = valu.group('valu')
if '_valueModified' not in self.__dict__:
self.__dict__['_valueModified'] = 0
elif name == 'comment':
self.__dict__['comment'] = ''
if valu is not None:
_comm = valu.group('comm')
if isinstance(_comm, str):
self.__dict__['comment'] = _comm.rstrip()
def _fixValue(self, input):
"""Fix the card image for fixable non-standard compliance."""
_valStr = None
# for the unparsable case
if input is None:
_tmp = self._getValueCommentString()
try:
slashLoc = _tmp.index("/")
self.__dict__['value'] = _tmp[:slashLoc].strip()
self.__dict__['comment'] = _tmp[slashLoc+1:].strip()
except:
self.__dict__['value'] = _tmp.strip()
elif input.group('numr') != None:
numr = Card._number_NFSC_RE.match(input.group('numr'))
_valStr = numr.group('digt').translate(_fix_table, ' ')
if numr.group('sign') is not None:
_valStr = numr.group('sign')+_valStr
elif input.group('cplx') != None:
real = Card._number_NFSC_RE.match(input.group('real'))
_realStr = real.group('digt').translate(_fix_table, ' ')
if real.group('sign') is not None:
_realStr = real.group('sign')+_realStr
imag = Card._number_NFSC_RE.match(input.group('imag'))
_imagStr = imag.group('digt').translate(_fix_table, ' ')
if imag.group('sign') is not None:
_imagStr = imag.group('sign') + _imagStr
_valStr = '(' + _realStr + ', ' + _imagStr + ')'
self.__dict__['_valuestring'] = _valStr
self._ascardimage()
def _locateEq(self):
"""Locate the equal sign in the card image before column 10 and
return its location. It returns None if equal sign is not present,
or it is a commentary card.
"""
# no equal sign for commentary cards (i.e. part of the string value)
_key = self._cardimage[:8].strip().upper()
if _key in Card._commentaryKeys:
eqLoc = None
else:
if _key == 'HIERARCH':
_limit = Card.length
else:
_limit = 10
try:
eqLoc = self._cardimage[:_limit].index("=")
except:
eqLoc = None
return eqLoc
def _getKeyString(self):
"""Locate the equal sign in the card image and return the string
before the equal sign. If there is no equal sign, return the
string before column 9.
"""
eqLoc = self._locateEq()
if eqLoc is None:
eqLoc = 8
_start = 0
if self._cardimage[:8].upper() == 'HIERARCH':
_start = 8
self.__class__ = _Hierarch
return self._cardimage[_start:eqLoc]
def _getValueCommentString(self):
"""Locate the equal sign in the card image and return the string
after the equal sign. If there is no equal sign, return the
string after column 8.
"""
eqLoc = self._locateEq()
if eqLoc is None:
eqLoc = 7
return self._cardimage[eqLoc+1:]
def _check(self, option='ignore'):
"""Verify the card image with the specified option. """
self.__dict__['_err_text'] = ''
self.__dict__['_fix_text'] = ''
self.__dict__['_fixable'] = 1
if option == 'ignore':
return
elif option == 'parse':
# check the value only, no need to check key and comment for 'parse'
result = Card._value_NFSC_RE.match(self._getValueCommentString())
# if not parsable (i.e. everything else) result = None
return result
else:
# verify the equal sign position
if self.key not in Card._commentaryKeys and self._cardimage.find('=') != 8:
if option in ['exception', 'warn']:
self.__dict__['_err_text'] = 'Card image is not FITS standard (equal sign not at column 8).'
raise ValueError, self._err_text, '\n%s' % self._cardimage
elif option in ['fix', 'silentfix']:
result = self._check('parse')
self._fixValue(result)
if option == 'fix':
self.__dict__['_fix_text'] = 'Fixed card to be FITS standard.: %s' % self.key
# verify the key, it is never fixable
# always fix silently the case where "=" is before column 9,
# since there is no way to communicate back to the _keylist.
self._checkKey(self.key)
# verify the value, it may be fixable
result = Card._value_FSC_RE.match(self._getValueCommentString())
if result is not None or self.key in Card._commentaryKeys:
return result
else:
if option in ['fix', 'silentfix']:
result = self._check('parse')
self._fixValue(result)
if option == 'fix':
self.__dict__['_fix_text'] = 'Fixed card to be FITS standard.: %s' % self.key
else:
self.__dict__['_err_text'] = 'Card image is not FITS standard (unparsable value string).'
raise ValueError, self._err_text + '\n%s' % self._cardimage
# verify the comment (string), it is never fixable
if result is not None:
_str = result.group('comm')
if _str is not None:
self._checkText(_str)
def fromstring(self, input):
"""Construct a Card object from a (raw) string. It will pad the
string if it is not the length of a card image (80 columns).
If the card image is longer than 80, assume it contains CONTINUE
card(s).
"""
self.__dict__['_cardimage'] = _pad(input)
if self._cardimage[:8].upper() == 'HIERARCH':
self.__class__ = _Hierarch
# for card image longer than 80, assume it contains CONTINUE card(s).
elif len(self._cardimage) > Card.length:
self.__class__ = _Card_with_continue
# remove the key/value/comment attributes, some of them may not exist
for name in ['key', 'value', 'comment', '_valueModified']:
if self.__dict__.has_key(name):
delattr(self, name)
return self
def _ncards(self):
return len(self._cardimage) / Card.length
def _verify(self, option='warn'):
"""Card class verification method."""
_err = _ErrList([])
try:
self._check(option)
except:
pass
_err.append(self.run_option(option, err_text=self._err_text, fix_text=self._fix_text, fixable=self._fixable))
return _err
class _Hierarch(Card):
"""Cards begins with HIERARCH which allows keyword name longer than 8
characters.
"""
def _verify(self, option='warn'):
"""No verification (for now)."""
return _ErrList([])
class _Card_with_continue(Card):
"""Cards having more than one 80-char "physical" cards, the cards after
the first one must start with CONTINUE and the whole card must have
string value.
"""
def __str__(self):
"""Format a list of cards into a printable string."""
kard = self._cardimage
output = ''
for i in range(len(kard)/80):
output += kard[i*80:(i+1)*80] + '\n'
return output[:-1]
def _extractValueComment(self, name):
"""Exatrct the keyword value or comment from the card image."""
longstring = ''
ncards = self._ncards()
for i in range(ncards):
# take each 80-char card as a regular card and use its methods.
_card = Card().fromstring(self._cardimage[i*80:(i+1)*80])
if i > 0 and _card.key != 'CONTINUE':
raise ValueError, 'Long card image must have CONTINUE cards after the first card.'
if not isinstance(_card.value, str):
raise ValueError, 'Cards with CONTINUE must have string value.'
if name == 'value':
_val = re.sub("''", "'", _card.value).rstrip()
# drop the ending "&"
if _val[-1] == '&':
_val = _val[:-1]
longstring = longstring + _val
elif name == 'comment':
_comm = _card.comment
if isinstance(_comm, str) and _comm != '':
longstring = longstring + _comm.rstrip() + ' '
self.__dict__[name] = longstring.rstrip()
def _breakup_strings(self):
"""Break up long string value/comment into CONTINUE cards.
This is a primitive implementation, it will put the value
string in one block and the comment string in another.
Also, it does not break at the blank space between words.
So it may not look pretty.
"""
val_len = 67
comm_len = 64
output = ''
# do the value string
valfmt = "'%-s&'"
val = self.value.replace("'", "''")
val_list = self._words_group(val, val_len)
for i in range(len(val_list)):
if i == 0:
headstr = "%-8s= " % self.key
else:
headstr = "CONTINUE "
valstr = valfmt % val_list[i]
output = output + '%-80s' % (headstr + valstr)
# do the comment string
if self.comment is None:
comm = ''
else:
comm = self.comment
commfmt = "%-s"
if not comm == '':
nlines = len(comm) / comm_len + 1
comm_list = self._words_group(comm, comm_len)
for i in comm_list:
commstr = "CONTINUE '&' / " + commfmt % i
output = output + '%-80s' % commstr
return output
def _words_group(self, input, strlen):
"""Split a long string into parts where each part is no longer than
strlen and no word is cut into two pieces. But if there is one
single word which is longer than strlen, then it will be split in
the middle of the word.
"""
list = []
_nblanks = input.count(' ')
nmax = max(_nblanks, len(input)/strlen+1)
arr = chararray.array(input+' ', itemsize=1)
# locations of the blanks
blank_loc = num.nonzero(arr == ' ')[0]
offset = 0
xoffset = 0
for i in range(nmax):
try:
loc = num.nonzero(blank_loc >= strlen+offset)[0][0]
offset = blank_loc[loc-1] + 1
if loc == 0:
offset = -1
except:
offset = len(input)
# check for one word longer than strlen, break in the middle
if offset <= xoffset:
offset = xoffset + strlen
# collect the pieces in a list
tmp = input[xoffset:offset]
list.append(tmp)
if len(input) == offset:
break
xoffset = offset
return list
class Header:
"""FITS header class."""
def __init__(self, cards=[]):
"""Construct a Header from a CardList.
cards: A list of Cards, default=[].
"""
# decide which kind of header it belongs to
try:
if cards[0].key == 'SIMPLE':
if 'GROUPS' in cards._keylist and cards['GROUPS'].value == True:
self._hdutype = GroupsHDU
elif cards[0].value == True:
self._hdutype = PrimaryHDU
else:
self._hdutype = _ValidHDU
elif cards[0].key == 'XTENSION':
xtension = cards[0].value.rstrip()
if xtension == 'TABLE':
self._hdutype = TableHDU
elif xtension == 'IMAGE':
self._hdutype = ImageHDU
elif xtension in ('BINTABLE', 'A3DTABLE'):
self._hdutype = BinTableHDU
else:
self._hdutype = _ExtensionHDU
else:
self._hdutype = _ValidHDU
except:
self._hdutype = _CorruptedHDU
# populate the cardlist
self.ascard = CardList(cards)
def __getitem__ (self, key):
"""Get a header keyword value."""
return self.ascard[key].value
def __setitem__ (self, key, value):
"""Set a header keyword value."""
self.ascard[key].value = value
self._mod = 1
def __delitem__(self, key):
"""Delete card(s) with the name 'key'."""
# delete ALL cards with the same keyword name
if isinstance(key, str):
while 1:
try:
del self.ascard[key]
self._mod = 1
except:
return
# for integer key only delete once
else:
del self.ascard[key]
self._mod = 1
def __str__(self):
return self.ascard.__str__()
def ascardlist(self):
"""Returns a CardList."""
return self.ascard
def items(self):
"""Return a list of all keyword-value pairs from the CardList."""
pairs = []
for card in self.ascard:
pairs.append((card.key, card.value))
return pairs
def has_key(self, key):
"""Check for existence of a keyword. Returns 1 if found, otherwise, 0.
key: keyword name. If given an index, always returns 0.
"""
try:
key = key.strip().upper()
if key[:8] == 'HIERARCH':
key = key[8:].strip()
_index = self.ascard._keylist.index(key)
return 1
except:
return 0
def rename_key(self, oldkey, newkey, force=0):
"""Rename a card's keyword in the header.
oldkey: old keyword, can be a name or index.
newkey: new keyword, must be a string.
force: if the new key name already exists, force creation of a duplicate name.
"""
oldkey = oldkey.strip().upper()
newkey = newkey.strip().upper()
if newkey == 'CONTINUE':
raise ValueError, 'Can not rename to CONTINUE'
if newkey in Card._commentaryKeys or oldkey in Card._commentaryKeys:
if not (newkey in Card._commentaryKeys and oldkey in Card._commentaryKeys):
raise ValueError, 'Regular and commentary keys can not be renamed to each other.'
elif (force == 0) and (newkey in self.ascard._keylist):
raise ValueError, 'Intended keyword %s already exists in header.' % newkey
_index = self.ascard.index_of(oldkey)
_comment = self.ascard[_index].comment
_value = self.ascard[_index].value
self.ascard[_index] = Card(newkey, _value, _comment)
# self.ascard[_index].__dict__['key']=newkey
# self.ascard[_index].ascardimage()
# self.ascard._keylist[_index] = newkey
def get(self, key, default=None):
"""Get a keyword value from the CardList.
If no keyword is found, return the default value.
key: keyword name or index
default: if no keyword is found, the value to be returned.
"""
try:
return self[key]
except:
return default
def update(self, key, value, comment=None, before=None, after=None):
"""Update one header card."""
"""
If the keyword already exists, its value/comment will be updated.
If it does not exist, a new card will be created and it will be
placed before or after the specified location. If no "before"
or "after" is specified, it will be appended at the end.
key: keyword name
value: keyword value (to be used for updating)
comment: keyword comment (to be used for updating), default=None.
before: name of the keyword, or index of the Card before which
the new card will be placed. The argument `before' takes
precedence over `after' if both specified. default=None.
after: name of the keyword, or index of the Card after which
the new card will be placed. default=None.
"""
if self.has_key(key):
j = self.ascard.index_of(key)
if comment is not None:
_comment = comment
else:
_comment = self.ascard[j].comment
self.ascard[j] = Card(key, value, _comment)
elif before != None or after != None:
_card = Card(key, value, comment)
self.ascard._pos_insert(_card, before=before, after=after)
else:
self.ascard.append(Card(key, value, comment))
self._mod = 1
def add_history(self, value, before=None, after=None):
"""Add a HISTORY card.
value: History text to be added.
before: [same as in update()]
after: [same as in update()]
"""
self._add_commentary('history', value, before=before, after=after)
def add_comment(self, value, before=None, after=None):
"""Add a COMMENT card.
value: Comment text to be added.
before: [same as in update()]
after: [same as in update()]
"""
self._add_commentary('comment', value, before=before, after=after)
def add_blank(self, value='', before=None, after=None):
"""Add a blank card.
value: Text to be added.
before: [same as in update()]
after: [same as in update()]
"""
self._add_commentary(' ', value, before=before, after=after)
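# A minimal usage sketch of the commentary helpers above (texts are
# hypothetical):
#     hdr.add_history('flat field applied')
#     hdr.add_comment('reduced with the standard pipeline')
#     hdr.add_blank()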
def get_history(self):
"""Get all histories as a list of string texts."""
output = []
for _card in self.ascardlist():
if _card.key == 'HISTORY':
output.append(_card.value)
return output
def get_comment(self):
"""Get all comments as a list of string texts."""
output = []
for _card in self.ascardlist():
if _card.key == 'COMMENT':
output.append(_card.value)
return output
def _add_commentary(self, key, value, before=None, after=None):
"""Add a commentary card.
If before and after are None, add to the last occurrence of
cards of the same name (except blank card). If there is no card
(or blank card), append at the end.
"""
new_card = Card(key, value)
if before != None or after != None:
self.ascard._pos_insert(new_card, before=before, after=after)
else:
if key[0] == ' ':
useblanks = new_card._cardimage != ' '*80
self.ascard.append(new_card, useblanks=useblanks, bottom=1)
else:
try:
_last = self.ascard.index_of(key, backward=1)
self.ascard.insert(_last+1, new_card)
except:
self.ascard.append(new_card, bottom=1)
self._mod = 1
def copy(self):
"""Make a copy of the Header."""
tmp = Header(self.ascard.copy())
# also copy the class
tmp._hdutype = self._hdutype
return tmp
def _strip(self):
"""Strip cards specific to a certain kind of header.
Strip cards like SIMPLE, BITPIX, etc. so the rest of the header
can be used to reconstruct another kind of header.
"""
try:
# have both SIMPLE and XTENSION to accomodate Extension
# and Corrupted cases
del self['SIMPLE']
del self['XTENSION']
del self['BITPIX']
_naxis = self['NAXIS']
if issubclass(self._hdutype, _TableBaseHDU):
_tfields = self['TFIELDS']
del self['NAXIS']
for i in range(_naxis):
del self['NAXIS'+`i+1`]
if issubclass(self._hdutype, PrimaryHDU):
del self['EXTEND']
del self['PCOUNT']
del self['GCOUNT']
if issubclass(self._hdutype, PrimaryHDU):
del self['GROUPS']
if issubclass(self._hdutype, _ImageBaseHDU):
del self['BSCALE']
del self['BZERO']
if issubclass(self._hdutype, _TableBaseHDU):
del self['TFIELDS']
for name in ['TFORM', 'TSCAL', 'TZERO', 'TNULL', 'TTYPE', 'TUNIT']:
for i in range(_tfields):
del self[name+`i+1`]
if issubclass(self._hdutype, BinTableHDU):
for name in ['TDISP', 'TDIM', 'THEAP']:
for i in range(_tfields):
del self[name+`i+1`]
if issubclass(self._hdutype, TableHDU):
for i in range(_tfields):
del self['TBCOL'+`i+1`]
except:
pass
class CardList(list):
"""FITS header card list class."""
def __init__(self, cards=[], keylist=None):
"""Construct the CardList object from a list of Cards.
cards: A list of Cards, default=[].
"""
list.__init__(self, cards)
self._cards = cards
# if the key list is not supplied (as in reading in the FITS file),
# it will be constructed from the card list.
if keylist is None:
self._keylist = [k.upper() for k in self.keys()]
else:
self._keylist = keylist
# find out how many blank cards are *directly* before the END card
self._blanks = 0
self.count_blanks()
def __getitem__(self, key):
"""Get a Card by indexing or by the keyword name."""
_key = self.index_of(key)
return super(CardList, self).__getitem__(_key)
def __getslice__(self, start, end):
_cards = super(CardList, self).__getslice__(start,end)
result = CardList(_cards, self._keylist[start:end])
return result
def __setitem__(self, key, value):
"""Set a Card by indexing or by the keyword name."""
if isinstance (value, Card):
_key = self.index_of(key)
# only set if the value is different from the old one
if str(self[_key]) != str(value):
super(CardList, self).__setitem__(_key, value)
self._keylist[_key] = value.key.upper()
self.count_blanks()
self._mod = 1
else:
raise SyntaxError, "%s is not a Card" % str(value)
def __delitem__(self, key):
"""Delete a Card from the CardList."""
_key = self.index_of(key)
super(CardList, self).__delitem__(_key)
del self._keylist[_key] # update the keylist
self.count_blanks()
self._mod = 1
def count_blanks(self):
"""Find out how many blank cards are *directly* before the END card."""
for i in range(1, len(self)):
if str(self[-i]) != ' '*Card.length:
self._blanks = i - 1
break
def append(self, card, useblanks=1, bottom=0):
"""Append a Card to the CardList.
card: The Card to be appended.
useblanks: Use any *extra* blank cards? default=1.
If useblanks != 0, and if there are blank cards directly
before END, it will use this space first, instead of
appending after these blank cards, so the total space
will not increase (default). When useblanks == 0, the
card will be appended at the end, even if there are
blank cards in front of END.
bottom: If =0 (default) the card will be appended after the last
non-commentary card. If =1, the card will be appended
after the last non-blank card.
"""
if isinstance (card, Card):
nc = len(self) - self._blanks
i = nc - 1
if not bottom:
for i in range(nc-1, -1, -1): # locate last non-commentary card
if self[i].key not in Card._commentaryKeys:
break
super(CardList, self).insert(i+1, card)
self._keylist.insert(i+1, card.key.upper())
if useblanks:
self._use_blanks(card._ncards())
self.count_blanks()
self._mod = 1
else:
raise SyntaxError, "%s is not a Card" % str(card)
def _pos_insert(self, card, before, after, useblanks=1):
"""Insert a Card to the location specified by before or after.
The argument `before' takes precedence over `after' if both
specified. They can be either a keyword name or index.
"""
if before != None:
loc = self.index_of(before)
self.insert(loc, card, useblanks=useblanks)
elif after != None:
loc = self.index_of(after)
self.insert(loc+1, card, useblanks=useblanks)
def insert(self, pos, card, useblanks=1):
"""Insert a Card to the CardList.
pos: The position (index, keyword name will not be allowed) to
insert. The new card will be inserted before it.
card: The Card to be inserted.
useblanks: Use any *extra* blank cards? default=1.
If useblanks != 0, and if there are blank cards directly
before END, it will use this space first, instead of
appending after these blank cards, so the total space
will not increase (default). When useblanks == 0, the
card will be appended at the end, even if there are
blank cards in front of END.
"""
if isinstance (card, Card):
super(CardList, self).insert(pos, card)
self._keylist.insert(pos, card.key) # update the keylist
self.count_blanks()
if useblanks:
self._use_blanks(card._ncards())
self.count_blanks()
self._mod = 1
else:
raise SyntaxError, "%s is not a Card" % str(card)
def _use_blanks(self, how_many):
if self._blanks > 0:
for i in range(min(self._blanks, how_many)):
del self[-1] # it also deletes the keylist item
def keys(self):
"""Return a list of all keywords from the CardList."""
return map(lambda x: getattr(x,'key'), self)
def index_of(self, key, backward=0):
"""Get the index of a keyword in the CardList.
key: the keyword name (a string) or the index (an integer).
backward: search the index from the END, i.e. backward? default=0.
If backward = 1, search from the end.
"""
if isinstance(key, (int, long)):
return key
elif isinstance(key, str):
_key = key.strip().upper()
if _key[:8] == 'HIERARCH':
_key = _key[8:].strip()
_keylist = self._keylist
if backward:
_keylist = self._keylist[:] # make a copy
_keylist.reverse()
try:
_indx = _keylist.index(_key)
if backward:
_indx = len(_keylist) - _indx - 1
return _indx
except:
raise KeyError, 'Keyword %s not found.' % `key`
else:
raise KeyError, 'Illegal key data type %s' % type(key)
def copy(self):
"""Make a (deep)copy of the CardList."""
cards = [None]*len(self)
for i in range(len(self)):
cards[i]=Card('').fromstring(str(self[i]))
return CardList(cards)
def __repr__(self):
"""Format a list of cards into a string."""
block = ''
for card in self:
block = block + repr(card)
return block
def __str__(self):
"""Format a list of cards into a printable string."""
output = ''
for card in self:
output += str(card) + '\n'
return output[:-1]
# ----------------------------- HDU classes ------------------------------------
class _AllHDU:
"""Base class for all HDU (header data unit) classes."""
pass
class _CorruptedHDU(_AllHDU):
"""A Corrupted HDU class."""
""" This class is used when one or more mandatory Cards are
corrupted (unparsable), such as the 'BITPIX', 'NAXIS', or 'END' cards.
A corrupted HDU usually means that the data size cannot be
calculated or the 'END' card is not found. In the case of a
missing 'END' card, the Header may also contain the binary data(*).
(*) In future it may be possible to decipher where the last block
of the Header ends, but this task may be difficult when the
extension is a TableHDU containing ASCII data.
"""
def __init__(self, data=None, header=None):
self._file, self._offset, self._datLoc = None, None, None
self.header = header
self.data = data
self.name = None
def size(self):
"""Returns the size (in bytes) of the HDU's data part."""
self._file.seek(0, 2)
return self._file.tell() - self._datLoc
def _summary(self):
return "%-10s %-11s" % (self.name, "CorruptedHDU")
def verify(self):
pass
class _ValidHDU(_AllHDU, _Verify):
"""Base class for all HDUs which are not corrupted."""
# 0.6.5.5
def size(self):
"""Size (in bytes) of the data portion of the HDU."""
size = 0
naxis = self.header.get('NAXIS', 0)
if naxis > 0:
size = 1
for j in range(naxis):
size = size * self.header['NAXIS'+`j+1`]
bitpix = self.header['BITPIX']
gcount = self.header.get('GCOUNT', 1)
pcount = self.header.get('PCOUNT', 0)
size = abs(bitpix) * gcount * (pcount + size) / 8
return size
def copy(self):
"""Make a copy of the HDU, both header and data are copied."""
if self.data is not None:
_data = self.data.copy()
else:
_data = None
return self.__class__(data=_data, header=self.header.copy())
def writeto(self, name, output_verify='exception', clobber=False):
"""Write the HDU to a new file. This is a convenience method
to provide the user an easier output interface if only one HDU
needs to be written to a file.
name: output FITS file name to be written to.
output_verify: output verification option, default='exception'.
clobber: Overwrite the output file if exists, default = False.
"""
if isinstance(self, _ExtensionHDU):
hdulist = HDUList([PrimaryHDU(), self])
elif isinstance(self, PrimaryHDU):
hdulist = HDUList([self])
hdulist.writeto(name, output_verify, clobber=clobber)
def _verify(self, option='warn'):
_err = _ErrList([], unit='Card')
isValid = "val in [8, 16, 32, 64, -32, -64]"
# Verify location and value of mandatory keywords.
# Do the first card here, instead of in the respective HDU classes,
# so the checking is in order, in case of required cards in wrong order.
if isinstance(self, _ExtensionHDU):
firstkey = 'XTENSION'
firstval = self._xtn
else:
firstkey = 'SIMPLE'
firstval = True
self.req_cards(firstkey, '== 0', '', firstval, option, _err)
self.req_cards('BITPIX', '== 1', _isInt+" and "+isValid, 8, option, _err)
self.req_cards('NAXIS', '== 2', _isInt+" and val >= 0 and val <= 999", 0, option, _err)
naxis = self.header.get('NAXIS', 0)
if naxis < 1000:
for j in range(3, naxis+3):
self.req_cards('NAXIS'+`j-2`, '== '+`j`, _isInt+" and val>= 0", 1, option, _err)
# verify each card
for _card in self.header.ascard:
_err.append(_card._verify(option))
return _err
def req_cards(self, keywd, pos, test, fix_value, option, errlist):
"""Check the existence, location, and value of a required Card."""
"""If pos = None, it can be anywhere. If the card does not exist,
the new card will have the fix_value as its value when created.
Also check the card's value by using the "test" argument.
"""
_err = errlist
fix = ''
cards = self.header.ascard
try:
_index = cards.index_of(keywd)
except:
_index = None
fixable = fix_value is not None
# if pos is a string, it must be of the syntax of "> n",
# where n is an int
if isinstance(pos, str):
_parse = pos.split()
if _parse[0] in ['>=', '==']:
insert_pos = eval(_parse[1])
# if the card does not exist
if _index is None:
err_text = "'%s' card does not exist." % keywd
fix_text = "Fixed by inserting a new '%s' card." % keywd
if fixable:
# use repr to accommodate both string and non-string types
# Boolean is also OK in this constructor
_card = "Card('%s', %s)" % (keywd, `fix_value`)
fix = "self.header.ascard.insert(%d, %s)" % (insert_pos, _card)
_err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix, fixable=fixable))
else:
# if the supposed location is specified
if pos is not None:
test_pos = '_index '+ pos
if not eval(test_pos):
err_text = "'%s' card at the wrong place (card %d)." % (keywd, _index)
fix_text = "Fixed by moving it to the right place (card %d)." % insert_pos
fix = "_cards=self.header.ascard; dummy=_cards[%d]; del _cards[%d];_cards.insert(%d, dummy)" % (_index, _index, insert_pos)
_err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix))
# if value checking is specified
if test:
val = self.header[keywd]
if not eval(test):
err_text = "'%s' card has invalid value '%s'." % (keywd, val)
fix_text = "Fixed by setting a new value '%s'." % fix_value
if fixable:
fix = "self.header['%s'] = %s" % (keywd, `fix_value`)
_err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix, fixable=fixable))
return _err
class _TempHDU(_ValidHDU):
"""Temporary HDU, used when the file is first opened. This is to
speed up the open. Any header will not be initialized till the
HDU is accessed.
"""
def _getname(self):
"""Get the extname and extver from the header."""
re_extname = re.compile(r"EXTNAME\s*=\s*'([ -&(-~]*)'")
re_extver = re.compile(r"EXTVER\s*=\s*(\d+)")
mo = re_extname.search(self._raw)
if mo:
name = mo.group(1).rstrip()
else:
name = ''
mo = re_extver.search(self._raw)
if mo:
extver = int(mo.group(1))
else:
extver = 1
return name, extver
def _getsize(self, block):
"""Get the size from the first block of the HDU."""
re_simple = re.compile(r'SIMPLE =\s*')
re_bitpix = re.compile(r'BITPIX =\s*(-?\d+)')
re_naxis = re.compile(r'NAXIS =\s*(\d+)')
re_naxisn = re.compile(r'NAXIS(\d) =\s*(\d+)')
re_gcount = re.compile(r'GCOUNT =\s*(-?\d+)')
re_pcount = re.compile(r'PCOUNT =\s*(-?\d+)')
re_groups = re.compile(r'GROUPS =\s*(T)')
simple = re_simple.search(block[:80])
mo = re_bitpix.search(block)
if mo is not None:
bitpix = int(mo.group(1))
else:
raise ValueError("BITPIX not found where expected")
mo = re_gcount.search(block)
if mo is not None:
gcount = int(mo.group(1))
else:
gcount = 1
mo = re_pcount.search(block)
if mo is not None:
pcount = int(mo.group(1))
else:
pcount = 0
mo = re_groups.search(block)
if mo and simple:
groups = 1
else:
groups = 0
mo = re_naxis.search(block)
if mo is not None:
naxis = int(mo.group(1))
pos = mo.end(0)
else:
raise ValueError("NAXIS not found where expected")
if naxis == 0:
datasize = 0
else:
dims = [0]*naxis
for i in range(naxis):
mo = re_naxisn.search(block, pos)
pos = mo.end(0)
dims[int(mo.group(1))-1] = int(mo.group(2))
datasize = reduce(operator.mul, dims[groups:])
size = abs(bitpix) * gcount * (pcount + datasize) / 8
if simple and not groups:
name = 'PRIMARY'
else:
name = ''
return size, name
def setupHDU(self):
"""Read one FITS HDU, data portions are not actually read here, but
the beginning locations are computed.
"""
_cardList = []
_keyList = []
blocks = self._raw
if (len(blocks) % _blockLen) != 0:
raise IOError, 'Header size is not multiple of %d: %d' % (_blockLen, len(blocks))
elif (blocks[:8] not in ['SIMPLE ', 'XTENSION']):
raise IOError, 'Block does not begin with SIMPLE or XTENSION'
for i in range(0, len(blocks), Card.length):
_card = Card('').fromstring(blocks[i:i+Card.length])
_key = _card.key
if _key == 'END':
break
else:
_cardList.append(_card)
_keyList.append(_key)
# Deal with CONTINUE cards
# if a long string has CONTINUE cards, the "Card" is considered
# to be more than one 80-char "physical" cards.
_max = _keyList.count('CONTINUE')
_start = 0
for i in range(_max):
_where = _keyList[_start:].index('CONTINUE') + _start
for nc in range(1, _max+1):
if _where+nc >= len(_keyList):
break
if _cardList[_where+nc]._cardimage[:10].upper() != 'CONTINUE ':
break
# combine contiguous CONTINUE cards with their parent card
if nc > 0:
_longstring = _cardList[_where-1]._cardimage
for c in _cardList[_where:_where+nc]:
_longstring += c._cardimage
_cardList[_where-1] = _Card_with_continue().fromstring(_longstring)
del _cardList[_where:_where+nc]
del _keyList[_where:_where+nc]
_start = _where
# if not the real CONTINUE card, skip to the next card to search
# to avoid starting at the same CONTINUE card
else:
_start = _where + 1
if _keyList[_start:].count('CONTINUE') == 0:
break
# construct the Header object, using the cards.
try:
header = Header(CardList(_cardList, keylist=_keyList))
hdu = header._hdutype(data=DELAYED, header=header)
# pass these attributes
hdu._file = self._file
hdu._hdrLoc = self._hdrLoc
hdu._datLoc = self._datLoc
hdu._datSpan = self._datSpan
hdu._ffile = self._ffile
hdu.name = self.name
hdu._extver = self._extver
hdu._new = 0
hdu.header._mod = 0
hdu.header.ascard._mod = 0
except:
pass
return hdu
class _ExtensionHDU(_ValidHDU):
"""An extension HDU class.
This class is the base class for the TableHDU, ImageHDU, and
BinTableHDU classes.
"""
def __init__(self, data=None, header=None):
self._file, self._offset, self._datLoc = None, None, None
self.header = header
self.data = data
self._xtn = ' '
def __setattr__(self, attr, value):
"""Set an HDU attribute."""
if attr == 'name' and value:
if not isinstance(value, str):
raise TypeError, 'bad value type'
value = value.upper()
if self.header.has_key('EXTNAME'):
self.header['EXTNAME'] = value
else:
self.header.ascard.append(Card('EXTNAME', value, 'extension name'))
self.__dict__[attr] = value
def _verify(self, option='warn'):
_err = _ValidHDU._verify(self, option=option)
# Verify location and value of mandatory keywords.
naxis = self.header.get('NAXIS', 0)
self.req_cards('PCOUNT', '== '+`naxis+3`, _isInt+" and val >= 0", 0, option, _err)
self.req_cards('GCOUNT', '== '+`naxis+4`, _isInt+" and val == 1", 1, option, _err)
return _err
# 0.8.8
def _iswholeline(indx, naxis):
if isinstance(indx, (int, long)):
if indx >= 0 and indx < naxis:
if naxis > 1:
return _SinglePoint(1, indx)
elif naxis == 1:
return _OnePointAxis(1, 0)
else:
raise IndexError, 'Index %s out of range.' % indx
elif isinstance(indx, slice):
indx = _normalize_slice(indx, naxis)
if (indx.start == 0) and (indx.stop == naxis) and (indx.step == 1):
return _WholeLine(naxis, 0)
else:
if indx.step == 1:
return _LineSlice(indx.stop-indx.start, indx.start)
else:
return _SteppedSlice((indx.stop-indx.start)/indx.step, indx.start)
else:
raise IndexError, 'Illegal index %s' % indx
def _normalize_slice(input, naxis):
"""Set the slice's start/stop in the regular range."""
def _normalize(indx, npts):
if indx < -npts:
indx = 0
elif indx < 0:
indx += npts
elif indx > npts:
indx = npts
return indx
_start = input.start
if _start is None:
_start = 0
elif isinstance(_start, (int, long)):
_start = _normalize(_start, naxis)
else:
raise IndexError, 'Illegal slice %s, start must be integer.' % input
_stop = input.stop
if _stop is None:
_stop = naxis
elif isinstance(_stop, (int, long)):
_stop = _normalize(_stop, naxis)
else:
raise IndexError, 'Illegal slice %s, stop must be integer.' % input
if _stop < _start:
raise IndexError, 'Illegal slice %s, stop < start.' % input
_step = input.step
if _step is None:
_step = 1
elif isinstance(_step, (int, long)):
if _step <= 0:
raise IndexError, 'Illegal slice %s, step must be positive.' % input
else:
raise IndexError, 'Illegal slice %s, step must be integer.' % input
return slice(_start, _stop, _step)
class _KeyType:
def __init__(self, npts, offset):
self.npts = npts
self.offset = offset
class _WholeLine(_KeyType):
pass
class _SinglePoint(_KeyType):
pass
class _OnePointAxis(_KeyType):
pass
class _LineSlice(_KeyType):
pass
class _SteppedSlice(_KeyType):
pass
class Section:
"""Image section."""
def __init__(self, hdu):
self.hdu = hdu
def __getitem__(self, key):
dims = []
if not isinstance(key, tuple):
key = (key,)
naxis = self.hdu.header['NAXIS']
if naxis < len(key):
raise IndexError, 'too many indices.'
elif naxis > len(key):
key = key + (slice(None),) * (naxis-len(key))
offset = 0
for i in range(naxis):
_naxis = self.hdu.header['NAXIS'+`naxis-i`]
indx = _iswholeline(key[i], _naxis)
offset = offset * _naxis + indx.offset
# all elements after the first WholeLine must be WholeLine or
# OnePointAxis
if isinstance(indx, (_WholeLine, _LineSlice)):
dims.append(indx.npts)
break
elif isinstance(indx, _SteppedSlice):
raise IndexError, 'Subsection data must be contiguous.'
for j in range(i+1,naxis):
_naxis = self.hdu.header['NAXIS'+`naxis-j`]
indx = _iswholeline(key[j], _naxis)
dims.append(indx.npts)
if not isinstance(indx, _WholeLine):
raise IndexError, 'Subsection data is not contiguous.'
# the offset needs to multiply the length of all remaining axes
else:
offset *= _naxis
if dims == []:
dims = [1]
npt = 1
for n in dims:
npt *= n
# Now, get the data (does not include bscale/bzero for now XXX)
_bitpix = self.hdu.header['BITPIX']
code = _ImageBaseHDU.NumCode[_bitpix]
self.hdu._file.seek(self.hdu._datLoc+offset*abs(_bitpix)/8)
raw_data = num.fromfile(self.hdu._file, type=code, shape=dims)
raw_data._byteorder = 'big'
return raw_data
class _ImageBaseHDU(_ValidHDU):
"""FITS image HDU base class."""
"""Attributes:
header: image header
data: image data
_file: file associated with array (None)
_datLoc: starting byte location of data block in file (None)
"""
# mappings between FITS and numarray typecodes
NumCode = {8:'UInt8', 16:'Int16', 32:'Int32', 64:'Int64', -32:'Float32', -64:'Float64'}
ImgCode = {'UInt8':8, 'Int16':16, 'Int32':32, 'Int64':64, 'Float32':-32, 'Float64':-64}
def __init__(self, data=None, header=None):
self._file, self._datLoc = None, None
if header is not None:
if not isinstance(header, Header):
raise ValueError, "header must be a Header object"
if data is DELAYED:
# this should never happen
if header is None:
raise ValueError, "No header to setup HDU."
# if the file is read the first time, no need to copy, and keep it unchanged
else:
self.header = header
else:
# construct a list of cards of minimal header
if isinstance(self, _ExtensionHDU):
c0 = Card('XTENSION', 'IMAGE', 'Image extension')
else:
c0 = Card('SIMPLE', True, 'conforms to FITS standard')
_list = CardList([
c0,
Card('BITPIX', 8, 'array data type'),
Card('NAXIS', 0, 'number of array dimensions'),
])
if isinstance(self, GroupsHDU):
_list.append(Card('GROUPS', True, 'has groups'))
if isinstance(self, (_ExtensionHDU, GroupsHDU)):
_list.append(Card('PCOUNT', 0, 'number of parameters'))
_list.append(Card('GCOUNT', 1, 'number of groups'))
if header is not None:
hcopy = header.copy()
hcopy._strip()
_list.extend(hcopy.ascardlist())
self.header = Header(_list)
self._bzero = self.header.get('BZERO', 0)
self._bscale = self.header.get('BSCALE', 1)
if (data is DELAYED): return
self.data = data
# update the header
self.update_header()
self._bitpix = self.header['BITPIX']
# delete the keywords BSCALE and BZERO
del self.header['BSCALE']
del self.header['BZERO']
def update_header(self):
"""Update the header keywords to agree with the data."""
old_naxis = self.header.get('NAXIS', 0)
if isinstance(self.data, GroupData):
self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.data.type()]
axes = list(self.data.data.getshape())[1:]
axes.reverse()
axes = [0] + axes
elif isinstance(self.data, num.NumArray):
self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.type()]
axes = list(self.data.getshape())
axes.reverse()
elif self.data is None:
axes = []
else:
raise ValueError, "incorrect array type"
self.header['NAXIS'] = len(axes)
# add NAXISi if it does not exist
for j in range(len(axes)):
try:
self.header['NAXIS'+`j+1`] = axes[j]
except:
if (j == 0):
_after = 'naxis'
else :
_after = 'naxis'+`j`
self.header.update('naxis'+`j+1`, axes[j], after = _after)
# delete extra NAXISi's
for j in range(len(axes)+1, old_naxis+1):
try:
del self.header.ascard['NAXIS'+`j`]
except KeyError:
pass
if isinstance(self.data, GroupData):
self.header.update('GROUPS', True, after='NAXIS'+`len(axes)`)
self.header.update('PCOUNT', len(self.data.parnames), after='GROUPS')
self.header.update('GCOUNT', len(self.data), after='PCOUNT')
npars = len(self.data.parnames)
(_scale, _zero) = self.data._get_scale_factors(npars)[3:5]
if _scale:
self.header.update('BSCALE', self.data._coldefs.bscales[npars])
if _zero:
self.header.update('BZERO', self.data._coldefs.bzeros[npars])
for i in range(npars):
self.header.update('PTYPE'+`i+1`, self.data.parnames[i])
(_scale, _zero) = self.data._get_scale_factors(i)[3:5]
if _scale:
self.header.update('PSCAL'+`i+1`, self.data._coldefs.bscales[i])
if _zero:
self.header.update('PZERO'+`i+1`, self.data._coldefs.bzeros[i])
def __getattr__(self, attr):
"""Get the data attribute."""
if attr == 'section':
return Section(self)
elif attr == 'data':
self.__dict__[attr] = None
if self.header['NAXIS'] > 0:
_bitpix = self.header['BITPIX']
self._file.seek(self._datLoc)
if isinstance(self, GroupsHDU):
dims = self.size()*8/abs(_bitpix)
else:
dims = self._dimShape()
code = _ImageBaseHDU.NumCode[self.header['BITPIX']]
if self._ffile.memmap:
_mmap = self._ffile._mm[self._datLoc:self._datLoc+self._datSpan]
raw_data = num.array(_mmap, type=code, shape=dims)
else:
raw_data = num.fromfile(self._file, type=code, shape=dims)
raw_data._byteorder = 'big'
if (self._bzero != 0 or self._bscale != 1):
if _bitpix > 0: # scale integers to Float32
self.data = num.array(raw_data, type=num.Float32)
else: # floating point cases
if self._ffile.memmap:
self.data = raw_data.copy()
# if not memmap, use the space already in memory
else:
self.data = raw_data
if self._bscale != 1:
num.multiply(self.data, self._bscale, self.data)
if self._bzero != 0:
self.data += self._bzero
# delete the keywords BSCALE and BZERO after scaling
del self.header['BSCALE']
del self.header['BZERO']
self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.type()]
else:
self.data = raw_data
try:
return self.__dict__[attr]
except KeyError:
raise AttributeError(attr)
def _dimShape(self):
"""Returns a tuple of image dimensions, reverse the order of NAXIS."""
naxis = self.header['NAXIS']
axes = naxis*[0]
for j in range(naxis):
axes[j] = self.header['NAXIS'+`j+1`]
axes.reverse()
return tuple(axes)
def _summary(self):
"""Summarize the HDU: name, dimensions, and formats."""
class_name = str(self.__class__)
type = class_name[class_name.rfind('.')+1:]
# if data is touched, use data info.
if 'data' in dir(self):
if self.data is None:
_shape, _format = (), ''
else:
# the shape will be in the order of NAXIS's which is the
# reverse of the numarray shape
if isinstance(self, GroupsHDU):
_shape = list(self.data.data.getshape())[1:]
_format = `self.data._parent.field(0).type()`
else:
_shape = list(self.data.getshape())
_format = `self.data.type()`
_shape.reverse()
_shape = tuple(_shape)
_format = _format[_format.rfind('.')+1:]
# if data is not touched yet, use header info.
else:
_shape = ()
for j in range(self.header['NAXIS']):
if isinstance(self, GroupsHDU) and j == 0:
continue
_shape += (self.header['NAXIS'+`j+1`],)
_format = self.NumCode[self.header['BITPIX']]
if isinstance(self, GroupsHDU):
_gcount = ' %d Groups %d Parameters' % (self.header['GCOUNT'], self.header['PCOUNT'])
else:
_gcount = ''
return "%-10s %-11s %5d %-12s %s%s" % \
(self.name, type, len(self.header.ascard), _shape, _format, _gcount)
def scale(self, type=None, option="old", bscale=1, bzero=0):
"""Scale image data by using BSCALE/BZERO.
Call to this method will scale self.data and update the keywords
of BSCALE and BZERO in self.header. This method should only be
used right before writing to the output file, as the data will be
scaled and is therefore not very usable after the call.
type (string): destination data type, use numarray attribute format,
(e.g. 'UInt8', 'Int16', 'Float32' etc.). If is None, use the
current data type.
option: how to scale the data: if "old", use the original BSCALE
and BZERO values when the data was read/created. If
"minmax", use the minimum and maximum of the data to scale.
The option will be overwritten by any user specified
bscale/bzero values.
bscale/bzero: user specified BSCALE and BZERO values.
"""
if self.data is None:
return
# Determine the destination (numarray) data type
if type is None:
type = self.NumCode[self._bitpix]
_type = getattr(num, type)
# Determine how to scale the data
# bscale and bzero takes priority
if (bscale != 1 or bzero !=0):
_scale = bscale
_zero = bzero
else:
if option == 'old':
_scale = self._bscale
_zero = self._bzero
elif option == 'minmax':
if isinstance(_type, num.FloatingType):
_scale = 1
_zero = 0
else:
# flatten the shape temporarily to save memory
dims = self.data.getshape()
self.data.setshape(self.data.nelements())
min = num.minimum.reduce(self.data)
max = num.maximum.reduce(self.data)
self.data.setshape(dims)
if `_type` == 'UInt8': # UInt8 case
_zero = min
_scale = (max - min) / (2.**8 - 1)
else:
_zero = (max + min) / 2.
# throw away -2^N
_scale = (max - min) / (2.**(8*_type.bytes) - 2)
# Do the scaling
if _zero != 0:
self.data += -_zero # 0.9.6.3 to avoid out of range error for BZERO = +32768
self.header.update('BZERO', _zero)
else:
del self.header['BZERO']
if _scale != 1:
self.data /= _scale
self.header.update('BSCALE', _scale)
else:
del self.header['BSCALE']
if self.data._type != _type:
self.data = num.array(num.around(self.data), type=_type) #0.7.7.1
class PrimaryHDU(_ImageBaseHDU):
"""FITS primary HDU class."""
def __init__(self, data=None, header=None):
"""Construct a primary HDU.
data: the data in the HDU, default=None.
header: the header to be used (as a template), default=None.
If header=None, a minimal Header will be provided.
"""
_ImageBaseHDU.__init__(self, data=data, header=header)
self.name = 'PRIMARY'
# insert the keywords EXTEND
if header is None:
dim = `self.header['NAXIS']`
if dim == '0':
dim = ''
self.header.update('EXTEND', True, after='NAXIS'+dim)
class ImageHDU(_ExtensionHDU, _ImageBaseHDU):
"""FITS image extension HDU class."""
def __init__(self, data=None, header=None, name=None):
"""Construct an image HDU.
data: the data in the HDU, default=None.
header: the header to be used (as a template), default=None.
If header=None, a minimal Header will be provided.
name: The name of the HDU; it will be the value of the keyword EXTNAME,
default=None.
"""
# no need to run _ExtensionHDU.__init__ since it is not doing anything.
_ImageBaseHDU.__init__(self, data=data, header=header)
self._xtn = 'IMAGE'
self.header._hdutype = ImageHDU
# insert the required keywords PCOUNT and GCOUNT
dim = `self.header['NAXIS']`
if dim == '0':
dim = ''
# set extension name
if (name is None) and self.header.has_key('EXTNAME'):
name = self.header['EXTNAME']
self.name = name
def _verify(self, option='warn'):
"""ImageHDU verify method."""
_err = _ExtensionHDU._verify(self, option=option)
self.req_cards('PCOUNT', None, _isInt+" and val == 0", 0, option, _err)
return _err
class GroupsHDU(PrimaryHDU):
"""FITS Random Groups HDU class."""
_dict = {8:'B', 16:'I', 32:'J', 64:'K', -32:'E', -64:'D'}
def __init__(self, data=None, header=None, name=None):
PrimaryHDU.__init__(self, data=data, header=header)
self.header._hdutype = GroupsHDU
self.name = name
if self.header['NAXIS'] <= 0:
self.header['NAXIS'] = 1
self.header.update('NAXIS1', 0, after='NAXIS')
def __getattr__(self, attr):
"""Get the 'data' or 'columns' attribute. The data of random group
FITS file will be like a binary table's data.
"""
if attr == 'data': # same code as in _TableBaseHDU
size = self.size()
if size:
self._file.seek(self._datLoc)
data = GroupData(_get_tbdata(self))
data._coldefs = self.columns
data.parnames = self.columns._pnames
else:
data = None
self.__dict__[attr] = data
elif attr == 'columns':
_cols = []
_pnames = []
_pcount = self.header['PCOUNT']
_format = GroupsHDU._dict[self.header['BITPIX']]
for i in range(self.header['PCOUNT']):
_bscale = self.header.get('PSCAL'+`i+1`, 1)
_bzero = self.header.get('PZERO'+`i+1`, 0)
_pnames.append(self.header['PTYPE'+`i+1`].lower())
_cols.append(Column(name='c'+`i+1`, format = _format, bscale = _bscale, bzero = _bzero))
data_shape = self._dimShape()[:-1]
dat_format = `int(num.array(data_shape).sum())` + _format
_bscale = self.header.get('BSCALE', 1)
_bzero = self.header.get('BZERO', 0)
_cols.append(Column(name='data', format = dat_format, bscale = _bscale, bzero = _bzero))
_coldefs = ColDefs(_cols)
_coldefs._shape = self.header['GCOUNT']
_coldefs._dat_format = _fits2rec[_format]
_coldefs._pnames = _pnames
self.__dict__[attr] = _coldefs
elif attr == '_theap':
self.__dict__[attr] = 0
try:
return self.__dict__[attr]
except KeyError:
raise AttributeError(attr)
# 0.6.5.5
def size(self):
"""Returns the size (in bytes) of the HDU's data part."""
size = 0
naxis = self.header.get('NAXIS', 0)
# for random group image, NAXIS1 should be 0, so we skip NAXIS1.
if naxis > 1:
size = 1
for j in range(1, naxis):
size = size * self.header['NAXIS'+`j+1`]
bitpix = self.header['BITPIX']
gcount = self.header.get('GCOUNT', 1)
pcount = self.header.get('PCOUNT', 0)
size = abs(bitpix) * gcount * (pcount + size) / 8
return size
def _verify(self, option='warn'):
_err = PrimaryHDU._verify(self, option=option)
# Verify locations and values of mandatory keywords.
self.req_cards('NAXIS', '== 2', _isInt+" and val >= 1 and val <= 999", 1, option, _err)
self.req_cards('NAXIS1', '== 3', _isInt+" and val == 0", 0, option, _err)
_after = self.header['NAXIS'] + 3
# if the card EXTEND exists, must be after it.
try:
_dum = self.header['EXTEND']
#_after += 1
except:
pass
_pos = '>= '+`_after`
self.req_cards('GCOUNT', _pos, _isInt, 1, option, _err)
self.req_cards('PCOUNT', _pos, _isInt, 0, option, _err)
self.req_cards('GROUPS', _pos, 'val == True', True, option, _err)
return _err
# --------------------------Table related code----------------------------------
# lists of column/field definition common names and keyword names, make
# sure to preserve the one-to-one correspondence when updating the list(s).
# Use lists, instead of dictionaries so the names can be displayed in a
# preferred order.
_commonNames = ['name', 'format', 'unit', 'null', 'bscale', 'bzero', 'disp', 'start', 'dim']
_keyNames = ['TTYPE', 'TFORM', 'TUNIT', 'TNULL', 'TSCAL', 'TZERO', 'TDISP', 'TBCOL', 'TDIM']
# mapping from TFORM data type to numarray data type (code)
_booltype = 'i1'
_fits2rec = {'L':_booltype, 'B':'u1', 'I':'i2', 'E':'f4', 'D':'f8', 'J':'i4', 'A':'a', 'C':'c8', 'M':'c16', 'K':'i8'}
# the reverse dictionary of the above
_rec2fits = {}
for key in _fits2rec.keys():
_rec2fits[_fits2rec[key]]=key
class _FormatX(str):
"""For X format in binary tables."""
pass
class _FormatP(str):
"""For P format in variable length table."""
pass
# TFORM regular expression
_tformat_re = re.compile(r'(?P<repeat>^[0-9]*)(?P<dtype>[A-Za-z])(?P<option>[!-~]*)')
# table definition keyword regular expression
_tdef_re = re.compile(r'(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)')
def _parse_tformat(tform):
"""Parse the TFORM value into repeat, data type, and option."""
try:
(repeat, dtype, option) = _tformat_re.match(tform.strip()).groups()
except:
print 'Format "%s" is not recognized.' % tform
if repeat == '': repeat = 1
else: repeat = eval(repeat)
return (repeat, dtype, option)
def _convert_format(input_format, reverse=0):
"""Convert FITS format spec to record format spec. Do the opposite
if reverse = 1.
"""
fmt = input_format
(repeat, dtype, option) = _parse_tformat(fmt)
if reverse == 0:
if dtype in _fits2rec.keys(): # FITS format
if dtype == 'A':
output_format = _fits2rec[dtype]+`repeat`
# to accommodate both the ASCII table and binary table column
# format spec, i.e. A7 in ASCII table is the same as 7A in
# binary table, so both will produce 'a7'.
if fmt.lstrip()[0] == 'A' and option != '':
output_format = _fits2rec[dtype]+`int(option)` # make sure option is integer
else:
_repeat = ''
if repeat != 1:
_repeat = `repeat`
output_format = _repeat+_fits2rec[dtype]
elif dtype == 'X':
nbytes = ((repeat-1) / 8) + 1
# use an array, even if it is only ONE u1 (i.e. use tuple always)
output_format = _FormatX(`(nbytes,)`+'u1')
output_format._nx = repeat
elif dtype == 'P':
output_format = _FormatP('2i4')
output_format._dtype = _fits2rec[option[0]]
elif dtype == 'F':
output_format = 'f8'
else:
raise ValueError, "Illegal format %s" % fmt
else:
if dtype == 'a':
output_format = option+_rec2fits[dtype]
elif isinstance(dtype, _FormatX):
print 'X format'
elif dtype+option in _rec2fits.keys(): # record format
_repeat = ''
if repeat != 1:
_repeat = `repeat`
output_format = _repeat+_rec2fits[dtype+option]
else:
raise ValueError, "Illegal format %s" % fmt
return output_format
def _convert_ASCII_format(input_format):
"""Convert ASCII table format spec to record format spec. """
ascii2rec = {'A':'a', 'I':'i4', 'F':'f4', 'E':'f4', 'D':'f8'}
_re = re.compile(r'(?P<dtype>[AIFED])(?P<width>[0-9]*)')
# Parse the TFORM value into data type and width.
try:
(dtype, width) = _re.match(input_format.strip()).groups()
dtype = ascii2rec[dtype]
if width == '':
width = None
else:
width = eval(width)
except:
raise ValueError, 'Illegal format `%s` for ASCII table.' % input_format
return (dtype, width)
def _get_index(nameList, key):
"""
Get the index of the key in the name list.
The key can be an integer or string. If integer, it is the index
in the list. If string,
(a) Field (column) names are case sensitive: you can have two
different columns called 'abc' and 'ABC' respectively.
(b) When you *refer* to a field (presumably with the field method),
it will try to match the exact name first, so in the example in
(a), field('abc') will get the first field, and field('ABC') will
get the second field.
If there is no exact name matched, it will try to match the name
with case insensitivity. So, in the last example, field('Abc')
will cause an exception since there is no unique mapping. If
there is a field named "XYZ" and no other field name is a case
variant of "XYZ", then field('xyz'), field('Xyz'), etc. will get
this field.
"""
if isinstance(key, (int, long)):
indx = int(key)
elif isinstance(key, str):
# try to find exact match first
try:
indx = nameList.index(key.rstrip())
except ValueError:
# try to match case-insensitively
_key = key.lower().rstrip()
_list = map(lambda x: x.lower().rstrip(), nameList)
_count = operator.countOf(_list, _key) # occurrence of _key in _list
if _count == 1:
indx = _list.index(_key)
elif _count == 0:
raise NameError, "Key '%s' does not exist." % key
else: # multiple match
raise NameError, "Ambiguous key name '%s'." % key
else:
raise NameError, "Illegal key '%s'." % `key`
return indx
def _unwrapx(input, output, nx):
"""Unwrap the X format column into a Boolean array.
input: input Uint8 array of shape (s, nbytes)
output: output Boolean array of shape (s, nx)
nx: number of bits
"""
pow2 = [128, 64, 32, 16, 8, 4, 2, 1]
nbytes = ((nx-1) / 8) + 1
for i in range(nbytes):
_min = i*8
_max = min((i+1)*8, nx)
for j in range(_min, _max):
num.bitwise_and(input[...,i], pow2[j-i*8], output[...,j])
def _wrapx(input, output, nx):
"""Wrap the X format column Boolean array into an UInt8 array.
input: input Boolean array of shape (s, nx)
output: output Uint8 array of shape (s, nbytes)
nx: number of bits
"""
output[...] = 0 # reset the output
nbytes = ((nx-1) / 8) + 1
unused = nbytes*8 - nx
for i in range(nbytes):
_min = i*8
_max = min((i+1)*8, nx)
for j in range(_min, _max):
if j != _min:
num.lshift(output[...,i], 1, output[...,i])
num.add(output[...,i], input[...,j], output[...,i])
# shift the unused bits
num.lshift(output[...,i], unused, output[...,i])
def _makep(input, desp_output, dtype):
"""Construct the P format column array, both the data descriptors and
the data. It returns the output "data" array of data type dtype.
The descriptor location will have a zero offset for all columns
after this call. The final offset will be calculated when the file
is written.
input: input object array
desp_output: output "descriptor" array of data type 2Int32
dtype: data type of the variable array
"""
_offset = 0
data_output = _VLF([None]*len(input))
data_output._dtype = dtype
if dtype == 'a':
_nbytes = 1
else:
_nbytes = num.getType(dtype).bytes
for i in range(len(input)):
if dtype == 'a':
data_output[i] = chararray.array(input[i], itemsize=1)
else:
data_output[i] = num.array(input[i], type=dtype)
desp_output[i,0] = len(data_output[i])
desp_output[i,1] = _offset
_offset += len(data_output[i]) * _nbytes
return data_output
class _VLF(objects.ObjectArray):
"""variable length field object."""
def __init__(self, input):
"""
input: a sequence of variable-sized elements.
"""
objects.ObjectArray.__init__(self, input)
self._max = 0
def __setitem__(self, key, value):
"""To make sure the new item has consistent data type to avoid
misalignment.
"""
if isinstance(value, num.NumArray) and value.type() == self._dtype:
pass
elif isinstance(value, chararray.CharArray) and value.itemsize() == 1:
pass
elif self._dtype == 'a':
value = chararray.array(value, itemsize=1)
else:
value = num.array(value, type=self._dtype)
objects.ObjectArray.__setitem__(self, key, value)
self._max = max(self._max, len(value))
class Column:
"""Column class which contains the definition of one column, e.g.
ttype, tform, etc. and the array. Does not support theap yet.
"""
def __init__(self, name=None, format=None, unit=None, null=None, \
bscale=None, bzero=None, disp=None, start=None, \
dim=None, array=None):
"""Construct a Column by specifying attributes. All attributes
except format can be optional.
name: column name, corresponding to TTYPE keyword
format: column format, corresponding to TFORM keyword
unit: column unit, corresponding to TUNIT keyword
null: null value, corresponding to TNULL keyword
bscale: bscale value, corresponding to TSCAL keyword
bzero: bzero value, corresponding to TZERO keyword
disp: display format, corresponding to TDISP keyword
start: column starting position (ASCII table only),
corresponding to TBCOL keyword
dim: column dimension corresponding to TDIM keyword
"""
# any of the input argument (except array) can be a Card or just
# a number/string
for cname in _commonNames:
value = eval(cname) # get the argument's value
keyword = _keyNames[_commonNames.index(cname)]
if isinstance(value, Card):
setattr(self, cname, value.value)
else:
setattr(self, cname, value)
# if the column data is not an NDArray, convert it to one, i.e.
# input arrays can be just a list or tuple, not required to be NDArray
if format is not None:
# check format
try:
# legit FITS format? convert to record format (e.g. '3J'->'3i4')
recfmt = _convert_format(format)
except:
try:
# legit RecArray format?
recfmt = format
format = _convert_format(recfmt, reverse=1)
except:
raise ValueError, "Illegal format `%s`." % format
self.format = format
# does not include Object array because there is no guarantee
# the elements in the object array are consistent.
if not isinstance(array, (num.NumArray, chararray.CharArray, Delayed)):
try: # try to convert to a numarray first
array = num.array(array)
except:
try: # then try to convert it to a string array
array = chararray.array(array, itemsize=eval(recfmt[1:]))
# then try variable length array
except:
if isinstance(recfmt, _FormatP):
try:
_func = lambda x: num.array(x, type=recfmt._dtype)
array = _VLF(map(_func, array))
except:
try:
# this handles ['abc'] and [['a','b','c']]
# equally, beautiful!
_func = lambda x: chararray.array(x, itemsize=1)
array = _VLF(map(_func, array))
except:
raise ValueError, "Inconsistent input data array: %s" % array
array._dtype = recfmt._dtype
else:
raise ValueError, "Data is inconsistent with the format `%s`." % format
else:
raise ValueError, "Must specify format to construct Column"
# scale the array back to storage values if there is bscale/bzero
if isinstance(array, num.NumArray):
# boolean needs to be scaled too
if recfmt == _booltype:
_out = num.zeros(array.shape, type=recfmt)
num.where(array==0, ord('F'), ord('T'), _out)
array = _out
# make a copy if scaled, so as not to corrupt the original array
if bzero not in ['', None, 0] or bscale not in ['', None, 1]:
array = array.copy()
if bzero not in ['', None, 0]:
array += -bzero
if bscale not in ['', None, 1]:
array /= bscale
self.array = array
def __repr__(self):
text = ''
for cname in _commonNames:
value = getattr(self, cname)
if value != None:
text += cname + ' = ' + `value` + '\n'
return text[:-1]
def copy(self):
tmp = Column(format='I') # just use a throw-away format
tmp.__dict__=self.__dict__.copy()
return tmp
class ColDefs(object):
"""Column definitions class. It has attributes corresponding to the
Column attributes (e.g. ColDefs has the attribute .names while Column
has .name), Each attribute in ColDefs is a list of corresponding
attribute values from all Columns.
"""
def __init__(self, input, tbtype='BinTableHDU'):
"""input: a list of Columns, an (table) HDU
tbtype: which table HDU, 'BinTableHDU' (default) or
'TableHDU' (text table).
"""
ascii_fmt = {'A':'A1', 'I':'I10', 'E':'E14.6', 'F':'F16.7', 'D':'D24.16'}
self._tbtype = tbtype
if isinstance(input, ColDefs):
self.data = [col.copy() for col in input.data]
# if the input is a list of Columns
elif isinstance(input, (list, tuple)):
for col in input:
if not isinstance(col, Column):
raise "Element %d in the ColDefs input is not a Column." % input.index(col)
self.data = [col.copy() for col in input]
# if the format of an ASCII column has no width, add one
if tbtype == 'TableHDU':
for i in range(len(self)):
(type, width) = _convert_ASCII_format(self.data[i].format)
if width is None:
self.data[i].format = ascii_fmt[self.data[i].format[0]]
elif isinstance(input, _TableBaseHDU):
hdr = input.header
_nfields = hdr['TFIELDS']
self._width = hdr['NAXIS1']
self._shape = hdr['NAXIS2']
# go through header keywords to pick out column definition keywords
dict = [{} for i in range(_nfields)] # definition dictionaries for each field
for _card in hdr.ascardlist():
_key = _tdef_re.match(_card.key)
try:
keyword = _key.group('label')
except:
continue # skip if there is no match
if (keyword in _keyNames):
col = eval(_key.group('num'))
if col <= _nfields and col > 0:
cname = _commonNames[_keyNames.index(keyword)]
dict[col-1][cname] = _card.value
# data reading will be delayed
for col in range(_nfields):
dict[col]['array'] = Delayed(input, col)
# now build the columns
tmp = [Column(**attrs) for attrs in dict]
self.data = tmp
else:
raise TypeError, "input to ColDefs must be a table HDU or a list of Columns"
def __getattr__(self, name):
"""Populate the attributes."""
cname = name[:-1]
if cname in _commonNames:
attr = [''] * len(self)
for i in range(len(self)):
val = getattr(self[i], cname)
if val != None:
attr[i] = val
elif name == '_arrays':
attr = [col.array for col in self.data]
elif name == '_recformats':
if self._tbtype == 'BinTableHDU':
attr = [_convert_format(fmt) for fmt in self.formats]
elif self._tbtype == 'TableHDU':
self._Formats = self.formats
if len(self) == 1:
dummy = []
else:
dummy = map(lambda x, y: x-y, self.starts[1:], [1]+self.starts[1:-1])
dummy.append(self._width-self.starts[-1]+1)
attr = map(lambda y: 'a'+`y`, dummy)
elif name == 'spans':
# make sure to consider the case that the starting column of
# a field may not be the column right after the last field
if self._tbtype == 'TableHDU':
last_end = 0
attr = [0] * len(self)
for i in range(len(self)):
(_format, _width) = _convert_ASCII_format(self.formats[i])
if self.starts[i] == '':
self.starts[i] = last_end + 1
_end = self.starts[i] + _width - 1
attr[i] = _end - last_end
last_end = _end
self._width = _end
else:
raise KeyError, 'Attribute %s not defined.' % name
self.__dict__[name] = attr
return self.__dict__[name]
"""
# make sure to consider the case that the starting column of
# a field may not be the column right after the last field
elif tbtype == 'TableHDU':
(_format, _width) = _convert_ASCII_format(self.formats[i])
if self.starts[i] is '':
self.starts[i] = last_end + 1
_end = self.starts[i] + _width - 1
self.spans[i] = _end - last_end
last_end = _end
self._Formats = self.formats
self._arrays[i] = input[i].array
"""
def __getitem__(self, key):
x = self.data[key]
if isinstance(key, (int, long)):
return x
else:
return ColDefs(x)
def __len__(self):
return len(self.data)
def __repr__(self):
return 'ColDefs'+ `tuple(self.data)`
def __coerce__(self, other):
pass # needed for __add__
def __add__(self, other, option='left'):
if isinstance(other, Column):
b = [other]
elif isinstance(other, ColDefs):
b = list(other.data)
else:
raise TypeError, 'Wrong type of input'
if option == 'left':
tmp = list(self.data) + b
else:
tmp = b + list(self.data)
return ColDefs(tmp)
def __radd__(self, other):
return self.__add__(other, 'right')
def __sub__(self, other):
if not isinstance(other, (list, tuple)):
other = [other]
_other = [_get_index(self.names, key) for key in other]
indx=range(len(self))
for x in _other:
indx.remove(x)
tmp = [self[i] for i in indx]
return ColDefs(tmp)
def _setup(self):
""" Initialize all attributes to be a list of null strings."""
for cname in _commonNames:
setattr(self, cname+'s', ['']*self._nfields)
setattr(self, '_arrays', [None]*self._nfields)
def add_col(self, column):
"""Append one Column to the column definition."""
return self+column
def del_col(self, col_name):
"""Delete (the definition of) one Column."""
indx = _get_index(self.names, col_name)
for cname in _commonNames:
attr = getattr(self, cname+'s')
del attr[indx]
del self._arrays[indx]
self._nfields -= 1
def change_attrib(self, col_name, attrib, new_value):
"""Change an attribute (in the commonName list) of a Column."""
indx = _get_index(self.names, col_name)
getattr(self, attrib+'s')[indx] = new_value
def change_name(self, col_name, new_name):
"""Change a Column's name."""
if new_name != col_name and new_name in self.names:
raise ValueError, 'New name %s already exists.' % new_name
else:
self.change_attrib(col_name, 'name', new_name)
def change_unit(self, col_name, new_unit):
"""Change a Column's unit."""
self.change_attrib(col_name, 'unit', new_unit)
def info(self, attrib='all'):
"""Get attribute(s) information of the column definition."""
"""The attrib can be one or more of the attributes listed in
_commonNames. The default is "all" which will print out
all attributes. It forgives plurals and blanks. If there are
two or more attribute names, they must be separated by comma(s).
"""
if attrib.strip().lower() in ['all', '']:
list = _commonNames
else:
list = attrib.split(',')
for i in range(len(list)):
list[i]=list[i].strip().lower()
if list[i][-1] == 's':
list[i]=list[i][:-1]
for att in list:
if att not in _commonNames:
print "'%s' is not an attribute of the column definitions."%att
continue
print "%s:" % att
print ' ', getattr(self, att+'s')
#def change_format(self, col_name, new_format):
#new_format = _convert_format(new_format)
#self.change_attrib(col_name, 'format', new_format)
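# Illustrative sketch (added commentary, not part of the original module): how a
# ColDefs is typically built from Column objects and then manipulated.  The column
# names, formats and values below are made-up examples.
#
#     c1 = Column(name='target', format='10A', array=chararray.array(['NGC1', 'NGC2']))
#     c2 = Column(name='flux', format='E', array=num.array([1.5, 2.5]))
#     cols = ColDefs([c1, c2])                         # list of Columns -> column definitions
#     cols.info('name, format')                        # print selected attributes
#     cols = cols + Column(name='flag', format='L')    # __add__ returns a new ColDefs
#     cols = cols - 'flag'                             # __sub__ drops a column by name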
def _get_tbdata(hdu):
""" Get the table data from input (an HDU object)."""
tmp = hdu.columns
# get the right shape for the data part of the random group,
# since binary table does not support ND yet
if isinstance(hdu, GroupsHDU):
tmp._recformats[-1] = `hdu._dimShape()[:-1]` + tmp._dat_format
if hdu._ffile.memmap:
_mmap = hdu._ffile._mm[hdu._datLoc:hdu._datLoc+hdu._datSpan]
_data = rec.RecArray(_mmap, formats=tmp._recformats, names=tmp.names, shape=tmp._shape)
else:
_data = rec.array(hdu._file, formats=tmp._recformats, names=tmp.names, shape=tmp._shape)
if isinstance(hdu._ffile, _File):
_data._byteorder = 'big'
# pass datLoc, for P format
_data._heapoffset = hdu._theap + hdu._datLoc
_data._file = hdu._file
_tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2']
_data._gap = hdu._theap - _tbsize
# comment out to avoid circular reference of _pcount
# pass the attributes
for attr in ['formats', 'names']:
setattr(_data, attr, getattr(tmp, attr))
for i in range(len(tmp)):
tmp._arrays[i] = _data.field(i)
return FITS_rec(_data)
def new_table (input, header=None, nrows=0, fill=0, tbtype='BinTableHDU'):
"""Create a new table from the input column definitions."""
"""
input: a list of Columns or a ColDefs object.
header: header to be used to populate the non-required keywords
nrows: number of rows in the new table
fill: if = 1, will fill all cells with zeros or blanks
if = 0, copy the data from input, undefined cells will still
be filled with zeros/blanks.
tbtype: table type to be created (BinTableHDU or TableHDU)
"""
# construct a table HDU
hdu = eval(tbtype)(header=header)
if isinstance(input, ColDefs):
if input._tbtype == tbtype:
tmp = hdu.columns = input
else:
raise ValueError, 'column definitions have a different table type'
elif isinstance(input, FITS_rec): # input is a FITS_rec
tmp = hdu.columns = input._coldefs
else: # input is a list of Columns
tmp = hdu.columns = ColDefs(input, tbtype)
# read the delayed data
for i in range(len(tmp)):
_arr = tmp._arrays[i]
if isinstance(_arr, Delayed):
tmp._arrays[i] = _arr.hdu.data._parent.field(_arr.field)
# use the largest column shape as the shape of the record
if nrows == 0:
for arr in tmp._arrays:
if arr is not None:
dim = arr._shape[0]
else:
dim = 0
if dim > nrows:
nrows = dim
if tbtype == 'TableHDU':
_formats = ''
_itemsize = 0
for i in range(len(tmp)):
_formats += 'a%d,' % tmp.spans[i]
_itemsize += tmp.spans[i]
hdu.data = FITS_rec(rec.array(' '*_itemsize*nrows, formats=_formats[:-1], names=tmp.names, shape=nrows))
else:
hdu.data = FITS_rec(rec.array(None, formats=tmp._recformats, names=tmp.names, shape=nrows))
hdu.data._coldefs = hdu.columns
# populate data to the new table
for i in range(len(tmp)):
if tmp._arrays[i] is None:
size = 0
else:
size = len(tmp._arrays[i])
n = min(size, nrows)
if fill:
n = 0
(_scale, _zero, bscale, bzero) = hdu.data._get_scale_factors(i)[3:]
if n > 0:
if isinstance(tmp._recformats[i], _FormatX):
if tmp._arrays[i][:n].shape[-1] == tmp._recformats[i]._nx:
_wrapx(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n], tmp._recformats[i]._nx)
else: # from a table parent data, just pass it
hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n]
elif isinstance(tmp._recformats[i], _FormatP):
hdu.data._convert[i] = _makep(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n], tmp._recformats[i]._dtype)
else:
if tbtype == 'TableHDU':
# string no need to convert,
if isinstance(tmp._arrays[i], chararray.CharArray):
hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n]
else:
hdu.data._convert[i] = num.zeros(nrows, type=tmp._arrays[i].type())
if _scale or _zero:
_arr = tmp._arrays[i].copy()
else:
_arr = tmp._arrays[i]
if _scale:
_arr *= bscale
if _zero:
_arr += bzero
hdu.data._convert[i][:n] = _arr
else:
hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n]
if n < nrows:
if tbtype == 'BinTableHDU':
if isinstance(hdu.data._parent.field(i), num.NumArray):
# make the scaled data = 0, not the stored data
hdu.data._parent.field(i)[n:] = -bzero/bscale
else:
hdu.data._parent.field(i)[n:] = ''
hdu.update()
return hdu
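# Illustrative sketch (added commentary): building a new binary table HDU with
# new_table() and writing it out.  The file and column names are made-up examples.
#
#     c1 = Column(name='time', format='D', array=num.arange(10.0))
#     c2 = Column(name='counts', format='J', array=num.arange(10))
#     tbhdu = new_table([c1, c2])                 # BinTableHDU by default
#     hdulist = HDUList([PrimaryHDU(), tbhdu])
#     hdulist.writeto('newtable.fits')
#
# Passing tbtype='TableHDU' (with ASCII column formats such as 'A10' or 'E10.4')
# would create an ASCII table instead; extra rows requested via nrows are
# zero/blank filled.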
class FITS_rec(rec.RecArray):
"""FITS record array class. FITS record array is the data part of a
table HDU's data part. This is a layer over the RecArray, so we
can deal with scaled columns.
"""
def __init__(self, input):
"""Construct a FITS record array from a RecArray."""
# input should be a record array
self.__setstate__(input.__getstate__())
# _parent is the original (storage) array,
# _convert is the scaled (physical) array.
self._parent = input
self._convert = [None]*self._nfields
self.names = self._names
def copy(self):
r = rec.RecArray.copy(self)
r.__class__ = rec.RecArray
r._coldefs = self._coldefs
f = FITS_rec(r)
f._convert = copy.deepcopy(self._convert)
return f
def _clone(self, shape):
"""Overload this to make mask array indexing work properly."""
hdu = new_table(self._coldefs, nrows=shape[0])
return hdu.data
def __repr__(self):
tmp = rec.RecArray.__repr__(self)
loc = tmp.rfind('\nnames=')
tmp = tmp[:loc+7] + `self._coldefs.names` + ')'
return tmp
# synchronize the sliced FITS_rec and its ._parent
def __getitem__(self, key):
tmp = rec.RecArray.__getitem__(self, key)
if isinstance(key, slice):
out = tmp
out._parent = rec.RecArray.__getitem__(self._parent, key)
out._convert = [None]*self._nfields
for i in range(self._nfields):
# touch all fields to expand the original ._convert list
# so the sliced FITS_rec will view the same scaled columns as
# the original
dummy = self.field(i)
if self._convert[i] is not None:
out._convert[i] = ndarray.NDArray.__getitem__(self._convert[i], key)
del dummy
return out
# if not a slice, do this because Record has no __getstate__.
# also more efficient.
else:
return tmp
def _get_scale_factors(self, indx):
"""
Get the scaling flags and factors for one field.
indx is the index of the field.
"""
if self._coldefs._tbtype == 'BinTableHDU':
_str = 'a' in self._coldefs.formats[indx]
_bool = self._coldefs._recformats[indx][-2:] == _booltype
else:
_str = self._coldefs.formats[indx][0] == 'A'
_bool = 0 # there is no boolean in ASCII table
_number = not(_bool or _str)
bscale = self._coldefs.bscales[indx]
bzero = self._coldefs.bzeros[indx]
_scale = bscale not in ['', None, 1]
_zero = bzero not in ['', None, 0]
# ensure bscale/bzero are numbers
if not _scale:
bscale = 1
if not _zero:
bzero = 0
return (_str, _bool, _number, _scale, _zero, bscale, bzero)
def field(self, key):
"""A view of a Column's data as an array."""
indx = _get_index(self._coldefs.names, key)
if (self._convert[indx] is None):
# for X format
if isinstance(self._coldefs._recformats[indx], _FormatX):
_nx = self._coldefs._recformats[indx]._nx
dummy = num.zeros(self._parent.shape+(_nx,), type=num.Bool)
_unwrapx(self._parent.field(indx), dummy, _nx)
self._convert[indx] = dummy
return self._convert[indx]
(_str, _bool, _number, _scale, _zero, bscale, bzero) = self._get_scale_factors(indx)
# for P format
if isinstance(self._coldefs._recformats[indx], _FormatP):
dummy = _VLF([None]*len(self._parent))
dummy._dtype = self._coldefs._recformats[indx]._dtype
for i in range(len(self._parent)):
_offset = self._parent.field(indx)[i,1] + self._heapoffset
self._file.seek(_offset)
if self._coldefs._recformats[indx]._dtype == 'a':
dummy[i] = chararray.array(self._file, itemsize=self._parent.field(indx)[i,0], shape=1)
else:
dummy[i] = num.array(self._file, type=self._coldefs._recformats[indx]._dtype, shape=self._parent.field(indx)[i,0])
dummy[i]._byteorder = 'big'
# scale by TSCAL and TZERO
if _scale or _zero:
for i in range(len(self._parent)):
dummy[i][:] = dummy[i]*bscale+bzero
# Boolean (logical) column
if self._coldefs._recformats[indx]._dtype == _booltype:
for i in range(len(self._parent)):
dummy[i] = num.equal(dummy[i], ord('T'))
self._convert[indx] = dummy
return self._convert[indx]
if _str:
return self._parent.field(indx)
# ASCII table, convert strings to numbers
if self._coldefs._tbtype == 'TableHDU':
_dict = {'I':num.Int32, 'F':num.Float32, 'E':num.Float32, 'D':num.Float64}
_type = _dict[self._coldefs._Formats[indx][0]]
# if the string = TNULL, return ASCIITNULL
nullval = self._coldefs.nulls[indx].strip()
dummy = num.zeros(len(self._parent), type=_type)
dummy[:] = ASCIITNULL
self._convert[indx] = dummy
for i in range(len(self._parent)):
if self._parent.field(indx)[i].strip() != nullval:
dummy[i] = float(self._parent.field(indx)[i].replace('D', 'E'))
else:
dummy = self._parent.field(indx)
# further conversion for both ASCII and binary tables
if _number and (_scale or _zero):
# only do the scaling the first time and store it in _convert
self._convert[indx] = num.array(dummy, type=num.Float64)
if _scale:
num.multiply(self._convert[indx], bscale, self._convert[indx])
if _zero:
self._convert[indx] += bzero
elif _bool:
self._convert[indx] = num.equal(dummy, ord('T'))
else:
return dummy
return self._convert[indx]
def _scale_back(self):
"""Update the parent array, using the (latest) scaled array."""
_dict = {'A':'s', 'I':'d', 'F':'f', 'E':'E', 'D':'E'}
# calculate the starting point and width of each field for ASCII table
if self._coldefs._tbtype == 'TableHDU':
_loc = [1]
_width = []
for i in range(self._nfields):
_loc.append(_loc[-1]+self._parent.field(i).itemsize())
_width.append(_convert_ASCII_format(self._coldefs._Formats[i])[1])
self._heapsize = 0
for indx in range(self._nfields):
if (self._convert[indx] is not None):
if isinstance(self._coldefs._recformats[indx], _FormatX):
_wrapx(self._convert[indx], self._parent.field(indx), self._coldefs._recformats[indx]._nx)
continue
(_str, _bool, _number, _scale, _zero, bscale, bzero) = self._get_scale_factors(indx)
# add the location offset of the heap area for each
# variable length column
if isinstance(self._coldefs._recformats[indx], _FormatP):
desc = self._parent.field(indx)
desc[:] = 0 # reset
_npts = map(len, self._convert[indx])
desc[:len(_npts),0] = _npts
_dtype = num.getType(self._coldefs._recformats[indx]._dtype)
desc[1:,1] = num.add.accumulate(desc[:-1,0])*_dtype.bytes
desc[:,1][:] += self._heapsize
self._heapsize += desc[:,0].sum()*_dtype.bytes
# conversion for both ASCII and binary tables
if _number or _str:
if _number and (_scale or _zero):
dummy = self._convert[indx].copy()
if _zero:
dummy -= bzero
if _scale:
dummy /= bscale
elif self._coldefs._tbtype == 'TableHDU':
dummy = self._convert[indx]
else:
continue
# ASCII table, convert numbers to strings
if self._coldefs._tbtype == 'TableHDU':
_format = self._coldefs._Formats[indx].strip()
_lead = self._coldefs.starts[indx] - _loc[indx]
if _lead < 0:
raise ValueError, "column `%s` starting point overlaps to the previous column" % indx+1
_trail = _loc[indx+1] - _width[indx] - self._coldefs.starts[indx]
if _trail < 0:
raise ValueError, "column `%s` ending point overlaps to the next column" % indx+1
if 'A' in _format:
_pc = '%-'
else:
_pc = '%'
_fmt = ' '*_lead + _pc + _format[1:] + _dict[_format[0]] + ' '*_trail
# not using numarray.strings's num2char because the
# result is not allowed to expand (as C/Python does).
for i in range(len(dummy)):
x = _fmt % dummy[i]
if len(x) > (_loc[indx+1]-_loc[indx]):
raise ValueError, "number `%s` does not fit into the output's itemsize of %s" % (x, _width[indx])
else:
self._parent.field(indx)[i] = x
if 'D' in _format:
self._parent.field(indx).sub('E', 'D')
# binary table
else:
if isinstance(self._parent.field(indx)._type, num.IntegralType):
dummy = num.around(dummy)
self._parent.field(indx)[:] = dummy
del dummy
# ASCII table does not have Boolean type
elif _bool:
self._parent.field(indx)[:] = num.choose(self._convert[indx], (ord('F'),ord('T')))
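# Illustrative sketch (added commentary): FITS_rec is what table HDUs expose as
# .data; field() returns the scaled ("physical") view of a column, while ._parent
# holds the raw stored values.  'intable.fits' and the column name are made up.
#
#     hdulist = open('intable.fits')
#     tbdata = hdulist[1].data             # a FITS_rec
#     flux = tbdata.field('flux')          # scaled values, honouring TSCAL/TZERO
#     first_rows = tbdata[:3]              # slicing keeps the link to the parent array
#     hdulist.close()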
class GroupData(FITS_rec):
"""Random groups data object.
Allows structured access to FITS Group data in a manner analogous to tables
"""
def __init__(self, input=None, bitpix=None, pardata=None, parnames=[],
bscale=None, bzero=None, parbscales=None, parbzeros=None):
"""input: input data, either the group data itself (a numarray) or
a record array (FITS_rec) which will contain both group
parameter info and the data. The rest of the arguments are
used only for the first case.
bitpix: data type as expressed in FITS BITPIX value
(8, 16, 32, 64, -32, or -64)
pardata: parameter data, as a list of (numeric) arrays.
parnames: list of parameter names.
bscale: BSCALE of the data
bzero: BZERO of the data
parbscales: list of bscales for the parameters
parbzeros: list of bzeros for the parameters
"""
if isinstance(input, num.NumArray):
_formats = ''
_cols = []
if pardata is None:
npars = 0
else:
npars = len(pardata)
if parbscales is None:
parbscales = [None]*npars
if parbzeros is None:
parbzeros = [None]*npars
if bitpix is None:
bitpix = _ImageBaseHDU.ImgCode[input.type()]
fits_fmt = GroupsHDU._dict[bitpix] # -32 -> 'E'
_fmt = _fits2rec[fits_fmt] # 'E' -> 'f4'
_formats = (_fmt+',') * npars
data_fmt = '%s%s' % (`input.shape[1:]`, _fmt)
_formats += data_fmt
gcount = input.shape[0]
for i in range(npars):
_cols.append(Column(name='c'+`i+1`, format = fits_fmt, bscale = parbscales[i], bzero = parbzeros[i]))
_cols.append(Column(name='data', format = fits_fmt, bscale = bscale, bzero = bzero))
self._coldefs = ColDefs(_cols)
self.parnames = [i.lower() for i in parnames]
tmp = FITS_rec(rec.array(None, formats=_formats, shape=gcount, names= self._coldefs.names))
self.__setstate__(tmp.__getstate__())
for i in range(npars):
(_scale, _zero) = self._get_scale_factors(i)[3:5]
if _scale or _zero:
self._convert[i] = pardata[i]
else:
self._parent.field(i)[:] = pardata[i]
(_scale, _zero) = self._get_scale_factors(npars)[3:5]
if _scale or _zero:
self._convert[npars] = input
else:
self._parent.field(npars)[:] = input
else:
self.__setstate__(input.__getstate__())
def __getattr__(self, attr):
if attr == 'data':
self.__dict__[attr] = self.field('data')
elif attr == '_unique':
_unique = {}
for i in range(len(self.parnames)):
_name = self.parnames[i]
if _name in _unique:
_unique[_name].append(i)
else:
_unique[_name] = [i]
self.__dict__[attr] = _unique
try:
return self.__dict__[attr]
except KeyError:
raise AttributeError(attr)
def par(self, parName):
"""Get the group parameter values."""
if isinstance(parName, (int, long)):
result = self.field(parName)
else:
indx = self._unique[parName.lower()]
if len(indx) == 1:
result = self.field(indx[0])
# if more than one group parameter have the same name
else:
result = self.field(indx[0]).astype('f8')
for i in indx[1:]:
result += self.field(i)
return result
def setpar(self, parName, value):
"""Set the group parameter values."""
if isinstance(parName, (int, long)):
self.field(parName)[:] = value
else:
indx = self._unique[parName]
if len(indx) == 1:
self.field(indx[0])[:] = value
# if more than one group parameter have the same name, the
# value must be a list (or tuple) containing arrays
else:
if isinstance(value, (list, tuple)) and len(indx) == len(value):
for i in range(len(indx)):
self.field(indx[i])[:] = value[i]
else:
raise ValueError, "parameter value must be a sequence with %d arrays/numbers." % len(indx)
def _getitem(self, offset):
row = (offset - self._byteoffset) / self._strides[0]
return _Group(self, row)
class _Group(rec.Record):
"""One group of the random group data."""
def __init__(self, input, row=0):
rec.Record.__init__(self, input, row)
def par(self, fieldName):
"""Get the group parameter value."""
return self.array.par(fieldName)[self.row]
def setpar(self, fieldName, value):
"""Set the group parameter value."""
self.array[self.row:self.row+1].setpar(fieldName, value)
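# Illustrative sketch (added commentary): reading random-groups data and accessing
# group parameters by name.  The file and parameter names are made-up examples.
#
#     hdulist = open('groups.fits')
#     gdata = hdulist[0].data              # a GroupData instance for a GroupsHDU
#     uu = gdata.par('UU')                 # parameters sharing a name are summed
#     gdata[0].setpar('UU', 0.0)           # update the parameter of the first group
#     hdulist.close()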
class _TableBaseHDU(_ExtensionHDU):
"""FITS table extension base HDU class."""
def __init__(self, data=None, header=None, name=None):
"""
header: header to be used
data: data to be used
name: name to be populated in EXTNAME keyword
"""
if header is not None:
if not isinstance(header, Header):
raise ValueError, "header must be a Header object"
if data is DELAYED:
# this should never happen
if header is None:
raise ValueError, "No header to setup HDU."
# if the file is read the first time, no need to copy, and keep it unchanged
else:
self.header = header
else:
# construct a list of cards of minimal header
_list = CardList([
Card('XTENSION', '', ''),
Card('BITPIX', 8, 'array data type'),
Card('NAXIS', 2, 'number of array dimensions'),
Card('NAXIS1', 0, 'length of dimension 1'),
Card('NAXIS2', 0, 'length of dimension 2'),
Card('PCOUNT', 0, 'number of group parameters'),
Card('GCOUNT', 1, 'number of groups'),
Card('TFIELDS', 0, 'number of table fields')
])
if header is not None:
# Make a "copy" (not just a view) of the input header, since it
# may get modified. the data is still a "view" (for now)
hcopy = header.copy()
hcopy._strip()
_list.extend(hcopy.ascardlist())
self.header = Header(_list)
if (data is not DELAYED):
if isinstance(data, rec.RecArray):
self.header['NAXIS1'] = data._itemsize
self.header['NAXIS2'] = data._shape[0]
self.header['TFIELDS'] = data._nfields
self.data = data
self.columns = data._coldefs
self.update()
elif data is None:
pass
else:
raise TypeError, "table data has incorrect type"
# set extension name
if not name and self.header.has_key('EXTNAME'):
name = self.header['EXTNAME']
self.name = name
def __getattr__(self, attr):
"""Get the 'data' or 'columns' attribute."""
if attr == 'data':
size = self.size()
if size:
self._file.seek(self._datLoc)
data = _get_tbdata(self)
data._coldefs = self.columns
else:
data = None
self.__dict__[attr] = data
elif attr == 'columns':
class_name = str(self.__class__)
class_name = class_name[class_name.rfind('.')+1:]
self.__dict__[attr] = ColDefs(self, tbtype=class_name)
elif attr == '_theap':
self.__dict__[attr] = self.header.get('THEAP', self.header['NAXIS1']*self.header['NAXIS2'])
elif attr == '_pcount':
self.__dict__[attr] = self.header.get('PCOUNT', 0)
try:
return self.__dict__[attr]
except KeyError:
raise AttributeError(attr)
def _summary(self):
"""Summarize the HDU: name, dimensions, and formats."""
class_name = str(self.__class__)
type = class_name[class_name.rfind('.')+1:]
# if data is touched, use data info.
if 'data' in dir(self):
if self.data is None:
_shape, _format = (), ''
_nrows = 0
else:
_nrows = len(self.data)
_ncols = len(self.columns.formats)
_format = self.columns.formats
# if data is not touched yet, use header info.
else:
_shape = ()
_nrows = self.header['NAXIS2']
_ncols = self.header['TFIELDS']
_format = '['
for j in range(_ncols):
_format += self.header['TFORM'+`j+1`] + ', '
_format = _format[:-2] + ']'
_dims = "%dR x %dC" % (_nrows, _ncols)
return "%-10s %-11s %5d %-12s %s" % \
(self.name, type, len(self.header.ascard), _dims, _format)
def get_coldefs(self):
"""Returns the table's column definitions."""
return self.columns
def update(self):
""" Update header keywords to reflect recent changes of columns."""
_update = self.header.update
_append = self.header.ascard.append
_cols = self.columns
_update('naxis1', self.data._itemsize, after='naxis')
_update('naxis2', self.data._shape[0], after='naxis1')
_update('tfields', len(_cols), after='gcount')
# Wipe out the old table definition keywords. Mark them first,
# then delete from the end so as not to confuse the indexing.
_list = []
for i in range(len(self.header.ascard)-1,-1,-1):
_card = self.header.ascard[i]
_key = _tdef_re.match(_card.key)
try: keyword = _key.group('label')
except: continue # skip if there is no match
if (keyword in _keyNames):
_list.append(i)
for i in _list:
del self.header.ascard[i]
del _list
# populate the new table definition keywords
for i in range(len(_cols)):
for cname in _commonNames:
val = getattr(_cols, cname+'s')[i]
if val != '':
keyword = _keyNames[_commonNames.index(cname)]+`i+1`
if cname == 'format' and isinstance(self, BinTableHDU):
val = _cols._recformats[i]
if isinstance(val, _FormatX):
val = `val._nx` + 'X'
elif isinstance(val, _FormatP):
VLdata = self.data.field(i)
VLdata._max = max(map(len, VLdata))
val = 'P' + _convert_format(val._dtype, reverse=1) + '(%d)' % VLdata._max
else:
val = _convert_format(val, reverse=1)
#_update(keyword, val)
_append(Card(keyword, val))
def copy(self):
"""Make a copy of the table HDU, both header and data are copied."""
# touch the data, so it's defined (in the case of reading from a
# FITS file)
self.data
return new_table(self.columns, header=self.header, tbtype=self.columns._tbtype)
def _verify(self, option='warn'):
"""_TableBaseHDU verify method."""
_err = _ExtensionHDU._verify(self, option=option)
self.req_cards('NAXIS', None, 'val == 2', 2, option, _err)
self.req_cards('BITPIX', None, 'val == 8', 8, option, _err)
self.req_cards('TFIELDS', '== 7', _isInt+" and val >= 0 and val <= 999", 0, option, _err)
tfields = self.header['TFIELDS']
for i in range(tfields):
self.req_cards('TFORM'+`i+1`, None, None, None, option, _err)
return _err
class TableHDU(_TableBaseHDU):
"""FITS ASCII table extension HDU class."""
__format_RE = re.compile(
r'(?P<code>[ADEFI])(?P<width>\d+)(?:\.(?P<prec>\d+))?')
def __init__(self, data=None, header=None, name=None):
"""data: data of the table
header: header to be used for the HDU
name: the EXTNAME value
"""
_TableBaseHDU.__init__(self, data=data, header=header, name=name)
self._xtn = 'TABLE'
if self.header[0].rstrip() != self._xtn:
self.header[0] = self._xtn
self.header.ascard[0].comment = 'ASCII table extension'
'''
def format(self):
strfmt, strlen = '', 0
for j in range(self.header['TFIELDS']):
bcol = self.header['TBCOL'+`j+1`]
valu = self.header['TFORM'+`j+1`]
fmt = self.__format_RE.match(valu)
if fmt:
code, width, prec = fmt.group('code', 'width', 'prec')
else:
raise ValueError, valu
size = eval(width)+1
strfmt = strfmt + 's'+str(size) + ','
strlen = strlen + size
else:
strfmt = '>' + strfmt[:-1]
return strfmt
'''
def _verify(self, option='warn'):
"""TableHDU verify method."""
_err = _TableBaseHDU._verify(self, option=option)
self.req_cards('PCOUNT', None, 'val == 0', 0, option, _err)
tfields = self.header['TFIELDS']
for i in range(tfields):
self.req_cards('TBCOL'+`i+1`, None, _isInt, None, option, _err)
return _err
class BinTableHDU(_TableBaseHDU):
"""Binary table HDU class."""
def __init__(self, data=None, header=None, name=None):
"""data: data of the table
header: header to be used for the HDU
name: the EXTNAME value
"""
_TableBaseHDU.__init__(self, data=data, header=header, name=name)
self._xtn = 'BINTABLE'
hdr = self.header
if hdr[0] != self._xtn:
hdr[0] = self._xtn
hdr.ascard[0].comment = 'binary table extension'
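# Illustrative sketch (added commentary): TableHDU (ASCII) and BinTableHDU (binary)
# are normally obtained from a file rather than constructed directly; new_table()
# is the usual way to build one from scratch.  'table.fits' is a made-up name.
#
#     hdulist = open('table.fits')
#     tbhdu = hdulist[1]                   # TableHDU or BinTableHDU, per XTENSION
#     print tbhdu.columns.names            # column definitions are built lazily
#     print tbhdu.header['TFIELDS']
#     hdulist.close()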
class StreamingHDU:
"""
A class that provides the capability to stream data to a FITS file
instead of requiring data to all be written at once.
The following pseudo-code illustrates its use:
header = pyfits.Header()
for all the cards you need in the header:
header.update(key,value,comment)
shdu = pyfits.StreamingHDU('filename.fits',header)
for each piece of data:
shdu.write(data)
shdu.close()
"""
def __init__(self, name, header):
"""
Construct a StreamingHDU object given a file name and a header.
:Parameters:
name : string
The name of the file to which the header and data will be
streamed.
header : Header
The header object associated with the data to be written
to the file.
:Returns:
None
Notes
-----
The file will be opened and the header appended to the end of
the file. If the file does not already exist, it will be created
and if the header represents a Primary header, it will be written
to the beginning of the file. If the file does not exist and the
provided header is not a Primary header, a default Primary HDU will
be inserted at the beginning of the file and the provided header
will be added as the first extension. If the file does already
exist, but the provided header represents a Primary header, the
header will be modified to an image extension header and appended
to the end of the file.
"""
self.header = header.copy()
#
# Check if the file already exists. If it does not, check to see
# if we were provided with a Primary Header. If not we will need
# to prepend a default PrimaryHDU to the file before writing the
# given header.
#
if not os.path.exists(name):
if not self.header.has_key('SIMPLE'):
hdulist = HDUList([PrimaryHDU()])
hdulist.writeto(name, 'exception')
else:
if self.header.has_key('SIMPLE') and os.path.getsize(name) > 0:
#
# This will not be the first extension in the file so we
# must change the Primary header provided into an image
# extension header.
#
self.header.update('XTENSION','IMAGE','Image extension',
after='SIMPLE')
del self.header['SIMPLE']
if not self.header.has_key('PCOUNT'):
dim = self.header['NAXIS']
if dim == 0:
dim = ''
else:
dim = str(dim)
self.header.update('PCOUNT', 0, 'number of parameters',
after='NAXIS'+dim)
if not self.header.has_key('GCOUNT'):
self.header.update('GCOUNT', 1, 'number of groups',
after='PCOUNT')
self._ffo = _File(name, 'append')
self._ffo.getfile().seek(0,2)
self._hdrLoc = self._ffo.writeHDUheader(self)
self._datLoc = self._ffo.getfile().tell()
self._size = self.size()
if self._size != 0:
self.writeComplete = 0
else:
self.writeComplete = 1
def write(self,data):
"""
Write the given data to the stream.
:Parameters:
data : NumArray
Data to stream to the file.
:Returns:
writeComplete : integer
Flag that when true indicates that all of the required data
has been written to the stream.
Notes
-----
Only the amount of data specified in the header provided to the
class constructor may be written to the stream. If the provided
data would cause the stream to overflow, an IOError exception is
raised and the data is not written. Once sufficient data has been
written to the stream to satisfy the amount specified in the header,
the stream is padded to fill a complete FITS block and no more data
will be accepted. An attempt to write more data after the stream
has been filled will raise an IOError exception. If the dtype of
the input data does not match what is expected by the header, a
TypeError exception is raised.
"""
if self.writeComplete:
raise IOError, "The stream is closed and can no longer be written"
curDataSize = self._ffo.getfile().tell() - self._datLoc
if curDataSize + data.itemsize()*data._size > self._size:
raise IOError, "Supplied data will overflow the stream"
if _ImageBaseHDU.NumCode[self.header['BITPIX']] != data.type():
raise TypeError, "Supplied data is not the correct type."
if data._byteorder != 'big':
#
# byteswap little endian arrays before writing
#
output = data.byteswapped()
else:
output = data
output.tofile(self._ffo.getfile())
if self._ffo.getfile().tell() - self._datLoc == self._size:
#
# the stream is full so pad the data to the next FITS block
#
self._ffo.getfile().write(_padLength(self._size)*'\0')
self.writeComplete = 1
self._ffo.getfile().flush()
return self.writeComplete
def size(self):
"""
Return the size (in bytes) of the data portion of the HDU.
:Parameters:
None
:Returns:
size : integer
The number of bytes of data required to fill the stream
per the header provided in the constructor.
"""
size = 0
naxis = self.header.get('NAXIS', 0)
if naxis > 0:
simple = self.header.get('SIMPLE','F')
randomGroups = self.header.get('GROUPS','F')
if simple == 'T' and randomGroups == 'T':
groups = 1
else:
groups = 0
size = 1
for j in range(groups,naxis):
size = size * self.header['NAXIS'+`j+1`]
bitpix = self.header['BITPIX']
gcount = self.header.get('GCOUNT', 1)
pcount = self.header.get('PCOUNT', 0)
size = abs(bitpix) * gcount * (pcount + size) / 8
return size
def close(self):
"""
Close the 'physical' FITS file.
:Parameters:
None
:Returns:
None
"""
self._ffo.close()
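# Illustrative sketch (added commentary): streaming image data to disk in chunks
# with StreamingHDU.  The header values, file name, and the `chunks` iterable are
# made-up examples; the header must fully describe the data before writing.
#
#     hdr = Header()
#     hdr.update('SIMPLE', True)
#     hdr.update('BITPIX', -32)
#     hdr.update('NAXIS', 2)
#     hdr.update('NAXIS1', 100)
#     hdr.update('NAXIS2', 100)
#     shdu = StreamingHDU('stream.fits', hdr)
#     for chunk in chunks:                 # each chunk a Float32 numarray
#         if shdu.write(chunk):            # returns 1 once the stream is complete
#             break
#     shdu.close()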
class ErrorURLopener(urllib.FancyURLopener):
"""A class to use with urlretrieve to allow IOError exceptions to be
raised when a file specified by a URL cannot be accessed"""
def http_error_default(self, url, fp, errcode, errmsg, headers):
raise IOError, (errcode, errmsg, url)
urllib._urlopener = ErrorURLopener() # Assign the locally subclassed opener
# class to the urllibrary
urllib._urlopener.tempcache = {} # Initialize tempcache with an empty
# dictionary to enable file caching
class _File:
"""A file I/O class"""
def __init__(self, name, mode='copyonwrite', memmap=0):
if mode not in _python_mode.keys():
raise "Mode '%s' not recognized" % mode
if mode != 'append' and not os.path.exists(name):
self.name, fileheader = urllib.urlretrieve(name)
else:
self.name = name
self.mode = mode
self.memmap = memmap
if memmap and mode not in ['readonly', 'copyonwrite', 'update']:
raise "Memory mapping is not implemented for mode `%s`." % mode
else:
if os.path.splitext(self.name)[1] == '.gz':
# Handle gzip files
if mode in ['update', 'append']:
raise "Writing to gzipped fits files is not supported"
zfile = gzip.GzipFile(self.name)
self.tfile = tempfile.NamedTemporaryFile('rb+',-1,'.fits')
self.name = self.tfile.name
self.__file = self.tfile.file
self.__file.write(zfile.read())
zfile.close()
elif os.path.splitext(self.name)[1] == '.zip':
# Handle zip files
if mode in ['update', 'append']:
raise "Writing to zipped fits files is not supported"
zfile = zipfile.ZipFile(self.name)
namelist = zfile.namelist()
if len(namelist) != 1:
raise "Zip files with multiple members are not supported."
self.tfile = tempfile.NamedTemporaryFile('rb+',-1,'.fits')
self.name = self.tfile.name
self.__file = self.tfile.file
self.__file.write(zfile.read(namelist[0]))
zfile.close()
else:
self.__file = __builtin__.open(self.name, _python_mode[mode])
# For 'ab+' mode, the pointer is at the end after the open in
# Linux, but is at the beginning in Solaris.
self.__file.seek(0, 2)
self._size = self.__file.tell()
self.__file.seek(0)
def __getattr__(self, attr):
"""Get the _mm attribute."""
if attr == '_mm':
self.__dict__[attr] = Memmap.open(self.name, mode=_memmap_mode[self.mode])
try:
return self.__dict__[attr]
except KeyError:
raise AttributeError(attr)
def getfile(self):
return self.__file
def _readheader(self, cardList, keyList, blocks):
"""Read blocks of header, and put each card into a list of cards.
Will deal with CONTINUE cards in a later stage as CONTINUE cards
may span across blocks.
"""
if len(blocks) != _blockLen:
raise IOError, 'Block length is not %d: %d' % (_blockLen, len(blocks))
elif (blocks[:8] not in ['SIMPLE  ', 'XTENSION']):
raise IOError, 'Block does not begin with SIMPLE or XTENSION'
for i in range(0, len(blocks), Card.length):
_card = Card('').fromstring(blocks[i:i+Card.length])
_key = _card.key
cardList.append(_card)
keyList.append(_key)
if _key == 'END':
break
def _readHDU(self):
"""Read the skeleton structure of the HDU."""
end_RE = re.compile('END'+' '*77)
_hdrLoc = self.__file.tell()
# Read the first header block.
block = self.__file.read(_blockLen)
if block == '':
raise EOFError
hdu = _TempHDU()
hdu._raw = ''
# continue reading header blocks until END card is reached
while 1:
# find the END card
mo = end_RE.search(block)
if mo is None:
hdu._raw += block
block = self.__file.read(_blockLen)
if block == '':
break
else:
break
hdu._raw += block
_size, hdu.name = hdu._getsize(hdu._raw)
# get extname and extver
if hdu.name == '':
hdu.name, hdu._extver = hdu._getname()
elif hdu.name == 'PRIMARY':
hdu._extver = 1
hdu._file = self.__file
hdu._hdrLoc = _hdrLoc # beginning of the header area
hdu._datLoc = self.__file.tell() # beginning of the data area
# data area size, including padding
hdu._datSpan = _size + _padLength(_size)
hdu._new = 0
self.__file.seek(hdu._datSpan, 1)
if self.__file.tell() > self._size:
print 'Warning: File size is smaller than specified data size. File may have been truncated.'
hdu._ffile = self
return hdu
def writeHDU(self, hdu):
"""Write *one* FITS HDU. Must seek to the correct location before
calling this method.
"""
if isinstance(hdu, _ImageBaseHDU):
hdu.update_header()
return (self.writeHDUheader(hdu),) + self.writeHDUdata(hdu)
def writeHDUheader(self, hdu):
"""Write FITS HDU header part."""
blocks = repr(hdu.header.ascard) + _pad('END')
blocks = blocks + _padLength(len(blocks))*' '
if len(blocks)%_blockLen != 0:
raise IOError
self.__file.flush()
loc = self.__file.tell()
self.__file.write(blocks)
# flush, to make sure the content is written
self.__file.flush()
return loc
def writeHDUdata(self, hdu):
"""Write FITS HDU data part."""
self.__file.flush()
loc = self.__file.tell()
_size = 0
if hdu.data is not None:
# if image, need to deal with byte order
if isinstance(hdu, _ImageBaseHDU):
if hdu.data._byteorder != 'big':
output = hdu.data.byteswapped()
else:
output = hdu.data
# Binary table byteswap
elif isinstance(hdu, BinTableHDU):
for i in range(hdu.data._nfields):
coldata = hdu.data.field(i)
coldata2 = hdu.data._parent.field(i)
if not isinstance(coldata, chararray.CharArray):
# only swap unswapped
# deal with var length table
if isinstance(coldata, _VLF):
for i in coldata:
if not isinstance(i, chararray.CharArray):
if i._type.bytes > 1:
if i._byteorder != 'big':
i.byteswap()
i._byteorder = 'big'
else:
if coldata._type.bytes > 1:
if coldata._byteorder != 'big':
coldata.byteswap()
coldata._byteorder = 'big'
if coldata2._type.bytes > 1:
# do the _parent too, otherwise the _parent
# of a scaled column may have wrong byteorder
if coldata2._byteorder != 'big':
coldata2.byteswap()
coldata2._byteorder = 'big'
# In case the FITS_rec was created in a LittleEndian machine
hdu.data._byteorder = 'big'
hdu.data._parent._byteorder = 'big'
output = hdu.data
else:
output = hdu.data
output.tofile(self.__file)
_size = output.nelements() * output._itemsize
# write out the heap of variable length array columns
# this has to be done after the "regular" data is written (above)
_where = self.__file.tell()
if isinstance(hdu, BinTableHDU):
self.__file.write(hdu.data._gap*'\0')
for i in range(hdu.data._nfields):
if isinstance(hdu.data._coldefs._recformats[i], _FormatP):
for j in range(len(hdu.data.field(i))):
coldata = hdu.data.field(i)[j]
if len(coldata) > 0:
coldata.tofile(self.__file)
_shift = self.__file.tell() - _where
hdu.data._heapsize = _shift - hdu.data._gap
_size = _size + _shift
# pad the FITS data block
if _size > 0:
self.__file.write(_padLength(_size)*'\0')
# flush, to make sure the content is written
self.__file.flush()
# return both the location and the size of the data area
return loc, _size+_padLength(_size)
def close(self):
"""Close the 'physical' FITS file."""
self.__file.close()
class HDUList(list, _Verify):
"""HDU list class. This is the top-level FITS object. When a FITS
file is opened, a HDUList object is returned.
"""
def __init__(self, hdus=[], file=None):
"""Construct a HDUList object.
hdus: Input, can be a list of HDU's or a single HDU. Defaults to an
empty list, i.e. an empty HDUList.
file: The opened physical file associated with the HDUList.
Default = None.
"""
self.__file = file
if hdus is None:
hdus = []
# can take one HDU, as well as a list of HDU's as input
if isinstance(hdus, _ValidHDU):
hdus = [hdus]
elif not isinstance(hdus, (HDUList, list)):
raise "Invalid input for HDUList."
for hdu in hdus:
if not isinstance(hdu, _AllHDU):
raise "Element %d in the HDUList input is not an HDU." % hdus.index(hdu)
list.__init__(self, hdus)
def __iter__(self):
return [self[i] for i in range(len(self))].__iter__()
def __getitem__(self, key):
"""Get an HDU from the HDUList, indexed by number or name."""
key = self.index_of(key)
_item = super(HDUList, self).__getitem__(key)
if isinstance(_item, _TempHDU):
super(HDUList, self).__setitem__(key, _item.setupHDU())
return super(HDUList, self).__getitem__(key)
def __getslice__(self, start, end):
_hdus = super(HDUList, self).__getslice__(start,end)
result = HDUList(_hdus)
return result
def __setitem__(self, key, hdu):
"""Set an HDU to the HDUList, indexed by number or name."""
_key = self.index_of(key)
if isinstance(hdu, (slice, list)):
if isinstance(_key, int):
raise ValueError, "An element in the HDUList must be an HDU."
for item in hdu:
if not isinstance(item, _AllHDU):
raise ValueError, "%s is not an HDU." % item
else:
if not isinstance(hdu, _AllHDU):
raise ValueError, "%s is not an HDU." % hdu
try:
super(HDUList, self).__setitem__(_key, hdu)
except IndexError:
raise IndexError, 'Extension %s is out of bound or not found.' % key
self._resize = 1
def __delitem__(self, key):
"""Delete an HDU from the HDUList, indexed by number or name."""
key = self.index_of(key)
super(HDUList, self).__delitem__(key)
self._resize = 1
def __delslice__(self, i, j):
"""Delete a slice of HDUs from the HDUList, indexed by number only."""
super(HDUList, self).__delslice__(i, j)
self._resize = 1
def _verify (self, option='warn'):
_text = ''
_err = _ErrList([], unit='HDU')
# the first (0th) element must be a primary HDU
if len(self) > 0 and (not isinstance(self[0], PrimaryHDU)):
err_text = "HDUList's 0th element is not a primary HDU."
fix_text = 'Fixed by inserting one as 0th HDU.'
fix = "self.insert(0, PrimaryHDU())"
_text = self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)
_err.append(_text)
# each element calls their own verify
for i in range(len(self)):
if i > 0 and (not isinstance(self[i], _ExtensionHDU)):
err_text = "HDUList's element %s is not an extension HDU." % `i`
_text = self.run_option(option, err_text=err_text, fixable=0)
_err.append(_text)
else:
_result = self[i]._verify(option)
if _result:
_err.append(_result)
return _err
def append(self, hdu):
"""Append a new HDU to the HDUList."""
if isinstance(hdu, _AllHDU):
super(HDUList, self).append(hdu)
hdu._new = 1
self._resize = 1
else:
raise "HDUList can only append an HDU"
# make sure the EXTEND keyword is in primary HDU if there is extension
if len(self) > 1:
self.update_extend()
def index_of(self, key):
"""Get the index of an HDU from the HDUList. The key can be an
integer, a string, or a tuple of (string, integer).
"""
if isinstance(key, (int, slice)):
return key
elif isinstance(key, tuple):
_key = key[0]
_ver = key[1]
else:
_key = key
_ver = None
if not isinstance(_key, str):
raise KeyError, key
_key = (_key.strip()).upper()
nfound = 0
for j in range(len(self)):
_name = self[j].name
if isinstance(_name, str):
_name = _name.strip().upper()
if _name == _key:
# if only extname is specified, there can be only one extension
# with that name
if _ver == None:
found = j
nfound += 1
else:
# if the keyword EXTVER does not exist, default it to 1
_extver = self[j]._extver
if _ver == _extver:
found = j
nfound += 1
if (nfound == 0):
raise KeyError, 'extension %s not found' % `key`
elif (nfound > 1):
raise KeyError, 'there are %d extensions of %s' % (nfound, `key`)
else:
return found
def readall(self):
"""Read data of all HDU's into memory."""
for i in range(len(self)):
if self[i].data is not None:
continue
def update_tbhdu(self):
"""Update all table HDU's for scaled fields."""
for hdu in self:
if 'data' in dir(hdu):
if isinstance(hdu, (GroupsHDU, _TableBaseHDU)) and hdu.data is not None:
hdu.data._scale_back()
if isinstance(hdu, _TableBaseHDU) and hdu.data is not None:
# check TFIELDS and NAXIS2
hdu.header['TFIELDS'] = hdu.data._nfields
hdu.header['NAXIS2'] = hdu.data.shape[0]
# calculate PCOUNT, for variable length tables
_tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2']
_heapstart = hdu.header.get('THEAP', _tbsize)
hdu.data._gap = _heapstart - _tbsize
_pcount = hdu.data._heapsize + hdu.data._gap
if _pcount > 0:
hdu.header['PCOUNT'] = _pcount
# update TFORM for variable length columns
for i in range(hdu.data._nfields):
if isinstance(hdu.data._coldefs.formats[i], _FormatP):
key = hdu.header['TFORM'+`i+1`]
hdu.header['TFORM'+`i+1`] = key[:key.find('(')+1] + `hdu.data.field(i)._max` + ')'
def flush(self, output_verify='exception', verbose=0):
"""Force a write of the HDUList back to the file (for append and
update modes only).
output_verify: output verification option, default = 'exception'.
verbose: print out verbose messages? default = 0.
"""
# Get the name of the current thread and determine if this is a single-threaded application
threadName = threading.currentThread()
singleThread = (threading.activeCount() == 1) and (threadName.getName() == 'MainThread')
if singleThread:
# Define new signal interrupt handler; use a mutable flag so the nested
# handler can record a deferred KeyboardInterrupt (a plain rebinding would
# only create a local variable inside New_SIGINT)
keyboardInterruptSent = [False]
def New_SIGINT(*args):
print "KeyboardInterrupt ignored until flush is complete!"
keyboardInterruptSent[0] = True
# Install new handler, remembering the old one so it can be restored later
oldHandler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, New_SIGINT)
if self.__file.mode not in ('append', 'update'):
print "flush for '%s' mode is not supported." % self.__file.mode
return
self.update_tbhdu()
self.verify(option=output_verify)
if self.__file.mode == 'append':
for hdu in self:
if (verbose):
try: _extver = `hdu.header['extver']`
except: _extver = ''
# only append HDU's which are "new"
if hdu._new:
self.__file.writeHDU(hdu)
if (verbose):
print "append HDU", hdu.name, _extver
hdu._new = 0
elif self.__file.mode == 'update':
if not self._resize:
# determine if any of the HDU is resized
for hdu in self:
# Header:
# Add 1 to .ascard to include the END card
_nch80 = reduce(operator.add, map(Card._ncards, hdu.header.ascard))
_bytes = (_nch80+1) * Card.length
_bytes = _bytes + _padLength(_bytes)
if _bytes != (hdu._datLoc-hdu._hdrLoc):
self._resize = 1
if verbose:
print "One or more header is resized."
break
# Data:
if 'data' not in dir(hdu):
continue
if hdu.data is None:
continue
_bytes = hdu.data._itemsize*hdu.data.nelements()
_bytes = _bytes + _padLength(_bytes)
if _bytes != hdu._datSpan:
self._resize = 1
if verbose:
print "One or more data area is resized."
break
# if the HDUList is resized, need to write it to a tmp file,
# delete the original file, and rename the tmp to the original file
if self._resize:
oldName = self.__file.name
oldMemmap = self.__file.memmap
_name = _tmpName(oldName)
_hduList = open(_name, mode="append")
if (verbose): print "open a temp file", _name
for hdu in self:
(hdu._hdrLoc, hdu._datLoc, hdu._datSpan) = _hduList.__file.writeHDU(hdu)
_hduList.__file.close()
self.__file.close()
os.remove(self.__file.name)
if (verbose): print "delete the original file", oldName
# reopen the renamed new file with "update" mode
os.rename(_name, oldName)
ffo = _File(oldName, mode="update", memmap=oldMemmap)
self.__file = ffo
if (verbose): print "reopen the newly renamed file", oldName
# reset the resize attributes after updating
self._resize = 0
for hdu in self:
hdu.header._mod = 0
hdu.header.ascard._mod = 0
hdu._new = 0
hdu._file = ffo.getfile()
# if not resized, update in place
else:
for hdu in self:
if (verbose):
try: _extver = `hdu.header['extver']`
except: _extver = ''
if hdu.header._mod or hdu.header.ascard._mod:
hdu._file.seek(hdu._hdrLoc)
self.__file.writeHDUheader(hdu)
if (verbose):
print "update header in place: Name =", hdu.name, _extver
if 'data' in dir(hdu):
if hdu.data is not None:
hdu._file.seek(hdu._datLoc)
self.__file.writeHDUdata(hdu)
if (verbose):
print "update data in place: Name =", hdu.name, _extver
# reset the modification attributes after updating
for hdu in self:
hdu.header._mod = 0
hdu.header.ascard._mod = 0
if singleThread:
# Restore the original handler first, then re-raise any interrupt that was
# deferred while the flush was in progress
signal.signal(signal.SIGINT, oldHandler)
if keyboardInterruptSent[0]:
raise KeyboardInterrupt
def update_extend(self):
"""Make sure if the primary header needs the keyword EXTEND or if
it has the proper value.
"""
hdr = self[0].header
if hdr.has_key('extend'):
if (hdr['extend'] == False):
hdr['extend'] = True
else:
if hdr['naxis'] == 0:
hdr.update('extend', True, after='naxis')
else:
n = hdr['naxis']
hdr.update('extend', True, after='naxis'+`n`)
def writeto(self, name, output_verify='exception', clobber=False):
"""Write the HDUList to a new file.
name: output FITS file name to be written to.
output_verify: output verification option, default = 'exception'.
clobber: Overwrite the output file if exists, default = False.
"""
if (len(self) == 0):
print "There is nothing to write."
return
self.update_tbhdu()
if output_verify == 'warn':
output_verify = 'exception'
self.verify(option=output_verify)
# check if the output file already exists
if os.path.exists(name):
if clobber:
print "Overwrite existing file '%s'." % name
os.remove(name)
else:
raise IOError, "File '%s' already exist." % name
# make sure the EXTEND keyword is there if there is extension
if len(self) > 1:
self.update_extend()
hduList = open(name, mode="append")
for hdu in self:
hduList.__file.writeHDU(hdu)
hduList.close(output_verify=output_verify)
def close(self, output_verify='exception', verbose=0):
"""Close the associated FITS file and memmap object, if any.
output_verify: output verification option, default = 'exception'.
verbose: print out verbose messages? default = 0.
This simply calls the close method of the _File class. It uses this
two-tier call because _File has its own private attribute __file.
"""
if self.__file != None:
if self.__file.memmap == 1:
self.mmobject = self.__file._mm
if self.__file.mode in ['append', 'update']:
self.flush(output_verify=output_verify, verbose=verbose)
self.__file.close()
# close the memmap object, it is designed to use an independent
# attribute of mmobject so if the HDUList object is created from files
# other than FITS, the close() call can also close the mm object.
try:
self.mmobject.close()
except:
pass
def info(self):
"""Summarize the info of the HDU's in this HDUList."""
if self.__file is None:
_name = '(No file associated with this HDUList)'
else:
_name = self.__file.name
results = "Filename: %s\nNo. Name Type"\
" Cards Dimensions Format\n" % _name
for j in range(len(self)):
results = results + "%-3d %s\n"%(j, self[j]._summary())
results = results[:-1]
print results
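# Illustrative sketch (added commentary): building an HDUList in memory and writing
# it out, versus updating an existing file in place.  File names, the keyword and
# its value are made-up examples.
#
#     hdulist = HDUList([PrimaryHDU()])
#     hdulist.append(ImageHDU(num.zeros((5, 5), type=num.Float32)))
#     hdulist.writeto('fresh.fits')
#
#     f = open('fresh.fits', mode='update')
#     f[0].header.update('OBSERVER', 'nobody')   # hypothetical keyword/value
#     f.flush()                                  # or f.close(), which flushes first
#     f.close()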
def open(name, mode="copyonwrite", memmap=0):
"""Factory function to open a FITS file and return an HDUList object.
name: Name of the FITS file to be opened.
mode: Open mode, 'copyonwrite' (default), 'readonly', 'update', or 'append'.
memmap: Is memory mapping to be used? default=0.
"""
# instantiate a FITS file object (ffo)
ffo = _File(name, mode=mode, memmap=memmap)
hduList = HDUList(file=ffo)
# read all HDU's
while 1:
try:
hduList.append(ffo._readHDU())
except EOFError:
break
# check for the case where there is extra space after the last HDU or a corrupted HDU
except ValueError:
print 'Warning: Required keywords missing when trying to read HDU #%d.\n There may be extra bytes after the last HDU or the file is corrupted.' % (len(hduList)+1)
break
# initialize/reset attributes to be used in "update/append" mode
# CardList needs its own _mod attribute since it has methods to change
# the content of header without being able to pass it to the header object
hduList._resize = 0
return hduList
fitsopen = open
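# Illustrative sketch (added commentary): the typical open/inspect/close cycle.
# The file name and the 'SCI' extension name are made-up examples.
#
#     hdulist = open('example.fits')       # same as fitsopen('example.fits')
#     hdulist.info()                       # summary of all HDUs
#     prihdr = hdulist[0].header
#     scidata = hdulist['SCI'].data        # index by number, EXTNAME, or (EXTNAME, EXTVER)
#     hdulist.close()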
# Convenience functions
class _Zero(int):
def __init__(self):
self = 0
def _getext(filename, mode, *ext1, **ext2):
"""Open the input file, return the HDUList and the extension."""
hdulist = open(filename, mode=mode)
n_ext1 = len(ext1)
n_ext2 = len(ext2)
keys = ext2.keys()
# parse the extension spec
if n_ext1 > 2:
raise ValueError, "too many positional arguments"
elif n_ext1 == 1:
if n_ext2 == 0:
ext = ext1[0]
else:
if isinstance(ext1[0], (int, tuple)):
raise KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2
if isinstance(ext1[0], str):
if n_ext2 == 1 and 'extver' in keys:
ext = ext1[0], ext2['extver']
raise KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2
elif n_ext1 == 2:
if n_ext2 == 0:
ext = ext1
else:
raise KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2
elif n_ext1 == 0:
if n_ext2 == 0:
ext = _Zero()
elif 'ext' in keys:
if n_ext2 == 1:
ext = ext2['ext']
elif n_ext2 == 2 and 'extver' in keys:
ext = ext2['ext'], ext2['extver']
else:
raise KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2
else:
if 'extname' in keys:
if 'extver' in keys:
ext = ext2['extname'], ext2['extver']
else:
ext = ext2['extname']
else:
raise KeyError, 'Insufficient keyword argument: %s' % ext2
return hdulist, ext
def getheader(filename, *ext, **extkeys):
"""Get the header from an extension of a FITS file.
@param filename: input FITS file name
@type: string
@param ext: The rest of the arguments are for extension specification.
See L{getdata} for explanations/examples.
@rtype: L{Header} object
@return: header
"""
hdulist, _ext = _getext(filename, 'readonly', *ext, **extkeys)
hdu = hdulist[_ext]
hdr = hdu.header
hdulist.close()
return hdr
def getdata(filename, *ext, **extkeys):
"""Get the data from an extension of a FITS file (and optionally the header).
@type filename: string
@param filename: input FITS file name
@param ext: The rest of the arguments are for extension specification. They are
flexible and are best illustrated by examples:
No extra arguments implies the primary header
>>> getdata('in.fits')
By extension number:
>>> getdata('in.fits', 0) # the primary header
>>> getdata('in.fits', 2) # the second extension
>>> getdata('in.fits', ext=2) # the second extension
By name, i.e., EXTNAME value (if unique):
>>> getdata('in.fits', 'sci')
>>> getdata('in.fits', extname='sci') # equivalent
Note EXTNAMEs are not case sensitive
By combination of EXTNAME and EXTVER, as separate arguments or as a tuple:
>>> getdata('in.fits', 'sci', 2) # EXTNAME='SCI' & EXTVER=2
>>> getdata('in.fits', extname='sci', extver=2) # equivalent
>>> getdata('in.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception, e.g.,
>>> getdata('in.fits', ext=('sci',1), extname='err', extver=2)
@return: an array, record array (i.e. table), or groups data object
depending on the type of the extension being referenced
If the optional keyword 'header' is set to True, this function will
return a (data, header) tuple.
"""
if 'header' in extkeys:
_gethdr = extkeys['header']
del extkeys['header']
else:
_gethdr = False
hdulist, _ext = _getext(filename, 'readonly', *ext, **extkeys)
hdu = hdulist[_ext]
_data = hdu.data
if _data is None and isinstance(_ext, _Zero):
try:
hdu = hdulist[1]
_data = hdu.data
except IndexError:
raise IndexError, 'No data in this HDU.'
if _data is None:
raise IndexError, 'No data in this HDU.'
if _gethdr:
_hdr = hdu.header
hdulist.close()
if _gethdr:
return _data, _hdr
else:
return _data
def getval(filename, key, *ext, **extkeys):
"""Get a keyword's value from a header in a FITS file.
@type filename: string
@param filename: input FITS file name
@type key: string
@param key: keyword name
@param ext: The rest of the arguments are for extension specification.
See L{getdata} for explanations/examples.
@return: keyword value
@rtype: string, integer, or float
"""
_hdr = getheader(filename, *ext, **extkeys)
return _hdr[key]
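# Illustrative sketch (added commentary): the three "get" convenience functions for
# quick, one-shot access.  The file name and keyword are made-up examples.
#
#     hdr = getheader('in.fits', 0)                    # primary header
#     data, hdr = getdata('in.fits', 1, header=True)   # data plus header of extension 1
#     naxis1 = getval('in.fits', 'NAXIS1', 1)          # a single keyword value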
def _makehdu(data, header):
if header is None:
if isinstance(data, num.NumArray):
hdu = ImageHDU(data)
elif isinstance(data, FITS_rec):
hdu = BinTableHDU(data)
else:
raise TypeError, 'data must be numarray or table data.'
else:
hdu=header._hdutype(data=data, header=header)
return hdu
def writeto(filename, data, header=None, **keys):
"""Create a new FITS file using the supplied data/header.
@type filename: string
@param filename: name of the new FITS file to write to
@type data: array, record array, or groups data object
@param data: data to write to the new file
@type header: L{Header} object or None
@param header: the header associated with 'data', if None, a
header of the appropriate type is created for the supplied
data. This argument is optional.
@keyword clobber: (optional) if True and if filename already exists, it
will overwrite the file. Default is False.
"""
if header is None:
if 'header' in keys:
header = keys['header']
hdu=_makehdu(data, header)
if not isinstance(hdu, PrimaryHDU):
hdu = PrimaryHDU(data, header=header)
clobber = keys.get('clobber', False)
hdu.writeto(filename, clobber=clobber)
def append(filename, data, header=None):
"""Append the header/data to FITS file if filename exists, create if not.
If only data is supplied, a minimal header is created
@type filename: string
@param filename: name of the file to append to
@type data: array, table, or group data object
@param data: the new data used for appending
@type header: L{Header} object or None
@param header: the header associated with 'data', if None,
an appropriate header will be created for the data object
supplied.
"""
if not os.path.exists(filename):
writeto(filename, data, header)
else:
hdu=_makehdu(data, header)
if isinstance(hdu, PrimaryHDU):
hdu = ImageHDU(data, header)
f = open(filename, mode='update')
f.append(hdu)
f.close()
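# Illustrative sketch (added commentary): writing a brand-new file and appending a
# further HDU to it with the convenience functions.  Array contents are made up.
#
#     img = num.zeros((10, 10), type=num.Float32)
#     writeto('new.fits', img)                 # creates the file with a primary HDU
#     append('new.fits', img + 1.0)            # added as an ImageHDU extension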
def update(filename, data, *ext, **extkeys):
"""Update the specified extension with the input data/header.
@type filename: string
@param filename: name of the file to be updated
data: the new data used for updating
The rest of the arguments are flexible:
the 3rd argument can be the header associated with the data.
If the 3rd argument is not a header, it (and other positional
arguments) are assumed to be the extension specification(s).
Header and extension specs can also be keyword arguments.
For example:
>>> update(file, dat, hdr, 'sci') # update the 'sci' extension
>>> update(file, dat, 3) # update the 3rd extension
>>> update(file, dat, hdr, 3) # update the 3rd extension
>>> update(file, dat, 'sci', 2) # update the 2nd SCI extension
>>> update(file, dat, 3, header=hdr) # update the 3rd extension
>>> update(file, dat, header=hdr, ext=5) # update the 5th extension
"""
# parse the arguments
header = None
if len(ext) > 0:
if isinstance(ext[0], Header):
header = ext[0]
ext = ext[1:]
elif not isinstance(ext[0], (int, long, str, tuple)):
raise KeyError, 'Input argument has wrong data type.'
if 'header' in extkeys:
header = extkeys['header']
del extkeys['header']
new_hdu=_makehdu(data, header)
hdulist, _ext = _getext(filename, 'update', *ext, **extkeys)
hdulist[_ext] = new_hdu
hdulist.close()
def info(filename):
"""Print the summary information on a FITS file.
This includes the name, type, length of header, data shape and type
for each extension.
@type filename: string
@param filename: input FITS file name
"""
f = open(filename)
f.info()
f.close()
UNDEFINED = Undefined()
__credits__="""
Copyright (C) 2004 Association of Universities for Research in Astronomy (AURA)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. The name of AURA and its representatives may not be used to
endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
| 1.984375 | 2 |
tools/real_world_impact/nsfw_urls.py | zealoussnow/chromium | 14,668 | 12790301 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""NSFW urls in the Alexa top 2000 sites."""
nsfw_urls = set([
"http://xhamster.com/",
"http://xvideos.com/",
"http://livejasmin.com/",
"http://pornhub.com/",
"http://redtube.com/",
"http://youporn.com/",
"http://xnxx.com/",
"http://tube8.com/",
"http://youjizz.com/",
"http://adultfriendfinder.com/",
"http://hardsextube.com/",
"http://yourlust.com/",
"http://drtuber.com/",
"http://beeg.com/",
"http://largeporntube.com/",
"http://nuvid.com/",
"http://bravotube.net/",
"http://spankwire.com/",
"http://discreethearts.com/",
"http://keezmovies.com/",
"http://xtube.com/",
"http://alphaporno.com/",
"http://4tube.com/",
"http://nudevista.com/",
"http://porntube.com/",
"http://xhamstercams.com/",
"http://porn.com/",
"http://video-one.com/",
"http://perfectgirls.net/",
"http://slutload.com/",
"http://sunporno.com/",
"http://tnaflix.com/",
"http://pornerbros.com/",
"http://h2porn.com/",
"http://adult-empire.com/",
"http://pornhublive.com/",
"http://sexitnow.com/",
"http://pornsharia.com/",
"http://freeones.com/",
"http://tubegalore.com/",
"http://xvideos.jp/",
"http://brazzers.com/",
"http://fapdu.com/",
"http://pornoxo.com/",
"http://extremetube.com/",
"http://hot-sex-tube.com/",
"http://xhamsterhq.com/",
"http://18andabused.com/",
"http://tubepleasure.com/",
"http://18schoolgirlz.com/",
"http://chaturbate.com/",
"http://motherless.com/",
"http://yobt.com/",
"http://empflix.com/",
"http://hellporno.com/",
"http://ashemaletube.com/",
"http://watchmygf.com/",
"http://redtubelive.com/",
"http://met-art.com/",
"http://gonzoxxxmovies.com/",
"http://shufuni.com/",
"http://vid2c.com/",
"http://dojki.com/",
"http://cerdas.com/",
"http://overthumbs.com/",
"http://xvideoslive.com/",
"http://playboy.com/",
"http://caribbeancom.com/",
"http://tubewolf.com/",
"http://xmatch.com/",
"http://ixxx.com/",
"http://nymphdate.com/",
]) | 1.148438 | 1 |
setup.py | EhwaZoom/bpgen | 0 | 12790302 | <reponame>EhwaZoom/bpgen
import setuptools
setuptools.setup(
name = 'bpgen',
version = '0.1.0',
description = 'Boilerplate generator.',
url = 'https://github.com/EhwaZoom/bpgen',
author = 'EhwaZoom',
author_email = '<EMAIL>',
maintainer = 'EhwaZoom',
maintainer_email = '<EMAIL>',
packages = setuptools.find_packages(),
entry_points = {
'console_scripts': ['bpgen=bpgen.main:main']
}
) | 1.132813 | 1 |
HW3/HW3_1.py | kolyasalubov/Lv-677.PythonCore | 0 | 12790303 | zen_of_P = """Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the
temptation to guess.
There should be one-- and preferably
only one --obvious way to do it.
Although that way may not be obvious
at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!"""
better_number = (zen_of_P.count("better"))
is_number = (zen_of_P.count("is"))
never_number = (zen_of_P.count("never"))
print ("Word BETTER used", better_number, "times")
print ("Word IS used", is_number, "times")
print ("Word NEVER used", never_number, "times")
upper_case = (zen_of_P.upper())
print(upper_case)
replacing_i = (zen_of_P.replace('i','&'))
print(replacing_i)
| 3.59375 | 4 |
webdriver_recorder/plugin.py | UWIT-IAM/webdriver-recorder | 6 | 12790304 | import logging
import os
import sys
import tempfile
from contextlib import contextmanager
from typing import Callable, Dict, List, Optional, Type, Union
import pytest
from pydantic import BaseSettings, validator
from selenium import webdriver
from .browser import BrowserError, BrowserRecorder, Chrome, Remote
from .models import Outcome, Report, ReportResult, TestResult, Timed
from .report_exporter import ReportExporter
_here = os.path.abspath(os.path.dirname(__file__))
logger = logging.getLogger(__name__)
class EnvSettings(BaseSettings):
"""
Automatically derives from environment variables and
translates truthy/falsey strings into bools. Only required
for code that must be conditionally loaded; all others
should be part of 'pytest_addoption()'
"""
# If set to True, will generate a new browser instance within every request
# for a given scope, instead of only creating a single instance and generating
# contexts for each test.
# This has a significant performance impact,
# but sometimes cannot be avoided.
disable_session_browser: Optional[bool] = False
@validator("*", pre=True, always=True)
def handle_empty_string(cls, v):
if not v:
return None
return v
_SETTINGS = EnvSettings()
def pytest_addoption(parser):
group = parser.getgroup("webdriver_recorder")
group.addoption(
"--selenium-server",
action="store",
dest="selenium_server",
default=os.environ.get("REMOTE_SELENIUM"),
help="Remote selenium webdriver to connect to (eg localhost:4444)",
)
group.addoption(
"--report-dir",
action="store",
dest="report_dir",
default=os.environ.get("REPORT_DIR", os.path.join(os.getcwd(), "webdriver-report")),
help="The path to the directory where artifacts should be stored.",
)
group.addoption(
"--jinja-template",
action="store",
dest="report_template",
default=os.path.join(_here, "report.template.html"),
)
group.addoption(
"--report-title",
action="store",
dest="report_title",
default="Webdriver Recorder Summary",
help="An optional title for your report; if not provided, a default will be used. "
"You may also provide a constant default by overriding the report_title fixture.",
)
@pytest.fixture(scope="session", autouse=True)
def clean_screenshots(report_dir):
screenshots_dir = os.path.join(report_dir, "screenshots")
if os.path.exists(screenshots_dir):
old_screenshots = os.listdir(screenshots_dir)
for png in old_screenshots:
os.remove(os.path.join(screenshots_dir, png))
@pytest.fixture(scope="session", autouse=True)
def test_report(report_title) -> Report:
args = []
if len(sys.argv) > 1:
args.extend(sys.argv[1:])
return Report(
arguments=" ".join(args),
outcome=Outcome.never_started,
title=report_title,
)
@pytest.fixture(scope="session")
def selenium_server(request) -> Optional[str]:
"""Returns a non-empty string or None"""
value = request.config.getoption("selenium_server")
if value:
return value.strip()
return None
@pytest.fixture(scope="session")
def chrome_options() -> webdriver.ChromeOptions:
"""
An extensible instance of ChromeOptions with default
options configured for a balance between performance
and test isolation.
You can extend this:
@pytest.fixture(scope='session')
def chrome_options(chrome_options) -> ChromeOptions:
chrome_options.add_argument("--option-name")
return chrome_options
or override it entirely:
@pytest.fixture(scope='session')
def chrome_options() -> ChromeOptions:
return ChromeOptions()
"""
options = webdriver.ChromeOptions()
# Our default options promote a balance between
# performance and test isolation.
options.add_argument("--headless")
options.add_argument("--incognito")
options.add_argument("--disable-application-cache")
options.add_argument("--no-sandbox")
options.add_argument("--disable-dev-shm-usage")
return options
@pytest.fixture(scope="session")
def browser_args(selenium_server, chrome_options) -> Dict[str, Optional[Union[webdriver.ChromeOptions, str]]]:
args = {"options": chrome_options}
if selenium_server:
args["command_executor"] = f"http://{selenium_server}/wd/hub"
return args
@pytest.fixture(scope="session")
def browser_class(browser_args) -> Type[BrowserRecorder]:
if browser_args.get("command_executor"):
return Remote
return Chrome
@pytest.fixture(scope="session")
def build_browser(browser_args, browser_class) -> Callable[..., BrowserRecorder]:
logger.info(
"Browser generator will build instances using the following settings:\n"
f" Browser class: {browser_class.__name__}\n"
f" Browser args: {dict(browser_args)}"
)
def inner() -> BrowserRecorder:
return browser_class(**browser_args)
return inner
@pytest.fixture(scope="session")
def session_browser(build_browser) -> BrowserRecorder:
"""
A browser instance that is kept open for the entire test run.
Only instantiated if it is used, but by default will be used in both the
'browser' and 'class_browser' fixtures, unless "disable_session_browser=1"
is set in the environment.
"""
browser = build_browser()
try:
yield browser
finally:
browser.quit()
@pytest.fixture(scope="session")
def browser_context() -> Callable[..., Chrome]:
"""
This fixture allows you to create a fresh context for a given
browser instance.
The default behavior of the `browser` fixture is to always run in a context of the session scope, so
you only need to use this if you are not using (or are overriding) the `browser` fixture.
The fixture itself simply passes the context manager, so you can use it like so:
def test_something(browser_context):
with browser_context() as browser:
browser.get('https://www.uw.edu')
You may also provide a list of urls to visit to clear cookies at the end of your session,
if the default 'delete_all_cookies' behavior is not enough to cover your use case.
"""
@contextmanager
def inner(browser: BrowserRecorder, cookie_urls: Optional[List[str]] = None) -> BrowserRecorder:
browser.open_tab()
cookie_urls = cookie_urls or []
try:
yield browser
finally:
browser.delete_all_cookies()
for url in cookie_urls:
browser.get(url)
browser.delete_all_cookies()
browser.close_tab()
return inner
if _SETTINGS.disable_session_browser:
logger.warning("Disabling auto-use of 'session_browser', this may significantly decrease test performance.")
@pytest.fixture(scope="session")
def session_browser_disabled() -> bool:
return True
@pytest.fixture
def browser(build_browser) -> BrowserRecorder:
"""Creates a fresh instance of the browser using the configured chrome_options fixture."""
browser = build_browser()
try:
yield browser
finally:
browser.quit()
@pytest.fixture(scope="class")
def class_browser(build_browser, request) -> BrowserRecorder:
"""
    Creates a fresh instance of the browser for use in the requesting class, using the configured
    chrome_options fixture.
"""
browser = build_browser()
request.cls.browser = browser
try:
yield browser
finally:
browser.quit()
else:
logger.info(
"Enabling auto-use of 'session_browser'; if your tests appear stuck, try disabling "
"by setting 'disable_session_browser=1' in your environment."
)
@pytest.fixture
def browser(session_browser, browser_context) -> BrowserRecorder:
"""
Creates a function-scoped tab context for the session_browser which cleans
up after itself (to the best of its ability). If you need a fresh instance
each test, you can set `disable_session_browser=1` in your environment.
"""
with browser_context(session_browser) as browser:
yield browser
@pytest.fixture(scope="class")
def class_browser(request, session_browser, browser_context) -> BrowserRecorder:
"""
Creates a class-scoped tab context and binds it to the requesting class
as 'self.browser'; this tab will close once all tests in the class have run,
and will clean up after itself (to the best of its ability). If you need
a fresh browser instance for each class, you can set `disable_session_browser=1` in your
environment.
"""
with browser_context(session_browser) as browser:
request.cls.browser = browser
yield browser
@pytest.fixture(scope="session")
def session_browser_disabled() -> bool:
return False
@pytest.fixture(scope="session")
def report_dir(request):
dir_ = request.config.getoption("report_dir")
os.makedirs(dir_, exist_ok=True)
return dir_
@pytest.fixture(scope="session", autouse=True)
def report_generator(report_dir, test_report):
with tempfile.NamedTemporaryFile(prefix="worker.", dir=report_dir) as worker_file:
suffix = ".".join(worker_file.name.split(".")[1:])
yield
test_report.stop_timer()
exporter = ReportExporter()
workers = list(f for f in os.listdir(report_dir) if f.startswith("worker."))
worker_results = list(f for f in os.listdir(report_dir) if f.endswith(".result.json"))
if not workers:
test_report.outcome = Outcome.success
# Aggregate worker reports into this "root" report.
for result_file in [os.path.join(report_dir, f) for f in worker_results]:
worker_report = Report.parse_file(result_file)
test_report.results.extend(worker_report.results)
os.remove(result_file)
exporter.export_all(test_report, report_dir)
else:
# If there are other workers, only export the report json of the
# current worker. The last worker running will be responsible for aggregating and reporting results.
exporter.export_json(test_report, report_dir, dest_filename=f"{suffix}.result.json")
@pytest.fixture(autouse=True)
def report_test(report_generator, request, test_report):
"""
Print the results to report_file after a test run. Without this, the results of the test will not be saved.
"""
tb = None
console_logs = []
timer: Timed
with Timed() as timer:
yield
call_summary = getattr(request.node, "report_result", None)
if call_summary:
doc = call_summary.doc
test_name = call_summary.report.nodeid
outcome = Outcome.failure if call_summary.report.failed else Outcome.success
if call_summary and call_summary.excinfo and not tb:
outcome = Outcome.failure
exception: BaseException = call_summary.excinfo.value
exception_msg = f"{exception.__class__.__name__}: {str(exception)}"
if isinstance(exception, BrowserError):
if exception.orig:
tb = f"{exception_msg}\n{exception.orig=}"
console_logs = [log.get("message", "") for log in exception.logs]
if not tb:
tb = f"{exception_msg}\n(No traceback is available)"
else:
logging.error(
f"Test {request.node} reported no outcomes; "
f"this usually indicates a fixture caused an error when setting up the test."
)
doc = None
test_name = f"{request.node.name}"
outcome = Outcome.never_started
# TODO: Figure out a way to include class docs if they exist
# class TestFooBar:
# """
# When Foo is bar
# """
# def test_a_baz(self):
# """and baz is bop"""
# do_work('bop')
# The report output should then read "When foo is bar and baz is bop"
result = TestResult(
pngs=BrowserRecorder.pngs,
test_name=test_name,
test_description=doc,
outcome=outcome,
start_time=timer.start_time,
end_time=timer.end_time,
traceback=tb,
console_errors=console_logs,
)
BrowserRecorder.pngs = []
test_report.results.append(result)
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""
This gives us hooks from which to report status post test-run.
"""
outcome = yield
report = outcome.get_result()
if report.when == "call":
doc = getattr(getattr(item, "function", None), "__doc__", None)
item.report_result = ReportResult(report=report, excinfo=call.excinfo, doc=doc)
@pytest.fixture(scope="session")
def report_title(request) -> str:
return request.config.getoption("report_title")
| 2.21875 | 2 |
___Language___/Python/Iter/Zip and Unzip & range.py | JUD210/Study-Note | 0 | 12790305 | <gh_stars>0
a = [1, 2, 3]
b = [4, 5]
r = ((x, y) for x in a for y in b)
print(r)
# <generator object <genexpr> at 0x00000182580A0B88>
print(*r)
# (1, 4) (1, 5) (2, 4) (2, 5) (3, 4) (3, 5)
r = [(x, y) for x in a for y in b]
print(r)
# [(1, 4), (1, 5), (2, 4), (2, 5), (3, 4), (3, 5)]
print(*r)
# (1, 4) (1, 5) (2, 4) (2, 5) (3, 4) (3, 5)
print(*r)
# Prints Nothing
l = []
l.append([1, 2, 3])
l.append([4, 5])
print(*zip(*l))
# (1, 4) (2, 5)
print(*zip((1, 2, 3), (10, 20, 30), (100, 200, 300)), sep="\n")
# (1, 10, 100)
# (2, 20, 200)
# (3, 30, 300)
print(*zip((1,), (10, 20), (100, 200, 300)), sep="\n")
# (1, 10, 100)
print(*[(1, 2, 3), (10, 20, 30), (100, 200, 300)], sep="\n")
# (1, 2, 3)
# (10, 20, 30)
# (100, 200, 300)
print([*range(1, 4)], [*range(10, 40, 10)], *range(100, 400, 100), sep="\n")
# [1, 2, 3]
# [10, 20, 30]
# 100
# 200
# 300
| 2.796875 | 3 |
saintBioutils/utilities/file_io/get_paths.py | HobnobMancer/saintBioutils | 1 | 12790306 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) University of St Andrews 2020-2021
# (c) University of Strathclyde 2020-2021
# (c) James Hutton Institute 2020-2021
#
# Author:
# <NAME>
#
# Contact
# <EMAIL>
#
# <NAME>,
# Biomolecular Sciences Building,
# University of St Andrews,
# <NAME>,
# St Andrews,
# KY16 9ST
# Scotland,
# UK
#
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Script containing functions to retrieve paths to files and directories"""
from pathlib import Path
def get_file_paths(directory, prefixes=None, suffixes=None):
"""Retrieve paths to all files in input dir.
:param directory: Path, path to directory from which files are to be retrieved
:param prefixes: List of Str, prefixes of the file names to be retrieved
:param suffixes: List of Str, suffixes of the file names to be retrieved
    Returns list of paths to matching files.
"""
# create empty list to store the file entries, to allow checking if no files returned
file_paths = []
# retrieve all files from input directory
files_in_entries = (entry for entry in Path(directory).iterdir() if entry.is_file())
if prefixes is None and suffixes is None:
for item in files_in_entries:
file_paths.append(item)
elif prefixes is not None and suffixes is None:
for item in files_in_entries:
for prefix in prefixes:
if item.name.startswith(prefix):
file_paths.append(item)
elif prefixes is None and suffixes is not None:
for item in files_in_entries:
for suffix in suffixes:
if item.name.endswith(suffix):
file_paths.append(item)
else:
for item in files_in_entries:
for suffix in suffixes:
for prefix in prefixes:
if item.name.startswith(prefix) and item.name.endswith(suffix):
file_paths.append(item)
return file_paths
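# Illustrative usage sketch; the directory name and suffixes are assumptions:
#
#     >>> fasta_paths = get_file_paths(Path("genomes"), suffixes=[".fasta", ".fa"])
#     >>> sorted(p.name for p in fasta_paths)  # files ending in .fasta or .fa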
def get_dir_paths(directory, prefixes=None, suffixes=None):
"""Retrieve paths to all directories in input dir.
:param directory: Path, path to directory from which files are to be retrieved
:param prefixes: List of Str, prefixes of the file names to be retrieved
:param suffixes: List of Str, suffixes of the file names to be retrieved
    Returns list of paths to matching directories.
"""
# create empty list to store the file entries, to allow checking if no files returned
dir_paths = []
# retrieve all files from input directory
files_in_entries = (entry for entry in Path(directory).iterdir() if entry.is_dir())
if prefixes is None and suffixes is None:
for item in files_in_entries:
dir_paths.append(item)
elif prefixes is not None and suffixes is None:
for item in files_in_entries:
for prefix in prefixes:
if item.name.startswith(prefix):
dir_paths.append(item)
elif prefixes is None and suffixes is not None:
for item in files_in_entries:
for suffix in suffixes:
if item.name.endswith(suffix):
dir_paths.append(item)
else:
for item in files_in_entries:
for suffix in suffixes:
for prefix in prefixes:
if item.name.startswith(prefix) and item.name.endswith(suffix):
dir_paths.append(item)
return dir_paths
| 1.601563 | 2 |
training/Metrics.py | sdwalker62/Log-Diagnostics-Archive | 3 | 12790307 | import tensorflow as tf
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True)
def loss_function(real, pred):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return tf.reduce_sum(loss_)/tf.reduce_sum(mask)
def accuracy_function(real, pred):
accuracies = tf.equal(real, tf.argmax(pred, axis=1))
mask = tf.math.logical_not(tf.math.equal(real, 0))
accuracies = tf.math.logical_and(mask, accuracies)
accuracies = tf.cast(accuracies, dtype=tf.float32)
mask = tf.cast(mask, dtype=tf.float32)
return tf.reduce_sum(accuracies) / tf.reduce_sum(mask)
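# Illustrative usage sketch; the shapes are assumptions: (batch, seq) integer targets
# against (batch, seq, vocab) logits, with 0 treated as padding by the mask:
#
#     >>> real = tf.constant([[2, 1, 0]], dtype=tf.int64)
#     >>> pred = tf.random.uniform((1, 3, 5))
#     >>> loss_function(real, pred)  # mean loss over the two non-padded positions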
| 2.53125 | 3 |
pynq_chainer/overlays/__init__.py | tkat0/pynq-chainer | 6 | 12790308 | <filename>pynq_chainer/overlays/__init__.py<gh_stars>1-10
from .mmult import Mmult
from .bin_mmult import BinMmult
| 1.179688 | 1 |
host/models.py | Sindhuja-SRL/back-end | 0 | 12790309 | <reponame>Sindhuja-SRL/back-end
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Event(models.Model):
host = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
name = models.CharField(max_length=200)
status = models.CharField(max_length=50) # Choices: Created, Running, Ended, Archived
# Event settings parameters
x_label_min = models.CharField(max_length=200, default="", null=True)
x_label_max = models.CharField(max_length=200, default="", null=True)
class Question(models.Model):
event = models.ForeignKey(Event, on_delete=models.CASCADE)
description = models.CharField(max_length=200)
class AnswerChoice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
description = models.CharField(max_length=200)
value = models.IntegerField()
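# Illustrative usage sketch (assumes a configured Django project and an existing user):
#
#     >>> event = Event.objects.create(host=user, name="Demo session", status="Created")
#     >>> question = Question.objects.create(event=event, description="How clear was the talk?")
#     >>> AnswerChoice.objects.create(question=question, description="Very clear", value=5)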
| 2.46875 | 2 |
GPU/python/CUDA_numba.py | maxtcurie/Parallel_programming | 0 | 12790310 | import numpy as np
from numba import cuda, float32
import time
@cuda.jit
def matmul(A, B, C):
"""Perform square matrix multiplication of C = A * B
"""
i, j = cuda.grid(2)
if i < C.shape[0] and j < C.shape[1]:
tmp = 0.
for k in range(A.shape[1]):
tmp += A[i, k] * B[k, j]
C[i, j] = tmp
start = time.time()
# The original snippet left A, B, C uninitialized; the sizes below are
# illustrative assumptions so the example actually runs.
A = np.random.rand(128, 128).astype(np.float32)
B = np.random.rand(128, 128).astype(np.float32)
C = np.zeros((128, 128), dtype=np.float32)
# Launch the CUDA kernel with an explicit grid/block configuration.
threadsperblock = (16, 16)
blockspergrid = ((C.shape[0] + 15) // 16, (C.shape[1] + 15) // 16)
matmul[blockspergrid, threadsperblock](A, B, C)
end = time.time()
print(f"Runtime of the program is {end - start} s") | 3.484375 | 3 |
pleiades_script.py | jagfu/Qanalysis | 0 | 12790311 | <reponame>jagfu/Qanalysis
#!/usr/bin/python3
import requests
import random
import time
import calendar
import os
import argparse
import datetime
# Console arguments
parser = argparse.ArgumentParser(
description="Estimate number of cars at given rectangles (latitude-longitude) on given timeframes"
)
parser.add_argument("-p", "--project_id", help="Project ID from UP42 Console", type=str, required=True)
parser.add_argument("-k", "--api_key", help="API Key from UP42 Console", type=str, required=True)
parser.add_argument("-c", "--coordinates", nargs="+",
help="List of latitude-longitude pairs, each representing 2 corners of a square. Sample: " +
"37.327035,-121.941054:37.323451,-121.940485", required=True)
parser.add_argument("-t", "--timeframes", nargs="+",
help="List of latitude-longitude pairs, each representing 2 corners of a square. Sample: " +
"2019-12-01:2020-02-28", required=True)
parser.add_argument("-v", "--verbose", help="Output more debug information", action='store_true')
parser.add_argument("--no_store", help="Disables saving of raw archives from UP42", action='store_false')
parser.add_argument("--workflow_name_prefix",
help="Workflow name prefix to be passed to UP42 console. Default: covid19_car_estimate_",
default="covid19_car_estimate_")
parser.add_argument("--no_cleanup", help="Keep workflow in UP42 project after script is done", action='store_false')
args = parser.parse_args()
DEBUG_LOGGING = args.verbose
PROJECT_ID = args.project_id
API_KEY = args.api_key
SAVE_ALL_JOB_RESULTS = args.no_store
BASE_URL = "https://api.up42.com"
BASE_WORKFLOW_NAME = args.workflow_name_prefix
CLEANUP_WORKFLOW = args.no_cleanup
# Process input polygon - validate and convert into UP42 input format.
POLYGONS = []
for coordinate_pair in args.coordinates:
poly = coordinate_pair.split(":")
if len(poly) != 2:
print("Bad coordinate pair: "+coordinate_pair)
exit()
converted_poly = []
for point in poly:
pt = point.split(",")
if len(pt) != 2:
print("Bad coordinate pair: " + coordinate_pair)
exit()
pt[0] = float(pt[0])
pt[1] = float(pt[1])
converted_poly.append(pt)
prepared_poly = [
[converted_poly[0][1], converted_poly[0][0]],
[converted_poly[1][1], converted_poly[0][0]],
[converted_poly[1][1], converted_poly[1][0]],
[converted_poly[0][1], converted_poly[1][0]],
[converted_poly[0][1], converted_poly[0][0]]
]
POLYGONS.append(prepared_poly)
# Date validation helper.
def _validate_date(date_text):
try:
datetime.datetime.strptime(date_text, '%Y-%m-%d')
return True
except ValueError:
return False
# Process input timeframes - validate and convert into UP42 input format.
TIME_LIMITS = []
for timeframe in args.timeframes:
tf = timeframe.split(":")
if len(tf) != 2:
print("Bad timeframe: "+timeframe)
exit()
if not _validate_date(tf[0]):
print("Bad date: "+tf[0])
exit()
if not _validate_date(tf[1]):
print("Bad date: "+tf[1])
exit()
TIME_LIMITS.append(
tf[0]+"T00:00:00+00:00/"+tf[1]+"T23:59:59+00:00"
)
# Random name for the workflow to help determine which one it is in the UI later.
randomized_name = BASE_WORKFLOW_NAME + (''.join(random.choice("0123456789abcdef") for i in range(16)))
# Storage folder name for all tarballs.
current_timestamp = calendar.timegm(time.gmtime())
folder = "raw_job_" + str(current_timestamp)
# Constant block names for automatic search.
data_block_name = 'oneatlas-pleiades-aoiclipped'
processing_block_name = 'sm_veh-detection'
# API INTEGRATION #
# Simple bearer auth implementation to reduce amount of external dependencies.
class BearerAuth(requests.auth.AuthBase):
def __init__(self, client_id, client_secret, timeout=60):
self.ts = time.time() - timeout * 2
self.timeout = timeout
self.client_id = client_id
self.client_secret = client_secret
def _get_token(self):
return requests.post(BASE_URL + "/oauth/token",
auth=(self.client_id, self.client_secret),
headers={'Content-Type': 'application/x-www-form-urlencoded'},
data={'grant_type': 'client_credentials'}
).json()['access_token']
def __call__(self, r):
if time.time() - self.ts > self.timeout:
self.token = self._get_token()
self.ts = time.time()
r.headers["authorization"] = "Bearer " + self.token
return r
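# Illustrative usage sketch: the auth object caches its token and can be passed to
# plain `requests` calls; the credentials below are placeholders.
#
#     >>> auth = BearerAuth("my-project-id", "my-api-key")
#     >>> requests.get(BASE_URL + "/blocks", auth=auth).json()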
# API Client abstraction
class ApiClient(object):
def __init__(self, base_url, project_id, api_key):
self.base_url = base_url
self.project_id = project_id
self.api_key = api_key
self.bearer_auth = BearerAuth(project_id, api_key)
def _get_query(self, url):
return requests.get(self.base_url + url, auth=self.bearer_auth)
def _post_query(self, url, data):
return requests.post(self.base_url + url, auth=self.bearer_auth, json=data)
def _put_query(self, url, data):
return requests.put(self.base_url + url, auth=self.bearer_auth, json=data)
def _delete_query(self, url):
return requests.delete(self.base_url + url, auth=self.bearer_auth)
def get_blocks(self):
return self._get_query("/blocks").json()
def create_workflow(self, name, description):
return self._post_query("/projects/" + self.project_id + "/workflows", data={
'name': name,
'description': description
}).json()
# This will fill a chain of blocks, one by one, and create it.
def set_workflow_tasks(self, workflow_id, task_list):
tasks = []
previous_name = None
for task in task_list:
tasks.append({
"name": task["name"],
"parentName": previous_name,
"blockId": task["id"]
})
previous_name = task["name"]
return self._post_query("/projects/" + self.project_id + "/workflows/" + workflow_id + "/tasks",
data=tasks).json()
def delete_workflow(self, workflow_id):
return self._delete_query("/projects/" + self.project_id + "/workflows/" + workflow_id)
def get_job(self, job_id):
return self._get_query("/projects/" + self.project_id + "/jobs/" + job_id).json()
def get_job_output(self, job_id):
return self._get_query("/projects/" + self.project_id + "/jobs/" + job_id + "/outputs/data-json").json()
def run_job(self, workflow_id, name, is_dry, job_parameters):
data = job_parameters.copy()
if is_dry:
data['config'] = {"mode": "DRY_RUN"}
return self._post_query("/projects/" + self.project_id + "/workflows/" + workflow_id + "/jobs?name=" + name,
data).json()
def get_job_tasks(self, job_id):
return self._get_query("/projects/" + self.project_id + "/jobs/" + job_id + "/tasks").json()
def get_task_signed_url(self, job_id, task_id):
return self._get_query(
"/projects/" + self.project_id + "/jobs/" + job_id + "/tasks/" + task_id + "/downloads/results").json()
def dump_task_url(self, url, target_folder, target_name):
try:
os.mkdir(target_folder)
except:
# We already have a preexisting directory with this name.
pass
output_location = os.path.join(target_folder, target_name)
content = requests.get(url).content
with open(output_location, "wb") as f:
f.write(content)
api_client = ApiClient(BASE_URL, PROJECT_ID, API_KEY)
# Specific method which will extract target blocks that will be used in workflow.
# data_block_name will be used as source satellite data
# processing_block_name will be used for vehicle detection
def extract_target_blocks():
block_listing = api_client.get_blocks()["data"]
ret = []
for block in block_listing:
if block["name"] == data_block_name:
ret.append(block)
break
for block in block_listing:
if block["name"] == processing_block_name:
ret.append(block)
break
return ret
# Create workflow and initialize the tasks based on target parameters.
def initialize_workflow():
targets = extract_target_blocks()
workflow = api_client.create_workflow(randomized_name, 'Temp workflow for covid19 script')
api_client.set_workflow_tasks(workflow['data']['id'], targets)
return workflow['data']['id']
# Run a job with randomized name and templated parameters
def run_job(workflow_id, polygon, time_period, is_dry):
job_params = {
processing_block_name: {},
data_block_name: {
"ids": None,
"time": time_period,
"limit": 1,
"intersects": {
"type": "Polygon",
"coordinates": [
polygon
]
},
"zoom_level": 18,
"time_series": None,
"max_cloud_cover": 100,
"panchromatic_band": False
}
}
job_name = randomized_name + "_job_" + (''.join(random.choice("0123456789abcdef") for i in range(16)))
return api_client.run_job(workflow_id, job_name, is_dry, job_params)
# await job completion for up to ~tries * 5 seconds. Defaults to ~25 minutes.
def await_job_completion(job, tries=300):
try_counter = 0
if DEBUG_LOGGING:
print("[+] Awaiting job completion")
while try_counter < tries:
job_status = api_client.get_job(job['data']['id'])
extracted_status = job_status['data']['status']
if extracted_status == 'FAILED':
return False
if extracted_status == 'SUCCEEDED':
return True
try_counter += 1
time.sleep(5)
return False
# Process 1 polygon/time period pair
def get_one_polygon(polygon_num, time_num, workflow_id, polygon, time_period):
if DEBUG_LOGGING:
print("[+] Running test query first")
job = run_job(workflow_id, polygon, time_period, True)
is_success = await_job_completion(job)
# We can't get this time period. Output a -
if not is_success:
if DEBUG_LOGGING:
print("[-] Job failed")
return "-"
if DEBUG_LOGGING:
test_job_output = api_client.get_job_output(job['data']['id'])
print("[+] Acquisition date: " + test_job_output["features"][0]["properties"]["acquisitionDate"])
print("[+] Estimated credits: " + str(test_job_output["features"][0]["estimatedCredits"]))
print("[+] Now running actual job")
job = run_job(workflow_id, polygon, time_period, False)
is_success = await_job_completion(job)
if not is_success:
if DEBUG_LOGGING:
print("[-] Job failed")
return "-"
actual_output = api_client.get_job_output(job['data']['id'])
if SAVE_ALL_JOB_RESULTS:
if DEBUG_LOGGING:
print("[+] Storing job results")
tasks = api_client.get_job_tasks(job['data']['id'])['data']
task_num = 0
for task in tasks:
task_num += 1
url = api_client.get_task_signed_url(job['data']['id'], task['id'])['data']['url']
api_client.dump_task_url(url, folder,
"polygon_" + str(polygon_num) + "_timestamp_" + str(time_num) + "_task_" + str(
task_num) + ".tar.gz")
return str(len(actual_output["features"][0]["properties"]["det_details"]))
if __name__ == '__main__':
if DEBUG_LOGGING:
print("[+] Creating workflow...")
workflow_id = initialize_workflow()
if DEBUG_LOGGING:
print("[+] Created workflow: " + workflow_id)
polygon_num = 0
for polygon in POLYGONS:
polygon_num += 1
time_limit_num = 0
for time_limit in TIME_LIMITS:
time_limit_num += 1
print(
"Polygon " + str(polygon_num) + " interval " + str(time_limit_num) + ": " + get_one_polygon(polygon_num,
time_limit_num,
workflow_id,
polygon,
time_limit))
# This may be useful if the user wants to manually download or view detection data later in UI or API.
if CLEANUP_WORKFLOW:
if DEBUG_LOGGING:
print("[+] Cleaning workflow up")
api_client.delete_workflow(workflow_id)
| 2.90625 | 3 |
send_eadmin.py | huxiba/nagios-plugins | 0 | 12790312 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import urllib.request
import urllib.parse
import sys
from datetime import datetime
url = 'http://zzzzzz/api/upload.php'
def sendmessage(message):
print(message)
params = urllib.parse.urlencode(message)
params = params.encode("ascii")
req = urllib.request.Request(url, data=params, headers={'content-type': 'application/x-www-form-urlencoded'})
with urllib.request.urlopen(req) as response:
#print(response.read().decode("unicode_escape"))
#print(response.getcode())
pass
args = sys.argv
msg = {"act": "serverwarning",
"time": datetime.now().strftime('%Y-%m-%d %H:%M:%S'), }
for line in sys.stdin:
if line is not None and line.strip() != "":
k, v = line.split(":")
msg[k] = v.strip()
sendmessage(msg) | 3 | 3 |
python/강길웅1.py | gilwoong-kang/education.cloudsecurity | 0 | 12790313 | <reponame>gilwoong-kang/education.cloudsecurity<gh_stars>0
import numpy as np
import re
result = []
month = [0,0,0,0,0,0,0,0,0,0,0,0]
with open('./output.txt','w') as output:
with open('./input.txt','r') as file:
data = file.readline()
for data in file:
output.write('{}'.format(data.strip()))
if data.startswith('#'):
output.write('\n')
continue
data = data.split('\t')
for d in range(1,len(data)):
split = re.findall('[^,]',data[d])
word = ''
for j in split:
word += j
value = int(word.strip())
data[d] = value
month[d-1] += value
list = np.array(data[1:])
avg = np.mean(list)
sum = np.sum(list)
output.write('\t{}\t{}\n'.format(sum,round(avg,2)))
result.append([avg,sum])
    # Monthly totals
month.append(np.sum(month))
month.append(round(month[12]/12,2))
output.write('sum\t\t ')
for i in range(len(month)):
output.write('{}\t'.format(month[i]))
output.write('\navg\t ')
for i in range(len(month)-1):
output.write('{}\t'.format(round(month[i]/11,2))) | 2.921875 | 3 |
recodoc2/apps/codeutil/parser.py | bartdag/recodoc2 | 9 | 12790314 | <filename>recodoc2/apps/codeutil/parser.py
from __future__ import unicode_literals
def create_match(parent, children=None):
if children is None:
children = tuple()
return (parent, tuple(children))
def is_valid_match(match, matches, filtered):
'''Returns true if the match is new, bigger than an existing match,
and not encapsulated by an existing match.'''
valid = True
((start, end, kind, priority), _) = match
for temp_match in matches:
if temp_match in filtered or match == temp_match:
continue
((temp_start, temp_end, temp_kind, temp_priority), _) = temp_match
if start == temp_start and end == temp_end:
if priority <= temp_priority:
# More precise classification exists.
valid = False
break
elif start >= temp_start and end <= temp_end:
# Encapsulated in an existing match.
valid = False
break
return valid
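# Illustrative usage sketch; the kinds and priorities are made up for the example.
# A match fully enclosed by an existing match is rejected:
#
#     >>> outer = create_match((0, 10, 'class', 1))
#     >>> inner = create_match((2, 5, 'method', 1))
#     >>> is_valid_match(inner, [outer], set())
#     False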
def find_parent_reference(current_kind, references, kinds_hierarchy):
parent_kind = kinds_hierarchy[current_kind]
for reference in reversed(references):
if reference.kind_hint.kind == parent_kind:
return reference
| 2.765625 | 3 |
swagger_server/controllers/default_controller.py | Surya2709/FlaskSwaggerDemo | 0 | 12790315 | <reponame>Surya2709/FlaskSwaggerDemo<filename>swagger_server/controllers/default_controller.py
import connexion
import six
from swagger_server.models.alert import Alert # noqa: E501
from swagger_server.models.alert_array import AlertArray # noqa: E501
from swagger_server.models.updatealert import Updatealert # noqa: E501
from swagger_server import util
def alert_delete(alert_id): # noqa: E501
"""delete alert
    takes the alert id as input to remove the alert from the alert list # noqa: E501
:param alert_id: id of the alert need to be removed
:type alert_id: str
:rtype: None
"""
return 'do some magic!'
def alert_get(alert_id=None): # noqa: E501
"""obtain alert list
get method to obtain all the alerts # noqa: E501
:param alert_id: identifier for the alert
:type alert_id: str
:rtype: AlertArray
"""
return 'do some magic!'
def alert_post(body): # noqa: E501
"""add alerts
Adds the alerts into the list # noqa: E501
:param body:
:type body: list | bytes
:rtype: None
"""
if connexion.request.is_json:
body = [Alert.from_dict(d) for d in connexion.request.get_json()] # noqa: E501
return 'do some magic!'
def alert_put(body): # noqa: E501
"""update the alerts
updates the alerts in the alerts list # noqa: E501
:param body:
:type body: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
body = Updatealert.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
| 2.390625 | 2 |
neurogaze/analyze/grouping.py | chrizzzlybear/neurogaze_research | 0 | 12790316 | import numpy as np
def time_between_values(df, cols):
gap_df = df[cols].dropna(how='any')
return gap_df.index.to_series().diff(-1).dt.total_seconds().abs()
def distance_to_monitor(df):
dist = np.sqrt(
df.left_gaze_origin_in_user_coordinate_system_x ** 2
+ df.left_gaze_origin_in_user_coordinate_system_y ** 2
+ df.left_gaze_origin_in_user_coordinate_system_z ** 2
)
dist.index = df.time
return dist
def group_by_hour_of_day(series):
return series.groupby(series.index.to_series().dt.hour)
def blinks_per_minute_by_hour_of_day(df):
gaps = time_between_values(
df.set_index('time'), ['left_pupil_diameter', 'right_pupil_diameter'])
blinks = gaps[(gaps < 0.5) & (gaps > 0.1)]
blinks_per_hour_of_day = group_by_hour_of_day(blinks).count()
seconds_recorded_per_hour_of_day = (
group_by_hour_of_day(gaps).count()
/ 60 # Divide by Frequency
)
return blinks_per_hour_of_day / seconds_recorded_per_hour_of_day * 60
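# Illustrative usage sketch; the frame below is fabricated (a single simulated blink)
# and assumes the eye-tracker columns used above:
#
#     >>> import pandas as pd
#     >>> df = pd.DataFrame({
#     ...     "time": pd.date_range("2021-01-01 09:00", periods=4, freq="100ms"),
#     ...     "left_pupil_diameter": [3.1, None, None, 3.0],
#     ...     "right_pupil_diameter": [3.2, None, None, 3.1],
#     ... })
#     >>> blinks_per_minute_by_hour_of_day(df)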
| 3.03125 | 3 |
src/skmultiflow/core/base_object.py | lckr/scikit-multiflow | 0 | 12790317 | from abc import ABCMeta, abstractmethod
class BaseObject(metaclass=ABCMeta):
""" BaseObject
    The most basic object, from which target_values in scikit-multiflow
    derive. It guarantees that all target_values have at least the
two basic functions described in this base class.
"""
@abstractmethod
def get_class_type(self):
""" get_class_type
The class type is a string that identifies the type of object
generated by that module.
Returns
-------
The class type
"""
raise NotImplementedError
@abstractmethod
def get_info(self):
""" get_info
A sum up of all important characteristics of a class.
The default format of the return string is as follows:
ClassName: attribute_one: value_one - attribute_two: value_two \
- info_one: info_one_value
Returns
-------
A string with the class' relevant information.
"""
raise NotImplementedError
| 3.6875 | 4 |
SoftLabelCCRF/run.py | liujch1998/SoftLabelCCRF | 17 | 12790318 | <reponame>liujch1998/SoftLabelCCRF
import os, sys
import argparse
import logging
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
sys.path.insert(0, os.path.dirname(__file__))
from model import Model
from utils.data import load_tokens
from utils.vision import iou, clip_bbox_to_image, deparameterize_bbox_error
from utils.feats import collect_feats
from utils.stat import StatLoss, StatResult
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%Y/%m/%d %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
def set_seed (seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
'''
X
tokenid (batch, seqlen, .) int
span (batch, max_n_mentions, 2) int
cat (batch, max_n_mentions, n_cats) bool
image (batch, d_image)
region (batch, max_n_regions, d_region)
n_mentions [batch * int]
n_regions [batch * int]
_aff (batch, max_n_mentions, max_n_regions, d_aff=1)
_reg (batch, max_n_mentions, max_n_regions, d_reg=4)
'''
def train (args, model, optimizer, tokens_train, tokens_dev):
model.train()
stat_loss = StatLoss()
for it in range(args.iters):
samples = random.choices(tokens_train, k=args.batch)
X = collect_feats(args, tokens=samples)
optimizer.zero_grad()
loss = model(X)
loss.backward()
nn.utils.clip_grad_value_(model.parameters(), args.max_grad_norm)
optimizer.step()
stat_loss.insert(loss.item())
if (it + 1) % args.print_every == 0:
logger.info('Iter %d / %d\tloss_train = %.4f' % (it+1, args.iters, stat_loss.loss_avg))
stat_loss = StatLoss()
if (it + 1) % args.eval_every == 0:
eval(args, model, tokens_dev, split='dev')
model.train()
if (it + 1) % args.save_every == 0:
checkpoint = {
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}
model_path = os.path.join(args.output_dir, 'model.%06d.pth' % (it+1))
torch.save(checkpoint, model_path)
def eval (args, model, tokens, split):
model.eval()
stat_loss = StatLoss()
stat_result = StatResult()
for token in tokens:
X, instance = collect_feats(args, token=token)
with torch.no_grad():
loss, opt, reg = model(X)
stat_loss.insert(loss.item())
for c, caption in enumerate(instance.captions):
for m, mention in enumerate(caption.mentions):
r = opt[c][m]
if not args.no_box_regression:
mention.bbox_pred = clip_bbox_to_image(deparameterize_bbox_error(reg[c,m,r].tolist(), instance.regions[r].bbox, instance.image_size), instance.image_size)
else:
mention.bbox_pred = instance.regions[r].bbox
stat_result.insert(instance)
if args.visualize:
instance.visualize_prediction(args.output_dir, split)
logger.info('loss_eval = %.4f' % stat_loss.loss_avg)
stat_result.print(logger)
def main ():
parser = argparse.ArgumentParser()
parser.add_argument('--output_dir', default=None, type=str, required=True)
parser.add_argument('--model_name', default='model.pth', type=str)
parser.add_argument('--do_train', action='store_true')
parser.add_argument('--do_test', action='store_true')
parser.add_argument('--n_train', default=29783, type=int)
parser.add_argument('--n_dev', default=1000, type=int)
parser.add_argument('--n_test', default=1000, type=int)
parser.add_argument('--iters', default=50000, type=int)
parser.add_argument('--batch', default=16, type=int)
parser.add_argument('--lr', default=5e-5, type=float)
parser.add_argument('--drop_prob', default=0.2, type=float)
parser.add_argument('--max_grad_norm', default=10.0, type=float)
parser.add_argument('--no_box_regression',action='store_true')
parser.add_argument('--gamma', default=10.0, type=float)
parser.add_argument('--seed', default=19980430, type=int)
parser.add_argument('--print_every', default=500, type=int)
parser.add_argument('--eval_every', default=5000, type=int)
parser.add_argument('--save_every', default=5000, type=int)
parser.add_argument('--visualize', action='store_true')
parser.add_argument('--kld', action='store_true')
parser.add_argument('--crf', action='store_true')
parser.add_argument('--tran_context', default='none', type=str, help='Transition score context. One of [none, m, mlr, mlrg]')
parser.add_argument('--decode', default='none', type=str, help='Decode algo. One of [viterbi, smoothing] when --crf')
args = parser.parse_args()
args.d_lang = 1024
args.max_n_mentions = 20
args.n_cats = 8
args.d_image = 1024
args.d_region_visual = 2048
args.d_region_spatial = 5
args.d_region = args.d_region_visual + args.d_region_spatial
args.max_n_regions = 1000
args.d_rank = 1024
args.d_fusion = 1024
args.d_aff = 1
args.d_reg = 4
set_seed(args.seed)
args.device = torch.device('cuda')
os.makedirs(args.output_dir, exist_ok=True)
if args.visualize:
if args.do_train:
os.makedirs(os.path.join(args.output_dir, 'visualize', 'train'), exist_ok=True)
os.makedirs(os.path.join(args.output_dir, 'visualize', 'dev'), exist_ok=True)
if args.do_test:
os.makedirs(os.path.join(args.output_dir, 'visualize', 'test'), exist_ok=True)
if args.do_train:
tokens_train = load_tokens('train', args.n_train)
tokens_dev = load_tokens('dev', args.n_dev)
logger.info('Initializing model ...')
model = Model(args).to(args.device)
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, betas=(0.9,0.98))
logger.info('Training model ...')
train(args, model, optimizer, tokens_train, tokens_dev)
if args.do_test:
tokens_test = load_tokens('test', args.n_test)
logger.info('Loading model ...')
if not args.do_train:
model = Model(args).to(args.device)
model_path = os.path.join(args.output_dir, args.model_name)
checkpoint = torch.load(model_path)
model.load_state_dict(checkpoint['model_state_dict'])
logger.info('Testing model ...')
eval(args, model, tokens_test, split='test')
if __name__ == '__main__':
main()
| 1.984375 | 2 |
cli.py | MurmurWheel/Raft | 2 | 12790319 | <reponame>MurmurWheel/Raft<filename>cli.py
#!/usr/bin/python3
# coding:utf-8
# Command-line tool
import argparse
import socket
import json
import sys
# Send a request and return the response
def send_request(request: str) -> str:
    # Create a socket
client = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
    # Connect
client.connect(("127.0.0.1", 1024))
    # Send the request
client.send(request.encode())
    # Receive the reply
response = client.recv(65536).decode()
    # Close the socket
client.close()
    # Print the result of the operation
print("request: {}".format(request))
print("response: {}".format(response))
return response
# Set a key
def send_set_request(key, value):
request = {
"op": "set",
"params": {
"key": key,
"value": value
}
}
send_request(json.dumps(request))
# Get a key
def send_get_request(key):
request = {
"op": "get",
"params": {
"key": key
}
}
send_request(json.dumps(request))
# Exit
def send_exit_request():
request = {
"op": "exit",
"params": {}
}
send_request(json.dumps(request))
# echo
def send_echo_request():
request = {
"op": "echo",
"params": {}
}
send_request(json.dumps(request))
# Main entry point
if __name__ == "__main__":
if len(sys.argv) == 1:
print("usage: cli.py <cmd> <params>")
else:
try:
cmd = sys.argv[1]
params = sys.argv[2:]
if cmd == "echo":
send_echo_request()
elif cmd == "set":
try:
send_set_request(params[0], params[1])
except IndexError:
print("usage: cli.py set <key> <value>")
elif cmd == "get":
try:
send_get_request(params[0])
except IndexError:
print("usage: cli.py get <key>")
elif cmd == "exit":
send_exit_request()
else:
print("usage: cli.py <cmd> <params>")
except Exception as e:
print(e)
| 2.796875 | 3 |
1.py | Time2003/lr7 | 0 | 12790320 | <reponame>Time2003/lr7
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Read a list A of 10 elements, find the difference of the positive elements divisible by 11,
# count them, and print the results to the screen.
if __name__ == '__main__':
lst = [0] * 10
count = 0
dif = 0
b = 0
for i in range(10):
        print("Enter element", i + 1)
lst[i] = int(input())
if lst[i] > 0:
if b == 0 and lst[i] % 11 == 0:
b = lst[i]
if lst[i] % 11 == 0:
dif -= lst[i]
count += 1
    print("Original list:", lst, "difference of positive elements divisible by 11:", dif + (b * 2), "count:", count) | 3.453125 | 3 |
mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py | yarenty/mindsdb | 0 | 12790321 | import pandas as pd
from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode
class IntegrationDataNode(DataNode):
type = 'integration'
def __init__(self, integration_name, data_store):
self.integration_name = integration_name
self.data_store = data_store
def get_type(self):
return self.type
def get_tables(self):
return []
def has_table(self, tableName):
return True
def get_table_columns(self, tableName):
return []
def select(self, query):
sql_query = str(query)
dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': sql_query})
data = dso.df.to_dict(orient='records')
column_names = list(dso.df.columns)
for column_name in column_names:
if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]):
pass_data = dso.df[column_name].dt.to_pydatetime()
for i, rec in enumerate(data):
rec[column_name] = pass_data[i].timestamp()
return data, column_names
| 2.703125 | 3 |
pswalker/callbacks.py | ZLLentz/pswalker | 0 | 12790322 | <filename>pswalker/callbacks.py
"""
Specialized Callbacks for Skywalker
"""
############
# Standard #
############
import logging
import simplejson as sjson
from pathlib import Path
###############
# Third Party #
###############
import lmfit
import pandas as pd
import numpy as np
from lmfit.models import LinearModel
from bluesky.callbacks import (LiveFit, LiveFitPlot, CallbackBase, LivePlot)
##########
# Module #
##########
from .utils.argutils import isiterable
logger = logging.getLogger(__name__)
def apply_filters(doc, filters=None, drop_missing=True):
"""
Filter an event document
Parameters
----------
doc : dict
Bluesky Document to filter
filters : dict
Filters are provided in a dictionary of key / callable pairs that take
a single input from the data stream and return a boolean value.
drop_missing : bool, optional
Only include documents who have associated data for each filter key.
This includes events missing the key entirely, reporting NaN or
reporting Inf.
Returns
-------
resp : bool
Whether the event passes all provided filters
Example
------
..code::
apply_filters(doc, filters = {'a' : lambda x : x > 0,
'c' : lambda x : 4 < x < 6})
"""
resp = []
filters = filters or dict()
#Iterate through filters
for key, func in filters.items():
try:
#Check iterables for nan and inf
if isiterable(doc[key]):
if any(np.isnan(doc[key])) or any(np.isinf(doc[key])):
resp.append(not drop_missing)
continue
#Check string entries for nan and inf
elif isinstance(doc[key], str):
if "inf" == doc[key].lower() or "nan" == doc[key].lower():
resp.append(not drop_missing)
continue
#Handle all other types
else:
if np.isnan(doc[key]) or np.isinf(doc[key]):
resp.append(not drop_missing)
continue
#Evaluate filter
resp.append(bool(func(doc[key])))
#Handle missing information
except KeyError:
resp.append(not drop_missing)
#Handle improper filter
except Exception as e:
            logger.critical('Filter associated with event_key {} '\
'reported exception "{}"'\
''.format(key, e))
#Summarize
return all(resp)
def rank_models(models, target, **kwargs):
"""
Rank a list of models based on the accuracy of their prediction
Parameters
----------
models : list
List of models to evaluate
target : float
Actual value of target
kwargs :
All of the keys the models will need to evaluate
Returns
-------
model_ranking : list
List of models sorted by accuracy of predictions
"""
#Initialize values
model_ranking = np.asarray(models)
diffs = list()
bad_models = list()
#Calculate error of each model
for model in models:
try:
estimate = model.eval(**kwargs)
diffs.append(np.abs(estimate-target))
logger.debug("Model {} predicted a value of {}"
"".format(model.name, estimate))
except RuntimeError as e:
bad_models.append(model)
diffs.append(np.inf)
logger.debug("Unable to yield estimate from model {}"
"".format(model.name))
logger.debug(e)
#Rank performances
model_ranking = model_ranking[np.argsort(diffs)]
#Remove models who failed to make an estimate
return [model for model in model_ranking
if model not in bad_models]
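# Illustrative usage sketch: ranking already-fitted models against a measured
# target; the model objects are assumptions, and the keyword arguments must cover
# every model's independent variables.
#
#     >>> ranked = rank_models([linear_fit, pitch_fit], target=480.0,
#     ...                      x=0.1, a0=0.1, a1=0.2)
#     >>> ranked[0].name  # model whose prediction was closest to the target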
class LiveBuild(LiveFit):
"""
Base class for live model building in Skywalker
Parameters
----------
model : lmfit.Model
y: string
Key of dependent variable
indpendent_vars : dict
Map independent variables names to keys in the event document stream
init_guess: dict, optional
Initial guesses for other values if expected
update_every : int or None, optional
Update rate of the model. If set to None, the model will only be
computed at the end of the run. By default, this is set to 1 i.e
update on every new event
"""
def __init__(self, model, y, independent_vars, init_guess=None,
update_every=1, filters=None, drop_missing=True,
average=1):
super().__init__(model, y, independent_vars,
init_guess=init_guess,
update_every=update_every)
#Add additional keys
self.average = average
self.filters = filters or {}
self.drop_missing = drop_missing
self._avg_cache = list()
@property
def name(self):
"""
Name of the model
"""
return self.model.name
@property
def field_names(self):
"""
Name of all the keys associated with the fit
"""
return [self.y] + list(self.independent_vars.values())
def install_filters(self, filters):
"""
Install additional filters
Parameters
----------
filters : dict
Filters are provided in a dictionary of key / callable pairs that
take a single input from the data stream and return a boolean
value.
"""
self.filters.update(filters)
def event(self, doc):
#Run event through filters
if not apply_filters(doc['data']):
return
#Add doc to average cache
self._avg_cache.append(doc)
#Check we have the right number of shots to average
if len(self._avg_cache) >= self.average:
#Overwrite event number
#This can be removed with an update to Bluesky Issue #684
doc['seq_num'] = len(self.ydata) +1
#Rewrite document with averages
for key in self.field_names:
doc['data'][key] = np.mean([d['data'][key]
for d in self._avg_cache])
#Send to callback
super().event(doc)
#Clear cache
self._avg_cache.clear()
def eval(self, *args, **kwargs):
"""
Estimate a point based on the current fit of the model.
Reimplemented by subclasses
"""
logger.debug("Evaluating model {} with args : {}, kwargs {}"
"".format(self.name, args, kwargs))
if not self.result:
raise RuntimeError("Can not evaluate without a saved fit, "\
"use .update_fit()")
def backsolve(self, target, **kwargs):
"""
Use the most recent fit to find the independent variables that create
the requested dependent variable
..note::
For multivariable functions the user may have to specify which
variable to solve for, and which to keep fixed
"""
logger.debug("Backsolving model {} for target {} and kwargs {}"
"".format(self.name, target, kwargs))
if not self.result:
raise RuntimeError("Can not backsolve without a saved fit, "\
"use .update_fit()")
class LinearFit(LiveBuild):
"""
Model to fit a linear relationship between a single variable axis and a
    dependent variable
Parameters
----------
y : str
Keyword in the event document that reports the dependent variable
x: str
Keyword in the event document that reports the independent variable
init_guess : dict, optional
Initialization guess for the linear fit, available keys are ``slope``
and ``intercept``
name : optional , str
Name for the contained model. When None (default) the name is the same
as the model function
update_every : int or None, optional
Update rate of the model. If set to None, the model will only be
computed at the end of the run. By default, this is set to 1 i.e
update on every new event
"""
def __init__(self, y, x, init_guess=None,
update_every=1, name=None,
average=1):
#Create model
model = LinearModel(missing='drop', name=name)
#Initialize parameters
init = {'slope' : 0, 'intercept' : 0}
if init_guess:
init.update(init_guess)
#Initialize fit
super().__init__(model, y, {'x': x},
init_guess=init,
update_every=update_every,
average=average)
def eval(self, **kwargs):
"""
Evaluate the predicted outcome based on the most recent fit of
the given information.
Parameters
----------
x : float or int, optional
Independent variable to evaluate linear model
kwargs :
The value for the indepenedent variable can also be given as the
field name in the event document
Returns
-------
estimate : float
Y value as determined by current linear fit
"""
#Check result
super().eval(**kwargs)
#Standard x setup
if kwargs.get('x'):
x = kwargs['x']
elif self.independent_vars['x'] in kwargs.keys():
x = kwargs[self.independent_vars['x']]
else:
raise ValueError("Must supply keyword `x` or use fieldname {}"
"".format(self.independent_vars['x']))
#Structure input add past result
kwargs = {'x' : np.asarray(x)}
kwargs.update(self.result.values)
#Return prediction
return self.result.model.eval(**kwargs)
def backsolve(self, target, **kwargs):
"""
Find the ``x`` position that solves the reaches the given target
Parameters
----------
target : float
Desired ``y`` value
Returns
-------
x : dict
Variable name and floating value
"""
#Make sure we have a fit
super().backsolve(target, **kwargs)
#Gather line information
(m, b) = (self.result.values['slope'],
self.result.values['intercept'])
#Return x position
if m == 0 and b != target:
raise ValueError("Unable to backsolve, because fit is horizontal "
" after {} data points".format(len(self.ydata)))
return {'x' : (target-b)/m}
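# Hypothetical usage sketch (not part of this module): 'det' and 'motor' are
# illustrative event-document field names, and RE/scan are assumed to come from
# a bluesky session. It shows how a subscribed LinearFit can be evaluated and
# inverted once a fit has been computed.
#
#   fit = LinearFit('det', 'motor', update_every=1)
#   RE(scan([det], motor, -1, 1, 20), fit)
#   fit.eval(x=0.5)                # predicted detector value at x = 0.5
#   fit.backsolve(target=5.0)      # -> {'x': (target - intercept) / slope}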
class MultiPitchFit(LiveBuild):
"""
Model to fit centroid position of two mirror system
Parameters
----------
centroid : str
Keyword in the event document that reports centroid position
alphas : tuple of str
        Tuple of the mirror pitches (a1, a2)
init_guess : dict, optional
        Initialization guess for the linear fit, available keys are ``x0``,
``x1``, and ``x2``
name : optional , str
Name for the contained model. When None (default) the name is the same
as the model function
update_every : int or None, optional
Update rate of the model. If set to None, the model will only be
computed at the end of the run. By default, this is set to 1 i.e
update on every new event
"""
def __init__(self, centroid, alphas,
name=None, init_guess=None,
update_every=1, average=1):
#Simple model of two-bounce system
def two_bounce(a0, a1, x0, x1, x2):
return x0 + a0*x1 + a1*x2
#Create model
model = lmfit.Model(two_bounce,
independent_vars = ['a0', 'a1'],
missing='drop')
#Initialize parameters
init = {'x0' : 0, 'x1': 0, 'x2' : 0}
if init_guess:
init.update(init_guess)
#Initialize fit
super().__init__(model, centroid,
independent_vars={'a0' : alphas[0],
'a1' : alphas[1]},
init_guess=init, update_every=update_every,
average=average)
def eval(self, a0=0., a1=0., **kwargs):
"""
Evaluate the predicted outcome based on the most recent fit of
the given information
Parameters
----------
a0 : float
Pitch of the first mirror
a1 : float
Pitch of the second mirror
Returns
-------
centroid : float
Position of the centroid as predicted by the current model fit
"""
#Check result
super().eval(a0, a1)
#Structure input and add past result
kwargs = {'a0' : np.asarray(a0),
'a1' : np.asarray(a1)}
kwargs.update(self.result.values)
#Return prediction
return self.result.model.eval(**kwargs)
def backsolve(self, target, a0=None, a1=None):
"""
Find the mirror configuration to reach a certain pixel value
Because this is a multivariable function you must fix one of the
mirrors in place, while the other one is solved for.
Parameters
----------
target : float
Desired pixel location
a0 : float, optional
Fix the first mirror in the system
a1 : float, optional
Fix the second mirror in the system
Returns
-------
angles : dict
Dictionary with the variable mirror key and solvable value
"""
#Make sure we have a fit
super().backsolve(target, a0=a0, a1=a1)
#Check for valid request
        if (a0 is None) == (a1 is None):
            raise ValueError("Exactly one of the mirror positions "
                             "must be specified to backsolve for the target")
#Gather fit information
(x0, x1, x2) = (self.result.values['x0'],
self.result.values['x1'],
self.result.values['x2'])
#Return computed value
        if a0 is not None:
return {'a1' : (target - x0 - a0*x1)/ x2,
'a0' : a0}
else:
return {'a0' : (target - x0 - a1*x2)/ x1,
'a1' : a1}
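# Hypothetical usage sketch (field names 'centroid', 'm1_pitch' and 'm2_pitch'
# are illustrative): with one mirror pitch held fixed, backsolve returns the
# other pitch needed to bring the centroid to the requested target.
#
#   fit = MultiPitchFit('centroid', ('m1_pitch', 'm2_pitch'))
#   fit.backsolve(target=200.0, a0=0.5)   # -> {'a0': 0.5, 'a1': ...}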
class LivePlotWithGoal(LivePlot):
"""
Build a function that updates a plot from a stream of Events.
Parameters
----------
y : str
the name of a data field in an Event
x : str, optional
the name of a data field in an Event, or 'seq_num' or 'time'
If None, use the Event's sequence number.
Special case: If the Event's data includes a key named 'seq_num' or
'time', that takes precedence over the standard 'seq_num' and 'time'
recorded in every Event.
goal : float
the target pixel
tolerance : float, optional
the tolerance for the pixel
    averages : int, optional
        The number of images to average. If None, every point is rendered as it
        arrives; otherwise the plot updates every ``averages`` points.
legend_keys : list, optional
The list of keys to extract from the RunStart document and format
in the legend of the plot. The legend will always show the
scan_id followed by a colon ("1: "). Each
xlim : tuple, optional
passed to Axes.set_xlim
ylim : tuple, optional
passed to Axes.set_ylim
ax : Axes, optional
matplotib Axes; if none specified, new figure and axes are made.
fig : Figure, optional
deprecated: use ax instead
epoch : {'run', 'unix'}, optional
If 'run' t=0 is the time recorded in the RunStart document. If 'unix',
t=0 is 1 Jan 1970 ("the UNIX epoch"). Default is 'run'.
kwargs :
All additional keyword arguments are passed through to ``Axes.plot``.
Notes
-----
If your figure blocks the main thread when you are trying to scan with this
callback, call `plt.ion()` in your IPython session.
Examples
--------
    >>> my_plotter = LivePlotWithGoal('det', 'motor', goal=10.0, tolerance=1.5, averages=None, legend_keys=['sample'])
>>> RE(my_scan, my_plotter)
"""
def __init__(self, y, x=None, *, goal=0.0, tolerance=0.0, averages=None, **kwargs):
super().__init__(y, x, **kwargs)
self.legend_title = None
self.goal = goal
self.tolerance = tolerance
self.averages = averages
self.event_count = 0
def start(self, doc):
self.goal_data = []
self.goal_axis, = self.ax.plot([],[],'r--', label='Target')
super().start(doc)
def event(self, doc):
super().event(doc)
self.event_count += 1
def update_plot(self, force=False):
if self.averages is None or (self.averages is not None and self.event_count % self.averages == 0) or force:
self.goal_axis.set_data(self.x_data, self.goal_data)
goal = np.asarray(self.goal_data)
distance = 2 if self.averages is None else self.averages+1
if force:
distance -= 1
self.ax.fill_between(self.x_data[-distance:], goal[-distance:]-self.tolerance, goal[-distance:]+self.tolerance,
alpha=0.2, facecolor='r')
super().update_plot()
self.ax.set_xlim(left=0, right=None, auto=True)
def update_caches(self, x, y):
self.goal_data.append(self.goal)
super().update_caches(x, y)
def stop(self, doc):
# Ensure that the last events are plotted
# Only necessary when we are grouping the points
if self.averages is not None:
self.update_plot(force=True)
super().stop(doc)
| 2.515625 | 3 |
ppln/filterCallNoCall.py | asalomatov/nextgen-pipeline | 4 | 12790323 | '''
Filter a BED-like file, keeping only rows whose 4th column matches one of
the given filter labels (e.g. call/no-call states).
'''
import sys
sys.path.insert(0, '/nethome/asalomatov/projects/ppln')
import logProc
if len(sys.argv) == 1:
    print 'Usage:'
    print sys.argv[0], 'input.bed', 'output.bed', 'logdir', 'filter1', 'filter2', 'filter3'
    sys.exit(1)
N = 4
inf, outf, outdir = sys.argv[1:N]
fltrs = sys.argv[N:]
print fltrs
cmd = ' '
logProc.logProc(outf, outdir, cmd, 'started')
with open(outf, 'w') as fout:
with open(inf, 'r') as fin:
for l in fin:
ls = l.split()
if ls[3] in fltrs:
fout.write('\t'.join(ls)+'\n')
logProc.logProc(outf, outdir, cmd, 'finished')
| 2 | 2 |
tacker_horizon/openstack_dashboard/dashboards/nfv/vnffgmanager/tables.py | grechny/tacker-horizon | 0 | 12790324 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.http import Http404
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import messages
from horizon import tables
from openstack_dashboard import policy
from tacker_horizon.openstack_dashboard import api
from tackerclient.common.exceptions import NotFound
class VNFFGManagerItem(object):
def __init__(self, id, name, description, status):
self.id = id
self.name = name
self.description = description
self.status = status
class VNFFGManagerItemList(object):
VNFFGLIST_P = []
@classmethod
def get_obj_given_id(cls, vnffg_id):
for obj in cls.VNFFGLIST_P:
if obj.id == vnffg_id:
return obj
@classmethod
def add_item(cls, item):
cls.VNFFGLIST_P.append(item)
@classmethod
def clear_list(cls):
cls.VNFFGLIST_P = []
class MyFilterAction(tables.FilterAction):
name = "myfilter"
class VNFFGUpdateRow(tables.Row):
ajax = True
def can_be_selected(self, datum):
return datum.status != 'DELETE_COMPLETE'
def get_data(self, request, vnffg_id):
try:
item = VNFFGManagerItemList.get_obj_given_id(vnffg_id)
vnffg_instance = api.tacker.get_vnffg(request, vnffg_id)
if not vnffg_instance and not item:
# TODO(NAME) - bail with error
return None
if not vnffg_instance and item:
# API failure, just keep the current state
return item
vnffg = vnffg_instance['vnffg']
try:
vnffg_desc_str = vnffg['description']
except KeyError:
vnffg_desc_str = ""
if not item:
# Add an item entry
                item = VNFFGManagerItem(vnffg['id'], vnffg['name'],
                                        vnffg_desc_str, vnffg['status'])
else:
item.description = vnffg_desc_str
item.status = vnffg['status']
return item
except (Http404, NotFound):
raise Http404
except Exception as e:
messages.error(request, e)
raise
class DeleteVNFFG(policy.PolicyTargetMixin, tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Terminate VNFFG",
u"Terminate VNFFGs",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Terminate VNFFG",
u"Terminate VNFFGs",
count
)
def action(self, request, obj_id):
api.tacker.delete_vnffg(request, obj_id)
class DeployVNFFG(tables.LinkAction):
name = "deployvnffg"
verbose_name = _("Deploy VNFFG")
classes = ("ajax-modal",)
icon = "plus"
url = "horizon:nfv:vnffgmanager:deployvnffg"
class VNFFGManagerTable(tables.DataTable):
STATUS_CHOICES = (
("ACTIVE", True),
("ERROR", False),
)
STACK_STATUS_DISPLAY_CHOICES = (
("init_in_progress", pgettext_lazy("current status of stack",
u"Init In Progress")),
("init_complete", pgettext_lazy("current status of stack",
u"Init Complete")),
("init_failed", pgettext_lazy("current status of stack",
u"Init Failed")),
("create_in_progress", pgettext_lazy("current status of stack",
u"Create In Progress")),
("create_complete", pgettext_lazy("current status of stack",
u"Create Complete")),
("create_failed", pgettext_lazy("current status of stack",
u"Create Failed")),
("delete_in_progress", pgettext_lazy("current status of stack",
u"Delete In Progress")),
("delete_complete", pgettext_lazy("current status of stack",
u"Delete Complete")),
("delete_failed", pgettext_lazy("current status of stack",
u"Delete Failed")),
("update_in_progress", pgettext_lazy("current status of stack",
u"Update In Progress")),
("update_complete", pgettext_lazy("current status of stack",
u"Update Complete")),
("update_failed", pgettext_lazy("current status of stack",
u"Update Failed")),
("rollback_in_progress", pgettext_lazy("current status of stack",
u"Rollback In Progress")),
("rollback_complete", pgettext_lazy("current status of stack",
u"Rollback Complete")),
("rollback_failed", pgettext_lazy("current status of stack",
u"Rollback Failed")),
("suspend_in_progress", pgettext_lazy("current status of stack",
u"Suspend In Progress")),
("suspend_complete", pgettext_lazy("current status of stack",
u"Suspend Complete")),
("suspend_failed", pgettext_lazy("current status of stack",
u"Suspend Failed")),
("resume_in_progress", pgettext_lazy("current status of stack",
u"Resume In Progress")),
("resume_complete", pgettext_lazy("current status of stack",
u"Resume Complete")),
("resume_failed", pgettext_lazy("current status of stack",
u"Resume Failed")),
("adopt_in_progress", pgettext_lazy("current status of stack",
u"Adopt In Progress")),
("adopt_complete", pgettext_lazy("current status of stack",
u"Adopt Complete")),
("adopt_failed", pgettext_lazy("current status of stack",
u"Adopt Failed")),
("snapshot_in_progress", pgettext_lazy("current status of stack",
u"Snapshot In Progress")),
("snapshot_complete", pgettext_lazy("current status of stack",
u"Snapshot Complete")),
("snapshot_failed", pgettext_lazy("current status of stack",
u"Snapshot Failed")),
("check_in_progress", pgettext_lazy("current status of stack",
u"Check In Progress")),
("check_complete", pgettext_lazy("current status of stack",
u"Check Complete")),
("check_failed", pgettext_lazy("current status of stack",
u"Check Failed")),
)
name = tables.Column("name",
link="horizon:nfv:vnffgmanager:detail",
verbose_name=_("VNFFG Name"))
description = tables.Column("description",
verbose_name=_("Description"))
status = tables.Column("status",
hidden=False,
status=True,
status_choices=STATUS_CHOICES)
class Meta(object):
name = "vnffgmanager"
verbose_name = _("VNFFGManager")
status_columns = ["status", ]
row_class = VNFFGUpdateRow
table_actions = (DeployVNFFG, DeleteVNFFG, MyFilterAction,)
| 1.710938 | 2 |
we_guitar/urls.py | guoshijiang/we_guitar | 2 | 12790325 | <reponame>guoshijiang/we_guitar
#encoding=utf-8
from django.contrib import admin
from django.urls import path, re_path, include
from django.conf import settings
from django.conf.urls.static import static
from django.views.static import serve
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('blog.urls')),
path('ueditor/', include('DjangoUeditor.urls')),
re_path('^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 1.484375 | 1 |
Desafios/desafio074.py | VanessaCML/python | 0 | 12790326 | <filename>Desafios/desafio074.py<gh_stars>0
from random import randint
n = randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10)
print(f'Os números sorteados foram: ', end='')
for num in n:
print(num, end=' ')
print(f'\nO menor número é {min(n)} e o maior é {max(n)}.')
'''from random import randint
a = tuple(randint(1, 5) for i in range(5))
print(f'Os números sorteados foram {a}, o maior é {max(a)} e o menor é {min(a)}.')'''
| 3.84375 | 4 |
kite-python/kite_common/kite/codeexamples/context.py | kiteco/kiteco-public | 17 | 12790327 | <reponame>kiteco/kiteco-public
import __builtin__
import types
import ast
from contextlib import contextmanager
from pprint import pprint
from utils import node_to_code
LITERAL_NODES = {
ast.Num: "number",
ast.Str: "str",
ast.List: "list",
ast.Tuple: "tuple",
ast.Set: "set",
ast.Dict: "dict",
}
NUMBER_TYPES = {
int: "int",
float: "float",
long: "long",
complex: "complex",
}
def is_literal(node):
if type(node) in LITERAL_NODES:
return True
if isinstance(node, ast.Name):
if node.id in ('True', 'False', 'None'):
return True
if type(node) is ast.Lambda:
return True
return False
def literal_value(node):
if not is_literal(node):
return ''
if isinstance(node, ast.Num):
return str(node.n)
if isinstance(node, ast.Str):
return node.s
if (isinstance(node, ast.Name) and
node.id in ('True', 'False', 'None')):
return node.id
if isinstance(node, ast.Lambda):
return node_to_code(node)
return ''
def _process_lambda(node):
try:
gen = ASTCodeGenerator()
gen.visit(node)
return gen.line
except Exception as e:
return ""
def node_to_type(node):
if type(node) in LITERAL_NODES:
if type(node) == ast.Num:
for t, name in NUMBER_TYPES.items():
if isinstance(node.n, t):
return name
return LITERAL_NODES[type(node)]
else:
return LITERAL_NODES[type(node)]
if type(node) == ast.Name:
if (node.id == 'True' or node.id == 'False'):
return "__builtin__.bool"
elif node.id == 'None':
return "__builtin__.None"
if type(node) == ast.Lambda:
return "LambdaType"
return "unknown"
class TypeScope(object):
def __contains__(self, name):
return hasattr(types, name)
def __getitem__(self, key):
if key in self:
return "types.%s" % key
raise KeyError("%s is not a type" % key)
class BuiltinScope(object):
def __contains__(self, name):
return hasattr(__builtin__, name)
def __getitem__(self, key):
if key in self:
return "__builtin__.%s" % key
raise KeyError("%s not a builtin" % key)
class Context(object):
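    """
    Tracks imports and the global/function/class scopes seen while walking an
    AST, so that names in call and attribute expressions can be resolved to
    fully qualified names (e.g. an alias from ``import numpy as np`` resolves
    back to ``numpy``). Builtins and ``types`` members are consulted last.
    """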
def __init__(self):
self.imports = {}
self.func_scope = {}
self.class_scope = {}
self.global_scope = {}
self.type_scope = TypeScope()
self.builtin_scope = BuiltinScope()
self.in_func = False
self.in_class = False
def resolve_node(self, node):
name = self._get_name(node)
if not name:
return None, None
scopes = [('types', self.type_scope),
('builtin', self.builtin_scope),
('imports', self.imports),
('global', self.global_scope)]
if self.in_func:
scopes.append(('function', self.func_scope))
if self.in_class:
scopes.append(('class', self.class_scope))
scopes.reverse()
for scopeName, scope in scopes:
if self._check_in(name, scope):
return scopeName, self._resolve_in(name, scope)
return None, None
def in_class_scope(self, node):
name = self._get_name(node)
if not name:
return False
return self._check_in(name, self.class_scope)
@contextmanager
def function_context(self):
self.start_FunctionDef()
yield
self.end_FunctionDef()
@contextmanager
def class_context(self):
self.start_ClassDef()
yield
self.end_ClassDef()
def start_FunctionDef(self):
self.func_scope = {}
self.in_func = True
def end_FunctionDef(self):
self.func_scope = {}
self.in_func = False
def start_ClassDef(self):
self.class_scope = {}
self.in_class = True
def end_ClassDef(self):
self.class_scope = {}
self.in_class = False
def visit_Import(self, node):
""" Handle imports. """
for im in node.names:
if im.asname is None:
self.imports[im.name] = im.name
else:
self.imports[im.asname] = im.name
def visit_ImportFrom(self, node):
""" Handle from imports. """
if node.module is None:
return
for im in node.names:
full_import = node.module + '.' + im.name
if im.asname is None:
self.imports[im.name] = full_import
else:
self.imports[im.asname] = full_import
def visit_Assign(self, node):
"""
On an assignment expression, we add variables to local or global scope
based on whether we are in a function. We resolved the LHS and RHS to
their full names. Then:
- If the RHS is from an imported statement, we add it into scope bound
to the provided LHS variable
- If the RHS is a literal, we add it into scope bound
to the provided LHS variable
- Otherwise, if the LHS is already in scope, we remove it.
"""
# Check to see if we are in a function.
scope = self.global_scope
if self.in_func:
scope = self.func_scope
# Check to see if the RHS is a known assignment type
if not self._known_assignment_type(node.value):
return
# Only consider single-assignments for now
if len(node.targets) != 1:
return
# Resolve left-hand and right-hand side of assignment expression
lhs = self._get_name(node.targets[0])
rhs = self._get_name(node.value)
if lhs is None or rhs is None:
return
# Use class scope if we are in a class lhs starts with "self."
if self.in_class and lhs.startswith("self."):
scope = self.class_scope
# Check imports
if self._check_in(rhs, self.imports):
scope[lhs] = self._resolve_in(rhs, self.imports)
# Check current scope (global, func or class)
elif self._check_in(rhs, scope):
scope[lhs] = self._resolve_in(rhs, scope)
# Check rhs in class scope
elif self.in_class and rhs.startswith("self."):
if self._check_in(rhs, self.class_scope):
scope[lhs] = self._resolve_in(rhs, self.class_scope)
# Check builtins
elif self._check_in(rhs, self.builtin_scope):
scope[lhs] = self._resolve_in(rhs, self.builtin_scope)
# Check literals
elif rhs in LITERAL_NODES.values():
scope[lhs] = rhs
# Remove re-assignments that were unrecognized
elif lhs in scope:
del scope[lhs]
def _check_in(self, name, scope):
if name in scope:
return True
parts = name.split(".")
for i in range(1, len(parts)):
im = '.'.join(parts[:-i])
if im in scope:
return True
return False
def _resolve_in(self, name, scope):
if name in scope:
return scope[name]
parts = name.split(".")
for i in range(1, len(parts)):
im = '.'.join(parts[:-i])
if im in scope:
return scope[im] + name[len(im):]
return None
def _known_assignment_type(self, target):
"""
List of types we support for the RHS in assignment expressions.
"""
return (is_literal(target) or
isinstance(target, (ast.Call, ast.Attribute)))
def _get_name(self, node):
"""
Resolve a node to its full name.
"""
if is_literal(node):
return node_to_type(node)
n = node
if isinstance(node, ast.Call):
n = node.func
parts = []
while isinstance(n, ast.Attribute):
# For function calls that are nested in selector expressions,
# e.g os.path.join, they are chained together as a series of
# ast.Attribute nodes. Extract them one by one.
parts.append(n.attr)
n = n.value
# If we actually ended up at an ast.Name node, we have a
# all the components of the selector expression that make up the call.
# We just have to reverse the parts we added above.
if isinstance(n, ast.Name):
parts.append(n.id)
parts.reverse()
return '.'.join(parts)
if isinstance(n, ast.Call):
return self._get_name(n)
if is_literal(n):
nodeType = node_to_type(n)
parts.append(nodeType)
parts.reverse()
return '.'.join(parts)
return None
| 2.5625 | 3 |
test/typeclasses/test_functor.py | victoradan/pythonZeta | 0 | 12790328 | <reponame>victoradan/pythonZeta<filename>test/typeclasses/test_functor.py<gh_stars>0
from toolz import identity, compose, curry
from hypothesis import given
import hypothesis.strategies as st
from pyzeta.typeclasses.functor import fmap
## TODO: use hypothesis
def test_functor_identity():
assert_functor_identity([1,2,3])
def test_functor_composition():
f = lambda x: x+1
g = lambda x: x+2
fa = [1,2,3]
assert_functor_composition(f, g, fa)
def test_functor_curried():
f = curry(fmap)
assert f(lambda x: x+1)([1,2,3]) == [2,3,4]
def test_functor_as_func():
assert fmap(lambda x: x+1, [1,2,3]) == [2,3,4]
## Laws ##
def assert_functor_identity(fa):
assert identity |fmap| fa == fa
def assert_functor_composition(f, g, fa):
assert g |fmap| (f |fmap| fa) == compose(g, f) |fmap| fa
| 2.671875 | 3 |
app/study/filterVolumeProfileDailySetup.py | kyoungd/material-stock-finder-app | 0 | 12790329 | <gh_stars>0
import requests
import json
import logging
from scipy import stats, signal
import numpy as np
import pandas as pd
from util import StockAnalysis, AllStocks
from alpaca import AlpacaHistorical, AlpacaSnapshots
class FilterVolumeProfileDailySetup:
def __init__(self):
self.sa = StockAnalysis()
self.jsonData = self.sa.GetJson
lastDate = self.getLastDate()
self.starttime = lastDate + 'T00:00:00'
self.endtime = lastDate + 'T23:59:59'
def getLastDate(self):
app = AlpacaSnapshots(favorites={}, minPrice=0, maxPrice=0, minVolume=0, maxVolume=0)
data = app.HistoricalSnapshots(['AAPL'])
snapshots = json.loads(data.text)
symbols = ''
for symbol in snapshots:
try:
dailyBar = snapshots[symbol]['dailyBar']
date = dailyBar['t'].split('T')[0]
return date
except Exception as e:
logging.error(f'AlpacaSnapshot.getSnapshot(). ERROR: {e}')
return None
def HistoricalPrices(self, symbol, timeframe, datatype=None, starttime=None, endtime=None):
        start = self.starttime
end = self.endtime
tf = '1Min'
url = AlpacaHistorical.ALPACA_URL % (
symbol, start, end, tf)
data = requests.get(url, headers=self.custom_header)
return data
def volumeProfiles(self, df):
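        # Volume-profile estimate: build a volume-weighted Gaussian KDE over the
        # closing prices and return the price levels of the prominent density
        # peaks (prominence >= 30% of the maximum), i.e. the high-volume nodes.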
df.rename(columns={'Close': 'close'}, inplace=True)
df.rename(columns={'Volume': 'volume'}, inplace=True)
data = df
volume = data['volume']
close = data['close']
kde_factor = 0.05
num_samples = len(df)
kde = stats.gaussian_kde(close, weights=volume, bw_method=kde_factor)
xr = np.linspace(close.min(), close.max(), num_samples)
kdy = kde(xr)
ticks_per_sample = (xr.max() - xr.min()) / num_samples
peaks, _ = signal.find_peaks(kdy)
pkx = xr[peaks]
pky = kdy[peaks]
min_prom = kdy.max() * 0.3
peaks, peak_props = signal.find_peaks(kdy, prominence=min_prom)
pkx = xr[peaks]
pky = kdy[peaks]
return pkx
def isNearVP(self, close, vpros):
for vpro in vpros:
if (abs(close - vpro) / close < self.nearMargin):
return True, vpro
return False, 0
def Run(self, symbol=None):
if symbol is None:
symbol = self.symbol
else:
self.symbol = symbol
isLoaded, tp = AllStocks.GetDailyStockData(symbol)
if isLoaded:
try:
price = tp.Close[0]
volProfiles = self.volumeProfiles(tp)
isNear, vpro = self.isNearVP(price, volProfiles)
self.sa.UpdateFilter(self.jsonData, self.symbol,
'vpro', isNear)
self.sa.UpdateFilter(self.jsonData, self.symbol,
'vpros', round(float(vpro), 2))
except Exception as e:
print(e)
self.sa.UpdateFilter(self.jsonData, self.symbol,
'vpro', False)
self.sa.UpdateFilter(self.jsonData, self.symbol,
'vpros', 0)
return False
@staticmethod
def All():
filter = FilterVolumeProfileDailySetup()
AllStocks.Run(filter.Run, False)
filter.sa.WriteJson(filter.jsonData)
if __name__ == '__main__':
FilterVolumeProfileDailySetup.All()
print('---------- done ----------')
# filter = FilterVolumeProfileDailySetup()
# filter.Run('AAPL')
# print('---------- done ----------')
| 2.265625 | 2 |
datastructure/graph/undirected_graph_node.py | NLe1/Pyrithms | 1 | 12790330 | from typing import Dict, List
class UndirectedGraphNode:
"""
Definition of GraphNode
For the weighted undirected graph:
A <-> B (cost 4)
A <-> C (cost 1)
B <-> C (cost 7)
B <-> D (cost 2)
We will have
[GraphNode {
val = 'A'
edges = {
'B': 4
'C': 1
}
},
GraphNode {
val = 'B'
edges = {
'A': 4
'C': 7
'D': 2
}
},
GraphNode {
val = 'C'
edges = {
'A': 1
'B': 7
}
},
GraphNode {
val = 'D'
edges = {
'B': 2
}
},
"""
def __init__(self, val: str):
        self.edges: Dict[str, int] = {}  # maps a neighbor's value to the edge cost
self.val = val
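# Minimal usage sketch of the representation documented above (assumes edges
# map a neighbor's value to the edge cost, matching the docstring):
#
#   a = UndirectedGraphNode('A')
#   b = UndirectedGraphNode('B')
#   a.edges['B'] = 4
#   b.edges['A'] = 4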
| 3.9375 | 4 |
ppo/Buffer.py | leonjovanovic/drl-ml-agents-3dball | 0 | 12790331 | import torch
import Config
class Buffer:
# Since the enviroment we use has multiple agents that work in parallel and PPO requires to store whole episodes in
# buffer so the advantage can be calculated, each agent will have separate episode buffer in which will store each
# step of only its episode. When episode for certain agent ends, whole episode buffer is inserted to the main buffer
def __init__(self, num_workers, state_shape, action_shape, episode_length):
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.buffer_index = 0
self.episode_length = episode_length
#------------------------------------------------- MAIN BUFFER -------------------------------------------------
self.states = torch.zeros(Config.batch_size, state_shape).to(self.device)
self.actions = torch.zeros(Config.batch_size, action_shape).to(self.device)
self.logprob = torch.zeros(Config.batch_size, action_shape).to(self.device)
self.rewards = torch.zeros(Config.batch_size).to(self.device)
self.new_states = torch.zeros(Config.batch_size, state_shape).to(self.device)
self.dones = torch.zeros(Config.batch_size).to(self.device)
#----------------------------------------------- EPISODE BUFFER ------------------------------------------------
self.states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device)
self.actions_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device)
self.logprob_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device)
self.rewards_episode = torch.zeros(num_workers, self.episode_length).to(self.device)
self.new_states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device)
self.dones_episode = torch.zeros(num_workers, self.episode_length).to(self.device)
self.episode_step = torch.zeros(num_workers, dtype=torch.long).to(self.device)
self.gt = torch.zeros(Config.batch_size + 1).to(self.device)
self.advantages = torch.zeros(Config.batch_size + 1).to(self.device)
self.full = False
def add_old(self, decision_steps, actions, logprob):
cnt = 0
actionsTensor = torch.Tensor(actions).to(self.device)
for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id):
self.states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs)
self.actions_episode[a_id, self.episode_step[a_id]] = actionsTensor[cnt]
self.logprob_episode[a_id, self.episode_step[a_id]] = logprob[cnt]
cnt += 1
def add(self, decision_steps, terminal_steps):
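        """
        Record one environment step for every agent: agents that are still
        running get a small positive reward (0.1) and advance their episode
        pointer; agents that terminated get -1, and their finished episode is
        copied into the main buffer until one full batch has been collected.
        """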
for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id):
            if decision_steps.reward[a_id] == 0:  # this is a terminal step, skip it
continue
self.rewards_episode[a_id, self.episode_step[a_id]] = 0.1
self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs)
self.dones_episode[a_id, self.episode_step[a_id]] = 0
self.episode_step[a_id] += 1
for obs, a_id in zip(terminal_steps.obs[0], terminal_steps.agent_id):
self.rewards_episode[a_id, self.episode_step[a_id]] = -1
self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs)
self.dones_episode[a_id, self.episode_step[a_id]] = 1
self.episode_step[a_id] += 1
if not self.full:
last_index = min(self.buffer_index + self.episode_step[a_id], Config.batch_size)
self.states[self.buffer_index: last_index] = self.states_episode[a_id, : last_index - self.buffer_index]
self.actions[self.buffer_index: last_index] = self.actions_episode[a_id, : last_index - self.buffer_index]
self.logprob[self.buffer_index: last_index] = self.logprob_episode[a_id, : last_index - self.buffer_index]
self.rewards[self.buffer_index: last_index] = self.rewards_episode[a_id, : last_index - self.buffer_index]
self.new_states[self.buffer_index: last_index] = self.new_states_episode[a_id, : last_index - self.buffer_index]
self.dones[self.buffer_index: last_index] = self.dones_episode[a_id, : last_index - self.buffer_index]
self.buffer_index = last_index % Config.batch_size
if self.buffer_index == 0:
self.full = True
self.episode_step[a_id] = 0
def advantage(self, state_values, last_state_value):
self.full = False
gt = last_state_value
for i in reversed(range(Config.batch_size)):
gt = self.rewards[i] + Config.gamma * gt * (1 - self.dones[i])
self.gt[i] = gt
self.advantages[i] = gt - state_values[i]
def gae_advantage(self, state_values, new_state_values):
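        # Generalized Advantage Estimation (backward recursion):
        #   delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_t) - V(s_t)
        #   A_t     = delta_t + gamma * lambda * (1 - done_t) * A_{t+1}
        # self.gt additionally accumulates the discounted returns-to-go used as
        # the critic target.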
self.full = False
self.gt[Config.batch_size] = new_state_values[-1]
for i in reversed(range(Config.batch_size)):
delta = self.rewards[i] + Config.gamma * new_state_values[i] * (1 - self.dones[i]) - state_values[i]
self.advantages[i] = delta + Config.gae_lambda * Config.gamma * self.advantages[i+1] * (1 - self.dones[i])
# For critic
self.gt[i] = self.rewards[i] + Config.gamma * self.gt[i+1] * (1 - self.dones[i])
def reset(self, full=False):
if full:
self.buffer_index = 0
self.episode_step[self.episode_step != 0] = 0
| 2.40625 | 2 |
acme/setup.py | stewnorriss/letsencrypt | 1 | 12790332 | <reponame>stewnorriss/letsencrypt
import sys
from setuptools import setup
from setuptools import find_packages
install_requires = [
# load_pem_private/public_key (>=0.6)
# rsa_recover_prime_factors (>=0.8)
'cryptography>=0.8',
'mock<1.1.0', # py26
'pyrfc3339',
'ndg-httpsclient', # urllib3 InsecurePlatformWarning (#304)
'pyasn1', # urllib3 InsecurePlatformWarning (#304)
# Connection.set_tlsext_host_name (>=0.13), X509Req.get_extensions (>=0.15)
'PyOpenSSL>=0.15',
'pytz',
'requests',
'six',
'werkzeug',
]
# env markers in extras_require cause problems with older pip: #517
if sys.version_info < (2, 7):
# only some distros recognize stdlib argparse as already satisfying
install_requires.append('argparse')
testing_extras = [
'nose',
'tox',
]
setup(
name='acme',
packages=find_packages(),
install_requires=install_requires,
extras_require={
'testing': testing_extras,
},
entry_points={
'console_scripts': [
'jws = acme.jose.jws:CLI.run',
],
},
test_suite='acme',
)
| 1.890625 | 2 |
tests/conftest.py | avallbona/periskop-python | 0 | 12790333 | <gh_stars>0
import pytest
from periskop_client.collector import ExceptionCollector
from periskop_client.exporter import ExceptionExporter
from periskop_client.models import HTTPContext
@pytest.fixture
def collector():
return ExceptionCollector()
@pytest.fixture
def exporter(collector):
return ExceptionExporter(collector)
@pytest.fixture
def sample_http_context():
return HTTPContext(request_method="GET", request_url="http://example.com",
request_headers={"Cache-Control": "no-cache"})
def get_exception_with_context(collector):
return list(collector._aggregated_exceptions.values())[0].latest_errors[0]
| 1.976563 | 2 |
test_script.py | ROBACON/mobspy | 0 | 12790334 | <reponame>ROBACON/mobspy<filename>test_script.py
# This script tests the model creation and compilation
# It also test the calculation capabilities
# It uses some simple model and assertions
import pytest
from mobspy import *
import sys
# TODO Plot has random order for species names
# Compare results with expected file
def compare_model(comp_results, file_name):
with open(file_name, 'r') as file:
for r, line in zip(comp_results.split('\n'), file.readlines()):
line = line.replace('\n', '')
if r != line:
return False
return True
# Model to test the basics
def test_model_1():
A, B, C = BaseSpecies(3)
A + B >> C[1]
MySim = Simulation(A | B | C)
MySim.level = 0
results = MySim.compile()
assert compare_model(results, 'test_tools/model_1.txt')
# Model to test basic inheritance
def test_model_2():
Carnivore, Herbivore = BaseSpecies(2)
Cat, Dog = New(Carnivore, 2)
Carnivore + Herbivore(1 * u.mol) >> Carnivore[1]
Cat(1 * u.mol), Dog(1 * u.mol)
MySim = Simulation(Cat | Dog | Herbivore)
MySim.level = 0
MySim.volume = 1 * u.meter ** 2
results = MySim.compile()
assert compare_model(results, 'test_tools/model_2.txt')
# Model to test species multiplication
def test_model_3():
MGMT, Blue_Oyster_Cult, The_Smiths = BaseSpecies(3)
MGMT.eletric_fell, MGMT.little_dark_age, MGMT.kids
Blue_Oyster_Cult.burning_for_you >> Blue_Oyster_Cult.reaper[1]
The_Smiths.stop_me >> The_Smiths.charming_man[1]
Music = MGMT * Blue_Oyster_Cult * The_Smiths
MySim = Simulation(Music)
MySim.level = 0
results = MySim.compile()
assert compare_model(results, 'test_tools/model_3.txt')
# Model to test inheritance queries
# All bacterias are infected by any virus here
def test_model_4():
Bacteria, Virus = BaseSpecies(2)
B1, B2 = New(Bacteria, 2)
V1, V2 = New(Virus, 2)
Bacteria.not_infected + Virus >> Bacteria.infected[1]
MySim = Simulation(B1 | B2 | V1 | V2)
MySim.level = 0
results = MySim.compile()
assert compare_model(results, 'test_tools/model_4.txt')
# Model to test round-robin and stoichiometry
def test_model_5():
A = BaseSpecies(1)
B, C = New(A, 2)
A >> 2 * A[1]
2 * A >> 3 * A[1]
MySim = Simulation(B | C)
MySim.level = 0
results = MySim.compile()
assert compare_model(results, 'test_tools/model_5.txt')
def test_model_6():
# This model tests species that are not referenced in the reactants (we call them Born Species)
A = BaseSpecies(1)
B = New(A)
C = New(A)
B.b1, B.b2, C.c1, C.c2
Zero >> 2 * A[1]
MySim = Simulation(B | C)
results = MySim.compile()
assert compare_model(results, 'test_tools/model_6.txt')
def test_model_7():
def oscillator(beta_m=5, beta_p=10, gamma_m=1, gamma_p=0.01, k=1, n=4, leaky=0.0001):
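        # Three-gene repressilator: each protein represses the next gene's mRNA
        # through a Hill term beta_m / (1 + (p/k)**n), mRNA is translated at
        # rate beta_p, and mRNA/proteins decay at rates gamma_m/gamma_p.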
Mortal, Creator = BaseSpecies(2)
mRNA = Mortal * Creator
Protein = New(Mortal)
# Repression reactions
for m, p in zip(['m1', 'm2', 'm3'], ['x2', 'x3', 'x1']):
            Protein.c(p) >> Protein.c(p) + mRNA.c(m)[lambda pro: f'{beta_m}/(1 + ({pro}/{k})**{n})']
# Production reactions
for m, p in zip(['m1', 'm2', 'm3'], ['x1', 'x2', 'x3']):
mRNA.c(m) >> mRNA.c(m) + Protein.c(p)[beta_p]
# We need the rate of degradation to be different from proteins and mRNA
Mortal >> Zero[lambda r1: gamma_p if r1.is_a(Protein) else gamma_m]
# This is the leaky mRNA expression, it needs to be low
Zero >> Creator[leaky]
MySim = Simulation(mRNA | Protein)
return MySim.compile()
assert compare_model(oscillator(), 'test_tools/model_7.txt')
# Model to test well defined orthogonal spaces
def orthogonal_spaces():
try:
A, B = BaseSpecies(2)
A.a, A.b
C = New(B)
C.a, C.b
MySim = Simulation(A | C)
MySim.level = 0
MySim.compile()
return False
except SystemExit:
return True
def test_orthogonal():
assert orthogonal_spaces()
# Model to test dimensional inconsistency
def dimensional_inconsistency():
try:
A, B, C = BaseSpecies(3)
A(1 * u.mol / u.meter ** 3) + B(1 * u.mol / u.meter ** 2) >> C[1]
MySim = Simulation(A | B | C)
MySim.level = 0
MySim.compile()
return False
except SystemExit:
print('Dimensional inconsistency model Ok')
return True
def test_dimensional_inconsistency():
assert dimensional_inconsistency()
| 2.484375 | 2 |
mkauthlist/__init__.py | alexji/mkauthlist | 3 | 12790335 | <filename>mkauthlist/__init__.py<gh_stars>1-10
#!/usr/bin/env python
"""
Nothing to see here.
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 1.445313 | 1 |
src/grid_plot.py | heitor57/ant-colony-tsp | 0 | 12790336 | import os
from concurrent.futures import ProcessPoolExecutor
import itertools
import yaml
import sys
import copy
import numpy as np
import pandas as pd
from lib.constants import *
from lib.utils import *
TOP_N = 15
loader = yaml.SafeLoader
loader.add_implicit_resolver(
u'tag:yaml.org,2002:float',
re.compile(u'''^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.'))
parser = argparse.ArgumentParser()
parser.add_argument('--config_file','-c',
default="config.yaml",
type=str,
help="Configuration file.")
args = parser.parse_args()
f = open(args.config_file)
config = yaml.load(f,Loader=loader)
to_search = {
'pheromony_policies': {'AntSystem':{"rho": [0.3,0.5,0.7],
"Q": [75, 100, 125]}},
"selection":{"beta": [3,5,7]},
'parameters':{
# "instance_name": ['lau15','sgb128'],
"eid": list(range(1,NUM_EXECUTIONS+1))},
}
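# Grid search over every combination of rho, Q and beta, repeated for each
# execution id; the saved AntColony results for each combination are loaded
# and their final fitness statistics aggregated into the LaTeX table below.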
# parameters_names=['rho','Q','betas','eid']
keys_to_value, combinations=utils.get_names_combinations(config,to_search)
result_df = pd.DataFrame(columns=
[keys[-1] for keys in keys_to_value])
parameters_names = [i[-1] for i in keys_to_value]
i = 0
for combination in combinations:
for keys, v in zip(keys_to_value,combination):
tmp = config
for k in keys[:-1]:
tmp = tmp[k]
tmp[keys[-1]] = v
result_df.loc[i,keys[-1]] = v
ac = AntColony(pheromony_kwargs=config['pheromony_policies'][config['parameters']['pheromony_policy']],
selection_policy_kwargs=config['selection'],
**config['parameters'])
df = ac.load_results()
result_df.loc[i,parameters_names] = combination
result_df.loc[i,'Best fitness global'] = df.iloc[-1]['Best fitness global']
result_df.loc[i,'Best fitness'] = df.iloc[-1]['Best fitness']
result_df.loc[i,'Mean fitness'] = df.iloc[-1]['Mean fitness']
result_df.loc[i,'Median fitness'] = df.iloc[-1]['Median fitness']
result_df.loc[i,'Worst fitness'] = df.iloc[-1]['Worst fitness']
i += 1
result_df['eid']=pd.to_numeric(result_df['eid'])
# print('Top best fitness')
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print(result_df)
pd.set_option('display.expand_frame_repr', False)
tmp = copy.copy(parameters_names)
tmp.remove('eid')
a=result_df.groupby(list(set(result_df.columns)-{'Best fitness global','Best fitness','Mean fitness','Median fitness','Worst fitness', 'eid'})).\
agg({i: ['mean','std'] for i in {'Best fitness global', 'Best fitness','Mean fitness','Median fitness','Worst fitness', 'eid'}}).\
sort_values(by=[('Best fitness global','mean')],ascending=True).reset_index()[tmp+['Best fitness global','Best fitness','Mean fitness','Median fitness','Worst fitness',]].head(TOP_N)
open(f"../doc/{config['parameters']['instance_name']}_output.tex",'w').write(a.to_latex())
# print('Top mean fitness')
# print(result_df.groupby(list(set(result_df.columns)-{'Best fitness','Mean fitness', 'eid'})).\
# agg({i: ['mean','median','std'] for i in {'Best fitness','Mean fitness', 'eid'}}).\
# sort_values(by=[('Mean fitness','mean')],ascending=True).reset_index()[list(set(to_update.keys())-{'eid'})+['Best fitness','Mean fitness']].head(TOP_N))
| 1.945313 | 2 |
python/keepsake/version.py | jsemric/keepsake | 810 | 12790337 | # This file is auto-generated by the root Makefile. Do not edit manually.
version = "0.4.2"
| 1 | 1 |
pedrec/models/validation/validation_results.py | noboevbo/PedRec | 1 | 12790338 | <gh_stars>1-10
from dataclasses import dataclass
from pedrec.models.validation.env_position_validation_results import EnvPositionValidationResults
from pedrec.models.validation.orientation_validation_results import OrientationValidationResults
from pedrec.models.validation.pose_2d_validation_conf_results import Pose2DValidationConfResults
from pedrec.models.validation.pose_2d_validation_pck_results import Pose2DValidationPCKResults
from pedrec.models.validation.pose_3d_validation_results import Pose3DValidationResults
@dataclass()
class ValidationResults(object):
loss: float
val_duration: float
pose2d_pck: Pose2DValidationPCKResults = None
pose2d_conf: Pose2DValidationConfResults = None
pose3d: Pose3DValidationResults = None
orientation: OrientationValidationResults = None
env_position: EnvPositionValidationResults = None
| 2.140625 | 2 |
main.py | 2017Kirill2017/Python_Audio_Synth | 0 | 12790339 | <reponame>2017Kirill2017/Python_Audio_Synth<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed May 9 2018
@author: Glu(<NAME>
"""
import os
from random import randint
from pygame import mixer
import time
from gtts import gTTS
from langdetect import detect_langs
from langdetect import DetectorFactory
DetectorFactory.seed = 0
class Voice_Synth:
def __init__(self, rem=False):
self.tts = None
self.last_path = None
self.remove_old = rem
mixer.init()
def to_mp3(self, txt, language=None):
if not language:
language = max(detect_langs(txt), key=lambda x: x.prob).lang
self.tts = gTTS(text=txt, lang=language)
def save_mp3(self, path):
if self.tts:
self.tts.save(path)
if self.remove_old:
if self.last_path:
mixer.music.load(path)
                    mixer.music.stop()
if(os.path.exists(self.last_path)):
os.remove(self.last_path)
self.last_path = path
def play_mp3(self, path=None):
if not path: path = self.last_path
mixer.music.load(path)
mixer.music.play()
while mixer.music.get_busy(): time.sleep(0.1)
def kill(self):
self.to_mp3("Надеюсь, что вам понравилась сия программа", language="ru")
name = "Bye"+str(randint(0,999999))+".sn.mp3"
self.save_mp3(name)
pass
def main():
print("Хотите просмотреть возможности программы?")
answer = input("[y/n]:")
if "y" in answer.lower():
synth = Voice_Synth(True)
synth.to_mp3('Привет, человек! Как у тебя дела?')
synth.save_mp3("1.mp3")
synth.play_mp3()
synth.to_mp3('Пока, человек! До скорой встречи!')
synth.save_mp3("2.mp3")
synth.play_mp3()
synth.kill()
synth.play_mp3()
else:
synth = Voice_Synth()
while True:
print("Если хотите выйти из программы - введите 'q'. Иначе - пустую строку")
if input(">>").lower() == "q":
synth.kill()
synth.play_mp3()
break
print("Введите текст:")
txt = input(">>")
print("Какой это язык ('ru'-русский, 'en'- ангийский, '*'- я не знаю)?")
language = input(">>")
if "*" in language: language = None
synth.to_mp3(txt, language)
print("Введите путь, по которому я сохраню mp3")
path = input(">>")
while True:
try:
synth.save_mp3(path)
break
except BaseException as e:
print("Что-то пошло не так:", e)
print("Введите другой путь, по которому я сохраню mp3")
path = input(">>")
print("Проиграть Вам mp3 файл?")
answer = input("[y/n]:")
if "y" in answer.lower():
synth.play_mp3()
pass
if __name__ == "__main__":
main() | 2.75 | 3 |
register/util/signals/__init__.py | kws/building-register | 0 | 12790340 | from .slack import *
| 1.085938 | 1 |
exams/validators.py | ayhanfuat/scheduler | 0 | 12790341 | <gh_stars>0
from django.core.exceptions import ValidationError
def validate_noexam(exam_pk):
from .models import Exam, NoExam
exam = Exam.objects.get(pk=exam_pk)
qs = NoExam.objects.filter(course=exam.offering.course, period=exam.period)
if qs.count() > 0:
raise ValidationError(
f"There is a NoExam record for {exam}.", params={"exam": exam}
)
| 2.546875 | 3 |
tests/pydevtest/configuration.py | cyverse/irods | 0 | 12790342 | <filename>tests/pydevtest/configuration.py
import socket
import os
RUN_IN_TOPOLOGY = False
TOPOLOGY_FROM_RESOURCE_SERVER = False
HOSTNAME_1 = HOSTNAME_2 = HOSTNAME_3 = socket.gethostname()
USE_SSL = False
ICAT_HOSTNAME = socket.gethostname()
PREEXISTING_ADMIN_PASSWORD = '<PASSWORD>'
# TODO: allow for arbitrary number of remote zones
class FEDERATION(object):
LOCAL_IRODS_VERSION = (4, 2, 0)
REMOTE_IRODS_VERSION = (4, 2, 0)
RODSUSER_NAME_PASSWORD_LIST = [('zonehopper', '<PASSWORD>')]
RODSADMIN_NAME_PASSWORD_LIST = []
IRODS_DIR = '/var/lib/irods/iRODS'
LOCAL_ZONE = 'dev'
REMOTE_ZONE = 'buntest'
REMOTE_HOST = 'buntest'
REMOTE_RESOURCE = 'demoResc'
REMOTE_VAULT = '/var/lib/irods/iRODS/Vault'
TEST_FILE_SIZE = 4*1024*1024
LARGE_FILE_SIZE = 64*1024*1024
TEST_FILE_COUNT = 300
MAX_THREADS = 16
| 1.984375 | 2 |
python/csvToJson.py | Hopingocean/Demo | 0 | 12790343 | <filename>python/csvToJson.py
import csv
import json
import codecs
import pandas as pd
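# Approach: read the CSV header with pandas (gbk-encoded) to obtain the field
# names, then stream the remaining rows through csv.DictReader and write each
# one out as an indented JSON object.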
with codecs.open('E:/Demo/python/enum.csv', 'r') as csvfile:
jsonfile = open('E:/Demo/python/enum.json','w', encoding='utf-8')
key = pd.read_csv('E:/Demo/python/enum.csv', encoding='gbk')
fieldnames1 = key.columns
keys = tuple(fieldnames1)
    reader = csv.DictReader(csvfile, keys)
    next(reader)  # skip the header row; its values already supplied the field names
    for row in reader:
json.dump(row, jsonfile, sort_keys=True, indent=2, separators=(',', ': '), ensure_ascii=False)
jsonfile.write('\n')
jsonfile.close()
csvfile.close()
| 3.1875 | 3 |
ChatApp/server.py | xckomorebi/ChatApp | 0 | 12790344 | <reponame>xckomorebi/ChatApp<gh_stars>0
import threading
from socket import *
from ChatApp.models import Message, User
from ChatApp.msg import Msg, MsgType
from ChatApp.settings import DEBUG, TIMEOUT
from ChatApp.utils import get_timestamp
def send(msg: Msg, receiver):
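    """
    Send a message to a single receiver over a fresh UDP socket. For SEND_ALL
    messages, wait for the receiver's ack; on timeout mark the receiver as
    offline, flag the user table for re-broadcast and store the message for
    later delivery.
    """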
global send_all_need_update
sock = socket(AF_INET, SOCK_DGRAM)
sock.settimeout(TIMEOUT)
msg.to = receiver
msg.send(sock)
if msg.type_ == MsgType.UPDATE_TABLE:
pass
elif msg.type_ == MsgType.SEND_ALL:
try:
ack_msg = sock.recv(2048)
if Msg.unpack(ack_msg).type_ == MsgType.SEND_ALL_ACK:
pass
except timeout:
user = User.get_by_name(receiver)
user.status = "no"
send_all_need_update = True
user.save_or_update()
msg.to_message(receiver).save()
return sock
def broadcast(msg: Msg):
receivers = msg.get_receiver_list()
threads = []
for receiver in receivers:
thread = threading.Thread(target=send, args=(msg, receiver, ))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def server_handle_received_msg(sock, msg, addr):
rcv_msg = Msg.unpack(msg)
if DEBUG:
print(addr, rcv_msg.__dict__)
type_ = rcv_msg.type_
if type_ == MsgType.REG:
user = User(rcv_msg.from_, addr[0], int(rcv_msg.content))
existing_user = User.get_by_name(rcv_msg.from_)
need_update = (existing_user != user)
msg = Msg(to=rcv_msg.from_, addr=addr)
if existing_user == None:
msg.type_ = MsgType.CREATED
else:
msg.type_ = MsgType.REG_ACK
msg.content = Message.retrieve_by_name(user.name) or ""
msg.send(sock)
if need_update:
user.save_or_update()
if existing_user and user.addr != existing_user.addr:
Msg(type_=MsgType.LOGOUT,
to=rcv_msg.from_,
addr=existing_user.addr).send(sock)
msg = Msg(content=User.get_all(),
from_=user.name,
type_=MsgType.UPDATE_TABLE)
broadcast(msg)
Msg(content=User.get_all(),
to=user.name,
type_=MsgType.UPDATE_TABLE).send(sock)
elif type_ == MsgType.STORE:
try:
test_sock = send(Msg(type_=MsgType.TEST, to=rcv_msg.to), rcv_msg.to)
rcv_pkt = test_sock.recv(2048)
if Msg.unpack(rcv_pkt).type_ == MsgType.TEST:
Msg(to=rcv_msg.from_,
type_=MsgType.USER_EXIST,
addr=addr).send(sock)
user = User.get_by_name(rcv_msg.to)
user.status = "yes"
user.save_or_update()
Msg(content=User.get_all(),
to=rcv_msg.from_,
type_=MsgType.UPDATE_TABLE).send()
return
except timeout:
pass
message = Message(rcv_msg.content,
rcv_msg.from_,
rcv_msg.to,
type_="send",
timestamp=get_timestamp())
message.save()
user = User.get_by_name(rcv_msg.to)
if user:
if user.status == "yes":
user.status = "no"
user.save_or_update()
Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock)
msg = Msg(content=user.__dict__,
type_=MsgType.UPDATE_TABLE)
broadcast(msg)
else:
Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock)
elif type_ == MsgType.SEND_ALL:
global send_all_need_update
send_all_need_update = False
Msg(type_=MsgType.SEND_ALL_SERVER_ACK,
addr=addr).send(sock)
rcv_msg.to_server = False
broadcast(rcv_msg)
if DEBUG:
print(User.get_all_inactive_users())
for user_dict in User.get_all_inactive_users():
rcv_msg.to_message(user_dict.get("name")).save()
if send_all_need_update:
msg = Msg(content=User.get_all(),
type_=MsgType.UPDATE_TABLE)
broadcast(msg)
elif type_ == MsgType.DEREG:
user = User.get_by_name(rcv_msg.from_)
user.status = "no"
user.save_or_update()
Msg(type_=MsgType.DEREG_ACK, addr=addr).send(sock)
msg = Msg(content=user.__dict__,
type_=MsgType.UPDATE_TABLE)
broadcast(msg)
def server_main(port: int):
sock = socket(AF_INET, SOCK_DGRAM)
sock.bind(("", port))
while True:
msg, addr = sock.recvfrom(2048)
server_handle_received_msg(sock, msg, addr)
| 2.515625 | 3 |
kernel_pm_acc.py | owensgroup/ml_perf_model | 0 | 12790345 | <gh_stars>0
# BSD 3-Clause License
#
# Copyright (c) 2021, The Regents of the University of California, Davis
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
from analysis.inference import infer
if __name__ == '__main__':
parser = argparse.ArgumentParser('Get performance model error for ops.')
parser.add_argument('--op-type', type=str, default='all')
parser.add_argument('--backward', action='store_true', default=False)
args = parser.parse_args()
if args.op_type == 'all':
op_list = ['embedding_lookup', 'fully_connected', 'conv2d', 'conv1d', 'concat', 'memcpy', 'transpose', 'bn', 'tril']
pass_list = ['forward', 'backward']
else:
op_list = [args.op_type]
pass_list = ['backward' if args.backward else 'forward']
for op_type in op_list:
for p in pass_list:
if (op_type == 'fully_connected' or \
op_type == 'transpose' or \
op_type == 'concat' or \
op_type == 'memcpy') and \
p == 'backward': # No backward for these ops
continue
if op_type == 'embedding_lookup':
for big in [False, True]:
for hit_rate_estimation in [False, True]:
for fbgemm in [False, True]:
infer(op_type, p=='backward', big=big, hit_rate_estimation=hit_rate_estimation, fbgemm=fbgemm)
else:
infer(op_type, p=='backward')
| 1.484375 | 1 |
Meta-learning_all.py | nobodymx/resilient_swarm_communications_with_meta_graph_convolutional_networks | 15 | 12790346 | from Main_algorithm_GCN.CR_MGC import CR_MGC
from Configurations import *
import matplotlib.pyplot as plt
from copy import deepcopy
from torch.optim import Adam
import Utils
# the range of the number of remained UAVs
meta_type = [i for i in range(2, 201)]
print("Meta Learning Starts...")
print("-----------------------------------")
for mt in meta_type:
meta_cr_gcm_n = CR_MGC()
# list of tuples [('', ...), ('',...)]
meta_params = dict(meta_cr_gcm_n.gcn_network.named_parameters())
# param name list
param_name = meta_cr_gcm_n.gcn_network.state_dict().keys()
# meta training
num_remain = mt
meta_seed = 0
loss_list = []
for epi in range(config_meta_training_epi):
# create the training gcn
training_cr_gcm_n = CR_MGC()
training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.001)
# decrease the learning rate as the meta learning moves on
if epi > 100:
training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.0001)
if epi > 250:
training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.00001)
# generate the support set of the training task
meta_training_support = np.zeros((num_remain, 3))
while True:
meta_training_support[:, 0] = np.random.rand(num_remain) * config_width
meta_training_support[:, 1] = np.random.rand(num_remain) * config_length
meta_training_support[:, 2] = np.random.rand(num_remain) * config_height
meta_seed += 1
np.random.seed(meta_seed)
cf, nc = Utils.check_if_a_connected_graph(meta_training_support, num_remain)
if not cf:
# print(cf)
break
# endow the initial values of the GCN with the meta parameter
for key in training_cr_gcm_n.gcn_network.state_dict().keys():
training_cr_gcm_n.gcn_network.state_dict()[key].copy_(meta_params[key].data)
# train the network on the support set
training_cr_gcm_n.train_support_set_single(meta_training_support, num_remain)
# generate the query set of the training task
meta_training_query = np.zeros((num_remain, 3))
while True:
meta_training_query[:, 0] = np.random.rand(num_remain) * config_width
meta_training_query[:, 1] = np.random.rand(num_remain) * config_length
meta_training_query[:, 2] = np.random.rand(num_remain) * config_height
meta_seed += 1
np.random.seed(meta_seed)
cf, nc = Utils.check_if_a_connected_graph(meta_training_query, num_remain)
if not cf:
# print(cf)
break
# train on the query set and return the gradient
gradient, loss = training_cr_gcm_n.train_query_set_single(meta_training_query, num_remain)
print("%d episode %d remain UAVs -- destroy %d UAVs -- loss %f" % (
epi, num_remain, config_num_of_agents - num_remain, loss))
loss_list.append(deepcopy(loss))
# update the meta parameter
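        # (first-order meta-update, Reptile/FOMAML-style: the query-set gradient
        # returned by the task network is added directly onto the meta parameters)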
for key in param_name:
meta_params[key].data += gradient[key].data
if epi >= 1:
x_axis = [i for i in range(epi + 1)]
fig = plt.figure()
plt.plot(x_axis, loss_list, linewidth=2.0)
plt.xlim((0, epi + 1))
plt.ylim((0, 1400))
plt.savefig('Meta_Learning_Results/meta_loss_pic/meta_%d.png' % num_remain)
plt.close()
# plt.show()
for key in meta_params.keys():
meta_params[key] = meta_params[key].cpu().data.numpy()
np.save('Meta_Learning_Results/meta_parameters/meta_%d.npy' % num_remain, meta_params)
| 2.296875 | 2 |
wielder/util/data_conf.py | hamshif/Wielder | 0 | 12790347 | #!/usr/bin/env python
__author__ = '<NAME>'
import os
import argparse
import yaml
from collections import namedtuple
import logging
from wielder.util.arguer import LogLevel, convert_log_level
from wielder.util.log_util import setup_logging
class Conf:
def __init__(self):
self.template_ignore_dirs = []
def attr_list(self, should_print=False):
items = self.__dict__.items()
if should_print:
logging.debug("Conf items:\n______\n")
[logging.debug(f"attribute: {k} value: {v}") for k, v in items]
return items
def get_datalake_parser():
parser = argparse.ArgumentParser(description=
'Data Orchestration Reactive Framework.')
parser.add_argument(
'-cf', '--conf_file',
type=str,
help='Full path to config file with all arguments.\nCommandline args override those in the file.'
)
parser.add_argument(
'-pl', '--plan',
type=bool,
default=False,
help='plan means to create template instances/files but not deploy them e.g. conf.yml.tmpl => conf.yml.'
)
parser.add_argument(
'-e', '--env',
type=str,
default='qe',
help='Deployment environment local means dev refers to git branches ...'
)
parser.add_argument(
'-re', '--runtime_env',
type=str,
default='local-docker',
help='Runtime environment eg local-docker, local, gcp, gcp-shared-vpc etc...'
)
parser.add_argument(
'-cpr', '--cloud_provider',
type=str,
choices=['gcp', 'aws', 'azure'],
help='Cloud provider will only mean something if not local:'
)
parser.add_argument(
'-edb', '--enable_debug',
type=bool,
help='Enabling Debug ports for remote debugging:'
)
parser.add_argument(
'-ll', '--log_level',
type=LogLevel,
choices=list(LogLevel),
help='LogLevel: as in Python logging',
default=LogLevel.INFO
)
return parser
def extract_gcp_to_conf(conf):
raw = conf.raw_config_args['gcp']
gcp = Conf()
gcp.gcp_project = raw['project']
gcp.gcp_image_repo_zone = raw['image_repo_zone']
gcp.is_shared_vpc = raw['is_shared_vpc']
gcp.region = raw['region']
gcp.zone = raw['zone']
gcp.image_repo_zone = raw['image_repo_zone']
gcp.service_accounts = raw['service_accounts']
gcp.network = raw['network']
gcp.subnetwork = raw['subnetwork']
conf.gcp = gcp
gcp_services = raw['services']
if 'dataproc' in gcp_services:
raw_dataproc = gcp_services['dataproc']
dataproc = Conf()
dataproc.high_availability = raw_dataproc['high_availability']
dataproc.extra_tags = raw_dataproc['extra_tags']
dataproc.region = raw_dataproc['region']
dataproc.zone = raw_dataproc['zone']
dataproc.internal_ip_only = raw_dataproc['internal_ip_only']
dataproc.master_machine_type = raw_dataproc['master_machine_type']
dataproc.worker_machine_type = raw_dataproc['worker_machine_type']
dataproc.master_boot_disk_size = raw_dataproc['master_boot_disk_size']
dataproc.worker_boot_disk_size = raw_dataproc['worker_boot_disk_size']
dataproc.num_worker_nodes = raw_dataproc['num_worker_nodes']
conf.gcp.dataproc = dataproc
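# Illustrative 'gcp' block consumed by extract_gcp_to_conf (key names come from the
# lookups above; all values below are placeholders, not project defaults):
#   gcp:
#     project: <project-id>
#     image_repo_zone: us-central1
#     is_shared_vpc: false
#     region: us-central1
#     zone: us-central1-a
#     service_accounts: [...]
#     network: <vpc-name>
#     subnetwork: <subnet-name>
#     services:
#       dataproc:
#         high_availability: false
#         extra_tags: []
#         region: us-central1
#         zone: us-central1-a
#         internal_ip_only: true
#         master_machine_type: n1-standard-4
#         worker_machine_type: n1-standard-4
#         master_boot_disk_size: 100
#         worker_boot_disk_size: 100
#         num_worker_nodes: 2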
def process_args(cmd_args):
if cmd_args.conf_file is None:
dir_path = os.path.dirname(os.path.realpath(__file__))
cmd_args.conf_file = dir_path + '/data_conf.yaml'
log_level = convert_log_level(cmd_args.log_level)
logging.basicConfig(
format='%(asctime)s %(levelname)s :%(message)s',
level=log_level,
datefmt='%m/%d/%Y %I:%M:%S %p'
)
with open(cmd_args.conf_file, 'r') as yaml_file:
conf_args = yaml.load(yaml_file, Loader=yaml.FullLoader)
    # conf_args is a dict, so check key membership (hasattr never matches dict keys)
    if 'plan' not in conf_args:
conf_args['plan'] = False
logging.debug('Configuration File Arguments:')
config_items = cmd_args.__dict__.items()
for k, v in config_items:
if v is not None:
conf_args[k] = v
named_tuple = namedtuple("Conf1", conf_args.keys())(*conf_args.values())
conf = Conf()
conf.plan = named_tuple.plan
conf.conf_file = named_tuple.conf_file
conf.deploy_env = named_tuple.deploy_env
conf.enable_debug = named_tuple.enable_debug
conf.enable_dev = named_tuple.enable_dev
conf.deploy_strategy = named_tuple.deploy_strategy
conf.supported_deploy_envs = named_tuple.supported_deploy_envs
conf.cloud_provider = named_tuple.cloud_provider
conf.template_ignore_dirs = named_tuple.template_ignore_dirs
conf.template_variables = named_tuple.template_variables
conf.script_variables = named_tuple.script_variables
conf.git_super_repo = named_tuple.git_super_repo
conf.git_branch = named_tuple.git_branch
conf.git_commit = named_tuple.git_commit
conf.raw_config_args = conf_args
if conf.cloud_provider == 'gcp':
extract_gcp_to_conf(conf)
conf.attr_list(True)
return conf
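# Illustrative top-level YAML keys consumed by process_args above (names taken from the
# attribute reads; values are placeholders, and cloud-provider blocks such as 'gcp' are
# sketched next to extract_gcp_to_conf):
#   deploy_env: dev
#   enable_debug: false
#   enable_dev: true
#   deploy_strategy: lean
#   supported_deploy_envs: [dev, qe, prod]
#   cloud_provider: gcp
#   template_ignore_dirs: []
#   template_variables: {}
#   script_variables: {}
#   git_super_repo: <git-url>
#   git_branch: master
#   git_commit: <sha>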
if __name__ == "__main__":
setup_logging(log_level=logging.DEBUG)
datalake_args, other_args = get_datalake_parser().parse_known_args()
_conf = process_args(datalake_args)
logging.debug('break point')
logging.info(f"datalake_args:\n{datalake_args}\n")
logging.info(f"other_args:\n{other_args}")
| 2.015625 | 2 |
beartype_test/a00_unit/a00_util/cache/test_utilcachecall.py | posita/beartype | 1,056 | 12790348 |
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype callable caching utility unit tests.**
This submodule unit tests the public API of the private
:mod:`beartype._util.cache.utilcachecall` submodule.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from beartype.roar._roarwarn import _BeartypeUtilCallableCachedKwargsWarning
from beartype_test.util.mark.pytmark import ignore_warnings
from pytest import raises
# ....................{ TESTS }....................
# Prevent pytest from capturing and displaying all expected non-fatal
# beartype-specific warnings emitted by the @callable_cached decorator.
@ignore_warnings(_BeartypeUtilCallableCachedKwargsWarning)
def test_callable_cached_pass() -> None:
'''
Test successful usage of the
:func:`beartype._util.cache.utilcachecall.callable_cached` decorator.
'''
# Defer heavyweight imports.
from beartype._util.cache.utilcachecall import callable_cached
# Callable memoized by this decorator.
@callable_cached
def still_i_rise(bitter, twisted, lies):
# If an arbitrary condition, raise an exception whose value depends on
# these parameters to exercise this decorator's conditional caching of
# exceptions.
if len(lies) == 6:
raise ValueError(lies)
# Else, return a value depending on these parameters to exercise this
# decorator's conditional caching of return values.
return bitter + twisted + lies
# Objects to be passed as parameters below.
bitter = ('You', 'may', 'write', 'me', 'down', 'in', 'history',)
twisted = ('With', 'your', 'bitter,', 'twisted,', 'lies.',)
lies = ('You', 'may', 'trod,', 'me,', 'in', 'the', 'very', 'dirt',)
dust = ('But', 'still,', 'like', 'dust,', "I'll", 'rise',)
# Assert that memoizing two calls passed the same positional arguments
# caches and returns the same value.
assert (
still_i_rise(bitter, twisted, lies) is
still_i_rise(bitter, twisted, lies))
# Assert that memoizing two calls passed the same positional and keyword
# arguments in the same order caches and returns the same value.
assert (
still_i_rise(bitter, twisted=twisted, lies=lies) is
still_i_rise(bitter, twisted=twisted, lies=lies))
# Assert that memoizing two calls passed the same keyword arguments in the
# same order cache and return the same value.
assert (
still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is
still_i_rise(bitter=bitter, twisted=twisted, lies=lies))
# Assert that memoizing a call expected to raise an exception does so.
with raises(ValueError) as exception_first_info:
still_i_rise(bitter, twisted, dust)
# Assert that repeating that call reraises the same exception.
with raises(ValueError) as exception_next_info:
still_i_rise(bitter, twisted, dust)
    # Compare the cached exception objects themselves, not the ExceptionInfo wrappers.
    assert exception_first_info.value is exception_next_info.value
# Assert that memoizing two calls passed the same keyword arguments in a
# differing order cache and return differing values.
assert (
still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is not
still_i_rise(twisted=twisted, lies=lies, bitter=bitter))
# Assert that passing one or more unhashable parameters to this callable
# succeeds with the expected return value.
assert still_i_rise(
('Just', 'like', 'moons',),
('and', 'like', 'suns',),
('With the certainty of tides',),
) == (
'Just', 'like', 'moons',
'and', 'like', 'suns',
'With the certainty of tides',
)
def test_callable_cached_fail() -> None:
'''
Test unsuccessful usage of the
:func:`beartype._util.cache.utilcachecall.callable_cached` decorator.
'''
# Defer heavyweight imports.
from beartype._util.cache.utilcachecall import callable_cached
from beartype.roar._roarexc import _BeartypeUtilCallableCachedException
# Assert that attempting to memoize a callable accepting one or more
# variadic positional parameters fails with the expected exception.
with raises(_BeartypeUtilCallableCachedException):
@callable_cached
def see_me_broken(*args):
return args
# Assert that attempting to memoize a callable accepting one or more
# variadic keyword parameters fails with the expected exception.
with raises(_BeartypeUtilCallableCachedException):
@callable_cached
def my_soulful_cries(**kwargs):
return kwargs
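# These tests are collected and run by pytest; an illustrative direct invocation:
#   python -m pytest beartype_test/a00_unit/a00_util/cache/test_utilcachecall.py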
| 1.898438 | 2 |
tools/kernelCollection.py | pelperscience/arctic-connectivity | 0 | 12790349 | """Kernels for advecting particles in Parcels"""
from parcels import (JITParticle, Variable)
import numpy as np
class unbeachableBoundedParticle(JITParticle):
# Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts
# beached : 0 sea, 1 beached, 2 after non-beach dyn, 3 after beach dyn, 4 please unbeach
beached = Variable('beached', dtype=np.int32, initial=0.)
unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.)
# inBounds : 1 yes, 0 no
inBounds = Variable('inBounds', dtype=np.int32, initial=1.)
class unbeachableParticle(JITParticle):
# Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts
# beached : 0 sea, 1 beached, 2 after non-beach dyn, 3 after beach dyn, 4 please unbeach
beached = Variable('beached', dtype=np.int32, initial=0.)
unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.)
class boundedParticle(JITParticle):
# inBounds : 1 yes, 0 no
inBounds = Variable('inBounds', dtype=np.int32, initial=1.)
# Kernels for circular boundary
def wrapLon(particle, fieldset, time):
if particle.lon > 180.:
particle.lon = particle.lon - 360.
if particle.lon < -180.:
particle.lon = particle.lon + 360.
def northPolePushBack(particle, fieldset, time):
if particle.lat > 89.915:
particle.lat = 89.915
# Freeze particles that get out of bounds
def freezeOutOfBoundsWedge(particle, fieldset, time):
lon, lat = (particle.lon, particle.lat)
if lon > 65. or lon < -45. or lat > 85. or lat < 60.:
particle.inBounds = 0
# Freeze particles that get out of bounds
def freezeOutOfBoundsArctic(particle, fieldset, time):
lon, lat = (particle.lon, particle.lat)
if lat < 60.:
particle.inBounds = 0
def freezeOutOfBoundsArctic65(particle, fieldset, time):
lon, lat = (particle.lon, particle.lat)
if lat < 65.:
particle.inBounds = 0
def freezeOutOfBoundsArctic70(particle, fieldset, time):
lon, lat = (particle.lon, particle.lat)
if lat < 70.:
particle.inBounds = 0
# Advection kernel. Checks first whether a particle is within bounds and whether it is not beached.
def UnbeachBoundedAdvectionRK4(particle, fieldset, time):
if particle.inBounds == 1:
if particle.beached == 0:
(u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon]
lon1, lat1 = (particle.lon + u1*.5*particle.dt, particle.lat + v1*.5*particle.dt)
(u2, v2) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat1, lon1]
lon2, lat2 = (particle.lon + u2*.5*particle.dt, particle.lat + v2*.5*particle.dt)
(u3, v3) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat2, lon2]
lon3, lat3 = (particle.lon + u3*particle.dt, particle.lat + v3*particle.dt)
(u4, v4) = fieldset.UV[time + particle.dt, particle.depth, lat3, lon3]
particle.lon += (u1 + 2*u2 + 2*u3 + u4) / 6. * particle.dt
particle.lat += (v1 + 2*v2 + 2*v3 + v4) / 6. * particle.dt
particle.beached = 2
def UnbeachAdvectionRK4(particle, fieldset, time):
if particle.beached == 0:
(u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon]
lon1, lat1 = (particle.lon + u1*.5*particle.dt, particle.lat + v1*.5*particle.dt)
(u2, v2) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat1, lon1]
lon2, lat2 = (particle.lon + u2*.5*particle.dt, particle.lat + v2*.5*particle.dt)
(u3, v3) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat2, lon2]
lon3, lat3 = (particle.lon + u3*particle.dt, particle.lat + v3*particle.dt)
(u4, v4) = fieldset.UV[time + particle.dt, particle.depth, lat3, lon3]
particle.lon += (u1 + 2*u2 + 2*u3 + u4) / 6. * particle.dt
particle.lat += (v1 + 2*v2 + 2*v3 + v4) / 6. * particle.dt
particle.beached = 2
def BoundedAdvectionRK4(particle, fieldset, time):
if particle.inBounds == 1:
(u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon]
lon1, lat1 = (particle.lon + u1*.5*particle.dt, particle.lat + v1*.5*particle.dt)
(u2, v2) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat1, lon1]
lon2, lat2 = (particle.lon + u2*.5*particle.dt, particle.lat + v2*.5*particle.dt)
(u3, v3) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat2, lon2]
lon3, lat3 = (particle.lon + u3*particle.dt, particle.lat + v3*particle.dt)
(u4, v4) = fieldset.UV[time + particle.dt, particle.depth, lat3, lon3]
particle.lon += (u1 + 2*u2 + 2*u3 + u4) / 6. * particle.dt
particle.lat += (v1 + 2*v2 + 2*v3 + v4) / 6. * particle.dt
def deleteParticle(particle, fieldset, time):
print(f"Particle {particle.id} deleted: ({particle.lon}, {particle.lat} at {particle.time})")
particle.delete()
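# Illustrative composition of the kernels in this module with a Parcels ParticleSet
# (the fieldset, release locations, runtime and time step below are assumptions,
# and beachTesting/unBeaching are defined further down):
#   pset = ParticleSet(fieldset, pclass=unbeachableBoundedParticle, lon=lons, lat=lats)
#   kernels = (pset.Kernel(UnbeachBoundedAdvectionRK4) + pset.Kernel(beachTesting)
#              + pset.Kernel(unBeaching) + pset.Kernel(freezeOutOfBoundsArctic)
#              + pset.Kernel(wrapLon) + pset.Kernel(northPolePushBack))
#   pset.execute(kernels, runtime=timedelta(days=90), dt=timedelta(minutes=20),
#                recovery={ErrorCode.ErrorOutOfBounds: deleteParticle})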
# Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts
def beachTesting(particle, fieldset, time):
if particle.beached == 2 or particle.beached == 3:
(u, v) = fieldset.UV[time, particle.depth, particle.lat, particle.lon]
if fabs(u) < 1e-14 and fabs(v) < 1e-14:
if particle.beached == 2:
particle.beached = 4
else:
particle.beached = 1
else:
particle.beached = 0
# Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts
def unBeaching(particle, fieldset, time):
if particle.beached == 4:
(ub, vb) = fieldset.UVunbeach[time, particle.depth, particle.lat, particle.lon]
particle.lon += ub * particle.dt
particle.lat += vb * particle.dt
particle.beached = 0
        particle.unbeachCount += 1
| 2.71875 | 3 |
ghia/web.py | tumapav/ghia | 0 | 12790350 | import click
import configparser
import hmac
import os
from flask import Flask
from flask import request
from flask import render_template
from .ghia_patterns import GhiaPatterns
from .ghia_requests import GhiaRequests
from .ghia_issue import Issue
BAD_REQUEST = 400
ALLOWED_ACTIONS = ["opened", "edited", "transferred", "reopened", "assigned", "unassigned", "labeled", "unlabeled"]
def prepare_app():
env_conf = os.getenv('GHIA_CONFIG')
if env_conf is None:
raise click.BadParameter("GHIA_CONFIG is missing from the environment.")
conf_paths = env_conf.split(":")
config_content = ""
for path in conf_paths:
with open(path, 'r') as file:
config_content += file.read() + "\n"
config = configparser.ConfigParser()
config.optionxform = str # maintain case sensitivity in keys
config.read_string(config_content)
if "github" not in config or "token" not in config["github"]:
raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR)
if "patterns" not in config:
raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR)
if "secret" not in config["github"]:
secret = None
else:
secret = config["github"]["secret"]
token = config["github"]["token"]
return token, secret, config
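# GHIA_CONFIG is a ':'-separated list of INI files whose merged content must provide a
# [github] section with 'token' (and optionally 'secret') plus a [patterns] section.
# Illustrative layout (the pattern syntax itself is defined by GhiaPatterns):
#   [github]
#   token = <personal access token>
#   secret = <webhook secret>
#   [patterns]
#   <username> = <rules understood by GhiaPatterns>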
def prepare_app_test(conf):
session = conf["session"]
config = conf["config"]
token = conf["TOKEN"]
secret = conf["SECRET"]
return token, secret, config, session
def create_app(conf):
app = Flask(__name__)
if conf and "test" in conf and conf["test"]:
token, secret, config, session = prepare_app_test(conf)
else:
token, secret, config = prepare_app()
session = None
ghia_patterns = GhiaPatterns(config)
ghia_patterns.set_strategy('append')
req = GhiaRequests(token, session=session)
user = req.get_user()
def github_verify_request():
github_signed = request.headers.get('X-Hub-Signature')
if github_signed is None and secret is None:
# Signature check is skipped only if the secret is missing in the ghia-config and in the webhook config
return True
elif github_signed is None or secret is None:
# GitHub request has signature but ghia-config is missing the secret
# or ghia-config has secret but webhook doesn't send signed request
raise ValueError("Signature verification failed.")
try:
hash_name, hash_value = github_signed.split('=', maxsplit=2)
except ValueError:
raise ValueError("Signature header has incorrect format.")
if hash_name != 'sha1':
raise ValueError("GitHub signatures are expected to use SHA1.")
computed_hash = hmac.new(
bytearray(secret, "utf-8"), # get the secret as bytes
digestmod='sha1',
msg=request.get_data()
)
if computed_hash.hexdigest() != hash_value:
raise RuntimeError("The request signature is wrong.")
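    # For reference, the signature verified above is what GitHub sends in the
    # 'X-Hub-Signature' header: an HMAC-SHA1 hexdigest of the raw request body keyed
    # with the webhook secret. A test client could sign a payload the same way
    # (illustrative, not part of this app):
    #   digest = hmac.new(secret.encode('utf-8'), payload_bytes, 'sha1').hexdigest()
    #   headers = {'X-Hub-Signature': 'sha1=' + digest}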
def process_issues():
data = request.get_json(silent=True)
if data is None:
return "Webhook request missing JSON data.", BAD_REQUEST
if data["issue"]["state"] == "closed":
return "Closed issue is ignored."
action = data["action"]
if action not in ALLOWED_ACTIONS:
return "This issue action is ignored."
issue = Issue(data["issue"])
req.slug = data["repository"]["full_name"]
updated_issue = ghia_patterns.apply_to(issue)
if updated_issue:
req.update_issue(updated_issue)
return "Issue update done."
def process_webhook():
event_type = request.headers.get('X-Github-Event')
try:
github_verify_request()
        except (ValueError, RuntimeError) as e:
return str(e), BAD_REQUEST
if event_type == "issues":
return process_issues()
elif event_type == "ping":
return "Ping OK"
else:
return "Event type ignored."
@app.route('/', methods=['POST', 'GET'])
def index():
if request.method == 'POST':
return process_webhook()
return render_template('index.html', user=user, patterns=ghia_patterns)
return app
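# Illustrative local run (config file names and paths are examples only):
#   export GHIA_CONFIG=./credentials.cfg:./rules.cfg
#   export FLASK_APP="ghia.web:create_app(None)"
#   flask run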
| 2.46875 | 2 |