blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3-281) | content_id (string, len 40) | detected_licenses (list, len 0-57) | license_type (string, 2 classes) | repo_name (string, len 6-116) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 313 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 18.2k-668M, nullable) | star_events_count (int64, 0-102k) | fork_events_count (int64, 0-38.2k) | gha_license_id (string, 17 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 107 classes) | src_encoding (string, 20 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 4-6.02M) | extension (string, 78 classes) | content (string, len 2-6.02M) | authors (list, len 1) | author (string, len 0-175) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f46f3f29cb80c2826087623308da18f78f72a5fc
|
91f948b849a03f27c96aa6b76980a5fa68970b70
|
/experiments/__init__.py
|
de913a706b51dac74f50aafe9917d627f649419c
|
[
"MIT"
] |
permissive
|
satyam-cyc/MASS-Learning
|
3d987af7622f604db02b64313179590651285170
|
0d40de5227c94d1a5e4b18e44d16374e12821ad2
|
refs/heads/master
| 2022-01-10T02:23:06.670225 | 2019-06-11T19:41:35 | 2019-06-11T19:41:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 304 |
py
|
from .LogModelParameters import LogModelParameters
from .MASSLossTerms import MASSLossTerms
from .ModelLossAndAccuracy import ModelLossAndAccuracy
from .OODDetection import OODDetection
from .SaveModelParameters import SaveModelParameters
from .UncertaintyQuantification import UncertaintyQuantification
|
[
"[email protected]"
] | |
85041057b18077c038426fd96461f5dbd0ed30a7
|
747febe786dd6b7fd6c63cfe73dbe3023354daa8
|
/src/the_tale/the_tale/game/companions/tests/test_abilities_effects.py
|
433f74ad27be13a54043c1f878032a3d17dfde97
|
[
"BSD-3-Clause"
] |
permissive
|
the-tale/the-tale
|
4e4b8d91dc873a5fb935fe58e9721a877baa6d3f
|
e8450bd2332344da805b1851e728da5a3e5bf0ef
|
refs/heads/develop
| 2023-08-01T13:53:46.835667 | 2022-12-25T18:04:56 | 2022-12-25T18:04:56 | 1,949,167 | 98 | 52 |
BSD-3-Clause
| 2023-02-15T18:57:33 | 2011-06-24T18:49:48 |
Python
|
UTF-8
|
Python
| false | false | 35,535 |
py
|
import smart_imports
smart_imports.all()
effects = companions_abilities_effects
MODIFIERS = heroes_relations.MODIFIERS
class BaseEffectsTests(utils_testcase.TestCase):
def setUp(self):
super(BaseEffectsTests, self).setUp()
game_logic.create_test_map()
self.account = self.accounts_factory.create_account()
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(self.account.id)
self.hero = self.storage.accounts_to_heroes[self.account.id]
self.companion_record = logic.create_companion_record(utg_name=game_names.generator().get_test_name(),
description='description',
type=tt_beings_relations.TYPE.random(),
max_health=10,
dedication=relations.DEDICATION.random(),
archetype=game_relations.ARCHETYPE.random(),
mode=relations.MODE.random(),
abilities=companions_abilities_container.Container(),
communication_verbal=tt_beings_relations.COMMUNICATION_VERBAL.random(),
communication_gestures=tt_beings_relations.COMMUNICATION_GESTURES.random(),
communication_telepathic=tt_beings_relations.COMMUNICATION_TELEPATHIC.random(),
intellect_level=tt_beings_relations.INTELLECT_LEVEL.random(),
structure=tt_beings_relations.STRUCTURE.random(),
features=frozenset((tt_beings_relations.FEATURE.random(), tt_beings_relations.FEATURE.random())),
movement=tt_beings_relations.MOVEMENT.random(),
body=tt_beings_relations.BODY.random(),
size=tt_beings_relations.SIZE.random(),
orientation=tt_beings_relations.ORIENTATION.random(),
weapons=[artifacts_objects.Weapon(weapon=artifacts_relations.STANDARD_WEAPON.random(),
material=tt_artifacts_relations.MATERIAL.random(),
power_type=artifacts_relations.ARTIFACT_POWER_TYPE.random())],
state=relations.STATE.ENABLED)
self.hero.set_companion(logic.create_companion(self.companion_record))
def apply_ability(self, ability):
container = companions_abilities_container.Container(common=(),
start=frozenset((ability,)),
coherence=None,
honor=None,
peacefulness=None)
self.companion_record.abilities = container
self.hero.reset_accessors_cache()
def get_ability(self, *argv):
return random.choice([ability
for ability in effects.ABILITIES.records
if any(isinstance(ability.effect, effect) for effect in argv)])
class CommonTests(BaseEffectsTests):
def test_aprox(self):
self.assertEqual(effects.aprox(1, 2, 1), 1.2)
self.assertEqual(effects.aprox(1, 2, 2), 1.4)
self.assertEqual(effects.aprox(1, 2, 3), 1.6)
self.assertEqual(effects.aprox(1, 2, 4), 1.8)
self.assertEqual(effects.aprox(1, 2, 5), 2)
class CoherenceSpeedTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CoherenceSpeed(0.8)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COHERENCE_EXPERIENCE, 10), 8)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COHERENCE_EXPERIENCE, 11), 8.8)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COHERENCE_EXPERIENCE,)), 11), 11)
effect = effects.CoherenceSpeed(1.2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COHERENCE_EXPERIENCE, 10), 12)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COHERENCE_EXPERIENCE, 11), 13.2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COHERENCE_EXPERIENCE,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CoherenceSpeed)
self.hero.companion.coherence = c.COMPANIONS_MAX_COHERENCE - 1
self.hero.companion.experience = 0
self.hero.companion.add_experience(10)
old_delta = self.hero.companion.experience
self.hero.companion.experience = 0
self.apply_ability(ability)
self.hero.companion.add_experience(10)
new_delta = self.hero.companion.experience
self.assertEqual(int(round(old_delta * ability.effect.multiplier_left)), new_delta)
class ChangeHabitsTests(BaseEffectsTests):
def test_effect(self):
effect = effects.ChangeHabits(habit_type=game_relations.HABIT_TYPE.HONOR,
habit_sources=(heroes_relations.HABIT_CHANGE_SOURCE.COMPANION_HONOR_NEUTRAL_1,
heroes_relations.HABIT_CHANGE_SOURCE.COMPANION_HONOR_NEUTRAL_2))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.HABITS_SOURCES, set()), set((heroes_relations.HABIT_CHANGE_SOURCE.COMPANION_HONOR_NEUTRAL_1,
heroes_relations.HABIT_CHANGE_SOURCE.COMPANION_HONOR_NEUTRAL_2)))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.HABITS_SOURCES,)), set()), set())
def check_habits_changed(self, honor, peacefulness, honor_check, peacefulness_check):
self.hero.habit_honor.set_habit(honor)
self.hero.habit_peacefulness.set_habit(peacefulness)
for habit_source in self.hero.companion.modify_attribute(heroes_relations.MODIFIERS.HABITS_SOURCES, set()):
self.hero.update_habits(habit_source)
self.assertTrue(honor_check(self.hero.habit_honor.raw_value))
self.assertTrue(peacefulness_check(self.hero.habit_peacefulness.raw_value))
def test_in_game__aggressive(self):
self.apply_ability(effects.ABILITIES.AGGRESSIVE)
self.check_habits_changed(honor=-c.HABITS_BORDER, peacefulness=0,
honor_check=lambda v: v == -c.HABITS_BORDER,
peacefulness_check=lambda v: v < 0)
self.check_habits_changed(honor=0, peacefulness=c.HABITS_BORDER,
honor_check=lambda v: v == 0,
peacefulness_check=lambda v: v < c.HABITS_BORDER)
self.check_habits_changed(honor=c.HABITS_BORDER, peacefulness=-c.HABITS_BORDER,
honor_check=lambda v: v == c.HABITS_BORDER,
peacefulness_check=lambda v: v == -c.HABITS_BORDER)
def test_in_game__peaceful(self):
self.apply_ability(effects.ABILITIES.PEACEFUL)
self.check_habits_changed(honor=-c.HABITS_BORDER, peacefulness=0,
honor_check=lambda v: v == -c.HABITS_BORDER,
peacefulness_check=lambda v: v > 0)
self.check_habits_changed(honor=0, peacefulness=c.HABITS_BORDER,
honor_check=lambda v: v == 0,
peacefulness_check=lambda v: v == c.HABITS_BORDER)
self.check_habits_changed(honor=c.HABITS_BORDER, peacefulness=-c.HABITS_BORDER,
honor_check=lambda v: v == c.HABITS_BORDER,
peacefulness_check=lambda v: v > -c.HABITS_BORDER)
def test_in_game__reserved(self):
self.apply_ability(effects.ABILITIES.RESERVED)
self.check_habits_changed(honor=-c.HABITS_BORDER, peacefulness=0,
honor_check=lambda v: v == -c.HABITS_BORDER,
peacefulness_check=lambda v: v == 0)
self.check_habits_changed(honor=0, peacefulness=c.HABITS_BORDER,
honor_check=lambda v: v == 0,
peacefulness_check=lambda v: v < c.HABITS_BORDER)
self.check_habits_changed(honor=c.HABITS_BORDER, peacefulness=-c.HABITS_BORDER,
honor_check=lambda v: v == c.HABITS_BORDER,
peacefulness_check=lambda v: v > -c.HABITS_BORDER)
def test_in_game__canny(self):
self.apply_ability(effects.ABILITIES.CANNY)
self.check_habits_changed(honor=-c.HABITS_BORDER, peacefulness=0,
honor_check=lambda v: v > -c.HABITS_BORDER,
peacefulness_check=lambda v: v == 0)
self.check_habits_changed(honor=0, peacefulness=c.HABITS_BORDER,
honor_check=lambda v: v == 0,
peacefulness_check=lambda v: v == c.HABITS_BORDER)
self.check_habits_changed(honor=c.HABITS_BORDER, peacefulness=-c.HABITS_BORDER,
honor_check=lambda v: v < c.HABITS_BORDER,
peacefulness_check=lambda v: v == -c.HABITS_BORDER)
def test_in_game__honest(self):
self.apply_ability(effects.ABILITIES.HONEST)
self.check_habits_changed(honor=-c.HABITS_BORDER, peacefulness=0,
honor_check=lambda v: v > -c.HABITS_BORDER,
peacefulness_check=lambda v: v == 0)
self.check_habits_changed(honor=0, peacefulness=c.HABITS_BORDER,
honor_check=lambda v: v > 0,
peacefulness_check=lambda v: v == c.HABITS_BORDER)
self.check_habits_changed(honor=c.HABITS_BORDER, peacefulness=-c.HABITS_BORDER,
honor_check=lambda v: v == c.HABITS_BORDER,
peacefulness_check=lambda v: v == -c.HABITS_BORDER)
def test_in_game__sneaky(self):
self.apply_ability(effects.ABILITIES.SNEAKY)
self.check_habits_changed(honor=-c.HABITS_BORDER, peacefulness=0,
honor_check=lambda v: v == -c.HABITS_BORDER,
peacefulness_check=lambda v: v == 0)
self.check_habits_changed(honor=0, peacefulness=c.HABITS_BORDER,
honor_check=lambda v: v < 0,
peacefulness_check=lambda v: v == c.HABITS_BORDER)
self.check_habits_changed(honor=c.HABITS_BORDER, peacefulness=-c.HABITS_BORDER,
honor_check=lambda v: v < c.HABITS_BORDER,
peacefulness_check=lambda v: v == -c.HABITS_BORDER)
class QuestMoneyRewardTests(BaseEffectsTests):
def test_effect(self):
effect = effects.QuestMoneyReward(0.5)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.QUEST_MONEY_REWARD, 10), 10.5)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.QUEST_MONEY_REWARD, 11), 11.5)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.QUEST_MONEY_REWARD,)), 11), 11)
effect = effects.QuestMoneyReward(2.0)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.QUEST_MONEY_REWARD, 10), 12)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.QUEST_MONEY_REWARD, 11), 13)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.QUEST_MONEY_REWARD,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.QuestMoneyReward)
with self.check_changed(lambda: self.hero.quest_money_reward_multiplier()):
self.apply_ability(ability)
class MaxBagSizeTests(BaseEffectsTests):
def test_effect(self):
effect = effects.MaxBagSize(666)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.MAX_BAG_SIZE, 10), 676)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.MAX_BAG_SIZE, 11), 677)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.MAX_BAG_SIZE,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.MaxBagSize)
with self.check_changed(lambda: self.hero.max_bag_size):
self.apply_ability(ability)
class PoliticsPowerTests(BaseEffectsTests):
def test_effect(self):
effect = effects.PoliticsPower(3)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.POWER, 11), 14.0)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.POWER, )), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.PoliticsPower)
with self.check_changed(lambda: self.hero.politics_power_modifier):
self.apply_ability(ability)
class MagicDamageBonusTests(BaseEffectsTests):
def test_effect(self):
effect = effects.MagicDamageBonus(2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.MAGIC_DAMAGE, 10), 20)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.PHYSIC_DAMAGE, 10), 10)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.MAGIC_DAMAGE,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.MagicDamageBonus)
with self.check_changed(lambda: self.hero.magic_damage_modifier):
self.apply_ability(ability)
class PhysicDamageBonusTests(BaseEffectsTests):
def test_effect(self):
effect = effects.PhysicDamageBonus(2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.MAGIC_DAMAGE, 10), 10)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.PHYSIC_DAMAGE, 10), 20)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.PHYSIC_DAMAGE,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.PhysicDamageBonus)
with self.check_changed(lambda: self.hero.physic_damage_modifier):
self.apply_ability(ability)
class SpeedTests(BaseEffectsTests):
def test_effect(self):
effect = effects.Speed(2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.SPEED, 10), 20)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.SPEED,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.Speed)
with self.check_changed(lambda: self.hero.move_speed):
self.apply_ability(ability)
class BattleAbilityTests(BaseEffectsTests):
def test_effect(self):
effect = effects.BattleAbilityFireball()
self.assertEqual(effect._modify_attribute({}, MODIFIERS.INITIATIVE, 10), 10.25)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.ADDITIONAL_ABILITIES, []), [effect.ABILITY])
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.INITIATIVE, MODIFIERS.ADDITIONAL_ABILITIES)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.BattleAbilityHit,
effects.BattleAbilityStrongHit,
effects.BattleAbilityRunUpPush,
effects.BattleAbilityFireball,
effects.BattleAbilityPoisonCloud,
effects.BattleAbilityFreezing)
with self.check_changed(lambda: self.hero.initiative):
with self.check_changed(lambda: len(self.hero.companion.modify_attribute(heroes_relations.MODIFIERS.ADDITIONAL_ABILITIES,
heroes_relations.MODIFIERS.ADDITIONAL_ABILITIES.default()))):
self.apply_ability(ability)
class InitiativeTests(BaseEffectsTests):
def test_effect(self):
effect = effects.Initiative(2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.INITIATIVE, 10), 20)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.INITIATIVE,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.Initiative)
with self.check_changed(lambda: self.hero.initiative):
self.apply_ability(ability)
class BattleProbabilityTests(BaseEffectsTests):
def test_effect(self):
effect = effects.BattleProbability(1.5)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.BATTLES_PER_TURN, 10), 11.5)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.BATTLES_PER_TURN,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.BattleProbability)
with self.check_changed(lambda: self.hero.battles_per_turn_summand):
self.apply_ability(ability)
class LootProbabilityTests(BaseEffectsTests):
def test_effect(self):
effect = effects.LootProbability(2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.LOOT_PROBABILITY, 10), 20)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.LOOT_PROBABILITY,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.LootProbability)
with self.check_changed(lambda: self.hero.loot_probability(mobs_storage.mobs.all()[0])):
self.apply_ability(ability)
class CompanionDamageTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionDamage(3)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_DAMAGE, 10), 13)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_DAMAGE,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionDamage)
with mock.patch('the_tale.game.balance.constants.COMPANIONS_BONUS_DAMAGE_PROBABILITY', 6666666666):
with self.check_changed(lambda: self.hero.companion_damage):
self.apply_ability(ability)
class CompanionDamageProbabilityTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionDamageProbability(3)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_DAMAGE_PROBABILITY, 10), 30)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_DAMAGE_PROBABILITY,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionDamageProbability)
with self.check_changed(lambda: self.hero.companion_damage_probability):
self.apply_ability(ability)
class CompanionStealMoneyTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionStealMoney(3)
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_STEAL_MONEY))
self.assertFalse(effect._check_attribute(MODIFIERS.COMPANION_STEAL_MONEY_MULTIPLIER))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_STEAL_MONEY, MODIFIERS.COMPANION_STEAL_MONEY_MULTIPLIER))))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_STEAL_MONEY, 10), 10)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_STEAL_MONEY_MULTIPLIER, 10), 30)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_STEAL_MONEY, MODIFIERS.COMPANION_STEAL_MONEY_MULTIPLIER)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionStealMoney)
with self.check_changed(lambda: self.hero.can_companion_steal_money()):
with self.check_changed(lambda: self.hero.companion_steal_money_modifier):
self.apply_ability(ability)
class CompanionStealItemTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionStealItem(3)
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_STEAL_ITEM))
self.assertFalse(effect._check_attribute(MODIFIERS.COMPANION_STEAL_ITEM_MULTIPLIER))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_STEAL_ITEM, MODIFIERS.COMPANION_STEAL_ITEM_MULTIPLIER))))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_STEAL_ITEM, 10), 10)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_STEAL_ITEM_MULTIPLIER, 10), 30)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_STEAL_ITEM, MODIFIERS.COMPANION_STEAL_ITEM_MULTIPLIER)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionStealItem)
with self.check_changed(lambda: self.hero.can_companion_steal_item()):
with self.check_changed(lambda: self.hero.companion_steal_artifact_probability_multiplier):
self.apply_ability(ability)
class CompanionSparePartsTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionSpareParts()
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_SPARE_PARTS))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_SPARE_PARTS,))))
def test_in_game(self):
ability = self.get_ability(effects.CompanionSpareParts)
with self.check_changed(lambda: self.hero.can_companion_broke_to_spare_parts()):
self.apply_ability(ability)
class CompanionSayWisdomTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionSayWisdom(3)
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_SAY_WISDOM))
self.assertFalse(effect._check_attribute(MODIFIERS.COMPANION_SAY_WISDOM_PROBABILITY))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_SAY_WISDOM, MODIFIERS.COMPANION_SAY_WISDOM_PROBABILITY))))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_SAY_WISDOM, 10), 10)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_SAY_WISDOM_PROBABILITY, 10), 30)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_SAY_WISDOM, MODIFIERS.COMPANION_SAY_WISDOM_PROBABILITY)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionSayWisdom)
with self.check_changed(lambda: self.hero.can_companion_say_wisdom()):
with self.check_changed(lambda: self.hero.companion_say_wisdom_probability):
self.apply_ability(ability)
class CompanionExpPerHealTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionExpPerHeal(2)
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_EXP_PER_HEAL))
self.assertFalse(effect._check_attribute(MODIFIERS.COMPANION_EXP_PER_HEAL_PROBABILITY))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_EXP_PER_HEAL, MODIFIERS.COMPANION_EXP_PER_HEAL_PROBABILITY))))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_EXP_PER_HEAL, 10), 10)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_EXP_PER_HEAL_PROBABILITY, 10), 20)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_EXP_PER_HEAL, MODIFIERS.COMPANION_EXP_PER_HEAL_PROBABILITY)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionExpPerHeal)
with self.check_changed(lambda: self.hero.can_companion_exp_per_heal()):
with self.check_changed(lambda: self.hero.companion_exp_per_heal_probability):
self.apply_ability(ability)
class DoubleReligionProfitTests(BaseEffectsTests):
def test_effect(self):
effect = effects.DoubleReligionProfit(0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.DOUBLE_RELIGION_PROFIT, 0), 0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.DOUBLE_RELIGION_PROFIT,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.DoubleReligionProfit)
with self.check_changed(lambda: self.hero.double_religion_profit_probability):
self.apply_ability(ability)
class CompanionEatCorpsesTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionEatCorpses(3)
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_EAT_CORPSES))
self.assertFalse(effect._check_attribute(MODIFIERS.COMPANION_EAT_CORPSES_PROBABILITY))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_EAT_CORPSES, MODIFIERS.COMPANION_EAT_CORPSES_PROBABILITY))))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_EAT_CORPSES, 1), 1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_EAT_CORPSES_PROBABILITY, 1), 3)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_EAT_CORPSES, MODIFIERS.COMPANION_EAT_CORPSES_PROBABILITY)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionEatCorpses)
with self.check_changed(lambda: self.hero.can_companion_eat_corpses()):
with self.check_changed(lambda: self.hero.companion_eat_corpses_probability):
self.apply_ability(ability)
class CompanionRegenerateTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionRegenerate(2)
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_REGENERATE))
self.assertFalse(effect._check_attribute(MODIFIERS.COMPANION_REGENERATE_PROBABILITY))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_REGENERATE, MODIFIERS.COMPANION_REGENERATE_PROBABILITY))))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_REGENERATE, 10), 10)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_REGENERATE_PROBABILITY, 10), 20)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_REGENERATE, MODIFIERS.COMPANION_REGENERATE_PROBABILITY)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionRegenerate)
with self.check_changed(lambda: self.hero.can_companion_regenerate()):
with self.check_changed(lambda: self.hero.companion_regenerate_probability):
self.apply_ability(ability)
class CompanionEatAndDiscountTest(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionEat(0.5)
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_MONEY_FOR_FOOD))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_MONEY_FOR_FOOD,))))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_MONEY_FOR_FOOD, 2), 1.0)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_MONEY_FOR_FOOD,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionEat)
with self.check_changed(lambda: self.hero.can_companion_eat()):
with self.check_changed(lambda: self.hero.companion_money_for_food_multiplier):
self.apply_ability(ability)
class CompanionDrinkArtifactTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionDrinkArtifact(0.5)
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_DRINK_ARTIFACT))
self.assertFalse(effect._check_attribute(MODIFIERS.COMPANION_DRINK_ARTIFACT_PROBABILITY))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_DRINK_ARTIFACT, MODIFIERS.COMPANION_DRINK_ARTIFACT_PROBABILITY))))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_DRINK_ARTIFACT, 2), 2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_DRINK_ARTIFACT_PROBABILITY, 2), 1.0)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_DRINK_ARTIFACT, MODIFIERS.COMPANION_DRINK_ARTIFACT_PROBABILITY,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionDrinkArtifact)
with self.check_changed(lambda: self.hero.can_companion_drink_artifact()):
with self.check_changed(lambda: self.hero.companion_drink_artifact_probability):
self.apply_ability(ability)
class CompanionExorcistTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionExorcist(0.5)
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_EXORCIST))
self.assertFalse(effect._check_attribute(MODIFIERS.COMPANION_EXORCIST_PROBABILITY))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_EXORCIST, MODIFIERS.COMPANION_EXORCIST_PROBABILITY))))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_EXORCIST, 2), 2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_EXORCIST_PROBABILITY, 2), 1.0)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_EXORCIST, MODIFIERS.COMPANION_EXORCIST_PROBABILITY,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionExorcist)
with self.check_changed(lambda: self.hero.can_companion_do_exorcism()):
with self.check_changed(lambda: self.hero.companion_do_exorcism_probability):
self.apply_ability(ability)
class RestLenghtTests(BaseEffectsTests):
def test_effect(self):
effect = effects.RestLenght(3)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.REST_LENGTH, 12), 36)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.REST_LENGTH,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.RestLenght)
with self.check_changed(lambda: self.hero.rest_length):
self.apply_ability(ability)
class IDLELenghtTests(BaseEffectsTests):
def test_effect(self):
effect = effects.IDLELenght(3)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.IDLE_LENGTH, 12), 36)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.IDLE_LENGTH,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.IDLELenght)
with self.check_changed(lambda: self.hero.idle_length):
self.apply_ability(ability)
class CompanionBlockProbabilityTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionBlockProbability(3)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_BLOCK_PROBABILITY, 12), 36)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_BLOCK_PROBABILITY, )), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionBlockProbability)
with self.check_changed(lambda: self.hero.companion_block_probability_multiplier):
self.apply_ability(ability)
class HucksterTests(BaseEffectsTests):
def test_effect(self):
effect = effects.Huckster(buy_bonus_left=3, buy_bonus_right=3,
sell_bonus_left=2, sell_bonus_right=2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.BUY_PRICE, 12), 15)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.BUY_PRICE, MODIFIERS.SELL_PRICE)), 11), 11)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.SELL_PRICE, 130), 132)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.BUY_PRICE, MODIFIERS.SELL_PRICE)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.Huckster)
with self.check_changed(self.hero.buy_price):
with self.check_changed(self.hero.sell_price):
self.apply_ability(ability)
class EtherealMagnetTests(BaseEffectsTests):
def test_effect(self):
effect = effects.EtherealMagnet(0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.CHARACTER_QUEST_PRIORITY, 0), 0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.CHARACTER_QUEST_PRIORITY,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.EtherealMagnet)
with self.check_changed(lambda: self.hero.attribute_modifier(heroes_relations.MODIFIERS.CHARACTER_QUEST_PRIORITY)):
self.apply_ability(ability)
class CompanionTeleportTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionTeleport(0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_TELEPORTATOR, 0), 0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_TELEPORTATOR,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionTeleport)
with self.check_changed(lambda: self.hero.companion_teleport_probability):
self.apply_ability(ability)
class CompanionFly(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionFly(0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_FLYER, 0), 0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_FLYER,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionFly)
with self.check_changed(lambda: self.hero.companion_fly_probability):
self.apply_ability(ability)
class UnsociableTests(BaseEffectsTests):
def test_effect(self):
effect = effects.Unsociable(0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_LEAVE_IN_PLACE, 0), 0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_LEAVE_IN_PLACE,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.Unsociable)
with self.check_changed(lambda: self.hero.companion_leave_in_place_probability):
self.apply_ability(ability)
|
[
"[email protected]"
] | |
cefea000be2b8713b9d4ea548c735c4984caf7de
|
3904a5773c5aa047692895dce1225be7d84f5cc7
|
/ML_AI_TechWithTim/K-Means/K_Means.py
|
f33bc323b87c4aba7ff873f2b6d3cbe38641d449
|
[] |
no_license
|
snehilk1312/ML_1
|
063038586296c4f6f0ab92422a6c60dd007c4068
|
8e3b081b1037ab999ca78fa282ce7041730d082a
|
refs/heads/master
| 2020-09-07T20:01:45.509060 | 2020-03-15T15:44:54 | 2020-03-15T15:44:54 | 220,898,676 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,057 |
py
|
# Importing modules
import numpy as np
import sklearn
from sklearn.preprocessing import scale
from sklearn.datasets import load_digits
from sklearn.cluster import KMeans
from sklearn import metrics
# Loading Data sets
digits = load_digits()
data = scale(digits.data)
y = digits.target
k = len(np.unique(y)) # or here k=10
samples, features = data.shape
def bench_k_means(estimator, name, data):
estimator.fit(data)
print('%-9s\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
% (name, estimator.inertia_,
metrics.homogeneity_score(y, estimator.labels_),
metrics.completeness_score(y, estimator.labels_),
metrics.v_measure_score(y, estimator.labels_),
metrics.adjusted_rand_score(y, estimator.labels_),
metrics.adjusted_mutual_info_score(y, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean')))
clf = KMeans(n_clusters=k, init="random", n_init=10)
bench_k_means(clf, "1", data)
|
[
"[email protected]"
] | |
d39d72257df172bff8f5cd5099afaa33c2f712dd
|
923e430212ab0640d9e9d3a56332415821a1d734
|
/dz3/2.py
|
c0f13fed7d0bc66f6a14c4cda38d9b4e7563f3b7
|
[] |
no_license
|
OzoNeTT/aisd-labs
|
e054c87cb30497cd0e9d4db8b97c8754d0421921
|
416ae3a8538f949e08000d590dcafdaa85be31b5
|
refs/heads/master
| 2023-01-21T15:09:43.871769 | 2020-12-06T16:21:49 | 2020-12-06T16:21:49 | 318,797,904 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 904 |
py
|
matrix = [
['*', '*', '*', '*', '*'],
['*', ' ', '*', ' ', '*'],
['*', ' ', ' ', ' ', '*'],
['*', ' ', '*', ' ', 'E'],
['*', 'S', '*', '*', '*']
]
already_visited=[]
def solve1(x,y):
print((x,y))
if matrix[x][y] == "E":
print(*matrix, sep='\n')
return True
if matrix[x][y] == "*":
return False
if matrix[x][y] == "x":
return False
matrix[x][y] = "x"
if (x,y) in already_visited:
return False
already_visited.append((x,y))
if (x < len(matrix)-1 and solve1(x+1,y)):
return True
elif (y > 0 and solve1(x,y-1)):
return True
elif (x > 0 and solve1(x-1,y)):
return True
elif (y < len(matrix)-1 and solve1(x,y+1)):
return True
else:
return False
solve1(4,1)
|
[
"[email protected]"
] | |
4b4f20ebd7680757a8764a77720b31af1cef4c8a
|
17d4b72032e404ed45057c4fc5af04670b7c27b5
|
/7.11-Number Pattern.py
|
8f7ad45768d4c5b5237b66dcf1a3bcae9253a923
|
[] |
no_license
|
Mahendra710/Number_Pattern
|
0e10e93dec6f0f28c6e0916f813bfe4340f647db
|
2d2a3e0fb1b281092465700e965a87350227aafc
|
refs/heads/main
| 2023-04-18T01:44:27.456717 | 2021-05-12T05:34:11 | 2021-05-12T05:34:11 | 366,267,263 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 241 |
py
|
num=input("Enter an odd length number:")
length=len(num)
for i in range(length):
for j in range(length):
if i==j or i+j==length-1:
print(num[i],end=" ")
else:
print(" ",end=" ")
print()
|
[
"[email protected]"
] | |
cb9691b3450709a614b89aa79f2514474029d6d9
|
ea0c3e2e8e4167f5bc982d9b8619888b42c20e23
|
/py/bing_search_api.py
|
c017fdc369b5271963e4287b388e92c2f53cd3a6
|
[] |
no_license
|
data-bit/scripts
|
01eceb4ccf3532a9b02e232984316c72bb2d2df7
|
6e7cb8ee630b084a52af3e0c50b87b7d07e4197e
|
refs/heads/main
| 2023-05-09T00:36:28.347027 | 2021-06-01T19:11:52 | 2021-06-01T19:11:52 | 339,446,837 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,627 |
py
|
# $ mkdir dataset/charmander
# $ python search_bing_api.py --query "charmander" --output dataset/charmander
from requests import exceptions
import argparse
import requests
from cv2 import cv2
import os
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-q", "--query", required=True,
help="search query to search Bing Image API for")
ap.add_argument("-o", "--output", required=True,
help="path to output directory of images")
args = vars(ap.parse_args())
# set your Microsoft Cognitive Services API key along with (1) the
# maximum number of results for a given search and (2) the group size
# for results (maximum of 50 per request)
API_KEY = "YOUR_API_KEY_GOES_HERE"
MAX_RESULTS = 250
GROUP_SIZE = 50
# set the endpoint API URL
URL = "https://api.cognitive.microsoft.com/bing/v7.0/images/search"
# when attempting to download images from the web both the Python
# programming language and the requests library have a number of
# exceptions that can be thrown so let's build a list of them now
# so we can filter on them
EXCEPTIONS = set([IOError, FileNotFoundError,
exceptions.RequestException, exceptions.HTTPError,
exceptions.ConnectionError, exceptions.Timeout])
# store the search term in a convenience variable then set the
# headers and search parameters
term = args["query"]
headers = {"Ocp-Apim-Subscription-Key" : API_KEY}
params = {"q": term, "offset": 0, "count": GROUP_SIZE}
# make the search
print("[INFO] searching Bing API for '{}'".format(term))
search = requests.get(URL, headers=headers, params=params)
search.raise_for_status()
# grab the results from the search, including the total number of
# estimated results returned by the Bing API
results = search.json()
estNumResults = min(results["totalEstimatedMatches"], MAX_RESULTS)
print("[INFO] {} total results for '{}'".format(estNumResults,
term))
# initialize the total number of images downloaded thus far
total = 0
# loop over the estimated number of results in `GROUP_SIZE` groups
for offset in range(0, estNumResults, GROUP_SIZE):
# update the search parameters using the current offset, then
# make the request to fetch the results
print("[INFO] making request for group {}-{} of {}...".format(
offset, offset + GROUP_SIZE, estNumResults))
params["offset"] = offset
search = requests.get(URL, headers=headers, params=params)
search.raise_for_status()
results = search.json()
print("[INFO] saving images for group {}-{} of {}...".format(
offset, offset + GROUP_SIZE, estNumResults))
# loop over the results
for v in results["value"]:
# try to download the image
try:
# make a request to download the image
print("[INFO] fetching: {}".format(v["contentUrl"]))
r = requests.get(v["contentUrl"], timeout=30)
# build the path to the output image
ext = v["contentUrl"][v["contentUrl"].rfind("."):]
p = os.path.sep.join([args["output"], "{}{}".format(
str(total).zfill(8), ext)])
# write the image to disk
f = open(p, "wb")
f.write(r.content)
f.close()
        # catch any errors that would prevent us from downloading the
        # image
except Exception as e:
# check to see if our exception is in our list of
# exceptions to check for
if type(e) in EXCEPTIONS:
print("[INFO] skipping: {}".format(v["contentUrl"]))
continue
# try to load the image from disk
image = cv2.imread(p)
# if the image is `None` then we could not properly load the
# image from disk (so it should be ignored)
if image is None:
print("[INFO] deleting: {}".format(p))
os.remove(p)
continue
# update the counter
total += 1
|
[
"[email protected]"
] | |
a3cdabb184babdd424c3116978f8451da2f52324
|
96ee09c4da0fe9342339f900cb950aa73f5a7c27
|
/Winter 2020/CCPS 305/Assignments/Assignment 2/Ass2-Template.py
|
007671beb5519c6435839ecbded6bc8da5530436
|
[] |
no_license
|
kwxza/ryerson_school_work
|
191eefce7b7f789d3d705d68afdf581a5deafe8d
|
4d3300b78d1d17da474ef9e6a6708c068024a055
|
refs/heads/master
| 2022-10-29T22:55:44.976967 | 2020-06-18T01:29:30 | 2020-06-18T01:29:30 | 273,114,207 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 864 |
py
|
class MyTrie:
def __init__(self):
# Initialize the trie node as needed
pass
def insert(self, word):
# Insert a word into this trie node
pass
def exists(self, word, position=0):
# Return true if the passed word exists in this trie node
# A terminal node will return true if the word passed is ""
pass
def isTerminal(self):
# Return true if this node is the terminal point of a word
pass
def autoComplete(self, prefix, position=0):
# Return every word that extends this prefix in alphabetical order
pass
def __len__(self):
# Return the number of words that either terminate at this node or descend from this node
# A terminal leaf should have length 1, the node A with terminal child leaves B|C should have length 2
pass
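# Illustrative sketch: one possible dict-of-children implementation of the
# interface described above, added only to clarify the intended semantics of
# insert/exists/isTerminal/autoComplete/__len__. The class name _SketchTrie and
# its internals are hypothetical and are not part of the assignment template.
class _SketchTrie:
    def __init__(self):
        self.children = {}      # maps a single character to a child node
        self.terminal = False   # True if a word ends exactly at this node
    def insert(self, word):
        node = self
        for ch in word:
            node = node.children.setdefault(ch, _SketchTrie())
        node.terminal = True
    def exists(self, word):
        node = self
        for ch in word:
            if ch not in node.children:
                return False
            node = node.children[ch]
        return node.terminal
    def isTerminal(self):
        return self.terminal
    def autoComplete(self, prefix):
        # Walk to the prefix node, then collect descendant words alphabetically.
        node = self
        for ch in prefix:
            if ch not in node.children:
                return []
            node = node.children[ch]
        words = []
        def collect(n, acc):
            if n.terminal:
                words.append(acc)
            for ch in sorted(n.children):
                collect(n.children[ch], acc + ch)
        collect(node, prefix)
        return words
    def __len__(self):
        # Count words terminating at or below this node.
        count = 1 if self.terminal else 0
        return count + sum(len(child) for child in self.children.values())
# Example (illustrative): after inserting "bad", "ball" and "bat",
# autoComplete("ba") returns ['bad', 'ball', 'bat'] and len(trie) is 3.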
|
[
"[email protected]"
] | |
c3abef289fdb1b5edf9c22750beeb472ac9b5966
|
6d5d9cad3ef750c36da30e6cee16cc0165f05821
|
/venv/bin/python-config
|
a0723c82b24c1d5f3655350bb9118ce7becff764
|
[] |
no_license
|
CarloMara/FreeRoomFinder
|
f024ec966a614365fbca2e91d3a6550ae99320ef
|
3061e76006a839323b1ad9fa721e42a360e8693b
|
refs/heads/master
| 2021-04-30T08:26:28.772677 | 2018-02-13T13:48:56 | 2018-02-13T13:48:56 | 121,371,975 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,370 |
#!/home/carlo/Documents/uni/dei_free_room_scraper/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
|
[
"[email protected]"
] | ||
e953daf74af26ba80d58f622e7985c62eaf4cadd
|
76de53bd3923a57a36d0ed4b4a900b56050ebb31
|
/SW Expert Academy/190926/1263_사람 네트워크2.py
|
61dbab0dcf1c40b17376a408ca7e36d21934b1bb
|
[] |
no_license
|
Seungjin22/Algorithm
|
5b4fd53ae5742d830594d116e536531959b3454d
|
753dda47334e445f7a9e1e41df5e44564d99e79e
|
refs/heads/master
| 2020-09-04T08:54:01.359518 | 2020-02-03T10:41:05 | 2020-02-03T10:41:05 | 219,697,780 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 821 |
py
|
import sys
sys.stdin = open('1263_input.txt')
def AllPairsShortest(D):
for k in range(N):
for i in range(N):
if i != k:
for j in range(N):
if j != k and j != i:
D[i][j] = min(D[i][k] + D[k][j], D[i][j])
T = int(input())
for tc in range(1, T + 1):
data = list(map(int, input().split()))
N = data.pop(0)
dist = [[987654321] * N for _ in range(N)]
idx = 0
for i in range(N):
for j in range(N):
if i == j:
dist[i][j] = 0
if data[idx]:
dist[i][j] = data[idx]
idx += 1
AllPairsShortest(dist)
mini = 987654321
for i in range(N):
if sum(dist[i]) < mini:
mini = sum(dist[i])
print('#{} {}'.format(tc, mini))
|
[
"[email protected]"
] | |
cfe436e359c52cb80c53b6b3d45d67431576f12c
|
16f173135e81215d05ee8f475c13a16e3796e1fa
|
/Deep_Learning_with_Keras_in_Python/3.Improving_Your_Model_Performance/Learning the digits.py
|
4219e773851dd4e8ea25cc68e96088e4bed25bb3
|
[] |
no_license
|
jerry-mkpong/DataCamp
|
1b53821f1a32b48efdc8465251401721ba75bb56
|
10445bad35ef11567910ffab6ac70a980555a1b7
|
refs/heads/master
| 2022-11-11T03:57:21.923366 | 2020-06-28T17:36:10 | 2020-06-28T17:36:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,060 |
py
|
'''
You're going to build a model on the digits dataset, a sample dataset that comes pre-loaded with scikit-learn. The digits dataset consists of 8x8 pixel handwritten digits from 0 to 9.
You want to distinguish between each of the 10 possible digits given an image, so we are dealing with multi-class classification.
The dataset has already been partitioned into X_train, y_train, X_test, and y_test using 30% of the data as testing data. The labels are one-hot encoded vectors, so you don't need to use the Keras to_categorical() function.
Let's build this new model!
'''
# Instantiate a Sequential model
model = Sequential()
# Input and hidden layer with input_shape, 16 neurons, and relu
model.add(Dense(16, input_shape = (64,), activation = 'relu'))
# Output layer with 10 neurons (one per digit) and softmax
model.add(Dense(10, activation='softmax'))
# Compile your model
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
# Test if your model works and can process input data
print(model.predict(X_train))
|
[
"[email protected]"
] | |
b4577f6dc2ca7a3c75449f92e21cad3aa1b6b5fe
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2814/60652/240209.py
|
19b8d713af73e09dfece90f18c9ba12646de0b4a
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 162 |
py
|
n = int(input())
l=list(map(int,input().split()))
l.sort()
num_s=0
wait_time=0
for i in l:
if i>=wait_time:
num_s+=1
wait_time+=i
print(num_s)
|
[
"[email protected]"
] | |
f929e4e55338565906826f7583e63f82f9012ae0
|
9da2102516703f1c148085aabe867e7e9ec0b07c
|
/split/sample_prefixes.py
|
58129b9e2f64306a8a4a4e61e45a836a738adff8
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
CurtisASmith/comet-atomic-2020-gpt2-colab
|
d46c82aa4084a7ae0d56d9fd3f96a0bd81084943
|
6835d4ba9ef99d02811c9728e4e4c4b405eb278a
|
refs/heads/master
| 2023-03-27T04:40:46.890301 | 2021-03-12T11:37:21 | 2021-03-12T11:37:21 | 346,646,398 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,823 |
py
|
import argparse
import random
from utils import read_csv, write_jsonl
def sample_kg(args):
random.seed(args.random_seed)
data_file = args.input_file
data = read_csv(data_file, delimiter="\t")
prefixes = {}
for l in data:
prefix = l[0] + " " + l[1]
if prefix not in prefixes.keys():
prefixes[prefix] = {"head": l[0], "relation":l[1], "tails": []}
prefixes[prefix]["tails"].append(l[2])
excluded_relations = [
"HasPainIntensity",
"LocatedNear",
"LocationOfAction",
"DesireOf",
"NotMadeOf",
"InheritsFrom",
"InstanceOf",
"RelatedTo",
"SymbolOf",
"CreatedBy",
"NotHasA",
"NotIsA",
"NotHasProperty",
"NotCapableOf",
"IsA",
"DefinedAs"
]
print(len(list(prefixes.keys())))
rel_prefixes = [p for p in prefixes.keys() if prefixes[p]["relation"] not in excluded_relations]
print(len(rel_prefixes))
sampled_prefixes = random.sample(list(prefixes.keys()), args.sample_size)
samples = [prefixes[k] for k in sampled_prefixes]
rel_samples = [s for s in samples if s["relation"] not in excluded_relations]
print(len(rel_samples))
return samples
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--input-file', type=str, help='Dataset filename')
parser.add_argument('--output-file', type=str, help='Dataset filename')
parser.add_argument('--random-seed', type=int, default=30, help='Random seed')
parser.add_argument('--sample-size', type=int, default=5000, help='Dev size')
args = parser.parse_args()
# Load KG data
samples = sample_kg(args)
# Write tsv files
write_jsonl(args.output_file, samples)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
fc270e6c65f91953c840ea615d7b05fff50ca4c1
|
3555cea3c629130357ced9fb6f68f982fe6e11d1
|
/hypergan/trainers/sgd_adam_trainer.py
|
2475913fa8ef6391b3cf842b79225466c4c6fe33
|
[
"MIT"
] |
permissive
|
karimpedia/HyperGAN
|
0fbd75d7af918d59af2117fad675e021d5e59bbc
|
d354a5d92d69143a519e862d2b976accd86c5e2c
|
refs/heads/master
| 2021-01-11T15:17:50.976539 | 2017-01-21T21:21:00 | 2017-01-21T21:21:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,300 |
py
|
import tensorflow as tf
import numpy as np
from hypergan.util.globals import *
def initialize(config, d_vars, g_vars):
d_loss = get_tensor('d_loss')
g_loss = get_tensor('g_loss')
g_lr = np.float32(config['trainer.sgd_adam.generator.lr'])
d_lr = np.float32(config['trainer.sgd_adam.discriminator.lr'])
g_optimizer = capped_optimizer(tf.train.AdamOptimizer, g_lr, g_loss, g_vars)
d_optimizer = tf.train.GradientDescentOptimizer(d_lr).minimize(d_loss, var_list=d_vars)
return g_optimizer, d_optimizer
iteration = 0
def train(sess, config):
x_t = get_tensor('x')
g_t = get_tensor('g')
g_loss = get_tensor("g_loss_sig")
d_loss = get_tensor("d_loss")
d_fake_loss = get_tensor('d_fake_loss')
d_real_loss = get_tensor('d_real_loss')
g_optimizer = get_tensor("g_optimizer")
d_optimizer = get_tensor("d_optimizer")
d_class_loss = get_tensor("d_class_loss")
g_class_loss = get_tensor("g_class_loss")
_, d_cost = sess.run([d_optimizer, d_loss])
_, g_cost,d_fake,d_real,d_class = sess.run([g_optimizer, g_loss, d_fake_loss, d_real_loss, d_class_loss])
print("%2d: g cost %.2f d_fake %.2f d_real %.2f d_class %.2f" % (iteration, g_cost,d_fake, d_real, d_class ))
global iteration
iteration+=1
return d_cost, g_cost
|
[
"[email protected]"
] | |
00bb139bc7606403b576ce7cbadcf0745f8fc7fb
|
cc1eeb43eb9e4e83078f4c87e40a5c7fe56b109f
|
/Day05/shuixianhua.py
|
8cb8f1e3429d4bb2394b367a322d9a2886c2fb28
|
[] |
no_license
|
test-wsl/learn_100
|
d57ac4e8e7c062472273622351374decbae6d213
|
9fbb83455c15115b3cdec80d17c542e0aba2a6df
|
refs/heads/master
| 2020-08-29T22:43:10.800177 | 2019-11-04T08:17:38 | 2019-11-04T08:17:38 | 218,192,964 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 326 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
寻找水仙花数
水仙花数为一个三位数,每位上的立方之和正好等于本身
version: 0.1
"""
for num in range(100, 1000):
low = num %10
mid = num // 10 %10
high = num // 100
if num == low ** 3 + mid ** 3 + high **3:
print (num)
|
[
"weishl"
] |
weishl
|
b7cefdc89ffe8af8e4f60dd0df293e6dd83ec5c0
|
d08b3e6f9cb52b073d26b7a20a49e541268f0b44
|
/curso-online/teste02.py
|
83fa4e1ceb9a777ce16136e1e9afb79d01aef837
|
[] |
no_license
|
LaneriTiago/scripts-python
|
7d04179f01f1f764a27f40a35438d5128e6dbc18
|
22c6cfa47337633638ddadbebabe6d7dd883e511
|
refs/heads/master
| 2023-01-04T18:34:56.675749 | 2020-10-24T00:33:32 | 2020-10-24T00:33:32 | 291,186,304 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 115 |
py
|
num1 = input('Enter number 1: ')
num2 = input('Enter number 2: ')
resultado = num1 + num2
print (resultado)
|
[
"[email protected]"
] | |
6c3e19bfc980c5147668a28b04c39f84b4d67bef
|
83c7be03506bd42fb20616613c628acb00822de6
|
/utils/Send_email.py
|
0ae0fb78fc106f9f08a57956eb3ce8f9c289e44f
|
[] |
no_license
|
haoshf/AutoRF
|
5a1a6573715faf263785468d51c5ccc11b8cc8db
|
28dd50b808a5c04ae552a3ed73ea8efb773048e5
|
refs/heads/master
| 2023-05-12T04:11:30.240631 | 2021-06-04T05:49:14 | 2021-06-04T05:49:14 | 290,464,411 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,736 |
py
|
#coding=utf8
import smtplib
from email.header import Header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import datetime
from repository import models
# Third-party SMTP service
mail_host = "smtp.mxhichina.com" # SMTP server
mail_user = "[email protected]" # user name
mail_pass = "Lu201314" # authorization password (not the login password)
sender = '[email protected]'  # sender address (use the full address, otherwise sending may fail)
# mail configuration
receivers = ['[email protected]'] # recipients; can be set to your QQ mailbox or any other mailbox
def sendEmail(project_id,log_path):
date = datetime.datetime.now().strftime('%Y-%m-%d %X')
smtp = models.Smtp.objects.filter(project_name=project_id).first()
if smtp.enable:
        title = smtp.title.replace('${date}',date).replace('${project}',smtp.project_name.project_name) # mail subject
        # message = MIMEText(content, 'plain', 'utf-8')  # content, format, encoding
message = MIMEMultipart()
message['From'] = "{}".format(smtp.mail_user)
message['Subject'] = title
message.attach(MIMEText(smtp.documentation.replace('${date}',date).replace('${project}',smtp.project_name.project_name), 'html', 'utf-8'))
att1 = MIMEText(open('%slog.html'%log_path, 'rb').read(), 'base64', 'utf-8')
att1["Content-Type"] = 'application/octet-stream'
att1["Content-Disposition"] = 'attachment; filename="log.html"'
message.attach(att1)
att2 = MIMEText(open('%sreport.html'%log_path, 'rb').read(), 'base64', 'utf-8')
att2["Content-Type"] = 'application/octet-stream'
att2["Content-Disposition"] = 'attachment; filename="report.html"'
message.attach(att2)
try:
            smtpObj = smtplib.SMTP_SSL(smtp.mail_host, 465) # send over SSL; the port is usually 465
            smtpObj.login(smtp.mail_user, smtp.mail_pass) # log in and authenticate
message['To'] = smtp.receivers.replace('\n','').replace('\t','')
receivers = message['To'].split(',')
            smtpObj.sendmail(smtp.mail_user, receivers, message.as_string()) # send the message
print("mail has been send successfully.")
except smtplib.SMTPException as e:
print(e)
# def send_email2(SMTP_host, from_account, from_passwd, to_account, subject, content):
# email_client = smtplib.SMTP(SMTP_host)
# email_client.login(from_account, from_passwd) # create msg
# msg = MIMEText(content, 'plain', 'utf-8')
# msg['Subject'] = Header(subject, 'utf-8') # subject
# msg['From'] = from_account
# msg['To'] = to_account
# email_client.sendmail(from_account, to_account, msg.as_string())
# email_client.quit()
# if __name__ == '__main__':
# sendEmail()
return True
|
[
"[email protected]"
] | |
ed6e780555fec531ecbb2776df56d400039000ec
|
bfeb52b7c0faa33cff389cd6c197266aeeb5002a
|
/lab1/gauss.py
|
835866b807dda95a9b53e3ec1b4a549359588042
|
[] |
no_license
|
NaylyaZh99/numeric_methods
|
e7e737f56ca865d5ddd6debdc0eee5ed29c91ecd
|
cef2f4f4a9a0a13b8a90ce62f23f64c99e0fd396
|
refs/heads/master
| 2022-01-28T05:12:47.791370 | 2019-06-01T03:39:12 | 2019-06-01T03:39:12 | 189,677,020 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,297 |
py
|
import numpy as np
import time
import matplotlib.pyplot as plt
def gauss(n, A, f):
res = np.zeros(n)
for k in range(n):
for j in range(k + 1, n):
A[k][j] = A[k][j] / A[k][k]
f[k] = f[k] / A[k][k]
A[k][k] = 1
for i in range(k + 1, n):
for j in range(k + 1, n):
A[i][j] = A[i][j] - A[k][j] * A[i][k];
f[i] = f[i] - f[k] * A[i][k]
A[i][k] = 0
for i in range(n - 1, -1, -1):
res[i] = f[i]
for j in range(i + 1, n):
res[i] = res[i] - A[i][j] * res[j]
return res
X = np.array(0)
Y = np.array(0)
Y_lib = np.array(0)
n = int(input())
shift = int(input())
wastedTime = 0
while wastedTime <= 1:
X = np.append(X, n)
A = np.random.rand(n,n)
for i in range(n):
Sum = 0
for j in range(n):
if j != i:
Sum += abs(A[i][j])
A[i][i] += Sum
A_lib = np.array(A)
f = np.random.rand(n)
f_lib = np.array(f)
start = time.time()
x = gauss(n, A, f)
wastedTime = time.time() - start
print(wastedTime)
Y = np.append(Y, wastedTime)
start = time.time()
x_lib = np.linalg.solve(A_lib, f_lib)
wastedTime_lib = time.time() - start
Y_lib = np.append(Y_lib, wastedTime_lib)
n = n + shift
print(X)
print(Y)
print(Y_lib)
plt.plot(X, Y)
plt.plot(X, Y_lib)
plt.xlabel('matrix size')
plt.ylabel('sec')
plt.legend(("my realization", "integrated fuction"))
plt.show()
|
[
"[email protected]"
] | |
8196a6d153f61f9ad7d3d169b3850fb382e2b167
|
6963f191a3574edcfaecc265a363bc10d4cdfc19
|
/osf/management/commands/osf_shell.py
|
11ed88684d009e3e84f839751c5ea9a4012a6410
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-warranty-disclaimer",
"AGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-proprietary-license",
"MPL-1.1",
"CPAL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Apache-2.0"
] |
permissive
|
rdm-dev12/RDM-osf.io
|
8f3c2f7057b17512921292e84578d24ad4ca2bb5
|
14d9a924b8c6bc7d79fd34b87830ffa29acafed1
|
refs/heads/timestamp-v18.2.7.file_upload_x_of_y
| 2022-12-09T06:23:43.320341 | 2019-02-27T07:39:12 | 2019-02-27T07:39:12 | 172,862,723 | 0 | 0 |
Apache-2.0
| 2022-09-16T17:58:51 | 2019-02-27T07:07:48 |
Python
|
UTF-8
|
Python
| false | false | 7,816 |
py
|
"""Enhanced python shell.
Includes all features from django-extension's shell_plus command plus OSF-specific
niceties.
By default, sessions run in a transaction, so changes won't be commited until
you execute `commit()`.
All models are imported by default, as well as common OSF and Django objects.
To add more objects, set the `OSF_SHELL_USER_IMPORTS` Django setting
to a dictionary or a callable that returns a dictionary.
Example: ::
from django.apps import apps
def get_user_imports():
User = apps.get_model('osf.OSFUser')
Node = apps.get_model('osf.AbstractNode')
me = User.objects.get(username='[email protected]')
node = Node.objects.first()
return {
'me': me,
'node': node,
}
OSF_SHELL_USER_IMPORTS = get_user_imports
"""
from django.conf import settings
from django.db import transaction
from django.utils.termcolors import colorize
from django.db.models import Model
from django_extensions.management.commands import shell_plus
from django_extensions.management.utils import signalcommand
def header(text):
return colorize(text, fg='green', opts=('bold', ))
def format_imported_objects(models, osf, transaction, other, user):
def format_dict(d):
return ', '.join(sorted(d.keys()))
ret = """
{models_header}
{models}
{osf_header}
{osf}
{transaction_header}
{transaction}
{other_header}
{other}""".format(
models_header=header('Models:'),
models=format_dict(models),
osf_header=header('OSF:'),
osf=format_dict(osf),
transaction_header=header('Transaction:'),
transaction=format_dict(transaction),
other_header=header('Django:'),
other=format_dict(other),
)
if user:
ret += '\n\n{user_header}\n{user}'.format(
user_header=header('User Imports:'),
user=format_dict(user)
)
return ret
# kwargs will be the grouped imports, e.g. {'models': {...}, 'osf': {...}}
def make_banner(auto_transact=True, **kwargs):
logo = """
.+yhhys/`
`smmmmmmmmd:
`--.` ommmmmmmmmmm. `.--.
`odmmmmmh/ smmmhhyhdmmm- :ymmmmmdo.
-dmmmmmmmmmy .hho+++++sdo smmmmmmmmmm:
smmmmmmmmmmm: `++++++++: -mmmmmmmmmmmy
+mmmmmmmmmmmo: :+++++++.:+mmmmmmmmmmmo
+dmmmmmmmds++. .://:-``++odmmmmmmmmo
`:osyhys+++/ :+++oyhyso/`
`/shddds/``.-::-. `-::-.``/shdddy/`
-dmmmmmds++++/. ./++++sdmmmmmd:
hmmmmmmo+++++++. .++++++++dmmmmmd`
hmmmmmmo+++++++. .++++++++dmmmmmd`
-dmmmmmds++++/. ./++++sdmmmmmd:
`/shddhs/``.-::-. `-::-.``/shdddy/`
`:osyhys+++/ :+++oyhyso/`
+dmmmmmmmds++. .://:- `++odmmmmmmmmo
+mmmmmmmmmmmo: /++++++/`:+mmmmmmmmmmmo
smmmmmmmmmmm: `++++++++. -mmmmmmmmmmmy
-dmmmmmmmmmy `s++++++y/ smmmmmmmmmm:
`odmmmmmh/ hmmhyyhdmm/ :ymmmmmds.
`--.` `mmmmmmmmmmo `.--.
/mmmmmmmmh`
`+shhyo:
"""
greeting = 'Welcome to the OSF Shell. Happy hacking!'
imported_objects = format_imported_objects(**kwargs)
transaction_warning = """
*** TRANSACTION AUTOMATICALLY STARTED ***
To persist changes, run 'commit()'.
Keep in mind that changing documents will lock them.
This feature can be disabled with the '--no-transaction' flag."""
no_transaction_warning = """
*** AUTO-TRANSACTION DISABLED ***
All changes will persist. Transactions must be handled manually."""
template = """{logo}
{greeting}
{imported_objects}
{warning}
"""
if auto_transact:
warning = colorize(transaction_warning, fg='yellow')
else:
warning = colorize(no_transaction_warning, fg='red')
return template.format(
logo=colorize(logo, fg='cyan'),
greeting=colorize(greeting, opts=('bold', )),
imported_objects=imported_objects,
warning=warning,
)
class Command(shell_plus.Command):
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--no-transaction', action='store_false', dest='transaction',
help="Don't run session in transaction. Transactions must be "
'started manually with start_transaction()'
)
def get_osf_imports(self):
"""Return a dictionary of common OSF objects and utilities."""
from osf.management.utils import print_sql
from website import settings as website_settings
from framework.auth import Auth, get_user
ret = {
'print_sql': print_sql,
'Auth': Auth,
'get_user': get_user,
'website_settings': website_settings,
}
try: # faker isn't a prod requirement
from faker import Factory
except ImportError:
pass
else:
fake = Factory.create()
ret['fake'] = fake
return ret
def get_grouped_imports(self, options):
"""Return a dictionary of grouped import of the form:
{
'osf': {
'Auth': <framework.auth.Auth>,
....
}
'models': {...}
'transaction': {...}
'other': {...}
}
"""
auto_transact = options.get('transaction', True)
def start_transaction():
self.atomic.__enter__()
print('New transaction opened.')
def commit():
self.atomic.__exit__(None, None, None)
print('Transaction committed.')
if auto_transact:
start_transaction()
def rollback():
exc_type = RuntimeError
exc_value = exc_type('Transaction rollback')
self.atomic.__exit__(exc_type, exc_value, None)
print('Transaction rolled back.')
if auto_transact:
start_transaction()
groups = {
'models': {},
'other': {},
'osf': self.get_osf_imports(),
'transaction': {
'start_transaction': start_transaction,
'commit': commit,
'rollback': rollback,
},
'user': self.get_user_imports(),
}
# Import models and common django imports
shell_plus_imports = shell_plus.Command.get_imported_objects(self, options)
for name, object in shell_plus_imports.items():
if isinstance(object, type) and issubclass(object, Model):
groups['models'][name] = object
else:
groups['other'][name] = object
return groups
def get_user_imports(self):
imports = getattr(settings, 'OSF_SHELL_USER_IMPORTS', None)
if imports:
if callable(imports):
imports = imports()
return imports
else:
return {}
# Override shell_plus.Command
def get_imported_objects(self, options):
# Merge all the values of grouped_imports
imported_objects = {}
for imports in self.grouped_imports.values():
imported_objects.update(imports)
return imported_objects
# Override shell_plus.Command
@signalcommand
def handle(self, *args, **options):
self.atomic = transaction.atomic()
auto_transact = options.get('transaction', True)
options['quiet_load'] = True # Don't show default shell_plus banner
self.grouped_imports = self.get_grouped_imports(options)
banner = make_banner(auto_transact=auto_transact, **self.grouped_imports)
print(banner)
if auto_transact:
self.atomic.__enter__()
super(Command, self).handle(*args, **options)
|
[
"[email protected]"
] | |
c98aeb07e64d3799b2f504ba1c4037f3fe5d4773
|
0561347e739592204aa0711ff54c03ad5d484f25
|
/app_v1.0.py
|
6304529efec43df5bc416b5cae6712df5dddfa47
|
[] |
no_license
|
AikoChou/cht2017-demo
|
fd13aaec9294c30e937c2b61113ee4abb0daff03
|
6412f0675fd194caf7adc59e4844a0a7b7ed4396
|
refs/heads/master
| 2021-01-02T08:17:22.522379 | 2017-09-19T08:30:04 | 2017-09-19T08:30:04 | 98,986,123 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,525 |
py
|
import dash
from dash.dependencies import Input, Output, State, Event
import dash_core_components as dcc
import dash_html_components as html
import plotly.plotly as py ###
from plotly import graph_objs as go
from plotly.graph_objs import *
from flask import Flask
import pandas as pd
import numpy as np
import os
import copy
import functools32
server = Flask('my app')
server.secret_key = os.environ.get('secret_key', 'secret')
app = dash.Dash('CHTApp', server=server, url_base_pathname='/cht2017-demo/v1.0', csrf_protect=False)
if 'DYNO' in os.environ:
app.scripts.append_script({
'external_url': 'https://cdn.rawgit.com/chriddyp/ca0d8f02a1659981a0ea7f013a378bbd/raw/e79f3f789517deec58f41251f7dbb6bee72c44ab/plotly_ga.js'
})
external_css = ["https://cdnjs.cloudflare.com/ajax/libs/skeleton/2.0.4/skeleton.min.css",
"//fonts.googleapis.com/css?family=Raleway:400,300,600",
"//fonts.googleapis.com/css?family=Dosis:Medium",
"https://fonts.googleapis.com/css?family=Overpass:300,300i",
"https://cdn.rawgit.com/plotly/dash-app-stylesheets/dab6f937fd5548cebf4c6dc7e93a10ac438f5efb/dash-technical-charting.css"
"https://cdn.rawgit.com/plotly/dash-app-stylesheets/62f0eb4f1fadbefea64b2404493079bf848974e8/dash-uber-ride-demo.css",
"https://maxcdn.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css",
"https://cdnjs.cloudflare.com/ajax/libs/normalize/7.0.0/normalize.min.css",
]
#"https://cdn.rawgit.com/plotly/dash-app-stylesheets/5047eb29e4afe01b45b27b1d2f7deda2a942311a/goldman-sachs-report.css"
for css in external_css:
app.css.append_css({"external_url": css})
external_js = [ "https://code.jquery.com/jquery-3.2.1.min.js",
"https://cdn.rawgit.com/plotly/dash-app-stylesheets/a3401de132a6d0b652ba11548736b1d1e80aa10d/dash-goldman-sachs-report-js.js" ]
for js in external_js:
app.scripts.append_script({ "external_url": js })
mapbox_access_token = 'pk.eyJ1IjoiYWlrb2Nob3UiLCJhIjoiY2o1bWF2emI4M2ZoYjJxbnFmbXFrdHQ0ZCJ9.w0_1-IC0JCPukFL7Bpa92w'
# Global map layout
layout = dict(
showlegend = True,
legend={'x': 0, 'y': 1},
height=700,
margin=Margin(l=0, r=0, t=0, b=0),
autosize=True,
hovermode='closest',
mapbox=dict(
accesstoken=mapbox_access_token,
bearing=0,
center=dict(
lat=25.032969,
lon=121.565418
),
pitch=0,
zoom=9,
style='streets'
),
)
def initialize():
uid = "u_466924201064380"
file_list = ['20161021','20161123','20161220','20170116','20170213','20170217','20170222','20170303','20170317','20170512']
raw_dfs = {}
prepro_dfs = {}
for selectedData in ["raw", "prepro"]:
for f in file_list:
filename = f + '_' + selectedData + '.csv'
if selectedData == 'raw':
df = pd.read_csv(os.path.join('data', uid, selectedData, filename), dtype={'lon': str, 'lat': str})
raw_dfs[f] = df
else:
df = pd.read_csv(os.path.join('data', uid, selectedData, filename), dtype=str)
prepro_dfs[f] = df
cellular_dfs = {}
for f in file_list:
filename = f + '.csv'
df = pd.read_csv(os.path.join('data', uid, filename), dtype=str)
cellular_dfs[f] = df
result_dfs = pd.read_csv(os.path.join('data', uid, 'result.csv'), dtype={'uid':str, 'date':str})
return raw_dfs, prepro_dfs, cellular_dfs, result_dfs
app.layout = html.Div([
html.Div([
html.Div([
html.H2("Transportation Mode Detection",
style={'font-family': 'Dosis', 'float': 'left', 'position': 'relative', 'top': '30px'}),
html.P("Public transportation mode detection with cellular data.\
Select different users and days using the dropdowns below.\
", className="explanationParagraph twelve columns",
style={'float': 'left', 'position': 'relative', 'top': '20px', 'fontSize': 20}),
], className='row'),
html.Hr(style={'margin': '0', 'margin-bottom': '5'}),
html.Div([
html.Div([
html.P('Select user:', style={'fontSize': 17, 'marginBottom': 1}),
dcc.Dropdown(
id='user-dropdown',
options=[
{'label': 'WCPeng :)', 'value': 'u_466924201064380'},
],
value="u_466924201064380",
placeholder="Please choose an user",
className="user-picker"
),
html.Div([
dcc.RadioItems(
id='radio-selector',
options=[
{'label': 'Raw Cellular Data ', 'value': 'raw'},
{'label': 'Preprocessed Data ', 'value': 'prepro'},
{'label': 'Mode Detection ', 'value': 'mode'}
],
value='raw',
labelStyle={'display': 'inline-block'}
),
],style={'marginTop': '10', 'marginLeft': '7'}),
],className='six columns'),
html.Div([
html.P('Select day:', style={'fontSize': 17, 'marginBottom': 1}),
dcc.Dropdown(
id='day-dropdown',
placeholder="Please choose a day",
value='20161123',
),
html.Div([
dcc.Checklist(
id='lock-selector',
options=[
{'label': 'Lock camera', 'value': 'lock'}
],
values=[],
inputStyle={"z-index": "3"}
),
],style={'marginTop': '10', 'marginLeft': '7'})
], className='six columns'),
], className='row'),
html.Div([
dcc.Graph(id='map-graph'),
]),
html.P("", id="popupAnnotation", className="popupAnnotation", style={'color': 'black', 'fontSize': 20, 'font-family': 'Dosis'}),
], style={'margin': 'auto auto'}),
html.Hr(style={'margin': '0', 'margin-bottom': '5'}),
dcc.Markdown("[NCTU-ADSL/cht2017-demo](https://github.com/NCTU-ADSL-public/cht2017-demo)",
className="source"),
], className='ten columns offset-by-one')
def fetch_raw_prepro_dataframe(uid, date, selectedData):
if selectedData == 'raw':
df = raw_dfs[date]
else:
df = prepro_dfs[date]
return df
def fetch_mode_dataframe(uid, date):
result_df = result_dfs[result_dfs.date == date]
ntrips = result_df.shape[0]
trip_dfs = []
trip_mode = []
for i in range(ntrips):
trip_start_t = result_df.iloc[i]['start_t']
trip_end_t = result_df.iloc[i]['end_t']
cellular_df = cellular_dfs[date]
cellular_df.ctimestamp = pd.to_datetime(cellular_df.ctimestamp)
trip_df = cellular_df[(cellular_df.ctimestamp >= pd.to_datetime(trip_start_t))&(cellular_df.ctimestamp <= pd.to_datetime(trip_end_t))]
trip_df = trip_df.sort_values(by='ctimestamp')
trip_dfs.append(trip_df)
trip_mode.append(result_df.iloc[i]['mode'])
return result_df, trip_dfs, trip_mode
@app.callback(Output('day-dropdown','options'),[
Input("user-dropdown", "value"), Input('radio-selector', 'value')])
def set_day_options(uid, selectedData):
if uid == "u_466924201064380":
options=[
{'label': '2016-10-21', 'value': '20161021'},
{'label': '2016-11-23', 'value': '20161123'},
{'label': '2016-12-20', 'value': '20161220'},
{'label': '2017-01-16', 'value': '20170116'},
{'label': '2017-02-13', 'value': '20170213'},
{'label': '2017-02-17', 'value': '20170217'},
{'label': '2017-02-22', 'value': '20170222'},
{'label': '2017-03-03', 'value': '20170303'},
{'label': '2017-03-17', 'value': '20170317'},
{'label': '2017-05-12', 'value': '20170512'},]
return options
@app.callback(Output("popupAnnotation", "children"),
[Input("user-dropdown", "value"), Input("day-dropdown", "value"),
Input("radio-selector", "value")])
def set_pop_annotation(uid, date, selectedData):
if date in ['20170217','20170303','20170317'] and selectedData == 'mode':
return "No transportation mode detected!"
else:
return ""
@app.callback(Output("map-graph", "figure"),[
Input("user-dropdown", "value"), Input("day-dropdown", "value"),
Input("radio-selector", "value")],[
State('lock-selector', 'values'),
State('map-graph', 'relayoutData')])
def update_graph(uid, date, selectedData, lockSelector, prevLayout):
if selectedData == 'raw':
df = fetch_raw_prepro_dataframe(uid, date, selectedData)
total = df['pop'].sum()
df['text'] = 'Occurrence ' + df['pop'].astype(str) + ' / ' + str(total)
data = Data([
Scattermapbox(
lon=df['lon'],
lat=df['lat'],
mode='markers',
marker=dict(
size=df['pop']*21,
sizemode='area',
opacity=0.3,
color='black',
),
hoverinfo='skip',
),
Scattermapbox(
lat=df['lat'],
lon=df['lon'],
text=df['text'],
mode='markers',
marker=dict(
size=df['pop']*20,
sizemode = 'area',
color='mediumvioletred',
opacity=0.8,
),
hoverinfo = "text",
name = "Cellular Points",
),
])
layout['mapbox']['center']['lon'] = df['lon'].astype(float).mean()
layout['mapbox']['center']['lat'] = df['lat'].astype(float).mean()
layout['showlegend'] = False
elif selectedData == 'prepro' or (date in ['20170217','20170303','20170317'] and selectedData == 'mode'):
df = fetch_raw_prepro_dataframe(uid, date, selectedData)
endpt_size=20
scale=30
data = Data([
Scattermapbox(
lat=[df.lat.loc[i] for i in df.index],
lon=[df.lon.loc[i] for i in df.index],
text=[df.start_t.loc[i]+' - '+df.end_t.loc[i]+'<br>Stayed '+df.stay_t.loc[i]+'s' for i in df.index],
mode='markers+lines',
marker=Marker(
color="dimgray",
size=15,
),
hoverinfo = "text",
name = "Cellular Trajectory"
)
])
layout['mapbox']['center']['lon'] = np.mean([float(df.lon.loc[i]) for i in df.index])
layout['mapbox']['center']['lat'] = np.mean([float(df.lat.loc[i]) for i in df.index])
layout['showlegend'] = True
elif selectedData == 'mode':
result_df, trip_dfs, trip_mode = fetch_mode_dataframe(uid, date)
colors = {"hsr": "rgb(0,116,217)", "mrt": "rgb(255,65,54)", "bus": "rgb(133,20,75)", "train": "rgb(255,133,27)"}
names = {"hsr": "HSR trip", "mrt": "MRT trip", "bus": "BUS trip", "train": "TRA trip"}
endpt_size = 25
trips = []
for k in range(len(trip_dfs)):
trip_df = trip_dfs[k]
labels = [trip_df.ctime.loc[i].split(',')[0] for i in trip_df.index]
labels[0] = result_df.iloc[k].s_id+' : '+result_df.iloc[k].s_name+'<br>'+labels[0]
labels[-1]= result_df.iloc[k].e_id+' : '+result_df.iloc[k].e_name+'<br>'+labels[-1]
trip = Scattermapbox(
lat = [trip_df.y.loc[i] for i in trip_df.index],
lon = [trip_df.x.loc[i] for i in trip_df.index],
text= labels,
mode= 'markers+lines',
marker=Marker(
size=[endpt_size] + [10 for j in range(len(trip_df.index) - 2)] + [endpt_size],
color=colors[trip_mode[k]]
),
name = names[trip_mode[k]]+': '+result_df.iloc[k].s_name+' --> ' + result_df.iloc[k].e_name,
hoverinfo = "text",
)
trips.append(trip)
data = Data(trips)
layout['showlegend'] = True
if (prevLayout is not None and lockSelector is not None and
'lock' in lockSelector):
layout['mapbox']['center']['lon'] = float(prevLayout['mapbox']['center']['lon'])
layout['mapbox']['center']['lat'] = float(prevLayout['mapbox']['center']['lat'])
layout['mapbox']['zoom'] = float(prevLayout['mapbox']['zoom'])
fig = dict(data=data, layout=layout)
return fig
@app.server.before_first_request
def defineTotalDF():
global raw_dfs, prepro_dfs, cellular_dfs, result_dfs
raw_dfs, prepro_dfs, cellular_dfs, result_dfs = initialize()
if __name__ == '__main__':
#app.run_server(debug=True) #localhost
app.run_server(host='0.0.0.0', port=8050, debug=False)
|
[
"[email protected]"
] | |
9fa0535a2ff8ad4adeef16e987ef257e79ac819b
|
8434c17248f32817626b9865ebcc8912e18cf18e
|
/beginner_source/examples_nn/two_layer_net_optim.py
|
6f0bd5ba1ff18b32eb3531e4da649ec12d5da719
|
[
"BSD-3-Clause"
] |
permissive
|
codingbowoo/PyTorch-tutorials-kr
|
47868f8ec5aa45ff8de087f92d15d9d1aaed08d0
|
0a41550cdba4c5b1d67b652ccc66cb30ce16a762
|
refs/heads/master
| 2022-12-18T14:17:53.251189 | 2020-09-10T00:56:33 | 2020-09-10T00:56:33 | 278,267,902 | 2 | 0 |
BSD-3-Clause
| 2020-08-13T03:44:15 | 2020-07-09T05:03:57 | null |
UTF-8
|
Python
| false | false | 2,667 |
py
|
# -*- coding: utf-8 -*-
"""
PyTorch: optim
--------------
A fully-connected ReLU network with one hidden layer and no biases, trained to
predict y from x by minimizing squared Euclidean distance.
This implementation uses PyTorch's nn package to build the network.
Rather than manually updating the weights of the model as we have been doing,
we use the optim package to define an Optimizer that will update the weights
for us. The optim package defines many optimization algorithms commonly used
for deep learning, such as SGD+momentum, RMSProp, and Adam.
"""
import torch
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs.
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
# Use the nn package to define our model and loss function.
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
loss_fn = torch.nn.MSELoss(reduction='sum')
# Use the optim package to define an Optimizer that will update the weights of
# the model for us. Here we will use Adam; the optim package contains many other
# optimization algorithms. The first argument to the Adam constructor tells the
# optimizer which Tensors it should update.
learning_rate = 1e-4
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
for t in range(500):
    # Forward pass: compute predicted y by passing x to the model.
y_pred = model(x)
    # Compute and print the loss.
loss = loss_fn(y_pred, y)
if t % 100 == 99:
print(t, loss.item())
    # Before the backward pass, use the optimizer object to zero all of the
    # gradients for the variables it will update (the learnable weights of the
    # model). This is because, by default, gradients are accumulated in buffers
    # (i.e. not overwritten) whenever .backward() is called. See the documentation
    # of torch.autograd.backward for more details.
optimizer.zero_grad()
    # Backward pass: compute the gradient of the loss with respect to the model parameters.
loss.backward()
    # Calling the step function on an Optimizer updates its parameters.
optimizer.step()
|
[
"[email protected]"
] | |
2886b9662212764044f3302a3eccaad0cdff4912
|
67ceb35320d3d02867350bc6d460ae391e0324e8
|
/practice/0189-Rotate_Array.py
|
14b099e5eeadce2d9badb37bee07eebce138511c
|
[] |
no_license
|
mattjp/leetcode
|
fb11cf6016aef46843eaf0b55314e88ccd87c91a
|
88ccd910dfdb0e6ca6a70fa2d37906c31f4b3d70
|
refs/heads/master
| 2023-01-22T20:40:48.104388 | 2022-12-26T22:03:02 | 2022-12-26T22:03:02 | 184,347,356 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 425 |
py
|
# 189. Rotate Array
#
# Given an array, rotate the array to the right by k steps, where k is
# non-negative.
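# Illustrative example (values chosen for illustration): with nums = [1, 2, 3, 4, 5, 6, 7]
# and k = 3, every element moves three positions to the right with wrap-around,
# giving [5, 6, 7, 1, 2, 3, 4]. The solution below builds exactly this result by
# writing nums[i] into position (i + k) % len(nums).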
class Solution:
def rotate(self, nums: List[int], k: int) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
results = [None] * len(nums)
for i, num in enumerate(nums):
results[(i+k) % len(nums)] = num
for i, result in enumerate(results):
nums[i] = result
|
[
"[email protected]"
] | |
fa4afd515b85832e48027c3718d29480726e3223
|
8954988f7c9aa9dd9ae984b5f214d45d981b612b
|
/global_codes/global_values.py
|
8171196e470210747db9bdc582c0b47d4e341453
|
[] |
no_license
|
MoamerEncsConcordiaCa/GED_python
|
5494c7f0523d938b9832ee155d71c0dca5d3e1f6
|
7ad9f96e4d7b2ca6b1b91ec2386b4fa93ceacd67
|
refs/heads/master
| 2021-01-10T22:11:57.151730 | 2016-11-19T06:48:14 | 2016-11-19T06:48:14 | 42,900,832 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 657 |
py
|
'''
Created on Nov 30, 2014
@author: ameriPC
'''
gt = 'gt'
cv_list = ['cv1', 'cv2', 'cv3', 'cv4']
experiment_set_path = 'set_of_ids'
experiment_scores = 'scores'
experiment_parameters = 'parameters'
data_set_path = '../../wordspotting_dataset'
out_dir_path = '../../out_dir'
task_list_path = 'jobs'
Ce= [0, 1, 3, 5]
Cn= [1, 3, 5]
alpha = [.5]
alg_type = ['HED']
norm_type_list = ['nodes_number_method','ins_del_method']
DATA_SET_PATH = 'wordspotting_dataset'
DATA_SET_PATH_GRAPHS = 'graphs'
DATA_SET_PATH_IMAGES = 'images'
PERFORMANCE_PATH = 'performace'
GLOBAL_TRESHOLD_PATH = 'global_treshold'
LOCAL_TRESHOLD_PATH = 'local_threshod'
|
[
"[email protected]"
] | |
a236a68804d645dd9a685c2644be5573fa15f618
|
7ee7e0e24f24e77dc3f532561554ffaf9d59d75e
|
/insertTable.py
|
36735944f084b71d2afddbe0a272b56cd100328f
|
[] |
no_license
|
vidhlakh/SQLiteExploration
|
58b74f3051f3f52179b624130fd55470f268d636
|
3136fc067b9285e1758699fa9b3be2d6e13f3bf5
|
refs/heads/master
| 2022-08-06T18:56:11.720261 | 2020-05-21T14:19:09 | 2020-05-21T14:19:09 | 265,867,473 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 744 |
py
|
import sqlite3
conn = sqlite3.connect("mydatabase.db") # or use :memory: to put it in RAM
cursor = conn.cursor()
# insert some data
cursor.execute("INSERT INTO albums VALUES ('Glow', 'Andy Hunter', '7/24/2012', 'Xplore Records', 'MP3')")
# save data to database
conn.commit()
# insert multiple records using the more secure "?" method
albums = [('Exodus', 'Andy Hunter', '7/9/2002', 'Sparrow Records', 'CD'),
('Until We Have Faces', 'Red', '2/1/2011', 'Essential Records', 'CD'),
('The End is Where We Begin', 'Thousand Foot Krutch', '4/17/2012', 'TFKmusic', 'CD'),
('The Good Life', 'Trip Lee', '4/10/2012', 'Reach Records', 'CD')]
cursor.executemany("INSERT INTO albums VALUES (?,?,?,?,?)", albums)
conn.commit()
|
[
"[email protected]"
] | |
8a9dc1cce37df30579ca1364c3462cffc1c5aa18
|
6c32f0308700ca3c635ea83780f911298d2585cc
|
/LongestRepeatSubstring.py
|
28913161bbb984be6602caebacad51aee1a77d66
|
[] |
no_license
|
bitpeng/algorithm-exercise
|
ece2d461c93d7752674586a8ab894216e54f7b07
|
d621326e3cbc2cc2d6a127cf9e70ccbb8d010541
|
refs/heads/master
| 2021-01-01T17:05:53.685921 | 2015-09-05T15:34:00 | 2015-09-05T15:34:00 | 41,278,409 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 811 |
py
|
import random
import time
t = time.time()
def LongestPrefix(s1, s2):
if not s1 or not s2:
return 0, ""
i = 0
n1 = len(s1); n2 = len(s2)
while i < n1 and i < n2 and s1[i] == s2[i]:
i += 1
return i, s1[:i]
def LongestRepeatSubstring(s):
if not s:
return ""
a = [s[i:] for i in range(len(s))]
a.sort()
ret = 0; retstr = ""
for i in range(len(a) - 1):
tmp, substr = LongestPrefix(a[i], a[i + 1])
if tmp > ret:
ret = tmp; retstr = substr
return ret, retstr
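# The approach above is a simple suffix-sorting scheme: generate every suffix of s,
# sort the suffixes lexicographically, and take the longest common prefix of any two
# adjacent suffixes, which is the longest repeated substring. For "abcdefcdef" the
# adjacent suffixes "cdef" and "cdefcdef" share the prefix "cdef", matching the
# first test case below.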
print LongestRepeatSubstring("abcdefcdef")
print LongestRepeatSubstring("abcdefcdefc")
print LongestRepeatSubstring("abcdefcdefcd")
s = ''.join([chr(random.randint(97, 105)) for i in range(5000)])
#print s
print LongestRepeatSubstring(s)
print time.time() - t
|
[
"[email protected]"
] | |
066b82d0dff811aba0d736008f3c2018c5ac0134
|
62ad933ee61f67847d8f2d1cb74f8b2582efac2c
|
/scripts/a0_optimize_vkdv.py
|
e026eda67f59f7dfd88c510ec7a168977e9e128e
|
[] |
no_license
|
liqun1981/kdv_uncertainty
|
add808d3327263c55124cff3c46965d043dceb31
|
24ed0a3f7944bcf4e503115f22a9fb5b5f6e6fd6
|
refs/heads/master
| 2020-06-26T06:51:52.835686 | 2019-07-26T07:35:00 | 2019-07-26T07:35:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,236 |
py
|
# coding: utf-8
# # Find initial conditions for $$a_0$$ by optimizing the KdV model
# In[3]:
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
from datetime import datetime
import h5py
from scipy import signal
from scipy.optimize import minimize
from soda.utils.timeseries import timeseries, skill, rmse
from soda.utils.uspectra import uspectra, getTideFreq
from soda.utils.othertime import SecondsSince
#from iwaves.kdv.solve import solve_kdv
from iwaves.kdv.vkdv import vKdV
import os
import matplotlib as mpl
import yaml
def double_tanh_6(beta, z):
return beta[0,...] - beta[1,...]*(np.tanh((z+beta[2,...])/beta[3,...])
+ np.tanh((z+beta[2,...] + beta[4,...])/beta[5,...]))
def maximum_amplitude_finder(amp_signal):
amp_min = np.min(amp_signal)
amp_max = np.max(amp_signal)
if np.abs(amp_min)>amp_max:
return amp_min, np.argwhere(amp_signal==amp_min)[0][0]
else:
return amp_max, np.argwhere(amp_signal==amp_max)[0][0]
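# Illustrative example: maximum_amplitude_finder(np.array([0.1, -0.5, 0.3])) returns
# (-0.5, 1), i.e. the signed extremum of largest magnitude together with its index.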
def zeroic(x, a_0, L_w, x0=0.):
return 0*x
def start_kdv(infile, rho, z, depthfile):
# Parse the yaml file
with open(infile, 'r') as f:
args = yaml.load(f)
kdvargs = args['kdvargs']
kdvargs.update({'wavefunc':zeroic})
kdvargs.update({'verbose':False})
runtime = args['runtime']['runtime']
ntout = args['runtime']['ntout']
xpt = args['runtime']['xpt']
# Parse the density and depth files
depthtxt = np.loadtxt(depthfile, delimiter=',')
# Initialise the KdV class
mykdv = vKdV(rho, z, depthtxt[:,1], x=depthtxt[:,0], **kdvargs)
return mykdv
def run_vkdv(a0,mykdv, infile, verbose=True):
# Need to reset the amplitude variables and time step
mykdv.B *= 0
mykdv.B_n_m1 *= 0
mykdv.B_n_m2 *= 0
mykdv.B_n_p1 *= 0
mykdv.t = 0
with open(infile, 'r') as f:
args = yaml.load(f)
kdvargs = args['kdvargs']
kdvargs.update({'wavefunc':zeroic})
runtime = args['runtime']['runtime']
ntout = args['runtime']['ntout']
xpt = args['runtime']['xpt']
# Find the index of the output point
idx = np.argwhere(mykdv.x > xpt)[0][0]
# Initialise an output array
nsteps = int(runtime//mykdv.dt_s)
nout = int(runtime//ntout)
B = np.zeros((nout, mykdv.Nx)) # Spatial amplitude function
tout = np.zeros((nout,))
B_pt = np.zeros((nsteps, )) # Spatial amplitude function
tfast = np.zeros((nsteps,))
output = []
def bcfunc(t):
omega = 2*np.pi/(12.42*3600.)
return -a0*np.sin(omega*t)
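    # bcfunc forces the left boundary with a sinusoid of amplitude a0 whose period
    # (12.42 h) corresponds to the M2 semidiurnal tide.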
## Run the model
nn=0
for ii in range(nsteps):
# Log output
point = nsteps//100
if verbose:
if(ii % (5 * point) == 0):
print( '%3.1f %% complete...'%(float(ii)/nsteps*100))
print(mykdv.B.max(), bcfunc(mykdv.t))
if mykdv.solve_step(bc_left=bcfunc(mykdv.t)) != 0:
print( 'Blowing up at step: %d'%ii)
break
## Evalute the function
#if myfunc is not None:
# output.append(myfunc(mykdv))
# Output data
if (mykdv.t%ntout) < mykdv.dt_s:
#print ii,nn, mykdv.t
B[nn,:] = mykdv.B[:]
tout[nn] = mykdv.t
nn+=1
# Output single point
B_pt[ii] = mykdv.B[idx]
tfast[ii] = mykdv.t
# Save to netcdf
ds = mykdv.to_Dataset()
xray = xr
# Create a dataArray from the stored data
coords = {'x':mykdv.x, 'time':tout}
attrs = {'long_name':'Wave amplitude', 'units':'m'}
dims = ('time','x')
Bda = xray.DataArray(B,
dims = dims,\
coords = coords,\
attrs = attrs,\
)
coords = {'timefast':tfast}
attrs = {'long_name':'Wave Amplitude Point',
'units':'m',
'x-coord':xpt}
dims = ('timefast',)
Bpt = xray.DataArray(B_pt,
dims = dims,\
coords = coords,\
attrs = attrs,\
)
ds2 = xray.Dataset({'B_t':Bda,'B_pt':Bpt})
#return ds2.merge( ds, inplace=True )
#return ds.merge(ds2, inplace=True)
#return ds.merge( xray.Dataset({'B_t':Bda,'B_pt':Bpt}), inplace=False )
return maximum_amplitude_finder(B_pt)[0], ds.merge( ds2, )
def fdiff(a0, Amax, mykdv, infile):
"""
Optimization function
"""
Aguess,ds = run_vkdv(a0, mykdv, infile, verbose=False)
# Change the sign Amax if the two aren't equal...
if np.sign(Amax) != np.sign(Aguess):
Amaxo = -1*Amax
else:
Amaxo = 1*Amax
print(a0, Amaxo, Aguess )
return (Aguess - Amaxo)**2.
def optimize_kdv(csvfile, depthfile, infile, outfilestr, overwrite=True):
#pd.read_csv?
# Load the csv file with the representative beta's and target A_max
data = pd.read_csv(csvfile, sep=', ', parse_dates=['time','timemax'])
# Load the depth
depthtxt = np.loadtxt(depthfile, delimiter=',')
z = np.arange(-depthtxt[0,1],5,5)[::-1]
# For each time step
for tt in data.index:
print(72*'#')
print('Optimizing for time step %s'%data['time'][tt])
print(72*'#')
outfile = '%s_%s.nc'%(outfilestr,data['time'][tt].strftime('%Y-%m-%d'))
if os.path.exists(outfile):
if overwrite is False:
print('File %s exists moving on...'%outfile)
continue
# Load beta and Amax
beta = np.array([data['beta0'][tt],\
data['beta1'][tt],\
data['beta2'][tt],\
data['beta3'][tt],\
data['beta4'][tt],\
data['beta5'][tt]])
Amax = data['Amax'][tt]
rho = double_tanh_6(beta,z)
# Launch a KdV instance
mykdv = start_kdv(infile, rho, z, depthfile)
# Minimize
print('Optimizing...')
print('a_0, A_max, A_model')
a0guess = min(np.abs(Amax),20)
soln = minimize(fdiff, a0guess,
args = (Amax,mykdv, infile),
bounds=[(0,50.0)],
method='SLSQP', options={'eps':1e-4, 'ftol':1e-1}
#method='CG',options={'eps':1e-4, 'gtol':1e-2}
)
print(soln)
# Run the model again with the optimal solution and save the output
Aguess, ds2 = run_vkdv( soln['x'], mykdv, infile, verbose=False)
# Update the global attributes
ds2.attrs.update({'Amax':Amax,'a0':soln['x'][0]})
#plt.figure()
#ds2.B_pt.plot()
#
#plt.figure()
#ds2.B_t.plot()
#ds2
# Write to a file
print('Saving to file: %s...'%outfile)
ds2.to_netcdf(outfile)
####################################
#
## Prelude input
#csvfile = 'data/vkdv_inputs_prelude.csv'
#depthfile = 'data/kdv_bathy_Prelude.csv'
#infile = 'data/kdvin_prelude_ekdv.yml'
#outfilestr = 'data/ekdv_optimal_a0_Prelude'
#optimize_kdv(csvfile, depthfile, infile, outfilestr)
# IMOS PIL transect
#csvfile = 'data/vkdv_inputs_mode2_imospil200.csv'
#depthfile = 'data/kdv_bathy_PILIMOS_curved.csv'
#infile = 'data/kdvin_imospil_mode2_ekdv.yml'
#outfilestr = 'data/ekdv_mode2_optimal_a0_PILIMOS'
#overwrite=True
#optimize_kdv(csvfile, depthfile, infile, outfilestr, overwrite=overwrite)
csvfile = 'data/vkdv_inputs_mode2_imospil200.csv'
depthfile = 'data/kdv_bathy_PILIMOS_curved.csv'
infile = 'data/kdvin_imospil_mode2_kdv.yml'
outfilestr = 'data/kdv_mode2_optimal_a0_PILIMOS'
overwrite=True
optimize_kdv(csvfile, depthfile, infile, outfilestr, overwrite=overwrite)
#csvfile = 'data/vkdv_inputs_imospil200.csv'
#depthfile = 'data/kdv_bathy_PILIMOS_curved.csv'
#infile = 'data/kdvin_imospil_ekdv.yml'
#outfilestr = 'data/ekdv_optimal_a0_PILIMOS'
#overwrite=True
#optimize_kdv(csvfile, depthfile, infile, outfilestr, overwrite=overwrite)
#
#csvfile = 'data/vkdv_inputs_imospil200.csv'
#depthfile = 'data/kdv_bathy_PILIMOS_curved.csv'
#infile = 'data/kdvin_imospil.yml'
#outfilestr = 'data/kdv_optimal_a0_PILIMOS'
#optimize_kdv(csvfile, depthfile, infile, outfilestr, overwrite=overwrite)
###############
|
[
"[email protected]"
] | |
ed7c33aabc46b64f71b665ec8a3893f6541bdb34
|
e0062e1a23e9dc1dbd3215d1aad601610e114078
|
/App/csv_scraper.py
|
e1b21e303c9b57e78aa724be0c44fb6b06830ba8
|
[] |
no_license
|
Vedantsahai18/Aviation-Winners
|
293d3a9c260d9be85b19495ce076d70426d8da58
|
35ee757812154742c2d62bb488ccd28b140f3963
|
refs/heads/master
| 2022-12-10T14:36:57.952197 | 2020-06-21T18:14:06 | 2020-06-21T18:14:06 | 174,552,809 | 0 | 0 | null | 2022-12-06T04:30:09 | 2019-03-08T14:34:26 |
HTML
|
UTF-8
|
Python
| false | false | 3,095 |
py
|
# import pathlib
# import xml.dom.minidom
# import sys
# from os import listdir, getcwd
# from os.path import join, isfile
# import csv
# csv_path = "XXXXXX"
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Integer
from sqlalchemy.orm import sessionmaker
import sys
import csv
import os
import pathlib
from os.path import join, isfile
db_config = {'user': 'postgres', 'password': 'postgres',
'netloc': 'localhost', 'port': '5432', 'dbname': 'aerodrome'}
def GenerateUri(db_config: map):
return 'postgresql+psycopg2://' + db_config['user'] + ':' + db_config['password'] + '@' + db_config['netloc'] + ':' + db_config['port'] + '/' + db_config['dbname']
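# With the db_config defined above this produces a standard SQLAlchemy URI, e.g.
# postgresql+psycopg2://postgres:postgres@localhost:5432/aerodrome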
db = create_engine(GenerateUri(db_config))
base = declarative_base()
class Obstacles(base):
__tablename__ = 'obstacles'
obs_id = Column(Integer, primary_key=True)
icao = Column(String)
affected = Column(String)
obs_type = Column(String)
latitude = Column(String)
longitude = Column(String)
elevation = Column(String)
marking = Column(String)
remark = Column(String)
def __init__(self, icao, row: list):
self.icao = icao
self.affected = row[0]
self.obs_type = row[1]
self.latitude = row[2]
self.longitude = row[3]
self.elevation = row[4]
self.marking = row[5]
self.remark = row[6]
Session = sessionmaker(db)
session = Session()
base.metadata.create_all(db)
def InsertIfNotPresent(obstacle: Obstacles):
if session.query(Obstacles)\
.filter(Obstacles.affected == obstacle.affected)\
.filter(Obstacles.elevation == obstacle.elevation)\
.filter(Obstacles.icao == obstacle.icao)\
.filter(Obstacles.latitude == obstacle.latitude)\
.filter(Obstacles.longitude == obstacle.longitude)\
.filter(Obstacles.marking == obstacle.marking)\
.filter(Obstacles.remark == obstacle.remark)\
.count() == 0:
session.add(obstacle)
def IsFloat(text: str):
try:
x = float(text)
return True
except:
return False
def CSVToDB(csv_path):
csvReader = csv.reader(open(csv_path), delimiter=',')
for row in csvReader:
        if len(row) < 10 or not (IsFloat(row[2]) and IsFloat(row[3]) and IsFloat(row[4])):
continue
uid = row[0]
obs_type = row[1]
latitude = str(float(row[2]) + float(row[3])/60 + float(row[4])/3600)
longitude = str(float(row[5]) + float(row[6])/60 + float(row[7])/3600)
elevation = row[8]
remark = row[9]
obstacle = [uid,obs_type,latitude,longitude,elevation,"NIL",remark]
icao = uid[0:4].upper()
element = Obstacles(icao, obstacle)
InsertIfNotPresent(element)
print(icao)
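# The coordinate fields arrive as separate degree, minute and second columns and are
# converted to decimal degrees as deg + min/60 + sec/3600. For instance (illustrative
# values only): 28 deg 36 min 50 sec -> 28 + 36/60 + 50/3600 ~= 28.6139.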
def main():
obstacle_file = sys.argv[1]
CSVToDB(obstacle_file)
session.commit()
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
05bcc41b1fda06b38e94d20c17eae7dd2767364c
|
c1f4647ba499ab6af84b62525c7a7586e4e59f70
|
/learn_python_01/test_module.py
|
d5a9195384128b1086fbf7a5339287367ee651b6
|
[] |
no_license
|
madecai/learn-python
|
e406269cb70beb368b54a67499df080acbe777e9
|
65b34efef931b565a62e7af2a54b44d98b004f3d
|
refs/heads/main
| 2022-12-26T06:29:15.390188 | 2020-10-09T08:56:58 | 2020-10-09T08:56:58 | 302,584,433 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 135 |
py
|
print('********** This is the test_module module')
print('**********', __name__) # test_module
a = 10
_b = 20
c = 30
_d = 40
e = 50
_f = 60
|
[
"[email protected]"
] | |
2c9bfe0886844534963fb82e8839ee2a8d6ef61f
|
e18a3d43c02d33b342381c219a6ec25fd0581be5
|
/DataStructures/linkedlist/floyd-cycle-finding-algo(loop detect).py
|
e32b7891e50d5c6a669d3301e8a11ca05483ae40
|
[] |
no_license
|
ItsSamarth/ds-python
|
1187e799f54421e351709bad5eecb16c4466f0b0
|
c7d35a3d1421c4a40243efc1e06ac01a1f7c220d
|
refs/heads/master
| 2020-03-25T06:50:03.515978 | 2019-05-04T06:49:57 | 2019-05-04T06:49:57 | 143,526,509 | 2 | 0 | null | 2018-08-12T07:35:14 | 2018-08-04T12:23:33 |
Python
|
UTF-8
|
Python
| false | false | 1,057 |
py
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def push(self, new_data):
new_node = Node(new_data)
new_node.next = self.head
self.head = new_node
def printll(self):
temp = self.head
while temp:
print(temp.data)
temp = temp.next
def detectLoop(self):
slow_p = self.head
fast_p = self.head
while slow_p and fast_p and fast_p.next:
slow_p = slow_p.next
fast_p = fast_p.next.next
if slow_p == fast_p:
return True
return False
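        # Floyd's cycle-finding ("tortoise and hare"): slow_p advances one node per
        # step and fast_p two, so if the list contains a loop fast_p eventually
        # catches slow_p inside the loop and the two references compare equal;
        # otherwise fast_p reaches the end of the list first.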
if __name__ == "__main__":
ll = LinkedList()
ll.push(10)
ll.push(20)
ll.push(30)
ll.push(40)
ll.push(50)
ll.push(60)
ll.printll()
print("Checking loop in linkedlist")
ll.head.next.next.next.next = ll.head
if(ll.detectLoop()):
print("Loop found")
else:
print("Not Found")
|
[
"[email protected]"
] | |
4b07d1427059017a5efe9aaa2f4d709d14931aa8
|
d4ea1f9747799bf503523b86b8b5ee29bab65eff
|
/gyun/cli/iaas_client/actions/s2/modify_s2_shared_target_attributes.py
|
a1ea167b28dd9c16607678be1997a591d7b7c26d
|
[
"Apache-2.0"
] |
permissive
|
gyun-gome/gyun-cli
|
88b5493d90a19c5bf56a1bba4bf301d1b4a3156d
|
275b6664335e2ef21a01a48f8c06d6a89dd63467
|
refs/heads/master
| 2021-06-28T13:53:01.300135 | 2017-09-13T04:44:01 | 2017-09-13T04:44:01 | 103,353,093 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,206 |
py
|
# encoding: utf-8
# =========================================================================
# ©2017-2018 北京国美云服科技有限公司
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from gyun.cli.misc.utils import explode_array
from gyun.cli.iaas_client.actions.base import BaseAction
class ModifyS2SharedTargetAttributesAction(BaseAction):
action = 'ModifyS2SharedTargetAttributes'
command = 'modify-s2-shared-target-attributes'
usage = '%(prog)s -s <shared_target> -o <operation> [-p <parameters> ...] [-f <conf_file>]'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument("-s", "--shared-target", dest="shared_target",
action="store", type=str, default=None,
help="the ID of shared target.")
parser.add_argument("-o", "--operation", dest="operation",
action="store", type=str, default=None,
help="valid values includes add, modify, delete, set.")
parser.add_argument("-p", "--parameters", dest="parameters",
action="store", type=str, default=None,
help="please refer http://docs.qc.gyun.com/api/s2/describle_s2_default_parameters.html")
parser.add_argument("-i", "--initiator-names", dest="initiator_names",
action="store", type=str, default=None,
help="client IQN.")
parser.add_argument("-S", "--s2-group", dest="s2_group",
action="store", type=str, default=None,
help="the ID of permission group.")
parser.add_argument("-n", "--export-name", dest="export_name",
action="store", type=str, default=None,
help="the name of shared target, available in vnas.")
@classmethod
def build_directive(cls, options):
for key in ['shared_target', 'operation']:
if not hasattr(options, key):
print("error: [%s] should be specified." % key)
return None
directive = {
"shared_target": options.shared_target,
"operation": options.operation,
"parameters": explode_array(options.parameters),
"initiator_names": explode_array(options.initiator_names),
"s2_group": options.s2_group,
"export_name": options.export_name,
}
return directive
|
[
"[email protected]"
] | |
57a16be36238a704d13ed3389b205542b728c0e4
|
f3b670732a1bd7661e4dd21294d18e2d949190f5
|
/flowers_agent/messenger_bot/views.py
|
4767483a950b61dd7ad7d7212548ecaf2db6a4fa
|
[
"MIT"
] |
permissive
|
darkowic/flowers-agent
|
0bc0f94205f52348a1c73e2fe162290509379ebe
|
f47ac7ba228781d1c3b096f42ea6a2da2b1af32b
|
refs/heads/master
| 2021-07-12T18:42:14.673030 | 2018-09-20T07:43:44 | 2018-09-20T07:43:44 | 145,862,067 | 0 | 0 |
MIT
| 2020-06-05T19:06:23 | 2018-08-23T14:05:06 |
JavaScript
|
UTF-8
|
Python
| false | false | 2,233 |
py
|
import json
from django.views import generic as views
from django.http import HttpResponse, HttpResponseForbidden, Http404
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from .helpers import WebhookHandler
# Create your views here.
class MessengerWebhookView(views.View):
http_method_names = ['get', 'post']
def get(self, request, *args, **kwargs):
# in get method facebook api subscribes to the webhook
mode = request.GET.get('hub.mode')
token = request.GET.get('hub.verify_token')
if mode == 'subscribe' and token == getattr(settings, 'MESSENGER_VERIFY_TOKEN'):
return HttpResponse(request.GET.get('hub.challenge'))
return HttpResponseForbidden()
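        # This is Facebook's webhook verification handshake: the Graph API calls the
        # GET endpoint with hub.mode=subscribe and the configured verify token, and
        # the subscription succeeds only if hub.challenge is echoed back unchanged.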
def post(self, request, *args, **kwargs):
if not request.content_type == 'application/json':
raise Http404
body = json.loads(request.body)
print('request body', body)
# Check the webhook event is from a Page subscription
if body.get('object', None) == 'page':
# Iterate over each entry - there may be multiple if batched
for entry in body.get('entry', []):
# Get the webhook event. entry.messaging is an array, but
# will only ever contain one event, so we get index 0
webhook_event = entry['messaging'][0]
handler = WebhookHandler(
webhook_event['recipient']['id'],
webhook_event['sender']['id']
)
try:
handler.handle(webhook_event)
except ValueError:
print('Not supported webhook event', webhook_event)
return HttpResponse('EVENT_RECEIVED')
raise Http404
messenger_webhook_view = csrf_exempt(MessengerWebhookView.as_view())
class IndexView(views.TemplateView):
template_name = 'messenger_bot/index.html'
def get_context_data(self, **kwargs):
context_data = super(IndexView, self).get_context_data(**kwargs)
context_data['data'] = {
"APP_ID": settings.MESSENGER_APP_ID
}
return context_data
index_view = IndexView.as_view()
|
[
"[email protected]"
] | |
d1e360b771fc125f22caf4d80ca157b3557aa97c
|
96d7b268ed589c3cf2caa47b5478f6c24f883e34
|
/爬取不得解视频.py
|
6bff5b6bf076a93dbe23f3c99b2ff5b4c21e63dd
|
[] |
no_license
|
494589939/learn-python3-
|
2c5836ae2af415a606f2a0398b175c8e7fa6686c
|
b13b600ab334f7fa04183b777a0579388aab9a02
|
refs/heads/master
| 2021-01-20T15:27:07.728339 | 2018-03-18T08:46:19 | 2018-03-18T08:46:19 | 82,815,559 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,001 |
py
|
#! /usr/bin/env python
from bs4 import BeautifulSoup
import requests,urllib
# Parse the URL and return a BeautifulSoup object
def req(url):
try:
headers={"User-Agent":"Mozilla/5.0"}
html=requests.get(url,headers=headers)
html.raise_for_status()
html.encoding=html.apparent_encoding
soup=BeautifulSoup(html.text,"html.parser")
return soup
except:
print("输入错误")
#获取视频URL地址并下载
def filt(soup):
    for names in soup.select(".j-video-c"):  # filter the names
        for video in names.select(".j-video"):  # filter the URLs
name=names.get("data-title")[:-3]
mp=video.get("data-mp4")
urllib.request.urlretrieve(mp,r'D:\python项目\video\%s.mp4'%name)
print("正在下载:"+name+mp)
if __name__=="__main__":
page=input("请输入要结束的页数:")
i=1
while i <=int(page):
url="http://www.budejie.com/video/%s"%i
filt(req(url))
i+=1
|
[
"[email protected]"
] | |
e5e84e25c5a9c701c6e2880d5b7603b6945e33a5
|
b685f62e0f41349b39fc573f10338fa6148e3dd7
|
/wucaicheng_Sign v2.py
|
e5bc18c47b8bf3601f209f9c038b8f1b27296bb0
|
[] |
no_license
|
ifr4me/MyPython
|
61e46fa1c9e5886b589ab104eaf50690623ff2a2
|
6e4b64d3c3dad30bbcb5e76a48b704f10d73b838
|
refs/heads/master
| 2021-09-12T13:27:30.526976 | 2018-04-17T08:47:45 | 2018-04-17T08:47:45 | 103,880,499 | 0 | 0 | null | 2017-09-18T02:16:11 | 2017-09-18T02:16:11 | null |
UTF-8
|
Python
| false | false | 2,069 |
py
|
# coding=utf-8
#cron 58 6-10/1 * * * python /root/wucaicheng.py
__author__="iframe"
__Date__="20171018"
import requests,json,time
import logging
LOG_FILE = '/var/log/wucaicheng.log'
#LOG_FILE = 'd:\\wucaicheng.log'
# If today's date is already in LOG_FILE, exit. Schedule the cron job to run more than once a day so the sign-in is retried when the server is temporarily unavailable.
date = time.strftime("%Y-%m-%d", time.localtime())
print date
log = open(LOG_FILE, "a+")
try:
all_log = log.read()
result = all_log.find(date)
print result
if result > 0 :
exit()
finally:
log.close()
# prepare login and sign
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
           'X-Requested-With':'XMLHttpRequest',
'Origin':'http://bj.wucaicheng.com.cn'}
wucaichengUrl = "http://bj.wucaicheng.com.cn/html/member/api/1/login"
signUrl = 'http://bj.wucaicheng.com.cn/html/member/api/1/sign'
postData = {'type':'2','phone':'18611111111','phonecode':'','password':'yourpassword'}
log_level = logging.DEBUG
logger = logging.getLogger("loggingmodule.NomalLogger")
handler = logging.FileHandler(LOG_FILE)
formatter = logging.Formatter("[%(asctime)s]%(message)s")
#formatter = logging.Formatter("[%(levelname)s][%(funcName)s][%(asctime)s]%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(log_level)
#logger.info("this is a info msg!")
# request = urllib2.Request(wucaichengurl ,data=postData ,headers = headers)
# response = urllib2.urlopen(url = wucaichengurl, data=urllib.urlencode(postData))
# print response .read()
print time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
s = requests.session()
login = s.post(wucaichengUrl, data=postData, headers=headers)
print login.content
response = s.post(signUrl, cookies=login.cookies, headers = headers)
print response.content
decode = json.loads(response.content)
print decode['meta']['msg']
msg = '%s\n%s\n%s\n' % (login.content , response.content , decode['meta']['msg'])
#print msg
logger.info(msg=msg)
s.close()
|
[
"[email protected]"
] | |
5d7b1f7b81a1e99a2ebca3c53db18a84add46beb
|
1f9a83114758e7be45062ce95e1e53cc54d5c41c
|
/cs231n/classifiers/k_nearest_neighbor.py
|
5818811857353f819e9a9dab4c892ae4c657c093
|
[] |
no_license
|
aluckyi/cs231n-assignment1
|
4a9e95bdfbebf14185a13580d03fe2a80ebba436
|
5de3b70a1b1c80fd0784cef593ef6029d7118839
|
refs/heads/master
| 2020-03-30T12:51:14.974146 | 2018-10-02T11:36:24 | 2018-10-02T11:36:24 | 151,228,182 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,266 |
py
|
import numpy as np
class KNearestNeighbor(object):
""" a kNN classifier with L2 distance """
def __init__(self):
pass
def train(self, X, y):
"""
Train the classifier. For k-nearest neighbors this is just
memorizing the training data.
Inputs:
- X: A numpy array of shape (num_train, D) containing the training data
consisting of num_train samples each of dimension D.
- y: A numpy array of shape (N,) containing the training labels, where
y[i] is the label for X[i].
"""
self.X_train = X
self.y_train = y
def predict(self, X, k=1, num_loops=0):
"""
Predict labels for test data using this classifier.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data consisting
of num_test samples each of dimension D.
- k: The number of nearest neighbors that vote for the predicted labels.
- num_loops: Determines which implementation to use to compute distances
between training points and testing points.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
if num_loops == 0:
dists = self.compute_distances_no_loops(X)
elif num_loops == 1:
dists = self.compute_distances_one_loop(X)
elif num_loops == 2:
dists = self.compute_distances_two_loops(X)
else:
raise ValueError('Invalid value %d for num_loops' % num_loops)
return self.predict_labels(dists, k=k)
def compute_distances_two_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a nested loop over both the training data and the
test data.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data.
Returns:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
is the Euclidean distance between the ith test point and the jth training
point.
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in range(num_test):
for j in range(num_train):
#####################################################################
# TODO: #
# Compute the l2 distance between the ith test point and the jth #
# training point, and store the result in dists[i, j]. You should #
# not use a loop over dimension. #
#####################################################################
# pass
dists[i, j] = np.sqrt(np.sum(np.square(self.X_train[j, :] - X[i, :])))
#####################################################################
# END OF YOUR CODE #
#####################################################################
return dists
def compute_distances_one_loop(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a single loop over the test data.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in range(num_test):
#######################################################################
# TODO: #
# Compute the l2 distance between the ith test point and all training #
# points, and store the result in dists[i, :]. #
#######################################################################
# pass
dists[i, :] = np.sqrt(np.sum(np.square(self.X_train - X[i, :]), axis=1))
#######################################################################
# END OF YOUR CODE #
#######################################################################
return dists
def compute_distances_no_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using no explicit loops.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
#########################################################################
# TODO: #
# Compute the l2 distance between all test points and all training #
# points without using any explicit loops, and store the result in #
# dists. #
# #
# You should implement this function using only basic array operations; #
# in particular you should not use functions from scipy. #
# #
# HINT: Try to formulate the l2 distance using matrix multiplication #
# and two broadcast sums. #
#########################################################################
# pass
dists = np.multiply(np.dot(X, self.X_train.T), -2)
sq1 = np.sum(np.square(X), axis=1, keepdims=True)
sq2 = np.sum(np.square(self.X_train.T), axis=0, keepdims=True)
# sq2 = np.sum(np.square(self.X_train), axis=1)
dists = np.add(dists, sq1)
dists = np.add(dists, sq2)
dists = np.sqrt(dists)
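    # The fully vectorized version relies on the expansion
    # ||x - t||^2 = ||x||^2 + ||t||^2 - 2 * x . t, so the whole distance matrix
    # comes from one matrix product (X with X_train transposed) plus two broadcast
    # sums of squared norms, with no explicit Python loops.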
#########################################################################
# END OF YOUR CODE #
#########################################################################
return dists
def predict_labels(self, dists, k=1):
"""
Given a matrix of distances between test points and training points,
predict a label for each test point.
Inputs:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
      gives the distance between the ith test point and the jth training point.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
num_test = dists.shape[0]
y_pred = np.zeros(num_test)
for i in range(num_test):
# A list of length k storing the labels of the k nearest neighbors to
# the ith test point.
closest_y = []
#########################################################################
# TODO: #
# Use the distance matrix to find the k nearest neighbors of the ith #
# testing point, and use self.y_train to find the labels of these #
# neighbors. Store these labels in closest_y. #
# Hint: Look up the function numpy.argsort. #
#########################################################################
# pass
closest_y = self.y_train[np.argsort(dists[i, :])[:k]]
#########################################################################
# TODO: #
# Now that you have found the labels of the k nearest neighbors, you #
# need to find the most common label in the list closest_y of labels. #
# Store this label in y_pred[i]. Break ties by choosing the smaller #
# label. #
#########################################################################
# pass
y_pred[i] = np.argmax(np.bincount(closest_y))
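      # np.bincount counts the votes per label and np.argmax returns the first
      # (i.e. smallest) label among any that tie, which satisfies the tie-breaking
      # rule described above.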
#########################################################################
# END OF YOUR CODE #
#########################################################################
return y_pred
|
[
"[email protected]"
] | |
a976399b9c9b2cd65b81c38fc1ab67a1eaf7764a
|
ce9e6fc86466eb0d0080bbc5e3762ec19e63c6db
|
/Python/24_yaml.py
|
9a463c30708d42f9858d77b028814f70bd289b4b
|
[
"Unlicense"
] |
permissive
|
tomoki-99-ikomot/programing_languages
|
1ee0f96741bee67c2385e763a104f41fe3300e0d
|
7aa2354f4a366b2400d3c0b86863d03073387486
|
refs/heads/main
| 2023-07-22T00:16:58.288735 | 2021-09-10T13:07:58 | 2021-09-10T13:07:58 | 364,067,804 | 0 | 0 |
Unlicense
| 2021-05-03T21:39:06 | 2021-05-03T21:39:05 | null |
UTF-8
|
Python
| false | false | 751 |
py
|
################################################################################
# yaml files
import yaml
# Define the configuration values
# Write the yaml file
with open('files/24_config.yml', mode='w') as yaml_file:
yaml.dump({
'web_server': {
'host': '127.0.0.1',
'port': 80
},
'db_server': {
'host': '127.0.0.1',
'port': 3306
}
}, yaml_file)
# Read the yaml file
with open('files/24_config.yml', mode='r') as yaml_file:
data = yaml.load(yaml_file, Loader=yaml.FullLoader)
print(data['web_server']['host'])
print(data['web_server']['port'])
# The output looks like this:
"""
127.0.0.1
80
"""
|
[
"[email protected]"
] | |
8a395168a28dd3d92b9f315c65da262c70b7c02d
|
68d9deebaf5c5c1fe3a01b54aa07860e186cb69c
|
/run/PlotDraw_Wplusmunu5.py
|
294e62513d2ccc3a051f24b42fc0fd4ccbb93b5b
|
[] |
no_license
|
Hicham-ATMANI/XSCalceFW
|
731667ea9389d8f6e8a4fb7e02f98cce014be8e8
|
3309a1e33a897c7922cf8b60f8b41b8a5919b53b
|
refs/heads/master
| 2022-12-04T01:31:25.253993 | 2020-08-25T11:25:50 | 2020-08-25T11:25:50 | 274,935,442 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,941 |
py
|
#!/usr/bin/env python
# -*-coding:Latin-1 -*
from math import *
import ROOT
import ROOT as root
from ROOT import gROOT, TCanvas, TFile, THStack, TH1F, TPad, TLine, TH1
from source.PlotClass1D import Plot1D
from source.SystVariations import SystVariations
from source.PlotClass2D import Plot2D
from source.OpitmisationStudy import OpitmisationStudy
from source.BackgroundClass import BackgroundClass
from source.CrossSection import CrossSection
from source.CrossSectionDev import CrossSectionDev
from source.ComparisonUnfoldedMC import ComparisonUnfoldedMC
from source.Test import Test
import matplotlib.pyplot as plt
import numpy as np
""" Define all the Objects needed for next part """
NominalPlots = Plot1D()
SystematicsStudy = SystVariations()
MatrixPlots = Plot2D()
Optimisation = OpitmisationStudy()
BackgroundPlot = BackgroundClass()
CrossSectionDeter = CrossSection()
CrossSectionDeterDev = CrossSectionDev()
TestPlot = Test()
Unfolded_MC = ComparisonUnfoldedMC()
""" ********************************************************************************************************************************************************* """
""" ****************************************************************** Define the input ********************************************************************* """
""" ********************************************************************************************************************************************************* """
# plus enu 5 TeV
MCsamples_plusmunu5 = ROOT.TFile.Open("/afs/cern.ch/work/h/hatmani/HistMaker/Processing/pTW_Input/pTWanalysis_ptwplusmunu_MC_5TeV/Nominal/mc16_5TeV.361101.PowhegPythia8EvtGen_AZNLOCTEQ6L1_Wplusmunu.e4916_s3238_r10243_r10210_p3665.root")
Summarize_plusmunu5 = ROOT.TFile.Open("/afs/cern.ch/work/h/hatmani/HistMaker_v1/Plotting/RooUnfold/buckup/old/output_Wplusmunu5/WpT/Summarize_Wplusmunu5.root")
Bias_plusmunu5 = ROOT.TFile.Open("/afs/cern.ch/work/h/hatmani/HistMaker_v1/Plotting/RooUnfold/buckup/old/output_Wplusmunu5/WpT/Bias_Wplusmunu5.root")
IsoSF_plusmunu5 = ROOT.TFile.Open("/afs/cern.ch/work/h/hatmani/HistMaker_v1/Plotting/RooUnfold/buckup/old/output_Wplusmunu5/WpT/Syst_MuIsoSys.root")
RecoSF_plusmunu5 = ROOT.TFile.Open("/afs/cern.ch/work/h/hatmani/HistMaker_v1/Plotting/RooUnfold/buckup/old/output_Wplusmunu5/WpT/Syst_MuRecoSys.root")
TrigSF_plusmunu5 = ROOT.TFile.Open("/afs/cern.ch/work/h/hatmani/HistMaker_v1/Plotting/RooUnfold/buckup/old/output_Wplusmunu5/WpT/Syst_MuTrigSys.root")
Recoil_plusmunu5 = ROOT.TFile.Open("/afs/cern.ch/work/h/hatmani/HistMaker_v1/Plotting/RooUnfold/buckup/old/output_Wplusmunu5/WpT/Recoil_Syst.root")
""" ********************************************************************************************************************************************************* """
""" ****************************************************************** Get Nominal Plot ********************************************************************* """
""" ********************************************************************************************************************************************************* """
''' Wplusmunu 5TeV '''
#NominalPlots.GetEpsilonFactors(Summarize_plusmunu5, "Wplusmunu5", "W^{+}#rightarrow #mu^{+}#nu, 5TeV")
#NominalPlots.GetAcceptanceFactors(Summarize_plusmunu5, "Wplusmunu5", "W^{+}#rightarrow #mu^{+}#nu, 5TeV")
#NominalPlots.MigrationMatrix(Summarize_plusmunu5, "Wplusmunu5", "W^{+}#rightarrow #mu^{+}#nu, 5TeV")
#NominalPlots.ShowNominalDistribution(Summarize_plusmunu5, "Wplusmunu5", "W^{+}#rightarrow #mu^{+}#nu, 5TeV")
#NominalPlots.CompareBias(Bias_plusmunu5, 1, 10, "Wplusmunu5", "W^{+}#rightarrow #mu^{+}#nu, 5TeV")
#NominalPlots.CompareBias(Bias_plusmunu5, 10, 19, "Wplusmunu5", "W^{+}#rightarrow #mu^{+}#nu, 5TeV")
#NominalPlots.CompareStatError(Summarize_plusmunu5, 1, 10, "Wplusmunu5", "W^{+}#rightarrow #mu^{+}#nu, 5TeV")
#NominalPlots.CompareStatError(Summarize_plusmunu5, 10, 19, "Wplusmunu5", "W^{+}#rightarrow #mu^{+}#nu, 5TeV")
#NominalPlots.BiasProcedure( Summarize_plusmunu5, Bias_plusmunu5, "Wplusmunu5", "W^{+}#rightarrow #mu^{+}#nu, 5TeV" )
""" ********************************************************************************************************************************************************* """
""" ****************************************************************** Get System Plots ********************************************************************* """
""" ********************************************************************************************************************************************************* """
''' Wplusmunu 5TeV '''
#SystematicsStudy.CompareSystIso( IsoSF_plusmunu5, 1, 10, "Wplusmunu5", "W^{+}#rightarrow #mu^{+}#nu, 5TeV")
#SystematicsStudy.CompareSystReco( RecoSF_plusmunu5, 1, 10, "Wplusmunu5", "W^{+}#rightarrow #mu^{+}#nu, 5TeV")
#SystematicsStudy.CompareSystTrig( TrigSF_plusmunu5, 1, 10, "Wplusmunu5", "W^{+}#rightarrow #mu^{+}#nu, 5TeV")
#SystematicsStudy.CompareSystRecoil( Summarize_plusmunu5, Recoil_plusmunu5, 1, 10, "Wplusmunu5", "W^{+}#rightarrow #mu^{+}#nu, 5TeV")
""" ********************************************************************************************************************************************************* """
""" ***************************************************************** Optimisation Study ******************************************************************** """
""" ********************************************************************************************************************************************************* """
#Optimisation.StatStudy(Summarize_plusmunu5, 1, 20, 0, 600, "Wplusmunu5", "$p^{T}_{W}$") # define the number of iterations for the study
#Optimisation.StatStudy(Summarize_plusmunu5, 1, 20, 0, 60, "Wplusmunu5", "$p^{T}_{W}$") # define the number of iterations for the study
#Optimisation.StatStudy(Summarize_plusmunu5, 1, 20, 0, 40, "Wplusmunu5", "$p^{T}_{W}$") # define the number of iterations for the study
#Optimisation.StatStudy(Summarize_plusmunu5, 1, 20, 0, 20, "Wplusmunu5", "$p^{T}_{W}$") # define the number of iterations for the study
#Optimisation.StatStudy(Summarize_plusmunu5, 1, 20, 0, 10, "Wplusmunu5", "$p^{T}_{W}$") # define the number of iterations for the study
#Optimisation.BiasStudy(Summarize_plusmunu5, Bias_plusmunu5, 1, 20, 0, 600, "Wplusmunu5", "$p^{T}_{W}$")
#Optimisation.BiasStudy(Summarize_plusmunu5, Bias_plusmunu5, 1, 20, 0, 60, "Wplusmunu5", "$p^{T}_{W}$")
#Optimisation.BiasStudy(Summarize_plusmunu5, Bias_plusmunu5, 1, 20, 0, 40, "Wplusmunu5", "$p^{T}_{W}$")
#Optimisation.BiasStudy(Summarize_plusmunu5, Bias_plusmunu5, 1, 20, 0, 20, "Wplusmunu5", "$p^{T}_{W}$")
#Optimisation.BiasStudy(Summarize_plusmunu5, Bias_plusmunu5, 1, 20, 0, 10, "Wplusmunu5", "$p^{T}_{W}$")
#Optimisation.EffSystematicStudy(Summarize_plusmunu5, TrigSF_plusmunu5, RecoSF_plusmunu5, IsoSF_plusmunu5, IsoSF_plusmunu5, 1, 19, 0, 600, "Wplusmunu5", "$p^{T}_{W}$")
#Optimisation.EffSystematicStudy(Summarize_plusmunu5, TrigSF_plusmunu5, RecoSF_plusmunu5, IsoSF_plusmunu5, IsoSF_plusmunu5, 1, 19, 0, 60, "Wplusmunu5", "$p^{T}_{W}$")
#Optimisation.EffSystematicStudy(Summarize_plusmunu5, TrigSF_plusmunu5, RecoSF_plusmunu5, IsoSF_plusmunu5, IsoSF_plusmunu5, 1, 19, 0, 40, "Wplusmunu5", "$p^{T}_{W}$")
#Optimisation.EffSystematicStudy(Summarize_plusmunu5, TrigSF_plusmunu5, RecoSF_plusmunu5, IsoSF_plusmunu5, IsoSF_plusmunu5, 1, 19, 0, 20, "Wplusmunu5", "$p^{T}_{W}$")
#Optimisation.EffSystematicStudy(Summarize_plusmunu5, TrigSF_plusmunu5, RecoSF_plusmunu5, IsoSF_plusmunu5, IsoSF_plusmunu5, 1, 19, 0, 10, "Wplusmunu5", "$p^{T}_{W}$")
#Optimisation.RecoilSystematicStudy(Summarize_plusmunu5, Recoil_plusmunu5, 1, 19, 0, 600, "Wplusmunu5", "$p^{T}_{W}$")
#Optimisation.RecoilSystematicStudy(Summarize_plusmunu5, Recoil_plusmunu5, 1, 19, 0, 60, "Wplusmunu5", "$p^{T}_{W}$")
#Optimisation.RecoilSystematicStudy(Summarize_plusmunu5, Recoil_plusmunu5, 1, 19, 0, 40, "Wplusmunu5", "$p^{T}_{W}$")
#Optimisation.RecoilSystematicStudy(Summarize_plusmunu5, Recoil_plusmunu5, 1, 19, 0, 20, "Wplusmunu5", "$p^{T}_{W}$")
#Optimisation.RecoilSystematicStudy(Summarize_plusmunu5, Recoil_plusmunu5, 1, 19, 0, 10, "Wplusmunu5", "$p^{T}_{W}$")
#Optimisation.TotalSystematicStudy(Summarize_plusmunu5, Bias_plusmunu5, TrigSF_plusmunu5, RecoSF_plusmunu5, IsoSF_plusmunu5, IsoSF_plusmunu5, Recoil_plusmunu5, Recoil_plusmunu5, 1, 19, 0, 600, "Wplusmunu5", "$p^{T}_{W}$")
#Optimisation.TotalSystematicStudy(Summarize_plusmunu5, Bias_plusmunu5, TrigSF_plusmunu5, RecoSF_plusmunu5, IsoSF_plusmunu5, IsoSF_plusmunu5, Recoil_plusmunu5, Recoil_plusmunu5, 1, 19, 0, 60, "Wplusmunu5", "$p^{T}_{W}$")
#Optimisation.TotalSystematicStudy(Summarize_plusmunu5, Bias_plusmunu5, TrigSF_plusmunu5, RecoSF_plusmunu5, IsoSF_plusmunu5, IsoSF_plusmunu5, Recoil_plusmunu5, Recoil_plusmunu5, 1, 19, 0, 40, "Wplusmunu5", "$p^{T}_{W}$")
#Optimisation.TotalSystematicStudy(Summarize_plusmunu5, Bias_plusmunu5, TrigSF_plusmunu5, RecoSF_plusmunu5, IsoSF_plusmunu5, IsoSF_plusmunu5, Recoil_plusmunu5, Recoil_plusmunu5, 1, 19, 0, 20, "Wplusmunu5", "$p^{T}_{W}$")
#Optimisation.TotalSystematicStudy(Summarize_plusmunu5, Bias_plusmunu5, TrigSF_plusmunu5, RecoSF_plusmunu5, IsoSF_plusmunu5, IsoSF_plusmunu5, Recoil_plusmunu5, Recoil_plusmunu5, 1, 19, 0, 10, "Wplusmunu5", "$p^{T}_{W}$")
""" ********************************************************************************************************************************************************* """
""" ******************************************************************* Nominal 2D Plot ********************************************************************* """
""" ********************************************************************************************************************************************************* """
#MatrixPlots.StatCovarianceMatrix(Summarize_plusmunu5, 4, "Wplusmunu5", "W^{+}#rightarrow #mu^{+}#nu, 5TeV", "5TeV")
#MatrixPlots.BiasCovarianceMatrix(Bias_plusmunu5, 4, "Wplusmunu5", "W^{+}#rightarrow #mu^{+}#nu, 5TeV", "5TeV")
""" ********************************************************************************************************************************************************* """
""" ******************************************************************* Differential Xs ********************************************************************* """
""" ********************************************************************************************************************************************************* """
CrossSectionDeter.GetptwTables(Summarize_plusmunu5, Bias_plusmunu5, TrigSF_plusmunu5, RecoSF_plusmunu5, IsoSF_plusmunu5, IsoSF_plusmunu5, IsoSF_plusmunu5, Recoil_plusmunu5, "5TeV", "W$^{+}$ $\\rightarrow$ $\\mu ^{+} \\nu $, 5TeV, Uncertainties in (\%)", "Wplusmunu5", 256.827, 4)
#CrossSectionDeter.GetDiffernetialXs(Summarize_plusmunu5, Bias_plusmunu5, TrigSF_plusmunu5, RecoSF_plusmunu5, IsoSF_plusmunu5, IsoSF_plusmunu5, IsoSF_plusmunu5, Recoil_plusmunu5, "5TeV", "W$^{+}$ $\\rightarrow$ $\\mu ^{+} \\nu $, 5TeV, Uncertainties in (\%)", "Wplusmunu5", 256.827)
#CrossSectionDeter.GetDiffernetialXsPlot(Summarize_plusmunu5, Bias_plusmunu5, TrigSF_plusmunu5, RecoSF_plusmunu5, IsoSF_plusmunu5, IsoSF_plusmunu5, IsoSF_plusmunu5, Recoil_plusmunu5, "5TeV", "W^{+}#rightarrow \\mu^{+}#nu, 5TeV", "Wplusmunu5", 256.827)
#CrossSectionDeter.GetDiffernetialXsPlotN(Summarize_plusmunu5, Bias_plusmunu5, TrigSF_plusmunu5, RecoSF_plusmunu5, IsoSF_plusmunu5, IsoSF_plusmunu5, IsoSF_plusmunu5, Recoil_plusmunu5, "5TeV", "W^{+}#rightarrow \\mu^{+}#nu, 5TeV", "Wplusmunu5", 256.827)
""" ********************************************************************************************************************************************************* """
""" ********************************************************************* fiducial Xs *********************************************************************** """
""" ********************************************************************************************************************************************************* """
#CrossSectionDeter.GetFiducialXs(Summarize_minusenu5, Bias_minusenu5, "5TeV", "W^{+} \\rightarrow e^{+} \\nu, 5TeV", "Wminusenu5")
#CrossSectionDeter.GetSummaringTable(Summarize_plusmunu5, Bias_plusmunu5, TrigSF_plusmunu5, RecoSF_plusmunu5, IsoSF_plusmunu5, IsoSF_plusmunu5, Recoil_plusmunu5, Recoil_plusmunu5, "5TeV", "W$^{+}$ $\\rightarrow$ $\\mu$ $^{+} \\nu $, 5TeV, Uncertainties in (\%)", "Wplusmunu5", 256.827, 0.361)
|
[
"[email protected]"
] | |
392439ff9f7190ef628cf4a49689016dfe12271f
|
cb68a4aee95efc3a88c2be2213732fdc18443be7
|
/astropy/io/fits/tests/test_checksum.py
|
badff8141950bbf78f204e60dd022ace3c18eb0d
|
[] |
no_license
|
zblz/astropy
|
2026efdb20ec8a5a9ad3e223060723c325f39388
|
5a54fd4485003823b237888eb9629fb0065cffd4
|
refs/heads/master
| 2021-01-15T21:35:21.590257 | 2013-02-07T00:03:54 | 2013-02-07T00:03:54 | 8,071,464 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,874 |
py
|
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import shutil
import warnings
import numpy as np
from ....io import fits
from ....tests.helper import pytest
from . import FitsTestCase
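# Round-trip tests: CHECKSUM/DATASUM keywords are written, preserved and verified
# for image, group, table and compressed HDUs, in both write and update modes.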
class TestChecksumFunctions(FitsTestCase):
def setup(self):
super(TestChecksumFunctions, self).setup()
self._oldfilters = warnings.filters[:]
warnings.filterwarnings(
'error',
message='Checksum verification failed')
warnings.filterwarnings(
'error',
message='Datasum verification failed')
def teardown(self):
super(TestChecksumFunctions, self).teardown()
warnings.filters = self._oldfilters
def test_sample_file(self):
hdul = fits.open(self.data('checksum.fits'), checksum=True)
hdul.close()
def test_image_create(self):
n = np.arange(100)
hdu = fits.PrimaryHDU(n)
hdu.writeto(self.temp('tmp.fits'), clobber=True, checksum=True)
hdul = fits.open(self.temp('tmp.fits'), checksum=True)
hdul.close()
def test_nonstandard_checksum(self):
hdu = fits.PrimaryHDU(np.arange(10.0 ** 6))
hdu.writeto(self.temp('tmp.fits'), clobber=True,
checksum='nonstandard')
del hdu
hdul = fits.open(self.temp('tmp.fits'), checksum='nonstandard')
def test_scaled_data(self):
hdul = fits.open(self.data('scale.fits'))
orig_data = hdul[0].data.copy()
hdul[0].scale('int16', 'old')
hdul.writeto(self.temp('tmp.fits'), clobber=True, checksum=True)
hdul1 = fits.open(self.temp('tmp.fits'), checksum=True)
assert (hdul1[0].data == orig_data).all()
hdul.close()
hdul1.close()
def test_uint16_data(self):
hdul = fits.open(self.data('o4sp040b0_raw.fits'), uint=True)
hdul.writeto(self.temp('tmp.fits'), clobber=True, checksum=True)
hdul1 = fits.open(self.temp('tmp.fits'), uint=True, checksum=True)
hdul.close()
hdul1.close()
def test_groups_hdu_data(self):
imdata = np.arange(100.0)
imdata.shape = (10, 1, 1, 2, 5)
pdata1 = np.arange(10) + 0.1
pdata2 = 42
x = fits.hdu.groups.GroupData(imdata, parnames=['abc', 'xyz'],
pardata=[pdata1, pdata2], bitpix=-32)
hdu = fits.GroupsHDU(x)
hdu.writeto(self.temp('tmp.fits'), clobber=True, checksum=True)
hdul1 = fits.open(self.temp('tmp.fits'), checksum=True)
hdul1.close()
def test_binary_table_data(self):
a1 = np.array(['NGC1001', 'NGC1002', 'NGC1003'])
a2 = np.array([11.1, 12.3, 15.2])
col1 = fits.Column(name='target', format='20A', array=a1)
col2 = fits.Column(name='V_mag', format='E', array=a2)
cols = fits.ColDefs([col1, col2])
tbhdu = fits.new_table(cols)
tbhdu.writeto(self.temp('tmp.fits'), clobber=True, checksum=True)
hdul = fits.open(self.temp('tmp.fits'), checksum=True)
hdul.close()
def test_variable_length_table_data(self):
c1 = fits.Column(name='var', format='PJ()',
array=np.array([[45.0, 56], np.array([11, 12, 13])], 'O'))
c2 = fits.Column(name='xyz', format='2I', array=[[11, 3], [12, 4]])
tbhdu = fits.new_table([c1, c2])
tbhdu.writeto(self.temp('tmp.fits'), clobber=True, checksum=True)
hdul = fits.open(self.temp('tmp.fits'), checksum=True)
hdul.close()
def test_ascii_table_data(self):
a1 = np.array(['abc', 'def'])
r1 = np.array([11.0, 12.0])
c1 = fits.Column(name='abc', format='A3', array=a1)
c2 = fits.Column(name='def', format='E', array=r1, bscale=2.3,
bzero=0.6)
c3 = fits.Column(name='t1', format='I', array=[91, 92, 93])
x = fits.ColDefs([c1, c2, c3], tbtype='TableHDU')
hdu = fits.new_table(x, tbtype='TableHDU')
hdu.writeto(self.temp('tmp.fits'), clobber=True, checksum=True)
hdul = fits.open(self.temp('tmp.fits'), checksum=True)
hdul.close()
def test_compressed_image_data(self):
hdul = fits.open(self.data('comp.fits'))
hdul.writeto(self.temp('tmp.fits'), clobber=True, checksum=True)
hdul1 = fits.open(self.temp('tmp.fits'), checksum=True)
hdul1.close()
hdul.close()
def test_compressed_image_data_int16(self):
n = np.arange(100, dtype='int16')
hdu = fits.ImageHDU(n)
comp_hdu = fits.CompImageHDU(hdu.data, hdu.header)
comp_hdu.writeto(self.temp('tmp.fits'), checksum=True)
hdul = fits.open(self.temp('tmp.fits'), checksum=True)
hdul.close()
def test_compressed_image_data_float32(self):
n = np.arange(100, dtype='float32')
comp_hdu = fits.CompImageHDU(n)
comp_hdu.writeto(self.temp('tmp.fits'), checksum=True)
hdul = fits.open(self.temp('tmp.fits'), checksum=True)
hdul.close()
def test_open_with_no_keywords(self):
hdul = fits.open(self.data('arange.fits'), checksum=True)
hdul.close()
def test_append(self):
hdul = fits.open(self.data('tb.fits'))
hdul.writeto(self.temp('tmp.fits'), clobber=True)
n = np.arange(100)
fits.append(self.temp('tmp.fits'), n, checksum=True)
hdul.close()
hdul = fits.open(self.temp('tmp.fits'), checksum=True)
assert hdul[0]._checksum == None
hdul.close()
def test_writeto_convenience(self):
n = np.arange(100)
fits.writeto(self.temp('tmp.fits'), n, clobber=True, checksum=True)
hdul = fits.open(self.temp('tmp.fits'), checksum=True)
self._check_checksums(hdul[0])
hdul.close()
def test_hdu_writeto(self):
n = np.arange(100, dtype='int16')
hdu = fits.ImageHDU(n)
hdu.writeto(self.temp('tmp.fits'), checksum=True)
hdul = fits.open(self.temp('tmp.fits'), checksum=True)
self._check_checksums(hdul[0])
hdul.close()
def test_datasum_only(self):
n = np.arange(100, dtype='int16')
hdu = fits.ImageHDU(n)
hdu.writeto(self.temp('tmp.fits'), clobber=True, checksum='datasum')
hdul = fits.open(self.temp('tmp.fits'), checksum=True)
if not (hasattr(hdul[0], '_datasum') and hdul[0]._datasum):
pytest.fail(msg='Missing DATASUM keyword')
if not (hasattr(hdul[0], '_checksum') and not hdul[0]._checksum):
pytest.fail(msg='Non-empty CHECKSUM keyword')
if not (hasattr(hdul[0], '_datasum_comment') and
hdul[0]._datasum_comment):
pytest.fail(msg='Missing DATASUM Card comment')
if not (hasattr(hdul[0], '_checksum_comment') and
not hdul[0]._checksum_comment):
pytest.fail(msg='Non-empty CHECKSUM Card comment')
def test_open_update_mode_preserve_checksum(self):
"""
Regression test for #148 where checksums are being removed from headers
when a file is opened in update mode, even though no changes were made
to the file.
"""
shutil.copy(self.data('checksum.fits'), self.temp('tmp.fits'))
with fits.open(self.temp('tmp.fits')) as hdul:
data = hdul[1].data.copy()
hdul = fits.open(self.temp('tmp.fits'), mode='update')
hdul.close()
with fits.open(self.temp('tmp.fits')) as hdul:
assert 'CHECKSUM' in hdul[1].header
assert 'DATASUM' in hdul[1].header
assert (data == hdul[1].data).all()
def test_open_update_mode_update_checksum(self):
"""
Regression test for #148, part 2. This ensures that if a file contains
a checksum, the checksum is updated when changes are saved to the file,
even if the file was opened with the default of checksum=False.
An existing checksum and/or datasum are only stripped if the file is
opened with checksum='remove'.
"""
shutil.copy(self.data('checksum.fits'), self.temp('tmp.fits'))
with fits.open(self.temp('tmp.fits')) as hdul:
header = hdul[1].header.copy()
data = hdul[1].data.copy()
with fits.open(self.temp('tmp.fits'), mode='update') as hdul:
hdul[1].header['FOO'] = 'BAR'
hdul[1].data[0]['TIME'] = 42
with fits.open(self.temp('tmp.fits')) as hdul:
header2 = hdul[1].header
data2 = hdul[1].data
assert header2[:-3] == header[:-2]
assert 'CHECKSUM' in header2
assert 'DATASUM' in header2
assert header2['FOO'] == 'BAR'
assert (data2['TIME'][1:] == data['TIME'][1:]).all()
assert data2['TIME'][0] == 42
with fits.open(self.temp('tmp.fits'), mode='update',
checksum='remove') as hdul:
pass
with fits.open(self.temp('tmp.fits')) as hdul:
header2 = hdul[1].header
data2 = hdul[1].data
assert header2[:-1] == header[:-2]
assert not 'CHECKSUM' in header2
assert not 'DATASUM' in header2
assert header2['FOO'] == 'BAR'
assert (data2['TIME'][1:] == data['TIME'][1:]).all()
assert data2['TIME'][0] == 42
def _check_checksums(self, hdu):
if not (hasattr(hdu, '_datasum') and hdu._datasum):
pytest.fail(msg='Missing DATASUM keyword')
if not (hasattr(hdu, '_checksum') and hdu._checksum):
pytest.fail(msg='Missing CHECKSUM keyword')
if not (hasattr(hdu, '_datasum_comment') and hdu._datasum_comment):
pytest.fail(msg='Missing DATASUM Card comment')
if not (hasattr(hdu, '_checksum_comment') and hdu._checksum_comment):
pytest.fail(msg='Missing CHECKSUM Card comment')
|
[
"[email protected]"
] | |
f686c14d3f3ccf88ac38fcd8a34d6d9f001befd4
|
3c0f50b6563e2c9c6306f7ca2216ff46c8250b96
|
/address/migrations/0003_usuario.py
|
b33eb28800063dbfeb0a7fb4e8513ef46fb1f55c
|
[] |
no_license
|
JoamirS/project-curriculo
|
895e72b34a8a51478c3fe5958d509bfa89be761e
|
490ed533dae740a7d2e1b652ce36fdb2af294eb3
|
refs/heads/master
| 2020-06-01T19:50:42.019259 | 2019-06-22T22:16:18 | 2019-06-22T22:16:18 | 190,904,296 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 576 |
py
|
# Generated by Django 2.2.2 on 2019-06-12 17:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('address', '0002_auto_20190612_1127'),
]
operations = [
migrations.CreateModel(
name='Usuario',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.CharField(max_length=100)),
('senha', models.CharField(max_length=30)),
],
),
]
|
[
"[email protected]"
] | |
b1268a6115f43f5420f6644711db3168fe16eca5
|
540e6d092fb1e57191b7dd5a14509892b91f6fbb
|
/scripts/python/mirs_tc/libMIRSTC.py
|
d7f7b72dc7509478d48105d84195f083b0cd0a7d
|
[
"BSD-2-Clause"
] |
permissive
|
ScottLongmore/MIRS_TC
|
a2ab6c08639cd1b9280da416e184c33bf3f1adba
|
51c3964b8192f71716f075fbe3fd2ffbdb656fb5
|
refs/heads/master
| 2021-01-25T10:21:22.815953 | 2018-02-28T21:53:25 | 2018-02-28T21:53:25 | 123,347,375 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 15,578 |
py
|
#!/usr/bin/python
"""
MIRS_TC - library of MIRS_TC routines
"""
# Stock modules
import sys
import os
import re
import logging
import traceback
import datetime
import collections
import operator
import shutil
import json
import jsonschema
import numpy
import pandas as pd
import math
import pprint
# Local modules
import error_codes
import utils
import fileAction
# Pretty Printer
pp=pprint.PrettyPrinter(indent=4)
# Logging
LOG = logging.getLogger(__name__)
mirsTimeQueryOffsets={
0:{"after":0,"before":21600},
1:{"after":3600,"before":21600},
2:{"after":7200,"before":21600},
3:{"after":10800,"before":21600},
4:{"after":14400,"before":21600},
5:{"after":18000,"before":21600}
}
basins={
"al":"Atlantic",
"ep":"Eastern Pacific",
"cp":"Central Pacific",
"wp":"Western Pacific",
"io":"Indian Ocean",
"sh":"Southern Hemisphere"
}
gfsGribFields={
"u70":":UGRD:70 mb:", "u100":":UGRD:100 mb:", "u150":":UGRD:150 mb:", "u200":":UGRD:200 mb:", "u250":":UGRD:250 mb:",
"u300":":UGRD:300 mb:", "u400":":UGRD:400 mb:", "u500":":UGRD:500 mb:", "u600":":UGRD:600 mb:", "u700":":UGRD:700 mb:",
"u850":":UGRD:850 mb:", "u925":":UGRD:925 mb:", "u1000":":UGRD:1000 mb:", "u1070":":UGRD:0.995 sigma level:",
"v70":":VGRD:70 mb:", "v100":":VGRD:100 mb:", "v150":":VGRD:150 mb:", "v200":":VGRD:200 mb:", "v250":":VGRD:250 mb:",
"v300":":VGRD:300 mb:", "v400":":VGRD:400 mb:", "v500":":VGRD:500 mb:", "v600":":VGRD:600 mb:", "v700":":VGRD:700 mb:",
"v850":":VGRD:850 mb:", "v925":":VGRD:925 mb:", "v1000":":VGRD:1000 mb:", "v1070":":VGRD:0.995 sigma level:",
"height70":":HGT:70 mb:", "height100":":HGT:100 mb:", "height150":":HGT:150 mb:", "height200":":HGT:200 mb:", "height250":":HGT:250 mb:",
"height300":":HGT:300 mb:", "height400":":HGT:400 mb:", "height500":":HGT:500 mb:", "height600":":HGT:600 mb:", "height700":":HGT:700 mb:",
"height850":":HGT:850 mb:", "height925":":HGT:925 mb:", "height1000":":HGT:1000 mb:", "height1070":":HGT:surface:",
"temp70":":TMP:70 mb:", "temp100":":TMP:100 mb:", "temp150":":TMP:150 mb:", "temp200":":TMP:200 mb:", "temp250":":TMP:250 mb:",
"temp300":":TMP:300 mb:", "temp400":":TMP:400 mb:", "temp500":":TMP:500 mb:", "temp600":":TMP:600 mb:", "temp700":":TMP:700 mb:",
"temp850":":TMP:850 mb:", "temp925":":TMP:925 mb:", "temp1000":":TMP:1000 mb:", "temp1070":":TMP:0.995 sigma level:",
"relh100":":RH:100 mb:", "relh70":":RH:70 mb:", "relh150":":RH:150 mb:", "relh200":":RH:200 mb:", "relh250":":RH:250 mb:",
"relh300":":RH:300 mb:", "relh400":":RH:400 mb:", "relh500":":RH:500 mb:", "relh600":":RH:600 mb:", "relh700":":RH:700 mb:",
"relh850":":RH:850 mb:", "relh925":":RH:925 mb:", "relh1000":":RH:1000 mb:", "relh1070":":RH:0.995 sigma level:",
"p1070":":PRMSL:mean sea level:"
}
DTFormat="%Y%m%d%H%M%S%f"
erad=6371.0 # Radius of Earth
dtr=math.pi/180.0 # Degrees to radians conversion
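# Floor a datetime to the most recent synoptic hour (00/06/12/18 UTC), zeroing minutes/seconds.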
def last_synoptic_datetime(dt):
hours=[6,12,18]
synHour=0
for hour in hours:
if dt.hour >= hour:
synHour=hour
else:
break
return(dt.replace(hour=synHour,minute=0,second=0,microsecond=0))
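# Smallest synoptic hour (06/12/18) strictly after dt.hour; note it falls back to
# hour 00 of the same day (not the next day) when dt.hour >= 18.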
def next_synoptic_datetime(dt):
hours=[18,12,6]
synHour=0
for hour in hours:
if dt.hour < hour:
synHour=hour
else:
break
return(dt.replace(hour=synHour,minute=0,second=0,microsecond=0))
def datetimes(jcsDTS,jceDTS):
try:
jcsDTG=datetime.datetime.strptime(jcsDTS,DTFormat)
jceDTG=datetime.datetime.strptime(jceDTS,DTFormat)
except:
msg="Time: {} invalid format (YYYYMMDDHHMMSSS)".format(jcsDTS)
utils.error(LOG,msg,error_codes.EX_IOERR)
try:
initDTG=jceDTG.replace(minute=0,second=0,microsecond=0) # Last Hour floor e.g. 5:05:23 -> 5:00:00
lastSynpDTG=last_synoptic_datetime(initDTG) # 0,6,12,18
synpDelta=int((initDTG-lastSynpDTG).total_seconds() // 3600) # Synoptic Hour Delta
datetimes={
"jcsDTG":jcsDTG,
"jceDTG":jceDTG,
"initDTG":initDTG,
"synpDTG":lastSynpDTG,
"mirsSecsAfter":mirsTimeQueryOffsets[synpDelta]['after'],
"mirsSecsBefore":mirsTimeQueryOffsets[synpDelta]['before']
}
except:
msg="Problem determining datetimes"
utils.error(LOG,msg,error_codes.EX_IOERR)
return(datetimes)
def datalinks(metadata,datasetKeys):
# Fill datalinks with references to metadata objects by dataname and link value
dataLinks={}
for dataname in datasetKeys:
datasetKey=datasetKeys[dataname] #Value containing object property key that links metadata (e.g. ATMS 'startDT')
for dataset in metadata[dataname]:
linkID=dataset.get(datasetKey) #Unique value (linkID) common between dataset files such as 'startDT' e.g. 20170601120000
# print("dataname: {} datasetKey: {} linkID: {} ".format(dataname,datasetKey,linkID))
if linkID not in dataLinks:
dataLinks[linkID]={}
dataLinks[linkID][dataname]=dataset
#print("linkID: {} dataname: {} filename: {}".format(linkID,dataname,dataLinks[linkID][dataname].get("filename")))
#for linkID in dataLinks:
# print dataLinks[linkID].keys()
# for dataname in dataLinks[linkID]:
# print("linkID: {} dataname: {} filename: {}".format(linkID,dataname,dataLinks[linkID][dataname].get("filename")))
# Keep only datalinks that have references to all datasets
deleteLinks=[]
for linkID in dataLinks:
for dataname in datasetKeys:
if dataname not in dataLinks[linkID]:
deleteLinks.append(linkID)
# dataset=dataLinks[linkID]
# if not all(dataname in dataset for dataname in datasetKeys):
# deleteLinks.append(linkID)
for linkID in deleteLinks:
#print("Deleting: {}".format(linkID))
dataLinks.pop(linkID,None)
return(dataLinks)
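# Convert one GFS GRIB file into the flat binary "pack" format: wgrib dumps each
# required field to a raw .bin file, then bin2pack merges them for the given
# date/time/forecast hour.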
def grib2pack(gribDate,gribTime,gribFhour,gribFile,wgribExe,bin2packExe,logDir):
gribString="{}{}_F{}".format(gribDate,gribTime,gribFhour)
LOG.info("Extracting binary fields from grib file: {}".format(gribFile))
for field in gfsGribFields:
pattern=gfsGribFields[field]
binFile="{}_{}.bin".format(gribString,field)
# Extract grids from grib files via WGRIB (needs to be replaced with grib_api module)
commandList=[wgribExe]
commandArgs=[]
commandArgs.extend(["-match",pattern,"-no_header",gribFile,"-order","raw","-bin",binFile])
commandID=field
stdoutFile=os.path.join(logDir,"wgrib_{}_{}.log".format(gribString,field))
stderrFile=os.path.join(logDir,"wgrib_{}_{}.err".format(gribString,field))
LOG.info("Running wgrib2")
LOG.debug("Executing: {}".format(" ".join(commandList+commandArgs)))
if not utils.execute(commandList,commandArgs,commandID,logDir,stdoutFile=stdoutFile,stderrFile=stderrFile):
LOG.warning("Problem executing {}".format(" ".join(commandList+commandArgs)))
LOG.warning("See sub-process log file: {}".format(stdoutFile))
LOG.warning("See sub-process error file: {}".format(stderrFile))
return(False)
# Combine binanry files to pack file
commandList=[bin2packExe]
commandArgs=[]
commandArgs.extend([gribDate,gribTime,gribFhour])
commandID="bin2pack"
stdoutFile=os.path.join(logDir,"bin2pack_{}.log".format(gribString))
stderrFile=os.path.join(logDir,"bin2pack_{}.err".format(gribString))
LOG.info("Running bin2pack")
LOG.info("Executing: {}".format(" ".join(commandList+commandArgs)))
if not utils.execute(commandList,commandArgs,commandID,logDir,stdoutFile=stdoutFile,stderrFile=stderrFile):
LOG.warning("Problem executing {}".format(" ".join(commandList+commandArgs)))
LOG.warning("See sub-process log file: {}".format(stdoutFile))
LOG.warning("See sub-process error file: {}".format(stderrFile))
return(False)
return(True)
def getInputVars(filepath,column,value):
# Fix for bad column name 'DELTA T' with space
names=['DATE','TIME','DELTA_T','LATITUDE','LONGITUDE',\
'DIRECTION','SPEED','RMAX','VMAX','NAME','BOGUS']
if column not in names:
msg="Column {} does not exist in short term track input table: {}".format(column,filepath)
utils.error(LOG,msg,error_codes.EX_IOERR)
if os.path.isfile(filepath):
with open(filepath) as f:
inputDF = pd.read_table(f, sep='\s+', index_col=0, header=None, names=names, skiprows=3, parse_dates={"DATETIME":[0,1]})
inputDF=inputDF.drop('BOGUS',axis=1)
rows=inputDF.loc[inputDF[column] == value].to_dict(orient="index")
else:
msg="File doesn't exist: {}".format(filepath)
utils.error(LOG,msg,error_codes.EX_IOERR)
return(rows)
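# Pull the DELTA_T == 0 (analysis-time) row from the short-term track table and
# convert an east-positive 0-360 longitude to the -180..180 convention.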
def getInputAnalysisVars(filepath):
rows=getInputVars(filepath,'DELTA_T',0)
varDTG=rows.keys()[0] # Assume only one
vars={}
for col in rows[varDTG]:
vars[col]=rows[varDTG][col]
vars['DTG']=varDTG
if vars['LONGITUDE'] > 180:
vars['LONGITUDE']=(360-vars['LONGITUDE'])*-1
return(vars)
def getLinkFilenames(dataLink, inputDatasets, inputDatasetKey, inputFilePaths, outputDataname):
# Get dataset filenames, linkIDs
dataFileLinks={}
for dataset in inputDatasets:
dataFileLinks[dataset.get('filename')]=dataset.get(inputDatasetKey)
# Match input filenames to filenames
outputFilenames=[]
for inputFilePath in inputFilePaths:
inputFile=os.path.basename(inputFilePath)
if inputFile in dataFileLinks:
try:
linkID=dataFileLinks[inputFile]
outputFile=dataLink[linkID][outputDataname].get('filename')
outFilename=os.path.join(dataLink[linkID][outputDataname].get('path'),outputFile)
outputFilenames.append(outFilename)
LOG.info("Linked Files input: {} output: {}".format(inputFile,outputFile))
except:
LOG.warning("No matching input file: {} in output: {}".format(inputFile,outputDataname))
return(outputFilenames)
def extractTarFiles(tarCommand,tarFile,stringID,logDir):
# Extract files from tar file
LOG.info("Extracting {} data files from archive file: {}".format(stringID,tarFile))
commandList=[tarCommand]
commandArgs=[]
commandArgs.extend(["--warning=no-timestamp","--strip-components=1","-xvf",tarFile])
commandID="tar_{}".format(stringID)
stdoutFile=os.path.join(logDir,"{}.stdout".format(commandID))
stderrFile=os.path.join(logDir,"{}.stderr".format(commandID))
LOG.debug("Executing: {}".format(" ".join(commandList+commandArgs)))
if not utils.execute(commandList,commandArgs,commandID,logDir,stdoutFile=stdoutFile,stderrFile=stderrFile):
LOG.warning("Problem executing {}".format(" ".join(commandList+commandArgs)))
LOG.warning("See sub-process log file: {}".format(stdoutFile))
LOG.warning("See sub-process error file: {}".format(stderrFile))
return(False)
return(True)
def poolType(value,dtype):
pValue=None
if dtype=='int':
pValue=int(value)
elif dtype=='float':
pValue=float(value)
else:
msg="Unknown Pool Type: {}".format(dtype)
utils.error(LOG,msg,error_codes.EX_IOERR)
return(pValue)
def readPoolTextFile(filename):
try:
fh=open(filename,"r")
rank=int(fh.readline().rstrip().split(":")[1])
dimsString=fh.readline().rstrip().split(":")[1]
dims=map(int,dimsString.split(","))
dtype=fh.readline().rstrip().split(":")[1]
nRecords=reduce(operator.mul, dims, 1)
records=[]
for line in fh:
fields=line.rstrip().split(',')
record=map(int,fields[:-1]) # convert indexes from string to int
record.append(poolType(fields[-1],dtype))
records.append(record)
records=map(list,zip(*records)) # flip dimenstions
except:
LOG.warning("Problem reading pool text file: {}".format(filename))
ptfDict={
'rank':rank,
'dims':dims,
'type':dtype,
'records':records
}
return(ptfDict)
def writePoolTextFile(ptfDict,filename):
try:
newFH=open(filename,"w")
newFH.write("rank:{:03d}\n".format(ptfDict['rank']))
newFH.write("dimensions:{}\n".format(','.join(map(lambda x:"{:09d}".format(x), ptfDict['dims']))))
newFH.write("type:{}\n".format(ptfDict['type']))
records=numpy.array(ptfDict['records'][-1]).reshape(ptfDict['dims'])
for index,npValue in numpy.ndenumerate(records):
idxStr=','.join(map(lambda i:"{:09d}".format(i),index))
value=npValue.astype(ptfDict['type'])
newFH.write("{},{:015.5f}\n".format(idxStr,value))
newFH.close()
except:
LOG.warning("Problem writing pool text file: {}".format(filename))
return False
return True
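# Expand an epoch-seconds pool text file into one pool file per date/time component
# (years.txt, months.txt, days.txt, hours.txt, minutes.txt, seconds.txt).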
def createTimeFieldFiles(timeFilename):
ptfDict=readPoolTextFile(timeFilename) # index:records[0], epoch seconds record[1]
timeVars={
"years":"%Y",
"months":"%m",
"days":"%d",
"hours":"%H",
"minutes":"%M",
"seconds":"%S"
}
timeFiles={}
timeFHs={}
for timeVar in timeVars:
timeFiles[timeVar]="{}.txt".format(timeVar)
timeFHs[timeVar]=open(timeFiles[timeVar],"w")
timeFHs[timeVar].write("rank:{:03d}\n".format(ptfDict['rank']))
timeFHs[timeVar].write("dimensions:{}\n".format(','.join(map(lambda x:"{:09d}".format(x), ptfDict['dims']))))
timeFHs[timeVar].write("type:int\n")
records=ptfDict['records']
for rn in xrange(0,ptfDict['dims'][-1]):
DTG=datetime.datetime.fromtimestamp(float(records[1][rn]))
for timeVar in timeVars:
timeFHs[timeVar].write("{:09d},{:09d}\n".format(records[0][rn],int(DTG.strftime(timeVars[timeVar]))))
for timeVar in timeVars:
timeFHs[timeVar].close()
return True
def scaleOffsetThreshPoolTextFile(scale,offset,lowThresh,highThresh,inputMissing,outputMissing,filename,newfilename):
try:
ptfDict=readPoolTextFile(filename)
values=ptfDict['records'][-1]
for index in xrange(len(values)):
values[index]=scaleOffsetThreshValue(values[index],scale,offset,lowThresh,highThresh,inputMissing,outputMissing)
ptfDict['type']='float'
writePoolTextFile(ptfDict,newfilename)
except:
LOG.warning("Problem with scaleOffsetThreshPoolTextFile")
return False
return True
def scaleOffsetThreshValue(value,scale,offset,lowThresh,highThresh,inputMissing,outputMissing):
newValue=None
if value != inputMissing:
newValue=(value*scale)+offset
if newValue < lowThresh or newValue > highThresh:
newValue = outputMissing
else:
newValue=outputMissing
return(newValue)
def createCOORTIMES(coordFile,timeFile,satIdent,instrIdent,coortimeFile):
try:
with open(coortimeFile,'wb') as wfd:
for f in [coordFile,timeFile]:
with open(f,'rb') as fd:
shutil.copyfileobj(fd, wfd)
wfd.write("{}\n{}\n".format(satIdent,instrIdent))
except:
return False
return True
|
[
"[email protected]"
] | |
9ccb7264a662062475936c66c4c5e44cd9570c80
|
56c35b477af8eb5972709a703f6f63f1eeed4672
|
/CombineDetailsToReviews.py
|
7f19d91b2269440331246f0103d1f8995e189c40
|
[] |
no_license
|
abhishekkshk68/Play-Store-Deeplearning-Analysis
|
a8e47152798073b3a8aa8b274a6c78813b1ef0ec
|
4f5c2a7011056d31808ea6f66cc75efba2de43e3
|
refs/heads/master
| 2020-06-01T00:45:48.852401 | 2019-06-10T15:55:12 | 2019-06-10T15:55:12 | 190,565,136 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,944 |
py
|
#Combine Total Average Rating , Installs , Total Number of Reviews , Required Android Version , Main Category Unique Val as per Application ID.
#Required Android Version
ReqAndroidVer_Apps=data_from_CleanedDetails[['required_android_version','app_id']]
ReqAndroidVer_Apps_New=ReqAndroidVer_Apps
ReqAndroidVer_Apps_List=ReqAndroidVer_Apps_New.values.T.tolist()
print("**************************************************")
print('ReqAndroidVer_Apps_List length ::',len(ReqAndroidVer_Apps_List))
DetailsLen=int(7762)
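# Helper lookups below scan the transposed Details columns for a matching app_id
# and return the paired value (e.g. required_android_version) for that app.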
def HelpReqAndroidVer(z_app):
j=0
i=1
reqandroid_ver=0
while j < (DetailsLen):
if((ReqAndroidVer_Apps_List[i][j])==z_app):
reqandroid_ver=ReqAndroidVer_Apps_List[i-1][j]
#print('reqandroid_ver:',reqandroid_ver)
break
elif(((ReqAndroidVer_Apps_List[i][j])!=z_app) & (j<DetailsLen)):
j=j+1
continue
return reqandroid_ver
data_from_Rev['required_android_version'] = data_from_Rev['rev_app_id'].apply(HelpReqAndroidVer)
#Append Broad Categories From Details Sheet To Reviews Sheet .
#data_from_CleanedReviews :: Dataframe consisting of Cleaned Reviews Sheet
#data_from_CleanedDetails :: Dataframe consisting of Cleaned Details Sheet
##Reading From Cleaned Details sheet and combining important columns to Cleaned Reviews Sheet.
fetched_0=data_from_CleanedDetails.loc[data_from_CleanedDetails['Category_UniqueVal'] == 0]
App_id_Cat0=fetched_0.app_id
List_App_id_Cat0=App_id_Cat0.unique()
List_App_id_Cat0_cleaned = [item.replace(' ',',') for item in List_App_id_Cat0]
fetched_1=data_from_CleanedDetails.loc[data_from_CleanedDetails['Category_UniqueVal'] == 1]
List_App_id_Cat1=fetched_1.app_id.unique()
List_App_id_Cat1_cleaned = [item.replace(' ',',') for item in List_App_id_Cat1]
fetched_2=data_from_CleanedDetails.loc[data_from_CleanedDetails['Category_UniqueVal'] == 2]
List_App_id_Cat2=fetched_2.app_id.unique()
List_App_id_Cat2_cleaned = [item.replace(' ',',') for item in List_App_id_Cat2]
fetched_3=data_from_CleanedDetails.loc[data_from_CleanedDetails['Category_UniqueVal'] == 3]
List_App_id_Cat3=fetched_3.app_id.unique()
List_App_id_Cat3_cleaned = [item.replace(' ',',') for item in List_App_id_Cat3]
fetched_4=data_from_CleanedDetails.loc[data_from_CleanedDetails['Category_UniqueVal'] == 4]
List_App_id_Cat4=fetched_4.app_id.unique()
List_App_id_Cat4_cleaned = [item.replace(' ',',') for item in List_App_id_Cat4]
fetched_5=data_from_CleanedDetails.loc[data_from_CleanedDetails['Category_UniqueVal'] == 5]
List_App_id_Cat5=fetched_5.app_id.unique()
List_App_id_Cat5_cleaned = [item.replace(' ',',') for item in List_App_id_Cat5]
fetched_6=data_from_CleanedDetails.loc[data_from_CleanedDetails['Category_UniqueVal'] == 6]
List_App_id_Cat6=fetched_6.app_id.unique()
List_App_id_Cat6_cleaned = [item.replace(' ',',') for item in List_App_id_Cat6]
fetched_7=data_from_CleanedDetails.loc[data_from_CleanedDetails['Category_UniqueVal'] == 7]
List_App_id_Cat7=fetched_7.app_id.unique()
List_App_id_Cat7_cleaned = [item.replace(' ',',') for item in List_App_id_Cat7]
fetched_8=data_from_CleanedDetails.loc[data_from_CleanedDetails['Category_UniqueVal'] == 8]
List_App_id_Cat8=fetched_8.app_id.unique()
List_App_id_Cat8_cleaned = [item.replace(' ',',') for item in List_App_id_Cat8]
fetched_9=data_from_CleanedDetails.loc[data_from_CleanedDetails['Category_UniqueVal'] == 9]
List_App_id_Cat9=fetched_9.app_id.unique()
List_App_id_Cat9_cleaned = [item.replace(' ',',') for item in List_App_id_Cat9]
fetched_10=data_from_CleanedDetails.loc[data_from_CleanedDetails['Category_UniqueVal'] == 10]
List_App_id_Cat10=fetched_10.app_id.unique()
List_App_id_Cat10_cleaned = [item.replace(' ',',') for item in List_App_id_Cat10]
fetched_11=data_from_CleanedDetails.loc[data_from_CleanedDetails['Category_UniqueVal'] == 11]
List_App_id_Cat11=fetched_11.app_id.unique()
List_App_id_Cat11_cleaned = [item.replace(' ',',') for item in List_App_id_Cat11]
fetched_12=data_from_CleanedDetails.loc[data_from_CleanedDetails['Category_UniqueVal'] == 12]
List_App_id_Cat12=fetched_12.app_id.unique()
List_App_id_Cat12_cleaned = [item.replace(' ',',') for item in List_App_id_Cat12]
#Adding Main_Category_UniqueVal column
def CheckCat(App_Ids_Reviews):
if App_Ids_Reviews in List_App_id_Cat0_cleaned:
return 0
elif App_Ids_Reviews in List_App_id_Cat1_cleaned:
return 1
elif App_Ids_Reviews in List_App_id_Cat2_cleaned:
return 2
elif App_Ids_Reviews in List_App_id_Cat3_cleaned:
return 3
elif App_Ids_Reviews in List_App_id_Cat4_cleaned:
return 4
elif App_Ids_Reviews in List_App_id_Cat5_cleaned:
return 5
elif App_Ids_Reviews in List_App_id_Cat6_cleaned:
return 6
elif App_Ids_Reviews in List_App_id_Cat7_cleaned:
return 7
elif App_Ids_Reviews in List_App_id_Cat8_cleaned:
return 8
elif App_Ids_Reviews in List_App_id_Cat9_cleaned:
return 9
elif App_Ids_Reviews in List_App_id_Cat10_cleaned:
return 10
elif App_Ids_Reviews in List_App_id_Cat11_cleaned:
return 11
elif App_Ids_Reviews in List_App_id_Cat12_cleaned:
return 12
else:
return 20
data_from_CleanedReviews['Main_Category_UniqueVal'] = data_from_CleanedReviews['rev_app_id'].map(CheckCat)
print("Added unique Category to Reviews sheet")
#Average Rate and Review Count of Apps
ReviewCount_Ratings_Apps=data_from_CleanedDetails[['reviews','app_id','score']]
ReviewCount_Ratings_Apps_1=ReviewCount_Ratings_Apps
RevRatAppList=ReviewCount_Ratings_Apps_1.values.T.tolist()
print('RevRatAppList length ::',len(RevRatAppList))
DetailsLen=int(10404)
def HelpReviews(z_app):
j=0
i=1
    revno=0  # default when no matching app_id is found
while j < (DetailsLen):
if((RevRatAppList[i][j])==z_app):
revno=RevRatAppList[i-1][j]
break
elif(((RevRatAppList[i][j])!=z_app) & (j<DetailsLen)):
j=j+1
continue
elif(((RevRatAppList[i][j])!=z_app) & (j==DetailsLen-1)):
revno=0
break
return revno
Implementation_df_1['TotalNumOfReviews'] = Implementation_df_1['rev_app_id'].apply(HelpReviews)
print('RevRatAppList length ::',len(RevRatAppList))
DetailsLen=int(10404)
def HelpRatings(z_app):
j=0
i=1
    ratingno=0  # default when no matching app_id is found
while j < (DetailsLen):
if((RevRatAppList[i][j])==z_app ):
ratingno=RevRatAppList[i+1][j]
break
elif(((RevRatAppList[i][j])!=z_app) & (j<DetailsLen)):
j=j+1
continue
elif(((RevRatAppList[i][j])!=z_app) & (j==DetailsLen-1)):
ratingno=0
break
return ratingno
Implementation_df_1['TotalAverageRating'] = Implementation_df_1['rev_app_id'].apply(HelpRatings)
|
[
"[email protected]"
] | |
cff93f064b230f06153b1a99ce69e4f99f7623ed
|
82f5a3e139034da846db0c3516848e3a797a52f0
|
/sixteen.py
|
16eeac1b1be9cf18646774eb3f75c61a77c5b307
|
[] |
no_license
|
Yanl05/LeetCode-git
|
d98f807d05d80b7df6c0a4f69cf233e25b0695b5
|
ce617247645517f15d513c29e12c7fff33e1cccf
|
refs/heads/master
| 2020-04-12T08:33:30.416069 | 2018-12-19T06:07:36 | 2018-12-19T06:07:36 | 162,388,594 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,272 |
py
|
'''
Given an array nums of n integers and a target value target, find the three
integers in nums whose sum is closest to target and return that sum.
Each input is assumed to have exactly one solution.
Example: for nums = [-1,2,1,-4] and target = 1, the closest sum is 2 (-1 + 2 + 1 = 2).
'''
class Solution:
def threeSumClosest(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
nums.sort()
print(nums)
difmin = 9999999
ans = 0
lenn = len(nums)
for i in range(lenn - 2):
left = i + 1
right = lenn - 1
while left < right:
count = nums[i] + nums[left] + nums[right] -target
if count == 0:
return target
else:
dif = abs(count)
if dif <= difmin:
ans = count + target
difmin = dif
if count + target < target:
left += 1
else:
right -= 1
return ans
print(Solution().threeSumClosest([-1,2,1,-4], 1))
|
[
"[email protected]"
] | |
afcab4259b346d2f19de83a8aec1a7b2832093a9
|
9acc826c5a80f31d5fbc179a41fb3062775678b2
|
/listings/migrations/0001_initial.py
|
2d792cc9f0d6a691dc18d251ad376fadd508d403
|
[] |
no_license
|
adace123/django-realestate
|
f2687ddc4b8ea9bca58b875b8c510557e6d1f4b8
|
e16d9918a11a7e6fd6de985786a02dc439906daf
|
refs/heads/master
| 2020-04-20T18:27:49.888357 | 2019-02-04T03:25:17 | 2019-02-04T03:25:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,842 |
py
|
# Generated by Django 2.1.2 on 2019-01-18 07:55
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('realtors', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Listing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('description', models.TextField()),
('address', models.CharField(max_length=100)),
('city', models.CharField(max_length=50)),
('zipcode', models.IntegerField(validators=[django.core.validators.MaxValueValidator(99999)])),
('is_published', models.BooleanField(default=True)),
('price', models.FloatField()),
('bedrooms', models.FloatField()),
('bathrooms', models.FloatField()),
('garage', models.IntegerField(default=1)),
('sqft', models.FloatField()),
('lot_size', models.FloatField()),
('list_date', models.DateTimeField()),
('realtor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='realtors.Realtor')),
],
),
migrations.CreateModel(
name='ListingPhoto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(default=None, upload_to='')),
('listing', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='listings.Listing')),
],
),
]
|
[
"[email protected]"
] | |
96a1a29185bb89e216a1143bf81d52bf7b007e15
|
07f6d1a07e9c1b078e9175df2050ce501b88d8c1
|
/Text/Count_Vowels.py
|
5548e24f7173d7b163ef4f1028e132429edd4db7
|
[
"MIT"
] |
permissive
|
023Sparrow/Projects
|
06413bd24dbba60fb1fd695ad02c92994a849320
|
b14fc9ac8fcf760396dbdff2f918910b5dc96742
|
refs/heads/master
| 2020-03-16T22:59:35.699661 | 2018-11-27T07:18:16 | 2018-11-27T07:18:16 | 130,497,117 | 0 | 0 | null | 2018-04-21T17:33:23 | 2018-04-21T17:33:23 | null |
UTF-8
|
Python
| false | false | 568 |
py
|
def count(w,s):
countnum = 0
for i in s:
if i == w:
countnum += 1
return countnum
def countvowels(s):
a = count('a',s)
e = count('e',s)
i = count('i',s)
o = count('o',s)
u = count('u',s)
return a+e+i+o+u
testtext = 'vnaofnoaijnavavijfvapvnpofj'
print('''there are {} vowels in vnaofnoaijnavavijfvapvnpofj.
{} a
{} e
{} i
{} o
{} u'''.format(countvowels(testtext),count('a',testtext),count('e',testtext),count('i',testtext),count('o',testtext),count('u',testtext)))
|
[
"[email protected]"
] | |
724bbe70f98b91f343a99b80f80d0d4dce28efe1
|
8cc53c1fb137cc8508ff2ec7b570b8fa8eabba14
|
/LAMARCK_ML/architectures/losses/Loss_pb2.py
|
168ff9a80d4171d4e04b0217803f53307ee929e7
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
JonasDHomburg/LAMARCK_ML
|
d1611624f8397ca267e87def5531a8854c62057b
|
0e372c908ff59effc6fd68e6477d04c4d89e6c26
|
refs/heads/master
| 2021-08-16T06:41:19.149852 | 2020-04-12T16:36:14 | 2020-04-12T16:36:14 | 209,362,976 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | true | 5,132 |
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: LAMARCK_ML/architectures/losses/Loss.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from LAMARCK_ML.data_util import Attribute_pb2 as LAMARCK__ML_dot_data__util_dot_Attribute__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='LAMARCK_ML/architectures/losses/Loss.proto',
package='LAMARCK_ML',
syntax='proto3',
serialized_pb=_b('\n*LAMARCK_ML/architectures/losses/Loss.proto\x12\nLAMARCK_ML\x1a$LAMARCK_ML/data_util/Attribute.proto\"\x84\x01\n\tLossProto\x12\x10\n\x08\x63ls_name\x18\x01 \x01(\t\x12\x0f\n\x07id_name\x18\x02 \x01(\t\x12*\n\treduction\x18\x05 \x01(\x0e\x32\x17.LAMARCK_ML.ReduceProto\x12(\n\x04\x61ttr\x18\n \x03(\x0b\x32\x1a.LAMARCK_ML.AttributeProto*^\n\x0bReduceProto\x12\x08\n\x04MEAN\x10\x00\x12\x0b\n\x07MINIMUM\x10\x01\x12\x0b\n\x07MAXIMUM\x10\x02\x12\x07\n\x03SUM\x10\x03\x12\x0b\n\x07PRODUCT\x10\x04\x12\x0c\n\x08VARIANCE\x10\x05\x12\x07\n\x03STD\x10\x06\x62\x06proto3')
,
dependencies=[LAMARCK__ML_dot_data__util_dot_Attribute__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_REDUCEPROTO = _descriptor.EnumDescriptor(
name='ReduceProto',
full_name='LAMARCK_ML.ReduceProto',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MEAN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MINIMUM', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MAXIMUM', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUM', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PRODUCT', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VARIANCE', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STD', index=6, number=6,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=231,
serialized_end=325,
)
_sym_db.RegisterEnumDescriptor(_REDUCEPROTO)
ReduceProto = enum_type_wrapper.EnumTypeWrapper(_REDUCEPROTO)
MEAN = 0
MINIMUM = 1
MAXIMUM = 2
SUM = 3
PRODUCT = 4
VARIANCE = 5
STD = 6
_LOSSPROTO = _descriptor.Descriptor(
name='LossProto',
full_name='LAMARCK_ML.LossProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cls_name', full_name='LAMARCK_ML.LossProto.cls_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='id_name', full_name='LAMARCK_ML.LossProto.id_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reduction', full_name='LAMARCK_ML.LossProto.reduction', index=2,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attr', full_name='LAMARCK_ML.LossProto.attr', index=3,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=97,
serialized_end=229,
)
_LOSSPROTO.fields_by_name['reduction'].enum_type = _REDUCEPROTO
_LOSSPROTO.fields_by_name['attr'].message_type = LAMARCK__ML_dot_data__util_dot_Attribute__pb2._ATTRIBUTEPROTO
DESCRIPTOR.message_types_by_name['LossProto'] = _LOSSPROTO
DESCRIPTOR.enum_types_by_name['ReduceProto'] = _REDUCEPROTO
LossProto = _reflection.GeneratedProtocolMessageType('LossProto', (_message.Message,), dict(
DESCRIPTOR = _LOSSPROTO,
__module__ = 'LAMARCK_ML.architectures.losses.Loss_pb2'
# @@protoc_insertion_point(class_scope:LAMARCK_ML.LossProto)
))
_sym_db.RegisterMessage(LossProto)
# @@protoc_insertion_point(module_scope)
|
[
"[email protected]"
] | |
59c09463d5b70bbc47faab715f64998ec8fc7423
|
167ce2917fd646749629fd05223d739961841a20
|
/mysite/mysite/urls.py
|
d2d1a106d8d77149928b8bbc7c25f1947082f3d4
|
[] |
no_license
|
lemon-l/Python-beginner
|
70b8e2604826d2d7c767578345ac5e3276857977
|
5e1ebfc5c4f15a81ee7b33a2336556f6b32f61c8
|
refs/heads/master
| 2023-02-16T03:22:36.535084 | 2021-01-19T09:32:21 | 2021-01-19T09:32:21 | 235,269,899 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 800 |
py
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('polls/',include('polls.urls')),
path('control/', admin.site.urls),
]
|
[
"[email protected]"
] | |
bf604c5c88de4b1652ed6c32594f61c0e84a082f
|
b6a59c78b4143441077f9ce81c9a6951687f9103
|
/quiz/common/templatetags/common_tags.py
|
c22c495f3760396d2cbf01c3943b9cb2026abee6
|
[] |
no_license
|
EkaterinaEIvanova/quiz
|
7389bd26eb891ba5a7033b91698321cbba7d2d7d
|
6f93a5d6e604f127be0d29e8eebbb07c10eb9d47
|
refs/heads/master
| 2023-03-22T00:54:27.100204 | 2021-03-10T07:35:08 | 2021-03-10T07:35:08 | 346,270,553 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 179 |
py
|
from django import template
register = template.Library()
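# Assumed template usage (after {% load common_tags %}): {% get_name_or_email user %}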
@register.simple_tag()
def get_name_or_email(user):
name = user.name if user.name else user.email
return name
|
[
"[email protected]"
] | |
348eb8ac220b6481e2b57cbfeb6ccf7416321623
|
5478a6265b2aace785b2eb805cbddcad4c07fb61
|
/src/server/app/main/models/SubmissionOutputsModel.py
|
74c7196d7a3ae3dbb6f73cf6ace353e338880fab
|
[
"MIT"
] |
permissive
|
albseb511/execode
|
352b88a47df7b000a735b6471b7c285e9355c18b
|
4dfb373ba1146ad41f7389c4b8d1a230cac5e3e1
|
refs/heads/master
| 2022-04-26T11:28:09.173801 | 2020-04-25T13:39:56 | 2020-04-25T13:39:56 | 258,724,856 | 0 | 0 |
MIT
| 2020-04-25T08:39:38 | 2020-04-25T08:39:38 | null |
UTF-8
|
Python
| false | false | 846 |
py
|
from .. import db
import datetime
class SubmissionOutputs(db.Model):
"""
    Judged output of one submission run against a single test case: the
    produced output file, the time and memory consumed, and whether the case passed.
"""
__tablename__ = "submission_outputs"
id = db.Column(db.Integer, primary_key=True)
output_file = db.Column(db.String(250), nullable=False)
time_taken = db.Column(db.Integer, nullable=False)
memory_taken = db.Column(db.Integer, nullable=False)
passed = db.Column(db.Boolean, nullable=False)
created_at = db.Column(db.DateTime(timezone=False),
nullable=False, default=datetime.datetime.now())
submission_id = db.Column(db.Integer, db.ForeignKey(
'submissions.id'), nullable=False)
test_case_id = db.Column(db.Integer, db.ForeignKey(
'test_cases.id'), nullable=False)
|
[
"[email protected]"
] | |
8c4bc2f8647439b9567f65c61e77308b8808c395
|
63b0fed007d152fe5e96640b844081c07ca20a11
|
/ABC/ABC200~ABC299/ABC275/a.py
|
8e0374be934c1e0a79209fb5d3a1594e525966c5
|
[] |
no_license
|
Nikkuniku/AtcoderProgramming
|
8ff54541c8e65d0c93ce42f3a98aec061adf2f05
|
fbaf7b40084c52e35c803b6b03346f2a06fb5367
|
refs/heads/master
| 2023-08-21T10:20:43.520468 | 2023-08-12T09:53:07 | 2023-08-12T09:53:07 | 254,373,698 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 100 |
py
|
n = int(input())
h = list(map(int, input().split()))
maxh = max(h)
ans = h.index(maxh)+1
print(ans)
|
[
"[email protected]"
] | |
d4877edd8d5a2d480e47bd50800d5ab3ebf850c1
|
411e5de8629d6449ff9aad2eeb8bb1dbd5977768
|
/AlgoExpert/array/largestRange/1.py
|
821c57749e45fe5a0fdc1c4e4747e82157bdece3
|
[
"MIT"
] |
permissive
|
Muzque/Leetcode
|
cd22a8f5a17d9bdad48f8e2e4dba84051e2fb92b
|
2c37b4426b7e8bfc1cd2a807240b0afab2051d03
|
refs/heads/master
| 2022-06-01T20:40:28.019107 | 2022-04-01T15:38:16 | 2022-04-01T15:39:24 | 129,880,002 | 1 | 1 |
MIT
| 2022-04-01T15:39:25 | 2018-04-17T09:28:02 |
Python
|
UTF-8
|
Python
| false | false | 395 |
py
|
def largestRange(array):
    # Largest Range: return [start, end] of the longest run of consecutive
    # integers contained in the array (a set lookup keeps this O(n)).
    nums = set(array)
    best_left, best_right = array[0], array[0]
    for num in array:
        if num - 1 in nums:
            continue  # not the start of a run, so it is covered from its true start
        right = num
        while right + 1 in nums:
            right += 1
        if right - num > best_right - best_left:
            best_left, best_right = num, right
    return [best_left, best_right]
|
[
"[email protected]"
] | |
df35d4e2bc4e83da4aa1b6939db8d9e229e0bd70
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/travelport/models/type_agency_profile_level_4.py
|
5ee1ce876f63b649381647bc034c48d77dea4ecb
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 |
Python
|
UTF-8
|
Python
| false | false | 318 |
py
|
from __future__ import annotations
from enum import Enum
__NAMESPACE__ = "http://www.travelport.com/schema/common_v37_0"
class TypeAgencyProfileLevel4(Enum):
"""
Profile levels in the Agency Hierarchy.
"""
AGENCY = "Agency"
BRANCH = "Branch"
BRANCH_GROUP = "BranchGroup"
AGENT = "Agent"
|
[
"[email protected]"
] | |
af9c85a29106aea32c52488a1f5cbc2dc870b522
|
a6652818efd2371f797011ca1c25be9f1d2d8db8
|
/hello/migrations/0003_remove_usercomments_img_caption.py
|
454fa863237b41b37c3973dda01f00796f9b9a12
|
[] |
no_license
|
Sulaiha-Shameena-teah/sara-bday
|
4ccef78f3c95bdf13b3cbb4a2b58b1c8ebbdfe67
|
6bde5f48e55090461cbbfceb1552680a0cdb71bb
|
refs/heads/main
| 2023-04-21T06:48:48.302078 | 2021-05-27T11:24:33 | 2021-05-27T11:24:33 | 369,998,182 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 331 |
py
|
# Generated by Django 3.1.7 on 2021-04-08 17:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('hello', '0002_usercomments'),
]
operations = [
migrations.RemoveField(
model_name='usercomments',
name='img_caption',
),
]
|
[
"https://github.com/Sulaiha-Shameena-teah"
] |
https://github.com/Sulaiha-Shameena-teah
|
1bca72762d89325a8b67cb7ba5698f05032c6c51
|
1868fa53882265e74019893897afa5667c2afd6c
|
/models/stacking_v1.py
|
2aaf616a7a7e4e4e64f10e22e1a3bd733eea9568
|
[] |
no_license
|
jinbaoziyl/CCF-Risk-Enterprises-Fund-Rasing
|
5d8e3a0204db6deb97e6d4f68a19c268a28f1f8c
|
fb93ea94f471440848b5419001871238eefb9b79
|
refs/heads/main
| 2023-01-07T07:56:05.886675 | 2020-11-05T09:48:45 | 2020-11-05T09:48:45 | 306,074,133 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,037 |
py
|
# -*- coding: utf-8 -*-
from heamy.dataset import Dataset
from heamy.estimator import Regressor, Classifier
from heamy.pipeline import ModelsPipeline
import pandas as pd
import xgboost as xgb
import datetime
from sklearn.metrics import roc_auc_score
import lightgbm as lgb
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
import numpy as np
import pickle
import time
import warnings
warnings.filterwarnings("ignore")
train_file = '../pre_data/training.pkl'
data_set = pickle.load(open(train_file,'rb'))
data_set.fillna(0.,inplace=True)
label = data_set['label'].values # ndarray
feature_list = list(data_set.columns)
feature_list.remove('id')
feature_list.remove('label')
training = data_set[feature_list].values
test_data = pickle.load(open('../pre_data/eval.pkl','rb'))
test_data.fillna(0.,inplace=True)
sub_df = test_data['id'].copy()
del test_data['id']
test_data = test_data.values
def xgb_feature(X_train, y_train, X_test, y_test=None):
# 模型参数
params = {'booster': 'gbtree',
'objective':'reg:linear',
'eval_metric' : 'rmse',
'eta': 0.02,
'max_depth': 5, # 4 3
'colsample_bytree': 0.7,#0.8
'subsample': 0.7,
'min_child_weight': 1, # 2 3
'seed': 1111,
# 'silent': True,
}
dtrain = xgb.DMatrix(X_train, label=y_train)
dvali = xgb.DMatrix(X_test)
model = xgb.train(params, dtrain, num_boost_round=800)
predict = model.predict(dvali)
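    # Min-max rescale the raw regression outputs to [0, 1] so the base models'
    # scores are on a comparable scale when stacked (the same pattern is
    # repeated in the other base-model functions below).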
minmin = min(predict)
maxmax = max(predict)
vfunc = np.vectorize(lambda x:(x-minmin)/(maxmax-minmin))
return vfunc(predict)
def xgb_feature2(X_train, y_train, X_test, y_test=None):
    # model parameters
params = {'booster': 'gbtree',
'objective':'reg:linear',
'eval_metric' : 'rmse',
'eta': 0.015,
'max_depth': 5, # 4 3
'colsample_bytree': 0.7,#0.8
'subsample': 0.7,
'min_child_weight': 1, # 2 3
'seed': 11,
# 'silent': True,
}
dtrain = xgb.DMatrix(X_train, label=y_train)
dvali = xgb.DMatrix(X_test)
model = xgb.train(params, dtrain, num_boost_round=1200)
predict = model.predict(dvali)
minmin = min(predict)
maxmax = max(predict)
vfunc = np.vectorize(lambda x:(x-minmin)/(maxmax-minmin))
return vfunc(predict)
def xgb_feature3(X_train, y_train, X_test, y_test=None):
    # model parameters
params = {'booster': 'gbtree',
'objective':'reg:linear',
'eval_metric' : 'rmse',
'eta': 0.01,
'max_depth': 5, # 4 3
'colsample_bytree': 0.7,#0.8
'subsample': 0.7,
'min_child_weight': 1, # 2 3
'seed': 1,
# 'silent': True,
}
dtrain = xgb.DMatrix(X_train, label=y_train)
dvali = xgb.DMatrix(X_test)
model = xgb.train(params, dtrain, num_boost_round=2000)
predict = model.predict(dvali)
minmin = min(predict)
maxmax = max(predict)
vfunc = np.vectorize(lambda x:(x-minmin)/(maxmax-minmin))
return vfunc(predict)
def et_model(X_train, y_train, X_test, y_test=None):
model = ExtraTreesClassifier(max_features = 'log2', n_estimators = 1000 , n_jobs = -1).fit(X_train,y_train)
return model.predict_proba(X_test)[:,1]
def gbdt_model(X_train, y_train, X_test, y_test=None):
model = GradientBoostingClassifier(learning_rate = 0.02, max_features = 0.7, n_estimators = 700 , max_depth = 5).fit(X_train,y_train)
predict = model.predict_proba(X_test)[:,1]
minmin = min(predict)
maxmax = max(predict)
vfunc = np.vectorize(lambda x:(x-minmin)/(maxmax-minmin))
return vfunc(predict)
def logistic_model(X_train, y_train, X_test, y_test=None):
model = LogisticRegression(penalty = 'l2').fit(X_train,y_train)
return model.predict_proba(X_test)[:,1]
def lgb_feature(X_train, y_train, X_test, y_test=None):
lgb_train = lgb.Dataset(X_train, y_train)
lgb_test = lgb.Dataset(X_test)
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'regression',
'metric':'mse',
'num_leaves': 25,
'learning_rate': 0.01,
'feature_fraction': 0.7,
'bagging_fraction': 0.7,
'bagging_freq': 5,
'min_data_in_leaf':5,
'max_bin':200,
'verbose': 0,
}
gbm = lgb.train(params,
lgb_train,
num_boost_round=2000)
predict = gbm.predict(X_test)
minmin = min(predict)
maxmax = max(predict)
vfunc = np.vectorize(lambda x:(x-minmin)/(maxmax-minmin))
return vfunc(predict)
lgb_dataset = Dataset(training,label,test_data,use_cache=False)
xgb_dataset = Dataset(X_train=training,y_train=label,X_test=test_data,y_test=None,use_cache=False)
#heamy
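# pipeline.stack(k=5) builds 5-fold out-of-fold predictions from the five base
# regressors; the LinearRegression meta-model (fit without an intercept) is then
# trained on those stacked predictions to produce the final scores.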
model_xgb = Regressor(dataset=xgb_dataset, estimator=xgb_feature,name='xgb',use_cache=False)
model_xgb2 = Regressor(dataset=xgb_dataset, estimator=xgb_feature2,name='xgb2',use_cache=False)
model_xgb3 = Regressor(dataset=xgb_dataset, estimator=xgb_feature3,name='xgb3',use_cache=False)
model_lgb = Regressor(dataset=lgb_dataset, estimator=lgb_feature,name='lgb',use_cache=False)
model_gbdt = Regressor(dataset=xgb_dataset, estimator=gbdt_model,name='gbdt',use_cache=False)
pipeline = ModelsPipeline(model_xgb,model_xgb2,model_xgb3,model_lgb,model_gbdt)
stack_ds = pipeline.stack(k=5, seed=111, add_diff=False, full_test=True)
stacker = Regressor(dataset=stack_ds, estimator=LinearRegression,parameters={'fit_intercept': False})
predict_result = stacker.predict()
print(predict_result)
pcol =pd.DataFrame(list(predict_result))
sub_df = pd.concat([sub_df, pcol], axis=1)
sub_df.to_csv('submission.csv',sep=',',header=False,index=False,encoding='utf8')
|
[
"[email protected]"
] | |
e8eefc2a5b75f134fcd23181b0c301b932082508
|
14f9b7a41bd3b93d820ca8f0a0507c673e2902db
|
/game.py
|
0fc4e4cff5ac7fc1a52d6f53368cf33c3c21bb46
|
[] |
no_license
|
adamsudol36/Battleship-game
|
898e23ac7411b3aefd6cb4057f338cdbac9c9805
|
831817d2b416ff06d5952e144b0a29a405d030cb
|
refs/heads/master
| 2023-04-22T02:13:27.706301 | 2021-05-10T20:59:12 | 2021-05-10T20:59:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,334 |
py
|
from matrix_generation import (
show_actual_board,
make_player_matrix,
generate_computer_matrix,
get_basic_info
)
from game_functions import (
show_actual_state,
better_bot_shoot,
player_shoot
)
from time import sleep
def main():
'''
Whole game function.
Gets info from player. Creates bot and player fleets.
Conducts whole game process and when needed, prints
messages to player.
'''
introduction()
dim, size_list = get_basic_info()
Bot_Matrix, Bot_Fleet = generate_computer_matrix(size_list, dim)
Player_Matrix, Player_Fleet = make_player_matrix(size_list, dim)
print('Grajmy!')
show_actual_state(Player_Matrix, Bot_Matrix, dim)
shot_list = []
while Player_Fleet.if_fleet_is() and Bot_Fleet.if_fleet_is():
result_b, result_p = True, True
while result_p:
Bot_Matrix, Bot_Fleet, result_p = player_shoot(Bot_Matrix,
Bot_Fleet, dim)
if not Bot_Fleet.if_fleet_is():
show_actual_state(Player_Matrix, Bot_Matrix, dim)
print('Wygrałeś!')
print('Koniec gry :)')
return
else:
print(show_actual_board(Bot_Matrix.get_matrix(), dim, True))
print('Gramy dalej')
else:
show_actual_state(Player_Matrix, Bot_Matrix, dim)
while result_b:
(Player_Matrix, result_b,
Player_Fleet, shot_list) = better_bot_shoot(Player_Matrix,
Player_Fleet,
shot_list, dim)
if not Player_Fleet.if_fleet_is():
show_actual_state(Player_Matrix, Bot_Matrix, dim)
print('Przegrałeś!')
print('Koniec gry :)')
return
else:
show_actual_state(Player_Matrix, Bot_Matrix, dim)
else:
show_actual_board(Player_Matrix.get_matrix(), dim)
print('Gramy dalej')
def introduction():
'''
Makes introduction in polish
'''
print('Dzień dobry!')
print('Zaczynasz grę w statki.')
print('Na początek kilka reguł.')
print('Statki nie mogą leżeć obok siebie ani się przecinać.')
print('Czytaj, nie mogą się nakładać ani stykać rogami czy bokami.')
print('Czytaj uważnie co gra do Ciebie pisze.')
print('Ilość statków i wielkość planszy będzie zależała od Twojego wyboru')
print('Będziesz miał do dyspozycji statek o największej ilości kadłubów')
print('o jeden mniej statków o ilości kadłubów mniejszej o jeden etc.')
print('Jeżeli generowanie planszy będzie trwało zbyt długo,')
print('zresetuj grę.')
print('W odpowiednich momentach będzie wyświetlana Twoja plansza')
print('bądź plansza przeciwnika albo obie:')
print('Twoja po lewej, bota po prawej stronie.')
print('A oto oznaczenia:')
print('□ - to oznacza, że w tym miejscu jest Twój statek')
print('⊠ - to oznacza, że statek został trafiony.')
print('○ - to oznacza, że w tym miejscu zostało spudłowane')
print('Miłej gry życzę :)')
sleep(20.)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
ac1791beab3601ae73fcf4a4b76e4d448860528f
|
80038a1cc4efc01de7cc4c5375af4628f84798a8
|
/SpangAP/pizzaproject/stores/urls.py
|
09682bd54f39bb9668a19944c05f19a0dfc8e49b
|
[] |
no_license
|
PeteSpangler/SpangApi
|
8e3477a03a41a435f77afd0293f00623723cfcb2
|
b38a237387ff2ebc014e9fa4161f60b798fd197c
|
refs/heads/master
| 2023-03-20T10:21:02.399422 | 2021-03-21T01:15:14 | 2021-03-21T01:15:14 | 345,256,647 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 551 |
py
|
from django.urls import path
from . import views
urlpatterns = [ path('', views.PizzeriaListAPIView.as_view(), name='pizzeria_list'),
path('<int:id>/', views.PizzeriaRetrieveAPIView.as_view(), name='pizzeria_detail'),
path('create/', views.PizzeriaCreateAPIView.as_view(), name='pizzeria_create'),
path('update/<int:id>/', views.PizzeriaRetrieveUpdateAPIView.as_view(), name='pizzeria_update'),
path('delete/<int:id>/', views.PizzeriaDestroyAPIView.as_view(), name='pizzeria_delete'), ]
|
[
"[email protected]"
] | |
6beae38fe42d7b43c779382d774f761cdd035349
|
39a64a85dfd9e2f3805061c1126b8e96c75a7643
|
/step_seven.py
|
e0adb7ce4d46a903eed8c50bb4f65007fd197247
|
[] |
no_license
|
juliapochynok/Rubik-sCube_withLA
|
dde1dad5f786341e32670b48cc3ba80db181f672
|
57b9d187a00142bfbd0dd149c25f94316be3af22
|
refs/heads/master
| 2022-10-29T23:02:23.828939 | 2020-06-17T19:46:04 | 2020-06-17T19:46:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,072 |
py
|
from solve import find_cubie
from step_six import is_fish, is_H, find_unsolved_edges
def solve_down_edges(cube, result_steps):
lst = find_unsolved_edges(cube)
if len(lst) == 0:
return
if is_H(cube, lst):
H_method(cube, lst, result_steps)
elif is_fish(cube, lst):
fish_method(cube, lst, result_steps)
def fish_method(cube, lst, result_steps):
pos1 = find_cubie(cube, lst[0])
pos2 = find_cubie(cube, lst[1])
sum_el = cube.ideal_cube[pos1[0]].face[pos1[1]][pos1[2]] + cube.ideal_cube[pos2[0]].face[pos2[1]][pos2[2]]
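    # The sum of the two edges' ideal-cube sticker values identifies which
    # orientation the cube is in; each case maps to a fixed move sequence
    # (the trailing comments show the two addends).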
fish_method_steps = {92 : ["b", "l", "r", "d", "U", "f", "f", "d", "U", "d", "U", "b", "d", "d", "B", "D", "u", "D", "u", "f", "f", "D", "u", "R", "d", "d", "L", "B"], # 47+45
87: ["r", "b", "f", "d", "U", "l", "l", "d", "U", "d", "U", "r", "d", "d", "R", "D", "u", "D", "u", "l", "l", "D", "u", "F", "d", "d", "B", "R"], # 42+45
86: ["f", "r", "l", "d", "U", "b", "b", "d", "U", "d", "U", "f", "d", "d", "F", "D", "u", "D", "u", "b", "b", "D", "u", "L", "d", "d", "R", "F"], # 42+44
91: ["l", "f", "b", "d", "U", "r", "r", "d", "U", "d", "U", "l", "d", "d", "L", "D", "u", "D", "u", "r", "r", "D", "u", "B", "d", "d", "F", "L"]} # 44+47
steps = fish_method_steps[sum_el]
for move in steps:
cube.rotate(move)
result_steps.append(move)
def H_method(cube, lst, result_steps):
pos1 = find_cubie(cube, lst[0])
pos2 = find_cubie(cube, lst[1])
maxel = max(cube.ideal_cube[pos1[0]].face[pos1[1]][pos1[2]], cube.ideal_cube[pos2[0]].face[pos2[1]][pos2[2]])
fish_method_steps = {45 : ["r", "d", "U", "f", "f", "d", "U", "d", "U", "b", "d", "d", "B", "D", "u", "D", "u", "f", "f", "D", "u", "R", "d", "d"], # 44+45
47 : ["f", "d", "U", "l", "l", "d", "U", "d", "U", "r", "d", "d", "R", "D", "u", "D", "u", "l", "l", "D", "u", "F", "d", "d"]} # 42+47
steps = fish_method_steps[maxel]
for move in steps:
cube.rotate(move)
result_steps.append(move)
|
[
"[email protected]"
] | |
7e88584d8c9462e3c4b2497010a721f9e983291c
|
8ef898365843bd96b17967ba374f15d9bcff8fe7
|
/practica_PI/settings.py
|
a9f8511014d5ec914e9aeb9bd07c4cbe6dfce554
|
[] |
no_license
|
hbarcelos/piudc
|
0dc6d309a9a0a97671664407e2db2fec33709a0a
|
7c01fe5ad9e726ef9b00dd04a7403bf85e727ba7
|
refs/heads/master
| 2020-12-25T22:07:06.096134 | 2013-05-31T15:06:31 | 2013-05-31T15:06:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,789 |
py
|
import os
import django
# Django settings for practica_PI project.
# Because actually hard-coding absolute paths into your code would be bad...
PROJECT_DIR = os.path.dirname(__file__)
BASE_DIR = os.path.dirname(PROJECT_DIR)
APP_DIR = os.path.join(BASE_DIR, 'images')
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'db/development.db', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Madrid'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'es-ES'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
# STATIC_URL = '/static/'
STATIC_URL = '/assets/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR, "static"),
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'gg()!smtj9%0=*$j7uipx$=e&g@9kdu)vv3$m%dup%n4org0_b'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'practica_PI.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'practica_PI.wsgi.application'
TEMPLATE_DIRS = (
os.path.join(APP_DIR, 'static', 'templates'),
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'images',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
[
"[email protected]"
] | |
ad6bd82e49c1f8590dc46011936d01613bf6168c
|
1aa36d549995a2ec5ef214037d36de01563ea94b
|
/appointments/booking_request.py
|
548bff0a54528fefbfd1fd0323759e4e44af0073
|
[] |
no_license
|
vinithegit/BookingMeetings
|
9291dc95b5d9feb8944ab07aca5cf6060434874d
|
52985e921c793f9ed6c4fd699a315718a7e6e869
|
refs/heads/master
| 2020-03-09T13:54:40.568619 | 2018-04-09T19:25:37 | 2018-04-09T19:25:37 | 128,822,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 709 |
py
|
from datetime import datetime
class BookingRequest:
def __init__(self, booking_request_date, booking_request_time, booking_request_employee_id,
meeting_date, meeting_start_time, meeting_duration):
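        # Request timestamps include seconds ('%Y-%m-%d %H:%M:%S'); meeting start
        # times are only given to the minute ('%Y-%m-%d %H:%M').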
self.booking_request_date_time = datetime.strptime(booking_request_date + ' ' +
booking_request_time, '%Y-%m-%d %H:%M:%S')
self.booking_request_employee_id = booking_request_employee_id
self.meeting_start_date_time = datetime.strptime(meeting_date + ' ' + meeting_start_time,
'%Y-%m-%d %H:%M')
self.meeting_duration = int(meeting_duration)
|
[
"[email protected]"
] | |
a92eb8b71c3f2252f56375b6717c1b22b1a03928
|
aafce2cfe6a3edee05ff27dc7be7fcdb37c85e73
|
/Basic/ModuleTest.py
|
80764030dc8b142c7e2bd9186cd2c047c97364f7
|
[] |
no_license
|
kvijaya-datascientist/Python
|
cfea0fa64f7fc83c15c2fecb846b368471fad676
|
efef999b2cd15813484f74bcb0c2a328eda3aba0
|
refs/heads/master
| 2023-06-24T03:47:16.582221 | 2023-06-14T07:20:59 | 2023-06-14T07:20:59 | 247,455,811 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 283 |
py
|
"""
pip = pip install packages
pip is a tool used to install python modules
NOTE : install required module in python terminal
EX : pip install calendar , pip install matplotlib
"""
import math
print(math.sqrt(16))
print(math.pow(2,5))
import calendar
print(calendar.month(2021,6))
|
[
"[email protected]"
] | |
748c9a45f6de3823112f3b66a56ef8e9d71ef7c3
|
99c616c8262b3e95ca705420b1b93185151dae8b
|
/job_listing/job_listing/jobs/forms.py
|
d7e59a23e2f88721bcda55dc4e63e9492b9068fa
|
[
"MIT"
] |
permissive
|
mtaziz/listing
|
23edfd4274c2710a11d6743fab44c381a5fc0c8e
|
657e1dc9f2c97ddf3058a2d24155306ddf867387
|
refs/heads/master
| 2020-03-22T17:46:49.210455 | 2018-04-18T13:18:09 | 2018-04-18T13:18:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 282 |
py
|
from django.forms import ModelForm
from django import forms
from .models import JobPosting
class NewJobListing(ModelForm):
title = forms.CharField()
body = forms.CharField(widget=forms.Textarea)
class Meta:
model = JobPosting
fields = ['title', 'body']
|
[
"[email protected]"
] | |
ac1742c4a595eeb68f9fec77a4fa18f37ea7ef11
|
db745606ab7e9df7f6cd85eaff3ce100578eacfa
|
/LeetCode/lc_241_DifferentWaysToAddParentheses.py
|
1c021f675c46b3abdac98c52e442224c1ab67f43
|
[] |
no_license
|
testcoder11/Algorithms
|
2ffa3b10e46856d0cd0dc5949342ba778cbb3d16
|
497d7faf821a3609611a907cd4cab0422ae02c16
|
refs/heads/master
| 2020-03-15T03:54:29.330108 | 2017-12-05T16:01:25 | 2017-12-05T16:01:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,983 |
py
|
# -*- coding:utf-8 -*-
class TreeNode(object):
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def __str__(self):
print self.value
class Solution(object):
def dfs_for_building_operation_tree(self, start, end):
lst = []
        # The bounds of the range control when the recursion stops:
        # if start == end, the loop runs exactly once and returns a single node rooted at start;
        # if start > end, the loop body never runs and an empty list is returned (recursion ends).
for i in range(start, end + 1):
            # Compute the candidate roots of the left and right subtrees separately
left_child_candidates = self.dfs_for_building_operation_tree(start, i - 1)
right_child_candidates = self.dfs_for_building_operation_tree(i + 1, end)
            # If either candidate list is empty, insert a None by hand; otherwise the nested for loops below would never execute
if not left_child_candidates:
left_child_candidates.append(None)
if not right_child_candidates:
right_child_candidates.append(None)
            # Combine every left/right candidate pair (including None) into a distinct root and collect the results in lst
for left_child in left_child_candidates:
for right_child in right_child_candidates:
root = TreeNode(i)
root.left = left_child
root.right = right_child
lst.append(root)
return lst
def extract_operators_numbers(self, s):
i, j = 0, 0
operators = []
numbers = []
while True:
if j == len(s):
numbers.append(int(s[i:j]))
break
if s[j] in ('+', '-', '*'):
numbers.append(int(s[i:j]))
operators.append(s[j])
i = j + 1
j += 1
return numbers, operators
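    # In the operator trees below, each node's value is an operator index; a
    # missing child means the operand is numbers[value] (left) or
    # numbers[value + 1] (right).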
def dfs_for_calculating_value(self, root):
from operator import add, mul, sub
numbers = self.numbers
operators = self.operators
op_mapping = {
'+': add,
'-': sub,
'*': mul,
}
op_function = op_mapping[operators[root.value]]
if root.left is None:
valueL = numbers[root.value]
else:
valueL = self.dfs_for_calculating_value(root.left)
if root.right is None:
valueR = numbers[root.value + 1]
else:
valueR = self.dfs_for_calculating_value(root.right)
result = op_function(valueL, valueR)
return result
def calculate(self, roots):
results = []
for root in roots:
results.append(self.dfs_for_calculating_value(root))
return results
def diffWaysToCompute(self, input):
"""
:type n: int
:rtype: List[TreeNode]
"""
self.numbers, self.operators = self.extract_operators_numbers(input)
numbers = self.numbers
operators = self.operators
if not operators and numbers:
the_only_number = numbers[0]
return [the_only_number]
        # The two steps below could be merged into one (returning results while building the trees); they are kept separate here for readability
'''
以 "2*3-4*5" 为例
* * * - *
\ / / / \ \
* - * * * -
/ / \ \
- * - *
'''
        # Build the operator-tree structures: one DFS pass
n = len(operators)
roots = self.dfs_for_building_operation_tree(0, n - 1)
        # Evaluate each tree shape to get the final results: a second DFS pass
results = self.calculate(roots)
return results
s = "2*3-4*5"
# s = "2-1-1"
# s = "0"
print Solution().diffWaysToCompute(s)
|
[
"[email protected]"
] | |
3215e7794a5d89a0eaaf39bf83143b223ab9638b
|
b2e2adc73147a6ae6fa642bc9448923888a713ca
|
/python/whlie.py
|
c9ff6911ee7f92d133df9ef4ad3c9d4a87596901
|
[] |
no_license
|
cypggs/HeadFirstPython
|
100d7ad52fa7c93baa082bbf4812ecdb064f7722
|
703b85c8482eb24fcef2afec39d20001b94d09ff
|
refs/heads/master
| 2020-03-17T23:58:39.975841 | 2018-07-26T15:52:15 | 2018-07-26T15:52:15 | 134,071,579 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 80 |
py
|
#!/usr/bin/env python
# coding=utf-8
a = 0
while a < 10:
a += 1
print a
|
[
"[email protected]"
] | |
8d4c6fcfac3f511aafca5a4bbb3875f5d7e09ee6
|
039385db928314c55b03e90549ef40010529ef5e
|
/Shop/urls.py
|
8b23e81e5b39ab9e8e67903b6f297982cd4d4811
|
[] |
no_license
|
DragonStuff/Shop-Django
|
5334692aa23512c3ed8350abe495220017fc3be3
|
0b720f001af5bd393cc103d7f9cc3563304a8720
|
refs/heads/master
| 2021-06-13T07:48:43.299176 | 2019-07-24T23:21:01 | 2019-07-24T23:21:01 | 150,483,530 | 4 | 3 | null | 2021-03-19T22:08:13 | 2018-09-26T20:04:53 |
Python
|
UTF-8
|
Python
| false | false | 818 |
py
|
"""dj110 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('product_manager.urls')),
]
|
[
"[email protected]"
] | |
d17e4401738223e0d38787c397ce0ebfd2deeac6
|
39c0b85e7bf37bf11341ce8d06a8f402e5ca08cc
|
/src/pymortests/iosys_add_sub_mul.py
|
0f0d8633880728e279d69bfb3e59e714554492d2
|
[
"BSD-2-Clause"
] |
permissive
|
josefinez/pymor
|
a28e9ed24f0234f3ca54a09e02cd72381d08ad31
|
c3304e6c9250495f12796ed6430f431f48cb3ee1
|
refs/heads/main
| 2023-07-01T00:04:31.961366 | 2021-08-09T07:38:52 | 2021-08-09T07:38:52 | 368,589,801 | 0 | 0 |
NOASSERTION
| 2021-05-18T16:00:17 | 2021-05-18T16:00:16 | null |
UTF-8
|
Python
| false | false | 3,709 |
py
|
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2021 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
import pytest
from pymor.models.iosys import LTIModel, TransferFunction, SecondOrderModel, LinearDelayModel
from pymor.operators.numpy import NumpyMatrixOperator
type_list = [
'LTIModel',
'TransferFunction',
'SecondOrderModel',
'LinearDelayModel',
]
def get_model(name):
if name == 'LTIModel':
A = np.array([[-1]])
B = np.array([[1]])
C = np.array([[1]])
D = np.array([[1]])
return LTIModel.from_matrices(A, B, C, D)
elif name == 'TransferFunction':
H = lambda s: np.array([[1 / (s + 1)]])
dH = lambda s: np.array([[-1 / (s + 1)**2]])
return TransferFunction(1, 1, H, dH)
elif name == 'SecondOrderModel':
M = np.array([[1]])
E = np.array([[1]])
K = np.array([[1]])
B = np.array([[1]])
C = np.array([[1]])
D = np.array([[1]])
return SecondOrderModel.from_matrices(M, E, K, B, C, D=D)
elif name == 'LinearDelayModel':
A = NumpyMatrixOperator(np.array([[-1]]))
Ad = NumpyMatrixOperator(np.array([[-0.1]]))
B = NumpyMatrixOperator(np.array([[1]]))
C = NumpyMatrixOperator(np.array([[1]]))
D = NumpyMatrixOperator(np.array([[1]]))
tau = 1
return LinearDelayModel(A, (Ad,), (tau,), B, C, D)
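# Expected type promotion for binary operations: TransferFunction absorbs
# everything, SecondOrderModel combined with LTIModel gives an LTIModel, and any
# combination involving a LinearDelayModel gives a LinearDelayModel.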
def expected_return_type(m1, m2):
if type(m1) is TransferFunction or type(m2) is TransferFunction:
return TransferFunction
if type(m1) is type(m2):
return type(m1)
if type(m1) is LTIModel:
if type(m2) is SecondOrderModel:
return LTIModel
else: # LinearDelayModel
return LinearDelayModel
elif type(m1) is SecondOrderModel:
if type(m2) is LinearDelayModel:
return LinearDelayModel
else:
return expected_return_type(m2, m1)
else:
return expected_return_type(m2, m1)
@pytest.mark.parametrize('m1', type_list)
@pytest.mark.parametrize('m2', type_list)
def test_add(m1, m2):
m1 = get_model(m1)
m2 = get_model(m2)
m = m1 + m2
assert type(m) is expected_return_type(m1, m2)
assert np.allclose(m.eval_tf(0), m1.eval_tf(0) + m2.eval_tf(0))
assert np.allclose(m.eval_dtf(0), m1.eval_dtf(0) + m2.eval_dtf(0))
assert np.allclose(m.eval_tf(1j), m1.eval_tf(1j) + m2.eval_tf(1j))
assert np.allclose(m.eval_dtf(1j), m1.eval_dtf(1j) + m2.eval_dtf(1j))
@pytest.mark.parametrize('m1', type_list)
@pytest.mark.parametrize('m2', type_list)
def test_sub(m1, m2):
m1 = get_model(m1)
m2 = get_model(m2)
m = m1 - m2
assert type(m) is expected_return_type(m1, m2)
assert np.allclose(m.eval_tf(0), m1.eval_tf(0) - m2.eval_tf(0))
assert np.allclose(m.eval_dtf(0), m1.eval_dtf(0) - m2.eval_dtf(0))
assert np.allclose(m.eval_tf(1j), m1.eval_tf(1j) - m2.eval_tf(1j))
assert np.allclose(m.eval_dtf(1j), m1.eval_dtf(1j) - m2.eval_dtf(1j))
@pytest.mark.parametrize('m1', type_list)
@pytest.mark.parametrize('m2', type_list)
def test_mul(m1, m2):
m1 = get_model(m1)
m2 = get_model(m2)
m = m1 * m2
assert type(m) is expected_return_type(m1, m2)
assert np.allclose(m.eval_tf(0), m1.eval_tf(0) @ m2.eval_tf(0))
assert np.allclose(m.eval_dtf(0), m1.eval_dtf(0) @ m2.eval_tf(0) + m1.eval_tf(0) @ m2.eval_dtf(0))
assert np.allclose(m.eval_tf(1j), m1.eval_tf(1j) @ m2.eval_tf(1j))
assert np.allclose(m.eval_dtf(1j), m1.eval_dtf(1j) @ m2.eval_tf(1j) + m1.eval_tf(1j) @ m2.eval_dtf(1j))
|
[
"[email protected]"
] | |
6ffac5ea208ba2d6e273b1fdd1775d31f9762364
|
9eab77cb998e94ceb2b2d08738b05a98982505f1
|
/sentiment-analysis/pythoncodes/01-text-to-id.py
|
16b8e56535efcf07addf12250c40f7bd8382a0a7
|
[] |
no_license
|
behrouzmadahian/python
|
1584dd13cde8531e69bb6fab76f148dc3fc0da57
|
5d4dbde8d570623fe785e78a3e45cd05ea80aa08
|
refs/heads/master
| 2021-06-28T16:53:09.927450 | 2020-09-21T14:02:55 | 2020-09-21T14:02:55 | 156,713,696 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 827 |
py
|
import re
from nltk.corpus import stopwords
from nltk import word_tokenize
stop_words = stopwords.words('english')
def remove_stop_words(word_list, stopwords):
filtered_list = [w for w in word_list if not w in stopwords]
return filtered_list
# Removes punctuation, parentheses, question marks, etc., and leaves only alphanumeric characters
def clean_sentences(string):
strip_special_chars = re.compile("[^a-zA-Z0-9_]+")
string = string.lower().replace("<br />", " ")
return re.sub(strip_special_chars, "", string.lower())
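# Convert raw text to vocabulary indices: clean, tokenize, drop stop words, then
# look up each remaining word's position in vocab_list (list.index raises
# ValueError for out-of-vocabulary words).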
def text_to_ids(text, vocab_list):
text_cleaned = clean_sentences(text)
word_list = word_tokenize(text_cleaned)
word_list = remove_stop_words(word_list, stop_words)
word_inds = [vocab_list.index(w) for w in word_list]
return word_inds
|
[
"[email protected]"
] | |
f0e0f44f0622cc09917ec205144aca3599559424
|
302734e285f1c77a1985f72da18d92810dda6151
|
/features/environment.py
|
f2db32270068fb2417c84bc95d4ec33200c490c4
|
[
"MIT"
] |
permissive
|
zhammer/morning-cd-playlists
|
924dd297188d86d019d7f176a9cf49ff33f31237
|
10426bc1435d4f34761d8782ad0599f40865596f
|
refs/heads/master
| 2022-12-11T02:29:12.972491 | 2019-02-07T00:35:18 | 2019-02-07T00:35:18 | 162,019,835 | 0 | 0 |
MIT
| 2022-12-10T03:33:27 | 2018-12-16T16:28:45 |
Python
|
UTF-8
|
Python
| false | false | 605 |
py
|
import os
import behave
from features.support.fixtures import (
with_aws_lambda_environment_variables,
with_empty_db
)
TEST_DATABASE_CONNECTION_STRING = os.environ.get(
'TEST_DATABASE_CONNECTION_STRING',
'sqlite:///morning_cd_behave_tests.db'
)
def before_scenario(context: behave.runner.Context, scenario: behave.model.Scenario) -> None:
behave.use_fixture(
with_aws_lambda_environment_variables,
context,
TEST_DATABASE_CONNECTION_STRING
)
behave.use_fixture(
with_empty_db,
context,
TEST_DATABASE_CONNECTION_STRING
)
|
[
"[email protected]"
] | |
a476355eb2604e1fbd519da46642dde0a0225e7f
|
d4873ecfc5bc6141cf80d54d35426f27e7c78116
|
/bacisforms/bacisforms/settings.py
|
842c095e0ec9312d7c6007ea9fcf14c16a7a29ea
|
[] |
no_license
|
epedzich/My_Django_Stuff
|
9ccbc3670447c2f4d578342c64a5b9d6a0ffd9eb
|
fc1f56ce8a689569fea8bb49aa39bc215240a3f0
|
refs/heads/master
| 2021-09-02T15:33:03.849445 | 2018-01-03T12:00:45 | 2018-01-03T12:00:45 | 110,693,924 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,187 |
py
|
"""
Django settings for bacisforms project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')52u0fnhc-&k!+xqe_-ecg^p%1g--tniszv%qvdtkz0t$+6)93'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'basicapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bacisforms.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bacisforms.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
5bd54955034dce898b16e994b82d4b750b30dc95
|
3005ec1c7adfc269ea5d3ab676199881f68853f0
|
/SVM.py
|
da368c7afb61636eb245ab0894cbbf6fbb2ba6c0
|
[] |
no_license
|
idamsa/Machine-Learning-With-Python-Course
|
d2d59b4a74091b1ecdb81befa92078bcc939120c
|
868572b01bee0309e4d92ec4d2cda5c046f672ab
|
refs/heads/master
| 2021-02-24T04:14:07.860458 | 2020-03-12T09:12:50 | 2020-03-12T09:12:50 | 245,419,224 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 513 |
py
|
import sklearn
import sklearn.model_selection
from sklearn import datasets
from sklearn import svm
from sklearn import metrics
cancer = datasets.load_breast_cancer()
print(cancer.feature_names)
print(cancer.target_names)
X = cancer.data
y = cancer.target
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X, y, test_size=0.2)
classes = ['malignant', 'benign']
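# Linear-kernel support vector classifier; C controls the soft-margin penalty
# (larger C fits the training data more tightly, smaller C regularizes more).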
clf = svm.SVC(kernel="linear", C=2)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
acc = metrics.accuracy_score(y_test, y_pred)
print(acc)
|
[
"[email protected]"
] | |
ca86cc40c3726385022d4803c5ece0dda47cc10b
|
a1041e0f8df3ca2724929339606511fd657d92f7
|
/Book/Book/pipelines.py
|
cb332c6268e6948933e5cc097d96d6e4d150206c
|
[] |
no_license
|
mikailceren/Web-Mining
|
3d1242383fa2317affa60e93cfa4c4479d994579
|
e31662ca14d3ae75c792b34241347f6bc9b846b6
|
refs/heads/master
| 2021-01-18T17:18:48.855718 | 2015-06-17T11:58:44 | 2015-06-17T11:58:44 | 33,506,333 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,523 |
py
|
from scrapy.http import Request
from scrapy.contrib.pipeline.images import ImagesPipeline
from scrapy.exceptions import DropItem
import MySQLdb
import MySQLdb.cursors
import sys
class BookPipeline(object):
def __init__(self):
db = MySQLdb.connect(host='localhost', user='mcrn46', passwd='mcrn46', db='web_mining', charset='utf8',
use_unicode=True)
self.c = db.cursor()
self.c.connection.autocommit(True)
def process_item(self, item, spider):
try:
self.c.execute("""INSERT INTO books (title,yayinevi,summary,language,year,img,price)
VALUES (%s, %s, %s, %s, %s, %s, %s)""",
(item['title'],
item['yayinevi'],
item['summary'],
item['language'],
item['date'],
item['images'][0]['path'],
item['price'],
))
self.c.execute("""INSERT INTO authors (name)
VALUES (%s)""",
(item['author'],
))
self.c.execute("""INSERT INTO genres (genre)
VALUES (%s)""",
(item['genre'],
))
except MySQLdb.Error, e:
print "Error %d: %s" % (e.args[0], e.args[1])
sys.exit(1)
return item
|
[
"[email protected]"
] | |
6803fab74994e780eb31f5665279ec1ea6fd3290
|
e1755c044d7922dc6e25d02003188717edc6c3ef
|
/hurdles/__init__.py
|
46b6252cc99b65e1c73e28298caa0edfd5e38d15
|
[
"MIT"
] |
permissive
|
oleiade/Hurdles
|
0a9ca0c52df50380c7c905a9eaef899dd45cc5b4
|
51f42919216087a7a6af65e007cb1c0a17983178
|
refs/heads/master
| 2020-04-24T05:10:22.957389 | 2013-02-23T10:19:34 | 2013-02-23T10:19:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 325 |
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 theo crevon
#
# See the file LICENSE for copying permission.
from __future__ import absolute_import
from hurdles.base import BenchCase, BenchSuite
version = (0, 0, 1)
__title__ = "Hurdles"
__author__ = "Oleiade"
__license__ = "MIT"
__version__ = '.'.join(map(str, version))
|
[
"[email protected]"
] | |
c50419bef15bebb081749b2a06382dacd4da0b95
|
0e91204becbe621e0cd04d409f3eea4658bc315d
|
/venv/lib/python3.6/site-packages/google/cloud/container_v1/proto/cluster_service_pb2_grpc.py
|
9c555774cbef963ea76e7b8e153d17bc0cc2d2ce
|
[] |
no_license
|
hssmikey/Launch_APP
|
97b1bac5fa6df849ef0530ff8bae3e82f7c3c720
|
0d3743b6121361bace4143a639a645159459b5af
|
refs/heads/master
| 2022-07-18T14:49:14.092568 | 2020-05-14T21:27:49 | 2020-05-14T21:27:49 | 263,176,734 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 35,227 |
py
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.container_v1.proto import (
cluster_service_pb2 as google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class ClusterManagerStub(object):
"""Google Kubernetes Engine Cluster Manager v1
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ListClusters = channel.unary_unary(
"/google.container.v1.ClusterManager/ListClusters",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListClustersRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListClustersResponse.FromString,
)
self.GetCluster = channel.unary_unary(
"/google.container.v1.ClusterManager/GetCluster",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.GetClusterRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Cluster.FromString,
)
self.CreateCluster = channel.unary_unary(
"/google.container.v1.ClusterManager/CreateCluster",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.CreateClusterRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.UpdateCluster = channel.unary_unary(
"/google.container.v1.ClusterManager/UpdateCluster",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.UpdateClusterRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.UpdateNodePool = channel.unary_unary(
"/google.container.v1.ClusterManager/UpdateNodePool",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.UpdateNodePoolRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetNodePoolAutoscaling = channel.unary_unary(
"/google.container.v1.ClusterManager/SetNodePoolAutoscaling",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetNodePoolAutoscalingRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetLoggingService = channel.unary_unary(
"/google.container.v1.ClusterManager/SetLoggingService",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetLoggingServiceRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetMonitoringService = channel.unary_unary(
"/google.container.v1.ClusterManager/SetMonitoringService",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetMonitoringServiceRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetAddonsConfig = channel.unary_unary(
"/google.container.v1.ClusterManager/SetAddonsConfig",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetAddonsConfigRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetLocations = channel.unary_unary(
"/google.container.v1.ClusterManager/SetLocations",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetLocationsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.UpdateMaster = channel.unary_unary(
"/google.container.v1.ClusterManager/UpdateMaster",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.UpdateMasterRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetMasterAuth = channel.unary_unary(
"/google.container.v1.ClusterManager/SetMasterAuth",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetMasterAuthRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.DeleteCluster = channel.unary_unary(
"/google.container.v1.ClusterManager/DeleteCluster",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.DeleteClusterRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.ListOperations = channel.unary_unary(
"/google.container.v1.ClusterManager/ListOperations",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListOperationsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListOperationsResponse.FromString,
)
self.GetOperation = channel.unary_unary(
"/google.container.v1.ClusterManager/GetOperation",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.GetOperationRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.CancelOperation = channel.unary_unary(
"/google.container.v1.ClusterManager/CancelOperation",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.CancelOperationRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GetServerConfig = channel.unary_unary(
"/google.container.v1.ClusterManager/GetServerConfig",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.GetServerConfigRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ServerConfig.FromString,
)
self.ListNodePools = channel.unary_unary(
"/google.container.v1.ClusterManager/ListNodePools",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListNodePoolsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListNodePoolsResponse.FromString,
)
self.GetNodePool = channel.unary_unary(
"/google.container.v1.ClusterManager/GetNodePool",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.GetNodePoolRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.NodePool.FromString,
)
self.CreateNodePool = channel.unary_unary(
"/google.container.v1.ClusterManager/CreateNodePool",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.CreateNodePoolRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.DeleteNodePool = channel.unary_unary(
"/google.container.v1.ClusterManager/DeleteNodePool",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.DeleteNodePoolRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.RollbackNodePoolUpgrade = channel.unary_unary(
"/google.container.v1.ClusterManager/RollbackNodePoolUpgrade",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.RollbackNodePoolUpgradeRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetNodePoolManagement = channel.unary_unary(
"/google.container.v1.ClusterManager/SetNodePoolManagement",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetNodePoolManagementRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetLabels = channel.unary_unary(
"/google.container.v1.ClusterManager/SetLabels",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetLabelsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetLegacyAbac = channel.unary_unary(
"/google.container.v1.ClusterManager/SetLegacyAbac",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetLegacyAbacRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.StartIPRotation = channel.unary_unary(
"/google.container.v1.ClusterManager/StartIPRotation",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.StartIPRotationRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.CompleteIPRotation = channel.unary_unary(
"/google.container.v1.ClusterManager/CompleteIPRotation",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.CompleteIPRotationRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetNodePoolSize = channel.unary_unary(
"/google.container.v1.ClusterManager/SetNodePoolSize",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetNodePoolSizeRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetNetworkPolicy = channel.unary_unary(
"/google.container.v1.ClusterManager/SetNetworkPolicy",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetNetworkPolicyRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetMaintenancePolicy = channel.unary_unary(
"/google.container.v1.ClusterManager/SetMaintenancePolicy",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetMaintenancePolicyRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.ListUsableSubnetworks = channel.unary_unary(
"/google.container.v1.ClusterManager/ListUsableSubnetworks",
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListUsableSubnetworksRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListUsableSubnetworksResponse.FromString,
)
class ClusterManagerServicer(object):
"""Google Kubernetes Engine Cluster Manager v1
"""
def ListClusters(self, request, context):
"""Lists all clusters owned by a project in either the specified zone or all
zones.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetCluster(self, request, context):
"""Gets the details of a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateCluster(self, request, context):
"""Creates a cluster, consisting of the specified number and type of Google
Compute Engine instances.
By default, the cluster is created in the project's
[default network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks).
One firewall is added for the cluster. After cluster creation,
the Kubelet creates routes for each node to allow the containers
on that node to communicate with all other instances in the
cluster.
Finally, an entry is added to the project's global metadata indicating
which CIDR range the cluster is using.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateCluster(self, request, context):
"""Updates the settings of a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateNodePool(self, request, context):
"""Updates the version and/or image type for the specified node pool.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetNodePoolAutoscaling(self, request, context):
"""Sets the autoscaling settings for the specified node pool.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetLoggingService(self, request, context):
"""Sets the logging service for a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetMonitoringService(self, request, context):
"""Sets the monitoring service for a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetAddonsConfig(self, request, context):
"""Sets the addons for a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetLocations(self, request, context):
"""Sets the locations for a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateMaster(self, request, context):
"""Updates the master for a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetMasterAuth(self, request, context):
"""Sets master auth materials. Currently supports changing the admin password
or a specific cluster, either via password generation or explicitly setting
the password.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteCluster(self, request, context):
"""Deletes the cluster, including the Kubernetes endpoint and all worker
nodes.
Firewalls and routes that were configured during cluster creation
are also deleted.
Other Google Compute Engine resources that might be in use by the cluster,
such as load balancer resources, are not deleted if they weren't present
when the cluster was initially created.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListOperations(self, request, context):
"""Lists all operations in a project in a specific zone or all zones.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetOperation(self, request, context):
"""Gets the specified operation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CancelOperation(self, request, context):
"""Cancels the specified operation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetServerConfig(self, request, context):
"""Returns configuration info about the Google Kubernetes Engine service.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListNodePools(self, request, context):
"""Lists the node pools for a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetNodePool(self, request, context):
"""Retrieves the requested node pool.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateNodePool(self, request, context):
"""Creates a node pool for a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteNodePool(self, request, context):
"""Deletes a node pool from a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def RollbackNodePoolUpgrade(self, request, context):
"""Rolls back a previously Aborted or Failed NodePool upgrade.
This makes no changes if the last upgrade successfully completed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetNodePoolManagement(self, request, context):
"""Sets the NodeManagement options for a node pool.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetLabels(self, request, context):
"""Sets labels on a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetLegacyAbac(self, request, context):
"""Enables or disables the ABAC authorization mechanism on a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def StartIPRotation(self, request, context):
"""Starts master IP rotation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CompleteIPRotation(self, request, context):
"""Completes master IP rotation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetNodePoolSize(self, request, context):
"""Sets the size for a specific node pool.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetNetworkPolicy(self, request, context):
"""Enables or disables Network Policy for a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetMaintenancePolicy(self, request, context):
"""Sets the maintenance policy for a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListUsableSubnetworks(self, request, context):
"""Lists subnetworks that are usable for creating clusters in a project.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_ClusterManagerServicer_to_server(servicer, server):
rpc_method_handlers = {
"ListClusters": grpc.unary_unary_rpc_method_handler(
servicer.ListClusters,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListClustersRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListClustersResponse.SerializeToString,
),
"GetCluster": grpc.unary_unary_rpc_method_handler(
servicer.GetCluster,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.GetClusterRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Cluster.SerializeToString,
),
"CreateCluster": grpc.unary_unary_rpc_method_handler(
servicer.CreateCluster,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.CreateClusterRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"UpdateCluster": grpc.unary_unary_rpc_method_handler(
servicer.UpdateCluster,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.UpdateClusterRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"UpdateNodePool": grpc.unary_unary_rpc_method_handler(
servicer.UpdateNodePool,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.UpdateNodePoolRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetNodePoolAutoscaling": grpc.unary_unary_rpc_method_handler(
servicer.SetNodePoolAutoscaling,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetNodePoolAutoscalingRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetLoggingService": grpc.unary_unary_rpc_method_handler(
servicer.SetLoggingService,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetLoggingServiceRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetMonitoringService": grpc.unary_unary_rpc_method_handler(
servicer.SetMonitoringService,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetMonitoringServiceRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetAddonsConfig": grpc.unary_unary_rpc_method_handler(
servicer.SetAddonsConfig,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetAddonsConfigRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetLocations": grpc.unary_unary_rpc_method_handler(
servicer.SetLocations,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetLocationsRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"UpdateMaster": grpc.unary_unary_rpc_method_handler(
servicer.UpdateMaster,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.UpdateMasterRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetMasterAuth": grpc.unary_unary_rpc_method_handler(
servicer.SetMasterAuth,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetMasterAuthRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"DeleteCluster": grpc.unary_unary_rpc_method_handler(
servicer.DeleteCluster,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.DeleteClusterRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"ListOperations": grpc.unary_unary_rpc_method_handler(
servicer.ListOperations,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListOperationsRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListOperationsResponse.SerializeToString,
),
"GetOperation": grpc.unary_unary_rpc_method_handler(
servicer.GetOperation,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.GetOperationRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"CancelOperation": grpc.unary_unary_rpc_method_handler(
servicer.CancelOperation,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.CancelOperationRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"GetServerConfig": grpc.unary_unary_rpc_method_handler(
servicer.GetServerConfig,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.GetServerConfigRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ServerConfig.SerializeToString,
),
"ListNodePools": grpc.unary_unary_rpc_method_handler(
servicer.ListNodePools,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListNodePoolsRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListNodePoolsResponse.SerializeToString,
),
"GetNodePool": grpc.unary_unary_rpc_method_handler(
servicer.GetNodePool,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.GetNodePoolRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.NodePool.SerializeToString,
),
"CreateNodePool": grpc.unary_unary_rpc_method_handler(
servicer.CreateNodePool,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.CreateNodePoolRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"DeleteNodePool": grpc.unary_unary_rpc_method_handler(
servicer.DeleteNodePool,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.DeleteNodePoolRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"RollbackNodePoolUpgrade": grpc.unary_unary_rpc_method_handler(
servicer.RollbackNodePoolUpgrade,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.RollbackNodePoolUpgradeRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetNodePoolManagement": grpc.unary_unary_rpc_method_handler(
servicer.SetNodePoolManagement,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetNodePoolManagementRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetLabels": grpc.unary_unary_rpc_method_handler(
servicer.SetLabels,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetLabelsRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetLegacyAbac": grpc.unary_unary_rpc_method_handler(
servicer.SetLegacyAbac,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetLegacyAbacRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"StartIPRotation": grpc.unary_unary_rpc_method_handler(
servicer.StartIPRotation,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.StartIPRotationRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"CompleteIPRotation": grpc.unary_unary_rpc_method_handler(
servicer.CompleteIPRotation,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.CompleteIPRotationRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetNodePoolSize": grpc.unary_unary_rpc_method_handler(
servicer.SetNodePoolSize,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetNodePoolSizeRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetNetworkPolicy": grpc.unary_unary_rpc_method_handler(
servicer.SetNetworkPolicy,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetNetworkPolicyRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetMaintenancePolicy": grpc.unary_unary_rpc_method_handler(
servicer.SetMaintenancePolicy,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetMaintenancePolicyRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"ListUsableSubnetworks": grpc.unary_unary_rpc_method_handler(
servicer.ListUsableSubnetworks,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListUsableSubnetworksRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListUsableSubnetworksResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.container.v1.ClusterManager", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
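# --- Editor addition: hedged usage sketch, not part of the generated module. ---
# It assumes a user-defined subclass of the ClusterManagerServicer stubs above
# (hypothetical); the grpc server calls themselves are the standard grpcio API.
def _example_serve(servicer, port="[::]:50051"):
    """Minimal sketch: register a ClusterManagerServicer implementation and serve it."""
    from concurrent import futures  # standard-library thread pool for the server
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    add_ClusterManagerServicer_to_server(servicer, server)
    server.add_insecure_port(port)  # hypothetical local port; use TLS credentials in production
    server.start()
    server.wait_for_termination()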
|
[
"[email protected]"
] | |
e01c6ae624a61f1dbf75ab56e391078604fe1b12
|
ad77e334bba34e1aa17debc66e90dd1e78d32af9
|
/Clg/HW/MultipleVal.py
|
d5a926c4f74d3cd395651ada2f07e630a776776e
|
[] |
no_license
|
SnehuD/Python-Programs
|
7c08bfa82789dd9139b2687ec3d6cf3d13a33fc5
|
ecf6c92ce2673885b4345e0ae70cfe3d01677911
|
refs/heads/main
| 2023-08-23T06:33:46.535386 | 2021-10-03T17:02:51 | 2021-10-03T17:02:51 | 413,140,529 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 110 |
py
|
# Assigning multiple values to multiple variables
a, b, c = 5, 3.2, "Hello"
print(a)
print(b)
print(c)
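# Editor addition -- a small companion sketch: one value can also be assigned
# to several variables at once, and a sequence can be unpacked the same way.
x = y = z = "same value"
p, q, r = [1, 2, 3]
print(x, y, z)
print(p, q, r)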
|
[
"[email protected]"
] | |
6ba923e2897f1f34a8d9fefb279295ca6d447ad8
|
3c88b31090d6568435e811a455ce934604fa5c9f
|
/category/migrations/0004_auto_20210604_1515.py
|
1598b851990f23fb79ef630c2f5f22ca87780b4e
|
[] |
no_license
|
kamran1231/Great-Cart-Django-Website
|
09e0e7b5085737cf54614b45b5424ac5c273bb5b
|
a674593d5c8cb15be7b24dca397f9027659033e2
|
refs/heads/main
| 2023-05-17T08:52:31.092404 | 2021-06-09T20:21:08 | 2021-06-09T20:21:08 | 373,645,947 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 453 |
py
|
# Generated by Django 3.1.7 on 2021-06-04 09:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('category', '0003_alter_category_category_name'),
]
operations = [
migrations.AlterField(
model_name='category',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
|
[
"[email protected]"
] | |
e978fbd36bc6430b82b7ef6ce4e7d29fc5402e43
|
7f4bfec61051b4ddc8f747c16b6294b2591b64e4
|
/test.py
|
346a341f44084790b3d3d411dc3cc81a54f2c191
|
[] |
no_license
|
JCWanghub/pytorch_demo
|
094d93e83fe911a14a80bf1d9b9490c52528fd1d
|
4ea3a48c2fefefa5d426df3218a717c76713c38e
|
refs/heads/master
| 2022-09-17T01:59:44.239423 | 2020-05-30T12:53:34 | 2020-05-30T12:53:34 | 266,676,453 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 231 |
py
|
import os
data_dir = os.path.join("..", "..", "Data", "cifar-10-batches-py",'test_batch')
data_dir = ".\\Data\\cifar-10-batches-py"
if os.path.exists(data_dir):
    print(data_dir + ' exists!')
else:
    print(data_dir + ' does not exist')
|
[
"[email protected]"
] | |
a49ecab130db3c3d6818db8eec5ff5c4c879e481
|
b18ef6eca63c7db88107ccf720491b32a785e0de
|
/mig_main/forms.py
|
6c212692160aa28a52826b5cfd953fc0aae70753
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
tbpmig/mig-website
|
368c270d0427f09245a3e13f31757ca2d52f21d0
|
527f9dd39a6b50caa24ea5d0d97b19a0c9b675d1
|
refs/heads/master
| 2023-06-08T07:15:19.792102 | 2023-06-05T15:57:17 | 2023-06-05T15:57:17 | 17,345,471 | 7 | 4 |
Apache-2.0
| 2022-11-22T00:36:54 | 2014-03-02T19:53:15 |
Python
|
UTF-8
|
Python
| false | false | 12,564 |
py
|
from django.contrib.auth.models import User
from django import forms
from django.forms import ModelForm, BaseModelFormSet
from django.forms.models import modelformset_factory
from django.db import IntegrityError
#from django.utils.timezone import now
from django_select2.forms import (
Select2MultipleWidget,
Select2Widget,
)
from electees.models import electee_stopped_electing
from mig_main.models import (
AcademicTerm,
Major,
MemberProfile,
UserProfile,
TBPChapter,
UserPreference,
)
class ChangeMemberStandingForm(ModelForm):
uniqname = forms.CharField(disabled=True)
first_name = forms.CharField(disabled=True)
last_name = forms.CharField(disabled=True)
class Meta:
model = MemberProfile
fields = ['uniqname',
'first_name',
'last_name',
'standing',
]
class BaseMakeMembersAlumniFormSet(BaseModelFormSet):
def __init__(self, *args, **kwargs):
super(BaseMakeMembersAlumniFormSet,
self).__init__(*args, **kwargs)
        # create the filtering here, whatever suits your needs
self.queryset = MemberProfile.objects.exclude(
standing__name='Alumni').order_by(
'last_name',
'first_name',
'uniqname')
MakeMembersAlumniFormSet = modelformset_factory(
MemberProfile,
form=ChangeMemberStandingForm,
formset=BaseMakeMembersAlumniFormSet,
extra=0
)
class MemberProfileForm(ModelForm):
major = forms.ModelMultipleChoiceField(
widget=Select2MultipleWidget(),
queryset=Major.objects.all().order_by('name')
)
class Meta:
model = MemberProfile
exclude = (
'user',
'uniqname',
'status',
'UMID',
'still_electing',
'edu_bckgrd_form'
)
class ElecteeProfileForm(MemberProfileForm):
class Meta:
model = MemberProfile
exclude = (
'user',
'uniqname',
'status',
'init_chapter',
'UMID',
'init_term',
'still_electing',
'standing',
'alum_mail_freq',
'job_field',
'employer',
'meeting_speak',
'edu_bckgrd_form'
)
class MemberProfileNewActiveForm(MemberProfileForm):
init_term = forms.ModelChoiceField(
widget=Select2Widget(),
queryset=AcademicTerm.get_rchron(),
label='Initiation Term'
)
init_chapter = forms.ModelChoiceField(
widget=Select2Widget(),
queryset=TBPChapter.objects.all(),
label='Initiating Chapter'
)
class Meta:
model = MemberProfile
exclude = (
'user',
'uniqname',
'status',
'edu_bckgrd_form',
'still_electing'
)
class MemberProfileNewElecteeForm(MemberProfileForm):
class Meta:
model = MemberProfile
exclude = (
'user',
'uniqname',
'status',
'standing',
'init_chapter',
'alum_mail_freq',
'init_term',
'still_electing',
'edu_bckgrd_form'
)
class NonMemberProfileForm(ModelForm):
class Meta:
model = UserProfile
exclude = ('user', 'uniqname')
class ConvertNonMemberToMemberForm(MemberProfileForm):
def save(self, userprofile, commit=True):
if commit is False:
raise IntegrityError(
'Saving logic complicated, commit must be enabled')
if userprofile.is_member():
raise IntegrityError('Model is already MemberProfile')
# 1. clone profile
uniqname = userprofile.uniqname
marysuec = userprofile
marysuec_user = userprofile.user
marysuec_user.username = 'marysuec'
marysuec_user.id = None
marysuec_user.pk = None
marysuec_user.save()
marysuec.user = marysuec_user
# 2. change uniqname to marysuec
marysuec.uniqname = 'marysuec'
marysuec.save()
userprofile = UserProfile.objects.get(uniqname=uniqname)
# 3. reassign all relationships of interest from profile A to marysuec
nepp = userprofile.noneventprojectparticipant_set.all().distinct()
shifts = userprofile.event_attendee.all().distinct()
announcement_blurbs = userprofile.announcementblurb_set.all(
).distinct()
waitlist_slot = userprofile.waitlistslot_set.all().distinct()
itembring = userprofile.usercanbringpreferreditem_set.all().distinct()
praise_giver = userprofile.praise_giver.all().distinct()
praise_receiver = userprofile.praise_recipient.all().distinct()
prefs = userprofile.userpreference_set.all().distinct()
background_check = userprofile.backgroundcheck_set.all().distinct()
for n in nepp:
n.participant = marysuec
n.save()
for s in shifts:
s.attendees.add(marysuec)
s.attendees.remove(userprofile)
for a in announcement_blurbs:
a.contacts.add(marysuec)
a.contacts.remove(userprofile)
for w in waitlist_slot:
w.user = marysuec
w.save()
for item in itembring:
item.user = marysuec
item.save()
for p in praise_giver:
p.giver = marysuec
p.save()
for p in praise_receiver:
p.recipient = marysuec
p.save()
for p in prefs:
p.user = marysuec
p.save()
for b in background_check:
b.member = marysuec
b.save()
# 4. delete profile A
userprofile.delete()
# 5. create profile A'
m = super(ConvertNonMemberToMemberForm, self).save(commit=False)
m.uniqname = uniqname
m.user = User.objects.get(username=uniqname)
m.nickname = marysuec.nickname
m.first_name = marysuec.first_name
m.middle_name = marysuec.middle_name
m.last_name = marysuec.last_name
m.suffix = marysuec.suffix
m.maiden_name = marysuec.maiden_name
m.title = marysuec.title
# 6. save profile A'
m.save()
# 7. reassign all relationships from profile marysuec to A'
for n in nepp:
n.participant = m
n.save()
for s in shifts:
s.attendees.add(m)
s.attendees.remove(marysuec)
for a in announcement_blurbs:
a.contacts.add(m)
a.contacts.remove(marysuec)
for w in waitlist_slot:
w.user = m
w.save()
for item in itembring:
item.user = m
item.save()
for p in praise_giver:
p.giver = m
p.save()
for p in praise_receiver:
p.recipient = m
p.save()
for p in prefs:
p.user = m
p.save()
for b in background_check:
b.member = m
b.save()
# 8. delete marysuec
marysuec.delete()
marysuec_user.delete()
class MemberProfileElecteeFromNonMemberForm(ConvertNonMemberToMemberForm):
class Meta:
model = MemberProfile
exclude = ('alum_mail_freq', 'edu_bckgrd_form', 'first_name',
'init_chapter', 'init_term', 'last_name',
'maiden_name', 'middle_name', 'nickname',
'standing', 'status', 'still_electing',
'suffix', 'title', 'uniqname', 'user')
class MemberProfileActiveFromNonMemberForm(ConvertNonMemberToMemberForm):
class Meta:
model = MemberProfile
exclude = ('edu_bckgrd_form', 'first_name',
'last_name',
'maiden_name', 'middle_name', 'nickname',
'status', 'still_electing',
'suffix', 'title', 'uniqname', 'user')
class ManageElecteeStillElectingForm(ModelForm):
electee = forms.CharField(
widget=forms.TextInput(
attrs={
'class': 'disabled',
'readonly': 'readonly'
}
)
)
uniqname = forms.CharField(
widget=forms.TextInput(
attrs={
'class': 'disabled',
'readonly': 'readonly'
}
)
)
class Meta:
model = MemberProfile
fields = ['electee', 'uniqname', 'still_electing']
def __init__(self, *args, **kwargs):
super(ManageElecteeStillElectingForm, self).__init__(*args, **kwargs)
if not self.instance:
return
self.fields['electee'].initial = self.instance.get_firstlast_name()
def save(self, commit=True):
uniqname = self.cleaned_data['uniqname']
was_electing = MemberProfile.objects.get(
uniqname=uniqname).still_electing
self.cleaned_data.pop('electee', None)
instance = super(ManageElecteeStillElectingForm, self).save(
commit=commit)
if was_electing and not instance.still_electing:
electee_stopped_electing(instance)
return instance
class BaseManageElecteeStillElectingFormSet(BaseModelFormSet):
def __init__(self, *args, **kwargs):
super(BaseManageElecteeStillElectingFormSet,
self).__init__(*args, **kwargs)
        # create the filtering here, whatever suits your needs
self.queryset = MemberProfile.objects.filter(
status__name='Electee').order_by(
'last_name',
'first_name',
'uniqname')
ManageElecteeStillElectingFormSet = modelformset_factory(
MemberProfile,
form=ManageElecteeStillElectingForm,
formset=BaseManageElecteeStillElectingFormSet,
extra=0
)
class PreferenceForm(forms.Form):
def __init__(self, *args, **kwargs):
prefs = kwargs.pop('prefs', None)
user = kwargs.pop('user', None)
users_prefs = UserPreference.objects.filter(user=user)
super(PreferenceForm, self).__init__(*args, **kwargs)
for pref in prefs:
this_pref = users_prefs.filter(preference_type=pref['name'])
self.fields[pref['name']] = forms.ChoiceField(
choices=[
(pref['values'].index(d), d)
for d in pref['values']
]
)
self.fields[pref['name']].label = pref['verbose']
if this_pref.exists():
init_val = pref['values'].index(
this_pref[0].preference_value
)
else:
init_val = pref['values'].index(pref['default'])
self.fields[pref['name']].initial = init_val
def save(self, user, prefs):
UserPreference.objects.filter(user=user).delete()
for key, value in self.cleaned_data.items():
value_name = [d['values'][int(value)]
for d in prefs
if d['name'] == key][0]
up = UserPreference(
user=user,
preference_type=key,
preference_value=value_name,
)
up.save()
|
[
"[email protected]"
] | |
225ceeb7e8183ff4fe55fd640c53ec2f3624a6c8
|
2bcc421ee345b00cf805c543b37d18b5d019dc04
|
/adafruit-circuitpython-bundle-6.x-mpy-20201126/examples/matrixportal_simpletest.py
|
6acecd14cfce626edae83fa7e034c7cadbe1bf85
|
[] |
no_license
|
saewoonam/sc-current-source-titano
|
5a1ad46889c1b09c168424901fd71cb4eab5c61b
|
1c136aa8b61268d9ac0b5a682b30ece70ab87663
|
refs/heads/main
| 2023-03-02T22:12:26.685537 | 2021-02-09T03:28:01 | 2021-02-09T03:28:01 | 317,299,900 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,528 |
py
|
# SPDX-FileCopyrightText: 2020 Melissa LeBlanc-Williams, written for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
"""
This example checks the current Bitcoin price and displays it in the middle of the screen
"""
import time
import board
import terminalio
from adafruit_matrixportal.matrixportal import MatrixPortal
# You can display in 'GBP', 'EUR' or 'USD'
CURRENCY = "USD"
# Set up where we'll be fetching data from
DATA_SOURCE = "https://api.coindesk.com/v1/bpi/currentprice.json"
DATA_LOCATION = ["bpi", CURRENCY, "rate_float"]
def text_transform(val):
if CURRENCY == "USD":
return "$%d" % val
if CURRENCY == "EUR":
return "€%d" % val
if CURRENCY == "GBP":
return "£%d" % val
return "%d" % val
# the current working directory (where this file is)
cwd = ("/" + __file__).rsplit("/", 1)[0]
matrixportal = MatrixPortal(
url=DATA_SOURCE, json_path=DATA_LOCATION, status_neopixel=board.NEOPIXEL,
)
matrixportal.add_text(
text_font=terminalio.FONT,
text_position=(16, 16),
text_color=0xFFFFFF,
text_transform=text_transform,
)
matrixportal.preload_font(b"$012345789") # preload numbers
matrixportal.preload_font((0x00A3, 0x20AC)) # preload gbp/euro symbol
while True:
try:
value = matrixportal.fetch()
print("Response is", value)
except (ValueError, RuntimeError) as e:
print("Some error occured, retrying! -", e)
time.sleep(3 * 60) # wait 3 minutes
|
[
"[email protected]"
] | |
fed3c0f64e13f63329bc763774c51426ea1d2b67
|
a43ad70e7fb1b3c3e564f22e14921e0446c2c770
|
/M2TExample.py
|
e5572a1150ff0daf432b4e286a05d2eeb318335b
|
[
"Apache-2.0"
] |
permissive
|
yindaheng98/GANomaly-Tensorflow
|
99786390e5c31e975ca53178fc8877d18ec64902
|
8ba5fada15c9b8aae697b7ea5839d5c665ed0150
|
refs/heads/master
| 2020-03-30T19:57:18.681283 | 2018-10-06T12:28:16 | 2018-10-06T12:28:16 | 151,566,959 | 6 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,052 |
py
|
import tensorflow as tf
import cv2
from GANomaly.Coders import Encoder, Decoder
from GANomaly.Movie2TFRecord import dataset_from_movie
#options
input_shape = 1024
#model
x = tf.placeholder(tf.float32, [None, input_shape, input_shape, 3])
z = Encoder(x)
x_ = Decoder(z)
loss = tf.reduce_mean(tf.abs(x - x_))
optimizer = tf.train.AdamOptimizer(1.0).minimize(loss)
#dataset
dataset = dataset_from_movie('test/33M00S.mp4')
dataset = dataset.map(lambda e: tf.image.resize_images(e['image'], [input_shape, input_shape], method=1))
dataset = dataset.repeat()
dataset = dataset.batch(1)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
#run
with tf.Session() as sess:
sess.run(iterator.initializer)
sess.run(tf.global_variables_initializer())
for i in range(0, 670):
img = sess.run(next_element)
'''
frame = img[0, :, :, :]
cv2.imshow('hhh', frame[:, :, 0:3])
cv2.waitKey(25)
'''
l, _ = sess.run([loss, optimizer], feed_dict={x: img})
print(l)
|
[
"[email protected]"
] | |
9d1b5f66a246d990332925b934287b88f6a5b75a
|
9d3f8a896b97ee19a32a57b25c7ebcb17ac76730
|
/Text_cleaner.py
|
7ce7e240568289cdd0139c635b6971de3b1f5281
|
[] |
no_license
|
iam-Raghav/Translation_using_attention
|
c0b9210ffa1f6704c6213f68995a2c4a53aec1bc
|
1577e8f7d3f4f2b76b9a3b2c081391559a4b2272
|
refs/heads/master
| 2020-11-27T12:22:54.723514 | 2019-12-21T14:43:35 | 2019-12-21T14:43:35 | 229,439,221 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,391 |
py
|
from __future__ import print_function
import numpy as np
import pandas as pd
import re
import unicodedata
#word_tokenize accepts a string as an input, not a file.
rawtext_filename = "E:\_Translation\data\eng-fra.txt" #KEY IN PATH OF SOURCE FILE
cleantext_filename = "E:\_Translation\data\eng-fra_clean.txt" #KEY IN PATH OF THE DESTINATION AND CLEAN TEXT FILE
max_length = 8
#File Loading
###################################
df = pd.read_csv(rawtext_filename,header=None,encoding = "utf-8", sep='\t')
###################################
# Converts text to ASCII and removes unwanted special characters.
###################################
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# Lowercase the text, strip accents, and keep only letters and . ! ? characters
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
return s
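# Editor addition -- worked example of the helpers above:
# normalizeString("Ça va?") -> "ca va ?"  (accent stripped, punctuation spaced out)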
df1=pd.DataFrame()
for i in range(len(df.iloc[:,1])):
if len(df.iloc[i,0].split()) < max_length:
df.iloc[i, 0] = normalizeString(df.iloc[i, 0])
df.iloc[i, 1] = normalizeString(df.iloc[i, 1])
df1 = df1.append(df.loc[i], ignore_index= False)
df1.to_csv(cleantext_filename,sep='\t',header=False,index = False)
print("DONE...")
|
[
"[email protected]"
] | |
42c5ba3daabc90deacaf00f303deeec5868ff29e
|
7b6dac52b16febbcade84c0949fa2d91914119ad
|
/poirazi/cells/inh.py
|
58778b833f2d7b5ba22dfabd3f02bd00c17a8364
|
[] |
no_license
|
Neurosim-lab/EEE_network
|
122ef87bc89477d441d99e7da0dc165340ffd2cb
|
bbad1336b74d71005d4d7dd2bd6d86ff2f65e214
|
refs/heads/master
| 2021-07-19T08:54:09.241884 | 2020-05-05T14:26:35 | 2020-05-05T14:26:35 | 153,055,050 | 2 | 3 | null | 2019-01-22T15:04:54 | 2018-10-15T04:54:21 |
Python
|
UTF-8
|
Python
| false | false | 1,905 |
py
|
#//Interneuron for PFC - fast spiking parvalbumin interneuron
#//Based on Durstewitz and Gabriel 2006
#//"Irregular spiking in NMDA-driven prefrontal cortex neurons"
from neuron import h
# Soma properties
soma_nseg = 1
soma_L = 53.0
soma_diam = 42.0
# Axon properties
axon_nseg = 1
axon_L = 113.2
axon_diam = 0.7
# General properties
cm = 1.2 # microF/cm2
Ra = 150.0
g_pas = 1.0/15000 # mho/cm
e_pas = -70
h.ko0_k_ion = 3.82 # mM
h.ki0_k_ion = 140.0 # mM
class inh():
def __init__(self):
self.create_cell()
self.add_soma_channels()
self.add_axon_channels()
def create_cell(self):
self.soma = h.Section(name='soma')
self.soma.L = soma_L
self.soma.diam = soma_diam
self.soma.nseg = soma_nseg
self.soma.cm = cm
self.soma.Ra = Ra
self.axon = h.Section(name='axon')
self.axon.L = axon_L
self.axon.diam = axon_diam
self.axon.nseg = axon_nseg
self.axon.cm = cm
self.axon.Ra = Ra
self.axon.connect(self.soma(0.5))
def add_soma_channels(self):
self.soma.insert('pas')
self.soma.g_pas = g_pas
self.soma.e_pas = e_pas
self.soma.insert('Naf')
self.soma.gnafbar_Naf = 0.045 * 5
self.soma.insert('kdr')
self.soma.gkdrbar_kdr = 0.018
self.soma.insert('IKs')
self.soma.gKsbar_IKs = 0.000725 * 0.1
def add_axon_channels(self):
self.axon.insert('pas')
self.axon.g_pas = g_pas
self.axon.e_pas = e_pas
self.axon.insert('Naf')
self.axon.gnafbar_Naf = 0.045 * 12
self.axon.insert('kdr')
self.axon.gkdrbar_kdr = 0.018
############################################
# Function for importing cell into NetPyNE
############################################
def makeCell():
cell = inh()
return cell
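# Editor note (assumption about the surrounding project): 'Naf', 'kdr' and 'IKs'
# are custom NEURON mechanisms defined in .mod files; they must be compiled with
# nrnivmodl (and loaded) before the insert() calls above will succeed, e.g. run
# `nrnivmodl <mod-file-directory>` in the model folder first (directory name hypothetical).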
|
[
"[email protected]"
] | |
507cd1e57aedaba705c21a91c02080b0a76e9250
|
a2350d94f82e73e7ec078ca513e85fa5919440a0
|
/Connect4Game.py
|
9786068f30e1127c15911f93a64701b74370f779
|
[] |
no_license
|
adrielk/ConnectX
|
c551d44741abec71651365e665ea3a03e6836730
|
44486015c8beb44d6a7f00531a0c16fe8aa4ee86
|
refs/heads/master
| 2022-11-21T03:47:43.811372 | 2020-07-27T01:01:22 | 2020-07-27T01:01:22 | 274,325,600 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,122 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 26 19:55:03 2020
Connect 4 - Text Version
Exact same implementation as TicTacToe but with the game board's gravity enabled!
@author: Adriel Kim
"""
from GameBoardClass import GameGrid as grid
# Dependency that deals with managing player turns and round setup for games
import GameRoundManager as gm
def PlayConnectX(player1, player2, dims, length):
"""
Parameters
----------
player1 : PlayerClass object
PlayerClass object representing player1.
player2 : PlayerClass object
PlayerClass object representing player2.
dims : tuple
Dimensions (R,C) of our game board.
length : int
Minimum length of sequence required to win the game.
Returns
-------
None.
"""
#general vars
players = [player1, player2]
gridX = grid(dims[0],dims[1],pieces = [1,2], gravity = True)
#player and piece vars
    firstPlr = gm.decideFirst(players)  # Learning note: players can be changed by this method as they are references
p1Piece = firstPlr.getPieces()[0]
players.remove(firstPlr)
secondPlr = players[0]
p2Piece = secondPlr.getPieces()[0]
print("\nWelcome to Connect -",length,'!\n')
while gridX.isFull() == False and gm.hasSequence(gridX, length) == False:
gm.displayBoard(gridX)
print(str(firstPlr.getName()), "'s turn")
gm.playerTurn(gridX, p1Piece, rowMatters = False)
gm.displayBoard(gridX)
if gm.hasSequence(gridX, length):
break
print(str(secondPlr.getName()),"s turn")
gm.playerTurn(gridX, p2Piece, rowMatters = False)
gm.displayBoard(gridX)
if gm.hasSequence(gridX, length) == True:
winningPiece = gm.getSequenceOwner(gridX, length)
if winningPiece.getPieceNum() == p1Piece.getPieceNum():
print(str(firstPlr.getName()), " Wins!")
else:
print(str(secondPlr.getName()), " Wins!")
else:
print("Draw!")
player1 = gm.playerSetUp("O")
player2 = gm.playerSetUp("X")
PlayConnectX(player1, player2, (6,7), 4)
|
[
"[email protected]"
] | |
07e9212800cc725a94c4b26bd76f2998a46dc66c
|
ee71d6baed9dfd99e454aa49733990c8ca43ef63
|
/robust/receivers.py
|
aafe45fccc7f559a2651d4f181f4d21f90b4ceea
|
[] |
no_license
|
Cybernisk/django-robust
|
d72a1097ee3c3e62352d48d3a29eb40a306f02ea
|
595f3a7cd8a2fb0fd48a787660589bfd6c584e47
|
refs/heads/master
| 2020-12-11T16:35:39.331505 | 2018-09-16T15:34:36 | 2018-09-16T15:34:36 | 233,899,107 | 0 | 0 | null | 2020-01-14T17:39:20 | 2020-01-14T17:39:20 | null |
UTF-8
|
Python
| false | false | 1,744 |
py
|
from django.conf import settings
from django.core.signals import setting_changed
from django.db import models, connection
from django.dispatch import receiver
from django.utils import timezone
from .models import Task, RateLimitRun, TaskManager
from .signals import task_started
@receiver(signal=models.signals.pre_save, sender=Task)
def task_fields_defaults(instance, **kwargs):
"""
:type instance: Task
"""
if instance.payload is None:
instance.payload = {}
if instance.tags is None:
instance.tags = []
@receiver(signal=models.signals.post_save, sender=Task)
def create_log_record(instance, created, **kwargs):
"""
:type instance: Task
:type created: bool
"""
if getattr(settings, 'ROBUST_LOG_EVENTS', True):
instance.events.create(
status=instance.status,
eta=instance.eta,
created_at=instance.created_at if created else instance.updated_at
)
@receiver(signal=models.signals.post_save, sender=Task)
def notify_change(instance, **kwargs):
"""
:type instance: Task
"""
if instance.status in (Task.PENDING, Task.RETRY):
with connection.cursor() as cursor:
            cursor.execute('NOTIFY robust')  # presumably wakes workers LISTENing on the 'robust' channel
@receiver(signal=task_started)
def update_ratelimit(tags, **kwargs):
"""
:type tags: list[str]
"""
if tags:
runtime = timezone.now()
RateLimitRun.objects.using('robust_ratelimit').bulk_create(
[RateLimitRun(tag=tag, created_at=runtime) for tag in tags]
)
@receiver(signal=setting_changed)
def reset_query_cache(setting, **kwargs):
"""
:type setting: str
"""
if setting == 'ROBUST_RATE_LIMIT':
TaskManager.reset_query_cache()
|
[
"[email protected]"
] | |
62ba4516cecfc475a1122352092cb23d07a9500a
|
f1e19808b558ec4ff7c296b40ba08f1f7300e048
|
/spotty/project_resources/stack.py
|
e91948662c95c9325d0eac30f2d3fa052f21fa61
|
[
"MIT"
] |
permissive
|
giserh/spotty
|
300a6ec470f98319819cabfda0982ef3e46618ca
|
e12547ae32e39cdffa9c0187787ea7ca404090dd
|
refs/heads/master
| 2020-04-11T18:57:54.218929 | 2018-12-14T00:18:06 | 2018-12-14T00:18:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 15,543 |
py
|
import os
import yaml
from botocore.exceptions import EndpointConnectionError
from cfn_tools import CfnYamlLoader, CfnYamlDumper
from spotty.helpers.resources import get_snapshot, is_gpu_instance, stack_exists, get_volume, get_ami
from spotty.helpers.spot_prices import get_current_spot_price
from spotty.project_resources.key_pair import KeyPairResource
from spotty.utils import data_dir
class StackResource(object):
def __init__(self, cf, project_name: str, region: str):
self._cf = cf
self._project_name = project_name
self._region = region
self._stack_name = 'spotty-instance-%s' % project_name
@property
def name(self):
return self._stack_name
def stack_exists(self):
return stack_exists(self._cf, self._stack_name)
def get_stack_info(self):
try:
res = self._cf.describe_stacks(StackName=self._stack_name)
except EndpointConnectionError:
res = {}
return res['Stacks'][0]
def prepare_template(self, ec2, availability_zone: str, subnet_id: str, instance_type: str, volumes: list,
ports: list, max_price, on_demand, docker_commands):
"""Prepares CloudFormation template to run a Spot Instance."""
# read and update CF template
with open(data_dir('run_container.yaml')) as f:
template = yaml.load(f, Loader=CfnYamlLoader)
# ending letters for the devices (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html)
device_letters = 'fghijklmnop'
# create and attach volumes
for i, volume in enumerate(volumes):
device_letter = device_letters[i]
volume_resources, volume_availability_zone = self._get_volume_resources(ec2, volume, device_letter)
# existing volume will be attached to the instance
if availability_zone and volume_availability_zone and (availability_zone != volume_availability_zone):
raise ValueError('The availability zone in the configuration file doesn\'t match the availability zone '
'of the existing volume or you have two existing volumes in different availability '
'zones.')
# update availability zone
if volume_availability_zone:
availability_zone = volume_availability_zone
# update template resources
template['Resources'].update(volume_resources)
# set availability zone
if availability_zone:
template['Resources']['SpotInstanceLaunchTemplate']['Properties']['LaunchTemplateData']['Placement'] = {
'AvailabilityZone': availability_zone,
}
# set subnet
if subnet_id:
template['Resources']['SpotInstanceLaunchTemplate']['Properties']['LaunchTemplateData']['NetworkInterfaces'] = [{
'SubnetId': subnet_id,
'DeviceIndex': 0,
'Groups': template['Resources']['SpotInstanceLaunchTemplate']['Properties']['LaunchTemplateData']['SecurityGroupIds'],
}]
del template['Resources']['SpotInstanceLaunchTemplate']['Properties']['LaunchTemplateData']['SecurityGroupIds']
# make sure that the lambda to update log group retention was called after
# the log group was created
template['Resources']['RenameSnapshotFunctionRetention']['DependsOn'] = [
resource_name for resource_name, resource in template['Resources'].items()
if resource['Type'] == 'Custom::SnapshotRenaming'
]
# delete calls of the SetLogsRetentionFunction lambda
if not template['Resources']['RenameSnapshotFunctionRetention']['DependsOn']:
del template['Resources']['RenameSnapshotFunctionRetention']
# make sure that the lambda to update log group retention was called after
# the log group was created
template['Resources']['DeleteSnapshotFunctionRetention']['DependsOn'] = [
resource_name for resource_name, resource in template['Resources'].items()
if resource['Type'] == 'Custom::SnapshotDeletion'
]
# delete calls of the SetLogsRetentionFunction lambda
if not template['Resources']['DeleteSnapshotFunctionRetention']['DependsOn']:
del template['Resources']['DeleteSnapshotFunctionRetention']
# TerminateInstanceFunction lambda should depend on all volume attachments
template['Resources']['TerminateInstance']['DependsOn'] = [
resource_name for resource_name, resource in template['Resources'].items()
if resource['Type'] == 'AWS::EC2::VolumeAttachment'
]
# add ports to the security group
for port in set(ports):
if port != 22:
template['Resources']['InstanceSecurityGroup']['Properties']['SecurityGroupIngress'] += [{
'CidrIp': '0.0.0.0/0',
'IpProtocol': 'tcp',
'FromPort': port,
'ToPort': port,
}, {
'CidrIpv6': '::/0',
'IpProtocol': 'tcp',
'FromPort': port,
'ToPort': port,
}]
# run on-demand instance
if on_demand:
del template['Resources']['SpotInstanceLaunchTemplate']['Properties']['LaunchTemplateData']['InstanceMarketOptions']
if max_price and not on_demand:
# check the maximum price
current_price = get_current_spot_price(ec2, instance_type, availability_zone)
if current_price > max_price:
raise ValueError('Current price for the instance (%.04f) is higher than the maximum price in the '
'configuration file (%.04f).' % (current_price, max_price))
# set maximum price
template['Resources']['SpotInstanceLaunchTemplate']['Properties']['LaunchTemplateData'] \
['InstanceMarketOptions']['SpotOptions']['MaxPrice'] = max_price
# set initial docker commands
if docker_commands:
template['Resources']['SpotInstanceLaunchTemplate']['Metadata']['AWS::CloudFormation::Init'] \
['docker_container_config']['files']['/tmp/docker/docker_commands.sh']['content'] = docker_commands
return yaml.dump(template, Dumper=CfnYamlDumper)
def create_stack(self, ec2, template: str, instance_profile_arn: str, instance_type: str, ami_name: str,
root_volume_size: int, mount_dirs: list, bucket_name: str, remote_project_dir: str,
project_name: str, project_dir: str, docker_config: dict):
"""Runs CloudFormation template."""
# get default VPC ID
res = ec2.describe_vpcs(Filters=[{'Name': 'isDefault', 'Values': ['true']}])
if not len(res['Vpcs']):
raise ValueError('Default VPC not found')
vpc_id = res['Vpcs'][0]['VpcId']
# get image info
ami_info = get_ami(ec2, ami_name)
if not ami_info:
raise ValueError('AMI with name "%s" not found.\n'
'Use "spotty create-ami" command to create an AMI with NVIDIA Docker.' % ami_name)
ami_id = ami_info['ImageId']
# check root volume size
image_volume_size = ami_info['BlockDeviceMappings'][0]['Ebs']['VolumeSize']
if root_volume_size and root_volume_size < image_volume_size:
raise ValueError('Root volume size cannot be less than the size of AMI (%dGB).' % image_volume_size)
elif not root_volume_size:
root_volume_size = image_volume_size + 5
# create key pair
project_key = KeyPairResource(ec2, self._project_name, self._region)
key_name = project_key.create_key()
# working directory for the Docker container
working_dir = docker_config['workingDir']
if not working_dir:
working_dir = remote_project_dir
# get the Dockerfile path and the build's context path
dockerfile_path = docker_config.get('file', '')
docker_context_path = ''
if dockerfile_path:
if not os.path.isfile(os.path.join(project_dir, dockerfile_path)):
raise ValueError('File "%s" doesn\'t exist.' % dockerfile_path)
dockerfile_path = remote_project_dir + '/' + dockerfile_path
docker_context_path = os.path.dirname(dockerfile_path)
# create stack
params = {
'VpcId': vpc_id,
'InstanceProfileArn': instance_profile_arn,
'InstanceType': instance_type,
'KeyName': key_name,
'ImageId': ami_id,
'RootVolumeSize': str(root_volume_size),
'VolumeMountDirectories': ('"%s"' % '" "'.join(mount_dirs)) if mount_dirs else '',
'DockerDataRootDirectory': docker_config['dataRoot'],
'DockerImage': docker_config.get('image', ''),
'DockerfilePath': dockerfile_path,
'DockerBuildContextPath': docker_context_path,
'DockerNvidiaRuntime': 'true' if is_gpu_instance(instance_type) else 'false',
'DockerWorkingDirectory': working_dir,
'InstanceNameTag': project_name,
'ProjectS3Bucket': bucket_name,
'ProjectDirectory': remote_project_dir,
}
res = self._cf.create_stack(
StackName=self._stack_name,
TemplateBody=template,
Parameters=[{'ParameterKey': key, 'ParameterValue': value} for key, value in params.items()],
Capabilities=['CAPABILITY_IAM'],
OnFailure='DO_NOTHING',
)
return res
def delete_stack(self):
self._cf.delete_stack(StackName=self._stack_name)
@staticmethod
def _get_volume_resources(ec2, volume: dict, device_letter: str):
resources = {}
availability_zone = ''
# VolumeAttachment resource
attachment_resource_name = 'VolumeAttachment' + device_letter.upper()
attachment_resource = {
'Type': 'AWS::EC2::VolumeAttachment',
'Properties': {
'Device': '/dev/sd' + device_letter,
'InstanceId': {'Ref': 'SpotInstance'},
},
}
volume_name = volume['name']
volume_size = volume['size']
deletion_policy = volume['deletionPolicy']
# check that the volume name is specified
if not volume_name and deletion_policy != 'delete':
raise ValueError('Volume name is required if the deletion policy isn\'t set to "delete".')
volume_info = get_volume(ec2, volume_name) if volume_name else {}
if volume_info:
# set availability zone
availability_zone = volume_info['AvailabilityZone']
# set volume ID for the VolumeAttachment resource
attachment_resource['Properties']['VolumeId'] = volume_info['VolumeId']
# check size of the volume
if volume_size and (volume_size != volume_info['Size']):
raise ValueError('Specified size for the "%s" volume (%dGB) doesn\'t match the size of the '
'existing volume (%dGB).' % (volume_name, volume_size, volume_info['Size']))
else:
# new volume will be created
volume_resource_name = 'Volume' + device_letter.upper()
volume_resource = {
'Type': 'AWS::EC2::Volume',
'Properties': {
'AvailabilityZone': {'Fn::GetAtt': ['SpotInstance', 'AvailabilityZone']},
},
}
# update VolumeAttachment resource with the reference to new volume
attachment_resource['Properties']['VolumeId'] = {'Ref': volume_resource_name}
# check if a snapshot with the specified name exists
snapshot_info = get_snapshot(ec2, volume_name) if volume_name else {}
if snapshot_info:
# volume will be restored from the snapshot
# check size of the volume
if volume_size and (volume_size < snapshot_info['VolumeSize']):
raise ValueError('Specified size for the "%s" volume (%dGB) is less than size of the '
'snapshot (%dGB).'
% (volume_name, volume_size, snapshot_info['VolumeSize']))
# set snapshot ID
orig_snapshot_id = snapshot_info['SnapshotId']
volume_resource['Properties']['SnapshotId'] = orig_snapshot_id
# rename or delete the original snapshot on stack deletion
if deletion_policy == 'create_snapshot':
# rename the original snapshot once new snapshot is created
s_renaming_resource_name = 'RenameSnapshot' + device_letter.upper()
resources[s_renaming_resource_name] = {
'Type': 'Custom::SnapshotRenaming',
'Properties': {
'ServiceToken': {'Fn::GetAtt': ['RenameSnapshotFunction', 'Arn']},
'SnapshotId': orig_snapshot_id,
},
}
volume_resource['DependsOn'] = s_renaming_resource_name
elif deletion_policy == 'update_snapshot':
# delete the original snapshot once new snapshot is created
s_deletion_resource_name = 'DeleteSnapshot' + device_letter.upper()
resources[s_deletion_resource_name] = {
'Type': 'Custom::SnapshotDeletion',
'Properties': {
'ServiceToken': {'Fn::GetAtt': ['DeleteSnapshotFunction', 'Arn']},
'SnapshotId': orig_snapshot_id,
},
}
volume_resource['DependsOn'] = s_deletion_resource_name
else:
# empty volume will be created, check that the size is specified
if not volume_size:
raise ValueError('Size for the new volume is required.')
# set size of the volume
if volume_size:
volume_resource['Properties']['Size'] = volume_size
# set the Name tag for new volume (it's the future snapshot name as well)
if volume_name:
volume_resource['Properties']['Tags'] = [{'Key': 'Name', 'Value': volume_name}]
if deletion_policy in ['create_snapshot', 'update_snapshot']:
# create snapshots on termination
volume_resource['DeletionPolicy'] = 'Snapshot'
elif deletion_policy == 'retain':
# retain the volume on termination
volume_resource['DeletionPolicy'] = 'Retain'
elif deletion_policy == 'delete':
# delete the volume on termination
volume_resource['DeletionPolicy'] = 'Delete'
else:
raise ValueError('Unsupported deletion policy: "%s".' % deletion_policy)
# update resources
resources[volume_resource_name] = volume_resource
# update resources
resources[attachment_resource_name] = attachment_resource
return resources, availability_zone
|
[
"[email protected]"
] | |
d48020e09346ac0479a76ae076694a0326303d2c
|
ae011f14f841c0acdc6a77a09badd76f6496f434
|
/AtCoder/ARC 049/connectivity.py
|
a714a7207a65ccc529ecb481bcf21b49fa6b9edf
|
[] |
no_license
|
ASHIJANKEN/CodingTest
|
d6d51f3ad8d2631483a48b605ba0b4c1958dbcf8
|
dfd0abbe4ea4090a9bef2495673523344be46c19
|
refs/heads/master
| 2022-11-07T07:29:47.462872 | 2020-06-27T14:04:34 | 2020-06-27T14:04:34 | 128,972,038 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,279 |
py
|
N, K, L = map(int, input().split())
roads = []
for i in range(K):
roads.append(list(map(int, input().split())))
trains = []
for i in range(L):
trains.append(list(map(int, input().split())))
parents_road = [i for i in range(N+1)]
rank_road = [0 for i in range(N+1)]
parents_train = [i for i in range(N+1)]
rank_train = [0 for i in range(N+1)]
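# Union-Find (disjoint-set) helpers: recursive root lookup, union by rank,
# and a same-component check (editor comment).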
def find(parents,v):
if parents[v] == v:
return v
else:
return find(parents, parents[v])
def unite(parents, rank, v1, v2):
v1 = find(parents, v1)
v2 = find(parents, v2)
if v1 == v2:
return
elif rank[v1] < rank[v2]:
parents[v1] = v2
else:
parents[v2] = v1
if rank[v1] == rank[v2]:
rank[v1] += 1
def is_connected(parents, v1, v2):
v1 = find(parents, v1)
v2 = find(parents, v2)
if v1 == v2:
return True
else:
return False
for road in roads:
unite(parents_road, rank_road, road[0], road[1])
for train in trains:
unite(parents_train, rank_train, train[0], train[1])
for i in range(N):
count = 1
for j in range(N):
if is_connected(parents_road, i, j) == True and is_connected(parents_train, i, j) == True:
count += 1
if i != N-1:
print("{} ".format(count), end="")
else:
print(count)
|
[
"[email protected]"
] | |
48248070eee31b5da804a1282adf2c9f311b079b
|
7b2e225055512eda6122a28a64c2a977f6fafb9e
|
/examples/get_active_courses.py
|
18a44362b60f6c552e7341ee9c6cf110c00ce5e9
|
[
"MIT"
] |
permissive
|
RmDr/Stepik-API
|
f42c3283e5301f0d940360f8f08695501ff2f903
|
1292857131d06b242942c1bdbc80d28f546164fc
|
refs/heads/master
| 2020-12-03T01:58:19.697499 | 2017-03-28T14:24:35 | 2017-03-28T14:24:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,803 |
py
|
# Script that produces:
# a) courses started in the last six months that have new lessons added in the last month,
# b) courses created over the past six months in which more than 10 students are enrolled
#
# Outputs (stdout + CSV file):
# 0) the condition a) or b) or a) + b)
# 1) a reference to the course
# 2) the name of the course
# 3) name of the author of the course
# 4) the course author email
import csv
import json
import requests
import datetime
from dateutil import parser
def get_token():
# Get your keys at https://stepic.org/oauth2/applications/
# (client type = confidential, authorization grant type = client credentials)
client_id = '...'
client_secret = '...'
auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
resp = requests.post('https://stepik.org/oauth2/token/',
data={'grant_type': 'client_credentials'},
auth=auth)
token = json.loads(resp.text)['access_token']
return token
def get_data(pageNum):
api_url = 'https://stepik.org/api/courses?page={}'.format(pageNum)
course = json.loads(requests.get(api_url, headers={'Authorization': 'Bearer ' + get_token()}).text)
return course
limit_6m = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=31*6)
limit_1m = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=31)
def get_courses():
page = 1
while True:
api_url = 'https://stepik.org/api/courses?page={}'.format(page)
courses = json.loads(requests.get(api_url, headers={'Authorization': 'Bearer ' + get_token()}).text)['courses']
for course in courses:
if parser.parse(course['create_date']) < limit_6m:
return
            # a) courses started in the last six months that have new lessons added in the last month
a = ''
api_url = 'https://stepik.org/api/lessons?course={}&order=-id'.format(course['id'])
lessons = json.loads(requests.get(api_url, headers={'Authorization': 'Bearer ' + get_token()}).text)['lessons']
if lessons and parser.parse(lessons[0]['create_date']) > limit_1m:
a = 'A'
            # b) courses created over the past six months in which more than 10 students are enrolled
b = ''
api_url = 'https://stepik.org/api/members?group={}'.format(course['learners_group'])
members = json.loads(requests.get(api_url, headers={'Authorization': 'Bearer ' + get_token()}).text)['members']
if len(members) > 10:
b = 'B'
            # Emit a row (stdout + CSV)
if a or b:
owner = course['owner']
api_url = 'https://stepik.org/api/users/{}'.format(owner)
user = json.loads(requests.get(api_url, headers={'Authorization': 'Bearer ' + get_token()}).text)['users'][0]
owner_name = user['first_name'] + ' ' + user['last_name']
api_url = 'https://stepik.org/api/email-addresses?user={}&is_primary=true'.format(owner)
owner_email = json.loads(requests.get(api_url, headers={'Authorization': 'Bearer ' + get_token()}).text)['email-addresses'][0]['email']
link = 'https://stepik.org/{}'.format(course['id'])
row = [a, b, link, course['title'], owner_name, owner_email]
yield row
page += 1
csv_file = open('get_active_courses-{}.csv'.format(datetime.date.today()), 'w')
csv_writer = csv.writer(csv_file)
header = ['A?', 'B?', 'Link', 'Title', 'Owner', 'OwnerEmail']
csv_writer.writerow(header)
print('\t'.join(header))
for row in get_courses():
csv_writer.writerow(row)
print('\t'.join(row))
csv_file.close()
|
[
"[email protected]"
] | |
630fdf34cffd61483b80bff67efb54fe38eda8f2
|
6e95d66027599e960af7f0a82dbc94f029f6ee69
|
/run.py
|
11e151c302e7a2c1d3a538b56cce4b191d768f45
|
[] |
no_license
|
martinmaguire89/flask
|
ddb63c4f33c1aa4ac38a94de75e0cc12840e84c2
|
592c45340805f6cf8920eeb1fb9c7c85106f395c
|
refs/heads/master
| 2023-05-11T04:17:50.033322 | 2020-07-14T13:27:02 | 2020-07-14T13:27:02 | 231,933,322 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,240 |
py
|
import os
import json
from flask import Flask, render_template, request, flash
app = Flask(__name__)
app.secret_key = 'some_secret'
@app.route('/')
def index():
return render_template("index.html")
@app.route('/about')
def about():
data = []
with open("data/company.json", "r") as json_data:
data = json.load(json_data)
return render_template("about.html", page_title="About", company=data)
@app.route('/about/<member_name>')
def about_member(member_name):
member = {}
with open("data/company.json", "r") as json_data:
data = json.load(json_data)
for obj in data:
if obj["url"] == member_name:
member = obj
return render_template("member.html", member=member)
@app.route('/contact', methods=["GET", "POST"])
def contact():
if request.method == "POST":
flash("Thanks{}, we have received your message!".format(request.form["name"]))
return render_template("contact.html", page_title="Contact")
@app.route('/careers')
def careers():
return render_template("careers.html", page_title="Careers")
if __name__ == '__main__':
app.run(host=os.environ.get('IP'),
port=int(os.environ.get('PORT')),
debug=True)
|
[
"[email protected]"
] | |
6bc9cfedf2ac61a6012b493b3f2430a79c3a77a4
|
b4fd7421a2796672276219f32085371f732b76c5
|
/untitled3.py
|
dd0b589b66e3d72061004840da1a941b6bccd669
|
[] |
no_license
|
sanjanapatill/CDR-Analysis-with-Insights
|
16e4d1ac29f44f3ee9897a928b790f3a5c904db6
|
16705dd3dea99d5962d2d04e5a65e2986fbf1999
|
refs/heads/main
| 2023-01-01T19:17:06.802197 | 2020-10-25T15:43:55 | 2020-10-25T15:43:55 | 307,131,212 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,414 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 25 19:34:38 2020
@author: Sanjana
"""
# https://pastebin.com/QbYwcrgh
"""
This is the initial stage of the CDR Analysis project.
Here we load the CDR data and transform it for later data analysis and
visualization from a business perspective.
"""
"""
column no - actual name
1 serviceProvider
4 group Information
5 direction
9 startTime
13 EndTime
14 Miss Call Information
31 Group ID
120 userId
180 twoStageDialingDigits
146 relatedCallId
147 relatedCallIdReason
267 vpDialingfacResult
312 locationType
345 userAgent
date
starttime
endtime
duration
hourly_range
weekly_range
"""
# Loading all your libraries
import pandas as pd
# Declaring your Global variables
# main function to load the data and transform it for further use
def main():
dataset_name = "cdr_data.csv"
# Required columns
call_columns = ["4", "5","14", "31", "120", "147", "267", "312", "345", \
"date","starttime", "endtime","duration", "hourly_range","weekly_range"]
    # low_memory = False is used because some columns contain mixed data types
    # (the CSV is expected to provide a header row with the column labels used above)
call_dataset = pd.read_csv(dataset_name, usecols = call_columns,low_memory = False)
    # columns for service data
service_columns = ["31", "120", "147", "345","date", "starttime", "endtime","duration"]
service_dataset = call_dataset[service_columns]
# columns for device data
device_columns = ["5", "31", "120", "312", "345", "date","starttime", "endtime","duration"]
device_dataset = call_dataset[device_columns]
# Output
    # Renaming column names according to the client
call_dataset = call_dataset.rename(columns = {"4":"Group", "5":"Call_Direction","14":"Missed Calls",
"31":"GroupID", "120":"UserID", "147":"Features", "267":" vpDialingfacResult",
"312":"UsageDeviceType",
"345":"UserDeviceType"})
service_dataset = service_dataset.rename(columns={"120":"UserID",
"31":"GroupID", "147":"FeatureName",
"345":"UserDeviceType","date":"FeatureEventDate"
})
device_dataset = device_dataset.rename(columns={"5": "DeviceEventTypeDirection",
"120":"UserID", "31":"GroupID",
"345":"UserDeviceType","date":"DeviceEventDate",
"312":"UsageDeviceType"})
call_dataset.to_csv("Call_data.csv", index=None)
service_dataset.to_csv("Service_data.csv", index=None)
device_dataset.to_csv("Device_data.csv", index=None)
if (__name__ == '__main__'):
main()
|
[
"[email protected]"
] | |
0d7dd8e40973b221f489ff37dce90f802a587cf1
|
385d46cd1fc7df47814f68f450b8949df466131e
|
/easypolls.net.py
|
d26e7b8400005e076a4f88cfaa00ca4a75e277d2
|
[] |
no_license
|
ashaheedq/Voting-bot
|
e5c049b9e771a0ea5241dd41719fcb8016d6aefe
|
98ab208f1b7799108f82cf2ff341f6d26147807e
|
refs/heads/master
| 2022-12-12T07:44:17.698515 | 2020-09-10T08:11:18 | 2020-09-10T08:11:18 | 294,345,367 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,547 |
py
|
#!/usr/bin/python
url = 'http://www.easypolls.net/poll?jsoncallback=jQuery1102043759123584718296_1436987933237&command=getPoll&pollId=5ecad213e4b017b74559a5ce&isMobile=false&_=1436987933238'
import mechanize, http.cookiejar as cookielib, json, time
from random import randint
br = mechanize.Browser()
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)
# Browser options
br.set_handle_equiv(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [ ('Host','www.easypolls.net'), ('User-Agent','Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:38.0) Gecko/20100101 Firefox/38.0'), ('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'), ('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'), ('Accept-Language','en-US,en;q=0.5'), ('Accept-Encoding','deflate') ]
count = 1
while True:
cj.clear()
res = br.open(url)
    page = res.read().decode('utf-8', 'ignore')  # decode bytes so the str-based .find() below works on Python 3
search = 'pollKey":"'
pos = page.find(search) + len(search)
pkey = page[pos:pos+7]
submit = 'http://www.easypolls.net/poll?jsoncallback=jQuery1102048485376518992906_143698468448%s&multResp2=2&pollId=5ecad213e4b017b74559a5ce&pollKey=%s&command=saveResponse&_=1590362878722' % (str(randint(1,9)),pkey)
res = br.open(submit)
    page = res.read().decode('utf-8', 'ignore')  # decode bytes before slicing and json.loads
page = page[ page.find('{') : page.rfind('}')+1 ]
data = json.loads(page)
if data['resultCode'] == '0':
print ('Successful:', count)
count += 1
else:
print ('Unsuccessful')
#time.sleep(2)
|
[
"[email protected]"
] | |
3b59eeb39cfdbba61656424e03607f2a49000569
|
de116cb6ff5f4793bf01df128c8add41c552d3e5
|
/CodeWars/camelCase/textToCamelCase.py
|
d5eabc6dae1298a265705796a8cd6159e251c627
|
[] |
no_license
|
joshplateros/Practice-Questions
|
ba6c668552782b337f0957bd489b5fd833560323
|
a065c85d397934d1735279f0121a6fdd6d0e4437
|
refs/heads/master
| 2021-07-09T17:52:18.631855 | 2020-10-21T23:39:03 | 2020-10-21T23:39:03 | 205,966,725 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,039 |
py
|
# www.codewars.com/kata/517abf86da9663f1d2000003/train/python
import re
def to_camel_case(text):
splitted = []
result = text.find("-")
# completedWord = []
completedWord = ""
if (len(text) == 0):
return ''
splitted = re.split('[- _]', text)
checkList = []
# Go over each word and each letter
for i in range(0, len(splitted)):
# i is every word, [0] is the
# Check for Upper Camel Case
checkList.append(list(splitted[i]))
if (i == 0):
if (checkList[i][0].isupper() == True):
checkList[i][0] = checkList[i][0].upper()
# Fix it here
else:
if (checkList[i][0].isupper() == False):
checkList[i][0] = checkList[i][0].upper()
# Convert back to string
checkList[i] = ''.join(checkList[i])
print (checkList)
print ("YUP: ", completedWord.join(checkList))
return completedWord.join(checkList)
to_camel_case("the_stealth_warrior")
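# Hedged usage checks (added for illustration); the expected outputs follow the
# Codewars kata linked at the top of this file:
assert to_camel_case("the_stealth_warrior") == "theStealthWarrior"
assert to_camel_case("The-Stealth-Warrior") == "TheStealthWarrior"
assert to_camel_case("") == ""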
|
[
"[email protected]"
] | |
8045ae8341fd3cac151ea395a4d62f7e5a92a745
|
cc8336cf7b733c6675a3dd4871f9badb3656e3e8
|
/bookmark/models.py
|
6551d2f45651be17bba0fc8b16da27a400209123
|
[] |
no_license
|
ssh521/bookmark
|
befd52158ebd9a5f1ac83bbddb810db4a8b11b18
|
a46f4f9585f1ba93baa23d44995d6ad052ba0506
|
refs/heads/master
| 2021-11-23T08:11:14.435300 | 2021-10-31T13:48:15 | 2021-10-31T13:48:15 | 214,645,894 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 382 |
py
|
from django.db import models
from django.urls import reverse
class Bookmark (models.Model):
site_name = models.CharField(max_length=100)
url = models.URLField('Site URL')
def __str__(self):
        # the value shown when the object is printed
return self.site_name + " " + self.url
def get_absolute_url(self):
return reverse('detail', args=[self.id])
|
[
"[email protected]"
] | |
fff70a531184f845afd5d314ce82c007a8d99ba6
|
882ce52c80c01e69cf00476320351140a5ab055a
|
/gen.py
|
87b0037936b2fbc9cffd33b58b4a859d3fc008f6
|
[] |
no_license
|
adiultra/CollatzRsrch
|
37591528bb359622ff782c68c4e626c42f4f4c85
|
619203ee03bf1aa1cf28dd36cb0ab613d2756895
|
refs/heads/master
| 2021-01-13T03:14:46.279527 | 2016-12-29T11:04:12 | 2016-12-29T11:04:12 | 77,605,809 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 311 |
py
|
large = [1,0]
def fn(x):
j =x
global large
n = 0
while x!= 1:
if x %2 == 0:
            x = x//2
n += 1
else:
            x = (3*x+1)//2
n+=2
if large[1]<n:
large = [j,n]
print(large)
return n
for i in range(1,10000000):
fn(i)
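
# Illustrative alternative (not from the original author): the same record search with
# memoized step counts. Note it uses the standard single-step rule (odd x -> 3x+1 counts
# as one step), so its counts differ from fn() above, which compresses odd steps as (3x+1)/2.
def longest_chain(limit):
    memo = {1: 0}
    best = (1, 0)
    for start in range(2, limit):
        chain = []
        x = start
        while x not in memo:
            chain.append(x)
            x = x // 2 if x % 2 == 0 else 3 * x + 1
        steps = memo[x]
        for value in reversed(chain):
            steps += 1
            memo[value] = steps
        if steps > best[1]:
            best = (start, steps)
    return best
# longest_chain(1_000_000) should report 837799 with 524 steps (the commonly cited record).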
|
[
"[email protected]"
] | |
d8985677a59029b17e03e42069b38812e14ecf8d
|
743d58c35caf21568feddc86946bbee340174721
|
/leet_code/labuladong/stack/p0739_Daily_Temperatures.py
|
0d1b05a124037d6c13f214f35b0a1ee5f41b145f
|
[] |
no_license
|
klgentle/lc_python
|
38009ed82614c8f21ca9af6e3779a2e0898af09f
|
aabe56e690b40e4b93afef99bfe46d9a06e20cea
|
refs/heads/master
| 2022-12-02T05:23:55.585659 | 2022-08-07T12:11:38 | 2022-08-07T12:11:38 | 176,750,473 | 2 | 0 | null | 2022-11-15T23:42:06 | 2019-03-20T14:21:51 |
Python
|
UTF-8
|
Python
| false | false | 926 |
py
|
"""
739. Daily Temperatures
Medium
Given a list of daily temperatures T, return a list such that, for each day in the input, tells you how many days you would have to wait until a warmer temperature. If there is no future day for which this is possible, put 0 instead.
For example, given the list of temperatures T = [73, 74, 75, 71, 69, 72, 76, 73], your output should be [1, 1, 4, 2, 1, 1, 0, 0].
Note: The length of temperatures will be in the range [1, 30000]. Each temperature will be an integer in the range [30, 100].
"""
from typing import List


class Solution:
def dailyTemperatures(self, T: List[int]) -> List[int]:
res = [0] * len(T)
stack = []
for i in range(len(T)-1, -1, -1):
while stack and T[stack[-1]] <= T[i]:
stack.pop()
if stack:
res[i] = stack[-1] - i
stack.append(i)
#print(f"stack:{stack}")
return res
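
if __name__ == "__main__":
    # Hedged usage example (added for illustration), reproducing the case from the
    # problem statement above.
    print(Solution().dailyTemperatures([73, 74, 75, 71, 69, 72, 76, 73]))
    # expected output: [1, 1, 4, 2, 1, 1, 0, 0]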
|
[
"[email protected]"
] | |
430b886607c68f95ee1443b58e22c10b06ca0c36
|
b2135e3fc77666f043f0fbafd0d88ed9865d5b4f
|
/7183 Python Basics/32 Chapter 6 - About Properties/07 test_validation3/78794_01_code.py
|
2f8efd74a7afa4db194872d5c8b652ef492fbd27
|
[] |
no_license
|
Felienne/spea
|
164d05e9fbba82c7b7df8d00295f7157054f9248
|
ecb06c66aaf6a2dced3f141ca415be9efb7dbff5
|
refs/heads/master
| 2020-03-17T17:35:27.302219 | 2018-05-17T10:14:49 | 2018-05-17T10:14:49 | 133,794,299 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 568 |
py
|
#
import unittest


class AboutAssignments(unittest.TestCase):
class Doctor:
def __init__(self):
self._age = 903
@property
def age(self):
return self._age
@age.setter
def age(self, value):
if value < self.age:
pass
# nice try! you can't get any younger
else:
self._age = value
def test_validation3(self):
jodie = self.Doctor()
self.assertEqual(903, jodie.age)
jodie.age += 9
self.assertEqual(__, jodie.age)
|
[
"[email protected]"
] | |
5a4251faa3640ee14864c166bae3382666f4e194
|
376ba4a0a531f67d601b5f31794321313f09df62
|
/data_structures/heap/heap.py
|
45c86c5768d608613ea4446bd047615a99534f88
|
[
"MIT"
] |
permissive
|
scottwillmoore/algorithms
|
d967f9cb8a6d42da077840cd3d8546f7e442eb71
|
16f859f8a76e7522249fdd970bb2c0d7bbb9f880
|
refs/heads/master
| 2023-08-02T22:39:33.461294 | 2021-09-16T10:46:14 | 2021-09-16T10:47:35 | 389,867,280 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 997 |
py
|
from __future__ import annotations
from collections.abc import Collection
from typing import Callable, Protocol, TypeVar
Element = TypeVar("Element")
Comparator = Callable[[Element, Element], bool]
MIN_COMPARATOR: Comparator = lambda x, y: x < y
MAX_COMPARATOR: Comparator = lambda x, y: x > y
class Node(Protocol[Element]):
element: Element
# FEATURE: At the moment you cannot define associated types in a Protocol...
# https://github.com/python/typing/issues/548
# https://github.com/python/mypy/issues/7790
class Heap(Collection[Element], Protocol[Element]):
comparator: Comparator[Element]
def decrease_node(self, node: Node[Element]) -> None:
pass
def delete_node(self, node: Node[Element]) -> None:
pass
def merge(self, heap: Heap[Element]) -> None:
pass
def peek_node(self) -> Node[Element]:
pass
def pop_node(self) -> Node[Element]:
pass
def push_node(self, node: Node[Element]) -> None:
pass
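

# Illustrative sketch (not part of the original module): a minimal array-backed heap that
# covers the push/pop/peek portion of the Heap protocol above. decrease_node, delete_node,
# merge and the full Collection interface are omitted to keep the example short.
class SimpleNode:
    def __init__(self, element):
        self.element = element


class BinaryHeap:
    def __init__(self, comparator: Comparator = MIN_COMPARATOR):
        # comparator(a, b) is True when a should sit above b in the heap
        self.comparator = comparator
        self._nodes = []

    def __len__(self):
        return len(self._nodes)

    def peek_node(self):
        return self._nodes[0]

    def push_node(self, node):
        self._nodes.append(node)
        i = len(self._nodes) - 1
        while i > 0:  # sift the new node up while it outranks its parent
            parent = (i - 1) // 2
            if not self.comparator(self._nodes[i].element, self._nodes[parent].element):
                break
            self._nodes[i], self._nodes[parent] = self._nodes[parent], self._nodes[i]
            i = parent

    def pop_node(self):
        top = self._nodes[0]
        last = self._nodes.pop()
        if self._nodes:
            self._nodes[0] = last
            i, n = 0, len(self._nodes)
            while True:  # sift the moved node down until heap order is restored
                left, right, best = 2 * i + 1, 2 * i + 2, i
                if left < n and self.comparator(self._nodes[left].element, self._nodes[best].element):
                    best = left
                if right < n and self.comparator(self._nodes[right].element, self._nodes[best].element):
                    best = right
                if best == i:
                    break
                self._nodes[i], self._nodes[best] = self._nodes[best], self._nodes[i]
                i = best
        return top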
|
[
"[email protected]"
] | |
66401cd91bc96aaedbc392e2073c9fd118eade0e
|
b095339e88576cf0e9992e49d86ee72c4d9c4be0
|
/ProjAlgo/Route.py
|
1bf4bd1581beaa044dd9031c0486e23a69340544
|
[] |
no_license
|
NanoClem/Graph_Langage
|
48101296157ef01794c3b7e960536d18e6dc4c8a
|
b51328983da73042f31a24aff6798a10e9ab27b7
|
refs/heads/master
| 2020-04-08T12:16:44.517959 | 2019-03-08T12:55:10 | 2019-03-08T12:55:10 | 159,340,501 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,673 |
py
|
"""
creator : decoopmc
"""
import time
from datetime import datetime
from Arc import Arc
class Route :
"""
    This class represents the complete journey of a bus, also called a route.
    The route taken corresponds to the different paths (arcs) between the stops
    included in the bus's journey.
"""
def __init__(self, new_bus) :
"""
CONSTUCTEUR de route
param new_bus: bus dont la route est a construire
ATTRIBUTE unWays : liste des chemins non ponderes de la route
attribute ways: liste des chemins ponderes (arcs) de la route
"""
self.bus = new_bus
self.unWays = []
self.ways = []
def getBus(self) :
"""
        Returns the bus concerned by this journey
        return : the bus performing this route
"""
return self.bus
def getUnWeightRoute(self) :
"""
        Returns the bus's route
        return : list of arcs taken by the bus
"""
return self.unWays
def getWeightRoute(self) :
"""
        Returns the bus's route
        return : list of arcs taken by the bus, taking the timetables into account
"""
return self.ways
def isWeekEnd(self) :
"""
        Returns true if it is currently the weekend, false otherwise
RETURN TYPE : boolean
"""
days = ["Lundi", "Mardi", "Mercredi", "Jeudi", "Vendredi", "Samedi", "Dimanche"]
        if days[time.localtime()[6]] in days[5:7] : # if today falls on the weekend
return True
return False
def nextHour(self, hours, idCurrentH) :
"""
        Gets the bus's next passing time
        relative to the time given as a parameter
        PARAM hours : list of the bus's timetable entries, list[] time
        PARAM idCurrentH : index of the time whose successor we want to know
        """
        ret = None
        hours = list(filter(None.__ne__, hours)) # SECOND SAFEGUARD AGAINST UNAVAILABLE TIMES
        if hours[idCurrentH] == hours[-1] : # if the current time is the last one of the day
            ret = hours[0] # return the first available time of the following day
        else : # otherwise
            ret = hours[idCurrentH + 1] # the next time has been found
return ret
def toMinutes(self, myTime) :
"""
        Computes the total number of minutes from
        a time object
        PARAM myTime : variable of type time
        RETURN : total number of minutes
"""
minutesTime = myTime.hour*60 + myTime.minute
return minutesTime
def calcWeight(self, sts1, sts2, dateNow, isGo = True) :
"""
        Computes the weight in minutes of an arc linking two stops
        Note that this method computes the travel time
        between any two stops whatsoever
        INPUT :
        PARAM sts1, sts2 : stops between which we want to compute the weight of the arc linking them
        PARAM dateNow : current time
        PARAM isGo : True for the outbound direction, False for the return direction
        OUTPUT :
        RETURN : weight of the arc in minutes between the two stops given as parameters
        """
        # FETCHING THE TIMETABLES: weekend or weekday
        schedules = self.bus.getSchedules().getWeHolidaysDate()
        if not self.isWeekEnd() :
            schedules = self.bus.getSchedules().getRegularDate() # outbound timetables used for the tests
        # OUTBOUND OR RETURN CHECK
        sch = self.bus.getSchedules().toDatetime(schedules[1]) # conversion of the values to datetime
        if isGo :
            sch = self.bus.getSchedules().toDatetime(schedules[0])
        sts = [sts1.getName(), sts2.getName()] # names of the stops we are interested in
        weight = 0 # weight of the arc in minutes
        goodHour = [] # times corresponding to the next passing
        for s in sts :
            sch[s] = list(filter(None.__ne__, sch[s])) # DROP THE TIMES WHERE THE BUS DOES NOT PASS
            for t in sch[s] :
                if self.toMinutes(t) < self.toMinutes(dateNow) : # if the proposed time has already passed
                    goodHour.append(self.nextHour(sch[s], sch[s].index(t))) # next available passing time of the bus
                    break
                else :
                    goodHour.append(t)
                    break
        weight = self.toMinutes(goodHour[0]) - self.toMinutes(goodHour[1]) # travel time in minutes between the two stops
return weight
def buildUnWeightRoute(self) :
"""
        Builds the route travelled by the bus
        This method does not take the arc weights into account
        """
        stsBus = self.bus.getStations() # list of the bus's stops
        for i in range(len(stsBus)-1) :
            new_arc = Arc(stsBus[i], stsBus[i+1]) # new path between two stops
            self.unWays.append(new_arc) # add the path to the bus's journey
    # DO NOT FORGET TO TAKE THE DIRECTION (OUTBOUND OR RETURN) INTO ACCOUNT FOR THE TIMETABLES
def buildWeightRoute(self, isGo = True) :
"""
        Builds the route travelled by the bus
        This method takes the bus timetables into account,
        and therefore the arc weights
        PARAM isGo : outbound or return direction
        """
        stsBus = self.bus.getStations() # list of the bus's stops
        timeNow = datetime.now().time() # current time
        for i in range(len(stsBus)-1) :
            weight = self.calcWeight(stsBus[i], stsBus[i+1], timeNow) # weight of the path in minutes
            new_arc = Arc(stsBus[i], stsBus[i+1], weight) # new path between two stops
            self.ways.append(new_arc) # add the path to the bus's journey
def printRoute(self) :
"""
        Prints the bus's route
        """
        # STOPS ALONG THE ROUTE
print("ROUTE LIGNE " + str(self.bus.getNum()))
for i in range(len(self.ways)) :
print(self.ways[i])
#TERMINUS
print("TERMINUS : " + self.bus.getTerminus().getName())
print('\n')
def __eq__(self, route) :
"""
        Equality operator for
        the Route class
"""
return self.bus.getNum() == route.getBus().getNum()
|
[
"[email protected]"
] | |
c5ae4257150c3e1e1fdd2e16e204f478f52129fc
|
6cdd24734609c6e4fd3aa5a7a4862d8af80daa0e
|
/djangorestframework_extensions/models.py
|
7ee9bd0c1ea2355e04915559c875db851c4ba43d
|
[
"BSD-2-Clause"
] |
permissive
|
MechanisM/djangorestframework_extensions
|
e8a79725b99d2439642b4c61c48c97285f8f3e8d
|
937639218ff385d3ca7f921bd14d8bfd485f2e54
|
refs/heads/master
| 2020-04-08T13:51:06.957932 | 2012-07-10T17:05:13 | 2012-07-10T17:05:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 918 |
py
|
from django.dispatch import receiver
from django.db.models.signals import post_syncdb
from django.contrib.contenttypes.management import update_contenttypes
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
@receiver(post_syncdb)
def ensure_view_permission(app, created_models, **kwargs):
update_contenttypes(app, created_models, **kwargs) # This is normally called by post_syncdb, but we cannot guarantee ordering so we call it here
for m in created_models:
content_type = ContentType.objects.get_for_model(m)
meta = m._meta
obj_name = meta.object_name.lower()
perm, created = Permission.objects.get_or_create(
name="Can view %s" % obj_name,
content_type=content_type,
codename="view_%s" % obj_name)
if created:
            print("Added view_%s permission" % obj_name)
|
[
"[email protected]"
] | |
107188cdde394f7226c7ddb3f1de922a7ce8517e
|
e4acc426fabf4a9f346bf07293cf2c854a02e770
|
/web.py
|
c4dd19f189cc63e971f50d4b803aac8ce67e9fb0
|
[] |
no_license
|
bridgecrew-perf4/windows-jenkins-terraform
|
d6652da9a489f77179bf3a863d53b80555ad52a6
|
6089a413d4e6581a528bd1842a9c34b43e1549ec
|
refs/heads/master
| 2023-03-17T13:01:26.803918 | 2021-03-02T00:58:29 | 2021-03-02T00:58:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 210 |
py
|
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def main():
return render_template('index.html')
if __name__ == "__main__":
app.run(host="0.0.0.0", port=80, debug=True)
|
[
"[email protected]"
] | |
c35197f9f570cf811da57298c472a5e7507c805c
|
636a23ea51c1d3220e9b6b67404d99a12aa6e53b
|
/utils/permutation_loss.py
|
a3aefba3392de2ee17f24c3f4d8eb8e089a8d89d
|
[] |
no_license
|
alwc/GraphLineMatching
|
6249a6efd4ef98220a66aa82698dbbfba69b8f3d
|
4cbb42c491c5aef564b8e9f4010b0dc7a2d995c1
|
refs/heads/master
| 2022-07-15T23:59:58.090654 | 2020-05-18T12:19:49 | 2020-05-18T12:19:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,423 |
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class CrossEntropyLoss(nn.Module):
"""
Cross entropy loss between two permutations.
"""
def __init__(self):
super(CrossEntropyLoss, self).__init__()
def forward(self, pred_perm, gt_perm, pred_ns, gt_ns):
batch_num = pred_perm.shape[0]
#for b in range(batch_num):
# debug=pred_perm[b, (pred_ns[b]+1), (gt_ns[b]+1)]
# ff=0
pred_perm = pred_perm.to(dtype=torch.float32)
#print("bef min:{},max:{}".format(torch.min(pred_perm).item(), torch.max(pred_perm).item()))
if(not torch.all((pred_perm >= 0) * (pred_perm <= 1))):
if(torch.sum(torch.isnan(pred_perm)).item()==0):
pred_perm=pred_perm/torch.max(pred_perm)
assert torch.all((pred_perm >= 0) * (pred_perm <= 1))
assert torch.all((gt_perm >= 0) * (gt_perm <= 1))
loss = torch.tensor(0.).to(pred_perm.device)
n_sum = torch.zeros_like(loss)
for b in range(batch_num):
loss += F.binary_cross_entropy(
pred_perm[b, :(pred_ns[b] + 1), :(gt_ns[b] + 1)],
gt_perm[b, :(pred_ns[b] + 1), :(gt_ns[b] + 1)],
weight=gt_perm[b, :(pred_ns[b] + 1), :(gt_ns[b] + 1)],
reduction='sum')
n_sum += (pred_ns[b]+1).to(n_sum.dtype).to(pred_perm.device)
return loss / n_sum
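

# Hedged usage sketch (not part of the original module); the toy shapes below are chosen
# purely for illustration — one pair with three matched nodes plus the extra slot that the
# (pred_ns + 1) / (gt_ns + 1) slicing above accounts for.
if __name__ == "__main__":
    torch.manual_seed(0)
    gt_perm = torch.eye(4).unsqueeze(0)   # (1, 4, 4) ground-truth matching
    pred_perm = torch.rand(1, 4, 4)       # predicted soft matching, values in [0, 1)
    pred_ns = torch.tensor([3])
    gt_ns = torch.tensor([3])
    criterion = CrossEntropyLoss()
    print(criterion(pred_perm, gt_perm, pred_ns, gt_ns))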
|
[
"[email protected]"
] | |
19e0444bb8521b323469f3fcd68ba49b76ce7b7b
|
41e89ebedbb760382de217754d9d8ec00340b680
|
/Homework_beta/wsgi.py
|
c0f6c9120da5539d06fa711773c24aeb73f1183f
|
[] |
no_license
|
Dimagudilin/RIP_Homework
|
64c894200e7f80f45581407dd649219fd6fed8ea
|
8810b881e0c8bfe649129e38309d41cfe1ca356c
|
refs/heads/master
| 2020-09-25T08:16:03.064479 | 2019-12-19T20:54:54 | 2019-12-19T20:54:54 | 225,959,899 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 403 |
py
|
"""
WSGI config for Homework_beta project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Homework_beta.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
ffd932dbd780505eb4bef606f414e3d7a4c848cc
|
fa93e53a9eee6cb476b8998d62067fce2fbcea13
|
/build/position_controllers/catkin_generated/pkg.installspace.context.pc.py
|
23b00e2290c58c2e5784fc5a4572705354fb4fd1
|
[] |
no_license
|
oyetripathi/ROS_conclusion_project
|
2947ee2f575ddf05480dabc69cf8af3c2df53f73
|
01e71350437d57d8112b6cec298f89fc8291fb5f
|
refs/heads/master
| 2023-06-30T00:38:29.711137 | 2021-08-05T09:17:54 | 2021-08-05T09:17:54 | 392,716,311 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 518 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "controller_interface;forward_command_controller".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lposition_controllers".split(';') if "-lposition_controllers" != "" else []
PROJECT_NAME = "position_controllers"
PROJECT_SPACE_DIR = "/home/sandeepan/tiago_public_ws/install"
PROJECT_VERSION = "0.4.2"
|
[
"[email protected]"
] | |
f9b92cfbc1d8a521f0e1b94fab75fc5c6d9ce986
|
5f7a2753b88be27c995863067cdae62fef5f2d6a
|
/soft-attention/core/orisolver.py
|
610f31b14bd0c22a981baa5b68b13839bec28de6
|
[] |
no_license
|
16GMCN/COMS4995-Deep-Learning
|
118164f91760c17dbd38487268a4bca337be33dd
|
4332ff68a23d54f2e233c60039d0852a0e001ec7
|
refs/heads/master
| 2020-04-05T16:58:31.603167 | 2018-11-20T18:09:56 | 2018-11-20T18:09:56 | 157,037,933 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,563 |
py
|
import tensorflow as tf
import matplotlib.pyplot as plt
import skimage.transform
import numpy as np
import time
import os
import pickle
from scipy import ndimage
from core.utils import *
from core.bleu import evaluate
class CaptioningSolver(object):
def __init__(self, model, data, val_data, **kwargs):
"""
Required Arguments:
- model: Show Attend and Tell caption generating model
- data: Training data; dictionary with the following keys:
- features: Feature vectors of shape (82783, 196, 512)
- file_names: Image file names of shape (82783, )
- captions: Captions of shape (400000, 17)
- image_idxs: Indices for mapping caption to image of shape (400000, )
- word_to_idx: Mapping dictionary from word to index
- val_data: validation data; for print out BLEU scores for each epoch.
Optional Arguments:
- n_epochs: The number of epochs to run for training.
- batch_size: Mini batch size.
- update_rule: A string giving the name of an update rule
- learning_rate: Learning rate; default value is 0.01.
- print_every: Integer; training losses will be printed every print_every iterations.
- save_every: Integer; model variables will be saved every save_every epoch.
- pretrained_model: String; pretrained model path
- model_path: String; model path for saving
- test_model: String; model path for test
"""
self.model = model
self.data = data
self.val_data = val_data
self.n_epochs = kwargs.pop('n_epochs', 10)
self.batch_size = kwargs.pop('batch_size', 4)
self.update_rule = kwargs.pop('update_rule', 'adam')
self.learning_rate = kwargs.pop('learning_rate', 0.01)
self.print_bleu = kwargs.pop('print_bleu', False)
self.print_every = kwargs.pop('print_every', 100)
self.save_every = kwargs.pop('save_every', 1)
self.log_path = kwargs.pop('log_path', './log/')
self.model_path = kwargs.pop('model_path', './model/')
self.pretrained_model = kwargs.pop('pretrained_model', None)
self.test_model = kwargs.pop('test_model', './model/lstm/model-1')
# set an optimizer by update rule
if self.update_rule == 'adam':
self.optimizer = tf.train.AdamOptimizer
elif self.update_rule == 'momentum':
self.optimizer = tf.train.MomentumOptimizer
elif self.update_rule == 'rmsprop':
self.optimizer = tf.train.RMSPropOptimizer
if not os.path.exists(self.model_path):
os.makedirs(self.model_path)
if not os.path.exists(self.log_path):
os.makedirs(self.log_path)
def train(self):
# train/val dataset
n_examples = self.data['captions'].shape[0]
#n_examples = 5000
n_iters_per_epoch = int(np.ceil(float(n_examples)/self.batch_size))
features = self.data['features']
captions = self.data['captions']
image_idxs = self.data['image_idxs']
val_features = self.val_data['features']
n_iters_val = int(np.ceil(float(val_features.shape[0])/self.batch_size))
# build graphs for training model and sampling captions
loss = self.model.build_model()
with tf.variable_scope(tf.get_variable_scope()) as scope:
with tf.name_scope('optimizer'):
tf.get_variable_scope().reuse_variables()
_, _, generated_captions = self.model.build_sampler(max_len=20)
optimizer = self.optimizer(learning_rate=self.learning_rate)
grads = tf.gradients(loss, tf.trainable_variables())
grads_and_vars = list(zip(grads, tf.trainable_variables()))
train_op = optimizer.apply_gradients(grads_and_vars=grads_and_vars)
# summary op
tf.summary.scalar('batch_loss', loss)
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
for grad, var in grads_and_vars:
tf.summary.histogram(var.op.name+'/gradient', grad)
summary_op = tf.summary.merge_all()
        print("The number of epoch: %d" % self.n_epochs)
        print("Data size: %d" % n_examples)
        print("Batch size: %d" % self.batch_size)
        print("Iterations per epoch: %d" % n_iters_per_epoch)
config = tf.ConfigProto(allow_soft_placement = True)
#config.gpu_options.per_process_gpu_memory_fraction=0.9
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
tf.initialize_all_variables().run()
summary_writer = tf.summary.FileWriter(self.log_path, graph=tf.get_default_graph())
saver = tf.train.Saver(max_to_keep=20)
if self.pretrained_model is not None:
                print("Start training with pretrained Model.")
saver.restore(sess, self.pretrained_model)
prev_loss = -1
curr_loss = 0
start_t = time.time()
for e in range(self.n_epochs):
rand_idxs = np.random.permutation(n_examples)
captions = captions[rand_idxs]
image_idxs = image_idxs[rand_idxs]
for i in range(n_iters_per_epoch):
captions_batch = captions[i*self.batch_size:(i+1)*self.batch_size]
image_idxs_batch = image_idxs[i*self.batch_size:(i+1)*self.batch_size]
                    print(image_idxs_batch)
features_batch = features[image_idxs_batch]
feed_dict = {self.model.features: features_batch, self.model.captions: captions_batch}
_, l = sess.run([train_op, loss], feed_dict)
curr_loss += l
# write summary for tensorboard visualization
if i % 10 == 0:
summary = sess.run(summary_op, feed_dict)
summary_writer.add_summary(summary, e*n_iters_per_epoch + i)
if (i+1) % self.print_every == 0:
print ("\nTrain loss at epoch %d & iteration %d (mini-batch): %.5f" %(e+1, i+1, l))
ground_truths = captions[image_idxs == image_idxs_batch[0]]
decoded = decode_captions(ground_truths, self.model.idx_to_word)
for j, gt in enumerate(decoded):
print ("Ground truth %d: %s" %(j+1, gt))
gen_caps = sess.run(generated_captions, feed_dict)
decoded = decode_captions(gen_caps, self.model.idx_to_word)
print ("Generated caption: %s\n" %decoded[0])
print ("Previous epoch loss: ", prev_loss)
print ("Current epoch loss: ", curr_loss)
print ("Elapsed time: ", time.time() - start_t)
prev_loss = curr_loss
curr_loss = 0
# print out BLEU scores and file write
if self.print_bleu:
all_gen_cap = np.ndarray((val_features.shape[0], 20))
for i in range(n_iters_val):
features_batch = val_features[i*self.batch_size:(i+1)*self.batch_size]
feed_dict = {self.model.features: features_batch}
gen_cap = sess.run(generated_captions, feed_dict=feed_dict)
all_gen_cap[i*self.batch_size:(i+1)*self.batch_size] = gen_cap
all_decoded = decode_captions(all_gen_cap, self.model.idx_to_word)
save_pickle(all_decoded, "./data/val/val.candidate.captions.pkl")
scores = evaluate(data_path='./data', split='val', get_scores=True)
write_bleu(scores=scores, path=self.model_path, epoch=e)
# save model's parameters
if (e+1) % self.save_every == 0:
saver.save(sess, os.path.join(self.model_path, 'model'), global_step=e+1)
                    print("model-%s saved." % (e+1))
def test(self, data, split='train', attention_visualization=True, save_sampled_captions=True):
'''
Args:
- data: dictionary with the following keys:
- features: Feature vectors of shape (5000, 196, 512)
- file_names: Image file names of shape (5000, )
- captions: Captions of shape (24210, 17)
- image_idxs: Indices for mapping caption to image of shape (24210, )
- features_to_captions: Mapping feature to captions (5000, 4~5)
- split: 'train', 'val' or 'test'
- attention_visualization: If True, visualize attention weights with images for each sampled word. (ipthon notebook)
- save_sampled_captions: If True, save sampled captions to pkl file for computing BLEU scores.
'''
features = data['features']
# build a graph to sample captions
alphas, betas, sampled_captions = self.model.build_sampler(max_len=20) # (N, max_len, L), (N, max_len)
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
saver = tf.train.Saver()
saver.restore(sess, self.test_model)
features_batch, image_files = sample_coco_minibatch(data, self.batch_size)
feed_dict = { self.model.features: features_batch }
alps, bts, sam_cap = sess.run([alphas, betas, sampled_captions], feed_dict) # (N, max_len, L), (N, max_len)
decoded = decode_captions(sam_cap, self.model.idx_to_word)
if attention_visualization:
for n in range(10):
print ("Sampled Caption: %s" %decoded[n])
# Plot original image
img = ndimage.imread(image_files[n])
plt.subplot(4, 5, 1)
plt.imshow(img)
plt.axis('off')
# Plot images with attention weights
words = decoded[n].split(" ")
for t in range(len(words)):
if t > 18:
break
plt.subplot(4, 5, t+2)
plt.text(0, 1, '%s(%.2f)'%(words[t], bts[n,t]) , color='black', backgroundcolor='white', fontsize=8)
plt.imshow(img)
alp_curr = alps[n,t,:].reshape(14,14)
alp_img = skimage.transform.pyramid_expand(alp_curr, upscale=16, sigma=20)
plt.imshow(alp_img, alpha=0.85)
plt.axis('off')
plt.show()
if save_sampled_captions:
all_sam_cap = np.ndarray((features.shape[0], 20))
num_iter = int(np.ceil(float(features.shape[0]) / self.batch_size))
for i in range(num_iter):
features_batch = features[i*self.batch_size:(i+1)*self.batch_size]
feed_dict = { self.model.features: features_batch }
all_sam_cap[i*self.batch_size:(i+1)*self.batch_size] = sess.run(sampled_captions, feed_dict)
all_decoded = decode_captions(all_sam_cap, self.model.idx_to_word)
save_pickle(all_decoded, "./data/%s/%s.candidate.captions.pkl" %(split,split))
|
[
"[email protected]"
] | |
717e92a6feda1f345899d0e6678b31ee0ff08998
|
5da11a9ee7f860b8713afebe838e75d749b295a3
|
/app/app/settings.py
|
4b0589119e6aa79b98f87a86958888bee66f3860
|
[
"MIT"
] |
permissive
|
abhishek-anand-78/recipe-app-api
|
390a9d852d8ca43f4de9585ba0fc8c204f992c8a
|
7fc5cf142cb94a76b2c79e04a5f021b6984fce1c
|
refs/heads/main
| 2023-07-15T09:29:02.360041 | 2021-08-31T06:06:06 | 2021-08-31T06:06:06 | 401,362,923 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,080 |
py
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'x^th7pt3==_(iab^gw%6mtbjr2$yhwd8ir4dfgfy4a*x*(pzzx'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
d2e05c3cfab86f28eff962c80f13b1284c221ed6
|
fee2a81dfa111038c1986e7797f371b1f791ef46
|
/src/tajo/rowdecoder.py
|
f1277b96d30d62d6078e0a726b010dc5cc2d873e
|
[
"Apache-2.0"
] |
permissive
|
charsyam/python-tajo-rest-api
|
863abe0f1b087ba52e5d34c7dcbc551f1dd991c1
|
739581acaa50f50162192cc395331a7175ee2b59
|
refs/heads/master
| 2021-01-22T17:47:35.153669 | 2015-07-28T23:51:43 | 2015-07-28T23:51:43 | 37,017,406 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,750 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import struct
import math
from tajo.schema import Schema
from tajo.datatypes import TajoDataTypes as ttype
class RowDecoder:
def __init__(self, schema):
self.schema = Schema(schema)
self.headerSize = int(math.ceil(float(len(self.schema.columns)) / 8))
def toTuples(self, serializedTuples):
results = []
for serializedTuple in serializedTuples:
results.append(self.toTuple(serializedTuple))
return tuple(results)
def toTuple(self, serializedTuple):
size = len(self.schema.columns)
nullFlags = serializedTuple[:self.headerSize]
bb = io.BytesIO(serializedTuple[self.headerSize:])
results = []
for i in range(size):
column = self.schema.columns[i]
results.append(self.convert(0, column, bb))
return tuple(results)
def convert(self, isNull, column, bb):
ftype = column.datatype
flen = column.length
if (isNull == 1):
return "NULL"
if ftype == ttype.INT1:
v = bb.read(1)
return struct.unpack("b", v)[0]
if ftype == ttype.INT2:
v = bb.read(2)
return struct.unpack(">h", v)[0]
if ftype == ttype.INT4 or ftype == ttype.DATE:
v = bb.read(4)
return struct.unpack(">i", v)[0]
if ftype == ttype.INT8 or ftype == ttype.TIME or ftype == ttype.TIMESTAMP:
v = bb.read(8)
return struct.unpack(">q", v)[0]
if ftype == ttype.FLOAT4:
v = bb.read(4)
return struct.unpack(">f", v)[0]
if ftype == ttype.FLOAT8:
v = bb.read(8)
return struct.unpack(">d", v)[0]
if ftype == ttype.CHAR:
return bb.read(flen)
if ftype == ttype.TEXT or ftype == ttype.BLOB:
l = bb.read(4)
l2 = struct.unpack(">i", l)[0]
v = bb.read(l2)
return v
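

# Illustrative sketch (not part of the original module): how the big-endian layout read by
# convert() above can be produced by hand for an INT4 followed by a TEXT field. The
# Schema/column plumbing is skipped, so this only demonstrates the struct conventions
# (">i" for INT4, and a 4-byte length prefix before the TEXT bytes).
if __name__ == "__main__":
    text = b"tajo"
    payload = struct.pack(">i", 42) + struct.pack(">i", len(text)) + text
    bb = io.BytesIO(payload)
    print(struct.unpack(">i", bb.read(4))[0])   # 42
    length = struct.unpack(">i", bb.read(4))[0]
    print(bb.read(length))                      # b'tajo'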
|
[
"[email protected]"
] |