column              type            min    max
query_id            stringlengths   32     32
query               stringlengths   9      4.01k
positive_passages   listlengths     1      1
negative_passages   listlengths     88     101
query_id: da7a2df3f31b97024c5a6eeab2efd4f5
query: Validates an RGB hex string.
[ { "docid": "0872061ec045c3138ccc910b7eb9d2a7", "score": "0.8695411", "text": "def validate_hex_color(hex_str):\n match_str = r'#[0-9a-fA-F]{6}'\n if re.match(match_str, hex_str) is None:\n raise ValidationError(u'{} is not a valid RGB hex string.'.format(hex_str))", "title": "" } ]
[ { "docid": "cd2bbf914df6bd31966866bb868315f3", "score": "0.7722918", "text": "def is_hex_color(string):\n return bool(re.match(r'^(#[0-9a-fA-F]{3}|#[0-9a-fA-F]{6})$', string))", "title": "" }, { "docid": "7a76e6bb7522a826fdd5fb27b885a8d8", "score": "0.7716813", "text": "def test_invalidRGB_hex(self):\n assert utils.validHex(\"#444ABC\") and not utils.validRGB(\"#444ABC\")", "title": "" }, { "docid": "790ee25a477d17f80024323f09515a20", "score": "0.7655844", "text": "def valid_color_hex(color_code):\n return color_hex_re.match(color_code)", "title": "" }, { "docid": "27f3e516ecf23854d5fb32c3fdfda46b", "score": "0.7649849", "text": "def test_invalidHex_rgb(self):\n assert utils.validRGB(\n (38, 29, 103)) and not utils.validHex((38, 29, 103))", "title": "" }, { "docid": "05c7e6dd022377ddb18d25eacfc4956c", "score": "0.7329374", "text": "def validate_color(s):\n if s.startswith('#'):\n if len(s) > 7:\n return s[0:7]\n return s", "title": "" }, { "docid": "8fab0cc0cd9d569d724efd63c4900fff", "score": "0.7271975", "text": "def is_color_hex(color):\n if isinstance(color, basestring):\n match = re.search(r\"^#(?:[0-9a-fA-F]{3}){1,2}$\", color)\n if match:\n return True\n return False\n return False", "title": "" }, { "docid": "106b2e55578fdd432b3e65994f1ae160", "score": "0.7213711", "text": "def is_color(color_str):\n return re.match(\"#[0-9a-fA-F]{6}\", color_str) is not None", "title": "" }, { "docid": "54273c561c97da1831d4b0ba4e2f3828", "score": "0.7159889", "text": "def test_validHex(self):\n assert utils.validHex(\"#0066CC\") and utils.validHex(\"01A368\")", "title": "" }, { "docid": "234d8da9178001046c239c16d194f286", "score": "0.7097443", "text": "def is_hex(s):\n match = re.search(r\"^#(?:[0-9a-fA-F]{3}){1,2}$\", s)\n if match:\n return True\n else:\n return False", "title": "" }, { "docid": "625c665d76d1654e4d784b734e34a12f", "score": "0.7070549", "text": "def isAValidHex(e: str) -> str:\n if re.match(\"[^0-9a-fA-F]\", e):\n raise ValueError(f\"{e} is not a valid hex\")\n return e", "title": "" }, { "docid": "1c8962508293b67a5fb19a3df545e705", "score": "0.70598215", "text": "def test_invalidHex_format(self):\n assert not utils.validHex(\"#0066GG\")", "title": "" }, { "docid": "c6e38ddc0767cc51e278d2a7827d5ce8", "score": "0.69936156", "text": "def test_invalidHex_length(self):\n assert not utils.validHex(\"#0066C\") and not utils.validHex(\n \"#06CC\") and not utils.validHex(\"#00666CC\")", "title": "" }, { "docid": "6cde1e160de935bd1036cdb259dcb0be", "score": "0.69749147", "text": "def test_invalidRGB_format(self):\n assert not utils.validRGB((-1, 25, 70)) and not utils.validRGB((\n 400, 50, 82)) and not utils.validRGB((25, 'a', 93)) and not utils.validRGB((4.2, 0, 16))", "title": "" }, { "docid": "6412e28fccd9a05c86d088d8df96a4ca", "score": "0.6965132", "text": "def check_hex_to_rgb(term):\n def callback(data):\n global hex_to_rgb\n hex_to_rgb = data\n expected = 'rgba(170, 171, 33, 0.2)'\n color = '#aaab21'\n term.body.runJavaScript(PREFIX + \"hexToRGB('{}')\".format(color),\n callback)\n try:\n return hex_to_rgb == expected\n except NameError:\n return False", "title": "" }, { "docid": "2bbabf7be181657be866a96c59b2e85d", "score": "0.69519037", "text": "def validate_color (color):\n\tfor val in color:\n\t\tif val < 0 or val > 255:\n\t\t\treturn False\n\treturn True", "title": "" }, { "docid": "a6b8a8466534767d709486ebb0f8ce94", "score": "0.6893435", "text": "def test_hex_validation(hex_color: str) -> None:\n # make sure noting is raised when instantiating this class\n assert 
HexColor(__root__=hex_color)", "title": "" }, { "docid": "923e42442d27bff1e5c8fd23ae021aca", "score": "0.68675923", "text": "def test_handles_invalid_hex() -> None:\n with pytest.raises(ValidationError):\n HexColor(__root__=\"#123456789\")", "title": "" }, { "docid": "b30d337df6a131aae3505157203f5711", "score": "0.68136007", "text": "def validator_for_hex(value):\n if value and not re.match(r'^([0-9a-fA-F]+)$', value):\n raise ValidationError(\"Not an hexadecimal value %s\" % value)", "title": "" }, { "docid": "f34ea1932e883a4fdef8822a6507745a", "score": "0.6796929", "text": "def colour_validity_check(color: str, field: str) -> bool:\n # eye colour must be one of specified values\n if field == 'ecl' and color in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']:\n return True\n # hair colour must be valid hex code\n elif field == 'hcl' and re.match('#[a-f0-9]{6}', color):\n return True\n else:\n return False", "title": "" }, { "docid": "701fdef53fd8a6907798bd448f59bc59", "score": "0.6788764", "text": "def colorValidator(value):\n if not isinstance(value, str):\n return False\n parts = value.split(\",\")\n if len(parts) != 4:\n return False\n for part in parts:\n part = part.strip()\n converted = False\n try:\n part = int(part)\n converted = True\n except ValueError:\n pass\n if not converted:\n try:\n part = float(part)\n converted = True\n except ValueError:\n pass\n if not converted:\n return False\n if part < 0:\n return False\n if part > 1:\n return False\n return True", "title": "" }, { "docid": "9e48b721ac6464155063846d38c1e5be", "score": "0.6760745", "text": "def test_invalidRGB_length(self):\n assert not utils.validRGB(\n (42, 42)) and not utils.validRGB((42, 200, 100, 38))", "title": "" }, { "docid": "0326a1037e0e9bb6bd5b7b665697e1da", "score": "0.67358506", "text": "def validate_integer_color(val):\n if val < 0 or val > 255:\n raise ValidationError(u'{} is not a valid RGB color integer value.'.format(val))", "title": "" }, { "docid": "0934a5e287927b1393a109d76aaf63ed", "score": "0.65487856", "text": "def is_hex_string(s):\n return (isinstance(s, str) or isinstance(s, unicode)) \\\n and len(s) > 2 and s[:2] == '0x' and len(s) % 2 == 0", "title": "" }, { "docid": "29e07fbb8953590e03cb3c5688dc5254", "score": "0.64729124", "text": "def test_hex_to_rgb_conversion(hex_value):\n red, green, blue = utils.hex_to_rgb(hex_value)\n assert '#{:02X}{:02X}{:02X}'.format(red, green, blue).lower() == hex_value.lower()", "title": "" }, { "docid": "ba68b261ba292ce47fa508259e7946df", "score": "0.6397582", "text": "def is_hex_string(s: str) -> bool:\n hex_digits = set(string.hexdigits)\n # each byte is represented by 2 hex digits\n return len(s) % 2 == 0 and all(c in hex_digits for c in s)", "title": "" }, { "docid": "b0cc76844118e6e46ae2152683090dc9", "score": "0.6364707", "text": "def test_hex_to_rgb_error(hex_error_value):\n with pytest.raises(ValueError):\n utils.hex_to_rgb(hex_error_value)", "title": "" }, { "docid": "5b80fbee891c0227bfb37525df80f304", "score": "0.6360805", "text": "def test_parse_legacy_color_hex(self):\n test_values = (u'#000',\n u'#000000',\n u'#fff',\n u'#ffffff',\n u'#000080')\n for value in test_values:\n self.assertEqual(webcolors.hex_to_rgb(value),\n webcolors.html5_parse_legacy_color(value))", "title": "" }, { "docid": "af183f52b9de6e5b1d9e2254004f1f12", "score": "0.62787586", "text": "def hex_to_rgb(hex_str):\n return eval('(0x{0}{1}, 0x{2}{3}, 0x{4}{5})'.format(*hex_str.lstrip('#')))", "title": "" }, { "docid": "bbc32eff3fd936df9bd80f36b1871604", "score": 
"0.62777007", "text": "def is_color(color: str) -> str:\n # check if color is\n # 1) the default empty value\n # 2) auto\n # 3) a color name from the 16 color palette\n # 4) a color index from the 256 color palette\n # 5) an HTML-style color code\n if (color in ['', 'auto'] or\n color in COLORS.keys() or\n (color.isdigit() and int(color) >= 0 and int(color) <= 255) or\n (color.startswith('#') and (len(color) in [4, 7, 9]) and\n all(c in '01234567890abcdefABCDEF' for c in color[1:]))):\n return color\n raise VdtValueError(color)", "title": "" }, { "docid": "62316d4cec6f13580f43d2fa7c23312f", "score": "0.6270945", "text": "def stringToRGB(self, hexString):\n\t\tglobal last_color\n\t\t_hexStringLower = hexString.lower()\n\t\tprint(_hexStringLower)\n\t\ttry:\n\t\t r, g, b = int(_hexStringLower[:2],16), int(_hexStringLower[2:4],16), int(_hexStringLower[4:],16)\n\t\texcept:\n\t\t return last_color\n\t\treturn (r, g, b)", "title": "" }, { "docid": "7cd6c1e0423953343eb763a556732452", "score": "0.6263386", "text": "def test_parse_simple_color_error(self):\n test_values = (u'0099ccc',\n u'#09c',\n u'#0000',\n u'#0000000',\n u'#0000gg',\n u'#000000'.encode('ascii'))\n for value in test_values:\n self.assertRaises(ValueError,\n webcolors.html5_parse_simple_color,\n value)", "title": "" }, { "docid": "b0983dca418d5492774c154e43666302", "score": "0.62427336", "text": "def from_hex(cls, hexstr):\r\n assert (isinstance(hexstr, basestring) and\r\n hexstr.startswith('#') and\r\n len(hexstr) == 7\r\n ), \"need a 24-bit hexadecimal string, e.g. #000000\"\r\n\r\n RGB = hexstr[1:3], hexstr[3:5], hexstr[5:]\r\n return cls(*[int('0x'+cc, base=16) for cc in RGB])", "title": "" }, { "docid": "82d782c0829220a0fbf7ec6973cc7728", "score": "0.6181449", "text": "def _parse_hex_color(color):\n int_ = int\n match = _HEX_RE.match(color)\n if match is not None:\n hex_str = match.group(1)\n if len(hex_str) == 3:\n r = int_(hex_str[0], 16)\n r |= (r << 4)\n g = int_(hex_str[1], 16)\n g |= (g << 4)\n b = int_(hex_str[2], 16)\n b |= (b << 4)\n else:\n r = int_(hex_str[:2], 16)\n g = int_(hex_str[2:4], 16)\n b = int_(hex_str[4:6], 16)\n return Color(r, g, b, 255)\n match = _HEXA_RE.match(color)\n if match is not None:\n hex_str = match.group(1)\n if len(hex_str) == 4:\n r = int_(hex_str[0], 16)\n r |= (r << 4)\n g = int_(hex_str[1], 16)\n g |= (g << 4)\n b = int_(hex_str[2], 16)\n b |= (b << 4)\n a = int_(hex_str[3], 16)\n a |= (a << 4)\n else:\n r = int_(hex_str[:2], 16)\n g = int_(hex_str[2:4], 16)\n b = int_(hex_str[4:6], 16)\n a = int_(hex_str[6:8], 16)\n return Color(r, g, b, a)", "title": "" }, { "docid": "3ee3248d2f471c37640a4df27cbf0837", "score": "0.61207616", "text": "def is_hexadecimal(color):\n for ch in color:\n if ch == \"#\":\n continue\n if ch in hex_set:\n continue\n else:\n return False\n return True", "title": "" }, { "docid": "e81b6db3bf6872fb4330a22e91888b8c", "score": "0.60933924", "text": "def is_hex_string(value, length=12):\n if not length is None:\n if len(value) != length:\n return False\n\n for char in value:\n if not char in HEX_CHARS:\n return False\n return True", "title": "" }, { "docid": "74479897d424c933c930ea508f5cd004", "score": "0.60680485", "text": "def isHexEncodedString(subject):\n\n return re.match(r\"\\A[0-9a-fA-Fx]+\\Z\", subject) is not None", "title": "" }, { "docid": "df5209f9d088200b029f268edba8a2ef", "score": "0.6053338", "text": "def isrgb(t: tuple):\n return len(t) == 3 and all(v >= 0 and v <= 255 for v in t)", "title": "" }, { "docid": "42816d6f52c3451dc90fa62a58fe116c", 
"score": "0.60432947", "text": "def ishex(hexstr):\n return all(char in string.hexdigits for char in hexstr)", "title": "" }, { "docid": "039b72751f8dd583c8dd340812778d8f", "score": "0.602643", "text": "def check_hexsha(hex, error_msg):\r\n try:\r\n hex_to_sha(hex)\r\n except (TypeError, AssertionError):\r\n raise ObjectFormatException(\"%s %s\" % (error_msg, hex))", "title": "" }, { "docid": "af00c41db5c028e42d0e181d9735bbe8", "score": "0.60234416", "text": "def validate_hcl(self, hcl):\n if not self.valid:\n return\n if not re.fullmatch(\"^#[0-9a-fA-F]{6}\", hcl):\n self.mark_invalid(self, 'hcl')", "title": "" }, { "docid": "03255bf71aa14a9f77cbcce4f1f83ddd", "score": "0.598397", "text": "def parse_color(c: str) -> Tuple[int, int, int]:\n try:\n c = hex_to_rgb(c)\n (red, green, blue) = c.red, c.green, c.blue\n except ValueError as e:\n regex_rgb = r\"^rgb\\((\\d{1,3}),(\\d{1,3}),(\\d{1,3})\\)$\"\n _match = match(regex_rgb, c)\n if _match is not None:\n red = int(_match.groups()[0])\n green = int(_match.groups()[1])\n blue = int(_match.groups()[2])\n if 0 > red or 0 > green or 0 > blue or 255 < red or 255 < green or 255 < blue:\n raise ValueError()\n else:\n raise ValueError()\n return red, green, blue", "title": "" }, { "docid": "7749585a86f7fd8bbc9200b4c75be527", "score": "0.5982583", "text": "def is_valid(cls, color):\n # [R, G, B] or [R, G, B, A]\n if (\n isinstance(color, (list, tuple))\n and all([issubclass(type(v), int) for v in color])\n and (3 <= len(color) <= 4)\n ):\n return all(0 <= v <= 255 for v in color)\n # [r, g, b] or [r, g, b, a] (float)\n elif (\n isinstance(color, (list, tuple))\n and all([issubclass(type(v), float) for v in color])\n and (3 <= len(color) <= 4)\n ):\n return all(0 <= v <= 1 for v in color)\n # Hexadecimal RGBA\n elif issubclass(type(color), int):\n return 0 <= color <= 0xFFFFFFFF\n # RGBA string\n elif isinstance(color, str):\n try:\n n = int(color.rsplit(\"#\")[-1], 16)\n return 0 <= n <= 0xFFFFFFFF\n except ValueError:\n return False\n return False", "title": "" }, { "docid": "b790cf244f355ed8024b5d171e544ad3", "score": "0.59657294", "text": "def test_parse_legacy_color_error(self):\n test_values = (u'#000000'.encode('ascii'),\n u\"transparent\",\n u'')\n for value in test_values:\n self.assertRaises(ValueError,\n webcolors.html5_parse_legacy_color,\n value)", "title": "" }, { "docid": "31eec2587aed9afdf92cd1f3fc1bb689", "score": "0.5929919", "text": "def _assert_is_valid_rgb(self, rgb: Sequence[float]) -> None:\n all_float = all([isinstance(val, (float, np.floating)) for val in rgb])\n all_between_0_1 = all([(val >= 0.0 and val <= 1.0) for val in rgb])\n all_int = all([isinstance(val, (int, np.integer)) for val in rgb])\n all_between_0_255 = all([(val >= 0 and val <= 255) for val in rgb])\n\n if not (\n (all_float and all_between_0_1) or (all_int and all_between_0_255)\n ):\n raise StimulationDeviceError(\n \"Invalid RGB specification. Please use tuples of float \"\n + \"(>=0.|<=1.) 
or int (>=0|<=255).\"\n )", "title": "" }, { "docid": "864f7b9049f76d499cb67eb1296dd244", "score": "0.58862144", "text": "def is_color_rgb(color):\n if isinstance(color, (tuple, list)):\n if len(color) == 3:\n if all(isinstance(c, float) for c in color):\n if all(c >= 0.0 and c <= 1.0 for c in color):\n return True\n elif all(isinstance(c, int) for c in color):\n if all(c >= 0 and c <= 255 for c in color):\n return True\n return False", "title": "" }, { "docid": "5e7719a4fac0a49c10c7c53697cd2cfa", "score": "0.5845025", "text": "def isValidArgument(s):\n return '\\r' not in s and '\\n' not in s and '\\x00' not in s", "title": "" }, { "docid": "58bd993c1c890796f8ca546ef43ba508", "score": "0.5841136", "text": "def HTMLColorToRGB(self,colorstring):\n colorstring = colorstring.strip()\n if colorstring[0] == '#': colorstring = colorstring[1:]\n if len(colorstring) != 6:\n raise ValueError, \"input #%s is not in #RRGGBB format\" % colorstring\n r, g, b = colorstring[:2], colorstring[2:4], colorstring[4:]\n r, g, b = [int(n, 16) for n in (r, g, b)]\n return (r, g, b)", "title": "" }, { "docid": "08c4ab6d39c4230f1a50fc3ad4e4fe47", "score": "0.5821459", "text": "def HTMLColorToRGB(colorstring):\n colorstring = colorstring.strip()\n if colorstring[0] == '#': colorstring = colorstring[1:]\n if len(colorstring) != 6:\n raise ValueError(\"input #%s is not in #RRGGBB format\" % colorstring)\n r, g, b = colorstring[:2], colorstring[2:4], colorstring[4:]\n r, g, b = [int(n, 16) for n in (r, g, b)]\n return (r, g, b)", "title": "" }, { "docid": "26c7a7031b8559ad6e107ea260d1fa8a", "score": "0.58167624", "text": "def colorCheck(fc_bgcolor,fcbg):\n if isinstance(fc_bgcolor,str):\n if fc_bgcolor.startswith(\"#\"):\n fc_bgcolor = QColor(fc_bgcolor)\n elif fc_bgcolor.isdigit():\n \"\"\" color is int a map from int to r,g,b triplets from pickled color map file \"\"\"\n tc = int(fc_bgcolor)\n tc = tc*2\n if tc < len(colorMap):\n pickledColor = colorMap[tc]\n else:\n pickledColor = (255, 0, 0)\n fc_bgcolor = QColor(*pickledColor)\n\n elif fc_bgcolor.isalpha() or fc_bgcolor.isalnum():\n fc_bgcolor = validColorcheck(fc_bgcolor)\n else:\n fc_bgcolor = QColor(*eval(fc_bgcolor))\n # fc_bgcolor = validColorcheck(fc_bgcolor)\n return(fc_bgcolor)", "title": "" }, { "docid": "fbea0b3feede34f352a0d84075c8493b", "score": "0.57997787", "text": "def _validatePlotColor(plot_color):\n if not is_color_like(plot_color):\n raise ValueError(\"Invalid color: \", plot_color)\n else:\n return True", "title": "" }, { "docid": "e4dd48a43135e7f2f2d12056dd9d4abb", "score": "0.5793591", "text": "def color(self, value: str):\r\n if not COLOR_HEX_MATCHER.match(value):\r\n raise ValueError(\r\n \"value was not a properly formatted color hex, i.e. 
#000000.\"\r\n )\r\n self.update_attribute_value(Attribute.color, value)", "title": "" }, { "docid": "18d48323126378243afcc7ea45671343", "score": "0.5793291", "text": "def color_from_string(cstr):\n\n return gxapi.GXMVIEW.color(str(cstr))", "title": "" }, { "docid": "73b7cd99dd149794aed7d8bf3711b844", "score": "0.5788715", "text": "def hex_verify(contentin):\n try:\n int(contentin, 16)\n except ValueError:\n return False\n else:\n return True", "title": "" }, { "docid": "07d9eb564f933e9b5cd3eb58afd9eb46", "score": "0.5779373", "text": "def HTMLColorToRGB(colorstring):\n colorstring = colorstring.strip()\n if colorstring[0] == '#':\n colorstring = colorstring[1:]\n if len(colorstring) != 6:\n raise ValueError(\n \"input #{0} is not in #RRGGBB format\".format(colorstring))\n r, g, b = colorstring[:2], colorstring[2:4], colorstring[4:]\n r, g, b = [int(n, 16) for n in (r, g, b)]\n return (r, g, b)", "title": "" }, { "docid": "a9286ae2742f846025862d0da0f12b4d", "score": "0.5744118", "text": "def valid_color(cls, color):\n a=wx.App()\n p = wx.Pen(color)\n a.Destroy()\n return p.Colour.IsOk()", "title": "" }, { "docid": "09fcae01f6048bed1e264cf028ad633d", "score": "0.5736506", "text": "def parse_color(color_str: str) -> GTPColor:\n if color_str.lower() == GTPColor.BLACK.value:\n return GTPColor.BLACK\n\n if color_str.lower() == GTPColor.WHITE.value:\n return GTPColor.WHITE\n\n raise ValueError(\"Cannot parse string `%s`\" % color_str)", "title": "" }, { "docid": "1d9e3083d6ef15e77bfd5a8e5305ecc6", "score": "0.57008195", "text": "def _read_color(ctk_scene: Scene, user_settings: dict | None = None) -> str | None:\n color = _read_properties(ctk_scene, \"color\", user_settings)\n # strip the # from the color if it exists\n if color is None:\n return None\n\n # if is string\n if isinstance(color, str):\n if color.startswith(\"#\"):\n return color[1:]\n return color\n\n raise ValueError(\n f\"Color {color} is not a valid color. 
Please use a hex color string.\"\n )", "title": "" }, { "docid": "1e2e05ebe947f010ae71d7f604125bad", "score": "0.5680248", "text": "def hex2rgb(hex_str):\n return tuple(int('0x' + hexnum, 16)\n for hexnum in (hex_str[1:3], hex_str[3:5], hex_str[5:7]))", "title": "" }, { "docid": "bf66d0cd4ce44344326897d3bc6a1791", "score": "0.5679591", "text": "def parse_color(color_str: str, alpha: int = 255) -> RGBATuple:\n\n # case independent parsing\n color_str = color_str.lower()\n\n # named colors\n named_colors = {\n \"white\": (255, 255, 255, alpha),\n \"black\": (0, 0, 0, alpha),\n \"transparent\": (0, 0, 0, 0),\n }\n if color_str in named_colors:\n return named_colors[color_str]\n\n # 3 digit hex code\n if re.match(\"#[a-f0-9]{3}$\", color_str):\n r = int(color_str[1] * 2, 16)\n g = int(color_str[2] * 2, 16)\n b = int(color_str[3] * 2, 16)\n return (r, g, b, alpha)\n\n # 4 digit hex code\n if re.match(\"#[a-f0-9]{4}$\", color_str):\n r = int(color_str[1] * 2, 16)\n g = int(color_str[2] * 2, 16)\n b = int(color_str[3] * 2, 16)\n a = int(color_str[4] * 2, 16)\n return (r, g, b, a)\n\n # 6 digit hex code\n if re.match(\"#[a-f0-9]{6}$\", color_str):\n r = int(color_str[1:3], 16)\n g = int(color_str[3:5], 16)\n b = int(color_str[5:7], 16)\n return (r, g, b, alpha)\n\n # 8 digit hex code\n if re.match(\"#[a-f0-9]{8}$\", color_str):\n r = int(color_str[1:3], 16)\n g = int(color_str[3:5], 16)\n b = int(color_str[5:7], 16)\n a = int(color_str[7:9], 16)\n return (r, g, b, a)\n\n raise ValueError(f\"cannot parse color - {color_str}\")", "title": "" }, { "docid": "9ac3bf7dbc282de4a8a31cde8740f74a", "score": "0.567461", "text": "def parse_color(color: str) -> Tuple[int, int, int]:\n if color.startswith('#'):\n try:\n r = int(color[1:3], base=16)\n g = int(color[3:5], base=16)\n b = int(color[5:], base=16)\n except ValueError:\n LOGGER.warning('Invalid RGB value: \"{}\"!', color)\n r = g = b = 128\n else:\n r, g, b = map(int, Vec.from_str(color, 128, 128, 128))\n return r, g, b", "title": "" }, { "docid": "66540518ffbd1d7bb0752fa643ce8a46", "score": "0.56733745", "text": "def from_hex(cls, hex_: str):\n if isinstance(hex_, int):\n hex_ = str(hex(hex_))\n if hex_.startswith(\"0x\"):\n hex_ = hex_[2:]\n r, g, b, a = col_comps_from_hex(hex_)\n return Color(r, g, b, a)", "title": "" }, { "docid": "c48b9fe9e7320c8a20dfe353ffa05c9a", "score": "0.5637073", "text": "def test_hex(self):\n testcase = r\"\"\" 0XAF7468 0xFFFF0000 0xABCDEF0123456789\n 0x1\"\"\"\n expect = r\"\"\"0XAF7468,0xFFFF0000,0xABCDEF0123456789,0x1,<EOF>\"\"\"\n self.assertTrue(TestLexer.checkLexeme(testcase, expect, 112))", "title": "" }, { "docid": "bca2e6551ac2a152995d9893e1c14903", "score": "0.5633399", "text": "def __color_val_in_range(red: int, green: int, blue: int) -> bool:\n if 255 >= red >= 0 and 255 >= green >= 0 and 255 >= blue >= 0:\n return False\n else:\n return True", "title": "" }, { "docid": "629b316436e71d31431c71bdc1206e4d", "score": "0.5629781", "text": "def html_rgb(color='#000000'): #000000 é um valor default, se a funcção for chamada sem parâmetro\r\n if color.startswith('#'):\r\n color = color[1:]\r\n \r\n r = int(color[:2], 16)\r\n g = int(color[2:4], 16)\r\n b = int(color[4:], 16)\r\n return r, g, b", "title": "" }, { "docid": "9360bc1dc0b59fb5890d3a10760bf166", "score": "0.5614504", "text": "def parse_color(value: ResolvableColor) -> str:\n if isinstance(value, int):\n if value >= 0 and value <= 0xFFFFFF:\n return f'{value:0>6x}'\n elif isinstance(value, str):\n neat_value = value.lstrip('#').upper()\n if 
re.match('[0-9A-F]{6}', neat_value):\n return neat_value\n if value.upper() in Colour.__members__:\n return Colour.__members__[value.upper()].value\n elif isinstance(value, Color):\n return value.value\n elif isinstance(value, Pixel):\n # Remove leading \"#\".\n return str(value)[1:]\n elif isinstance(value, tuple):\n if len(value) == 3:\n for col_byte in value:\n if not isinstance(col_byte, int):\n raise TypeError(f\"Expected tuple of 3 integers, found {col_byte.__class__.__name__}\")\n if col_byte < 0 or col_byte > 255:\n raise ValueError(f\"Colors in rgb tuple must follow 0 <= x <= 255, got {col_byte}\")\n\n return f\"{value[0]:0>2x}{value[1]:0>2x}{value[2]:0>2x}\"\n\n raise ValueError(f'Invalid colour \"{value}\".')", "title": "" }, { "docid": "f1d6de02a8f8ada711968f20b6196e2f", "score": "0.56137985", "text": "def deserialize_hex_color_code(symbol: str) -> HexColorCode:\n if not isinstance(symbol, str):\n raise DeserializationError(\n f'Failed to deserialize color code from {type(symbol).__name__} entry',\n )\n\n try:\n color_value = int(symbol, 16)\n except ValueError:\n raise DeserializationError(\n f'The given color code value \"{symbol}\" could not be processed as a hex color value',\n )\n\n if color_value < 0 or color_value > 16777215:\n raise DeserializationError(\n f'The given color code value \"{symbol}\" is out of range for a normal color field',\n )\n\n if len(symbol) != 6:\n raise DeserializationError(\n f'The given color code value \"{symbol}\" does not have 6 hexadecimal digits',\n )\n\n return HexColorCode(symbol)", "title": "" }, { "docid": "227566894da45333334faf9e4ad6ff66", "score": "0.5610937", "text": "def hue_is_red(r, c):\n return 5 <= h[r][c] <= 12 and s[r][c] >= 65 and v[r][c] >= 128", "title": "" }, { "docid": "8d38d2d34c5995d899610bd5448e1da4", "score": "0.56103945", "text": "def test_check_color():\n color = BaseVisual._check_color(\"teal\")\n assert isinstance(color, tuple)\n assert len(color) == 3\n assert all(isinstance(c, int) for c in color)\n assert all(0 <= c <= 255 for c in color)\n\n color = BaseVisual._check_color((100, 20, 101))\n assert color == (100, 20, 101)\n\n with pytest.raises(TypeError, match=\"must be an instance of\"):\n BaseVisual._check_color([100, 20, 101])\n with pytest.raises(AssertionError):\n BaseVisual._check_color((100, 101))\n with pytest.raises(AssertionError):\n BaseVisual._check_color((101, -101, 101))", "title": "" }, { "docid": "c258247ccbdf9c6b004e8f2569898b9a", "score": "0.56029356", "text": "def hex_to_rgb(hx):\r\n if len(hx) != 7:\r\n raise ValueError(\"Hex must be #------\")\r\n hx = hx[1:] # omit the '#'\r\n r = int('0x'+hx[:2], 16)\r\n g = int('0x'+hx[2:4], 16)\r\n b = int('0x'+hx[4:6], 16)\r\n return (r,g,b)", "title": "" }, { "docid": "bc350ba467603a9560e82d937560ad8c", "score": "0.5595062", "text": "def convert_hex_2_rgb(h):\n\n # Chech if the leading charactar is \"#\".\n if ( \"#\" != h[0] ):\n Exception(\"The leading charactar is not #. h = %s.\" % (h))\n\n if ( 7 != len(h) ):\n Exception(\"The length of the input string is wrong. 
h = %s.\" % (h))\n\n colorCode = h[1:]\n\n # Conver the hexadecimal values.\n rgb = []\n for i in range(3):\n hexString = colorCode[ i*2:(i*2+2) ]\n rgb.append( int(hexString, 16) )\n \n return rgb", "title": "" }, { "docid": "abc657eb66146515d7d3ad6043594a08", "score": "0.5587306", "text": "def hex2rgb(hexcode):\n assert isinstance(hexcode, str)\n assert len(hexcode) == 7\n r = int(hexcode[1:3], 16)\n g = int(hexcode[3:5], 16)\n b = int(hexcode[5:7], 16)\n\n return r, g, b", "title": "" }, { "docid": "2a03564f38d7065997dd32c95a2682ff", "score": "0.55505085", "text": "def parse_rgb(v, include_number_sign=True):\n if isinstance(v, tuple):\n v = ''.join('{:02x}'.format(d) for d in v)\n if v.startswith('#'):\n v = v[1:]\n if len(v) == 3:\n v = u''.join(s + s for s in v)\n return u'#' + v if include_number_sign else v", "title": "" }, { "docid": "f713066f42327581d4d638224ad9f9c4", "score": "0.554964", "text": "def check_valid_data(self, index, r, g, b):\n valid = True\n if (index >= self.num_of_leds):\n print (\"bad index\", index, self.num_of_leds)\n valid = False\n if (r < 0 or r > 255):\n print (\"bad color value (r)\", r)\n valid = False\n if (g < 0 or g > 255):\n print (\"bad color value (g)\", g)\n valid = False\n if (b < 0 or b > 255):\n print (\"bad color value (b)\", b)\n valid = False\n\n return valid", "title": "" }, { "docid": "1350d4474376eb4b701c5e84ca417a0c", "score": "0.5547662", "text": "def isrgb1(t: tuple):\n return len(t) == 3 and all(v >= 0 and v <= 1 for v in t)", "title": "" }, { "docid": "e1e234cff8023d03b475136ecd44ffd1", "score": "0.5535551", "text": "def _check_colors(self):\n for c in [\"background\", \"foreground\", \"selected_background\", \"selected_foreground\"]:\n col = getattr(self, c, None)\n if col is None:\n continue\n\n if not isinstance(col, str) or not RGB.match(col):\n logger.warning(\n \"Invalid extension '%s' color: %s. 
Must be #RGB or #RRGGBB string.\", c, col\n )\n setattr(self, c, None)\n continue\n\n if not col.startswith(\"#\"):\n col = f\"#{col}\"\n setattr(self, c, col)", "title": "" }, { "docid": "efecdec0fca157efd28d470c828b49d2", "score": "0.553181", "text": "def get_background_color():\n print(\"Background color\")\n background_color = str(input(\"Choose the name of a color or #XXXXXX: \"))\n if background_color[0] == \"#\":\n checker = is_hexadecimal(background_color)\n if checker is True:\n return background_color\n else:\n print(\"Illegal format\")\n return get_background_color()\n\n elif background_color.lower() in color_set:\n return background_color\n else:\n print(\"Illegal format\")\n return get_background_color()", "title": "" }, { "docid": "1984f8563c8ae1e010475e66e07a56a7", "score": "0.55309814", "text": "def test_parse_simple_color(self):\n test_pairs = ((u'#ffffff', (255, 255, 255)),\n (u'#000080', (0, 0, 128)),\n (u'#daa520', (218, 165, 32)))\n for pair in test_pairs:\n self.assertEqual(pair[1],\n webcolors.html5_parse_simple_color(pair[0]))", "title": "" }, { "docid": "ae9d68b00f83000164a865ee30613acb", "score": "0.55070615", "text": "def validcolor(c):\n try:\n ret = [clamp(int(v+0.5), 0, 255) for v in c]\n return type(c)(ret)\n except TypeError:\n return clamp(int(v+0.5), 0, 255)", "title": "" }, { "docid": "c92c37e225c9afa1d848676c34d92a6b", "score": "0.55004776", "text": "def validate_geohash(value):\n \n if len(value) > 12:\n return False\n \n return all(v in BASE32 for v in value)", "title": "" }, { "docid": "f8bf3fba046b0dbbf7e44a80c90fd7b0", "score": "0.5494275", "text": "def is_valid_hcl(value: str) -> bool:\n if value[0] != \"#\":\n return False\n return all((char.isdigit() or char in \"abcdef\" for char in value[1:]))", "title": "" }, { "docid": "7101b2e7639a4a17ecd2b3c8502a54d5", "score": "0.54899", "text": "def is_hexadecimal(s):\n if isinstance(s, str)==False:\n return False\n elif len(s)==0:\n return False\n elif len(s)==1:\n return s in string.hexdigits\n else:\n return all([is_hexadecimal(ch) for ch in s])", "title": "" }, { "docid": "7714fc1a3c762851a84dff93b1b195ab", "score": "0.5480081", "text": "def isNegativeHex(e: str) -> bool:\n return e[0] == \"-\"", "title": "" }, { "docid": "eac3878e52195b5b6656335c32346fda", "score": "0.54759145", "text": "def _parse_hsl_color(color):\n float_ = float\n int_ = int\n min_ = min\n max_ = max\n match = _HSL_RE.match(color)\n if match is not None:\n hs, ss, ls = match.groups()\n h = ((float_(hs) % 360.0 + 360.0) % 360.0) / 360.0\n s = max_(0.0, min_(100.0, float_(ss))) / 100.0\n l = max_(0.0, min_(100.0, float_(ls))) / 100.0\n r, g, b = hls_to_rgb(h, l, s)\n r = int_(255 * r)\n g = int_(255 * g)\n b = int_(255 * b)\n return Color(r, g, b, 255)\n\n match = _HSLA_RE.match(color)\n if match is not None:\n hs, ss, ls, as_ = match.groups()\n h = ((float_(hs) % 360.0 + 360.0) % 360.0) / 360.0\n s = max_(0.0, min_(100.0, float_(ss))) / 100.0\n l = max_(0.0, min_(100.0, float_(ls))) / 100.0\n a = max_(0.0, min_(1.0, float_(as_)))\n r, g, b = hls_to_rgb(h, l, s)\n r = int_(255 * r)\n g = int_(255 * g)\n b = int_(255 * b)\n a = int_(255 * a)\n return Color(r, g, b, a)", "title": "" }, { "docid": "a4e85680b66a255829d156f608e29bcd", "score": "0.5473866", "text": "def validate(self, input: str) -> bool:\n if all(i in string.hexdigits for i in input):\n return self.hash_length() == len(input)\n return False", "title": "" }, { "docid": "95a944fce76189c0063e06f9b0b0ea8f", "score": "0.54701555", "text": "def is_valid_guess(guess):\n if not 
guess or (guess and len(guess) != 4):\n return False\n else:\n for color in guess:\n if color not in GameService.colors:\n return False\n \n return True", "title": "" }, { "docid": "6f99a8ce0260946bdcb109c3c9e9de7e", "score": "0.54606014", "text": "def _parse_rgb_color(color):\n int_ = int\n min_ = min\n max_ = max\n match = _RGB_NUM_RE.match(color)\n if match is not None:\n rs, gs, bs = match.groups()\n r = max_(0, min_(255, int_(rs)))\n g = max_(0, min_(255, int_(gs)))\n b = max_(0, min_(255, int_(bs)))\n return Color(r, g, b, 255)\n\n float_ = float\n match = _RGB_PER_RE.match(color)\n if match is not None:\n rs, gs, bs = match.groups()\n r = max_(0.0, min_(100.0, float_(rs))) / 100.0\n g = max_(0.0, min_(100.0, float_(gs))) / 100.0\n b = max_(0.0, min_(100.0, float_(bs))) / 100.0\n r = int_(255 * r)\n g = int_(255 * g)\n b = int_(255 * b)\n return Color(r, g, b, 255)\n\n match = _RGBA_NUM_RE.match(color)\n if match is not None:\n rs, gs, bs, as_ = match.groups()\n r = max_(0, min_(255, int_(rs)))\n g = max_(0, min_(255, int_(gs)))\n b = max_(0, min_(255, int_(bs)))\n a = max_(0.0, min_(1.0, float_(as_)))\n a = int_(255 * a)\n return Color(r, g, b, a)\n\n match = _RGBA_PER_RE.match(color)\n if match is not None:\n rs, gs, bs, as_ = match.groups()\n r = max_(0.0, min_(100.0, float_(rs))) / 100.0\n g = max_(0.0, min_(100.0, float_(gs))) / 100.0\n b = max_(0.0, min_(100.0, float_(bs))) / 100.0\n a = max_(0.0, min_(1.0, float_(as_)))\n r = int_(255 * r)\n g = int_(255 * g)\n b = int_(255 * b)\n a = int_(255 * a)\n return Color(r, g, b, a)", "title": "" }, { "docid": "a8b9b1532f081adc67d4a2a10df4a67c", "score": "0.5446876", "text": "def rgb_to_hex(rgb):\n if not all(0 <= value <= 255 for value in rgb):\n raise ValueError\n return \"#%02x%02x%02x\".upper() % rgb", "title": "" }, { "docid": "977007351238cc8c6c5e9fd42810b36e", "score": "0.5426835", "text": "def check_hash(hashstring):\n if not isinstance(hashstring, str):\n return False\n if len(hashstring) not in config.validhexhashlengths:\n return False\n # don't allow uppercase as it could confuse clients+servers and lowercase is default\n if not all(c in \"0123456789abcdef\" for c in hashstring):\n return False\n return True", "title": "" }, { "docid": "3eb66c86d85b4f87b6fe26d4abe8f042", "score": "0.541524", "text": "def hex_to_rgb(hex_color):\r\n hex_color = hex_color.lstrip('#')\r\n h_len = len(hex_color)\r\n return tuple(int(hex_color[i:i + h_len // 3], 16) for i in range(0, h_len, h_len // 3))", "title": "" }, { "docid": "551a244ed82b5465d0271130fcc5a326", "score": "0.5414123", "text": "def __ishex(_input: str) -> bool:\n\n return all(map(Assembler.__ishex_tool1, _input))", "title": "" }, { "docid": "d9eb040c775871fb0d0ac9d2b0c862bb", "score": "0.54050547", "text": "def hex_to_rgb(hex_value: str) -> Tuple[int, int, int]:\n if len(hex_value) != 7 or hex_value[0] != \"#\":\n raise ValueError(\n \"the color has to be specified by '#XXXXXX'. Invalid value %s\" % hex_value\n )\n hex_value = hex_value.lstrip(\"#\")\n try:\n int(hex_value, 16)\n except ValueError:\n raise ValueError(\n \"the color value has to be a valid hexadecimal number. 
Invalid value %s\"\n % hex_value\n )\n return int(hex_value[0:2], 16), int(hex_value[2:4], 16), int(hex_value[4:6], 16)", "title": "" }, { "docid": "d3b6243c5c18d207b6cfd221617bdd7f", "score": "0.5399049", "text": "def _assert_color(self, name, r, g, b):\n\n colors = [r, g, b]\n for color in colors:\n assert color >= 0 and color <= 1, 'Color parameter of ' + \\\n name + ' must be in [0,1]'", "title": "" }, { "docid": "9c7efc693d042ee448a00320aaf51053", "score": "0.53979814", "text": "def test_wrong_color(self):\n\n with pytest.raises(ValueError):\n self.validator(value=-1, field_name=self.field_name)", "title": "" }, { "docid": "75ba3fab3044524e3d60686f6b86fa95", "score": "0.5391608", "text": "def _parse_hextet(cls, hextet_str):\n # Reject non-ASCII digits.\n if not cls._HEX_DIGITS.issuperset(hextet_str):\n raise ValueError(\"Only hex digits permitted in %r\" % hextet_str)\n # We do the length check second, since the invalid character error\n # is likely to be more informative for the user\n if len(hextet_str) > 4:\n msg = \"At most 4 characters permitted in %r\"\n raise ValueError(msg % hextet_str)\n # Length check means we can skip checking the integer value\n return int(hextet_str, 16)", "title": "" }, { "docid": "1c5f2a740c3a34b609da8d814bd5bea4", "score": "0.53835464", "text": "def validate_uuid_string(uuid_obj, uuid_version=4):\n uuid_string = str(uuid_obj).lower()\n try:\n uuid.UUID(uuid_string, version=uuid_version)\n except ValueError:\n # If it's a value error, then the string\n # is not a valid hex code for a UUID.\n return False\n\n return True", "title": "" }, { "docid": "023951db96865b4b96950cf23abf51ed", "score": "0.53765094", "text": "def rgba_from_hex(hex_):\n\tcolor = Gdk.RGBA()\n\tcolor.parse(hex_)\n\treturn color", "title": "" }, { "docid": "bf0d06f8ac983bc1ca926138ca089ac7", "score": "0.537007", "text": "def is_red(color: Color) -> bool:\n return color == color.red", "title": "" }, { "docid": "cd2d72d7c45ebf6adbb0582bcd8f8847", "score": "0.5361729", "text": "def hex_to_rgb(hex_color):\n hex_color = hex_color.lstrip('#')\n h_len = len(hex_color)\n return tuple(int(hex_color[i:i + h_len // 3], 16) for i in range(0, h_len, h_len // 3))", "title": "" }, { "docid": "68922cca1bf27f71e264e3e9a007ea66", "score": "0.53611577", "text": "def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return int(value[0:0 + lv // 3], 16), int(value[2:2 + lv // 3], 16), int(value[4:4 + lv // 3], 16)", "title": "" }, { "docid": "8cdcf4bd58472a4e9a97d3cd4d08013b", "score": "0.53592795", "text": "def hexToRgb(self,hexColor):\n red = int(hexColor[1:3],16)\n green = int(hexColor[3:5],16)\n blue = int(hexColor[5:7],16)\n return (float(red)/255,float(green)/255,float(blue)/255)", "title": "" } ]
query_id: cb8257fde6a717dc3afb0edd26d64e01
query: get the crystal scatter, background scatter, and photon counting noise for the reflections listed in the table R
[ { "docid": "1f95c0828e2b8648d84a4bced66f2729", "score": "0.0", "text": "def integrate2(R, badmask, data, gain=28, fit_bg=True, zscore=8, sz=8):\n from dials.algorithms.shoebox import MaskCode\n fg_code = MaskCode.Foreground.real\n Nrefl = len(R)\n fs_dim = 194\n ss_dim = 185\n\n Rpp = spot_utils.refls_by_panelname(R) # this is a dictionary whose key (0-63) unlock that panels reflections\n allspotmask = {}\n for pid in Rpp:\n # load the spot mask for all strong spots for this panel\n allspotmask[pid] = spot_utils.strong_spot_mask(Rpp[pid], (ss_dim, fs_dim))\n\n signa = np.zeros(Nrefl)\n bg = np.zeros(Nrefl)\n noise = np.zeros_like(signa)\n pix_per = np.zeros(Nrefl, int)\n for i_r, refl in enumerate(R):\n pid = refl['panel']\n\n spotmask = refl['shoebox'].mask.as_numpy_array() & fg_code == fg_code\n f1, f2, s1, s2, _, _ = refl['shoebox'].bbox # fast scan and slow scan edges of bounding box\n icent,jcent,_ = refl['xyzobs.px.value']\n\n thisspotmask = np.zeros_like(allspotmask[pid])\n thisspotmask[s1:s2, f1:f2] = spotmask\n\n i1 = int(max(icent-.5-sz, 0))\n i2 = int(min(icent-.5+sz , fs_dim))\n j1 = int(max(jcent-.5-sz, 0))\n j2 = int(min(jcent-.5+sz , ss_dim))\n sub_data = data[pid][j1:j2, i1:i2]\n sub_mask = ((~allspotmask[pid]) * badmask[pid] )[j1:j2, i1:i2]\n\n \n sub_thisspotmask = thisspotmask[j1:j2,i1:i2]\n Is = sub_data[sub_thisspotmask].sum()\n \n if fit_bg:\n tilt, bgmask, coeff = tilting_plane(sub_data,\n mask=sub_mask, zscore=zscore)\n \n bg_fit_mask = np.logical_and(~bgmask, sub_mask)\n m = sub_thisspotmask.sum()\n n = bg_fit_mask.sum()\n m2n = float(m)/float(n) # ratio of number of background to number of strong spot pixels\n \n # modifuf Is according to background plane fit\n Is = Is - tilt[sub_thisspotmask].sum()\n # store background pix according to Leslie 99\n Ibg = m2n*sub_data[bg_fit_mask].sum()\n else:\n Ibg = 0\n \n signa[i_r] = Is # signal in the spot\n bg[i_r] = Ibg # background around the spot\n noise[i_r] = (Is + Ibg + m2n*Ibg) / gain\n pix_per[i_r] = thisspotmask.sum()\n\n return signa, bg, noise, pix_per", "title": "" } ]
[ { "docid": "a3f78ab103b74584dba2bd44abbf3b38", "score": "0.58177507", "text": "def get_colors( simtable, dz=0.1 ):\n if 'FLT' not in simtable.__dict__ :\n simtable.getLightCurves()\n i7 = np.where( simtable.FLT=='7' )\n i8 = np.where( simtable.FLT=='8' )\n iI = np.where( simtable.FLT=='I' )\n iP = np.where( simtable.FLT=='P' )\n iN = np.where( simtable.FLT=='N' )\n m7,m8,mI,mP,mN = simtable.MAG[i7],simtable.MAG[i8],simtable.MAG[iI],simtable.MAG[iP],simtable.MAG[iN]\n c7Ipts = m7-mI\n c8Ipts = m8-mI\n cPNpts = mP-mN\n return( c7Ipts, c8Ipts, cPNpts )", "title": "" }, { "docid": "81e285d6247881821871ad5a4f5e7d77", "score": "0.5435437", "text": "def crystal_data(mtz_file):\n\n reflection_file = reflection_file_reader.any_reflection_file(file_name=mtz_file)\n content = reflection_file.file_content()\n space_group = content.space_group_name().replace(\" \", \"\")\n resolution = content.max_min_resolution()[1]\n cell_parameters = content.crystals()[0].unit_cell_parameters()\n\n return space_group, resolution, cell_parameters", "title": "" }, { "docid": "fc66ba1d145c98cda1ee59d2a74d4a20", "score": "0.5395334", "text": "def wht_noise(path_to_folder, subfolder, info):\n\n # define sample rate\n FS = 20000\n\n # define the Gauss kernel width (= 1SD)\n sigma = 0.001 # seconds, from Berman & Maler 1998\n\n # define the input data\n filename = ppjoin(path_to_folder, subfolder, \"stimulus-whitenoise-spikes.dat\")\n\n # get the file, extract the three data subunits\n relacs_file = load(filename)\n\n # extract RePro indices\n try:\n ReProIx = relacs_file.fields[('ReProIndex',)]\n\n except:\n return None\n\n # convert set objet into a list\n ReProIx = list(ReProIx)\n ReProIx.sort()\n\n # define empty list containing figure names\n fnames = []\n\n for ix in ReProIx:\n\n # if relacs file is empty or too short due to aborted RePro presentation\n # \"try:\" provides normal termination of the loop instead of error\n try:\n metas, _, datas = relacs_file.select({\"ReProIndex\": ix})\n\n except:\n return None\n\n print(\"ReProIx\", ix, \"Iterations\", len(metas))\n\n # determine figure handles\n fig = figure_handles()\n\n # FFT is defined as something + 1, due to mlab reasoning\n nFFT = 2048\n FFT = (nFFT/2)+1\n\n # prepare empty variables\n coh = np.zeros([len(metas), FFT], )\n coh_short = np.zeros([len(metas), FFT], )\n P_csd = np.zeros([len(metas), FFT], dtype=complex)\n P_csd_short = np.zeros([len(metas), FFT], dtype=complex)\n P_psd = np.zeros([len(metas), FFT])\n P_psd_short = np.zeros([len(metas), FFT])\n H = np.zeros([len(metas), FFT],)# dtype=complex)\n H_short = np.zeros([len(metas), FFT],)# dtype=complex)\n MI = np.zeros([len(metas), FFT], )\n MI_short = np.zeros([len(metas), FFT], )\n # number of stimulus iterations\n\n for i in range(0, len(metas)):\n\n color_spread = np.linspace(0.35,0.8, len(metas))\n cmap = [ cm.Greys(x) for x in color_spread ]\n\n # extract meta infos\n wnFname = metas[i][\"envelope\"]\n wnDur = float(metas[i][\"Settings\"][\"Waveform\"][\"duration\"].split(\"m\")[0]) # duration in miliseconds\n\n # conversions\n spikes = np.array(datas[i])\n\n # conversions\n wnDur /= 1000 # conversion to miliseconds\n spikes /= 1000\n\n print(spikes.shape)\n convolved_Train, _ = train_convolve(spikes, sigma, FS, wnDur)\n print(sum(convolved_Train)/FS)\n wNoise = process_wn(path_to_folder, wnFname, len(convolved_Train))\n\n # compute coherence, mutual information, transfer and the power spectra and cross-spectra density\n freq, coh[i,:], coh_short[i,:], H[i,:], H_short[i,:], MI[i,:], 
MI_short[i,:], \\\n P_csd[i,:], P_csd_short[i,:], P_psd[i,:], P_psd_short[i,:] \\\n = cohere_transfere_MI (convolved_Train, wNoise, nFFT, FS)\n\n # plot coherence, mutual information etc....\n plot_the_lot(fig, freq, coh, coh_short, MI, MI_short, H, H_short, metas, cmap, np.array(datas))\n\n avgCoh, avgCoh_short, avgH, avgH_short, mut_inf, mut_inf_short = compute_avgs(coh, coh_short, H, H_short, MI, MI_short)\n\n plot_the_lot(fig, freq, avgCoh, avgCoh_short, mut_inf, mut_inf_short, avgH, avgH_short, metas, cmap = [cm.Reds(0.6)],raster='empty', annotation=True)\n\n fig[2].text(0.05, 0.95, \" \".join(['Species:', info[\"Subject\"][\"Species\"]]), transform=fig[1].transAxes,\n fontsize=10)\n fig[2].text(0.05, 0.90, \" \".join(['ELL Segment:', info[\"Cell\"][\"Location\"]]), transform=fig[1].transAxes,\n fontsize=10)\n\n # define file name\n filename = \"\".join([str(metas[i][\"ReProIndex\"]), '_', 'whitenoise'])\n fnames.append(filename)\n\n fig[0].savefig(ppjoin(path_to_folder, subfolder, \".\".join([filename, 'png'])), transparent=True)\n fig[0].savefig(ppjoin(path_to_folder, subfolder, \".\".join([filename, 'svg'])), transparent=True)\n\n plt.close()\n\n return fnames", "title": "" }, { "docid": "f0993e65592ca396dfcbbc60fb1681d3", "score": "0.5332134", "text": "def _filter_reflections(reflections):\n\n cols = [\n \"id\",\n \"miller_index\",\n \"panel\",\n \"s1\",\n \"xyzobs.mm.value\",\n \"xyzobs.px.value\",\n \"xyzcal.px\",\n \"xyzobs.mm.variance\",\n \"flags\",\n \"shoebox\",\n \"delpsical.weights\",\n ]\n # NB xyzobs.px.value & xyzcal.px required by SauterPoon outlier rejector\n # NB delpsical.weights is used by ExternalDelPsiWeightingStrategy\n rt = flex.reflection_table()\n\n # copy columns to the new table. Could use the select method\n # for this except that 's1' is optional in the input so would want\n # to copy that in like this if present anyway\n for k in cols:\n if k in reflections:\n rt[k] = reflections[k]\n\n return rt", "title": "" }, { "docid": "32ed0bb65fa3904562264fe1f5f519dd", "score": "0.52956575", "text": "def OB140613_OGLE_phot_table():\n ob140613_mult = get_Rchi2('ob140613', 'mult', ogle_phot['ob140613_mult'], 'c8_')\n rchi2 = ob140613_mult[0]\n\n data = munge.getdata2('ob140613',\n phot_data=['I_OGLE'],\n ast_data=['Kp_Keck']) \n\n dir = ogle_phot['ob140613_mult']\n runid = 'c8_'\n\n fitter = model_fitter.PSPL_phot_parallax_merr_Solver(data,\n outputfiles_basename=dir + runid)\n \n labels = ['$t_0$ (MJD)', '$u_0$', '$t_E$ (days)', '$\\pi_{E,E}$',\n '$\\pi_{E,N}$', '$b_{SFF}$', '$I_{src}$ (mag)', r'$\\varepsilon_m$'] \n\n res = fitter.load_mnest_results_for_dynesty()\n\n ci = []\n ml = []\n\n for nn in np.arange(len(labels)):\n # Calculate 68% credible interval. 
\n ci.append(model_fitter.weighted_quantile(res['samples'][:, nn], [0.16, 0.84], sample_weight=res['weights']))\n ml.append(res['samples'][-1, nn]) # CHECK\n\n maxlogL = -0.5 * res['loglike'][-1]\n\n with open('OB140613_OGLE_phot.txt', 'a+') as tab_file:\n tab_file.write('log$\\mathcal{L}$' + ' & ' \n + '{0:.2f}'.format(maxlogL) + r' & \\\\ ' + '\\n'\n +\n '$\\chi^2_{dof}$' + ' & ' \n + '{0:.2f}'.format(rchi2) + r' & \\\\ ' + '\\n'\n + r'\\hline ' + '\\n')\n for ll, label in enumerate(labels):\n tab_file.write(label + ' & ' \n + '{0:.2f}'.format(ml[ll]) + ' & '\n + '[{0:.2f}, {1:.2f}]'.format(ci[ll][0], ci[ll][1]) + r' \\\\ ' + '\\n')\n\n return", "title": "" }, { "docid": "44df89fbfce97799a43e826841922111", "score": "0.5291101", "text": "def summarize_plates():\n pnames=plate.get_all_plate_names()\n nlen=min(max(map(len,pnames)), 25)\n namefmt='{:'+'{}'.format(nlen)+'}'\n lines=[]\n for pname in pnames:\n p=plate.get_plate(pname)\n lines.append((namefmt+' {} Field(s), {:4} holes').format(pname, len(p.fields),\n len(p.all_holes)))\n \n \n #field name string length\n fnames=[f.name for f in p.fields]\n nlen=min(max(map(len,fnames)), 25)\n fnamefmt='{:'+'{}'.format(nlen)+'}'\n \n #magnitude keys\n magkey=list(set(k for f in p.fields for t in f.targets\n for k in t.user.keys()\n if k in ['v','b','r','g','i'] or 'mag' in k))\n\n indent=' '\n fmt=(indent+fnamefmt+' {} {:4} {:3} {:3} {}'+' {:4}'*len(magkey))\n lines.append(fmt.format('Field', 'RA Dec', 'N Targ', 'N Sky', 'minsky',\n 'mustkeep', *magkey))\n \n\n\n for f in p.fields:\n \n mags=[np.array([float(t.user[k]) for t in f.targets if k in t.user and floatable(t.user[k])]) for k in magkey]\n \n for m in mags:m[(m<5) | (m > 30)]=np.nan\n meanmag=[np.nanmean(m) for m in mags]\n\n meanmag=['{:2.1f}'.format(m) for m in meanmag]\n \n lines.append(fmt.format(f.name, f.info['(ra, dec)'],\n len(f.targets), len(f.skys), f.info.get('minsky',0),\n f.info.get('mustkeep', False), *meanmag))\n\n\n return lines", "title": "" }, { "docid": "fb870e4e1088f02cf3e59f59425e093f", "score": "0.5280287", "text": "def OB150029_OGLE_phot_table():\n ob150029_add = get_Rchi2('ob150029', 'add', ogle_phot['ob150029_add'], 'd8_')\n rchi2 = ob150029_add[0]\n\n data = munge.getdata2('ob150029',\n phot_data=['I_OGLE'],\n ast_data=['Kp_Keck']) \n\n dir = ogle_phot['ob150029_add']\n runid = 'd8_'\n\n fitter = model_fitter.PSPL_phot_parallax_err_Solver(data,\n outputfiles_basename=dir + runid)\n\n labels = ['$t_0$ (MJD)', '$u_0$', '$t_E$ (days)', '$\\pi_{E,E}$',\n '$\\pi_{E,N}$', '$b_{SFF}$', '$I_{src}$ (mag)', r'$\\varepsilon_a$ (mag)'] \n\n res = fitter.load_mnest_results_for_dynesty()\n\n ci = []\n ml = []\n\n for nn in np.arange(len(labels)):\n # Calculate 68% credible interval. 
\n ci.append(model_fitter.weighted_quantile(res['samples'][:, nn], [0.16, 0.84], sample_weight=res['weights']))\n ml.append(res['samples'][-1, nn]) # CHECK\n\n maxlogL = -0.5 * res['loglike'][-1]\n\n with open('OB150029_OGLE_phot.txt', 'a+') as tab_file:\n tab_file.write('log$\\mathcal{L}$' + ' & ' \n + '{0:.2f}'.format(maxlogL) + r' & \\\\ ' + '\\n'\n +\n '$\\chi^2_{dof}$' + ' & ' \n + '{0:.2f}'.format(rchi2) + r' & \\\\ ' + '\\n'\n + r'\\hline ' + '\\n')\n for ll, label in enumerate(labels):\n if label == r'$\\varepsilon_a$ (mag)':\n tab_file.write(label + ' & ' \n + '{0:.4f}'.format(ml[ll]) + ' & '\n + '[{0:.4f}, {1:.4f}]'.format(ci[ll][0], ci[ll][1]) + r' \\\\ ' + '\\n')\n else:\n tab_file.write(label + ' & ' \n + '{0:.2f}'.format(ml[ll]) + ' & '\n + '[{0:.2f}, {1:.2f}]'.format(ci[ll][0], ci[ll][1]) + r' \\\\ ' + '\\n')\n\n return", "title": "" }, { "docid": "3e5e75d5d2126fb7c9d1e4120c125bf9", "score": "0.5272374", "text": "def background_components():\n\n curtain = glob.glob('/Users/brammer/3DHST/Spectra/Work/Incoming/ibhj44*flt.fits.gz')\n \n pure_zodi = glob.glob('/Users/brammer/3DHST/Spectra/Work/Incoming/ibhm39*flt.fits.gz')\n \n ims = []\n for im in pure_zodi:\n ims.append(pyfits.open(im))\n \n FLAT_F140W = pyfits.open(os.path.join(os.getenv('iref'), 'uc721143i_pfl.fits'))[1].data[5:-5, 5:-5]\n FLAT_F105W = pyfits.open(os.path.join(os.getenv('iref'), 'uc72113oi_pfl.fits'))[1].data[5:-5, 5:-5]\n FLAT_G141 = pyfits.open(os.path.join(os.getenv('iref'), 'u4m1335mi_pfl.fits'))[1].data[5:-5, 5:-5]\n \n object = ims[0][1].data*FLAT_G141/FLAT_F140W\n dy = 15\n \n xarr = np.arange(1014)\n for i in range(0,1000,100):\n plt.plot(xarr+i/900.*dy, np.median(object[i:i+100, :], axis=0), alpha=0.5)\n #\n shift = np.round(xarr/900.*dy-dy/2.)\n sh = object*0.\n for i in range(1014):\n print i\n sh[i,:] += nd.shift(object[i,:], shift[i])\n #\n model1 = np.median(sh, axis=0)\n model = object*0.\n for i in range(1014):\n model[i,:] = nd.shift(model1, -shift[i])", "title": "" }, { "docid": "457367f94ba7acaa6628f4dce31d4a84", "score": "0.5259393", "text": "def extractStars(filename):\n #Read the file into a table:\n hdu = astropy.io.fits.open(filename)\n table = astropy.table.Table(hdu[1].data)\n\n #Create a statement to get rid of stars with parallax error larger than 20% or with negative distances:\n# fraction = np.abs(table['parallax_error']/table['parallax'])\n# ok1 = fraction < 0.2\n# ok2 = table['parallax']>0\n# ok = ok1*ok2\n# add [ok] after .data to make use of the info above \n \n \n \n #From the table, extract parallax and other useful info, take only those rows for which [ok] is true:\n \n Parallax= table['parallax'].data #.data takes only the numbers, getting rid of the tittle of the column\n \n Parallax = Parallax/1000 # ZKBT: the GAIA parallaxes are in units of milliarcseconds; let's convert to arcsec\n negative = Parallax<0\n Parallax[negative] = 10**(-6) #replace al negative parallaxes with 10^-6 arcseconds\n uncertainty = (table['parallax_error']/table['parallax']) > 0.5\n Parallax[uncertainty] = 10**(-6) #replace all parallaxes with uncertainty greater than 50% with 10^-6 arcseconds\n Dec = (table['dec'].data)*(pi/180) #change degrees to radians.\n Fluxes= table['phot_g_mean_flux'].data\n Magnitudes = table['phot_g_mean_mag'].data\n \n RA = (table['ra'].data)\n over180 = RA > 180 #want RA values to be from -180 to 180\n RA[over180] = (RA[over180] - 360)\n RA = RA*(pi/180) #change degrees to radians.\n \n #Use formulas to produce other useful arrays:\n Distances = 
F.ParallaxToDistance(Parallax)\n X,Y,Z= F.toXYZ(RA,Dec,Distances)\n AbsoluteMagnitudes = F.AbsoluteMagnitude(Distances,Magnitudes)\n\n #Create a label dependant on the file name:\n temp = filename.replace('.fits','.png')\n tempList = temp.split('_')\n label = tempList[-1]\n\n return X,Y,Z,RA,Dec,Magnitudes,Distances,AbsoluteMagnitudes,Fluxes,label", "title": "" }, { "docid": "4e02bdaa6a19eb43ce736e7de3149ff7", "score": "0.52391315", "text": "def phantom_preparation(self):\n\n for i in [self.emiss_hdr, self.att_hdr, self.original_pet]:\n if i:\n rcommand = \"cambia_formato_hdr %s %s fl \" % (i,i)\n apple.osrun(rcommand,self.logfile)\n \n zpix, zsize, xpix, xsize, ypix, ysize = apple.read_analyze_header(self.emiss_hdr,self.logfile)\n\n new_x_dims = int(xsize*xpix/self.target_size[0])\n new_y_dims = int(ysize*ypix/self.target_size[1])\n new_z_dims = int(zsize*zpix/self.target_size[2])\n\n for i in [self.emiss_hdr, self.att_hdr, self.original_pet]:\n if i:\n output_i = i[0:-4]+\"_\" + self.scanner + \".hdr\"\n rcommand = 'cambia_matriz_imagen_hdr %s %s %s %s %s novecino' % (i, output_i, new_x_dims, new_y_dims, new_z_dims)\n apple.osrun(rcommand,self.logfile)\n\n rcommand = 'gen_hdr %s %s %s %s fl %s %s %s 0' % (output_i[0:-4], new_x_dims, new_y_dims, new_z_dims,\n self.target_size[0], self.target_size[1], self.target_size[2])\n apple.osrun(rcommand,self.logfile)\n\n if i == self.emiss_hdr:\n # If the input is the activity map it applies positron range and non-colinearity\n output_img = output_i[0:-3] + \"img\"\n mfile = os.path.join(self.phantom_dir, \"smooth.m\")\n smoothed = spm.smoothing_xyz(self.spm_run,mfile,output_i,2,2,2,\"s\",self.logfile)\n shutil.move(smoothed,output_img)\n\n if i == self.att_hdr:\n # If the input is the attenuation map it removes higher values\n rcommand = \"cambia_valores_de_un_intervalo %s %s 1 10000000000000 1 \" % (output_i,output_i)\n apple.osrun(rcommand,self.logfile)\n\n shutil.copy(output_i[0:-3] + \"img\", output_i[0:-3] + \"v\")\n\n apple.write_interfile_header(output_i[0:-3] + \"hv\", new_x_dims, self.target_size[0],\n new_y_dims, self.target_size[1],new_z_dims, self.target_size[2])\n\n self.emiss_hdr = self.emiss_hdr[0:-4]+\"_\" + self.scanner + \".hv\"\n self.att_hdr = self.att_hdr[0:-4]+\"_\" + self.scanner + \".hv\"", "title": "" }, { "docid": "f4d07ab07142f2448f06db2d2659697f", "score": "0.5238225", "text": "def generated_refl_for_splitting_1():\n reflections = flex.reflection_table()\n reflections[\"intensity\"] = flex.double([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])\n reflections[\"variance\"] = flex.double(6, 1.0)\n reflections[\"inverse_scale_factor\"] = flex.double(6, 1.0)\n reflections[\"miller_index\"] = flex.miller_index(\n [(1, 0, 0), (2, 0, 0), (0, 0, 1), (2, 2, 2), (1, 0, 0), (2, 0, 0)]\n )\n reflections[\"d\"] = flex.double([0.8, 2.1, 2.0, 1.4, 1.6, 2.5])\n reflections[\"partiality\"] = flex.double(6, 1.0)\n reflections[\"xyzobs.px.value\"] = flex.vec3_double(\n [\n (0.0, 0.0, 0.0),\n (0.0, 0.0, 5.0),\n (0.0, 0.0, 8.0),\n (0.0, 0.0, 10.0),\n (0.0, 0.0, 12.0),\n (0.0, 0.0, 15.0),\n ]\n )\n reflections[\"s1\"] = flex.vec3_double(\n [\n (0.0, 0.1, 1.0),\n (0.0, 0.1, 1.0),\n (0.0, 0.1, 1.0),\n (0.0, 0.1, 1.0),\n (0.0, 0.1, 1.0),\n (0.0, 0.1, 1.0),\n ]\n )\n reflections.set_flags(flex.bool(6, True), reflections.flags.integrated)\n reflections.set_flags(flex.bool(6, False), reflections.flags.bad_for_scaling)\n return reflections", "title": "" }, { "docid": "efbd544b75a501e41c2f744fc291fde0", "score": "0.520738", "text": "def gen_collective_spectrum():\n 
wavelengths = np.arange(520, 545, 0.01) * u.nm\n probe_wavelength = 532 * u.nm\n n = 5e17 * u.cm ** -3\n probe_vec = np.array([1, 0, 0])\n scatter_vec = np.array([0, 1, 0])\n Te = 10 * u.eV\n Ti = np.array([10]) * u.eV\n ion_species = [\"C-12 5+\"]\n\n alpha, Skw = thomson.spectral_density(\n wavelengths,\n probe_wavelength,\n n,\n Te,\n Ti,\n ion_species=ion_species,\n probe_vec=probe_vec,\n scatter_vec=scatter_vec,\n )\n\n return alpha, wavelengths, Skw", "title": "" }, { "docid": "1b5cdd1fd5b975edd2540bc43a9f6ac6", "score": "0.52059096", "text": "def _extract_data(self, simulation=None, cycle = -1):\n if simulation is not None:\n gt = self.ground_truth[simulation]\n marginals = self.marginals[simulation, cycle]\n else:\n gt = onp.reshape(self.ground_truth, (-1,))\n cycle_first_marginals = onp.transpose(self.marginals, (1, 0, 2))[cycle]\n marginals = onp.reshape(cycle_first_marginals, (-1,))\n return gt[~onp.isnan(marginals)], marginals[~onp.isnan(marginals)]", "title": "" }, { "docid": "9ed610b22f72d6872fad28d08448db0c", "score": "0.5202795", "text": "def risk(table,p,q,color):", "title": "" }, { "docid": "82ca26896e276b854cf6aad7defd1f43", "score": "0.5181584", "text": "def SetupDics(mass, model):\n # Number of observations\n obs_sub_r1 = 259399#358932#\n obs_sub_r2 = 105053#147630#\n obs_sub_r3 = 26760#38906#\n\n # Model dependent uncertainties for different models\n model_pdf_alphas = {\n 'mhmodp_200' : {300: '1.0155', 350: '1.0159', 400: '1.01712', 500: '1.0176', 600: '1.023', 700: '1.02637', 900: '1.03369', 1100:'1.03984', 1300:'1.04598'},\n '2hdm' : {300: '1.0155', 350: '1.0159', 400: '1.01712', 500: '1.0176', 600: '1.023', 700: '1.02637', 900: '1.03369', 1100:'1.03984', 1300:'1.04598'},\n 'tau_phobic' : {300: '1.01478', 350: '1.01636', 400: '1.03722', 500: '1.03418', 600: '1.03224', 700: '1.0284', 900: '1.02639', 1100:'1.02405', 1300:'1.02403'},\n 'light_stop' : {300: '1.02464', 350: '1.02686', 400: '1.03083', 500: '1.04167', 600: '1.02439', 700: '', 900: '', 1100:'', 1300:''},\n 'light_stau' : {300: '1.0191', 350: '1.02131', 400: '1.02443', 500: '1.02801', 600: '1.0365', 700: '1.0379', 900: '1.03369', 1100:'1.03984', 1300:'1.04598'},\n 'hMSSM' : {300: '1.01549', 350: '1.01483', 400: '1.01907', 500: '1.02115', 600: '1.02451', 700: '1.02762', 900: '1.03436', 1100:'1.03986', 1300:'1.046'},\n 'independent' : {300: '', 350: '', 400: '', 500: '', 600: '', 700: '', 900: '', 1100:'', 1300:''},\n }\n model_scale = {\n 'mhmodp_200' : {300: '1.04264', 350: '1.03887', 400: '1.03253', 500: '1.02933', 600: '1.02906', 700: '1.02637', 900: '1.02639', 1100:'1.02405', 1300:'1.02403'},\n '2hdm' : {300: '1.04264', 350: '1.03887', 400: '1.03253', 500: '1.02933', 600: '1.02906', 700: '1.02637', 900: '1.02639', 1100:'1.02405', 1300:'1.02403'},\n 'tau_phobic' : {300: '1.04356', 350: '1.04127', 400: '1.03722', 500: '1.03418', 600: '1.03224', 700: '1.0284', 900: '1.02639', 1100:'1.02405', 1300:'1.02403'},\n 'light_stop' : {300: '1.06957', 350: '1.06989', 400: '1.063', 500: '1.06644', 600: '1.03223', 700: '', 900: '', 1100:'', 1300:''},\n 'light_stau' : {300: '1.05382', 350: '1.05246', 400: '1.04723', 500: '1.04622', 600: '1.04757', 700: '1.03883', 900: '1.02639', 1100:'1.02405', 1300:'1.02403'},\n 'hMSSM' : {300: '1.04425', 350: '1.04025', 400: '1.03814', 500: '1.03462', 600: '1.03105', 700: '1.02901', 900: '1.02622', 1100:'1.02403', 1300:'1.02402'},\n 'independent' : {300: '', 350: '', 400: '', 500: '', 600: '', 700: '', 900: '', 1100:'', 1300:''},\n }\n \n # add_path \n signal_path = cmsswBase + 
'/src/Analysis/MssmHbb/output/'\n bg_path = signal_path\n if not blinded: bg_path += 'unblinded/' \n # Specify ditionaries with TAG vs VALUE\n dictionary = {\n 300 : {'OBSERVATION' : obs_sub_r1,\n 'SIGNAL_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-300/workspace/signal_workspace.root',\n 'BG_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr1/background_workspace_TurnOnFix_5000bins.root',\n 'BIAS_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-300/workspace/signal_bias_workspace.root',\n 'DATA_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr1/background_workspace_TurnOnFix_5000bins.root',\n 'OFFLINE_SFL' : '1.0002',\n 'ONLINE_SFB' : '1.0167',\n 'PILEUP' : '1.0091',\n 'MODEL_PDFAS' : model_pdf_alphas[model][300],\n 'MODEL_SCALE' : model_scale[model][300],\n 'SHAPE_BG1' : 'peak flatParam',\n 'SHAPE_BG2' : 'tail flatParam',\n 'SHAPE_BG3' : 'width flatParam',\n 'SHAPE_BG4' : 'par4 flatParam',\n 'SHAPE_BG5' : 'slope_novoeff flatParam',#'',#'slope_novoeff flatParam',#\n 'SHAPE_BG6' : 'turnon_novoeff flatParam',\n 'MASS' : '300',\n 'BIAS_ERR' : '7.05244'},#''},#'turnon_novoeff flatParam'},#\n 350 : {'OBSERVATION' : obs_sub_r1,\n 'SIGNAL_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-350/workspace/signal_workspace.root',\n 'BG_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr1/background_workspace_TurnOnFix_5000bins.root',\n 'BIAS_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-350/workspace/signal_bias_workspace.root',\n 'DATA_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr1/background_workspace_TurnOnFix_5000bins.root', \n 'OFFLINE_SFL' : '1.00022',\n 'ONLINE_SFB' : '1.016',\n 'PILEUP' : '1.0045',\n 'MODEL_PDFAS' : model_pdf_alphas[model][350],\n 'MODEL_SCALE' : model_scale[model][350],\n 'SHAPE_BG1' : 'peak flatParam',\n 'SHAPE_BG2' : 'tail flatParam',\n 'SHAPE_BG3' : 'width flatParam',\n 'SHAPE_BG4' : 'par4 flatParam',\n 'SHAPE_BG5' : 'slope_novoeff flatParam',#'',#'slope_novoeff flatParam',\n 'SHAPE_BG6' : 'turnon_novoeff flatParam',\n 'MASS' : '350',\n 'BIAS_ERR' : '4.13302'},#''},#'turnon_novoeff flatParam'},\n 400 : {'OBSERVATION' : obs_sub_r1, \n 'SIGNAL_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-400/workspace/signal_workspace.root', \n 'BG_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr1/background_workspace_TurnOnFix_5000bins.root',\n 'BIAS_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-400/workspace/signal_bias_workspace.root', \n 'DATA_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr1/background_workspace_TurnOnFix_5000bins.root', \n 'OFFLINE_SFL' : '1.00042',\n 'ONLINE_SFB' : '1.0154',\n 'PILEUP' : '1.0046',\n 'MODEL_PDFAS' : model_pdf_alphas[model][400],\n 'MODEL_SCALE' : model_scale[model][400],\n 'SHAPE_BG1' : 'peak flatParam',\n 'SHAPE_BG2' : 'tail flatParam',\n 'SHAPE_BG3' : 'width flatParam',\n 'SHAPE_BG4' : 'par4 flatParam',\n 'SHAPE_BG5' : 'slope_novoeff flatParam',#'',#'slope_novoeff flatParam',\n 'SHAPE_BG6' : 'turnon_novoeff flatParam',\n 'MASS' : '400',\n 'BIAS_ERR' : '2.30296'},#''},#'turnon_novoeff flatParam'},\n 500 : {'OBSERVATION' : obs_sub_r2, \n 'SIGNAL_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-500/workspace/signal_workspace.root', \n 'BIAS_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-500/workspace/signal_bias_workspace.root', \n 'BG_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr2/background_workspace_5000bins.root',\n 'DATA_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr2/background_workspace_5000bins.root', \n 'OFFLINE_SFL' : '1.0006',\n 'ONLINE_SFB' : '1.0149',\n 'PILEUP' : '1.0045',\n 'MODEL_PDFAS' : model_pdf_alphas[model][500],\n 'MODEL_SCALE' : model_scale[model][500],\n 'SHAPE_BG1' : 'peak1 flatParam',\n 
'SHAPE_BG2' : 'tail1 flatParam',\n 'SHAPE_BG3' : 'width1 flatParam',\n 'SHAPE_BG4' : '',\n 'SHAPE_BG5' : '',#'',#'slope_novoeff flatParam',\n 'SHAPE_BG6' : '',\n 'MASS' : '500',\n 'BIAS_ERR' : '0.287686'},\n 600 : {'OBSERVATION' : obs_sub_r2, \n 'SIGNAL_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-600/workspace/signal_workspace.root', \n 'BG_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr2/background_workspace_5000bins.root',\n 'BIAS_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-600/workspace/signal_bias_workspace.root', \n 'DATA_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr2/background_workspace_5000bins.root', \n 'OFFLINE_SFL' : '1.0009',\n 'ONLINE_SFB' : '1.0153',\n 'PILEUP' : '1.0018',\n 'MODEL_PDFAS' : model_pdf_alphas[model][600],\n 'MODEL_SCALE' : model_scale[model][600],\n 'SHAPE_BG1' : 'peak1 flatParam',\n 'SHAPE_BG2' : 'tail1 flatParam',\n 'SHAPE_BG3' : 'width1 flatParam',\n 'SHAPE_BG4' : '',\n 'SHAPE_BG5' : '',\n 'SHAPE_BG6' : '',\n 'MASS' : '600',\n 'BIAS_ERR' : '0.1606918'},\n 700 : {'OBSERVATION' : obs_sub_r2, \n 'SIGNAL_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-700/workspace/signal_workspace.root', \n 'BG_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr2/background_workspace_5000bins.root',\n 'BIAS_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-700/workspace/signal_bias_workspace.root', \n 'DATA_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr2/background_workspace_5000bins.root', \n 'OFFLINE_SFL' : '1.0012',\n 'ONLINE_SFB' : '1.0164',\n 'PILEUP' : '1.0033',\n 'MODEL_PDFAS' : model_pdf_alphas[model][700],\n 'MODEL_SCALE' : model_scale[model][700],\n 'SHAPE_BG1' : 'peak1 flatParam',\n 'SHAPE_BG2' : 'tail1 flatParam',\n 'SHAPE_BG3' : 'width1 flatParam',\n 'SHAPE_BG4' : '',\n 'SHAPE_BG5' : '',\n 'SHAPE_BG6' : '',\n 'MASS' : '700',\n 'BIAS_ERR' : '0.1135502'},\n 900 : {'OBSERVATION' : obs_sub_r2, \n 'SIGNAL_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-900/workspace/signal_workspace.root', \n 'BG_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr2/background_workspace_5000bins.root',\n 'BIAS_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-900/workspace/signal_bias_workspace.root', \n 'DATA_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr2/background_workspace_5000bins.root', \n 'OFFLINE_SFL' : '1.0016',\n 'ONLINE_SFB' : '1.0196',\n 'PILEUP' : '1.0017',\n 'MODEL_PDFAS' : model_pdf_alphas[model][900],\n 'MODEL_SCALE' : model_scale[model][900],\n 'SHAPE_BG1' : 'peak1 flatParam',\n 'SHAPE_BG2' : 'tail1 flatParam',\n 'SHAPE_BG3' : 'width1 flatParam',\n 'SHAPE_BG4' : '',\n 'SHAPE_BG5' : '',\n 'SHAPE_BG6' : '',\n 'MASS' : '900',\n 'BIAS_ERR' : '0.0715888'},\n 1100: {'OBSERVATION' : obs_sub_r3, \n 'SIGNAL_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-1100/workspace/signal_workspace.root', \n 'BG_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr3/background_workspace_5000bins.root',\n 'BIAS_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-1100/workspace/signal_bias_workspace.root', \n 'DATA_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr3/background_workspace_5000bins.root', \n 'OFFLINE_SFL' : '1.0024',\n 'ONLINE_SFB' : '1.0231',\n 'PILEUP' : '1.0026',\n 'MODEL_PDFAS' : model_pdf_alphas[model][1100],\n 'MODEL_SCALE' : model_scale[model][1100],\n 'SHAPE_BG1' : 'peak1 flatParam',\n 'SHAPE_BG2' : 'tail flatParam',\n 'SHAPE_BG3' : 'width1 flatParam',\n 'SHAPE_BG4' : '',\n 'SHAPE_BG5' : '',\n 'SHAPE_BG6' : '',\n 'MASS' : '1100',\n 'BIAS_ERR' : '0.073350875'},\n 1300: {'OBSERVATION' : obs_sub_r3, \n 'SIGNAL_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-1300/workspace/signal_workspace.root', \n 'BG_SHAPE_WS' : bg_path + 
'ReReco_bg_fit/sr3/background_workspace_5000bins.root', \n 'BIAS_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-1300/workspace/signal_bias_workspace.root',\n 'DATA_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr3/background_workspace_5000bins.root', \n 'OFFLINE_SFL' : '1.0025',\n 'ONLINE_SFB' : '1.0272',\n 'PILEUP' : '1.0036',\n 'MODEL_PDFAS' : model_pdf_alphas[model][1300],\n 'MODEL_SCALE' : model_scale[model][1300],\n 'SHAPE_BG1' : 'peak1 flatParam',\n 'SHAPE_BG2' : 'tail flatParam',\n 'SHAPE_BG3' : 'width1 flatParam',\n 'SHAPE_BG4' : '',\n 'SHAPE_BG5' : '',\n 'SHAPE_BG6' : '',\n 'MASS' : '1300',\n 'BIAS_ERR' : '0.07426975'}}\n \n if(adjoint == True): \n dictionary[500] = {'OBSERVATION' : obs_sub_r1,\n 'SIGNAL_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-500_SR1/workspace/signal_workspace.root',\n 'BG_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr1/background_workspace_TurnOnFix_5000bins.root',\n 'BIAS_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-500_SR1/workspace/signal_bias_workspace.root',\n 'DATA_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr1/background_workspace_TurnOnFix_5000bins.root',\n 'OFFLINE_SFL' : '1.0006',\n 'ONLINE_SFB' : '1.0149',\n 'PILEUP' : '1.0045',\n 'MODEL_PDFAS' : model_pdf_alphas[model][500],\n 'MODEL_SCALE' : model_scale[model][500],\n 'SHAPE_BG1' : 'peak flatParam',\n 'SHAPE_BG2' : 'tail flatParam',\n 'SHAPE_BG3' : 'width flatParam',\n 'SHAPE_BG4' : 'par4 flatParam',\n 'SHAPE_BG5' : 'slope_novoeff flatParam',#'',#'slope_novoeff flatParam',\n 'SHAPE_BG6' : 'turnon_novoeff flatParam',\n 'MASS' : '500',\n 'BIAS_ERR' : '1.66481'}\n dictionary[1100] = {'OBSERVATION' : obs_sub_r2, \n 'SIGNAL_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-1100_SR2/workspace/signal_workspace.root', \n 'BG_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr2/background_workspace_5000bins.root',\n 'BIAS_SHAPE_WS' : signal_path + 'test_ReReco_signal_M-1100_SR2/workspace/signal_bias_workspace.root', \n 'DATA_SHAPE_WS' : bg_path + 'ReReco_bg_fit/sr2/background_workspace_5000bins.root', \n 'OFFLINE_SFL' : '1.0024',\n 'ONLINE_SFB' : '1.0231',\n 'PILEUP' : '1.0026',\n 'MODEL_PDFAS' : model_pdf_alphas[model][1100],\n 'MODEL_SCALE' : model_scale[model][1100],\n 'SHAPE_BG1' : 'peak1 flatParam',\n 'SHAPE_BG2' : 'tail1 flatParam',\n 'SHAPE_BG3' : 'width1 flatParam',\n 'SHAPE_BG4' : '',\n 'SHAPE_BG5' : '',\n 'SHAPE_BG6' : '',\n 'MASS' : '1100',\n 'BIAS_ERR' : '0.0741005'}\n \n dictionary[mass]['SIGNAL_GROUP'] = 'signal group = lumi_13TeV CMS_eff_l_13TeV CMS_PU_13TeV CMS_eff_bonl_13TeV pdf_gg QCDscale CMS_scale_j_13TeV CMS_res_j_13TeV CMS_eff_pTonl_13TeV CMS_eff_b_13TeV pdf_Higgs_gg bias' \n if(model == 'independent'): dictionary[mass]['SIGNAL_GROUP'] = 'signal group = lumi_13TeV CMS_eff_l_13TeV CMS_PU_13TeV CMS_eff_bonl_13TeV CMS_scale_j_13TeV CMS_res_j_13TeV CMS_eff_pTonl_13TeV CMS_eff_b_13TeV pdf_Higgs_gg bias'\n if dictionary[mass] != None:\n return dictionary[mass]\n else:\n raise AttributeError(\"No rulles for mass = \" + mass + \" were specified in SetupDics\")", "title": "" }, { "docid": "138b26cb1e66f18d73940cbe7e5cb3c0", "score": "0.51603657", "text": "def generate_test_reflections(n=2):\n reflections = flex.reflection_table()\n for id_ in range(0, n):\n r = flex.reflection_table()\n r[\"id\"] = flex.int(10, id_)\n r[\"xyzobs.px.value\"] = flex.vec3_double([(0, 0, i + 0.5) for i in range(0, 10)])\n r.experiment_identifiers()[id_] = str(id_)\n r.set_flags(flex.bool(10, True), r.flags.integrated)\n r.set_flags(flex.bool(10, True), r.flags.scaled)\n reflections.extend(r)\n return reflections", "title": "" }, { "docid": 
"eda586a9305076d792c4c15f801a2553", "score": "0.51526654", "text": "def generated_refl_for_splitting_2():\n reflections = flex.reflection_table()\n reflections[\"intensity\"] = flex.double([7.0, 8.0, 9.0, 10.0, 11.0])\n reflections[\"variance\"] = flex.double(5, 1.0)\n reflections[\"inverse_scale_factor\"] = flex.double(5, 1.0)\n reflections[\"miller_index\"] = flex.miller_index(\n [(2, 2, 2), (2, 0, 0), (0, 0, 1), (2, 2, 2), (1, 0, 0)]\n )\n reflections[\"d\"] = flex.double([0.8, 2.1, 2.0, 1.4, 1.6])\n reflections[\"partiality\"] = flex.double(5, 1.0)\n reflections[\"xyzobs.px.value\"] = flex.vec3_double(\n [\n (0.0, 0.0, 0.0),\n (0.0, 0.0, 5.0),\n (0.0, 0.0, 8.0),\n (0.0, 0.0, 10.0),\n (0.0, 0.0, 12.0),\n ]\n )\n reflections[\"s1\"] = flex.vec3_double(\n [\n (0.0, 0.1, 1.0),\n (0.0, 0.1, 1.0),\n (0.0, 0.1, 1.0),\n (0.0, 0.1, 1.0),\n (0.0, 0.1, 1.0),\n ]\n )\n reflections.set_flags(flex.bool(5, True), reflections.flags.integrated)\n reflections.set_flags(flex.bool(5, False), reflections.flags.bad_for_scaling)\n return reflections", "title": "" }, { "docid": "87250159f494829027412fd1a2dcbfd7", "score": "0.5131863", "text": "def generate_refl_2():\n reflections = flex.reflection_table()\n reflections[\"intensity\"] = flex.double([60.0, 30.0, 10.0, 30.0])\n reflections[\"variance\"] = flex.double([60.0, 30.0, 10.0, 30.0])\n reflections[\"inverse_scale_factor\"] = flex.double(4, 2.0)\n reflections[\"miller_index\"] = flex.miller_index(\n [(1, 0, 0), (0, 4, 0), (10, 0, 0), (0, 4, 0)]\n )\n reflections.set_flags(flex.bool(4, True), reflections.flags.integrated)\n return reflections", "title": "" }, { "docid": "afcc5484522b84174f9d16a55ea259a1", "score": "0.51305085", "text": "def GenerateReflections(spcGrp,cell,Qmax=None,dmin=None,TTmax=None,wave=None):\n\n import GSASIIlattice as G2lat\n if len(cell) != 6:\n raise G2ScriptException(\"GenerateReflections: Invalid unit cell:\" + str(cell))\n opts = (Qmax is not None) + (dmin is not None) + (TTmax is not None)\n if Qmax:\n dmin = 2 * np.pi / Qmax\n #print('Q,d',Qmax,dmin)\n elif TTmax and wave is None:\n raise G2ScriptException(\"GenerateReflections: specify a wavelength with TTmax\")\n elif TTmax:\n dmin = wave / (2.0 * np.sin(np.pi*TTmax/360.))\n #print('2theta,d',TTmax,dmin)\n if opts != 1:\n raise G2ScriptException(\"GenerateReflections: specify one Qmax, dmin or TTmax\")\n err,SGData = G2spc.SpcGroup(spcGrp)\n if err != 0:\n print('GenerateReflections space group error:',G2spc.SGErrors(err))\n raise G2ScriptException(\"GenerateReflections: Invalid space group: \" + str(spcGrp))\n A = G2lat.cell2A(cell)\n return G2lat.GenHLaue(dmin,SGData,A)", "title": "" }, { "docid": "7e40126246409040c174c1b6b6b9f559", "score": "0.5130174", "text": "def get_contrasts_all_palettes(ctrstd_palettes): \n ids = [] \n coh = []\n ldc = []\n cwc = []\n cc = []\n gr = []\n bo = []\n vy = []\n cs = []\n ce = [] \n red = []\n orange = []\n yellow = []\n green = []\n blue = []\n violet = [] \n dark = []\n light = [] \n saturated = []\n desaturated = []\n \n for i in range(len(ctrstd_palettes)):\n el = ctrstd_palettes[i][2]\n ids.append(el)\n if 'coh' in ctrstd_palettes[i][-4]: \n coh.append(1)\n else: \n coh.append(0)\n if 'ldc' in ctrstd_palettes[i][-4]: \n ldc.append(1)\n else: \n ldc.append(0)\n if 'cwc' in ctrstd_palettes[i][-4]: \n cwc.append(1)\n else: \n cwc.append(0)\n if 'cc' in ctrstd_palettes[i][-4]: \n cc.append(1)\n else: \n cc.append(0)\n if 'cc: g-r' in ctrstd_palettes[i][-4]: \n gr.append(1)\n else: \n gr.append(0)\n if 'cc: b-o' in 
ctrstd_palettes[i][-4]: \n bo.append(1)\n else: \n bo.append(0)\n if 'cc: v-y' in ctrstd_palettes[i][-4]: \n vy.append(1)\n else: \n vy.append(0)\n if 'cs' in ctrstd_palettes[i][-4]: \n cs.append(1)\n else: \n cs.append(0)\n if 'ce' in ctrstd_palettes[i][-4]: \n ce.append(1)\n else: \n ce.append(0)\n if 'red' in ctrstd_palettes[i][-3].index: \n red.append(round(ctrstd_palettes[i][-3].loc['red'][0],2))\n else: \n red.append(0)\n if 'orange' in ctrstd_palettes[i][-3].index: \n orange.append(round(ctrstd_palettes[i][-3].loc['orange'][0],2))\n else: \n orange.append(0)\n if 'yellow' in ctrstd_palettes[i][-3].index: \n yellow.append(round(ctrstd_palettes[i][-3].loc['yellow'][0],2))\n else: \n yellow.append(0)\n if 'green' in ctrstd_palettes[i][-3].index: \n green.append(round(ctrstd_palettes[i][-3].loc['green'][0],2))\n else: \n green.append(0)\n if 'blue' in ctrstd_palettes[i][-3].index: \n blue.append(round(ctrstd_palettes[i][-3].loc['blue'][0],2))\n else: \n blue.append(0) \n if 'violet' in ctrstd_palettes[i][-3].index: \n violet.append(round(ctrstd_palettes[i][-3].loc['violet'][0],2))\n else: \n violet.append(0) \n if 'dark' in ctrstd_palettes[i][-2].index: \n dark.append(round(ctrstd_palettes[i][-2].loc['dark'][0],2))\n else: \n dark.append(0) \n if 'light' in ctrstd_palettes[i][-2].index: \n light.append(round(ctrstd_palettes[i][-2].loc['light'][0],2))\n else: \n light.append(0) \n if 'saturated' in ctrstd_palettes[i][-1].index: \n saturated.append(round(ctrstd_palettes[i][-1].loc['saturated'][0],2))\n else: \n saturated.append(0) \n if 'desaturated' in ctrstd_palettes[i][-1].index: \n desaturated.append(round(ctrstd_palettes[i][-1].loc['desaturated'][0],2))\n else: \n desaturated.append(0) \n \n data = {'Palette_id': ids,\n 'Contrast of hue': coh, \n 'Light-dark contrast': ldc,\n 'Cold-warm contrast': cwc,\n 'Complementary contrast': cc, \n 'Green-red': gr, \n 'Blue-orange': bo,\n 'Violet-yellow': vy, \n 'Contrast of saturation': cs,\n 'Contrast of extension': ce,\n \n 'Red': red,\n 'Orange': orange,\n 'Yellow': yellow, \n 'Green': green, \n 'Blue': blue, \n 'Violet': violet, \n \n 'Dark': dark, \n 'Light': light, \n \n 'Saturated': saturated,\n 'Desaturated': desaturated} \n \n data = pd.DataFrame(data)\n data = data.sort_values(by='Palette_id', ascending=True).reset_index(drop=True)\n rnbw = list(zip([r if r == 0 else 1 for r in list(data['Red'])], [o if o == 0 else 1 for o in list(data['Orange'])],[y if y == 0 else 1 for y in list(data['Yellow'])], [g if g == 0 else 1 for g in list(data['Green'])], [b if b == 0 else 1 for b in list(data['Blue'])], [v if v == 0 else 1 for v in list(data['Violet'])]))\n data['colors'] = [sum(rnb) for rnb in rnbw]\n warm = list(zip([r for r in list(data['Red'])], [o for o in list(data['Orange'])],[y for y in list(data['Yellow'])]))\n kalt = list(zip([g for g in list(data['Green'])], [b for b in list(data['Blue'])], [v for v in list(data['Violet'])]))\n data['warm'] = [round(sum(wrm),2) for wrm in warm]\n data['cold'] = [round(sum(klt),2) for klt in kalt]\n total = [sum(el) for el in list(zip([sa for sa in list(data['Saturated'])], [de for de in list(data['Desaturated'])]))]\n sat_ratio = [round(el[0]/el[1],2) for el in list(zip([s for s in list(data['Saturated'])], [t for t in total]))]\n desat_ratio = [round(el[0]/el[1],2) for el in list(zip([d for d in list(data['Desaturated'])], [t for t in total]))]\n data['sat-ratio'] = sat_ratio\n data['desat-ratio'] = desat_ratio\n return data", "title": "" }, { "docid": "c2997de51bb5d74761871b252471cac7", 
"score": "0.51218563", "text": "def gen_non_collective_spectrum():\n wavelengths = np.arange(500, 570, 0.01) * u.nm\n probe_wavelength = 532 * u.nm\n n = 5e15 * u.cm ** -3\n probe_vec = np.array([1, 0, 0])\n scatter_vec = np.array([0, 1, 0])\n Te = 100 * u.eV\n Ti = np.array([10]) * u.eV\n ion_species = [\"H+\"]\n\n alpha, Skw = thomson.spectral_density(\n wavelengths,\n probe_wavelength,\n n,\n Te,\n Ti,\n ion_species=ion_species,\n probe_vec=probe_vec,\n scatter_vec=scatter_vec,\n )\n\n return alpha, wavelengths, Skw", "title": "" }, { "docid": "a576f7a3c7253aa8e6f724e594e7cdd5", "score": "0.5104013", "text": "def stat(self):\n\t\tout = {}\n\t\tout.update(self.draw_s()) # Here, it's called for each catalog, and supercedes the galaxy values.\n\t\tout.update(self.draw_psf()) # Idem, one random PSF for each catalog\n\t\tout.update(self.draw_constants())\n\t\t\n\t\treturn out", "title": "" }, { "docid": "471be153c05cbe5d3f0b2474a381ae67", "score": "0.5102362", "text": "def sed(axes, label, type, delta_s, e_break, magnetic_field, hydrogen_density, Wp, We):\n\n electrons = SmoothBrokenPowerLaw(\n total_energy=We,\n index1 = 1.5, # p10 in text\n index2 = 1.5 + delta_s, # index2 = index1 + delta_s\n e_break = e_break,\n e_scale = 1*u.GeV,\n beta = 2.0,\n emin = 10*u.MeV, # from footnote to table 1\n emax = u.TeV, # Yasunobu told me this was the emax when I asked\n )\n\n protons = SmoothBrokenPowerLaw(\n total_energy=Wp,\n index1 = 1.5, # p10 in text\n index2 = 1.5 + delta_s, # index2 = index1 + delta_s\n e_break = e_break,\n e_scale = 1*u.GeV,\n beta = 2.0,\n emin = 10*u.MeV, # from footnote to table 1\n emax = u.TeV, # Yasunobu told me this when I asked\n )\n\n magnetic_field = magnetic_field \n\n distance = 6*u.kiloparsec # from page 8 in text\n\n hydrogen_density = hydrogen_density\n helium_density = 0.1*hydrogen_density # yasunobu told me this when I asked\n\n # Photon fields take from the footnote of table 1 in the text\n cmb = CMB()\n infrared = ThermalSpectrum(kT=3e-3*u.eV, energy_density=0.9*u.eV*u.cm**-3)\n optical = ThermalSpectrum(kT=0.25*u.eV, energy_density=0.84*u.eV*u.cm**-3)\n photon_fields = CompositeSpectrum(cmb, infrared, optical)\n\n # Make some nice diagnostic plots\n plot_photon_fields(type, CMB=cmb, infrared=infrared, optical=optical)\n print_photon_field(CMB=cmb, infrared=infrared, optical=optical)\n plot_electrons(type, electrons)\n\n # Create the radiation processes\n\n synch = Synchrotron(electron_spectrum=electrons, \n magnetic_field=magnetic_field)\n\n ic = InverseCompton(electron_spectrum=electrons,\n photon_spectrum=photon_fields)\n\n pi0 = Pi0Decay(proton_spectrum=protons, \n hydrogen_density = hydrogen_density,\n scaling_factor = 1.85 # from p10 in the text\n )\n\n brems = Bremsstrahlung(\n electron_spectrum = electrons,\n hydrogen_density = hydrogen_density,\n helium_density = helium_density)\n\n sed = SEDPlotter(\n distance=distance,\n x_units_string='eV',\n y_units_string='erg*cm^-2*s^-1',\n axes=axes,\n )\n\n # Overlay the Synchrotron and Inverse Compton radiation\n sed.plot(synch, color='black')\n sed.plot(pi0, color='black', dashes=[9,2])\n sed.plot(ic, color='black', dashes=[2,2])\n sed.plot(brems, color='black', dashes=[4,2])\n\n at = AnchoredText(label, frameon=False, loc=2, prop=dict(size=14))\n axes.add_artist(at)", "title": "" }, { "docid": "462574c18f059cb8784b2c5ff88b1ad0", "score": "0.5067139", "text": "def readReflect(self):\n if self.colorSensor is not None:\n if self.colorSensor.mode != \"COL-REFLECT\":\n self.colorSensor.mode = \"COL-REFLECT\"\n 
return self.colorSensor.reflected_light_intensity\n else:\n print(\"There is no color sensor set up\")", "title": "" }, { "docid": "72ac50c4e1d92b3d67005bc2e6027141", "score": "0.50644463", "text": "def extract_info(raw=False, trace=None, average=False, info=None, header=False):\n if raw:\n\n if trace is None:\n print(\"Trace is required to store the raw episode information\")\n return\n\n info_line = game.get_tile_distribution()\n info_line = np.append(info_line, game.board.max())\n info_line = np.append(info_line, game.get_board_sum())\n info_line = np.append(info_line, len(trace))\n info_line = np.append(info_line, np.sum(np.array(trace)[:, 2]))\n\n boolean_distribution = np.zeros(12)\n boolean_distribution[0:int(np.log2(game.board.max()))+1] = 1\n info_line = np.concatenate([info_line, boolean_distribution])\n return info_line\n\n elif average:\n\n if info is None:\n print(\"Info list not supplied. It is needed for averaging\")\n return\n\n all_info_line = np.average(info[:, 0:12], axis=0) # extract and average board distribution\n all_info_line = np.append(all_info_line, np.average(info[:, 12])) # extract and average max tiles\n all_info_line = np.append(all_info_line, np.average(info[:, 13])) # extract and average board sum\n all_info_line = np.append(all_info_line, np.max(info[:, 12])) # extract and store batch max tile\n all_info_line = np.append(all_info_line, np.average(info[:, 14])) # extract and average trace length\n all_info_line = np.append(all_info_line, np.average(info[:, 15])) # extract and average reward\n all_info_line = np.concatenate([all_info_line, np.average(info[:, 16:28], axis=0)])\n # extract and average boolean board distribution\n\n return all_info_line\n\n elif header:\n\n info_header = \"0,2,4,8,16,32,64,128,256,512,1024,2048,max tiles average,\" \\\n \"board sum average,max tile for batch,average trace length,average reward,\" \\\n \"0,2,4,8,16,32,64,128,256,512,1024,2048\"\n\n return info_header", "title": "" }, { "docid": "22eb227a9598ca4a19ee9bbb90dd20ef", "score": "0.5049874", "text": "def basic_info(ITSs):\n\n py = [i.PY for i in ITSs]\n\n # XXX RESULT: the \"A\" part contributes more to correlation with PY\n # than the \"G\" part of the pyrimidines for dg100, and much more for dg400.\n # for DG400 correlation is significant starting at +4, dips at +5, and then\n # increases gradually to +11. Correlation with G isn't significant at all\n # until + 10 or so! But gradually increases from +8 all the way up to + 15.\n # for DG400 G is actually significantly ANTI correlated with PY at +3 and +4!\n # this is also true for DG100, but not significant, and -0.18 or so.\n # The DG400 stuff can be an artefact from biasing toward the 3rd and 4th\n # nucleotides by forcing the dinucleotide structure.\n # Still, i'd say that for DG100 your conclusion still stands. How big is\n # the AG difference in Hein and Malinen?\n # Considering the U > C > A > G for going to the backtracked state, for\n # DG100 U has a stronger anti-correlation with PY than\n # There is an assymetry here though:\n # A is more strongly correlated than G -> together they are stronger\n # C and T are similarly anti-correlated -> together they are stronger (or\n # just the negative of the AG correlation)\n\n # This can be result 1 in your work. Strong correlation with number of\n # purines. Has been shown ... 
G..., butstronger with A than with G.\n # Further, no correlation between A and G; if A and G were equally\n # beneficial to avoid paused and backtracked state, one would expect these\n # two values to have some correlation since they would be equally likely to\n # appear in the high-PY variants, and equally likely not to appear in the\n # low-PY variants.\n\n #ladder_pre = [[1 if nuc in ['G', 'A'] else 0 for nuc in i.sequence[:i.msat]] for i in ITSs]\n ladder_pre = [[1 if nuc in ['A'] else 0 for nuc in i.sequence[:20]] for i in ITSs]\n #ladder = [sum(ladder_pre[:i])]\n #xx=15\n #ladder_G = [[1 if nuc in ['G'] else 0 for nuc in i.sequence[:20]] for i in ITSs]\n #ladder_A = [[1 if nuc in ['A'] else 0 for nuc in i.sequence[:20]] for i in ITSs]\n #ladder_C = [[1 if nuc in ['C'] else 0 for nuc in i.sequence[:20]] for i in ITSs]\n #ladder_T = [[1 if nuc in ['T'] else 0 for nuc in i.sequence[:20]] for i in ITSs]\n #ladder_G = [[1 if nuc in ['G'] else 0 for nuc in i.sequence[:i.msat]] for i in ITSs]\n #ladder_A = [[1 if nuc in ['A'] else 0 for nuc in i.sequence[:i.msat]] for i in ITSs]\n #ladder_C = [[1 if nuc in ['C'] else 0 for nuc in i.sequence[:i.msat]] for i in ITSs]\n #ladder_T = [[1 if nuc in ['T'] else 0 for nuc in i.sequence[:i.msat]] for i in ITSs]\n #lad_np_G = [sum(ladder_G[i][:xx]) for i in range(len(ladder_pre))]\n #lad_np_A = [sum(ladder_A[i][:xx]) for i in range(len(ladder_pre))]\n #lad_np_C = [sum(ladder_C[i][:xx]) for i in range(len(ladder_pre))]\n #lad_np_T = [sum(ladder_T[i][:xx]) for i in range(len(ladder_pre))]\n #msats = [i.msat for i in ITSs]\n\n #nts = ('G', 'A', 'C', 'T')\n #nr_nts = (lad_np_G, lad_np_A, lad_np_C, lad_np_T)\n #for nt, nr_nt in zip(nts, nr_nts):\n #print nt, spearmanr(msats, nr_nt)\n\n avg_corr = []\n np_corr = []\n\n coeff = [0.25, 0.0, 0.55]\n for i in ITSs:\n i.calc_keq(*coeff, msat_normalization=True, rna_len=20)\n\n #rna_lenghts = range(2,10)\n\n #for x in rna_lenghts:\n #lad_np = [sum(ladder_pre[i][:x]) for i in range(len(ladder_pre))]\n ##avg_kbt = [np.mean(i.keq[:x]) for i in ITSs]\n #measure_values = [nanmean(i.keq[:x-1]) for i in ITSs]\n #np_corr.append(spearmanr(py, lad_np)[0])\n #avg_corr.append(spearmanr(py, measure_values)[0])\n\n rna_lenghts = range(12,16)\n for x in rna_lenghts:\n lad_np = [sum(ladder_pre[i][12:x]) for i in range(len(ladder_pre))]\n #avg_kbt = [np.mean(i.keq[:x]) for i in ITSs]\n measure_values = [nanmean(i.keq[12:x-1]) for i in ITSs]\n np_corr.append(spearmanr(py, lad_np)[0])\n avg_corr.append(spearmanr(py, measure_values)[0])\n\n fig, ax = plt.subplots()\n ax.plot(rna_lenghts, [-i for i in avg_corr], label='Average $K_{bt}$ up to RNA length')\n ax.plot(rna_lenghts, np_corr, label='Number of purines up to RNA length')\n\n ax.set_xlabel('RNA length')\n ax.set_ylabel('Spearman correlation with PY')\n ax.grid()\n\n ax.legend(loc='best')\n plt.show()", "title": "" }, { "docid": "98d71e711c9c74f7461ef98bd44acd5a", "score": "0.50434446", "text": "def get_reflectance_values(self):\r\n ret = self.get_sensor_values(ReflectanceSensors)\r\n return ret", "title": "" }, { "docid": "b7828fa5ba7338c7584f66fd381df977", "score": "0.5040579", "text": "def generate_refl_1():\n reflections = flex.reflection_table()\n reflections[\"intensity\"] = flex.double([100.0, 100.0, 80.0, 60.0, 30.0, 40.0, 60.0])\n reflections[\"inverse_scale_factor\"] = flex.double(7, 2.0)\n reflections[\"variance\"] = flex.double([90.0, 100.0, 90.0, 60.0, 30.0, 50.0, 50.0])\n reflections[\"miller_index\"] = flex.miller_index(\n [(1, 0, 0), (0, 0, 1), (-1, 0, 0), (0, 
2, 0), (0, 4, 0), (0, 0, -2), (0, 0, 2)]\n )\n reflections.set_flags(flex.bool(7, True), reflections.flags.integrated)\n return reflections", "title": "" }, { "docid": "033e20c4c4a00d9127f380f7994dec52", "score": "0.50395095", "text": "def create_nicmos_table(output, detector, useafter, pht_table, **kwargs):\n with fits.open(pht_table) as pht:\n modes = np.char.strip(pht[1].data['photmode']).tolist()\n kwargs['mode_list'] = modes\n create_table(output, 'nicmos', **kwargs)", "title": "" }, { "docid": "a3ae46ec4093dbfd503c6e6f4db1f34a", "score": "0.5014108", "text": "def main():\n # First, get numbers/data to manipulate\n dataimgdir = os.path.join('..','data-image')\n l, b, col_density, v_ave, v_std = getData(dataimgdir)\n l = np.mod(l + np.pi, 2*np.pi) - np.pi\n\n # Chief (and kind of annoying but fun tweakables)\n # colormap, contours, norm\n\n # Column density\n ctrs = np.array([0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,2,4,6,8,10,12,14,16]) * 1e21\n print min(col_density),max(col_density)\n #ctrs = np.linspace(min(col_density), max(col_density), 50)\n cmap = cubehelix.cmap(reverse=True)\n makeplot(l, b, col_density, ctrs, cmap, '../plots/col_density.pdf',\n norm=clrs.LogNorm())\n\n # Velocity\n ctrs = np.linspace(-50, 50, 19)\n print min(v_ave), max(v_ave)\n cmap = 'bwr' # Prefer a proper diverging scheme here...\n makeplot(l, b, v_ave, ctrs, cmap, '../plots/veloc_mean.pdf')\n\n # Velocity dispersion\n ctrs = np.linspace(0, 40, 19)\n print min(v_std), max(v_std)\n cmap = cubehelix.cmap(reverse=True)\n makeplot(l, b, v_std, ctrs, cmap, '../plots/veloc_std.pdf')", "title": "" }, { "docid": "61f88e6c84d32333eaec26755c24aa0e", "score": "0.5003094", "text": "def table_ocr(self):\n pass", "title": "" }, { "docid": "1111f48145210bb148fbfd1885899677", "score": "0.49984717", "text": "def __init__ (self,msname,column=\"DATA\"):;\n #os.system(\"addbitflagcol %s\"%msname)\n self.msname = msname;\n tab = table(msname,ack=False,readonly=False)\n self.A0 = A0 = tab.getcol('ANTENNA1')\n self.A1 = A1 = tab.getcol('ANTENNA2')\n # load the data and the weights\n data = tab.getcol(column);\n data_desc_id = tab.getcol(\"DATA_DESC_ID\")\n weight = tab.getcol(\"WEIGHT\")\n flagfreq = tab.getcol(\"FLAG\")\n nfreq = data.shape[1]\n\n self.data = data.copy();\n self.data_desc_id = data_desc_id.copy()\n self.weight = weight.copy()\n print \"Visibility column shape:\",data.shape\n self.na = na = np.max(A1)+1\n # do not consider auto-correlation\n self.nbl = (na*(na-1))/2 \n self.ncorr = data.shape[2]\n self.nbins = data.shape[0]\n self.nfreq = nfreq\n self.flagfreq = flagfreq.copy();\n self.UVW = tab.getcol(\"UVW\")\n\n # get frequency and wavelength (per channel) from SPECTRAL_WINDOW subtable\n #t2 = table(tab.getkeyword(\"SPECTRAL_WINDOW\"),readonly=False)\n #self.channels = t2.getcol(\"CHAN_FREQ\",0);\n #self.freqs = t2.getcol(\"CHAN_FREQ\",0)[0,freq0:freq0+nfreq];\n #t2.close()\n tab.close()", "title": "" }, { "docid": "ada423d02e748c0b53e9f714cbdcb308", "score": "0.49923027", "text": "def triple():\n \n df1 = pd.read_csv('/home/lc585/Dropbox/IoA/nirspec/tables/masterlist_liam.csv', index_col=0)\n df1 = df1[df1.WARN_Ha == 0]\n df1 = df1[(df1.WARN_CIV == 0)]\n df1 = df1[df1.BAL_FLAG != 1]\n \n df2 = pd.read_csv('/home/lc585/Dropbox/IoA/nirspec/tables/masterlist_liam.csv', index_col=0)\n df2 = df2[df2.WARN_Hb == 0]\n df2 = df2[(df2.WARN_CIV == 0)]\n df2 = df2[df2.BAL_FLAG != 1] \n \n df = pd.concat([df1, df2]).drop_duplicates()\n\n df = df[(df.INSTR == 'TRIPLE')]\n \n \n names = []\n dic = {'h':'', 'm':'', 's':'', 
'd': ''}\n for idx, row in df.iterrows():\n name = 'J' + row.RA + row.DEC\n for i, j in dic.iteritems():\n name = name.replace(i, j)\n names.append(name)\n \n z = []\n zsource = []\n for idx, row in df.iterrows():\n if 'SDSS' in str(row.SPEC_OPT):\n z.append(row.z_HW_DR7)\n zsource.append('HW')\n elif 'BOSS' in str(row.SPEC_OPT):\n z.append(row.z_PCA_DR12)\n zsource.append('PCA_DR12')\n else:\n z.append(row.z)\n zsource.append(row.z_source)\n\n z = [format(i, '.4f') for i in z]\n\n imag = np.around(df.psfMag_i.values, decimals=2)\n specopt = df.SPEC_OPT.values\n specopt[specopt == 'BOSS+SDSS'] = 'BOSS'\n \n snr_ha, snr_hb, snr_civ = [], [], []\n \n bal = np.zeros(len(df), dtype=np.int)\n bal[np.where((df.BAL_FLAG_DR12 == 1) | (df.BAL_FLAG_ALLEN == 1) | (df.BAL_FLAG_S11 == 1))[0]] = 1\n \n radio = np.asarray(df.RADIO_FLAG, dtype=np.int)\n radio[radio < -99] = -1\n \n dates = []\n for idx, row in df.iterrows():\n \tdates.append(row.DATE)\n \n exptimes = []\n for idx, row in df.iterrows():\n exptimes.append(int(float(row.EXPTIME)))\n\n for i, row in df.iterrows():\n \n if row.WARN_CIV == 0:\n snr_civ.append(row.SNR_CIV)\n else:\n snr_civ.append('')\n if row.WARN_Ha == 0:\n snr_ha.append(row.SNR_Ha)\n else:\n snr_ha.append('')\n if row.WARN_Hb == 0:\n snr_hb.append(row.SNR_Hb)\n else:\n snr_hb.append('')\n\n \n # j, h, k = True, True, True \n \n # snr_flag_ha = ['QSO108', 'QSO118', 'QSO110', 'QSO111', 'QSO175']\n # edge_flag_ha = ['QSO130']\n # abs_flag_ha = [] \n \n \n # for i, row in df.iterrows():\n # if (row.WARN_Ha == 2) | np.isnan(row.SNR_Ha) | (row.Ha == 0):\n # snr_ha.append('')\n # elif i in edge_flag_ha:\n # if j:\n # snr_ha.append(str(np.around(row.SNR_Ha, decimals=1)) + '\\\\footnote{\\\\label{footnote2}Line close to edge.}')\n # j = False\n # else:\n # snr_ha.append(str(np.around(row.SNR_Ha, decimals=1)) + '\\\\footref{footnote2}')\n # elif i in snr_flag_ha:\n # if h:\n # snr_ha.append(str(np.around(row.SNR_Ha, decimals=1)) + '\\\\footnote{\\\\label{footnote1}Poor S/N}')\n # h = False \n # else:\n # snr_ha.append(str(np.around(row.SNR_Ha, decimals=1)) + '\\\\footref{footnote1}')\n # elif i in abs_flag_ha:\n # if k:\n # snr_ha.append(str(np.around(row.SNR_Ha, decimals=1)) + '\\\\footnote{\\\\label{footnote3}Absorption}')\n # k = False \n # else:\n # snr_ha.append(str(np.around(row.SNR_Ha, decimals=1)) + '\\\\footref{footnote3}')\n # else:\n # snr_ha.append(np.around(row.SNR_Ha, decimals=1))\n \n # snr_flag_hb = ['QSO154', 'QSO138', 'QSO135', 'QSO146', 'QSO114', 'QSO123', 'QSO152', 'QSO173', 'QSO121', 'QSO137', 'QSO144', 'QSO145', 'QSO156', 'QSO161', 'QSO171', 'QSO175', 'QSO111', 'QSO118']\n # edge_flag_hb = ['QSO150', 'QSO130', 'QSO110'] \n # abs_flag_hb = []\n \n \n # for i, row in df.iterrows():\n # if (row.WARN_Hb == 2) | np.isnan(row.SNR_Hb):\n # snr_hb.append('')\n # elif i in edge_flag_hb:\n # if j:\n # snr_hb.append(str(np.around(row.SNR_Hb, decimals=1)) + '\\\\footnote{\\\\label{footnote2}Line close to edge.}')\n # j = False\n # else:\n # snr_hb.append(str(np.around(row.SNR_Hb, decimals=1)) + '\\\\footref{footnote2}')\n # elif i in snr_flag_hb:\n # if h:\n # snr_hb.append(str(np.around(row.SNR_Hb, decimals=1)) + '\\\\footnote{\\\\label{footnote1}Poor S/N}')\n # h = False \n # else:\n # snr_hb.append(str(np.around(row.SNR_Hb, decimals=1)) + '\\\\footref{footnote1}')\n # elif i in abs_flag_hb:\n # if k:\n # snr_hb.append(str(np.around(row.SNR_Hb, decimals=1)) + '\\\\footnote{\\\\label{footnote3}Absorption}')\n # k = False \n # else:\n # snr_hb.append(str(np.around(row.SNR_Hb, 
decimals=1)) + '\\\\footref{footnote3}')\n # else:\n # snr_hb.append(np.around(row.SNR_Hb, decimals=1))\n \n # edge_flag_civ = [] \n # abs_flag_civ = ['QSO135', 'QSO146']\n # snr_flag_civ = ['QSO138', 'QSO175']\n \n # for i, row in df.iterrows():\n # if (row.WARN_CIV == 2) | np.isnan(row.SNR_CIV):\n # snr_civ.append('')\n # elif i in edge_flag_civ:\n # if j:\n # snr_civ.append(str(np.around(row.SNR_CIV, decimals=1)) + '\\\\footnote{\\\\label{footnote2}Line close to edge.}')\n # j = False\n # else:\n # snr_civ.append(str(np.around(row.SNR_CIV, decimals=1)) + '\\\\footref{footnote2}')\n # elif i in snr_flag_civ:\n # if h:\n # print i\n # snr_civ.append(str(np.around(row.SNR_CIV, decimals=1)) + '\\\\footnote{\\\\label{footnote1}Poor S/N}')\n # h = False \n # else:\n # snr_civ.append(str(np.around(row.SNR_CIV, decimals=1)) + '\\\\footref{footnote1}')\n # elif i in abs_flag_civ:\n # if k:\n # snr_civ.append(str(np.around(row.SNR_CIV, decimals=1)) + '\\\\footnote{\\\\label{footnote3}Absorption}')\n # k = False \n # else:\n # snr_civ.append(str(np.around(row.SNR_CIV, decimals=1)) + '\\\\footref{footnote3}')\n # else:\n # snr_civ.append(np.around(row.SNR_CIV, decimals=1))\n \n tnew = Table()\n \n tnew['Name'] = names\n tnew['Date'] = dates \n tnew['Exp'] = exptimes\n tnew['S/N Ha'] = snr_ha\n tnew['S/N Hb'] = snr_hb\n tnew['Opt. Spec.'] = specopt\n tnew['S/N CIV'] = snr_civ\n tnew['z'] = z\n tnew['imag'] = imag \n # tnew['BAL'] = bal\n tnew['Radio'] = radio \n tnew['zsource'] = zsource\n \n tnew.sort('Name')\n \n # tnew.remove_rows(((tnew['S/N Ha'] == '') & (tnew['S/N Hb'] == '')) | (tnew['S/N CIV'] == ''))\n \n ascii.write(tnew, format='latex')", "title": "" }, { "docid": "52adfa00c0a3dda4c898fbc8bccb1b0c", "score": "0.49922717", "text": "def get_spectra():\n\n # PROSPECT-D\n prospect_d_spectraf = pkgutil.get_data(\n \"prosail\", \"prospect_d_spectra.txt\"\n )\n _, nr, kab, kcar, kant, kbrown, kw, km = np.loadtxt(\n BytesIO(prospect_d_spectraf), unpack=True\n )\n prospect_d_spectra = ProspectDSpectra(nr, kab, kcar, kbrown, kw, km, kant)\n # PROSPECT-PRO\n prospect_pro_spectraf = pkgutil.get_data(\n \"prosail\", \"prospect_pro_spectra.txt\"\n )\n _, nr, kab, kcar, kant, kbrown, kw, km, kprot, kcbc = np.loadtxt(\n BytesIO(prospect_pro_spectraf), unpack=True\n )\n prospect_pro_spectra = ProspectPROSpectra(\n nr, kab, kcar, kbrown, kw, km, kant, kprot, kcbc\n )\n # PROSPECT 5\n prospect_5_spectraf = pkgutil.get_data(\"prosail\", \"prospect5_spectra.txt\")\n nr, kab, kcar, kbrown, kw, km = np.loadtxt(\n BytesIO(prospect_5_spectraf), unpack=True\n )\n prospect_5_spectra = Prospect5Spectra(nr, kab, kcar, kbrown, kw, km)\n # SOIL\n soil_spectraf = pkgutil.get_data(\"prosail\", \"soil_reflectance.txt\")\n rsoil1, rsoil2 = np.loadtxt(BytesIO(soil_spectraf), unpack=True)\n soil_spectra = SoilSpectra(rsoil1, rsoil2)\n # LIGHT\n light_spectraf = pkgutil.get_data(\"prosail\", \"light_spectra.txt\")\n es, ed = np.loadtxt(BytesIO(light_spectraf), unpack=True)\n light_spectra = LightSpectra(es, ed)\n spectra = Spectra(\n prospect_5_spectra,\n prospect_d_spectra,\n prospect_pro_spectra,\n soil_spectra,\n light_spectra,\n )\n return spectra", "title": "" }, { "docid": "ae7a070a80dd3bb2daa1662cecb593f2", "score": "0.49922487", "text": "def extract_data(self, table):\n team1 = table.loc[2][0]\n team2 = table.loc[3][0]\n pace = table.loc[3][1]\n team1_OR = table.loc[2][6]\n team2_OR = table.loc[3][6]\n return team1, team2, team1_OR, team2_OR, pace", "title": "" }, { "docid": "87548f95dfa4edc4171f829714e4b0ec", 
"score": "0.49801907", "text": "def backscatterCrossSectionToReflectivity(self, ref):\n ret=numpy.log10(ref/self.radar_constant)*10.0; #backscat cross section\n return ret", "title": "" }, { "docid": "1080bac60c841f87f62325eb6ffadac1", "score": "0.49797386", "text": "def information_density_measurement(self):\n\t\t# TODO: implement (if needed)\n\t\tpass", "title": "" }, { "docid": "b0f2c19f3e303d6a5bdf2e6c6472570b", "score": "0.49735478", "text": "def pic_scatter():\n vu.pic_scatter(soft_assets_indexes, 'soft_assets')", "title": "" }, { "docid": "0f9522bf1e710105f7414e8be2dec24d", "score": "0.49700594", "text": "def exp01_01():\n\n processaCsv('data/refsComLegenda.csv', 'data/procRefs.csv',\n outHeader=['node','tamanho',\n 'Commenter_in_color',\n 'Commenter_out_color',\n 'Liker_in_color',\n 'Liker_out_color',\n 'Post_Author_in_color',\n 'Post_Author_out_color',\n 'Commenter_in_gray',\n 'Commenter_out_gray',\n 'Liker_in_gray',\n 'Liker_out_gray',\n 'Post_Author_in_gray',\n 'Post_Author_out_gray',\n 'Node silhouette_gray'\n ],\n procFuncs=[\n genIdentity(0),\n genNumElemToTamanho(8,100, 9910),\n genColorFromNtiles(1,[0.0, 0.0 , 0.0 , 0.0117, 0.3082]),\n genColorFromNtiles(2,[0.0, 0.0 , 0.0512, 0.1567, 0.6458]),\n genColorFromNtiles(3,[0.0, 0.0 , 0.0 , 0.0314, 0.3758]),\n genColorFromNtiles(4,[0.0, 0.0385, 0.0810, 0.1510, 0.5409]),\n genColorFromNtiles(6,[0.0, 0.0001, 0.0035, 0.0972, 0.7917]),\n genColorFromNtiles(7,[0.0, 0.0 , 0.0 , 0.3333, 0.4583]),\n genColorFromNtiles(1,[0.0, 0.0 , 0.0 , 0.0117, 0.3082],True),\n genColorFromNtiles(2,[0.0, 0.0 , 0.0512, 0.1567, 0.6458],True),\n genColorFromNtiles(3,[0.0, 0.0 , 0.0 , 0.0314, 0.3758],True),\n genColorFromNtiles(4,[0.0, 0.0385, 0.0810, 0.1510, 0.5409],True),\n genColorFromNtiles(6,[0.0, 0.0001, 0.0035, 0.0972, 0.7917],True),\n genColorFromNtiles(7,[0.0, 0.0 , 0.0 , 0.3333, 0.4583],True),\n genColorFromNtiles(5,[-0.2318, 0.0, 0.1003, 0.5156, 1.0],True),\n ],\n filterFuncs=[genSkipRows([0])])", "title": "" }, { "docid": "ce3e54326e66011917aca3130bda06cf", "score": "0.49310026", "text": "def find_tfl_lights(c_image: np.ndarray, **kwargs):\n src_image = c_image\n c_image = np.dot(c_image[..., :3], [0.299, 0.587, 0.114])\n\n kernel = create_kernel()\n\n #c_image = ndimage.gaussian_filter(c_image, sigma=1)\n mat_konvulutzia = ndimage.convolve(c_image, kernel, mode='constant', cval=0.0)\n\n #plt.imshow(mat_konvulutzia, cmap=\"gray\")\n\n data_max = maximum_filter(mat_konvulutzia, size=5)\n data_max[data_max <= kwargs['some_threshold']] = 0\n\n data_max[data_max != mat_konvulutzia] = 0\n data_max[0:22, :] = 0\n data_max[-22:-1, :] = 0\n data_max[:, 0:22] = 0\n data_max[:, -22:-1] = 0\n\n\n tuple_x, tuple_y = get_lights_indexes(data_max)\n\n tuple_x_zip = ()\n tuple_y_zip = ()\n for i in range(len(tuple_x)):\n current_index_x = tuple_x[i]\n current_index_y = tuple_y[i]\n if not calc_if_small_distance(current_index_x, current_index_y,tuple_x, tuple_y):\n tuple_x_zip += (current_index_x, )\n tuple_y_zip += (current_index_y, )\n\n x_red, y_red, x_green, y_green = seperate_color(tuple_x_zip, tuple_y_zip, src_image)\n\n return x_red, y_red, x_green, y_green", "title": "" }, { "docid": "83c88c66fce82114fc23ef878d6ddcd6", "score": "0.4923593", "text": "def active8(w):\n points = []\n for x in range(5):\n for y in range(5):\n points.append((x, y))\n c = 0\n # character of the vortex\n Cv = -1\n trails = []\n for (x0, y0) in points:\n for (x1, y1) in points:\n if x0 != x1:\n continue\n for (x2, y2) in points:\n for (x3, y3) in points:\n if x2 != x3:\n 
continue\n for (x4, y4) in points:\n for (x5, y5) in points:\n if x4 != x5:\n continue\n for (x6, y6) in points:\n for (x7, y7) in points:\n # from I, II and III\n if x0 == x1 and x2 == x3 and x4 == x5 and x6 == x7\\\n and y1 == y2 and y3 == y4 and y5 == y6 and y7 == y0:\n # z0 == z1 and z2 == z3 and z4 == z5\n # from IV\n dv = rho(x0, y0, w) + rho(x2, y2, w) + rho(x4, y4, w) + rho(x6, y6, w) - (rho(x1, y1, w) + rho(x3, y3, w) + rho(x5, y5, w) + rho(x7, y7, w))\n # dv = rho(x0, y0, w) + rho(x2, y2, w) + rho(x4, y4, w) - (rho(x1, y1, w) + rho(x3, y3, w) + rho(x5, y5, w))\n dv = abs(dv)\n if dv % w == 0:\n if pi(x1 , y1,0, w)[0] == pi(x2, y2,0, w)[0] and pi(x3 , y3,0, w)[0] == pi(x4, y4,0, w)[0] and pi(x5, y5,0, w)[0] == pi(x6, y6,0, w)[0] and pi(x7, y7,0, w)[0] == pi(x0, y0,0, w)[0]:\n # choose z0 freely\n for z0 in range(0, 5):\n z2 = (z0 + rho(x1, y1, w) - rho(x2, y2, w)) % w\n z4 = (z2 + rho(x3, y3, w) - rho(x4, y4, w)) % w\n z6 = (z4 + rho(x5, y5, w) - rho(x6, y6, w)) % w\n # verification step\n if z0 != (z6 + rho(x7, y7, w) - rho(x0, y0, w)) % w:\n continue\n if dv > 0:\n Cv = max(Cv, log2(dv))\n a00 = (x0, y0, z0)\n a01 = (x1, y1, z0)\n a02 = (x2, y2, z2)\n a03 = (x3, y3, z2)\n a04 = (x4, y4, z4)\n a05 = (x5, y5, z4)\n a06 = (x6, y6, z6)\n a07 = (x7, y7, z6)\n if not allunique2(a00, a01, a02, a03, a04, a05, a06, a07):\n continue\n c += 1\n # print(a00, a01, a02, a03, a04, a05)\n trails.append( [a00, a01, a02, a03, a04, a05, a06, a07] )\n print(\"No. of trails = \", c, \", Character of Vortex : \", Cv)\n return trails", "title": "" }, { "docid": "fdb442b2ab7d7e307689e2b48edce43e", "score": "0.49230337", "text": "def analyze_reflected_charge(top, reflectors, comm_world=None):\n\n cond_ids = []\n cond_objs = []\n reflected_charge = {}\n for reflector in reflectors:\n cond_objs.append(reflector._conductor)\n cond_ids.append(reflector._conductor.condid)\n for i, ids in enumerate(cond_ids):\n reflected_charge[ids] = np.copy(cond_objs[i].emitparticles_data[:, 0:5])\n if comm_world:\n all_reflected_charge = {}\n for ids in cond_ids:\n all_reflected_charge[ids] = comm_world.gather(reflected_charge[ids], root=0)\n if comm_world.rank == 0:\n all_reflected_charge[ids] = np.vstack(all_reflected_charge[ids])\n return all_reflected_charge\n else:\n return reflected_charge", "title": "" }, { "docid": "c29601addfd79891c6b5f737ca74e3a6", "score": "0.4921089", "text": "def extract_veloce_1dim(data, sim, lenslet_profile='simu', rnoise=4.0, cmb=0, altvar=0, indiv_fib=0, simprof=0, debug_mode=0):\n \n \n start_time = time.time()\n ex_time = 0.\n \n #ny = sim.x_map.shape[1]\n #nm = sim.x_map.shape[0]\n #nx = sim.szx\n \n #number of columns\n nx = data.shape[0]\n #number of rows\n ny = data.shape[1]\n #number of spectral orders\n nm = sim.x_map.shape[0]\n #number of fibres\n nfib = sim.nl\n #number of \"objects\" (ie star, sky, calibration)\n no = 3\n if indiv_fib == 1:\n no = 28\n \n #load the exact individual-fibre profile that I have reverse-engineered from M.I.'s simulator code\n if simprof == 1:\n standard_phi_os = np.loadtxt('/Users/christoph/UNSW/simulated_spectra/simulated_fibre_profile_N2048.dat')\n #phi = np.loadtxt('/Users/christoph/UNSW/simulated_spectra/phi_N88.txt')\n \n #these are the exact locations of the individual fibres in the 2048-element array used in M.I.'s simulator code\n if indiv_fib == 1:\n #peaklocs = np.arange(316,1378+59,59) that's the stellar ones only\n peaklocs = np.arange(198,1791+59,59)\n peaks = np.zeros(2048)\n peaks[peaklocs] = 1\n \n# #Number of \"objects\" (are the 
\"objects\" star, sky and calibration???)\n# no = sim.square_profile.shape[1]\n# extracted_flux = np.zeros( (nm,ny,no) )\n# extracted_var = np.zeros( (nm,ny,no) )\n \n #Number of \"objects\" (are the \"objects\" star, sky and calibration???)\n #no = sim.square_profile.shape[1]\n extracted_flux = np.zeros( (nm,ny,no) )\n extracted_var = np.zeros( (nm,ny,no) )\n \n #Assuming that the data are in photo-electrons, construct a simple model for the\n #pixel inverse variance.\n #pixel_inv_var = 1.0/(np.maximum(data,0)/sim.gain + rnoise**2)\n# pixel_inv_var[sim.badpixmask]=0.0\n w = 1.0 / (np.maximum(data,0)/sim.gain + rnoise*rnoise)\n \n #Loop through all orders then through all y pixels.\n for i in range(nm):\n #for i in range(1):\n print(\"Extracting order: {0:d}\".format(i))\n #Based on the profile we're using, create the local offsets and profile vectors\n if lenslet_profile == 'square':\n offsets = sim.square_offsets[:,i]\n profile = sim.square_profile\n elif lenslet_profile == 'simu':\n offsets = sim.sim_offsets[:,i]\n profile = sim.sim_profile\n nx_cutout = 2*int( (np.max(offsets) - np.min(offsets))/2 ) + 2\n if simprof == 0:\n phi = np.empty( (nx_cutout,no) )\n \n #timing tests\n print(\"Time taken until loop over pixel columns starts: \",time.time()-start_time, 'seconds')\n \n for j in range(ny):\n #Check for NaNs\n if sim.x_map[i,j] != sim.x_map[i,j]:\n extracted_var[i,j,:] = np.nan\n continue\n #Create our column cutout for the data and the PSF. !!! Is \"round\" correct on the next line???\n #x_map gives the central locations of each order \n x_ix = int(np.round(sim.x_map[i,j])) - nx_cutout//2 + np.arange(nx_cutout,dtype=int) + nx//2\n \n if indiv_fib == 0:\n #CMB 09/08/2017 - added check that the new x-coordinate sequence is increasing (otherwise it returns rubbish)\n if np.all(np.diff(offsets) > 0):\n for k in range(no): \n phi[:,k] = np.interp(x_ix - sim.x_map[i,j] - nx//2, offsets, profile[:,k])\n phi[:,k] /= np.sum(phi[:,k]) #this only works because step size is equal to 1\n else:\n #print 'ERROR: x-coordinate sequence not in increasing order!!!'\n quit()\n elif indiv_fib == 1:\n \n #timing tests\n if i == 0 and j == 0:\n fibtime = 0.\n fibreftime = time.time()\n \n #CMB 09/08/2017 - added check that the new x-coordinate sequence is increasing (otherwise it returns rubbish)\n if np.all(np.diff(offsets) > 0):\n xgrid = x_ix - sim.x_map[i,j] - nx//2\n offsets_peaklocs = offsets[peaklocs]\n phi_os = np.zeros((len(offsets),no))\n phi = np.zeros((len(xgrid),no))\n testphi=phi\n for k in range(no): \n if simprof == 0:\n phi_os[:,k] = fibmodel(offsets,offsets_peaklocs[k],2,norm=1)\n elif simprof == 1:\n phi_os[:,k] = np.roll(standard_phi_os, peaklocs[k]-1024)\n phi[:,k] = np.interp(xgrid, offsets, phi_os[:,k])\n #TESTING\n #intfunc = scipy.interpolate.interp1d(offsets, standard_phi_os)\n #testphi[:,k] = intfunc(xgrid)\n #print(\"phi == testphi ??? : \", (phi==testphi).all() )\n phi[:,k] /= np.sum(phi[:,k]) #this only works because step size is equal to 1\n #TODO: 3 fibres are zero in the ghost configuration!!!\n \n else:\n #print 'ERROR: x-coordinate sequence not in increasing order!!!'\n quit()\n \n #timing tests\n fib_delta_t = time.time() - fibreftime\n fibtime += fib_delta_t \n \n \n #Deal with edge effects...\n ww = np.where( (x_ix >= nx) | (x_ix < 0) )[0]\n x_ix[ww]=0\n phi[ww,:]=0.0\n \n #Stop here. 
\n# if i==10:\n# pdb.set_trace()\n \n #Cut out our data and inverse variance.\n col_data = data[j,x_ix]\n col_w = w[j,x_ix]\n \n #timing test\n if i == 0 and j == 0:\n ex_time = 0.\n ref_time = time.time()\n \n if cmb == 1:\n eta,var = linalg_extract_column(col_data, col_w, phi, altvar=1)\n extracted_flux[i,j,:] = eta\n extracted_var[i,j,:] = var\n elif cmb == 0:\n #this is Mike Ireland's original version\n #Fill in the \"c\" matrix and \"b\" vector from Sharp and Birchall equation 9\n #Simplify things by writing the sum in the computation of \"b\" as a matrix\n #multiplication. We can do this because we're content to invert the \n #(small) matrix \"c\" here. Equation 17 from Sharp and Birchall \n #doesn't make a lot of sense... so lets just calculate the variance in the\n #simple explicit way.\n col_w_mat = np.reshape(col_w.repeat(no), (nx_cutout,no) ) #why do the weights have to be the same for every \"object\"?\n b_mat = phi * col_w_mat\n c_mat = np.dot(phi.T,phi * col_w_mat)\n pixel_weights = np.dot(b_mat,np.linalg.inv(c_mat)) #pixel weights are the z_ki in M.I.'s description\n extracted_flux[i,j,:] = np.dot(col_data,pixel_weights) #these are the etas\n extracted_var[i,j,:] = np.dot(1.0/np.maximum(col_w,1e-12),pixel_weights**2)\n #if ((i % 5)==1) & (j==ny//2):\n #if (i%5==1) & (j==ny//2):\n #if (j==ny//2):\n # pdb.set_trace()\n \n delta_t = time.time() - ref_time\n ex_time += delta_t\n \n print('Total time: ', time.time() - start_time, 'seconds')\n if indiv_fib == 1:\n print('Time for creation of phi: ',fibtime, 'seconds')\n print('Time for extraction: ',ex_time, 'seconds')\n \n return extracted_flux, extracted_var", "title": "" }, { "docid": "d91b2158ba99de48bcd09a39302ceae1", "score": "0.4918805", "text": "def small_reflection_table():\n return generate_refl_2()", "title": "" }, { "docid": "d7b6d6cadf2e6f80ca5e2c14a5c81dfd", "score": "0.4912909", "text": "def gen_spectra():\n X = np.random.random(size=(5, 1, 16))\n X_ = np.random.random(size=(5, 1, 16))\n return X, X_", "title": "" }, { "docid": "76df7de1b5b38bfe953ad9ab256567ec", "score": "0.49106288", "text": "def getReflectivity(self):\n \n pass", "title": "" }, { "docid": "b6775aec9c0778a8075a46dd168cb062", "score": "0.49014875", "text": "def get_level3(self):\n\n level3 = [['image', 'IMAGE', '100A', ''],\n [0, 'HJD', 'E', ''],\n [1, 'EXPTIME', 'E', 's'],\n [2, 'SKYBKGD', 'E', 'counts'],\n [3, 'SKYSIG', 'E', 'counts'],\n [4, 'FWHM', 'E', 'pix'],\n [5, 'NSTARS', 'I', ''],\n [None, 'AIRMASS', 'E', ''],\n [None, 'MOONSEP', 'E', 'degrees'],\n [None, 'MOONFRAC', 'E', '%'],\n ]\n image_list = list(self.imred.keys())\n image_list.sort\n data = []\n for image in image_list:\n data.append(self.imred[image])\n data = np.array(data)\n table = []\n for col, key, fstr, unit in level3:\n if col == 'image':\n table.append(fits.Column(name=key, format=fstr,\n array=np.array(image_list),\n unit=unit))\n elif col != None and col > 0:\n table.append(fits.Column(name=key, format=fstr,\n array=data[:, col],\n unit=unit))\n else:\n table.append(fits.Column(name=key, format=fstr,\n array=np.zeros(len(data[:, 0])),\n unit=unit))\n tbhdu = fits.BinTableHDU.from_columns(table)\n\n return tbhdu", "title": "" }, { "docid": "4c1de7e150ffda9fbfd5bfab3b9b693d", "score": "0.4900326", "text": "def gen_multiple_ion_species_spectrum():\n wavelengths = np.arange(520, 545, 0.01) * u.nm\n probe_wavelength = 532 * u.nm\n n = 5e17 * u.cm ** -3\n probe_vec = np.array([1, 0, 0])\n scatter_vec = np.array([0, 1, 0])\n ifract = np.array([0.7, 0.3])\n Te = 10 * u.eV\n Ti = 
np.array([5, 5]) * u.eV\n electron_vel = np.array([[300, 0, 0]]) * u.km / u.s\n ion_vel = np.array([[-500, 0, 0], [0, 500, 0]]) * u.km / u.s\n\n # Use this to also test passing in ion species as Particle objects\n ion_species = [Particle(\"p+\"), Particle(\"C-12 5+\")]\n\n alpha, Skw = thomson.spectral_density(\n wavelengths,\n probe_wavelength,\n n,\n Te,\n Ti,\n ifract=ifract,\n ion_species=ion_species,\n probe_vec=probe_vec,\n scatter_vec=scatter_vec,\n electron_vel=electron_vel,\n ion_vel=ion_vel,\n )\n\n return alpha, wavelengths, Skw", "title": "" }, { "docid": "68e0237f5ed970bfc740dd211148de63", "score": "0.48922256", "text": "def __extractDescriptors(self, image, bottom, fixer, circles):\n res = []\n \n # Prepare stencil for concentricity detection\n gray = cv2.cvtColor(bottom, cv2.COLOR_BGR2GRAY)\n (_,stencil) = cv2.threshold(gray,100,255,cv2.THRESH_BINARY_INV)\n\n # Prepare contrasted gray for concentricity detection\n gray = (cv2.cvtColor(image, cv2.COLOR_BGR2HSV))[:,:,1]\n concentricImg = gray\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n concentricImg = clahe.apply(concentricImg)\n concentricImg = np.clip(concentricImg * 1.2, 0, 255).astype(np.uint8)\n\n if circles is not None:\n\n circles = np.round(circles[0, :]).astype(\"int\")\n for (x, y, r) in circles:\n\n # Extract regions of interest\n roi = self.__getRoi(r, x, y, image, gray, fixer)\n stencilRoi = stencil[y - r - self.noncircularExtend:y + r + self.noncircularExtend, x - r - self.noncircularExtend:x + r + self.noncircularExtend]\n concentricRoi = concentricImg[y - r:y + r, x - r:x + r]\n\n cv2.imwrite('img/roi_{0}_{1}.jpg'.format(self.iteration, r), roi)\n # Make predictions to form descriptor vector\n material = self.__predictMaterial(roi) \n concentric = self.__predictConcentric(concentricRoi, r)\n noncircular = self.__predictNoncircular(stencilRoi, r + self.noncircularExtend)\n\n # Append the vector \n res.append((x, y, r, material, concentric, noncircular))\n print(res[-1])\n\n return res", "title": "" }, { "docid": "29396fa13edd2d11196a3aa85518b281", "score": "0.48886326", "text": "def generate_scatter_diagram(self):\n\n # Generate seascatter diagram\n self.df_scatter = calc_seascatter_diagram(self.hs, self.tp, self.hs_bins, self.tp_bins)\n\n # Apply sea scatter to table and plot Hs/Tp distributions\n self.set_scatter_table(self.df_scatter)\n self.plot_hs_tp_distribution()", "title": "" }, { "docid": "0d043fe938518bf2ed2c0de15e0e73d3", "score": "0.48680958", "text": "def thomson_scattering_data(ods, pulse, revision='BLESSED', _get_measurements=True):\n systems = ['TANGENTIAL', 'DIVERTOR', 'CORE']\n\n # get the actual data\n query = {'calib_nums': f'.ts.{revision}.header.calib_nums'}\n for system in systems:\n for quantity in ['R', 'Z', 'PHI']:\n query[f'{system}_{quantity}'] = f'.TS.{revision}.{system}:{quantity}'\n if _get_measurements:\n for quantity in ['TEMP', 'TEMP_E', 'DENSITY', 'DENSITY_E', 'TIME']:\n query[f'{system}_{quantity}'] = f'.TS.{revision}.{system}:{quantity}'\n tsdat = mdsvalue('d3d', treename='ELECTRONS', pulse=pulse, TDI=query).raw()\n\n # Read the Thomson scattering hardware map to figure out which lens each chord looks through\n cal_set = tsdat['calib_nums'][0]\n query = {}\n for system in systems:\n query[f'{system}_hwmapints'] = f'.{system}.hwmapints'\n hw_ints = mdsvalue('d3d', treename='TSCAL', pulse=cal_set, TDI=query).raw()\n\n # assign data in ODS\n i = 0\n for system in systems:\n if isinstance(tsdat[f'{system}_R'], Exception):\n continue\n nc = 
len(tsdat[f'{system}_R'])\n if not nc:\n continue\n\n # determine which lenses were used\n ints = hw_ints[f'{system}_hwmapints']\n if len(np.shape(ints)) < 2:\n # Contingency needed for cases where all view-chords are taken off of divertor laser and reassigned to core\n ints = ints.reshape(1, -1)\n lenses = ints[:, 2]\n\n # Assign data to ODS\n for j in range(nc):\n ch = ods['thomson_scattering']['channel'][i]\n if not _get_measurements:\n ch['name'] = 'TS_{system}_r{lens:+0d}_{ch:}'.format(\n system=system.lower(), ch=j, lens=lenses[j] if lenses is not None else -9\n )\n ch['identifier'] = f'{system[0]}{j:02d}'\n ch['position']['r'] = tsdat[f'{system}_R'][j]\n ch['position']['z'] = tsdat[f'{system}_Z'][j]\n ch['position']['phi'] = -tsdat[f'{system}_PHI'][j] * np.pi / 180.0\n else:\n ch['n_e.time'] = tsdat[f'{system}_TIME'] / 1e3\n ch['n_e.data'] = unumpy.uarray(tsdat[f'{system}_DENSITY'][j], tsdat[f'{system}_DENSITY_E'][j])\n ch['t_e.time'] = tsdat[f'{system}_TIME'] / 1e3\n ch['t_e.data'] = unumpy.uarray(tsdat[f'{system}_TEMP'][j], tsdat[f'{system}_TEMP_E'][j])\n i += 1", "title": "" }, { "docid": "a7cbd30096b533b4ec0af0f5e53c7ac5", "score": "0.48679417", "text": "def getMeasurements(maskedData, seedValue, wcs, time, streak, diff, std):\n\n x_array, y_array, ra_array, dec_array, az_array,alt_array = [], [], [], [], [], []\n amp_array, snr_array = [], []\n\n points = np.array(np.where(maskedData > 0)).T\n\n for p in points:\n #print(\"p {0}\".format(p))\n row, col = p\n #print(\"row {1} col {2}\".format(p,row, col))\n amp_array.append(maskedData[row, col])\n sigma = diff[row, col]/std\n snr_array.append(sigma)\n x_array.append(col)\n y_array.append(row)\n #ra, dec = xy2RaDec(col, row, wcs)\n #ra_array.append(ra)\n #dec_array.append(dec)\n #alt, az = getAltAz(col, row, time, wcs)\n #el_array.append(alt)\n #az_array.append(az)\n\n ra_array, dec_array = xy2RaDec(x_array, y_array, wcs)\n alt_array, az_array = getAltAz(ra_array, dec_array, time, wcs)\n\n \n return x_array, y_array, ra_array, dec_array, az_array,alt_array, amp_array, snr_array", "title": "" }, { "docid": "f374f3680865e4215b24e745a383362f", "score": "0.48674154", "text": "def get_level4(self):\n\n level1 = [['image', 'IMAGE', '100A', ''],\n [0, 'A0', 'E', ''],\n [1, 'A1', 'E', 's'],\n [2, 'A2', 'E', 'counts'],\n [3, 'A3', 'E', 'counts'],\n [4, 'A4', 'E', 'pix'],\n [5, 'A5', 'E', ''],\n [6, 'A6', 'E', ''],\n [7, 'NSMATCH', 'I', 'degrees'],\n [8, 'RMSX', 'E', '%'],\n [9, 'RMSY', 'E', '%'],\n ]\n\n image_list = list(self.gimred.keys())\n image_list.sort\n data = []\n for image in image_list:\n data.append(self.gimred[image])\n data = np.array(data)\n table = []\n for col, key, fstr, unit in level1:\n if col == 'image':\n table.append(fits.Column(name=key, format=fstr,\n array=np.array(image_list),\n unit=unit))\n elif col != None and col > 0:\n table.append(fits.Column(name=key, format=fstr,\n array=data[:, col],\n unit=unit))\n else:\n table.append(fits.Column(name=key, format=fstr,\n array=np.zeros(len(data[:, 0])),\n unit=unit))\n tbhdu = fits.BinTableHDU.from_columns(table)\n\n return tbhdu", "title": "" }, { "docid": "5a6a0c9897f7c1200704565c29c3c05e", "score": "0.48662657", "text": "def getphisg(zcen=0.1,addcolor=0):\r\n if (zcen>0.2):\r\n return(N.array([1,1]), N.array([1,1]),0,0,0,0)\r\n ctypelist=(\"all\",\"quiescent\",\"starforming\")\r\n logm = 9.0+ N.arange(31)*1./10. 
\r\n if (addcolor==0): #only have fsps\r\n logphi = N.array([-1.899,-1.923,-1.970,-2.031,-2.055,-2.106,-2.144,-2.179,-2.188,-2.216,-2.234,-2.235,-2.262,-2.252,-2.285,-2.317,-2.365,-2.419,-2.504,-2.607,-2.728,-2.888,-3.104,-3.332,-3.606,-3.953,-4.363,-4.778,-5.255,-5.87,-6.49])\r\n logphi_plus = N.array([0.017,0.017,0.015,0.015,0.014,0.012,0.012,0.012,0.010,0.0086,0.0080,0.0069,0.0063,0.0056,0.0051,0.0047,0.0044,0.0041,0.0040,0.0039,0.0040,0.0043,0.0049,0.0059,0.0080,0.012,0.020,0.033,0.060,0.010,0.030])\r\n logphi_minus = N.array([-0.017,-0.016,-0.015,-0.014,-0.013,-0.012,-0.011,-0.012,-0.010,-0.0084,-0.0078,-0.0068,-0.0062,-0.0056,-0.0051,-0.0046,-0.0044,-0.0041,-0.0040,-0.0039,-0.0040,-0.0043,-0.0048,-0.0059,-0.0079,-0.012,-0.019,-0.031,-0.053,-0.010,-0.020])\r\n if (addcolor==1): #only have fsps\r\n logphi = N.array([-2.495,-2.486,-2.485,-2.523,-2.576,-2.603,-2.634,-2.642,-2.652,-2.655,-2.649,-2.614,-2.607,-2.5640,-2.5640,-2.5800,-2.6050,-2.6450,-2.7050,-2.7860,-2.8840,-3.0190,-3.2090,-3.4130,-3.6670,-4.002,-4.401,-4.806,-5.296,-5.93,-6.16])\r\n logphi_plus = N.array([0.048,0.044,0.038,0.037,0.033,0.030,0.026,0.028,0.021,0.018,0.015,0.013,0.011,0.0089,0.0077,0.0069,0.0062,0.0057,0.0053,0.0050,0.0049,0.0050,0.0055,0.0065,0.0085,0.013,0.021,0.034,0.063,0.10,0.40])\r\n logphi_minus = N.array([-0.043,-0.041,-0.035,-0.034,-0.031,-0.028,-0.025,-0.026,-0.020,-0.017,-0.015,-0.012,-0.011,-0.0087,-0.0076,-0.0068,-0.0061,-0.0056,-0.0052,-0.0050,-0.0049,-0.0050,-0.0054,-0.0064,-0.0084,-0.012,-0.020,-0.032,-0.056,-0.10,-0.20])\r\n if (addcolor==2): #only have fsps\r\n logphi = N.array([-2.026,-2.062,-2.129,-2.201,-2.211,-2.272,-2.313,-2.362,-2.371,-2.4120,-2.4450,-2.4700,-2.5240,-2.5410,-2.6090,-2.6600,-2.7370,-2.8110,-2.9340,-3.0770,-3.2500,-3.4720,-3.769,-4.102,-4.487,-4.930,-5.437,-5.98,-6.30,-6.77,-7.09])\r\n logphi_plus = N.array([0.018,0.017,0.015,0.014,0.014,0.012,0.012,0.011,0.011,0.0092,0.0090,0.0079,0.0074,0.0071,0.0066,0.0063,0.0062,0.0059,0.0061,0.0064,0.0071,0.0085,0.011,0.016,0.024,0.042,0.079,0.20,0.30,0.60,1.00])\r\n logphi_minus = N.array([-0.017,-0.016,-0.015,-0.014,-0.013,-0.012,-0.012,-0.011,-0.011,-0.0090,-0.0088,-0.0078,-0.0072,-0.0070,-0.0065,-0.0062,-0.0061,-0.0059,-0.0060,-0.0063,-0.0070,-0.0084,-0.010,-0.015,-0.023,-0.038,-0.067,-0.10,-0.20,-0.30,-0.40])\r\n \r\n phi = 10**logphi\r\n phip = phi*(10**logphi_plus -1)\r\n phim =phi*(1-10**logphi_minus)\r\n return(logm,phi,phip,phim,0.01,0.2)", "title": "" }, { "docid": "2e3b36396f73aec5cc1242a7fd6c1bab", "score": "0.48661268", "text": "def get_nonlin_fields(inpath, outpath):\n bigf = File(inpath)\n header = bigf.open('Header')\n boxsize = header.attrs['BoxSize'][0]\n redshift = 1./header.attrs['Time'][0] - 1\n \n Ng = header.attrs['TotNumPart'][1] ** (1/3)\n Ng = int(np.rint(Ng))\n\n cellsize = boxsize / Ng\n\n pid_ = bigf.open('1/ID')[:] - 1 # so that particle id starts from 0\n pos_ = bigf.open('1/Position')[:]\n pos = np.empty_like(pos_)\n pos[pid_] = pos_\n pos = pos.reshape(Ng, Ng, Ng, 3)\n \n vel_ = bigf.open('1/Velocity')[:]\n vel = np.empty_like(vel_)\n vel[pid_] = vel_\n vel = vel.reshape(Ng, Ng, Ng, 3)\n del pid_, pos_, vel_\n\n dis = pos2dis(pos, boxsize, Ng)\n del pos\n\n dis = dis.astype('f4')\n vel = vel.astype('f4')\n \n dis = np.moveaxis(dis,-1,0)\n vel = np.moveaxis(vel,-1,0)\n \n disp = cosmology.disnorm(dis,z=redshift)\n velocity = cosmology.velnorm(vel,z=redshift)\n catnorm = np.concatenate([disp,velocity],axis=0)\n catnorm = catnorm.astype('f4')\n print (\"z=%.1f\"%redshift,\"catnorm 
shape:\",np.shape(catnorm))\n \n np.save(outpath,catnorm)", "title": "" }, { "docid": "c237220d7b8941cab5aad6389641782c", "score": "0.4863102", "text": "def cat_snrcat():\n gammasky.make_snrcat_catalog_data()", "title": "" }, { "docid": "9540b7eeb7224f56f3409fd1ebe0ca82", "score": "0.48558974", "text": "def cgpstudy():\n from numpy import concatenate as cat\n\n gt = np.array(genotypes)\n par = cat([monogenicpar(g, hetpar=par0, absvar=absvar) for g in gt])\n ph = cat([par2ph(p) for p in par])\n agg = cat([ph2agg(p) for p in ph])\n \n summarize(gt, agg)\n plt.show()", "title": "" }, { "docid": "98452b9711e9a904ef200c4b8225b81d", "score": "0.48558894", "text": "def get_shadow_values(self):\n for n in self.lp.constraints:\n try:\n print n, self.lp.constraints[n].pi\n except:\n print n, 0.", "title": "" }, { "docid": "425f04e50af9e31863631439fd469586", "score": "0.4854773", "text": "def print_out_of_sample_rmsd_table(self):\n\n # check if it makes sense to proceed\n if \"out_of_sample_rmsd\" not in self._refinery.history:\n return\n nref = len(self.get_free_reflections())\n if nref < 10:\n return # don't do anything if very few refs\n\n logger.info(\"\\nRMSDs for out-of-sample (free) reflections:\")\n\n rmsd_multipliers = []\n header = [\"Step\", \"Nref\"]\n for (name, units) in zip(self._target.rmsd_names, self._target.rmsd_units):\n if units == \"mm\":\n header.append(name + \"\\n(mm)\")\n rmsd_multipliers.append(1.0)\n elif units == \"rad\": # convert radians to degrees for reporting\n header.append(name + \"\\n(deg)\")\n rmsd_multipliers.append(RAD2DEG)\n else: # leave unknown units alone\n header.append(name + \"\\n(\" + units + \")\")\n\n rows = []\n for i in range(self._refinery.history.get_nrows()):\n rmsds = [\n r * m\n for r, m in zip(\n self._refinery.history[\"out_of_sample_rmsd\"][i], rmsd_multipliers\n )\n ]\n rows.append([str(i), str(nref)] + [f\"{e:.5g}\" for e in rmsds])\n\n logger.info(dials.util.tabulate(rows, header))\n\n return", "title": "" }, { "docid": "932aaaf82f92edcca43afee76f380cbe", "score": "0.4853871", "text": "def Thresholds( self ) :\n\t\td = {}\n\t\tfrom Hlt2Lines.EW.Lines import EWLines\n\t\t\n\t\td.update( { EWLines : {\n\t\t\t\t\t'Prescale' : {'Hlt2EWDiMuonDY1' : 0.02,\n\t\t\t\t\t\t 'Hlt2EWDiMuonDY2' : 0.3,\n\t\t\t\t\t\t 'Hlt2EWSingleMuonLowPt' : 0.002,\n\t\t\t\t\t\t 'Hlt2EWSingleMuonHighPt' : 0.1,\n\t\t\t\t\t\t 'Hlt2EWSingleElectronLowPt' : 0.001,\n\t\t\t\t\t\t 'Hlt2EWSingleElectronHighPt' : 0.01,\n\t\t\t\t\t\t 'Hlt2EWSingleTauHighPt2Prong' : 0.2,\n\t\t\t\t\t\t 'Hlt2EWSingleTauHighPt3Prong' : 0.5},\n\t\t\t\t\t# DiMuon\n\t\t\t\t\t'DiMuonZ' : {'MinMass' : 40000 * MeV,\n\t\t\t\t\t\t\t\t 'Pt' : 0 * MeV},\n\t\t\t\t\t\n\t\t\t\t\t'DiMuonDY1' : {'MinMass' : 2500 * MeV,\n\t\t\t\t\t\t\t\t 'MinExcMass' : 3000 * MeV,\n\t\t\t\t\t\t\t\t 'MaxExcMass' : 3200 * MeV,\n\t\t\t\t\t\t\t\t 'MuPt' : 800 * MeV,\n\t\t\t\t\t\t\t\t 'Pt' : 0 * MeV,\n\t\t\t\t\t\t\t\t 'TkChi2' : 10},\n\t\t\t\t\t\n\t\t\t\t\t'DiMuonDY2' : {'MinMass' : 5000 * MeV,\n\t\t\t\t\t\t\t\t 'MuPt' : 1000 * MeV,\n\t\t\t\t\t\t\t\t 'Pt' : 0 * MeV,\n\t\t\t\t\t\t\t\t 'TkChi2' : 10},\n\t\t\t\t\t\n\t\t\t\t\t'DiMuonDY3' : {'MinMass' : 10000 * MeV,\n\t\t\t\t\t\t\t\t 'MuPt' : 0 * MeV,\n\t\t\t\t\t\t\t\t 'TkChi2' : 10},\n\t\t\t\t\t\n\t\t\t\t\t'DiMuonDY4' : {'MinMass' : 20000 * MeV,\n\t\t\t\t\t\t\t\t 'MuPt' : 0 * MeV,\n\t\t\t\t\t\t\t\t 'TkChi2' : 10},\n\t\t\t\t\t\n\t\t\t\t\t# Single Muon\n\t\t\t\t\t'SingleMuonLowPt' : {'Pt' : 4800 * MeV,\n\t\t\t\t\t\t\t\t 'TkChi2' : 10},\n\t\t\t\t\t\n\t\t\t\t\t'SingleMuonHighPt' : {'Pt' : 10000 
* MeV},\n\t\t\t\t\t\n\t\t\t\t\t'SingleMuonVHighPt' : {'Pt' : 12500 * MeV},\n\t\t\t\t\t\n\t\t\t\t\t# DiElectron\n\t\t\t\t\t'DiElectronDY' : {'L0Req' : \"L0_CHANNEL('Electron')\",\n\t\t\t\t\t\t\t\t 'Hlt1Req' : \"HLT_PASS_RE('Hlt1(Track|.*Electron).*Decision')\",\n\t\t\t\t\t\t\t\t 'MinMass' : 10000 * MeV,\n\t\t\t\t\t\t\t\t 'MaxMass' : 1e+10 * MeV,\n\t\t\t\t\t\t\t\t 'VtxChi2' : 25,\n\t\t\t\t\t\t\t\t 'Pt' : -999 * MeV,\n\t\t\t\t\t\t\t\t 'ElecPt' : 1000 * MeV,\n\t\t\t\t\t\t\t\t 'ElecPIDe' : 1.5,\n\t\t\t\t\t\t\t\t 'ElecTkChi2' : 10,\n\t\t\t\t\t\t\t\t 'PrsMin' : 50,\n\t\t\t\t\t\t\t\t 'EcalMin' : 0.1,\n\t\t\t\t\t\t\t\t 'HcalMax' : 0.05},\n\t\t\t\t\t\n\t\t\t\t\t'DiElectronHighMass' : {'L0Req' : \"L0_CHANNEL('Electron')\",\n\t\t\t\t\t\t\t\t 'Hlt1Req' : \"HLT_PASS_RE('Hlt1(Track|.*Electron).*Decision')\",\n\t\t\t\t\t\t\t\t 'MinMass' : 20000 * MeV,\n\t\t\t\t\t\t\t\t 'VtxChi2' : 25,\n\t\t\t\t\t\t\t\t 'TkChi2' : 10,\n\t\t\t\t\t\t\t\t 'Pt' : -999 * MeV,\n\t\t\t\t\t\t\t\t 'ElecPt' : 10000 * MeV,\n\t\t\t\t\t\t\t\t 'PrsMin' : 50,\n\t\t\t\t\t\t\t\t 'EcalMin' : 0.1,\n\t\t\t\t\t\t\t\t 'HcalMax' : 0.05},\n\t\t\t\t\t\n\t\t\t\t\t# Single Electron\n\t\t\t\t\t'SingleElectronLowPt' : {'L0Req' : \"L0_CHANNEL('Electron')\",\n\t\t\t\t\t\t\t\t 'Hlt1Req' : \"HLT_PASS_RE('Hlt1(Track|.*Electron).*Decision')\",\n\t\t\t\t\t\t\t\t 'Pt' : 4800 * MeV,\n\t\t\t\t\t\t\t\t 'PIDe' : 4,\n\t\t\t\t\t\t\t\t 'TkChi2' : 5,\n\t\t\t\t\t\t\t\t 'PrsMin' : 50,\n\t\t\t\t\t\t\t\t 'EcalMin' : 0.1,\n\t\t\t\t\t\t\t\t 'HcalMax' : 0.05},\n\t\t\t\t\t\n\t\t\t\t\t'SingleElectronHighPt' : {'L0Req' : \"L0_CHANNEL('Electron')\",\n\t\t\t\t\t\t\t\t 'Hlt1Req' : \"HLT_PASS_RE('Hlt1(Track|.*Electron).*Decision')\",\n\t\t\t\t\t\t\t\t 'Pt' : 10000 * MeV,\n\t\t\t\t\t\t\t\t 'PrsMin' : 50,\n\t\t\t\t\t\t\t\t 'EcalMin' : 0.1,\n\t\t\t\t\t\t\t\t 'HcalMax' : 0.05,\n\t\t\t\t\t\t\t\t 'TkChi2' : 20},\n\t\t\t\t\t\n\t\t\t\t\t'SingleElectronVHighPt' : {'L0Req' : \"L0_CHANNEL('Electron')\",\n\t\t\t\t\t\t\t\t 'Hlt1Req' : \"HLT_PASS_RE('Hlt1(Track|.*Electron).*Decision')\",\n\t\t\t\t\t\t\t\t 'Pt' : 15000 * MeV,\n\t\t\t\t\t\t\t\t 'PrsMin' : 50,\n\t\t\t\t\t\t\t\t 'EcalMin' : 0.1,\n\t\t\t\t\t\t\t\t 'HcalMax' : 0.05,\n\t\t\t\t\t\t\t\t 'TkChi2' : 20},\n\t\t\t\t\t# Single Tau\n\t\t\t\t\t'TauTrkFilter' : {'trk_PT' : 2 * GeV,\n\t\t\t\t\t\t\t\t 'trk_TRCHI2DOF_MAX' : 3},\n\t\t\t\t\t\n\t\t\t\t\t'TauRhoCombiner' : {'RHO_M_MIN' : 0 * GeV,\n\t\t\t\t\t\t\t\t 'RHO_M_MAX' : 2 * GeV,\n\t\t\t\t\t\t\t\t 'RHO_PT_MIN' : 0 * GeV,\n\t\t\t\t\t\t\t\t 'PI_PT_MIN' : 2 * GeV},\n\t\t\t\t\t\n\t\t\t\t\t'Tau3PiCombiner' : {'sumPT' : 8 * GeV,\n\t\t\t\t\t\t\t\t 'PT' : 8 * GeV,\n\t\t\t\t\t\t\t\t 'childPT' : 2 * GeV,\n\t\t\t\t\t\t\t\t 'maxchildPT' : 5 * GeV,\n\t\t\t\t\t\t\t\t 'FDCHI2_MIN' : 10,\n\t\t\t\t\t\t\t\t 'FDT_MIN' : 0.5,\n\t\t\t\t\t\t\t\t 'VCHI2_NDOF_MAX' : 20,\n\t\t\t\t\t\t\t\t 'DOCA_MAX' : 0.1,\n\t\t\t\t\t\t\t\t 'CORRM_MIN' : 1.2 * GeV,\n\t\t\t\t\t\t\t\t 'CORRM_MAX' : 2.0 * GeV},\n\t\t\t\t\t\n\t\t\t\t\t'TauPiPi0Combiner' : {'sumPT' : 8 * GeV,\n\t\t\t\t\t\t\t\t 'PT' : 8 * GeV,\n\t\t\t\t\t\t\t\t 'childPT' : 5 * GeV,\n\t\t\t\t\t\t\t\t 'maxchildPT' : 5 * GeV,\n\t\t\t\t\t\t\t\t 'M_MIN' : 0 * GeV,\n\t\t\t\t\t\t\t\t 'M_MAX' : 1.5 * GeV},\n\t\t\t\t\t\n\t\t\t\t\t'SingleTauHighPt2Prong' : {'PT' : 15 * GeV},\n\t\t\t\t\t\n\t\t\t\t\t'SingleTauHighPt3Prong' : {'PT' : 12 * GeV}\n\t\t\t\t\t\n\t\t\t\t\t} } )\n\t\t\n\t\treturn d", "title": "" }, { "docid": "0823452d69c9759d1d5185d4293471b0", "score": "0.48529094", "text": "def detection_efficiency(plant,cat):\n\n # provide the plant and detection cat run to find efficiency\n 
# unpack the plant (the image and locations)\n plant_im,pixels=plant \n # unpack the detection catalog objs (cat,image,threshold,segm)\n catalog,image,threshold,segm,targ_obj = cat\n\n hdr=image.header\n Nfakes=hdr['NfakeSNe']\n magfakes=hdr['fakeSNmag']\n #print('Nfakes ~ {} (= {} quick sanity check) planted in this image'.format(Nfakes,len(pixels)))\n #print('Nsources ~ {} detected in the image'.format(len(catalog)))\n \n # use locations and a search radius on detections and plant locations to get true positives\n tbl = catalog.to_table()\n tbl_x,tbl_y = [i.value for i in tbl['xcentroid']], [i.value for i in tbl['ycentroid']]\n tbl_pixels = list(zip(tbl_x,tbl_y))\n tbl.add_column(Column(tbl_pixels),name='pix') # adding this for easier use indexing tbl later\n search = 5 # fwhm*n might be better criteria\n truths = []\n for pixel in tbl_pixels:\n for i in pixels:\n if pixel[0] > i[0] - search and pixel[0] < i[0] + search and pixel[1] > i[1] - search and pixel[1] < i[1] + search:\n truths.append([i,pixel])\n #print(i,pixel)\n else:\n continue\n #print('{} source detections within search radius criteria'.format(len(truths)))\n # TODO: get the tbl_pixels which were outside the search radius criteria and return them as false positives\n \n # break truths into the plant pixels and det src pixel lists; easier to work w\n plant_pixels = []\n det_src_pixels = []\n for i in truths:\n plant_pix = i[0]\n det_src_pix = i[1]\n plant_pixels.append(plant_pix)\n det_src_pixels.append(det_src_pix)\n # the plant pixels which had multiple sources detected around it\n repeat_plant = [item for item, count in collections.Counter(plant_pixels).items() if count > 1]\n # the plant pixels which only had one source detected \n single_plant = [item for item, count in collections.Counter(plant_pixels).items() if count == 1]\n N_plants_detected = len(single_plant) + len(repeat_plant)\n # adding nearby_plantpix col to src table; using None if source wasnt within the search radius of plant\n plant_col = []\n for i in tbl:\n tbl_x,tbl_y = i['xcentroid'].value,i['ycentroid'].value\n if (tbl_x,tbl_y) in det_src_pixels:\n idx = det_src_pixels.index((tbl_x,tbl_y))\n plant_col.append(plant_pixels[idx])\n else:\n plant_col.append(None)\n tbl.add_column(Column(plant_col),name='nearby_plantpix')\n \n # index table to grab false source detections\n false_tbl = tbl[tbl['nearby_plantpix']==None]\n truth_tbl = tbl[tbl['nearby_plantpix']!=None]\n \n single_truth_tbl,repeat_truth_tbl = [],[]\n for i in truth_tbl:\n if i['nearby_plantpix'] in repeat_plant:\n repeat_truth_tbl.append(i)\n else:\n single_truth_tbl.append(i)\n # should use a check on length rather than try/except below here\n # try/excepting is to avoid error for empty lists\n # mainly an issue on repeat truth tbl \n try:\n single_truth_tbl = vstack(single_truth_tbl)\n except:\n pass\n try:\n repeat_truth_tbl = vstack(repeat_truth_tbl)\n except:\n pass \n #print('Final: {} planted SNe, {} clean single detections, {} as multi-sources near a plant, {} false detections'.format(Nfakes,len(single_truth_tbl),len(repeat_truth_tbl),len(false_tbl)))\n print('{} planted SNe had single clean source detected, {} planted SNe had multiple sources detected nearby, {} false detections'.format(len(single_plant),len(repeat_plant),len(false_tbl)))\n\n efficiency = N_plants_detected/len(pixels)\n\n print('Detection efficiency (N_plants_detected/N_plants) ~ {} on mag ~ {} SNe'.format(efficiency,magfakes))\n return efficiency,magfakes,tbl,single_truth_tbl,repeat_truth_tbl,false_tbl", "title": 
"" }, { "docid": "aaf953682ba9453f7708c6182f561f42", "score": "0.48525953", "text": "def generateEyeSight (np) :\n for i in range(0, len(Vision)) :\n key = list(Vision.keys())[i]\n perc = Vision[key]\n for j in range (0, round((perc*np)/100)) :\n vision.append(key)", "title": "" }, { "docid": "83f84a4bc43be569603d3fd5a26f3700", "score": "0.48461956", "text": "def get_data_ch7():\n data = np.genfromtxt('../data/airfoil_self_noise.dat', delimiter='\\t')\n data = (data - data.mean(axis=0)) / data.std(axis=0)\n return nd.array(data[:, :-1]), nd.array(data[:, -1])", "title": "" }, { "docid": "a93771aed5c8dd827cdcff5fe93229cf", "score": "0.4843774", "text": "def Info( self ):\n # Lattice\n\n def beamlabels( V0, w , Type):\n if len(np.unique(V0))==1:\n Vlabel = '$V_{%s}=%.1fE_{R}$' % (Type, V0[0] )\n else:\n Vlabel = '$V_{%sx}=%.1f, V_{%sy}=%.1f, V_{%sz}=%.1f$' % \\\n (Type,V0[0],Type,V0[1],Type,V0[2] ) \n\n \n waists = sum( w, () ) \n if len( np.unique( waists )) == 1: \n wlabel = '$w_{%s}=%d\\,\\mu\\mathrm{m}$' % (Type, w[0][0] )\n else:\n coords = ['x','y','z']\n wlabel = ''\n for i,wp in enumerate(w):\n wlabel += '$w_{%s%s}=(%d,%d)\\,\\mu\\mathrm{m}%' % \\\n (Type, coord[i], wp[0], wp[1] ) \n if i < 2 : wlabel += '$\\mathrm{,}\\ $' \n\n return Vlabel + '$\\mathrm{,}\\ $' + wlabel\n \n Llabel = beamlabels( self.s0, self.w, 'L') \n Glabel = beamlabels( self.g0, self.GRw, 'G')\n\n return Llabel, Glabel", "title": "" }, { "docid": "ad6b214f259ce81ecba4fd00c729e00b", "score": "0.48368087", "text": "def reflect(filename):\n img = SimpleImage('images/mt-rainier.jpg')\n img_blank = SimpleImage.blank(img.width, img.height * 2)\n for x in range(img.width):\n for y in range(img.height):\n pixel = img.get_pixel(x, y)\n p1 = img_blank.get_pixel(x, y)\n p2 = img_blank.get_pixel(x, img_blank.height - 1 - y)\n p1.red = pixel.red\n p1.green = pixel.green\n p1.blue = pixel.blue\n p2.red = pixel.red\n p2.green = pixel.green\n p2.blue = pixel.blue\n return img_blank", "title": "" }, { "docid": "bef7b73da282dca268952b37e87bfd92", "score": "0.48239547", "text": "def printSimulInfo(fname, ntk_info, ext_info, d_noise_std=0):\n data = dict()\n\n \"\"\" Print Summary Info \"\"\"\n data[\"NumofCells\"] = len(ntk_info[\"cell_types\"])\n data[\"NumofSyns\"] = 0\n data[\"NumofPinputs\"] = 0\n data[\"NumOfPinputSynapses\"] = 0\n\n \"\"\" Print Cell Info \"\"\"\n N = len(ntk_info[\"cell_types\"])\n cell_infos = []\n for i in range(N):\n info = dict()\n\n c = ntk_info[\"cell_types\"][i]\n params = ntk_info[\"cell_params\"][c]\n \n info[\"type\"] = ntk_info[\"cell_type_names\"][c]\n info[\"tau\"] = params[\"tau\"]\n info[\"r\"] = params[\"r\"]\n info[\"e\"] = params[\"e\"]\n info[\"v\"] = np.random.normal(loc=-65, scale=1)\n info[\"vth\"] = params[\"vth\"]\n info[\"vahp\"] = params[\"vahp\"]\n info[\"vmax\"] = params[\"vmax\"]\n info[\"t_refrac\"] = params[\"t_refrac\"]\n\n cell_infos.append(info)\n data[\"Cells\"] = cell_infos\n\n \"\"\" Print Synapse Info \"\"\"\n # convert params\n for param in ntk_info[\"syn_params\"]:\n convert_syn_param(param)\n\n syn_infos = []\n for i in range(N):\n c_pre = ntk_info[\"cell_types\"][i]\n for j in ntk_info[\"adj_list\"][i]:\n c_post = ntk_info[\"cell_types\"][j]\n\n tp = ntk_info[\"syn_types\"][c_pre][c_post]\n params = ntk_info[\"syn_params\"][tp]\n\n info = dict()\n \n info[\"type\"] = ntk_info[\"syn_type_names\"][tp]\n info[\"tau1\"] = params[\"tau1\"]\n info[\"tau2\"] = params[\"tau2\"]\n info[\"A\"] = params[\"A\"]\n info[\"gmax\"] = params[\"gmax\"]\n info[\"e\"] = params[\"e\"]\n 
info[\"d\"] = params[\"d\"] + np.random.normal(loc=0, scale=d_noise_std)\n info[\"id_pre\"] = i\n info[\"id_post\"] = j\n\n syn_infos.append(info)\n\n data[\"NumofSyns\"] = len(syn_infos)\n data[\"Synapses\"] = syn_infos\n\n \"\"\" Print Stim Info - Poisson input \"\"\"\n Next = len(ext_info[\"ext_types\"])\n input_infos = []\n for i in range(Next):\n\n info = dict()\n tp = ext_info[\"ext_types\"][i]\n\n info[\"type\"] = ext_info[\"ext_type_names\"][tp]\n info[\"tstart\"] = t_infos[i][0]\n info[\"tend\"] = t_infos[i][1]\n info[\"f\"] = f[i]\n\n input_infos.append(info)\n\n data[\"NumofPinputs\"] = len(input_infos)\n data[\"Pinputs\"] = input_infos\n\n \"\"\" Print Stim Synapse info\"\"\"\n # convert params\n for param in ext_info[\"input_params\"]:\n convert_syn_param(param)\n\n input_syn_infos = []\n for i in range(Next):\n c_pre = ext_info[\"ext_types\"][i]\n for j in ext_info[\"adj_list\"][i]:\n c_post = ntk_info[\"cell_types\"][j]\n\n tp = ext_info[\"syn_types\"][c_pre][c_post]\n params = ext_info[\"input_params\"][tp]\n\n info = dict()\n\n info[\"type\"] = ext_info[\"syn_type_names\"][tp]\n info[\"tau1\"] = params[\"tau1\"]\n info[\"tau2\"] = params[\"tau2\"]\n info[\"A\"] = params[\"A\"]\n info[\"gmax\"] = params[\"gmax\"]\n info[\"e\"] = params[\"e\"]\n info[\"d\"] = params[\"d\"]\n info[\"id_pre\"] = i\n info[\"id_post\"] = j\n\n input_syn_infos.append(info)\n \n data[\"NumOfPinputSynapses\"] = len(input_syn_infos)\n data[\"PinputSynapses\"] = input_syn_infos\n\n json_data = json.dumps(data)\n \n with open(fname, \"w\", encoding=\"utf-8\") as fid:\n json.dump(data, fid, ensure_ascii=False, indent=\"\\t\")", "title": "" }, { "docid": "615bf674a050a95bd0f42c2378a4c2ee", "score": "0.48179668", "text": "def clean_ice_field_vs_DISORT_NIR():\n\n densities = [400,450,500,550,600,650,700,750,800,850,900]\n\n grainsize = [500,700,900,1100,1300,1500,2000,3000,5000,8000,10000,15000,20000,25000,30000]\n\n\n CIsites = ['21_7_S4', '13_7_SB3', '15_7_S4', '15_7_SB1', '15_7_SB5', '21_7_S2',\n '21_7_SB3', '22_7_S2', '22_7_S4', '23_7_SB1', '23_7_SB2', '23_7_S4',\n 'WI_1', 'WI_2', 'WI_4', 'WI_5', 'WI_6', 'WI_7', 'WI_9', 'WI_10', 'WI_11',\n 'WI_12', 'WI_13', '27_7_16_SITE3_WHITE1', '27_7_16_SITE3_WHITE2',\n '27_7_16_SITE2_ICE2', '27_7_16_SITE2_ICE4', '27_7_16_SITE2_ICE6',\n '5_8_16_site2_ice1', '5_8_16_site2_ice2', '5_8_16_site2_ice3',\n '5_8_16_site2_ice4', '5_8_16_site2_ice6', '5_8_16_site2_ice8',\n '5_8_16_site3_ice1', '5_8_16_site3_ice4', \n 'fox11_25_',\t'fox11_2_', 'fox11_7_', 'fox11_8_', 'fox13_1b_', 'fox13_2_',\n 'fox13_2a_', 'fox13_2b_', 'fox13_3_', 'fox13_3a_',\n 'fox13_3b_', 'fox13_6a_', 'fox13_7_', 'fox13_7a_', 'fox13_7b_', 'fox13_8_',\t\n 'fox13_8a_', 'fox13_8b_', 'fox14_2b_', 'fox14_3_', 'fox14_3a_', 'fox17_8_',\n 'fox17_8a_', 'fox17_8b_', 'fox17_9b_', 'fox24_17_']\n\n\n spectra = pd.read_csv('/home/joe/Code/Remote_Ice_Surface_Analyser/Training_Data/HCRF_master_16171819.csv')\n LUT = np.load('/home/joe/Code/Remote_Ice_Surface_Analyser/Process_Dir/LUT_cz05.npy')\n \n params = []\n # reformat LUT: flatten LUT from 3D to 2D array with one column per combination\n # of RT params, one row per wavelength\n\n wavelengths = np.arange(0.3,5,0.01)\n \n LUT = LUT.reshape(len(densities)*len(grainsize),len(wavelengths))\n\n LUT = LUT[:,60:80] # reduce wavelengths to NIR\n\n OutDF = pd.DataFrame(columns=['colname','densities','grainsize','min_error'],index=None)\n\n densitylist = []\n grainlist = []\n errorlist = []\n collist = []\n\n\n for i in np.arange(0,len(spectra.columns),1):\n\n if (i != 
'wavelength') & (spectra.columns[i] in CIsites):\n\n colname = spectra.columns[i]\n spectrum = np.array(spectra[colname])\n spectrum = spectrum[550:750:10]\n error_array = abs(LUT - spectrum)\n mean_error = np.mean(error_array,axis=1)\n index = np.argmin(mean_error)\n min_error= np.min(mean_error)\n param_idx = np.unravel_index(index,[len(densities),len(grainsize)])\n\n densitylist.append(densities[param_idx[0]])\n grainlist.append(grainsize[param_idx[1]])\n errorlist.append(min_error)\n collist.append(colname)\n\n OutDF['colname'] = collist\n OutDF['densities'] = densitylist\n OutDF['grainsize'] = grainlist\n OutDF['min_error'] = errorlist\n\n OutData.to_csv('/home/joe/Code/Remote_Ice_Surface_Analyser/RISA_OUT/Figures_and_Tables/field_disort_NIR_comparison.csv')\n \n \n return", "title": "" }, { "docid": "8e5fc717b01dc5079d46db3766348e24", "score": "0.4815894", "text": "def digitisation_noise(info_dict):\n dig_noise = info_dict['cameras'][info_dict['channel']]['gain'] /np.sqrt(12.)\n info_dict['dig_noise']=dig_noise\n return info_dict", "title": "" }, { "docid": "a223ad9a17d82461985aaf7a347dc597", "score": "0.48151997", "text": "def pressureDefT(topo):\n def pressureDefPre(topo): # Count how much pb and pf was sampled from topo\n pfNN = np.zeros(topo.shape[1])\n pbNN = np.zeros(topo.shape[1])\n\n for j in range(topo.shape[1]):\n for i in range(topo.shape[0]):\n try:\n if np.isnan(topo[i,j]) and ~np.isnan(topo[i-1,j]): # fontface\n pfNN[j] +=1\n #print('cao')\n if np.isnan(topo[i,j]) and ~np.isnan(topo[i+1,j]): # back face\n pbNN[j] +=1\n except:\n 0 \n return(pfNN,pbNN)\n\n \n topo = np.transpose(topo)\n pfNN,pbNN = pressureDefPre(topo)\n\n # Initialization\n pf = []\n pb = []\n pfO = []\n pbO = []\n distO = []\n pfN = 0\n pbN = 0\n dist = []\n o1 = 0; o2 = 0; o3 = 0\n for j in range(topo.shape[1]):\n if ~np.isnan(topo[0,j]) and ~np.isnan(topo[-1,j]): # no B grid in the first and the end - normal row\n for i in range(topo.shape[0]):\n try:\n if np.isnan(topo[i,j]) and ~np.isnan(topo[i-1,j]): # fontface\n pf.append(topo[i-1,j])\n pfN += 1\n itmp = i\n if np.isnan(topo[i,j]) and ~np.isnan(topo[i+1,j]): # back face\n pb.append(topo[i+1,j])\n pbN += 1\n dist.append(i-itmp+1) # index of the paired pf and pb\n except:\n 0\n \n if ~np.isnan(topo[0,j]) and np.isnan(topo[-1,j]): # no B grid in the first but the end\n count = 0\n #print('Outlier 1 found at' + str(j) + ' th row')\n o1+=1\n for i in range(topo.shape[0]):\n\n try:\n if np.isnan(topo[i,j]) and ~np.isnan(topo[i-1,j]): # fontface normal except for the last\n if count == pfNN[j]-1: # if that's the last frontal face \n \n pf.append(topo[i-1,j])\n pb.append(topo[0,j])\n pfN += 1\n pbN += 1\n dist.append(topo.shape[0]-i) # should be the length of the last continued building grids\n #print(topo.shape[0]-i)\n \n else:\n pf.append(topo[i-1,j])\n pfN += 1\n itmp = i\n count += 1 \n if np.isnan(topo[i,j]) and ~np.isnan(topo[i+1,j]): # back face normal\n pb.append(topo[i+1,j])\n pbN += 1\n dist.append(i-itmp+1) # index of the paired pf and pb\n \n except:\n 1\n \n if np.isnan(topo[0,j]) and ~np.isnan(topo[-1,j]): # no B grid in the end but the first\n \n first = True\n #print('Outlier 2 found at' + str(j) + ' th row')\n o2+=1\n \n for i in range(topo.shape[0]):\n try:\n if np.isnan(topo[i,j]) and ~np.isnan(topo[i-1,j]): # fontface normal\n #print('sampling pf'+str(i)+str(j))\n pf.append(topo[i-1,j])\n pfN += 1\n itmp = i\n \n if np.isnan(topo[i,j]) and ~np.isnan(topo[i+1,j]): # back face normal except for the first\n if first: \n #print('sampling 
pb'+str(i)+str(j))\n pb.append(topo[i+1,j])\n pbN += 1\n dist.append(i) # grid count to the end - distance fixed\n first = False\n else:\n pb.append(topo[i+1,j])\n #print('sampling pb'+str(i)+str(j))\n pbN += 1\n dist.append(i-itmp+1) # index of the paired pf and pb \n except:\n 2\n\n if np.isnan(topo[0,j]) and np.isnan(topo[-1,j]): # B grid in the end and the first, record them in a seperate array\n count = 0\n first = True\n o3+=1\n #print('Outlier 3 found at' + str(j) + ' th row')\n for i in range(topo.shape[0]):\n \n try:\n if np.isnan(topo[i,j]) and ~np.isnan(topo[i-1,j]): # fontface normal except for the last\n \n if count == pfNN[j]-1: # if that's the last frontal face\n pfO.append(topo[i-1,j])\n upSize = topo.shape[0]-i\n \n else:\n pf.append(topo[i-1,j])\n pfN += 1\n itmp = i\n count += 1\n except:\n 3 \n for i in range(topo.shape[0]):\n try:\n if np.isnan(topo[i,j]) and ~np.isnan(topo[i+1,j]): # back face normal except for the first\n \n if first:\n #print('qppen')\n pbO.append(topo[i+1,j])\n lowSize = i\n distO.append(lowSize+upSize+1) # index of the paired pf and pb\n\n first = False\n \n else:\n pb.append(topo[i+1,j])\n pbN += 1\n dist.append(i-itmp+1) # index of the paired pf and pb\n \n except:\n 3\n pf.extend(pfO)\n pb.extend(pbO)\n dist.extend(distO) \n return(np.array(pf),np.array(pb),np.array(dist))", "title": "" }, { "docid": "f356aa7edd7c0572c0b9e7c5d9fb387d", "score": "0.48118234", "text": "def plot_noise_experiment(output_table):\n\n # \"real\" values from Quadrant I for comparison - these are plotted as dotted lines\n real_nclouds = 389\n real_totalmass = 9.69\n\n real_larson_A = 0.52\n err_larson_A = 0.08\n real_larson_beta = 0.51\n err_larson_beta = 0.04\n\n real_mspec_M0 = 8.19\n err_mspec_M0 = 2.57\n real_mspec_gamma = -1.59\n err_mspec_gamma = 0.13\n\n if type(output_table) is dict:\n output_table = build_table_from_output_dict(output_table)\n\n fig = plt.figure(figsize=(8,8))\n\n ax_nclouds = fig.add_subplot(321)\n ax_totalmass = fig.add_subplot(322, sharex=ax_nclouds)\n\n ax_larson_A = fig.add_subplot(323, sharex=ax_nclouds)\n ax_larson_beta = fig.add_subplot(324, sharex=ax_nclouds)\n\n ax_mspec_M0 = fig.add_subplot(325, sharex=ax_nclouds)\n ax_mspec_gamma = fig.add_subplot(326, sharex=ax_nclouds)\n\n noise_added = output_table['noise_added']\n\n ax_nclouds.plot(noise_added, output_table['n_clouds'], 'ko', ms=5)\n ax_nclouds.set_ylim(0, 450)\n ax_nclouds.set_yticks(np.linspace(0,400,5))\n # ax_nclouds.set_ylabel(\"Number of clouds\")\n\n ax_nclouds.text(0.025, 200, \"(a) Number of clouds\", fontsize=14, family='serif')\n\n ax_nclouds.plot([0, 0.5], [real_nclouds]*2, 'b--', lw=0.5, scalex=False, scaley=False)\n\n\n ax_nclouds.set_xticks([0]+list(set(noise_added) - set([0.045])))\n ax_nclouds.set_xlim(0, 0.4)\n\n\n ax_totalmass.plot(noise_added, output_table['total_mass']/1e7, 'ko', ms=5)\n ax_totalmass.set_ylim(0, 11)\n # ax_totalmass.set_ylabel(\"Total mass / $10^7 M_\\odot$\")\n\n ax_totalmass.text(0.025, 3, \"(b) Total mass of clouds\\n$(\\\\times 10^7 M_\\odot)$\", fontsize=14, family='serif')\n\n ax_totalmass.plot([0, 0.5], [real_totalmass]*2, 'b--', lw=0.5, scalex=False, scaley=False)\n\n\n ax_larson_A.plot(noise_added, output_table['inner_larson_A'], 'ko', ms=5)\n ax_larson_A.set_ylim(0, 0.65)\n\n ax_larson_A.plot([0, 0.5], [real_larson_A]*2, 'b--', lw=0.5, scalex=False, scaley=False)\n ax_larson_A.plot([0, 0.5], [real_larson_A+err_larson_A]*2, 'k:', lw=0.5, scalex=False, scaley=False)\n ax_larson_A.plot([0, 0.5], [real_larson_A-err_larson_A]*2, 'k:', 
lw=0.5, scalex=False, scaley=False)\n\n ax_larson_A.text(0.025, 0.1, \"(c) Size-linewidth $A$\\nfrom $\\\\sigma_v = A \\\\times R^\\\\beta$\", fontsize=14, family='serif')\n\n\n ax_larson_beta.plot(noise_added, output_table['inner_larson_beta'], 'ko', ms=5)\n ax_larson_beta.set_ylim(0, 0.6)\n\n ax_larson_beta.plot([0, 0.5], [real_larson_beta]*2, 'b--', lw=0.5, scalex=False, scaley=False)\n ax_larson_beta.plot([0, 0.5], [real_larson_beta+err_larson_beta]*2, 'k:', lw=0.5, scalex=False, scaley=False)\n ax_larson_beta.plot([0, 0.5], [real_larson_beta-err_larson_beta]*2, 'k:', lw=0.5, scalex=False, scaley=False)\n\n ax_larson_beta.text(0.025, 0.1, \"(d) Size-linewidth $\\\\beta$\\nfrom $\\\\sigma_v = A \\\\times R^\\\\beta$\", fontsize=14, family='serif')\n\n\n ax_mspec_M0.plot(noise_added, output_table['inner_M0']/1e6, 'ko', ms=5)\n ax_mspec_M0.set_ylim(0, 12)\n\n ax_mspec_M0.plot([0, 0.5], [real_mspec_M0]*2, 'b--', lw=0.5, scalex=False, scaley=False)\n ax_mspec_M0.plot([0, 0.5], [real_mspec_M0+err_mspec_M0]*2, 'k:', lw=0.5, scalex=False, scaley=False)\n ax_mspec_M0.plot([0, 0.5], [real_mspec_M0-err_mspec_M0]*2, 'k:', lw=0.5, scalex=False, scaley=False)\n\n ax_mspec_M0.set_xlabel(\"Noise added (K)\", family='serif', fontsize=14)\n ax_mspec_M0.text(0.025, 1, \"(e) Mass spectrum\\ntruncation mass $M_0$\\n$(\\\\times 10^6 M_\\odot)$\", fontsize=14, family='serif')\n\n\n ax_mspec_gamma.plot(noise_added, output_table['inner_gamma'], 'ko', ms=5)\n ax_mspec_gamma.set_ylim(-2, -1.4)\n\n ax_mspec_gamma.plot([0, 0.5], [real_mspec_gamma]*2, 'b--', lw=0.5, scalex=False, scaley=False)\n ax_mspec_gamma.plot([0, 0.5], [real_mspec_gamma+err_mspec_gamma]*2, 'k:', lw=0.5, scalex=False, scaley=False)\n ax_mspec_gamma.plot([0, 0.5], [real_mspec_gamma-err_mspec_gamma]*2, 'k:', lw=0.5, scalex=False, scaley=False)\n\n ax_mspec_gamma.text(0.025, -1.9, \"(f) Mass spectrum slope $\\\\gamma$\", fontsize=14, family='serif')\n\n\n return fig", "title": "" }, { "docid": "ea2c113e51ccc44f9fe86bfe197314c2", "score": "0.47935793", "text": "def get_cube_names():\n output = os.path.join(wdir, \"info.txt\")\n if os.path.exists(output):\n table = Table.read(output, format=\"ascii\")\n return table\n filenames = [_ for _ in os.listdir(wdir) if _.endswith(\".fits\") and\n _.startswith(\"ADP\")]\n ids, dtypes, fields = [], [], []\n for fname in filenames:\n obj = fits.getval(fname, \"OBJECT\", 0)\n id = obj.split(\"3311_\")[1].split()\n dtype = \"img\" if len(id) == 2 else \"cube\"\n ids.append(id[0])\n dtypes.append(dtype)\n table = Table([ids, dtypes, filenames], names=[\"id\", \"dtype\", \"filename\"])\n ids, cubes, imgs = [], [], []\n for id in np.unique(table[\"id\"]):\n idx = np.where(table[\"id\"] == id)\n t = table[idx]\n idx_cube = np.where(t[\"dtype\"]==\"cube\")[0][0]\n idx_img = np.where(t[\"dtype\"]==\"img\")[0][0]\n ids.append(id)\n cubes.append(t[\"filename\"][idx_cube])\n imgs.append(t[\"filename\"][idx_img])\n table = Table([ids, cubes, imgs], names=[\"id\", \"cube\", \"img\"])\n table.write(output, format=\"ascii\", overwrite=True)\n return table", "title": "" }, { "docid": "b282ae8f5e7c76c68f3178120e55666f", "score": "0.479043", "text": "def calculate_pseudochrom_traces(\n samples,\n ):\n \n \n for sample in samples:\n trace = []\n for id, row in samples[sample].iterrows():\n rt_start = float(row[\"rt_start\"])\n rt_stop = float(row[\"rt_stop\"])\n rt = float(row[\"retention_time\"])\n rt_range = float(row[\"rt_stop\"]) - float(row[\"rt_start\"])\n fwhm = float(row[\"fwhm\"])\n norm_int = 
float(row[\"norm_intensity\"])\n\n #fwhm cannot be larger than the rt_range. If so, set to rt_range\n if fwhm > rt_range:\n fwhm = rt_range\n\n #calculate start of fwhm. if fwhm_start would be less than\n #rt_start, set fwhm_start to rt_start and add fwhm\n fwhm_start = rt - (0.5 * fwhm)\n if fwhm_start < rt_start:\n fwhm_start = rt_start\n\n #calculate start of fwhm. if stop would be greater than\n #rt_stop, set to fwhm_stop to rt_stop and move the fwhm_start\n fwhm_stop = fwhm_start + fwhm\n if fwhm_stop > rt_stop:\n fwhm_stop = rt_stop\n fwhm_start = fwhm_stop - fwhm\n\n #additional data point (left) to make chrom look nicer\n xG = fwhm_start - (0.5 * (fwhm_start - rt_start))\n if xG < rt_start:\n xG = rt_start\n\n #additional data point (right) to make chrom look nicer\n xK = fwhm_stop + (0.5 * (rt_stop - fwhm_stop))\n if xK > rt_stop:\n xK = rt_stop\n elif xK <= fwhm_stop:\n xK = fwhm_stop + (0.5 * (rt_stop - fwhm_stop))\n\n trace.append([\n [rt_start,\n xG,\n fwhm_start,\n rt,\n fwhm_stop,\n xK,\n rt_stop,\n ],\n [0,\n (norm_int * 0.15),\n (norm_int * 0.5),\n norm_int,\n (norm_int * 0.5),\n (norm_int * 0.15),\n 0,],\n ])\n \n #append to dataframe\n samples[sample]['pseudo_chrom_trace'] = trace\n \n return samples", "title": "" }, { "docid": "adaccff572636da71ce64fe17ad486c3", "score": "0.47902665", "text": "def active6(w):\n points = []\n for x in range(5):\n for y in range(5):\n points.append((x, y))\n c = 0\n # character of the vortex\n Cv = -1\n trails = []\n for (x0, y0) in points:\n for (x1, y1) in points:\n if x0 != x1 :\n continue\n for (x2, y2) in points:\n for (x3, y3) in points:\n if x2 != x3 :\n continue\n for (x4, y4) in points:\n for (x5, y5) in points:\n # from I, II and III\n if x0 == x1 and x2 == x3 and x4 == x5\\\n and y1 == y2 and y3 == y4 and y5 == y0:\n # z0 == z1 and z2 == z3 and z4 == z5\n # from IV\n # dv = rho(x0, y0, w) + rho(x2, y2, w) + rho(x4, y4, w) + rho(x6, y6, w) - (rho(x1, y1, w) + rho(x3, y3, w) + rho(x5, y5, w) + rho(x7, y7, w))\n dv = rho(x0, y0, w) + rho(x2, y2, w) + rho(x4, y4, w) - (rho(x1, y1, w) + rho(x3, y3, w) + rho(x5, y5, w))\n dv = abs(dv)\n if dv % w == 0:\n if pi(x1, y1, 0, w)[0] == pi(x2, y2, 0, w)[0] and pi(x3, y3, 0, w)[0] == pi(x4, y4, 0, w)[0] and pi(x5, y5, 0, w)[0] == pi(x0, y0, 0, w)[0]:\n for z0 in range(0, 5):\n # choose z0 freely\n z2 = (z0 + rho(x1, y1, w) - rho(x2, y2, w)) % w\n z4 = (z2 + rho(x3, y3, w) - rho(x4, y4, w)) % w\n # verification step\n if z0 != (z4 + rho(x5, y5, w) - rho(x0, y0, w)) % w:\n continue\n if dv > 0:\n Cv = max(Cv, log2(dv))\n a00 = (x0, y0, z0)\n a01 = (x1, y1, z0)\n a02 = (x2, y2, z2)\n a03 = (x3, y3, z2)\n a04 = (x4, y4, z4)\n a05 = (x5, y5, z4)\n if not allunique(a00, a01, a02, a03, a04, a05):\n continue\n c += 1\n # print(a00, a01, a02, a03, a04, a05)\n trails.append( [a00, a01, a02, a03, a04, a05] )\n print(\"No. 
of trails = \", c, \", Character of Vortex : \", Cv)\n return trails", "title": "" }, { "docid": "244b9bc3dc6c5cd26ee2b41077739c9e", "score": "0.47879788", "text": "def log_white_noise(self):\n \n self.print_header( \"Testing white noise\" )\n\n #record values for 5 seconds\n readings = self.take_mag_measurements( 5 )\n \n #log matrix and finish procedure\n self.logger.put( self.avg_list( readings ) )\n \n self.print_header( \"Whitespace vector matrix created\" )", "title": "" }, { "docid": "7aa13f8c8eae281ec2bbf235bb32547b", "score": "0.47845694", "text": "def find_NN_properties(tdata,arg):\n\n\tNN_sources = np.invert(np.isnan(tdata['new_NN_RA']))\n\ttdata_NN = tdata[NN_sources]\n\n\tx = []\n\n\tmosaicid_prev = 'initializing'\n\tfor i in range(0,len(tdata)):\n\t\tif NN_sources[i]:\n\t\t\ttry:\n\t\t\t\tmosaicid = tdata['Mosaic_ID_2'][i]\n\t\t\texcept KeyError: # No Mosaic_ID_2 because no matches yet, so have to use this\n\t\t\t\tmosaicid = tableMosaic_to_full(tdata['Mosaic_ID'][i])\n\n\n\t\t\tif mosaicid != mosaicid_prev: # only open new file if we need to\n\t\t\t\tNNfile = '../source_filtering/NN/'+mosaicid+'NearestNeighbours_efficient_spherical2.fits'\n\t\t\t\tNNfile = Table(fits.open(NNfile)[1].data)\n\t\t\t\tmosaicid_prev = mosaicid\n\t\t\tx.append(NNfile[arg][tdata['new_NN_index'][i]])\n\t\telse:\n\t\t\tx.append(np.nan)\n\t\n\treturn x", "title": "" }, { "docid": "8f9ff3ead5aaa80d70b48f9866a83194", "score": "0.47812438", "text": "def cond_info_c(pot, x_nds, y_nds, verbose=False):\n if x_nds is None:\n x_nds = []\n if y_nds is None:\n y_nds = []\n\n xy_nds = x_nds + y_nds\n pot_xy = pot.get_new_marginal(xy_nds)\n hxy = Entropy.ent_c(pot_xy, xy_nds)\n hy = Entropy.ent_c(pot_xy, y_nds)\n info = hxy - hy\n if verbose:\n print('\\ncond_info for', [z.name for z in x_nds], '|',\n [z.name for z in y_nds])\n print(info)\n return info", "title": "" }, { "docid": "473d4894179d0c11e12bbf6424763af1", "score": "0.47807828", "text": "def draw_constants(self):\n\t\t\n\t\ttru_type = 1 # Sersic\n\t\tsnc_type = self.snc_type # The type of shape noise cancellation. 
0 means none, n means n-fold\n\t\t\n\t\ttru_sky_level = (pixel_scale)**2 * (exptime/gain) * 10**(-0.4*(skyback - zeropoint)) # in ADU, just for generating noise, will not remain in the image\n\t\t\n\t\ttru_gain = gain\n\t\ttru_read_noise = ron # in e-, given that gain is > 0\n\t\t\n\t\ttru_pixel = -1.0 # If positive, adds an extra convolution by that many pixels to the simulation process\n\t\t\n\t\treturn {\"snc_type\":snc_type, \"tru_type\":tru_type, \"tru_sky_level\":tru_sky_level,\n\t\t\t\"tru_gain\":tru_gain, \"tru_read_noise\":tru_read_noise, \"tru_pixel\":tru_pixel,\n\t\t\t}", "title": "" }, { "docid": "5f12af7b86459fbfa827abf4c15da57b", "score": "0.47735718", "text": "def make_noise(self):\n\n print(self.noise)", "title": "" }, { "docid": "5d31cbc34ec859a7bbdeebcf1f8b3042", "score": "0.47721866", "text": "def PrintInfo():\n\t#Atmosphere photon density\n\tT_eff = 5770\n\tN_photon_atmosphere = (20/2*np.pi)*T_eff**3\n\n\tprint(\"C = %.3E\" %Estimate_C())\n\n\tprint (\"Photon density at deepest model location: %g\" % N_Photon(np.amin(h)))\n\tprint (\"Hydrogen density at deepest model location: %g\" % nhyd[np.argwhere(h == np.amin(h))[0][0]])\n\tprint (\"Photon density at highest model location: %g\" %N_Photon(np.amax(h)))\n\tprint (\"Photon density at highest model location (analytic): %g\" %N_photon_atmosphere)\n\tprint (\"Hydrogen density at highest model location: %g\" % nhyd[np.argwhere(h == np.amax(h))[0][0]])", "title": "" }, { "docid": "018ec8ebc2db365dde91915b93c096a4", "score": "0.47710267", "text": "def run(self, data):\n # First we need:\n # 1) The TOD data\n # 2) The feature bits to select just the observing period\n # 3) Elevation to remove the atmospheric component\n tod = data['level2/averaged_tod'][...]\n az = data['level1/spectrometer/pixel_pointing/pixel_az'][...]\n el = data['level1/spectrometer/pixel_pointing/pixel_el'][...]\n feeds = data['level1/spectrometer/feeds'][:]\n feat = np.log(data['level1/spectrometer/features'][...])/np.log(2)\n\n\n\n # Looping over Feed - Band - Channel, perform 1/f noise fit\n nFeeds, nBands, nChannels, nSamples = tod.shape\n self.opacity = np.zeros((nFeeds, nBands, nChannels))\n self.opacity_err = np.zeros((nFeeds, nBands, nChannels))\n self.Tzen = np.zeros((nFeeds, nBands, nChannels))\n self.Tzen_err = np.zeros((nFeeds, nBands, nChannels))\n\n pbar = tqdm(total=((nFeeds-1)*nBands*nChannels))\n\n\n for ifeed in range(nFeeds):\n if feeds[ifeed] == 20:\n continue\n\n skydip_select = (el[ifeed]>self.dipLo) & (el[ifeed]<self.dipHi) & (feat == 8)\n pyplot.imshow(tod[0,0,:,:],aspect='auto')\n pyplot.show()\n for iband in range(nBands):\n\n for ichan in range(nChannels):\n x = 1/(np.cos(el[ifeed,skydip_select[ifeed]]*(np.pi/180)))\n y = tod[ifeed,iband,ichan,skydip_select[ifeed]]\n\n total = np.shape(x)[0]\n boot_no = int(np.rint(total*0.9))\n coeffs = np.zeros((self.poly_iter,2))\n coeffs[:] = np.nan\n if np.all(np.isnan(y))==False:\n for n in range(self.poly_iter):\n boot_sel = np.random.randint(0,high=total,size=boot_no)\n try:\n coeffs[n] = np.polyfit(x[boot_sel],y[boot_sel],1)\n except:\n pass\n\n avg = np.nanmean(coeffs,axis=1)\n std = np.nanstd(coeffs,axis=1)\n else:\n avg = np.asarray((np.nan,np.nan))\n std = np.asarray((np.nan,np.nan))\n\n pyplot.plot(x,y,',')\n pyplot.plot(x,np.poly1d(avg)(x))\n pyplot.show()\n #assume Tatm=300K\n self.opacity[ifeed,iband,ichan] = avg[1]/300#K\n self.opacity_err[ifeed,iband,ichan] = std[1]/300#K\n self.Tzen[ifeed,iband,ichan] = avg[0]\n self.Tzen_err[ifeed,iband,ichan] = std[0]\n\n 
pbar.update(1)\n\n pbar.close()", "title": "" }, { "docid": "a8ec3fba5f621c2456d5ab396a364030", "score": "0.4770272", "text": "def sdss_dark_variance(filter, camcol, run):\n dvTable1 = Table.read(\"\"\"\n camcol u g r i z\n 1 9.61 15.6025 1.8225 7.84 0.81\n 2 12.6025 1.44 1.00 5.76 1.0\n 3 8.7025 1.3225 1.3225 4.6225 1.0\n 4 12.6025 1.96 1.3225 6.25 9.61\n 5 9.3025 1.1025 0.81 7.84 1.8225\n 6 7.0225 1.8225 0.9025 5.0625 1.21\n \"\"\", format='ascii')\n dvTable2 = Table.read(\"\"\"\n camcol u g r i z\n 1 9.61 15.6025 1.8225 7.84 0.81\n 2 12.6025 1.44 1.00 6.25 1.0\n 3 8.7025 1.3225 1.3225 4.6225 1.0\n 4 12.6025 1.96 1.3225 7.5625 12.6025\n 5 9.3025 1.1025 0.81 7.84 2.1025\n 6 7.0225 1.8225 0.9025 5.0625 1.21\n \"\"\", format='ascii')\n if run < 1500:\n return dvTable1[camcol-1][filter]\n else:\n return dvTable2[camcol-1][filter]", "title": "" }, { "docid": "3dc2a7af541582fbc6d44a8c425847bf", "score": "0.47668868", "text": "def clean_synthetic(num_sens):\n df = pd.read_csv('dataset/synthetic.csv')\n df = df.dropna()\n y_col = df.shape[1]-1\n y = df.iloc[:, y_col]\n df = df.iloc[:, 0:y_col]\n x_prime = df.iloc[:, 0:num_sens]\n return df, x_prime, y", "title": "" }, { "docid": "54dcaea8522261266910f85783b6d4db", "score": "0.47647256", "text": "def getAnisotropicReflectivity(self):\n \n pass", "title": "" }, { "docid": "1ead83792c5c73dc987323799ee1c422", "score": "0.47638756", "text": "def lightSpecular(self):\n \n pass", "title": "" }, { "docid": "10adc2d8c25085a3e34be5045c5a8ab1", "score": "0.47635284", "text": "def getphit(zcen=0.45,addcolor=0):\r\n # now need to find right redshift and type\r\n # first redshift\r\n zrange = N.array([0.2,0.5,0.75,1.0,1.25, 1.5,2.0,2.5,3.0])\r\n jjz = N.nonzero(zcen>=zrange)[0]\r\n if ((jjz.size==0)|(zcen>3)):\r\n return(N.array([1,1]),N.array([1,1]),0.,0.,0.,0.)\r\n if (jjz.size>1):\r\n jjz = jjz.max()\r\n zmin = zrange[jjz]\r\n zmax = zrange[jjz+1]\r\n print \"using ZFOURGE range %3.2f < z < %3.2f \"%(zrange[jjz],zrange[jjz+1])\r\n print \"Bruzual Charlot used to calculate M*, solar Z, Av in [0,4] \"\r\n \r\n colornamelist =(\"all\",\"quiescent\",\"starforming\")\r\n ff = open(\"smf_zfourge_%s_supergrid.txt\"%(colornamelist[addcolor]))\r\n phitom = N.loadtxt(ff,usecols=(0,1,3,5,6,7))\r\n ff.close()\r\n #zlo0 zhi 1 logm2 logphi3 logphim4 logphip5 \r\n jj = N.nonzero((zcen> phitom[:,0])&(zcen<=phitom[:,1]))[0]\r\n phitom = phitom[jj,:] # now have right redshift and right color sel\r\n logm = phitom[:,2]\r\n logphi = phitom[:,3]\r\n logphim = phitom[:,4]\r\n logphip = phitom[:,5]\r\n \r\n phi = 10**logphi\r\n phip = phi*(10**logphip-1)\r\n phim = phi*(1-10**(-logphim))\r\n return(logm,phi,phip,phim,zmin,zmax)", "title": "" }, { "docid": "79e86cbaba8cc819417bbe8cd76f5c86", "score": "0.47572953", "text": "def calculate_plate_summaries(self):\n channel_map = self.cell_metadata.groupby(TenX_Runs.SAMPLE_MAPPING)\n\n total_reads = self.mapping_stats['Number of Reads']\n\n # percent_rn45s = pd.Series(self.genes['Rn45s'].todense()).groupby(\n # self.cell_metadata[TenX_Runs.SAMPLE_MAPPING]\n # ).sum() / total_reads\n\n percent_ercc = channel_map['ercc'].sum().divide(total_reads, axis=0)\n\n plate_summaries = pd.concat(\n [self.mapping_stats,\n pd.DataFrame(OrderedDict([\n # ('Percent Rn45s', percent_rn45s),\n (TenX_Runs.PERCENT_ERCC, percent_ercc),\n ('n_barcodes', channel_map.size())\n ]))], axis=1\n )\n\n return plate_summaries", "title": "" }, { "docid": "5ea443782089d4784d66ecd9466b21ac", "score": "0.47545597", "text": "def get_specs(self):\n \n delta = 
self.radians360[1] -self.radians360[0]\n self.directivity = self.get_directivity(self.radians, self.intensity)\n self.hpbw_in_radians = self.get_hpbw(self.radians360, self.intensity360, delta)\n self.hpbw_in_degs = np.degrees(self.hpbw_in_radians)\n self.side_lobes = pd.DataFrame(self.get_side_lobes(self.radians, self.intensity, self.side_lobe_level),\n columns = ['angle', 'value', 'rel_height'])", "title": "" }, { "docid": "bbe54ba40f7811ce95a0049173ed5c59", "score": "0.4753961", "text": "def _generate_reflectance_plot(data, alignment, mirror_index,\n lab_index=N_AIR, normal_vector=None):\n\n reflectivities = {}\n number_of_beams = 2 if alignment.front_reflections is None else 1\n calibrated_input_beams = [\n beam.transform(rotation_matrix(alignment.beams[0].direction))\n for beam in alignment.beams]\n for index in range(number_of_beams):\n reflectivities[index] = {'i':[], 'r':[], 'e':[]}\n for sample in data:\n input_positions = alignment.input_positions(\n sample.mirror_position)\n for beam_index, reflected_beam in enumerate(sample.beams):\n if reflected_beam is None:\n continue\n r_value, r_error = reflectance(\n reflected_beam, calibrated_input_beams[beam_index],\n alignment, mirror_index, lab_index, normal_vector)\n reflectivities[beam_index]['i'].append(input_positions[beam_index][0])\n reflectivities[beam_index]['r'].append(r_value)\n reflectivities[beam_index]['e'].append(max(r_error))\n for index in reflectivities.keys():\n plt.errorbar(\n reflectivities[index]['i'],\n reflectivities[index]['r'],\n reflectivities[index]['e'],\n marker='o', ls='None', color=BEAM_COLORS[index],\n label='Beam {}'.format(index))\n plt.title('Reflectance')\n plt.legend(loc='best', numpoints=1)\n plt.ylabel(\"Reflectance\")\n plt.xlabel(\"Input Position [mm]\")", "title": "" }, { "docid": "4f4c8297ceb1baa0e4f1dc9ea2e4180f", "score": "0.47530982", "text": "def make_reg():\n seg = '0a'\n filters = ['f606w', 'f814w' ]\n main_path='/nfs/slac/g/ki/ki19/deuce/AEGIS/testing/zero_pt/comb_det/'+ seg +'/'\n main_file = 'EGS_10134_'+ seg +'_acs_wfc_30mas_unrot_added_drz.fits'\n header1 ='# Filename:' + main_file + '\\n'\n header2 ='global color=green dashlist=8 3 width=1 font=\"helvetica 10 normal\" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1 \\n'\n header3 = 'physical\\n'\n for filter1 in filters:\n main_cat_file = main_path + filter1 + '_clean.cat'\n main_cat = Table.read(main_cat_file, format='ascii.basic')\n reg_file = main_path + filter1+'_'+ seg +'_seg.reg'\n print reg_file\n f = open(reg_file,'w')\n f.write(header1+header2+header3)\n cond1 = (main_cat['IS_STAR'] == 0)\n cond2 = (main_cat['IN_MASK'] == 0) \n cond3 = (main_cat['SNR'] >= 10)\n q, = np.where(cond1 & cond2 & cond3)\n for i in q:\n x0 = main_cat['X_IMAGE'][int(i)]\n y0 = main_cat['Y_IMAGE'][int(i)]\n r = main_cat['FLUX_RADIUS'][int(i)]\n t = (main_cat['THETA_IMAGE'][int(i)])*np.pi/180.\n e = main_cat['ELLIPTICITY'][int(i)]\n A = 2.5*(main_cat['A_IMAGE'][int(i)])*(main_cat['KRON_RADIUS'][int(i)])\n x_size = A*(np.absolute(np.sin(t))+(1-e)*np.absolute(np.cos(t)))\n y_size = A*(np.absolute(np.cos(t))+(1-e)*np.absolute(np.sin(t)))\n\n\n str1 = 'box(' + str(x0) +','+ str(y0) +','+ str(y_size) +','+ str(x_size)+','+ str(0) + ')'\n str2 = str1 + ' # color=green text={'+ str(main_cat['NUMBER'][int(i)]) + '} font={\"2\"} \\n' \n f.write(str2)\n\n #str1 = 'text(' + str(x0+x_size/2 ) +','+ str(y0+y_size/2 ) +') # text={'+ str(main_cat['NUMBER'][int(i)] +1) + '} \\n'\n #f.write(str1)\n if main_cat['IS_BRIGHT'][int(i)] == 
1:\n str1 ='circle('+ str(x0) +','+ str(y0) +',' + str(r*2) + ') # color=red \\n'\n else:\n str1 ='circle('+ str(x0) +','+ str(y0) +',' + str(r*2) + ') # color=blue \\n'\n f.write(str1) \t\n f.close()", "title": "" }, { "docid": "03c247239f3a7c82c69518bfbc838349", "score": "0.47511658", "text": "def extractStatsFromWavelets(self):\n self.mean = np.ndarray((self.nchannels, 5))\n self.var = np.ndarray((self.nchannels, 5))\n self.std = np.ndarray((self.nchannels, 5))\n self.kurtosis = np.ndarray((self.nchannels, 5))\n self.skew = np.ndarray((self.nchannels, 5))\n ## ojo, dimensions are transposed here\n for i in range(0, self.nchannels):\n self.mean[i, :] = [np.mean(self.cA4[i]), np.mean(self.cD4[i]), np.mean(self.cD3[i]), np.mean(self.cD2[i]),\n np.mean(self.cD1[i])]\n self.var[i, :] = [np.var(self.cA4[i]), np.var(self.cD4[i]), np.var(self.cD3[i]), np.var(self.cD2[i]),\n np.var(self.cD1[i])]\n self.std[i, :] = [np.std(self.cA4[i]), np.std(self.cD4[i]), np.std(self.cD3[i]), np.std(self.cD2[i]),\n np.std(self.cD1[i])]\n self.kurtosis[i, :] = [kurtosis(self.cA4[i]), kurtosis(self.cD4[i]), kurtosis(self.cD3[i]),\n kurtosis(self.cD2[i]), kurtosis(self.cD1[i])]\n self.skew[i, :] = [skew(self.cA4[i]), skew(self.cD4[i]), skew(self.cD3[i]), skew(self.cD2[i]),\n skew(self.cD1[i])]", "title": "" }, { "docid": "83f5bf3c3d65978c4b9ffd884482b4e7", "score": "0.47463462", "text": "def _predict_core(self, reflections):\n\n # do prediction (updates reflection table in situ).\n self._reflection_predictor(reflections)\n\n x_obs, y_obs, _ = reflections[\"xyzobs.mm.value\"].parts()\n x_calc, y_calc, _ = reflections[\"xyzcal.mm\"].parts()\n\n # calculate residuals and assign columns\n reflections[\"x_resid\"] = x_calc - x_obs\n reflections[\"x_resid2\"] = reflections[\"x_resid\"] ** 2\n reflections[\"y_resid\"] = y_calc - y_obs\n reflections[\"y_resid2\"] = reflections[\"y_resid\"] ** 2\n reflections[\"delpsical2\"] = reflections[\"delpsical.rad\"] ** 2\n\n return reflections", "title": "" }, { "docid": "56dec5781b47419241b472df026773b7", "score": "0.4746326", "text": "def descriptorCS(vertices,facet, coef_num_sqrt=13):\n\t# get the sample value of r(u)\n\tzi = gridSampleRU(vertices,facet)\n\t# generate the sherical harmonics coefficients\n\tcoeffs = np.abs(SHExpandDHC(zi,sampling=2)) \n\tcoeffs_trunc=[[coeffs[0,k,:(k+1)].tolist(),coeffs[1,k,1:(k+1)].tolist()] for k in range(coef_num_sqrt)]\n\tcoeffs_trunc = [var for sublist in coeffs_trunc for subsublist in sublist for var in subsublist]\n\tcoeffs_trunc = np.array(coeffs_trunc)\n\treturn coeffs_trunc", "title": "" }, { "docid": "6cff1eba8bfbe51b981831665d442ab6", "score": "0.47457027", "text": "def HelixTilts ( self ):\n\n return [ TM. 
Tilt ( ) for TM in self.Content ]", "title": "" }, { "docid": "349f43785dd0f01414828e9d05ff49d6", "score": "0.4742124", "text": "def stats(self):\n return self.skrot,self.nazwa,self.p_s,self.oslona,self.atak", "title": "" }, { "docid": "143b1c17082da4c6f08c835f624a8762", "score": "0.47416934", "text": "def get_photons(self, particle, device=\"cpu\"):\n raw_data = self.get_pattern_without_corrections(particle=particle, device=device)\n return self.add_correction_and_quantization(raw_data)", "title": "" }, { "docid": "0905ecf7f7baedc33b2d880a86fb4f44", "score": "0.47398037", "text": "def tclean(vis='', selectdata=True, field='', spw='', timerange='', uvrange='', antenna='', scan='', observation='', intent='', datacolumn='corrected', imagename='', imsize=[100], cell=[\"1arcsec\"], phasecenter='', stokes='I', projection='SIN', startmodel='', specmode='mfs', reffreq='', nchan=-1, start='', width='', outframe='LSRK', veltype='radio', restfreq=[], interpolation='linear', perchanweightdensity=True, gridder='standard', facets=1, psfphasecenter='', chanchunks=1, wprojplanes=1, vptable='', mosweight=True, aterm=True, psterm=False, wbawp=True, conjbeams=False, cfcache='', usepointing=False, computepastep=360.0, rotatepastep=360.0, pointingoffsetsigdev=0.0, pblimit=0.2, normtype='flatnoise', deconvolver='hogbom', scales=[], nterms=2, smallscalebias=0.0, restoration=True, restoringbeam=[], pbcor=False, outlierfile='', weighting='natural', robust=0.5, noise='1.0Jy', npixels=0, uvtaper=[''], niter=0, gain=0.1, threshold=0.0, nsigma=0.0, cycleniter=-1, cyclefactor=1.0, minpsffraction=0.05, maxpsffraction=0.8, interactive=False, usemask='user', mask='', pbmask=0.0, sidelobethreshold=3.0, noisethreshold=5.0, lownoisethreshold=1.5, negativethreshold=0.0, smoothfactor=1.0, minbeamfrac=0.3, cutthreshold=0.01, growiterations=75, dogrowprune=True, minpercentchange=-1.0, verbose=False, fastnoise=True, restart=True, savemodel='none', calcres=True, calcpsf=True, parallel=False):\n if type(uvtaper)==str: uvtaper=[uvtaper]\n\n#\n# The following is work around to avoid a bug with current python translation\n#\n mytmp = {}\n\n mytmp['vis'] = vis\n mytmp['selectdata'] = selectdata\n mytmp['field'] = field\n mytmp['spw'] = spw\n mytmp['timerange'] = timerange\n mytmp['uvrange'] = uvrange\n mytmp['antenna'] = antenna\n mytmp['scan'] = scan\n mytmp['observation'] = observation\n mytmp['intent'] = intent\n mytmp['datacolumn'] = datacolumn\n mytmp['imagename'] = imagename\n mytmp['imsize'] = imsize\n mytmp['cell'] = cell\n mytmp['phasecenter'] = phasecenter\n mytmp['stokes'] = stokes\n mytmp['projection'] = projection\n mytmp['startmodel'] = startmodel\n mytmp['specmode'] = specmode\n mytmp['reffreq'] = reffreq\n mytmp['nchan'] = nchan\n mytmp['start'] = start\n mytmp['width'] = width\n mytmp['outframe'] = outframe\n mytmp['veltype'] = veltype\n mytmp['restfreq'] = restfreq\n mytmp['interpolation'] = interpolation\n mytmp['perchanweightdensity'] = perchanweightdensity\n mytmp['gridder'] = gridder\n mytmp['facets'] = facets\n mytmp['psfphasecenter'] = psfphasecenter\n mytmp['chanchunks'] = chanchunks\n mytmp['wprojplanes'] = wprojplanes\n mytmp['vptable'] = vptable\n mytmp['mosweight'] = mosweight\n mytmp['aterm'] = aterm\n mytmp['psterm'] = psterm\n mytmp['wbawp'] = wbawp\n mytmp['conjbeams'] = conjbeams\n mytmp['cfcache'] = cfcache\n mytmp['usepointing'] = usepointing\n mytmp['computepastep'] = computepastep\n mytmp['rotatepastep'] = rotatepastep\n mytmp['pointingoffsetsigdev'] = pointingoffsetsigdev\n mytmp['pblimit'] = 
pblimit\n mytmp['normtype'] = normtype\n mytmp['deconvolver'] = deconvolver\n mytmp['scales'] = scales\n mytmp['nterms'] = nterms\n mytmp['smallscalebias'] = smallscalebias\n mytmp['restoration'] = restoration\n mytmp['restoringbeam'] = restoringbeam\n mytmp['pbcor'] = pbcor\n mytmp['outlierfile'] = outlierfile\n mytmp['weighting'] = weighting\n mytmp['robust'] = robust\n mytmp['noise'] = noise\n mytmp['npixels'] = npixels\n mytmp['uvtaper'] = uvtaper\n mytmp['niter'] = niter\n mytmp['gain'] = gain\n mytmp['threshold'] = threshold\n mytmp['nsigma'] = nsigma\n mytmp['cycleniter'] = cycleniter\n mytmp['cyclefactor'] = cyclefactor\n mytmp['minpsffraction'] = minpsffraction\n mytmp['maxpsffraction'] = maxpsffraction\n mytmp['interactive'] = interactive\n mytmp['usemask'] = usemask\n mytmp['mask'] = mask\n mytmp['pbmask'] = pbmask\n mytmp['sidelobethreshold'] = sidelobethreshold\n mytmp['noisethreshold'] = noisethreshold\n mytmp['lownoisethreshold'] = lownoisethreshold\n mytmp['negativethreshold'] = negativethreshold\n mytmp['smoothfactor'] = smoothfactor\n mytmp['minbeamfrac'] = minbeamfrac\n mytmp['cutthreshold'] = cutthreshold\n mytmp['growiterations'] = growiterations\n mytmp['dogrowprune'] = dogrowprune\n mytmp['minpercentchange'] = minpercentchange\n mytmp['verbose'] = verbose\n mytmp['fastnoise'] = fastnoise\n mytmp['restart'] = restart\n mytmp['savemodel'] = savemodel\n mytmp['calcres'] = calcres\n mytmp['calcpsf'] = calcpsf\n mytmp['parallel'] = parallel\n pathname='file://' + xmlpath( ) + '/'\n trec = casac.utils().torecord(pathname+'tclean.xml')\n\n casalog.origin('tclean')\n if trec.has_key('tclean') and casac.utils().verify(mytmp, trec['tclean']) :\n result = task_tclean.tclean(vis, selectdata, field, spw, timerange, uvrange, antenna, scan, observation, intent, datacolumn, imagename, imsize, cell, phasecenter, stokes, projection, startmodel, specmode, reffreq, nchan, start, width, outframe, veltype, restfreq, interpolation, perchanweightdensity, gridder, facets, psfphasecenter, chanchunks, wprojplanes, vptable, mosweight, aterm, psterm, wbawp, conjbeams, cfcache, usepointing, computepastep, rotatepastep, pointingoffsetsigdev, pblimit, normtype, deconvolver, scales, nterms, smallscalebias, restoration, restoringbeam, pbcor, outlierfile, weighting, robust, noise, npixels, uvtaper, niter, gain, threshold, nsigma, cycleniter, cyclefactor, minpsffraction, maxpsffraction, interactive, usemask, mask, pbmask, sidelobethreshold, noisethreshold, lownoisethreshold, negativethreshold, smoothfactor, minbeamfrac, cutthreshold, growiterations, dogrowprune, minpercentchange, verbose, fastnoise, restart, savemodel, calcres, calcpsf, parallel)\n\n else :\n result = False\n return result", "title": "" }, { "docid": "9785f6743bdd7127d38b7cc51691f1e2", "score": "0.47387964", "text": "def stat(self):\n\t\t\n\t\ttru_s1 = 0 # Leaving these lensing parameters to the integers 0 0 1 means that the \"lens\" method will not get called.\n\t\ttru_s2 = 0\n\t\ttru_mu = 1\n\t\t\n\t\tsnc_type = 0 # 0 means no shape noise cancellation\n\t\t\n\t\treturn {\n\t\t\t\"tru_s1\" : tru_s1, # shear component 1, in \"g\" convention\n\t\t\t\"tru_s2\" : tru_s2, # component 2\n\t\t\t\"tru_mu\" : tru_mu, # magnification\n\t\t\t\"snc_type\" : snc_type, # The type of shape noise cancellation. 
0 means none, n means n-fold\n\t\t}", "title": "" }, { "docid": "88ca0a6fbc0347a9e1ecb576520fda68", "score": "0.47386476", "text": "def computeSnow(cam):\n #print(\" __computeSnow()__\")\n\n#For each pixel:\n countSnow = 0 #Number of pixels that are almost white\n t = 0.75 #Threshold for almost white-- can adjust between 0.0 and 1.0\n\n for i in range(cam.shape[0]): # rows\n for j in range(cam.shape[1]): # columns\n #print(i,j,cam[i,j,0],cam[i,j,1],cam[i,j,2])\n #Check if red, green, and blue pixels are > t for each i,j location:\n if (cam[i,j,0] > t) and (cam[i,j,1] > t) and (cam[i,j,2] > t): # the Red Green Blue values (channels of color)\n countSnow = countSnow + 1\n return countSnow", "title": "" }, { "docid": "af49a2fa9af5059bee3fa7830b426e4c", "score": "0.4737546", "text": "def apply_visual_servoing(mgpi):", "title": "" }, { "docid": "a66e15dd2aacbf783dcb79aa9685757b", "score": "0.47368798", "text": "def plot_drift_velocities(outLoc, df, fields):\n df['kpt_mag'] = np.sqrt(df['kx [1/A]'].values**2 + df['ky [1/A]'].values**2 +\n df['kz [1/A]'].values**2)\n df['ingamma'] = df['kpt_mag'] < 0.3 # Boolean. In gamma if kpoint magnitude less than some amount\n g_inds = df.loc[df['ingamma'] == 1].index\n l_inds = df.loc[df['ingamma'] == 0].index\n\n g_df = df.loc[g_inds]\n l_df = df.loc[l_inds]\n v_g = []\n v_l = []\n ng_3 = []\n nl_3 = []\n n_3 = []\n vd_3 = []\n n = utilities.calculate_density(df)\n noise_3 =[]\n mu_3 = []\n Tn_3 = []\n for ee in fields:\n chi = np.load(outLoc + 'chi_3_gmres_{:.1e}.npy'.format(ee))\n v_g.append(np.sum(np.multiply(chi[g_inds]+g_df['k_FD'].values,g_df['vx [m/s]'])/np.sum(df['k_FD'])))\n v_l.append(np.sum(np.multiply(chi[l_inds]+l_df['k_FD'].values,l_df['vx [m/s]'])/np.sum(df['k_FD'])))\n\n # v_g.append(utilities.mean_velocity(chi[g_inds], g_df))\n # v_l.append(utilities.mean_velocity(chi[l_inds], l_df))\n vd_3.append(utilities.mean_velocity(chi, df))\n n_3.append(utilities.calculate_noneq_density(chi, df))\n ng_i, nl_i, _, _ = utilities.calc_L_Gamma_pop(chi, df)\n ng_3.append(ng_i)\n nl_3.append(nl_i)\n g_3_i = np.load(outLoc + 'g_3_gmres_{:.1e}.npy'.format(ee))\n noise_3.append(noise_solver.lowfreq_diffusion(g_3_i, df))\n mu_3 = (utilities.calc_diff_mobility(chi, df, ee))\n Tn_3.append(noise_solver.noiseT(in_Loc, noise_3[-1]+(np.asarray(v_l[-1])-np.asarray(v_g[-1]))**2*(np.asarray(ng_3[-1])*np.asarray(nl_3[-1])/n**2*10**-11), mu_3, df, True))\n plt.figure()\n plt.plot(fields*1e-5, v_l, MarkerSize=5,label='L-Valley Drift')\n plt.plot(fields*1e-5, v_g, MarkerSize=5,label=r'$\\Gamma$-Valley Drift')\n plt.title(pp.title_str)\n plt.xlabel('Field [kV/cm]')\n plt.ylabel('Drift velocity [m/s]')\n plt.legend()\n\n plt.figure()\n plt.plot(fields*1e-5, np.asarray(v_l)+np.asarray(v_g),MarkerSize=5,label='Summed components')\n plt.plot(fields*1e-5, vd_3, '--',label=r'Total drift')\n plt.title(pp.title_str)\n plt.xlabel('Field [kV/cm]')\n plt.ylabel('Drift velocity [m/s]')\n plt.legend()\n\n plt.figure()\n plt.plot(fields*1e-5, v_l, MarkerSize=5,label='L-Valley Drift')\n plt.title(pp.title_str)\n plt.xlabel('Field [kV/cm]')\n plt.ylabel('Drift velocity [m/s]')\n plt.legend()\n\n plt.figure()\n plt.plot(g_df['kx [1/A]'], g_df['vx [m/s]'], '.',label=r'$Gamma$-Valley')\n plt.plot(l_df['kx [1/A]'], l_df['vx [m/s]'], '.',label='L-Valley')\n\n plt.title(pp.title_str)\n plt.xlabel('kx [1/A]')\n plt.ylabel('Group velocity [m/s]')\n plt.legend()\n\n plt.figure()\n plt.plot(g_df['energy'], g_df['vx [m/s]'], '.',label=r'$Gamma$-Valley')\n plt.plot(l_df['energy'], l_df['vx [m/s]'], 
'.',label='L-Valley')\n\n plt.title(pp.title_str)\n plt.xlabel('Energy [eV]')\n plt.ylabel('Group velocity [m/s]')\n plt.legend()\n\n plt.figure()\n plt.plot(fields*1e-5, noise_3 + (np.asarray(v_l)-np.asarray(v_g))**2*(np.asarray(ng_3)*np.asarray(nl_3)/n**2*10**-11),MarkerSize=5,label='Summed components')\n plt.title(pp.title_str)\n plt.xlabel('Fields [kV/cm]')\n plt.ylabel('Non-equilibrium diffusion coefficeint [m^2/s]')\n plt.legend()\n\n plt.figure()\n plt.plot(fields*1e-5,Tn_3)\n plt.xlabel('Field [kV/cm]')\n plt.ylabel('Noise Temperature [K]')\n plt.title(pp.title_str)\n plt.legend()", "title": "" }, { "docid": "48713092554a4d5d2d39c50c15b52dfc", "score": "0.4735025", "text": "def check_data(packed,t_bin,wl_bin):\r\n t,wl,trace = unpack_trace(downsample_rebin(packed,t_bin,wl_bin))\r\n print(np.diff(t))\r\n plt.figure()\r\n plt.pcolormesh(t,wl,trace,vmin=np.percentile(trace,50),vmax=np.percentile(trace,99))\r\n return trace", "title": "" } ]
f0baa7c754f29a744c55ad7ba6c87488
Read Darcs changeset information for the Bcfg2 repository.
[ { "docid": "bae4f46d9c2febcf535a0977b0c8c9b4", "score": "0.5081105", "text": "def get_revision(self):\r\n try:\r\n data = Popen(\"env LC_ALL=C darcs changes\",\r\n shell=True,\r\n cwd=self.vcs_root,\r\n stdout=PIPE).stdout.readlines()\r\n revision = data[0].strip('\\n')\r\n except:\r\n msg = \"Failed to read darcs repository\"\r\n self.logger.error(msg)\r\n self.logger.error('Ran command \"darcs changes\" from directory %s' %\r\n self.vcs_root)\r\n raise Bcfg2.Server.Plugin.PluginExecutionError(msg)\r\n return revision", "title": "" } ]
[ { "docid": "3cf297d97dffd87d3b90afe85d8fd56d", "score": "0.5314179", "text": "def read_dependencies():\r\n\r\n\t## read from file: prog2default.csv\r\n\tdependencies_file = file_list(\"dependencies\")\r\n\treturn(HCGB_main.get_data(dependencies_file, ',', 'index_col=0'))", "title": "" }, { "docid": "75fb2322343b49c559145bdfc7e9b4be", "score": "0.5280131", "text": "def get_changesets(self):\n url = urllib.basejoin(self.api_base, 'changesets.xml')\n req = self._open_url(url)\n dom = minidom.parseString(req)\n changesets = dom.getElementsByTagName('revision-cache')\n results = []\n for change in changesets:\n results.append({\n 'repo_id': self._get_text(change.getElementsByTagName('repository-id')[0]),\n 'revision': self._get_text(change.getElementsByTagName('revision')[0]),\n 'message': self._get_text(change.getElementsByTagName('message')[0]),\n 'author': self._get_text(change.getElementsByTagName('author')[0]),\n 'email': self._get_text(change.getElementsByTagName('email')[0])\n })\n return results", "title": "" }, { "docid": "524b554b7dd7e439f2401819f847bf59", "score": "0.51479024", "text": "def read_configuration(name, vcs):\n\n config_file = get_vcs(vcs)\n parser = ConfigParser.SafeConfigParser()\n parser.read(config_file)\n\n name_revisions = {}\n if parser.has_section(name):\n name_revisions[name] = [e.strip() for e in parser.get(name, 'revisions').split(',')]\n\n return name_revisions", "title": "" }, { "docid": "141da7c1fb8229453f0f554bbe27eee5", "score": "0.5030486", "text": "def getChangeSet(self) -> ghidra.framework.data.DomainObjectDBChangeSet:\n ...", "title": "" }, { "docid": "cf18666fd003dd66338676f59fd69a49", "score": "0.50144565", "text": "def read_chandb(file_chandb):\n chans = {}\n with open(file_chandb, 'rb') as chandb:\n for line in chandb:\n if line[0] == ';': continue\n line = line.rstrip()\n if line[:2] != \"->\":\n channel, dummy0, founded, updated = line.split(' ')\n if chans.has_key(channel):\n raise RuntimeError, \"Malformed chan.db file detected - {} more than once in file\".format(channel)\n chans[channel] = {\n 'channel': channel,\n 'dummy0': dummy0,\n 'founded': founded,\n 'updated': NOW_UNIX,\n 'ops_users': {},\n 'otherlines': [],\n }\n elif line[:2] == \"->\":\n if line[:8] == \"->ACCESS\":\n splitline = line.split(' ')\n if len(splitline) < 6: # Can this ever happen? Not sure...\n splitline.extend([None] * (6-len(splitline)))\n access, user_host, alvl, ctime, mtime, addedby = splitline\n user_host = user_host.split('!')\n if len(user_host) < 2: # this can definitely happen\n user_host.append(None)\n username, host = user_host\n chans[channel]['ops_users'][username] = (host, alvl, ctime, mtime, addedby)\n else:\n chans[channel]['otherlines'].append(line)\n return chans", "title": "" }, { "docid": "3b61b8db9f3fcdeac57b115ad416e854", "score": "0.49922833", "text": "async def get_channel_config(self):\n logger.info(f\"E2E: Get channel {self.channel_name} config start\")\n\n orgs = [\"org1.example.com\"]\n for org in orgs:\n org_admin = self.client.get_user(org, \"Admin\")\n responses = await self.client.get_channel_config(\n requestor=org_admin,\n channel_name=self.channel_name,\n peers=['peer0.' + org, 'peer1.' 
+ org]\n )\n self.assertEqual(responses[0].config.sequence,\n 1, \"Get Config Failed\")\n\n logger.info(\"E2E: Query installed chaincode done\")", "title": "" }, { "docid": "07f7a124e0e8535417b6b221ff7d7665", "score": "0.4962136", "text": "def get_changeset_lines(repo_dir):\n cmds = ['cd %s' % repo_dir, 'git log --reverse --format=\"%H|%ct|%s\"']\n return execute(' && '.join(cmds)).splitlines()", "title": "" }, { "docid": "c1c65a99018bc66d1e58b3baaefb1d28", "score": "0.49596632", "text": "def get_changeset_branches(self, changeset):\n raise NotImplementedError(\"Abstract method\")", "title": "" }, { "docid": "0918de0c8fbea026fec681dd389b9e08", "score": "0.4938005", "text": "def get_changeset_details_from_osm(changeset_id):\n url = os.path.join(\n config.OSM_API_BASE_URL,\n 'changeset',\n changeset_id,\n 'download')\n response = requests.get(url)\n return analyze_changeset(ET.fromstring(response.content))", "title": "" }, { "docid": "fe869cf7491931e223449c5fb697c202", "score": "0.48469183", "text": "def retrieveConfigurationInformation(self):\n with open(self.CONFIG_FILE_PATH,'r+') as f:\n lines = f.read().splitlines()\n\n line_number = 1\n for line in lines:\n if line_number == 1:\n if line.split()[1] == '0':\n print(\"Config file is not locked.\\n\\n\")\n else:\n self.isLocked = True\n print(\"Config file is locked.\\n\\n\")\n if line_number == 2:\n drinks = line.split(\" \")\n for i in range(len(drinks)-1):\n self.drink_names.append(drinks[i+1])\n line_number+=1", "title": "" }, { "docid": "f5728076196f2a3e8c7817d2dcb0981a", "score": "0.4831443", "text": "def get_change_files(self, start_commit, end_commit):\n command = \"git diff --name-only {start} {end}\".format(start=start_commit, end=end_commit)\n\n logger_server.info(\n \"Get change files from {start}...{end} [CMD:{cmd}]...\".format(start=start_commit, end=end_commit,\n cmd=command))\n\n self.cwd(self.git_path)\n\n change_files = []\n\n if start_commit is not None and end_commit is not None:\n change_content = self._run_shell_command(command=command)\n\n for one_file in change_content.split('\\n'):\n change_files.append(one_file)\n # reduce 1 more blank line\n change_files = change_files[:-1]\n\n if change_files:\n return change_files\n else:\n return None", "title": "" }, { "docid": "99c4e19870dd697f32d0fbcf809aff63", "score": "0.48118988", "text": "def GetCLInfo(review_host, change_id, auth_cookie='', include_messages=False,\n include_detailed_accounts=False):\n url = f'{review_host}/changes/{change_id}'\n params = []\n if include_messages:\n params.append(('o', 'MESSAGES'))\n if include_detailed_accounts:\n params.append(('o', 'DETAILED_ACCOUNTS'))\n if params:\n url = url + '?' 
+ urllib.parse.urlencode(params)\n pool_manager = PoolManager(ca_certs=certifi.where())\n pool_manager.headers['Cookie'] = auth_cookie\n pool_manager.headers['Content-Type'] = 'application/json'\n pool_manager.headers['Connection'] = 'close'\n try:\n r = pool_manager.urlopen('GET', url)\n except urllib3.exceptions.HTTPError:\n raise GitUtilException(f'invalid url {url}')\n if r.status != http.client.OK:\n raise GitUtilException(f'request unsuccessfully with code {r.status}')\n\n try:\n # the response starts with a magic prefix line for preventing XSSI which\n # should be stripped.\n stripped_json = r.data.split(b'\\n', 1)[1]\n json_data = json_utils.LoadStr(stripped_json)\n except Exception:\n raise GitUtilException('Response format Error: %r' % (r.data, ))\n\n def _ConvertGerritCLMessage(json_data):\n return CLMessage(\n json_data['message'],\n json_data['author']['email'] if include_detailed_accounts else None)\n\n try:\n return CLInfo(json_data['change_id'], json_data['_number'],\n _GERRIT_CL_STATUS_TO_CL_STATUS[json_data['status']],\n [_ConvertGerritCLMessage(x) for x in json_data['messages']]\n if include_messages else None)\n except Exception as ex:\n logging.debug('Unexpected Gerrit API response for CL info: %r', json_data)\n raise GitUtilException('failed to parse the Gerrit API response') from ex", "title": "" }, { "docid": "e11f25fb40d75c8082b06eb521a2f279", "score": "0.4804703", "text": "def get_commit_info(repo, commit_id):\n res = repo._client.get(repo.url('/commits/{}/changes'.format(commit_id)))\n return json.loads(res.content)", "title": "" }, { "docid": "a002690a8e75931c8b7b03b11a330011", "score": "0.47785848", "text": "def getListOfCommits():\n os.chdir(pathToProject) \n git_log = check_output([\"git\", \"log\"])\n git_log = git_log.split(\"\\n\")\n for line in git_log:\n if re.match(\"commit [0-9a-f]{40}\", line):\n yield line[7:47]", "title": "" }, { "docid": "6be1fc9e614888d741f73e83329d7305", "score": "0.47614622", "text": "def read(self, node):\n d, s = self._revisiondata(node)\n c = changelogrevision(\n d, s, self._copiesstorage == b'changeset-sidedata'\n )\n return (c.manifest, c.user, c.date, c.files, c.description, c.extra)", "title": "" }, { "docid": "055994324a5b37ec42368d0920adff1f", "score": "0.47601902", "text": "def get_changesets(self, start=None, end=None, start_date=None,\n end_date=None, branch_name=None, reverse=False):\n start_raw_id = self._get_revision(start)\n start_pos = self.revisions.index(start_raw_id) if start else None\n end_raw_id = self._get_revision(end)\n end_pos = self.revisions.index(end_raw_id) + 1 if end else None\n\n if (start_pos and end_pos) and start_pos > end_pos:\n raise RepositoryError('start cannot be after end')\n\n if branch_name and branch_name not in self.branches.keys():\n raise BranchDoesNotExistError('Such branch %s does not exists for'\n ' this repository' % branch_name)\n\n slice = reversed(self.revisions[start_pos:end_pos]) if reverse else \\\n self.revisions[start_pos:end_pos]\n\n for id_ in slice:\n cs = self.get_changeset(id_)\n if branch_name and cs.branch != branch_name:\n continue\n if start_date and cs.date < start_date:\n continue\n if end_date and cs.date > end_date:\n continue\n\n yield cs", "title": "" }, { "docid": "5916fca119b4eefbe5602f4d6d03e1b8", "score": "0.47359604", "text": "def get_confs():\n confs = builder_configs\n if confs and len(confs) > 1:\n return confs\n else:\n return []", "title": "" }, { "docid": "b04ed5373753e7b687a8ce10c5b11b50", "score": "0.4729419", "text": "def 
read_config(cfname):\n with open(cfname) as data_file:\n data = json.load(data_file)\n return data", "title": "" }, { "docid": "5ad7573594cf66c8ad1cedf78a2518eb", "score": "0.47278142", "text": "def get_changelog(debpath, changelogpath, baseversion, updateversion):\n # create tmp dir\n randomstring = gen_string(10)\n TMPDIR = '/tmp/diffchangelog-' + randomstring\n\n extractcmd = \"dpkg-deb -x \" + debpath + \" \" + TMPDIR\n\n extractdeb = os.system(extractcmd)\n # extract deb failed?\n if extractdeb != 0:\n log_print(\"extract deb file failed.\")\n return 9\n\n zcatcmd = \"cd \" + TMPDIR + \" && zcat \" + changelogpath\n\n changelogs = os.popen(zcatcmd).read()\n\n # clean TMPDIR\n cleancmd = \"rm -rf \" + TMPDIR\n os.system(cleancmd)\n\n return changelogs", "title": "" }, { "docid": "09b04b71c15cb34388a9620df8ad1d74", "score": "0.47242367", "text": "def getCAreacs():\n CA_reacs = []\n f=open(ratdbpath, 'r')\n beginparse = False\n parsing = False\n while beginparse == False:\n stuff = str(f.readline())\n if stuff.find('C A N A D A') != -1:\n beginparse = True\n parsing = True\n while parsing == True:\n parseline = f.readline()\n if parseline.find('U S A') != -1:\n parsing = False\n line_pieces = parseline.split(\":\")\n if line_pieces[0] == 'index':\n CA_reacs.append(line_pieces[1].rstrip(\"\\\",\\n\").lstrip(\" \\\"\"))\n return CA_reacs", "title": "" }, { "docid": "133c1b2f7ee7044613496fbf259aff23", "score": "0.47194755", "text": "def get_repositoryInfo(self):\n # TODO: I think we should keep this, but I think Jeff's patch moves it to the top of the fileimport ConfigParser\n\n config = ConfigParser.RawConfigParser()\n dirname = os.path.dirname(self.binary)\n repository = { }\n\n for file, section in [('application', 'App'), ('platform', 'Build')]:\n config.read(os.path.join(dirname, '%s.ini' % file))\n\n for key, id in [('SourceRepository', 'repository'),\n ('SourceStamp', 'changeset')]:\n try:\n repository['%s_%s' % (file, id)] = config.get(section, key);\n except:\n repository['%s_%s' % (file, id)] = None\n\n return repository", "title": "" }, { "docid": "24e863267c6aaa051efdad1e306f49d4", "score": "0.47154847", "text": "def view_config_changes():", "title": "" }, { "docid": "c908e9b718e41adfd1c217e86e3f43a1", "score": "0.47121155", "text": "def find_modified_lines() -> Coverage:\n base_branch = os.getenv(\"BUILDKITE_PULL_REQUEST_BASE_BRANCH\", \"main\") or os.getenv(\n \"BUILDKITE_PIPELINE_DEFAULT_BRANCH\", \"main\"\n )\n # Make sure we have the latest state to correctly identify the merge base\n subprocess.run([\"git\", \"fetch\", \"origin\", base_branch], check=True)\n result = subprocess.run(\n [\"git\", \"merge-base\", \"HEAD\", f\"origin/{base_branch}\"],\n check=True,\n capture_output=True,\n )\n merge_base = result.stdout.strip()\n print(f\"Merge base: {merge_base.decode('utf-8')}\")\n result = subprocess.run(\n [\"git\", \"diff\", \"-U0\", merge_base], check=True, capture_output=True\n )\n\n coverage: Coverage = {}\n file = None\n for line_raw in result.stdout.splitlines():\n line = line_raw.decode(\"utf-8\")\n # +++ b/src/adapter/src/coord/command_handler.rs\n if line.startswith(\"+++\"):\n file = line.removeprefix(\"+++ b/\")\n if not line.endswith(\".rs\"):\n continue\n coverage[file] = OrderedDict()\n # @@ -641,7 +640,6 @@ impl Coordinator {\n elif line.startswith(\"@@ \") and file in coverage:\n # We only care about the second value (\"+640,6\" in the example),\n # which contains the line number and length of the modified block\n # in new code state.\n parts = 
line.split(\" \")[2]\n if \",\" in parts:\n start, length = map(int, parts.split(\",\"))\n else:\n start = int(parts)\n length = 1\n for line_nr in range(start, start + length):\n coverage[file][line_nr] = None\n return coverage", "title": "" }, { "docid": "93952f93954ea8c69070ca9d72cb8240", "score": "0.47064587", "text": "def all_commits(change_id, curr_project, curr_ref):\n commits = []\n manifest = ET.ElementTree(file='.repo/manifest.xml')\n commits.append((curr_project, project_path(manifest, curr_project), curr_ref))\n\n url = (GERRIT_ROOT + 'changes/?o=CURRENT_REVISION&q=status:open+' +\n change_id)\n changes = request.urlopen(url)\n for change in parse_response(changes):\n project = change['project']\n fetch = change['revisions'][change['current_revision']]['fetch']\n # The `ref` is the same for every download scheme, hence we can use\n # the first one that is there\n ref = fetch.values()[0]['ref']\n path = project_path(manifest, project, change['branch'])\n if path and project != curr_project:\n commits.append((project, path, ref))\n\n return commits", "title": "" }, { "docid": "342aee88407a107bcaba222a19251e44", "score": "0.47046775", "text": "def _get_branches(self):\n return [x[2:] for x in self._do(['branch'], as_lines=True)]", "title": "" }, { "docid": "4a1a425a81c2246ff58a0c72d3b35912", "score": "0.47021234", "text": "def parse(self):\n self.flush()\n self.seek(0)\n data = utils.ruby_lines(self.readlines())\n data = [tuple(j.strip() for j in line.split(None, 1))\n for line in data]\n datamap = {}\n for line in data:\n if len(line) == 1:\n datamap[line[0]] = True\n elif len(line) == 2:\n key, value = line\n if key == 'cookbook':\n datamap.setdefault('cookbook', {})\n value = [utils.ruby_strip(v) for v in value.split(',')]\n lib, detail = value[0], value[1:]\n datamap['cookbook'].setdefault(lib, {})\n # if there is additional dependency data but its\n # not the ruby hash, its the version constraint\n if detail and not any(\"\".join(detail).startswith(o)\n for o in self.berks_options):\n constraint, detail = detail[0], detail[1:]\n datamap['cookbook'][lib]['constraint'] = constraint\n if detail:\n for deet in detail:\n opt, val = [\n utils.ruby_strip(i)\n for i in deet.split(':', 1)\n ]\n if not any(opt == o for o in self.berks_options):\n raise ValueError(\n \"Cookbook detail '%s' does not specify \"\n \"one of '%s'\" % (opt, self.berks_options))\n else:\n datamap['cookbook'][lib][opt.strip(':')] = (\n utils.ruby_strip(val))\n elif key == 'source':\n datamap.setdefault(key, [])\n datamap[key].append(utils.ruby_strip(value))\n elif key:\n datamap[key] = utils.ruby_strip(value)\n self.seek(0)\n return datamap", "title": "" }, { "docid": "313b33824ac7ff154f07071d250af1ae", "score": "0.46843684", "text": "def get_revset(self, cs_from=None, cs_to=None,\n branch=None, keyword=None, date=None):\n raise NotImplementedError(\"Abstract method\")", "title": "" }, { "docid": "51e79cc7d9ee6b969dc6a5fede049577", "score": "0.46819222", "text": "def get_changes(self):\n\n self.change_list = []\n device_paths = self.zfs_options[\"device_paths\"]\n\n device_path = device_paths[0]\n\n pool_name = self.zfs_options[\"pool_name\"]\n\n if self.zfs_options[\"scheme\"] == \"GPT\":\n self.append_change(\"delete\", device_path)\n if not self.uefi:\n self.append_change(\"create\", device_path, \"BIOS boot (2MB)\")\n self.append_change(\"create\", device_path,\n \"Reborn Boot (512MB)\")\n else:\n # UEFI\n if self.bootloader == \"grub2\":\n self.append_change(\"create\", device_path,\n \"UEFI System 
(200MB)\")\n self.append_change(\"create\", device_path,\n \"Reborn Boot (512MB)\")\n else:\n self.append_change(\"create\", device_path,\n \"Reborn Boot (512MB)\")\n else:\n # MBR\n self.append_change(\"delete\", device_path)\n self.append_change(\"create\", device_path, \"Reborn Boot (512MB)\")\n\n msg = \"Reborn ZFS pool ({0})\".format(pool_name)\n self.append_change(\"create\", device_path, msg)\n self.append_change(\"create\", device_path, \"Reborn ZFS vol (swap)\")\n\n if self.settings.get(\"use_home\"):\n self.append_change(\"create\", device_path,\n \"Reborn ZFS vol (/home)\")\n\n # Now init all other devices that will form part of the pool\n for device_path in device_paths[1:]:\n self.append_change(\"delete\", device_path)\n msg = \"Reborn ZFS pool ({0})\".format(pool_name)\n self.append_change(\"add\", device_path, msg)\n\n return self.change_list", "title": "" }, { "docid": "3d75f63b001f5e0e34ca739dbafbe97b", "score": "0.46816814", "text": "def defineChangeSet():\n changes = []\n with open('input.txt') as g:\n dataset = g.readlines()\n for line in dataset:\n if line[0] == '+':\n change = int(line[1:])\n else:\n change = int(line)\n changes.append(change)\n return changes", "title": "" }, { "docid": "0002ebf85cc589502350a9005dc3cf0d", "score": "0.46773097", "text": "def bitbucket_get_changes(self, repo, branch):\n\n files_changed = []\n next_page_start = 0\n while True:\n url = AtlassianUtils.BITBUCKET_GET_CHANGES_URL.format(self.project_key,\n repo,\n branch,\n next_page_start)\n response = self.rest_get(url)\n if response.status_code != HttpStatusCodes.SUCCESS_OK:\n raise RuntimeError('Could not get changes for branch {0}'.format(branch))\n\n data = json.loads(response.content)\n changes = data['values']\n for c in changes:\n files_changed.append(c['path']['toString'])\n\n if data['isLastPage']:\n break\n\n if not data['values']:\n break\n\n next_page_start = data['nextPageStart']\n\n return files_changed", "title": "" }, { "docid": "ac0c97da5dd5639138eb9d7fda6a90c6", "score": "0.4666202", "text": "def get_file_changeset(self, path):\n fctx = self._get_filectx(path)\n changeset = self.repository.get_changeset(fctx.linkrev())\n return changeset", "title": "" }, { "docid": "43019d4fc9effab2fe46c396b34f3dd4", "score": "0.46627426", "text": "def get_branches_info(self):\r\n\r\n rows = select(self.conn, \"\"\"\r\n select distinct v.FullPath, f.FileLength\r\n from tbl_Version v\r\n inner join tbl_File f on v.FileId = f.FileId\"\"\")\r\n\r\n # branch names, files outside of a branch (branch_extract hook)\r\n rowsWithBranchAndLocalPath = ((r, self.hooks.branch_extract(tfs_unmangle_path(r.FullPath))) for r in rows)\r\n rowsWithLocalPathByBranch = build_keyed_dict(rowsWithBranchAndLocalPath, lambda i: i[1][0], lambda i: (i[0], i[1][1]))\r\n\r\n unassigned = sorted({tfs_unmangle_path(i[0].FullPath) for i in rowsWithLocalPathByBranch[None]})\r\n rowsWithLocalPathByBranch.pop(None)\r\n\r\n names = sorted(rowsWithLocalPathByBranch.keys())\r\n\r\n # ignored files within a branch (file_filter hook)\r\n assigned_by_branch = {}\r\n ignored_by_branch = {}\r\n\r\n for branch in rowsWithLocalPathByBranch:\r\n rowsWithLocalPath = rowsWithLocalPathByBranch[branch]\r\n\r\n tmp = build_keyed_dict(rowsWithLocalPath, lambda i: (not i[1]) or self.hooks.file_filter(branch, i[1]))\r\n\r\n rowsWithLocalPathByBranch[branch] = tmp[True]\r\n\r\n assigned_by_branch[branch] = sorted({i[1] for i in tmp[True]})\r\n ignored_by_branch[branch] = sorted({i[1] for i in tmp[False]})\r\n\r\n # oversized files\r\n 
oversized_by_branch = {b:sorted({i[1] for i in items if i[0].FileLength > oversize_warning_limit}) for b, items in rowsWithLocalPathByBranch.items()}\r\n \r\n # done\r\n return BranchesInfo(names, unassigned, assigned_by_branch, ignored_by_branch, oversized_by_branch)", "title": "" }, { "docid": "ffcd04b8b4a8b8e9c12f701df1655541", "score": "0.46432602", "text": "def fetch_gh_repo_branch_file_path_recent_commits_details(self):\n filepaths = self.config.get('org.auditree.repo_integrity.filepaths')\n current_url = None\n github = None\n for repo_url, repo_branches in filepaths.items():\n parsed = urlparse(repo_url)\n base_url = f'{parsed.scheme}://{parsed.hostname}'\n repo = parsed.path.strip('/')\n for branch, repo_filepaths in repo_branches.items():\n for filepath in repo_filepaths:\n ev_file_prefix = f'{repo}_{branch}_{filepath}'.lower()\n for symbol in [' ', '/', '-', '.']:\n ev_file_prefix = ev_file_prefix.replace(symbol, '_')\n path = [\n 'auditree', f'gh_{ev_file_prefix}_recent_commits.json'\n ]\n if base_url != current_url:\n github = Github(self.config.creds, base_url)\n current_url = base_url\n self.config.add_evidences(\n [\n RepoCommitEvidence(\n path[1],\n path[0],\n DAY,\n (\n f'Github recent commits for {repo} repo '\n f'{branch} branch, {filepath} file path'\n )\n )\n ]\n )\n joined_path = os.path.join(*path)\n with raw_evidence(self.locker, joined_path) as evidence:\n if evidence:\n meta = self.locker.get_evidence_metadata(\n evidence.path\n )\n if meta is None:\n meta = {}\n utcnow = datetime.utcnow()\n now = utcnow.strftime(LOCKER_DTTM_FORMAT)\n since = datetime.strptime(\n meta.get('last_update', now),\n LOCKER_DTTM_FORMAT\n )\n evidence.set_content(\n json.dumps(\n github.get_commit_details(\n repo, since, branch, filepath\n )\n )\n )", "title": "" }, { "docid": "eeaab5a61c1b82e02ebdfe7774673af2", "score": "0.46404654", "text": "def changelist(filepath):\n change_number = direct.opened(filepath)[0]\n return change_number['change']", "title": "" }, { "docid": "9ff215f3335c766277c7703ddcfbb949", "score": "0.46353608", "text": "def test_repo_get_all_commits(self):\n pass", "title": "" }, { "docid": "be17b508a3045448dd3bb3d7e86ac331", "score": "0.46075422", "text": "def get_config(self, commit_identifier=None):\n if not commit_identifier: # Current working directory\n filename = self.get_config_filename()\n with open(filename, 'r') as infp:\n return self._parse_config(infp, filename)\n else: # Arbitrary commit\n filename = '{}:valohai.yaml'.format(commit_identifier)\n config_bytes = get_file_at_commit(self.directory, commit_identifier, 'valohai.yaml')\n config_sio = six.StringIO(config_bytes.decode('utf-8'))\n return self._parse_config(config_sio, filename)", "title": "" }, { "docid": "a0484cbbe4024773e8f5cf042eaeb9f9", "score": "0.4589766", "text": "def config_read():\n\twith open(CONFIG_FILE, 'r') as cnf_file:\n\t\treturn json.load(cnf_file)", "title": "" }, { "docid": "0d6ceddf48cd4a52a450d8f1f010584b", "score": "0.45894402", "text": "def read_obc(self, obcfile):\n # ----------------------------------------------\n # open and read the \"_obc.dat\" file\n fobc = open(obcfile, 'r')\n obclines = fobc.readlines()\n fobc.close()\n\n # ----------------------------------------------\n # range the obc data\n\n # the first line has obc_number\n obcnum = int(obclines[0].strip().split('=')[1])\n\n # Check if the obc_number and data lines is match.\n if (obcnum + 1) != len(obclines):\n print(\"Error: The '_obc.dat' file 'OBC Node Number' is not match the file data lines!\")\n 
sys.exit()\n\n OBIDlist = []\n OBtypelist = []\n for k in range(1, obcnum + 1):\n if len(obclines[k].strip().split()) != 3:\n print(\"Error: The obc.dat file has wrong format data , the data is not 3 class !\")\n\n OBID = int(obclines[k].strip().split()[1])\n if OBID < 1:\n print(\"Error: The OBC_Node_ID is wrong (<1) !\")\n\n OBtype = int(obclines[k].strip().split()[2])\n if OBtype < 1 and OBtype > 10:\n print(\"Error: The OBC_Node_type is wrong ,It is must be greater than 0 and less than 11!\")\n\n OBIDlist.append(OBID)\n OBtypelist.append(OBtype)\n\n # to np.array\n\n OBIDarray = np.array(OBIDlist, dtype=int)\n OBtypearray = np.array(OBtypelist, dtype=int)\n\n print(\" FVCOM '_obc' file has been read successfully! \")\n print((\" PATH: \" + obcfile))\n return (OBIDarray, OBtypearray)", "title": "" }, { "docid": "46138639f6bfd4629e95ff9c6955c18e", "score": "0.45796856", "text": "def extract_changelog(changes_file, component, pool_dir):\n global config, options\n\n extract_dir = '/tmp/changelog_extract'\n control_file = DebianControlFile(changes_file)\n name = control_file['Source']\n name_version = name + '_' + control_file.version()\n if name.startswith('lib'):\n prefix = name[:4]\n else:\n prefix = name[0]\n\n pool_dir = join(options.output_dir, 'pool', \\\n component, prefix, name)\n dirname = os.path.dirname(changes_file)\n if changes_file.endswith('_source.changes'): # Really extract\n if os.path.isdir(extract_dir):\n shutil.rmtree(extract_dir)\n for file in control_file.files_list():\n if file.name.endswith('.dsc'):\n (rc, output) = commands.getstatusoutput('dpkg-source -x %s %s' % \\\n (join(pool_dir, file.name), extract_dir))\n if rc <> 0 or not os.path.isdir(extract_dir):\n Log.print_(output)\n Log.print_(\"Unable to extract source to retrieve changelog\")\n else:\n extacted_changelog = os.path.join(extract_dir, 'debian', 'changelog')\n if not exists(extacted_changelog):\n Log.print_(\"Unable to find changelog on source\")\n if not os.path.exists(pool_dir):\n os.makedirs(pool_dir, 0755)\n print pool_dir\n changelog_fn = join(pool_dir, os.path.basename(changes_file).rsplit('.',1)[0]+'.changelog')\n shutil.copy(extacted_changelog, changelog_fn)\n if os.path.isdir(extract_dir):\n shutil.rmtree(extract_dir)\n else: # binary build .changes, create a link to the corresponding source\n files = control_file.files_list()\n for file in files:\n if file.name.endswith('.deb'):\n try:\n os.symlink(name_version+\"_source.changelog\", \\\n join(pool_dir, file.name.rsplit('.', 1)[0]+'.changelog'))\n except OSError: # Already exists ?\n pass", "title": "" }, { "docid": "43db9c4747b21dc4b043f18bf55357e2", "score": "0.4579115", "text": "def getchanges(self, version, full):\n raise NotImplementedError", "title": "" }, { "docid": "fd5fbdda99737b0448a39e8ee9579725", "score": "0.4569693", "text": "def read_config(self,confile):\n\n\n print(\"reading:\",confile)\n with open(confile) as parf:\n data=yaml.load(parf)\n\n\n return data", "title": "" }, { "docid": "f3959d7391e51a93314a319039934ce0", "score": "0.45593622", "text": "def get_changed(base, pull):\n diff = check_output(\n 'git', 'diff', '--name-only',\n '--diff-filter=d', '%s...%s' % (base, pull))\n return check_output(\n 'bazel', 'query',\n '--noshow_progress',\n 'set(%s)' % diff).split('\\n')", "title": "" }, { "docid": "b6db1b46fd024be17015af8e48ed70d7", "score": "0.4559063", "text": "def _get_remote_refs(self):\n return frozenset([line[2:].strip() for line in self._do(['branch', '-r'], as_lines=True)])", "title": "" }, { "docid": 
"3023c3d7087f6cedf807ab5c02fd1970", "score": "0.45540303", "text": "def format_changes_as_diff(self):\n self.changes = bf.changes_to_diff(self.infile1only, self.infile2only)\n return None", "title": "" }, { "docid": "d31034ec48d7422e35d01ca0047223ce", "score": "0.4540112", "text": "def get_file_history(self, path):\n fctx = self._get_filectx(path)\n nodes = [fctx.filectx(x).node() for x in fctx.filelog()]\n changesets = [self.repository.get_changeset(hex(node))\n for node in reversed(nodes)]\n return changesets", "title": "" }, { "docid": "3c9f5e25da4d0c7d52a36d64b8eb9f00", "score": "0.45359874", "text": "def get_bbc_config(self):\n dat = self.make_message_structure(None, MsgType.REQUEST_GET_CONFIG)\n return self.send_msg(dat)", "title": "" }, { "docid": "6594525082758959ce9d17a274568280", "score": "0.45294586", "text": "def main(argv):\n args = process_args(argv)\n\n if args.verbose:\n logging.basicConfig(level=logging.INFO)\n logging.info(\"Verbose on\")\n\n if args.cmd == 'blank-config':\n print blank_config\n sys.exit(0)\n\n config = parse_config(args.cf)\n if args.cmd == 'list-repos':\n pretty = json.dumps(config[\"repos\"], sort_keys=True,\n indent=4, separators=(',', ': '))\n print pretty\n exit(0)\n if not \"label\" in args:\n raise Exception(\"You must choose a repo from config.\")\n repos = [r for r in config[\"repos\"] if r[\"label\"] == args.label]\n if not repos:\n raise Exception(\"You must specify a valid repo.\")\n\n\n\n repo = repos[0]\n\n if args.cmd in (\"update-pulls\", \"init-pulls\"):\n # TODO: This validation belongs with process_args.\n gh_owner = repo[\"github_owner\"]\n gh_repo = repo[\"github_repo\"]\n gh_user = config[\"credentials\"][\"github_personal_access_token\"]\n db_file = config[\"paths\"][\"database\"]\n\n if args.cmd == \"update-pulls\":\n lib.operations.update_pulls(db_file, gh_user, gh_owner, gh_repo)\n else:\n lib.operations.init_pulls(db_file, gh_user, gh_owner, gh_repo)\n elif args.cmd == \"list-merge-commits\":\n commits = lib.grvgit.get_merge_commits(repo[\"git_repo_dir\"], repo[\"branch\"])\n for c in commits:\n print \"%s, %s, %s\" % (c.hexsha, c.parents, c.time)\n elif args.cmd == \"list-direct-commits\":\n commits = lib.grvgit.get_direct_commits(repo[\"git_repo_dir\"], repo[\"branch\"])\n for c in commits:\n date = datetime.datetime.fromtimestamp(float(c.time))\n print \"%s, %s, %s, %s, %s\" % (c.hexsha, c.parents, date, c.author, c.email)\n elif args.cmd == \"list-all-commits\":\n commits = lib.grvgit.get_all_commits(repo[\"git_repo_dir\"], repo[\"branch\"])\n for c in commits:\n date = datetime.datetime.fromtimestamp(float(c.time))\n print \"%s, %s, %s, %s, %s\" % (c.hexsha, c.parents, date, c.author, c.email)\n elif args.cmd == \"update-repo\":\n print lib.grvgit.update(repo[\"git_repo_dir\"], repo[\"branch\"])\n elif args.cmd == \"list-pulls\":\n pullsdb = lib.grvdb.Pulls(config[\"paths\"][\"database\"])\n all_pulls = pullsdb.readall()\n for pull in all_pulls:\n print pull.base_sha, pull.head_sha, pull.pull_requester, pull.pull_reviewer\n elif args.cmd == \"report-all\":\n gh_owner = repo[\"github_owner\"]\n gh_repo = repo[\"github_repo\"]\n db_file = config[\"paths\"][\"database\"]\n result = lib.operations.report_all(db_file, repo[\"git_repo_dir\"], repo[\"branch\"])\n print_commits(result, args.since)\n elif args.cmd == \"list-violations\":\n gh_owner = repo[\"github_owner\"]\n gh_repo = repo[\"github_repo\"]\n db_file = config[\"paths\"][\"database\"]\n result = lib.operations.report_all(db_file, repo[\"git_repo_dir\"], 
repo[\"branch\"])\n result = [x for x in result if not x.pr_reviewer]\n print_commits(result, args.since)", "title": "" }, { "docid": "0caae2028a9496374b19856de921a178", "score": "0.45205992", "text": "def read_config(): # pragma: no cover\n return {\n \"github_username\": get_env(\"BF_GITHUB_USERNAME\"),\n \"github_password\": get_env(\"BF_GITHUB_PASSWORD\"),\n \"repo_username\": get_env(\"BF_REPO_USERNAME\", raise_exception=True),\n \"repo_id\": get_env(\"BF_REPO_ID\", raise_exception=True),\n \"branch_id\": get_env(\"BF_BRANCH_ID\", raise_exception=True)\n }", "title": "" }, { "docid": "ad859e0907ce6ea8388f1bfde14b78c3", "score": "0.451767", "text": "def test_repo_get_branch(self):\n pass", "title": "" }, { "docid": "172b2447c01cb1379a91635beb3ed6b6", "score": "0.4508488", "text": "def changes_command(args):\n changes = list_changes(args.project_id, args.name)\n for change in changes:\n print(change)", "title": "" }, { "docid": "ccef7cdcfefcc63d89eb18826c4038a4", "score": "0.4505311", "text": "def read(self):\n\n # Add options from config file.\n print self._config.get_all()\n for id, (val, type) in self._config.get_all().items():\n if type == 'src' and not self.check(id, val): # Don't use wrong paths\n log.warning(_('idg.options.not.valid.use.default') + id +\\\n \" \" + val)\n continue\n self._opts[id] = [val, type]\n\n dom = self._config.dom()\n if dom is None:\n log.error(_('idg.options.cant.parse.config.file') +\\\n self._config.path())\n return\n else:\n log.info(_('idg.options.using.config.file') + self._config.path())", "title": "" }, { "docid": "1418d0ba1f65e8d7c5de7eb61c983269", "score": "0.44924036", "text": "def changesets(self):\r\n\r\n # TODO: \"MayHaveMerges\" could be more precise, but this would mean duplication of logic (maintenance, correctness)\r\n # and also layering conflicts\r\n csrows = select(self.conn, \"\"\"\r\n select \r\n cs.*, \r\n case when exists(select null from tbl_MergeHistory mh where mh.TargetVersionFrom = cs.ChangeSetId) then 1 else 0 end as MayHaveMerges\r\n from tbl_ChangeSet cs\r\n where cs.Comment != ?\r\n order by cs.ChangeSetId\"\"\", \r\n ['All of the changes in this changeset have been destroyed.'])\r\n\r\n for csrow in csrows:\r\n filerowRelpathsByBranch = Changeset.filerowsRelpathsByBranch(csrow.ChangeSetId, self.conn, self.hooks)\r\n mergerowsByTargetBranch = Changeset.mergeRowsByTargetBranch(csrow.ChangeSetId, self.conn, self.hooks) if csrow.MayHaveMerges else {}\r\n\r\n for branch, filerowsRelpaths in filerowRelpathsByBranch.items():\r\n yield Changeset(self.conn, self.tempdir, self.hooks, \r\n csrow.ChangeSetId, \r\n self.get_user(csrow.OwnerId), \r\n tfs_unmangle_timestamp(csrow.CreationDate),\r\n csrow.Comment, \r\n self.get_user(csrow.CommitterId), \r\n branch, \r\n filerowsRelpaths, \r\n mergerowsByTargetBranch.get(branch, []))", "title": "" }, { "docid": "7d15f1aaa5b5a0d6f23b8aa0ae019895", "score": "0.44922706", "text": "def get_commits_range(self, pull_content):\n pattern = r'^Updating (\\w{7})\\.\\.(\\w{7})'\n\n for one_line in pull_content.split('\\n'):\n match = re.match(pattern, one_line)\n if match:\n start_commit = match.group(1)\n end_commit = match.group(2)\n return start_commit, end_commit\n\n return None, None", "title": "" }, { "docid": "96ad23df8971390030d492f99d1ea627", "score": "0.44842315", "text": "def get_conn_graph_changed_conns(self, key, ip_blocks, is_added):\n old_peers = self.config1.peer_container.get_all_peers_group()\n new_peers = self.config2.peer_container.get_all_peers_group()\n allowed_labels = 
self.config1.allowed_labels.union(self.config2.allowed_labels)\n topology_peers = new_peers | ip_blocks if is_added else old_peers | ip_blocks\n updated_key = key.replace(\"Changed\", \"Added\") if is_added else key.replace(\"Changed\", \"Removed\")\n if self.output_config.queryName:\n query_name = 'semantic_diff, config1: ' + self.config1.name + ', config2: ' + self.config2.name + ', key: ' + updated_key\n else:\n # omit the query name prefix if self.output_config.queryName is empty (single query from command line)\n query_name = updated_key\n output_config = OutputConfiguration(self.output_config, query_name)\n is_k8s_config = self.config1.type == NetworkConfig.ConfigType.K8s\n return ConnectivityGraph(topology_peers, allowed_labels, output_config, is_k8s_config)", "title": "" }, { "docid": "91149b7af6b087b5dd7ad84356b0b2d8", "score": "0.44810995", "text": "def _get_outgoing_changesets(self, current_branch, remote):\n\n # We must handle the special case where there are no outgoing commits\n # as mercurial has a non-zero return value in this case.\n outgoing_changesets = []\n raw_outgoing = execute(['hg', '-q', 'outgoing', '--template',\n 'b:{branches}\\nr:{rev}\\n\\n', remote],\n env=self._hg_env,\n extra_ignore_errors=(1,))\n\n for pair in raw_outgoing.split('\\n\\n'):\n if not pair.strip():\n continue\n\n # Ignore warning messages that hg might put in, such as\n # \"warning: certificate for foo can't be verified (Python too old)\"\n branch, rev = [l for l in pair.strip().split('\\n')\n if not l.startswith('warning: ')]\n\n branch_name = branch[len('b:'):].strip()\n branch_name = branch_name or 'default'\n revno = rev[len('r:'):]\n\n if branch_name == current_branch and revno.isdigit():\n logging.debug('Found outgoing changeset %s for branch %r'\n % (revno, branch_name))\n outgoing_changesets.append(int(revno))\n\n return outgoing_changesets", "title": "" }, { "docid": "6ddb97e1becf17f356c4af32f3dec91b", "score": "0.44736627", "text": "def read(self):\r\n if not os.path.exists(self._filename):\r\n return\r\n f = GitFile(self._filename, 'rb')\r\n try:\r\n f = SHA1Reader(f)\r\n for x in read_index(f):\r\n self[x[0]] = tuple(x[1:])\r\n # FIXME: Additional data?\r\n f.read(os.path.getsize(self._filename)-f.tell()-20)\r\n f.check_sha()\r\n finally:\r\n f.close()", "title": "" }, { "docid": "a9b131febb52f911c74040df50f37382", "score": "0.44639271", "text": "def changes(self, email):\n # suffix = \"/changes/?q=owner:\\\"{}\\\"&o=MESSAGES\".format(email) # gets the messages like jenkins builds info\n # suffix = \"/changes/?q=owner:\\\"{}\\\"&o=COMMIT_FOOTERS\".format(email)\n # suffix = \"/changes/?q=owner:\\\"{}\\\"&o=DETAILED_ACCOUNTS\".format(email) # gets the owner full details\n # suffix = \"/changes/?q=owner:\\\"{}\\\"&o=ALL_COMMITS\".format(email)\n suffix = \"/changes/?q=owner:\\\"{}\\\"&o=ALL_REVISIONS&o=COMMIT_FOOTERS\".format(email)\n\n # suffix = \"/changes/?q=owner:\\\"{}\\\"\".format(email)\n # COMMIT_FOOTERS\n data = self._get(url=\"{}{}\".format(self.url, suffix))\n result = []\n if data is not None:\n for item in data:\n result.append(ChangeInfo(**item))\n\n return result", "title": "" }, { "docid": "5214455f9ee1375c5c8c3d43f4c8863a", "score": "0.44637057", "text": "def _get_dependencies(pkgbuild):\n\n print(f\"Getting all dependencies within PKGBUILD file\")\n dependencies = []\n\n within_depends = False\n for line in pkgbuild.split('\\n'):\n\n # Remove any unnecessary whitespace\n line = line.strip()\n\n # Search until we find depends\n if not within_depends and 
line.startswith('depends'):\n within_depends = True\n continue\n\n # Extract the packages\n if within_depends and line != ')':\n # Remove comments\n pkgs = [pkg for pkg in re.sub('#.*', '', line).strip().split(' ')\n if len(pkg) > 0]\n dependencies.extend(pkgs)\n\n # Continue until the closing bracket\n if within_depends and line == ')':\n within_depends = False\n\n print(f\"Pulled {len(dependencies)} dependencies\")\n return dependencies", "title": "" }, { "docid": "317bb5fff41087f4e54a7326653f39cb", "score": "0.44526353", "text": "def get_commits(\n self,\n repository: Repository,\n branch: Optional[str] = None,\n start: Optional[str] = None,\n ) -> Sequence[Commit]:\n raise NotImplementedError", "title": "" }, { "docid": "60eaa2e80d786dd634d3138df18d521a", "score": "0.44515845", "text": "def get_changes(self) -> pd.DataFrame:\n logger.info(\"get companies changes......\")\n res = []\n for _url in self._get_change_notices_url():\n _df = self._read_change_from_url(_url)\n if not _df.empty:\n res.append(_df)\n logger.info(\"get companies changes finish\")\n return pd.concat(res, sort=False)", "title": "" }, { "docid": "82070c486f2b789970365ab294cb3f5e", "score": "0.44485202", "text": "def read_cags(self):\n df = pd.read_hdf(self.hdf5_fp,\"/abund/CAGs\")\n self.cags_df = df", "title": "" }, { "docid": "33388ce377fff1dbee6f5220dbc9d28a", "score": "0.44459587", "text": "def get_branch_commits(self, branch):\n raise NotImplementedError()", "title": "" }, { "docid": "73092d9f9116bc4c85e7f61b7c8d0709", "score": "0.44455093", "text": "def read_file(self):\n with open(self.fName, 'r') as f:\n datLines = []\n start = False\n for line in f:\n line = line.strip()\n while not start:\n if line == '---cmdGraph---':\n start = True\n line = ''\n continue\n if line == '':\n continue\n else:\n datLines.append(line)\n if not start:\n print(\"Not recognised as a cmdGraph file.\")\n else:\n self.dat = datLines\n return self", "title": "" }, { "docid": "1e55d4e4b66ab7d8bb1f8edee9c911e1", "score": "0.44433376", "text": "def api_get_commits(self, repo_name, branch_name, start=None):\n url = self._get_commits_api_url(repo_name, branch_name=branch_name)\n\n if start is not None:\n url = '%s?start=%s' % (url, start)\n\n return self.http_get(url).json", "title": "" }, { "docid": "f791340f2b36ad61f1f63da24ee832da", "score": "0.44354007", "text": "def _read_config(self):\n self._key_map = {}\n self._children = []\n root_dct = self.root.get_dict()\n base_directory = os.path.dirname(self.root.file)\n for section, contents in root_dct.items():\n # find all !includedir lines, add configuration to self._children and self._sectionmap\n if section.startswith('!includedir'):\n relative_directory = section.split(' ', 1)[1]\n directory = os.path.abspath(os.path.join(base_directory, relative_directory))\n # include all files in the directory\n for filename in iglob(os.path.join(directory, '*.cnf')):\n # order is not guaranteed, according to mysql docs\n # parse every file, return parsing result\n self._read_child_config(filename)\n elif section.startswith('!'):\n raise NotImplementedError()", "title": "" }, { "docid": "a36e37a010721e7efc87100270de7783", "score": "0.4422655", "text": "def test_new_repo_branch_commits(self):\n branches = self.config.get('org.auditree.repo_integrity.branches')\n for repo_url, repo_branches in branches.items():\n parsed = urlparse(repo_url)\n service = 'gh'\n if 'gitlab' in parsed.hostname:\n service = 'gl'\n elif 'bitbucket' in parsed.hostname:\n service = 'bb'\n repo = parsed.path.strip('/')\n for 
repo_branch in repo_branches:\n # If included, skip check on the evidence locker\n if (repo_url == self.locker.repo_url\n and repo_branch == self.locker.branch):\n continue\n filename = [\n service,\n repo.lower().replace('/', '_').replace('-', '_'),\n repo_branch.lower().replace('-', '_'),\n 'recent_commits.json'\n ]\n path = f'raw/auditree/{\"_\".join(filename)}'\n with evidences(self, path) as raw:\n commits = RepoCommitEvidence.from_evidence(raw)\n for commit in commits.author_info:\n commit['repo'] = repo_url\n commit['branch'] = repo_branch\n self.add_warnings('Recent Commits Found', commit)", "title": "" }, { "docid": "b171170b2e3c24664ce3912f1794eee0", "score": "0.44113678", "text": "def commits_log(self, obj1, obj2):\n return self._repo.iter_commits(rev='%(obj1)s..%(obj2)s' % {'obj1': obj1, 'obj2': obj2})", "title": "" }, { "docid": "d088abf53cac2cffb5d28eeb3aae369c", "score": "0.4405345", "text": "def read(self, params, file_name):\n self.branch_geometry = read_polydata(file_name)", "title": "" }, { "docid": "3423d5f979c6fb99490eac658cc4c99b", "score": "0.44005135", "text": "def read_BGC_data(self, bgc_record, bgc_filename_updated, id):\n \n bgc_sequence = bgc_record.seq._data\n \n # Collect relevant data (or what we believe might become relevant)\n PFAM_domain_data = [] \n PFAM_domains = [] \n feature_types =[] \n bgc_knownclusters = [] \n genes = []\n bgc_info = {}\n \n # Go through all features and look for the most relevant ones\n for i, feature in enumerate(bgc_record.features):\n feature_types.append(feature.type)\n \n if \"product\" in bgc_record.features[i].qualifiers: \n bgc_info = {}\n bgc_info[\"BGC type\"] = bgc_record.features[i].qualifiers[\"product\"][0]\n if \"probability\" in bgc_record.features[i].qualifiers: \n bgc_info[\"BGC type probability\"] = bgc_record.features[i].qualifiers[\"probability\"][0]\n else:\n bgc_info[\"BGC type probability\"] = 0\n \n if \"knownclusterblast\" in bgc_record.features[i].qualifiers: \n for m in range(0,len(bgc_record.features[i].qualifiers[\"knownclusterblast\"])):\n \n teststring = bgc_record.features[i].qualifiers[\"knownclusterblast\"][m]\n bgc_knownclusters.append([teststring.split(\"\\t\")[0][teststring.find(\"B\"):],\n [float(s) for s in re.findall(r'-?\\d+\\.?\\d*', teststring.split(\"\\t\")[1])][-1]\n ])\n \n # collect key genes (= CDS only?):\n if feature.type == \"CDS\":\n location = bgc_record.features[i].location\n features = []\n features.append(bgc_record.features[i].qualifiers[\"locus_tag\"][0])\n if \"location\" in bgc_record.features[i].qualifiers:\n features.append([location.nofuzzy_start, location.nofuzzy_end, location._strand],)\n else:\n features.append([])\n if \"note\" in bgc_record.features[i].qualifiers: \n features.append(bgc_record.features[i].qualifiers[\"note\"][0])\n else:\n features.append([])\n if \"sec_met\" in bgc_record.features[i].qualifiers:\n features.append(bgc_record.features[i].qualifiers[\"sec_met\"][0])\n else:\n features.append([])\n# bgc_record.features[i].qualifiers[\"translation\"][0]\n \n genes.append(features)\n \n # collect PFAM domains (and antiSMASH scores):\n if feature.type == \"PFAM_domain\":\n# if \"db_xref\" in feature.qualifiers:\n PFAM_domains.append(feature.qualifiers['db_xref'][0][6:])\n PFAM_domain_data.append([feature.qualifiers['db_xref'][0][6:],\n feature.qualifiers[\"evalue\"][0],\n feature.qualifiers[\"score\"][0],\n float(feature.qualifiers[\"note\"][1][27:])])\n \n self.id = id\n if \"BGC type\" not in bgc_info:\n bgc_info[\"BGC type\"] = \"unkown\"\n 
bgc_info[\"BGC type probability\"] = \"unkown\"\n print(\"Missing feature: bgc type.\" )\n self.bgc_type = (bgc_info[\"BGC type\"], bgc_info[\"BGC type probability\"])\n self.pfam_domains = PFAM_domains\n self.pfam_domain_data = PFAM_domain_data\n self.genes = genes\n self.sequences = bgc_sequence\n self.bgc_knownclusters = bgc_knownclusters", "title": "" }, { "docid": "e40fc26f90f587011a7420d65373f009", "score": "0.43983528", "text": "def getReposFromConfigFile(self, repofn, repo_age=None, validate=None):\n if repo_age is None:\n repo_age = os.stat(repofn)[8]\n \n confpp_obj = ConfigPreProcessor(repofn, vars=self.conf.yumvar)\n parser = ConfigParser()\n try:\n parser.readfp(confpp_obj)\n except ParsingError, e:\n raise Errors.ConfigError(exception2msg(e))\n\n # Check sections in the .repo file that was just slurped up\n for section in parser.sections():\n\n if section in ['main', 'installed']:\n continue\n\n # Check the repo.id against the valid chars\n bad = misc.validate_repoid(section)\n\n if bad:\n self.logger.warning(\"Bad id for repo: %s, byte = %s %d\" %\n (section, bad, section.find(bad)))\n continue\n\n try:\n thisrepo = self.readRepoConfig(parser, section)\n except (Errors.RepoError, Errors.ConfigError), e:\n self.logger.warning(e)\n continue\n else:\n thisrepo.repo_config_age = repo_age\n thisrepo.repofile = repofn\n\n thisrepo.base_persistdir = self.conf._repos_persistdir\n\n # do the wildcard ones first\n # The keys are in indeterminate order at this point, *sigh*.\n for i in sorted(self.repo_setopts):\n # Skip normal names, as we want to do wildcard matches first\n # and then override with specific id stuff.\n if not misc.re_glob(i):\n continue\n\n if fnmatch.fnmatch(thisrepo.id, i):\n for opt in self.repo_setopts[i].items:\n if not hasattr(thisrepo, opt):\n msg = \"Repo %s did not have a %s attr. before setopt\"\n self.logger.warning(msg % (thisrepo.id, opt))\n setattr(thisrepo, opt, getattr(self.repo_setopts[i], opt))\n \n if thisrepo.id in self.repo_setopts:\n for opt in self.repo_setopts[thisrepo.id].items:\n if not hasattr(thisrepo, opt):\n msg = \"Repo %s did not have a %s attr. before setopt\"\n self.logger.warning(msg % (thisrepo.id, opt))\n setattr(thisrepo, opt, getattr(self.repo_setopts[thisrepo.id], opt))\n \n if validate and not validate(thisrepo):\n continue\n \n if thisrepo.ssl_check_cert_permissions:\n for fn in (thisrepo.sslcacert,\n thisrepo.sslclientcert, thisrepo.sslclientkey):\n if not fn:\n continue\n # If we can't read the SSL certs. 
we need to skip the repo.\n # if we don't have all the data.\n if not os.access(fn, os.R_OK):\n msg=\"Repo %s forced skip_if_unavailable=True due to: %s\"\n if thisrepo.enabled:\n # Don't spam messages for disabled repos.\n self.logger.warning(msg % (thisrepo.id, fn))\n thisrepo.skip_if_unavailable = True\n\n # Got our list of repo objects, add them to the repos\n # collection\n try:\n self._repos.add(thisrepo)\n except Errors.RepoError, e:\n self.logger.warning(e)", "title": "" }, { "docid": "07f593e37c2f08c24ee3cbd302ac9965", "score": "0.43941355", "text": "def get_git_changeset():\n repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True,\n cwd=repo_dir,\n universal_newlines=True)\n\n timestamp = git_log.communicate()[0]\n try:\n timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))\n except ValueError: # pragma: nocover\n return None # pragma: nocover\n return timestamp.strftime('%Y%m%d%H%M%S')", "title": "" }, { "docid": "69127604ea5e2aa29bb92e43c1d422df", "score": "0.4390578", "text": "def getAllBranches(server,repo):\n branches=[]\n url=server+\"/repos/\"+repo+\"/branches\"\n res=conn.get(url)\n dicres=json.loads(res.text)\n for branch in dicres:\n branches.append((branch.get(\"name\"),branch.get(\"commit\").get(\"sha\")))\n return branches", "title": "" }, { "docid": "2f96c1cf185e1b98efaa00a47419a3ff", "score": "0.43904936", "text": "def get_changed_acceptance_test_config(diff_regex: Optional[str] = None) -> Set[str]:\n airbyte_repo = git.Repo(search_parent_directories=True)\n\n if diff_regex is None:\n diff_command_args = (\"--name-only\", DIFFED_BRANCH)\n else:\n diff_command_args = (\"--name-only\", f\"-G{diff_regex}\", DIFFED_BRANCH)\n\n changed_acceptance_test_config_paths = {\n file_path\n for file_path in airbyte_repo.git.diff(*diff_command_args).split(\"\\n\")\n if file_path.startswith(SOURCE_CONNECTOR_PATH_PREFIX) and file_path.endswith(ACCEPTANCE_TEST_CONFIG_FILE_NAME)\n }\n return {Connector(get_connector_name_from_path(changed_file)) for changed_file in changed_acceptance_test_config_paths}", "title": "" }, { "docid": "d4e511df200e1aa76d7408190b758c8e", "score": "0.43899542", "text": "def read_blockchain(self, path='/data'):\n info_path = path + '/info'\n data_path = path + '/data'\n\n self._wallet_file = open(f'{self.base_dir}/info/wallet', 'a+')\n self._data_file = open(\n f'{self.base_dir}/data/data-{self._index}', 'a+')\n self._read_metadata(info_path)\n self._read_wallet_pool_data(info_path)\n self._read_transaction_data(info_path)\n self._read_genesis_data(data_path)\n self._read_blocks_data(data_path)", "title": "" }, { "docid": "301a31744b2808eba9217edda011a14a", "score": "0.43889073", "text": "def main():\n\n parser = argparse.ArgumentParser(\n description='Determine potential missing commits'\n )\n parser.add_argument('-d', '--debug', action='store_true',\n help='Show additional information during run')\n parser.add_argument('-i', '--ignore', dest='ignore_file',\n help='File to store \"ignored\" commits',\n default='ignored_commits.txt')\n parser.add_argument('-m', '--merge', dest='merge_file',\n help='File to store \"merged\" projects info',\n default='merged_projects.txt')\n parser.add_argument('product', help='Product to check')\n parser.add_argument('old_manifest', help='Base manifest to check against')\n parser.add_argument('new_manifest', help='Current manifest to verify')\n 
parser.add_argument('--reporef_dir',\n help='Path to repo mirror reference directory')\n args = parser.parse_args()\n\n # Set up logging\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.DEBUG)\n\n ch = logging.StreamHandler()\n if not args.debug:\n ch.setLevel(logging.INFO)\n\n logger.addHandler(ch)\n\n # Read in 'ignored' commits\n # Form of each line is: '<project> <commit SHA>'\n ignored_commits = list()\n\n try:\n with open(args.ignore_file) as fh:\n for entry in fh.readlines():\n if entry.startswith('#'):\n continue # Skip comments\n\n try:\n project, commit = entry.split()\n except ValueError:\n logger.warning(f'Malformed line in ignored commits file, '\n f'skipping: {entry}')\n else:\n ignored_commits.append(commit)\n except FileNotFoundError:\n logger.warning(f'Ignored commits file, {args.ignore_file}, '\n f'not found. Continuing...')\n\n # Read in 'merged' projects information\n # Form of each line is: '<merged project> [<original project> [...]]'\n pre_merge = list()\n post_merge = list()\n merge_map = dict()\n\n try:\n with open(args.merge_file) as fh:\n for entry in fh.readlines():\n if entry.startswith('#'):\n continue # Skip comments\n\n try:\n post, *pre = entry.split()\n except ValueError:\n logger.warning(f'Empty line in merged projects file, '\n f'skipping')\n else:\n if pre:\n pre_merge.extend(pre)\n post_merge.append(post)\n merge_map[post] = pre\n else:\n logger.warning(f'Malformed line in merged projects '\n f'file, skipping: {entry}')\n except FileNotFoundError:\n logger.warning(f'Merged projects file, {args.merge_file}, '\n f'not found. Continuing...')\n\n # Setup file paths and search for missing commits\n product_dir = pathlib.Path(args.product)\n old_manifest = pathlib.Path(args.old_manifest)\n new_manifest = pathlib.Path(args.new_manifest)\n reporef_dir = pathlib.Path(args.reporef_dir)\n\n miss_comm = MissingCommits(\n logger, product_dir, old_manifest, new_manifest, reporef_dir,\n ignored_commits, pre_merge, post_merge, merge_map\n )\n miss_comm.determine_diffs()\n\n if miss_comm.missing_commits_found:\n sys.exit(1)\n else:\n print (\"\\n\\nNo missing commits discovered!\")", "title": "" }, { "docid": "4e284931847fb02aa2d63130981c9f2a", "score": "0.43856174", "text": "def GerritDependencies(self):\n return []", "title": "" }, { "docid": "5f299d3488b57a46cecc1a081e7e7933", "score": "0.43830147", "text": "def get_config_reader(filepath):\n return git.Repo(filepath).config_reader()", "title": "" }, { "docid": "c7ec23d759fac620994ea23f0c10be69", "score": "0.43740526", "text": "def deb_changelogs(new_snap, pkg_changes):\n # type: (str, Dict[str, Tuple[str, str]]) -> Dict[str, str]\n changelogs = {} # type: Dict[str, str]\n with tmpdir() as tmp:\n unsquashfs(tmp, new_snap, \"/usr/share/doc/*\")\n for name in pkg_changes:\n old_ver, new_ver = pkg_changes[name]\n # split of multi-arch tag\n fsname = name.split(\":\")[0]\n for chglogname in [\"changelog.Debian.gz\", \"changelog.gz\"]:\n changelog_path = os.path.join(\n tmp,\"usr/share/doc\", fsname, chglogname)\n if not os.path.exists(changelog_path):\n continue\n if not name in changelogs:\n changelogs[name] = \"\"\n changelogs[name] = changelog_until(changelog_path, old_ver)\n break\n return changelogs", "title": "" }, { "docid": "e12fff4982712c5f87a6f19d5fbbec9c", "score": "0.4361047", "text": "def read_commits_standalone(lines_file: str) -> set:\n with open(lines_file, 'r') as in_file:\n commits = set((x.strip(\"\\n\") for x in in_file.readlines()))\n commits.discard(\"\")\n if len(commits) == 0:\n 
logger.warning(f\"{lines_file} does contain commits\")\n return commits", "title": "" }, { "docid": "8f231961248b31fd8cc78dc459ace014", "score": "0.43529415", "text": "def read(self):\r\n self._config.read(self.filename)", "title": "" }, { "docid": "ca3fdfa234a3936a4affd90e7ef8cf8c", "score": "0.43480074", "text": "def view_config_changes(self):\n pass", "title": "" }, { "docid": "5f169d9438c4638c1bf14268bc998c51", "score": "0.43412545", "text": "def __readConfig(self):\r\n\r\n\t\tfr = open(self.__configFilePath, 'r')\r\n\t\t\r\n\r\n\t\tfor line in fr.readlines():\r\n\t\t\tline = line.strip()\r\n\t\t\tif line == \"\":\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\tif line[0] != '#': # ignore lines start by #\r\n\t\t\t\tsp = line.split('=')\r\n\t\t\t\tif len(sp) == 2:\r\n\t\t\t\t\tkey = sp[0].strip()\r\n\t\t\t\t\tval = sp[1].strip()\r\n\t\t\t\t\tself.__configDict[key] = val\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.__print(\"Ignore config line: \" + line)\r\n\r\n\t\tself.__print(\"Read configs from: %s\\n%d configs read!\" \\\r\n\t\t\t\t\t\t\t\t % (self.__configFilePath, len(self.__configDict)) \\\r\n\t\t\t\t\t\t\t\t)\r\n\r\n\t\tfr.close()", "title": "" }, { "docid": "637384ef6d4276b2b669f9b8c6ffab13", "score": "0.43391338", "text": "def read_repo(self):\n return self._init_github(self._gh, self.team_name, self.repo_name)", "title": "" }, { "docid": "5a881d56932621e928e8e5b95833d6de", "score": "0.4335908", "text": "def parse(self, headers, body):\n\n data = json.loads(body.decode())\n\n repository = str(data['repository']['full_name'])\n event = str(headers['X_EVENT_KEY']).replace(\"repo:\", \"\")\n\n branches = []\n if event in data:\n for change in data[event]['changes']:\n if change['new']['type'] == 'branch':\n branches.append(change['new']['name'])\n if change['old']['type'] == 'branch':\n branches.append(change['old']['name'])\n\n return repository, event, list(set(branches)), data", "title": "" }, { "docid": "d672cbcae4430bef334d922881487f1c", "score": "0.43358395", "text": "def PaladinDependencies(self, git_repo):\n dependencies = []\n logging.debug('Checking for CQ-DEPEND dependencies for change %s', self)\n\n # Only fetch the commit message if needed.\n if self.commit_message is None:\n self.Fetch(git_repo)\n\n try:\n dependencies = GetPaladinDeps(self.commit_message)\n except ValueError as e:\n raise BrokenCQDepends(self, str(e))\n\n if dependencies:\n logging.debug('Found %s Paladin dependencies for change %s',\n dependencies, self)\n return dependencies", "title": "" }, { "docid": "a878c9169515ff8595c78bb228190980", "score": "0.43349797", "text": "def describe_changes(cloudformation: Cloudformation, changeset_name: str) -> None:\n response = cloudformation.client.describe_change_set(\n ChangeSetName=changeset_name,\n StackName=cloudformation.stack_name,\n )\n print(\"StackName: %s\" % (cloudformation.stack_name))\n print(\"ChangeSetName: %s\" % (changeset_name))\n print(\"Changes:\")\n print(json.dumps(response[\"Changes\"], indent=4, default=str))", "title": "" }, { "docid": "9f273794562523b978129e950e35a8fe", "score": "0.4333734", "text": "def log(self):\n with open('commits/.log', 'r') as f:\n output = f.readlines()\n return output", "title": "" }, { "docid": "779572a15d1c74cbe88842fc74076e3b", "score": "0.43318227", "text": "def _GetConfigurationChanges(args):\n changes = []\n\n # FlagIsExplicitlySet can't be used here because args.image is also set from\n # code in deploy.py.\n if hasattr(args, 'image') and args.image is not None:\n 
changes.append(config_changes.ImageChange(args.image))\n\n changes.extend(_GetScalingChanges(args))\n if _HasEnvChanges(args):\n changes.append(_GetEnvChanges(args))\n\n if _HasCloudSQLChanges(args):\n region = GetRegion(args)\n project = getattr(\n args, 'project', None\n ) or properties.VALUES.core.project.Get(required=True)\n if _EnabledCloudSqlApiRequired(args):\n _CheckCloudSQLApiEnablement()\n changes.append(config_changes.CloudSQLChanges(project, region, args))\n\n if _HasSecretsChanges(args):\n changes.extend(_GetSecretsChanges(args))\n\n if _HasConfigMapsChanges(args):\n changes.extend(_GetConfigMapsChanges(args))\n\n if 'cpu' in args and args.cpu:\n changes.append(config_changes.ResourceChanges(cpu=args.cpu))\n if 'memory' in args and args.memory:\n changes.append(config_changes.ResourceChanges(memory=args.memory))\n if 'service_account' in args and args.service_account:\n changes.append(\n config_changes.ServiceAccountChanges(\n service_account=args.service_account\n )\n )\n if _HasLabelChanges(args):\n additions = (\n args.labels\n if FlagIsExplicitlySet(args, 'labels')\n else args.update_labels\n )\n diff = labels_util.Diff(\n additions=additions,\n subtractions=args.remove_labels if 'remove_labels' in args else [],\n clear=args.clear_labels if 'clear_labels' in args else False,\n )\n if diff.MayHaveUpdates():\n changes.append(config_changes.LabelChanges(diff))\n if 'vpc_connector' in args and args.vpc_connector:\n changes.append(config_changes.VpcConnectorChange(args.vpc_connector))\n if FlagIsExplicitlySet(args, 'vpc_egress'):\n changes.append(\n config_changes.SetTemplateAnnotationChange(\n container_resource.EGRESS_SETTINGS_ANNOTATION, args.vpc_egress\n )\n )\n if 'clear_vpc_connector' in args and args.clear_vpc_connector:\n # MUST be after 'vpc_egress' change.\n changes.append(config_changes.ClearVpcConnectorChange())\n if 'command' in args and args.command is not None:\n # Allow passing an empty string here to reset the field\n changes.append(config_changes.ContainerCommandChange(args.command))\n if 'args' in args and args.args is not None:\n # Allow passing an empty string here to reset the field\n changes.append(config_changes.ContainerArgsChange(args.args))\n if FlagIsExplicitlySet(args, 'binary_authorization'):\n changes.append(\n config_changes.SetAnnotationChange(\n k8s_object.BINAUTHZ_POLICY_ANNOTATION, args.binary_authorization\n )\n )\n if FlagIsExplicitlySet(args, 'clear_binary_authorization'):\n changes.append(\n config_changes.DeleteAnnotationChange(\n k8s_object.BINAUTHZ_POLICY_ANNOTATION\n )\n )\n if FlagIsExplicitlySet(args, 'breakglass'):\n changes.append(\n config_changes.SetAnnotationChange(\n k8s_object.BINAUTHZ_BREAKGLASS_ANNOTATION, args.breakglass\n )\n )\n if FlagIsExplicitlySet(args, 'key'):\n changes.append(\n config_changes.SetTemplateAnnotationChange(\n container_resource.CMEK_KEY_ANNOTATION, args.key\n )\n )\n if FlagIsExplicitlySet(args, 'post_key_revocation_action_type'):\n changes.append(\n config_changes.SetTemplateAnnotationChange(\n container_resource.POST_CMEK_KEY_REVOCATION_ACTION_TYPE_ANNOTATION,\n args.post_key_revocation_action_type,\n )\n )\n if FlagIsExplicitlySet(args, 'encryption_key_shutdown_hours'):\n changes.append(\n config_changes.SetTemplateAnnotationChange(\n container_resource.ENCRYPTION_KEY_SHUTDOWN_HOURS_ANNOTATION,\n args.encryption_key_shutdown_hours,\n )\n )\n if FlagIsExplicitlySet(args, 'clear_key'):\n changes.append(\n config_changes.DeleteTemplateAnnotationChange(\n container_resource.CMEK_KEY_ANNOTATION\n )\n 
)\n changes.append(\n config_changes.DeleteTemplateAnnotationChange(\n container_resource.POST_CMEK_KEY_REVOCATION_ACTION_TYPE_ANNOTATION\n )\n )\n changes.append(\n config_changes.DeleteTemplateAnnotationChange(\n container_resource.ENCRYPTION_KEY_SHUTDOWN_HOURS_ANNOTATION\n )\n )\n if FlagIsExplicitlySet(args, 'clear_post_key_revocation_action_type'):\n changes.append(\n config_changes.DeleteTemplateAnnotationChange(\n container_resource.POST_CMEK_KEY_REVOCATION_ACTION_TYPE_ANNOTATION\n )\n )\n changes.append(\n config_changes.DeleteTemplateAnnotationChange(\n container_resource.ENCRYPTION_KEY_SHUTDOWN_HOURS_ANNOTATION\n )\n )\n if FlagIsExplicitlySet(args, 'clear_encryption_key_shutdown_hours'):\n changes.append(\n config_changes.DeleteTemplateAnnotationChange(\n container_resource.ENCRYPTION_KEY_SHUTDOWN_HOURS_ANNOTATION\n )\n )\n if FlagIsExplicitlySet(args, 'description'):\n changes.append(\n config_changes.SetAnnotationChange(\n k8s_object.DESCRIPTION_ANNOTATION, args.description\n )\n )\n if 'execution_environment' in args and args.execution_environment:\n changes.append(config_changes.SandboxChange(args.execution_environment))\n if (\n FlagIsExplicitlySet(args, 'network')\n or FlagIsExplicitlySet(args, 'subnet')\n or FlagIsExplicitlySet(args, 'network_tags')\n or FlagIsExplicitlySet(args, 'clear_network_tags')\n ):\n network_tags_is_set = FlagIsExplicitlySet(args, 'clear_network_tags')\n network_tags = None\n if FlagIsExplicitlySet(args, 'network_tags'):\n network_tags_is_set = True\n network_tags = args.network_tags\n changes.append(\n config_changes.NetworkInterfacesChange(\n FlagIsExplicitlySet(args, 'network'),\n args.network,\n FlagIsExplicitlySet(args, 'subnet'),\n args.subnet,\n network_tags_is_set,\n network_tags,\n )\n )\n if 'clear_network' in args and args.clear_network:\n # MUST be after 'vpc_egress' change.\n changes.append(config_changes.ClearNetworkInterfacesChange())\n if _HasCustomAudiencesChanges(args):\n changes.append(config_changes.CustomAudiencesChanges(args))\n return changes", "title": "" }, { "docid": "39753ac1bd06c5daffed3b21a6a0fb23", "score": "0.4321164", "text": "def _read_config(self):\n if os.path.exists(self._devrc):\n self._config.read(self._devrc)", "title": "" }, { "docid": "7a7687bedfb407bc1d4c6835e1df101f", "score": "0.43183368", "text": "def test_repo_list_branches(self):\n pass", "title": "" }, { "docid": "99558608c65733316ee05558fef9585a", "score": "0.43160215", "text": "async def branch(self, branch):\n\n\t\ttry:\n\t\t\tlist = open(\"cogs/njc/dirTag.csv\")\n\t\t\treader = csv.reader(list,delimiter=\"\t\")\n\t\t\tline = []\n\t\texcept:\n\t\t\tawait self.bot.say(\"I couldn't find the branch information file.\")\n\t\t\treturn\n\n\t\ttry: # GETS INFO FROM FILE\n\t\t\tfor row in reader:\n\t\t\t\tif str(row[0]) == branch:\n\t\t\t\t\tline = row\n\t\t\t\t\tdata = discord.Embed(title=\"Branch Information for `{}`\".format(branch),colour=discord.Colour(value=15801115))\n\n\n\t\t\t\t\ttry:\n# ROUTE\n\t\t\t\t\t\tif line[1] == \"\":\n\t\t\t\t\t\t\tdata.add_field(name=\"Route:\", value=\"undefined\",inline='false')\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tdata.add_field(name=\"Route:\", value=line[1],inline='false')\n\n# STARTS\n\t\t\t\t\t\tif line[2] == \"\":\n\t\t\t\t\t\t\tdata.add_field(name=\"Starts from:\", value=\"undefined\",inline='false')\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tdata.add_field(name=\"Starts from:\", value=line[2],inline='false')\n\n# ENDS\n\t\t\t\t\t\tif line[3] == \"\":\n\t\t\t\t\t\t\tdata.add_field(name=\"Ends at:\", 
value=\"undefined\",inline='false')\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tdata.add_field(name=\"Ends at:\", value=line[3],inline='false')\n\n# BRANCHES\n\t\t\t\t\t\tif line[4] == \"\":\n\t\t\t\t\t\t\tdata.add_field(name=\"Sign:\", value=\"undefined\",inline='false')\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tdata.add_field(name=\"Sign:\", value=\"{}\".format(line[4]),inline='false')\n\n# NOTES\n\t\t\t\t\t\tif line[5] == \"\":\n\t\t\t\t\t\t\tdata.add_field(name=\"Notes:\", value=\"undefined\",inline='false')\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tdata.add_field(name=\"Notes:\", value=\"{}\".format(line[5]),inline='false')\n\n# Division\n\t\t\t\t\t\tif line[6] == \"\":\n\t\t\t\t\t\t\tdata.add_field(name=\"Branch divisions:\", value=\"undefined\",inline='false')\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tdata.add_field(name=\"Branch divisions:\", value=\"{}\".format(line[6]),inline='false')\n\n# Long Description\n\t\t\t\t\t\tif line[7] == \"\":\n\t\t\t\t\t\t\tdata.add_field(name=\"Long description:\", value=\"Not available.\",inline='false')\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tdata.add_field(name=\"Long description:\", value=\"{}\".format(line[7]),inline='false')\n\n\n\t\t\t\t\t\tdata.set_footer(text=\"Information last updated <future information>\")\n\n\t\t\t\t\texcept Exception as errer:\n\t\t\t\t\t\tawait self.bot.say(errer)\n\t\t\t\t\tawait self.bot.say(embed=data)\n\n\t\texcept Exception as errer:\n\t\t\tawait self.bot.say(errer)", "title": "" }, { "docid": "d45d09f36e273141704659ecdffe4c13", "score": "0.4312349", "text": "def fetch(self):\n\n response = self._connection.execute(\n 'GET',\n 'abapgit/repos',\n accept='application/abapgit.adt.repos.v2+xml'\n )\n\n root = ElementTree.fromstring(response.text)\n\n try:\n self._repo = next(repo for repo in list(root)\n if get_repo_text(repo, 'package') == self._package_name)\n except StopIteration as no_exist:\n raise KeyError(f'Repository for package {self._package_name} not found.') from no_exist", "title": "" }, { "docid": "338cb17ed6598d43ae7471d73c8a02e3", "score": "0.43071866", "text": "def get_commit_log(repo_path,b_name = None):\n if b_name == None:\n commits = []\n s = subprocess.check_output(\"cd %s; git log -1\" % repo_path, shell=True)\n r = re.compile(\"commit (.*?)\\n.*?Author: (.*?)\\n.*?Date:(.*?)\\n\\n(.*?)\\n\", re.M+re.S+re.U+re.I)\n \n matches = r.findall(s)\n for m in matches:\n commits.append(dict(commit_hash=m[0].strip(), author=m[1].strip(), datetime=m[2].strip(), message=m[3].strip()))\n\n return commits\n \n else:\n \n\tcommits = []\n s = subprocess.check_output(\"cd %s; git checkout %s; git log \" % (repo_path,b_name), shell=True)\n \n #r = re.compile(\"commit (.*?)\\n.*?Author: (.*?)\\n.*?Date:(.*?)\\n\\n(.*?)\\n\\n\", re.M+re.S+re.U+re.I)\n r = re.compile(\"commit (.*?)\\n.*?Author: (.*?)\\n.*?Date:(.*?)\\n\\n(.*?)\\n\", re.M+re.S+re.U+re.I)\n matches = r.findall(s)\n for m in matches:\n commits.append(dict(commit_hash=m[0].strip(), author=m[1].strip(), datetime=m[2].strip(), message=m[3].strip()))\n\n return commits", "title": "" }, { "docid": "748b2b3abd93b9353e6a372d76d0c75c", "score": "0.43024173", "text": "def branchinfo(self, rev):\n extra = self.read(rev)[5]\n return encoding.tolocal(extra.get(b\"branch\")), b'close' in extra", "title": "" }, { "docid": "643ff5b6826e514cac2a3abddfb7b1cb", "score": "0.43001357", "text": "def pull_all_data_dependencies(self):\n self._overwrite_dvc_config()\n\n # checkout dvc pull files according to git checkout\n subprocess.check_call([\"dvc\", \"pull\", \"-r\", self.remote_repo])\n 
logging.getLogger(__name__).info(\"Pulling right data version from remote dvc storage... \"\n \"Done\")", "title": "" }, { "docid": "27a2aa444cd1b935cfcc38e47b9c3107", "score": "0.42998862", "text": "def read_ricc2(in_name):\n with open(in_name) as data:\n lines = data.readlines()\n\n grad_x = []\n grad_y = []\n grad_z = []\n energy = None\n\n for line in lines:\n if \"Total energy of excited state:\" in line:\n energy = float(line.split()[5])\n if \"Final\" in line:\n scf_energy = float(line.split()[5])\n if line.strip():\n if line[0:2] == \"dE\":\n nums = [float(i.replace(\"D\", \"E\")) for i in line.split()[1:]]\n if line.split()[0] == \"dE/dx\":\n grad_x.extend(nums)\n if line.split()[0] == \"dE/dy\":\n grad_y.extend(nums)\n if line.split()[0] == \"dE/dz\":\n grad_z.extend(nums)\n grad = []\n\n # combine in correct format\n for dx, dy, dz in zip(grad_x, grad_y, grad_z):\n grad.append(dx)\n grad.append(dy)\n grad.append(dz)\n # for ground state\n if not energy:\n energy = scf_energy\n grad = np.array(grad)\n return energy, grad, scf_energy", "title": "" }, { "docid": "ab76a244f34d6fce2341ed3611726bb7", "score": "0.4294668", "text": "def read_dlcoal_recon(filename, stree,\n exts={\"coal_tree\": \".coal.tree\",\n \"coal_recon\": \".coal.recon\",\n \"locus_tree\": \".locus.tree\",\n \"locus_recon\": \".locus.recon\",\n \"daughters\": \".daughters\"\n },\n filenames={},\n check=True):\n\n recon = Recon()\n return recon.read(filename, stree,\n exts, filenames,\n check=check)", "title": "" }, { "docid": "56d02eb1554cd3a5f270129cc218bb13", "score": "0.42942363", "text": "def pull(self):\n self.LOGGER.info(f\"pulling changes from {self.repo.remotes.origin.url} -> Branch {self.repo.active_branch}\")\n return self.repo.remotes.origin.pull(refspec=self.repo.active_branch)", "title": "" }, { "docid": "af36af9a3e9671bc08d6019e3f9e6311", "score": "0.4293596", "text": "def get_comit_log(repo_path):\n \n commits = []\n #s = subprocess.check_output(\"cd %s; git checkout %s; git log\" % (repo_path,b_name), shell=True)\n s = subprocess.check_output(\"cd %s; git log \" % repo_path, shell=True)\n r = re.compile(\"commit (.*?)\\n.*?Author: (.*?)\\n.*?Date:(.*?)\\n\\n(.*?)\\n\", re.M+re.S+re.U+re.I)\n matches = r.findall(s)\n for m in matches:\n commits.append(dict(commit_hash=m[0].strip(), author=m[1].strip(), datetime=m[2].strip(), message=m[3].strip()))\n\n return commits", "title": "" }, { "docid": "430f108a69c0e267e7eda67d5f1a5599", "score": "0.42934918", "text": "def get_changes(self):\n\n results_change = []\n\n # use the service of the change api to retrieve the changes\n changes = self.change_api.get_changes(self.id)\n\n # put all the changes into the returned array\n for change in changes:\n results_change.append(self.Change(change))\n\n return results_change", "title": "" } ]
1e252dea36c1c96f1a91d2d3b7d0f701
convert grayscale annotation to inner bound
[ { "docid": "ad655c4f31bcbbd1fe7c5e371ebb8c14", "score": "0.5521034", "text": "def gray2innerbound(gray, width):\n h, w = gray.shape[:2]\n gray[gray == 76] = 255\n gray[gray == 151] = 255\n\n label = gray2mask(gray)\n label_binary = label_binarize(label.flatten(), classes=range(0, 3))\n label_binary = np.reshape(label_binary, (h, w, -1))\n\n tmp = ndimage.distance_transform_cdt(label_binary[:, :, 1], 'taxicab')\n inner_bound = np.logical_and(tmp >= 1, tmp <= width).astype(np.uint8)\n\n return inner_bound", "title": "" } ]
[ { "docid": "c72b82fd27377184306b8188a338c97e", "score": "0.6239534", "text": "def classify_image(self):", "title": "" }, { "docid": "109a085d249590784c5f23a9833de41f", "score": "0.6049097", "text": "def convert_to_mask(x_coord, y_coord, sample_name, class_name, ann_img):\n \n # coordinates must be joined to x-y pairs and into a numpy array \n # because thats how cv2's fillConvexPoly() wants them\n all_poly_coords = []\n for xy in zip(x_coord, y_coord):\n all_poly_coords.append(xy)\n all_poly_coords = np.array(all_poly_coords)\n \n # depending of the class of that region, paint it a different shade\n if class_name == \"branch\":\n cv2.fillConvexPoly(ann_img, all_poly_coords, 1)\n if class_name == \"box\":\n cv2.fillConvexPoly(ann_img, all_poly_coords, 2)\n if class_name == \"camera-bag\":\n cv2.fillConvexPoly(ann_img, all_poly_coords, 3)\n if class_name == \"tree\":\n cv2.fillConvexPoly(ann_img, all_poly_coords, 4)\n\n # return mask with added region\n return ann_img", "title": "" }, { "docid": "d87208e726028cb48b952a8a64d6eba3", "score": "0.5976198", "text": "def gray_image(self):\r\n self.image = self.image.convert(\"L\")", "title": "" }, { "docid": "cbcbcd463289789b28d9f72322fbe07f", "score": "0.5922888", "text": "def gray2m11range(image):\n return 2.0 * image / 255.0 - 1.0", "title": "" }, { "docid": "49349aab7f3e2983dd9379d18c92ec32", "score": "0.58568263", "text": "def preprocess(self, image, annotations, image_min_side, image_max_side):\n\t\traise NotImplementedError()", "title": "" }, { "docid": "1749a729d5c3f9b93027e7c2aee5ec90", "score": "0.5824374", "text": "def segment_border_to_interior_intensity(vol, segment, label_map):\n\n\n\tbox_bounds = segment.bounding_box\n\n\n\t\n\tcropped_vol = vol[box_bounds.xmin:box_bounds.xmax, box_bounds.ymin:box_bounds.ymax, box_bounds.zmin:box_bounds.zmax]\n\t\n\t\n\t#cropped_mask = np.zeros((box_bounds.xmax - box_bounds.xmin, box_bounds.ymax - box_bounds.ymin, box_bounds.zmax - box_bounds.zmin))\n\t\n\t\n\tcropped_mask = label_map[box_bounds.xmin:box_bounds.xmax, box_bounds.ymin:box_bounds.ymax, box_bounds.zmin:box_bounds.zmax] == segment.label\n\t\n\t#for voxel in segment.list_of_voxel_tuples:\n\t#\tcropped_mask[voxel[0] - box_bounds.xmin-1, voxel[1] - box_bounds.ymin-1, voxel[2] - box_bounds.zmin-1] = 1\n\n\tcropped_mask_dilated = ndimage.morphology.binary_dilation(cropped_mask )\n\tcropped_mask_eroded = ndimage.morphology.binary_erosion(cropped_mask )\n\t\n\tif cropped_mask_eroded.sum() < 10:\n\t\tcropped_mask_eroded = cropped_mask\n\t\n\tcropped_mask_border = cropped_mask_dilated - cropped_mask_eroded\n\n\tinterior_intensity = avg_intensity(cropped_vol,cropped_mask_eroded)\t\n\tborder_intensity = avg_intensity(cropped_vol,cropped_mask_border)\n\t\n\tif interior_intensity < 0.001:\n\t\tinterior_intensity = 0.001\n\n\n\treturn border_intensity / interior_intensity", "title": "" }, { "docid": "3300d589bc20105596b3577079b4e977", "score": "0.574878", "text": "def grayscale(img):\n return #TODO call openCV function grayscale", "title": "" }, { "docid": "7a92cd74d52ea01f82c9c6c8b1bf400f", "score": "0.56862825", "text": "def image_preprocessing(image):\n\timage = np.reshape(image, (-1, 16))\n\timage = image.astype(np.float32)\n\t#image = image/255.0 - 0.5\n\treturn image", "title": "" }, { "docid": "4e83bedb26979b1b921f6d7cc9acdfb9", "score": "0.56780964", "text": "def apply_ocr(image):\n\n image = array(image)\n image[image<=125]=0\n image[image>126] = 255\n image = blur(image, (2,2))\n \n return i2d(image, output_type=Output.DICT)", "title": "" 
}, { "docid": "0635307a575f67ddfa62ed0aaf67eb1c", "score": "0.5663966", "text": "def preprocess_image(img):\n\n # Converting image color BGR to GRAY\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n plt.imshow(gray) # display the gray image\n plt.show()\n\n # Using binary threshold\n _, binary = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY_INV)\n plt.imshow(binary, cmap='gray') # display the threshold image\n plt.show()\n\n return gray, binary", "title": "" }, { "docid": "52c0e7fe525106a7348ab62060a78db4", "score": "0.5643106", "text": "def image_preprocessing(image):\n\treturn cv2.GaussianBlur(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), (5,5), 0)", "title": "" }, { "docid": "3b31c79ea8fd601c43e780950c3df42d", "score": "0.56367666", "text": "def detectandlabel(mymodel,img,labels,thresh = 0.0):\r\n img = img.copy()\r\n for bbox,subimg in segment_characters(binarize(img)):\r\n c,prob = predict_class(mymodel,subimg)\r\n if c >= len(labels) or prob < thresh:\r\n continue\r\n label = labels[c]\r\n img = cv2.rectangle(img, bbox,(255,0,0), 5)\r\n cv2.putText(img,label,(bbox[0],bbox[1]-50),cv2.FONT_HERSHEY_SIMPLEX,3,(255,0,0),7)\r\n return img", "title": "" }, { "docid": "201af15aa75984990d99621ba666dee3", "score": "0.5633936", "text": "def get_gray_and_ab(image: dict) -> Tuple[dict, tf.Tensor]:\n img = image['input_1']\n gray = rgb_to_gray(img)\n lab = rgb_to_lab(img)\n ab = lab[:, :, 1:]\n image['input_1'] = gray\n image['input_2'] = gray\n return image, ab", "title": "" }, { "docid": "a8dbf05e1a064d8dbd39aa167b218c39", "score": "0.5612047", "text": "def _process_image(directory, name):\n # Read the image file.\n filename = os.path.join(directory, DIRECTORY_IMAGES, name + '.jpg')\n print(filename)\n image_data = cv2.imread(filename)\n # Read the XML annotation file.\n filename = os.path.join(directory, DIRECTORY_ANNOTATIONS, name + '.xml')\n tree = ET.parse(filename)\n root = tree.getroot()\n\n # Image shape.\n size = root.find('size')\n shape = [int(size.find('height').text),\n int(size.find('width').text),\n int(size.find('depth').text)]\n # Find annotations.\n bboxes = []\n labels = []\n labels_text = []\n difficult = []\n truncated = []\n for obj in root.findall('object'):\n label = obj.find('name').text\n labels_text.append(label.encode('ascii'))\n\n if obj.find('difficult'):\n difficult.append(int(obj.find('difficult').text))\n else:\n difficult.append(0)\n if obj.find('truncated'):\n truncated.append(int(obj.find('truncated').text))\n else:\n truncated.append(0)\n\n bbox = obj.find('bndbox')\n bboxes.append((float(bbox.find('ymin').text) / shape[0],\n float(bbox.find('xmin').text) / shape[1],\n float(bbox.find('ymax').text) / shape[0],\n float(bbox.find('xmax').text) / shape[1]\n ))\n x0 = int(bbox.find('xmin').text)\n y0 = int(bbox.find('ymin').text)\n x1 = int(bbox.find('xmax').text)\n y1 = int(bbox.find('ymax').text)\n cv2.rectangle(image_data, (x0, y0), (x1, y1), (255, 0, 0), 2)\n\n # cv2.imshow(\"disp\", image_data)\n # cv2.waitKey(0)\n return image_data, shape, bboxes, labels_text, difficult, truncated", "title": "" }, { "docid": "4b98460d02c4a2bcc3d45276f6725d3f", "score": "0.56032085", "text": "def image_preprocess(dataframe):", "title": "" }, { "docid": "4aad45d16b1b6a69d355ab3879fbc77d", "score": "0.55752164", "text": "def get_ann(ann):\n box = ann[\"bbox\"]\n return bbox(ann[\"image_id\"], ann[\"category_id\"], box[0], box[1], box[2], box[3])", "title": "" }, { "docid": "38b2daa35fce9be1f7f04fd02a458c31", "score": "0.55323774", "text": "def FindGreyScale( self ):\n\t\treturn 
None", "title": "" }, { "docid": "04132b1dd76e24b380b8015463c12e43", "score": "0.5518384", "text": "def get_gt_dots(ann_path, img_height, img_width, mode=\"train\"):\n txt_list = open(ann_path, 'r').readlines()\n gt = format_label(mode, txt_list)\n assert gt.shape[1] == 3\n gt[:, 0] = gt[:, 0].clip(0, img_width - 1)\n gt[:, 1] = gt[:, 1].clip(0, img_height - 1)\n return gt", "title": "" }, { "docid": "3816cdd60cb5da6a46770260564a75bd", "score": "0.5515101", "text": "def generate_from_image_diff(array_foreground, array_background, threshold=10):\n if len(array_foreground.shape) == 3:\n array_foreground = np.mean(array_foreground, 2)\n if len(array_background.shape) == 3:\n array_background = np.mean(array_background, 2)\n diffarray = abs(array_foreground[:, :] - array_background[:, :])\n masked_array = np.ma.masked_where(diffarray < threshold, diffarray, copy=True)\n binary_array = np.invert(masked_array.mask)\n labeled_map, n = ndimage.label(binary_array)\n return labeled_map", "title": "" }, { "docid": "1be163585e57e4cb1aad73077643359c", "score": "0.5513965", "text": "def visual2bbox(face_image_np_rec,face_locations_rec,face_image_np,face_locations):\n print('bbox of rec ', face_locations_rec)\n print('bbox of alinment ', face_locations)\n \n plt.figure(figsize = (8,8)) \n x ,y , width, height = rect_to_bbox(face_locations_rec[0])\n x2 ,y2 = x + width, y + height \n cv2.rectangle(face_image_np_rec, (x,y), (x2,y2), (0,0,255), 1)\n plt.imshow(face_image_np_rec) \n \n plt.figure(figsize = (8,8)) \n x ,y , width, height = face_locations[0]\n x2 ,y2 = x + width, y + height \n cv2.rectangle(face_image_np, (x,y), (x2,y2), (0,0,255), 1)\n plt.imshow(face_image_np) \n plt.show()\n \n plt.figure(figsize = (8,8)) \n fl = [{'top_lip':face_landmarks[0]['top_lip'] ,\n 'bottom_lip':face_landmarks_rec[0]['bottom_lip']\n \n }]\n \n fll = draw_landmarks(self.face_path ,fl)\n \n image = draw_landmarks_withcv2(face_image_np, fll, color=(255, 0, 0), thickness=2)\n plt.imshow(image)\n plt.axis('off')\n plt.show()", "title": "" }, { "docid": "c00ff6057e414d3d27967385ba53d30e", "score": "0.55122477", "text": "def to_grey(self):\n\n self.feed = cv2.cvtColor(self.feed, cv2.COLOR_BGR2GRAY)", "title": "" }, { "docid": "3b4eef60ccf658e0d447b79e91ba78d3", "score": "0.55050904", "text": "def bgr_to_lab(image: torch.Tensor) -> torch.Tensor:\n \n # Convert from Linear RGB to sRGB\n b: torch.Tensor = image[..., 0, :, :]\n g: torch.Tensor = image[..., 1, :, :]\n r: torch.Tensor = image[..., 2, :, :]\n\n rs: torch.Tensor = torch.where(r > 0.04045, torch.pow(((r + 0.055) / 1.055), 2.4), r / 12.92)\n gs: torch.Tensor = torch.where(g > 0.04045, torch.pow(((g + 0.055) / 1.055), 2.4), g / 12.92)\n bs: torch.Tensor = torch.where(b > 0.04045, torch.pow(((b + 0.055) / 1.055), 2.4), b / 12.92)\n\n image_s = torch.stack([rs, gs, bs], dim=-3)\n\n xyz_im: torch.Tensor = rgb_to_xyz(image_s)\n\n # normalize for D65 white point\n xyz_ref_white = torch.tensor([0.95047, 1., 1.08883], device=xyz_im.device, dtype=xyz_im.dtype)[..., :, None, None]\n xyz_normalized = torch.div(xyz_im, xyz_ref_white)\n\n power = torch.pow(xyz_normalized, 1 / 3)\n scale = 7.787 * xyz_normalized + 4. / 29.\n xyz_int = torch.where(xyz_normalized > 0.008856, power, scale)\n\n x: torch.Tensor = xyz_int[..., 0, :, :]\n y: torch.Tensor = xyz_int[..., 1, :, :]\n z: torch.Tensor = xyz_int[..., 2, :, :]\n\n L: torch.Tensor = (116. * y) - 16.\n a: torch.Tensor = 500. * (x - y)\n _b: torch.Tensor = 200. 
* (y - z)\n\n out: torch.Tensor = torch.stack([L, a, _b], dim=-3)\n\n return out", "title": "" }, { "docid": "2e1231319778a8d782ef80912e563f1d", "score": "0.549889", "text": "def annotate(self, img):\n arr = np.asarray(img)\n face_locations = self.detector.detect(arr)\n\n for (x1, y1, x2, y2) in face_locations:\n arr = cv2.rectangle(arr,\n (int(x1), int(y1)),\n (int(x2), int(y2)),\n self.BOX_LINE_COLOR,\n self.BOX_LINE_WIDTH)\n ret = Image.fromarray(arr)\n\n return ret", "title": "" }, { "docid": "ae3043ab57c5ba1f51821e1693ebe638", "score": "0.5487968", "text": "def bbox_to_image(img, bbox, value, line_thickness=2):\n cv2.rectangle(img, (int(bbox[0]), int(bbox[1])),\n (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])), value,\n line_thickness)", "title": "" }, { "docid": "8ef17d1d8e72f50e74779ad2a99972b8", "score": "0.5486478", "text": "def grayscale(img):\n\n grayscale_img = img.convert(\"L\")\n return grayscale_img", "title": "" }, { "docid": "7d1b29d2c325c235d94572921aa57ac9", "score": "0.5479673", "text": "def image_converter(image_sr_array, image_mid_array, image_high_array, flag, boundarypixels, MAX_RGB):\n psnr_sr = 10 * np.log10(MAX_RGB ** 2 / (np.mean(np.square(image_high_array - image_sr_array))))\n psnr_itp = 10 * np.log10(MAX_RGB ** 2 / (np.mean(np.square(image_high_array - image_mid_array))))\n # from IPython import embed; embed(); exit()\n ssim_sr = ssim(np.uint8(image_sr_array*255/MAX_RGB), np.uint8(image_high_array*255/MAX_RGB), gaussian_weights=True, use_sample_covariance=False)\n ssim_itp = ssim(np.uint8(image_mid_array*255/MAX_RGB), np.uint8(image_high_array*255/MAX_RGB), gaussian_weights=True, use_sample_covariance=False)\n score = [psnr_itp, psnr_sr, ssim_itp, ssim_sr]\n return score", "title": "" }, { "docid": "27286a2b27524eb9966a4349a927bff9", "score": "0.54617", "text": "def get_grayscale(c):\n min = min(c)\n max = max(c)\n\n greyscale_values = np.interp(c, [min, max], [0, 255]).astype(int)", "title": "" }, { "docid": "adcd6d33352bec2c55f1017f692f5400", "score": "0.54579765", "text": "def get_ann_info(self, idx):\n\n img_id = self.data_infos[idx]['id']\n xml_path = osp.join(self.img_prefix, 'Annotations', f'{img_id}.xml')\n tree = ET.parse(xml_path)\n root = tree.getroot()\n bboxes = []\n labels = []\n bboxes_ignore = []\n labels_ignore = []\n for obj in root.findall('object'):\n name = obj.find('label').text\n if name not in self.CLASSES:\n continue\n label = self.cat2label[name]\n difficult = int(obj.find('difficult').text)\n bnd_box = obj.find('bndbox')\n # Coordinates may be float type\n bbox = [\n int(float(bnd_box.find('x0').text)),\n int(float(bnd_box.find('y0').text)),\n int(float(bnd_box.find('x1').text)),\n int(float(bnd_box.find('y1').text)),\n int(float(bnd_box.find('x2').text)),\n int(float(bnd_box.find('y2').text)),\n int(float(bnd_box.find('x3').text)),\n int(float(bnd_box.find('y3').text)), \n ]\n # drop ignore and difficult\n # ignore = False\n # if self.min_size:\n # assert not self.test_mode\n # w = bbox[2] - bbox[0]\n # h = bbox[3] - bbox[1]\n # if w < self.min_size or h < self.min_size:\n # ignore = True\n # if difficult or ignore:\n # bboxes_ignore.append(bbox)\n # labels_ignore.append(label)\n # else:\n bboxes.append(bbox)\n labels.append(label)\n if not bboxes:\n bboxes = np.zeros((0, 5))\n labels = np.zeros((0, ))\n else:\n bboxes = np.array(bboxes, ndmin=2) - 1\n labels = np.array(labels)\n if not bboxes_ignore:\n bboxes_ignore = np.zeros((0, 5))\n labels_ignore = np.zeros((0, ))\n else:\n bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 
1\n labels_ignore = np.array(labels_ignore)\n \n n_bboxes = []\n for i in range(bboxes.shape[0]):\n\n bbox = bboxes[i, :]\n\n cx = (bbox[0] + bbox[2] + bbox[4] + bbox[6]) / 4\n cy = (bbox[1] + bbox[3] + bbox[5] + bbox[7]) / 4\n w = math.sqrt(math.pow((bbox[0] - bbox[2]), 2) + math.pow((bbox[1] - bbox[3]), 2))\n h = math.sqrt(math.pow((bbox[2] - bbox[4]), 2) + math.pow((bbox[3] - bbox[5]), 2))\n\n if w < h:\n w, h = h, w\n theta = math.atan((bbox[5] - bbox[3]) / (bbox[4] - bbox[2] + 1e-3))\n else:\n theta = math.atan((bbox[3] - bbox[1]) / (bbox[2] - bbox[0] + 1e-3))\n n_bboxes.append([cx, cy, w, h, theta])\n\n ann = dict(\n bboxes=np.array(n_bboxes).astype(np.float32),\n labels=np.array(labels).astype(np.int64),\n bboxes_ignore=bboxes_ignore.astype(np.float32),\n labels_ignore=labels_ignore.astype(np.int64))\n return ann", "title": "" }, { "docid": "c15616ccd27939352089a6374d599824", "score": "0.54550046", "text": "def detect(original_image, min_score, max_overlap, top_k, suppress=None):\n image = np.array(original_image).astype('float32')\n H, W, C = image.shape\n image = transform(image)\n image = jt.array(image[np.newaxis,:]).float32()\n predicted_locs, predicted_scores = model(image)\n det_boxes, det_labels, det_scores = model.detect_objects(predicted_locs, predicted_scores, min_score=min_score, max_overlap=max_overlap, top_k=top_k)\n det_boxes = det_boxes[0]\n original_dims = np.array([[W, H, W, H]])\n det_boxes = det_boxes * original_dims\n det_labels = [rev_label_map[l] for l in det_labels[0]]\n if det_labels == ['background']:\n return original_image\n annotated_image = original_image\n draw = ImageDraw.Draw(annotated_image)\n font = ImageFont.truetype(\"ahronbd.ttf\", 15)\n for i in range(det_boxes.shape[0]):\n if suppress is not None:\n if det_labels[i] in suppress:\n continue\n box_location = det_boxes[i].tolist()\n draw.rectangle(xy=box_location, outline=label_color_map[det_labels[i]])\n draw.rectangle(xy=[l + 1. 
for l in box_location], outline=label_color_map[det_labels[i]]) \n text_size = font.getsize(det_labels[i].upper())\n text_location = [box_location[0] + 2., box_location[1] - text_size[1]]\n textbox_location = [box_location[0], box_location[1] - text_size[1], box_location[0] + text_size[0] + 4., box_location[1]]\n draw.rectangle(xy=textbox_location, fill=label_color_map[det_labels[i]])\n draw.text(xy=text_location, text=det_labels[i].upper(), fill='white', font=font)\n del draw\n return annotated_image", "title": "" }, { "docid": "378ccca69540fbe4a52206825536682a", "score": "0.5453775", "text": "def get_unet_border_weight_map(\n annotation,\n w0=5.0,\n sigma=13.54591536778324,\n eps=1e-32):\n # https://github.com/czbiohub/microDL/blob/master/micro_dl/utils/masks.py\n # if there is only one label, zero return the array as is\n if np.sum(annotation) == 0:\n return annotation\n\n # Masks could be saved as .npy bools, if so convert to uint8 and generate\n # labels from binary\n if annotation.dtype == bool:\n annotation = annotation.astype(np.uint8)\n assert annotation.dtype in [\n np.uint8,\n np.uint16,\n ], \"Expected data type uint, it is {}\".format(annotation.dtype)\n labeled_array = annotation.copy()\n inner = distance_transform_edt(annotation)\n inner = (inner.max() - inner) / inner.max()\n inner[annotation == 0] = 0\n # if there is only one label or only background\n if len(np.unique(labeled_array)) == 1:\n return inner\n # if there is only one label and background\n if len(np.unique(labeled_array)) == 2:\n if 0 in np.unique(labeled_array):\n return inner\n # cells instances for distance computation\n # 4 connected i.e default (cross-shaped)\n # structuring element to measure connectivy\n # If cells are 8 connected/touching they are labeled as one single object\n # Loss metric on such borders is not useful\n # class balance weights w_c(x)\n unique_values = np.unique(labeled_array).tolist()\n weight_map = [0] * len(unique_values)\n for index, unique_value in enumerate(unique_values):\n mask = np.zeros(\n (annotation.shape[0],\n annotation.shape[1]),\n dtype=np.float64)\n mask[annotation == unique_value] = 1\n weight_map[index] = 1 / mask.sum()\n\n # this normalization is important - foreground pixels must have weight 1\n weight_map = [i / max(weight_map) for i in weight_map]\n\n wc = np.zeros((annotation.shape[0], annotation.shape[1]), dtype=np.float64)\n for index, unique_value in enumerate(unique_values):\n wc[annotation == unique_value] = weight_map[index]\n # cells instances for distance computation\n # 4 connected i.e default (cross-shaped)\n # structuring element to measure connectivy\n # If cells are 8 connected/touching they are labeled as one single object\n # Loss metric on such borders is not useful\n # Not NEED to find labels\n # labeled_array, _ = scipy.ndimage.measurements.label(annotation)\n # cells distance map\n border_loss_map = np.zeros(\n (annotation.shape[0], annotation.shape[1]), dtype=np.float64\n )\n distance_maps = np.zeros(\n (annotation.shape[0], annotation.shape[1], np.max(labeled_array)),\n dtype=np.float64,\n )\n\n if np.max(labeled_array) >= 2:\n for index in range(np.max(labeled_array)):\n mask = np.ones_like(labeled_array)\n mask[labeled_array == index + 1] = 0\n distance_maps[:, :,\n index] = distance_transform_edt(mask)\n distance_maps = np.sort(distance_maps, 2)\n d1 = distance_maps[:, :, 0]\n d2 = distance_maps[:, :, 1]\n border_loss_map = w0 * np.exp((-1 * (d1 + d2) ** 2) / (2 * (sigma ** 2)))\n\n zero_label = np.zeros(\n (annotation.shape[0],\n 
annotation.shape[1]),\n dtype=np.float64)\n zero_label[labeled_array == 0] = 1\n border_loss_map = np.multiply(border_loss_map, zero_label)\n return border_loss_map + inner + wc", "title": "" }, { "docid": "646ae4de8bb929bdccba1fe2ddf1dec7", "score": "0.5451614", "text": "def preprocess(self, resized_inputs):\n return (2.0 / 255.0) * resized_inputs - 1.0", "title": "" }, { "docid": "646ae4de8bb929bdccba1fe2ddf1dec7", "score": "0.5451614", "text": "def preprocess(self, resized_inputs):\n return (2.0 / 255.0) * resized_inputs - 1.0", "title": "" }, { "docid": "7823fc57ce949d79608ec31b1e09d519", "score": "0.544763", "text": "def annotate(input_file, output_dir):\n logger.info(\"---\")\n logger.info('Input image: \"{}\"'.format(os.path.abspath(input_file)))\n image = Image.from_file(input_file)\n intensity = mean_intensity_projection(image)\n norm_intensity = normalise(intensity)\n norm_rgb = np.dstack([norm_intensity, norm_intensity, norm_intensity])\n\n name = fpath2name(input_file)\n png_name = name + \".png\"\n csv_name = name + \".csv\"\n png_path = os.path.join(output_dir, png_name)\n csv_path = os.path.join(output_dir, csv_name)\n\n tubes = find_tubes(input_file, output_dir)\n grains, difficult = find_grains(input_file, output_dir)\n tubes = remove_tubes_not_touching_grains(tubes, grains)\n tubes = remove_tubes_that_are_grains(tubes, grains)\n\n ann = AnnotatedImage.from_grayscale(intensity)\n\n num_grains = 0\n for n, i in enumerate(grains.identifiers):\n n = n + 1\n region = grains.region_by_identifier(i)\n ann.mask_region(region.inner.inner.inner.border.dilate(),\n color=(0, 255, 0))\n num_grains = n\n\n num_tubes = 0\n for n, i in enumerate(tubes.identifiers):\n n = n + 1\n region = tubes.region_by_identifier(i)\n highlight = norm_rgb * pretty_color(i)\n ann[region] = highlight[region]\n ann.mask_region(region.dilate(3).border.dilate(3),\n color=pretty_color(i))\n num_tubes = n\n\n ann.text_at(\"Num grains: {:3d}\".format(num_grains), (10, 10),\n antialias=True, color=(0, 255, 0), size=48)\n logger.info(\"Num grains: {:3d}\".format(num_grains))\n\n ann.text_at(\"Num tubes : {:3d}\".format(num_tubes), (60, 10),\n antialias=True, color=(255, 0, 255), size=48)\n logger.info(\"Num tubes : {:3d}\".format(num_tubes))\n\n logger.info('Output image: \"{}\"'.format(os.path.abspath(png_path)))\n with open(png_path, \"wb\") as fh:\n fh.write(ann.png())\n\n logger.info('Output csv: \"{}\"'.format(os.path.abspath(csv_path)))\n with open(csv_path, \"w\") as fh:\n fh.write(\"{},{},{}\\n\".format(png_name, num_grains, num_tubes))\n\n return png_name, num_grains, num_tubes", "title": "" }, { "docid": "22d0c6c4c5596266802bd98c3672e8c3", "score": "0.5437551", "text": "def decode_segmap(label_mask, n_classes, plot=False):\n label_colours = get_pascal_labels()\n r = label_mask.copy()\n g = label_mask.copy()\n b = label_mask.copy()\n for ll in range(0, n_classes):\n r[label_mask == ll] = label_colours[ll, 0]\n g[label_mask == ll] = label_colours[ll, 1]\n b[label_mask == ll] = label_colours[ll, 2]\n rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))\n rgb[:, :, 0] = r / 255.0\n rgb[:, :, 1] = g / 255.0\n rgb[:, :, 2] = b / 255.0\n if plot:\n plt.imshow(rgb)\n plt.show()\n else:\n return rgb", "title": "" }, { "docid": "14bd3e62a67824897eee545d17d32a3b", "score": "0.54360145", "text": "def _parse_ann_info(self, img_info, ann_info):\n gt_bboxes = []\n gt_labels = []\n gt_bboxes_ignore = []\n gt_masks_ann = []\n\n for i, ann in enumerate(ann_info):\n if ann.get('ignore', False):\n continue\n x1, 
y1, w, h = ann['bbox']\n if ann['area'] <= 0 or w < 1 or h < 1:\n continue\n bbox = [x1, y1, x1 + w - 1, y1 + h - 1]\n if ann.get('iscrowd', False):\n gt_bboxes_ignore.append(bbox)\n else:\n gt_bboxes.append(bbox)\n gt_labels.append(self.cat2label[ann['category_id']])\n gt_masks_ann.append(ann['segmentation'])\n\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n seg_map = img_info['filename'].replace('jpg', 'png')\n\n ann = dict(\n bboxes=gt_bboxes,\n labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore,\n masks=gt_masks_ann,\n seg_map=seg_map)\n\n return ann", "title": "" }, { "docid": "bdc5bedc4a5685d850b5b4e7b7c1f296", "score": "0.5432011", "text": "def gray2innerouterbound(gray, width):\n h, w = gray.shape[:2]\n gray_cp = gray.copy()\n gray_cp[gray == 76] = 255\n gray_cp[gray == 151] = 255\n bound = np.zeros_like(gray, dtype=np.uint8)\n label = gray2mask(gray_cp)\n\n label_binary = label_binarize(label.flatten(), classes=range(0, 3))\n label_binary = np.reshape(label_binary, (h, w, -1))\n bound_binary = np.zeros_like(label_binary)\n\n for i in range(3): # number of classes before edge detection\n tmp = ndimage.distance_transform_cdt(label_binary[:, :, i], 'taxicab')\n cdt = np.logical_and(tmp >= 1, tmp <= width)\n bound_binary[:, :, i] = cdt\n\n bound[bound_binary[:, :, 0] != 0] = 2 # outer bound marked as 2\n bound[bound_binary[:, :, 1] != 0] = 1 # inner bound marked as 1\n\n return bound", "title": "" }, { "docid": "ce8edc317aa172caa555242d2014d56c", "score": "0.5431405", "text": "def transform_label(label_orig, sz):\n label = copy.deepcopy(label_orig)\n label = Image.fromarray(label.squeeze().astype(np.uint8))\n label = label.resize( (sz[0],sz[1]),Image.NEAREST)\n label = np.array(label, dtype=np.int32)\n return label", "title": "" }, { "docid": "9f58f2d34528419447fb4a73a7a8bcea", "score": "0.5426623", "text": "def preprocess(img):\n # Convert to HSV and get value (luminance) channel, in order to make more robust\n hsv = rgb_to_hsv(img)\n v = hsv[:,:,2]\n \n # Apply Gaussian Blur to reduce the noise in edge detection\n kernel_size = 5\n out = gaussian_blur(v, kernel_size) \n return out", "title": "" }, { "docid": "45f80b58a8c1f5a1061741d189034192", "score": "0.5425294", "text": "def label2mask(label_array):\n img = Image.new('RGB', (label_array.shape[1], label_array.shape[0]))\n pixels = img.load()\n for j_, j in enumerate(label_array):\n for k_, k in enumerate(j):\n if k <= len(label_colours):\n pixels[k_, j_] = label_colours[k]\n return img", "title": "" }, { "docid": "7a39aad3f519921e53b932b570b6014c", "score": "0.54225767", "text": "def make_label(xml_file, rgb_img):\n\n row_max, col_max = rgb_img.shape[0:2]\n res = np.zeros((row_max, col_max))\n\n mydoc = minidom.parse(xml_file)\n polygons = mydoc.getElementsByTagName('polygon')\n for poly in polygons:\n rows = []\n cols = []\n for point in poly.getElementsByTagName('pt'):\n x = int(point.getElementsByTagName('x')[0].firstChild.data)\n y = int(point.getElementsByTagName('y')[0].firstChild.data)\n rows.append(y)\n cols.append(x)\n rr, cc = polygon(rows, cols)\n res[rr, cc] = 1\n\n return res", "title": "" }, { "docid": "796506f92d742ca133d23d9190dee3db", "score": "0.5421", "text": "def PrepareGreyScale( self 
):\n\t\treturn None", "title": "" }, { "docid": "ca7ac06e7929e0b943eb3bcac0604d9c", "score": "0.5418515", "text": "def __preprocessImage(img):\n\n imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n imgThresh = cv2.adaptiveThreshold(imgGray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2) \n return imgThresh", "title": "" }, { "docid": "731ae5ec16cfa2d7c143197b660408da", "score": "0.54180425", "text": "def original_image(x):\n\n x = x.copy()\n x = x + VGG16_OFFSET\n x = x / 255.0\n # To RGB\n x = x[::-1,:,:]\n return x", "title": "" }, { "docid": "846f5b78427f89caca12bbb068f0a175", "score": "0.5415394", "text": "def decode_labels(image, label, input_img_shapes, sess_tmp, threshold=0.3):\n image = cv2.resize(image, (input_img_shapes[1], input_img_shapes[0]))\n box_priors = np.array([[0.57273, 0.677385], [1.87446, 2.06253], [3.33843, 5.47434], [7.88282, 3.52778], [9.77052, 9.16828]])\n obj_prob = label[..., 4]\n # print(obj_prob[:2, 0, :])\n # print('the max of conf:{}'.format(np.max(obj_prob)))\n class_prob = label[..., 5:]\n # class_prob = np.max(class_prob, -1)\n # print('the max of class prob:{}'.format(np.max(class_prob)))\n\n mask = label[..., 4] > threshold\n boxes = label[mask]\n\n box_number = np.sum(np.int64(mask))\n\n if box_number >= 0:\n biases = np.array([[[[i, j, k] for k in range(5)] for j in range(13)] for i in range(13)])\n # biases: [box_number, 3], for each box, the coordinate information is [cell_x, cell_y, in_which_cell]\n biases = biases[mask]\n\n # convert the box coordinate from the feature map (1x1) to be the real coordinate (height, width),\n # define the x-axis direction is down and y-axis direction is right\n rate_x = input_img_shapes[0] / 13.0\n rate_y = input_img_shapes[1] / 13.0\n\n original_coordinates = np.zeros((box_number, 5), dtype=np.int64)\n for box_i in range(box_number):\n x = (boxes[box_i][0] + biases[box_i, 0]) * rate_x\n y = (boxes[box_i][1] + biases[box_i, 1]) * rate_y\n higth = boxes[box_i][2] * box_priors[biases[box_i, 2], 0] * rate_x\n width = boxes[box_i][3] * box_priors[biases[box_i, 2], 1] * rate_y\n\n x1 = np.int64(x - higth / 2.0)\n y1 = np.int64(y - width / 2.0)\n x2 = np.int64(x + higth / 2.0)\n y2 = np.int64(y + width / 2.0)\n # here, the last dim should be the class id, start from 1\n original_coordinates[box_i] = [x1, y1, x2, y2, np.argmax(boxes[box_i][5:])]\n\n box_coor = original_coordinates[..., :4]\n box_score = boxes[..., 4]\n\n selected_indeces = sess_tmp.run(tf.image.non_max_suppression(box_coor, box_score, max_output_size=3))\n selected_boxes = sess_tmp.run(tf.gather(original_coordinates, selected_indeces))\n image = draw_boxes_on_image(image, selected_boxes)\n image = cv2.cvtColor(np.uint8(image), cv2.COLOR_RGB2BGR)\n return image", "title": "" }, { "docid": "a97d2eabc47bf8d07da8a2d8e44d42da", "score": "0.54126084", "text": "def calc_brightRange(img, ROI):\n pass", "title": "" }, { "docid": "3c9b3cbb1b9e44041839f13c6e0dc334", "score": "0.5412483", "text": "def _parse_ann_info(self, img_info, ann_info):\n gt_bboxes = []\n gt_labels = []\n gt_bboxes_ignore = []\n gt_masks_ann = []\n for i, ann in enumerate(ann_info):\n # {'id': 46226, 'image_id': 3637, 'category_id': 8, 'a_segm':, 'i_segm': , 'a_bbox': [1141, 224, 82, 123], 'i_bbox': [1141, 224, 82, 123], 'a_area': 10086.0, 'i_area': 10086.0, 'oco_id': 3, 'ico_id': 0}\n # if ann.get('ignore', False):\n # continue\n x1, y1, w, h = ann['a_bbox']\n inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))\n inter_h = max(0, min(y1 + h, img_info['height']) - 
max(y1, 0))\n if inter_w * inter_h == 0:\n continue\n if ann['a_area'] <= 0 or w < 1 or h < 1:\n continue\n if ann['category_id'] not in self.cat_ids:\n continue\n bbox = [x1, y1, x1 + w, y1 + h]\n # if ann.get('iscrowd', False):\n # gt_bboxes_ignore.append(bbox)\n # else:\n gt_bboxes.append(bbox)\n gt_labels.append(self.cat2label[ann['category_id']])\n # gt_masks_ann.append(ann.get('i_segm', None))\n\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n # seg_map = img_info['filename'].replace('jpg', 'png')\n\n ann = dict(\n bboxes=gt_bboxes,\n labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore,\n masks=gt_masks_ann)\n\n return ann", "title": "" }, { "docid": "ee736008db31a2a4a5723b1d37606d14", "score": "0.5394742", "text": "def load_cocottributes_annotation(idx, im_size, label_ids):\n patch= Patch.query.get(idx)\n im = patch.crop(savename = None, make_square = True, resize = im_size)\n ann_vec = patch.consensus_vec(label_ids)\n if ann_vec == []:\n ann_vec,_ = patch.annotation_vector(label_ids, consensus=True)\n #multilabel = np.array([1 if x >= 0.5 else -1 if x == -1 or (x > 0 and x < 0.5) else 0 for x in ann_vec])\n multilabel = np.array([1 if x >= 0.5 else 0 for x in ann_vec])\n return im, multilabel", "title": "" }, { "docid": "0a7cf7ba928dc000a30a11fd13a0a27e", "score": "0.53932697", "text": "def visualize(img, mask):\n out = np.float32(img)/255\n msk = CMAP[mask % CMAP.shape[0], :]\n msk[mask == 0, :] = 0.\n out = out*0.5 + msk*0.5\n return (out*255).astype(np.uint8)", "title": "" }, { "docid": "019ac208b36b3bd66c4eeb2e5d097c39", "score": "0.5383998", "text": "def _get_imganno(self, idx):\n return self.annos[idx]", "title": "" }, { "docid": "bcc29fadca6fe8d2febccc388558038b", "score": "0.53713936", "text": "def draw_labels(image, np_mask, label):\n if np.sum(np_mask) > 0:\n x, y = np.argwhere(np_mask == 1)[0]\n image = imgaug.imgaug.draw_text(image, x, y, label, color=(255, 255, 255), size=50)\n return image", "title": "" }, { "docid": "e3b94f7c4b433b33b55de7dd4d8909b6", "score": "0.5356755", "text": "def gray2bound(gray, n_classes=3, width=2):\n\n h, w = gray.shape[:2]\n if n_classes <= 3: # if n_classes less than 3, cal and noncal are not considered\n gray[gray == 76] = 255\n gray[gray == 151] = 255\n\n label = gray2mask(gray)\n label_binary = label_binarize(label.flatten(), classes=range(0, n_classes))\n label_binary = np.reshape(label_binary, (h, w, -1))\n bound_binary = np.zeros_like(label_binary)\n\n for i in range(n_classes): # number of classes before edge detection\n tmp = ndimage.distance_transform_cdt(label_binary[:, :, i], 'taxicab')\n cdt = np.logical_and(tmp >= 1, tmp <= width)\n bound_binary[:, :, i] = cdt\n\n bound = np.any(bound_binary, axis=2).astype(np.uint8)\n\n return bound", "title": "" }, { "docid": "86ff3013cd81d98298f403d925bd655e", "score": "0.5355123", "text": "def colorMotionSuperpixels(self):\n if not hasattr(self, 'motion_superpixel_labels'):\n return None\n\n boundary_img = self.getSegmentedImage()\n\n motion_superpix_img = np.copy(boundary_img)\n\n for label in self.motion_superpixel_labels:\n label_pix_x, label_pix_y = np.where(self.label_frame == label) \n \n motion_superpix_img[label_pix_x,label_pix_y,:] = np.array([0,255,0])\n\n 
return motion_superpix_img", "title": "" }, { "docid": "b5d778d234bc603e4996ba820d95a8b6", "score": "0.5343431", "text": "def decode_segmap(label_mask, n_classes=6, label_colours=get_seismic_labels()):\n r = label_mask.copy()\n g = label_mask.copy()\n b = label_mask.copy()\n for ll in range(0, n_classes):\n r[label_mask == ll] = label_colours[ll, 0]\n g[label_mask == ll] = label_colours[ll, 1]\n b[label_mask == ll] = label_colours[ll, 2]\n rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], label_mask.shape[2], 3))\n rgb[:, :, :, 0] = r / 255.0\n rgb[:, :, :, 1] = g / 255.0\n rgb[:, :, :, 2] = b / 255.0\n return np.transpose(rgb, (0, 3, 1, 2))", "title": "" }, { "docid": "b26e5b5685c4b50e7f1a05902eedc5ff", "score": "0.5342992", "text": "def img2map(self, ob):\n ob_tranformed = []\n t = np.array([[1, 0, 0, -300],\n [0, -1, 0, 300],\n [0, 0, -1, 0],\n [0, 0, 0, 1]])\n for p in ob:\n p = np.array(p) # in case it is not already numpy array\n p = np.hstack((p, [0, 1]))\n p = t.dot(p).astype(int)\n ob_tranformed.append(p[:2])\n return np.array(ob_tranformed)", "title": "" }, { "docid": "93d06564a6dba1e7a95485b1118b8307", "score": "0.5341198", "text": "def segment_image(self, img):\n # YOUR CODE HERE\n # author @Siddarth A53299801\n # normalize image\n if img.dtype == np.uint8:\n img = img.astype(np.float32)\n img = img / 255.0\n\n sh = img.shape\n X = np.reshape(img, (sh[0] * sh[1], 3))\n o = np.ones((X.shape[0], 1))\n X = np.concatenate((o, X), 1)\n\n # Load the weights\n w = self.w\n\n print(\"\\nBounding Box - B.B - Statistics are as follows: \")\n print(X.shape, w.shape)\n print(X.dtype, np.min(X), np.max(X))\n\n # predicting the mask\n y_pred = np.matmul(X, w) >= 0\n y_pred = y_pred.astype(np.uint8)\n print(\"YPred Stats: \", y_pred.shape, y_pred.dtype, np.min(y_pred), np.max(y_pred))\n mask_img = np.reshape(y_pred, (sh[0], sh[1]))\n print(\"Mask Image Stats: \", mask_img.shape, mask_img.dtype, np.min(mask_img), np.max(mask_img))\n return mask_img", "title": "" }, { "docid": "1e96f6bb70adf046aa5f7df6eebd264b", "score": "0.5340764", "text": "def preProcessLabel(label, size):\n shape = label.shape\n\n label = imgResize(label, size)\n label = binarize(label, shape)\n return label", "title": "" }, { "docid": "7cc40b1c1dc79367e0c0f62791edc007", "score": "0.53315467", "text": "def extract_features(self, im, **kwargs):", "title": "" }, { "docid": "a0216c7478f27dec0f79a821ff9d5433", "score": "0.5327406", "text": "def visual_result(image, label, alpha=0.7):\n image = (image * rgb_std + rgb_mean) * 255\n image, label = image.astype(np.int), label.astype(np.int)\n H, W, C = image.shape\n masks_color = np.zeros(shape=[H, W, C])\n inv_masks_color = np.zeros(shape=[H, W, C])\n cls = []\n for i in range(H):\n for j in range(W):\n cls_idx = label[i, j]\n masks_color[i, j] = np.array(colormap[cls_idx])\n cls.append(cls_idx)\n if classes[cls_idx] == \"background\":\n inv_masks_color[i, j] = alpha * image[i, j]\n\n show_image = np.zeros(shape=[224, 672, 3])\n cls = set(cls)\n for x in cls:\n print(\"=> \", classes[x])\n show_image[:, :224, :] = image\n show_image[:, 224:448, :] = masks_color\n show_image[:, 448:, :] = (1-alpha)*image + alpha*masks_color + inv_masks_color\n show_image = Image.fromarray(np.uint8(show_image))\n return show_image", "title": "" }, { "docid": "92b74ec0a599d1835d3e7cacf30f1478", "score": "0.53197855", "text": "def rgb2gray_genome(rgb):\r\n return np.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140])", "title": "" }, { "docid": "18b48c6cdb18fc4eede8db3b7f4f661b", "score": "0.5309658", 
"text": "def visual_result(no, target_features, image, prediction,label,alpha=0.5):\n image, label = np.asarray(image).astype(np.uint8), np.asarray(label).astype(np.uint8)\n prediction = np.asarray(prediction)\n image = image[0]\n\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n prediction = prediction[0]\n numpy_prediction = np.zeros_like(prediction)\n label = label[0]\n\n H, W, C = image.shape\n H, W, nC = label.shape\n masks_color = np.zeros(shape=[H, W, C])\n lables_color = np.zeros(shape=[H, W, C])\n inv_masks_color = np.zeros(shape=[H, W, C])\n result_image = np.zeros_like(image)\n no2name_dict = dict()\n no2name_dict['1'] = 'Third_eyelid_protrude'\n no2name_dict['2'] = 'blepharitis_inflammation'\n no2name_dict['3'] = 'blepharitis_inner_inflammation'\n no2name_dict['4'] = 'corneal_pus'\n no2name_dict['5'] = 'corneal_scratch'\n no2name_dict['6'] = 'corneal'\n no2name_dict['7'] = 'conjunctivitis_flare'\n no2name_dict['8'] = 'conjunctivitis_swll'\n no2name_dict['9'] = 'conjunctivitis_white_inflammation'\n no2name_dict['10'] = 'gataract'\n no2name_dict['11'] = 'gataract_initial'\n\n result_dict = dict()\n result_dict['Third_eyelid_protrude'] = 0\n result_dict['blepharitis_inflammation'] = 0\n result_dict['blepharitis_inner_inflammation'] = 0\n result_dict['corneal_pus'] = 0\n result_dict['corneal_scratch'] = 0\n result_dict['corneal'] = 0\n result_dict['conjunctivitis_flare'] = 0\n result_dict['conjunctivitis_swll'] = 0\n result_dict['conjunctivitis_white_inflammation'] = 0\n result_dict['gataract'] = 0\n result_dict['gataract_initial'] = 0\n\n result_prob_dict = dict()\n result_prob_dict['Third_eyelid_protrude'] = 0\n result_prob_dict['blepharitis_inflammation'] = 0\n result_prob_dict['blepharitis_inner_inflammation'] = 0\n result_prob_dict['corneal_pus'] = 0\n result_prob_dict['corneal_scratch'] = 0\n result_prob_dict['corneal'] = 0\n result_prob_dict['conjunctivitis_flare'] = 0\n result_prob_dict['conjunctivitis_swll'] = 0\n result_prob_dict['conjunctivitis_white_inflammation'] = 0\n result_prob_dict['gataract'] = 0\n result_prob_dict['gataract_initial'] = 0\n\n cls = []\n c_i=1\n for k in range(nC):\n tag_label=0\n if k in target_features:\n NofValidPixels = 0\n maxRatio = 0\n # if np.max(label[...,k])> 0 and k> 0:\n print(\"no\",no, np.max(prediction[...,k]))\n for i in range(H):\n for j in range(W):\n if prediction[i, j,k] > 0.3:\n numpy_prediction[i, j, k] = 1\n NofValidPixels += 1\n if maxRatio < prediction[i, j, k]:\n maxRatio = prediction[i, j, k]\n else:\n numpy_prediction[i, j, k] = 0\n cls_idx = label[i, j, k]\n\n if cls_idx >0 and k>0:\n tag_label=1\n lables_color[i, j] = np.array(colormap[2])\n # cls.append(cls_idx)\n else:\n lables_color[i, j] = np.array(colormap[0])\n\n if numpy_prediction[i, j, k] >0.5 and k>0:\n masks_color[i, j] = np.array(colormap[1])\n # cls.append(cls_idx)\n else:\n masks_color[i, j] = np.array(colormap[0])\n\n\n\n c_i += 1\n masks_color = masks_color.astype(np.uint8)\n show_image = np.zeros(shape=[512, 1024, 3])\n cls = set(cls)\n # /\n NofPixels = masks_color.shape[0] * masks_color.shape[1]\n NofValidPixels = NofValidPixels\n print(\"ratio valid pixels : \", NofValidPixels / NofPixels)\n if NofValidPixels/NofPixels<0.9:\n if NofValidPixels/NofPixels>0.01:\n result_image += masks_color + lables_color.astype(np.uint8)\n disease_name=no2name_dict[str(c_i-1)]\n result_dict[disease_name]+=1\n result_prob_dict[disease_name]=maxRatio\n else:\n masks_color = np.zeros(shape=[H, W, C]).astype(np.uint8)\n\n\n\n # show_image = np.floor((1-alpha)*image) + 
np.floor((alpha*2/3)*masks_color) + np.floor((alpha/3)*lables_color)\n # show_image = (1-alpha)*image + alpha*lables_color\n show_image[:,:512,:] = image\n show_image[:,512:,:] = masks_color+lables_color.astype(np.uint8)\n # print(\"no\", k, np.max(masks_color), len(np.where(masks_color > 0)[0]))\n # show_image = Image.fromarray(np.uint8(show_image))\n show_image = show_image.astype(np.uint8)\n base_save_folder = '/home/projects/src/refineData/outputs_2nd/snapshot/'\n if not os.path.isdir(base_save_folder):\n os.makedirs(base_save_folder)\n os.chmod(base_save_folder,0o777)\n if tag_label==1:\n cv2.imwrite(base_save_folder + '/sol_no_{}_{}.png'.format(no, k), show_image)\n else:\n cv2.imwrite(base_save_folder + '/snapshot_no_{}_{}.png'.format(no, k), show_image)\n # cv2.imwrite(base_save_folder + '/result_no_{}_{}.png'.format(no, k), result_image)\n # cv2.imwrite(base_save_folder+'/original_no_{}_{}.png'.format(no,k),image)\n # cv2.WaitKey(0)\n print(result_dict)", "title": "" }, { "docid": "4bb50e11607dc01b25c0d91814389ced", "score": "0.5309595", "text": "def grayscale(image):\n newImage = image.copy()\n pixels = newImage.load()\n minX, minY, width, height = image.getbbox()\n for y in range(height):\n for x in range(width):\n rgb = pixels[x,y]\n avg = int((rgb[0] + rgb[1] + rgb[1])/3)\n pixels[x,y] = (avg, avg, avg)\n return newImage", "title": "" }, { "docid": "4a71ebb668e3d2a1be7cbbee68f22730", "score": "0.53089267", "text": "def transform_annotations(self, annotation, transforms, image_size, orig_image_size):\n bbox = BoxMode.convert(annotation[\"bbox\"], annotation[\"bbox_mode\"], BoxMode.XYXY_ABS)\n # Note that bbox is 1d (per-instance bounding box)\n annotation[\"bbox\"] = transforms.apply_box([bbox])[0]\n annotation[\"bbox_mode\"] = BoxMode.XYXY_ABS\n\n # each instance contains 1 mask\n annotation[\"segmentation\"] = self._process_mask(annotation[\"segmentation\"], transforms, orig_image_size)\n\n # camera\n h, w = image_size\n #annotation[\"K\"] = [annotation[\"K\"][0], w / 2.0, h / 2.0]\n if \"pose\" in annotation.keys():\n annotation[\"pose\"] = torch.tensor(annotation[\"pose\"])\n annotation[\"parameters\"] = torch.tensor(annotation[\"parameters\"])\n\n return annotation", "title": "" }, { "docid": "021fd470d3045beca0b325109670bdd4", "score": "0.5301394", "text": "def decode_segmap(self, label_mask, plot=False):\n label_colours = self.get_pascal_labels()\n r = label_mask.copy()\n g = label_mask.copy()\n b = label_mask.copy()\n for ll in range(0, self.n_classes):\n r[label_mask == ll] = label_colours[ll, 0]\n g[label_mask == ll] = label_colours[ll, 1]\n b[label_mask == ll] = label_colours[ll, 2]\n rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))\n rgb[:, :, 0] = r / 255.0\n rgb[:, :, 1] = g / 255.0\n rgb[:, :, 2] = b / 255.0\n if plot:\n plt.imshow(rgb)\n plt.show()\n else:\n return rgb", "title": "" }, { "docid": "e22be9521638dc9501dfa586e3cc7a2b", "score": "0.5300996", "text": "def grayify(image):\n grayscale_image = image.convert(\"L\")\n return(grayscale_image)", "title": "" }, { "docid": "2139d2a9efea5bb4c3b316ce24032d3c", "score": "0.5294917", "text": "def image2island(impath,g,nsuper=4,size=10,color=\"black\"):\n from PIL import Image\n im = Image.open(impath)\n im = im.convert('RGBA')\n data = np.array(im) # convert to array\n red, green, blue, alpha = data.T # store data for readbility\n if color==\"black\": #retain the black color\n retain = (red < 20) & (blue < 20) & (green < 20)\n elif color==\"red\": #retain the black color\n retain = (red > 200) & (blue 
< 20) & (green < 20)\n elif color==\"blue\": #retain the black color\n retain = (red < 20) & (blue > 200) & (green < 20)\n elif color==\"green\": #retain the black color\n retain = (red < 20) & (blue < 20) & (green > 200)\n else: raise # unrecognized\n data[..., :-1][retain.T] = (0, 0, 0) # set as black\n data[..., :-1][np.logical_not(retain.T)] = (255, 255, 255) # set as white\n# data[..., :-1][not retain.T] = (255, 255, 255) # set as black\n im2 = Image.fromarray(data) # convert to image\n im2 = im2.convert(\"L\") # to black and white\n bw = np.asarray(im2).copy() # convert to array\n bw[bw < 128] = 0 # Black\n bw[bw >= 128] = 1 # White\n bw = bw.transpose() # transpose image\n # now create a supercell\n nx,ny = bw.shape # size of the image\n go = g.supercell(nsuper*size) # build supercell\n go.center()\n minx = -size\n maxx = size\n miny = -size*bw.shape[1]/bw.shape[0]\n maxy = size*bw.shape[1]/bw.shape[0]\n def finter(rtmp):\n x = rtmp[0]\n y = rtmp[1]\n x = (x - minx)/(maxx-minx)\n y = (y - miny)/(maxy-miny)\n xi = (nx-1)*x # normalized\n yi = (ny-1)*y # normalized\n xi,yi = int(round(xi)),int(round(yi)) # integer\n if not 0<xi<bw.shape[0]: return False\n if not 0<yi<bw.shape[1]: return False\n if bw[xi,yi]==0: return True\n else: return False\n go = intersec(go,finter)\n go.dimensionality = 0 # zero dimensional\n go.celldis = None\n return go", "title": "" }, { "docid": "c4d1c7d20c118b360561e5fefec95f13", "score": "0.5294035", "text": "def change(image_array, map_draw_flag, binary_flag, output):\n for i in range(0, image_array.shape[0]):\n for j in range(0, image_array.shape[1]):\n if map_draw_flag:\n if mapinner(image_array[i][j]):\n output[i][j][0] = 1 if binary_flag == 1 else 255\n else:\n if drawinner(image_array[i][j]):\n output[i][j][1] = 1 if binary_flag == 1 else 255", "title": "" }, { "docid": "136d4361576498210ac03ada4ec077ba", "score": "0.52925843", "text": "def _normalize_data(image, label):\r\n image = tf.cast(image, tf.float32)\r\n image = image / 255.0\r\n image = image - 0.5\r\n image = image * 2.0\r\n\r\n #label = tf.cast(label, tf.float32)\r\n #label = label / 255.0\r\n\r\n return image, label", "title": "" }, { "docid": "283e8d7b527430c8954ff786dc21e51b", "score": "0.5285756", "text": "def extractFeature(img):\n\treturn 0", "title": "" }, { "docid": "be081b500036afb7c234bea39d83d9c6", "score": "0.5283656", "text": "def convert_label_to_input_image(in_label):\n\n # Convert to binary [0, 1] mask\n # Weird scaling issue when the labels were generated (are loaded) ?\n # For individual contours the second contour has a different value so it has three unique\n # values. 
Even for full labels this scaling is needed.\n in_label[in_label >= 0.25] = 1\n in_label[in_label < 0.25] = 0\n\n # increase dimensionality to match input\n # assumes first channel (0) is for batch size\n in_label = torch.cat((in_label, in_label, in_label), 1)\n\n return in_label", "title": "" }, { "docid": "5ae86974f229eaf8652023159e9b5017", "score": "0.5281471", "text": "def draw_img_preds(img, depth_pred, seg_pred, uncertainty_threshold=0.0, apply_depth_mask=False):\n plt.figure(0, figsize=(8, 6))\n\n # plot input img\n plt.subplot(2, 3, 1)\n plt.title('RGB')\n plt.imshow(img)\n plt.gca().axes.get_yaxis().set_ticks([])\n plt.gca().axes.get_xaxis().set_ticks([])\n\n # plot depth image\n plt.subplot(2, 3, 2)\n plt.title('depth estimation')\n depth_pred = depth_pred[0, :, :]\n plt.imshow(depth_pred)\n plt.gca().axes.get_yaxis().set_ticks([])\n plt.gca().axes.get_xaxis().set_ticks([])\n\n # plot segmentation\n plt.subplot(2, 3, 3)\n plt.title('segmentation')\n seg_labels = np.argmax(seg_pred, 0) + 1\n mask = np.zeros(shape=(seg_labels.shape[0], seg_labels.shape[1], 3))\n for key in CLASSES:\n class_mask = np.isin(seg_labels, np.asarray(key))\n mask[:, :, 0] += class_mask * CLASS_COLORS[key][0]\n mask[:, :, 1] += class_mask * CLASS_COLORS[key][1]\n mask[:, :, 2] += class_mask * CLASS_COLORS[key][2]\n mask = np.clip(mask, 0, 1)\n plt.imshow(img)\n plt.imshow(mask, alpha=0.3)\n plt.gca().axes.get_yaxis().set_ticks([])\n plt.gca().axes.get_xaxis().set_ticks([])\n\n # plot masked depth image\n plt.subplot(2, 3, 5)\n plt.title('masked de')\n if apply_depth_mask:\n # mask high gradient regions ~ these are usually not as accurate\n grad = np.asarray(np.gradient(depth_pred))\n grad = np.abs(grad[0, :, :]) + np.abs(grad[1, :, :])\n grad_mask = grad < 0.9\n\n depth_mask = depth_pred < 50.0 # mask everything that is farther than 50m\n depth_pred = depth_pred * depth_mask * grad_mask\n\n plt.imshow(depth_pred)\n plt.gca().axes.get_yaxis().set_ticks([])\n plt.gca().axes.get_xaxis().set_ticks([])\n\n # plot masked seg\n plt.subplot(2, 3, 6)\n plt.title('masked seg')\n # mask out pixels where the certainty of the class prediction is lower than the uncertainty threshold\n uc = np.max(seg_pred, 0)\n uc_mask = uc > uncertainty_threshold\n seg_labels = np.argmax(seg_pred, 0) + 1\n seg_labels *= uc_mask\n mask = np.zeros(shape=(seg_labels.shape[0], seg_labels.shape[1], 3))\n for key in CLASSES:\n class_mask = np.isin(seg_labels, np.asarray(key))\n mask[:, :, 0] += class_mask * CLASS_COLORS[key][0]\n mask[:, :, 1] += class_mask * CLASS_COLORS[key][1]\n mask[:, :, 2] += class_mask * CLASS_COLORS[key][2]\n mask = np.clip(mask, 0, 1)\n plt.imshow(img)\n plt.imshow(mask, alpha=0.3)\n plt.gca().axes.get_yaxis().set_ticks([])\n plt.gca().axes.get_xaxis().set_ticks([])\n\n plt.draw()", "title": "" }, { "docid": "8f7cde410292617b7892b4851d894942", "score": "0.5281428", "text": "def mark_regions_image(self, image, hist):\n\n\n temp=image\n\n h = temp.shape\n size2 = h[0], h[1], 3\n temp2 = np.zeros(size2, dtype=np.uint8)\n\n for px in range(1, h[0]-10):\n for py in range(1, h[1]-10):\n\n if (temp[px][py]!=0) and [hist[int(temp[px,py])]>15]:\n\n if (temp[px,py]%4==0):\n\n temp2[px,py]=(255,0,0)\n\n elif (temp[px, py] % 6 == 0):\n temp2[px, py] = (0, 255, 0)\n\n elif (temp[px, py] % 2 == 0):\n temp2[px, py] = (0, 0, 255)\n\n elif (temp[px, py] % 5 == 0):\n temp2[px, py] = (255, 255, 0)\n elif (temp[px, py] % 3 == 0):\n temp2[px, py] = (155, 0, 155)\n elif (temp[px, py] % 7 == 0):\n temp2[px, py] = (255, 155, 0)\n else:\n 
temp2[px, py] = (0, 255, 255)\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n #cv2.putText(temp2, 'OpenCV', (50, 250), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)\n cv2.imshow('Colored Regions', temp2)\n cv2.waitKey(0)\n return temp2", "title": "" }, { "docid": "a440041fec16cb496b5698526a3ae84b", "score": "0.5281239", "text": "def labeled_mask_from_experiment(exp):\n with PIL.Image.open(\n exp['ophys_average_intensity_projection_image']) as fim:\n im = np.array(fim)\n # layered mask with uint32 ids as intensities\n mask = layered_mask_from_rois(exp['cell_rois'], im.shape)\n\n # relabel as ordered uint16 and a translating map\n x = np.unique(mask)\n relabeled = np.zeros_like(mask).astype('uint16')\n rdict = {}\n for i, ix in enumerate(x):\n ind = np.nonzero(mask == ix)\n relabeled[ind] = i\n rdict[str(i)] = int(ix)\n\n # do not list the background\n rdict.pop(\"0\")\n\n return relabeled, rdict", "title": "" }, { "docid": "e04a8fd744df8111d152281a46d5bbcf", "score": "0.5276558", "text": "def generate_from_single_image(decoder, threshold=50, preprocess=None, decoder_callback=None):\n img_array = decoder.decode(callback=decoder_callback)\n if len(img_array.shape) == 3:\n img_array = np.mean(img_array, 2)\n if preprocess is not None:\n array = preprocess(img_array)\n else:\n array = img_array\n masked_array = np.ma.masked_where(array > threshold, array, copy=True)\n labeled_map, n = ndimage.label(masked_array.mask)\n return (img_array, labeled_map)", "title": "" }, { "docid": "cbb53b6bdc4c7b5121f218575e927e37", "score": "0.5275968", "text": "def drawThreshold(img):\n font = cv2.FONT_HERSHEY_SIMPLEX\n gray = img[:,:,1]\n blur = cv2.GaussianBlur(gray,(5,5),0)\n img1 = adaptiveThreshold(blur,detectThreshold)\n img1Not = cv2.bitwise_not(img1)\n \n zeros = np.zeros_like(img)\n fg = cv2.bitwise_or(zeros,(0,0,255),mask = img1)\n bg = cv2.bitwise_and(img,(255,255,255),mask = img1Not)\n \n newImg = cv2.add(fg,bg)\n s = \"detectThreshold = \"+ \"%5.2f\" % detectThreshold\n cv2.putText(newImg,s,(10,50), font, 1,(0,0,255),1,cv2.LINE_AA)\n\n return newImg", "title": "" }, { "docid": "a7661958c8c0ee395130ac12d72182f9", "score": "0.5270205", "text": "def binarize(img,filter_size = 21):\r\n img = cv2.cvtColor(img.copy(), cv2.COLOR_RGB2GRAY)\r\n img = cv2.GaussianBlur(img,(21,21),0)\r\n _,img = cv2.threshold(img,0,255,cv2.THRESH_OTSU | cv2.THRESH_BINARY )\r\n\r\n # if the image is predominately white, flip the color\r\n whitefrac = np.sum(img)/(255*img.shape[0]*img.shape[0])\r\n if whitefrac > .5:\r\n img = 255 - img\r\n return img", "title": "" }, { "docid": "b12e17e31a75fc4dfd9f8f938d11a7a4", "score": "0.52680737", "text": "def add_eges_grayscale(image):\n greyscale = rgb2gray(image)\n laplacian = np.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]])\n edges = scipy.ndimage.filters.correlate(greyscale, laplacian)\n for index,value in np.ndenumerate(edges):\n edges[index] = 255-greyscale[index] if value == 0 else 0\n return edges", "title": "" }, { "docid": "9367db02a22a294ced74ce20d6e20566", "score": "0.5266831", "text": "def draw_above_threshold(self,threshold):\n \n print \"threshold luminance: {}\".format(threshold)\n size = self.image.width * self.image.height\n px = []\n py = []\n \n #store a 2d array of pixels\n array_2 = []\n \n #append pixels that are likely part of an event to array_2\n first_px = [self.image.width-1,self.image.height-1]\n last_px = [0,0]\n for y in range(0,self.image.height-1):\n row = []\n for x in range(0,self.image.width-1):\n r,g,b = self.pixelObj[x,y]\n brightness = (r + g + b)/3\n if 
brightness >= threshold:\n \n if x < first_px[0]:\n first_px[0] = x\n if y < first_px[1]:\n first_px[1] = y \n \n if x > last_px[0]:\n last_px[0] = x\n if y > last_px[1]:\n last_px[1] = y\n \n self.pixelObj[x,y] = (100,100,100)\n row.append([brightness,brightness,brightness])\n px.append(x)\n py.append(y)\n else:\n row.append([0,0,0])\n array_2.append(row)\n \n #crop the image so only the selected pixels are visible\n np_array = np.array(array_2)\n crop = (first_px[0],first_px[1],last_px[0],last_px[1])\n \n width, height = (abs(last_px[0]-first_px[0]),abs(last_px[1]-first_px[1]))\n if width <= 3 or height <= 3:\n print \"Insufficient area: {} px ({}x{})\".format(width * height,width,height)\n return\n\n \n straight = Image.new('RGB', (len(array_2[0]), len(array_2)))\n straight.putdata([tuple(p) for row in array_2 for p in row])\n straight = straight.crop(crop)\n \n #save image and replace if specified\n straight.save('{}_edited.png'.format(self.name))\n if self.replaces:\n os.system('rm -f {}'.format(self.name))\n new_arr = np.array(straight)", "title": "" }, { "docid": "1fe08a8f0eb731026e221ce025880fca", "score": "0.5264102", "text": "def np_image(self):\n pass", "title": "" }, { "docid": "efd3db6c4181aa5f7a8e87f05fd0e96e", "score": "0.5261619", "text": "def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)", "title": "" }, { "docid": "efd3db6c4181aa5f7a8e87f05fd0e96e", "score": "0.5261619", "text": "def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)", "title": "" }, { "docid": "efd3db6c4181aa5f7a8e87f05fd0e96e", "score": "0.5261619", "text": "def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)", "title": "" }, { "docid": "2eae5919cf87a63381b2a6be4b28f03c", "score": "0.52526844", "text": "def shapefile_to_annotations(shapefile, rgb, savedir=\".\"):\n #Read shapefile\n gdf = gp.read_file(shapefile)\n \n #get coordinates\n df = gdf.geometry.bounds\n \n #raster bounds\n with rasterio.open(rgb) as src:\n left, bottom, right, top = src.bounds\n \n #Transform project coordinates to image coordinates\n df[\"tile_xmin\"] = df.minx - left\n df[\"tile_xmin\"] = df[\"tile_xmin\"].astype(int)\n \n df[\"tile_xmax\"] = df.maxx - left\n df[\"tile_xmax\"] = df[\"tile_xmax\"].astype(int)\n \n #UTM is given from the top, but origin of an image is top left\n \n df[\"tile_ymax\"] = top - df.miny \n df[\"tile_ymax\"] = df[\"tile_ymax\"].astype(int)\n \n df[\"tile_ymin\"] = top - df.maxy\n df[\"tile_ymin\"] = df[\"tile_ymin\"].astype(int) \n \n #Add labels is they exist\n if \"label\" in gdf.columns:\n df[\"label\"] = gdf[\"label\"]\n else:\n df[\"label\"] = \"Tree\"\n \n #add filename\n df[\"image_path\"] = os.path.basename(rgb)\n \n #select columns\n result = df[[\"image_path\",\"tile_xmin\",\"tile_ymin\",\"tile_xmax\",\"tile_ymax\",\"label\"]]\n result = result.rename(columns={\"tile_xmin\":\"xmin\",\"tile_ymin\":\"ymin\",\"tile_xmax\":\"xmax\",\"tile_ymax\":\"ymax\"})\n image_name = os.path.splitext(os.path.basename(rgb))[0]\n csv_filename = os.path.join(savedir, \"{}.csv\".format(image_name))\n \n #ensure no zero area polygons due to rounding to pixel size\n result = result[~(result.xmin == result.xmax)]\n result = result[~(result.ymin == result.ymax)]\n \n #write file\n result.to_csv(csv_filename,index=False)", "title": "" }, { "docid": "45c56c247babfe47cc3fdeaee381930e", "score": "0.5250915", "text": "def darknetify(bbox, imshape):\n xmin, ymin, xmax, ymax = bbox\n imheight = imshape[0]\n imwidth = imshape[1]\n x = (xmin + xmax)/2 / imwidth\n y = (ymin + 
ymax)/2 / imheight\n w = (xmax - xmin) / imwidth\n h = (ymax - ymin) / imheight\n clsid = 0 # We're only using 1 class for targets;\n #a separate nn will classify their type\n return ' '.join(str(x) for x in (clsid, x, y, w, h))", "title": "" }, { "docid": "8d8baf23037545d3b79a0c4550e1e2b5", "score": "0.5247526", "text": "def grayscale_img(image):\n return np.dot(image[..., :3], [0.299, 0.587, 0.114])", "title": "" }, { "docid": "f879a660703c474fb558676f8d2cc93d", "score": "0.5245138", "text": "def assemble_labels (viewer,raw_label,mod_label): \r\n tmp = binary_fill_holes(mod_label)\r\n thresh = 1\r\n points = viewer.layers['Points'].data \r\n im = raw_label\r\n seeds = []\r\n for j in range(points.shape[0]):\r\n seeds.append(Point(round(points[j,0]),round(points[j,1])))\r\n binaryImg = regionGrow(im,seeds,thresh)\r\n inverse = 1-binaryImg.astype(int)\r\n multiplied = cv2.multiply(inverse, im,dtype=cv2.CV_8U)\r\n merged = join_segmentations(multiplied.astype(int),tmp.astype(int))\r\n viewer.add_labels(merged>0, name='merged_mask')\r\n viewer.add_labels(label(merged.astype(int)), name='merged_labels')", "title": "" }, { "docid": "76c3eca0853e916f7b31b73eff121783", "score": "0.5244108", "text": "def enhance_img(img):\r\n kernel = morp.disk(32)\r\n img_local = rank.equalize(img.astype(np.uint8), selem=kernel)\r\n\r\n enhanced = cv2.GaussianBlur(img_local, (5, 5), 0)\r\n return enhanced", "title": "" }, { "docid": "1c304133994ed3bc35b575c937bf8f3e", "score": "0.5244088", "text": "def format_img_channels(img, C):", "title": "" }, { "docid": "e713bc09f26011af2c03a14a75ebe33a", "score": "0.52440834", "text": "def __blur_edges__(self, imaged):\n image = self.np.array(imaged)\n image = self.cv2.cvtColor(image, self.cv2.COLOR_RGBA2BGRA)\n # extract alpha channel\n a = image[:, :, 3]\n # blur alpha channel\n ab = self.cv2.GaussianBlur(a, (0, 0), sigmaX=2, sigmaY=2, borderType=self.cv2.BORDER_DEFAULT)\n # stretch so that 255 -> 255 and 127.5 -> 0\n aa = self.skimage.exposure.rescale_intensity(ab, in_range=(140, 255), out_range=(0, 255))\n # replace alpha channel in input with new alpha channel\n out = image.copy()\n out[:, :, 3] = aa\n image = self.cv2.cvtColor(out, self.cv2.COLOR_BGRA2RGBA)\n return Image.fromarray(image)", "title": "" }, { "docid": "5290b605828d908ddfb73ccd42f3c4b3", "score": "0.52413917", "text": "def gray2outerbound(gray, width):\n h, w = gray.shape[:2]\n gray[gray == 76] = 255\n gray[gray == 151] = 255\n\n label = gray2mask(gray)\n label_binary = label_binarize(label.flatten(), classes=range(0, 3))\n label_binary = np.reshape(label_binary, (h, w, -1))\n\n tmp = ndimage.distance_transform_cdt(label_binary[:, :, 0], 'taxicab')\n outer_bound = np.logical_and(tmp >= 1, tmp <= width).astype(np.uint8)\n\n return outer_bound", "title": "" }, { "docid": "fbcbe70221e47f73663d47e71e89a270", "score": "0.52410036", "text": "def resize_annotation_bbox(df, target_size):\n target_size = int(target_size)\n y_new = []\n for i, bbox in enumerate(df['bbox']):\n path = df['fn_orig'][i]\n # the original microfiber images has different sizes\n img = cv2.imread(str(path))\n r, c, _ = img.shape # original img size\n y = np.array([int(b) for b in bbox.split()])\n if len(y) == 4:\n new_bb = bb_hw_numpy(resize_bbox((r, c), y,\n (target_size, target_size)))\n y_new.append(' '.join([str(int(n)) for n in new_bb]))\n elif len(y) > 4:\n temp_bbs = []\n for j in range(int(len(y) / 4)):\n resized_bb = bb_hw_numpy(resize_bbox((r, c),\n y[j * 4:(j * 4 + 4)],\n (target_size, target_size)))\n resized_bb = ' 
'.join([str(int(n)) for n in resized_bb])\n temp_bbs.append(resized_bb)\n y_new.append(' '.join(temp_bbs))\n\n df['bbox_resized'] = y_new", "title": "" }, { "docid": "130e7e47c4b2faf71fa0291e9e8c8113", "score": "0.5237399", "text": "def toimage(arr, high=..., low=..., cmin=..., cmax=..., pal=..., mode=..., channel_axis=...):\n ...", "title": "" }, { "docid": "4b568252713111d0df71b19e997abd00", "score": "0.5234785", "text": "def denormalise(self,img):\r\n img *= (0.229, 0.224, 0.225)\r\n img += (0.485, 0.456, 0.406)\r\n img *= 255.000\r\n img=img.astype(np.uint8).clip(0,255)\r\n return img", "title": "" }, { "docid": "7117476a9b5f5dfcacfd75c5e1ef3142", "score": "0.5228705", "text": "def blob_coloring(self, image):\n\n\n h = image.shape\n size = h[0], h[1], 1\n\n temp = np.zeros(size, dtype=np.uint8)\n\n\n\n image=255-image\n\n\n\n\n\n k=1\n\n\n b1=h[0]-10\n b2= h[1]-10\n for py in range(1, h[1]-1):\n for px in range(1, h[0]-1):\n\n if (image[px, py] == 0):\n continue\n else:\n\n if (image[px,py]==255) and (image[px,py-1]==0) and (image[px-1,py]==0):\n temp[px,py]=k\n\n k=k+1\n if (image[px, py] == 255) and (image[px, py - 1] == 0) and (image[px - 1, py] == 255):\n temp[px, py]= temp[px-1, py]\n\n\n if (image[px, py] == 255) and (image[px, py - 1] == 255) and (image[px - 1, py] == 0):\n temp[px, py] = temp[px , py-1]\n if (image[px, py] == 255) and (image[px, py - 1] == 255) and (image[px - 1, py] == 255):\n temp[px, py] = temp[px, py-1]\n temp[px-1 , py]=temp[px , py-1]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n regions = temp\n\n return regions,k,image", "title": "" }, { "docid": "efb6711c682a95a9202190d76e73c933", "score": "0.5222325", "text": "def prepro(image):\n image = image[::4, ::4, :] # downsample by factor of 4\n image = color.rgb2gray(image) # turn to grayscale\n return image - 0.5 # 0-center", "title": "" }, { "docid": "88cd23e56c02c6b9c2256cd0cf5f62ae", "score": "0.5219541", "text": "def preprocess_image(image):\n image = image / 255.\n\n return image", "title": "" }, { "docid": "f7ba29dc81517a7394c962522f93da86", "score": "0.52192533", "text": "def convert_to_grey_scale(image):\n out = color.rgb2gray(image)\n\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n\n return out", "title": "" }, { "docid": "b3b69cf13de3911f53a159920d6f7ed4", "score": "0.5218723", "text": "def _parse_ann_info(self, img_info, ann_info):\n # shared annotations for visible and full objects\n gt_labels = []\n gt_layer_orders = []\n gt_pair_orders = []\n gt_bboxes_ignore = []\n # visible annotations\n gt_v_bboxes = []\n gt_v_masks = []\n # full annotations\n gt_f_bboxes = []\n gt_f_masks = []\n w, h = img_info['width'], img_info['height']\n\n for i, ann in enumerate(ann_info):\n if ann.get('ignore', False):\n continue\n if 'inmodal_bbox' in ann.keys(): # processing the old kins dataset\n ann['a_bbox'] = ann['bbox']\n ann['i_bbox'] = ann['inmodal_bbox']\n x1, y1, w1, h1 = ann['a_bbox']\n x2, y2, w2, h2 = ann['i_bbox']\n if w1 < 1 or h1 < 1 or w2 < 1 or h2 < 1:\n continue\n f_bbox = [x1, y1, x1 + w1 - 1, y1 + h1 - 1]\n gt_f_bboxes.append(f_bbox)\n v_bbox = [x2, y2, x2 + w2 - 1, y2 + h2 - 1]\n gt_v_bboxes.append(v_bbox)\n\n gt_labels.append(self.cat2label[ann['category_id']])\n if 'layer_order' in ann.keys():\n ann['ico_id'] = ann['layer_order']\n gt_layer_orders.append(ann['ico_id'])\n if 'inmodal_seg' in ann.keys():\n ann['i_segm'] = ann['inmodal_seg']\n gt_v_masks.append(ann['i_segm'])\n if 'segmentation' in ann.keys():\n gt_f_masks.append(ann['segmentation'])\n else:\n # decide the amodal mask first to get pairwise 
order\n rles = maskUtils.frPyObjects(ann['a_segm'], h, w)\n rle = maskUtils.merge(rles)\n f_mask = maskUtils.decode(rle).squeeze()\n gt_f_masks.append(f_mask)\n if 'pair_order' in ann.keys():\n gt_pair_orders.append(list(ann['pair_order'].values()))\n\n if 'pair_order' not in ann.keys():\n gt_pair_orders = pairwise_ranking(gt_f_masks, gt_layer_orders)\n\n if gt_f_bboxes:\n gt_labels = np.array(gt_labels, dtype=np.int64)\n gt_layer_orders = np.array(gt_layer_orders, dtype=np.int64)\n gt_pair_orders = np.array(gt_pair_orders, dtype=np.int64)\n gt_v_bboxes = np.array(gt_v_bboxes, dtype=np.float32)\n gt_f_bboxes = np.array(gt_f_bboxes, dtype=np.float32)\n else:\n gt_labels = np.array([], dtype=np.int64)\n gt_layer_orders = np.array([], dtype=np.int64)\n gt_pair_orders = np.array([], dtype=np.int64)\n gt_v_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_f_bboxes = np.zeros((0, 4), dtype=np.float32)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n ann = dict(\n bboxes=gt_v_bboxes,\n labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore,\n masks=gt_v_masks,\n f_bboxes=gt_f_bboxes,\n f_masks=gt_f_masks,\n l_orders=gt_layer_orders,\n p_orders=gt_pair_orders,\n cat2label=self.cat2label\n )\n\n return ann", "title": "" }, { "docid": "f643836c0b6f1fde4be8361515a326ec", "score": "0.5217195", "text": "def visusalize_detections(img_path, xmin, ymin, xmax, ymax, score, plt_name='output', ext='.png', visualization_folder=None, thresh=0.5):\n \n im = cv2.imread(img_path)\n xmin = np.array(xmin)\n ymin = np.array(ymin)\n xmax = np.array(xmax)\n ymax = np.array(ymax)\n score = np.array(score)\n \n inds = np.where(score[:] >= thresh)[0]\n #print('inds:',inds)\n xmin = xmin[inds]\n xmax = xmax[inds]\n ymin = ymin[inds]\n ymax = ymax[inds]\n score = score[inds]\n \n #print(xmin)\n #print(ymin)\n #print(xmax)\n #print(ymax)\n fig, ax = plt.subplots(figsize=(12, 12))\n\n\n if im.shape[0] == 3:\n im_cp = im.copy()\n im_cp = im_cp.transpose((1, 2, 0))\n #if im.min() < 0:\n # pixel_means = cfg.PIXEL_MEANS\n # im_cp = im_cp + pixel_means\n\n im = im_cp.astype(dtype=np.uint8)\n\n im = im[:, :, (2, 1, 0)]\n\n ax.imshow(im, aspect='equal')\n\t\n if score.shape[0] == 0: \n return \n\t\t\n for i in range(score.shape[0]): \n ax.add_patch(\n plt.Rectangle((xmin[i], ymin[i]),\n xmax[i]-xmin[i],\n ymax[i]-ymin[i], fill=False,\n edgecolor=(0, score[i], 0), linewidth=3)\n )\n\t \n\n plt.axis('off')\n plt.tight_layout()\n plt.draw()\n if visualization_folder is not None:\n if not os.path.exists(visualization_folder):\n os.makedirs(visualization_folder)\n plt_name += ext\n plt.savefig(os.path.join(visualization_folder,plt_name),bbox_inches='tight')\n print('Saved {}'.format(os.path.join(visualization_folder, plt_name)))\n else:\n print('Visualizing {}!'.format(plt_name))\n plt.show()\n plt.clf()\n plt.cla()", "title": "" }, { "docid": "c6b22161c67a3b00e9649b9cda767fc8", "score": "0.52135235", "text": "def __call__(self, img):\n return ImageOps.equalize(img.convert('RGB'))", "title": "" }, { "docid": "3fcfd045d13a4db639c68b66fca7e9b1", "score": "0.5210693", "text": "def __init__(self, image, skin_mask, label_number,rectangle_slices):\n \n self.label_number = label_number\n self.y0 = rectangle_slices[0].start\n self.y1 = rectangle_slices[0].stop\n self.x0 = rectangle_slices[1].start \n self.x1 = rectangle_slices[1].stop\n\n self.bounding_rectangle_size = (self.x1-self.x0)*(self.y1-self.y0)\n self.bounding_rectangle_skin_pixels 
= np.count_nonzero(skin_mask[rectangle_slices])\n self.bounding_ratio_skin_pixels = self.bounding_rectangle_skin_pixels / self.bounding_rectangle_size\n self.image_ratio_skin_pixels = self.bounding_rectangle_skin_pixels / (image.shape[0]*image.shape[1])\n self.bounding_rectangle_average_pixel_intensity = np.average(image[rectangle_slices].take([0], axis=2))", "title": "" } ]
032798927d4a503e48066d0471f63a83
Label connected domains, calculate area fractions of dominant domain. Also marks domains that are present on both sides with BLUE. v0.2
[ { "docid": "3462743ba1f2afaed0f77efcbc9ed176", "score": "0.5651883", "text": "def Label(im, Opt):\n BCount=(im==0).sum()\n WCount=(im.size-BCount)\n BFrac=BCount/(im.size)\n WFrac=WCount/(im.size)\n LabArray, LNumFeat = scipy.ndimage.measurements.label(im)\n WDomFrac=(LabArray==1).sum()/(WCount)\n WDomI=1\n for i in range(2,LNumFeat):\n TestFrac=(LabArray==i).sum()/(WCount)\n if TestFrac > WDomFrac:\n WDomFrac=TestFrac\n WDomI=i\n \n #print(\"Dominant index %d is %f of total\" % (LDomI, LDomFrac))\n WDomMask= ( LabArray==WDomI )*255;\n WDomMaskI=Image.fromarray(WDomMask).convert(mode=\"L\")\n \"\"\"\n Part 2 doing domains top.bottom\n \"\"\"\n #Size=int(Opt.NmPP*10) # what is the top zone height? here 10 nm\n Size = 2 # cus lower res IDE \n TLab=np.unique(LabArray[:Size]);BLab=np.unique(LabArray[-Size:]); # What areas are at top? Which are at bottom? \n ThroughLab=np.intersect1d(TLab,BLab, assume_unique=True);\n ThroughLab=ThroughLab[ThroughLab!=0]; # remove zero as it is background\n ThroughMask=np.in1d(LabArray, ThroughLab).reshape(LabArray.shape)\n ThroughMaskI=Image.fromarray(ThroughMask*255).convert(mode=\"L\") \n \n \"\"\"\n Part 3 making images\n \"\"\"\n (CIMH,CIMW)=im.shape\n \n RImage=Image.new('RGB',(CIMW,CIMH),'Red')\n BImage=Image.new('RGB',(CIMW,CIMH),'Blue')\n WLabI=scipy.misc.toimage(LabArray).convert(mode=\"RGB\") # Labeled image\n WDomCI=Image.composite(RImage,Image.fromarray(100*np.uint8(im)).convert(mode=\"RGB\"),WDomMaskI) # Red Dom on Original\n WLabDomCI=Image.composite(RImage,WLabI,WDomMaskI) # red dom on mask image\n WThroughCI=Image.composite(BImage,Image.fromarray(100*np.uint8(im)).convert(mode=\"RGB\"),ThroughMaskI)\n if Opt.LabelSh == 1: \n WLabI.show()\n WDomCI.show()\n WLabDomCI.show()\n WThroughCI.show()\n if Opt.LabelSa == 1:\n WLabI.save(os.path.join(Opt.FPath,\"output\",Opt.FName+\"Lab.tif\"))\n WDomCI.save(os.path.join(Opt.FPath,\"output\",Opt.FName+\"DomC.tif\"))\n WDomCI.save(os.path.join(Opt.FPath,\"output\",Opt.FName+\"LabDomC.tif\"))\n WThroughCI.save(os.path.join(Opt.FPath,\"output\",Opt.FName+\"ThroughDomC.tif\"))\n return(WFrac, BFrac, WDomI, WDomFrac);", "title": "" } ]
[ { "docid": "581cae9893a0ea09573afb9484c4db18", "score": "0.613723", "text": "def areaDisjFraction(hemlabels, dablabels):\n botharea = np.count_nonzero(np.logical_or(hemlabels, dablabels))\n if botharea == 0:\n return 0.\n else:\n return float(np.count_nonzero(dablabels)) / botharea", "title": "" }, { "docid": "0ca3fc0cb691f970a3590f3c082c4e8c", "score": "0.55446583", "text": "def _stat_domain(self):\n dump_log(\"Start calculating domain stats\")\n for domain, value in self.d_count.items():\n count_1 = value[\"1st_count\"]\n count_2 = value[\"2nd_count\"]\n count_no_1 = self.g_count_1 - count_1\n count_no_2 = self.g_count_2 - count_2\n data = \\\n numpy.array([[count_1, count_2], [count_no_1, count_no_2]])\n x2, p, dof, exp = stats.chi2_contingency(data)\n self.d_count[domain][\"p_value\"] = p\n if p < self.d_config[\"CONFIG\"][\"THRESHOLD_P_VALUE\"]:\n l_exp = list(exp.ravel())\n b_check = False\n for num_exp in l_exp:\n if num_exp <= self.d_config[\"CONFIG\"][\"COCHRAN_RULE\"]:\n b_check = True\n break\n if b_check:\n continue\n res = data - exp\n res_var = numpy.zeros(res.shape)\n it = numpy.nditer(data, flags=[\"multi_index\"])\n d_sum = data.sum()\n while not it.finished:\n var = (1 - (data[:, it.multi_index[1]].sum() / d_sum)) * \\\n (1 - (data[it.multi_index[0], :].sum() / d_sum))\n res_var[it.multi_index[0], it.multi_index[1]] = var\n it.iternext()\n stdres = res / numpy.sqrt(exp * res_var)\n if stdres[0][0] >= 1.96:\n self.d_count_1st[domain] = dict()\n self.d_count_1st[domain][\"1st_count\"] = count_1\n self.d_count_1st[domain][\"2nd_count\"] = count_2\n self.d_count_1st[domain][\"p_value\"] = p\n if stdres[0][1] >= 1.96:\n self.d_count_2nd[domain] = dict()\n self.d_count_2nd[domain][\"1st_count\"] = count_1\n self.d_count_2nd[domain][\"2nd_count\"] = count_2\n self.d_count_2nd[domain][\"p_value\"] = p\n\n return True", "title": "" }, { "docid": "ced0ca938f410dd45560da34d448adc7", "score": "0.5361558", "text": "def area_covered(self):\r\n\r\n a = 0\r\n c = self.get_leaf_list()\r\n\r\n for r in c:\r\n a += get_rectangle_area(r.value)\r\n\r\n return a", "title": "" }, { "docid": "a48d0a382d7956facbf2bd423fcecb7f", "score": "0.5319947", "text": "def calculate_population_category_diversity(y_predicted, content_array):\n ave_coverage = content_array.map(lambda (id, array): sum(array)).mean()\n rating_array_raw = y_predicted.keyBy(lambda row: row[1]).join(content_array)\\\n .map(lambda (id, (rating, array)): array).collect()\n rating_array = map(sum,zip(*np.array(rating_array_raw)))\n cat_diversity = sum([r/float(len(rating_array_raw)) for r in rating_array])*ave_coverage/float(len(rating_array))\n\n return cat_diversity", "title": "" }, { "docid": "d5299cc38a281f95812a32ee21986749", "score": "0.5289708", "text": "def calculate_gene_dstrarea(genes_df, temp_df, normalize=1):\n result = (temp_df.query('AUC_UseFLAG == 1')\n .groupby('GeneID')['PrecursorArea_dstrAdj']\n .sum()\n .divide(normalize)\n .to_frame(name='AreaSum_dstrAdj')\n )\n genes_df = genes_df.merge(result, how='left',\n left_on='GeneID', right_index=True)\n genes_df.loc[genes_df['IDSet'] == 3, 'AreaSum_dstrAdj'] = 0\n return genes_df", "title": "" }, { "docid": "ca924ccf5bfe43cb0ac9efbbe2580f35", "score": "0.5283437", "text": "def getDivisions():", "title": "" }, { "docid": "d45f875431ccd4a13005e9f13b9fb5e3", "score": "0.52229595", "text": "def labels(c1, c2):\n if (c2[1] - c1[0]) >= 0:\n return 1 # green bar\n else:\n return 0", "title": "" }, { "docid": "2a8a3b3099cac27ed7a3d4ddba7a99a4", "score": "0.52054596", "text": 
"def label_region_nd(label_img):\n\n group = regionprops(label_img)\n cluster_area = np.zeros(len(group))\n for k in range(len(group)):\n cluster_area[k] = group[k].area\n max_cluster_area = np.max(cluster_area)\n\n return max_cluster_area", "title": "" }, { "docid": "f53034c5e55ed6a8b40f1f969b28e5e2", "score": "0.5112679", "text": "def graph_connectivity(adata, label_key):\n if 'neighbors' not in adata.uns:\n raise KeyError(\n 'Please compute the neighborhood graph before running this function!'\n )\n\n clust_res = []\n\n for label in adata.obs[label_key].cat.categories:\n adata_sub = adata[adata.obs[label_key].isin([label])]\n _, labels = connected_components(\n adata_sub.obsp['connectivities'],\n connection='strong'\n )\n tab = pd.value_counts(labels)\n clust_res.append(tab.max() / sum(tab))\n\n return np.mean(clust_res)", "title": "" }, { "docid": "8d1979f08df5c1d6a066ec115f369a79", "score": "0.5094451", "text": "def _nsd_base(a_to_b, b_to_a, threshold):\n if isinstance(a_to_b, int):\n return 0\n if isinstance(b_to_a, int):\n return 0\n numel_a = len(a_to_b)\n numel_b = len(b_to_a)\n tp_a = np.sum(a_to_b <= threshold) / numel_a\n tp_b = np.sum(b_to_a <= threshold) / numel_b\n fp = np.sum(a_to_b > threshold) / numel_a\n fn = np.sum(b_to_a > threshold) / numel_b\n dc = (tp_a + tp_b) / (tp_a + tp_b + fp + fn + sys.float_info.min)\n return dc", "title": "" }, { "docid": "ec9ea10c89593d3dbc82ee25534f25e2", "score": "0.50753975", "text": "def dissimilarity(self):\n totDist = 0\n for c in self.clusters:\n totDist += c.variability()\n return totDist", "title": "" }, { "docid": "25a1a0de91c555830b70497318475de2", "score": "0.5062052", "text": "def ComputeArea(self):\r\n pass", "title": "" }, { "docid": "7aa08d2cd19a94cc7b5ee290946f94b8", "score": "0.50553507", "text": "def areas_matched(self, class_labels):\n # set up storage\n areas = np.zeros(len(self.shapes.names()))\n \n # each UnitCell shape knows its area, so use it\n if self.shapes.shape_type() is 'UnitCell':\n for ishape, n_matched in enumerate(self.count_matched(class_labels)):\n uc_area = self.shapes[self.shapes.names()[ishape]].area()\n logging.debug('%s area %0.2f' % (self.shapes.names()[ishape],\n uc_area))\n areas[ishape] = float(n_matched) * uc_area\n\n # or use Delaunay triangulation to estimate enclosed area\n else: \n for ishape, sn in enumerate(self.shapes.names()):\n ids = self.particles_matched(class_labels, sn)\n if len(ids) > 0:\n area, edges = self.delaunay_neighbors.area_of_point_set(ids,\n self.config.x[ids],\n self.config.y[ids])\n areas[ishape] = area\n \n # return\n return areas", "title": "" }, { "docid": "adcb8c1f59cde5cb4b97ceec01585327", "score": "0.5050305", "text": "def discrimAnalysis(x, y):\r\n num_males = np.count_nonzero(y == 1)\r\n num_females = np.count_nonzero(y == 2)\r\n N = x.shape[0]\r\n\r\n indicator_male = y.copy()\r\n indicator_male[indicator_male == 2] = 0\r\n indicator_male = np.expand_dims(indicator_male, axis=1)\r\n indicator_male_double = np.concatenate([indicator_male, indicator_male], axis=1)\r\n\r\n indicator_female = y.copy()\r\n indicator_female[indicator_female == 1] = 0\r\n indicator_female[indicator_female == 2] = 1 \r\n indicator_female = np.expand_dims(indicator_female, axis=1)\r\n indicator_female_double = np.concatenate([indicator_female, indicator_female], axis=1)\r\n\r\n mu_male = np.sum(np.multiply(indicator_male_double, x), axis=0) / num_males\r\n mu_female = np.sum(np.multiply(indicator_female_double, x), axis=0) / num_females\r\n print(\"Male means\")\r\n print(mu_male)\r\n 
print('Female means')\r\n print(mu_female)\r\n\r\n cov = np.zeros((x.shape[1], x.shape[1]))\r\n for n in range(0, N):\r\n if y[n] == 1:\r\n diff = np.expand_dims(x[n], axis=1)-np.expand_dims(mu_male, axis=1)\r\n cov = cov + np.dot(diff, diff.T) \r\n else:\r\n diff = np.expand_dims(x[n], axis=1)-np.expand_dims(mu_female, axis=1)\r\n cov = cov + np.dot(diff, diff.T)\r\n cov = cov / (N)\r\n print('cov')\r\n print(cov)\r\n\r\n cov_male = np.zeros(cov.shape)\r\n cov_female = np.zeros(cov.shape)\r\n for n in range(0, N):\r\n if y[n] == 1:\r\n diff = np.expand_dims(x[n], axis=1)-np.expand_dims(mu_male, axis=1)\r\n cov_male = cov_male + np.dot(diff, diff.T)\r\n else:\r\n diff = np.expand_dims(x[n], axis=1)-np.expand_dims(mu_female, axis=1)\r\n cov_female = cov_female + np.dot(diff, diff.T)\r\n cov_male = cov_male / (num_males)\r\n cov_female = cov_female / (num_females)\r\n print('cov_male')\r\n print(cov_male)\r\n print('cov_female')\r\n print(cov_female)\r\n\r\n # plot N data points\r\n male_height = []\r\n male_weight = []\r\n female_height = []\r\n female_weight = []\r\n for n in range(0, N):\r\n if y[n] == 1:\r\n male_height.append(x[n][0])\r\n male_weight.append(x[n][1])\r\n else:\r\n female_height.append(x[n][0])\r\n female_weight.append(x[n][1])\r\n\r\n # there are N height and N weight values \r\n x_limits = np.linspace(50, 80, N) \r\n y_limits = np.linspace(80, 280, N) \r\n x_mesh, y_mesh = np.meshgrid(x_limits, y_limits)\r\n\r\n male_lda_criteria = []\r\n male_qda_criteria = []\r\n female_lda_criteria = []\r\n female_qda_criteria = []\r\n x_coordinates = x_mesh[0].reshape(100, 1)\r\n for n in range(0, N):\r\n y_coordinates = y_mesh[n].reshape(100, 1)\r\n x_set = np.concatenate((x_coordinates, y_coordinates), axis=1)\r\n male_lda_criteria.append(util.density_Gaussian(mu_male,cov,x_set))\r\n female_lda_criteria.append(util.density_Gaussian(mu_female,cov,x_set))\r\n male_qda_criteria.append(util.density_Gaussian(mu_male,cov_male,x_set))\r\n female_qda_criteria.append(util.density_Gaussian(mu_female,cov_female,x_set))\r\n\r\n plt.scatter(male_height, male_weight, color = 'b', label='Male')\r\n plt.scatter(female_height, female_weight, color = 'r', label='Female')\r\n plt.legend(loc=2)\r\n male_CS = plt.contour(x_mesh, y_mesh, male_lda_criteria, colors='b')\r\n female_CS = plt.contour(x_mesh, y_mesh, female_lda_criteria, colors='r')\r\n lda_decision_boundary = np.asarray(male_lda_criteria) - np.asarray(female_lda_criteria)\r\n plt.contour(x_mesh, y_mesh, lda_decision_boundary, colors='k', levels=[0])\r\n plt.xlabel('Height')\r\n plt.ylabel('Weight')\r\n plt.title('LDA Contours and Decision Boundary')\r\n plt.savefig('lda.pdf')\r\n plt.show()\r\n\r\n plt.scatter(male_height, male_weight, color = 'b', label='Male')\r\n plt.scatter(female_height, female_weight, color = 'r', label='Female')\r\n plt.legend(loc=2)\r\n plt.contour(x_mesh, y_mesh, male_qda_criteria, colors='b')\r\n plt.contour(x_mesh, y_mesh, female_qda_criteria, colors='r')\r\n qda_decision_boundary = np.asarray(male_qda_criteria) - np.asarray(female_qda_criteria)\r\n plt.contour(x_mesh, y_mesh, qda_decision_boundary, colors='k', levels=[0])\r\n plt.xlabel('Height')\r\n plt.ylabel('Weight')\r\n plt.title('QDA Contours and Decision Boundary')\r\n plt.savefig('qda.pdf')\r\n plt.show()\r\n return (mu_male,mu_female,cov,cov_male,cov_female)", "title": "" }, { "docid": "c1203927d5e9e82e5dc9a32a4057e97e", "score": "0.5047957", "text": "def score_bridges(self):\n\n scored_bridges = []\n bridge_nodes = set()\n for node1, node2 in 
self.bridges:\n size1 = self.cluster_size[node1]\n size2 = self.cluster_size[node2]\n # using harmonic average\n score = (size1*size2)/(size1+size2)\n if size1 >= size2:\n if node1 not in bridge_nodes:\n scored_bridges.append((node1, score))\n bridge_nodes.add(node1)\n else: \n if node2 not in bridge_nodes:\n scored_bridges.append((node2, score))\n bridge_nodes.add(node2)\n\n # can optimize a tiny bit applying set to scored_bridges instead. \n self.scored_bridges = sorted(list(scored_bridges), key=lambda x:-x[1])", "title": "" }, { "docid": "487d592b5ec2504c6dab0dd2b6d80c31", "score": "0.50285745", "text": "def test_discretize_domain():\n test_class = FlowFieldTest()\n x, y, z = test_class.instance._discretize_turbine_domain()\n assert np.shape(x) == (2, 5, 5) and type(x) is np.ndarray \\\n and np.shape(y) == (2, 5, 5) and type(y) is np.ndarray \\\n and np.shape(z) == (2, 5, 5) and type(z) is np.ndarray", "title": "" }, { "docid": "10f6a06a9b8eb43358b89d14f50b89ca", "score": "0.5025853", "text": "def remove_disconnects(rho):\n # connected components analysis\n label_img, nr_labels = measure.label(rho,background=0,return_num=True)\n # only keep the two largest labels (background + largest component)\n max_labels = np.argsort([np.sum(label_img==i) for i in range(nr_labels+1)])[-2:]\n # mask on all labels not part of the largest components\n small_label_mask = np.logical_and(label_img!=max_labels[0],label_img!=max_labels[1])\n # set all small labels to background\n label_img[small_label_mask] = 0\n # convert to zero-one\n label_img[label_img>0] = 1\n return label_img", "title": "" }, { "docid": "5736ebb877496517d45243bef5062842", "score": "0.4992796", "text": "def addtotalareas(cls): #class parameter is traditionally cls\n total = 0\n for eachareacircle in cls.allcircleslist:\n total = total + eachareacircle.area()\n return total", "title": "" }, { "docid": "540bb6bf2d926d296d36588f0ef86e47", "score": "0.4992326", "text": "def LF_DaG_NO_CONCLUSION(c):\n positive_num = np.sum(\n [\n LF_DaG_ASSOCIATION(c) == POSITIVE,\n LF_DG_IS_BIOMARKER(c) == POSITIVE,\n LF_DG_DIAGNOSIS(c) == POSITIVE,\n LF_DaG_CELLULAR_ACTIVITY(c) == POSITIVE,\n LF_DaG_WEAK_ASSOCIATION(c) == NEGATIVE,\n LF_DaG_NO_ASSOCIATION(c) == NEGATIVE,\n ]\n )\n negative_num = np.abs(\n np.sum(\n [\n LF_DG_METHOD_DESC(c) == NEGATIVE,\n LF_DG_TITLE(c) == NEGATIVE,\n LF_DG_NO_VERB(c) == NEGATIVE,\n ]\n )\n )\n if positive_num - negative_num >= 1:\n return ABSTAIN\n return NEGATIVE", "title": "" }, { "docid": "e48fac7c0af2bafba0c813fc14a997c2", "score": "0.4971619", "text": "def discourse_diversity(self):\n # entropy\n nodes = []\n bcs = []\n coms = []\n \n for n, data in self.finalGraph.nodes(data=True):\n nodes.append(n)\n bcs.append(data[\"bc\"])\n coms.append(data[\"com\"])\n\n df = pd.DataFrame({\"node\": nodes, \"bc\": bcs, \"com\": coms}) # Because Pandas DataFrames are cool\n nodes_df = df.sort_values(\"bc\", ascending=False)\n self.nodes_df = nodes_df # store the original graph\n\n self.stats[\"communities\"] = len(nodes_df.com.value_counts())\n\n top_nodes = nodes_df # If topn_nodes is preferred .ìloc[[:self.topn_nodes]\n\n top_comms = nodes_df.com.value_counts() # If topn_nodes is preferred .ìloc[:self.topn_nodescomms]\n\n # Count topn nodes in topn communities\n cnt = Counter() # keeps values sorted, as opposed to dictionary\n for com_number in list(top_comms.index):\n cnt[com_number] = 0\n for word in top_nodes.node.values:\n if word in df[df[\"com\"] == com_number][\"node\"].values:\n cnt[com_number] += 1\n\n ord_dict = 
OrderedDict()\n for community_no, count in cnt.most_common(): # self.topn_comms if preferred\n ord_dict[community_no] = count\n\n coms = []\n counts = []\n for com, count in cnt.most_common(): # defaults to all, if topn is preferred self.topn_comms\n coms.append(com)\n counts.append(count)\n\n topn_dist_df = pd.DataFrame({\"count\": counts}, index=coms)\n \n # print(topn_dist_df) # how many nodes in every community\n\n # print(self.bc_top_split['community'])\n\n TSN = len(self.bc_top_split['community']) # Total Split Nodes (top influential nodes Jenks=2)\n E_split = eta(list(self.bc_top_split['community']))\n E_split_ideal = eta(list(range(1, len(self.bc_top_split['community']) + 1)))\n self.stats[\"TopBCNodesInComm\"] = TSN\n \n # entropy ration for Split Nodes\n if E_split_ideal > 0:\n ES = E_split/E_split_ideal\n else:\n ES = 0\n \n # proportion of the top community in BC nodes\n rs_mode = self.bc_top_split['community'].mode()\n if len(rs_mode) > 0:\n most_freq_com_SN = self.bc_top_split['community'].mode()[0]\n RSN = len(self.bc_top_split[self.bc_top_split['community']==most_freq_com_SN])/len(self.bc_top_split['community'])\n else:\n RSN = 0\n self.stats[\"TopBCNodesProp\"] = RSN\n\n\n\n TTN = len(self.bc_top['community']) # Total Top Nodes\n E_top = eta(list(self.bc_top['community']))\n E_top_ideal = eta(list(range(1, len(self.bc_top['community']) + 1)))\n self.stats[\"BCNodesInComm\"] = TTN\n \n # entropy for Top Nodes\n if E_top_ideal > 0:\n ET = E_top/E_top_ideal\n else:\n ET = 0\n\n # proportion of the top community in BC nodes\n rt_mode = self.bc_top['community'].mode()\n if len(rt_mode) > 0:\n most_freq_com_TN = self.bc_top['community'].mode()[0]\n RTN = len(self.bc_top[self.bc_top['community']==most_freq_com_TN])/len(self.bc_top['community'])\n else:\n RTN = 0\n self.stats[\"BCNodesProp\"] = RTN\n\n\n TLN = len(self.bc_top_lax['community'])\n E_top_lax = eta(list(self.bc_top_lax['community']))\n E_top_lax_ideal = eta(list(range(1, len(self.bc_top_lax['community']) + 1)))\n self.stats[\"BCNodesLaxInComm\"] = TLN\n\n\n if E_top_lax_ideal > 0:\n EL = E_top_lax/E_top_lax_ideal\n else:\n EL = 0\n\n # proportion of the top community in BC nodes\n rl_mode = self.bc_top_lax['community'].mode()\n if len(rl_mode) > 0:\n most_freq_com_LN = self.bc_top_lax['community'].mode()[0]\n RLN = len(self.bc_top_lax[self.bc_top_lax['community']==most_freq_com_LN])/len(self.bc_top_lax['community'])\n else:\n RLN = 0\n self.stats[\"BCNodesLaxProp\"] = RLN\n\n\n self.stats[\"entropyTopFirst\"] = ES\n self.stats[\"entropyTop\"] = ET\n self.stats[\"entropyTopLax\"] = EL\n\n E = EL\n\n # percentage of nodes in largest community\n nr_of_nodes = 0\n for part in self.partitions:\n for node in part:\n nr_of_nodes += 1\n\n if (len(self.partitions) > 0):\n C = len(self.partitions[0]) / nr_of_nodes\n else: \n C = 0\n self.C = C\n self.stats[\"nodesInTopCom\"] = C\n\n # how many of the top BC nodes in the top component\n sn_in_top_c = 0\n tn_in_top_c = 0\n ln_in_top_c = 0\n \n if (len(self.partitions) > 0):\n for node in self.partitions[0]:\n if ((self.bc_top_split).index == node).any():\n sn_in_top_c += 1\n if ((self.bc_top).index == node).any():\n tn_in_top_c += 1\n if ((self.bc_top_lax).index == node).any():\n ln_in_top_c += 1\n\n \n # number of nodes from the top comm in BC to total length of inf nodes\n if len(self.bc_top_split) > 0:\n BCST = sn_in_top_c/len(self.bc_top_split)\n else: \n BCST = 0\n\n if len(self.bc_top) > 0: \n BCTT = tn_in_top_c/len(self.bc_top)\n else:\n BCTT = 0\n\n if 
len(self.bc_top_lax) > 0:\n BCLT = ln_in_top_c/len(self.bc_top_lax)\n else: \n BCLT = 0\n\n # percentage of nodes in the giant component\n if (nr_of_nodes > 0):\n G = len(self.igraph.components().giant().vs) / nr_of_nodes\n else:\n G = 0\n self.G = G\n self.stats[\"nodesInGiantCom\"] = G\n\n M = self.M\n\n if nr_of_nodes == 0:\n self.stats[\"biasIndex\"] = \"Dispersed\"\n\n elif TSN <= 4: # very few top BC nodes, so influence distribution is biased – alert!\n \n if ES == 0 and G > 0.5 and TSN != 1: # they all belong to the same community and > 50% of the graph is connected\n\n # as ES == 0, all the top BC nodes belong to only 1 community, that's why BCST is either 0 or 1\n\n if TSN == 0:\n if (self.cutoffGraph.number_of_nodes() > 0):\n if G < 0.5:\n self.stats[\"biasIndex\"] = \"Dispersed\"\n else:\n if M > 0.65: # pronounced comm, several center of influence, one is focused but the rest is dispersed\n self.stats[\"biasIndex\"] = \"Dispersed\" \n elif 0.4 <= M <= 0.65: # while community structure may be high, all he most inf nodes are in the biggest comm\n self.stats[\"biasIndex\"] = \"Polarized\" #TODO may need modification\n elif 0.2 <= M < 0.4: # low comm, influence center with all top BC nodes in it\n self.stats[\"biasIndex\"] = \"Focused\"\n elif M < 0.2: # low comm, influence center with all top BC nodes in it\n self.stats[\"biasIndex\"] = \"Biased\" \n else:\n self.stats[\"biasIndex\"] = \"Dispersed\" \n \n elif C > 0.5 and BCST == 1: # > 50% of nodes in the top comm and all top BC belong to it\n \n self.stats[\"biasIndex\"] = \"Biased\" # everything is towards the same narrative\n \n elif C > 0.5 and BCST == 0: # > 50% of nodes in top comm, but all the influential words in another comm\n \n if M > 0.4: # pronounced comm, BC nodes oppose the top C\n self.stats[\"biasIndex\"] = \"Polarized\" # TODO interesting idea here \n elif 0.2 > M < 0.4: # not pronounced comm, BC nodes support the top C\n self.stats[\"biasIndex\"] = \"Focused\"\n elif M < 0.2: #low comm, 2 centers of influence, super linked\n self.stats[\"biasIndex\"] = \"Biased\"\n \n elif C <= 0.5 and BCST == 1: # less than half (but signif num) are in top C as well as all the top BC\n\n if M > 0.65: # pronounced comm, several center of influence, one is focused but the rest is dispersed\n self.stats[\"biasIndex\"] = \"Dispersed\" \n elif 0.4 < M <= 0.65: # while community structure may be high, all he most inf nodes are in the biggest comm\n self.stats[\"biasIndex\"] = \"Polarized\" #TODO may need modification\n elif M < 0.4: # low comm, influence center with all top BC nodes in it\n self.stats[\"biasIndex\"] = \"Biased\"\n\n elif C <= 0.5 and BCST == 0: # less than half in top (signif) comm but ALL top BC nodes are in another\n \n if M > 0.65: # very pronounced community structure, 2 centers of influence\n self.stats[\"biasIndex\"] = \"Diversified\" \n elif 0.65 > M >= 0.4: # pronounced comm, 2 centers of influence, connected\n self.stats[\"biasIndex\"] = \"Polarized\" #TODO interesting idea here \n elif 0.2 < M < 0.4: # not pronounced comm, 2 centers of influence, linked\n self.stats[\"biasIndex\"] = \"Focused\"\n elif M < 0.2: #low comm, 2 centers of influence, super linked\n self.stats[\"biasIndex\"] = \"Biased\" \n\n elif ES == 0 and G < 0.5 and TSN != 1: # all the nodes are in the same community but less than half nodes are in G\n\n if M > 0.65:\n self.stats[\"biasIndex\"] = \"Dispersed\" \n elif M > 0.4:\n self.stats[\"biasIndex\"] = \"Focused\" \n elif M <= 0.4:\n self.stats[\"biasIndex\"] = \"Biased\" \n \n elif ES 
>= 0.75 and TSN > 2:\n\n if M > 0.65:\n self.stats[\"biasIndex\"] = \"Dispersed\"\n elif 0.4 <= M <= 0.65:\n self.stats[\"biasIndex\"] = \"Diversified\"\n elif 0.2 < M < 0.4:\n self.stats[\"biasIndex\"] = \"Focused\"\n else:\n self.stats[\"biasIndex\"] = \"Biased\"\n \n elif (ES < 0.75 and ES > 0) or (ES == 0 and TSN == 1) or (TSN <= 2 and ES >= 0.75): # either it's ab aab, aabb or aaab or a\n \n # here we analyze like for TSN > 4 but with a bigger number of nodes\n if (TTN > 4) or (2 < TTN <= 4 and TSN == 1): #means influence is relatively spread\n\n if ET == 0 and G < 0.5: # all the nodes are in the same community but less than half nodes are in G\n \n if M > 0.65:\n self.stats[\"biasIndex\"] = \"Dispersed\" \n if M > 0.4:\n self.stats[\"biasIndex\"] = \"Focused\" \n elif M <= 0.4:\n self.stats[\"biasIndex\"] = \"Biased\" \n\n elif ET == 0 and G > 0.5:\n\n if BCTT == 1: # all the top nodes (and quite a few of them!) are in the tom comm\n \n if M >= 0.4: # while community structure may be high, all he most inf nodes are in the biggest comm\n self.stats[\"biasIndex\"] = \"Focused\"\n elif M < 0.4: # low comm, influence center with all top BC nodes in it\n self.stats[\"biasIndex\"] = \"Biased\"\n\n elif BCTT == 0: # less than half in top (signif) comm but ALL top BC nodes are in ANOTHER\n \n if M > 0.65: # very pronounced community structure, 2 centers of influence\n self.stats[\"biasIndex\"] = \"Diversified\" \n elif 0.65 > M >= 0.4: # pronounced comm, 2 centers of influence, connected\n self.stats[\"biasIndex\"] = \"Polarized\" #TODO interesting idea here \n elif 0.2 < M < 0.4: # not pronounced comm, 2 centers of influence, linked\n self.stats[\"biasIndex\"] = \"Focused\"\n elif M < 0.2: #low comm, 2 centers of influence, super linked\n self.stats[\"biasIndex\"] = \"Biased\" \n \n \n \n if ET > 0.42 and RTN <= 0.5: # a few nodes but most of them are not in top comm\n if M > 0.65:\n self.stats[\"biasIndex\"] = \"Dispersed\"\n elif 0.4 <= M <= 0.65:\n self.stats[\"biasIndex\"] = \"Diversified\"\n elif 0.2 < M < 0.4:\n self.stats[\"biasIndex\"] = \"Focused\"\n else:\n self.stats[\"biasIndex\"] = \"Biased\"\n\n elif ET > 0.42 and RTN > 0.5: # there's quite a few nodes, most of them are in the same comm, but the rest is quite dispersed\n\n if M > 0.65 and BCTT < 0.5: # most nodes are not in top comm\n self.stats[\"biasIndex\"] = \"Dispersed\"\n elif M >= 0.4 and BCTT <= 0.8:\n self.stats[\"biasIndex\"] = \"Diversified\"\n elif M > 0.2 and BCTT <= 0.8:\n self.stats[\"biasIndex\"] = \"Focused\"\n else:\n self.stats[\"biasIndex\"] = \"Biased\"\n\n elif (ET <= 0.42 and RTN > 0.5) or (RTN > 0.75): # there's a few nodes , most of them are in the same com, the rest is also homogeneous\n \n if M > 0.65:\n self.stats[\"biasIndex\"] = \"Diversified\"\n elif 0.65 >= M > 0.4:\n self.stats[\"biasIndex\"] = \"Polarized\" #TODO interesting idea here\n elif M < 0.4:\n self.stats[\"biasIndex\"] = \"Biased\"\n \n elif (ET <= 0.42 and RTN <= 0.5): # there's a few nodes, most of them not in the top comm, the rest is homogeneous\n\n if M > 0.65:\n self.stats[\"biasIndex\"] = \"Dispersed\"\n elif 0.65 >= M > 0.4:\n self.stats[\"biasIndex\"] = \"Diversified\"\n elif 0.2 < M < 0.4:\n self.stats[\"biasIndex\"] = \"Focused\"\n else:\n self.stats[\"biasIndex\"] = \"Biased\"\n \n # we had too few top nodes, so we expanded the search. 
means influence is very well spread\n elif (TLN > 4) or (TLN < 4 and TLN >= TSN):\n\n if EL > 0.42 and RLN <= 0.5: # a few nodes but most of them are not in top comm\n \n if M > 0.65:\n self.stats[\"biasIndex\"] = \"Dispersed\"\n elif 0.4 <= M <= 0.65:\n self.stats[\"biasIndex\"] = \"Diversified\"\n elif 0.2 < M < 0.4:\n self.stats[\"biasIndex\"] = \"Focused\"\n else:\n self.stats[\"biasIndex\"] = \"Biased\"\n\n elif EL > 0.42 and RLN > 0.5: # there's quite a few nodes, most of them are in the same comm, but the rest is quite dispersed\n\n if M > 0.65 and BCLT < 0.5: # most nodes are not in top comm\n self.stats[\"biasIndex\"] = \"Dispersed\"\n elif M >= 0.4 and BCLT <= 0.8:\n self.stats[\"biasIndex\"] = \"Diversified\"\n elif M > 0.2 and BCLT <= 0.8:\n self.stats[\"biasIndex\"] = \"Focused\"\n else:\n self.stats[\"biasIndex\"] = \"Biased\"\n\n elif (EL <= 0.42 and RLN > 0.5) or (RLN > 0.75): # there's a few nodes , most of them are in the same com, the rest is also homogeneous\n \n if M > 0.65:\n self.stats[\"biasIndex\"] = \"Diversified\"\n elif 0.65 >= M > 0.4:\n self.stats[\"biasIndex\"] = \"Polarized\" #TODO interesting idea here\n elif M < 0.4:\n self.stats[\"biasIndex\"] = \"Biased\"\n \n elif (EL <= 0.42 and RLN <= 0.5): # there's a few nodes, most of them not in the top comm, the rest is homogeneous\n\n if M > 0.65:\n self.stats[\"biasIndex\"] = \"Dispersed\"\n elif 0.65 >= M > 0.4:\n self.stats[\"biasIndex\"] = \"Diversified\"\n elif 0.2 < M < 0.4:\n self.stats[\"biasIndex\"] = \"Focused\"\n else:\n self.stats[\"biasIndex\"] = \"Biased\" \n\n else: # didn't find \n if RSN > 0.5: # aaab or aab\n # we didn't find any other separation, so probably these N nodes are outliers\n if M > 0.65:\n if BCST > 0.5: #dispersed community but most BC nodes are in top C \n self.stats[\"biasIndex\"] = \"Diversified\"\n elif BCST <= 0.5:\n self.stats[\"biasIndex\"] = \"Dispersed\"\n elif 0.4 <= M <= 0.65:\n if BCST > 0.5:\n self.stats[\"biasIndex\"] = \"Focused\"\n elif BCST <= 0.5:\n self.stats[\"biasIndex\"] = \"Diversified\"\n elif 0.2 < M < 0.4:\n if BCST > 0.5:\n self.stats[\"biasIndex\"] = \"Biased\"\n elif BCST <= 0.5:\n self.stats[\"biasIndex\"] = \"Focused\"\n else:\n self.stats[\"biasIndex\"] = \"Biased\"\n\n elif RSN <= 0.5: #ab #aabb\n #TODO interesting idea here\n if M > 0.65:\n self.stats[\"biasIndex\"] = \"Dispersed\"\n elif 0.4 <= M <= 0.65:\n self.stats[\"biasIndex\"] = \"Diversified\"\n elif 0.2 < M < 0.4:\n self.stats[\"biasIndex\"] = \"Focused\"\n elif M < 0.2:\n self.stats[\"biasIndex\"] = \"Biased\"\n \n\n\n\n \n elif TSN > 4:\n\n if ES == 0 and G < 0.5: # all the nodes are in the same community but less than half nodes are in G\n \n if M > 0.65:\n self.stats[\"biasIndex\"] = \"Dispersed\" \n if M > 0.4:\n self.stats[\"biasIndex\"] = \"Focused\" \n elif M <= 0.4:\n self.stats[\"biasIndex\"] = \"Biased\" \n\n elif ES == 0 and G > 0.5:\n\n if BCST == 1: # all the top nodes (and quite a few of them!) 
are in the tom comm\n \n if M >= 0.4: # while community structure may be high, all he most inf nodes are in the biggest comm\n self.stats[\"biasIndex\"] = \"Focused\"\n elif M < 0.4: # low comm, influence center with all top BC nodes in it\n self.stats[\"biasIndex\"] = \"Biased\"\n\n if BCST == 0: # less than half in top (signif) comm but ALL top BC nodes are in ANOTHER\n \n if M > 0.65: # very pronounced community structure, 2 centers of influence\n self.stats[\"biasIndex\"] = \"Diversified\" \n elif 0.65 > M >= 0.4: # pronounced comm, 2 centers of influence, connected\n self.stats[\"biasIndex\"] = \"Polarized\" #TODO interesting idea here \n elif 0.2 < M < 0.4: # not pronounced comm, 2 centers of influence, linked\n self.stats[\"biasIndex\"] = \"Focused\"\n elif M < 0.2: #low comm, 2 centers of influence, super linked\n self.stats[\"biasIndex\"] = \"Biased\" \n \n elif ES > 0.42 and RSN <= 0.5:\n \n if M > 0.65:\n self.stats[\"biasIndex\"] = \"Dispersed\"\n elif 0.4 <= M <= 0.65:\n self.stats[\"biasIndex\"] = \"Diversified\" #TODO interesting thing here\n elif 0.2 < M < 0.4:\n self.stats[\"biasIndex\"] = \"Focused\"\n else:\n self.stats[\"biasIndex\"] = \"Biased\"\n\n elif ES > 0.42 and RSN > 0.5:\n\n if M > 0.65 and BCST < 0.5:\n self.stats[\"biasIndex\"] = \"Dispersed\"\n elif M >= 0.4 and BCST <= 0.8:\n self.stats[\"biasIndex\"] = \"Diversified\"\n elif M > 0.2 and BCST <= 0.8:\n self.stats[\"biasIndex\"] = \"Focused\"\n else:\n self.stats[\"biasIndex\"] = \"Biased\"\n\n elif (ES <= 0.42 and RSN > 0.5) or (RSN > 0.75):\n \n if M > 0.65:\n self.stats[\"biasIndex\"] = \"Diversified\"\n elif 0.65 >= M > 0.4:\n self.stats[\"biasIndex\"] = \"Focused\"\n elif M < 0.4:\n self.stats[\"biasIndex\"] = \"Biased\"\n \n elif (ES <= 0.42 and RSN <= 0.5):\n\n if M > 0.65:\n self.stats[\"biasIndex\"] = \"Dispersed\"\n elif 0.65 >= M > 0.4:\n self.stats[\"biasIndex\"] = \"Diversified\"\n elif 0.2 < M < 0.4:\n self.stats[\"biasIndex\"] = \"Focused\"\n else:\n self.stats[\"biasIndex\"] = \"Biased\"\n \n # END OF BIAS INDEX CALCULATION\n\n self.biasIndex = self.stats[\"biasIndex\"]\n\n if self.biasIndex == 'Biased':\n print(\"{} bias index: BIASED\".format(self.textname))\n elif self.biasIndex == 'Focused':\n print(\"{} bias index: FOCUSED\".format(self.textname))\n elif self.biasIndex == 'Diversified':\n print(\"{} bias index: DIVERSIFIED\".format(self.textname))\n elif self.biasIndex == 'Polarized':\n print(\"{} bias index: POLARIZED\".format(self.textname))\n elif self.biasIndex == 'Dispersed':\n print(\"{} bias index: DISPERSED\".format(self.textname))\n\n\n # Possible plotting, in notebook add ``%matplotlib inline`` and ``import matplotlib.pyplot as plt``\n if (nr_of_nodes > 0):\n if self.plot:\n print('plotting ...')\n xticks = [str(i) for i in list(topn_dist_df.index)]\n ax = topn_dist_df.plot(\n title=\"Node-distribution in communities.\\n{} entropy:{:.3f}\".format(\n self.cutoffGraph.graph['name'],\n E), use_index=False)\n ax.set_xlabel(\"Community number\\n\")\n ax.set_ylabel(\"Words (nodes) per community.\")\n ax.set_xticks(np.arange(len(xticks)))\n ax.set_xticklabels(xticks)\n plt.show()\n\n #TODO - this will change, only testing\n self.plottable=Discourse(self)", "title": "" }, { "docid": "b3f765654e120d4c3bd9f01bd53d30a3", "score": "0.4969649", "text": "def compute_label_confs(self):\n label_confs = [label + ', id=' + str(i) + ', ' + str(format(\n self.confs[i] * 100, '.2f')) + '%' for i, label in enumerate(self.labels)]\n return label_confs", "title": "" }, { "docid": 
"584c2717aaf6c458129a2e1ba0253daa", "score": "0.4967278", "text": "def cal_AA(self):\n\n df_edge_list_reverse = pd.DataFrame()\n df_edge_list_reverse['source'] = self.df_edge_list['target']\n df_edge_list_reverse['target'] = self.df_edge_list['source']\n\n df_all_nodes_pair = pd.concat([df_edge_list_reverse, self.df_edge_list])\n df_neighbor_count = df_all_nodes_pair.groupby(['source']).count()\n\n df_neighbor_count = df_neighbor_count.reset_index()\n df_neighbor_count.rename(columns={'target': 'count'}, inplace=True)\n \"\"\"\n get common neighbours\n \"\"\"\n\n df_common_neighbor = pd.merge(df_all_nodes_pair, df_all_nodes_pair, on=['target'], how='left').dropna()\n df_common_neighbor = df_common_neighbor[df_common_neighbor['source_x'] != df_common_neighbor['source_y']]\n\n df_common_neighbor = pd.merge(df_common_neighbor, df_neighbor_count, left_on=['target'], right_on=['source'],\n how='left').dropna()\n\n df_common_neighbor = df_common_neighbor[['source_x', 'source_y', 'count']]\n df_common_neighbor['count'] = df_common_neighbor['count'].map(lambda x: 1.0 / math.log(x))\n\n df_AA_list = df_common_neighbor.groupby(['source_x', 'source_y']).sum()\n df_AA_list = df_AA_list.reset_index()\n\n df_AA_list.rename(columns={'count': 'similarity', 'source_x': 'source', 'source_y': 'target'}, inplace=True)\n return df_AA_list", "title": "" }, { "docid": "4fdb21730026d454270f3730ab56fe65", "score": "0.4967269", "text": "def __call__(self, labels, attr):\n regions_set = set(labels)\n obj_val = sum(\n self.metric(attr[i].reshape(1, -1), attr[j].reshape(1, -1))\n for r in regions_set\n for i, j in itertools.combinations(np.where(labels == r)[0], 2))\n \n return float(obj_val)", "title": "" }, { "docid": "6ad523bcf55494d1b0aa631668c55d8d", "score": "0.4965129", "text": "def apply_domain(self, df):\n super().apply_domain(df, col=self.label_col)", "title": "" }, { "docid": "4a318ad05709973f3d5cb357691d53c1", "score": "0.49583808", "text": "def calc_objective(labels, label_ce):\n objv = 0.0\n for label in labels:\n idx = np.isfinite(label)\n objv += normalized_mutual_info_score(\n label_ce[idx], label[idx], average_method='geometric')\n objv /= labels.shape[0]\n return objv", "title": "" }, { "docid": "8b5721bc31f05d6a65f6ec93c2ca8800", "score": "0.49500594", "text": "def lcv(domains, domain):\n # Initialize array for count of each value\n count = [0 for i in range(int(math.sqrt(len(domains))))]\n\n # Iterate through domains, counting each time the values are used\n for dom in domains.items():\n for val in dom[1]:\n count[val-1] += 1\n\n # Now sort by the count of each value\n domain_count = [(count[val-1], val) for val in domain]\n domain_count.sort()\n new_domain = [val[1] for val in domain_count]\n return new_domain", "title": "" }, { "docid": "75b9d487abe6bce35a603ca0d0fd96ef", "score": "0.49497637", "text": "def get_can_label(self):\n if not len(self.graph):\n self.create_mol_graph()\n syms = self.ase_atoms.get_chemical_symbols()\n weights = [io_ptable.masses[io_ptable.elements.index(x)] for x in syms]\n inds = np.nonzero(self.graph)\n tmpgraph = self.graph.copy()\n for j in range(len(syms)): # Set diagonal to weights\n tmpgraph[j, j] = weights[j]\n for i, x in enumerate(inds[0]):\n y = inds[1][i]\n # Add factor of 100\n tmpgraph[x, y] = weights[x]*weights[y]*tmpgraph[x, y] / 100.0\n with np.errstate(over='raise'):\n try:\n det = np.linalg.det(tmpgraph)\n except:\n det = np.linalg.det(tmpgraph/100.0)\n if 'e+' in str(det):\n safedet = str(det).split(\n 'e+')[0][0:10]+'e+'+str(det).split('e+')[1]\n 
else:\n safedet = str(det)[0:10]\n return safedet", "title": "" }, { "docid": "28934d7eec711fede248b71151342aac", "score": "0.4945146", "text": "def averageClusteringUndirected(self):\r\n graph = self.getGraph()\r\n dirGraph = {}\r\n \r\n for i in graph.keys():\r\n dirGraph[i] = set()\r\n for i in graph.keys():\r\n for j in graph[i]:\r\n dirGraph[i].add(j)\r\n dirGraph[j].add(i)\r\n \r\n total = 0 \r\n for i in dirGraph:\r\n ''' calculate the number of the pairs of neighbors of node i \r\n which are adiacents\r\n ''' \r\n neighs = len(dirGraph[i])\r\n pairsNeigh = ( neighs * (neighs-1) )/2 # all pairs\r\n ''' check how many nehgbors of node i have a connection '''\r\n triangles = 0\r\n for j in dirGraph[i]:\r\n for k in dirGraph[i]:\r\n if k in dirGraph[j]:\r\n triangles += 1\r\n if pairsNeigh > 0:\r\n total += float(triangles)/(2*pairsNeigh)\r\n return float(total)/len(dirGraph)", "title": "" }, { "docid": "f1b3a728064b60588ca076a157b2dc10", "score": "0.49318907", "text": "def my_use_percentage(segments,before,after=None):\n return len(before.vetoed(segments.active))/len(before)*100", "title": "" }, { "docid": "6185c2ee6b1510c7649112dde2462765", "score": "0.49272957", "text": "def normalized_cut(graph):\n labels = cut_normalized(range(10), graph.to_undirected())\n print(labels)", "title": "" }, { "docid": "568e8c88887ebbefb170fcfc73a59ebc", "score": "0.49272773", "text": "def ticks(self, domain_min, domain_max):\n def coverage(dmin, dmax, lmin, lmax):\n range = dmax - dmin\n return 1 - 0.5 * (numpy.power(dmax - lmax,\n 2) + numpy.power(dmin - lmin,\n 2)) / numpy.power(0.1 * range,\n 2)\n\n def coverage_max(dmin, dmax, span):\n range = dmax - dmin\n if span > range:\n half = (span - range) / 2.0\n return 1 - numpy.power(half, 2) / numpy.power(0.1 * range, 2)\n else:\n return 1\n\n def density(k, m, dmin, dmax, lmin, lmax):\n r = (k - 1.0) / (lmax - lmin)\n rt = (m - 1.0) / (max(lmax, dmax) - min(lmin, dmin))\n return 2 - max(r / rt, rt / r)\n\n def density_max(k, m):\n if k >= m:\n return 2 - (k - 1.0) / (m - 1.0)\n else:\n return 1\n\n def simplicity(q, Q, j, lmin, lmax, lstep):\n eps = 1e-10\n n = len(Q)\n i = Q.index(q) + 1\n v = 1 if ((lmin % lstep < eps or (lstep - lmin % lstep)\n < eps) and lmin <= 0 and lmax >= 0) else 0\n return (n - i) / (n - 1.0) + v - j\n\n def simplicity_max(q, Q, j):\n n = len(Q)\n i = Q.index(q) + 1\n v = 1\n return (n - i) / (n - 1.0) + v - j\n\n def legibility(lmin, lmax, lstep):\n return 1\n\n def legibility_max(lmin, lmax, lstep):\n return 1 # pragma: no cover\n\n def extended(dmin, dmax, m, Q, only_inside, w):\n n = len(Q)\n best_score = -2.0\n\n j = 1.0\n while j < float('infinity'):\n for q in Q:\n sm = simplicity_max(q, Q, j)\n\n if w[0] * sm + w[1] + w[2] + w[3] < best_score:\n j = float('infinity')\n break\n\n k = 2.0\n while k < float('infinity'):\n dm = density_max(k, m)\n\n if w[0] * sm + w[1] + w[2] * dm + w[3] < best_score:\n break\n\n delta = (dmax - dmin) / (k + 1.0) / j / q\n z = numpy.ceil(numpy.log10(delta))\n\n while z < float('infinity'):\n step = j * q * numpy.power(10, z)\n cm = coverage_max(dmin, dmax, step * (k - 1.0))\n\n if w[0] * sm + w[1] * cm + \\\n w[2] * dm + w[3] < best_score:\n break\n\n min_start = numpy.floor(\n dmax / step) * j - (k - 1.0) * j\n max_start = numpy.ceil(dmin / step) * j\n\n if min_start > max_start:\n z = z + 1\n break\n\n for start in range(\n int(min_start),\n int(max_start) + 1):\n lmin = start * (step / j)\n lmax = lmin + step * (k - 1.0)\n lstep = step\n\n s = simplicity(q, Q, j, lmin, lmax, lstep)\n c 
= coverage(dmin, dmax, lmin, lmax)\n d = density(k, m, dmin, dmax, lmin, lmax)\n l = legibility(lmin, lmax, lstep)\n\n score = w[0] * s + w[1] * \\\n c + w[2] * d + w[3] * l\n\n if score > best_score and (not only_inside or (lmin >= dmin and lmax <= dmax)):\n best_score = score\n best = (lmin, lmax, lstep, q, k)\n z = z + 1\n k = k + 1\n j = j + 1\n return best\n\n lmin, lmax, lstep, q, k = extended(\n domain_min, domain_max, self._count - 1, self._steps, self._only_inside, self._weights)\n locations = numpy.arange(k) * lstep + lmin\n digits = max(0, int(numpy.max(-numpy.floor(numpy.log10(lstep)), 0)))\n labels = [self._format.format(location, digits=digits) for location in locations]\n titles = numpy.repeat(None, len(labels))\n return locations, labels, titles", "title": "" }, { "docid": "dac7f50deb782c2f9e2c6c991dc509fc", "score": "0.49205077", "text": "def _calculate_adj_area(self, coords):\n\n pol = Polygon(coords)\n cmp_index = GenUtil.calculate_compactness_index(pol.area, pol.length)\n adj_area = GenUtil.calculate_adjusted_area(pol.area, cmp_index)\n\n return adj_area", "title": "" }, { "docid": "7c1a6a0caa28490951b02cdabecd69a7", "score": "0.49166757", "text": "def visualize_balance_of_dataset(y, name):\n u, counts = np.unique(y, return_counts=True)\n sum_counts = np.sum(counts)\n distro_list = []\n for i in counts:\n distro =(i / sum_counts) * 100\n distro_list.append(distro)\n # print('distribution = ', distro_list) \n plt.figure(figsize=(10, 5))\n if name == \"Train\":\n col = \"blue\"\n else:\n col = \"red\"\n plt.bar(u, counts, color=col)\n plt.title(name + \" Dataset Distribution\")\n plt.xticks(np.arange(min(u), max(u)+1, 1.0))\n plt.xlabel(\"Label Categories - Numerical Characters\")\n plt.ylabel(\"Number of Label Category Occurrences\")\n plt.savefig(name + \"_barChart.png\")", "title": "" }, { "docid": "5debbc7f0327ebf6cb02b78f1f74eb3f", "score": "0.49151975", "text": "def __call__(self, labels, attr):\n regions_set = set(labels)\n region_totals =[sum(attr[i] for i in np.where(labels == r)[0])\n for r in regions_set]\n obj_val = float(max(region_totals)-min(region_totals))\n return obj_val", "title": "" }, { "docid": "eb150baf5d7326470ad041c893bd27db", "score": "0.49095762", "text": "def markovsScore (models, data, labels):\n all = labels.size\n corrects = 0\n for i in range (len (data)):\n if markovsCouncil (models, data[i]) == labels[i]:\n corrects += 1\n return corrects / all * 100", "title": "" }, { "docid": "b609b34284a5206423902169462bf53a", "score": "0.4907743", "text": "def get_area_under_graph(graph):\n # bla bla bla.\n pass", "title": "" }, { "docid": "9a219022c3a2886d13c6474b9fe6b2a8", "score": "0.49073094", "text": "def REGULAR(dseglist):\r\n ordered = order_segments(dseglist)\r\n ordered = ordered[0]\r\n \r\n # for seg in dseglist:\r\n # output( \"s,e=\",get_ds_ends(seg))\r\n # output('len ordered=',len(ordered))\r\n # for seg in ordered:\r\n # output( \"ordered s,e=\",get_ds_ends(seg))\r\n\r\n numsides = len(dseglist)\r\n \r\n # this makes the sides equal to largest of existing sides:\r\n sidelength = max(map(lambda seg: wxPointUtil.distance(*get_ds_ends(seg)), dseglist))\r\n \r\n # this makes the sides equal to average of existing sides:\r\n # sidelength = 0\r\n # for seg in dseglist:\r\n # sidelength += wxPointUtil.distance(*get_ds_ends(seg))\r\n #sidelength = sidelength / numsides\r\n\r\n # positive polarity is this end is common to the next seg.\r\n polarity = []\r\n s,e = get_ds_ends(ordered[0]) # only need e here\r\n for i in range(len(ordered)-1):\r\n 
sn,en = get_ds_ends(ordered[(i+1)%len(ordered)])\r\n polarity.append((close_enough(e[0],sn[0]) and close_enough(e[1],sn[1])) or \r\n (close_enough(e[0],en[0]) and close_enough(e[1],en[1])))\r\n e = en\r\n polarity.append(polarity[0])\r\n angle = 2*math.pi / numsides\r\n \r\n # get angle of first segment, use this as the starting angle\r\n # if positive polarity, end is the anchor\r\n \r\n\r\n \r\n angleincrement = 2*math.pi/numsides\r\n for i in range(len(ordered)-1):\r\n if polarity[i]:\r\n anchor,free = get_ds_ends(ordered[i])\r\n else:\r\n free,anchor = get_ds_ends(ordered[i])\r\n \r\n if i == 0:\r\n firstanchor = anchor\r\n vector = free - anchor\r\n startangle = -math.atan2(vector[1],vector[0]) # - is kicad angle polarity CCW\r\n #wxPointUtil.scale(vector*sidelength/mag(vector))\r\n\r\n angle = startangle+2*math.pi*i/numsides\r\n # output( 'anchor,free,angle=',anchor,free,angle*180/math.pi)\r\n vector = wxPointUtil.towxPoint(sidelength,angle)\r\n\r\n endpoint = anchor+vector\r\n # TODO: update to work with S_ARC\r\n if polarity[i]:\r\n ordered[i].SetEnd(endpoint) \r\n # if i==0:\r\n # output( 'setend')\r\n else:\r\n ordered[i].SetStart(endpoint)\r\n # if i==0:\r\n # output( 'setstart')\r\n if polarity[i+1]:\r\n ordered[i+1].SetStart(endpoint) \r\n # output( 'setstart')\r\n else:\r\n ordered[i+1].SetEnd(endpoint)\r\n # output( 'setend')\r\n\r\n # ordered[i].SetEnd(endpoint) if polarity[i] else ordered[i].SetStart(endpoint)\r\n # ordered[i+1].SetStart(endpoint) if polarity[i+1] else ordered[i+1].SetEnd(endpoint)\r", "title": "" }, { "docid": "cb178b272f22c83cc765ef6711a46b96", "score": "0.49059832", "text": "def aa(y_true, y_pred):\n acc_cl = []\n for label in np.unique(y_pred):\n acc_cl.append(np.sum(y_true[y_pred == label] == y_pred[y_pred == label]) / len(y_pred[y_pred == label]))\n return np.nanmean(acc_cl), acc_cl", "title": "" }, { "docid": "18e2674a3cf59a7e00f285c36044e1cc", "score": "0.4888339", "text": "def __call__(self, labels, attr):\n g = gpd.GeoDataFrame(attr, columns = ['geometry'])\n g['templab'] = labels\n lengths = g.dissolve(by ='templab').length\n \n return sum(lengths)", "title": "" }, { "docid": "d50324420c94de8e0776a431a6e79143", "score": "0.48878697", "text": "def calc_conservation_per_domain(alignmentfiles, repr_accs, normalize = True,gapcutoff=0.8):\n\n from Bio import AlignIO\n regexFull = re.compile(\"^(\\S+_\\S+)\\|(\\S+)\\/(\\d+-\\d+): (.+)\\|(\\w+)(\\/.+)?\")\n\n entropyDictFull = defaultdict(lambda: np.NaN)\n entropyH10DictFull = defaultdict(lambda: np.NaN)\n if type(alignmentfiles) == type(''):\n alignmentfiles = [alignmentfiles]\n\n for alignmentfile in alignmentfiles:\n alignment = AlignIO.read(alignmentfile, \"fasta\")\n alignment_filtered = filter_alignment(alignment, repr_accs)\n\n # Calculate Shannon entropy\n msa = [str(x.seq).replace('.', '-').upper() for x in alignment_filtered]\n msaH10 = convert_H10_as_array(alignment_filtered)\n aliSize = len(msa[0])\n entropy = []\n entropyH10 = []\n\n\n\n for record in alignment:\n header = record.description\n match = regexFull.match(header)\n if match:\n prositeName = match.group(4)\n prositeID = match.group(5)\n else:\n print(header)\n\n shannon = []\n shannonH10 = []\n for i in range(aliSize):\n shannon.append(calculate_shannon_entropy([x[i] for x in msa], gapcutoff))\n shannonH10.append(calculate_shannon_entropy([x[i] for x in msaH10],gapcutoff))\n\n #We will use a dataframe to store our data because it's faster to merge with our original dataset later on\n if normalize:\n shannon = norm_0_1(shannon, 
maxvalue=4.34)\n shannonH10 = norm_0_1(shannonH10, maxvalue=3.15)\n\n df = pd.DataFrame(list(zip(shannon, shannonH10)),\n columns=[\"shannon\", 'shannonH10'])\n df[\"prositeName\"] = prositeName\n df[\"prositeID\"] = prositeID\n df = df.reset_index()\n #transform the index (0-based) into \"alignment_position\" which is also 0-based\n df = df.rename(columns={'index':'alignment_position'})\n\n return df", "title": "" }, { "docid": "8459a8ccfd6f80bfd900fdcc5702d995", "score": "0.48792765", "text": "def plot_domain_frequencies(self):\n return functools.reduce(\n operator.add,\n (\n self.plot_cpu_frequencies(domain[0]).relabel(\n f'Frequencies of domain CPUS {\", \".join(map(str, domain))}'\n )\n for domain in self.trace.plat_info['freq-domains']\n )\n ).cols(1)", "title": "" }, { "docid": "ccee7076dbba737460b251506a85b027", "score": "0.48785737", "text": "def plot_prob_contours(Gaus_dist_a, Gaus_dist_b, with_unbalance=False):\n\n assert(isinstance(Gaus_dist_a, model.GausDS) and isinstance(Gaus_dist_b, model.GausDS))\n\n X_a, X_b = Gaus_dist_a.data, Gaus_dist_b.data\n\n n_a = len(X_a)\n n_b = len(X_b)\n\n l_s_scalar_min = -9\n l_s_scalar_max = 9\n ls_x1 = np.linspace(l_s_scalar_min, l_s_scalar_max, 100)\n ls_x2 = np.linspace(l_s_scalar_min, l_s_scalar_max, 100)\n mg_x1, mg_x2 = np.meshgrid(ls_x1, ls_x2)\n\n pdf_a = Gaus_dist_a.Gaussian_pdf(mg_x1, mg_x2, 100)\n pdf_b = Gaus_dist_b.Gaussian_pdf(mg_x1, mg_x2, 100)\n\n pdf_a = pdf_a * n_a/(n_a+n_b)\n pdf_b = pdf_b * n_b/(n_a+n_b)\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.axis('equal') # !!! axis equals so that make circle a circle\n\n # ax.set_title(\"Sa != Sb\")\n ax.set_title(\"2 Class Classification\")\n\n ax.scatter(X_a[:, 0], X_a[:, 1], marker='.', c='r', label='class a')\n ax.scatter(X_b[:, 0], X_b[:, 1], marker='+', c='b', label='class b')\n\n ax.contour(mg_x1, mg_x2, pdf_a, 10)\n ax.contour(mg_x1, mg_x2, pdf_b, 10)\n\n # get the decision border\n log_odds = np.log(pdf_a) - np.log(pdf_b)\n list_border = []\n for i in range(99):\n for j in range(99):\n if (log_odds[i][j]*log_odds[i][j+1] < 0) or (log_odds[i][j]*log_odds[i+1][j] < 0) \\\n or log_odds[i][j] == 0:\n list_border.append([i, j])\n\n bd = np.array(list_border)\n X1 = np.linspace(l_s_scalar_min, l_s_scalar_max, 100)\n Y1 = np.linspace(l_s_scalar_min, l_s_scalar_max, 100)\n ax.scatter(X1[bd[:, 0]], Y1[bd[:, 1]], marker='.', s=15, color='brown', label='decision border')\n\n # optimal choice of w\n init_w = np.array([1, -2]).reshape(-1, 1)\n\n # plot the line of w with balanced fisher score\n # equal num of points between class a and class b\n fs_clf = model.FisherScoreClassifier(X_a, X_b, init_w)\n w_star = fs_clf.classify(plot=False)\n w_star = np.array(w_star)\n xielv = w_star[1]/w_star[0]\n x_point = np.linspace(-5, 3, 100)\n y_point = x_point * xielv - 4\n plt.plot(x_point, y_point, c='g', label='optimal w')\n\n # plot the line of w with unbalanced fisher score\n # different num of points between class a and class b\n if with_unbalance:\n w_star = fs_clf.classify(balanced=False, plot=False)\n w_star = np.array(w_star)\n xielv = w_star[1]/w_star[0]\n x_point = np.linspace(-5, 3, 100)\n y_point = x_point * xielv - 4\n plt.plot(x_point, y_point, c='purple', label='unbalanced F(w)')\n\n leg = ax.legend(loc='upper right', fancybox=True, fontsize=8)\n leg.get_frame().set_alpha(0.5)\n\n plt.show()", "title": "" }, { "docid": "6485832d8d4709f98440cd4ff8b11697", "score": "0.48777118", "text": "def area_metric_robust(D1, D2):\n\n if np.size(D1) > np.size(D2):\n d1 = D2\n d2 = 
D1\n else:\n d1 = D1\n d2 = D2 # D1 will always be the larger data set\n\n Pxs, xs = ecdf(d1) # Compute the ecdf of the data sets\n Pys, ys = ecdf(d2)\n\n Pys_eqx = Pxs\n Pys_pure = Pys[0:-1] # this does not work with a single datum\n Pall = np.sort(np.append(Pys_eqx, Pys_pure))\n\n ys_eq_all = np.zeros(len(Pall))\n ys_eq_all[0] = ys[0]\n ys_eq_all[-1] = ys[-1]\n for k in range(1, len(Pall)-1):\n ys_eq_all[k] = interpCDF_2(ys, Pys, Pall[k])\n\n xs_eq_all = np.zeros(len(Pall))\n xs_eq_all[0] = xs[0]\n xs_eq_all[-1] = xs[-1]\n for k in range(1, len(Pall)-1):\n xs_eq_all[k] = interpCDF_2(xs, Pxs, Pall[k])\n\n diff_all_s = abs(ys_eq_all-xs_eq_all)\n diff_all_s = diff_all_s[range(1, len(diff_all_s))]\n diff_all_p = np.diff(Pall)\n area = np.matrix(diff_all_p) * np.matrix(diff_all_s).T\n\n return np.array(area)[0]", "title": "" }, { "docid": "a9ed6f39d3ad032d6e5969166dbd6719", "score": "0.487672", "text": "def cal_dcd_scores(self, n_samples=1000, lognorm=True, n_components=2):\n # smaller is better\n train, test = self.create_divergence_matrix(n_samples=n_samples,\n lognorm=lognorm,\n n_components=n_components,\n normalize_per_code=True,\n decode=False)\n # diag = np.diagflat(np.diag(density_mat))\n # higher is better\n train = 1. - train\n test = 1 - test\n d = (metrics.disentanglement_score(train) +\n metrics.disentanglement_score(test)) / 2.\n c = (metrics.completeness_score(train) +\n metrics.completeness_score(test)) / 2.\n return d, c", "title": "" }, { "docid": "f634eda25444928e474adbf1490df080", "score": "0.48718885", "text": "def plot_boundaries():\n\n attributes = [\n \"acousticness\", \"danceability\", \"energy\", \"instrumentalness\",\n \"liveness\", \"loudness\", \"speechiness\", \"valence\"\n ]\n\n count = 0\n\n track_a = {\"id\": \"6j7ShkX2wTd7pXSmRQQtrK\"}\n track_b = {\"id\": \"2AVkArcfALVk2X8sfPRzya\"}\n\n example_track_a = TrackData.get_track(track_a).get_metadata()\n example_track_b = TrackData.get_track(track_b).get_metadata()\n\n for user, session in Session.get_users_with_surveys():\n\n if not (count == 10 or count == 11):\n count += 1\n continue\n count += 1\n\n binary_boundary = BinaryBoundary(user)\n features_boundary = BinaryBoundaryWithFeatures(user)\n kde_boundary = KDEBoundary(user)\n histogram_boundary = HistogramBoundary(user)\n print(f\"User: {count}\")\n\n fig, axs = plt.subplots(nrows=4, ncols=2, figsize=(13, 13))\n x_index = 0\n y_index = 0\n\n handles = None\n labels = None\n\n for feature in attributes:\n\n histogram_data = histogram_boundary.boundaries[feature]\n kde_data = kde_boundary.boundaries[feature]\n\n hist_values = np.array(list(histogram_data.values()))\n max_y_value = max(hist_values)\n\n bins = np.arange(0.1, 1.2, 0.1)\n\n min_boundary = binary_boundary.boundaries[feature][\"min\"]\n max_boundary = binary_boundary.boundaries[feature][\"max\"]\n\n if max_boundary < min_boundary + 0.02:\n max_boundary = max_boundary + 0.01\n\n widths = np.array([-0.1 for _ in bins])\n\n kde_bins = np.linspace(0.0, 1.0, 1000)[: np.newaxis]\n log_score = kde_data.score_samples(kde_bins.reshape(-1, 1))\n\n ax1 = axs[y_index][x_index]\n ax1.bar(bins, height=hist_values, width=widths, alpha=0.5, label=\"Histogram\", align=\"edge\", color=\"blue\")\n\n ax1.vlines(\n [min_boundary, max_boundary], ymin=0, ymax=max_y_value,\n linewidth=2, color=\"#d95f02\", label=\"Boundary\"\n )\n\n ax1.set_xlim(right=1.0)\n\n ax2 = ax1.twinx()\n ax2.plot(kde_bins, np.exp(log_score), color=\"#1b9e77\", label=\"KDE\", linewidth=2)\n ax2.set_ylim(bottom=0)\n ax2.set_xlim(right=1.0)\n\n 
ax1.vlines(\n example_track_a[feature], ymin=0, ymax=max_y_value,\n linewidth=4, color=\"black\", label=\"Example Track 1\", linestyle=\"dotted\"\n )\n ax1.vlines(\n example_track_b[feature], ymin=0, ymax=max_y_value,\n linewidth=4, color=\"black\", label=\"Example Track 2\", linestyle=\"dashed\"\n )\n\n h1, l1 = ax1.get_legend_handles_labels()\n h2, l2 = ax2.get_legend_handles_labels()\n\n handles = h1 + h2\n labels = l1 + l2\n\n ax1.set_xticks(np.arange(0, 1.1, 0.1))\n ax1.tick_params('x', rotation=45)\n ax1.set_title(f\"{feature.capitalize()}\", fontsize=14)\n\n x_index += 1\n if x_index >= 2:\n y_index += 1\n x_index = 0\n\n # 0: Boundary\n # 1: Example Track 1\n # 2: Example Track 2\n # 3: Histogram\n # 4: KDE\n order = [0, 3, 4, 1, 2]\n\n fig.suptitle(f\"Example boundaries, user {'A' if count == 11 else 'B'}\", fontsize=20)\n fig.tight_layout()\n fig.legend(\n [handles[idx] for idx in order], [labels[idx] for idx in order],\n loc=9, bbox_to_anchor=(0.5, 0.95), prop={'size': 15}, framealpha=1\n )\n\n plt.show()", "title": "" }, { "docid": "841240b5394554198d896ea778f8ba3e", "score": "0.48688507", "text": "def check_convergence(labels, folder='./'):\n from emcee import autocorr \n x = np.load(folder+'samples.npy')\n inter_var = []\n fig, axarr = plt.subplots(2, figsize=(10,10))\n ax,bx = axarr.flatten()\n for j in range(len(x[0,0])):\n chain = x[:, :, j].T\n # Compute the estimators for a few different chain lengths\n N = np.exp(np.linspace(np.log(100), np.log(chain.shape[1]), 10)).astype(int)\n gw2010 = np.empty(len(N))\n new = np.empty(len(N))\n test = np.empty(len(N))\n for i, n in enumerate(N):\n test[i] = autocorr.integrated_time(chain[:,:n],c=5)\n ax.loglog(N, test, \"o-\", label=labels[j])\n\n #compute the interchain variance\n inter_var.append(np.std(chain))\n\n # Plot the comparisons\n ax.set_xlabel(\"number of samples, $N$\")\n ax.set_ylabel(r\"$\\tau$ estimates\")\n ax.legend(fontsize=14)\n\n intra_var = []\n for i in range(x.shape[1]):\n param_var = []\n for j in range(x.shape[2]):\n chain = x[:,i,j].T \n #compute intra-chain variance\n param_var.append(np.std(chain))\n intra_var.append(param_var)\n\n #compute the ratios of the intra and interchain variances\n ylim = 10\n for j in range(len(inter_var)):\n all_vals = []\n for var in intra_var[j]:\n ratio = inter_var[j] / var\n all_vals.append(ratio)\n if ratio > ylim:\n bx.scatter(j,ylim-0.5,marker='^', alpha=0.75, c='dodgerblue')\n else:\n bx.scatter(j, ratio,c='dodgerblue')\n bx.scatter(j, np.mean(all_vals), marker='s', s=100, c='k')\n bx.plot([0,j],[1,1], 'r--', label='Ideal Ratio')\n bx.legend(fontsize=14)\n bx.set_xlabel('parameter')\n bx.set_ylabel(r'${\\rm Var}_{\\rm inter} / {\\rm Var}_{\\rm Intra}$ for each chain')\n bx.set_ylim([0,ylim])\n plt.show()\n plt.close()", "title": "" }, { "docid": "e7a9f4a4e4e412128ed9a7cbbab56702", "score": "0.4860509", "text": "def similarity_cal_module_5(dgassos, graph, gncutoff=1):\n gvs = set(graph.vs['name'])\n\n dgassos_new = {}\n for d in dgassos.keys():\n dgleft = gvs.intersection(dgassos[d])\n if len(dgleft) >= gncutoff:\n dgassos_new[d] = dgleft\n diseases = list(dgassos_new.keys())\n print(\"there are {} diseases can be calculated.\".format(len(diseases)))\n\n sims = {}\n for i in range(0, len(diseases)-1):\n sims[diseases[i]] = {}\n for j in range(i+1, len(diseases)):\n gsi = dgassos_new[diseases[i]]\n gsj = dgassos_new[diseases[j]]\n gsintersect = gsi.intersection(gsj)\n gsid = gsi.difference(gsj)\n gsjd = gsj.difference(gsi)\n conncount = 0\n if len(gsintersect) != 0:\n 
for g in gsid:\n conncount += connected_count(graph, g, gsintersect)\n for g in gsjd:\n conncount += connected_count(graph, g, gsintersect)\n for g in gsid:\n conncount += connected_count(graph, g, gsjd)\n sim = (len(gsintersect)**2+conncount)/(len(gsi)*len(gsj))\n sims[diseases[i]][diseases[j]] = sim\n if diseases[j] not in sims.keys():\n sims[diseases[j]] = {}\n sims[diseases[j]][diseases[i]] = sim\n print(i, \"done..\")\n normsims = {}\n maxsims = {}\n for d in diseases:\n maxsims[d] = max([i for i in sims[d].values()])\n for i in range(0, len(diseases)-1):\n normsims[diseases[i]] = {}\n for j in range(i+1, len(diseases)):\n avgmaxsim = (maxsims[diseases[i]] + maxsims[diseases[j]])/2\n if avgmaxsim == 0:\n normsims[diseases[i]][diseases[j]] = 0\n else:\n normsims[diseases[i]][diseases[j]] = sims[diseases[i]][diseases[j]]/avgmaxsim\n print(i, \"done...\")\n return normsims", "title": "" }, { "docid": "9411031f46df9daa9353a8655f8e4b22", "score": "0.48600683", "text": "def district_area(self):\n if self.district:\n try:\n return (self.district.transform(102009, clone=True).area / 1000000) * 0.38610\n except Exception:\n return", "title": "" }, { "docid": "47925b3ffc37ade2d0a85b17595a7676", "score": "0.48596826", "text": "def _marginalize_over_angerr(self, c_e, c_d, c_e_r):\n\n presel_data = self.dataset[np.intersect1d(np.nonzero(np.isclose(self.dataset[:, 0], self.true_energy_bins[c_e])),\n np.nonzero(np.isclose(self.dataset[:, 2], np.rad2deg(self.declination_bins[c_d]))))]\n\n reduced_data = presel_data[np.nonzero(np.isclose(presel_data[:, 4], self.reco_energy_bins[c_e, c_d][c_e_r]))]\n\n\n bins = np.array(sorted(list(set(reduced_data[:, 6]).union(\n set(reduced_data[:, 7])))))\n if bins.shape[0] != 0:\n frac_counts = np.zeros(bins.shape[0]-1)\n\n #marginalise over uninteresting quantities\n for c_b, b in enumerate(bins[:-1]):\n indices = np.nonzero(np.isclose(b, reduced_data[:, 6]))\n frac_counts[c_b] = np.sum(reduced_data[indices, -1])\n return frac_counts, bins\n\n else:\n return None, None", "title": "" }, { "docid": "7ab043cd3334b0fb8296cbc693cea23e", "score": "0.48583674", "text": "def addtotalareas():\n total = 0\n for eachareacircle in CircleTotalAreas.allcircleslist:\n total = total + eachareacircle.area()\n return total", "title": "" }, { "docid": "925e61b04df0c54a46e1922deaa77eff", "score": "0.48567036", "text": "def io_ratio(observations, group_labels):\n group_labels = np.array(group_labels)\n unique_groups = np.unique(group_labels)\n in_group = 0.0\n out_group = 0.0\n\n for g in unique_groups:\n if np.sum(group_labels == g) == 0:\n continue\n\n in_obs = np.array(observations[group_labels == g, :], ndmin=2)\n out_obs = np.array(observations[group_labels != g, :], ndmin=2)\n\n in_group += np.mean(pdist(in_obs))\n out_group += np.mean(cdist(in_obs, out_obs))\n\n return in_group / out_group", "title": "" }, { "docid": "7980d7f499cf4f565b1bdf2d21261775", "score": "0.48430055", "text": "def get_kl_divergence(domain, targets, scores):\n\n nbin = 50 # Number of histogram bins\n a_b_pos, b_a_pos = smoothed_hist_kl_distance(scores[np.logical_and(domain == 0, targets == 1)],\n scores[np.logical_and(\n domain == 1, targets == 1)],\n nbins=nbin)\n a_b_neg, b_a_neg = smoothed_hist_kl_distance(scores[np.logical_and(domain == 0, targets == 0)],\n scores[np.logical_and(\n domain == 1, targets == 0)],\n nbins=nbin)\n\n return np.median([a_b_pos] + [a_b_neg] + [b_a_pos] + [b_a_neg])", "title": "" }, { "docid": "efeeb3fa5d706540ad6f12a0d5916a16", "score": "0.48415655", "text": "def 
_get_complete_domain(self):\n domain = dict().fromkeys(self.variables)\n for key in domain:\n domain[key] = range(2) if is_aux(key) else range(10)\n domain[self.op1[-1]] = range(1, 10)\n domain[self.op2[-1]] = range(1, 10)\n domain[self.result[-1]] = range(1, 10)\n if len(self.result) > max(len(self.op1), len(self.op2)):\n domain[self.result[-1]] = [1]\n if len(self.op1) == len(self.op2):\n domain[self.extra_variables[-1]] = [1]\n return domain", "title": "" }, { "docid": "64793ed830f1c26af715630e8bbdf578", "score": "0.48310858", "text": "def create_domain(self):\n print \"creating domain ...\"\n a1 = Orange.feature.Continuous(\"Optimal\")\n a2 = Orange.feature.Continuous(\"Manhattan\")\n a3 = Orange.feature.Continuous(\"NCorrect\")\n a4 = Orange.feature.Continuous(\"NCorrectOrdered\")\n a5 = Orange.feature.Continuous(\"ManhattanFirst\")\n a6 = Orange.feature.Continuous(\"ManhattanFirstZero\")\n a7 = Orange.feature.Continuous(\"ManhattanProd\")\n a8 = Orange.feature.Continuous(\"DistanceZeroMid\")\n a9 = Orange.feature.Continuous(\"DistanceZeroFirst\")\n a10 = Orange.feature.Discrete(\"Finished\", values=[\"yes\",\"no\"])\n a11 = Orange.feature.Continuous(\"MaxMDistance\")\n a12 = Orange.feature.Continuous(\"DistPairs\")\n a13 = Orange.feature.Continuous(\"SequenceScore\")\n a14 = Orange.feature.Discrete(\"123on401\", values=[\"yes\",\"no\"])\n a15 = Orange.feature.Discrete(\"741on401\", values=[\"yes\",\"no\"])\n #positions = [Orange.feature.Continuous(\"Dist(%d on place%d)\"%(i,v)) for i in range(9) for v in range(9)]\n positions = [Orange.feature.Discrete(\"Dist(%d on place%d)\"%(i,v), values = [\"yes\", \"no\"]) \n for i in range(9) for v in range(9)]\n \n #self.attributes = [a2, a3, a4, a5, a11] + positions\n self.attributes = positions + [a10]\n \n \n\n self.funcs = {a1.name: optimal, a2.name: manhattan, a3.name: ncorrect,\n a4.name: ncorrect_ordered, a5.name: manhattan_first,\n a6.name: manhattan_first_zero, a7.name: manhattan_prod,\n a8.name: distance_zero_mid, a9.name: distance_zero_first,\n a10.name: finished, a11.name: max_manh, a12.name: distance_pairs,\n a13.name: sequence_score, a14.name: pos123, a15.name: pos741}\n for pi in range(9):\n for vi in range(9):\n #self.funcs[\"Dist(%d on place%d)\"%(pi,vi)] = Position(pi,vi)\n self.funcs[\"Dist(%d on place%d)\"%(pi,vi)] = PositionDiscrete(pi,vi)\n \n# for ai, a in enumerate(self.attributes):\n# print \"Index: %d, name = %s\"%(ai, a.name)\n \n self.att_ids = {a.name:i for i,a in enumerate(self.attributes)}\n self.domain = Orange.data.Domain(self.attributes, a10)\n id = Orange.feature.String(\"id\")\n mid = Orange.feature.Descriptor.new_meta_id()\n self.domain.add_meta(mid, id)\n\n eval = Orange.feature.Continuous(\"eval\") \n mid = Orange.feature.Descriptor.new_meta_id()\n self.domain.add_meta(mid, eval)\n \n trace = Orange.feature.String(\"trace\") \n mid = Orange.feature.Descriptor.new_meta_id()\n self.domain.add_meta(mid, trace)", "title": "" }, { "docid": "79a12382d34cd70e7500a21be4d6e895", "score": "0.48259488", "text": "def norm_change_annual_flow(sites: list, before_bc: pd.DataFrame, after_bc: pd.DataFrame, colors = list,\n fontsize_title=60, fontsize_labels=40, fontsize_tick= 30):\n\n mpl.rcParams['figure.figsize']=(30,20)\n\n WY_grouper = calc_water_year(before_bc)\n after_bc_annual = after_bc.groupby(WY_grouper).sum()\n before_bc_annual = before_bc.groupby(WY_grouper).sum()\n\n after_bc_annual, before_bc_annual = dst.normalize_pair(data=after_bc_annual, norming_data=before_bc_annual)\n\n diff_annual = after_bc_annual - 
before_bc_annual\n\n max_mag = np.abs(np.max(diff_annual.max()))\n min_mag = np.abs(np.min(diff_annual.min()))\n extreme_mag = np.max([max_mag, min_mag])\n\n n_rows, n_cols = determine_row_col(len(sites))\n\n fig, axs = plt.subplots(n_rows, n_cols)\n\n plt.suptitle(\"Change in Annual Flow Volume from Bias Correction\",\n fontsize=fontsize_title, y=1.05)\n\n axs_list = axs.ravel().tolist()\n\n i=0\n for site in sites:\n ax = axs_list[i]\n ax.bar(diff_annual.index, diff_annual[site].values, color=color_list[i])\n ax.set_title(site, fontsize=fontsize_tick)\n ax.set_ylim(top = extreme_mag, bottom=-extreme_mag)\n ax.tick_params(axis='both', labelsize=fontsize_tick)\n i += 1\n\n while i < len(axs_list):\n axs_list[i].axis('off')\n i += 1\n\n fig.text(0.5, -0.04, 'Hydrologic Year',\n ha='center', va = 'bottom', fontsize=fontsize_labels);\n fig.text(-0.04, 0.5, \"Normalized Change in Annual Flow Volume\",\n va='center', rotation = 'vertical', fontsize=fontsize_labels);\n\n plt.tight_layout()", "title": "" }, { "docid": "740bd89375e563a95897769b31b787bb", "score": "0.4825399", "text": "def calcul_acc(labels, preds):\n return sum(1 for x, y in zip(labels, preds) if x == y) / len(labels)", "title": "" }, { "docid": "357d60301aea4661c916d5ba6c2dd11f", "score": "0.48163047", "text": "def calc_segmentation_consistency(output, reference, divergence_types=['kl', 'contour'],\n divergence_weights=[1.0, 0.5], class_weights=None, scales=[0],\n mask=None, is_gt=False):\n dist = 0.\n num_classes = reference.size(1)\n if mask is None:\n # apply masks so that only gradients on certain regions will be backpropagated.\n mask = torch.ones_like(output).float().to(reference.device)\n mask .requires_grad = False\n for scale in scales:\n if scale > 0:\n output_reference = torch.nn.AvgPool2d(2 ** scale)(reference)\n output_new = torch.nn.AvgPool2d(2 ** scale)(output)\n else:\n output_reference = reference\n output_new = output\n for divergence_type, d_weight in zip(divergence_types, divergence_weights):\n loss = 0.\n if divergence_type == 'kl':\n '''\n standard kl loss\n '''\n loss = kl_divergence(\n pred=output_new, reference=output_reference, mask=mask, is_gt=is_gt)\n elif divergence_type == 'ce':\n loss = cross_entropy_2D(\n input=output_new, target=output_reference, mask=mask, is_gt=is_gt)\n elif divergence_type == 'weighted ce':\n assert class_weights is not None, 'must assign class weights'\n loss = cross_entropy_2D(\n input=output_new, target=output_reference, mask=mask, is_gt=is_gt, weight=class_weights)\n elif divergence_type == 'Dice':\n use_gpu = False if output_reference.device == torch.device(\n 'cpu') else True\n loss = SoftDiceLoss(n_classes=num_classes, use_gpu=use_gpu)(\n input=output_new, target=output_reference, mask=mask, is_gt=is_gt)\n elif divergence_type == 'mse':\n n, h, w = output_new.size(\n 0), output_new.size(2), output_new.size(3)\n if not is_gt:\n target_pred = torch.softmax(output_reference, dim=1)\n else:\n target_pred = output_reference\n input_pred = torch.softmax(output_new, dim=1)\n loss = torch.nn.MSELoss(reduction='sum')(\n target=target_pred * mask, input=input_pred * mask)\n loss = loss / (n * h * w)\n elif divergence_type == 'contour': # contour-based loss\n if not is_gt:\n target_pred = torch.softmax(output_reference, dim=1)\n else:\n target_pred = output_reference\n input_pred = torch.softmax(output_new, dim=1)\n cnt = 0\n for i in range(1, num_classes):\n cnt += 1\n loss += contour_loss(input=input_pred[:, [i], ], target=(target_pred[:, [i]]), ignore_background=False, 
mask=mask,\n one_hot_target=False)\n if cnt > 0:\n loss /= cnt\n\n else:\n raise NotImplementedError\n\n print('{}:{}'.format(divergence_type, loss.item()))\n\n dist += 2 ** scale * (d_weight * loss)\n return dist / (1.0 * len(scales))", "title": "" }, { "docid": "2504d3257a30e684a088cf488fe17f28", "score": "0.48009068", "text": "def discrimAnalysis(x, y):\r\n ### TODO: Write your code here\r\n male_x, female_x = splitData(x, y)\r\n \r\n ## mean\r\n mu_male = meanEstimator(male_x)\r\n mu_female = meanEstimator(female_x)\r\n mu = meanEstimator(x)\r\n \r\n ## mean values print\r\n #print(\"male mu: \", mu_male)\r\n #print(\"female mu: \", mu_female)\r\n \r\n ## covariance values\r\n cov = covarianceMatrix(x, mu)\r\n cov_male = covarianceMatrix(male_x, mu_male)\r\n cov_female = covarianceMatrix(female_x, mu_female)\r\n\r\n #print(\"Overall cov. matrix:\\n\", cov) \r\n #print(\"Male cov. matrix:\\n\", cov_male)\r\n #print(\"Female cov. matrix:\\n\", cov_female)\r\n \r\n # LDA calc.\r\n height, weight, result = LDA([0.5, 0.5], cov, mu_male, mu_female)\r\n \r\n #QDA calc.\r\n height_Q, weight_Q, result_Q = QDA([0.5, 0.5], \r\n cov_male, \r\n cov_female, \r\n mu_male, \r\n mu_female)\r\n \r\n ## gaussian calculations\r\n x_set = np.dstack((height, weight))\r\n x_set = np.reshape(x_set, (len(height)*len(weight), 2))\r\n density_male = util.density_Gaussian(mu_male, cov_male, x_set).reshape(height.shape[0], height.shape[1])\r\n density_female = util.density_Gaussian(mu_female, cov_female, x_set).reshape(height.shape[0], height.shape[1])\r\n \r\n ## figure 1 (just scatter)\r\n dataScatterPlot(male_x, female_x)\r\n plotDecisionBoundary(height, weight, result)\r\n gaussianContour(height, weight, density_male, density_female)\r\n plt.savefig(\"lda.pdf\")\r\n \r\n #figure 2 (QDA)\r\n dataScatterPlot(male_x, female_x)\r\n plotDecisionBoundary(height, weight, result_Q)\r\n gaussianContour(height, weight, density_male, density_female)\r\n plt.savefig(\"qda.pdf\")\r\n \r\n return (mu_male,mu_female,cov,cov_male,cov_female)", "title": "" }, { "docid": "16bf88605a8c374121f344a4eb6dcfdc", "score": "0.4800308", "text": "def accuracy(label_trues, label_preds, n_class=7):\n hist = np.zeros((n_class, n_class))\n for lt, lp in zip(label_trues, label_preds):\n hist += _fast_hist(lt.flatten(), lp.flatten(), n_class) # n_class, n_class\n\n acc = np.diag(hist).sum() / hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n acc_cls = np.nanmean(acc_cls)\n\n return acc_cls", "title": "" }, { "docid": "3dc23adb60cb374f713afabcd5bac400", "score": "0.47988722", "text": "def labeledge(dag,p):\n \"\"\" Chickering, D. M., Learning equivalence classes of Bayesian-network structures, The Journal of Machine Learning Research,2002. 
\"\"\"\n\n ansdag=[[] for i in range(p)]\n edgenum=[[] for i in range(p)]\n edgeord=edgeorder(dag,p)\n NEdge=len(dag)\n for i in range(NEdge):\n ansdag[edgeord[i][1]].append(edgeord[i][0])\n edgenum[edgeord[i][1]].append(i)\n unlabled=len(dag)\n edgelabel=[0 for i in range(unlabled)]\n while unlabled>0:\n v=edgelabel.index(0) #step 4 , v is index of x to y\n x=edgeord[v][0]\n y=edgeord[v][1]\n for wi in [i for i in range(len(ansdag[x])) if edgelabel[edgenum[x][i]]==1]:\n if ansdag[x][wi] not in ansdag[y]:\n for j in edgenum[y]:\n if edgelabel[j]==0:\n unlabled-=1\n edgelabel[j]=1\n continue\n else:\n wyi=ansdag[y].index(ansdag[x][wi])\n if edgelabel[edgenum[y][wyi]]==0:\n unlabled-=1\n edgelabel[edgenum[y][wyi]]=1\n #begin step 8 \n if len([z for z in ansdag[y] if z!=x and z not in ansdag[x]])>0:\n for yi in edgenum[y]:\n if edgelabel[yi]==0:\n unlabled-=1\n edgelabel[yi]=1\n else:\n for yi in edgenum[y]:\n if edgelabel[yi]==0:\n unlabled-=1\n edgelabel[yi]=-1\n for i in range(len(dag)):\n edgeord[i].append(edgelabel[i]) \n return(edgeord)", "title": "" }, { "docid": "d978b3cba7bd6f28ca8f5a2fe2ea044e", "score": "0.47960457", "text": "def segregation(self):\n _, _, _, frac_same = self.count_neighbors()\n return np.nanmean(frac_same)", "title": "" }, { "docid": "b6b22a60df3908037d22ada56869a61c", "score": "0.4795801", "text": "def area_of_interest(domain='WY'):\n if domain == 'CONUS':\n # Restrict cells to the coterminous US\n bbox = {\n 'latmax' : 50,\n 'latmin' : 25,\n 'lonmax': -65,\n 'lonmin': -125,\n }\n elif domain == 'NAmer':\n # Restrict cells to North America (US and Canada, including Alaska)\n # For now it's not including much of Alaska\n bbox = {\n 'latmax' : 71,\n 'latmin' : 25,\n 'lonmax': -65,\n 'lonmin': -169,\n }\n else:\n # Load CSO domain JSON\n cso_domains_url = \"https://raw.githubusercontent.com/snowmodel-tools/preprocess_python/master/CSO_domains.json\"\n domains_resp = requests.get(cso_domains_url)\n domains = domains_resp.json()\n # Domain bounding box and projection\n bbox = domains[domain]['Bbox']\n # bbox_delta = 0.1\n\n bbox_proj = 'epsg:4326'\n bbox_latlon_gs = gpd.GeoSeries(\n box(bbox['lonmin'], bbox['latmin'], bbox['lonmax'], bbox['latmax']),\n crs=bbox_proj\n )\n bbox_ease2_gs = bbox_latlon_gs.to_crs(EASE2G_epsg_str)\n\n return bbox_latlon_gs, bbox_ease2_gs", "title": "" }, { "docid": "b19a910b81f257df07d549092362498d", "score": "0.47759676", "text": "def calculate_full_areas(genes_df, temp_df, area_col, normalize):\n qstring = 'AUC_UseFLAG == 1'\n full = temp_df.query(qstring).groupby('GeneID')[area_col].sum()/normalize\n full.name = 'AreaSum_max'\n\n # full_adj = (temp_df.query(qstring)\n # .assign(gpAdj = lambda x: x[area_col] / x['GeneIDCount_All'])\n # .groupby('GeneID')['gpAdj'] # temp column\n # .sum()/normalize\n # )\n # full_adj.name = 'AreaSum_gpcAdj'\n\n # qstring_s = qstring + ' & IDG < 4'\n # strict = temp_df.query(qstring_s).groupby('GeneID')[area_col].sum()\n # strict.name = ''\n\n qstring_u = qstring + ' & GeneIDCount_All == 1'\n uniq = temp_df.query(qstring_u).groupby('GeneID')[area_col].sum()/normalize\n uniq.name = 'AreaSum_u2g_all'\n\n qstring_u0 = qstring_u + ' & MissedCleavages == 0'\n uniq_0 = temp_df.query(qstring_u0).groupby('GeneID')[area_col].sum()/normalize\n uniq_0.name = 'AreaSum_u2g_0'\n result = pd.concat( (full, uniq, uniq_0), copy=False, axis=1) .fillna(0)\n genes_df = genes_df.merge(result, how='left',\n left_on='GeneID', right_index=True)\n return genes_df", "title": "" }, { "docid": "5b28feb969388d7b4dcf1d1e4decb1b1", 
"score": "0.4775404", "text": "def labels():", "title": "" }, { "docid": "89d4fb23ddc860e94cff93a8d809f3de", "score": "0.47730404", "text": "def sensitivity(edges, f_dists):\n\n nodes = np.arange(max(edges[0]) + 1)\n # It is unlikely, that a node has no edges.\n # For data with many geographic outliers use\n # nodes = list(set(edges[0]))\n # to get unique nodes (and adjust code below)\n denominator = np.zeros(nodes.shape)\n for i, d in enumerate(f_dists):\n denominator[edges[0][i]] = denominator[edges[0][i]] + d\n\n return 1 - f_dists / denominator[edges[0]]", "title": "" }, { "docid": "3472ebdce72306781febcf52735c4e6b", "score": "0.47722837", "text": "def plot_SpecSubsetConc_comparisons():\n\n # --- Datasets to use?\n RunDict = {}\n RunDict['Iso.Unlimited'] = '/mnt/lustre/users/ts551/GC/rundirs/P_ARNA/geosfp_4x5_aciduptake.v12.9.0.ARNA.Isotherm.Diags.v9.Iso.UnlimitedAll/OutputDir/'\n\n RunDict['Base'] = '/mnt/lustre/users/ts551/GC/rundirs/P_ARNA/geosfp_4x5_aciduptake.v12.9.0.ARNA.Isotherm.Diags.v9.Base/OutputDir/'\n\n # Get IGAC runs\n res = '4x5'\n GC_version ='v13.4'\n RunSet = 'IGAC.ARNAv12.72L'\n folder4netCDF = True\n RunDict = ar.get_dict_of_GEOSChem_model_output(RunSet=RunSet,\n GC_version=GC_version,\n res=res,\n folder4netCDF=folder4netCDF)\n\n # --- ARNA\n campaign = 'ARNA'\n dsD = {}\n for key in RunDict.keys():\n folder = RunDict[ key ]\n print(key, folder)\n # Get the data of species to plot\n # HNO2, NOx, NO, NO2, CO, ozone,\n data = get_model_noon_vertical_conc(folder=folder, campaign=campaign)\n\n dsD[key] = data\n\n\n # Setup a PDF to store values\n# savetitle = 'ARNA_SpecConcSub_Noon_mean'\n savetitle = 'ARNA_SpecConcSub_daytime_mean'\n if campaign == 'FIREX-AQ' :\n savetitle = 'ARNA_FIREXAQ_SpecConcSub_daytime_mean'\n# savetitle = 'ARNA_FIREXAQ_SpecConcSub_noon_mean'\n\n pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)\n\n\n # Units to use?\n units_d = {\n 'CO': 'ppbv', 'O3': 'ppbv', 'NO2': 'pptv', 'NO': 'pptv', 'NOx': 'pptv',\n 'HNO2': 'pptv', 'HONO': 'pptv',\n 'NIT': 'pptv', 'NITs': 'pptv', 'SO4s': 'pptv', 'SO4': 'pptv',\n 'NH4': 'pptv',\n 'SO4-all': 'pptv', 'NIT-all': 'pptv', 'SAL-all': 'pptv',\n }\n\n # Plot up\n vars2plot = 'O3', 'HNO2', 'NOx', 'NO', 'NO2', 'CO', 'SAL-all', 'NIT-all'\n for var2plot in vars2plot:\n\n # Loop model data\n for key in dsD.keys():\n # Get units and scaling\n units = units_d[var2plot]\n scaleby = AC.get_unit_scaling(units)\n\n # Select data to plot\n X = dsD[key][var2plot] * scaleby\n\n# Y = dsD[key].lev\n PressVar = 'Met_PMID'\n Y = dsD[key][PressVar].values\n Y = AC.hPa_to_Km(Y)\n\n # plot up\n plt.plot(X, Y, label=key)\n\n\n # invert y axis\n ax = plt.gca()\n# ax.invert_yaxis()\n\n # Set x scale as log\n ax.set_xscale('log')\n\n # Add a second axis in kilometres?\n# AC.hPa2Km\n\n\n # plot\n plt.legend()\n\n # Beautify plot\n plt.title( var2plot)\n\n # Save to PDF\n AC.plot2pdfmulti(pdff, savetitle, dpi=dpi, tight=True)\n plt.close()\n\n # Save entire pdf\n AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)\n plt.close('all')", "title": "" }, { "docid": "a8a9d968e9a3cf1a41399dd125e2704a", "score": "0.47696343", "text": "def denormalize_ged(g1, g2, nged):\n return round(nged * (g1.num_nodes + g2.num_nodes) / 2)", "title": "" }, { "docid": "972d25dc5cf8e97b18e15f3048f95c09", "score": "0.4769071", "text": "def cal_CN(self):\n\n df_edge_list_reverse = pd.DataFrame()\n df_edge_list_reverse['source'] = self.df_edge_list['target']\n df_edge_list_reverse['target'] = self.df_edge_list['source']\n\n df_all_nodes_pair = 
pd.concat([df_edge_list_reverse, self.df_edge_list])\n\n \"\"\"\n get common neighbours\n \"\"\"\n\n df_common_neighbor = pd.merge(df_all_nodes_pair, df_all_nodes_pair, on=['target'], how='left').dropna()\n df_common_neighbor = df_common_neighbor[df_common_neighbor['source_x'] != df_common_neighbor['source_y']]\n df_common_neighbor_count = df_common_neighbor.groupby(['source_x', 'source_y']).count()\n df_common_neighbor_count = df_common_neighbor_count.reset_index()\n\n df_common_neighbor_count.rename(columns={'target': 'similarity'}, inplace=True)\n df_common_neighbor_count.rename(columns={'source_x': 'source', 'source_y': 'target'}, inplace=True)\n return df_common_neighbor_count", "title": "" }, { "docid": "f20e82bed113b663a573b26a9b4847c9", "score": "0.47685957", "text": "def calculate_fraction(self, labels):\n return sum(labels) / len(labels)", "title": "" }, { "docid": "dcc409643cf18370c9b43b7b19c17b3e", "score": "0.4765537", "text": "def get_alternative_domains(self):\n domain1 = []\n domain2 = []\n for i, p in enumerate(self.coords):\n # the two adjacent vertices and the middle points with them\n q1 = self.coords[(i + 1) % len(self.coords)]\n q2 = self.coords[i - 1]\n m1 = helpers.normalize((p + q1) / 2)\n m2 = helpers.normalize((p + q2) / 2)\n\n if self.type:\n if (len(self.word) + i) % 2 == 0:\n domain1.append((m1, p, m2, self.center))\n else:\n domain2.append((m1, p, m2, self.center))\n\n else:\n if len(self.word) % 2 == 0:\n domain1.append((m1, p, self.center))\n domain2.append((m2, p, self.center))\n else:\n domain1.append((m2, p, self.center))\n domain2.append((m1, p, self.center))\n\n return domain1, domain2", "title": "" }, { "docid": "a6d4c3acd1e400b1d5e3ced958cf4292", "score": "0.47535765", "text": "def apply_LDA(Nchoose, features, labels, train_set, test_set, test_subsets, Nbp, \n acc_thresh=None, topX=5, plot=True):\n Nsubsets = test_subsets.shape[-1]\n colour_inds = np.array(get_colour_inds(Nbp, Nchoose=Nchoose))\n accuracy = np.zeros((len(colour_inds), Nsubsets))\n for ii in range(len(colour_inds)):\n X_train = features[train_set,:][:, colour_inds[ii]]\n Y_train = labels[train_set]\n X_train = StandardScaler().fit_transform(X_train)\n# print(colour_inds[ii],X_train.shape, Y_train.shape)\n\n lda = LinearDiscriminantAnalysis(store_covariance=True)\n lda.fit(X_train, Y_train).transform(X_train)\n for jj in range(Nsubsets):\n X_test = features[test_subsets[:,jj],:][:, colour_inds[ii]]\n X_test = StandardScaler().fit_transform(X_test)\n Y_test = labels[test_subsets[:,jj]]\n lda_pred = lda.predict(X_test)\n lda_wrong = (abs(lda_pred-Y_test)>0)\n# print(\"test{}: Nwrong={}, {}, {}\".format(jj, lda_wrong.sum(), lda_pred[lda_wrong], Y_test[lda_wrong]))\n accuracy[ii, jj] = 1.-(lda_wrong.sum()/float(X_test.shape[0]))\n# print(\"Nchoose={}, avg false positive:{}\".format(Nchoose, \n# np.nanmean(accuracy, axis=1)))\n if plot:\n plt.figure(figsize=(6,4))\n plt.hist(np.nanmean(accuracy, axis=1))\n plt.axvline(acc_thresh, ls='--', color='C1', label='Threshold')\n plt.legend()\n plt.xlabel('Avg Accuracy for {} filters'.format(Nchoose))\n# good = (np.nanmean(accuracy, axis=1)>=acc_thresh) \n# good = good | (np.nanmean(accuracy, axis=1) == np.nanmax(np.nanmean(accuracy, axis=1)))\n if acc_thresh is not None:\n good = (np.nanmean(accuracy, axis=1)>=acc_thresh) \n else:\n good = np.argsort(np.nanmean(accuracy, axis=1))[::-1][:topX]\n# print(\"Best filters={}; Avg Accuracy={}; Std={}\".format(colour_inds[good], np.nanmean(accuracy, axis=1)[good], \n# np.nanstd(accuracy, axis=1)[good]))\n 
return colour_inds[good], np.nanmean(accuracy, axis=1)[good], \\\n np.nanstd(accuracy, axis=1)[good]", "title": "" }, { "docid": "ef52c89c73626b4e5db05a3c6b5b62a5", "score": "0.4749354", "text": "def plot_rejection_classification_diagram(in_probs,\n in_labels,\n out_probs,\n num_thresholds=100,\n plot_theoretical_max=True,\n ax=None,\n **plot_kwargs):\n in_probs = np.array(in_probs)\n out_probs = np.array(out_probs)\n in_probs, _ = verify_probability_shapes(in_probs)\n out_probs, _ = verify_probability_shapes(out_probs)\n\n in_labels = np.array(in_labels)\n percents_rejected = np.linspace(0.0, 0.99, num=num_thresholds)\n # Iterate over fine-grained thresholds to find closest corresponding threshold\n thresholds = np.linspace(0.0, 1.0, num=100 * num_thresholds)\n in_confidences = np.max(in_probs, axis=1)\n out_confidences = np.max(out_probs, axis=1)\n confidences = np.hstack([in_confidences, out_confidences])\n in_predictions = np.argmax(in_probs, axis=1)\n in_predictions_correct = (in_predictions == in_labels)\n accuracies = []\n theoretical_max_accuracies = []\n\n num_in = in_confidences.shape[0]\n num_out = out_confidences.shape[0]\n percent_out = num_out / (num_in + num_out)\n\n # Initialize variables to run through a subset of thresholds for each p\n # instead of running through the entire range of thresholds.\n init_threshold = 0\n final_threshold = len(thresholds)\n\n for p in percents_rejected:\n if p < percent_out:\n theoretical_max_accuracies.append(\n num_in / ((1. - p) * (num_in + num_out)))\n else:\n theoretical_max_accuracies.append(1.0)\n for i in range(init_threshold, final_threshold):\n threshold = thresholds[i]\n p_at_threshold = np.sum(confidences < threshold) / (num_in + num_out)\n if p_at_threshold >= p:\n num_in_above_p = np.sum(in_confidences >= threshold)\n num_out_above_p = np.sum(out_confidences >= threshold)\n num_in_above_p_correct = np.sum(\n in_predictions_correct[in_confidences >= threshold])\n\n accuracies.append((num_in_above_p_correct + 1e-6) /\n (num_in_above_p + num_out_above_p + 1e-6))\n # For future percents_rejected, only look at thresholds greater than i\n init_threshold = i\n break\n\n if ax is None:\n ax = plt.gca()\n ax.plot(percents_rejected, accuracies, **plot_kwargs)\n if plot_theoretical_max:\n ax.plot(percents_rejected, theoretical_max_accuracies,\n label='Theoretical maximum', color='g')\n ax.set_xlabel(r'Percent of data rejected by uncertainty')\n ax.set_ylabel(r'Accuracy')\n ax.set_title('Rejection Classification Diagram', fontsize=20)\n ax.set_xlim([0.0, 1.0])\n return ax", "title": "" }, { "docid": "5096a0bf90e34a0af6b2bd27d8d0eb14", "score": "0.47418585", "text": "def cal_dcmi_scores(self, mean=True, n_neighbors=3):\n train, test = self.create_mutualinfo_matrix(mean=mean,\n n_neighbors=n_neighbors)\n d = (metrics.disentanglement_score(train) +\n metrics.disentanglement_score(test)) / 2.\n c = (metrics.completeness_score(train) +\n metrics.completeness_score(test)) / 2.\n return d, c", "title": "" }, { "docid": "e30876c485fd9b58413bf79253b880cc", "score": "0.4741051", "text": "def domain_shape(self):\n return self._domain_shape()", "title": "" }, { "docid": "ccce7b64bb380d490e3d77bf33d8997f", "score": "0.47359517", "text": "def test_edge_confidence(self):\r\n\r\n def extraction(Xs, Ys, mask):\r\n return mask[Ys, Xs].mean()\r\n\r\n # Create a regular dummy mask\r\n mask = np.arange(25).reshape([5, 5]).repeat(5, axis=0).repeat(5, axis=1)\r\n mask[12, 12] = 25 # add an iregular center\r\n\r\n # make the object with this mask\r\n obj = 
segment_group(mask)\r\n\r\n # get the mask with only edges\r\n edge_mask = obj._outline()\r\n\r\n # assign the edge confidences\r\n obj.edge_confidence(extraction, [edge_mask])\r\n\r\n # test they are all one\r\n for seg in obj.seg_dict.values():\r\n for edge_val in seg.conf_dict.values():\r\n assert (edge_val == 1.), 'every edge pixel averaged should just be 1'", "title": "" }, { "docid": "b9816a32499f54c478fb9ea890115b26", "score": "0.47324613", "text": "def kl_divergence_annual_compare(flow_dataset: xr.Dataset, sites: list,\n raw_var: str, raw_name: str,\n ref_var: str, ref_name: str,\n bc_vars: list, bc_names: list,\n plot_colors: list, title = \"Annual KL Diveregence Before/After Bias Correction\",\n fontsize_title = 40, fontsize_tick = 30, fontsize_labels = 40,\n fontsize_legend = 30,\n showfliers = False, sharex = True, sharey = 'row', TINY_VAL = 1e-6,\n figsize = (30,20), show_y_grid = True):\n\n raw_flows = flow_dataset[raw_var].to_pandas()\n ref_flows = flow_dataset[ref_var].to_pandas()\n bc_flows = list()\n for bc_var in bc_vars:\n bc_flows.append(flow_dataset[bc_var].to_pandas())\n\n WY_grouper = calc_water_year(raw_flows)\n WY_array = np.arange(WY_grouper[0], WY_grouper[-1], 1)\n\n n_rows, n_cols = determine_row_col(len(sites))\n fig, axs = plt.subplots(n_rows, n_cols, figsize = figsize, sharex = sharex, sharey = sharey)\n axs_list = axs.ravel().tolist()\n\n kldiv_refraw_annual = pd.DataFrame(index = WY_array, columns = sites)\n kldiv_refbc_annuals = list()\n for bc_var in bc_vars:\n kldiv_refbc_annuals.append(pd.DataFrame(index = WY_array, columns = sites))\n\n plt.suptitle(title, fontsize = fontsize_title, y = 1.05)\n\n for WY in WY_array:\n raw_flow_WY = raw_flows[f\"{WY}-10-01\":f\"{WY+1}-09-30\"]\n ref_flow_WY = ref_flows[f\"{WY}-10-01\":f\"{WY+1}-09-30\"]\n bc_flow_WYs = list()\n for bc_flow in bc_flows:\n bc_flow_WYs.append(bc_flow[f\"{WY}-10-01\":f\"{WY+1}-09-30\"])\n total_bins = int(np.sqrt(len(raw_flow_WY.index)))\n\n for site in sites:\n ref_WY_site_vals = ref_flow_WY[site].values\n raw_WY_site_vals = raw_flow_WY[site].values\n bc_WY_site_vals = list()\n for bc_flow_WY in bc_flow_WYs:\n bc_WY_site_vals.append(bc_flow_WY[site].values)\n\n ref_WY_site_pdf, ref_WY_site_edges = np.histogram(ref_WY_site_vals, bins = total_bins,\n density = True)\n raw_WY_site_pdf = np.histogram(raw_WY_site_vals, bins = ref_WY_site_edges, density = True)[0]\n bc_WY_site_pdfs = list()\n for bc_WY_site_val in bc_WY_site_vals:\n bc_WY_site_pdf = np.histogram(bc_WY_site_val, bins = ref_WY_site_edges, density = True)[0]\n bc_WY_site_pdf[bc_WY_site_pdf == 0] = TINY_VAL\n bc_WY_site_pdfs.append(bc_WY_site_pdf)\n\n ref_WY_site_pdf[ref_WY_site_pdf == 0] = TINY_VAL\n raw_WY_site_pdf[raw_WY_site_pdf == 0] = TINY_VAL\n\n kldiv_refraw_annual.loc[WY][site] = scipy.stats.entropy(pk = raw_WY_site_pdf, qk = ref_WY_site_pdf)\n for i, (kldiv_refbc_annual, bc_WY_site_pdf) in enumerate(zip(kldiv_refbc_annuals, bc_WY_site_pdfs)):\n kldiv_refbc_annual.loc[WY][site] = scipy.stats.entropy(pk = bc_WY_site_pdf, qk = ref_WY_site_pdf)\n kldiv_refbc_annuals[i] = kldiv_refbc_annual\n\n plot_labels = [raw_name]\n plot_labels.extend(bc_names)\n\n for i, site in enumerate(sites):\n ax=axs_list[i]\n plot_vals = [kldiv_refraw_annual[site].values]\n plot_vals.extend([kldiv_refbc_annual[site].values for kldiv_refbc_annual in kldiv_refbc_annuals])\n box_dict = ax.boxplot(plot_vals, patch_artist = True, showfliers = showfliers, widths = 0.8, notch = True)\n for item in ['boxes', 'fliers', 'medians', 'means']:\n for sub_item, 
color in zip(box_dict[item], plot_colors):\n plt.setp(sub_item, color = color)\n ax.set_title(site, fontsize = fontsize_labels)\n ax.set_xticks(np.arange(1, len(plot_labels)+1))\n ax.set_xticklabels(plot_labels, fontsize = fontsize_tick, rotation = 45)\n ax.tick_params(axis = 'both', labelsize = fontsize_tick)\n if show_y_grid:\n ax.grid(which = 'major', axis = 'y', alpha = 0.5)\n\n # gets rid of any spare axes\n i += 1\n while i < len(axs_list):\n axs_list[i].axis('off')\n i += 1\n # ensures last axes is off to make room for the legend\n axs_list[-1].axis('off')\n\n fig.text(-0.04, 0.5, \"Annual KL Divergence\",\n va = 'center', rotation = 'vertical', fontsize = fontsize_labels);\n\n fig.text(0.5, -0.04, r'$KL(P_{' + f'{ref_name}' + r'} || P_{scenario})$',\n va = 'bottom', ha = 'center', fontsize = fontsize_labels);\n\n axs_list[-1].legend(handles=custom_legend(names=plot_labels, colors = plot_colors),\n fontsize = fontsize_legend, loc = 'center')\n\n plt.tight_layout()\n\n return fig, axs", "title": "" }, { "docid": "f9467ee4949403a5c45ea5eda99ec413", "score": "0.47255844", "text": "def computing_active_inactive_ratio(df,prot_id):\n counting = df.loc[:,[\"DeepAffinity Protein ID\", \"label\"]].groupby([\"DeepAffinity Protein ID\", \"label\"]).size()\n count_df = counting.to_frame(name = 'size').reset_index()\n count_prot = count_df.loc[count_df[\"DeepAffinity Protein ID\"]==prot_id, :]\n if count_prot.shape[0] == 0:\n return np.nan\n n_labels = count_prot.shape[0]\n if n_labels == 1:\n label_label = count_prot[\"label\"].values[0]\n if label_label == 1.0:\n ratio = 1.0\n else:\n ratio = 0.0\n else:\n n_actives = count_prot.loc[count_prot[\"label\"]==1.0, \"size\"].values[0]\n n_total = n_actives + count_prot.loc[count_prot[\"label\"]==0.0, \"size\"].values[0]\n ratio = n_actives/n_total\n return ratio", "title": "" }, { "docid": "539afbca79c6f97b876e8a062a2ef684", "score": "0.47239718", "text": "def deltaCon(A1, A2, g):\n vcount = A1.shape[0] # the number of nodes\n\n # random partition\n partitions = dict()\n for i in range(g):\n partitions[i] = np.zeros(vcount)\n for idx, membership in enumerate(np.random.choice(range(g), vcount)):\n partitions[membership][idx] += 1\n \n for i in range(g):\n s0 = partitions[i]\n s1 = fastBeliefPropagation(A1, s0)\n s2 = fastBeliefPropagation(A2, s0)\n if i == 0:\n S1 = s1.reshape((-1, 1))\n S2 = s1.reshape((-1, 1))\n else:\n S1 = np.concatenate((S1, s1.reshape((-1, 1))), axis=1)\n S2 = np.concatenate((S2, s2.reshape((-1, 1))), axis=1)\n\n\n similarity = 1 / (1 + __rootED(S1, S2))\n return similarity", "title": "" }, { "docid": "deb588ef60c8f89d4e5405e1280691e6", "score": "0.47206712", "text": "def _compute_conservative_assignment_and_score_lists(\n confidence_df,\n assignment_df,\n label_graph,\n label_to_name,\n og,\n blacklist_labels=None\n ):\n print('Adjusting results to compute conservative metrics.')\n # Instantiate list of blacklisted labels\n if blacklist_labels is None:\n blacklist_labels = set()\n\n # Map each item to its set of annotated labels\n item_to_labels = {\n item: [\n label\n for label in assignment_df.columns\n if assignment_df.loc[item][label]\n ]\n for item in assignment_df.index\n }\n\n # Map each item to its most-specific labels\n print(\"Mapping items to their most-specific labels...\")\n all_labels = set(label_graph.get_all_nodes())\n item_to_ms_labels = {}\n for item, labels in item_to_labels.items():\n ms_item_labels = label_graph.most_specific_nodes(\n set(labels) & all_labels\n )\n ms_item_labels = ms_item_labels - 
blacklist_labels\n item_to_ms_labels[item] = ms_item_labels\n print(\"done.\")\n\n # Create new nodes in the label graph corresponding to \n # joint-labels -- that is, sets of labels for which there\n # exists a sample labelled with both labels. For example,\n # if an experiment is labelled with both 'PBMC' and\n # 'T cell', then we create a new label 'PBMC & T cell'\n mod_label_graph = label_graph.copy()\n mod_label_to_name = {\n label: name\n for label, name in label_to_name.items()\n }\n\n # Create all joint-nodes\n item_to_new_ms_labels = defaultdict(lambda: set())\n for item, ms_labels in item_to_ms_labels.items():\n # Create a joint label\n if len(ms_labels) > 1:\n joint_label = frozenset(ms_labels)\n mod_label_to_name[joint_label] = \" & \".join([\n mod_label_to_name[ms_label]\n for ms_label in ms_labels\n ])\n item_to_new_ms_labels[item].add(joint_label)\n # Create from the joint label to the labels that \n # it includes.\n for ms_label in ms_labels:\n mod_label_graph.add_edge(ms_label, joint_label)\n print(\"Created joint label '%s' (%s)\" % (\n mod_label_to_name[joint_label],\n joint_label\n ))\n\n # Make a 'deep' copy of the mappings from experiments to most-specific \n # labels. Then recompute the most-specific labels and predictions now \n # that we have added these new join-labels\n mod_item_to_ms_labels = {\n item: set(ms_labels)\n for item, ms_labels in item_to_ms_labels.items()\n }\n for item, new_ms_labels in item_to_new_ms_labels.items():\n mod_item_to_ms_labels[item].update(new_ms_labels)\n mod_item_to_ms_labels = {\n item: mod_label_graph.most_specific_nodes(labels)\n for item, labels in mod_item_to_ms_labels.items()\n }\n\n # If the sample is most-specifically labeled as PBMC, then\n # for our purposes, we treat mononuclear cell as its most\n # specific label \n item_to_ms_labels = mod_item_to_ms_labels\n for item, ms_labels in item_to_ms_labels.items():\n if PBMC_TERM_ID in ms_labels:\n ms_labels.add(\n MONONUCLEAR_CELL_TERM_ID\n ) \n\n # For each item, get all of the ancestors of all descendants\n # of it's most-specific labels\n item_to_anc_desc_ms_labels = {}\n for item, ms_labels in item_to_ms_labels.items():\n desc_ms_labels = set()\n for ms_label in ms_labels:\n desc_ms_labels.update(\n mod_label_graph.descendent_nodes(ms_label)\n )\n anc_desc_ms_labels = set()\n for desc_ms_label in desc_ms_labels:\n anc_desc_ms_labels.update(\n mod_label_graph.ancestor_nodes(desc_ms_label)\n )\n # Make sure that the item's labels are not included in\n # this set\n anc_desc_ms_labels = anc_desc_ms_labels - set(item_to_labels[item])\n item_to_anc_desc_ms_labels[item] = anc_desc_ms_labels\n\n # Iterate over all labels and construct the list of assignment-values \n # (True or False) and classifier-produced confidence-scores for only the \n # set of items that are relevant for computing the conservative-metrics\n # for the label\n skipped_pairs = set()\n pair_to_items = defaultdict(lambda: set())\n label_to_cons_assigneds = {}\n label_to_cons_scores = {}\n for curr_label in set(all_labels) & set(assignment_df.columns):\n print(\"Examining label %s\" % og.id_to_term[curr_label].name)\n\n # Assignment-values for this label\n filtered_assignments = []\n\n # Classifier-scores for this label\n filtered_scores = []\n\n # The set of items not considered for this label\n skipped_items = set()\n\n # Ancestors of the current label\n anc_labels = set(mod_label_graph.ancestor_nodes(curr_label)) - set([curr_label])\n\n # Iterate over each item and determine whether it should be included\n # in 
the computation of curr_label's metrics\n for item in assignment_df.index:\n assigned = assignment_df.loc[item][curr_label]\n score = confidence_df.loc[item][curr_label] \n ms_labels = item_to_ms_labels[item]\n anc_desc_ms_labels = item_to_anc_desc_ms_labels[item]\n # NOTE this is the crucial step in which we skip\n # samples that have a most-specific label that is\n # an ancestor of the current label or an ancestor\n # of a descendent of the current label\n if len(set(ms_labels) & anc_labels) > 0:\n for ms_label in set(ms_labels) & set(anc_labels):\n pair = (ms_label, curr_label)\n skipped_pairs.add(pair)\n pair_to_items[pair].add(item)\n skipped_items.add(item)\n continue\n if curr_label in anc_desc_ms_labels:\n skipped_items.add(item)\n continue\n filtered_assignments.append(assigned)\n filtered_scores.append(score)\n label_to_cons_assigneds[curr_label] = filtered_assignments\n label_to_cons_scores[curr_label] = filtered_scores\n print(\"Label %s\" % label_to_name[curr_label])\n print(\"N samples in ranking: %d\" % len(filtered_assignments))\n print(\"N skipped: %d\" % len(skipped_items))\n print(\"Sample of skipped %s\" % list(skipped_items)[0:20])\n print()\n label_to_assigneds = dict(label_to_cons_assigneds)\n label_to_scores = dict(label_to_cons_scores) \n\n # Print some data on which samples were filtered from this analysis\n filtering_da = [\n (\n og.id_to_term[pair[0]].name, \n og.id_to_term[pair[1]].name, \n len(pair_to_items[pair])\n )\n for pair in skipped_pairs\n ]\n filtering_df = pd.DataFrame(\n data = filtering_da,\n columns = ['', '', 'Number of samples removed'] # TODO\n )\n return label_to_assigneds, label_to_scores", "title": "" }, { "docid": "074445f34e39f85a2fe49772123013b9", "score": "0.47205225", "text": "def _discr_rdf(dissimilarities, labels):\n # calculates test statistics distribution\n rdfs = []\n\n for i, label in enumerate(labels):\n di = dissimilarities[i]\n\n # All other samples except its own label\n idx = labels == label\n Dij = di[~idx]\n\n # All samples except itself\n idx[i] = False\n Dii = di[idx]\n\n rdf = [1 - ((Dij < d).sum() + 0.5 * (Dij == d).sum()) / Dij.size for d in Dii]\n rdfs.append(rdf)\n\n out = np.full((len(rdfs), max(map(len, rdfs))), np.nan)\n for i, rdf in enumerate(rdfs):\n out[i, : len(rdf)] = rdf\n\n return out", "title": "" }, { "docid": "eb4fed8132c6844801b3aeba8b7e1e03", "score": "0.47031784", "text": "def iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True):\n if not per_image:\n preds, labels = (preds,), (labels,)\n ious = []\n for pred, label in zip(preds, labels):\n intersection = ((label == 1) & (pred == 1)).sum()\n union = ((label == 1) | ((pred == 1) & (label != ignore))).sum()\n if not union:\n iou = EMPTY\n else:\n iou = float(intersection) / union\n ious.append(iou)\n iou = mean(ious) # mean accross images if per_image\n return 100 * iou", "title": "" }, { "docid": "4b7ddb2f81aee58175dd6397516b12ec", "score": "0.47011453", "text": "def calc_area(self):\n raise NotImplementedError", "title": "" }, { "docid": "b1e43d2554fb301836572f27cfae4eb5", "score": "0.4694676", "text": "def area(self):\n if self.point_1[1] == self.point_2[1]:\n A = self.point_1 \n D = self.point_2\n B = self.point_3\n C = self.point_4\n elif self.point_1[1] == self.point3[1] :\n A = self.point_1\n D = self.point_3\n B = self.point_2\n C = self.point_4\n else:\n A = self.point_1\n D = self.point_4\n B = self.point_2\n C = self.point_3\n \n base_1 = A[0] + D[0]\n base_2 = B[0] + C[0]\n if C[1] > D[1]:\n height = C[1] - D[1]\n else:\n 
height = D[1] - C[1]\n return height * ((base_1 + base_2)/2)", "title": "" }, { "docid": "89c20d1a0df39f477d826685f9c3b292", "score": "0.46939436", "text": "def ki_67_percentage(mask_positive, mask_negative):\n total_area = mask_positive.shape[0] * mask_positive.shape[1]\n area_positive = np.sum(mask_positive)\n visible_area_negative = np.sum(mask_negative)\n visible_area_negative_percentage = visible_area_negative / (total_area - area_positive)\n hidden_area_negative = visible_area_negative_percentage * area_positive\n area_negative = visible_area_negative + hidden_area_negative\n return area_positive / (area_positive + area_negative)", "title": "" }, { "docid": "a93c6da28e50217b0827a055452d84d6", "score": "0.46878663", "text": "def test_DomainArchitecture(self):\r\n #Because we short circult interation, must close handle explicitly\r\n handle = open(EX_APAF)\r\n tree = next(PhyloXMLIO.parse(handle))\r\n handle.close()\r\n clade = tree.clade[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n darch = clade.sequences[0].domain_architecture\r\n self.assertTrue(isinstance(darch, PX.DomainArchitecture))\r\n self.assertEqual(darch.length, 1249)\r\n for domain, start, end, conf, value in zip(darch.domains,\r\n (6, 109, 605, 647, 689, 733, 872, 993, 1075, 1117, 1168),\r\n (90, 414, 643, 685, 729, 771, 910, 1031, 1113, 1155, 1204),\r\n (7.0e-26, 7.2e-117, 2.4e-6, 1.1e-12, 2.4e-7, 4.7e-14, 2.5e-8,\r\n 4.6e-6, 6.3e-7, 1.4e-7, 0.3),\r\n ('CARD', 'NB-ARC', 'WD40', 'WD40', 'WD40', 'WD40', 'WD40',\r\n 'WD40', 'WD40', 'WD40', 'WD40')):\r\n self.assertTrue(isinstance(domain, PX.ProteinDomain))\r\n self.assertEqual(domain.start + 1, start)\r\n self.assertEqual(domain.end, end)\r\n self.assertAlmostEqual(domain.confidence, conf)\r\n self.assertEqual(domain.value, value)", "title": "" }, { "docid": "014acbd2d3e65672d0cda2f2b360b8b4", "score": "0.46777096", "text": "def survey_overlap(xs, ys, xs1, ys1, c_graph = True, radius = -0.1, nbins = 1000):\n\n\timport numpy as np\n\timport matplotlib.pyplot as plt\n\n\txwalls = np.linspace( min(xs) - 5.0, max(xs) + 5.0, nbins + 1 )\n\tywalls = np.linspace( min(ys) - 5.0, max(ys) + 5.0, nbins + 1 )\n\n\tim, xs_bin, ys_bin, ax = plt.hist2d(xs, ys, bins = (xwalls, ywalls) )\n\txs_mids = 0.5*(xs_bin[:-1] + xs_bin[1:])\n\tys_mids = 0.5*(ys_bin[:-1] + ys_bin[1:])\n\tplt.close()\n\tim[im>0] = 1\n\tconts = plt.contour(xs_mids, ys_mids, im.T, 1)\n\t\n\txwalls1 = np.linspace( min(xs1) - 5.0, max(xs1) + 5.0, nbins + 1)\n\tywalls1 = np.linspace( min(ys1) - 5.0, max(ys1) + 5.0, nbins + 1)\n\tim1, xs_bin1, ys_bin1, ax1 = plt.hist2d(xs1, ys1, bins = (xwalls1, ywalls1))\n\txs_mids1 = 0.5*(xs_bin1[:-1] + xs_bin1[1:])\n\tys_mids1 = 0.5*(ys_bin1[:-1] + ys_bin1[1:])\n\tplt.close()\n\tim1[im1>0] = 1\n\n\tconts = plt.contour(xs_mids, ys_mids, im.T, 1)\n\tconts1 = plt.contour(xs_mids1, ys_mids1, im1.T, 1)\n\tplt.show()", "title": "" }, { "docid": "2b399fb557d69e3b663db4421b17dc2f", "score": "0.46743822", "text": "def dstat(genotypes, groups, gind):\n\n num = 0\n den = 0\n snpnum = -1\n\n baba = []\n abba = []\n\n def af(gt, group, gind):\n \"\"\"\n Frequency of the major allele.\n \"\"\"\n gts = [int(gt[i]) for i in gind[group] if gt[i] != \"9\"]\n n_inds = len(gts)\n if n_inds == 0:\n return None\n return float(sum(gts)) / n_inds\n\n for row in genotypes:\n snpnum += 1\n\n w = af(row, groups[0], gind)\n x = af(row, groups[1], gind)\n y = af(row, groups[2], gind)\n z = af(row, groups[3], gind)\n\n if None in (w, x, y, z):\n continue\n\n # Follow the BABA-ABBA convention, as in AdmixTools.\n num += (w - 
x)*(y - z)\n den += (w + x - 2*w*x)*(y + z - 2*y*z)\n\n if (abs(w-y) < abs(w-x) and abs(w-y) < abs(y-x) and\n abs(w-y) < abs(w-z) and abs(w-y) < abs(y-z) and\n abs(x-z) < abs(w-x) and abs(x-z) < abs(y-x) and\n abs(x-z) < abs(w-z) and abs(x-z) < abs(y-z)):\n baba.append(snpnum)\n elif (abs(x-y) < abs(x-w) and abs(x-y) < abs(y-w) and\n abs(x-y) < abs(x-z) and abs(x-y) < abs(y-z) and\n abs(w-z) < abs(x-w) and abs(w-z) < abs(y-w) and\n abs(w-z) < abs(x-z) and abs(w-z) < abs(y-z)):\n abba.append(snpnum)\n\n try:\n d = num / den\n except ZeroDivisionError:\n d = 0\n\n return d, frozenset(baba), frozenset(abba)", "title": "" }, { "docid": "1ced48af9c8ab780e119bbcb612dcfd7", "score": "0.4673081", "text": "def plotDensityMap(scores):\r\n\r\n\r\n TRIANGLE=np.array([[math.cos(math.pi*0.5), math.sin(math.pi*0.5)],\r\n [math.cos(math.pi*1.166), math.sin(math.pi*1.166)],\r\n [math.cos(math.pi*1.833), math.sin(math.pi*1.833)]])\r\n\r\n\r\n pointsX=[score.dot(TRIANGLE)[0] for score in scores]\r\n pointsY=[score.dot(TRIANGLE)[1] for score in scores]\r\n\r\n vertices=[]\r\n vertices.append(np.array([1,0,0]).dot(TRIANGLE))\r\n vertices.append(np.array([0,1,0]).dot(TRIANGLE))\r\n vertices.append(np.array([0,0,1]).dot(TRIANGLE))\r\n for i in range(3):\r\n p1=vertices[i]\r\n if i==2:\r\n p2=vertices[0]\r\n else:\r\n p2=vertices[i+1]\r\n c=0.5*(p1+p2)\r\n plt.plot([p1[0], p2[0]], [p1[1], p2[1]], color='k', linestyle='-', linewidth=2)\r\n plt.plot([0, c[0]], [0, c[1]], color='k', linestyle='-', linewidth=1)\r\n\r\n\r\n\r\n ax=plt.gca()\r\n ax.set_xlim([-1.2, 1.32])\r\n ax.set_ylim([-0.7,1.3])\r\n\r\n ax.text(0.8, -0.6, 'Bipolar')\r\n ax.text(-1.1, -0.6, 'Healthy')\r\n ax.text(-0.15, 1.05, 'Borderline')\r\n\r\n\r\n data=[[pointsX[i], pointsY[i]] for i in range(len(pointsX))]\r\n\r\n H, xedges, yedges=np.histogram2d(pointsX,pointsY,bins=40,normed=True)\r\n norm=H.sum()\r\n contour1=0.75\r\n target1=norm*contour1\r\n def objective(limit, target):\r\n w=np.where(H>limit)\r\n count=H[w]\r\n return count.sum()-target\r\n\r\n level1=scipy.optimize.bisect(objective, H.min(), H.max(), args=(target1,))\r\n levels=[level1]\r\n\r\n data=np.array(data)\r\n #plt.scatter(np.array(pointsX), np.array(pointsY))\r\n sns.kdeplot(np.array(pointsX), np.array(pointsY), shade=True, ax=ax)\r\n sns.kdeplot(np.array(pointsX), np.array(pointsY), n_levels=3, ax=ax, cmap=\"Reds\")\r\n plt.show()", "title": "" }, { "docid": "9ed95984ffb2dba87d4beae82512ced6", "score": "0.4669756", "text": "def det_anomaly_score(actual, predicted, dimension):\n actual = grouper(dimension, actual[0])\n predicted = grouper(dimension, predicted[0])\n n_seq = len(actual)\n the_sum = 0\n for item in list(zip(actual, predicted)):\n diff = [a_i - b_i for a_i, b_i in zip(item[0], item[1])]\n the_sum += np.linalg.norm(diff, 2)\n return((1 / n_seq) * the_sum)", "title": "" }, { "docid": "85df212172129682af7cf4a0c6090630", "score": "0.46675202", "text": "def py_cpu_nms(dets, probs, thresh):\n # [cx, cy, w, h]\n x1 = dets[:, 0] - dets[:, 2]/2.0\n y1 = dets[:, 1] - dets[:, 3]/2.0\n x2 = dets[:, 0] + dets[:, 2]/2.0\n y2 = dets[:, 1] + dets[:, 3]/2.0\n \n areas = dets[:, 2] * dets[:, 3]\n order = probs.argsort()[::-1]\n keep = []\n keep_ = [False]*len(order)\n while order.size > 0:\n i = order[0]\n # keep.append(i)\n keep_[i] = True\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n \n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n 
inter = w * h\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n inds = np.where(ovr < thresh)[0]\n order = order[inds + 1]\n return keep_", "title": "" }, { "docid": "0e54e859951e87e95c8ca58597d2002a", "score": "0.46670318", "text": "def cal_HDI(self):\n\n df_edge_list_reverse = pd.DataFrame()\n df_edge_list_reverse['source'] = self.df_edge_list['target']\n df_edge_list_reverse['target'] = self.df_edge_list['source']\n # print df_edge_list_reverse\n\n df_all_nodes_pair = pd.concat([df_edge_list_reverse, self.df_edge_list])\n df_neighbor_count = df_all_nodes_pair.groupby(['source']).count()\n df_neighbor_count = df_neighbor_count.reset_index()\n df_neighbor_count.rename(columns={'target': 'nei_count'}, inplace=True)\n\n \"\"\"\n get common neighbours\n \"\"\"\n\n df_common_neighbor = pd.merge(df_all_nodes_pair, df_all_nodes_pair, on=['target'], how='left').dropna()\n df_common_neighbor = df_common_neighbor[df_common_neighbor['source_x'] != df_common_neighbor['source_y']]\n\n df_common_neighbor_count = df_common_neighbor.groupby(['source_x', 'source_y']).count()\n df_common_neighbor_count = df_common_neighbor_count.reset_index()\n # print df_common_neighbor_count\n\n df_common_neighbor_count.rename(columns={'target': 'CN'}, inplace=True)\n\n df_common_neighbor_with_total_neighbor = pd.merge(df_common_neighbor_count, df_neighbor_count,\n left_on=['source_x'], right_on=['source'], how='left')\n\n df_common_neighbor_with_total_neighbor = df_common_neighbor_with_total_neighbor[\n ['source_x', 'source_y', 'CN', 'nei_count']]\n df_common_neighbor_with_total_neighbor = pd.merge(df_common_neighbor_with_total_neighbor, df_neighbor_count,\n left_on=['source_y'], right_on=['source'], how='left')\n\n df_common_neighbor_with_total_neighbor['nei_min'] = df_common_neighbor_with_total_neighbor[\n ['nei_count_x', 'nei_count_y']].max(axis=1)\n # print df_common_neighbor_with_total_neighbor.head()\n\n df_common_neighbor_with_total_neighbor['similarity'] = df_common_neighbor_with_total_neighbor['CN'] / \\\n df_common_neighbor_with_total_neighbor['nei_min']\n\n df_HDI_list = df_common_neighbor_with_total_neighbor[['source_x', 'source_y', 'similarity']].copy()\n\n df_HDI_list.rename(columns={'source_x': 'source', 'source_y': 'target'}, inplace=True)\n\n # print df_HDI_list.head()\n return df_HDI_list", "title": "" }, { "docid": "a6d0febca310972f4d694ee6e14aaeea", "score": "0.46629694", "text": "def label_region(img_size, img_idx, l_stat, c_alpha):\n\n img_sub = np.unravel_index(img_idx, img_size)\n img_tp = np.zeros(shape=img_size)\n img_tp[img_sub] = l_stat\n img_tp[img_tp <= c_alpha] = 0\n img_tp[img_tp > c_alpha] = 1\n\n def label_region_1d(label_img):\n \"\"\"\n Label connected regions and return the corresponding maximum area.\n\n Args:\n label_img (vector): vector or matrix after thresholding\n \"\"\"\n\n idx_roi = np.where(label_img == 1)\n run = []\n group = [run]\n expect = None\n for v in idx_roi:\n if (v == expect) or (expect is None):\n run.append(v)\n else:\n run = [v]\n group.append(run)\n expect = v + 1\n\n cluster_len = np.zeros(len(group))\n for k in range(len(group)):\n cluster_len[k] = len(group[k])\n max_cluster_len = np.max(cluster_len)\n\n return max_cluster_len\n\n def label_region_nd(label_img):\n \"\"\"\n Label connected regions and return the corresponding maximum area.\n\n Args:\n label_img (matrix): vector or matrix after thresholding\n \"\"\"\n\n group = regionprops(label_img)\n cluster_area = np.zeros(len(group))\n for k in range(len(group)):\n cluster_area[k] = group[k].area\n 
max_cluster_area = np.max(cluster_area)\n\n return max_cluster_area\n\n if img_size[0] == 1:\n max_area = label_region_1d(img_tp)\n else:\n max_area = label_region_nd(img_tp)\n\n return max_area", "title": "" }, { "docid": "818d0f2658adda9cb543fd52fe030b14", "score": "0.46628585", "text": "def cal_RA(self):\n df_edge_list_reverse = pd.DataFrame()\n df_edge_list_reverse['source'] = self.df_edge_list['target']\n df_edge_list_reverse['target'] = self.df_edge_list['source']\n\n df_all_nodes_pair = pd.concat([df_edge_list_reverse, self.df_edge_list])\n df_neighbor_count = df_all_nodes_pair.groupby(['source']).count()\n\n df_neighbor_count = df_neighbor_count.reset_index()\n df_neighbor_count.rename(columns={'target': 'count'}, inplace=True)\n \"\"\"\n get common neighbours\n \"\"\"\n\n df_common_neighbor = pd.merge(df_all_nodes_pair, df_all_nodes_pair, on=['target'], how='left').dropna()\n df_common_neighbor = df_common_neighbor[df_common_neighbor['source_x'] != df_common_neighbor['source_y']]\n\n df_common_neighbor = pd.merge(df_common_neighbor, df_neighbor_count, left_on=['target'], right_on=['source'],\n how='left').dropna()\n\n df_common_neighbor = df_common_neighbor[['source_x', 'source_y', 'count']]\n df_common_neighbor['count'] = df_common_neighbor['count'].map(lambda x: 1.0 / x)\n\n df_RA_list = df_common_neighbor.groupby(['source_x', 'source_y']).sum()\n df_RA_list = df_RA_list.reset_index()\n\n df_RA_list.rename(columns={'source_x': 'source', 'source_y': 'target', 'count': 'similarity'}, inplace=True)\n print(df_RA_list.head(10))\n return df_RA_list", "title": "" }, { "docid": "3c33d484f8cbffbd6104a31eef9c5107", "score": "0.46618295", "text": "def lengthPerArea(self, _x1, _x2 = 0.5):\r\n if _x2 < _x1:\r\n _temp = _x2\r\n _x2 = _x1\r\n _x1 = _temp\r\n \r\n if self.semiMajor0 == self.semiMajor1:\r\n semiMajor = self.semiMajor0\r\n if self.semiMinor0 == self.semiMinor1:\r\n # cylinder\r\n semiMinor = self.semiMinor0\r\n coneFact = _x2 - _x1\r\n else:\r\n # semi-minor axis is changing, semi-major is constant\r\n semiMinor = 0.5 * (self.semiMinor0 + self.semiMinor0)\r\n minorRatio = semiMinor / (self.semiMinor1 - self.semiMinor0)\r\n coneFact = minorRatio * \\\r\n scipy.log( (minorRatio + _x2 - 0.5) / (minorRatio + _x1 - 0.5) )\r\n if coneFact < 0:\r\n coneFact = -coneFact\r\n else:\r\n semiMajor = 0.5 * (self.semiMajor0 + self.semiMajor0)\r\n majorRatio = semiMajor / (self.semiMajor1 - self.semiMajor0)\r\n if self.semiMinor0 == self.semiMinor1:\r\n # semi-major axis is changing, semi-minor is constant\r\n semiMinor = self.semiMinor0\r\n coneFact = majorRatio * \\\r\n scipy.log( (majorRatio + _x2 - 0.5) / (majorRatio + _x1 - 0.5) )\r\n if coneFact < 0:\r\n coneFact = -coneFact\r\n else:\r\n semiMinor = 0.5 * (self.semiMinor0 + self.semiMinor0)\r\n minorRatio = semiMinor / (self.semiMinor1 - self.semiMinor0)\r\n ratioProd = majorRatio * minorRatio\r\n avgRatio = 0.5 * (minorRatio + majorRatio)\r\n scale1 = (_x1 - 0.5) / ratioProd + avgRatio\r\n scale2 = (_x2 - 0.5) / ratioProd + avgRatio\r\n \r\n if avgRatio > 1.0:\r\n # answer in terms of logs\r\n ratioRoot = scipy.sqrt(avgRatio**2 - 1.0)\r\n scale1 /= ratioRoot\r\n scale2 /= ratioRoot\r\n coneFact = 0.5 * ratioProd / ratioRoot * \\\r\n scipy.log((scale2 - 1) * (scale1 + 1) / ((scale2 + 1) * scale1 - 1))\r\n elif avgRatio < 1.0:\r\n # answer in terms of arctan\r\n ratioRoot = scipy.sqrt(1.0 - avgRatio**2)\r\n scale1 /= ratioRoot\r\n scale2 /= ratioRoot\r\n coneFact = ratioProd / ratioRoot * \\\r\n (scipy.arctan(scale2) - 
scipy.arctan(scale1))\r\n else:\r\n # answer in terms of 1/x\r\n coneFact = ratioProd / scale1 - ratioProd / scale2\r\n \r\n return coneFact * self.length / (scipy.pi * semiMajor * semiMinor)", "title": "" }, { "docid": "4ee87041b063f88158fba157dcd42505", "score": "0.46573743", "text": "def plot_domain_frequency_transitions(self, pct: bool=False):\n return functools.reduce(\n operator.add,\n (\n self.plot_cpu_frequency_transitions(\n cpu=domain[0],\n domain_label=True,\n pct=pct,\n )\n for domain in self.trace.plat_info['freq-domains']\n )\n ).cols(1)", "title": "" }, { "docid": "ae2bd2891bc00720ae57fdb126dc92c0", "score": "0.46563694", "text": "def averageClustering(self):\r\n total = 0\r\n for i in self.graphDict:\r\n ''' calculate the number of the pairs of neighbors of node i which are adiacents '''\r\n neighs = len(self.graphDict[i])\r\n pairsNeigh = ( neighs * (neighs-1) )/2 # all pairs\r\n ''' check how many nehgbors of node i have a connection '''\r\n triangles = 0\r\n for j in self.graphDict[i]:\r\n for k in self.graphDict[i]:\r\n if k in self.graphDict[j]:\r\n triangles += 1\r\n if pairsNeigh > 0:\r\n total += float(triangles)/pairsNeigh\r\n \r\n return float(total)/self.numOfVertices()", "title": "" }, { "docid": "c70236f3df7318741c52edc077a09588", "score": "0.46499154", "text": "def nms(self, dets, scores):\r\n x1 = dets[:, 0] #xmin\r\n y1 = dets[:, 1] #ymin\r\n x2 = dets[:, 2] #xmax\r\n y2 = dets[:, 3] #ymax\r\n\r\n areas = (x2 - x1) * (y2 - y1) # the size of bbox\r\n order = scores.argsort()[::-1] # sort bounding boxes by decreasing order\r\n\r\n keep = [] # store the final bounding boxes\r\n while order.size > 0:\r\n i = order[0] #the index of the bbox with highest confidence\r\n keep.append(i) #save it to keep\r\n xx1 = np.maximum(x1[i], x1[order[1:]])\r\n yy1 = np.maximum(y1[i], y1[order[1:]])\r\n xx2 = np.minimum(x2[i], x2[order[1:]])\r\n yy2 = np.minimum(y2[i], y2[order[1:]])\r\n\r\n w = np.maximum(1e-28, xx2 - xx1)\r\n h = np.maximum(1e-28, yy2 - yy1)\r\n inter = w * h\r\n\r\n # Cross Area / (bbox + particular area - Cross Area)\r\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\r\n #reserve all the boundingbox whose ovr less than thresh\r\n inds = np.where(ovr <= self.nms_thresh)[0]\r\n order = order[inds + 1]\r\n\r\n return keep", "title": "" }, { "docid": "30b9327dbd2f94d5fb4364901a9416e5", "score": "0.46436828", "text": "def calculate_lb_score_per_taxa(\n self,\n avg_PDis:list,\n avg_dist:float\n ) -> list:\n LBis = []\n for PDi in avg_PDis:\n try:\n LBis.append((((PDi/avg_dist)-1)*100))\n except ZeroDivisionError:\n try:\n print(\"Invalid tree. 
Tree should contain branch lengths\")\n sys.exit()\n except BrokenPipeError:\n pass\n \n return LBis", "title": "" }, { "docid": "7061c4647e3718b65ce6ee85b1c43a97", "score": "0.4640851", "text": "def select_node_counts(self):\n self.discretize_perimeter()\n\n # Precalculate density fields along perimeter:\n\n (left_i,left_i_rigid),(right_i,right_i_rigid)=self.calculate_coord_count(self.angle_to_segments[0],\n self.angle_to_segments[180],\n self.perimeter_scales[:,0])\n (left_j,left_j_rigid),(right_j,right_j_rigid)=self.calculate_coord_count(self.angle_to_segments[90],\n self.angle_to_segments[270],\n self.perimeter_scales[:,1])\n\n # Necessary to have order match grid order below\n left_i=left_i[::-1]\n left_i_rigid=left_i_rigid[::-1]\n right_i=right_i[::-1]\n right_i_rigid=right_i_rigid[::-1]\n\n self.left_i=left_i\n self.left_i_rigid=left_i_rigid\n self.left_j=left_j\n self.left_j_rigid=left_j_rigid\n \n self.right_i=right_i\n self.right_i_rigid=right_i_rigid\n self.right_j=right_j\n self.right_j_rigid=right_j_rigid", "title": "" }, { "docid": "31dbe07890d971688a66715d505c2865", "score": "0.46408102", "text": "def precision(database_code, database_labels, validation_code, validation_labels, Rs, dist_type='hamming'):\n assert set(np.unique(database_code).tolist()) == set([-1, 1])\n assert set(np.unique(validation_code).tolist()) == set([-1, 1])\n assert len(database_labels.shape) == 2\n assert len(validation_labels.shape) == 2\n \n query_num = validation_code.shape[0]\n \n if dist_type == 'hamming':\n dist = calc_hammingDist(database_code, validation_code)\n ids = np.argsort(dist, axis=0)\n elif dist_type == 'cosine':\n sim = np.dot(database_code, validation_code.T)\n ids = np.argsort(-sim, axis=0)\n else:\n raise Exception('Unsupported distance type: {}'.format(dist_type))\n \n \n APx = {R: [] for R in Rs}\n\n for i in tqdm(range(query_num)):\n label = validation_labels[i]\n idx = ids[:, i]\n imatch = (np.dot(database_labels[idx, :], label) > 0).astype(np.int)\n for R in Rs:\n relevant_num = np.sum(imatch[:R])\n if relevant_num != 0:\n APx[R].append(float(relevant_num) / R)\n \n #Compute 2 types of precisions: one ignores 0-relevant and one includes 0-relevant\n return {R: (np.mean(np.array(APxR)), np.sum(np.array(APxR)) / query_num) for (R, APxR) in APx.items()}", "title": "" } ]
d47023e0d7e2286a238bfd7f0b51b634
Computes the mean area of all labeled objects and takes the square root. Gives a length in number of pixels. To get a length in physical units, multiply the output of this function by the physical pixel size.
[ { "docid": "cb1160591f9c92c2a9ec269dca24c420", "score": "0.752876", "text": "def mean_length_scale(object_labels):\n objects_area = _get_objects_area(object_labels=object_labels)\n return np.sqrt(np.mean(objects_area))", "title": "" } ]
[ { "docid": "56fa449ff4f5652e6c489edb3c0794f9", "score": "0.715483", "text": "def mean_perimeter_length(object_labels):\n objects_perim = _get_objects_property(\n object_labels=object_labels, property_name=\"perimeter\"\n )\n return np.mean(objects_perim)", "title": "" }, { "docid": "3ce44fed37196cdb2cf26284180c236b", "score": "0.6749016", "text": "def compute_area(self):\n return self.width * self.height", "title": "" }, { "docid": "b0e91e3c5913262701b6339a9a346b4c", "score": "0.66430056", "text": "def square_area(length: float) -> float:\n return length ** 2", "title": "" }, { "docid": "0196d23ab14b9653074373e9051de9d7", "score": "0.66395944", "text": "def area_of_my_square(self):\n return self.width * self.height", "title": "" }, { "docid": "11cbdd02d85c0f871e4d16a1b3a815ee", "score": "0.6614885", "text": "def compute_mean_size_object(image_labelled):\n # check parameters\n stack.check_array(image_labelled,\n ndim=2,\n dtype=[np.uint8, np.uint16, np.int64])\n\n # compute properties of the segmented object\n props = regionprops(image_labelled)\n\n # get equivalent diameter and average it\n diameter = []\n for prop in props:\n diameter.append(prop.equivalent_diameter)\n mean_diameter = np.mean(diameter)\n\n return mean_diameter", "title": "" }, { "docid": "8df7875fd6359fa785adcfc68d5ccc88", "score": "0.66104954", "text": "def area(self):\n return (self.__size)**2", "title": "" }, { "docid": "cc560509d171f28ad9169598ae18015b", "score": "0.65229714", "text": "def area(width, height):\n return width * height + 1", "title": "" }, { "docid": "1d5bc0284883708ab618585aaa8493f4", "score": "0.6492608", "text": "def area(self):\n return self.width * self.width", "title": "" }, { "docid": "51d0384899c304888407a75e294fd2f1", "score": "0.64796484", "text": "def area(self):\n from Drawables.Point import Point\n lengths = list()\n prev = self.vertices[-1]\n for cur in self.vertices:\n l = Point.distanceTo(prev, point=cur)\n lengths.append(l)\n prev = cur\n lengths = np.array(lengths)\n s = np.sum(lengths) / 2\n A = s * np.prod(s - lengths)\n return float(A ** 0.5)", "title": "" }, { "docid": "1801b407a6bf1fefab177058b19b807b", "score": "0.6464848", "text": "def computeArea(self):\n a = self.line1.computeLength()\n b = self.line2.computeLength()\n c = self.line3.computeLength()\n s = (a+b+c)/2\n return math.sqrt(s*(s-a)*(s-b)*(s-c))", "title": "" }, { "docid": "1eaf0a0501096bc1a5e448b66d080a56", "score": "0.64603716", "text": "def area(self):\n return self.width * self.height", "title": "" }, { "docid": "1eaf0a0501096bc1a5e448b66d080a56", "score": "0.64603716", "text": "def area(self):\n return self.width * self.height", "title": "" }, { "docid": "1eaf0a0501096bc1a5e448b66d080a56", "score": "0.64603716", "text": "def area(self):\n return self.width * self.height", "title": "" }, { "docid": "b4fa79c887aca93e554ff1ffadcc0453", "score": "0.6443007", "text": "def rectangle_area(length: float, width: float) -> float:\n return length * width", "title": "" }, { "docid": "cdc9f85756cc22562acb072b27668909", "score": "0.64401346", "text": "def area(self) -> int:\n return self.width * self.height", "title": "" }, { "docid": "a58e3931987230e2232b190d5d1188a3", "score": "0.6432257", "text": "def area(self):\n return self.__size ** 2", "title": "" }, { "docid": "a58e3931987230e2232b190d5d1188a3", "score": "0.6432257", "text": "def area(self):\n return self.__size ** 2", "title": "" }, { "docid": "a58e3931987230e2232b190d5d1188a3", "score": "0.6432257", "text": "def area(self):\n return self.__size ** 2", "title": "" 
}, { "docid": "a58e3931987230e2232b190d5d1188a3", "score": "0.6432257", "text": "def area(self):\n return self.__size ** 2", "title": "" }, { "docid": "a58e3931987230e2232b190d5d1188a3", "score": "0.6432257", "text": "def area(self):\n return self.__size ** 2", "title": "" }, { "docid": "a58e3931987230e2232b190d5d1188a3", "score": "0.6432257", "text": "def area(self):\n return self.__size ** 2", "title": "" }, { "docid": "a58e3931987230e2232b190d5d1188a3", "score": "0.6432257", "text": "def area(self):\n return self.__size ** 2", "title": "" }, { "docid": "a58e3931987230e2232b190d5d1188a3", "score": "0.6432257", "text": "def area(self):\n return self.__size ** 2", "title": "" }, { "docid": "a58e3931987230e2232b190d5d1188a3", "score": "0.6432257", "text": "def area(self):\n return self.__size ** 2", "title": "" }, { "docid": "3b9098684aebbe1f418405bcb5eda92b", "score": "0.64302576", "text": "def area(self):\n return (self.__size ** 2)", "title": "" }, { "docid": "3b9098684aebbe1f418405bcb5eda92b", "score": "0.64302576", "text": "def area(self):\n return (self.__size ** 2)", "title": "" }, { "docid": "3b9098684aebbe1f418405bcb5eda92b", "score": "0.64302576", "text": "def area(self):\n return (self.__size ** 2)", "title": "" }, { "docid": "3b9098684aebbe1f418405bcb5eda92b", "score": "0.64302576", "text": "def area(self):\n return (self.__size ** 2)", "title": "" }, { "docid": "4fe87dff825c2594165dfe170175e4a8", "score": "0.64300156", "text": "def area(self):\n return self.width ** 2", "title": "" }, { "docid": "e219e8ae69bb4c3b7b2e3d132229ecbc", "score": "0.6426745", "text": "def area(self):\n return self.__size * self.__size", "title": "" }, { "docid": "e219e8ae69bb4c3b7b2e3d132229ecbc", "score": "0.6426745", "text": "def area(self):\n return self.__size * self.__size", "title": "" }, { "docid": "e219e8ae69bb4c3b7b2e3d132229ecbc", "score": "0.6426745", "text": "def area(self):\n return self.__size * self.__size", "title": "" }, { "docid": "e219e8ae69bb4c3b7b2e3d132229ecbc", "score": "0.6426745", "text": "def area(self):\n return self.__size * self.__size", "title": "" }, { "docid": "75e1dd077d4e7aff85a5c3511a516f17", "score": "0.6423921", "text": "def area(self):\n return self._size * self._size", "title": "" }, { "docid": "b0ef9559d9ea42bd81330a4f90c6e934", "score": "0.6401423", "text": "def area(self):\n return (self.__size * self.__size)", "title": "" }, { "docid": "b0ef9559d9ea42bd81330a4f90c6e934", "score": "0.6401423", "text": "def area(self):\n return (self.__size * self.__size)", "title": "" }, { "docid": "b0ef9559d9ea42bd81330a4f90c6e934", "score": "0.6401423", "text": "def area(self):\n return (self.__size * self.__size)", "title": "" }, { "docid": "77761e642e451ab70710b124f8d31635", "score": "0.63924414", "text": "def area(self):\n return self.__width * self.__height", "title": "" }, { "docid": "77761e642e451ab70710b124f8d31635", "score": "0.63924414", "text": "def area(self):\n return self.__width * self.__height", "title": "" }, { "docid": "77761e642e451ab70710b124f8d31635", "score": "0.63924414", "text": "def area(self):\n return self.__width * self.__height", "title": "" }, { "docid": "77761e642e451ab70710b124f8d31635", "score": "0.63924414", "text": "def area(self):\n return self.__width * self.__height", "title": "" }, { "docid": "77761e642e451ab70710b124f8d31635", "score": "0.63924414", "text": "def area(self):\n return self.__width * self.__height", "title": "" }, { "docid": "77761e642e451ab70710b124f8d31635", "score": "0.63924414", "text": "def area(self):\n return 
self.__width * self.__height", "title": "" }, { "docid": "77761e642e451ab70710b124f8d31635", "score": "0.63924414", "text": "def area(self):\n return self.__width * self.__height", "title": "" }, { "docid": "77761e642e451ab70710b124f8d31635", "score": "0.63924414", "text": "def area(self):\n return self.__width * self.__height", "title": "" }, { "docid": "77761e642e451ab70710b124f8d31635", "score": "0.63924414", "text": "def area(self):\n return self.__width * self.__height", "title": "" }, { "docid": "77761e642e451ab70710b124f8d31635", "score": "0.63924414", "text": "def area(self):\n return self.__width * self.__height", "title": "" }, { "docid": "9fb1ca34be55053b492d073a6c584e39", "score": "0.63843477", "text": "def area(self):\n return self.__size**2", "title": "" }, { "docid": "9fb1ca34be55053b492d073a6c584e39", "score": "0.63843477", "text": "def area(self):\n return self.__size**2", "title": "" }, { "docid": "87ccea6cb972da63e45e4bbeda979184", "score": "0.63697034", "text": "def area(self):\n return (self.height * self.width)", "title": "" }, { "docid": "9d11b3418b79f9f25fe3d2dcb94b6d9d", "score": "0.6369675", "text": "def area(self):\n area = self.__size ** 2\n return area", "title": "" }, { "docid": "67fdd7965639e463769d428fce9adfc9", "score": "0.63323814", "text": "def area(self):\n return (self.__width * self.__height)", "title": "" }, { "docid": "7c781e1650030569da06c399e0169dcd", "score": "0.6330832", "text": "def area(self):\n return self.height * self.width", "title": "" }, { "docid": "7a6e0abfeb7b3f3a4479adef81f9fb57", "score": "0.6281324", "text": "def area(self):\n area = 0\n for ind in xrange(-1, len(self.vertices) - 1):\n pi = self.vertices[ind]\n pii = self.vertices[ind + 1]\n area += pi[0]*pii[1] - pii[0]*pi[1]\n return simplify(area) / 2", "title": "" }, { "docid": "a73ccf671b262d030d3c86557df91251", "score": "0.6265703", "text": "def area(self):\n\n return self.__size ** 2", "title": "" }, { "docid": "a73ccf671b262d030d3c86557df91251", "score": "0.6265703", "text": "def area(self):\n\n return self.__size ** 2", "title": "" }, { "docid": "21f6d912851a755c4d2d57240e3be65a", "score": "0.62395215", "text": "def area(self):\n\n return self.__width * self.__height", "title": "" }, { "docid": "21f6d912851a755c4d2d57240e3be65a", "score": "0.62395215", "text": "def area(self):\n\n return self.__width * self.__height", "title": "" }, { "docid": "49568ad9490c00f6d412fb50383a5ffe", "score": "0.62114555", "text": "def areaRect(length,width):\n return length * width", "title": "" }, { "docid": "1b9781b643821fd01eb67ca91cf09725", "score": "0.61661184", "text": "def getArea(self):\n\n return self.getLengthX() * self.getLengthY()", "title": "" }, { "docid": "697ea12818fc747f8739fd22a31e8f6c", "score": "0.6122307", "text": "def area(self):\n return self.w * self.h", "title": "" }, { "docid": "7387164449584327da1c419c52563ed5", "score": "0.61129534", "text": "def area(self):\n result = 0\n for f in self.split_to_triangles():\n p = f.perimeter / 2.0\n result += abs(math.sqrt(p * (p - f.a) * (p - f.b) * (p - f.c)))\n return result", "title": "" }, { "docid": "94648ee03fdb67dc6e1fc7c9e38ffd22", "score": "0.61019933", "text": "def primitive_area(prim: hou.Prim) -> float:\n return prim.intrinsicValue(\"measuredarea\")", "title": "" }, { "docid": "f6a549f42ac7326e8637b009d45e4d6a", "score": "0.60971195", "text": "def area(bbox: Box2d) -> float:\n return bbox.area", "title": "" }, { "docid": "d3be8297dad7108d1e6b1d1d2c6135aa", "score": "0.6076301", "text": "def test_house_size():\n houses = 
thh.house(10, cfg.SENIOR_WEIGHT, house_size=3)\n mean = np.mean([len(home) for home in houses])\n\n assert mean == 3", "title": "" }, { "docid": "b86a2fa45e475d17d09d2ef7387fda52", "score": "0.60216737", "text": "def calculate_area(data):\n data = np.array(data)\n\n if len(data.shape) == 1:\n data = np.reshape(data, (-1, 3))\n\n width = min(data[:, 0]) - max(data[:, 0])\n height = min(data[:, 1]) - max(data[:, 1])\n\n return np.abs(width * height)", "title": "" }, { "docid": "51757e280c1e8ceaeff0ce46cf956edb", "score": "0.6021363", "text": "def area(self):\n area = self.areaNormals()[0]\n return area.sum()", "title": "" }, { "docid": "8ffbc752d0e5f383218530d22d1de606", "score": "0.59885675", "text": "def area(self):\n return math.pi * self._shape_params[\"radius\"] ** 2", "title": "" }, { "docid": "ce6a212b0219135551462372492abe5c", "score": "0.5978771", "text": "def area(self) -> float:\n s = self.perimeter() / 2\n return sqrt(s * (s - self.a) * (s - self.b) * (s - self.c))", "title": "" }, { "docid": "66821f5448621507a285770b68ba0794", "score": "0.5974613", "text": "def get_area(self):\n area = 0.5 * (math.pi - math.sqrt(3)) * (self.width ** 2)\n return area", "title": "" }, { "docid": "99f8504cbf92f1c12e26d6add334049e", "score": "0.5972971", "text": "def area(window: list) -> float:\n area = 0\n for i in range(len(window) - 1):\n area += (window[i+1] + window[i]) / 2\n\n return area", "title": "" }, { "docid": "fdec6e7db10a0bbf3eacbc17229c8171", "score": "0.5964923", "text": "def total_dimension(self):\n return self._width * self._height", "title": "" }, { "docid": "c93fe036e53f6c109bf2bab20519f21e", "score": "0.59429264", "text": "def area_of_rectangle(length, width = None):\n \n if width == None:\n width = length\n \n return length * width", "title": "" }, { "docid": "aef2568ad1064a5c3c43360143b78c5d", "score": "0.59407866", "text": "def cnt_area(cnt):\n return cv2.moments(cnt)[\"m00\"]", "title": "" }, { "docid": "bc9dae298a6e25a3985b2157c4aee72a", "score": "0.5907465", "text": "def average_length(snakes):\n return sum([snake.length() for snake in snakes]) / len(snakes)", "title": "" }, { "docid": "f278b30396695f4ed3b2348731111ee9", "score": "0.5904704", "text": "def __len__(self):\n\t\treturn self.width * self.height", "title": "" }, { "docid": "37a0cbddd5580fe79bb25b405c867a09", "score": "0.58689845", "text": "def area_square(side_length: float) -> float:\n if side_length < 0:\n raise ValueError(\"area_square() only accepts non-negative values\")\n return side_length ** 2", "title": "" }, { "docid": "df1e1370d607b8edbca6b3f22cbf9a79", "score": "0.5847009", "text": "def area(self):\n return self.bounding_box.area()", "title": "" }, { "docid": "f7fbf53867dc5f5332663fe1defe1c14", "score": "0.58083177", "text": "def _count_mean(coords):\n left_top, right_bottom = _bounding_box(coords)\n mean_x = (left_top[0]+right_bottom[0])/2\n mean_y = (left_top[1]+right_bottom[1])/2\n return (mean_x,mean_y)", "title": "" }, { "docid": "5d3a8f1e6430a7f5f9fbc3c0f72ddfea", "score": "0.5804078", "text": "def _count_mean(self) -> None:\n self._mean = self._sumArea / len(self._areaFeat)", "title": "" }, { "docid": "46edea3320b90e090b783407d380cf15", "score": "0.57812524", "text": "def rectangle_area(length,breadth):\n return length*breadth", "title": "" }, { "docid": "aea84041d096f43e74ccbc5f56afbb36", "score": "0.57809865", "text": "def _compute_area(self):\n y, z = self.pts[:,0], self.pts[:,1]\n a = 0.5 * np.sum(y[:-1]*z[1:] - y[1:]*z[:-1])\n\n return abs(a)", "title": "" }, { "docid": 
"f401c9b2c98ea90b135f4526ef123ee5", "score": "0.5771206", "text": "def area(self):\n return self._base * self._height", "title": "" }, { "docid": "6bb06fa093f0bcb8510c45c3968119c2", "score": "0.57652247", "text": "def area(self):\n return math.pi * self.radius ** 2", "title": "" }, { "docid": "cd894825d7a0326e3a99970311410566", "score": "0.5762593", "text": "def area(self): \n major,minor,alpha = self.ellipse()\n return math.pi*major*minor", "title": "" }, { "docid": "0efa6459d5805eb4ed3f462f39a10daa", "score": "0.57512975", "text": "def area(self):\n return float()", "title": "" }, { "docid": "3eefee3aded7f3b04f784635d1e84634", "score": "0.57471055", "text": "def calculate_area(self):\n return 3.14 * self.__radius * self.__radius", "title": "" }, { "docid": "53ac28a0c9fa4b476ac90411c5980879", "score": "0.5745936", "text": "def calculate_area(radius):\n return radius ** 2 * pi", "title": "" }, { "docid": "d0ad210350d58dfaa4540c951b9e9777", "score": "0.5745788", "text": "def area(self):\n p=self.__perimeter()\n radius=p/2.0/math.pi\n return math.pi*radius ** 2", "title": "" }, { "docid": "363fca4558b7fa10aeebc29b144b497a", "score": "0.5734773", "text": "def pixsize(shape, wcs):\n\treturn area(shape, wcs)/np.prod(shape[-2:])", "title": "" }, { "docid": "8412479ab38bec187f45edab7d336c47", "score": "0.573467", "text": "def area(self):\n return 0.5*self.base*self.height", "title": "" }, { "docid": "774fb0e82651500fef124fb311a6c604", "score": "0.57335734", "text": "def _calculate_square_feet(self, present: Present) -> int:\n return present.total_area + min(\n present.length_wise_area, present.width_wise_area, present.height_wise_area\n )", "title": "" }, { "docid": "3951b9635c3901c7555e5782ab234c82", "score": "0.5726867", "text": "def rectangle_area(length, breadth):\n return length * breadth", "title": "" }, { "docid": "3afed942998a0ad84ac711965630b180", "score": "0.5718508", "text": "def pix_occupancy(img):\n\n if img.ptp() > 1.0:\n return np.sum(normalize(img))/(img.shape[0]*img.shape[1])\n else:\n return np.sum(img)/(img.shape[0]*img.shape[1])", "title": "" }, { "docid": "421f36a01eb7a7f37eedfbb320ab38d2", "score": "0.571371", "text": "def box_area(boxes: Tensor) -> Tensor:\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "title": "" }, { "docid": "1837cba42932dd3e3e132940dc142a35", "score": "0.571262", "text": "def width(self):\n x, y = self.example_list[0]\n return len(x)", "title": "" }, { "docid": "6f050f33dbfc5ad985765dcd2f35ad5f", "score": "0.57126087", "text": "def length(a) :\n r_sq = 0.0\n for each in a :\n r_sq += each ** 2\r\n return math.sqrt(r_sq)", "title": "" }, { "docid": "4802a1155a60d454784e9d025fa17f16", "score": "0.57115567", "text": "def area(self):\n p = 0.5 * (self._side1 + self._side2 + self._side3)\n area_2 = p * (p - self._side1) * (p - self._side2) * (p - self._side3) \n return math.sqrt(area_2)", "title": "" }, { "docid": "267941f34c186e25fb28fecd782bfa4a", "score": "0.5707024", "text": "def area(self):\r\n import math\r\n return math.pi*(self.radius**2)", "title": "" }, { "docid": "a67982d0796f634602b0412ceaca925f", "score": "0.57030284", "text": "def __len__(self):\n return self.box_width ** 2 * self.box_height ** 2", "title": "" } ]
dc4acd4f73f049a04aa09ecf11788814
Get the name of the environment either from current conda environment, from remote directory, or inferred from git repo.
[ { "docid": "f789b3b2d6563fa62e592b2215c5cb76", "score": "0.7944121", "text": "def get_env_name(infer: bool = False) -> str:\n # pylint: disable=redefined-outer-name\n if not infer:\n return get_active_conda_env_name()\n try:\n remote_dir = infer_remote_dir()\n history = EnvIO(env_directory=remote_dir).get_history()\n return history.name\n except CondaEnvTrackerHistoryNotFoundError as err:\n raise CondaEnvTrackerHistoryNotFoundError(\n f\"Cannot infer name from history, often resolved by passing the name argument. Full error: {str(err)}\"\n )", "title": "" } ]
[ { "docid": "09e74824f0c88194610f3ef561ad4b14", "score": "0.7596804", "text": "def get_conda_env_path(conn: fabric.Connection, envname: str = 'base') -> pathlib.Path:\n stream = io.StringIO()\n conn.run('conda info --envs --json', replace_env=False, out_stream=stream)\n info = json.loads(stream.getvalue())\n return next(iter(p for p in (pathlib.Path(s) for s in info['envs']) if p.name == envname))", "title": "" }, { "docid": "36476d535564412f8f7170704174b69c", "score": "0.68478495", "text": "def environment(self) -> str:\n return pulumi.get(self, \"environment\")", "title": "" }, { "docid": "00dd702087c0b52f8a7481a2ffc3aac8", "score": "0.683109", "text": "def get_env():\n if 'test' in basedir:\n return 'test'\n if 'revmic' in basedir:\n return 'prod'\n if 'michael' in basedir:\n return 'dev'\n return 'default'", "title": "" }, { "docid": "ff18d946107a03e6c7160f7a820b1ab2", "score": "0.6823848", "text": "def environment_name(self) -> Optional[str]:\n return pulumi.get(self, \"environment_name\")", "title": "" }, { "docid": "7a2da517ccb0e81974290f004bb160e0", "score": "0.67405933", "text": "def get_env(env_variable: str) -> str:\n return os.environ.get(env_variable, 'local').upper()", "title": "" }, { "docid": "1e205fa470f17331c63fff4ea738e6de", "score": "0.6703609", "text": "def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")", "title": "" }, { "docid": "d0bbdbcd0736cd7984536c705f7fecea", "score": "0.66822726", "text": "def get_environment():\n os_env = os.environ.get('ENVIRONMENT', '')\n\n if os_env.lower() == 'production':\n env = 'production'\n elif os_env.lower() == 'development':\n env = 'development'\n else:\n env = 'local'\n\n return env", "title": "" }, { "docid": "4de4e2441ebcdab4ebef0d395cee66df", "score": "0.66553897", "text": "def get_env():\n return envs[environ.get('FLASK_ENV', default='config')]", "title": "" }, { "docid": "aded0abe03989e1642fa5994323b3659", "score": "0.6638746", "text": "def get_environment_name(self):\n raise NotImplementedError()", "title": "" }, { "docid": "0e0de3cb625ac5e59cf57739a67f568f", "score": "0.65355885", "text": "def env_name(self):\n return self.env.name", "title": "" }, { "docid": "7c63460c0b0236489496afa97c4eaff9", "score": "0.6528589", "text": "def environment(self) -> str:\n return self.__environment", "title": "" }, { "docid": "db99aee86b5dcc6ca4088783d944c591", "score": "0.64826673", "text": "def env_info():\n if 'ipykernel' in sys.modules:\n return 'jupyter_notebook'\n elif 'IPython' in sys.modules:\n return 'ipython_terminal'\n else:\n return 'terminal'", "title": "" }, { "docid": "e0b4fd65fbdbcad88fb28dd8bd86d300", "score": "0.6470066", "text": "def get_env():\n name = request.args.get('name')\n try:\n lib = __import__(name)\n except ImportError:\n return 'Cannot import {}'.format(name)\n return lib.__name__", "title": "" }, { "docid": "58fc9dc82d14faca97c7ccdd70b82b31", "score": "0.6432391", "text": "def get_envs():\n json_str = check_output([\"conda\", \"info\", \"--json\"]).decode()\n info = json.loads(json_str)\n return info['envs']", "title": "" }, { "docid": "919279f384287cf0bcbc3ac9ff7e547f", "score": "0.6383709", "text": "def environment(self) -> Optional[pulumi.Input['EnvironmentArgs']]:\n return pulumi.get(self, \"environment\")", "title": "" }, { "docid": "919279f384287cf0bcbc3ac9ff7e547f", "score": "0.6383709", "text": "def environment(self) -> Optional[pulumi.Input['EnvironmentArgs']]:\n return pulumi.get(self, \"environment\")", "title": "" }, { "docid": 
"307fe351cbbc5e39b0a81c631686b62d", "score": "0.6374366", "text": "def environment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"environment_name\")", "title": "" }, { "docid": "307fe351cbbc5e39b0a81c631686b62d", "score": "0.6374366", "text": "def environment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"environment_name\")", "title": "" }, { "docid": "104020a9ae1d2eb674cee222ccd21658", "score": "0.6367172", "text": "def get_environment(name):\n return _environments_cache[name]", "title": "" }, { "docid": "d5280cbc06c9ab86382e1ce34a7cfe79", "score": "0.6324078", "text": "def environment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"environment\")", "title": "" }, { "docid": "b5d7e72a1fb24d6376253d1f52777e42", "score": "0.6287024", "text": "def get_environment(self, env_name: str) -> Optional[Environment]:\n env_list = self._env_api.environments_get()\n for env in env_list:\n if env.name == env_name:\n return env\n return None", "title": "" }, { "docid": "e78bb8e02987eda34fd901099a8d58b1", "score": "0.627856", "text": "def get_env(self):\n return self._env", "title": "" }, { "docid": "155bd3d7ed3f0e394dd67e68c169206e", "score": "0.6236693", "text": "def get_virtualenv_path():\n if hasattr(sys, 'real_prefix'):\n return sys.prefix\n\n if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:\n return sys.prefix\n\n if 'conda' in sys.prefix:\n return sys.prefix\n\n return None", "title": "" }, { "docid": "9155438a6a6755a7394a585138a28175", "score": "0.6227984", "text": "def env(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvVarArgs']]]]:\n return pulumi.get(self, \"env\")", "title": "" }, { "docid": "9155438a6a6755a7394a585138a28175", "score": "0.6227984", "text": "def env(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvVarArgs']]]]:\n return pulumi.get(self, \"env\")", "title": "" }, { "docid": "1a96f6e673fa2427c1e1a99a5c87e7c1", "score": "0.6190845", "text": "def env(self) -> Dict[str, str]:\n return ORIG_ENV", "title": "" }, { "docid": "8b8978366a32caae13415c49ba5744d5", "score": "0.617872", "text": "def _GetEnviron(self, name):\n try:\n return os.environ[name]\n except KeyError:\n raise ConfigurationError('%s is not set in environment.' 
% name)", "title": "" }, { "docid": "86c36db4d8344f8b00c16818391aadbb", "score": "0.61784595", "text": "def env(self) -> Optional[Sequence['outputs.StorageClusterSpecAutopilotEnv']]:\n return pulumi.get(self, \"env\")", "title": "" }, { "docid": "968bd71c8f5a0918129a01252a7f237d", "score": "0.61698496", "text": "def get_environment_config():\n from ..connect import main as _glconnect\n unity = _glconnect.get_unity()\n return unity.list_globals(False)", "title": "" }, { "docid": "9323201948eb865b124c095efe501b70", "score": "0.6144131", "text": "def default_environment():\n\n # NOTE(dittrich): I know this code has multiple return points\n # but it is simpler and easier to understand this way.\n #\n # Highest priority is inhereted environment variable.\n environment = os.getenv('D2_ENVIRONMENT', None)\n if environment is not None:\n return environment\n #\n # Next is saved file in current working directory.\n local_default = get_saved_default_environment()\n if local_default not in ['', None]:\n return local_default\n #\n # Lowest priority is the directory path basename.\n return os.path.basename(os.getcwd())", "title": "" }, { "docid": "9323201948eb865b124c095efe501b70", "score": "0.6144131", "text": "def default_environment():\n\n # NOTE(dittrich): I know this code has multiple return points\n # but it is simpler and easier to understand this way.\n #\n # Highest priority is inhereted environment variable.\n environment = os.getenv('D2_ENVIRONMENT', None)\n if environment is not None:\n return environment\n #\n # Next is saved file in current working directory.\n local_default = get_saved_default_environment()\n if local_default not in ['', None]:\n return local_default\n #\n # Lowest priority is the directory path basename.\n return os.path.basename(os.getcwd())", "title": "" }, { "docid": "cdc4ad6d3ade3954be11f15f81ad893f", "score": "0.6135233", "text": "def env(self) -> Optional[Sequence['outputs.StorageClusterSpecEnv']]:\n return pulumi.get(self, \"env\")", "title": "" }, { "docid": "b9725402aec524c393e8fc0544f4cd50", "score": "0.610127", "text": "def get_default_environment():\n virtual_env = _get_virtual_env_from_var()\n if virtual_env is not None:\n return virtual_env\n\n for environment in find_python_environments():\n return environment", "title": "" }, { "docid": "f31626c66eb31d6d81eb88ad9720e9e2", "score": "0.6081558", "text": "def fromEnv(self):\n\t\treturn env.getEnv(self)", "title": "" }, { "docid": "b96d51e4226144f89b3e4aec7d75da01", "score": "0.6080151", "text": "def _get_environment(self):\n return self.__environment", "title": "" }, { "docid": "c0b8627ea0176c8cb54f84ccf727ef5c", "score": "0.60656536", "text": "def env(self) -> Optional[Sequence['outputs.StorageClusterSpecUserInterfaceEnv']]:\n return pulumi.get(self, \"env\")", "title": "" }, { "docid": "ee3ef732755920c61bc5a3ce95debd4e", "score": "0.6060195", "text": "def _get_conda_python(self):\n try:\n m = self._get_conda_manager()\n return m.get_conda_python()\n except ImportError: # modules directory is not available\n\n # -----------------------------------------------------------------------\n # duplicate logic from get_conda_python function in install_conda.py\n # since install_conda.py may not be available\n def m_get_conda_python(self):\n m_conda_python = os.path.join('bin', 'python')\n if self.isPlatformWindows():\n m_conda_python = self.op.join('python.exe')\n elif self.isPlatformMacOSX():\n m_conda_python = os.path.join('python.app', 'Contents',\n 'MacOS', 'python')\n return m_conda_python\n # 
-----------------------------------------------------------------------\n\n conda_python = None\n\n # (case 1)\n # use default location or file provided to --use-conda\n if self.use_conda == '' or os.path.isfile(self.use_conda):\n conda_python = self.op.join('..', 'conda_base',\n m_get_conda_python(self))\n if self.isPlatformWindows():\n conda_python = self.op.join(os.getcwd(), 'conda_base', m_get_conda_python(self))\n # (case 2)\n # use path provided to --use-conda\n elif os.path.isdir(self.use_conda):\n self.use_conda = os.path.abspath(self.use_conda)\n conda_python = os.path.join(self.use_conda, m_get_conda_python(self))\n else:\n raise RuntimeError(\"\"\"\nThe --use-conda flag can accept a directory to a conda environment or a\nfile that defines a conda environment. Please make sure a valid conda\nenvironment exists in or is defined by {conda_env}.\n\"\"\".format(conda_env=self.use_conda))\n\n if conda_python is None:\n raise RuntimeError('A conda version of python could not be found.')\n\n return conda_python", "title": "" }, { "docid": "e2fc92f03bfb6f7df5d5aa1cb99b784a", "score": "0.60249835", "text": "def get_env(name, default=None):\n for var in api.current_actor().configuration.leapp_env_vars:\n if var.name == name:\n return var.value\n return default", "title": "" }, { "docid": "0801a57f74a50729b81423c01540efad", "score": "0.6014137", "text": "def get_environment(self):\n return self._environment", "title": "" }, { "docid": "1dfdb4cb3dfe421b77b6d4cd4930b1c6", "score": "0.6008921", "text": "def get_env_name(env_var_name) -> str:\r\n virtual_env_path = os.environ.get(env_var_name)\r\n if not virtual_env_path:\r\n return 'default'\r\n else:\r\n return os.path.basename(virtual_env_path)", "title": "" }, { "docid": "4e01be076e55705668fae0d3fd34dcda", "score": "0.5983337", "text": "def get_environment_config():\n unity = _glconnect.get_unity()\n return unity.list_globals(False)", "title": "" }, { "docid": "18770a1668c4c20c6933f0b65db36aba", "score": "0.5965647", "text": "def _get_env_var(self, name):\n return os.environ[name] if name in os.environ else \"\"", "title": "" }, { "docid": "8b3ebe5104bea2a987453cb999d2107f", "score": "0.595991", "text": "def env(self) -> Optional[Sequence['outputs.StorageClusterSpecNodesEnv']]:\n return pulumi.get(self, \"env\")", "title": "" }, { "docid": "1b7243287848f891a6830cc994217b93", "score": "0.5956502", "text": "def test_get_conda_env():\n pytest_enable_socket()\n\n ## Test that the base environemnet is returned\n croot = sp.check_output(['conda', 'info', '--root'])\n conda_env, conda_path = utils.get_conda_env()\n assert conda_path.strip() == croot.decode(\"utf8\").strip()\n assert conda_env.strip() == os.path.basename(croot).decode(\"utf8\").strip()\n\n\n ## Test with conda_root() set as prefix\n conda_env, conda_path = utils.get_conda_env(prefix=utils.conda_root())\n assert conda_path.strip() == croot.decode(\"utf8\").strip()\n assert conda_env.strip() == os.path.basename(croot).decode(\"utf8\").strip()\n\n ### Test with environment name\n conda_env, conda_path = utils.get_conda_env(prefix=conda_env)\n assert conda_path.strip() == croot.decode(\"utf8\").strip()\n assert conda_env.strip() == os.path.basename(croot).decode(\"utf8\").strip()\n\n\n ## Test new environment set as prefix\n env_name = \"test_croot\"\n temp_env = os.path.join(utils.conda_root(), \"envs\", env_name)\n ### Remove temp env if it already exists\n sp.check_output([\"conda\", \"env\", \"remove\", \"--name\", env_name])\n try: \n shutil.rmtree(temp_env)\n except Exception:\n 
pass \n\n ### Create the temp environment\n sp.check_output([\"conda\", \"create\", \"--name\", env_name])\n\n ### Test with environment name\n conda_env, conda_path = utils.get_conda_env(prefix=env_name)\n assert conda_path.strip() == str(temp_env)\n assert conda_env.strip() == env_name\n\n ### Test with environment path\n conda_env, conda_path = utils.get_conda_env(prefix=temp_env)\n assert conda_path.strip() == str(temp_env)\n assert conda_env.strip() == env_name\n\n ### Remove temp env\n sp.check_output([\"conda\", \"env\", \"remove\", \"--name\", env_name])\n try:\n shutil.rmtree(temp_env)\n except Exception:\n pass\n assert os.path.exists(temp_env) == False", "title": "" }, { "docid": "55f545cd9a6b5f94ce5f93d314d6cdbc", "score": "0.5948327", "text": "def env(scope, varname):\n return [os.environ.get(varname[0], '')]", "title": "" }, { "docid": "a62a2252d7cd4f11aabbb8438d7f9e1f", "score": "0.59405476", "text": "def env(self) -> Optional[gdb.Value]:\n return self._variant['env'] if not self.is_dynamic else None", "title": "" }, { "docid": "01766cadfe33f566244fd2ca2e776554", "score": "0.5925549", "text": "def environment(self) -> 'outputs.GetFunctionEnvironmentResult':\n return pulumi.get(self, \"environment\")", "title": "" }, { "docid": "132ec85857758086c85cd1805a7fe564", "score": "0.5911311", "text": "def conda_url(self):\n api_info = self.api.download_get_api_info()\n conda_url = api_info.get('conda_url', 'https://conda.anaconda.org')\n conda_url = conda_url[:-1] if conda_url[-1] == '/' else conda_url\n return conda_url", "title": "" }, { "docid": "05a56794898f4d8c8593b4758b7787de", "score": "0.5903882", "text": "def env(self) -> Optional[Sequence['outputs.StorageClusterSpecStorkEnv']]:\n return pulumi.get(self, \"env\")", "title": "" }, { "docid": "ba11115ff0b295cdba56d40acb4246b6", "score": "0.5857058", "text": "def get_remote_env_var(self, env_var):\n env_vars = list(map(lambda x: x.result(), self.rex().exec_broad(fr'bash -lc \"echo \\${env_var}\"')))\n\n return env_vars[0]", "title": "" }, { "docid": "b0a01cf91786029c22a9805b5ea82c0b", "score": "0.58518374", "text": "def env(self):\n return self._env_cfg", "title": "" }, { "docid": "279882fa12205b4d438fc351d4d1ed4e", "score": "0.5848244", "text": "def check_env_conda(cls, name: str) -> bool:\n args = [\"conda\", \"env\", \"list\"]\n ret = utils.cmdline(args)\n env_found = False\n for line in ret.stdout.splitlines():\n if line.startswith(name):\n env_found = True\n break\n return env_found", "title": "" }, { "docid": "56519ec76328f20517923c910ef46ffd", "score": "0.5846986", "text": "def env(self):\n git_dir = self.m.path['start_dir'].join('git')\n git_bin = git_dir.join('bin')\n return self.m.env({'PATH': self.m.path.pathsep.join(\n [str(git_dir), str(git_bin), '%(PATH)s'])})", "title": "" }, { "docid": "d20e718139a9073e3bdd7634cfdfc24a", "score": "0.5836369", "text": "def test_get_base_env():\n\n ## Get the base environmnet\n from conda.core.envs_manager import list_all_known_prefixes\n base = min(list_all_known_prefixes())\n\n ## Test that the base conda root is returned correctly \n base_prefix = utils.get_base_env(cur_prefix = utils.conda_root())\n assert base_prefix == base\n\n ## Test that the prefix is or is not in the environmnets \n ### List of enviroments\n environments = [os.path.join(x+\"/\") for x in utils.check_output([\"conda\", \"info\", \"--env\"]).strip().replace(\"*\",\"\").replace(\"\\n\",\" \").split(\" \") if os.path.isdir(x)]\n base_env = min(environments)\n env_name = \"temp_env_not_base\"\n temp_env = 
os.path.join(utils.conda_root(), \"envs\", env_name)\n\n try:\n utils.prefix_in_conda(temp_env)\n except utils.CondaEnvironmentNotFound as e:\n assert \"The prefix supplied is not a conda environment: {}\".format(temp_env) in str(e) \n except Exception as e:\n assert False\n\n ## Test that a non-conda prefix is handled \n try:\n utils.get_base_env(cur_prefix = env_name)\n assert False\n except utils.CondaEnvironmentNotFound as e: \n pass\n\n ## Create the env\n sp.check_output([\"conda\", \"create\", \"--name\", env_name])\n\n ## Test get_base_env on the new env\n base_prefix = utils.get_base_env(cur_prefix = env_name)\n assert base_prefix == base\n\n ## Test get_base_env on the new env\n base_prefix = utils.get_base_env(cur_prefix = temp_env)\n assert base_prefix == base\n \n ### Remove temp env\n sp.check_output([\"conda\", \"env\", \"remove\", \"--name\", env_name])\n try:\n shutil.rmtree(temp_env)\n except Exception:\n pass\n assert os.path.exists(temp_env) == False", "title": "" }, { "docid": "8a8d06669148154fd70e25a3693b1a54", "score": "0.5833431", "text": "def environment_id(self) -> Optional[str]:\n return pulumi.get(self, \"environment_id\")", "title": "" }, { "docid": "3ea81fb3aff630e3c2b90ac6d89bf0e7", "score": "0.5820396", "text": "def env_from(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvFromSourceArgs']]]]:\n return pulumi.get(self, \"env_from\")", "title": "" }, { "docid": "3ea81fb3aff630e3c2b90ac6d89bf0e7", "score": "0.5820396", "text": "def env_from(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvFromSourceArgs']]]]:\n return pulumi.get(self, \"env_from\")", "title": "" }, { "docid": "1260cb5fef173cc53f6ccfdb9aeae255", "score": "0.5820233", "text": "def get_url():\n return git('config', '--get', 'remote.origin.url', _show=False)[0]", "title": "" }, { "docid": "7c3732ab2e3372b78c4c0cd12d0da11e", "score": "0.5809245", "text": "def environ():\n from projex.envmanager import EnvManager\n\n return EnvManager.current()", "title": "" }, { "docid": "1f0bf98df77da9027536334566866064", "score": "0.57849365", "text": "def get_environment_from_request(self):\n return Environment.objects.get(api_key=self.kwargs['environment_api_key'])", "title": "" }, { "docid": "1f0bf98df77da9027536334566866064", "score": "0.57849365", "text": "def get_environment_from_request(self):\n return Environment.objects.get(api_key=self.kwargs['environment_api_key'])", "title": "" }, { "docid": "a33eb32537e15238866aa9d3d391e98a", "score": "0.57811165", "text": "def get_k8s_env() -> K8sEnv:\n out = subprocess.check_output(['kubectl', 'config', 'current-context'])\n\n outstr = out.decode('utf-8').strip()\n if outstr == 'docker-for-desktop':\n return K8sEnv.D4M\n elif 'gke' in outstr:\n return K8sEnv.GKE\n elif outstr == 'minikube':\n return K8sEnv.MINIKUBE\n else:\n raise Exception('Unable to find a matching k8s env for output \"{}\"'. 
format(outstr))", "title": "" }, { "docid": "c2411758ca427c5401164a4f0c3b7022", "score": "0.57805943", "text": "def list_envs() -> List[str]:\n logger.info(f\"Listing conda environments...\")\n command = copy(BASE_CONDA_ENV_COMMAND)\n command.extend([\"list\", \"--json\"])\n proc_output = run_subprocess(command)\n proc_json = loads(proc_output)\n env_locations = proc_json[\"envs\"]\n environments = []\n for env_location in env_locations:\n name = Path(env_location).stem\n if name.startswith(\"miniconda\") or name.startswith(\"anaconda\"):\n name = \"base\"\n environments.append(name)\n return environments", "title": "" }, { "docid": "a549590235840f53669517b9f8cc7ad5", "score": "0.5755601", "text": "def get_environment(self, params):\n return os.environ.copy()", "title": "" }, { "docid": "cd240d4efd4601f4731a6e6ee66a4c34", "score": "0.574717", "text": "def current():\n with _lock:\n ident = identifier()\n\n envs = _current_envs.get(ident)\n if envs:\n return envs[-1]\n return None", "title": "" }, { "docid": "56fc2a2004d0f83b93c00b8e59685000", "score": "0.5735212", "text": "def get_env(self, env):\n for env_data in self.client.execute(ENVS_QUERY)[\"envs\"]:\n if env_data[\"name\"] == env:\n env_data[\"namespaces\"] = set(n[\"name\"] for n in env_data[\"namespaces\"])\n break\n else:\n raise ValueError(f\"cannot find env '{env}'\")\n\n return env_data", "title": "" }, { "docid": "fbbbb63d79234bc9ba06464165e4b1a5", "score": "0.57295305", "text": "def default_env(self):\n return self._default_env", "title": "" }, { "docid": "f98a8edb7b271a5bb2858f35a28e7f15", "score": "0.5718124", "text": "def remote(self) -> str:\n if len(self.repo.remotes) > 1:\n self._remote = self.repo.remote(self._remote_name)\n if not self._remote:\n raise EnvironmentError(f\"Remote {self._remote_name} not found\")\n elif len(self.repo.remotes) == 1:\n self._remote = self.repo.remotes[0]\n else:\n raise EnvironmentError(\"No remote associated into the repository\")\n return self._remote", "title": "" }, { "docid": "0d326d622040696776007cbdc305c116", "score": "0.569893", "text": "def get_env_config(self):\n return self._rpc({\n \"Type\": \"Client\",\n \"Request\": \"EnvironmentGet\"})", "title": "" }, { "docid": "12207feb9deb0dde6c494af76ef8bf4d", "score": "0.56910676", "text": "def get_env(self) -> Optional[VecEnv]:\n return self.env", "title": "" }, { "docid": "74ce47d40149bc50a33380a99b0a457b", "score": "0.5674827", "text": "def _get_build_env(env):\n env_override = \"\"\n if env is None:\n return env_override\n if not isinstance(env, dict):\n raise SaltInvocationError(\"'env' must be a Python dictionary\")\n for key, value in env.items():\n env_override += \"{}={}\\n\".format(key, value)\n env_override += \"export {}\\n\".format(key)\n return env_override", "title": "" }, { "docid": "10178e41f6e62593dfd20c9183a71b9f", "score": "0.5664573", "text": "def get_remote_name() -> Optional[bytes]:\n remote_name = b''\n remote_num = 0\n get_remotes_name_cmd = 'git remote'.split()\n task = subprocess.Popen(\n get_remotes_name_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = task.communicate()\n remotes = out[:-1].split(b'\\n')\n if not err:\n for remote in remotes:\n get_remotes_url_cmd = (\n b'git config --get remote.%s.url' % remote).split()\n task = subprocess.Popen(\n get_remotes_url_cmd, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n remote_url, err = task.communicate()\n if not err:\n if remote_url.endswith(b'oppia/oppia.git\\n'):\n remote_num += 1\n remote_name = remote\n else:\n raise 
ValueError(err)\n else:\n raise ValueError(err)\n\n if not remote_num:\n raise Exception(\n 'Error: Please set upstream for the lint checks to run '\n 'efficiently. To do that follow these steps:\\n'\n '1. Run the command \\'git remote -v\\'\\n'\n '2a. If upstream is listed in the command output, then run the '\n 'command \\'git remote set-url upstream '\n 'https://github.com/oppia/oppia.git\\'\\n'\n '2b. If upstream is not listed in the command output, then run the '\n 'command \\'git remote add upstream '\n 'https://github.com/oppia/oppia.git\\'\\n'\n )\n\n if remote_num > 1:\n print(\n 'Warning: Please keep only one remote branch for oppia:develop '\n 'to run the lint checks efficiently.\\n')\n return None\n return remote_name", "title": "" }, { "docid": "b6a5cf2b15f3564efc3f7fec853ab827", "score": "0.563216", "text": "def get_environment(flow_rp):\n environments_val = flow_rp.get_environments()\n environments_list = environments_val[_VALUE]\n environments = {\n env[_PROPERTIES][_DISPLAY_NAME]: env[_NAME]\n for env in environments_list\n }\n\n environment_keys = list(environments.keys())\n\n sid = prompt_choice_list('Please select an environment:', environment_keys)\n environment = environments[environment_keys[sid]]\n\n print('Environment selected: {}'.format(environment_keys[sid]))\n\n return environment", "title": "" }, { "docid": "000c00355a5af58689118e9c886d58ab", "score": "0.5629444", "text": "def get_config_from_env(config_name, default=None):\n LOG.debug(\"Getting environmental variable '%s'\", config_name)\n return os.environ.get(config_name, default)", "title": "" }, { "docid": "3b08afef138b41555fece1b167337399", "score": "0.56168485", "text": "def repo_name(self) -> str:\n return pulumi.get(self, \"repo_name\")", "title": "" }, { "docid": "db3d27120b472cdb3dcbad705a20fd67", "score": "0.5613533", "text": "def get_environment(name):\r\n if name == \"_default\":\r\n return env_from_template(name)\r\n filename = os.path.join(\"environments\", name + \".json\")\r\n try:\r\n with open(filename) as f:\r\n try:\r\n return json.loads(f.read())\r\n except ValueError as e:\r\n msg = 'LittleChef found the following error in'\r\n msg += ' \"{0}\":\\n {1}'.format(filename, str(e))\r\n abort(msg)\r\n except IOError:\r\n raise FileNotFoundError('File {0} not found'.format(filename))", "title": "" }, { "docid": "c5a6ef716a3e2dda1897421db80ee643", "score": "0.5600405", "text": "def getTopEnvName():\n return 'DAR_INST_TOP'", "title": "" }, { "docid": "753be3bfba5a1fddc06f529d2d615dee", "score": "0.5598987", "text": "def get_default_env(cls) -> \"DESEnv\":\n return DESEnv.__DEFAULT_ENV", "title": "" }, { "docid": "04de04c43b1ac7f726c8cd120efa783a", "score": "0.55968314", "text": "def environment(self):\n return ''", "title": "" }, { "docid": "76e2c2883a6c86736d685b73b0b25a50", "score": "0.55878526", "text": "def get_env(self):\n env = {}\n for line in self.execute('env'):\n key, val = line.split('=', 1)\n env[key] = val\n return env", "title": "" }, { "docid": "cb9a9f40e1f6edc9624bb81c46f219aa", "score": "0.5575313", "text": "def get_command_environment(self) -> Dict[str, str]:\n env = bases.buildd.default_command_environment()\n env[\"CHARMCRAFT_MANAGED_MODE\"] = \"1\"\n\n # Pass-through host environment that target may need.\n for env_key in [\"http_proxy\", \"https_proxy\", \"no_proxy\"]:\n if env_key in os.environ:\n env[env_key] = os.environ[env_key]\n\n return env", "title": "" }, { "docid": "91182c69007ab2de9722e98f8ba48444", "score": "0.55488896", "text": "def _repo_name(self):\n\n if 
self._git_repo_name is not None:\n return self._git_repo_name\n else:\n reponame = self.repo.git_dir.split(os.sep)[-2]\n if reponame.strip() == '':\n return 'unknown_repo'\n return reponame", "title": "" }, { "docid": "5c2812a4bc1b8bba361230dd9a4f5d47", "score": "0.55379623", "text": "def get_environ_variable(name):\n try:\n return os.environ[name]\n except KeyError:\n error_msg = 'Environment variable {} must be set'.format(name)\n raise ImproperlyConfigured(error_msg)", "title": "" }, { "docid": "8f3843e9f8a7996240b4be6d7f9843f8", "score": "0.55323195", "text": "def env_prefix(self):\n return self._env_prefix", "title": "" }, { "docid": "61c31783e391d8b3099a3e423fb8c4a5", "score": "0.5531388", "text": "def cf_env(self):\n args = self.workflow.args\n\n return {\n 'CF_ENV': args.environment or '',\n 'CF_PROJECT': args.project_name,\n # deprecate this env var\n 'CF_ENV_NAME': args.project_name,\n }", "title": "" }, { "docid": "b0f80d9eb528c2fbac62b659aa190e5a", "score": "0.55307907", "text": "def _locate_pip_inside_conda(env_name):\n pip = _path_to_pip_in_env_with_name(shutil.which(\"conda\"), env_name)\n\n # this might happen if the environment does not contain python/pip\n if not Path(pip).exists():\n err = (\n f\"Could not locate pip in environment {env_name!r}, make sure \"\n \"it is included in your environment.yml and try again\"\n )\n telemetry.log_api(\n \"install-error\", metadata={\"type\": \"no_pip_env\", \"exception\": err}\n )\n raise BaseException(err)\n\n return pip", "title": "" }, { "docid": "373a5bb4d5cfef8a0d84ccf5cadb645d", "score": "0.552034", "text": "def github_org(self):\n return self._config['repo']['github_org']", "title": "" }, { "docid": "601e60b5d216a2bddb616b07539ceea8", "score": "0.54993343", "text": "def get_env_requirement(var_name):\n try:\n return os.environ[var_name]\n except KeyError:\n error_msg = \"Could not find the environment variable %s\" % var_name\n raise ImproperlyConfigured(error_msg)", "title": "" }, { "docid": "cb8a24078dcc5d1af3cad7dbaaa1fa89", "score": "0.54971105", "text": "def get_from_env(self, key):\n return os.environ[key]", "title": "" }, { "docid": "7ece072070d31fcd9e742cffe70f082d", "score": "0.5493721", "text": "def environment(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TaskDefinitionKeyValuePairArgs']]]]:\n return pulumi.get(self, \"environment\")", "title": "" }, { "docid": "f08a224ada1c01009dba8fadd8ac6ee3", "score": "0.5492601", "text": "def env_from(self) -> Optional[List[\"EnvFromSource\"]]:\n return self.__env_from", "title": "" }, { "docid": "f08a224ada1c01009dba8fadd8ac6ee3", "score": "0.5492601", "text": "def env_from(self) -> Optional[List[\"EnvFromSource\"]]:\n return self.__env_from", "title": "" }, { "docid": "f3e4bd7940f2bd6eba3d7f553ec952b7", "score": "0.5491858", "text": "def get_current_branch_name():\n\tbranch_name = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\n\treturn branch_name.decode(\"utf-8\").rstrip()", "title": "" }, { "docid": "0b48b980294c0333b73cabe7cc8a6a98", "score": "0.5481975", "text": "def env(self) -> Optional[List[\"EnvVar\"]]:\n return self.__env", "title": "" }, { "docid": "0b48b980294c0333b73cabe7cc8a6a98", "score": "0.5481975", "text": "def env(self) -> Optional[List[\"EnvVar\"]]:\n return self.__env", "title": "" }, { "docid": "8c38a08746e222cc9d60d44f7b91c618", "score": "0.5474452", "text": "def repo(self) -> str:\n return self._metadata.repo", "title": "" }, { "docid": "b5f361a48d6604edb5349d71d81006c5", "score": "0.5474218", "text": "def 
identify_current_repo(repos):\n for h in git.hashes():\n if h in repos:\n return repos[h]\n return None", "title": "" }, { "docid": "08731ead5b2ecb1d5336ad059eaf5cb6", "score": "0.54723644", "text": "def determine_env() -> int:\n cd = Path(\"../..\").resolve()\n if \"C:\" in str(cd):\n return 0\n elif \"lustre\" in str(cd):\n return 1\n else:\n return 0", "title": "" }, { "docid": "086d2669da24598dffe97c367909ad1a", "score": "0.5464385", "text": "def env_dir(self):\n return os.path.join(self._dir, 'env')", "title": "" }, { "docid": "b227d842a9de5db3338e8cff12b64c97", "score": "0.5461256", "text": "def get_environments():\n try:\n with open(environment_file) as f:\n paths = f.readlines()\n except IOError:\n paths = []\n environments = set(\n os.path.normpath(env.strip()) for env in paths if os.path.isdir(env.strip())\n )\n env_dirs = (\n os.path.join(conda_base, \"envs\"),\n os.path.join(os.path.expanduser(\"~\"), \".conda\", \"envs\"),\n )\n for env_dir in env_dirs:\n if os.path.isdir(env_dir):\n for d in os.listdir(env_dir):\n d = os.path.join(env_dir, d)\n if os.path.isdir(d):\n environments.add(d)\n\n return environments", "title": "" } ]
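Several of the passages above locate the active Python environment by reading variables that conda and virtualenv export on activation (CONDA_DEFAULT_ENV, CONDA_PREFIX, VIRTUAL_ENV) or by comparing sys.prefix against sys.base_prefix. The helper below is a minimal stand-alone sketch that combines those checks; it is an illustrative example rather than code taken from any passage, and the function name detect_python_environment is our own.

import os
import sys

def detect_python_environment():
    """Best-effort detection of the active Python environment.

    Returns a (kind, location) tuple where kind is 'conda',
    'virtualenv', or 'system'.
    """
    # conda activation exports CONDA_DEFAULT_ENV (name) and CONDA_PREFIX (path).
    conda_name = os.environ.get("CONDA_DEFAULT_ENV")
    if conda_name:
        return "conda", os.environ.get("CONDA_PREFIX", conda_name)

    # virtualenv/venv activation exports VIRTUAL_ENV; inside a venv,
    # sys.prefix also diverges from sys.base_prefix.
    virtual_env = os.environ.get("VIRTUAL_ENV")
    if virtual_env:
        return "virtualenv", virtual_env
    if getattr(sys, "base_prefix", sys.prefix) != sys.prefix:
        return "virtualenv", sys.prefix

    return "system", sys.prefix

if __name__ == "__main__":
    kind, location = detect_python_environment()
    print(kind + ": " + location)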
274993236c68c93987a2bd3d55fd0eb4
Returns the number of unread messages in the user's mbox of a given type.
[ { "docid": "940b33f472fb0674c8ac24b43ad96f72", "score": "0.82448024", "text": "def get_unread(self, site, profile_name, mbox_type='inbox'):\n total = 0\n \n if _get_mailbox(site, profile_name, mbox_type) is None:\n return total\n mbox_queues = self.get_queues(site, profile_name, mbox_type)\n for mbox_q in mbox_queues:\n for msg_no in mbox_q._messages:\n raw_msg = mbox_q._messages[msg_no]\n if not STATUS_READ in raw_msg.flags:\n total += 1\n \n return total", "title": "" } ]
[ { "docid": "98429e9cb42c0c5b03da2b2fcc1326e9", "score": "0.694137", "text": "def unread_messages_count(self) -> int:\n return self._message_storage.unread_count", "title": "" }, { "docid": "d5e526e27ef01e5174d787af7c57ef22", "score": "0.69291246", "text": "def get_unread(self):\n total = 0\n \n for msg_no in self._messages:\n raw_msg = self._messages[msg_no]\n if not STATUS_READ in raw_msg.flags:\n total += 1\n \n return total", "title": "" }, { "docid": "3673a6d5479849d40278cad95f1179fe", "score": "0.6878066", "text": "def get_count_unread(user):\n conn = connect()\n cur = conn.cursor()\n\n cur.execute(\"\"\"SELECT COUNT(messages) FROM messages WHERE messages.receiver = '%s' AND messages.status = 'Unread'\"\"\" % (user))\n results = cur.fetchall()\n all_messages_for_user = [item[0] for item in results]\n\n conn.commit()\n conn.close()\n\n return all_messages_for_user", "title": "" }, { "docid": "343465074af5a00260e0b037fd769511", "score": "0.68464667", "text": "def inbox_count_for(user):\n return Message.objects.filter(\n recipient=user,\n read__isnull=True,\n recipient_deleted__isnull=True,\n ).count()", "title": "" }, { "docid": "535805ee99d258b2368c515a47209e27", "score": "0.6800597", "text": "def count_unread_messages(self):\n\n\t\treturn self.user.received_messages.filter(date_received=None).count()", "title": "" }, { "docid": "4d91b1af85a201d19c4854c496898763", "score": "0.67822254", "text": "def get_unread_count(user):\n return UserNotification.objects.filter(recipient=user, unread=True).count()", "title": "" }, { "docid": "884634c2079360bae5fbe7924059f7bb", "score": "0.66291726", "text": "def get_unread_count(user):\n return Conversation.query.filter(\n Conversation.unread, Conversation.user_id == user.id\n ).count()", "title": "" }, { "docid": "43c84c5163dff64cd25aa934e7ff2cb8", "score": "0.6610905", "text": "def count_msg_of_type(msg_type):\n result = Message.query \\\n .filter(Message.type == msg_type) \\\n .count()\n return result", "title": "" }, { "docid": "04e0ea5fa8794520d88d8813cc1cca90", "score": "0.65492016", "text": "def count(self, message_type: type) -> int:\n self._check_bus()\n return len(self._messages_by_type[message_type])", "title": "" }, { "docid": "5031e742e4a2f5af49bdb471461eceeb", "score": "0.6225393", "text": "def unread(self):\n return self.feeditem_set.filter(is_read=False).count()", "title": "" }, { "docid": "ffdce908325e2614f24250db9b0a72ce", "score": "0.6216302", "text": "def get_unread_messages(self):\n service = self.service\n results = service.users().messages().list(userId='me', labelIds=['INBOX', 'UNREAD']).execute()\n messages = results.get('messages', [])\n\n if not messages:\n print('No unread threads found.')\n return None\n else:\n print('Found unread messages:', messages, '\\n')\n return messages", "title": "" }, { "docid": "5209c75855d399b02524775e86187946", "score": "0.58973277", "text": "def gmail_unread_count(username, password):\n # Build the authentication string\n b64auth = base64.encodestring(\"%s:%s\" % (username, password))\n auth = \"Basic \" + b64auth\n\n # Build the request\n req = urllib2.Request(\"https://mail.google.com/mail/feed/atom/\")\n req.add_header(\"Authorization\", auth)\n handle = urllib2.urlopen(req)\n\n # Build an XML dom tree of the feed\n dom = parse(handle)\n handle.close()\n\n # Get the \"fullcount\" xml object\n count_obj = dom.getElementsByTagName(\"fullcount\")[0]\n # get its text and convert it to an integer\n return int(count_obj.firstChild.wholeText)", "title": "" }, { "docid": 
"6b495fbd3ac2f2dec2e6fe239fa83010", "score": "0.5784117", "text": "def get_unread():", "title": "" }, { "docid": "8b77574c56de0a253a1bf82f0c2448d1", "score": "0.5777752", "text": "def set_unread_count():", "title": "" }, { "docid": "7bf8949540a29e4e3499cf574d0a3158", "score": "0.5753119", "text": "def test_count_unread_messages_for(self):\n jane = User.objects.get(pk=2)\n\n # Jane has one unread message from john\n unread_messages = MessageRecipient.objects.count_unread_messages_for(jane)\n\n self.failUnlessEqual(unread_messages, 1)", "title": "" }, { "docid": "c1a3b607669b5ad4a8f5f551e397eb4c", "score": "0.5734465", "text": "def mboxstat(self, mbox):\n try:\n sele_ret, msgs_cnt = self.imap.select(mbox, readonly = True)\n\n except imaplib.IMAP4_SSL.error:\n return False\n\n if sele_ret == \"OK\":\n return True\n else: \n return False", "title": "" }, { "docid": "274b05a2041fb86dae1f91a125cbaab6", "score": "0.5670949", "text": "def get_unread_message_count_for(parser, token):\n try:\n tag_name, arg = token.contents.split(None, 1)\n except ValueError as e:\n raise template.TemplateSyntaxError(\n \"%s tag requires arguments\" % token.contents.split()[0]\n ) from e\n m = re.search(r\"(.*?) as (\\w+)\", arg)\n if not m:\n raise template.TemplateSyntaxError(\n \"%s tag had invalid arguments\" % tag_name\n )\n user, var_name = m.groups()\n return MessageCount(user, var_name)", "title": "" }, { "docid": "29c32e8a10957b02b39416648777e15f", "score": "0.5625927", "text": "def readInbox():\n if len(inbox) == 0:\n return 0\n else:\n return inbox.pop(0)", "title": "" }, { "docid": "cd66976cf23ab2b9556d1bc012836464", "score": "0.5558872", "text": "def test_push_unread_count_message_count(self) -> None:\n # Carry out common push count tests and setup\n self._test_push_unread_count()\n\n # Carry out our option-value specific test\n #\n # We're counting every unread message, so there should now be 3 since the\n # last read receipt\n self._check_push_attempt(7, 3)", "title": "" }, { "docid": "7017be4d8e1e2f1a36b7ce4dc945813e", "score": "0.5549587", "text": "def has_unread_messages(self):\n\n\t\treturn self.user.received_messages.filter(date_received=None).count() + self.user.system_notifications.filter(resolved=False).count() > 0", "title": "" }, { "docid": "094974fb7b63e9fc099cd29e2d9b92fb", "score": "0.5525923", "text": "def get_unread(self):\n return requests.get(self.baseurl + \"unread/\", headers=self.headers).json()", "title": "" }, { "docid": "02bb48f7ffdd88460d7fca073aef1e7f", "score": "0.54457486", "text": "def read_messages_count(self) -> int:\n return self._message_storage.read_count", "title": "" }, { "docid": "a487d02b06abafcbfa192f86ebf2904f", "score": "0.5328862", "text": "def get_unread_requests(cls):\n return cls.unread_requests_counter", "title": "" }, { "docid": "a487d02b06abafcbfa192f86ebf2904f", "score": "0.5328862", "text": "def get_unread_requests(cls):\n return cls.unread_requests_counter", "title": "" }, { "docid": "3705f16db7886ababd6a2174318a7740", "score": "0.53002006", "text": "def message_count(self):\n return self._messages.qsize()", "title": "" }, { "docid": "6aeb40edd1c947efb0c920f7401e7b1c", "score": "0.5279209", "text": "def fetch_mail(self):\n\t\ts = \"\"\n\t\tself.M.select(\"Inbox\")\n\n\t\trv, data1 = self.M.search(None, \"UnSeen\") \n\t\tif rv != \"OK\":\n\t\t\treturn \"An issue was encountered retreiving unread email\"\n\n\t\tfor x in data1[0].split():\n\t\t\t# get the x-th unread message that was returned from the server\n\t\t\trv, data = self.M.fetch(x, 
\"(RFC822)\")\n\n\t\t\t# get subject and sender \n\t\t\tmessage = email.message_from_string(data[0][1])\n\t\t\tsender = message[\"from\"]\n\t\t\tsubject = message[\"subject\"]\n\n\t\t\t# get the body\n\t\t\tbody = \"\"\n\t\t\tif message.get_content_maintype() == \"multipart\":\n\t\t\t\tfor part in message.walk():\n\t\t\t\t\tif part.get_content_type() == \"text/plain\":\n\t\t\t\t\t\tbody = part.get_payload(decode=True)\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontinue\n\n\t\t\t# append this to all things that will be returned\n\t\t\ts += \"%s: %s\\n%s\\n\\n\" % (sender, subject, body)\n\n\t\t\t# mark as read\n\t\t\tself.M.store(x,'+FLAGS','\\Seen')\n\n\t\treturn s", "title": "" }, { "docid": "530496a057ec4868646b27a5cc60ea12", "score": "0.5259501", "text": "def is_unread(self):\n return self._unread", "title": "" }, { "docid": "60695c1f40f073dfb478269b1ce19a15", "score": "0.5239401", "text": "def getNumMessages(self):\n return self.numMessages", "title": "" }, { "docid": "665cb74bf2f275334158d76c965446b1", "score": "0.5233794", "text": "def inbox(request, template_name='messages/inbox.html'):\n message_list = None\n type='received'\n category= {}\n filter = request.GET.get(\"filter\", None)\n\n if filter is not None:\n if cat_map[filter] is not None:\n category = { 'category': cat_map[filter] }\n\n message_list = InternalMessage.objects.inbox_for(request.user, **category)\n\n pd_message_list = []\n for message in message_list:\n pd_message_list.append(ReceivedMessage(message))\n\n return render_to_response(template_name, {\n 'message_list': pd_message_list,\n 'type': type,\n 'cat_messages': True,\n 'filter': filter,\n\n }, context_instance=RequestContext(request))", "title": "" }, { "docid": "4ea4c33e0bf6ef063cb76584fb39f00b", "score": "0.52305746", "text": "async def get_read_count(self, ctx):\r\n\r\n try:\r\n msg_count = await self.bot.db.field(\"SELECT read_message_count FROM probot WHERE guild_id = $1\", ctx.guild.id)\r\n except Exception as exc:\r\n return HARD_MESSAGE_COUNT\r\n return msg_count or HARD_MESSAGE_COUNT", "title": "" }, { "docid": "aa59ea35a185a89e9bb32eb944d929cd", "score": "0.5215564", "text": "def check_messages(self, mark_read=False):\n if len(self.message_callbacks) == 0:\n return\n messages = self.reddit.get_unread(unset_has_mail=mark_read)\n total_read = 0\n for message in messages:\n with self.database_context() as db:\n editable = EditableContainer(message)\n if db.query(Editable).filter(Editable.id == editable.id).first():\n if mark_read:\n message.mark_as_read()\n continue\n\n # if a callback was made, mark as read.\n if self.check_callbacks(editable, self.message_callbacks) or mark_read:\n message.mark_as_read()\n total_read += 1\n db.add(Editable(id=editable.id))\n return total_read", "title": "" }, { "docid": "4c11af8c9a552c122e3ea926001406bb", "score": "0.5209998", "text": "def count_msgs(self) -> int:\n assert self.env is not None\n with self.env.begin(write=False) as txn:\n meta_db = self.env.open_db(\n key=persipubsub.database.META_DB, txn=txn, create=False)\n meta_stat = txn.stat(db=meta_db) # type: Dict[str, int]\n\n return meta_stat['entries']", "title": "" }, { "docid": "818ecbb69c46eb1d753d7bef4a6d722b", "score": "0.5175912", "text": "def get_message_count(user):\n return Conversation.query.filter(Conversation.user_id == user.id).count()", "title": "" }, { "docid": "adc660fd3f4add9538cf4712336f045d", "score": "0.51670116", "text": "def count(self, type_uri):\r\n return len(self.get(type_uri))", "title": "" }, { "docid": "9c6de766b1557d5f1b030ba305147bcd", 
"score": "0.51633817", "text": "def get_unread_message_count_between(parser, token):\n try:\n tag_name, arg = token.contents.split(None, 1)\n except ValueError as e:\n raise template.TemplateSyntaxError(\n \"%s tag requires arguments\" % token.contents.split()[0]\n ) from e\n m = re.search(r\"(.*?) and (.*?) as (\\w+)\", arg)\n if not m:\n raise template.TemplateSyntaxError(\n \"%s tag had invalid arguments\" % tag_name\n )\n um_from_user, um_to_user, var_name = m.groups()\n return MessageCount(um_from_user, var_name, um_to_user)", "title": "" }, { "docid": "3d8dc904f6d080b3a86f14fee8ee8742", "score": "0.51625633", "text": "def size(self):\n return sum(1 for m in self._list_messages())", "title": "" }, { "docid": "dcde136a8eda70db9117f262b1ad8ef4", "score": "0.5145728", "text": "def reset_unread_count():", "title": "" }, { "docid": "ba257c064da21925a0f88bbfad6178da", "score": "0.5097651", "text": "def test_count_unread_messages_between(self):\n john = User.objects.get(pk=1)\n jane = User.objects.get(pk=2)\n\n # Jane should have one unread message from john\n unread_messages = MessageRecipient.objects.count_unread_messages_between(jane, john)\n\n self.failUnlessEqual(unread_messages, 1)", "title": "" }, { "docid": "69be8e942fff354d3f76074d0296f7b9", "score": "0.49994367", "text": "def get_email_count(self, obj):\n return obj.email_count", "title": "" }, { "docid": "d5573adf301fabf091108edabcf0356c", "score": "0.49765676", "text": "def get_count(self, unit_type: UnitTypeId) -> int:\n count = 0\n\n count += self.cache.own(unit_type).amount\n\n return count", "title": "" }, { "docid": "51ecd76100af0a90c181f6e333be6ae2", "score": "0.49554616", "text": "def notifications_count(self, receiver):\r\n return Notification.objects.filter(receiver=receiver, \r\n is_seen=False, is_active=True).exclude(sender=receiver).count()", "title": "" }, { "docid": "8d9a017cb54a5dddc2f06f9dbeb00ff2", "score": "0.49499083", "text": "def _check_topic(self, topic_name, msg_type):\n types, topic_info = self.bag.get_type_and_topic_info(topic_name)\n assert topic_info[topic_name].msg_type == msg_type\n message_count = topic_info[topic_name].message_count\n assert message_count > 0\n return message_count", "title": "" }, { "docid": "9f738a74e2aa80b8177a41d33744c2b5", "score": "0.49303246", "text": "def get_count_unread_community(action, user, community='NULL', channel='NULL'):\n conn = connect()\n cur = conn.cursor()\n\n if(action == 'Community'):\n cur.execute(\"\"\"SELECT community.community_channels FROM community WHERE community_name = '%s'\"\"\" % (community))\n results = cur.fetchall()\n st = [item[0] for item in results]\n curr_com = st[0].split(',')\n #print(curr_com)\n total_count = 0\n\n for row in curr_com:\n cur.execute(\"\"\"SELECT COUNT(chan_message) FROM community_channels WHERE status = 'Unread' AND community_name = '%s' AND channel_name = '%s'\"\"\" % (community, row))\n results = cur.fetchall()\n xt = [item[0] for item in results]\n total_count = total_count + xt[0]\n \n conn.close()\n\n return total_count\n\n\n elif(action == 'Channel'):\n cur.execute(\"\"\"SELECT COUNT(chan_message) FROM community_channels WHERE status = 'Unread' AND community_name = '%s' AND channel_name = '%s'\"\"\" % (community, channel))\n results = cur.fetchall()\n st = [item[0] for item in results]\n\n conn.close()\n\n return st[0]\n\n else:\n conn.close()\n raise Exception(\"Incorrect use of function\")", "title": "" }, { "docid": "588cafb65e08c684d361b952c1c22a25", "score": "0.4914318", "text": "def message_count(self) -> 
pulumi.Output[float]:\n return pulumi.get(self, \"message_count\")", "title": "" }, { "docid": "084d88b77f5e8a4ed67b2b5eabee98b3", "score": "0.4907612", "text": "def count_messages(file):\n count = 0\n while 1:\n line = file.readline()\n if not line: break\n if line[:5] == \"From \":\n count = count + 1\n file.seek(0)\n return count", "title": "" }, { "docid": "b31e4445716f71c65e98ade2eaa663d3", "score": "0.49002123", "text": "def all_messages_count(self) -> int:\n return self._message_storage.all_count", "title": "" }, { "docid": "471519bf1553fc5af4c19d06b61992a8", "score": "0.4881824", "text": "def count_failed(self, issuetype=ALL):\n return len(self.failed(issuetype))", "title": "" }, { "docid": "2721ad0f907291c04ddf14bdb66f7cee", "score": "0.4860154", "text": "def get_email_length( msg ):\n return log(len(msg.get_payload()))", "title": "" }, { "docid": "b58b3f4a1ecebaf665cdfa13ffdef3d7", "score": "0.48599574", "text": "def inboxList(self):\n function = \"messages\"", "title": "" }, { "docid": "38690b59896ab2749083930c0d1c32b8", "score": "0.48397484", "text": "def get_unread_notifications(user, limit=None):\n\n if not user:\n return None\n\n notifications = cache_mgr.get_cache(\"notification-%s\" % user.username)\n if notifications is None:\n\n notifications = {\"has_more\": False,\n \"use_facebook\": settings.MAKAHIKI_USE_FACEBOOK}\n\n # Find undisplayed alert notifications.\n notifications.update({\"alerts\": get_user_alert_notifications(user)})\n\n # Get unread notifications\n unread_notifications = user.usernotification_set.filter(\n unread=True,).order_by(\"-level\", \"-created_at\")\n if limit:\n if unread_notifications.count() > limit:\n notifications.update({\"has_more\": True})\n unread_notifications = unread_notifications[:limit]\n\n for item in unread_notifications:\n item.fb_contents = _strip_html_tag(item.contents)\n notifications.update({\"unread\": unread_notifications})\n\n cache_mgr.set_cache(\"notification-%s\" % user.username, notifications, 1800)\n return notifications", "title": "" }, { "docid": "66333e13d8fbf0246302b60aa123eb75", "score": "0.4837109", "text": "def archived_messages_count(self) -> int:\n return self._message_storage.archived_count", "title": "" }, { "docid": "0fcd3ee46eb25623da2a48e74c13a207", "score": "0.48290542", "text": "def test_get_unread_status_string(self):\n request = self.factory.get(\"\")\n request.user = self.local_user\n\n with patch(\n \"bookwyrm.activitystreams.ActivityStream.get_unread_count\"\n ) as mock_count, patch(\n \"bookwyrm.activitystreams.ActivityStream.get_unread_count_by_status_type\"\n ) as mock_count_by_status:\n mock_count.return_value = 3\n mock_count_by_status.return_value = {\"review\": 5}\n result = views.get_unread_status_string(request, \"home\")\n\n self.assertIsInstance(result, JsonResponse)\n data = json.loads(result.getvalue())\n self.assertEqual(data[\"count\"], \"Load 5 unread statuses\")", "title": "" }, { "docid": "137e21d60c5f4779aaefd2e8c6e1e1ca", "score": "0.48266402", "text": "def get_subscriber_count(self, obj):\n return obj.dispatchedemail_set.count()", "title": "" }, { "docid": "6323c72fd63447e2fd0df01e0db66546", "score": "0.48163298", "text": "def count_applied(self, issuetype=ALL):\n return len(self.applied(issuetype))", "title": "" }, { "docid": "36901768cff546948a3feb1a3101bf6b", "score": "0.48080146", "text": "def info(self):\n info = {'total': None,\n 'recent': None,\n 'unseen': None,\n 'uidnext': None,\n 'uidvalidity': None}\n status, result = self.imap.status(\n utils.b('\"') + 
self.selected_folder_utf7 + utils.b('\"'),\n '(MESSAGES RECENT UIDNEXT UIDVALIDITY UNSEEN)'\n )\n if result:\n \"\"\"Sample response:\n '\"INBOX\" (MESSAGES 7527 RECENT 0 UIDNEXT 21264 UIDVALIDITY 2\n UNSEEN 1)'\n \"\"\"\n where = utils.b_to_str(result[0])\n messages = re.search('MESSAGES ([0-9]+)', where)\n\n if messages:\n info['total'] = int(messages.group(1))\n recent = re.search('RECENT ([0-9]+)', where)\n if recent:\n info['recent'] = int(recent.group(1))\n unseen = re.search('UNSEEN ([0-9]+)', where)\n if unseen:\n info['unseen'] = int(unseen.group(1))\n uidnext = re.search('UIDNEXT ([0-9]+)', where)\n if uidnext:\n info['uidnext'] = int(uidnext.group(1))\n uidvalidity = re.search('UIDVALIDITY ([0-9]+)', where)\n if uidvalidity:\n info['uidvalidity'] = int(uidvalidity.group(1))\n\n return info", "title": "" }, { "docid": "55a734d7e12b4212981260c17e016b77", "score": "0.47946528", "text": "def count(self):\n return len(self.emails)", "title": "" }, { "docid": "b3dbb4b5affc72fd54af21212c474bc7", "score": "0.47647718", "text": "def count_of(self, item_type): # pragma: no cover", "title": "" }, { "docid": "30a8957cb4d964399efb2bc7cbca3abe", "score": "0.47612786", "text": "def inbox_for(self, user):\n return self.filter(\n recipient=user,\n recipient_deleted__isnull=True,\n )", "title": "" }, { "docid": "bf0c6bb1d2a7a0dfbb3df54769436a58", "score": "0.4758008", "text": "def fetch_pms(self):\n\n valid = r\"[A-Za-z0-9_-]+\"\n\n for msg in self.reddit.inbox.unread():\n author = str(msg.author)\n valid_user = re.match(valid, author)\n if msg.subject == configuration.message_subject and valid_user:\n self.process_pm(msg.body, author, msg)", "title": "" }, { "docid": "9ac47f12fa1d6c103808dec0373df111", "score": "0.47378373", "text": "def active_message_count(self) -> float:\n return pulumi.get(self, \"active_message_count\")", "title": "" }, { "docid": "a3791da60794f16b6ac549da9d6cd079", "score": "0.47312135", "text": "def unread(self, _name: str, _char: int) -> int:\n return 0", "title": "" }, { "docid": "129f7aa85a3a1be00d8364fc53457ad4", "score": "0.47259942", "text": "def mbox(self):\r\n return self._mbox", "title": "" }, { "docid": "cffe78a4ab15610cd6d8cfe684bc1d1d", "score": "0.47194615", "text": "def get_email_count_by_name(df, type=None):\n counts_df = df \\\n .groupby(type)[type] \\\n .count() \\\n .to_frame() \\\n .rename(columns={type: \"count\"}) \\\n .reset_index() \\\n .sort_values(\"count\", ascending=False) \\\n .reset_index(drop=True)\n return counts_df", "title": "" }, { "docid": "ad99e8f1520827c0cd61e2c0d17ce452", "score": "0.4713228", "text": "def count(self, event_type=None):\n\n sets = self.csets\n if event_type in sets:\n return len(sets[event_type])\n else:\n return 0", "title": "" }, { "docid": "eb6119dfbe6070b712c8edbed150fdd4", "score": "0.4709117", "text": "def get(self, type_: EventType, user: discord.Member, channel: discord.Channel) -> int:\n key = self._make_tuple(type_, user, channel)\n try:\n return self.data[key]\n except KeyError:\n return 0", "title": "" }, { "docid": "7dfcdbc12d26f9c2725cc565c2e14bbf", "score": "0.46976158", "text": "def count_actions_in(evts, msg_type=None):\n cpt = 0\n try:\n for e in evts:\n if (msg_type == e['event_type']):\n cpt += 1\n elif msg_type is None:\n cpt += 1\n except Exception as ex:\n print('%s__ e=%s' % (ex.__repr__(), e))\n return cpt", "title": "" }, { "docid": "baf48302092ed509f648feb09b52d454", "score": "0.46896192", "text": "def mailbox_list(self, account_type=\"ex\", enabled=None, **kwargs):\n if enabled:\n 
kwargs[\"enabled\"] = True\n elif enabled == False: #explicitly test for false in case no param passed\n kwargs[\"enabled\"] = False\n path = \"customers/me/domains/%s/%s/mailboxes\" % (self.domain, account_type)\n data = self._call(self.host, path, kwargs)\n return data", "title": "" }, { "docid": "0a69844029167abe06ccfe74f1e769f8", "score": "0.46810246", "text": "def total_active_invites(self, module):\n total = 0\n for db_user in User.objects():\n inviter = db_user.inviter\n if inviter is not None:\n if inviter.user_id == self.user_id:\n if module.guild.get_member(db_user.user_id) is not None:\n total += 1\n return total", "title": "" }, { "docid": "11c1df28b0a7e663a33dfc44427467ec", "score": "0.46707955", "text": "def getmailIDs(subject):\n imapObj = imapclient.IMAPClient('imap.gmail.com', ssl=True)\n imapObj.login(address, pw)\n imapObj.select_folder('INBOX', readonly=False)\n UIds = imapObj.search(['SINCE', today, 'SUBJECT', subject, 'UNSEEN'])\n return imapObj, UIds", "title": "" }, { "docid": "6fb9683184faca3e511fb22b8ed91422", "score": "0.46661448", "text": "def do_count(self, type):\n return self.collection.count_documents()", "title": "" }, { "docid": "03d486efb1094c332a162ab7ee6c3513", "score": "0.46449926", "text": "def _checkInbox(self):\n inbox_labels = [u'INBOX', u'UNREAD']\n details = None\n\n messages = self._getNewMessages()\n for message in messages:\n details = self._getMessageDetails(msg_id=message['id'])\n\n if set(inbox_labels) - set(details['labelIds']):\n # Not a unread message which is in Inbox.\n # Just ignore it for now.\n continue\n\n headers = {}\n for raw_header in details['payload']['headers']:\n headers[raw_header['name'].lower()] = raw_header['value']\n\n self._checkMessage(headers)\n # Any message is removed so that we will not process it later.\n self._trash(msg_id=message['id'])\n\n if details:\n self._history_id = int(details['historyId'])\n self.setRegistryValue('historyID', value=self._history_id)", "title": "" }, { "docid": "98e0f7011ee31889cca4ab2955c579f2", "score": "0.4632578", "text": "def get_mailbox_usage_mailbox_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> List[\"models.MicrosoftGraphMailboxUsageMailboxCounts\"]\n cls = kwargs.pop('cls', None) # type: ClsType[List[\"models.MicrosoftGraphMailboxUsageMailboxCounts\"]]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_mailbox_usage_mailbox_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('[MicrosoftGraphMailboxUsageMailboxCounts]', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "65de810738c049a6114a292fa709afa2", "score": "0.46321726", "text": "async def get_moderation_count(self, guild_id, user_id):\n sql = \"\"\"\n SELECT COUNT(userid) FROM {}.moderation\n WHERE serverid = $1 AND userid = $2;\n \"\"\".format(self.schema)\n return await self.pool.fetchval(sql, guild_id, user_id)", "title": "" }, { "docid": "570ece8117c345ff8e59c5b1cca0fb67", "score": "0.46282303", "text": "def check_messages(hero):\n\n hero.account.inbox_alert = False", "title": "" }, { "docid": "60685d7924f0130d68d77f041abd41c2", "score": "0.462268", "text": "def count_types(self) -> int:\n return self._count_model(Type)", "title": "" }, { "docid": "595b9b3d723decc2d5110149e0c5c997", "score": "0.46218836", "text": "async def get_count(self, guild, message):\n count = 0\n message = await self.database.get_one_starboard_message(guild.id, message.id)\n if message is not None:\n count = message.count\n return count", "title": "" }, { "docid": "a3ca9bda47d543a0b2b528e7711a98bb", "score": "0.46188736", "text": "def get_folder_message_list(self, folder, unread_only = True):\n path = '/folder/' + folder\n\n if unread_only:\n path += '/unread'\n\n response, messages = self.send_request(path)\n\n return messages", "title": "" }, { "docid": "ba7e71ada2a15f23960cec9c159f2714", "score": "0.45996648", "text": "def test_get_inbox_messages(self):\n pass", "title": "" }, { "docid": "f2506c2bc1f59243ee51cf269ce18c8f", "score": "0.459716", "text": "def checkEmail(q):\n username = TPasswords.tickerbell_email\n password = TPasswords.tickerbell_email_pw\n imap = imaplib.IMAP4_SSL(\"imap.gmail.com\", '993')\n imap.login(username, password)\n while (1):\n status, messageCount = imap.select(\"INBOX\")\n if status != 'OK':\n print(\"Could not connect to gmail inbox\")\n return\n\n #get IDs of unread emails and put into a list named IDs\n #IDs SHOULD be in order oldest to newest\n status, IDs = imap.search(None, 'UnSeen')\n IDs = IDs[0].split()\n if len(IDs) > 0:\n for i, ID in enumerate(IDs):\n #IDs is now a list of ID strings\n 
IDs[i] = ID.decode('utf-8')\n for ID in IDs:\n status, msg = imap.fetch(ID, \"(RFC822)\")\n if status != 'OK':\n print(\"Email {0} could not be retrieved.\".format(ID))\n else:\n #### https://www.thepythoncode.com/article/reading-emails-in-python ####\n for response in msg:\n if isinstance(response, tuple):\n # parse a bytes email into a message object\n msg = email.message_from_bytes(response[1])\n # decode the email subject\n subject = decode_header(msg[\"Subject\"])[0][0]\n if isinstance(subject, bytes):\n # if it's a bytes, decode to str\n subject = subject.decode()\n # email sender\n from_ = msg.get(\"From\")\n from_ = from_.split()\n from_ = from_[len(from_)-1].strip('<>')\n return_username = from_.split('@')[0]\n address = from_.split('@')[1]\n #AUTHORIZING RECIPIENT\n if from_ not in TAlert.emails and from_ not in TAlert.phoneNumbers:\n print(\"Email sender {0} invalid personnel\".format(from_))\n break\n commands = []\n if msg.is_multipart():\n # iterate over email parts\n for part in msg.walk():\n # extract content type of email\n content_type = part.get_content_type()\n content_disposition = str(part.get(\"Content-Disposition\"))\n try:\n # get the email body\n body = part.get_payload(decode=True).decode()\n except:\n pass\n if content_type == \"text/plain\" and \"attachment\" not in content_disposition:\n # print text/plain emails and skip attachments\n for line in body.splitlines():\n commands.append(line)\n elif \"attachment\" in content_disposition:\n # download attachment\n filename = part.get_filename()\n if filename:\n filepath = os.path.join(os.getcwd(), filename)\n # download attachment and save it\n open(filepath, \"wb\").write(part.get_payload(decode=True))\n commands = handleEmailAttachment(filename)\n os.remove(filepath)\n else:\n # extract content type of email\n content_type = msg.get_content_type()\n # get the email body\n body = msg.get_payload(decode=True).decode()\n if content_type == \"text/plain\":\n # print only text email parts\n for line in body.splitlines():\n commands.append(line.lower().strip())\n #redirect stdout temporarily\n old_stdout = sys.stdout\n sys.stdout = mystdout = StringIO()\n for command in commands:\n if command == \"alert start\":\n q.put(('alert start'))\n elif command not in [\"alert start\", \"remote on\", \"quit\"]:\n TickerBell.handleInput(command)\n sys.stdout = old_stdout\n message = mystdout.getvalue()\n if from_ in TAlert.emails:\n TAlert.sendEmail(message, receiver)\n else:\n for i in range(0, int(len(message) / 160)):\n start = i * 160\n end = start + 159\n TAlert.sendEmail(message[start:end], from_)\n sleep(2)\n imap.close()\n imap.logout()", "title": "" }, { "docid": "bf41d3fdb0ecab5bbff0f376750afef6", "score": "0.4594744", "text": "def msg_queue_size(self):\n\n rtn = 0\n try:\n cursor = self.connection.cursor()\n qry = \"SELECT COUNT(ALL) FROM msg_queue\"\n cursor.execute(qry)\n val = cursor.fetchall()\n if len(val) < 1:\n rtn = 0\n rtn = str(val[0][0])\n except Exception as e:\n logging.error(\"msg_queue_size:\" + qry + \":\" + str(e))\n cursor.close()\n return str(rtn)", "title": "" }, { "docid": "4d042c1393aadc0ac7ade9df1673cf39", "score": "0.45942438", "text": "async def unread(client, args):\n\n def callback():\n \"\"\"Print unread count(s)\"\"\"\n\n # For each service\n counts = {}\n for service in client.unread_services:\n name, direct, indirect = service\n safe_name = html.escape(name)\n\n # If it's exactly the service we're looking for, just return the count\n if safe_name == args.services:\n count = direct\n if 
not args.direct:\n count += indirect\n print(count)\n return\n\n # If the service in included in the services we're looking for\n if args.services in (\"total\", \"all\") or safe_name in args.services:\n counts[safe_name] = direct\n if not args.direct:\n counts[safe_name] += indirect\n\n # Get total notifications\n if args.services == \"total\":\n print(sum(counts.values()))\n return\n\n # Finally, print each service notifications on a different line\n print(\n \"\\n\".join(\n f\"{name}: {count}\"\n for name, count in counts.items()\n )\n )\n\n # Do print counts and keep running if tail mode enabled\n callback()\n if args.tail:\n client.on_change(callback)\n await asyncio.get_running_loop().create_future()", "title": "" }, { "docid": "4b381f89c6e88954411bff9c596590d7", "score": "0.45915714", "text": "def getNumberLasers(self):\r\n self.message = lumencor_httpcommand(command ='GET CHMAP', ip=self.ip)\r\n if self.message['message'][0]=='A':\r\n return len(self.message['message'].split(' '))-2\r\n return 0", "title": "" }, { "docid": "eb5c778074f66998cd119da4ce236201", "score": "0.458805", "text": "def mboxlist(self):\n mbox_ret, mbox_raw = self.imap.list()\n subm_ret, subm_raw = self.imap.lsub()\n\n list_raw = (mbox_raw + subm_raw)\n\n if (mbox_ret, subm_ret) == (\"OK\", \"OK\"):\n mbox_list = self.parsemboxlist(list_raw)\n\n else:\n raise Exception(\"Server returned invalid response to list command\")\n\n return [x for x in mbox_list if self.mboxstat(x)]", "title": "" }, { "docid": "a076a55fbc5f7c395bfaef274d654f86", "score": "0.4568376", "text": "def nb_events_by_activity_type(self) -> dict:\n return self.count_events_by_activity_type()", "title": "" }, { "docid": "26afb981945bcb465956c2eddeebeec5", "score": "0.4562087", "text": "def get_entry_count(self):\n return len(self.mime_lookup)", "title": "" }, { "docid": "26afb981945bcb465956c2eddeebeec5", "score": "0.4562087", "text": "def get_entry_count(self):\n return len(self.mime_lookup)", "title": "" }, { "docid": "c6d6d0bf82d437efe5615332e841337f", "score": "0.45607117", "text": "def _getMessagesFromInbox(self):\n label_ids = ['INBOX']\n messages = []\n\n try:\n response = self._google.users().messages().list(\n userId='me',\n labelIds=label_ids,\n ).execute()\n\n if 'messages' in response:\n messages.extend(response['messages'])\n\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = self._google.users().messages().list(\n userId='me',\n labelIds=label_ids,\n pageToken=page_token,\n ).execute()\n messages.extend(response['messages'])\n\n return messages\n\n except errors.HttpError, error:\n self.log.error(\n '_getMessagesFromInbox: An error occurred: %s' % error)\n return []", "title": "" }, { "docid": "f2593b55f1e210cb832f9009ef2fcfb2", "score": "0.45593047", "text": "def get_inbox(self, inbox, auth):\n\n if inbox is None:\n inbox = self.__number\n\n return auth.messages.list(to=inbox)", "title": "" }, { "docid": "049ee2faf2e6ece5e5b59a2d3b807dc2", "score": "0.45558015", "text": "def test_get_inbox(self):\n user = User.objects.get(pk=1)\n inbox_messages = Message.objects.get_inbox_for(user)\n\n self.failUnlessEqual(len(inbox_messages), 1)\n self.failUnlessEqual(inbox_messages[0].body,\n 'Hello from your mother')", "title": "" }, { "docid": "52549237af009095a69f3bcf98fc55af", "score": "0.45504746", "text": "def count_actions(fname=None, msg_type=None):\n k = 0\n for evts in load_conv_events(fname):\n for e in evts:\n if e['event_type'] == msg_type or msg_type is None:\n k += 1\n return k", "title": "" 
}, { "docid": "b751d3c34db5866eceb5bd73f1aa54c9", "score": "0.45353124", "text": "def number_following(self, cr, uid, model_name=\"mail.thread\", context=None):\n user = self.pool.get('res.users').browse(cr, uid, uid, context=context)\n return self.pool.get('mail.followers').search(cr, uid, [('res_model', '=', model_name), ('partner_id', '=', user.partner_id.id)], count=True, context=context)", "title": "" }, { "docid": "0d4a3964d8c6e00c2748e922bdba6645", "score": "0.4528173", "text": "def increment_unread_requests(cls):\n cls.unread_requests_counter += 1\n return", "title": "" }, { "docid": "0d4a3964d8c6e00c2748e922bdba6645", "score": "0.4528173", "text": "def increment_unread_requests(cls):\n cls.unread_requests_counter += 1\n return", "title": "" }, { "docid": "c175e990570a55827198cf579b6bb12d", "score": "0.45063373", "text": "def count(self, key: str) -> int:\n if key == 'all':\n return len(self.data)\n elif key.title().replace('To', 'to') in ('Watching', 'Reading', 'Completed', 'On-Hold', 'Dropped', 'Plan to Watch', 'Plan to Read'):\n return len([i for i in self.data if i.my_status == key.title().replace('To', 'to')])\n return 0", "title": "" }, { "docid": "fb79963ae494c2b93bc349a7bf218250", "score": "0.4505438", "text": "def __len__(self):\n declared = self.channel.queue_declare(self.queue_name, passive=True)\n return declared.method.message_count", "title": "" }, { "docid": "4a762fb5ca3dce3480a2fa0019ea1093", "score": "0.44920695", "text": "def total_invites(self):\n total = 0\n for db_user in User.objects():\n inviter = db_user.inviter\n if inviter is not None:\n if inviter.user_id == self.user_id:\n total += 1\n return total", "title": "" }, { "docid": "c0c8fd5b9eda9543cf9180790382dbc8", "score": "0.44913486", "text": "def find_user_notifications(request):\n notifications = request.user.twitteruser.notification_set\n new_notification = 0\n for notice in notifications.get_queryset().all():\n if not notice.has_seen:\n new_notification += 1\n return {\"notification\": new_notification}", "title": "" }, { "docid": "1e86419874d7cf0b8b5356be7c6aae5d", "score": "0.44900954", "text": "def test_push_unread_count_group_by_room(self) -> None:\n # Carry out common push count tests and setup\n self._test_push_unread_count()\n\n # Carry out our option-value specific test\n #\n # This push should still only contain an unread count of 1 (for 1 unread room)\n self._check_push_attempt(7, 1)", "title": "" }, { "docid": "ba974c9b4142ba578b33d0d3f4ac83e3", "score": "0.4477009", "text": "def rooms_table_size(self):\n\n rtn = 0\n try:\n cursor = self.connection.cursor()\n qry = \"SELECT COUNT(ALL) FROM rooms\"\n cursor.execute(qry)\n val = cursor.fetchall()\n if len(val) < 1:\n rtn = 0\n rtn = int(val[0][0])\n except Exception as e:\n logging.error(\"ERROR: msg_queue_size(): \" + qry + \":\" + str(e))\n cursor.close()\n return int(rtn)", "title": "" }, { "docid": "07007717334ad495104321581ddb2f9f", "score": "0.44648585", "text": "def slacking_count(self, message_json):\n if message_json.get('type') != 'message':\n return\n if 'subtype' in message_json.keys():\n return\n if 'bot_id' in message_json.keys():\n return\n if message_json.get('user') in self.ignore_user_list:\n return\n\n self.slacking_dict[message_json.get('channel', '')][message_json.get('user', '')] += 1", "title": "" } ]
5fb757af84b6bfa9a340d6aa2823062e
Get liquor quantities and prices given our input.
[ { "docid": "9c076f0e7c9c00d70a4860a404aaf472", "score": "0.5931924", "text": "def calculate_liquor_quantities(\n config_dict: dict, user_dict: dict, rounding_method: str = CEILING\n) -> dict:\n # Calculate total required drinks\n total_required_drinks = user_dict[\"num_drinks\"]\n\n required_beer_drinks = (\n total_required_drinks * user_dict[\"percent_beer\"] / 100\n )\n required_wine_drinks = (\n total_required_drinks * user_dict[\"percent_wine\"] / 100\n )\n required_hard_liquor_drinks = (\n total_required_drinks * user_dict[\"percent_hard_liquor\"] / 100\n )\n\n if rounding_method == CEILING:\n round_fn = ceil\n elif rounding_method == FLOOR:\n round_fn = floor\n else:\n round_fn = round\n\n required_beer = round_fn(required_beer_drinks / DRINKS_PER_BEER)\n required_wine = round_fn(required_wine_drinks / DRINKS_PER_WINE)\n required_hard_liquor = round_fn(\n required_hard_liquor_drinks / DRINKS_PER_HARD_LIQUOR\n )\n\n # Calculate prices\n price_per_beer = float(config_dict[\"price_per_beer\"])\n price_per_wine = float(config_dict[\"price_per_wine\"])\n price_per_hard_liquor = float(config_dict[\"price_per_hard_liquor\"])\n\n beer_cost = required_beer * price_per_beer\n wine_cost = required_wine * price_per_wine\n hard_liquor_cost = required_hard_liquor * price_per_hard_liquor\n\n total_cost = beer_cost + wine_cost + hard_liquor_cost\n\n return {\n \"required_beer\": required_beer,\n \"required_wine\": required_wine,\n \"required_hard_liquor\": required_hard_liquor,\n \"beer_cost\": beer_cost,\n \"wine_cost\": wine_cost,\n \"hard_liquor_cost\": hard_liquor_cost,\n \"total_cost\": total_cost,\n }", "title": "" } ]
[ { "docid": "bbc54cc4d369fd04982582b0d78faad6", "score": "0.6300638", "text": "def get_subscription_prices():", "title": "" }, { "docid": "5686b248a3e2355aae9e165865fb32cf", "score": "0.6073003", "text": "def get_prices(self):\n\t\treturn self.prices", "title": "" }, { "docid": "e86dcc3479f124425c4e6a3a25182de7", "score": "0.60494536", "text": "def test_quantity_pricing(self):\n p = self.acme0001.get_price\n self.assertEqual(p(1), 10)\n self.assertEqual(p(4), 40)\n self.assertEqual(p(11), 82.5)\n self.assertEqual(p(23), 172.5)\n self.assertEqual(p(100), 350)\n\n p = self.acme0002.get_price\n self.assertEqual(p(0.5), 3.5)\n self.assertEqual(p(1), 7)\n self.assertEqual(p(2), 14)\n self.assertEqual(p(5), 35)\n self.assertEqual(p(45), 315)\n self.assertEqual(p(55), 68.75)", "title": "" }, { "docid": "9473619634de2341f7f013c43f725599", "score": "0.59383", "text": "def get_prices(self, symbols=[]):\n raise NotImplementedError('Method is required!')", "title": "" }, { "docid": "b329cd585b820b5bb7f1a72c552ad018", "score": "0.5909095", "text": "def simpleTrade3():\n price_list_eur_usd_h1 = store_price_into_memory(instrument=\"EUR_USD\", granularity=\"H1\")\n profit = 0.0\n for price in price_list_eur_usd_h1:\n if (price['closeBid'] - price['openAsk'] > 0):\n profit += (price['closeBid'] - price['openAsk'])\n else:\n profit += (price['openBid'] - price['closeAsk'])\n print(\"Profit of simple trade 3 for EUR_USD is %f\" % profit)\n\n price_list_usd_jpy_h1 = store_price_into_memory(instrument=\"USD_JPY\", granularity=\"H1\")\n profit = 0.0\n for price in price_list_usd_jpy_h1:\n if (price['closeBid'] - price['openAsk'] > 0):\n profit += (price['closeBid'] - price['openAsk'])\n else:\n profit += (price['openBid'] - price['closeAsk'])\n print(\"Profit of simple trade 2 for USD_JPY is %f\" % profit)", "title": "" }, { "docid": "ca991eda0de6cc4e833e05b083f3e8e4", "score": "0.58461964", "text": "def get_price_values(self):\n return self.market_prices", "title": "" }, { "docid": "7f177c2b0ebf60d91c51982a5f01e971", "score": "0.5773918", "text": "def get_Quandl_daily_data(ticker, start, end):\n prices = []\n symbol = format_ticker(ticker)\n valid_data_flag = True\n\n # Attempt to connect to Quandl API and retrieve price data\n try:\n data = quandl.get(\"WIKI/\" + symbol, start_date=start, end_date=end)\n except Exception, e:\n print \"Could not download QUANDL data: %s\" % e\n valid_data_flag = False\n prices = -1\n failed_data_symbols.append(ticker)\n\n if valid_data_flag and not data.empty:\n # Map the dataframe into a list of tuples for easy\n # database insertion\n rows_of_data = [list(x) for x in data.to_records(index=True)]\n\n for row in rows_of_data:\n # Format data and set precision. 
The tuple entries include Date, Open, High, Low,\n # Close, Adjusted Close, and Volume\n one_day_of_prices = [\"%.4f\" % row[1], \"%.4f\" % row[2],\n \"%.4f\" % row[3], \"%.4f\" % row[4], \"%.4f\" % row[11], row[5]]\n\n # Remove any nan values and include the date\n one_day_of_prices = remove_nan_values(one_day_of_prices)\n one_day_of_prices.insert(0, row[0].date())\n prices.append(one_day_of_prices)\n\n else:\n prices = -1\n failed_data_symbols.append(ticker)\n\n return prices", "title": "" }, { "docid": "81a760032fe32fda97f07038429049e1", "score": "0.5765804", "text": "def simpleTrade1():\n price_list_eur_usd_h1 = store_price_into_memory(instrument=\"EUR_USD\", granularity=\"H1\")\n profit = 0.0\n for price in price_list_eur_usd_h1:\n profit += (price['openBid'] - price['closeAsk'])\n print(\"Profit of simple trade 1 for EUR_USD is %f\" % profit)\n \n price_list_usd_jpy_h1 = store_price_into_memory(instrument=\"USD_JPY\", granularity=\"H1\")\n profit = 0.0\n for price in price_list_usd_jpy_h1:\n profit += (price['openBid'] - price['closeAsk'])\n print(\"Profit of simple trade 1 for USD_JPY is %f\" % profit)", "title": "" }, { "docid": "42a22b9bc2754adc0c53f8aa42b19db2", "score": "0.5757246", "text": "def simpleTrade2():\n price_list_eur_usd_h1 = store_price_into_memory(instrument=\"EUR_USD\", granularity=\"H1\")\n profit = 0.0\n for price in price_list_eur_usd_h1:\n profit += (price['closeBid'] - price['openAsk'])\n print(\"Profit of simple trade 2 for EUR_USD is %f\" % profit)\n\n price_list_usd_jpy_h1 = store_price_into_memory(instrument=\"USD_JPY\", granularity=\"H1\")\n profit = 0.0\n for price in price_list_usd_jpy_h1:\n profit += (price['closeBid'] - price['openAsk'])\n print(\"Profit of simple trade 2 for USD_JPY is %f\" % profit)", "title": "" }, { "docid": "9770dcfbabd3a84675a771f650f15809", "score": "0.57506967", "text": "def _getprices(self):\n base = join(self.site, 'search', self.region, self.search)\n\n # pylint: disable=protected-access\n encode_params = requests.models.RequestEncodingMixin._encode_params\n params = encode_params({\n 'bedrooms': self.bedrooms,\n 'max_price': self.maxprice,\n 'min_price': self.minprice,\n 'nh': self.neighborhood\n })\n\n urlpattern = '{}?s=%d&{}'.format(base, params)\n\n # create an iterator of all the URLs to query\n urls = (urlpattern % i for i in range(0, 2500, 100))\n\n # query pattern for prices of all n br rentals\n pattern = r'(?<=<span class=\"price\">\\$)([0-9]*?)' \\\n r'(?=</span> <span class=\"housing\">/ %dbr )' % self.bedrooms\n\n # query HTML for all 2500 rental market listings\n html = concurrentdownload(urls)\n\n # extract prices\n strings = re.findall(pattern, html)\n\n # convert list of strings into integers\n prices = [int(i) for i in strings]\n\n return prices", "title": "" }, { "docid": "23e6c581dab49a3c7fba53855c60fec9", "score": "0.57410276", "text": "def get_prices(skuids):\n # Create Query URL\n J_ID = ['J_'+skuid for skuid in skuids]\n append_id = ','.join(J_ID)\n request_url = _URL_PREFIX + 'pduid=' + _PDUID + '&skuids=' + append_id\n # Access to URL\n my_json = urllib2.urlopen(request_url).read()\n # Create return list\n p_prices = [float(item['p']) for item in json.loads(my_json)]\n return p_prices", "title": "" }, { "docid": "ce52cd2eef47519e3abb61c564e5505a", "score": "0.5634503", "text": "def get_price(pq):\n raw_price = pq(pq(\"span.price\")).text().strip().split(' ')\n price = Decimal(0)\n discount = Decimal(0)\n if raw_price:\n if len(raw_price) == 3:\n price = Decimal(raw_price[1])\n discounted = 
raw_price[2]\n discount = (Decimal(price) - Decimal(discounted)) / Decimal(price)\n else:\n price = raw_price[0][1:]\n\n return price, discount", "title": "" }, { "docid": "78fcf9d4ff677caa96222b1193ee265e", "score": "0.55975777", "text": "def _get_price_impl(self, t, inventory_h, price_h, price_scale, horizon, num_buyers):\n return", "title": "" }, { "docid": "4f4abebd82a976bc17db77e54c096a43", "score": "0.554711", "text": "def get_pricing(self, quantity=1, currency=None):\n if quantity <= 0:\n quantity = 1\n\n # TODO - Capacity for price comparison in different currencies\n currency = None\n\n # Currency scaler\n scaler = Decimal(1.0)\n\n part = self.get_part()\n\n ctx = {\n 'part': part,\n 'quantity': quantity,\n 'currency': currency,\n }\n\n if part is None:\n return ctx\n\n # Supplier pricing information\n if part.supplier_count > 0:\n buy_price = part.get_supplier_price_range(quantity)\n\n if buy_price is not None:\n min_buy_price, max_buy_price = buy_price\n\n min_buy_price /= scaler\n max_buy_price /= scaler\n\n min_unit_buy_price = round(min_buy_price / quantity, 3)\n max_unit_buy_price = round(max_buy_price / quantity, 3)\n\n min_buy_price = round(min_buy_price, 3)\n max_buy_price = round(max_buy_price, 3)\n\n if min_buy_price:\n ctx['min_total_buy_price'] = min_buy_price\n ctx['min_unit_buy_price'] = min_unit_buy_price\n\n if max_buy_price:\n ctx['max_total_buy_price'] = max_buy_price\n ctx['max_unit_buy_price'] = max_unit_buy_price\n\n # BOM pricing information\n if part.bom_count > 0:\n\n use_internal = InvenTreeSetting.get_setting('PART_BOM_USE_INTERNAL_PRICE', False)\n bom_price = part.get_bom_price_range(quantity, internal=use_internal)\n purchase_price = part.get_bom_price_range(quantity, purchase=True)\n\n if bom_price is not None:\n min_bom_price, max_bom_price = bom_price\n\n min_bom_price /= scaler\n max_bom_price /= scaler\n\n if min_bom_price:\n ctx['min_total_bom_price'] = round(min_bom_price, 3)\n ctx['min_unit_bom_price'] = round(min_bom_price / quantity, 3)\n\n if max_bom_price:\n ctx['max_total_bom_price'] = round(max_bom_price, 3)\n ctx['max_unit_bom_price'] = round(max_bom_price / quantity, 3)\n\n if purchase_price is not None:\n min_bom_purchase_price, max_bom_purchase_price = purchase_price\n\n min_bom_purchase_price /= scaler\n max_bom_purchase_price /= scaler\n if min_bom_purchase_price:\n ctx['min_total_bom_purchase_price'] = round(min_bom_purchase_price, 3)\n ctx['min_unit_bom_purchase_price'] = round(min_bom_purchase_price / quantity, 3)\n\n if max_bom_purchase_price:\n ctx['max_total_bom_purchase_price'] = round(max_bom_purchase_price, 3)\n ctx['max_unit_bom_purchase_price'] = round(max_bom_purchase_price / quantity, 3)\n\n # internal part pricing information\n internal_part_price = part.get_internal_price(quantity)\n if internal_part_price is not None:\n ctx['total_internal_part_price'] = round(internal_part_price, 3)\n ctx['unit_internal_part_price'] = round(internal_part_price / quantity, 3)\n\n # part pricing information\n part_price = part.get_price(quantity)\n if part_price is not None:\n ctx['total_part_price'] = round(part_price, 3)\n ctx['unit_part_price'] = round(part_price / quantity, 3)\n\n return ctx", "title": "" }, { "docid": "be10e44a8f6b4b5050cf4cb9b6886411", "score": "0.54704684", "text": "def get_price_range(store, inquiry):\n\n if store is None:\n raise Exception(\"Store is none\")\n if inquiry is None or inquiry is \"\":\n raise Exception(\"Inquiry cannot be empty\")\n # Request html from url and getting dom out of it\n page = 
requests.get(store.url)\n soup = BeautifulSoup(page.content, 'html.parser')\n\n # Looking for prices and names\n # inside dom object(soup)\n prices = soup.find_all(store.keyword_container,\n {store.keyword_attribute: store.keyword_name})\n names = soup.find_all(store.product_container,\n {store.product_attribute: store.product_name})\n\n # Converting dom data to integer prices\n converted_prices = get_converted_prices(prices)\n # Getting an object with minimal Levenshtein length\n couples = get_liveshtein_couples(names, inquiry)\n recommended_length = min(couples,\n key=lambda t: t[1])[1]\n # Filter price list to leave only those\n # items that possess recommended LL\n filtered_price_list = filter_price_list(\n converted_prices, recommended_length, couples\n )\n range = LogicalModule.determine_range(filtered_price_list)\n return {\"min\": range[\"min\"], \"max\": range[\"max\"]}", "title": "" }, { "docid": "ed99f9b16b5345bc5a3aa9165f3e5a47", "score": "0.54688627", "text": "def _current_price_extractor(cls, data):", "title": "" }, { "docid": "6fe18ecc8e046b8babfe0d4b53b2ec25", "score": "0.54566216", "text": "def _get_ticker_price(html):\n\n\tjsondata = _get_json_data(html)\n\n\treturn get_value(jsondata, 'context', 'dispatcher', 'stores', 'QuoteSummaryStore', 'financialData', 'currentPrice', 'raw')", "title": "" }, { "docid": "2bb2f261dd292cee4872d6af7e5b2655", "score": "0.54565066", "text": "def get_price(self):\n\n data = self.get_data()\n\n p = float(data['latestPrice'])\n c = float(data['changePercent'])\n o = float(data['open'])\n v = float(data['avgTotalVolume'])\n\n return (p, c, o, v)", "title": "" }, { "docid": "38a55b9a6a80e1079c952d4f105bca5a", "score": "0.543991", "text": "def get_prices(\n self,\n side: str = None,\n cryptocurrency: str = None,\n query_str: str = GET_PRICES,\n ) -> Dict[str, List[Dict[str, Any]]]:\n\n if side or cryptocurrency:\n _ = validator(side=side, currency=cryptocurrency)\n\n variables = {\"cryptocurrency\": cryptocurrency, \"side\": side}\n results = self.query(query_str, variables)\n\n return results", "title": "" }, { "docid": "80deda9d659a69d8f6c596ea6e10ed85", "score": "0.54078406", "text": "def sale_price(self, quantity=0):\n return self.list_price", "title": "" }, { "docid": "afad575df49b3ac2459eb8fe30340c0e", "score": "0.5392189", "text": "def equity_pricing_mapper(self, levels, i):\n\n # get the needed price ticker\n price_ticker = self.positions_df[i].loc['position', '', 'price_ticker']\n\n # price the equity\n return levels[1][price_ticker]", "title": "" }, { "docid": "73f235263a9bd44afd1fa94c2ad152dc", "score": "0.538348", "text": "def get_current_prices():\n current_buy_price = mt5.symbol_info_tick(CRYPTO)[2]\n current_sell_price = mt5.symbol_info_tick(CRYPTO)[1]\n return current_buy_price, current_sell_price", "title": "" }, { "docid": "05392ffb916eede049039f9aeaedab65", "score": "0.5383388", "text": "def get_price(self, tickers: Union[Ticker, Sequence[Ticker]], fields: Union[PriceField, Sequence[PriceField]],\n start_date: datetime, end_date: datetime = None, frequency: Frequency = None) -> Union[\n None, PricesSeries, PricesDataFrame, QFDataArray]:\n pass", "title": "" }, { "docid": "ff43f2b54399e92aa6c50568927215de", "score": "0.537178", "text": "def list_all_prices(limit=10):", "title": "" }, { "docid": "28593c206554731f89af6eff9706b2fa", "score": "0.5352844", "text": "def prices():\n data = request(\"GET\", \"/api/v1/ticker/allPrices\")\n return {d[\"symbol\"]: d[\"price\"] for d in data}", "title": "" }, { "docid": 
"e7c3cda410aae3f85e26aecc88ee64bc", "score": "0.5311293", "text": "def getPrices(hour, min_market_cap=1E9):\n\n # Obtain observations from database\n conn = getDbConnection()\n cur = conn.cursor()\n\n # Get list of coins that were over the minimum market cap at any point in the past hr hours\n earliest_date = (pd.Timestamp.utcnow() - pd.Timedelta(f'+{hour}:00:00')).strftime('%Y-%m-%d %H:%M:%S')\n sql = f\"SELECT DISTINCT symbol FROM coinmarketcap WHERE timestamp >= '{earliest_date}' \" + \\\n f\"AND market_cap_USD >= '{min_market_cap}'\" # + \" AND symbol IN ('ETH', 'BTC', 'LTC')\"\n cur.execute(sql)\n symbols = cur.fetchall()\n symbol_str = \"(\" + ','.join([\"'\" + s['symbol'] + \"'\" for s in symbols]) + \")\"\n # E.g. \"('BTC','ETH','XRP','BCH','ADA','LTC','XEM','NEO')\"\n\n # Get price history for those coins\n sql = f\"SELECT timestamp, symbol, name, price_usd FROM coinmarketcap \" + \\\n f\"WHERE symbol IN {symbol_str} AND timestamp >= '{earliest_date}' \" + \\\n f\"ORDER BY timestamp\"\n\n cur.execute(sql)\n rows = cur.fetchall()\n conn.close()\n\n # Remove seconds and microseconds\n for row in rows:\n row['timestamp'] = row['timestamp'].replace(second=0, microsecond=0)\n\n df_price = pd.DataFrame(rows)\n df_price = df_price.pivot(values='price_usd', columns='symbol', index='timestamp')\n\n # Fill missing observations\n df_price.fillna(method='backfill', axis='rows', inplace=True)\n df_price.fillna(method='ffill', axis='rows', inplace=True)\n\n return df_price", "title": "" }, { "docid": "22fa1f56730be67d261f5ed9c5081206", "score": "0.5306567", "text": "def prices(self) -> str | None:\n if self.resource_type in [\n LearningResourceType.course.value,\n LearningResourceType.program.value,\n ]:\n return list(\n set(flatten([run.prices for run in self.runs.all() if run.prices]))\n )\n else:\n return 0", "title": "" }, { "docid": "4c1f6e523ddb41c95e1439f378b9e839", "score": "0.53063154", "text": "def _get_material_price_info(self, universe):\n return {\n \"sell_prices\": get_material_sell_prices(universe),\n \"buy_prices\": get_material_buy_prices(universe),\n **get_best_material_prices(universe)\n }", "title": "" }, { "docid": "c3f5f4788cb3c6fad858a6eea1af9e08", "score": "0.52664113", "text": "def get_commodities_prices(\n commodities_service: CommoditiesService = Depends(),\n filter: str | None = None,\n) -> list[CommodityPrice]:\n return commodities_service.get_commodities_prices(filter)", "title": "" }, { "docid": "d70d090fdfa511ae7976d97bf01c0443", "score": "0.5248547", "text": "def request_stock_price(company_ticker, limit ,apikey): \n base_url = 'https://financialmodelingprep.com/api/v3/quote'\n query_url = base_url+'/'+company_ticker+'?apikey='+apikey+'&limit='+str(limit)\n return requests.get(query_url)", "title": "" }, { "docid": "20d22af85558e6e5c669905941f5e49a", "score": "0.5246374", "text": "def volume(p, qs):\n balance_volume = float(qs[0])\n coefficient = qs[1]\n coefficient = float(coefficient[:coefficient.find('P')])\n balance_volume = balance_volume + coefficient * balance[0]\n print('Q =', format(balance_volume, '.2f'))\n balance.append(balance_volume)", "title": "" }, { "docid": "0f6636cbb879df3e3490a32e5f630cb5", "score": "0.5244099", "text": "def user_inputs():\n \n my_stocks={\n 'Cannibus':['APHA','KSHB','CBWTF','CRON','sndl','cgc','ammj','kern'],\n 'Drones':['NVDA','AMBA','AVAV','nkla'],\n 'Energy':['PBD','FAN'],\n 'Healthcare':['ADMS','cern','kern','cslt','nspr','ontx'],\n 'my_positions':['imgn','kern','ammj','apha','sndl','spy'],\n 
'current_paper_trades':['spy','slp','adxs','plug','fcx','cron','cgc']\n }\n\n\n inputs={\n 'stock_list':my_stocks['Cannibus'],\n 'start_date':'2020',\n 'stop_date':'2021'}\n \n retrieve_OHLC_data(inputs)", "title": "" }, { "docid": "b6cfebaa2e18f59e064baa9280af8e0d", "score": "0.5240301", "text": "def get_price(item_code):\n print(\"Get price\")", "title": "" }, { "docid": "f0dcb66e2768ea3d2e828800c39bef59", "score": "0.52319324", "text": "def get_prices(self, prices):\n result = []\n for price in prices:\n result.append({\n 'currency': price.currency.contents[0],\n 'value': price.value.contents[0],\n })\n return result", "title": "" }, { "docid": "90d1b721b2c8dcb9b5830a1203e9ea57", "score": "0.5231881", "text": "def get_product_data(self, substitute):\n query = BDD.tables.Product.select().where(BDD.tables.Product.name == substitute)\n for row in query:\n name = row.name\n nutriscore = row.nutriscore\n store = row.stores\n url = row.url\n return [name, nutriscore, store, url]", "title": "" }, { "docid": "b20ee14f34ec1bde57e3d5d617560946", "score": "0.521412", "text": "def get_price_q(self, coef_rt = 0.1/1e3):\n ind_q_one = pd.Index(self.pv_q_pu_ser).get_loc(1)\n self.pv_rating = self.pv_q_var_ser[ind_q_one] \n \n self.pv_price_dollar_ser = coef_rt * (self.pv_rating - \n (self.pv_rating**2 - self.pv_q_var_ser.pow(2)).pow(0.5))", "title": "" }, { "docid": "aa7fbc373010891248fe4d2ceb8b5241", "score": "0.5213727", "text": "def get_specific_price(item_name):\n price=''\n \"\"\"\n if item_name == \"ULD\":\n url = \"https://www.uniqlo.com/au/store/men-ultra-light-down-jacket-4193800003.html\"\n search_tag = 'itemprop=\"price\">'\n elif item_name == \"Milk\":\n url = \"https://www.woolworths.com.au/shop/productdetails/405010/pauls-farmhouse-gold-milk\"\n search_tag = \n elif item_name == \"Tuna\":\n url = \"https://www.woolworths.com.au/shop/productdetails/19736/john-west-tuna-light-in-springwater\"\n search_tag = '\"price\":'\n elif item_name == \"TwiningsEnglishBreakfast\":\n url = \"https://www.woolworths.com.au/shop/productdetails/829281/twinings-extra-strong-english-breakfast-tea-bags\"\n search_tag = '\"price\":'\n elif item_name == \"BakerFlour\":\n url = \"https://www.woolworths.com.au/shop/productdetails/91304/defiance-white-baker-s-flour\"\n search_tag = '\"price\":'\n elif item_name == \"TwiningsPeppermint\":\n url = \"https://www.woolworths.com.au/shop/productdetails/888487/twinings-pure-peppermint-tea-bags\"\n search_tag = '\"price\":'\n\n \"\"\"\n search_tag, url = urlDict[item_name]\n\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n #create a response object from url\n r = requests.get(url, headers=headers)\n pos = (r.text.find(search_tag))+len(search_tag)\n query_char = '0'\n count = 0\n while query_char in '.0123456789':\n count+=1\n query_char = r.text[pos+(count-1)]\n price = price + query_char\n print(\"Pulled price: $\",price[:-1])\n return price[:-1]", "title": "" }, { "docid": "1ef04e9d388eb6449460aae39f7bfb2b", "score": "0.52125347", "text": "def electricity_price():\n date = kl.date_now()\n prices = dt.energy.select_data_of_date('electricity_price', date)\n return prices", "title": "" }, { "docid": "da2a2eaf02c1ed17aef1a413bb9fb8b3", "score": "0.5205443", "text": "def _calc_LT(self):\r\n _ = self.px_spec; n, keep_hist = _.nsteps, _.keep_hist\r\n _ = self.ref; S0, vol, q = _.S0, _.vol, _.q\r\n _ = self; T, K, rf_r, net_r, sCP = _.T, _.K, _.rf_r, _.net_r, _.signCP\r\n _ = 
self._LT_specs(); u, d, p, df, dt = _['u'], _['d'], _['p'], _['df_dt'], _['dt']\r\n\r\n # Get the Price based on Binomial Tree\r\n S = S0 * d ** np.arange(n, -1, -1) * u ** np.arange(0, n + 1) # terminal stock prices\r\n O = np.maximum(sCP * (S - K), 0) # terminal option payouts\r\n\r\n # The end node of tree\r\n S_tree = (tuple([float(s) for s in S]),) # use tuples of floats (instead of numpy.float)\r\n O_tree = (tuple([float(o) for o in O]),)\r\n\r\n for i in range(n, 0, -1):\r\n left = n - i + 1 # Left number until duration\r\n tleft = left * dt # Time left until duration\r\n d1 = (0 + (rf_r + vol ** 2 / 2) * tleft) / (vol * np.sqrt(tleft)) # d1 and d2 from BS model\r\n d2 = d1 - vol * np.sqrt(tleft)\r\n\r\n # payoff of not shout\r\n O = df * ((1 - p) * O[:i] + p * O[1:]) # prior option prices (@time step=i-1)\r\n S = d * S[1:i+1] # spot tree: prior stock prices (@time step=i-1)\r\n\r\n # payoff of shout\r\n shout = sCP * S / np.exp(q * tleft) * Util.norm_cdf(sCP * d1) - \\\r\n sCP * S / np.exp(rf_r * tleft) * Util.norm_cdf(sCP * d2) + \\\r\n sCP * (S - K) / np.exp(rf_r * tleft)\r\n\r\n # final payoff is the maximum of shout or not shout\r\n payout = np.maximum(shout, 0)\r\n O = np.maximum(O, payout)\r\n\r\n S_tree = (tuple([float(s) for s in S]),) + S_tree\r\n O_tree = (tuple([float(o) for o in O]),) + O_tree\r\n\r\n out = O_tree[0][0]\r\n\r\n self.px_spec.add(px=float(Util.demote(O)), sub_method='binomial tree; Hull Ch.13',\r\n ref_tree=S_tree if keep_hist else None, opt_tree=O_tree if keep_hist else None)\r\n\r\n return self", "title": "" }, { "docid": "23a162bc4c5f19bfb447eb0e08d6dbfd", "score": "0.5203774", "text": "def valuation(company_ticker,limit,apikey): \n a_is = request_data(company_ticker,limit,'income-statement',apikey)\n a_is_d = get_statement(a_is)\n a_bs = request_data(company_ticker,limit,'balance-sheet-statement',apikey)\n a_bs_d = get_statement(a_bs)\n a_cf = request_data(company_ticker,limit,'cash-flow-statement',apikey)\n a_cf_d = get_statement(a_cf)\n q = request_stock_price(company_ticker,limit,apikey).json()\n d ={}\n d['Price'] = [q[0]['price']]\n d['Market Cap'] = [d['Price'][0] * a_is_d['Weighted Average Shares Outstanding'][0]]\n d['EV'] = [d['Market Cap'][0] + a_bs_d['Total Liabilities'][0] - a_bs_d['Cash & Cash Equivalents'][0]]\n d['Sales'] = [a_is_d['Revenue'][0]]\n d['EBIDTA'] = [a_is_d['EBIDTA'][0]]\n d['EBIT'] = [d['EBIDTA'][0] - a_cf_d['Depreciation & Amortization'][0]]\n d['Earnings'] = [a_is_d['Net Income'][0]]\n d['EV/Sales'] = [d['EV'][0]/d['Sales'][0]]\n d['EV/EBIDTA'] = [d['EV'][0]/d['EBIDTA'][0]]\n d['EV/EBIT'] = [d['EV'][0]/d['EBIT'][0]]\n d['P/E'] = [d['Price'][0]/a_is_d['EPS'][0]]\n return d", "title": "" }, { "docid": "05051a11d880e8fc90ee46a70e32e29e", "score": "0.52010596", "text": "def get_prices_within_interval(self):\n prices = self._get_historical_multi_prices()\n if prices != None:\n self._insert_multiple_docs(prices, self.indices['price'])", "title": "" }, { "docid": "5c893b104e41cb33eb8cda3c4d0b0ca6", "score": "0.5200672", "text": "def retrieve_price(price_id):", "title": "" }, { "docid": "211fba6b424a3afc1916c88b2397cf95", "score": "0.518821", "text": "def retrieve_OHLC_data(inputs):\n global stock_dict,symbol,CURRENT_DATE\n stock_dict=dict()\n\n print(f'INPUTS---------->{inputs}')\n \n for i in inputs['stock_list']:\n # send_results_to_file({'TRADE DATA FOR------>':i.upper()},'a')\n symbol = i.upper() \n stock_name=symbol\n\n stock =pdr.get_data_yahoo(symbol,interval=\"d\")[inputs['start_date']:inputs['stop_date']]\n\n 
print(f'---------> {stock.head()}')\n \n if len(stock)<180:\n print(len(stock))\n continue\n stock_dict[i]=stock\n\n CURRENT_DATE=stock.iloc[[-1]].index.date[0].strftime(\"%Y-%m-%d\") ## This is the last (most recent trading date)\n print(CURRENT_DATE)\n\n GenerateIndicators(stock_dict[i])", "title": "" }, { "docid": "b50da2b0b269ae4874faaee409259ee5", "score": "0.5181738", "text": "def get_trading_list(self, buy_symbols=None, cutoff=-1):\n if buy_symbols is None:\n buy_symbols = self.get_buy_symbols(cutoff)\n buy_symbols.sort(key=lambda s: s[1], reverse=True)\n trading_list = []\n market_proportion = self.get_market_proportion(cutoff)\n for i in range(len(buy_symbols)):\n symbol, weight, side = buy_symbols[i]\n proportion = min(1 / min(len(buy_symbols), MAX_STOCK_PICK),\n MAX_PROPORTION) if i < MAX_STOCK_PICK else 0\n trading_list.append((symbol, proportion, side))\n if len(buy_symbols) < 1 / MAX_PROPORTION and market_proportion > 0:\n trading_list.append(\n ('TQQQ', (1 - len(buy_symbols) * MAX_PROPORTION) * market_proportion, 'long'))\n return trading_list", "title": "" }, { "docid": "df9b1feeb8ef5de8e61b47cf33c3ee76", "score": "0.5178557", "text": "def search_prices(query, limit=10):", "title": "" }, { "docid": "1ccba943821dcae3597d75fc2f4231e9", "score": "0.51729286", "text": "def helper_fuel_prices (self, ** kwargs):\n price_cord, price_pellet, prices_diesel, price_propane = \\\n self.load_fuel_prices()\n\n ## todo add statment to fix for nuqisu.. and Barrow\n price_ng = 0\n\n return {\n 'community': {\n 'diesel prices': prices_diesel,\n 'propane price': price_propane,\n 'cordwood price': price_cord,\n 'pellet price': price_pellet,\n 'natural gas price': price_ng,\n }\n }", "title": "" }, { "docid": "1f6685e8f65dca56264d022d8488325f", "score": "0.5172824", "text": "def get_sellers_and_prices_of_product(self, product_number: str) -> pd.DataFrame:\n\n soup = self._get_soup(self.base_url + \"product.php?p=\" + product_number)\n\n info_raw = None\n for script in soup.find_all(\"script\"):\n try:\n if script[\"data-script\"] == \"globals\":\n info_raw = script\n break\n except:\n pass\n\n if info_raw:\n info_split = info_raw.string.split(',{\"__typename\":\"Price\",')[1:]\n info_split = [seller for seller in info_split if \"variants\" in seller[-30:]]\n\n out = pd.DataFrame()\n for info in info_split:\n temp_df = pd.DataFrame()\n current = json.loads(\"{\" + info)\n try:\n temp_df[\"price_id\"] = [current[\"id\"]]\n temp_df[\"seller_product_name\"] = [current[\"name\"]]\n temp_df[\"stock_status\"] = [current[\"stock\"][\"status\"]]\n temp_df[\"price_incl_shipping\"] = [current[\"price\"][\"inclShipping\"]]\n temp_df[\"price_excl_shipping\"] = [current[\"price\"][\"exclShipping\"]]\n temp_df[\"seller_id\"] = [current[\"store\"][\"id\"]]\n temp_df[\"seller_name\"] = [current[\"store\"][\"name\"]]\n temp_df[\"seller_rating\"] = [\n current[\"store\"][\"userReviewSummary\"][\"rating\"]\n ]\n out = out.append(temp_df)\n except:\n pass\n\n out[\"product_number\"] = product_number\n out = out.astype(\n {\n \"price_id\": np.int64,\n \"seller_product_name\": object,\n \"stock_status\": object,\n \"price_incl_shipping\": np.float64,\n \"price_excl_shipping\": np.float64,\n \"seller_id\": np.int64,\n \"seller_name\": object,\n \"seller_rating\": np.float64,\n \"product_number\": np.int64,\n }\n )\n\n return out\n else:\n return None", "title": "" }, { "docid": "a962144077f5f765f8af2b7c24b434b6", "score": "0.51489997", "text": "def get_data(f='data/stock_prices.csv'):\n d=pandas.read_csv(f)\n 
x,y=get_raw_xy(d)\n yy=get_vpo(y)\n return x[:-1], yy[:-1]", "title": "" }, { "docid": "a1a9328620ce624188131e5b03920b82", "score": "0.51466906", "text": "def consult_api_price(name, vs_, ut):\n \n hour_ut = 3600\n num_hours = 72\n interval = hour_ut * num_hours\n \n from_before = int(ut - interval)\n to_before = int(ut)\n\n from_after = int(ut)\n to_after = int(ut + interval)\n\n before = cg.get_coin_market_chart_range_by_id(name.lower(), vs_, from_before, to_before)['prices']\n\n after = cg.get_coin_market_chart_range_by_id(name.lower(), vs_, from_after, to_after)['prices']\n \n price_before = [b[1] for b in before]\n price_after= [a[1] for a in after]\n\n if not price_before or not price_before or (len(price_before) < 10) or (len(price_after) < 10):\n return None\n else: \n return {'before':price_before,'after':price_after}", "title": "" }, { "docid": "e3c289f0dade3c568bd97174ecb2c163", "score": "0.51427746", "text": "async def fetch_markets(self, params={}):\n response = await self.publicGetProducts(params)\n #\n # {\n # \"meta\":{\"after\":null, \"before\":null, \"limit\":100, \"total_count\":81},\n # \"result\":[\n # # the below response represents item from perpetual market\n # {\n # \"annualized_funding\":\"5.475000000000000000\",\n # \"is_quanto\":false,\n # \"ui_config\":{\n # \"default_trading_view_candle\":\"15\",\n # \"leverage_slider_values\":[1,3,5,10,25,50],\n # \"price_clubbing_values\":[0.001,0.005,0.05,0.1,0.5,1,5],\n # \"show_bracket_orders\":false,\n # \"sort_priority\":29,\n # \"tags\":[]\n # },\n # \"basis_factor_max_limit\":\"0.15\",\n # \"symbol\":\"P-LINK-D-151120\",\n # \"id\":1584,\n # \"default_leverage\":\"5.000000000000000000\",\n # \"maker_commission_rate\":\"0.0005\",\n # \"contract_unit_currency\":\"LINK\",\n # \"strike_price\":\"12.507948\",\n # \"settling_asset\":{\n # # asset structure\n # },\n # \"auction_start_time\":null,\n # \"auction_finish_time\":null,\n # \"settlement_time\":\"2020-11-15T12:00:00Z\",\n # \"launch_time\":\"2020-11-14T11:55:05Z\",\n # \"spot_index\":{\n # # index structure\n # },\n # \"trading_status\":\"operational\",\n # \"tick_size\":\"0.001\",\n # \"position_size_limit\":100000,\n # \"notional_type\":\"vanilla\", # vanilla, inverse\n # \"price_band\":\"0.4\",\n # \"barrier_price\":null,\n # \"description\":\"Daily LINK PUT options quoted in USDT and settled in USDT\",\n # \"insurance_fund_margin_contribution\":\"1\",\n # \"quoting_asset\":{\n # # asset structure\n # },\n # \"liquidation_penalty_factor\":\"0.2\",\n # \"product_specs\":{\"max_volatility\":3,\"min_volatility\":0.3,\"spot_price_band\":\"0.40\"},\n # \"initial_margin_scaling_factor\":\"0.0001\",\n # \"underlying_asset\":{\n # # asset structure\n # },\n # \"state\":\"live\",\n # \"contract_value\":\"1\",\n # \"initial_margin\":\"2\",\n # \"impact_size\":5000,\n # \"settlement_price\":null,\n # \"contract_type\":\"put_options\", # put_options, call_options, move_options, perpetual_futures, interest_rate_swaps, futures, spreads\n # \"taker_commission_rate\":\"0.0005\",\n # \"maintenance_margin\":\"1\",\n # \"short_description\":\"LINK Daily PUT Options\",\n # \"maintenance_margin_scaling_factor\":\"0.00005\",\n # \"funding_method\":\"mark_price\",\n # \"max_leverage_notional\":\"20000\"\n # },\n # # the below response represents item from spot market\n # {\n # \"position_size_limit\": 10000000,\n # \"settlement_price\": null,\n # \"funding_method\": \"mark_price\",\n # \"settling_asset\": null,\n # \"impact_size\": 10,\n # \"id\": 32258,\n # \"auction_finish_time\": 
null,\n # \"description\": \"Solana tether spot market\",\n # \"trading_status\": \"operational\",\n # \"tick_size\": \"0.01\",\n # \"liquidation_penalty_factor\": \"1\",\n # \"spot_index\": {\n # \"config\": {\"quoting_asset\": \"USDT\", \"service_id\": 8, \"underlying_asset\": \"SOL\"},\n # \"constituent_exchanges\": [\n # {\"exchange\": \"binance\", \"health_interval\": 60, \"health_priority\": 1, \"weight\": 1},\n # {\"exchange\": \"huobi\", \"health_interval\": 60, \"health_priority\": 2, \"weight\": 1}\n # ],\n # \"constituent_indices\": null,\n # \"description\": \"Solana index from binance and huobi\",\n # \"health_interval\": 300,\n # \"id\": 105,\n # \"impact_size\": \"40.000000000000000000\",\n # \"index_type\": \"spot_pair\",\n # \"is_composite\": False,\n # \"price_method\": \"ltp\",\n # \"quoting_asset_id\": 5,\n # \"symbol\": \".DESOLUSDT\",\n # \"tick_size\": \"0.000100000000000000\",\n # \"underlying_asset_id\": 66\n # },\n # \"contract_type\": \"spot\",\n # \"launch_time\": \"2022-02-03T10:18:11Z\",\n # \"symbol\": \"SOL_USDT\",\n # \"disruption_reason\": null,\n # \"settlement_time\": null,\n # \"insurance_fund_margin_contribution\": \"1\",\n # \"is_quanto\": False,\n # \"maintenance_margin\": \"5\",\n # \"taker_commission_rate\": \"0.0005\",\n # \"auction_start_time\": null,\n # \"max_leverage_notional\": \"10000000\",\n # \"state\": \"live\",\n # \"annualized_funding\": \"0\",\n # \"notional_type\": \"vanilla\",\n # \"price_band\": \"100\",\n # \"product_specs\": {\"kyc_required\": False, \"max_order_size\": 2000, \"min_order_size\": 0.01, \"quoting_precision\": 4, \"underlying_precision\": 2},\n # \"default_leverage\": \"1.000000000000000000\",\n # \"initial_margin\": \"10\",\n # \"maintenance_margin_scaling_factor\": \"1\",\n # \"ui_config\": {\n # \"default_trading_view_candle\": \"1d\",\n # \"leverage_slider_values\": [],\n # \"price_clubbing_values\": [0.01, 0.05, 0.1, 0.5, 1, 2.5, 5],\n # \"show_bracket_orders\": False,\n # \"sort_priority\": 2,\n # \"tags\": []\n # },\n # \"basis_factor_max_limit\": \"10000\",\n # \"contract_unit_currency\": \"SOL\",\n # \"strike_price\": null,\n # \"quoting_asset\": {\n # \"base_withdrawal_fee\": \"10.000000000000000000\",\n # \"deposit_status\": \"enabled\",\n # \"id\": 5,\n # \"interest_credit\": False,\n # \"interest_slabs\": null,\n # \"kyc_deposit_limit\": \"100000.000000000000000000\",\n # \"kyc_withdrawal_limit\": \"10000.000000000000000000\",\n # \"min_withdrawal_amount\": \"30.000000000000000000\",\n # \"minimum_precision\": 2,\n # \"name\": \"Tether\",\n # \"networks\": [\n # {\"base_withdrawal_fee\": \"25\", \"deposit_status\": \"enabled\", \"memo_required\": False, \"network\": \"ERC20\", \"variable_withdrawal_fee\": \"0\", \"withdrawal_status\": \"enabled\"},\n # {\"base_withdrawal_fee\": \"1\", \"deposit_status\": \"enabled\", \"memo_required\": False, \"network\": \"BEP20(BSC)\", \"variable_withdrawal_fee\": \"0\", \"withdrawal_status\": \"enabled\"},\n # {\"base_withdrawal_fee\": \"1\", \"deposit_status\": \"disabled\", \"memo_required\": False, \"network\": \"TRC20(TRON)\", \"variable_withdrawal_fee\": \"0\", \"withdrawal_status\": \"disabled\"}\n # ],\n # \"precision\": 8,\n # \"sort_priority\": 1,\n # \"symbol\": \"USDT\",\n # \"variable_withdrawal_fee\": \"0.000000000000000000\",\n # \"withdrawal_status\": \"enabled\"\n # },\n # \"maker_commission_rate\": \"0.0005\",\n # \"initial_margin_scaling_factor\": \"2\",\n # \"underlying_asset\": {\n # \"base_withdrawal_fee\": \"0.000000000000000000\",\n # 
\"deposit_status\": \"enabled\",\n # \"id\": 66,\n # \"interest_credit\": False,\n # \"interest_slabs\": null,\n # \"kyc_deposit_limit\": \"0.000000000000000000\",\n # \"kyc_withdrawal_limit\": \"0.000000000000000000\",\n # \"min_withdrawal_amount\": \"0.020000000000000000\",\n # \"minimum_precision\": 4,\n # \"name\": \"Solana\",\n # \"networks\": [\n # {\"base_withdrawal_fee\": \"0.01\", \"deposit_status\": \"enabled\", \"memo_required\": False, \"network\": \"SOLANA\", \"variable_withdrawal_fee\": \"0\", \"withdrawal_status\": \"enabled\"},\n # {\"base_withdrawal_fee\": \"0.01\", \"deposit_status\": \"enabled\", \"memo_required\": False, \"network\": \"BEP20(BSC)\", \"variable_withdrawal_fee\": \"0\", \"withdrawal_status\": \"enabled\"}\n # ],\n # \"precision\": 8,\n # \"sort_priority\": 7,\n # \"symbol\": \"SOL\",\n # \"variable_withdrawal_fee\": \"0.000000000000000000\",\n # \"withdrawal_status\": \"enabled\"\n # },\n # \"barrier_price\": null,\n # \"contract_value\": \"1\",\n # \"short_description\": \"SOL-USDT spot market\"\n # },\n # ],\n # \"success\":true\n # }\n #\n markets = self.safe_value(response, 'result', [])\n result = []\n for i in range(0, len(markets)):\n market = markets[i]\n type = self.safe_string(market, 'contract_type')\n if type == 'options_combos':\n continue\n # settlingAsset = self.safe_value(market, 'settling_asset', {})\n quotingAsset = self.safe_value(market, 'quoting_asset', {})\n underlyingAsset = self.safe_value(market, 'underlying_asset', {})\n settlingAsset = self.safe_value(market, 'settling_asset')\n productSpecs = self.safe_value(market, 'product_specs', {})\n baseId = self.safe_string(underlyingAsset, 'symbol')\n quoteId = self.safe_string(quotingAsset, 'symbol')\n settleId = self.safe_string(settlingAsset, 'symbol')\n id = self.safe_string(market, 'symbol')\n numericId = self.safe_integer(market, 'id')\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n settle = self.safe_currency_code(settleId)\n callOptions = (type == 'call_options')\n putOptions = (type == 'put_options')\n moveOptions = (type == 'move_options')\n spot = (type == 'spot')\n swap = (type == 'perpetual_futures')\n future = (type == 'futures')\n option = (callOptions or putOptions or moveOptions)\n strike = self.safe_string(market, 'strike_price')\n expiryDatetime = self.safe_string(market, 'settlement_time')\n expiry = self.parse8601(expiryDatetime)\n contractSize = self.safe_number(market, 'contract_value')\n amountPrecision = None\n if spot:\n amountPrecision = self.parse_number(self.parse_precision(self.safe_string(productSpecs, 'underlying_precision'))) # seems inverse of 'impact_size'\n else:\n # other markets(swap, futures, move, spread, irs) seem to use the step of '1' contract\n amountPrecision = self.parse_number('1')\n linear = (settle == base)\n optionType = None\n symbol = base + '/' + quote\n if swap or future or option:\n symbol = symbol + ':' + settle\n if future or option:\n symbol = symbol + '-' + self.yymmdd(expiry)\n if option:\n type = 'option'\n letter = 'C'\n optionType = 'call'\n if putOptions:\n letter = 'P'\n optionType = 'put'\n elif moveOptions:\n letter = 'M'\n optionType = 'move'\n symbol = symbol + '-' + strike + '-' + letter\n else:\n type = 'future'\n else:\n type = 'swap'\n state = self.safe_string(market, 'state')\n result.append({\n 'id': id,\n 'numericId': numericId,\n 'symbol': symbol,\n 'base': base,\n 'quote': quote,\n 'settle': settle,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': settleId,\n 
'type': type,\n 'spot': spot,\n 'margin': None if spot else False,\n 'swap': swap,\n 'future': future,\n 'option': option,\n 'active': (state == 'live'),\n 'contract': not spot,\n 'linear': None if spot else linear,\n 'inverse': None if spot else not linear,\n 'taker': self.safe_number(market, 'taker_commission_rate'),\n 'maker': self.safe_number(market, 'maker_commission_rate'),\n 'contractSize': contractSize,\n 'expiry': expiry,\n 'expiryDatetime': expiryDatetime,\n 'strike': self.parse_number(strike),\n 'optionType': optionType,\n 'precision': {\n 'amount': amountPrecision,\n 'price': self.safe_number(market, 'tick_size'),\n },\n 'limits': {\n 'leverage': {\n 'min': None,\n 'max': None,\n },\n 'amount': {\n 'min': self.parse_number('1'),\n 'max': self.safe_number(market, 'position_size_limit'),\n },\n 'price': {\n 'min': None,\n 'max': None,\n },\n 'cost': {\n 'min': self.safe_number(market, 'min_size'),\n 'max': None,\n },\n },\n 'info': market,\n })\n return result", "title": "" }, { "docid": "0cf88a7da49ed55ad70e037771d0dc15", "score": "0.5137139", "text": "def get_strike_prices(self,symbol=\"\"):\n \n # Safety first!\n if not utils.check(symbol):\n return []\n \n # Format\n symbol = symbol.upper()\n \n # Assemble URL\n url = self.endpoints['base'] + 'market/options/strikes.json'\n data = { 'symbol':symbol }\n \n # Create HTTP Request objects\n auth = self.create_auth()\n results = requests.get(url,params=data,auth=auth).json()\n \n # Convert to floats\n return [float(x) for x in results['response']['prices']['price']]", "title": "" }, { "docid": "914104c1baa8832d82af4cd1f46942de", "score": "0.5134123", "text": "def stocks():\n user_shares = float(repeat_input('Number of Shares: ', 'Must be positive', 'float', lambda i: float(i) > 0))\n user_purchase_price = float(repeat_input('Purchase Price ($): ', 'Must be positive', 'float',\n lambda i: float(i) > 0))\n user_sell_price = float(repeat_input('Sell Price ($): ', 'Must be positive', 'float', lambda i: float(i) > 0))\n user_buy_commission = float(repeat_input('Buy Commission (if none, put 0): ',\n 'Must be positive',\n 'float',\n lambda i: float(i) >= 0\n ))\n user_sell_commission = float(repeat_input('Sell Commission (if none, put 0): ',\n 'Must be positive',\n 'float',\n lambda i: float(i) >= 0))\n print()\n results(user_shares, user_sell_commission, user_buy_commission,\n user_sell_price, user_purchase_price)", "title": "" }, { "docid": "d01ce0034257aaefa39ce9a325380829", "score": "0.5127959", "text": "def _get_price_impl(self, t, inventory_h, price_h, price_scale, horizon, num_buyers):\n return self.constant_price", "title": "" }, { "docid": "74514d18ff01c1ad7ff3fc8a95ce12fa", "score": "0.5125321", "text": "def quantities(self):\n return self._quantities", "title": "" }, { "docid": "aeb17f3895cb6e4e752287e73db05a10", "score": "0.5119884", "text": "def test_get_all_pricing_component_values(self):\n pass", "title": "" }, { "docid": "eac608815b4e0f3dffffa27ed9330081", "score": "0.5117004", "text": "def holding_value(self):\n stock_values = sum([\n Stock.objects.get(id=res['stock']).latest_quote() * res['q_s']\n for res in\n self.trades.values('stock').annotate(q_s=models.Sum('quantity'))\n ])\n bucket_values = sum([\n InvestmentBucket.objects.get(id=res['stock']).value_on() * res['q_s']\n for res in\n self.buckettrades.values('stock').annotate(q_s=models.Sum('quantity'))\n ])\n return stock_values + bucket_values", "title": "" }, { "docid": "71911136b4cf527af3b106cdf9ca037d", "score": "0.5080445", "text": "def load_price(tic):\n \n 
alpha_vantage = 'https://www.alphavantage.co/query?function=TIME_SERIES_MONTHLY_ADJUSTED&symbol='\\\n + tic +'&apikey='+ demo+ '&datatype=csv'\n \n raw_price = pd.read_csv(alpha_vantage)\n raw_price['timestamps'] = pd.to_datetime(raw_price.iloc[:, 0], format = '%Y-%m-%d')\n qt_return = raw_price.sort_values(by = ['timestamps'])['adjusted close'].pct_change(periods = 2)\n \n return_tb = pd.DataFrame(data= qt_return.values, columns= ['Quarterly_return'], index= raw_price['timestamps'][::-1]).dropna()\n \n return return_tb", "title": "" }, { "docid": "060b6796473a774c7afa8e6a26060a3c", "score": "0.50734067", "text": "def calc(s:float,stockPP:float,stockSP:float,bcomm:float)->(float,float,float,float,float):\n tPrice = s * stockPP\n commPriceBought = tPrice * bcomm\n soldStock = s * stockSP\n commPriceSold = soldStock * bcomm\n ProfitLoss = (soldStock - commPriceSold) - (tPrice + commPriceBought)\n return(tPrice,commPriceBought,soldStock,commPriceSold,ProfitLoss)", "title": "" }, { "docid": "c25e447e857b26c1235769ed6d271424", "score": "0.5071181", "text": "def price(d, s):\n q1 = float(d[0])\n q2 = float(s[0])\n Q = q1 + q2\n p1 = float(s[1][:s[1].find('P')])\n p2 = float(d[1][:d[1].find('P')])\n P = p1 + p2\n balance_price = (-Q) / P\n balance.append(balance_price)\n print('P =', format(balance_price, '.2f'))", "title": "" }, { "docid": "3ed319508c31ea554c9f73f117ba7651", "score": "0.5063464", "text": "def scrap_prices(self, models_to_query) -> list:\n\t\tif len(self.models) == 0:\n\t\t\treturn None\n\n\t\t# Now, for each model we will get the prices from each URL\n\t\tdf_prices = []\n\t\tdf_not_found = []\n\t\tfor k in self.models.keys():\n\t\t\ttry:\n\t\t\t\tmodel = self.models[k]\n\t\t\t\tdf_prices_aux = self.scrap_price_from_model(model)\n\n\t\t\t\t# Add the price to the main prices list\n\t\t\t\tif len(df_prices) == 0:\n\t\t\t\t\tdf_prices = copy.copy(df_prices_aux)\n\t\t\t\telse:\n\t\t\t\t\tdf_prices = (df_prices + copy.copy(df_prices_aux))\n\t\t\t\t\t#df_prices = pd.concat([df_prices, copy.copy(df_prices_aux)])\n\n\t\t\texcept Exception as e:\n\t\t\t\tprint(\"[ERROR] - Error scraping the price's model from ACARA for model: {\"+str(model[\"MODEL\"])+\"} - Error : {\"+str(e)+\"}\")\n\n\t\tprint(\"[INFO ] - Total number of prices found: {}\".format(len(df_prices)))\n\t\t#print(\"[INFO ] - Total number of prices not-found: {}\".format(len(df_not_found)))\n\n\t\tself.prices = copy.copy(df_prices)\n\t\tself.prices_not_found = copy.copy(df_not_found)\n\n\t\tdel df_prices\n\t\tdel df_not_found\n\n\t\treturn self.prices, self.prices_not_found", "title": "" }, { "docid": "fbcc8dcc882a59227ebf2c03cf0300e0", "score": "0.5060853", "text": "async def get_spot_prices(\n ticker: Optional[str] = \"USD\", max_length=3\n): # can also use EUR, GBP and JPY, etc.\n try:\n r = requests.get(\n f\"https://api.coinbase.com/v2/prices/spot?currency={ticker}\", timeout=15\n )\n if ticker not in tickers:\n raise HTTPException(status_code=404, detail=\"Invalid currency\")\n data = r.json()\n return data\n # Handle errors\n except requests.exceptions.RequestException:\n # return \"Error: {}\".format(e)\n raise HTTPException(status_code=500, detail=\"Error connecting, retry later\")", "title": "" }, { "docid": "714a7beaf478344ddaca8c21832ad100", "score": "0.5047118", "text": "async def fetch_funding_rates(self, symbols: Optional[List[str]] = None, params={}):\n await self.load_markets()\n symbols = self.market_symbols(symbols)\n request = {\n 'contract_types': 'perpetual_futures',\n }\n response = await 
self.publicGetTickers(self.extend(request, params))\n #\n # {\n # \"result\": [\n # {\n # \"close\": 30600.5,\n # \"contract_type\": \"perpetual_futures\",\n # \"funding_rate\": \"0.00602961\",\n # \"greeks\": null,\n # \"high\": 30803.0,\n # \"low\": 30265.5,\n # \"mark_basis\": \"-0.45601594\",\n # \"mark_price\": \"30600.10481568\",\n # \"oi\": \"469.9190\",\n # \"oi_change_usd_6h\": \"2226314.9900\",\n # \"oi_contracts\": \"469919\",\n # \"oi_value\": \"469.9190\",\n # \"oi_value_symbol\": \"BTC\",\n # \"oi_value_usd\": \"14385640.6802\",\n # \"open\": 30458.5,\n # \"price_band\": {\n # \"lower_limit\": \"29067.08312627\",\n # \"upper_limit\": \"32126.77608693\"\n # },\n # \"product_id\": 139,\n # \"quotes\": {\n # \"ask_iv\": null,\n # \"ask_size\": \"965\",\n # \"best_ask\": \"30600.5\",\n # \"best_bid\": \"30599.5\",\n # \"bid_iv\": null,\n # \"bid_size\": \"196\",\n # \"impact_mid_price\": null,\n # \"mark_iv\": \"-0.44931641\"\n # },\n # \"size\": 1226303,\n # \"spot_price\": \"30612.85362773\",\n # \"symbol\": \"BTCUSDT\",\n # \"timestamp\": 1689136597460456,\n # \"turnover\": 37392218.45999999,\n # \"turnover_symbol\": \"USDT\",\n # \"turnover_usd\": 37392218.45999999,\n # \"volume\": 1226.3029999999485\n # },\n # ],\n # \"success\":true\n # }\n #\n rates = self.safe_value(response, 'result', [])\n result = self.parse_funding_rates(rates)\n return self.filter_by_array(result, 'symbol', symbols)", "title": "" }, { "docid": "583e98b110f738a8e7f798e68a3aabaa", "score": "0.5042051", "text": "def fetch_quandl(symbol):\n\n ## return a Pandas dataframe, rename columne if needed (df.rename(columns = {'old': 'new'}, inplace=True))\n # new_cols = ['new1', 'new2', 'new3']\n # df.columns = new_cols\n # \n # override column\n # df = pf.read_csv('file', names=new_cols, header=0)\n # df.drop('unused_col', axis=1, inplace=True)\n ## lower case\n ###############################################################################\n # Format symbol to quandl\n # quandl format is 5-digits with leading zeros\n ###############################################################################\n symbol = symbol.lstrip(\"0\").zfill(5)\n\n ###############################################################################\n # Fetch data\n ###############################################################################\n api_key = os.getenv(\"QUANDL_API_KEY\")\n url = 'https://www.quandl.com/api/v3/datasets/XHKG/' + symbol + '/data.json?api_key=' + api_key\n\n try:\n response = requests.get(url) # pylint: disable=invalid-name\n\n except ConnectionError as error:\n print('error when accessing Quandl API: ', error)\n exit(1)\n\n fetched = response.content.decode('utf-8')\n tmp_dict = json.loads(fetched) # convert to dict\n column_names = tmp_dict['dataset_data']['column_names'] #\n quotes = tmp_dict['dataset_data']['data'] #\n quotes = np.array(quotes) # convert to numpy\n\n # remove record with volume is zero\n valids = quotes[:, 5] != 0 # valid if volume is non-zero\n quotes = quotes[valids] # extract & keep ONLY the valid rows\n\n return quotes", "title": "" }, { "docid": "e8525d2ee8e55e76de91cc981c9a0565", "score": "0.5041846", "text": "def _get_price_ext(self):\n price_ext = 0.0\n if not self.history_lock:\n if self.qtype == self.MATERIAL and self.material:\n price_ext = self.sqft * self.material.unit_price_ext * \\\n self.units\n\n elif self.qtype == self.PRODUCT and self.product:\n price_ext = self.product.price_ext * self.units\n\n elif self.qtype == self.SERVICE and self.service:\n price_ext = 
self.service.price_ext * self.units\n\n elif self.qtype == self.ADJUSTMENT and self.adjustment:\n price_ext = self.adjustment.price_ext * self.units\n else:\n price_ext = self.locked_price_ext\n\n return price_ext", "title": "" }, { "docid": "dd4d3061a70eabae5fa74c8ba9c63e3e", "score": "0.5037283", "text": "def get_stop_price(self, is_buy):\n raise NotImplementedError", "title": "" }, { "docid": "b0811a707c6227201bfc54a0c110bc5c", "score": "0.5028178", "text": "def getLatestPrices():\n \n conn = ur.urlopen(\"https://api.coinmarketcap.com/v1/ticker/?limit=20\") # assuming BTC, ETH, and LTC will be in top 20 coins\n raw_data = conn.read()\n coin_data = json.loads(raw_data.decode(\"utf8\"))\n print(\"Successfully got price data from CoinMarketCap\")\n\n interested_ids = { \"bitcoin\", \"ethereum\", \"litecoin\" }\n filtered_coins = [Coin(coin) for coin in coin_data if coin[\"id\"] in interested_ids]\n \n return filtered_coins", "title": "" }, { "docid": "3922f55b0e1200dd43bd61a1fc8d33a1", "score": "0.50174063", "text": "def q3(data):\n qty_index = 0\n for types in data[0]:\n\tif types==\"BOTTLE QTY\":\n\t\tbreak\n\tqty_index += 1\n\n ven_index = 0\n for types in data[0]:\n\tif types==\"STORE\":\n\t\tbreak\n\tven_index += 1\n\t\n sales = dict()\n\n for line in data[1:]:\n\tif line[ven_index] in sales:\n\t\tupdate = int(sales[line[ven_index]])+int(line[qty_index])\n\t\tsales[line[ven_index]] = update\n\telse:\n\t\tsales[line[ven_index]] = int(line[qty_index])\n\t\n\n # Try using dictionaries for this question, and make use of the sorted function available for list and dictionaries\n # https://docs.python.org/2/tutorial/datastructures.html\n \n return max(sales, key=sales.get)", "title": "" }, { "docid": "53cb58049346508c8225977c8a2a62d4", "score": "0.50118756", "text": "def get_demand(self, prices):\n # Solution to the optimal budget constrained demand set of CES utilities\n # can be found Lagrangian methods analytically.\n c = (self.rho/ self.rho - 1)\n num = self.budget *((np.power(self.valuation, 1 - c) * np.power(prices, c - 1)))\n denom = np.sum((np.power(self.valuation, 1 - c) * np.power(prices, c )))\n demand = num/denom\n \n assert demand.shape[0] == self.valuation.shape[0]\n return demand", "title": "" }, { "docid": "f9f9c5e3a6c0fd2ac3cecd46c4d99821", "score": "0.5011563", "text": "def get_bid(self, bid_type='short', EPS=1e-4):\n t = self.time\n load = self.load[t:].copy().astype(float)\n pb = self.expected_price_buy[t:].copy().astype(float)\n ps = self.expected_price_sell[t:].copy().astype(float)\n\n\n # Gets a list of all the commitments applicable\n N = self.load.shape[0]\n commitments = {}\n for ii in range(t, N + 1):\n commitments[ii - t] = self.commitments.get(ii, None)\n\n accumulated_bids = []\n ## Solves the first battery usage\n r = self.find_optimal_step(load, pb, ps, commitments) # battery usage\n x = r[2][0]\n q = x / self.eff_c if x > 0 else x * self.eff_d # Energy seen from outside the battery\n q += load[0] #* self.resolution\n q_0 = q\n buying = (q > 0) or (q == 0 and load[0] > 0)\n p = pb[0] if q > 0 else ps[0]\n if not np.allclose(q, 0):\n accumulated_bids.append([np.abs(q), p, buying])\n\n if bid_type == 'long':\n\n if buying: # Case in which user is buying by default\n future_prices = set(p_ for p_ in pb if p_ < pb[0])\n future_prices = sorted([x for x in future_prices], reverse=True)\n else:\n pbs = np.hstack([ps, pb])\n future_prices = set(p_ for p_ in pbs if p_ > ps[0])\n future_prices = sorted([x for x in future_prices], reverse=False)\n\n for fp in 
future_prices[:5]:\n pb_1 = pb.copy()\n ps_1 = ps.copy()\n if buying:\n pb_1[0] = fp - EPS # Force solution to be in different range\n if pb_1[0] < ps_1[0]:\n ps_1[0] = pb_1[0]\n r = self.find_optimal_step(load, pb_1, ps_1, commitments) #\n else:\n ps_1[0] = fp + EPS\n if ps_1[0] > pb_1[0]:\n pb_1[0] = ps_1[0]\n r = self.find_optimal_step(load, pb_1, ps_1, commitments) #\n x = r[2][0] # first battery usage\n q = x / self.eff_c if x > 0 else x * self.eff_d\n q += load[0] #* self.resolution\n if not np.allclose(q, 0) and ((buying and q > 0) or (not buying and q < 0)):\n accumulated_bids.append([np.abs(q), fp, buying])\n\n bids_sorted = sorted(accumulated_bids, key=itemgetter(1, 0, 2), reverse=True)\n return accumulated_bids", "title": "" }, { "docid": "da3b3ba5cd8b32768d895eedc73b27e2", "score": "0.4988485", "text": "def get_price(self, polygon):\n price = ProductPriceCalculator.get_price(\n pricing_instance=self,\n polygon=polygon\n )\n\n if price is None:\n return None, None\n\n if self.min_price and price < self.min_price:\n return self.min_price, self.base_fee\n\n # if max_price is reached, will force customer ask for a quote\n if self.max_price and price > self.max_price:\n return None, None\n\n return price, self.base_fee", "title": "" }, { "docid": "7a1c2501fbbb790292c468155bebde71", "score": "0.49833792", "text": "def get_daily_historic_data(ticker, start_date = datetime.datetime(2000, 1, 1), end_date = datetime.date.today()):\n\n #Construct the Yahoo URL with the correct integer query parameters\n\n #Try connecting to Yahoo Finance to obtain the data\n #On failure, print an error message\n try:\n table = quandl.get('WIKI/' + ticker, start_date = start_date, end_date = end_date, paginate = True)\n #data = quandl.get_table('MER/F1', ticker = 'ABT', date = { 'gte': end_date, 'lte': start_date })\n prices = table.drop(['Adj. Volume', 'Adj. Open','Adj. High','Adj. 
Low','Ex-Dividend','Split Ratio'], axis = 1)\n prices.reset_index(inplace = True)\n\n except Exception as e:\n print(\"Could not download Quandl data: %s\" % e)\n\n return prices", "title": "" }, { "docid": "74b70121330ca5acb701d7fe274233ca", "score": "0.49719843", "text": "def get_prices():\n base_url = 'https://min-api.cryptocompare.com/data/histohour?tsym=USD&limit=2000&aggregate=24&fsym='\n db = DatabaseWorker()\n rows = db.get_rows_without_prices()\n\n current_date = datetime.datetime.utcnow()\n for row in rows:\n coin = row[COIN_INDEX]\n coin_symbol = coin[coin.find(\"(\")+1:coin.find(\")\")] # https://stackoverflow.com/questions/4894069/regular-expression-to-return-text-between-parenthesis\n\n\n event_date = row[EVENT_DATE_INDEX]\n posted_date = row[POSTED_DATE_INDEX]\n\n response = urllib.request.urlopen(base_url + coin_symbol)\n data = response.read().decode(\"utf-8\")\n record = {'buy_low': None,\n 'buy_high': None,\n 'sell_high': None,\n 'sell_date': None,\n 'high_btc': None,\n 'late_sell_low': None,\n 'late_sell_high': None}\n for day in json.loads(data)['Data']:\n\n if (day['time'] == posted_date):\n # initial buys\n record['buy_high'] = day['high']\n record['buy_low'] = day['low']\n elif (day['time'] > posted_date and day['time'] <= event_date):\n # find date with highest sell and add the date, and high for that day\n if (not record['sell_high'] or day['high'] > record['sell_high']):\n record['sell_high'] = day['high']\n record['sell_date'] = day['time']\n elif (day['time'] == event_date + 86400):\n # if you did not sell before the event, this is the price the day after\n record['late_sell_high'] = day['high']\n record['late_sell_low'] = day['low']\n elif (day['time'] > event_date + 86400):\n # break the loop since we are done with the days we need\n break\n\n db.update(row[ID_INDEX], record)\n\n\n\n #print(current_date)", "title": "" }, { "docid": "c9b8c24c81c3f1ae49e08491e19aa411", "score": "0.49709427", "text": "def getPrice(self):\n return self.getFieldVal(self.PRICE)", "title": "" }, { "docid": "8002f468d325f68c877dfe472fff2f23", "score": "0.49706075", "text": "async def fetch_my_trades(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n request = {\n # 'product_ids': market['id'], # comma-separated\n # 'contract_types': types, # comma-separated, futures, perpetual_futures, call_options, put_options, interest_rate_swaps, move_options, spreads\n # 'start_time': since * 1000,\n # 'end_time': self.microseconds(),\n # 'after', # after cursor for pagination\n # 'before', # before cursor for pagination\n # 'page_size': limit, # number of records per page\n }\n market = None\n if symbol is not None:\n market = self.market(symbol)\n request['product_ids'] = market['numericId'] # accepts a comma-separated list of ids\n if since is not None:\n request['start_time'] = str(since) + '000'\n if limit is not None:\n request['page_size'] = limit\n response = await self.privateGetFills(self.extend(request, params))\n #\n # {\n # \"meta\":{\n # \"after\":null,\n # \"before\":null,\n # \"limit\":10,\n # \"total_count\":2\n # },\n # \"result\":[\n # {\n # \"commission\":\"0.008335000000000000\",\n # \"created_at\":\"2020-11-16T19:07:19Z\",\n # \"fill_type\":\"normal\",\n # \"id\":\"e7ff05c233a74245b72381f8dd91d1ce\",\n # \"meta_data\":{\n # \"effective_commission_rate\":\"0.0005\",\n # \"order_price\":\"16249\",\n # \"order_size\":1,\n # \"order_type\":\"market_order\",\n # \"order_unfilled_size\":0,\n # 
\"trading_fee_credits_used\":\"0\"\n # },\n # \"order_id\":\"152999629\",\n # \"price\":\"16669\",\n # \"product\":{\n # \"contract_type\":\"perpetual_futures\",\n # \"contract_unit_currency\":\"BTC\",\n # \"contract_value\":\"0.001\",\n # \"id\":139,\n # \"notional_type\":\"vanilla\",\n # \"quoting_asset\":{\"minimum_precision\":2,\"precision\":6,\"symbol\":\"USDT\"},\n # \"settling_asset\":{\"minimum_precision\":2,\"precision\":6,\"symbol\":\"USDT\"},\n # \"symbol\":\"BTCUSDT\",\n # \"tick_size\":\"0.5\",\n # \"underlying_asset\":{\"minimum_precision\":4,\"precision\":8,\"symbol\":\"BTC\"}\n # },\n # \"product_id\":139,\n # \"role\":\"taker\",\n # \"side\":\"sell\",\n # \"size\":1\n # }\n # ],\n # \"success\":true\n # }\n #\n result = self.safe_value(response, 'result', [])\n return self.parse_trades(result, market, since, limit)", "title": "" }, { "docid": "fea1d00141bfc7be8429a5d0e9da39a1", "score": "0.49679303", "text": "def get_subscription_price(subscription_price_id):", "title": "" }, { "docid": "b496f471ca8dc42cb6491740cfae55fe", "score": "0.4967371", "text": "def getPrices (stocklist: list, Tstart = datetime(1999,1,1), Tend = datetime.now()):\n df = None\n for stock in stocklist:\n bars = YahooData.getOHLC(stock, Tstart, Tend)\n\n values = bars[\"adjclose\"].values\n index = bars.index\n part = pd.DataFrame (data=values, index=index, columns=[stock])\n\n if df is None:\n df = part\n else:\n df = df.merge(part, left_index=True, right_index=True)\n\n return df", "title": "" }, { "docid": "db82fd30be9ce2d6eccab9741a28591f", "score": "0.49530578", "text": "def getInterestingStocks():\n #Get a list of all volatile stock for today\n activeStocks1 = \"https://finance.yahoo.com/most-active/?offset=0&count=100\"\n page1 = requests.get(activeStocks1)\n soup1 = BeautifulSoup(page1.content, 'html.parser')\n rows1 = soup1.find_all('tr', class_=\"simpTblRow\")\n\n activeStocks2= \"https://finance.yahoo.com/most-active/?count=100&offset=100\"\n page2 = requests.get(activeStocks2)\n soup2 = BeautifulSoup(page2.content, 'html.parser')\n rows2 = soup2.find_all('tr', class_=\"simpTblRow\")\n\n stocks = []\n for row in rows1:\n stocks.append( \n {\"ticker\" : row.find_all('a')[0].get_text(),\n \"pctChange\" : float(row.find_all('td')[4].get_text().strip(\"%+-\")),\n \"volume\" : float(row.find_all('td')[5].get_text().strip(\"M\"))\n } \n )\n\n for row in rows2:\n stocks.append( \n {\"ticker\" : row.find_all('a')[0].get_text(),\n \"pctChange\" : float(row.find_all('td')[4].get_text().strip(\"%+-\")),\n \"volume\" : float(row.find_all('td')[5].get_text().strip(\"M\"))\n } \n )\n\n sortedStocksChange = sorted(\n stocks, reverse=True, \n key=lambda item: item[\"pctChange\"])\n\n topStockChanges = []\n for index in range(0,20):\n\n topStockChanges.append(sortedStocksChange[index])\n\n sortedStocksVolume = sorted(\n stocks, reverse=True, \n key=lambda item: item[\"volume\"])\n\n topStockVolume = []\n for index in range(0,20):\n\n topStockVolume.append(sortedStocksVolume[index])\n\n return (topStockChanges, topStockVolume)", "title": "" }, { "docid": "6a2891c031106d801bf5e19b033619ef", "score": "0.49521703", "text": "def get_stock(stock : dict) -> dict:", "title": "" }, { "docid": "99c5f7509e49d4970c88e99448333f9c", "score": "0.49521378", "text": "def getCompanyWithLeastVolatility(stock_list):\n return None", "title": "" }, { "docid": "5ed851c0618b7475fffc1fbf71d5beb9", "score": "0.4950093", "text": "def select_stocks(self):\n selected_stocks = list()\n\n for s in self.symbols:\n resp = 
requests.get('https://api.robinhood.com/quotes/' + s + '/')\n if resp.status_code != 200:\n # This means something went wrong.\n print('error ' + str(resp.status_code))\n continue\n resp_json = resp.json()\n\n # populate values\n last_trade = resp_json['last_trade_price']\n prev_close = resp_json['previous_close']\n ask_price = resp_json['ask_price']\n bid_price = resp_json['bid_price']\n ask_volume = resp_json['ask_size']\n bid_volume = resp_json['bid_size']\n extended_close = resp_json['last_extended_hours_trade_price']\n\n # standard metrics\n price_progress = float(last_trade) - float(prev_close)\n bid_ask_spread_percent = (float(ask_price) - float(bid_price))/float(ask_price)\n bid_ask_volume_spread = float(ask_volume) - float(bid_volume)\n\n # integrate stocktwits functions\n messages = self.scraper.ScrapeMessages(s)\n rating = 0\n for message in messages[s]:\n rating += self.analyzer.AnalyzeSentiment(message['body'])\n\n # the magic formula\n momentum = price_progress * bid_ask_volume_spread * bid_ask_spread_percent * 0.5 * rating + rating\n\n if rating < 0 and momentum > 0:\n momentum *= -1\n\n if momentum > 2:\n selected_stocks.append(s)\n\n\n print s\n print price_progress\n print bid_ask_spread_percent\n print bid_ask_volume_spread\n print rating\n print momentum\n print '\\n'\n # print selected_stocks\n return selected_stocks", "title": "" }, { "docid": "c392f6a1003170d16a8501905a64eb45", "score": "0.49498737", "text": "def retrieve_OHLC_data(inputs):\n global stock_dict,symbol\n stock_dict=dict()\n \n for i in inputs['stock_list']:\n # send_results_to_file({'TRADE DATA FOR------>':i.upper()},'a')\n symbol = i.upper() \n stock_name=symbol\n stock =pdr.get_data_yahoo(symbol)[inputs['start_date']:inputs['stop_date']]\n stock_dict[i]=stock\n\n GenerateIndicators(stock_dict[i])", "title": "" }, { "docid": "f2dd0443f198bb12b8c10bd3516ec05a", "score": "0.49488226", "text": "def test_get_pricing_component_value(self):\n pass", "title": "" }, { "docid": "a23fe4ffc5cd6b1a55a1dab204329195", "score": "0.49476483", "text": "def product_min_prices(self) -> List[str]:\n return [element.text for element in self.driver.find_elements(*self.PRODUCT_MIN_PRICE_LOC)]", "title": "" }, { "docid": "78e97ecaf8a54019064eda6a691b4f5a", "score": "0.4945088", "text": "def pricer(spot, strike, tau, rate, vola, steps=N_STEPS):\n\n delta_t = tau / steps\n u = np.exp(vola*np.sqrt(delta_t))\n d = 1 / u\n p = (np.exp(rate*delta_t) - d) / (u - d)\n disc_factor = np.exp(-rate*tau)\n \n tmp_arr = np.arange(steps+1)\n backward = spot * np.power(u, steps-tmp_arr) * np.power(d, tmp_arr) - strike\n backward[backward < 0] = 0\n p_star = (np.exp(rate*delta_t) - d) / (u - d)\n q_star = 1 - p_star\n price_arr = comb(steps, tmp_arr) * backward * np.power(p_star, steps-tmp_arr) * np.power(q_star, tmp_arr)\n price = np.sum(price_arr) * disc_factor\n \n return price", "title": "" }, { "docid": "146a5743369791c289c2c20f73df6047", "score": "0.49283788", "text": "def _calculate_price(myself, increase_level=0):\n if myself.prices.function == \"exponential\":\n return tools.exponential(\n myself.prices.base,\n myself.prices.mult,\n myself.stat_level + increase_level\n )\n elif myself.prices.function == \"linear\":\n return tools.linear(\n myself.prices.base,\n myself.prices.mult,\n myself.stat_level + increase_level\n )\n elif myself.prices.function == \"log\":\n return tools.logarithm(\n myself.prices.base,\n myself.prices.mult,\n myself.stat_level + increase_level\n )\n elif myself.prices.function == \"linear_mult\":\n return 
tools.linear_multiplier(\n myself.prices.base,\n myself.prices.mult,\n max(myself.stat_level + increase_level, 0),\n G.IDLE[myself.id].level_mult,\n G.IDLE[myself.id].level_threshold\n )\n else:\n raise ValueError(\"Unsupported formula type '{}'\".format(\n myself.prices.function))", "title": "" }, { "docid": "4cd1ce9ac15a8d3ebd5369ead6164842", "score": "0.49246454", "text": "def get_price_list(date=0):\n price_row = []\n price_row.append(datetime.now().ctime()) \n for article in book[0][1:]:\n price_row.append(get_specific_price(article))\n\n return price_row", "title": "" }, { "docid": "93df6ffbf274a3d022fce8f4ed484e24", "score": "0.49196425", "text": "def get_prices(symbol, dates):\n # get Adj close prices, and normalized prices\n prices = get_data(symbol, dates) # only have trading days use SPY as ref\n prices = prices.ix[:, 1:] # remove SPY\n prices_normed = prices / prices.ix[0, :]\n\n # get tipical prices (high + low + close)/3\n prices_high = get_data(symbol, dates, colname='High')\n prices_low = get_data(symbol, dates, colname='Low')\n prices_close = get_data(symbol, dates, colname='Close')\n\n tp_prices = (prices_high.ix[:, 1:] + prices_low.ix[:, 1:] + prices_close.ix[:, 1:]) / 3\n tp_prices_normed = tp_prices / tp_prices.ix[0, :]\n\n return prices, prices_normed, tp_prices, tp_prices_normed", "title": "" }, { "docid": "69524dd33b04ad19559838d59c3da097", "score": "0.49182335", "text": "def get_stock_data(input_file):\n with open(input_file, \"r\") as fp:\n data = list(map(float, fp.read().split(\"\\n\")))\n return data", "title": "" }, { "docid": "d52a4982ee7f8e125dd16b95f58b7801", "score": "0.49163252", "text": "def input_coins():\r\n quarters = int(input(\"Input amount of quarters: \"))*0.25\r\n dimes = int(input(\"Input amount of dimes: \"))*0.1\r\n nickels = int(input(\"Input amount of nickels: \"))*0.05\r\n pennies = int(input(\"Input amount of pennies: \"))*0.01\r\n\r\n return quarters + dimes + nickels + pennies", "title": "" }, { "docid": "09db99d3641248ab95c1d42e9ade46cd", "score": "0.49153543", "text": "def _get_price_int(self):\n price_int = 0.0\n if not self.history_lock:\n if self.qtype == self.MATERIAL and self.material:\n price_int = self.sqft * self.material.unit_price_int * \\\n self.units\n\n elif self.qtype == self.PRODUCT and self.product:\n price_int = self.product.price_int * self.units\n\n elif self.qtype == self.SERVICE and self.service:\n price_int = self.service.price_int * self.units\n\n elif self.qtype == self.ADJUSTMENT and self.adjustment:\n price_int = self.adjustment.price_int * self.units\n else:\n price_int = self.locked_price_int\n\n return price_int", "title": "" }, { "docid": "bc21ab3508b1d1cf32d7c21baf9f4331", "score": "0.49064815", "text": "def guessQty(line,qtylst,sz):\n #get price\n price_lst = re.findall(\"\\d+\\.\\d\\d(?!\\d)\",line)\n qtylst = map(lambda qty:int(qty),qtylst)\n if len(price_lst)==0:\n return min(qtylst)\n price = float(price_lst[0])\n if sz==12: #12inches sandwich\n if price<1.5:\n return 0\n lst = filter(lambda unit_price:True if (unit_price[0]>=4 and unit_price[0] <9) else False,map(lambda q:(price/q,q),qtylst))\n return qtylst[0] if len(lst)==0 else lst[0][1]\n if sz==6: #6inch sandwich\n lst = filter(lambda unit_price:True if (unit_price[0]>=2 and unit_price[0] <5) else False,map(lambda q:(price/q,q),qtylst))\n return qtylst[0] if len(lst)==0 else lst[0][1]\n if sz==3: #mini sandwich\n lst = filter(lambda unit_price:True if (unit_price[0]>=1.5 and unit_price[0] <=3) else False,map(lambda q:(price/q,q),qtylst))\n 
return qtylst[0] if len(lst)==0 else lst[0][1]\n if sz==-1: #drink\n lst = filter(lambda unit_price:True if (unit_price[0]>=1 and unit_price[0] <=5) else False,map(lambda q:(price/q,q),qtylst))\n return qtylst[0] if len(lst)==0 else lst[0][1]\n return qtylst[0]", "title": "" }, { "docid": "2ae7fccf763c2aff94aa323050f0430d", "score": "0.4902088", "text": "def get_price(self):\n return self.price", "title": "" }, { "docid": "b93dda094ede44eb6f01b731d46917af", "score": "0.48981214", "text": "def get_product_available2(self, cr, uid, ids, context=None):\n \n if context is None:\n context = {}\n location_obj = self.pool.get('stock.location')\n warehouse_obj = self.pool.get('stock.warehouse')\n shop_obj = self.pool.get('sale.shop')\n states = context.get('states', [])\n what = context.get('what', ())\n if not ids:\n ids = self.search(cr, uid, [])\n res = {}.fromkeys(ids, 0.0)\n if not ids:\n return res\n if context.get('shop', False):\n warehouse_id = shop_obj.read(cr, uid, int(context['shop']), ['warehouse_id'])['warehouse_id'][0]\n if warehouse_id:\n context['warehouse'] = warehouse_id\n if context.get('warehouse', False):\n lot_id = warehouse_obj.read(cr, uid, int(context['warehouse']), ['lot_stock_id'])['lot_stock_id'][0]\n if lot_id:\n context['location'] = lot_id\n if context.get('location', False):\n if type(context['location']) == type(1):\n location_ids = [context['location']]\n elif type(context['location']) in (type(''), type(u'')):\n location_ids = location_obj.search(cr, uid, [('name', 'ilike', context['location'])], context=context)\n else:\n location_ids = context['location']\n else:\n location_ids = []\n wids = warehouse_obj.search(cr, uid, [], context=context)\n if not wids:\n return res\n for w in warehouse_obj.browse(cr, uid, wids, context=context):\n location_ids.append(w.lot_stock_id.id)\n if context.get('compute_child', True):\n child_location_ids = location_obj.search(cr, uid, [('location_id', 'child_of', location_ids)])\n location_ids = child_location_ids or location_ids\n product2uom = {}\n uom_ids = []\n for product in self.pool.get('product.product').read(cr, uid, ids, ['uom_id'], context=context):\n product2uom[product['id']] = product['uom_id'][0]\n uom_ids.append(product['uom_id'][0])\n uoms_o = {}\n for uom in self.pool.get('product.uom').browse(cr, uid, uom_ids, context=context):\n uoms_o[uom.id] = uom\n results = []\n results2 = []\n from_date = context.get('from_date', False)\n to_date = context.get('to_date', False)\n date_str = False\n date_values = False\n where = [tuple(location_ids), tuple(location_ids), tuple(ids), tuple(states)]\n if from_date and to_date:\n date_str = \"st.date_expected>=%s and st.date_expected<=%s\"\n where.append(tuple([from_date]))\n where.append(tuple([to_date]))\n elif from_date:\n date_str = \"st.date_expected>=%s\"\n date_values = [from_date]\n elif to_date:\n date_str = \"st.date_expected<=%s\"\n date_values = [to_date]\n if date_values:\n where.append(tuple(date_values))\n prodlot_id = context.get('prodlot_id', False)\n prodlot_clause = ''\n if prodlot_id:\n prodlot_clause = ' and st.prodlot_id = %s '\n where += [prodlot_id]\n if 'in' in what:\n cr.execute(\n 'select sum(st.product_qty), st.product_id, st.product_uom '\\\n 'from stock_move st '\\\n 'where st.location_id NOT IN %s '\\\n 'and st.location_dest_id IN %s '\\\n 'and st.product_id IN %s '\\\n 'and st.state IN %s ' + (date_str and 'and ' + date_str + ' ' or '') + ' '\\\n + prodlot_clause +\n 'group by st.product_id,st.product_uom', tuple(where))\n results = 
cr.fetchall()\n if 'out' in what:\n cr.execute(\n 'select sum(st.product_qty), st.product_id, st.product_uom '\\\n 'from stock_move st '\\\n 'where st.location_id IN %s '\\\n 'and st.location_dest_id NOT IN %s '\\\n 'and st.product_id IN %s '\\\n 'and st.state in %s ' + (date_str and 'and ' + date_str + ' ' or '') + ' '\\\n + prodlot_clause +\n ' group by st.product_id,st.product_uom ', tuple(where))\n results2 = cr.fetchall()\n uom_obj = self.pool.get('product.uom')\n uoms = map(lambda x: x[2], results) + map(lambda x: x[2], results2)\n if context.get('uom', False):\n uoms += [context['uom']]\n uoms = filter(lambda x: x not in uoms_o.keys(), uoms)\n if uoms:\n uoms = uom_obj.browse(cr, uid, list(set(uoms)), context=context)\n for o in uoms:\n uoms_o[o.id] = o\n context.update({'raise-exception': False})\n for amount, prod_id, prod_uom in results:\n amount = uom_obj._compute_qty_obj(cr, uid, uoms_o[prod_uom], amount,\n uoms_o[context.get('uom', False) or product2uom[prod_id]], context=context)\n res[prod_id] += amount\n for amount, prod_id, prod_uom in results2:\n amount = uom_obj._compute_qty_obj(cr, uid, uoms_o[prod_uom], amount,\n uoms_o[context.get('uom', False) or product2uom[prod_id]], context=context)\n res[prod_id] -= (amount)\n return res", "title": "" }, { "docid": "be53630f7a0617285a3be7a9a56dc848", "score": "0.48979175", "text": "def perf(df: pd.DataFrame,\n multiplier: int = 0,\n bankroll: float = 15000,\n output: bool = True,\n compound: bool = False,\n price_column_name: str = 'price',\n position_column_name: str = 'position',\n slippage: float = 0) -> NamedTuple:\n df = df.copy()\n if price_column_name != 'price':\n df.rename(columns={price_column_name: 'price'}, inplace=True)\n\n if position_column_name != 'position':\n df.rename(columns={position_column_name: 'position'}, inplace=True)\n\n if slippage:\n cost = get_min_tick(df.price) * slippage\n else:\n cost = 0\n\n df['transaction'] = (df['position'] - df['position'].shift(1)\n .fillna(0)).astype('int')\n\n df['slippage'] = df['transaction'].abs() * cost\n if (df.position[-1] != 0): # & (df.transaction[-1] == 0):\n df.slippage[-1] += np.abs(df.position[-1]) * cost\n\n df['curr_price'] = (df['position'] - df['transaction']) * df['price']\n\n df['base_price'] = (df['price'].shift(\n 1) * df['position'].shift(1)).fillna(0)\n df['pnl'] = df['curr_price'] - df['base_price'] - df['slippage']\n # however convoluted, probably is correct\n slip_return = np.log((-df['slippage'] / df['price']) + 1).fillna(0)\n price_return = np.log(((df['curr_price'] - df['base_price'])\n / abs(df['base_price'])) + 1).fillna(0)\n df['lreturn'] = slip_return + price_return\n\n # get daily returns\n if multiplier:\n df['pnl_dollars'] = df['pnl'] * multiplier\n if compound:\n c = compound_pnl(\n df[['pnl_dollars', 'position', 'transaction']], bankroll)\n df['size'] = c['size'] # for debugging only\n df['comp_pnl_dollars'] = c['comp_pnl_dollars'] # for debugging\n df['balance'] = c['balance'] # for debugging\n daily = daily_returns(c['comp_pnl_dollars'], bankroll)\n else:\n daily = daily_returns(df['pnl_dollars'], bankroll)\n else:\n daily = daily_returns_log_based(df['lreturn'])\n\n # get position stats\n if 'reason' in df.columns:\n p = pos(df['price'], df['transaction'],\n df['position'], df['reason'].shift(1), cost=cost)\n else:\n p = pos(df['price'], df['transaction'], df['position'], cost=cost)\n positions = p.positions\n assert round(positions.pnl.sum(), 4) == round(df.pnl.sum(), 4), \\\n f'Dubious pnl calcs... {positions.pnl.sum()} vs. 
{df.pnl.sum()}'\n\n if multiplier:\n positions['pnl'] = positions['pnl'] * multiplier\n # pnl = positions['pnl'].sum()\n\n duration = positions['duration'].mean()\n win_pos = positions[positions['pnl'] > 0]\n # positions with zero gain are loss making\n loss_pos = positions[positions['pnl'] <= 0]\n # =========================================\n\n # container for all non-pyfolio stats\n stats = pd.Series()\n stats['Win percent'] = len(win_pos) / len(positions)\n stats['Average gain'] = win_pos.pnl.sum() / len(win_pos)\n stats['Average loss'] = loss_pos.pnl.sum() / len(loss_pos)\n stats['Avg gain/loss ratio'] = abs(stats['Average gain'] /\n stats['Average loss'])\n stats['Position EV'] = ((stats['Win percent'] * stats['Average gain'])\n + ((1 - stats['Win percent'])\n * stats['Average loss']))\n days = daily.returns.count()\n num_pos = len(win_pos) + len(loss_pos)\n stats['Positions per day'] = num_pos/days\n stats['Days per position'] = days/num_pos\n stats['Actual avg. duration'] = duration.round('min')\n\n stats['Days'] = days\n stats['Positions'] = num_pos\n stats['Trades'] = p.transactions\n stats['Monthly EV'] = (stats['Positions per day'] *\n stats['Position EV'] * 21)\n stats['Annual EV'] = 12 * stats['Monthly EV']\n\n # Generate output table\n pyfolio_stats = perf_stats(daily['returns'])\n stats = pyfolio_stats.append(stats)\n if output:\n print(stats.to_string())\n daily['path'].plot(figsize=(20, 10), grid=True)\n # daily.balance.plot(figsize=(20, 10), grid=True)\n Results = namedtuple(\n 'Result', 'stats, daily, positions, df, opens, closes')\n return Results(stats, daily, positions, df, p[1], p[2])", "title": "" }, { "docid": "d16bbca10fd34bbbf60a98fa0f34b114", "score": "0.48954666", "text": "def _onchange_quantity(self):\n if self.product_id:\n # super call method first so that vendor pricelist still works correctly even without purchase pricelist\n super()._onchange_quantity()\n\n # search by variant first and then if not found by template\n price_list_item = self.order_id.purchase_pl_id.item_ids.filtered(lambda x: x.product_id == self.product_id)\n if not price_list_item:\n price_list_item = self.order_id.purchase_pl_id.item_ids.filtered(lambda x: x.product_tmpl_id == self.product_id.product_tmpl_id)\n\n if price_list_item:\n self.price_unit = price_list_item.fixed_price", "title": "" }, { "docid": "6cd0407785272b64ed5b5c4743452e8c", "score": "0.48911127", "text": "def _parse_unitful_quantities(self):\n self.daq_rate = self.Q_(self.metadata['daq']['rate']).to('Hz').magnitude\n self.voltage_retract = {'RT': self.Q_(self.metadata['voltage_retract']['RT']),\n 'LT': self.Q_(self.metadata['voltage_retract']['LT'])}\n self.speed = self.Q_(self.metadata['speed']['value'])\n self.constants = {'comment': self.metadata['constants']['comment']}\n self.voltage_limits = {'RT': {},\n 'LT': {},\n 'unit': self.metadata['voltage_limits']['unit'],\n 'comment': self.metadata['voltage_limits']['comment']}\n unit = self.voltage_limits['unit']\n for axis in ['x', 'y', 'z']:\n self.constants.update({axis: self.Q_(self.metadata['constants'][axis])})\n for temp in ['RT', 'LT']:\n lims = [lim *self.ureg(unit) for lim in sorted(self.metadata['voltage_limits'][temp][axis])]\n self.voltage_limits[temp].update({axis: lims})", "title": "" }, { "docid": "5e902ef2b17b20dc7f9113e6774855d0", "score": "0.48885852", "text": "def user_inputs(ticker,**kwargs):\n \n my_stocks={\n 'options':[ticker],\n 'stocks':[ticker],\n }\n\n tradeType='stocks'\n\n inputs={\n 'stock_list':my_stocks[tradeType],\n 
'start_date':None,\n 'stop_date':None,\n 'interval':None\n }\n\n today = dt.date.today()\n two_years_ago=str(today.year-2)\n\n inputs['start_date'] = kwargs['start_date'] if 'start_date' in kwargs else two_years_ago\n inputs['stop_date'] = kwargs['stop_date'] if 'stop_date' in kwargs else None\n # interval=kwargs['interval'] if 'interval' in kwargs else '1d'\n\n retrieve_OHLC_data(inputs)", "title": "" }, { "docid": "3cee9b1bf810da1c654ea7c8e5ee7c77", "score": "0.48872775", "text": "def fetch_price_data(self):\n logger.info('`fetch_price_data` called.')\n logger.info(f'Fetching data for {self.symbol_map}.')\n\n # Make the API request\n response = requests.get(\n f'{self.API}/simple/price',\n params={\n 'ids': ','.join(list(self.symbol_map.keys())),\n 'vs_currencies': 'usd',\n 'include_24hr_change': 'true',\n },\n )\n priceData = []\n logger.info(response.json())\n\n for coinSymb, data in response.json().items():\n \n try:\n price = f\"${data['usd']:,.4f}\"\n dayChange = f\"{data['usd_24h_change']:.1f}%\"\n except KeyError:\n continue\n\n priceData.append(\n dict(\n symbol=self.symbol_map[coinSymb], price=price, dayChange=dayChange, name = coinSymb\n )\n )\n return priceData", "title": "" }, { "docid": "64a2b6726e4c492ff4191a830fed8508", "score": "0.48861513", "text": "def extract_results_QPRel(args):\n s, res = args\n\n if not isinstance(s, str):\n raise ValueError(f\"Given s must be a string, instead got an object of type {type(s)}.\")\n\n if res is None:\n return None\n\n elif s == \"lb\":\n if isinstance(res, dict):\n out = 0.0\n elif isinstance(res, list):\n try:\n out = res[1][\"certified_radius\"]\n except KeyError:\n out = np.sqrt(res[1][\"objective_value\"])\n return out\n\n elif s == \"lb_rt\":\n if isinstance(res, dict):\n out = 0.0\n elif isinstance(res, list):\n out = res[1][\"runtime\"]\n return out\n\n elif s == \"lb_max\":\n if isinstance(res, dict):\n out = 0.0\n elif isinstance(res, list):\n out = np.sqrt(res[1][\"objective_value_max\"])\n return out\n\n elif s == \"propagation_gap\":\n if isinstance(res, dict):\n out = 0.0\n elif isinstance(res, list):\n out = res[-1][\"propagation_gap\"]\n return out", "title": "" }, { "docid": "0d140b39051f8d261b9b05c98b1af18d", "score": "0.48842686", "text": "def _get_historical_multi_prices(self):\n token_addresses = [\n token['address']\n for token in self._get_cc_tokens()\n ]\n now = datetime.datetime.now()\n last_price_date = self._get_last_avail_price_date()\n days_count = self._get_days_count(now, last_price_date)\n prices = []\n for token in tqdm(token_addresses):\n price = self._make_historical_prices_req(token, days_count)\n if price != None:\n price = self._process_hist_prices(price)\n prices.append(price)\n else:\n continue\n prices = [p for price in prices for p in price]\n return prices", "title": "" } ]
e06ffe70235da5f096ff2777716e69a3
Appoint the most trusted validator as the primary validator
[ { "docid": "3245e9c37abd18fdc4c111e3a30b76a0", "score": "0.7344275", "text": "def appoint_primary_validator():\n\n current_primary_validator = get_primary_validator()\n\n most_trusted_validator = Validator.objects.filter(\n primary_validator_invalid_blocks__isnull=True\n ).order_by('-trust').first()\n\n if current_primary_validator == most_trusted_validator:\n return\n\n self_configuration = get_self_configuration(exception_class=RuntimeError)\n self_configuration.primary_validator = most_trusted_validator\n self_configuration.save()", "title": "" } ]
[ { "docid": "17d0cdba702d967f38720e05e7ad7e0e", "score": "0.68256813", "text": "def SetValidator(self, validator):", "title": "" }, { "docid": "e9c886c1c783c96ea7228212c7806ef3", "score": "0.65641963", "text": "def GetValidator(self):", "title": "" }, { "docid": "1189796a9a58877729dbf32a2cb58902", "score": "0.6202008", "text": "def configure_validator(self, validator):\n pass", "title": "" }, { "docid": "01bd199a0fe72a41b9f5a858b616d13d", "score": "0.61115324", "text": "def validator(self):\n raise NotImplementedError()", "title": "" }, { "docid": "ec70fb80398890c19d00790c90911636", "score": "0.60947347", "text": "def createValidator(self):\n return None", "title": "" }, { "docid": "1f58a2f7a81310ef09e59351c53a9e0c", "score": "0.6024325", "text": "def set_validator(validator: STACValidator) -> None:\n RegisteredValidator.set_validator(validator)", "title": "" }, { "docid": "7d7decb4f1142928917da2181837397a", "score": "0.6022229", "text": "def validate(self, validator=None):\n if validator is None:\n self.discard(~self.valid)\n else:\n validator(self)", "title": "" }, { "docid": "7acd37fc3774e8d1c1dc2c07fe663d6d", "score": "0.5985062", "text": "def builtin_validators(self):\n return None", "title": "" }, { "docid": "052bc06bfddf902bf188629f60237252", "score": "0.5979414", "text": "def set_validation(self, captain):\n self.who_validated.append(captain)", "title": "" }, { "docid": "46db168d97bfa43fe19a33a272cd5a4b", "score": "0.5972051", "text": "def validator(self):\n return self._validator", "title": "" }, { "docid": "46db168d97bfa43fe19a33a272cd5a4b", "score": "0.5972051", "text": "def validator(self):\n return self._validator", "title": "" }, { "docid": "46db168d97bfa43fe19a33a272cd5a4b", "score": "0.5972051", "text": "def validator(self):\n return self._validator", "title": "" }, { "docid": "1891d2acdb7c4f45d27d37071a9c4d95", "score": "0.5669941", "text": "def validator_instance(self, **kwargs):\n raise NotImplementedError()", "title": "" }, { "docid": "26da6d3eeeb5115ebe5ce92da8c18303", "score": "0.5524625", "text": "def validator(self, instance, value):", "title": "" }, { "docid": "b8e398c7449b8bd282fd1c994873650b", "score": "0.5520867", "text": "def validate(cls, validator_context):\n pass", "title": "" }, { "docid": "f7cf1eee9f757ed922f6638d7c0adbf8", "score": "0.5467543", "text": "def __init__(self, validator, default=None):\n self.validator = AsValidator(validator)\n self.expected_type = self.validator.expected_type\n self.default = default", "title": "" }, { "docid": "7e8e33c17a89be96ebf8bf323b2a912f", "score": "0.5446466", "text": "def run(self, model, get_related=True):\n return super(Validator, self).run(model, get_related=get_related)", "title": "" }, { "docid": "617c2f9cdb4fb010c0640212580a4f7c", "score": "0.54084355", "text": "def makeValid(self):\n\t\tif not self.parent:\n\t\t\treturn\n\n\t\tself.parent.setValidPossibility(self)", "title": "" }, { "docid": "a84deaf8f2897215cd5987fe3a717c19", "score": "0.5397208", "text": "def process_prevalidate(self):\n pass", "title": "" }, { "docid": "c9c96f3aa7bea86d2cba8a5a9cbc8869", "score": "0.5348473", "text": "def __init__(self, checker, message):\n self.checker = checker\n self.message = message\n Validator.validators_count += 1\n # Used to assert validators in the order they were registered.\n self.insertion_index = Validator.validators_count", "title": "" }, { "docid": "ff25eac20bcd670d9fa179a577d36a5f", "score": "0.5312215", "text": "def Validate(self, parent):", "title": "" }, { "docid": "bcef8ba6abd2a1114cfac4d794aac9e2", 
"score": "0.5304405", "text": "def collect_and_run_validators(self):\n validators = collect_validators.send(sender=self.__class__, instance=self)\n validators = [v[1] for v in validators] # instances only\n validators.sort(key=lambda v: v.priority, reverse=True)\n for validator in validators:\n validator.validate()", "title": "" }, { "docid": "32c416a86fff2451aab6df4a4db5edf2", "score": "0.5289115", "text": "def run_validator(Validator, person):\n\n return Validator.validate(person)", "title": "" }, { "docid": "06e1fb42359a4f1f717cd63077d788c0", "score": "0.52712446", "text": "def init_fast_validator ( self, *args ):\r\n pass", "title": "" }, { "docid": "06e1fb42359a4f1f717cd63077d788c0", "score": "0.52712446", "text": "def init_fast_validator ( self, *args ):\r\n pass", "title": "" }, { "docid": "56b630649e62d3dba57ebc5fe9ed3bd7", "score": "0.52130634", "text": "def _verify_validator(validator):\n # NOTE: Checking for ``_call__`` is done to match the original\n # implementation. It's not clear why ``callable()`` was not used.\n if getattr(validator, \"__call__\", None) is None:\n raise TypeError(\n \"validator must be callable or None; received {!r}\".format(\n validator\n )\n )\n\n return validator", "title": "" }, { "docid": "f1d99333d28bd2a316ad91191815787a", "score": "0.52119523", "text": "def add_validator(cls, v):\n if cls._validators is None:\n cls._validators = list()\n cls._validators.append(v)", "title": "" }, { "docid": "bd3902772b79c4d6577bffbd84929b20", "score": "0.51769865", "text": "def __init__(self):\n self.validator = EsserValidator(\n self.schema, event=self\n )", "title": "" }, { "docid": "afd8b7b5ab38e51c34ae49ff51054b3d", "score": "0.51638484", "text": "def get_entity_validator():\n return __entity_validator", "title": "" }, { "docid": "92aa96603a61913bc8b58e1334d524b0", "score": "0.5151312", "text": "def auto_valid(validation):\n if isinstance(validation, type):\n def lmb(x): return isinstance(x, validation)\n return lmb\n return validation", "title": "" }, { "docid": "4f62fd4b56192843fb72b02c2d6854fe", "score": "0.5121594", "text": "def __init__(self, validatorClass):\n self.__validatorClass = validatorClass\n self.__entities = {}", "title": "" }, { "docid": "8161bc80a055e50522a8654bdafc81f3", "score": "0.5113261", "text": "def validate(self):\n return validate(self, getattr(self, 'spec', None))", "title": "" }, { "docid": "5d77341b4346ef17ec87fd8df452c313", "score": "0.5085262", "text": "def cross_validate(self):\n pass", "title": "" }, { "docid": "eb0126de2bf05194b8faa48a01cdfa3c", "score": "0.5077746", "text": "def run(self):\n validator = self.val(self.args)\n validator.validate()", "title": "" }, { "docid": "85a4ac8fddd654b009cc3a0c2b9656bd", "score": "0.5063089", "text": "def enable_validation(is_validate=True):\n dist.enable_validation(is_validate)\n infer.enable_validation(is_validate)\n poutine.enable_validation(is_validate)", "title": "" }, { "docid": "a2d760efe5b43fedc4c3129526fc9e7e", "score": "0.5056535", "text": "def validate(self):\n pass", "title": "" }, { "docid": "dfeb538453c8a83cf298d497bd0cc927", "score": "0.50433254", "text": "def post_validation_action(self, dev_res):\n pass", "title": "" }, { "docid": "3f351c1fff356c041ee1dd1db5541a8e", "score": "0.500942", "text": "def _apply_validator_chain(chain, value, handler):\n\n if hasattr(chain, 'validate'): # not a list\n chain = [chain, ]\n\n for validator in chain:\n if hasattr(validator, 'validate'):\n value = validator.validate(value, handler)\n else:\n raise web.HTTPError(500)\n return value", 
"title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.4997301", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.4997301", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.4997301", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.4997301", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.4997301", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.4997301", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.4997301", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.4997301", "text": "def validate(self):\n pass", "title": "" }, { "docid": "51174fbc5a3a2077f5434b17dafc6ac3", "score": "0.49955255", "text": "def validate(self):", "title": "" }, { "docid": "51174fbc5a3a2077f5434b17dafc6ac3", "score": "0.49955255", "text": "def validate(self):", "title": "" }, { "docid": "51174fbc5a3a2077f5434b17dafc6ac3", "score": "0.49955255", "text": "def validate(self):", "title": "" }, { "docid": "51174fbc5a3a2077f5434b17dafc6ac3", "score": "0.49955255", "text": "def validate(self):", "title": "" }, { "docid": "8a2942fa61c199fbf1840038bad91837", "score": "0.49910387", "text": "def _complete_validate(self, validated):\n return", "title": "" }, { "docid": "1455efeb6ac1a95e50059d8fe493407d", "score": "0.49811545", "text": "def init_fast_validator ( self, *args ):\r\n self.fast_validate = args", "title": "" }, { "docid": "1455efeb6ac1a95e50059d8fe493407d", "score": "0.49811545", "text": "def init_fast_validator ( self, *args ):\r\n self.fast_validate = args", "title": "" }, { "docid": "9ad8b8f94806772d25770320608c17c8", "score": "0.49781156", "text": "def wrap_validator(validator):\n def wrapped_validator(*args, **kwargs):\n try:\n validator(*args, **kwargs)\n except Exception as ex:\n raise ValidationError(ex)\n return wrapped_validator", "title": "" }, { "docid": "19eb45a76764daac95ec9bf3f81850bb", "score": "0.49520203", "text": "def validate(self, val = None):\n validators = self.__class__._find_methods_in_reverse_mro(\"_validate\")\n\n if self._extra_validators != None:\n validators = validators[:]\n validators.extend(self._extra_validators)\n\n for validator in validators:\n try:\n validator(self, val)\n except StopValidation as s:\n break\n return self", "title": "" }, { "docid": "fd95e17eba6bd4e520c0ecda983a74e8", "score": "0.4943736", "text": "def validate():", "title": "" }, { "docid": "fd95e17eba6bd4e520c0ecda983a74e8", "score": "0.4943736", "text": "def validate():", "title": "" }, { "docid": "66941e8ebcf9bd6802a91cb6f1274fdf", "score": "0.49243912", "text": "def validate_extended(self):", "title": "" }, { "docid": "48b4bb9fd927e8f9bf1760da4a762cc9", "score": "0.49233425", "text": "def trusted(self, trusted):\n if 'trusted' not in self._pending_field_updates:\n self._pending_field_updates.add('trusted')\n\n self._trusted = trusted", "title": "" }, { "docid": "3638f295a8fb61fbff0c08180735262a", "score": "0.49106175", "text": "def __call__(self, value):\n return self.Validate(value)", "title": "" }, { "docid": "2a9fec54a2cdb34141187dead8f65281", "score": "0.4906814", "text": "def init_fast_validate ( self ):\r\n pass", "title": "" }, { "docid": 
"167aae9f187bd4321cfdb91b15fcba65", "score": "0.49044976", "text": "def validated_by(cls, *validators):\n # TODO: See TODO in __init__\n for validator in validators:\n # metaclass gymnastics can fool this assertion. don't do that.\n if isinstance(validator, type):\n raise TypeError(\n \"Validator %r is a type, not a callable or instance of a\"\n \"validator class. Did you mean %r()?\" % (\n validator, validator))\n cls.validators = list(validators)\n return cls", "title": "" }, { "docid": "e7704ce0253425a15e8134d06b3777a3", "score": "0.48984236", "text": "def process_validate(self, obj):\n pass", "title": "" }, { "docid": "b0ccd2c2d3f95a6463ebc79996182362", "score": "0.48231855", "text": "def validate(self):\n\t\tprint()\n\t\tprint('X - Patient Validate')\n\n\t\t# Handle Exceptions\n\t\texc_pat.handle_exceptions(self)", "title": "" }, { "docid": "cb84edea27414502d6bea6248d267094", "score": "0.47980964", "text": "def canPerformValidation(self):\n return True", "title": "" }, { "docid": "af832e422529984521de7ae542df2a82", "score": "0.4786734", "text": "def extra_validations(self, xlm_tree):\n\n pass", "title": "" }, { "docid": "edee2aa93b81b6f3cdf0bd2a92392f24", "score": "0.4781084", "text": "def __init__(self, validator):\n self.val = validator\n self.args = vars(self._define_parse().parse_args())", "title": "" }, { "docid": "377df86414fdfadd0f693e4dbf1676b9", "score": "0.47771916", "text": "def validate(a):\n raise NotImplementedError", "title": "" }, { "docid": "377df86414fdfadd0f693e4dbf1676b9", "score": "0.47771916", "text": "def validate(a):\n raise NotImplementedError", "title": "" }, { "docid": "feea4ef50535bed3b7a0e58c6c72e131", "score": "0.47750667", "text": "def validate(value):\n return Validate(value)", "title": "" }, { "docid": "88304218f3ee4e542f7a0340febd26b5", "score": "0.4772658", "text": "def validate(instance):\n return validator.validate(instance)", "title": "" }, { "docid": "d653736460190755217ca34f9977903f", "score": "0.47681782", "text": "def AsValidator(validator):\n if isinstance(validator, (str, unicode)):\n return Regex(validator, type(validator))\n if isinstance(validator, type):\n return Type(validator)\n if isinstance(validator, (list, tuple, set)):\n return Options(*tuple(validator))\n if isinstance(validator, Validator):\n return validator\n else:\n raise AttributeDefinitionError('%s is not a valid validator' %\n str(validator))", "title": "" }, { "docid": "e4d1d8d9226fb2c42bdab4e9def76d78", "score": "0.47636947", "text": "def _validate(self) -> None:\n # XXX is still loaded in constructor. 
What to do?!", "title": "" }, { "docid": "6e81bc60aad1f367e4047d4be3bab89f", "score": "0.47607467", "text": "def validate(self):\n raise NotImplementedError", "title": "" }, { "docid": "6e81bc60aad1f367e4047d4be3bab89f", "score": "0.47607467", "text": "def validate(self):\n raise NotImplementedError", "title": "" }, { "docid": "6e81bc60aad1f367e4047d4be3bab89f", "score": "0.47607467", "text": "def validate(self):\n raise NotImplementedError", "title": "" }, { "docid": "6e81bc60aad1f367e4047d4be3bab89f", "score": "0.47607467", "text": "def validate(self):\n raise NotImplementedError", "title": "" }, { "docid": "a667e608c2e0bf1df78c7ee58c50a1f7", "score": "0.47547153", "text": "def add_validator(self, func):\n # Check if the func is a real function\n if callable(func) and func not in self.list_validate_funct:\n self.list_validate_funct.append(func)", "title": "" }, { "docid": "582c1d5e117167a3d435334b360922d7", "score": "0.47496393", "text": "def __validate(self):\n pass", "title": "" }, { "docid": "582c1d5e117167a3d435334b360922d7", "score": "0.47496393", "text": "def __validate(self):\n pass", "title": "" }, { "docid": "582c1d5e117167a3d435334b360922d7", "score": "0.47496393", "text": "def __validate(self):\n pass", "title": "" }, { "docid": "c221b42e34cef0c36ff47741fa5830b8", "score": "0.47458375", "text": "def _run_validation(self, **kwargs):\n raise NotImplemented", "title": "" }, { "docid": "dea95f9f4c2712faaf9b05606e3dfa2f", "score": "0.4738821", "text": "def _validate(self):\n return True", "title": "" }, { "docid": "dea95f9f4c2712faaf9b05606e3dfa2f", "score": "0.4738821", "text": "def _validate(self):\n return True", "title": "" }, { "docid": "3ff958a2ff65af8b36d28ae3a4e052eb", "score": "0.47368336", "text": "def hpgooeyvalidation(self, options):\n self.cmdbase.login_select_validation(self, options)", "title": "" }, { "docid": "45f9e3a305539500f7111b13a2f4d8d1", "score": "0.47329196", "text": "def makevalidator(name):\n def wrap(f):\n @validators.utils.validator\n @wraps(f)\n def wrapped(*args, **kwargs):\n return f(*args, **kwargs)\n setattr(validators, name, wrapped)\n return wrapped\n return wrap", "title": "" }, { "docid": "c21b1dd31df74a3f00b5e7b3e7e61589", "score": "0.47239965", "text": "def validate(self):\n return self.validator(*self.args, **self.kwargs)", "title": "" }, { "docid": "b00e9d29fe8142d9a1d0c044303120f9", "score": "0.47100276", "text": "def validator_all(self, description, name):\n\n validator = CustomRecommendationValidator()\n\n validator.validator_name_update(name)\n validator.validator_description(description)", "title": "" }, { "docid": "e6bcaa6d529db67ba0b7603c5de26204", "score": "0.47067317", "text": "def __extend_with_default(validator_class):\r\n validate_properties = validator_class.VALIDATORS[\"properties\"]\r\n\r\n def set_defaults(validator, properties, instance, schema):\r\n for prop, sub_schema in properties.items():\r\n if \"default\" in sub_schema:\r\n instance.setdefault(prop, sub_schema[\"default\"])\r\n\r\n for error in validate_properties(\r\n validator, properties, instance, schema,\r\n ):\r\n yield error\r\n\r\n return validators.extend(\r\n validator_class, {\"properties\": set_defaults},\r\n )", "title": "" }, { "docid": "8e9ed5a6f25fb27324c38d2cf4fad5f6", "score": "0.47009754", "text": "def _patch_validators():\n # Add persistent font scalings\n mfonts.font_scalings['med-small'] = 0.9\n mfonts.font_scalings['med-large'] = 1.1\n\n # Define new valdiators\n # NOTE: In the future will subclass RcParams directly and control the validators\n 
def _validate_fontsize(s):\n fontsizes = list(mfonts.font_scalings)\n if isinstance(s, str):\n s = s.lower()\n if s in fontsizes:\n return s\n try:\n return float(s)\n except ValueError:\n raise ValueError(\n f'{s!r} is not a valid font size. Valid sizes are: '\n + ', '.join(map(repr, fontsizes))\n )\n\n def _validate_fontsize_None(s):\n if s is None or s == 'None':\n return None\n else:\n return _validate_fontsize(s)\n\n _validate_fontsizelist = None\n if hasattr(msetup, '_listify_validator'):\n _validate_fontsizelist = msetup._listify_validator(_validate_fontsize)\n\n # Apply new validators\n validate = RcParams.validate\n for key in list(validate): # modify in-place\n validator = validate[key]\n if validator is msetup.validate_fontsize:\n validate[key] = _validate_fontsize\n elif validator is getattr(msetup, 'validate_fontsize_None', None):\n validate[key] = _validate_fontsize_None\n elif validator is getattr(msetup, 'validate_fontsizelist', None):\n if _validate_fontsizelist is not None:\n validate[key] = _validate_fontsizelist", "title": "" }, { "docid": "e29893c07cfd41b5f5fb820c60cde210", "score": "0.46978825", "text": "def validate(self):\n raise NotImplementedError()", "title": "" }, { "docid": "df50c86466de80b736c4296939376552", "score": "0.46870822", "text": "def constrainable(self):\n raise NotImplementedError", "title": "" }, { "docid": "cfdf8516db816ebfcadc858d539bfc91", "score": "0.4686128", "text": "def Validate(self):", "title": "" }, { "docid": "ef970b5b99eba9135ff220f23c8bb1ed", "score": "0.4680989", "text": "def _validate(self) -> None:\n pass", "title": "" }, { "docid": "e4b7b854ef6e1ef30058e910673f076a", "score": "0.46735454", "text": "def Constrain(self) -> ConstrainType:", "title": "" }, { "docid": "16f5c1415b293b69e9f2503cdcc3fb93", "score": "0.4672016", "text": "def _validate_init(self):\r\n self._causal_validate_init()", "title": "" }, { "docid": "16f5c1415b293b69e9f2503cdcc3fb93", "score": "0.4672016", "text": "def _validate_init(self):\r\n self._causal_validate_init()", "title": "" }, { "docid": "16f5c1415b293b69e9f2503cdcc3fb93", "score": "0.4672016", "text": "def _validate_init(self):\r\n self._causal_validate_init()", "title": "" }, { "docid": "72ddd28c2b8f4e4d857e002d8e3d8901", "score": "0.46711415", "text": "def __init__(__self__, *,\n client_validation_ca: Sequence['outputs.ValidationCAResponse'],\n client_validation_mode: str,\n client_validation_trust_config: str):\n pulumi.set(__self__, \"client_validation_ca\", client_validation_ca)\n pulumi.set(__self__, \"client_validation_mode\", client_validation_mode)\n pulumi.set(__self__, \"client_validation_trust_config\", client_validation_trust_config)", "title": "" }, { "docid": "1c4948e23ac6859e679d4c13b0bcf31b", "score": "0.4666569", "text": "def validate(self) -> None:\n return", "title": "" } ]
e22ddb62e752ac2dc25b190613bd2c48
set value expecting date time
[ { "docid": "0f4c3b387a27a29c2c3cf80812c71dfc", "score": "0.64594984", "text": "def set(self, v):\n if not isinstance(v, datetime.datetime):\n raise ValueError(\"Invalid datetime value!\")\n\n if v.tzinfo is None: # naive time -> assumed to be in UTC\n self.__v = v.replace(tzinfo=et.UTC)\n\n else: # convert to UTC\n self.__v = v.astimezone(et.UTC)", "title": "" } ]
[ { "docid": "cb2f92086af4884a5e191426c2037c27", "score": "0.7544321", "text": "def date(self, value):\n self._date = value", "title": "" }, { "docid": "edf1608f8e9bbeeff627462e0a44b525", "score": "0.75029093", "text": "def setValue (self, val):\n \n if isinstance(val, datetime):\n self._obj.timeValue = rtime(long(time.mktime(val.timetuple())*1000))\n elif isinstance(val, omero.RTime):\n self._obj.timeValue = val\n else:\n self._obj.timeValue = rtime(long(val * 1000))", "title": "" }, { "docid": "477698fd639c36a97042904f555d14f5", "score": "0.7305991", "text": "def __set__(self, instance, value):\n if not value:\n return\n\n try:\n value = TimeStampType.timestamp_to_date(value)\n except TypeError:\n pass\n\n super(TimeStampType, self).__set__(instance, value)", "title": "" }, { "docid": "4c7d1363a0457678f1463054900ceae8", "score": "0.7140296", "text": "def date_setter(value: datetime):\n\n if isinstance(value, datetime) or value is None:\n return value\n elif isinstance(value, str):\n return set_date_from_string(value)\n else:\n raise TypeError(\"Type should be datetime or date\")", "title": "" }, { "docid": "b5e5ea6d57cdcd105252ceefae77311b", "score": "0.699419", "text": "def set_datetime(self, name, value):\n if settings.USE_TZ and is_aware(value):\n # tz = pytz.timezone(self.get_time_zone())\n # dd.logger.info(\"20151128 set_datetime(%r, %r)\", value, tz)\n # value = value.astimezone(tz)\n # value = tz.localize(value)\n value = value.astimezone(self.get_time_zone().tzinfo)\n\n setattr(self, name + '_date', value.date())\n t = value.time()\n if not t:\n t = None\n setattr(self, name + '_time', t)", "title": "" }, { "docid": "24913e8570dfca11bdebd6ed5cf72a8f", "score": "0.6824079", "text": "def __setattr__(self, attr, value):\n fmt = \"%Y-%m-%dT%H:%M:%S.%f\"\n\n if attr == \"created_at\" and type(value) is str:\n self.created_at = datetime.strptime(value, fmt)\n elif attr == \"updated_at\" and type(value) is str:\n self.updated_at = datetime.strptime(value, fmt)\n else:\n super().__setattr__(attr, value)", "title": "" }, { "docid": "ae01b08e418d744433231a80a34f7f3c", "score": "0.68233675", "text": "def todo_date(self, value):\n self.logger.warn(\n \"Setting values on todo_date will NOT update the remote Canvas instance.\"\n )\n self._todo_date = value", "title": "" }, { "docid": "133dcf62a37b594de8468b6c5dc17080", "score": "0.67917687", "text": "def _set_value_date_30V(self, val):\n self.swift_obj.SequenceB_TransactionDetails.ValueDate = val\n self.swift_obj.SequenceB_TransactionDetails.ValueDate.swiftTag = \"30V\"", "title": "" }, { "docid": "151ad9e97d94411a322789bcf6f8de16", "score": "0.67478627", "text": "def date(self, date):\n self._date = date", "title": "" }, { "docid": "151ad9e97d94411a322789bcf6f8de16", "score": "0.67478627", "text": "def date(self, date):\n self._date = date", "title": "" }, { "docid": "1d1e1c09f63b97ed9a99eee2f29936ec", "score": "0.67403305", "text": "def update_date(self, date_int_val, data_manager):\n self.date_int_val_ = date_int_val\n return", "title": "" }, { "docid": "0d59abbe58b93ed0da53ccae8933177d", "score": "0.6734257", "text": "def _date(self, _date):\n\n self.__date = _date", "title": "" }, { "docid": "0d59abbe58b93ed0da53ccae8933177d", "score": "0.6734257", "text": "def _date(self, _date):\n\n self.__date = _date", "title": "" }, { "docid": "a67716ba5873eb8ce2444e783a5c03fa", "score": "0.6725218", "text": "def set_time(self, iso_date):\n self.logger.info(\"Setting date to %s requested\", iso_date)\n cmd = \"{} 
{}\".format(settings.conf[\"COMMANDS\"][\"settime\"], iso_date)\n self.logger.debug(\n \"Executing %s\",\n cmd\n \n )\n subprocess.run(cmd.split(\" \"))\n return \"OK\"", "title": "" }, { "docid": "40846469e0f728bda6c8cad6f0eeb000", "score": "0.67223376", "text": "def set_date(self, date):\n\n self.date = date", "title": "" }, { "docid": "b9ec20fc25f873ecb83ddf18e8ed1f34", "score": "0.67196435", "text": "def date_time(self, date_time):\n\n self._date_time = date_time", "title": "" }, { "docid": "6b8ff57e7bea10b7bc22ee7ae3b31367", "score": "0.663873", "text": "def set_to_current_time(self):\n self.value = self.timestamp", "title": "" }, { "docid": "d2609b7f97c4c29594c1b7f504d17b29", "score": "0.6626409", "text": "def _set_object(self):\n self.object = datetime.strptime(self.value, self.formatter)\n return", "title": "" }, { "docid": "52574fa40795f7bf56d7a30393cb020c", "score": "0.6616696", "text": "def timestamp(self, val):\n self._set_single(\"timestamp\", val, coerce=self._date_str())", "title": "" }, { "docid": "c7b273ac87c16c0c56f56c77273c2d2e", "score": "0.66107523", "text": "def date_value(self, date_value):\n\n self._date_value = date_value", "title": "" }, { "docid": "c5bc72ed5e32aeb9e1158b30e3793f3f", "score": "0.65968156", "text": "def set_time(self, t):\n self.t = t", "title": "" }, { "docid": "701bcf6d8aafbc01d9976a5fd084978e", "score": "0.65799403", "text": "def setDate(self, date):\n timeStruct = time.strptime(date, \"%a %b %d %H:%M:%S +0000 %Y\")\n self.date = datetime.fromtimestamp(time.mktime(timeStruct))", "title": "" }, { "docid": "f8710d4ff9daea534244c449bd92b07c", "score": "0.65665895", "text": "def _set_date_time_field(self, fld, dttm):\n if dttm is None:\n dttm = datetime.datetime.now()\n # Convert to string format if needed\n if isinstance(dttm, (datetime.datetime, datetime.date)):\n dtstring = dttm.strftime(\"%Y:%m:%d %H:%M:%S\")\n else:\n dtstring = self._format_date_time(dttm)\n cmd = f\"\"\"exiftool {self._opt_expr} -{fld}='{dtstring}' \"{self.photo}\" \"\"\"\n _runproc(cmd, fpath=self.photo)", "title": "" }, { "docid": "c4653a5d744eb4c11f59acf1194db442", "score": "0.6562197", "text": "def _date(self, _date: datetime):\n\n self.__date = _date", "title": "" }, { "docid": "c4653a5d744eb4c11f59acf1194db442", "score": "0.6562197", "text": "def _date(self, _date: datetime):\n\n self.__date = _date", "title": "" }, { "docid": "e2d56672d35ee8aa072f5d697d675482", "score": "0.651176", "text": "def __set__(self, instance, value):\n\n if not value:\n return\n\n if isinstance(value, str):\n try:\n value=MillisecondType.datestring_to_millis(value)\n except TypeError:\n pass\n\n super(MillisecondType, self).__set__(instance, value)", "title": "" }, { "docid": "8c9d46fd904fdce18d019baa85356af1", "score": "0.6477212", "text": "def set_time_values(self, value, tdoa):\n return self.set_parameter_values(self._temporal_param_name, value, tdoa, None)", "title": "" }, { "docid": "d1519ef390b493d2c0e179199e1e1625", "score": "0.64366215", "text": "def set_time(self, time):\n self.time = time", "title": "" }, { "docid": "d1519ef390b493d2c0e179199e1e1625", "score": "0.64366215", "text": "def set_time(self, time):\n self.time = time", "title": "" }, { "docid": "5a1eada5f66e300a366aa0714461ede4", "score": "0.6434794", "text": "def date(self, date):\n\n self._date = date", "title": "" }, { "docid": "5a1eada5f66e300a366aa0714461ede4", "score": "0.6434794", "text": "def date(self, date):\n\n self._date = date", "title": "" }, { "docid": "5a1eada5f66e300a366aa0714461ede4", "score": "0.6434794", 
"text": "def date(self, date):\n\n self._date = date", "title": "" }, { "docid": "d568f474b058765fe7aae8956ae54025", "score": "0.64285165", "text": "def set_date(self):\n new_date = input(' Task Date[{}]: '.format(self.date)) or self.date\n self.date = new_date", "title": "" }, { "docid": "0b283887b65d3bcf9e2235312335c3be", "score": "0.64253354", "text": "def set_time(self, time):\n self.time_variable.set(time)", "title": "" }, { "docid": "0ce127a03d272da60ba3ae81c8da5464", "score": "0.64207256", "text": "def set_time(self, date: datetime, time: Time) -> datetime:\n seconds = self.seconds(time)\n hour = int(seconds / 3600)\n minute = int((seconds % 3600) / 60)\n second = int((seconds % 3600) % 60)\n return datetime(date.year, date.month, date.day, hour, minute, second)", "title": "" }, { "docid": "c0d22da2542f767dd59e56a92fad4b5d", "score": "0.64147", "text": "def settimeparam(ph, param, value):\n return _toolkit.settimeparam(ph, param, value)", "title": "" }, { "docid": "663a441a3ebb70a36330f8d81ce84cbb", "score": "0.634706", "text": "def setObTime(self, ts):\n self.data['valid'] = ts", "title": "" }, { "docid": "8fd54bcfce16f50678e7fce77d54d3c5", "score": "0.6327696", "text": "def datetime(self, datetime):\n\n self._datetime = datetime", "title": "" }, { "docid": "239e92e94462f91b0ff1a240d196c48e", "score": "0.63268805", "text": "def set_at_time(self, datetime):\n self.add(PROV.atTime, Literal(datetime, datatype=XSD.dateTime))", "title": "" }, { "docid": "239e92e94462f91b0ff1a240d196c48e", "score": "0.63268805", "text": "def set_at_time(self, datetime):\n self.add(PROV.atTime, Literal(datetime, datatype=XSD.dateTime))", "title": "" }, { "docid": "f35c5244198d7de6a36ff525372fa40a", "score": "0.6290176", "text": "def set_resolution_date(self, data):\n self._resolution_date = data", "title": "" }, { "docid": "831f467c8cac9f641793d5575ee094a2", "score": "0.6285845", "text": "def _set_value(self, value):\n try:\n value = float(value)\n except Exception, e:\n raise RuntimeError(\"Failed to set time value for constraint \" + \\\n \"{} // {}\".format(str(self), e))\n self._value = value", "title": "" }, { "docid": "6550720fab4f69d241e5eff2918ebe5c", "score": "0.6279202", "text": "def set(self, value, utc=None):\n with self.mutex:\n self.__value = value\n self.touch(utc=utc)", "title": "" }, { "docid": "b45ffc5d38d456646d8cad23cc1534f1", "score": "0.6271654", "text": "def set(self, key, val, time=0):\n if not time:\n time = None\n elif time < 60 * 60 * 24 * 30:\n time = datetime.datetime.now() + datetime.timedelta(0, time)\n else:\n time = datetime.datetime.fromtimestamp(time)\n self.dictionary[key] = val, time\n return 1", "title": "" }, { "docid": "2c68874538c2a61e91cd590fb4f770bb", "score": "0.6233547", "text": "async def _async_set_value(entity: TimeEntity, service_call: ServiceCall) -> None:\n return await entity.async_set_value(service_call.data[ATTR_TIME])", "title": "" }, { "docid": "dc296998daa77a3111b0b6d7915bd003", "score": "0.6222571", "text": "def date(value) -> DateValue:\n raise NotImplementedError()", "title": "" }, { "docid": "60b729fd9b4d5e7a29fb70c67286e803", "score": "0.6212052", "text": "def value(self, val):\n val = Val.__convert__(self.type.content, val)\n error = dll.wasmtime_global_set(self.__ptr__, byref(val.__raw__))\n if error:\n raise WasmtimeError.__from_ptr__(error)", "title": "" }, { "docid": "960fd0aa21c5f0159e685202670e8514", "score": "0.61996716", "text": "def replace_element_date_time(element):\n element.value = '00010101010101.000000+0000'", "title": "" }, { 
"docid": "68dc1cff27d86bf779ae0256719104f2", "score": "0.6189357", "text": "def datetime(self, *args, **kwargs) -> Any:\n ...", "title": "" }, { "docid": "09c608267b397439646045ec3936b594", "score": "0.6180436", "text": "def value_to_db_date(self, value):\r\n return value", "title": "" }, { "docid": "495fc5c024d9cc8f7c752181961ea571", "score": "0.6165618", "text": "def test2_get_n_set_date(self):\n self.assertEqual(self.acube_fn['dtime_1'], self.dtime_1.date())\n self.assertEqual(self.acube_fn['dtime_2'], self.dtime_2.date())\n\n new_start_time = datetime(2009, 1, 1, 12, 23, 33).date()\n self.acube_fn['dtime_1'] = new_start_time\n\n self.assertEqual(self.acube_fn['dtime_1'], new_start_time)", "title": "" }, { "docid": "470e5d1419defc95e04d608919ee4fe7", "score": "0.616411", "text": "def _set_trade_date_30T(self, val):\n self.swift_obj.SequenceB_TransactionDetails.TradeDate = val\n self.swift_obj.SequenceB_TransactionDetails.TradeDate.swiftTag = \"30T\"", "title": "" }, { "docid": "8877953b81b7f97cb84e5b59ce389e75", "score": "0.6149519", "text": "def set_date_1(self, date_obj):\n self.date_1 = date_obj\n self.file.get_screen('navi').ids.date_text.text = str(date_obj.strftime(\"%d.%m.%Y\"))", "title": "" }, { "docid": "6cd69df97312da9509ffa601b3204ab2", "score": "0.61402893", "text": "def _set_maturity_date_30P(self, val):\n self.swift_obj.SequenceB_TransactionDetails.MaturityDate = val\n self.swift_obj.SequenceB_TransactionDetails.MaturityDate.swiftTag = \"30P\"", "title": "" }, { "docid": "aad738455a333bfeb94adec8b604decd", "score": "0.61297524", "text": "def test_set_datetime(self):\n person = Person.objects.get()\n person.birthday = arrow.get(1990, 6, 1).datetime\n person.full_clean()\n self.assertIsInstance(person.birthday, arrow.Arrow)\n self.assertEqual(person.birthday, arrow.get(1990, 6, 1))", "title": "" }, { "docid": "a04d03953a575fd6ca6a8ea192da4635", "score": "0.6114085", "text": "def set_value(self, x):\n try:\n self._value = self.data_type(x)\n except ValueError:\n _log.critical(\"{} value of {} cannot be cast to {}\".format(self.point_name, x, self.data_type))\n self._value = x\n self._timestamp = datetime.now()\n return self._value", "title": "" }, { "docid": "55c21094e456bf3cb6201f51634610cd", "score": "0.6087136", "text": "def reParse(self):\n self.parsed = self.timedate", "title": "" }, { "docid": "1bd62f3d611d8f3b2bbb5dc2d412496c", "score": "0.6069135", "text": "def _set_datetime_from_dict(self, key='', dict_=None):\n if isinstance(dict_[\"date\"], datetime.date):\n dict_[\"date\"] = dict_[\"date\"].isoformat()\n datetime_string = \" \".join([dict_[\"date\"], dict_[\"time\"]])\n self._set_datetime_from_string(key=key, string=datetime_string)", "title": "" }, { "docid": "61257e07dfbac410596db2ff9efa196b", "score": "0.60592204", "text": "def set_Date(self, value):\n super(AdvancedFilterInputSet, self)._set_input('Date', value)", "title": "" }, { "docid": "cd84bde7b10dd2deb819bd31e72ecc6f", "score": "0.6053585", "text": "def testSetTime(self):\n year = 2025\n month = 9\n day_of_week = 5\n day = 19\n hour = 1\n minute = 2\n second = 3\n milliseconds = 781\n self.ctrl.set_time(\n year=year,\n month=month,\n day_of_week=day_of_week,\n day=day,\n hour=hour,\n minute=minute,\n second=second,\n milliseconds=milliseconds\n )\n\n # Retrive back the values we set\n test_date_time = self.ctrl.get_time()\n self.assertEqual(test_date_time.wYear, year)\n self.assertEqual(test_date_time.wMonth, month)\n self.assertEqual(test_date_time.wDay, day)\n 
self.assertEqual(test_date_time.wDayOfWeek, day_of_week)\n self.assertEqual(test_date_time.wHour, hour)\n self.assertEqual(test_date_time.wMinute, minute)\n self.assertEqual(test_date_time.wSecond, second)\n self.assertEqual(test_date_time.wMilliseconds, milliseconds)", "title": "" }, { "docid": "e5d976e12b73985141ec61d1a126d8b7", "score": "0.6030314", "text": "def set_date(self, date=None):\n if date is None:\n date = datetime.date.today()\n self.root.set('DT', date.strftime(\"%Y-%m-%d\"))", "title": "" }, { "docid": "fe060f55c997273ed5b36f7a7d01d9b8", "score": "0.6026544", "text": "def setTime(self,time):\n assert type(time) == int or type(time) == float\n assert time >= 0\n self._time = time", "title": "" }, { "docid": "edd531935a710159a7540d0e8754b7e2", "score": "0.6018941", "text": "def created_at(self, value):\n self._created_at = value", "title": "" }, { "docid": "866fceee9faf332be98ad81db9ea03b1", "score": "0.6016327", "text": "def date(self) -> datetime:", "title": "" }, { "docid": "4f513d830d73a8fa7443d6777bf262f0", "score": "0.6003543", "text": "def date(self):", "title": "" }, { "docid": "0a3a4cd8e1dd319421595578ece39890", "score": "0.6002449", "text": "def date(self, date_):\n # type: (date) -> None\n\n if date_ is not None:\n if not isinstance(date_, date):\n raise TypeError(\"Invalid type for `date`, type has to be `date`\")\n\n self._date = date_", "title": "" }, { "docid": "07e1d0f14da8dad1a2064ebd81560a8e", "score": "0.5977777", "text": "def setTime(self, time):\n self._time = time", "title": "" }, { "docid": "14af801de34e34cbcffb109dbf056bfc", "score": "0.5970332", "text": "def replace_element_date(element):\n element.value = '00010101'", "title": "" }, { "docid": "24218ed7eb957899872987bb20e4779e", "score": "0.59530747", "text": "def stamp(self):\n self._value = gmtime()", "title": "" }, { "docid": "5702433b2b6e34524269773bca4a4b34", "score": "0.5948951", "text": "def __setattr__(self, key, value):\n actual_value = getattr(value, 'value', value)\n\n if key == 'timestamp':\n self.__dict__['sent_at'] = datetime.datetime.fromtimestamp(\n actual_value)\n elif key == 'sent_at':\n self.__dict__['timestamp'] = time.mktime(actual_value.timetuple())\n \n self.__dict__[key] = value", "title": "" }, { "docid": "f78de01febdb86cb584576afbd17961b", "score": "0.59358287", "text": "def time(self, time):\n\n self._time = time", "title": "" }, { "docid": "f78de01febdb86cb584576afbd17961b", "score": "0.59358287", "text": "def time(self, time):\n\n self._time = time", "title": "" }, { "docid": "f78de01febdb86cb584576afbd17961b", "score": "0.59358287", "text": "def time(self, time):\n\n self._time = time", "title": "" }, { "docid": "f78de01febdb86cb584576afbd17961b", "score": "0.59358287", "text": "def time(self, time):\n\n self._time = time", "title": "" }, { "docid": "dfcc3e831862cfe3a451f71038c83e3d", "score": "0.5935234", "text": "def set_day(self, day):\r\n self.day = day", "title": "" }, { "docid": "76c95f3dbadc28096aa552b5a6f992f4", "score": "0.5935188", "text": "def set_date(self, date=None):\n\n ts1 = calendar.timegm(Date.date_parser(date))\n\n self.timestamp = int(datetime.utcfromtimestamp(ts1).timestamp())\n\n return self", "title": "" }, { "docid": "b83768a1fc1f753bab3ebdf0edb27ec5", "score": "0.5930303", "text": "def set(self, value) -> None:", "title": "" }, { "docid": "eedd00febc276c48e1a274c94143a3c7", "score": "0.59284556", "text": "def helicsFederateInfoSetTimeProperty(fi: HelicsFederateInfo, time_property: HelicsProperty, value: HelicsTime):\n f = 
loadSym(\"helicsFederateInfoSetTimeProperty\")\n err = helicsErrorInitialize()\n f(fi.handle, HelicsProperty(time_property), value, err)\n if err.error_code != 0:\n raise HelicsException(\"[\" + str(err.error_code) + \"] \" + ffi.string(err.message).decode())", "title": "" }, { "docid": "179f1f039a8a2e26693ca2242863c07c", "score": "0.5926949", "text": "def __get_date(self):\n if self.datetime is not None:\n self.date = self.datetime.date()\n else:\n self.date = None", "title": "" }, { "docid": "a191bb1849fdd937bac959e655a29c75", "score": "0.59253573", "text": "def setDate(self, date):\n # Store date\n self.date = datetime(*date)\n \n # Update atmospheric conditions if atmosphere type is Forecast,\n # Reanalysis or Ensemble\n if self.atmosphericModelType in ['Forecast', 'Reanalysis', 'Ensemble']:\n self.setAtmosphericModel(self.atmosphericModelFile,\n self.atmosphericModelDict)\n \n return None", "title": "" }, { "docid": "a93b5ab41b6764cf7cceb3d65403dbb2", "score": "0.5918346", "text": "def set_time_initializer(self, unix_time):", "title": "" }, { "docid": "aefe4591238625a5b21ca510f2ecef13", "score": "0.5915788", "text": "async def set_datetime(self, new_time: datetime) -> None:\n self._check_dbus(HostFeature.TIMEDATE)\n\n _LOGGER.info(\"Setting new host datetime: %s\", new_time.isoformat())\n await self.sys_dbus.timedate.set_time(new_time)\n await self.sys_dbus.timedate.update()", "title": "" }, { "docid": "f44e90c1883b39419885c52e25a75186", "score": "0.5915004", "text": "def date(self, date):\n if date is None:\n raise ValueError(\"Invalid value for `date`, must not be `None`\")\n\n self._date = date", "title": "" }, { "docid": "16c0ba3adfe69a304e4b45048ebb81f1", "score": "0.5914694", "text": "def set_generated_at_time(self, datetime):\n self.set(PROV.generatedAtTime, Literal(datetime, datatype=XSD.dateTime))", "title": "" }, { "docid": "4a10c830855ebbf41269590d0a3f41c1", "score": "0.59024966", "text": "def test_getValue(self):\n control = self.createControl(dict(timezone=FixedOffset(0, 0)))\n param = control.parent.param\n\n param.value = None\n self.assertEquals(control.getValue(), u'')\n\n param.value = Time.fromDatetime(datetime(2007, 1, 1))\n self.assertTrue(isinstance(control.getValue(), unicode))\n self.assertEquals(control.getValue(), u'2007-01-01')\n\n param.value = Time.fromDatetime(datetime(542, 12, 18))\n self.assertEquals(control.getValue(), u'0542-12-18')", "title": "" }, { "docid": "b486b6dc3925a12e850eacb547d4086e", "score": "0.5901765", "text": "def set_timestamp(self, worksheet, row, col):\n approach = 1\n if approach == 1:\n # paste Now() equation get the current date time.\n worksheet.update_cell(row, col, '=Now()')\n\n # time got from Now() is copied to the same cell, removing the equation from the cell.\n worksheet.update_cell(row, col, worksheet.cell(row, col).value)\n\n elif approach == 2:\n # Get value from a fixed time cell at a cell lets say D1.\n worksheet.update_cell(row, col, worksheet.cell(1, 4).value)", "title": "" }, { "docid": "67f35902e583fe876850aef6322d0a14", "score": "0.5893888", "text": "def value(self, value):\r\n self.client.nowait(\r\n 'set_field', (Literal('browser'), self.element, value))", "title": "" }, { "docid": "cce63ef1771f3a466fbdc1e431f617b4", "score": "0.58919823", "text": "def setDate(self, _date=None, m=None, d=None):\n if m is None and d is None:\n if _date is None:\n y, m, d = time.strftime(\"%Y-%m-%d\").split(\"-\")\n else:\n m = _date.month\n y = _date.year\n d = _date.day\n else:\n y = _date\n\n self.currentYear = int(y)\n 
self.currentMonth = int(m)\n self.currentDay = int(d)", "title": "" }, { "docid": "c1866c7ec8042b6ad254506589de23b2", "score": "0.58810765", "text": "def time(self, a_datetime):\n self.__now = a_datetime", "title": "" }, { "docid": "17c246b50a20deabf6f38b135c33fa99", "score": "0.58744276", "text": "def to_date(self, value: date):\n self._to_date = value\n self._dao.to_date = value", "title": "" }, { "docid": "52775e2bcfbf00a80a29cf2667eac616", "score": "0.58705884", "text": "def value(self, value):\n self.client.nowait(\n 'set_field', (Literal('browser'), self.element, value))", "title": "" }, { "docid": "f6500edd9c6e1b5269998fb24f7fb5d8", "score": "0.58686435", "text": "def set_field_sweeprate(self,sweeprate):\n self.query('T%08.5f' % sweeprate)", "title": "" }, { "docid": "c1bd49ed8e5ddf57cfdb5d39443d52b9", "score": "0.5867312", "text": "def update(self, data):\n match = self.regExp.match(data)\n if match is not None:\n self.timedate = match.group(1)\n self.reParse()", "title": "" }, { "docid": "40825578cefb8f585030dc41685afa99", "score": "0.5862928", "text": "def set_snub_time(self, value):\n self.dlconfig['snub_time'] = value", "title": "" }, { "docid": "cc67802aaf02112b921e5ee642a52fda", "score": "0.585083", "text": "def test_set_value(self):\n tests_ts = [\n (TimeOffset(0, 0), TimeOffset(0, 1), (0, 1)),\n (TimeOffset(0, 0), TimeOffset(1, 0), (1, 0)),\n (TimeOffset(0, 0), TimeOffset(0, 1, -1), (0, 1, -1))\n ]\n\n for t in tests_ts:\n ts = deepcopy(t[0])\n with self.assertRaises(AttributeError):\n ts.set_value(*t[2])\n self.assertEqual(ts, t[0])", "title": "" }, { "docid": "5b4d5176d9e363bb01902879999e67fa", "score": "0.583924", "text": "def setTime(self, time, dt):\n if self.norun:\n return\n # Let the graphs know the simulation time\n self.curTime = time\n self.dt = dt\n obj = SetFrameTime(time)\n obj._tackOnTime = self.curTime # Just sloppily glue the time on the object\n self.conn.que.put(cPickle.dumps(obj))", "title": "" }, { "docid": "630b4b399e11429f1042d414b42e1fc4", "score": "0.58381385", "text": "def set_time(self, time=None):\n if not time:\n req = NTPClient().request(TIME_SERVER)\n time = datetime.fromtimestamp(req.tx_time)\n hour = 12 if ((time.hour % 12) == 0) else (time.hour % 12)\n minute, second, pm = time.minute, time.second, (time.hour > 11)\n else:\n hour, minute, second, pm = time\n\n msg = SET_TIME_COMMAND % (chr(hour), chr(minute), chr(second), chr(pm)) \n self.xbee.SendData(dbus.ByteArray(msg), dbus.UInt64(self.device_addr), 1)", "title": "" }, { "docid": "699a6252c6fa781e8aa50686d6dc5f69", "score": "0.5827371", "text": "def set_date_2(self, date_obj):\n self.date_2 = date_obj\n self.file.get_screen('navi').ids.date_text_2.text = str(date_obj.strftime(\"%d.%m.%Y\"))", "title": "" }, { "docid": "4d2b1c85cf7cb797ab98a7fe50f00c51", "score": "0.58187866", "text": "def set(self, **dArgs):\n\t\tfor sKey in dArgs:\n\t\t\tif sKey == 'year':\n\t\t\t\tself.t[0] = dArgs['year']\n\t\t\t\n\t\t\telif sKey == 'month':\n\t\t\t\tif 'doy' in dArgs:\n\t\t\t\t\traise ValueError(\"Use either month or day of year but not both\")\n\t\t\t\tself.t[1] = dArgs['month']\n\t\t\t\t\n\t\t\telif sKey == 'dom':\n\t\t\t\tif 'doy' in dArgs:\n\t\t\t\t\traise ValueError(\"Use either day of month or day of year but not both\")\n\t\t\t\tself.t[2] = dArgs['dom']\n\t\t\t\n\t\t\telif sKey == 'doy':\n\t\t\t\tif 'dom' in dArgs:\n\t\t\t\t\traise ValueError(\"Use either day of month or day of year but not both\")\n\t\t\t\tif 'month' in dArgs:\n\t\t\t\t\traise ValueError(\"Use either month or day of year but 
not both\")\n\t\t\t\tself.t[2] = dArgs['doy']\n\t\t\t\tself.t[1] = 1\n\t\t\t\t\n\t\t\telif sKey == 'hour':\n\t\t\t\tself.t[4] = dArgs['hour']\n\t\t\t\t\n\t\t\telif sKey == 'minute':\n\t\t\t\tself.t[5] = dArgs['minute']\n\t\t\t\t\n\t\t\telif sKey == 'seconds':\n\t\t\t\tself.t[6] = dArgs['seconds']\n\t\t\t\n\t\t\telse:\n\t\t\t\traise ValueError(\"Unknown keyword argument %s\"%sKey)\t\n\t\t\t\n\t\t\t\n\t\tself.norm()", "title": "" }, { "docid": "28c82203d0d929c5263e5f5bcdb379c9", "score": "0.5817702", "text": "def test_set_datetime_get(self):\n self.request.method = 'GET'\n self.request.GET = self.query\n\n datetime_mixin = time.CurrentDateTimeMixin()\n datetime_mixin.set_datetime(self.request)\n\n dtime = datetime(2013, 7, 14, 13, 1, 14, 34159)\n dt = date(2013, 7, 14)\n\n self.assertEqual(datetime_mixin.get_date(), dt)\n self.assertEqual(datetime_mixin.get_datetime(), dtime)", "title": "" } ]
3a7b4464ebf99794d8c116b42038d4d8
Processes the image and returns it
[ { "docid": "1fc64426902988012c1a7778bfd6ceca", "score": "0.0", "text": "def img_pre_process(img):\n # Chop off 1/3 from the top and cut bottom 150px(which contains the head of car)\n shape = img.shape\n img = img[int(shape[0] / 3):shape[0] - 150, 0:shape[1]]\n img = img / 255.\n print(img.shape)\n\n # Resize the image\n resize_img = resize(img, (128, 384), mode='reflect')\n # Return the image sized as a 4D array\n return resize_img # np.resize(img, (w, h, c)", "title": "" } ]
[ { "docid": "4b27d0b9a56e8d4d8524a2066c858a1d", "score": "0.79944336", "text": "def process(self, image):\n pass", "title": "" }, { "docid": "b92044a4dd259f62f19f82eaa207ff3e", "score": "0.74050933", "text": "def process(self, grabbed_image):\n raise NotImplementedError(\"\")", "title": "" }, { "docid": "5e80d30643edad4a6e718fe8e7b338c0", "score": "0.7040692", "text": "def takeAndProcessImage (processType):\n\n # Creat a client to perform the processing\n client = createClient()\n\n # Request a new image to be processed\n newImage = getImage()\n\n # What form of processing do we want?\n if (\"logo\" == processType):\n # Check for logos\n result = detectLogo(client, newImage)\n elif (\"text\" == processType):\n # Check for text\n result = detectText(client, newImage)\n else:\n # Default to labels\n result = detectLabels(client, newImage)\n \n return result", "title": "" }, { "docid": "7212399b3f2460c399ab3d12049ce670", "score": "0.69169545", "text": "def post_process(self, *args, **kwargs):\n return self.image_processor.post_process(*args, **kwargs)", "title": "" }, { "docid": "bbaa991a8cdd4f891dfae5d9e82ce71b", "score": "0.66806924", "text": "def proc_img(self, img): \n raise NotImplementedError('Must be implemented by subclasses')", "title": "" }, { "docid": "c58dbf53782c9dd17e3c689413d10ba6", "score": "0.6671303", "text": "def process_image():\n # Histogram Equalization [default]\n # Contrast Stretching\n # Log Compression\n # Reverse Video\n # Gamma Correction\n r = request.get_json()\n check = validateInputs(r)\n if check == 1:\n logging.error(\"Insufficient information\")\n return Response(json.dumps({'Error': 'Key Missing'}), status=500)\n username = r['username']\n method = r['processing']\n usertoprocess = User.objects.raw({\"_id\": username}).first()\n for k in usertoprocess.imgslist:\n if k == \"\":\n continue\n if k['filename'] == r['filename']:\n whichim = k\n imstr = k['imgstring']\n image = decode_image_fromb64(imstr, k['filetype'], k['dimensions'])\n break\n start = time.time()\n if method.lower() == \"histogram equalization\":\n processed = histogram_equalize(image)\n elif method.lower() == \"contrast stretching\":\n processed = contrast_stretch(image)\n elif method.lower() == \"log compression\":\n processed = log_compress(image)\n elif method.lower() == \"reverse video\":\n processed = reverse_video(image)\n elif method.lower() == \"gamma correction\":\n processed = gamma_correct(image)\n else:\n return \"no method found\"\n if processed == \"TypeError\":\n logging.error(\"Image Invalid\")\n return Response(json.dumps({'Error': 'Image Type Invalid'}),\n status=500)\n end = time.time()\n elapsed_time = end - start\n if method.lower() in whichim[\"processeddict\"]:\n data = whichim[\"processeddict\"][method.lower()]\n times_run = data[3] + 1\n else:\n times_run = 1\n whichim[\"processeddict\"][method.lower()] = [processed,\n datetime.datetime.now(),\n elapsed_time,\n times_run]\n usertoprocess.save()\n logging.info(\"Image Processing successful\")\n return \"Success\"", "title": "" }, { "docid": "ec7c55c795fdce3cb81a988d732b7566", "score": "0.6640429", "text": "def process_image(self):\n\n args = ['python3', '/home/ubuntu/Desktop/CAPSTONE_R/chess-irs/frame.py',\n 'detect', '--input', self.new_state_raw_img, '--output',\n self.new_state_proc_img]\n\n p = subprocess.run(args, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)", "title": "" }, { "docid": "7648e8af58093a35fb24c3055eae6940", "score": "0.65882206", "text": "def image():", "title": "" }, { "docid": 
"ee3b9ba97a1105ccd20911d660d15d48", "score": "0.6578494", "text": "def _process_image(directory, name):\n # Read the image file.\n filename = directory + DIRECTORY_IMAGES + name + '.png'\n image_data = tf.gfile.FastGFile(filename, 'r').read()\n\n # Image caracteristics\n size = Image.open(filename).size\n shape = [size[0], size[1], 3]\n\n # Read the TXT annotation file.\n filename = os.path.join(directory, DIRECTORY_ANNOTATIONS, name + '.txt')\n\n labels, bboxes = _parse_rec(filename, shape)\n\n return image_data, shape, bboxes, labels", "title": "" }, { "docid": "566df00dac6b6580ea7b83cb50792312", "score": "0.6570799", "text": "def getImage():\n\n # Get the PID of the raspistil process that was launched from crontab using a\n # command similar to\n # @reboot raspistill -o /tmp/aiyimage.jpg -s -t 0 -w 640 -h 480 &\n raspistillPID = subprocess.check_output([\"pidof\", \"raspistill\"])\n\n # Request that a picture is taken by sending a USR1 signal to the Raspistill process\n os.kill(int(raspistillPID), signal.SIGUSR1)\n\n # Wait for the photo to be taken\n time.sleep(0.5)\n\n # The location of the file is hardcoded.\n file_name = \"/tmp/aiyimage.jpg\"\n\n # Loads the image into memory, ready for processing, and return it\n with io.open(file_name, 'rb') as image_file:\n content = image_file.read()\n return types.Image(content=content)\n\n # We failed to load in the image, or process it for some reason\n return None", "title": "" }, { "docid": "80eb10b773ad09197754e0cc154be5cf", "score": "0.6564306", "text": "def _process_image(filename):\n\t # Create a generic TensorFlow-based utility for converting all image codings.\n\tcoder = ImageCoder()\n\t# Read the image file.\n\twith tf.gfile.FastGFile(filename, 'rb') as f:\n\t\timage_data = f.read()\n\n\timage_data = coder.png_to_jpeg(image_data)\n\n\t# Decode the RGB JPEG.\n\timage = coder.decode_jpeg(image_data)\n\timage = tf.image.convert_image_dtype(image, dtype=tf.float32)\n\timage=tf.reshape(1-tf.image.rgb_to_grayscale(image),[height*width])\n\treturn image", "title": "" }, { "docid": "36f8739071343541b40896d9726cbf52", "score": "0.6560644", "text": "def get_image(self, bp):\n pass", "title": "" }, { "docid": "f01c563c11b13f39303ad4497270a3ac", "score": "0.651668", "text": "def _process_image(filename, coder):\n # Read the image file.\n image_data = tf.gfile.FastGFile(filename, 'rb').read()\n\n # Decode the RGB JPEG.\n image = coder.decode_jpeg(image_data)\n\n # Check that image converted to RGB\n assert len(image.shape) == 3\n height = image.shape[0]\n width = image.shape[1]\n assert image.shape[2] == 3\n\n return image_data, height, width", "title": "" }, { "docid": "28eda599f633cbd2b7bc71288b3fb0fc", "score": "0.64870495", "text": "def _process_image(filename, coder):\n # Read the image file.\n with tf.gfile.FastGFile(filename, 'r') as f:\n image_data = f.read()\n\n # Convert any PNG to JPEG's for consistency.\n if _is_png(filename):\n print('Converting PNG to JPEG for %s' % filename)\n image_data = coder.png_to_jpeg(image_data)\n\n # Decode the RGB JPEG.\n image = coder.decode_jpeg(image_data)\n\n # Check that image converted to RGB\n assert len(image.shape) == 3\n height = image.shape[0]\n width = image.shape[1]\n assert image.shape[2] == 3\n\n return image_data, height, width", "title": "" }, { "docid": "598ac1048497fbc36bfe27476dcce783", "score": "0.6486401", "text": "def __img_processing(cls, img):\n\n # Pega a imagem\n new_image = cv.imread(img)\n\n # Normalizar a imagem (largura=64, altura=64, canais=3)\n new_image = 
cv.cvtColor(new_image, cv.COLOR_BGR2RGB)\n new_image = cv.resize(new_image, (128, 64), interpolation=cv.INTER_AREA)\n new_image = new_image.astype('float32')\n new_image /= 255\n\n # Formatar para o formato do tensorFlow (qtd, altura, largura, dimensoes)\n # Verificar se isso não vai dar problema.\n new_image = np.expand_dims(new_image, axis = 0)\n\n return new_image", "title": "" }, { "docid": "ad15455f8d0490899279e0611d53ec16", "score": "0.6455995", "text": "def process_image(image):\n print('*** Start Processing Image ***')\n \n process = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(means, stdevs)])\n \n img_pil = Image.open(image)\n img_tensor = process(img_pil)\n \n print('*** Processing Image Complete ***')\n \n return img_tensor", "title": "" }, { "docid": "728a580873cf95c87e7c276f376b7b25", "score": "0.6435586", "text": "def get_and_process_image(self):\n\t\terr,res,img = vrep.simxGetVisionSensorImage(self.clientID,self.visionSensorHandle,0,vrep.simx_opmode_oneshot_wait)\n\n\t\tcolval = numpy.zeros((res[0]*res[1],3))\n\t\ti = 0\n\t\tfor pix in range(res[0]*res[1]):\n\t\t\tfor col in range(3):\n\t\t\t\tif img[i] >= 0:\n\t\t\t\t\tcolval[pix][col] = img[i]\n\t\t\t\telse:\n\t\t\t\t\tcolval[pix][col] = img[i] + 256\n\t\t\t\ti += 1\n\n\t\tred = 0.0\n\t\tred_position = 0.0\n\t\tgreen = 0.0\n\t\tgreen_position = 0.0\n\t\tblue = 0.0\n\t\tblue_position = 0.0\n\n\t\ti = 0\n\t\tfor pix in colval:\n\t\t\ti += 1\n\t\t\tposition = (i % 32)\n\t\t\tif (pix[0] > 200 and pix[1] < 100 and pix[2] < 100):\n\t\t\t\tred += 1\n\t\t\t\tred_position += -1 + position * 0.0625 \n\t\t\telif (pix[0] < 100 and pix[1] > 200 and pix[2] < 100):\n\t\t\t\tgreen += 1\n\t\t\t\tgreen_position += -1 + position * 0.0625\n\t\t\telif (pix[0] < 100 and pix[1] < 100 and pix[2] > 200):\n\t\t\t\tblue += 1\n\t\t\t\tblue_position += -1 + position * 0.0625\n\t\t\n\t\tif (red > 0):\n\t\t\tred_position = red_position / red\n\t\tif (green > 0):\n\t\t\tgreen_position = green_position / green\n\t\tif (blue > 0):\n\t\t\tblue_position = blue_position / blue\n\n\t\tinput_vars = [red / 256, red_position, green / 256, green_position, blue / 256, blue_position]\n\t\treturn input_vars", "title": "" }, { "docid": "0695e42b6ec0efda87de7bc16efa256c", "score": "0.6422601", "text": "def process_image(path, data_group_folder, file_path):\n filename = file_path.split('/')[-1]\n current_path = os.path.join(path, os.path.join(r'{0}/IMG/'.format(data_group_folder), filename))\n image = cv2.imread(current_path)\n # Convert to RGB\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return image", "title": "" }, { "docid": "e61bc830729431d3770dc1738f557f59", "score": "0.6407284", "text": "def process_image(filepath):\n img = pp.load_image_as_grayscale(filepath)\n img = pp.autocrop_edges(img)\n img = pp.apply_blur(img)\n img = pp.circle_darkest(img)\n pp.save_image(filepath, img)\n return img", "title": "" }, { "docid": "4c6733cfecdbe8fdd607a2c0f4f22346", "score": "0.63953424", "text": "def process_image(self,img):\n print(\"starting image processing of file with path:\",img)\n new_image = ImageData(\n jname=self.job_name,\n frame = self.name,\n path = img,\n const = self.constants, \n db_ref = self.db_ref,\n )\n self.image_data_ls.append(new_image)\n self.image_ref_ls.append(new_image.im_id)", "title": "" }, { "docid": "749461c93e36457a91da8ee7ff9d9786", "score": "0.63952726", "text": "def image (self):\n\n\t\ti = yield threads.deferToThread(self.camera.getImage)\n\n\t\tif i is 
None:\n\t\t\tprint \"No image\"\n\n\t\tdefer.returnValue(i)", "title": "" }, { "docid": "df24500edf2ba2e80de71131bcc4943c", "score": "0.63754076", "text": "def process_image(self, data, preprocessing=None, postprocessing=None):\n if isinstance(data, str):\n logger.debug(\"Load image: {}\".format(data))\n image, org_image = self.__load_image__(data) # Load image\n if image is False or org_image is False:\n return False\n if preprocessing: # If an algorithm that preprocesses is specified,\n # then this algorithm should immediately remove the background\n image = preprocessing.run(self, image, org_image)\n else:\n image = self.__get_output__(image, org_image) # If this is not, then just remove the background\n if postprocessing: # If a postprocessing algorithm is specified, we send it an image without a background\n image = postprocessing.run(self, image, org_image)\n return image", "title": "" }, { "docid": "df24500edf2ba2e80de71131bcc4943c", "score": "0.63754076", "text": "def process_image(self, data, preprocessing=None, postprocessing=None):\n if isinstance(data, str):\n logger.debug(\"Load image: {}\".format(data))\n image, org_image = self.__load_image__(data) # Load image\n if image is False or org_image is False:\n return False\n if preprocessing: # If an algorithm that preprocesses is specified,\n # then this algorithm should immediately remove the background\n image = preprocessing.run(self, image, org_image)\n else:\n image = self.__get_output__(image, org_image) # If this is not, then just remove the background\n if postprocessing: # If a postprocessing algorithm is specified, we send it an image without a background\n image = postprocessing.run(self, image, org_image)\n return image", "title": "" }, { "docid": "a64d2ad4bdaac3e0eae8b8a360069261", "score": "0.63645416", "text": "def process_image(img_path, thickness=3):\n\n img = mpimg.imread(img_path)\n\n return _process_image(img, thickness=thickness)", "title": "" }, { "docid": "fc5faeb78447639adcbfd9c5a7b0d57c", "score": "0.6342809", "text": "def _get_image(self):\n return self.__image", "title": "" }, { "docid": "fc5faeb78447639adcbfd9c5a7b0d57c", "score": "0.6342809", "text": "def _get_image(self):\n return self.__image", "title": "" }, { "docid": "206681a888a715c0307709be43e19aae", "score": "0.63032174", "text": "def image(self):\n print(\"Calculating image for parameters: {}\".format(self.p))\n return transform_image(self._image, self.p[0], self.p[1], self.p[2],\n self.p[3], self.p[4])", "title": "" }, { "docid": "2d60c79ad960bffedea25b5aaa329f4d", "score": "0.6301444", "text": "def process_image(self, data, preprocessing=None, postprocessing=None):\n if isinstance(data, str):\n logger.debug(\"Load image: {}\".format(data))\n image, orig_image = self.__load_image__(data) # Load image\n if image is False or orig_image is False:\n return False\n if preprocessing: # If an algorithm that preprocesses is specified,\n # then this algorithm should immediately remove the background\n image = preprocessing.run(self, image, orig_image)\n else:\n image = self.__get_output__(image, orig_image) # If this is not, then just remove the background\n if postprocessing: # If a postprocessing algorithm is specified, we send it an image without a background\n image = postprocessing.run(self, image, orig_image)\n return image", "title": "" }, { "docid": "6ca02a80711efef822908ea778e44a3f", "score": "0.62759316", "text": "def post(self):\n data = request.files[\"image_file\"]\n if data.filename != '':\n results = main.process_image(data)\n headers 
= {'Content-Type': 'text/html', 'Cache-Control': 'no-store'}\n return make_response(render_template('results.html', images = results),200,headers)", "title": "" }, { "docid": "c9d38a69e40e2c9791e20f59ff678461", "score": "0.62692183", "text": "def _process_image(filename, coder):\n # Read the image file.\n with tf.gfile.FastGFile(filename, 'r') as f:\n image_data = f.read()\n\n if _is2convert(filename):\n print('Reencoding to JPEG for %s' % filename)\n image_data = coder.re_encode_jpeg(image_data)\n\n return image_data", "title": "" }, { "docid": "1e9f118aa806ba10ffbe5c090f7e2afe", "score": "0.62566334", "text": "def processImage(self):\n if self.difCent is None:\n return\n QApplication.setOverrideCursor(Qt.WaitCursor)\n QApplication.processEvents()\n flags = self.getFlags()\n try:\n self.difCent.process(flags)\n except:\n QApplication.restoreOverrideCursor()\n errMsg = QMessageBox()\n errMsg.setText('Unexpected error')\n msg = 'Please report the problem with error message below and the input image\\n\\n'\n msg += \"Error : \"+str(sys.exc_info()[0]) +'\\n\\n'+str(traceback.format_exc())\n errMsg.setInformativeText(msg)\n errMsg.setStandardButtons(QMessageBox.Ok)\n errMsg.setIcon(QMessageBox.Warning)\n errMsg.setFixedWidth(300)\n errMsg.exec_()\n raise\n\n self.updateParams()\n self.writeData()\n self.refreshUI()\n QApplication.restoreOverrideCursor()\n QApplication.processEvents()", "title": "" }, { "docid": "69816c12c902033e1cf3d1e1cf157fae", "score": "0.62435424", "text": "def process_image(image_path):\n # Call global varibales\n global means, std\n # Open image with PIL\n image = Image.open(image_path)\n\n # Resize the image\n if image.size[0] > image.size[1]:\n image.thumbnail((10000, 256))\n else:\n image.thumbnail((256, 10000))\n\n # Crop the image\n left_margin = (image.width-224)/2\n bottom_margin = (image.height-224)/2\n right_margin = left_margin + 224\n top_margin = bottom_margin + 224\n image = image.crop((left_margin, bottom_margin, right_margin,\n top_margin))\n # Normalize\n image = np.array(image)/255\n image = (image - means)/std\n\n # Move color channels to first dimension as expected by PyTorch\n image = image.transpose((2, 0, 1))\n\n return image", "title": "" }, { "docid": "1860e63b6defdd82f934bb4b8615a5e3", "score": "0.6216545", "text": "def get_image(data):\n image = Image.open(BytesIO(base64.b64decode(data['image'])))\n image = np.asarray(image)\n image = process_image(image)\n image = np.array([image])\n return image", "title": "" }, { "docid": "05cf1fd95322bcc46727cee1fb814d2b", "score": "0.6152597", "text": "def process_image(image):\n width, height = image.size\n ratio = width / height\n if width < height:\n width = 256\n height = int(256 / ratio)\n else:\n height = 256\n width = int(256 * ratio)\n image = image.resize((width, height))\n image = image.crop((width / 2 - 112, height / 2 - 112, width / 2 + 112, height / 2 + 112))\n np_image = np.array(image)\n np_image = np_image.astype(float)\n np_image = np_image / 255\n np_image = (np_image - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])\n\n return np.transpose(np_image, (2, 0, 1))", "title": "" }, { "docid": "02be9f99315f4f6b5f1b35be2c3d6074", "score": "0.6125608", "text": "def _parse_and_processed(self, example_serialized):\n image_buffer, label, bbox, _ = self._parse_fn(example_serialized)\n image = self._image_preprocessing(image_buffer, bbox)\n return image, label", "title": "" }, { "docid": "5160a5232e579ba83082bb57b12e1bb4", "score": "0.61234045", "text": "def __call__(self, 
results):\n\n if self.file_client is None:\n self.file_client = mmcv.FileClient(**self.file_client_args)\n\n if results['img_prefix'] is not None:\n filename = osp.join(results['img_prefix'],\n results['img_info']['filename'])\n else:\n filename = results['img_info']['filename']\n\n img_bytes = self.file_client.get(filename)\n img = mmcv.imfrombytes(img_bytes, flag=self.color_type)\n if self.to_float32:\n img = img.astype(np.float32)\n\n results['filename'] = filename\n results['ori_filename'] = results['img_info']['filename']\n results['img'] = img\n results['img_shape'] = img.shape\n results['ori_shape'] = img.shape\n results['img_fields'] = ['img']\n return results", "title": "" }, { "docid": "a23a430eabec5a5ffc0bae79b9bb049c", "score": "0.6122378", "text": "def process_image(self, base64_string: str) -> str:\n self.convert_base64_to_image(base64_string)\n self.corp_image()\n self.change_image_pixels()\n return self.image_to_string()", "title": "" }, { "docid": "8ba02a3226ed59234a99acd2ae29e487", "score": "0.6118086", "text": "def _process_image(self, data: np.ndarray,\n outfile: Optional[pathlib.Path] = None) -> np.ndarray:\n\n if data.ndim != 2:\n raise ValueError('Expected 2D image, got {}: {}'.format(data.shape, outfile))\n\n # if self._frame_count == 0:\n # print('Dtype: {}'.format(data.dtype))\n # print('Rows, Cols: {},{}'.format(*data.shape))\n\n # Crop the image\n data = data[self._crop_y_st:self._crop_y_ed, self._crop_x_st:self._crop_x_ed]\n if data.shape[0] == 0 or data.shape[1] == 0:\n err = 'Invalid image shape after bbox x({},{}) y({},{}): {}x{}: {}'\n err = err.format(self._crop_x_st,\n self._crop_x_ed,\n self._crop_y_st,\n self._crop_y_ed,\n data.shape[0],\n data.shape[1],\n outfile)\n raise ValueError(err)\n\n # Fix the contrast\n if self._fix_contrast not in (None, 'raw'):\n data = image_utils.fix_contrast(data, mode=self._fix_contrast)\n # Force the final image to be 8-bit\n data = np.round(data*255)\n data[data < 0] = 0\n data[data > 255] = 255\n data = data.astype(np.uint8)\n\n # Convert to RGB\n data = np.stack([data, data, data], axis=2)\n\n if data.ndim not in (2, 3):\n raise ValueError('Invalid segment after processing {}: {}'.format(data.shape, outfile))\n return data", "title": "" }, { "docid": "633a8b73b62ea802eda6deff54172aa3", "score": "0.61167383", "text": "def processImage(self):\n if self.bioImg is None:\n return\n QApplication.setOverrideCursor(Qt.WaitCursor)\n QApplication.processEvents()\n settings = self.getSettings()\n try:\n self.bioImg.process(settings)\n except Exception as e:\n QApplication.restoreOverrideCursor()\n errMsg = QMessageBox()\n errMsg.setText('Unexpected error')\n msg = 'Please report the problem with error message below and the input image\\n\\n'\n msg += \"Error : \" + str(sys.exc_info()[0]) + '\\n\\n' + str(traceback.format_exc())\n errMsg.setInformativeText(msg)\n errMsg.setStandardButtons(QMessageBox.Ok)\n errMsg.setIcon(QMessageBox.Warning)\n errMsg.setFixedWidth(300)\n errMsg.exec_()\n raise\n\n self.updateParams()\n self.csvManager.writeNewData(self.bioImg)\n self.csvManager2.writeNewData(self.bioImg)\n self.resetUI()\n self.refreshStatusbar()\n QApplication.restoreOverrideCursor()", "title": "" }, { "docid": "c07c062e43fcd7dce0c4346fd6876897", "score": "0.60999334", "text": "def __call__(self, results):\n\n if self.file_client is None:\n self.file_client = mmcv.FileClient(**self.file_client_args)\n\n if results['img_prefix'] is not None:\n filename = osp.join(results['img_prefix'],\n results['img_info']['filename'])\n 
else:\n filename = results['img_info']['filename']\n\n img_bytes = self.file_client.get(filename)\n img = mmcv.imfrombytes(\n img_bytes, flag=self.color_type, channel_order=self.channel_order)\n if self.to_float32:\n img = img.astype(np.float32)\n\n results['filename'] = filename\n results['ori_filename'] = results['img_info']['filename']\n results['img'] = img\n results['img_shape'] = img.shape\n results['ori_shape'] = img.shape\n results['img_fields'] = ['img']\n results['ori_img'] = results['img']\n return results", "title": "" }, { "docid": "4f6e024f61ccf3109576c7c12b2f4ddc", "score": "0.60888064", "text": "def take_image(self):\n return_value, image = self.camera.read()\n\n if self.color and self.write_to_file:\n self.write_image(image)\n if not self.color:\n image = self.monochrome_image(image)\n if self.write_to_file:\n self.write_image(image)\n\n # Changing the image to a 1D array.\n flat = image.ravel()\n return flat", "title": "" }, { "docid": "1133a1c9fc4a9421b320571046a80b11", "score": "0.6075215", "text": "def process_image(fname, output_dir, poller):\n result = []\n try:\n preprocessed_image, original_image = preprocess_image(fname, (poller.params.input_height, poller.params.input_width))\n results = classify_image(poller.params.model, preprocessed_image,\n threshold=poller.params.threshold, labels=poller.params.labels,\n mean=poller.params.mean, stdev=poller.params.stdev)\n out_file = os.path.join(output_dir, os.path.splitext(os.path.basename(fname))[0] + \".json\")\n with open(out_file, \"w\") as f:\n f.write(results.to_json_string())\n result.append(out_file)\n except KeyboardInterrupt:\n poller.keyboard_interrupt()\n except:\n poller.error(\"Failed to process image: %s\\n%s\" % (fname, traceback.format_exc()))\n return result", "title": "" }, { "docid": "3113916df7dc83ec3c876fc473ecf1db", "score": "0.6072556", "text": "def load_proc_image(self,image_path):\n load_path = self.image_path_prefix+'proc_img'+image_path+'.jpg'\n image = imread(load_path,plugin='pil')\n return image", "title": "" }, { "docid": "095f119d3843642e048f3d02d60e42a1", "score": "0.60693157", "text": "def get_image(self,imagefile): \n \n # Load an image and convert it to an array\n img = nibabel.load(os.path.join(self.sourcedir,imagefile))\n \n return img.get_data()", "title": "" }, { "docid": "40e45e05f0553276e2205255e0cc3efc", "score": "0.6064062", "text": "def process_img_pred_celery(user_id, frame_processed, frame_orig, frame_dict, output_dict):\n \n from .wsgi_aux import app\n with app.app_context():\n output_dict = process_img_pred(user_id, frame_processed, frame_orig, frame_dict, output_dict)\n socketio.emit('response_image', output_dict, room=output_dict['room'])", "title": "" }, { "docid": "9b112aec8b86c37a5ea859687775c5a9", "score": "0.605764", "text": "def process_image(img_path):\n\n img = image.load_img(img_path, target_size=(224, 224))\n img_array = image.img_to_array(img)\n img_array = np.expand_dims(img_array, axis=0)\n pImg = mobilenet.preprocess_input(img_array)\n return pImg", "title": "" }, { "docid": "bf217935dcaa6e661d1d4d2791e4e1b3", "score": "0.60563534", "text": "def process_image(path):\n # Read Image\n image = ndimage.imread(path)\n # Crop Image from top and bottom\n image = image[60:140,:,:]\n # Resize the Image to match the input size of the model\n image = cv2.resize(image,(64,64),cv2.INTER_AREA)\n return image", "title": "" }, { "docid": "aaeb5535fb9f5808623943d2194db077", "score": "0.6048713", "text": "def calculate(self):\n \n\n opts, args = self.op.parse_args(self.args) 
\n\n (addr_space, symtab, types) = load_and_identify_image(self.op, self.opts)\n\n filename = self.opts.filename\n temp = filename.replace(\"\\\\\", \"/\").lower().split(\"/\")\n imgname = temp[-1]\n\n if not opts.offset is None:\n \n try:\n offset = int(opts.offset, 16)\n except:\n op.error(\"EPROCESS offset must be a hexidecimal number.\")\n \n try:\n flat_address_space = FileAddressSpace(filename)\n except:\n op.error(\"Unable to open image file %s\" %(filename))\n\n directory_table_base = process_dtb(flat_address_space, types, offset)\n\n process_address_space = create_addr_space(addr_space, directory_table_base)\n\n if process_address_space is None:\n print \"Error obtaining address space for process [%d]\" % (process_id)\n return\n\n\n image_file_name = process_imagename(flat_address_space, types, offset)\n\n process_id = process_pid(flat_address_space, types, offset)\n \n peb = process_peb(flat_address_space, types, offset)\n\n if not process_address_space.is_valid_address(peb):\n #print \"Unable to read PEB for task.\"\n command_line = \"UNKNOWN\"\n return\n\n command_line = process_command_line(process_address_space, types, peb)\n\n if command_line is None:\n command_line = \"UNKNOWN\"\n\n \n modules = process_ldrs(process_address_space, types, peb)\n\n #if len(modules) > 0:\n #print \"%-12s %-12s %s\"%('Base','Size','Path')\n \n for module in modules:\n if not process_address_space.is_valid_address(module):\n return\n path = module_path(process_address_space, types, module)\n if path is None:\n path = \"UNKNOWN\"\n \n base = module_base(process_address_space, types, module)\n if base is None:\n base = \"UNKNOWN\"\n else:\n base = \"0x\" % (base)\n \n size = module_size(process_address_space, types, module)\n if size is None:\n size = \"UNKNOWN\"\n else:\n size = \"0x\" % (size)\n \n yield (image_file_name, process_id,command_line,base,size,path, imgname) \n \n else:\n \n # get list of windows processes\n all_tasks = process_list(addr_space, types, symtab) \n\n if not opts.pid == None:\n all_tasks = process_find_pid(addr_space,types, symtab, all_tasks, opts.pid)\n if len(all_tasks) == 0:\n print \"Error process [%d] not found\"%opts.pid\n\n \n for task in all_tasks:\n\n if not addr_space.is_valid_address(task):\n continue\n \n #if len(all_tasks) > 1:\n # print \"%s\"%star_line\n \n image_file_name = process_imagename(addr_space, types, task)\n\n process_id = process_pid(addr_space, types, task)\n\n process_address_space = process_addr_space(addr_space, types, task, opts.filename)\n\n if process_address_space is None:\n print \"Error obtaining address space for process [%d]\" % (process_id)\n continue\n \n peb = process_peb(addr_space, types, task)\n\n if not process_address_space.is_valid_address(peb):\n #print \"Unable to read PEB for task.\"\n command_line = \"UNKNOWN\"\n continue\n else:\n command_line = process_command_line(process_address_space, types, peb)\n\n if command_line is None:\n command_line = \"UNKNOWN\"\n\n #print \"Command line : %s\" % (command_line)\n \n #print \"something below\"\n #print read_unicode_string(process_address_space, types,\n # ['_PEB', 'CSDVersion'], peb)\n \n modules = process_ldrs(process_address_space, types, peb)\n\n #if len(modules) > 0:\n # print \"%-12s %-12s %s\"%('Base','Size','Path')\n \n for module in modules:\n if not process_address_space.is_valid_address(module):\n continue\n path = module_path(process_address_space, types, module)\n if path is None:\n path = \"UNKNOWN\"\n \n base = module_base(process_address_space, types, 
module)\n if base is None:\n base = \"UNKNOWN\"\n else:\n base = \"0x%1x\" % (base)\n \n size = module_size(process_address_space, types, module)\n if size is None:\n size = \"UNKNOWN\"\n else:\n size = \"0x%1x\" % (size)\n \n yield (image_file_name, process_id,command_line,base,size,path, imgname)", "title": "" }, { "docid": "891e9dd94d1f002f07ffc0972b1eb2e9", "score": "0.6027528", "text": "def process_image(input, **kwargs):\n from interface import load_pars\n from image import Image\n import os\n\n # Try to load input assuming it's a parameter save file or a dictionary.\n # load_pars returns None if this doesn't work.\n img, err = load_pars(input)\n\n # If load_pars fails (returns None), assume that input is an image file. If it's not a\n # valid image file (but is an existing file), an error will be raised\n # by img.process() during reading of the file.\n if img == None:\n if os.path.exists(input):\n img = Image({'filename': input})\n else:\n raise RuntimeError(\"File '\" + input + \"' not found.\")\n\n # Now process it. Any kwargs specified by the user will\n # override those read in from the parameter save file or dictionary.\n img.process(**kwargs)\n return img", "title": "" }, { "docid": "70b4565cd61c48d9ff3b3d53acc8081c", "score": "0.6027032", "text": "def _handle_result(self, result):\n image_num = 0\n for img_response in result:\n self._image_data[image_num] = img_response.shot.image.data\n image_num += 1", "title": "" }, { "docid": "52d8cf11cdaa8cc76007af61756982f5", "score": "0.60248125", "text": "def _parse_and_processed(self, example_serialized):\n image, label = self._parse_fn(example_serialized)\n image = self._image_preprocessing(image)\n return image, label", "title": "" }, { "docid": "03e8723e63a10344dd75501fb7f47ed2", "score": "0.6014056", "text": "def process_image(image):\n # Process a PIL image for use in a PyTorch model\n im = Image.open(image)\n\n # pytorch transforms can be used on and PIL.Image object\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n transform = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize])\n im = transform(im)\n return im", "title": "" }, { "docid": "2a8a9e2df1a7c6f8b8effa9395bf08cd", "score": "0.6009029", "text": "def _process_image(self, image):\n expects_means = [0.485, 0.456, 0.406]\n expects_std = [0.229, 0.224, 0.225]\n\n image = Image.open(image).convert(\"RGB\")\n\n # Any reason not to let transforms do all the work here?\n transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(expects_means, expects_std)])\n\n return transform(image)", "title": "" }, { "docid": "e386c238f0df318f75b896ab26d82152", "score": "0.6004115", "text": "def output(self):\r\n return (self.image_viewer.image, None)", "title": "" }, { "docid": "71d5b5c62c52eaaf22db63f5f08d19ac", "score": "0.5995343", "text": "def load_and_process_image(image_path):\n image = load_img(image_path)\n image = img_to_array(image)\n image = preprocess_input(image)\n image = np.expand_dims(image, axis=0)\n return image", "title": "" }, { "docid": "98a3098f5d83bff469a8edb9778b5609", "score": "0.5980046", "text": "def image_processing(root, img_name):\n\t# Parse image\n\timage = misc.imread(os.path.join(root,img_name), flatten= 0)\n\timage_array = misc.bytescale(image)\n\t#image_array = to_grey(image_array)\n\n\t# Crop image\n\ttop = find_top(image_array)\n\tbottom = find_bottom(image_array)\n\tleft = 
find_left(image_array)\n\tright = find_right(image_array)\n\n\t# Find some orientation with center search\n\tcenters = find_approx_centers(image_array, top, bottom, left, right)\n\n\t# Classify centers\n\tclassification = classify(image_array, centers, os.path.join(root,img_name))\n\n\t# Debug\n\t#print_center(image_array, centers, classification)\n\treturn classification", "title": "" }, { "docid": "374fec0210cdb8dd8d68a91137d98c68", "score": "0.5974606", "text": "def get_binary_result(self):\n return self.binary_image", "title": "" }, { "docid": "70d5a19a17e07e3937196b0a5be38632", "score": "0.5973736", "text": "def get_raw_image(self):\n\t\treturn self._raw_image", "title": "" }, { "docid": "5ea471eac3bd587c3de4d07f7f354a42", "score": "0.59690523", "text": "def process_image(self, image, input_shape) -> np.ndarray:\r\n width, height = image.size\r\n # ensure image type is compatible with model and convert if not\r\n if image.mode != \"RGB\":\r\n image = image.convert(\"RGB\")\r\n # center crop image (you can substitute any other method to make a square image, such as just resizing or padding edges with 0)\r\n if width != height:\r\n square_size = min(width, height)\r\n left = (width - square_size) / 2\r\n top = (height - square_size) / 2\r\n right = (width + square_size) / 2\r\n bottom = (height + square_size) / 2\r\n # Crop the center of the image\r\n image = image.crop((left, top, right, bottom))\r\n # now the image is square, resize it to be the right shape for the model input\r\n input_width, input_height = input_shape[1:3]\r\n if image.width != input_width or image.height != input_height:\r\n image = image.resize((input_width, input_height))\r\n\r\n # make 0-1 float instead of 0-255 int (that PIL Image loads by default)\r\n image = np.asarray(image) / 255.0\r\n # format input as model expects\r\n return image.reshape(input_shape).astype(np.float32)", "title": "" }, { "docid": "f20e37ef98474e029eb65dd246e6ad6f", "score": "0.5962878", "text": "def __call__(self, results):\n # Image is bgr\n img = results['img'][..., ::-1]\n img = Image.fromarray(img)\n img = self.transform(img)\n img = np.asarray(img)\n img = img[..., ::-1]\n results['img'] = img\n return results", "title": "" }, { "docid": "7f08ee2ea54bbf608cd8696a6fc4fd40", "score": "0.59553367", "text": "def proc_img(self, img, w=700):\n img = img[:,:w]\n return img", "title": "" }, { "docid": "a2f60a6de4d615b3ed4b1aeaa553aecb", "score": "0.5952116", "text": "def get_image(self):\n self.drawer.flush()\n return self.img", "title": "" }, { "docid": "ce0485f06240d3c92b80431f3c6cb0ab", "score": "0.59459233", "text": "def image (self):\n\n\t\ttry:\n\t\t\tflag, img_array = yield threads.deferToThread(self.camera.read)\n\t\texcept SystemError:\n\t\t\treturn\n\n\t\tif flag is False:\n\t\t\tprint \"No image\"\n\t\t\treturn\n\n\t\tdefer.returnValue(SimpleCV.Image(\n\t\t\tsource = cv.fromarray(img_array),\n\t\t\tcv2image = True)\n\t\t)", "title": "" }, { "docid": "29ea012497e3a71ed5875e7e5f847cea", "score": "0.5933279", "text": "def read_image_response_body():\n image_file = open(os.path.join(os.path.dirname(__file__), 'image.gif'), 'rb')\n return image_file.read()", "title": "" }, { "docid": "fd0c18f3f93958b45ee8ab19ce5973e5", "score": "0.59317374", "text": "def process_image(self, image, dims=False):\n image = self._load_image(image)\n return ImageCheck.check_and_crop(image, dims)", "title": "" }, { "docid": "48c7bea4b6c79e6ab9ad5fa1fb959378", "score": "0.59283745", "text": "def get_processed_image(username):\n try:\n user = 
User.objects.raw({'_id': username}).first()\n return user.processed_image\n except DoesNotExist:\n return None", "title": "" }, { "docid": "cf063ae9a6b268cdf1f03159abc84d99", "score": "0.59189093", "text": "def process(self, image):\n try:\n if self.processing_job.isAlive():\n self.display(\"Processing to slow\")\n return\n except AttributeError:\n pass\n self.processing_job = ProcessingJob()\n self.processing_job.image = image\n self.processing_job.results = self.results\n self.processing_job.start()", "title": "" }, { "docid": "315236e700ecbed35751a702546ad7f7", "score": "0.5918512", "text": "def process_data(self,img_path): \n img = tf.io.read_file(img_path)\n img = tf.io.decode_jpeg(img, channels=1)\n img = tf.image.resize(img, (196,196))\n img = tf.cast(img / 255., tf.float32)\n img = tf.reshape(img, (1,196,196,1))\n img = np.array(img)\n \n return img", "title": "" }, { "docid": "756a69c1e63f5e8bbf440e4e12c88708", "score": "0.5916828", "text": "def get_raw_image(self,imagefile): \n \n # Load an image and convert it to an array\n img = nibabel.load(os.path.join(self.sourcedir,imagefile))\n \n return img", "title": "" }, { "docid": "2cbe90c7643430c46bce855da8e20d14", "score": "0.59126264", "text": "def getImage(self):\n\t\tglobal root, image\n\n\t\tpostscript = self.canvas.postscript(colormode=\"gray\")\n\t\timage = Image.open(io.BytesIO(postscript.encode('utf-8')))\n\n\n\t\troot.destroy()", "title": "" }, { "docid": "40420202161809fb202893ab29622c96", "score": "0.59038264", "text": "def get_image(self):\n return Image.open(self.get_image_path())", "title": "" }, { "docid": "7c64ea4058547424faad6497c63fec9b", "score": "0.5901178", "text": "async def __call__(self, image: Image) -> Image:\n ...", "title": "" }, { "docid": "0d2f42ffa5e27dcf9991326f7da5e6de", "score": "0.58970296", "text": "def load_image(self):", "title": "" }, { "docid": "fc53cb0723f3bd6c81bffd5c9b1673d7", "score": "0.5895032", "text": "def process_image(img):\n\n img = Image.open(img)\n img.thumbnail((256, 256))\n\n width, height = img.size # Get dimensions\n\n new_width = 224\n new_height = 224\n left = (width - new_width) / 2\n top = (height - new_height) / 2\n right = (width + new_width) / 2\n bottom = (height + new_height) / 2\n\n img = img.crop((left, top, right, bottom))\n\n np_image = np.array(img)\n img = np_image / 255\n\n mean = np.array([0.485, 0.456, 0.406])\n stdv = np.array([0.229, 0.224, 0.225])\n img = (img - mean) / stdv\n\n img = img.transpose((2, 0, 1))\n return torch.from_numpy(img)", "title": "" }, { "docid": "bc68fee4b35c5d6ceaee80cb746428d8", "score": "0.58904344", "text": "def process(self, statePair):\n #_, img = statePair\n img = statePair[:,:,1]\n assert img.shape == (self.im_width, self.im_height)\n return np.squeeze(np.reshape(img, [self.im_width * self.im_height, -1]) - self.ref)", "title": "" }, { "docid": "cd2f5905749c8bdf07718d505fa441f7", "score": "0.5888443", "text": "def process_image(*args, keycache, config, patientcache):\n # check file type\n task, obj, _ = args\n if task != \"process\" or Path(obj.key).suffix.lower() != \".dcm\":\n # not an image, don't do anything with it\n return bonobo.constants.NOT_MODIFIED\n\n # check if work is already done\n image_in_cache = keycache.exists(obj.key)\n image_uuid = Path(obj.key).stem\n metadata_in_cache = keycache.exists(f\"{image_uuid}.json\")\n if metadata_in_cache and image_in_cache:\n # files exist, nothing to do here\n return\n\n # download the image\n image_data = PartialDicom(obj.Object()).download()\n if image_data is None:\n # we 
couldn't read the image data correctly\n logger.warning(\n f\"Object '{obj.key}' couldn't be loaded as a DICOM file, skipping!\"\n )\n return\n\n # extract the required data from the image\n patient_id = image_data.PatientID\n study_id = image_data.StudyInstanceUID\n series_id = image_data.SeriesInstanceUID\n group = patientcache.get_group(patient_id)\n if group is not None:\n training_set = group == \"training\"\n else:\n logger.error(\n f\"Image without patient data: {obj.key}; \"\n + f\"included patient ID: {patient_id}; \"\n + \"skipping!\"\n )\n return\n prefix = (\n constants.TRAINING_PREFIX\n if training_set\n else constants.VALIDATION_PREFIX\n )\n image_type = constants.MODALITY.get(\n image_data[\"Modality\"].value, \"unknown\"\n )\n\n date = get_date_from_key(obj.key)\n if date:\n # the location of the new files\n new_key = posixpath.join(\n prefix,\n image_type,\n patient_id,\n study_id,\n series_id,\n Path(obj.key).name,\n )\n metadata_key = posixpath.join(\n prefix,\n f\"{image_type}-metadata\",\n patient_id,\n study_id,\n series_id,\n f\"{image_uuid}.json\",\n )\n # send off to copy or upload steps\n if not object_exists(new_key):\n yield \"copy\", obj, new_key\n if not object_exists(metadata_key):\n yield \"metadata\", metadata_key, image_data", "title": "" }, { "docid": "ae1018b175ed67c5b50527733f846ceb", "score": "0.5886974", "text": "def _process_image(\n self, image: JpegImageFile, width: int, body: str,\n author: str) -> Image:\n resized = self._resize(image, width)\n draw = ImageDraw.Draw(resized)\n header_font = fit_header_font(body, resized.size, self.font_file)\n body_font = fit_footer_font(author, resized.size, self.font_file)\n img_W, img_H = resized.size\n header_w, _ = draw.textsize(body, font=header_font)\n body_w, body_h = draw.textsize(author, font=body_font)\n draw.text(((img_W-header_w)/2, 20),\n body, fill=\"white\", font=header_font)\n draw.text(((img_W-body_w)/2, img_H-body_h*2),\n author, fill=\"white\", font=body_font)\n return resized", "title": "" }, { "docid": "3775182cc2511abcc298ddfc1aee53cf", "score": "0.58860236", "text": "def get_input(self):\n\t\treturn self.img", "title": "" }, { "docid": "89cdc69cb02fa8fa9da781e663d5ff37", "score": "0.58795017", "text": "def image (self):\n\n\t\ti = self.camera.getImage()\n\n\t\tif i is None:\n\t\t\tprint \"No image\"\n\n\t\treturn i", "title": "" }, { "docid": "96a70f2017105d586a6fa87bf63d6717", "score": "0.58792675", "text": "def get_image(self):\n return self.image", "title": "" }, { "docid": "ea63f56df667f880734b4e86665802a4", "score": "0.58791053", "text": "def transform(self, image):\n pass", "title": "" }, { "docid": "70cc03838fee0d79136d37db6f17aa4f", "score": "0.5877867", "text": "def processImage(arg1, image=False):\n\timg = arg1\n\tif(not image):\n\t\timg = cv2.imread(arg1,0)\n\timg = cv2.GaussianBlur(img, (5,5), 0)\n\timg = cv2.resize(img,(SIDE_SIZE,SIDE_SIZE))\n\timg = cv2.adaptiveThreshold(img,1,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)\n\t#img = list(map((lambda x: x/float(1)-0.1), img))\n\treturn np.reshape(img, (TOTAL_SIZE, 1))", "title": "" }, { "docid": "16d499eb7197e74ca8fea66cf9cda2fb", "score": "0.5876384", "text": "def _processor(self):\n\n # Get latest image from Mailbox\n image = self._mailbox.get()\n\n # Call Every Registered Callback\n if self._running:\n for callback in self.callbacks:\n callback(image)\n\n # Update Statistics\n self._update_dt()", "title": "" }, { "docid": "74745997b4cd616fddf62fb8763b6f44", "score": "0.5866133", "text": "def processImg(img, 
camvals):\n img = imutils.resize(img, width = camvals[\"width\"], height = camvals[\"height\"])\n img = cv2.cvtColor(img, camvals[\"color\"])\n img = Image.fromarray(img)\n imgbytes = io.BytesIO()\n img.save(imgbytes, format = \"JPEG\") #, quality=camvals[\"quality\"])\n imgbytes = imgbytes.getvalue()\n imgbytes = zlib.compress(imgbytes, camvals[\"compression\"])\n return imgbytes", "title": "" }, { "docid": "bc03e82c25ec1360114ad27120ec8e2b", "score": "0.58655936", "text": "def process_image_for_saving(image: np.ndarray) -> np.ndarray:\n # Copy image\n img = image.copy()\n\n # Remove batch dimension\n img = np.squeeze(img, 0)\n\n # Add mean (from VGG19 model)\n img[:, :, 0] += 103.939\n img[:, :, 1] += 116.779\n img[:, :, 2] += 123.68\n\n # BGR to RGB\n img = img[:, :, ::-1]\n\n # Clip values\n img = np.clip(img, 0, 255).astype(\"uint8\")\n return img", "title": "" }, { "docid": "853c3593c0d51a2887a4c802d4e30dec", "score": "0.58615106", "text": "def read_image(file: IO) -> Image:", "title": "" }, { "docid": "0cdb02d8aecf3fc39deed1acd65fffa9", "score": "0.5858906", "text": "def process_one(self, filename, process_steps, bias, pixel_flat=None, illum_flat=None, bpm=None):\n # Load raw image\n rawImage = rawimage.RawImage(filename, self.spectrograph, self.det)\n # Process\n processrawImage = processrawimage.ProcessRawImage(rawImage, self.par, bpm=bpm)\n processedImage = processrawImage.process(process_steps, bias=bias, pixel_flat=pixel_flat,\n illum_flat=illum_flat)\n # Return\n return processedImage", "title": "" }, { "docid": "c2147e3da796d3defcfd5b551a89f60f", "score": "0.58515847", "text": "def get_file_image(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "5f5074d1886be337ae3fe6365cebae7e", "score": "0.5848452", "text": "def process_image(image):\n\n image = Image.fromarray(image)\n # Resize\n img = image.resize((256, 256))\n\n # Center crop\n width = 256\n height = 256\n new_width = 224\n new_height = 224\n\n left = (width - new_width) / 2\n top = (height - new_height) / 2\n right = (width + new_width) / 2\n bottom = (height + new_height) / 2\n img = img.crop((left, top, right, bottom))\n\n # Convert to numpy, transpose color dimension and normalize\n rgbimg = Image.new(\"RGB\", img.size)\n rgbimg.paste(img)\n img = rgbimg\n img = np.array(img).transpose((2, 0, 1)) / 256\n\n # Standardization\n means = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))\n stds = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))\n\n img = img - means\n img = img / stds\n\n return img", "title": "" }, { "docid": "d53e63357d6dde3a817ad0bced299ae6", "score": "0.58356947", "text": "def post(self):\n model_name = self.get_argument(\"model-name\", default=\"mosaic\")\n model = self.get_model(model_name)\n content_image = self.request.files.get(\"content-image\", None)\n if content_image is None or len(content_image) == 0:\n return self.write_error(500)\n content_image = content_image[0]\n input_im = Image.open(BytesIO(content_image.body))\n output_image = yield stylizer_pool.submit(\n handle_input_image,\n model,\n input_im,\n self.use_cuda,\n self.gpu_idx,\n False\n )\n self.set_header(\"Content-Type\", \"image/jpeg\")\n self.set_header(\"Refresh\", \"1\")\n self.set_header(\"content-transfer-encoding\", \"binary\")\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header(\"Access-Control-Allow-Methods\", \"POST, GET\")\n output_data = BytesIO()\n output_image.save(output_data, 
format=\"JPEG\")\n self.write(base64.b64encode(output_data.getvalue()))", "title": "" }, { "docid": "3651dfa7f1939a96702854c26241db8f", "score": "0.58274", "text": "def getAssembledImg(self):\n img = self.det.image(self.evt)\n return img", "title": "" }, { "docid": "68367f035b4c5c5fdd9c7fdc29803a8e", "score": "0.58238506", "text": "def PNG2Data(image_folder_location): \r\n #Make a vector of PNG files in the image directory so it can repeat \r\n #process on all images in directory\r\n images = []\r\n os.chdir(image_folder_location)\r\n for image in os.listdir(image_folder_location):\r\n if image.endswith(\".png\"):\r\n images.append(image)\r\n \r\n #create empty lists to compile information\r\n filenames = []\r\n \r\n #make a subdirectory to put the image exports if the directory\r\n #does not already exist\r\n if not os.path.exists('CroppedImages'):\r\n os.makedirs('CroppedImages')\r\n \r\n #create variable for name of folder with cropped images\r\n cropped_images_folder_location = image_folder_location + '/CroppedImages'\r\n\r\n #make a subdirectory to put the form number image exports if the directory\r\n #does not already exist\r\n if not os.path.exists('FormNumber'):\r\n os.makedirs('FormNumber')\r\n \r\n #loop through files in folder and find their form number\r\n for image in images:\r\n #find the filename\r\n filename = findFileName(image)\r\n filenames.append(filename)\r\n \r\n #find the form number and page number\r\n form_number = findFormNumber(image)\r\n \r\n #check if form_number found. If not, continue to next image. \r\n if form_number is None:\r\n continue\r\n \r\n page_number = findPageNumber(image)\r\n \r\n #set image coordinates\r\n setImageCoords(image)\r\n \r\n #find coords needed for form number\r\n coords = switchCoords2(form_number, page_number)\r\n \r\n #determine if this file contains data based on page_info lookup table and then\r\n #crop the image if it does\r\n if isinstance(page_info[form_number], int):\r\n if page_info[form_number] == page_number:\r\n #loop through the coordinates for each attribute and create a new image\r\n for key, value in coords.items():\r\n #only do the crop if there is the correct number of dimensions\r\n if len(value) != 4:\r\n if len(value) == 0:\r\n print(key, \" has no coords yet.\")\r\n continue\r\n else:\r\n print(key, \"has an incorrect number of dimensions.\")\r\n continue\r\n else:\r\n crop(image, value, 'CroppedImages/' + filename + '_' + key +'.png')\r\n else:\r\n print(\"Not a data form\")\r\n else:\r\n if (page_info[form_number][0] == page_number) or (page_info[form_number][1] == page_number):\r\n #loop through the coordinates for each attribute and create a new image\r\n for key, value in coords.items():\r\n #only do the crop if there is the correct number of dimensions\r\n if len(value) != 4:\r\n if len(value) == 0:\r\n print(key, \" has no coords yet.\")\r\n continue\r\n else:\r\n print(key, \"has an incorrect number of dimensions.\")\r\n continue\r\n else:\r\n crop(image, value, 'CroppedImages/' + filename + '_' + key +'.png')\r\n else:\r\n print(\"Not a data form\")\r\n \r\n #remove the split PDF images so process not repeated on them\r\n os.remove(image)\r\n \r\n #run tesseract on folder with cropped images\r\n tesseract(cropped_images_folder_location)\r\n \r\n ###have a process complete message\r\n easygui.msgbox(\"Completed!\", title=\"i9 Processing\")", "title": "" }, { "docid": "ea74190f1991994a34ecf44d43026d51", "score": "0.582367", "text": "def __call__(self, results):\n\n img = results['img']\n x_start, 
y_start, x_stop, y_stop = results['win']\n width = x_stop - x_start\n height = y_stop - y_start\n\n patch = img[y_start:y_stop, x_start:x_stop]\n if height > patch.shape[0] or width > patch.shape[1]:\n patch = mmcv.impad(patch, shape=(height, width))\n\n if self.to_float32:\n patch = patch.astype(np.float32)\n\n results['filename'] = None\n results['ori_filename'] = None\n results['img'] = patch\n results['img_shape'] = patch.shape\n results['ori_shape'] = patch.shape\n results['img_fields'] = ['img']\n return results", "title": "" }, { "docid": "fbf963ccb476c15741f0bd20c0b01baf", "score": "0.58138454", "text": "def get_image_data(self, use_original=False):\n\n if self._extracted_merged_image and use_original:\n return self._extracted_merged_image\n\n r = Renderer(self)\n return r.render()", "title": "" }, { "docid": "ace96a4d72bb91e2ff3ea500ef99cca7", "score": "0.58082485", "text": "def export_img(self):\n self._update()\n return self._img", "title": "" }, { "docid": "65c9cad040a4596928984814894e2a51", "score": "0.5791", "text": "def image_callback(self, msg):\n\n\t\tself.image = self.bridge.imgmsg_to_cv2(msg, desired_encoding=\"bgr8\")", "title": "" }, { "docid": "69d2fb81871b0922d445c4938163de78", "score": "0.57897717", "text": "def image(x):\n return x.image()", "title": "" }, { "docid": "2f11084b5e73f303b869a571d85959d8", "score": "0.5788476", "text": "def __call__(self, results):\n\n if self.file_client is None:\n self.file_client = mmcv.FileClient(**self.file_client_args)\n\n if results['img_prefix'] is not None:\n filename = [\n osp.join(results['img_prefix'], fname)\n for fname in results['img_info']['filename']\n ]\n else:\n filename = results['img_info']['filename']\n\n img = []\n for name in filename:\n img_bytes = self.file_client.get(name)\n img.append(mmcv.imfrombytes(img_bytes, flag=self.color_type))\n img = np.stack(img, axis=-1)\n if self.to_float32:\n img = img.astype(np.float32)\n\n results['filename'] = filename\n results['ori_filename'] = results['img_info']['filename']\n results['img'] = img\n results['img_shape'] = img.shape\n results['ori_shape'] = img.shape\n # Set initial values for default meta_keys\n results['pad_shape'] = img.shape\n results['scale_factor'] = 1.0\n num_channels = 1 if len(img.shape) < 3 else img.shape[2]\n results['img_norm_cfg'] = dict(\n mean=np.zeros(num_channels, dtype=np.float32),\n std=np.ones(num_channels, dtype=np.float32),\n to_rgb=False)\n return results", "title": "" }, { "docid": "966c5d0c7f11cca0db6491f413328811", "score": "0.5788234", "text": "def get_image (self, *args, **kwargs):\r\n # Just an alias for semantic clarity...\r\n return self.build_image(*args,**kwargs)", "title": "" }, { "docid": "4c1b6b875af885cee88417b67633a71e", "score": "0.578538", "text": "def run(self, image: iter, **kwargs) -> None:", "title": "" }, { "docid": "8c45b6aa73c0f122a4710f629206b8bf", "score": "0.57712483", "text": "def image_transform(instance):\n byte_array = base64.b64decode(instance[\"data\"])\n image = Image.open(io.BytesIO(byte_array))\n instance[\"data\"] = image_processing(image).tolist()\n logging.info(instance)\n return instance", "title": "" } ]
6f79505ce9f122994b5991bf039a6436
polbasis(self) -> string Summary Return the polarization basis in the calibration table ('L' for linear or 'C' for circular). Description This member function returns the polarization basis in the calibration table ('L' for linear or 'C' for circular).
[ { "docid": "72c4ec11feddbef5fcda6ddb2747b04e", "score": "0.79458225", "text": "def polbasis(self):\n return _calanalysis.calanalysis_polbasis(self)", "title": "" } ]
[ { "docid": "9ddff4da2e30336611fc206b7183ff3b", "score": "0.61899257", "text": "def Lri_basis(self):\n return self.Lri_basis_set", "title": "" }, { "docid": "8a5e35c53d30934fa38fb888e9874ef7", "score": "0.6093768", "text": "def getBasis(self):\n return self.__basis", "title": "" }, { "docid": "0e2631b044b7bf878e5badba9c43f304", "score": "0.60009676", "text": "def get_polynomial_basis(self) -> typing.List[AnyFunction]:\n return self._basis", "title": "" }, { "docid": "1f608217eb66b50c81dbbc09e2f153bf", "score": "0.58080673", "text": "def getBasisFunctionsWithLVal(self):\n\t\traise NotImplementedError(\"\")", "title": "" }, { "docid": "01feac688224ddcfeefcec0adc7a9b86", "score": "0.56200033", "text": "def __init__(self):\n super().__init__()\n self._name = \"Single-Qubit Pauli Overcomplete Preparation Basis\"\n self._basis = {\"0\": np.array([[1, 0], [0, 0]]).astype(complex),\n \"1\": np.array([[0, 0], [0, 1]]).astype(complex),\n \"A\": np.array([[1, 1], [1, 1]]).astype(complex)/2,\n \"D\": np.array([[1, -1], [-1, 1]]).astype(complex)/2,\n \"L\": np.array([[1, -1j], [1j, 1]]).astype(complex)/2,\n \"R\": np.array([[1, 1j], [-1j, 1]]).astype(complex)/2}", "title": "" }, { "docid": "ce31de3387e5a439b4ad7f6712233aed", "score": "0.5590029", "text": "def basis(self) -> float:\n return self.__basis", "title": "" }, { "docid": "3267d7d1b0e051f05eab3a396830a30f", "score": "0.55111456", "text": "def one_basis(self):\n basis = self.basis_name()\n if basis == 'serre-cartan' or basis == 'arnonc':\n return (0,)\n if not self._generic:\n return ()\n return ((), ())", "title": "" }, { "docid": "5eaeddbcb774fcb8981ee10a2659a814", "score": "0.5510506", "text": "def __init__(self):\n super().__init__()\n self._name = \"Single-Qubit Pauli Preparation Basis\"\n self._basis = {\"0\": np.array([[1, 0], [0, 0]]).astype(complex),\n \"1\": np.array([[0, 0], [0, 1]]).astype(complex),\n \"A\": np.array([[1, 1], [1, 1]]).astype(complex)/2,\n \"L\": np.array([[1, -1j], [1j, 1]]).astype(complex)/2}", "title": "" }, { "docid": "4e1c8df58d2a20ca15ee615359527e19", "score": "0.550433", "text": "def get_polynomial_basis(self) -> typing.List[AnyFunction]:\n pass", "title": "" }, { "docid": "f8fa50364774bc80b66ae82e0580316e", "score": "0.5483415", "text": "def get_polynomial_basis(self) -> typing.List[AnyFunction]:\n raise NotImplementedError()", "title": "" }, { "docid": "f8fa50364774bc80b66ae82e0580316e", "score": "0.5483415", "text": "def get_polynomial_basis(self) -> typing.List[AnyFunction]:\n raise NotImplementedError()", "title": "" }, { "docid": "d1bf4f16313fbac13082a4cd6f0d070f", "score": "0.53959364", "text": "def Ri_rpa_basis_set(self):\n return self.Ri_aux_basis_set", "title": "" }, { "docid": "f6d5abd0822b83997a93e7fba822cba5", "score": "0.53462464", "text": "def poly(self) -> str:\n return self.alert_data[\"poly\"]", "title": "" }, { "docid": "9e031b2ace25d0f24f58da416d132971", "score": "0.5339238", "text": "def getPyQ(self):\n symorder = {'S': 0, 'P': 1, 'D': 2, 'F': 3}\n \"\"\"order of orbitals, first s then p, d, f\"\"\"\n for atno,contr in self.basis_data.iteritems():\n contr.sort(cmp=lambda x,y: symorder[x[0]]-symorder[y[0]])\n return(self.basis_data)", "title": "" }, { "docid": "1bf1614737176549e133d9afbda5f4c0", "score": "0.53381765", "text": "def get_basis(self):\n return self.jonesbasis", "title": "" }, { "docid": "0d70d667a0f8079245771a9bf966d5ee", "score": "0.529723", "text": "def poincare_polynomial(self):\n charpoly = self.characteristic_polynomial()\n R = charpoly.parent()\n x = R.gen(0)\n poincare = 
(-x)**self.dimension() * charpoly(-QQ(1)/x)\n return R(poincare)", "title": "" }, { "docid": "f4e1d1e783dd029294dd4931b9809905", "score": "0.5226901", "text": "def derivation_module_basis(self, algorithm=\"singular\"):\n alg = algorithm # prevent possible changes to a global variable\n if alg == \"singular\":\n #import sage.libs.singular.function_factory\n #syz = sage.libs.singular.function_factory.ff.syz\n f = self.defining_polynomial()\n I = f + f.jacobian_ideal()\n IS = I._singular_()\n ISS = IS.syz()\n MSTD = ISS.mstd()\n basis = MSTD[2]._sage_().transpose().submatrix(0,1)\n try:\n det = basis.det()\n # Check using Saito's criterion\n if det / f in f.parent().base_ring() and not det.is_zero():\n return basis.rows()\n except ValueError: # Non-square matrix or det = 0\n pass\n # Check if it is free\n if not self.is_free(algorithm=alg):\n return None\n # The syzygy module did not give a basis, but since it is free,\n # fallback to the Barakat-Cuntz method\n alg = \"BC\"\n if alg == \"BC\":\n C = self.derivation_module_free_chain()\n if C is not None:\n if not C: # C is an empty list\n S = self.parent().ambient_space().symmetric_space()\n return matrix.identity(S, self.dimension()).rows()\n from sage.misc.misc_c import prod\n return prod(reversed(C)).rows()\n return None\n else:\n raise ValueError(\"invalid algorithm\")", "title": "" }, { "docid": "ab5d059b80f97931dd5c8d6d2ce0725f", "score": "0.5215356", "text": "def getRestultPolynomialLagrage(self, points):\n x, y = points\n final_pol = np.polynomial.Polynomial([0.])\n n = len(x) # banyak point\n for i in range(n):\n p = np.polynomial.Polynomial([1.]) # pembilang\n q = 1 # penyebut\n for j in range(n):\n if i == j:\n continue\n p_temp = np.polynomial.Polynomial([-x[j], 1.]) # x - x[j]\n p = np.polymul(p, p_temp)\n q_temp = x[i] - x[j] # x[i] - x[j]\n q *= q_temp\n p *= y[i]/q\n final_pol = np.polyadd(final_pol, p)\n p = np.flip(final_pol[0].coef, axis=0)\n return p", "title": "" }, { "docid": "b0bfc7171a2b646cc634666c99cc09f3", "score": "0.52094346", "text": "def basis(self):\n try:\n return self.__basis\n except AttributeError:\n M = self._matrix_space()\n B = M.basis()\n self.__basis = tuple([self(x) for x in B])\n return self.__basis", "title": "" }, { "docid": "641b60e591650fd1defcac4db8c62a1f", "score": "0.519928", "text": "def defining_polynomial(self):\n return self.defining_polynomials()[0]", "title": "" }, { "docid": "1250d9c62808155a67d5b24077c96804", "score": "0.5197438", "text": "def basisOppervlakte(self):\n return self._basisOppervlakte.get_waarde()", "title": "" }, { "docid": "682b05748678621f188126d5b886244d", "score": "0.51462626", "text": "def basis(self, basis):\n from sage.misc.superseded import deprecation\n deprecation(10052, 'The .basis() method is deprecated. 
Use .change_basis() instead.')\n return self.change_basis(basis)", "title": "" }, { "docid": "dcf18be4dffd24c32e81db6097038faa", "score": "0.5131381", "text": "def basis_fields(self, points):\n B_coupling = self.mesh_conductor.B_coupling(points)\n\n return B_coupling @ self.basis", "title": "" }, { "docid": "cf0077e74456362da573b0a3969176d9", "score": "0.50254124", "text": "def primitive_basis(symbols: List[str]) -> LatticeBasis:\n if len(symbols) != 1:\n raise ValueError(_basis_err.format(\"primitive\"))\n return [(symbols[0], _pt0)]", "title": "" }, { "docid": "bc2f7fcef2362741a2aafd0896735a06", "score": "0.5024902", "text": "def final_basis(self, text):\n atomic_position_last_index = text.rfind(\"ATOMIC_POSITIONS (crystal)\")\n if atomic_position_last_index < 0:\n return self.initial_basis(text)\n number_of_atoms = self._number_of_atoms(text)\n basis = self._extract_basis(text[atomic_position_last_index:], number_of_atoms)\n\n # final basis is in crystal units, hence it needs to be converted into angstrom.\n final_lattice_vectors = self.final_lattice_vectors(text)\n lattice_matrix = np.array([final_lattice_vectors[\"vectors\"][key] for key in [\"a\", \"b\", \"c\"]]).reshape((3, 3))\n for coordinate in basis[\"coordinates\"]:\n coordinate[\"value\"] = np.dot(coordinate[\"value\"], lattice_matrix).tolist()\n\n return basis", "title": "" }, { "docid": "78cf7315b8fde1afc1b3493f5eb7036d", "score": "0.5006696", "text": "def Ri_aux_basis(self):\n return self.Ri_aux_basis_set", "title": "" }, { "docid": "7fa04ac104f9327afd2024ef96755d2a", "score": "0.50039446", "text": "def character_polynomial(self):\n \n #Create the polynomial ring we will use\n k = self.size()\n P = PolynomialRing(QQ, k, 'x')\n x = P.gens()\n\n #Expand s_mu in the power sum basis\n import sf.sfa\n s = sf.sfa.SFASchur(QQ)\n p = sf.sfa.SFAPower(QQ)\n ps_mu = p(s(self))\n\n #Replace each p_i by i*x_i-1\n items = ps_mu.monomial_coefficients().items() #items contains a list of (partition, coeff) pairs\n partition_to_monomial = lambda part: prod([ (i*x[i-1]-1) for i in part ])\n res = [ [partition_to_monomial(mc[0]), mc[1]] for mc in items ]\n\n #Write things in the monomial basis\n res = [ prod(pair) for pair in res ]\n res = sum( res )\n\n #Apply the umbral operator and return the result\n return misc.umbral_operation(res)", "title": "" }, { "docid": "9874e97c24ee861a12e14b3712282920", "score": "0.49805024", "text": "def load_basis(basis_name):\n basis_set = dict()\n reading = False\n current_function = None\n basis_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','basis')\n for line in open(os.path.join(basis_folder,basis_name.lower()+'.basis')):\n if line.startswith(\"#\"):\n continue\n if line.startswith(\"BASIS\"):\n reading = True\n if reading is True:\n if line.startswith(\"END\"):\n reading = False\n continue\n ls = line.split()\n if len(ls) == 2:\n if ls[1].isalpha(): # if title line like \"H S\"\n elem, spdf = ls\n if elem not in basis_set:\n basis_set[elem] = []\n current_function = ContractedBasisFunction(ftype=spdf, coefs=[])\n basis_set[elem].append(current_function)\n else: # if value line like \" 5.4471780 0.1562850\"\n a, c = float(ls[0]), float(ls[1])\n # normalize Gaussian (2a/pi)^3/4 * exp(-a*r^2)\n norm = (2.0 * a / np.pi)**0.75\n current_function.coefs.append((a, c*norm))\n return basis_set", "title": "" }, { "docid": "f312fee5594a8a716fbe5c741586a40d", "score": "0.49579558", "text": "def _basis_dictionary(self,basis):\n a = self.change_basis(basis)\n return a.monomial_coefficients()", 
"title": "" }, { "docid": "cd296e131d033074335b3d2a14c61687", "score": "0.49547455", "text": "def basis_column(self):\n if self.__basis is None:\n self.__basis = Column(\n self.__data.get_column_attributes(self.BASIS_NAME),\n self.__data,\n self.BASIS_NAME)\n return self.__basis", "title": "" }, { "docid": "fb64553b5e86644292283e820d33961c", "score": "0.49311262", "text": "def derived_b_lc(self):\n\n if not np.all([p in self.columns for p in ['P0', 'P1']]):\n return\n\n # get period and period derivative\n P0 = self.catalogue['P0']\n P1 = self.catalogue['P1']\n\n BLC = np.full(self.catalogue_len, np.nan)\n idx = (P1 > 0.) & np.isfinite(P1) & np.isfinite(P0)\n BLC[idx] = 3.0e8*np.sqrt(P1[idx])*np.abs(P0[idx])**(-5./2.)\n self.update(BLC, name='B_LC')", "title": "" }, { "docid": "e0508a54693db4a6b21221b8257f0c97", "score": "0.4928441", "text": "def change_basis(self, basis='milnor'):\n A = self.parent()\n return A._change_basis(self, basis)", "title": "" }, { "docid": "dabf52d8dce22e2423ed00411243bb03", "score": "0.49275562", "text": "def rational_function_string(self):\n if self._rational_function == None:\n def stringify_cone(cone,fundamental_parallelepiped):\n d = cone.ambient_dimension\n V = cone.generators\n def stringify_monomial(z):\n return \"*\".join((\"z%d**%d\" % (i,z[i]) for i in xrange(d)))\n num = \"+\".join((stringify_monomial(t) for t in fundamental_parallelepiped))\n den = \"*\".join(( \"(1-%s)\" % stringify_monomial(c) for c in V))\n return (\"(%s)/(%s)\" % (num,den))\n rational_functions = []\n for cone, multiplicity in self.symbolic_cones().iteritems():\n pi = self.fundamental_parallelepipeds()[cone]\n rational_functions.append( str(multiplicity) + \"*\" + stringify_cone(cone,pi) )\n if len(rational_functions) > 0:\n self._rational_function = \"+\".join(rational_functions)\n else:\n self._rational_function = \"0\"\n return self._rational_function", "title": "" }, { "docid": "68ca9c1f8c53d16f8971631d9a73b0f4", "score": "0.49100542", "text": "def initial_basis(self, text):\n alat = self._get_alat(text)\n number_of_atoms = self._number_of_atoms(text)\n basis_in_alat_units = self._extract_basis(text[text.find(\"positions (alat units)\") :], number_of_atoms)\n for coordinate in basis_in_alat_units[\"coordinates\"]:\n coordinate[\"value\"] = [x * alat * Constant.BOHR for x in coordinate[\"value\"]]\n return basis_in_alat_units", "title": "" }, { "docid": "f6af659d43e57e6e96425e4b64d081ae", "score": "0.49017513", "text": "def pr_as_poly(self, spamTuple, circuit, comm=None, memLimit=None):\n return self.prs_as_polys(spamTuple[0], [spamTuple[1]], circuit,\n comm, memLimit)[0]", "title": "" }, { "docid": "2ee0001b48926fee72f5e5a245c77b46", "score": "0.48807964", "text": "def wh_pol(n_0, n_m, n_c, pol, lmda):", "title": "" }, { "docid": "5d8f1eaf0bb507b71f727234ecbf1be3", "score": "0.4878286", "text": "def corrprodsforpol(self, pol=-1):\n return _msmetadata.msmetadata_corrprodsforpol(self, pol)", "title": "" }, { "docid": "ed9afe944f279c04e54ead8dd4e5847a", "score": "0.4870391", "text": "def lc(self):\n try:\n return self.__lc\n except AttributeError:\n if self.is_zero():\n return self.base_ring()._zero_element\n R = self.parent()\n f = self._MPolynomial_element__element.dict()\n self.__lc = f[self._MPolynomial_element__element.lcmt( R.term_order().greater_tuple )]\n return self.__lc", "title": "" }, { "docid": "65cf15501d551b31c0626475d4a56c8b", "score": "0.4870248", "text": "def _pauli_string_expectation(self, basis, err_param, add_param = None):\n bas_ind = {'I':0, 'X':1, 'Y':2, 
'Z':3}\n for i in range(self._number_of_qubits):\n self._densitymatrix = np.reshape(self._densitymatrix,(4**(i),4,4**(self._number_of_qubits-i-1)))\n if basis[i] == 'X':\n self._densitymatrix[:,1,:] *= err_param\n self._densitymatrix[:,2,:] = 0\n self._densitymatrix[:,3,:] = 0\n elif basis[i] == 'Y':\n self._densitymatrix[:,1,:] = 0\n self._densitymatrix[:,2,:] *= err_param\n self._densitymatrix[:,3,:] = 0\n elif basis[i] == 'Z':\n self._densitymatrix[:,1,:] = 0\n self._densitymatrix[:,2,:] = 0\n self._densitymatrix[:,3,:] *= err_param\n\n self._densitymatrix = np.reshape(self._densitymatrix, self._number_of_qubits * [4])\n index = tuple([bas_ind[x] for x in basis])\n expectation = self._densitymatrix[index] * 2**self._number_of_qubits\n\n return expectation", "title": "" }, { "docid": "08c773213e20b63fb030cc682541d9e4", "score": "0.48663837", "text": "def test_get_current_lp_with_1(self):\n origin_number = 3\n origin_cons = [[1, 1, 1, '<=', 5],\n [1, 0, 0, '>=', 0],\n [0, 1, 0, '>=', 0],\n [0, 0, 1, '>=', 0]]\n origin_obj = ['max', 3, 4, 6]\n fixed = [-1, -1, 1]\n s = bandb.BandB(origin_number, origin_cons, origin_obj)\n number, cons, obj, constant = s.get_current_lp(fixed)\n self.assertEqual(number, 2)\n self.assertEqual(cons, [[1, 1, '<=', 4],\n [1, 0, '>=', 0],\n [0, 1, '>=', 0],\n [0, 0, '>=', -1]])\n self.assertEqual(obj, ['max', 3, 4])\n self.assertEqual(constant, 6)", "title": "" }, { "docid": "d5baf27482df59dfa76f154a4bd752b7", "score": "0.48517194", "text": "def orbitalBasisFunctions(self):\n\t\traise NotImplementedError(\"\")", "title": "" }, { "docid": "1c56c06ffdde885226869c0445cbc2ed", "score": "0.48399302", "text": "def _get_poly_formula(\n self,\n geometry: dict[str, Any],\n nn_sites: list[dict[str, Any]],\n nnn_sites: list[dict[str, Any]],\n ) -> str | None:\n\n def order_elements(el):\n if self.use_iupac_formula:\n return [get_el_sp(el).X, el]\n return [get_el_sp(el).iupac_ordering, el]\n\n nnn_geometries = [nnn_site[\"geometry\"] for nnn_site in nnn_sites]\n\n poly_formula = None\n if geometry[\"type\"] in connected_geometries and any(\n nnn_geometry[\"type\"] in connected_geometries\n for nnn_geometry in nnn_geometries\n ):\n nn_els = [get_el(nn_site[\"element\"]) for nn_site in nn_sites]\n comp = Composition(\"\".join(nn_els))\n el_amt_dict = comp.get_el_amt_dict()\n\n poly_formula = \"\"\n for e in sorted(el_amt_dict.keys(), key=order_elements):\n poly_formula += e\n poly_formula += str(formula_double_format(el_amt_dict[e]))\n\n return poly_formula", "title": "" }, { "docid": "a90700b1af0bc3000aa82f56d18dc7cc", "score": "0.48396713", "text": "def _get_poly_coeff(args):\n p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11 = args\n\n p10, p11 = np.deg2rad([p10, p11])\n\n coeff = [0.5, 1.5, 2.5, 3.5, 4.5, 5.5]\n\n # Upper surface \n Cup = np.ones((6,6), dtype=np.float64)\n\n for i in range(6):\n Cup[1,i] = p2 ** coeff[i]\n Cup[2,i] = coeff[i]\n Cup[3,i] = coeff[i] * p2 **(coeff[i] - 1)\n \n Cup[4,0] = -0.25 * p2 **(-1.5)\n Cup[4,1] = 0.75 * p2 **(-0.5)\n Cup[4,2] = 3.75 * p2 **( 0.5)\n Cup[4,3] = 8.75 * p2 **( 1.5)\n Cup[4,4] = 15.75 * p2 **( 2.5)\n Cup[4,5] = 24.75 * p2 **( 3.5)\n \n Cup[5,1:] = 0\n\n Bup = np.zeros(6)\n\n Bup[0] = p8 + p9/2\n Bup[1] = p3\n Bup[2] = np.tan(p10 - p11/2)\n Bup[4] = p4\n Bup[5] = np.sqrt(2*p1)\n\n Aup = np.linalg.solve(Cup, Bup)\n \n # Lower surface\n Clo = np.ones((6,6), dtype=np.float64)\n\n for i in range(6):\n Clo[1,i] = p5 ** coeff[i]\n Clo[2,i] = coeff[i]\n Clo[3,i] = coeff[i] * p5 **(coeff[i] - 1)\n\n Clo[4,0] = -0.25 * p5 **(-1.5)\n 
Clo[4,1] = 0.75 * p5 **(-0.5)\n Clo[4,2] = 3.75 * p5 **( 0.5)\n Clo[4,3] = 8.75 * p5 **( 1.5)\n Clo[4,4] = 15.75 * p5 **( 2.5)\n Clo[4,5] = 24.75 * p5 **( 3.5)\n\n Clo[5,1:] = 0\n\n Blo = np.zeros(6)\n \n Blo[0] = p8 - p9/2\n Blo[1] = p6\n Blo[2] = np.tan(p10 + p11/2)\n Blo[4] = p7\n Blo[5] = -np.sqrt(2*p1)\n\n Alo = np.linalg.solve(Clo, Blo)\n\n return Aup, Alo", "title": "" }, { "docid": "1cccd5d91701211f1e81c2c27956516c", "score": "0.48070532", "text": "def defining_polynomial(self):\n S = self.parent().ambient_space().symmetric_space()\n return S.prod(H.to_symmetric_space() for H in self)", "title": "" }, { "docid": "089347f9b1432ada8ae0f4d607dd0b6f", "score": "0.480602", "text": "def get_degree_polynomial(settings):\n assert(isinstance(settings, RbfSettings))\n if (settings.rbf == 'cubic' or settings.rbf == 'thin_plate_spline'):\n return 1\n elif (settings.rbf == 'linear' or settings.rbf == 'multiquadric'):\n return 0\n else:\n return -1", "title": "" }, { "docid": "1b9d788628e0922bb648b75fc91790be", "score": "0.47768706", "text": "def p1(self):\n return _min3p.f90wrap_biol__get__p1()", "title": "" }, { "docid": "bda15edb1cd271a200f478b0fd74df20", "score": "0.4775564", "text": "def L_to_basis(L):\n\n if L==3: return basis_L3\n if L==4: return basis_L4\n if L==5: return basis_L5\n else: print('nope')", "title": "" }, { "docid": "1ab5070c64a0e5297c74eccf75d5fb7e", "score": "0.47732982", "text": "def init_preparation_basis(val: Union[str, PreparationBasis] = None) -> PreparationBasis:\n pb: PreparationBasis = None\n if val is None: # if the basis is not set, use the default PauliPrepBasis\n pb = PauliPrepBasis()\n elif isinstance(val, PreparationBasis):\n pb = val\n elif isinstance(val, str): # Construct the measurement basis from its name\n if val not in SUPPORTED_PREPARATION_BASIS:\n raise ArgumentError(\"in init_preparation_basis(): '{}' is not supported preparation basis!\".format(val))\n else:\n pb = getattr(sys.modules[__name__], val + 'PrepBasis')()\n else:\n raise ArgumentError(\"in init_preparation_basis(): unsupported input value type {}!\".format(type(val)))\n\n return pb", "title": "" }, { "docid": "77b079447f588cfffd922b02bbdf5210", "score": "0.47634554", "text": "def __init__(self):\n super().__init__()\n self._name = \"Single-Qubit Pauli Measurement Basis\"\n self._basis = QUBIT_PAULI_BASIS", "title": "" }, { "docid": "d2156139ecc9ee07c0299ae2443e465a", "score": "0.47402325", "text": "def test_get_basis_polynomial_family():\n assert_true(type(c.basis_polynomial_family) is str)", "title": "" }, { "docid": "50718ec15db25c509519cf98b73f4832", "score": "0.4739271", "text": "def Ri_mp2_basis_set(self):\n return self.Ri_aux_basis_set", "title": "" }, { "docid": "eec3ddc0a9a8e5b41970e8d6cec461e0", "score": "0.47308514", "text": "def test_get_current_lp_with_0_and_1(self):\n origin_number = 3\n origin_cons = [[1, 1, 1, '<=', 5],\n [1, 0, 0, '>=', 0],\n [0, 1, 0, '>=', 0],\n [0, 0, 1, '>=', 0]]\n origin_obj = ['max', 3, 4, 6]\n fixed = [-1, 0, 1]\n s = bandb.BandB(origin_number, origin_cons, origin_obj)\n number, cons, obj, constant = s.get_current_lp(fixed)\n self.assertEqual(number, 1)\n self.assertEqual(cons, [[1, '<=', 4],\n [1, '>=', 0],\n [0, '>=', 0],\n [0, '>=', -1]])\n self.assertEqual(obj, ['max', 3])\n self.assertEqual(constant, 6)", "title": "" }, { "docid": "343f9ef30d455fa480cb910ca35e4683", "score": "0.47148764", "text": "def get_poly(self):\n # check if there is any poly?\n if self._poly is not None:\n return tuple(self._poly)", "title": "" }, { "docid": 
"e1da2d94f6cbd4fd2e3364b3f5b8bc49", "score": "0.47092453", "text": "def srelfac_lc(self):\n return _min3p.f90wrap_chem__get__srelfac_lc()", "title": "" }, { "docid": "94a0597aa174639111fc9643276c9302", "score": "0.47086746", "text": "def heisenberg_1d(param_dict):\n\n L = param_dict['L']\n J = param_dict['J']\n bc = param_dict.get('bc', 'closed')\n\n coupmax = L-1 if bc=='open' else L\n pauli_x = [ embed([qt.sigmax(), qt.sigmax()], L, [i, (i+1)%2]) for i in range(coupmax)]\n pauli_y = [ embed([qt.sigmay(), qt.sigmay()], L, [i, (i+1)%2]) for i in range(coupmax)]\n pauli_z = [ embed([qt.sigmaz(), qt.sigmaz()], L, [i, (i+1)%2]) for i in range(coupmax)]\n\n if bc=='closed' and L==2:\n J = J/2\n\n return -J * (1/4) * (sum(pauli_x) + sum(pauli_y) + sum(pauli_z))", "title": "" }, { "docid": "622c1a5e529f2557f80bd106e57f80a3", "score": "0.47048718", "text": "def get_ell_pol(self):\n return self.ell_pol", "title": "" }, { "docid": "4f743b673d762bc5d99a2970ed44a5af", "score": "0.46922323", "text": "def test_get_current_lp_with_0(self):\n origin_number = 3\n origin_cons = [[1, 1, 1, '<=', 5],\n [1, 0, 0, '>=', 0],\n [0, 1, 0, '>=', 0],\n [0, 0, 1, '>=', 0]]\n origin_obj = ['max', 3, 4, 6]\n fixed = [-1, -1, 0]\n s = bandb.BandB(origin_number, origin_cons, origin_obj)\n number, cons, obj, constant = s.get_current_lp(fixed)\n self.assertEqual(number, 2)\n self.assertEqual(cons, [[1, 1, '<=', 5],\n [1, 0, '>=', 0],\n [0, 1, '>=', 0],\n [0, 0, '>=', 0]])\n self.assertEqual(obj, ['max', 3, 4])\n self.assertEqual(constant, 0)", "title": "" }, { "docid": "b20143003f05d88ad9eb288c749b608a", "score": "0.4678735", "text": "def solve_initial_basis(self):\r\n # new_basis = [i + 1 for i, val in enumerate(self.c) if val > 0]\r\n # other = [i + 1 for i, val in enumerate(self.c) if val <= 0]\r\n # if len(new_basis) >= self.eqn:\r\n # self.basis = new_basis[:self.eqn]\r\n # else:\r\n # self.basis = sorted(new_basis + other[:self.eqn - len(new_basis)])\r\n c = np.array([0 if i < self.vars else -1 for i in range(self.vars + self.eqn)])\r\n A = np.concatenate(([self.A[row] if self.b[row] > 0\r\n else -self.A[row] for row in range(self.eqn)],\r\n np.identity(self.eqn)), axis=1)\r\n b = np.abs(self.b)\r\n\r\n p = Simplex(c, 0, A, b)\r\n p.show_steps(self.display)\r\n\r\n # detect for negative b value\r\n p.set_basis([x for x in range(self.vars + 1, self.vars + self.eqn + 1)])\r\n p.solve()\r\n print('-' * 100)\r\n x = p.get_BFS()\r\n\r\n assert p.k == 0, \"Infeasible Primal Program\"\r\n\r\n self.basis = p.get_basis()", "title": "" }, { "docid": "cc3bcfe3baa8355627f75474c3479631", "score": "0.46769893", "text": "def test_with_pauli_basis(self, num_wires):\n # generators of PauliRot\n paulirot_gens = -0.5j * pauli_basis_matrices(num_wires)\n # With many entries, each containing one basis element\n paulirot_coeffs = _one_parameter_paulirot_coeffs(paulirot_gens, num_wires)\n assert qml.math.allclose(paulirot_coeffs, np.eye(4**num_wires - 1))\n # With a \"single entry\" containing all basis elements\n paulirot_coeffs = _one_parameter_paulirot_coeffs([paulirot_gens], num_wires)\n assert qml.math.allclose(paulirot_coeffs, np.eye(4**num_wires - 1))", "title": "" }, { "docid": "4f8b03d516eea33a3b23e75eaa2cc84a", "score": "0.46695307", "text": "def calc_pol(self, fil, path, sys_err=sys_err):\n\n # load in the table and its data\n table = fits.open(path)\n table_data = table[1].data\n\n # get the filter-wavelength weight\n weights = np.interp(table_data[0]['wavelength'], fil['wavelength'],\n fil['weight'], left=0, right=0)\n # 
define the flux\n flux = table[0].data\n\n # get the flux weight for q\n qweight = table_data[0]['q'] * flux * weights\n # sum/integrate it\n qsum = np.trapz(qweight, table_data[0]['wavelength'])\n\n # get the weight for just the flux\n fweight = flux * weights\n # sum it\n fsum = np.trapz(fweight, table_data[0]['wavelength'])\n\n # find q\n qval = qsum / fsum\n\n # get the flux weight for u\n uweight = table_data[0]['u'] * flux * weights\n # sum it\n usum = np.trapz(uweight, table_data[0]['wavelength'])\n\n # find u\n uval = usum / fsum\n\n # find the total polarized light\n pol_tot = np.sqrt(qval**2 + uval**2)\n\n # find the position angle of the light, convert to deg\n pos_ang = 0.5 * np.arctan2(uval, qval) * u.rad\n pos_ang = pos_ang.to(u.deg).value\n\n # grab the date to sort by later\n date = table[0].header[self.datekey]\n\n # define the error from the og table\n err = table_data[0]['error']\n\n # get weighted flux errors\n ferr = pd.DataFrame(weights * flux * err, columns=['pol_err'])\n ferr = ferr.loc[(ferr!=0).any(axis=1)]\n\n # get poisson error\n fish = np.sum(ferr) / np.sum(flux * weights) / np.sqrt(ferr.size)\n\n # turn the start and end times for sys errs into Time objs\n start = Time(sys_err['Start_Date'].values.astype(str), scale='utc')\n end = Time(sys_err['End_Date'].values.astype(str), scale='utc')\n\n # turn the date of obs into a time object and find which err it falls into\n time = Time(date, scale='utc')\n errmatch = (time >= start) & (time <= end)\n\n if all(errmatch) is False:\n t = pd.Series(start-time).append(pd.Series(end-time))\n errmatch = np.abs(t).values.argmin()\n if errmatch > 19:\n errmatch -= 19\n # get the err for the polization\n errpol = np.sqrt(fish**2 + sys_err[fil.index.name][errmatch]**2)\n\n # get the error for the position angle\n errang = 90 / np.pi * errpol / pol_tot\n\n table.close()\n\n return pol_tot, pos_ang, date, errpol.values[0], errang.values[0]", "title": "" }, { "docid": "a33863e5225ce039578c85e22fd18035", "score": "0.46663117", "text": "def get_fwhm_pol(self):\n return self.fwhm_ell_pol", "title": "" }, { "docid": "7f6050165b3968b7d3abbeb13bb52ec3", "score": "0.46379414", "text": "def get_poland():\r\n poland = Country('Poland')\r\n poland.get_gdp('https://www.quandl.com/api/v3'\r\n '/datasets/WWDI/POL_NY_GDP_PCAP_CD.'\r\n 'json?api_key=2jhCWecEKmuxzVY9ifwp')\r\n poland.get_investment_inflows('https://www.quandl.com/'\r\n 'api/v3/datasets/WWDI/'\r\n 'POL_BX_KLT_DINV_WD_GD_ZS.'\r\n 'json?api_key=2jhCWecEKmuxzVY9ifwp')\r\n poland.get_investment_outflows('https://www.quandl.com/'\r\n 'api/v3/datasets/WWDI/'\r\n 'POL_BM_KLT_DINV_GD_ZS.'\r\n 'json?api_key=2jhCWecEKmuxzVY9ifwp')\r\n poland.get_manufacturing('https://www.quandl.com/api'\r\n '/v3/datasets/WWDI/POL_NV_'\r\n 'IND_MANF_ZS.json?api_key'\r\n '=2jhCWecEKmuxzVY9ifwp')\r\n return poland", "title": "" }, { "docid": "adc44e64472e813683fe196624ffafdd", "score": "0.4611685", "text": "def GetRDFPolarizability(mol):\n\n filename='temp'\n lcoordinates=_ReadCoordinates(filename) \n result=CalculatePolarizabilityRDF(lcoordinates)\n \n return result", "title": "" }, { "docid": "9573dadede6f84b2c69c3c6f1acc20a2", "score": "0.46069956", "text": "def plot_basis_function(self, basis_i, basis_type):\n\n\t\tn_plot_pts = 100\n\n\t\txi_vals = numpy.linspace(-1., 1., n_plot_pts)\n\t\teta_vals = numpy.linspace(-1., 1., n_plot_pts)\n\n\t\txi_vals, eta_vals = numpy.meshgrid(xi_vals, eta_vals)\n\n\t\tif basis_type == \"basis\":\n\n\t\t\tphi_vals = numpy.zeros((n_plot_pts, n_plot_pts))\n\t\t\t\n\t\t\tfor 
i in range(n_plot_pts):\n\t\t\t\tfor j in range(n_plot_pts):\n\t\t\t\t\txi_ij, eta_ij = xi_vals[i,j], eta_vals[i,j]\n\t\t\t\t\t\n\t\t\t\t\tif self.point_in_reference_triangle(xi_ij, eta_ij):\n\t\t\t\t\t\tphi_vals[i,j] = self.psi_hat_basis[basis_i](xi_ij, eta_ij)\n\t\t\t\t\telse:\n\t\t\t\t\t\tphi_vals[i,j] = 0\n\n\t\t\tplt.figure()\n\t\t\t#ax = plt.gcf().gca(projection='3d')\n\t\t\t#surf = ax.plot_surface(xi_vals, eta_vals, phi_vals, cmap=cm.coolwarm, linewidth=0, antialiased=False)\n\t\t\tplt.contourf(xi_vals, eta_vals, phi_vals, 100, cmap=plt.get_cmap(\"jet\"))\n\t\t\tplt.colorbar()\n\t\t\tplt.xlabel(r\"$\\xi$\")\n\t\t\tplt.ylabel(r\"$\\eta$\")\n\n\t\t\tplt.xlim([-0.1,1.1])\n\t\t\tplt.ylim([-0.1,1.1])\n\n\t\telif basis_type == \"grad_basis\":\n\t\t\t\n\t\t\tphi_vals_xi = numpy.zeros((n_plot_pts, n_plot_pts))\n\t\t\tphi_vals_eta = numpy.zeros((n_plot_pts, n_plot_pts))\n\t\t\t\n\t\t\tfor i in range(n_plot_pts):\n\t\t\t\tfor j in range(n_plot_pts):\n\n\t\t\t\t\txi_ij, eta_ij = xi_vals[i,j], eta_vals[i,j]\n\t\t\t\t\t\n\t\t\t\t\tif self.point_in_reference_triangle(xi_ij, eta_ij):\n\t\t\t\t\t\tphi_vals_xi[i,j] = self.grad_psi_hat_basis[basis_i][0](xi_ij, eta_ij)\n\t\t\t\t\t\tphi_vals_eta[i,j] = self.grad_psi_hat_basis[basis_i][1](xi_ij, eta_ij)\n\t\t\t\t\telse:\n\t\t\t\t\t\tphi_vals_xi[i,j] = 0\n\t\t\t\t\t\tphi_vals_eta[i,j] = 0\n\n\n\t\t\tplt.figure()\n\t\t\t#ax = plt.gcf().gca(projection='3d')\n\t\t\t#surf = ax.plot_surface(xi_vals, eta_vals, phi_vals_xi, cmap=cm.coolwarm, linewidth=0, antialiased=False)\n\t\t\tplt.contourf(xi_vals, eta_vals, phi_vals_xi, 100, cmap=plt.get_cmap(\"jet\"))\n\t\t\tplt.colorbar()\n\t\t\tplt.xlabel(r\"$\\xi$\")\n\t\t\tplt.ylabel(r\"$\\eta$\")\n\t\t\tplt.title(\"xi Derivative\")\n\n\t\t\tplt.xlim([-0.1,1.1])\n\t\t\tplt.ylim([-0.1,1.1])\n\n\t\t\tplt.figure()\n\t\t\t#ax = plt.gcf().gca(projection='3d')\n\t\t\t#surf = ax.plot_surface(xi_vals, eta_vals, phi_vals_eta, cmap=cm.coolwarm, linewidth=0, antialiased=False)\n\t\t\tplt.contourf(xi_vals, eta_vals, phi_vals_eta, 100, cmap=plt.get_cmap(\"jet\"))\n\t\t\tplt.colorbar()\n\t\t\tplt.xlabel(r\"$\\xi$\")\n\t\t\tplt.ylabel(r\"$\\eta$\")\n\t\t\tplt.title(\"eta Derivative\")\n\n\t\t\tplt.xlim([-0.1,1.1])\n\t\t\tplt.ylim([-0.1,1.1])\n\n\t\telse:\n\t\t\traise ValueError(\"Unsupported\")", "title": "" }, { "docid": "81a3cef63686004cef93ab0ad1adfd0e", "score": "0.46033826", "text": "def get_QP_bandstructure(self):\n if self._qpbs is None:\n self._qpbs = self._get_bandstructure(\"qp\")\n return self._qpbs", "title": "" }, { "docid": "9b44b850126a02b8f3a10836e8fb9416", "score": "0.45987618", "text": "def change_basis(twoD_pts, l_bpb_po):\n mat1 = (l_bpb_po).inv();\n return np.array((mat1*(Matrix(twoD_pts.transpose()))).transpose(), dtype='double');", "title": "" }, { "docid": "9c52898c887165ed593ee84696243c24", "score": "0.45884678", "text": "def make_LV_crl_basis(mesh, foc):\n\n VV = dolfin.VectorFunctionSpace(mesh, \"CG\", 1)\n V = dolfin.FunctionSpace(mesh, \"CG\", 1)\n\n if dolfin.DOLFIN_VERSION_MAJOR > 1.6:\n dofs_x = V.tabulate_dof_coordinates().reshape((-1, mesh.geometry().dim()))\n else:\n dm = V.dofmap()\n dofs_x = dm.tabulate_all_coordinates(mesh).reshape((-1, mesh.geometry().dim()))\n\n e_c = make_unit_vector(V, VV, dofs_x, fill_coordinates_ec)\n e_l = make_unit_vector(V, VV, dofs_x, fill_coordinates_el, foc)\n e_r = calc_cross_products(e_c, e_l, VV)\n\n e_c.rename(\"c0\", \"local_basis_function\")\n e_r.rename(\"r0\", \"local_basis_function\")\n e_l.rename(\"l0\", \"local_basis_function\")\n\n return e_c, e_r, e_l", 
"title": "" }, { "docid": "9cf1f064bf72c1f421f21a1816b54841", "score": "0.4587992", "text": "def basisName(self):\n\t\traise NotImplementedError(\"\")", "title": "" }, { "docid": "a5927beb8eb74058920e61878025a846", "score": "0.45871404", "text": "def rgascal(self):\n return _min3p.f90wrap_chem__get__rgascal()", "title": "" }, { "docid": "2960f6b58dfa82669691fd1dd96e1721", "score": "0.45870912", "text": "def degree_on_basis(self, t):\n def p_degree(m, mult=1, prime=2):\n \"\"\"\n For m=(n_1, n_2, n_3, ...), Sum_i (mult) * n_i * (p^i - 1)\n \"\"\"\n i = 0\n deg = 0\n for n in m:\n i += 1\n deg += n*mult*(prime**i - 1)\n return deg\n\n def q_degree(m, prime=3):\n \"\"\"\n For m=(n_0, n_1, n_2, ...), Sum_i 2*p^(n_i) - 1\n \"\"\"\n deg = 0\n for n in m:\n deg += 2*prime**n - 1\n return deg\n\n p = self.prime()\n basis = self.basis_name()\n # milnor\n if basis == 'milnor':\n if not self._generic:\n return p_degree(t)\n else:\n return q_degree(t[0], prime=p) + p_degree(t[1], prime=p, mult=2)\n # serre-cartan, arnonc\n if not self._generic and (basis == 'serre-cartan' or basis == 'arnonc'):\n return sum(t)\n if self._generic and basis == 'serre-cartan':\n bockstein = True\n n = 0\n for j in t:\n if bockstein:\n if j != 0:\n n += 1\n bockstein = False\n else:\n n += 2 * j * (p - 1)\n bockstein = True\n return n\n\n # wood_y:\n if basis == 'woody' or basis == 'woodz':\n # each entry in t is a pair (m,k), corresponding to w(m,k), defined by\n # `w(m,k) = \\text{Sq}^{2^m (2^{k+1}-1)}`.\n return sum(2**m * (2**(k+1)-1) for (m,k) in t)\n\n # wall, arnon_a\n if basis.find('wall') >= 0 or basis.find('arnona') >= 0:\n # Wall: each entry in t is a pair (m,k), corresponding to\n # Q^m_k, defined by `Q^m_k = Sq(2^k) Sq(2^{k+1})\n # ... Sq(2^m)`.\n #\n # Arnon A: each entry in t is a pair (m,k), corresponding\n # to X^m_k, defined by `X^m_k = Sq(2^m) ... Sq(2^{k+1})\n # Sq(2^k)`\n return sum(2**k * (2**(m-k+1)-1) for (m,k) in t)\n\n # pst, comm\n if basis.find('pst') >= 0 or basis.find('comm') >= 0:\n if not self._generic:\n # Pst: each entry in t is a pair (i,j), corresponding to P^i_j\n #\n # Comm: each entry in t is a pair (i,j), corresponding\n # to c_{i,j}, the iterated commutator defined by\n # c_{i,1} = Sq(2^i) and c_{i,j} = [c_{i,j-1},\n # Sq(2^{i+j-1})].\n return sum(2**m * (2**k - 1) for (m,k) in t)\n # p odd:\n #\n # Pst: have pair (Q, P) where Q is a tuple of Q's, as in\n # the Milnor basis, and P is a tuple of terms of the form\n # ((i,j), n), corresponding to (P^i_j)^n.\n #\n # Comm: similarly (Q, C) with Q as above and C a tuple\n # with each entry in t is of the form ((s,t), n),\n # corresponding to c_{s,t}^n. 
here c_{s,t} is the the\n # iterated commutator defined by c_{s,1} = P(p^s) and\n # c_{s,t} = [P(p^{s+t-1}), c_{s,t-1}].\n q_deg = q_degree(t[0], prime=p)\n p_deg = sum(2 * n * p**s * (p**t - 1) for ((s,t), n) in t[1])\n return q_deg + p_deg", "title": "" }, { "docid": "143a2e5a808619a55b3c68d9f8a2ab8d", "score": "0.45868084", "text": "def linresp(self):\n\n self.polar1 = 0\n self.polar2 = 0\n # <0|Y1(B) * A_bar|0>\n self.polar1 += ndot(\"ai,ia->\", self.ccpert_A.build_Avo(), self.y1_B)\n # <0|Y2(B) * A_bar|0>\n self.polar1 += ndot(\"abij,ijab->\", self.ccpert_A.build_Avvoo(), self.y2_B, prefactor=0.5)\n self.polar1 += ndot(\"baji,ijab->\", self.ccpert_A.build_Avvoo(), self.y2_B, prefactor=0.5)\n # <0|[A_bar, X(B)]|0>\n self.polar2 += ndot(\"ia,ia->\", self.ccpert_A.build_Aov(), self.x1_B, prefactor=2.0)\n # <0|L1(0)[A_bar, X1(B)]|0>\n tmp = ndot('ia,ic->ac', self.l1, self.x1_B)\n self.polar2 += ndot('ac,ac->', tmp, self.ccpert_A.build_Avv())\n tmp = ndot('ia,ka->ik', self.l1, self.x1_B)\n self.polar2 -= ndot('ik,ki->', tmp, self.ccpert_A.build_Aoo())\n # <0|L1(0)[A_bar, X2(B)]|0>\n tmp = ndot('ia,jb->ijab', self.l1, self.ccpert_A.build_Aov())\n self.polar2 += ndot('ijab,ijab->', tmp, self.x2_B, prefactor=2.0)\n self.polar2 += ndot('ijab,ijba->', tmp, self.x2_B, prefactor=-1.0)\n # <0|L2(0)[A_bar, X1(B)]|0>\n tmp = ndot('ijbc,bcaj->ia', self.l2, self.ccpert_A.build_Avvvo())\n self.polar2 += ndot('ia,ia->', tmp, self.x1_B)\n tmp = ndot('ijab,kbij->ak', self.l2, self.ccpert_A.build_Aovoo())\n self.polar2 -= ndot('ak,ka->', tmp, self.x1_B, prefactor=0.5)\n tmp = ndot('ijab,kaji->bk', self.l2, self.ccpert_A.build_Aovoo())\n self.polar2 -= ndot('bk,kb->', tmp, self.x1_B, prefactor=0.5)\n # <0|L2(0)[A_bar, X1(B)]|0>\n tmp = ndot('ijab,kjab->ik', self.l2, self.x2_B)\n self.polar2 -= ndot('ik,ki->', tmp, self.ccpert_A.build_Aoo(), prefactor=0.5)\n tmp = ndot('ijab,kiba->jk', self.l2, self.x2_B,)\n self.polar2 -= ndot('jk,kj->', tmp, self.ccpert_A.build_Aoo(), prefactor=0.5)\n tmp = ndot('ijab,ijac->bc', self.l2, self.x2_B,)\n self.polar2 += ndot('bc,bc->', tmp, self.ccpert_A.build_Avv(), prefactor=0.5)\n tmp = ndot('ijab,ijcb->ac', self.l2, self.x2_B,)\n self.polar2 += ndot('ac,ac->', tmp, self.ccpert_A.build_Avv(), prefactor=0.5)\n\n self.polar = -1.0*(self.polar1 + self.polar2)\n\n return self.polar", "title": "" }, { "docid": "668305c0e92f97b51fd1023cf63dc9f6", "score": "0.45781037", "text": "def get_rbf_function(settings):\n assert(isinstance(settings, RbfSettings))\n if (settings.rbf == 'cubic'):\n return _cubic\n elif (settings.rbf == 'thin_plate_spline'):\n return _thin_plate_spline\n elif (settings.rbf == 'linear'):\n return _linear\n elif (settings.rbf == 'multiquadric'):\n return _multiquadric", "title": "" }, { "docid": "0ffd436e087f8b199a128188aca8b9d6", "score": "0.45772862", "text": "def linear_sorption(self):\n return _min3p.f90wrap_chem__get__linear_sorption()", "title": "" }, { "docid": "a3c9900ec96130092925ec5c81f1dd12", "score": "0.4575087", "text": "def policy(self):\r\n if len(self.resx) > 0:\r\n plan = np.array(self.resx)\r\n planmat = np.array([int(i) for i in plan[:,1]]).reshape((self.h,len(self.trans)))\r\n return(planmat)\r\n else:\r\n return(print(\"Nothing to show\"))", "title": "" }, { "docid": "f5bd1f5498ac6ef0d10e7de3c1c13376", "score": "0.45664927", "text": "def to_pbw_basis(self):\n return self.parent().pbw_element(self)", "title": "" }, { "docid": "6bdfe4a7fceff80212ed8bea3508f468", "score": "0.45644403", "text": "def basis(P1,P2):\n P1s = pol_dict[P1]\n P2s = 
pol_dict[P2]\n return np.matrix([[P1s[0]*P2s[0]],\n [P1s[1]*P2s[1]],\n [P1s[0]*P2s[1]],\n [P1s[1]*P2s[0]]])", "title": "" }, { "docid": "d8d21db9e880f1f3dfa2e98612030316", "score": "0.45634764", "text": "def eval_poly(self):\n\n poly_int = np.polyint(np.append(self.c_t, [0])) # indefinite integral of polynomial\n poly_eval = np.polyval(poly_int, [0, 1]) # evaluate polynomial in [0, 1]\n poly_eval = np.diff(poly_eval) # difference of evaluated polynomial\n\n return poly_eval", "title": "" }, { "docid": "4c100312714c45b6ade71c6df72cc2c4", "score": "0.45497555", "text": "def crystal_system(self):\n #FIXME(tdaff): must be aligned with x to work\n if self.alpha == self.beta == self.gamma == 90:\n if self.a == self.b == self.c:\n return 'cubic'\n elif self.a == self.b or self.a == self.c or self.b == self.c:\n return 'tetragonal'\n else:\n return 'orthorhombic'\n elif self.alpha == self.beta == 90:\n if self.a == self.b and self.gamma == 120:\n return 'hexagonal'\n else:\n return 'monoclinic'\n elif self.alpha == self.gamma == 90:\n if self.a == self.c and self.beta == 120:\n return 'hexagonal'\n else:\n return 'monoclinic'\n elif self.beta == self.gamma == 90:\n if self.b == self.c and self.alpha == 120:\n return 'hexagonal'\n else:\n return 'monoclinic'\n elif self.a == self.b == self.c and self.alpha == self.beta == self.gamma:\n return 'trigonal'\n else:\n return 'triclinic'", "title": "" }, { "docid": "1ed122eccbd8f64a0ec58d593ef4d8bc", "score": "0.4540945", "text": "def detect_basis(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "93892c362e4191adce32ab548a06f97b", "score": "0.45072585", "text": "def l_prfx(self):\n return _min3p.f90wrap_gen__get__l_prfx()", "title": "" }, { "docid": "49c77801ca9c16e8fa54d920f3352174", "score": "0.4504084", "text": "def ramification_module_decomposition_hurwitz_curve(self):\n if self.matrix_degree()!=2:\n raise ValueError(\"Degree must be 2.\")\n F = self.base_ring()\n q = F.order() \n from sage.misc.misc import SAGE_EXTCODE\n gapcode = SAGE_EXTCODE + '/gap/joyner/hurwitz_crv_rr_sp.gap'\n gap.eval('Read(\"'+gapcode+'\")')\n mults = gap.eval(\"ram_module_hurwitz(\"+str(q)+\")\")\n return eval(mults)", "title": "" }, { "docid": "52074f331ac2dd1581f5116a3661e4e0", "score": "0.4498074", "text": "def algebra_generators(self):\n return self.basis()", "title": "" }, { "docid": "334bb15012f6445cfe874f37ccee60aa", "score": "0.4496081", "text": "def __init__(self):\n super().__init__()\n self._name = \"2-Qubit Gell-Mann Measurement Basis\"\n self._basis = GELL_MANN_BASIS", "title": "" }, { "docid": "3c1f316ecc1bf81f53be873d766cd1fc", "score": "0.44939253", "text": "def rlb(self):\n rlb = np.zeros_like(self.pos)\n xyz = self.xyzmw\n rlb[:, 0] = np.sqrt(xyz[:, 0] ** 2 + xyz[:, 1] ** 2 + xyz[:, 2] ** 2)\n rlb[:, 1] = np.arctan2(xyz[:, 1], xyz[:, 0]) * 180 / np.pi\n rlb[:, 2] = np.arcsin(xyz[:, 2] / rlb[:, 0]) * 180 / np.pi\n return rlb", "title": "" }, { "docid": "096a98d3b17cfbfa5a192d9add4130cc", "score": "0.449322", "text": "def get_material_flow_basis(self):\n return MaterialFlowBasis.molar", "title": "" }, { "docid": "b5c961acd0e286d1816060d0631383c4", "score": "0.44894618", "text": "def passive_matrix_from_angle(basis, angle):\n c = np.cos(angle)\n s = np.sin(angle)\n\n if basis == 0:\n R = np.array([[1.0, 0.0, 0.0],\n [0.0, c, s],\n [0.0, -s, c]])\n elif basis == 1:\n R = np.array([[c, 0.0, -s],\n [0.0, 1.0, 0.0],\n [s, 0.0, c]])\n elif basis == 2:\n R = np.array([[c, s, 0.0],\n [-s, c, 0.0],\n [0.0, 0.0, 1.0]])\n else:\n raise 
ValueError(\"Basis must be in [0, 1, 2]\")\n\n return R", "title": "" }, { "docid": "feccff3965a9318bb8f8d8255d1c3e0c", "score": "0.44866404", "text": "def basis(self, X_b, Y_b, basis_type=\"simple\"):\n XY = X_b.T @ Y_b / len(Y_b)\n XX = np.mean(X_b, 0)\n if basis_type == \"simple\":\n return np.concatenate([XY, XX])\n elif basis_type == \"quad\":\n tmp = (X_b.T @ X_b).reshape(-1)\n return np.concatenate([XY, XX, tmp])\n elif basis_type == \"original\":\n return np.concatenate([X_b.reshape(-1), Y_b])\n else:\n raise AssertionError(\"invalid basis type\")", "title": "" }, { "docid": "4194fabac7bd09b2451fba4d7c8f4ca5", "score": "0.44833133", "text": "def init_measurement_basis(val: Union[str, MeasurementBasis] = None) -> MeasurementBasis:\n mb: MeasurementBasis = None\n if val is None: # if the basis is not set, use the default PauliMeasBasis\n mb = PauliMeasBasis()\n elif isinstance(val, MeasurementBasis):\n mb = val\n elif isinstance(val, str): # Construct the measurement basis from its name\n if val not in SUPPORTED_MEASUREMENT_BASIS:\n raise ArgumentError(\"in init_measurement_basis(): '{}' is not supported measurement basis!\".format(val))\n else:\n mb = getattr(sys.modules[__name__], val + 'MeasBasis')()\n else:\n raise ArgumentError(\"in init_measurement_basis(): unsupported input value type {}!\".format(type(val)))\n\n return mb", "title": "" }, { "docid": "685f11a3506e392a2c61ed3eaff714ab", "score": "0.44678122", "text": "def get_basis_vectors(self):\r\n \r\n # Return standard bases if the orbit is for the solar system root\r\n if self.a is None:\r\n X = np.array([1, 0, 0])\r\n Y = np.array([0, 1, 0])\r\n Z = np.array([0, 0, 1])\r\n return X, Y, Z\r\n elif not self.X is None:\r\n return self.X, self.Y, self.Z\r\n else:\r\n # Get state vector at periapsis\r\n rp, vp = self.get_state_vector(self.get_time(0))\r\n \r\n # Get vector normal to the orbital plane\r\n Z = np.cross(rp,vp)\r\n Z = Z / norm(Z)\r\n \r\n # Set first basis vector based on celestial longitude\r\n X = np.array([1, 0, 0]) # assumed celestial longitude\r\n X = X - np.dot(X,Z) * Z/norm(Z)**2\r\n if norm(X) < 1E-15:\r\n X = np.array([0, math.copysign(1,Z[0]), 0])\r\n Z = np.array([math.copysign(1,Z[0]), 0, 0])\r\n else:\r\n X = X / norm(X)\r\n \r\n # Determine second basis through cross product of the others\r\n Y = np.cross(Z,X)\r\n Y = Y / norm(Y)\r\n \r\n self.X = X\r\n self.Y = Y\r\n self.Z = Z\r\n \r\n return X, Y, Z", "title": "" }, { "docid": "b73e6137f7c127b64e4001ad73777b3f", "score": "0.44606206", "text": "def characteristic_polynomial(self):\n from sage.rings.polynomial.polynomial_ring import polygen\n x = polygen(QQ, 'x')\n if self.rank() == 1:\n return x**(self.dimension() - 1) * (x - len(self))\n\n H = self[0]\n R = self.restriction(H)\n charpoly_R = R.characteristic_polynomial()\n D = self.deletion(H)\n charpoly_D = D.characteristic_polynomial()\n return charpoly_D - charpoly_R", "title": "" }, { "docid": "75669629a44c3afcceb5b95240f54d41", "score": "0.44574896", "text": "def tfinal_lc(self):\n return _min3p.f90wrap_chem__get__tfinal_lc()", "title": "" }, { "docid": "6f050c36b713e775ade65b93c7995574", "score": "0.44503808", "text": "def getCoefficientRing(self):\n # short-cut self._coefficient_ring is None case\n return self._coefficient_ring", "title": "" }, { "docid": "4d45b7e86e45bbca22aef4cd6e7de5c0", "score": "0.4448082", "text": "def GetRDFPolarizability(mol):\n\n filename = 'temp'\n ChargeCoordinates = _ReadCoordinates(filename)\n result = CalculatePolarizabilityRDF(ChargeCoordinates)\n\n return 
result", "title": "" }, { "docid": "b5d36672a672bdfa650de9692d7f7a35", "score": "0.44449207", "text": "def _change_basis_on_basis(self, t, basis='milnor'):\n from sage.matrix.constructor import matrix\n from sage.rings.all import GF\n from .steenrod_algebra_bases import steenrod_algebra_basis,\\\n convert_from_milnor_matrix\n from .steenrod_algebra_misc import get_basis_name\n basis = get_basis_name(basis, self.prime(), generic=self._generic)\n if basis == self.basis_name():\n return self({t: 1})\n a = self._milnor_on_basis(t)\n if basis == 'milnor':\n return a\n d = a.monomial_coefficients()\n p = self.prime()\n deg = a.degree()\n A = SteenrodAlgebra(basis=basis, p=p, generic=self._generic)\n if deg == 0:\n return A(a.leading_coefficient())\n Bnew = steenrod_algebra_basis(deg, basis, p, generic=self._generic)\n Bmil = steenrod_algebra_basis(deg, 'milnor', p, generic=self._generic)\n v = []\n for a in Bmil:\n v.append(d.get(a, 0))\n out = (matrix(GF(p), 1, len(v), v) *\n convert_from_milnor_matrix(deg, basis, p, generic=self._generic))\n new_d = dict(zip(Bnew, out[0]))\n return A(new_d)", "title": "" }, { "docid": "0ccfe58818874f4d953c1e69cf838a71", "score": "0.44433168", "text": "def derived_bsurf_i(self):\n\n from .utils import B_field\n\n if 'P1_I' not in self.columns:\n self.derived_p1_i()\n\n if not np.all([p in self.columns for p in ['P0', 'P1_I']]):\n return\n\n # get period and period derivative\n P0 = self.catalogue['P0']\n P1_I = self.catalogue['P1_I']\n BSURFI = B_field(P0, P1_I)\n self.update(BSURFI, name='BSURF_I')", "title": "" }, { "docid": "28a3a1b1eec1e9afd302584793c6e708", "score": "0.44400072", "text": "def Aux_basis(self):\n return self.Aux_basis_set", "title": "" }, { "docid": "76f17a6f06f711917681a008d946a27b", "score": "0.4438974", "text": "def polarCoord_Bfield(self):\r\n try:\r\n self.dRu_coord\r\n except:\r\n self.polarCoord()\r\n \r\n pol, tor = np.meshgrid(self.u_dom, self.v_dom)\r\n \r\n pol_xm = np.dot(self.xm_nyq.reshape(self.md_nyq, 1), pol.reshape(1, self.v_num * self.u_num))\r\n tor_xn = np.dot(self.xn_nyq.reshape(self.md_nyq, 1), tor.reshape(1, self.v_num * self.u_num))\r\n \r\n cos_pol = np.cos(pol_xm)\r\n cos_tor = np.cos(tor_xn)\r\n\r\n sin_pol = np.sin(pol_xm)\r\n sin_tor = np.sin(tor_xn)\r\n \r\n cos_mu_nv = cos_pol*cos_tor + sin_pol*sin_tor\r\n sin_mu_nv = sin_pol*cos_tor - sin_tor*cos_pol\r\n\r\n Bs_coord = np.dot(self.bsmns, sin_mu_nv).reshape(self.ns, self.v_num, self.u_num)\r\n Bu_coord = np.dot(self.bumnc, cos_mu_nv).reshape(self.ns, self.v_num, self.u_num)\r\n Bv_coord = np.dot(self.bvmnc, cos_mu_nv).reshape(self.ns, self.v_num, self.u_num)\r\n \r\n B_norm = 1. 
/ (self.dRs_coord * self.dZu_coord - self.dRu_coord * self.dZs_coord)\r\n Br_coord = (self.dZu_coord * Bs_coord - self.dZs_coord * Bu_coord) * B_norm \r\n Bp_coord = ( ( (Bs_coord * (self.dRu_coord*self.dZv_coord - self.dRv_coord*self.dZu_coord) + Bu_coord * (self.dRv_coord*self.dZs_coord - self.dRs_coord*self.dZv_coord)) * B_norm ) + Bv_coord) / self.R_coord\r\n Bz_coord = (self.dRs_coord * Bu_coord - self.dRu_coord * Bs_coord) * B_norm\r\n \r\n self.B_field = np.stack((Br_coord, Bp_coord, Bz_coord), axis=3)", "title": "" }, { "docid": "51afc9ceab256ba3aa5ac032623da438", "score": "0.44374007", "text": "def piola_kirchhoff2(self, u):\n a0, b0 = self.parameters['a0'], self.parameters['b0']\n a1, b1 = self.parameters['a1'], self.parameters['b1']\n a2, b2 = self.parameters['a2'], self.parameters['b2']\n a3, b3 = self.parameters['a3'], self.parameters['b3']\n\n # xyz -> fsn\n R = as_tensor(self.fiber_vectors)\n\n # Right Cauchy-Green deformation in xyz and fsn basis.\n C = right_cauchy_green_deformation(u)\n C_ = R*C*R.T\n\n # Invariants of B for the shape term.\n I4_ff = C_[0, 0]\n I4_ss = C_[1, 1]\n I4_nn = C_[2, 2]\n I8_fs = C_[0, 1]\n I1 = I4_ff + I4_ss + I4_nn\n\n # Derivatives of the invariants of C.\n # TODO Double check this formulation.\n ef, es, _ = self.fiber_vectors\n dW_I1 = a0*exp(b0*(I1 - 3))*Identity(u.geometric_dimension())\n dW_I4_ff = 2*a1*exp(b1*(I4_ff - 1)**2)*(I4_ff - 1)\n dW_I4_ss = 2*a2*exp(b2*(I4_ss - 1)**2)*(I4_ss - 1)\n dW_I8_fs = a3*exp(b3*I8_fs**2)*I8_fs*dot(ef, es)\n\n # Shape term component of S in fsn basis.\n Ss_ = 2*as_tensor(((dW_I1 + dW_I4_ff, dW_I8_fs, 0),\n (dW_I8_fs, dW_I1 + dW_I4_ss, 0),\n (0, 0, dW_I1)))\n\n # Combine the terms, rotate to Cartesian basis, and return.\n S = R.T*Ss_*R\n return S", "title": "" }, { "docid": "05de3d3cd7d4a12df7dac4fd077f1a8b", "score": "0.443614", "text": "def poly_to_str(p):\n polystr = ''\n\n for i in range(len(p)):\n coeff = abs(p[i])\n if coeff == 0:\n continue\n xterm = get_xterm(p, i)\n opp = get_opp(p, i)\n polystr += '{0}{1}{2}'.format(opp, coeff, xterm)\n\n return polystr", "title": "" }, { "docid": "fbbb3700e3d01f71eb12eb6327e089b2", "score": "0.44317186", "text": "def poly_constructor(n,c,x,basis):\r\n p=0\r\n for j in range(n):\r\n p += (basis[0][j]*c[j])*(x**(basis[1][j]))\r\n \r\n \r\n return p", "title": "" } ]
f73a218e6f4e61b5fc497d4418151388
Randomly select one of the variables in the test defined.
[ { "docid": "7af41151fcc2e2c972c0fbf47c98c16b", "score": "0.67626154", "text": "def _select_random_variable_for_call(\n test_case: tc.TestCase, position: int\n ) -> vr.VariableReference | None:\n candidates: list[vr.VariableReference] = [\n var\n for var in test_case.get_all_objects(position)\n if not var.is_primitive()\n and not isinstance(\n test_case.get_statement(var.get_statement_position()),\n stmt.NoneStatement,\n )\n ]\n\n if len(candidates) == 0:\n return None\n # TODO(fk) sort based on distance and use rank selection.\n return randomness.choice(candidates)", "title": "" } ]
[ { "docid": "3f010e7a53633bac681ffacf96171642", "score": "0.68720555", "text": "def rndvar(self, ln):\n\n if len(ln) < 2:\n error(\"Use: RndSet Variable_Name <list of possible values>\")\n\n v = self.getvname(ln[0])\n\n self.vars[v] = random.choice(ln[1:])\n\n if gbl.debug:\n print \"Variable $%s randomly set to '%s'\" % (v, self.vars[v])", "title": "" }, { "docid": "d648bea4aa80e9930677ceae975d273d", "score": "0.66873825", "text": "def random_choice(**kwargs):\n return random.choice([\"HIT\", \"STAND\"])", "title": "" }, { "docid": "124dc98d279df73b9be5339b7efca987", "score": "0.64623284", "text": "def random_species():\n return choice(get_species())", "title": "" }, { "docid": "8c3cdab66454f28be31e19391b5343f2", "score": "0.64495105", "text": "def select_random(nodes: List[DiscoveredNode]) -> Optional[DiscoveredNode]:\n return random.choice(nodes)", "title": "" }, { "docid": "09064f7475b0be4bb9e962db1fa96131", "score": "0.64318734", "text": "def pick_one(namelist):\n\treturn random.choice(namelist)", "title": "" }, { "docid": "050e3a8aed27cd1f1d00058148da07ce", "score": "0.63865876", "text": "def random_selection(population):\n return random.choice(population)", "title": "" }, { "docid": "050e3a8aed27cd1f1d00058148da07ce", "score": "0.63865876", "text": "def random_selection(population):\n return random.choice(population)", "title": "" }, { "docid": "050e3a8aed27cd1f1d00058148da07ce", "score": "0.63865876", "text": "def random_selection(population):\n return random.choice(population)", "title": "" }, { "docid": "2b82e63f24f8cb1ee4fc4cead2332193", "score": "0.63435096", "text": "def random_kid():\n return np.random.choice([\"boy\",\"girl\"])", "title": "" }, { "docid": "d46736c88c14ad7ff2649cc19f345966", "score": "0.634146", "text": "def select_random(self, policies):\n if self.verbose:\n print(\"random select\")\n return random.choice(policies)", "title": "" }, { "docid": "b8c74dd35582d8543228b8328c964d6c", "score": "0.6290076", "text": "def select_random(data, rng):\n return rng.choice(data)", "title": "" }, { "docid": "2eefbd2bcd59ab4b35158a4f46f92828", "score": "0.62891877", "text": "def random_vars(self, n=1):\n\t\treturn random.sample(self._xrange, n)", "title": "" }, { "docid": "72137a98e0e8e61e6a6f80936146a4dc", "score": "0.62601", "text": "def _randomize(self):\n choices = self.constraints[\"choices\"]\n if choices:\n return random.choice(choices)\n else:\n return None", "title": "" }, { "docid": "6298811146a5b30f8874b23b091e5e62", "score": "0.62282765", "text": "def RANDOM_CHOICE(*args):\n return random.choice(args)", "title": "" }, { "docid": "ba8cb1a2b7d5c4d68448f129e3827d33", "score": "0.62236166", "text": "def generateStudy():\n return random.choice(study_list)", "title": "" }, { "docid": "aeb34c0b1f30007b7cd53ab6d0b14b5f", "score": "0.62018085", "text": "def random(self):\n return random.choice(self.db.hvals(self.name()))", "title": "" }, { "docid": "14d9a71433d227eaf0ad4a897e7fab77", "score": "0.6189934", "text": "def random_selection(population):\n return self.random.choice(population)", "title": "" }, { "docid": "74c8817a1d24c0e018a4ebcf0732cc17", "score": "0.61883837", "text": "def select_random(self, input):\n input=self.formatter(input)\n return random.choice(input)", "title": "" }, { "docid": "d22ec3b4a86b25c86bf908b218b87614", "score": "0.61874163", "text": "def random(self):\n\t\t \n\t\tself.number = random.choice(self._NUMBER_CHOICE)\n\t\tself.color = random.choice(self._COLOR_CHOICE)", "title": "" }, { "docid": "383afad548d6e83b562cc114d9e48091", "score": 
"0.6183572", "text": "def choice(x):\n return np.random.choice(x)", "title": "" }, { "docid": "a7a7b68841ebeb568c1567272164ff69", "score": "0.6180677", "text": "def choice_var(self , range_ = range(-10 , 10)): #\n assert type(range_) == list\n random.shuffle(range_)\n return range_[0]", "title": "" }, { "docid": "211ad7b026936be75b8cbdc6b0a6114f", "score": "0.61619717", "text": "def __random_choose(names, name_1, d_names):\n name_2 = random.sample(names, 1)[0]\n while name_1 == name_2 or f'{min(name_1, name_2)}____{max(name_1, name_2)}' in d_names:\n name_2 = random.sample(names, 1)[0]\n return name_2", "title": "" }, { "docid": "0d64398f5646ff5468ec64a771c296f0", "score": "0.615701", "text": "def choice(*args):\n return random.choice(args)", "title": "" }, { "docid": "29d8753d286ef091ae7f73d559fbb417", "score": "0.6156853", "text": "def generateVariant():\n return random.choice(variant_list)", "title": "" }, { "docid": "f84cb6d96decd04722e5027379eed437", "score": "0.61386603", "text": "def randomlySelect(key,seed_data,previously_chosen):\n\n # Remove the index of categories that have multiple slots\n already_chosen = True\n while already_chosen: \n key = ''.join([i for i in key if not i.isdigit()])\n if key == 'FLEX':\n seed_data = seed_data[(seed_data['Pos'] == 'WR') | (seed_data['Pos'] == 'RB') | (seed_data['Pos'] == 'TE')].reset_index(drop=True)\n else:\n seed_data = seed_data[seed_data['Pos'].str.contains(key)].reset_index(drop=True)\n index = random.randrange(len(seed_data))\n value = (seed_data.get_value(index,'Player Name'),seed_data.get_value(index,'Proj FP'),seed_data.get_value(index,'Salary'))\n if value[0] not in previously_chosen:\n already_chosen = False \n previously_chosen.append(value[0])\n # print(previously_chosen)\n return value", "title": "" }, { "docid": "ddd8c49f1330b6cb77c4b928feac13cc", "score": "0.6130975", "text": "def random_selector(input_list):\n output = random.choice(input_list)\n return output", "title": "" }, { "docid": "5ac6c117e4f240c4abd59fce23afafa4", "score": "0.6122183", "text": "def random_color():\n return random.choice(color_list)", "title": "" }, { "docid": "85caf4c52b1c59146108c79b56ed999c", "score": "0.61195564", "text": "def test_randomize_selected(self):\n self.widget1.selected = True\n self.main.randomize_selected()\n self.assertNotEqual(self.widget1.hexed.letter, \"A\")\n self.assertEqual(self.widget2.hexed.letter, \"B\")\n self.assertEqual(self.widget3.hexed.letter, \"C\")", "title": "" }, { "docid": "8158e534b284d131e0125bbfa0225587", "score": "0.61089885", "text": "def select_random_difficulty():\r\n difficulties = {1: 'easy',\r\n 2: 'medium',\r\n 3: 'difficult'}\r\n return difficulties[random.randint(1, 3)]", "title": "" }, { "docid": "0eda774c57776e1afcf9eb85c277aa23", "score": "0.6106891", "text": "def getRandomVariable(N, board):\n var = (random.randint(0, N - 1), random.randint(0, N - 1))\n while isAssigned(var, board):\n var = (random.randint(0, N - 1), random.randint(0, N - 1))\n return var", "title": "" }, { "docid": "21262858c864bd67e47d67a61daf9318", "score": "0.6088799", "text": "def getRandom(self):\n return random.choice(self.vals)", "title": "" }, { "docid": "21262858c864bd67e47d67a61daf9318", "score": "0.6088799", "text": "def getRandom(self):\n return random.choice(self.vals)", "title": "" }, { "docid": "7f4ff2ccbabe4bfce61d56470e846157", "score": "0.6087167", "text": "def test_single_choice():\n for _ in range(100):\n assert_equal(select_operator([None], [1], RandomState()), 0)", "title": "" }, { "docid": 
"0141744ad8e08c69371969f9f46a96d2", "score": "0.607334", "text": "def mocked_random_choice(value):\n return value[0]", "title": "" }, { "docid": "f2adc34da285c57408efe8fbe8e44b4b", "score": "0.60731", "text": "def rand_piece():\n return random.choice(TESTING_PIECES)", "title": "" }, { "docid": "e72fa1d0f58d7bd0d9002c15fae6779e", "score": "0.6070173", "text": "def get_random_unit_expr(variables: eda.farray):\r\n rnd_var = random.choice(variables)\r\n if random.randint(0, 100) % 2 == 0:\r\n return rnd_var | ~rnd_var\r\n else:\r\n return eda.Or(eda.And(rnd_var, ~rnd_var, simplify=False), eda.expr(1), simplify=False)", "title": "" }, { "docid": "7e940287aede77e838aced9a5682fa0c", "score": "0.60688865", "text": "def get_fortune():\n return random.choice(FORTUNES)", "title": "" }, { "docid": "6b6488b51096ac89f12ed775524b0624", "score": "0.6051903", "text": "def test_selector_with_vars1 () :\n \n logger = getLogger(\"test_selector_with_vars1\")\n\n from ostap.fitting.pyselectors import SelectorWithVars\n\n mySel = SelectorWithVars ( variables = [ mass , c2dtf , pt ] ,\n selection = cuts ,\n logger = logger )\n\n \n with timing ( \"Selector with vars\" , logger ) :\n Ostap.Utils.process ( data.chain , mySel )\n \n dataset = mySel.data\n \n logger.info (\"Data set (selector-with-vars):\\n%s\" % dataset.table ( prefix = \"# \" ) )", "title": "" }, { "docid": "11c78ddfa5f4ddaaf40846d8ed5de6e5", "score": "0.6041306", "text": "def rs():\n return random.choice([0,1])", "title": "" }, { "docid": "64fbaab669e72b159dfcf028e8050f3d", "score": "0.60299474", "text": "def getRandom(self):\n return choice(self.nums_list)", "title": "" }, { "docid": "2882063f60d7f36099665b8de09d4756", "score": "0.6025865", "text": "def test_uses_random():\n pass", "title": "" }, { "docid": "8ee6dd3165909703c1786fc6581746f6", "score": "0.6007031", "text": "def random_variables():\n return tf.get_collection(RANDOM_VARIABLE_COLLECTION)", "title": "" }, { "docid": "c600af93b560d76404c012b9d3e75320", "score": "0.5995497", "text": "def get_random_state():\n return random.choice(data)", "title": "" }, { "docid": "bc7a5e1ae211b909760c30e2dbc3a392", "score": "0.5987788", "text": "def fortune(inp):\r\n return random.choice(fortunes)", "title": "" }, { "docid": "15d5692cdca2f1aba7e3611e27ce1dc9", "score": "0.5974719", "text": "def sectoin_3_11():\n import random\n\n values = [1, 2, 3, 4, 5, 6]\n\n def test1():\n print(random.choice(values))\n print(random.choice(values))\n print(random.choice(values))\n print(random.choice(values))\n\n def test2():\n print(random.sample(values, 2))\n print(random.sample(values, 2))\n print(random.sample(values, 3))\n print(random.sample(values, 3))\n\n def test3():\n print(random.shuffle(values))\n print(random.shuffle(values))\n\n def test4():\n print(random.randint(0, 10))\n print(random.random())", "title": "" }, { "docid": "3a352eb6eb0834caa6760548dbc9c305", "score": "0.5972282", "text": "def getRandom(self) -> int:\r\n return random.choice(self.vals)", "title": "" }, { "docid": "00a805fc060e435afdc230052ae870af", "score": "0.5964603", "text": "def getRandom(self):\n return random.choice(self.A)", "title": "" }, { "docid": "b653ebeee92200aeb0cb37af1773c1d7", "score": "0.59578043", "text": "def test_choice(self):\n # numpy.random.choice is only available for numpy versions >= 1.7\n major, minor, _ = numpy.version.short_version.split('.')\n if (int(major), int(minor)) < (1, 7):\n raise utt.SkipTest('choice requires at NumPy version >= 1.7 '\n '(%s)' % numpy.__version__)\n \n # Check over two calls to see 
if the random state is correctly updated.\n rng_R = random_state_type()\n # Use non-default parameters, and larger dimensions because of\n # the integer nature of the result\n post_r, out = choice(rng_R, (11, 8), 10, 1, 0)\n\n f = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=post_r, mutable=True)],\n [out], accept_inplace=True)\n\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n val0 = f()\n val1 = f()\n numpy_val0 = numpy_rng.choice(10, (11, 8), True, None)\n numpy_val1 = numpy_rng.choice(10, (11, 8), True, None)\n self.assertTrue(numpy.allclose(val0, numpy_val0))\n self.assertTrue(numpy.allclose(val1, numpy_val1))", "title": "" }, { "docid": "be4fe694b55dd001411b0e42c10be862", "score": "0.5956692", "text": "def computer_choice(self):\n\n\tself.ai_choice = np.random.choice(['rock','paper','scissors'])", "title": "" }, { "docid": "507eca4e0123d448e9d0dbddc0c5d013", "score": "0.59546953", "text": "def getRandom():\n return r.choice(Farewell.FAREWELLS)", "title": "" }, { "docid": "e10d52a9df1ca89d13e4b4a723051ee4", "score": "0.5951942", "text": "def get_random_data():\n compare = random.choice(data)\n return compare", "title": "" }, { "docid": "7a80a480502d813c4eae407f14199cee", "score": "0.59454685", "text": "def select_random_word(words):\n x = random.randint(0,len(words) - 1)\n word = words[x]\n y = random.randint(0, len(word) - 2)\n letter = word[y]\n var = word.replace(letter, \"_\")\n\n print(\"Guess the word: \"+var)\n return word", "title": "" }, { "docid": "4ddf20e5550b7cf9fa5c25a026788560", "score": "0.5942444", "text": "def random_adjective():\n return choice(get_adjectives())", "title": "" }, { "docid": "699a835a014082aaff3b9ae2c6f83ed2", "score": "0.59343415", "text": "def select_random_number():\n random_number = randint(1, 100)\n return random_number", "title": "" }, { "docid": "9c3a6655928a9a5be4b7e956e186f6a0", "score": "0.5931818", "text": "def test_random_weather(self, random):\n random.choice.return_value = 'good'\n day = Day.objects.create(session=self._get_session(),\n elves_woods=4,\n elves_forest=4,\n elves_mountains=4)\n\n self.assertEqual(day.weather, 'good')\n self.assertEqual(random.choice.call_count, 1)", "title": "" }, { "docid": "4832c26624b3918260731dc01b165905", "score": "0.5928104", "text": "def random(self):\n return choice(self.words)", "title": "" }, { "docid": "cbd2d4d3f85fb0cc7b7959831ee41d29", "score": "0.5927822", "text": "def generate_sample():\n # choice a random variable\n var = random.choice(VARIABLES)\n\n # generate ts in the last 7 days, random hours\n now = int(time.time())\n ts_delta = random.randint(0, 7)\n ts = now - ts_delta * 3600 * 24 - random.randint(0, 3600 * 24)\n\n # generate a random value\n value = random.randint(0, 100)\n\n return (var, ts, value)", "title": "" }, { "docid": "635e610aa71468d79cdd8f80b463fa54", "score": "0.5922352", "text": "def ai_choose(self, aggression):\r\n other = (1 - aggression) / 2\r\n choice = np.random.choice(a=[1, 2, 3], p=[other, aggression, other])\r\n self.choice = choice", "title": "" }, { "docid": "640c8c827415fde2ee9c1e51c95ae245", "score": "0.5921819", "text": "def random():\r\n pass", "title": "" }, { "docid": "bf45bc3f5ec24324d04c62cc4f8aed95", "score": "0.5921216", "text": "def get_random(self):\n field = random.choice(self.fields)\n return field, self.get(field)", "title": "" }, { "docid": "a8c14b4adc2a6cf015edfb1f81a3ac7a", "score": "0.5918073", "text": "def random_bool():\n return random.choice([True, False])", "title": "" }, { 
"docid": "34756ab22843b2e2ea15302117e43ef0", "score": "0.5917066", "text": "def getRandom(self):\n return random.choice(self.values)", "title": "" }, { "docid": "6acde7051a6439220b4883c9eefae3e3", "score": "0.59030706", "text": "def pick_word(words):\n return random.choice(words)", "title": "" }, { "docid": "0a804ef7de0b90fef6af0119c42754b3", "score": "0.5901703", "text": "def random_color():\n colors = ['blue', 'purple', 'salmon', 'lightblue', 'cyan', 'forestgreen']\n return random.choice(colors)", "title": "" }, { "docid": "380179fd6feec1b6d76c22dd472c1ce1", "score": "0.5898682", "text": "def random(self, actions):\n return random.choice(actions)", "title": "" }, { "docid": "41e176c79ab686e95866b4177c1b6eec", "score": "0.58918124", "text": "def _selector(self, select_eval):\n rng = Random(self.random_seed)\n\n def fn():\n return select_eval ^ (rng.random() >= self.validation_split)\n\n return fn", "title": "" }, { "docid": "94589860d975c05a526930438d2bc871", "score": "0.58904344", "text": "def test_randomize_none(self):\n self.main.randomize_selected()\n self.assertEqual(self.widget1.hexed.letter, \"A\")\n self.assertEqual(self.widget2.hexed.letter, \"B\")\n self.assertEqual(self.widget3.hexed.letter, \"C\")", "title": "" }, { "docid": "a973956e2b0966ca1d60f49e82f7185d", "score": "0.58895963", "text": "def rand_terminal_param(self, terminal):\n if terminal is AGNodes.LoadData:\n param = np.random.randint(self.nvars)\n else:\n param = None\n return param,", "title": "" }, { "docid": "c32d615147d33cd4fa03fff6d5ea8370", "score": "0.58755445", "text": "def choose_random():\n task = random.choice(tasks)\n lbl_display[\"text\"] = task", "title": "" }, { "docid": "ea76bb15176b03e2195f4e50d14677d4", "score": "0.58728987", "text": "def get_field(self):\n fields = ['field_x', 'field_y', 'field_z']\n return random.choice(fields)", "title": "" }, { "docid": "e1ce52e745925c70cd9cd38b025ec54d", "score": "0.58712465", "text": "def SelectDealer(self):\n return random.randint(0, 3)", "title": "" }, { "docid": "71b38b16ee48f0d7b04cbd546abe88cb", "score": "0.58657384", "text": "def get_random_survey():\n return random.sample(SAMPLING_MAP.values(), 1)[0]", "title": "" }, { "docid": "28af0f18b1b70eaeebed2fef8b0ce631", "score": "0.58648163", "text": "def computer_choice():\n\treturn(random.choice(choices))", "title": "" }, { "docid": "a3277de5c8b5c08634713c0c411f40fe", "score": "0.58571833", "text": "def random_get():\n return hyper.rng.choice(pool_activation)", "title": "" }, { "docid": "fcfb3a2cfd59f46187b83fcd5fb958c8", "score": "0.5854586", "text": "def random_selection(board_sequence):\n game_board = board_sequence.current_board\n return rand.choice(game_board.get_available())", "title": "" }, { "docid": "62836effe55e89224560246590ec318d", "score": "0.5851138", "text": "def get_char():\n # choose a structure containing some valid input\n chosen_one = choice(variables)\n # set a limit for the in structure random selecting\n limit = len(chosen_one)-1\n num = randint(0, limit)\n # if the operators dictionary was selected,choose randomly operator\n if type(chosen_one) is dict:\n chosen_one = dict(chosen_one).keys()\n num = randint(0, len(chosen_one)-1)\n counter = 0\n for i in chosen_one:\n if counter is num:\n chosen_one = i\n break\n else:\n counter = counter+1\n return chosen_one\n chosen_one = chosen_one[num]\n return chosen_one", "title": "" }, { "docid": "d3cbec910fc96fea96de0b574c20377a", "score": "0.58502024", "text": "def sample_random_attributes(self):\n self.v_pref = np.random.uniform(0.5, 1.5)\n 
self.radius = np.random.uniform(0.3, 0.5)", "title": "" }, { "docid": "4cca5fbe0f8a6ec8c929285b44495dfd", "score": "0.5836791", "text": "def sample(self):\n return tuple(random.choice(d) for d in self.space)", "title": "" }, { "docid": "2e0ac9fedd8c6461b8efa675e44d9259", "score": "0.5836121", "text": "def setActual_ans(self, things): \n self.x = random.randint(things[0],things[1])\n self.actual_ans = self.x", "title": "" }, { "docid": "3a366e310da49be84cae9c79d58a2262", "score": "0.5835217", "text": "def test_selector_with_vars2 () :\n \n logger = getLogger(\"test_selector_with_vars2\")\n\n from ostap.fitting.pyselectors import SelectorWithVars\n \n \n mySel = SelectorWithVars ( variables = [ mass , c2dtf , pt ] ,\n selection = cuts ,\n logger = logger )\n \n with timing ( \"Selector with vars&logic\" , logger ) :\n data.chain.process ( mySel , shortcut = False )\n \n dataset = mySel.data\n \n logger.info (\"Data set (selector-with-vars):\\n%s\" % dataset.table ( prefix = \"# \" ) )", "title": "" }, { "docid": "a3067547b1f398cc11f5118e7bd8cdbb", "score": "0.58309555", "text": "def random_sample(self):\n\t\treturn random.choice(self._action_list)", "title": "" }, { "docid": "11dc9d8988416acc5ad307caba0184cc", "score": "0.58219767", "text": "def random_strategy(_, actions):\n return np.random.choice(actions)", "title": "" }, { "docid": "7b3279820e70e93a1c3dacaae705dfdb", "score": "0.5816618", "text": "def random_individual():\n\n # list(enumerate(F_NAMES)) = [(0, \"eye\"), (1, \"nose\"), (2, \"mouth\")]\n\n ret = Individual()\n for i, feature_name in enumerate(F_NAMES):\n ret[feature_name] = random.randint(0, len(F_BANK[i])-1)\n return ret", "title": "" }, { "docid": "6e536959fdb3b48dd9ad7dc769c78320", "score": "0.5815962", "text": "def getRandom():\n return r.choice(Greeting.GREETINGS)", "title": "" }, { "docid": "b6836dd10cecaa39630014f6603a27d7", "score": "0.5811081", "text": "def choice(self):\n return random.choice(self)", "title": "" }, { "docid": "9b3c685330f4a2ef22cb93963b745981", "score": "0.58099693", "text": "def get_random_adjective():\n\n return random.choice(ADJECTIVES)", "title": "" }, { "docid": "3162a5b7c943d45488d52a16d472e72b", "score": "0.5802828", "text": "def getRandom(self) -> int:\n return random.choice(self.random_set)", "title": "" }, { "docid": "8d1b257739005623f24190b3e413174e", "score": "0.57915", "text": "def test_get_random(self, choice, randint, random):\n from models.event import ReceiveReplicaRequest\n\n nodes = OrderedDict(\n server=Mock(name='server'),\n node_1=Mock(name='node_1'),\n node_2=Mock(name='node_2'),\n node_3=Mock(name='node_3'),\n node_4=Mock(name='node_4'),\n )\n for key, item in nodes.items():\n item.name = key\n\n replica = Mock()\n replica.name = 'replica_1'\n\n replica_groups = {\n 1: Mock(),\n 2: Mock(),\n }\n\n nodes_mwg = dict(\n server=1, # XXX: irrelevant?\n node_1=1,\n node_2=2,\n node_3=2,\n node_4=1,\n )\n\n simulation = Mock(name='simulation')\n simulation.nodes = nodes\n simulation._replica_groups = replica_groups\n simulation._nodes_mwg = nodes_mwg\n simulation._mwg_prob = 0.500\n simulation.now = 2.7\n\n choice.side_effect = ['node_3', 2, 1, replica]\n randint.return_value = 61\n random.return_value = 0.501\n\n event_factory = self._make_instance(simulation)\n ret_val = event_factory.get_random()\n\n self.assertTrue(len(ret_val), 2)\n self.assertEqual(ret_val[0], 61.0) # event time\n\n event = ret_val[1]\n self.assertTrue(isinstance(event, ReceiveReplicaRequest))\n self.assertIs(event.source, None)\n self.assertIs(event.target, 
nodes['node_3'])\n self.assertEqual(event.replica_name, 'replica_1')\n\n # TODO: test for MWG probability? select mwg, select non-mwg\n # ... refactor setup into some helper method", "title": "" }, { "docid": "f14e0acdbe4c129f967bc863e3db71c9", "score": "0.57880485", "text": "def test_selector_with_vars3 () :\n \n logger = getLogger(\"test_selector_with_vars3\")\n\n from ostap.fitting.pyselectors import SelectorWithVars\n \n \n mySel = SelectorWithVars ( variables = [ mass , c2dtf , pt ] ,\n selection = cuts ,\n logger = logger )\n \n with timing ( \"Selector with vars&logic&kisa\" , logger ) :\n data.chain.pprocess ( mySel , shortcut = False )\n \n dataset = mySel.data\n \n logger.info (\"Data set (selector-with-vars):\\n%s\" % dataset.table ( prefix = \"# \" ) )", "title": "" }, { "docid": "6e8fe7151dadeecf55b3632c84449e87", "score": "0.5784095", "text": "def random_player(game_info):\n return random.choice(game_info.moveset)", "title": "" }, { "docid": "827a6c705f6e1ef4e0115d2756504023", "score": "0.5781002", "text": "def pick_random(self):\n id = random.sample(self.only(\"id\").all(), 1)\n return id[0]", "title": "" }, { "docid": "1f8b5079a6f6d4f43256cca169fd7d33", "score": "0.577809", "text": "def randomNoun():\n\treturn random.choice(nouns)", "title": "" }, { "docid": "0cd43d6caf9b070588874fda8e88c86d", "score": "0.577495", "text": "def get_random_choice(self):\n return random.choice(self.choices.split(':'))", "title": "" }, { "docid": "0e7ca06b52af1b672ffb39876dab3844", "score": "0.5769662", "text": "def __call__(self):\n return random.choice(self.fakers)", "title": "" }, { "docid": "1b7cc7115414eb6e64960f8ed53440df", "score": "0.57665515", "text": "async def choose(self, *choices : str):\r\n await self.bot.say(random.choice(choices))", "title": "" }, { "docid": "04304131ba034f1817528fae72227f40", "score": "0.5766042", "text": "def test_prop(self) -> typing.Union[str, None, dict]:\n return random.choice(['hello', None, {}])", "title": "" }, { "docid": "4d38e317e77e80d8a5e5534d95e9f72a", "score": "0.5759477", "text": "def getRandom(self) -> int:\n return random.choice(self.valLst)", "title": "" }, { "docid": "f1ebe505a9aeaa38a12ea773ab9ef5b1", "score": "0.5757742", "text": "def chooseRandomCountry(countries):\n\t\n\tcountry = choice(countries)\n\t\n\treturn country", "title": "" }, { "docid": "9a0d51c02d2c768f51fdfbc399431590", "score": "0.57499087", "text": "def select_data(self):\n self.randomizeData()", "title": "" }, { "docid": "7f0c875279b17afafe17243ad7c6f4b4", "score": "0.5745585", "text": "def rand(self) -> PreGroupValue:", "title": "" } ]
bef78b64f61cc20b6a443e9b8269d5d2
Resets the number of steps a dot has taken.
[ { "docid": "1dec7687e2aa8e2f0589fc101ecc18d9", "score": "0.7511817", "text": "def reset_steps(self):\r\n self.step = 0", "title": "" } ]
[ { "docid": "668d3dd6e7a95dd3d12f410de40432fe", "score": "0.6291966", "text": "def reset(self):\n self.distance_traveled = 0\n self.state = Reindeer.MOVING\n self.internal_clock = 0\n self.total_ticks = 0\n self.points = 0", "title": "" }, { "docid": "44355645acbd2ffe525dff1dd18addf6", "score": "0.628189", "text": "def reset_number(self):\n self.number = 0", "title": "" }, { "docid": "5acc4217e534afb482ef31351d17ff92", "score": "0.6264653", "text": "def reset():\n Node.count = 0", "title": "" }, { "docid": "f2a55ad558597622ef753e06ef5f7ad3", "score": "0.6261617", "text": "def reset(self, process: 'KRoadProcess') -> None:\n self.__step_number = 0", "title": "" }, { "docid": "1897647277dfa1ac463748c99078f0e9", "score": "0.62469167", "text": "def reset(self, num_particle):\n self.step = 0\n self._particles = []\n self.populate(num_particle)\n self._view.showStatus(self.step, self._particles)", "title": "" }, { "docid": "46dc018b8531e08b925eb6470d2c1ce5", "score": "0.6239844", "text": "def _step_reset(self):\n pass", "title": "" }, { "docid": "46dc018b8531e08b925eb6470d2c1ce5", "score": "0.6239844", "text": "def _step_reset(self):\n pass", "title": "" }, { "docid": "a27d575131e701c905478eb27e886068", "score": "0.621589", "text": "def reset(self):\n self.count = 0", "title": "" }, { "docid": "6668f5cfa4eb5d5d8675c53d28927f68", "score": "0.6188338", "text": "def reset(self):\n self.epsilon = self.start_value", "title": "" }, { "docid": "b6c288abc8b16c4cea07c3b2a62d6b86", "score": "0.6139222", "text": "def reset(self) -> None:\n self.last_error = 0\n self.segment = 0", "title": "" }, { "docid": "b6c288abc8b16c4cea07c3b2a62d6b86", "score": "0.6139222", "text": "def reset(self) -> None:\n self.last_error = 0\n self.segment = 0", "title": "" }, { "docid": "6b592ad462d89331b2292d9a255a0dd8", "score": "0.61165464", "text": "def reset(self):\n self.prev = 0\n self.retries = 0\n self.current = self.initial", "title": "" }, { "docid": "cc242f07feba6d37cdfc383bfcc95722", "score": "0.61037034", "text": "def reset_states(self):\n self.x_prev = self.x0 if self.x0 is not None else np.zeros(self.size)", "title": "" }, { "docid": "58c6e0d7d5b2ba8ac3aaa756853ef626", "score": "0.6059976", "text": "def reset_zero(self):\n self._reset_zero()", "title": "" }, { "docid": "27576d3f58cb5948e218e54fc2c182c9", "score": "0.6019608", "text": "def reset(self):\n self.counter = 0\n self.stop = False", "title": "" }, { "docid": "e05a6dfa383228325a3f4bc0792cf4a0", "score": "0.60102385", "text": "def reset(self) -> None:\n self._real_position = np.array(\n [self._param['x'], self._param['y']], dtype=np.int8)\n self._visual_position = np.array(\n [self._param['x'] / self._param['n_v'] + 1 / \\\n (self._param['n_v'] * 2),\n self._param['y'] / self._param['n_v'] + 1 / \\\n (self._param['n_v'] * 2)])\n self.path['path'] = np.arange(np.power(self._param['n_v'], 2))\n self.path['index'] = 0\n self.path['size'] = 0\n\n self.state = self._param['initial_state']", "title": "" }, { "docid": "5a0cc30027ca54896236f5719b4c42c8", "score": "0.600322", "text": "def count_reset(self) -> None:\n\n self.counter = 0", "title": "" }, { "docid": "157720c6077f4ed2a9c738e01b85ed51", "score": "0.5995782", "text": "def reset(self):\n\n if self.follows_waypoints:\n self.trajectory.reset()\n\n self.velocity = (0, 0)\n self.angular_velocity = 0\n \n self.position, self.angle = self.initial_coordinates\n\n self.drawn = False", "title": "" }, { "docid": "66d467c060f1b1b70e0e1b4541f5d18a", "score": "0.5988861", "text": "def reset(self):\n self.go = True\n 
self.distances.append(self.distance)\n self.distance = 0.0", "title": "" }, { "docid": "09bd16eec7b3103051b539a779a44014", "score": "0.5952033", "text": "def reset(self):\n self.left_line.reset()\n self.right_line.reset()\n self.center_polynom = None\n self.center_curvature = 0.0\n self.center_offset = 0.0\n self.undetected_frame_count = 0", "title": "" }, { "docid": "73b55c9142e91a153233239c47b9b09b", "score": "0.5949389", "text": "def reset(self):\n super().reset()\n self.betMultiple = 1\n self.lossCount = 0", "title": "" }, { "docid": "d27b1932d0092bc2dfdffb62f6b7a13f", "score": "0.59268034", "text": "def reset(self) -> None:\n # Reset the time step counter to zero.\n self._t.assign(tf.constant(0))", "title": "" }, { "docid": "d4b827c95a4d383830157d847149e1f5", "score": "0.5918764", "text": "def reset_epoch(self):\n self.ix = 0", "title": "" }, { "docid": "d4b827c95a4d383830157d847149e1f5", "score": "0.5918764", "text": "def reset_epoch(self):\n self.ix = 0", "title": "" }, { "docid": "d44b7e70314f03049a2331feef505191", "score": "0.591786", "text": "def reset(self):\n\n self.c = 0.01\n self.g = 9.8\n self.accelerator = 0.5\n self.boundary = 10\n self.position = 0\n self.velocity = 0", "title": "" }, { "docid": "971a49fd9e59eb0193e49742eca5ee75", "score": "0.5912027", "text": "def tts_reset(self) -> None:\n self.call_count = 0\n self.last_inner_solution = None", "title": "" }, { "docid": "9dcd62a207ae4d886cd753a3ae3383f5", "score": "0.58750665", "text": "def reset(self):\n self.sum = 0\n self.data_num = 0\n self.pfm = None", "title": "" }, { "docid": "5063406f4349437aace63f227c2a6491", "score": "0.5873979", "text": "def reset(self):\n self._value = 0.0\n self.update_val()", "title": "" }, { "docid": "4d9f77743a86b85488f30dd81639b4a3", "score": "0.58388096", "text": "def reset(self):\n self.ctr = 0\n self.last_sol = None", "title": "" }, { "docid": "4d9f77743a86b85488f30dd81639b4a3", "score": "0.58388096", "text": "def reset(self):\n self.ctr = 0\n self.last_sol = None", "title": "" }, { "docid": "be1881094938395f61ba94137bd9fa84", "score": "0.5837824", "text": "def reset(self, x_range):\n self.epsilon = (self.epsilon * 0.94)\n self.count += 1\n pass", "title": "" }, { "docid": "2272ae579e77fbe5daeda72b79da7918", "score": "0.5820158", "text": "def reset(self):\n self.currentAcc = [0.0]*self.AccWindowSize\n self.totalReset +=1\n self.cv['Act'] = 0\n self.actFlag = False", "title": "" }, { "docid": "94a29ca5437496dd22c1d015be318d16", "score": "0.5818059", "text": "def reset_current_exp(self): \n self.current_exp = 0", "title": "" }, { "docid": "c257364381f55b7713972311a47c5245", "score": "0.5813062", "text": "def reset(self):\n self.current_batch = 0\n self.current_epoch = 0", "title": "" }, { "docid": "2d5b0143427af9bccb01c5e823cc62fc", "score": "0.5812768", "text": "def reset(self):\r\n self.count = 0\r\n self._log_count = 0", "title": "" }, { "docid": "c0c2253d71958d1aa1d8f6fafb5e70c4", "score": "0.58067346", "text": "def reset(self):\n # If using the ratio of the structure that is in the target,\n # start with memory as 1. \n self._memory = 1. 
if self._mode else 0.\n self._reset_goal()", "title": "" }, { "docid": "829699bacac0b415034c5cb32f5e7805", "score": "0.5806358", "text": "def reset(self):\n neuron.Neuron.reset(self)\n\n self.voltage.set_value(np.zeros(self.size).astype(FLOAT_TYPE))\n self.refractory_time.set_value(np.zeros(self.size).astype(FLOAT_TYPE))", "title": "" }, { "docid": "635c3bb4c682ab3083bcd3073a48f9dd", "score": "0.5801294", "text": "def reset(self):\n self.random_valid_pose()\n self.collision_status = False\n self.done = False\n self.step_reward = 0", "title": "" }, { "docid": "76c799bb9963662016d80640d7b1ba33", "score": "0.5799926", "text": "def _reset(self):\n self.iter = 0\n self.history.clear()", "title": "" }, { "docid": "c141d6789a8403fe289ef94eff495d66", "score": "0.57846475", "text": "def reset_pour(self, counts=False):\n self.thisPour = 0.0\n if counts:\n self.pours += 1", "title": "" }, { "docid": "cd229d2e30fbc16b74e3cfe69bfbfaa3", "score": "0.5776224", "text": "def reset(self):\n self._dir = (0, -1)\n\n # You can read head, tail and food directly for drawing\n self.head = (self.width / 2, self.height - 2)\n self.tail = [(self.width / 2, self.height - 1)]\n self.food = (self.width / 2, self.height / 3)", "title": "" }, { "docid": "153119593e6cd3109935ef028c54fb8f", "score": "0.5767304", "text": "def reset_episode(self) -> None:\n self.__end_step()\n self.episode = 0\n self.step = 0", "title": "" }, { "docid": "28c6901baa57c6295a52623983286142", "score": "0.5762781", "text": "def reset(self):\n self.__set_degree(self.initial_degree)", "title": "" }, { "docid": "d57737bd91a6256464f9d785874816ef", "score": "0.57581496", "text": "def reset(self):\n self.p=self.p0\n self.o=self.o0", "title": "" }, { "docid": "990a72a89467798faaaa658ec0a97954", "score": "0.5747964", "text": "def reset(self):\n self._memory[:] = 0.\n self._reset_goal()", "title": "" }, { "docid": "6c2494ac726db4748cc0247bfa792236", "score": "0.57423", "text": "def reset(self):\n self.sim.reset()\n self.step_num = 0\n self.success = False\n state = np.concatenate([self.sim.pose] * self.action_repeat) \n return state", "title": "" }, { "docid": "4c4ae2539305755861badb7a3672f3e2", "score": "0.5724618", "text": "def reset(self):\n self._length = None", "title": "" }, { "docid": "3ca1992f328d6bfd5bbd32f170d44ed0", "score": "0.5724181", "text": "def reset_positive_plane_length():", "title": "" }, { "docid": "3958ced7b888a59d66e135bb3d8dc26c", "score": "0.5720017", "text": "def reset_values(self):\n self.number_of_d = 0\n self.number_of_c = 0\n self.update_value = self.init_uv\n self.mutual_c_outcome = 0", "title": "" }, { "docid": "f3cac144efb5f74311b464b61c34f82d", "score": "0.5709142", "text": "def reset_call_counter(self):\n self._ncalls = 0", "title": "" }, { "docid": "901a93a9b3acb2d8979e6c6f4e26d51f", "score": "0.57073426", "text": "def restart(self):\n self.current_frame_n = 0", "title": "" }, { "docid": "efb3a18a8797d0b0b4c07522cd7fc4d9", "score": "0.5707156", "text": "def reset(self):\n self.following_number = self.start", "title": "" }, { "docid": "04882a785ac09c794b04fd916509a82d", "score": "0.5706156", "text": "def reset(self):\n self._count = 0\n self._sum = float(0)\n self._min = float('inf')\n self._max = float(0)", "title": "" }, { "docid": "8a538fad6a290305c0193364b1a2542d", "score": "0.56998867", "text": "def reset(self):\n self._loc = self.history[0]\n self.history = []\n self.speeds = np.zeros(len(self.cars))\n self._next = np.zeros(len(self.cars))", "title": "" }, { "docid": "487f25c33d43fcfacee1780ce8fd9b14", "score": 
"0.5699454", "text": "def reset(self, state):\n self._i = 0", "title": "" }, { "docid": "1f036ff55f988801d0aaf925a07b8fb7", "score": "0.56968707", "text": "def reset_flops_count(self):\n add_batch_counter_variables_or_reset(self)\n self.apply(add_flops_counter_variable_or_reset)", "title": "" }, { "docid": "1f036ff55f988801d0aaf925a07b8fb7", "score": "0.56968707", "text": "def reset_flops_count(self):\n add_batch_counter_variables_or_reset(self)\n self.apply(add_flops_counter_variable_or_reset)", "title": "" }, { "docid": "b367d0b88c53b0f56457bd7f809bde15", "score": "0.569325", "text": "def reset(self):\n self.reset_pos()\n self.reset_vel()\n self.update()", "title": "" }, { "docid": "26032900ca1de7d2a65e1fb6b59d2c13", "score": "0.5684921", "text": "def reset(self):\n self.x_pos = 10\n self.y_pos = 10\n self.line_height = 15", "title": "" }, { "docid": "8df6b2556fc2fbe2a9f4b2ab8b250509", "score": "0.5678408", "text": "def reset(self) -> None:\n self.total_time = 0.0", "title": "" }, { "docid": "360dfb03269fcfcb97e19474dd2d9f29", "score": "0.5667716", "text": "def reset(self):\r\n self.x_pos = 10\r\n self.y_pos = 10\r\n self.line_height = 15", "title": "" }, { "docid": "e3e3e799cdb1b60a93c6c2a4ab24e0ef", "score": "0.5647098", "text": "def reset(self):\n self.diff_ngram = set()\n self.count = 0.0", "title": "" }, { "docid": "e6c6497779b9d5be61161b176bb7b2d5", "score": "0.56284297", "text": "def reset(self):\n print(colored('* Resetting fibonacci sequence', 'green'))\n self.fib_sequence = [0, 1]\n self.n = self.config.get('n', 0)", "title": "" }, { "docid": "aba5b9fd91488c30d635c6d6cca9c0e2", "score": "0.5622047", "text": "def reset(self, path_ids=None):\n if path_ids is None:\n path_ids = torch.arange(start=0, end=self.n_paths, device=self.device, dtype=torch.long)\n\n # Update poses\n self._update_center_pose(path_ids=path_ids)\n\n # Current steps default back to 0\n self.current_step[:] = 0", "title": "" }, { "docid": "88f9a76210dc7db4a7fc81687bdc432c", "score": "0.5621598", "text": "def reset():\n pyteos.node_reset()", "title": "" }, { "docid": "f69f069029e524ac3b975e26b8aede41", "score": "0.561811", "text": "def Reset(self, pos):\n self._pos = pos\n self._speed = Vector(0, 0)\n self._path = []", "title": "" }, { "docid": "ef5875fd46c74682137acbc02dcb1bbe", "score": "0.5616267", "text": "def reset(self) -> None:", "title": "" }, { "docid": "ef5875fd46c74682137acbc02dcb1bbe", "score": "0.5616267", "text": "def reset(self) -> None:", "title": "" }, { "docid": "79a77bd2b9924bdc1e2523a83f53d905", "score": "0.5615026", "text": "def reset(self):\n self._state = np.ones(self._dim) * self._mean", "title": "" }, { "docid": "db2e26615567a06afc6416c796bb9216", "score": "0.5613296", "text": "def reset(self):\n\t\tself.selected.samples[:] = 0\n\t\tself.selection_count = 0", "title": "" }, { "docid": "14f1bfa673053b3caf5dc6c7e251c2d5", "score": "0.5612518", "text": "def reset_state(self):\n self.current_batch = 0", "title": "" }, { "docid": "4db511a6662e89952ea71e2a8f54f225", "score": "0.56100214", "text": "def reset_frames_count(self):\n self.count_frames = 0", "title": "" }, { "docid": "12f1b53fbc24beedf1bfce94ab7e844a", "score": "0.56088585", "text": "def double_dot():\n dd.clear()\n dd.hideturtle()\n dd.screen.tracer(False)\n dd.penup()\n '''Center of the dot dots is in the middle of the window.'''\n dd.setpos(0, 0)\n dd.left(90)\n dd.penup()\n # dot pos\n dd.fd(20)\n dd.pendown()\n # dot size\n dd.dot(7)\n dd.left(180)\n dd.penup()\n dd.fd(40)\n dd.pendown()\n dd.dot(7)\n dd.left(90)", "title": "" 
}, { "docid": "59fa216d0ade722503f9e2982bb984e9", "score": "0.55985224", "text": "def reset(self):\n self._dbg_lvl = default_dbg_lvl\n self._call_count = {}", "title": "" }, { "docid": "5d1a9836633a356e4492220f52d70085", "score": "0.55789953", "text": "def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0", "title": "" }, { "docid": "7c3682237b9ab853c242aca72a03ad84", "score": "0.55711055", "text": "def reset(self):\n self.x = 0\n self.y = 0\n self.score = 0", "title": "" }, { "docid": "faceaa6d4ad614e051a5639505ac3d47", "score": "0.55666775", "text": "def reset(self):\n self.state = self.mu", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.55661595", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.55661595", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.55661595", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.55661595", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.55661595", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.55661595", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.55661595", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.55661595", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.55661595", "text": "def reset(self):", "title": "" }, { "docid": "1d5e6abef38a8738c1bbc58a664d5145", "score": "0.5552986", "text": "def reset(self) -> None:\n\n self.__count = 0\n self.__start_time = None", "title": "" }, { "docid": "c4e40f6e07a4dba560fb42ad4a15bf00", "score": "0.5551427", "text": "def reset(self):\n # reset to initial values\n self.opening_threshold = 0\n self.all_nodes = []\n self.root = None\n self.all_particles = []\n self.multipoles_up_to_date = False\n self.max_nodes = 500000\n self.max_sublevels = 50\n self.softening = 1e-3", "title": "" }, { "docid": "5e28298b3ed664517a1d059645d62d52", "score": "0.5543439", "text": "def reset(self, state):\n super(Var, self).reset(state)\n self._sum = 0.0\n self._sum_sq = 0.0\n self._count = 0", "title": "" }, { "docid": "07aa471fd569c43ff21ea24b201c3249", "score": "0.55330396", "text": "def reset(self):\n\t\tself.trees=0\n\t\tself.treeWeight=0\n\t\tself.treeVolume=0\n\t\tself.treeMoni.observe(self.trees, self.sim.now())\n\t\tself.logsPos=[]", "title": "" }, { "docid": "ad8029c437071c75bc0f388ef35006f4", "score": "0.5531897", "text": "def reset(self):\n\n self.result = 0\n self.entry1 = None\n self.entry2 = None\n self.dot_used = False\n self.has_bin = False\n self.e2_input = False\n self.error = False\n self.has_rslt = False\n self.def_bin = False\n self.func = Func.NONE\n self.bin_op = Func.NONE\n self.entry.setText(str(self.result))", "title": "" }, { "docid": "dc9c99833287032ea8714762b4c6a2f5", "score": "0.55235463", "text": "def reset(self):\n self.index = 0", "title": "" }, { "docid": "2d538fda479b21e2cf43235dbff087aa", "score": "0.5523119", "text": "def reset(self, x0):\n\t\tself.s = utils.pos2state(x0, self.n, self.m, self.N)\n\t\tself.R = 0\n\t\tself.capture = False\n\n\t\t# Reset trajectory history\n\t\tself.shist = [x0[i,:] for i in range(self.N)]\n\t\tself.ahist = [np.array([]) for i in range(self.N)]\n\t\tself.time = 0\n\t\tself.endconditionmet = 
False", "title": "" }, { "docid": "6942bd7d072a66782a611c1d9bd9cc12", "score": "0.55225235", "text": "def reset() -> None:", "title": "" }, { "docid": "08a5f7fe6c59281def57b6de4959a755", "score": "0.5522152", "text": "def reset(self):\n self.value = self.min\n\n if self._etaobj:\n self._etaobj.reset()", "title": "" }, { "docid": "fd4fcf0f3834cb9fdb9ca5f8b3852ef7", "score": "0.55209947", "text": "def reset(self):\n self.points_held = 0\n self.is_cheating = False", "title": "" }, { "docid": "e5080748ad6747bb004005f613aef2f8", "score": "0.5520095", "text": "def reset(self) -> None:\n self._index = self._size = 0", "title": "" }, { "docid": "0ab123ef085efb66954584f21ab66b02", "score": "0.55110675", "text": "def reset(self):\n self.start = time.monotonic()\n self.count = 0", "title": "" }, { "docid": "b2d78a78cb86ce0767d97aaf069237ef", "score": "0.55069864", "text": "def reset(self) -> np.ndarray:\n self.graph.reset()\n self.graph.update_count()\n self.leader.reset()\n self.param.iter = 0\n self.param.extra['t'] = 0\n\n # Returns the current reset observational space.\n return self.graph.distribution.current", "title": "" }, { "docid": "7d5c880ccd7d48a5a92d28d56e9eadb3", "score": "0.550386", "text": "def reset(self):\n for k, v in iter(self.nodes):\n v.reset()", "title": "" } ]
82e6d7f2553b9e80a9ca0596efe04e84
Apply Wordnet lemmatizer to text (go to root word)
[ { "docid": "de04606a08eb8f89ecf33e73eaccd774", "score": "0.76061803", "text": "def lemmatizer(text):\n wnl = WordNetLemmatizer()\n text = [wnl.lemmatize(word) for word in text.split()]\n return \" \".join(text)", "title": "" } ]
[ { "docid": "04bc11774c8564671a1c272290fcbd55", "score": "0.7877054", "text": "def lemmatizer(text):\n lemmatizer = WordNetLemmatizer()\n\n #Iterate through words and lemmatize\n i = 0\n for word in text:\n text[i] = lemmatizer.lemmatize(word)\n i = i + 1\n return text", "title": "" }, { "docid": "aadf5769dec81b75317abb5b3ded3850", "score": "0.75761193", "text": "def lematize(words):\n lemmatizer = WordNetLemmatizer()\n doc = [lemmatizer.lemmatize(x[0], wordnet_tags(x[1])) for x in pos_tag(words)]\n return ' '.join(doc)", "title": "" }, { "docid": "76c377a03d2fd47bd9134bde43e3ce69", "score": "0.74065566", "text": "def lemmatizeText(parsedData):\n wnl = WordNetLemmatizer()\n for d in parsedData:\n pos = pos_tag(word_tokenize(d[\"text\"].lower()))\n lems = lemmatize_with_pos(wnl, pos)\n # d[\"tokens\"] = word_tokenize(d[\"text\"])\n d[\"lemmas\"] = lems", "title": "" }, { "docid": "93443e160d863becd4400dcd20f2a7a4", "score": "0.73142326", "text": "def tokenize2(text):\n \n text= [char if char not in string.punctuation else \" \" for char in text.lower()]\n text=\"\".join(text)\n #split text into individual words\n text = text.split()\n # Remove stop words \n sw= stopwords.words('english')\n text = [word for word in text if word not in sw]\n lemmed_text = [WordNetLemmatizer().lemmatize(word) for word in text]\n \n return lemmed_text", "title": "" }, { "docid": "7fb4c7249ba3f3d0a53560a003148cf8", "score": "0.7205104", "text": "def normalise(self,word):\n word = word.lower()\n #word = stemmer.stem_word(word)\n stemmer = nltk.stem.porter.PorterStemmer()\n lemmatizer = nltk.WordNetLemmatizer()\n word = lemmatizer.lemmatize(word)\n return word", "title": "" }, { "docid": "357f628e903bac1044673ea08361bb94", "score": "0.7177658", "text": "def lematizing_text(data):\n \n lemmatizer = WordNetLemmatizer() \n lemmatized_output = []\n\n for tweet in data:\n lemmed = ' '.join([lemmatizer.lemmatize(w) for w in tweet])\n lemmatized_output.append(lemmed)\n \n return lemmatized_output", "title": "" }, { "docid": "21e895462dc9ad96e1d829f0cba8a04f", "score": "0.70998114", "text": "def lemmatize(data):\n lemmatizer = nltk.stem.WordNetLemmatizer()\n return [lemmatizer.lemmatize(word) for word in data]", "title": "" }, { "docid": "e1863e5e33ebed06bde1a31f63b75e53", "score": "0.7046299", "text": "def tokenize(text):\n \n stop_words = stopwords.words(\"english\")\n lemmatizer = WordNetLemmatizer()\n \n # Normalization : \n # Replace punctuations with \" \" and make string lowercase\n # Replace all punctuations except apostrophes\n text = re.sub(\"[^a-zA-Z0-9']\", \" \", text.lower())\n \n # Tokenize: Split to words\n tokenized = text.split(\" \")\n tokenized = [word for word in tokenized if word != \"\" ]\n \n # Remove stop_words\n cleaned = [word for word in tokenized if word not in stop_words]\n \n # Part of speech tagging\n tagged = nltk.pos_tag(cleaned)\n \n # Lemmatize\n lemmatized = []\n\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n for word, raw_tag in tagged:\n tag = tag_dict.get(raw_tag[0].upper(), wordnet.ADV)\n lemmatized.append(lemmatizer.lemmatize(word, pos = tag))\n\n return lemmatized", "title": "" }, { "docid": "edcdbeb323796b0e81729543909bbac6", "score": "0.70317405", "text": "def tokenize(text):\n \n \n # Normalize by replacing all values other than alphabets and numbers with space and convert all the words to lower case\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower()) \n \n #tokenize the text\n words = word_tokenize(text)\n \n 
#drop all teh stopwords and lemmatize\n words = [w for w in words if w not in stopwords.words('english')]\n lemmed_words = [WordNetLemmatizer().lemmatize(w) for w in words]\n \n return lemmed_words", "title": "" }, { "docid": "8a7bf798a8dbb713e59b33e3225ce84d", "score": "0.70284337", "text": "def normalise(word):\n\n lowercase_word = word.lower()\n\n nltk_tagged = nltk.pos_tag([lowercase_word])\n wordnet_tagged = map(lambda x: (x[0], nltk_to_wordnet_tag(x[1])), nltk_tagged)\n lemma = str()\n for word, tag in wordnet_tagged:\n if tag is None:\n lemma = word\n else:\n lemma = lemmatizer.lemmatize(word, tag)\n return lemma", "title": "" }, { "docid": "13476bf4e68eaf5aa121a7334c041674", "score": "0.6986198", "text": "def lemmatize_text(text):\n\n tokens = nltk.regexp_tokenize(text, sentence_re)\n for word in tokens:\n if word in punctuations:\n tokens.remove(word)\n tagged_tokens = nltk.tag.pos_tag(tokens)\n lemmatized_text = [normalise(word) for word, tag in tagged_tokens if acceptable_word(word)]\n return lemmatized_text", "title": "" }, { "docid": "b5a7648097119653b1cbd73a0e39fe17", "score": "0.6964392", "text": "def tokenize(text):\n # Normalize text\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n # Tokenize text\n words = word_tokenize(text)\n # Remove stop words\n words = [w for w in words if w not in stopwords.words(\"english\")]\n # Reduce words to their root form\n lemmed = [WordNetLemmatizer().lemmatize(w) for w in words]\n\n return lemmed", "title": "" }, { "docid": "c4d86e3add3544c0a7bfc124b3e72075", "score": "0.69499004", "text": "def normalise(word):\n word = word.lower()\n # word = stemmer.stem_word(word) #if we consider stemmer then results comes with stemmed word, but in this case word will not match with comment\n word = lemmatizer.lemmatize(word)\n return word", "title": "" }, { "docid": "7c524c23f8d38096f202b04cdf67c060", "score": "0.68733096", "text": "def tokenize(text):\n \n #initialize word lemmatizer\n lemmatizer = WordNetLemmatizer() \n #remove punctuation and uwanted characters\n text=re.sub(\"r[^a-zA-Z0-9]\",\" \",text)\n #tokenzie the words\n words= nltk.word_tokenize(text)\n #lower case the words and lemmantize them\n words = [lemmatizer.lemmatize(word.lower()) for word in words]\n #remove stop words\n words= [word for word in words if word not in stopwords.words('english')]\n \n return words", "title": "" }, { "docid": "b5317344fd5d425667af9c27d693b7b8", "score": "0.6853423", "text": "def mp_nltk_lematize(input_t):\n\n if input_t[2]:\n tokens = []\n for t in input_t[2].split():\n if len(t) < 4:\n tokens.append(t)\n else:\n tokens.append(lemmatizer.lemmatize(t))\n input_t[2] = \" \".join(tokens)\n input_t[2] = re.sub(\n r'\\w+:\\/{2}[\\d\\w-]+(\\.[\\d\\w-]+)*(?:(?:\\/[^\\s/]*))*',\n '',\n input_t[2])\n input_t[2] = re.sub(r'\\W+', ' ', input_t[2]).strip().lower()\n return input_t[2]", "title": "" }, { "docid": "15189ed87ed57882b0b510f30e0d3b7a", "score": "0.684229", "text": "def normalise(word):\n word = word.lower()\n print word\n word = stemmer.stem_word(word)\n word = lemmatizer.lemmatize(word)\n return word", "title": "" }, { "docid": "016ca64e57bd84f7c89b7e55273150fd", "score": "0.68036675", "text": "def tree_words(self, wn_lemmatize=False):\n lemmas = self.tree_lemmas(wn_lemmatize=wn_lemmatize)\n return [x[0] for x in lemmas]", "title": "" }, { "docid": "d590957fc39e55404a313ac1638ea932", "score": "0.67995805", "text": "def lemmatize(self, words):\r\n lemmed = [WordNetLemmatizer().lemmatize(w) for w in words]\r\n return lemmed", "title": "" }, { 
"docid": "f05fbe1d6d6d5d37c5db7c18a64c49be", "score": "0.67739016", "text": "def normalise(word):\n\tword = word.lower()\n\t# word = stemmer.stem_word(word) #if we consider stemmer then results comes with stemmed word, but in this case word will not match with comment\n\tword = lemmatizer.lemmatize(word)\n\treturn word", "title": "" }, { "docid": "8cbdb0fcc966add2ccc1c7c33ae7be20", "score": "0.6773597", "text": "def word_tokenizer(text):\n\n\t# start tokenizing\n\ttry:\n\t\t# # create spacey object\n\t\t# spacy_doc = nlp(text)\n\t\t# Lemmatize tokens, remove punctuation and remove stopwords.\n\t\treturn [token.lemma_ for token in text if token.is_alpha and not token.is_stop and len(token) > 1]\n\texcept Exception, e:\n\t\tlogging.error('[{}] : {}'.format(sys._getframe().f_code.co_name,e))\n\t\texit(1)", "title": "" }, { "docid": "7473831fd2dff00e897b34bab9e443fd", "score": "0.6758672", "text": "def pre_process_text(text):\n # Cconvert to lowercase, remove punctuations and unneeded characters, then strip\n text = re.sub(r'[^\\w\\s]', '', str(text).lower().strip())\n\n # Tokenize\n lst_text = text.split()\n # Remove Stopwords\n lst_text = [word for word in lst_text if word not in lst_stopwords]\n\n # Lemmatisation (convert the word into root word)\n lem = nltk.stem.wordnet.WordNetLemmatizer()\n lst_text = [lem.lemmatize(word) for word in lst_text]\n\n # Rejoin tokenized string\n text = \" \".join(lst_text)\n return text", "title": "" }, { "docid": "b753018ae017e12ba916a92217ce7a97", "score": "0.674717", "text": "def preprocess_pipeline(text):\n words = tokenize(text[0])\n lemmed_words = lematize(words)\n return lemmed_words", "title": "" }, { "docid": "dc40d594d40c6264757adc42b76313c6", "score": "0.67336124", "text": "def tokenize(text):\n #tokenize text\n tokens = word_tokenize(text)\n \n # initiate lemmatizer\n lemmatizer = WordNetLemmatizer()\n\n # iterate through each token\n clean_tokens = []\n for tok in tokens:\n \n # lemmatize, normalize case, and remove leading/trailing white space\n clean_tok = lemmatizer.lemmatize(tok).lower().strip() \n clean_tokens.append(clean_tok)\n return clean_tokens", "title": "" }, { "docid": "7dd15845f26d4f381e7fb077df818202", "score": "0.67247325", "text": "def lemmatize(segments):\r\n for segment in segments:\r\n pos_list = nltk.pos_tag(segment)\r\n for i in range(len(segment)):\r\n pos = get_wordnet_pos(pos_list[i][1])\r\n segment[i] = LEMMATIZER.lemmatize(segment[i].lower(), pos) # make segment lowercase before lemmatizing\r\n return segments", "title": "" }, { "docid": "d3ee0e3c08d119c3100b4d03ed2f9fab", "score": "0.6687493", "text": "def tree_lemmas(self, wn_format=False, wn_lemmatize=False):\n word_tag = []\n for tree in self.trees:\n word_tag += tree.pos()\n return self.wn_lemmatizer(\n word_tag, wn_format=wn_format, wn_lemmatize=wn_lemmatize)", "title": "" }, { "docid": "4b75a640612d0b1edf28046f0d798913", "score": "0.6669173", "text": "def lemmatize(word_list):\n\n wordnet_lemmatizer = WordNetLemmatizer()\n word_list_lemmatized = []\n\n for word in word_list:\n if word[0].isalpha():\n try:\n word_list_lemmatized.append(wordnet_lemmatizer.lemmatize(word[0], pos=penn_to_wn(word[1])))\n except:\n try:\n word_list_lemmatized.append(wordnet_lemmatizer.lemmatize(word[0]))\n except:\n pass\n\n return word_list_lemmatized", "title": "" }, { "docid": "480c2c9ee2798e33bb3c287c9354f5b7", "score": "0.6662673", "text": "def tokenize(text):\n raw_toks = word_tokenize(text)\n lem = WordNetLemmatizer()\n tokens = [lem.lemmatize(t).strip().lower() for t in 
raw_toks if t not in stopwords.words(\"english\")] \n return tokens", "title": "" }, { "docid": "2b07c1eb5271e08fe53f5522bd0275a7", "score": "0.6641279", "text": "def __wn_lemmatize(self, lemma):\n string, tag = lemma\n wnl = WordNetLemmatizer()\n if tag in ('a', 'n', 'r', 'v'):\n string = wnl.lemmatize(string, tag)\n else:\n string = wnl.lemmatize(string)\n return (string, tag)", "title": "" }, { "docid": "46ac4dee280d0eff559b78d73b776023", "score": "0.6637313", "text": "def lemmatize_words(tokens):\n lemmatizer = WordNetLemmatizer()\n lemmas = []\n for word in tokens:\n lemma = lemmatizer.lemmatize(word)\n lemmas.append(lemma)\n return lemmas", "title": "" }, { "docid": "b8e1240fa575c1fed4563a889937dd9a", "score": "0.6608097", "text": "def lemmatization(data_frame): \n from textblob import Word\n data_frame['review'] = data_frame['review'].apply(lambda x: \" \".join([Word(word).lemmatize() for word in x.split()]))\n return data_frame", "title": "" }, { "docid": "2eef355d52f903560f50fac4bdfbee51", "score": "0.6558888", "text": "def tokenize(text):\n detected_urls = re.findall(url_regex, text)\n for url in detected_urls:\n text = text.replace(url, \"urlplaceholder\")\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens\n #pass", "title": "" }, { "docid": "223303b6c7f31c25c1a95533c9246f5e", "score": "0.65576214", "text": "def lemmatize_tokens(tweet_tokens):\n lemmatizer = WordNetLemmatizer()\n tags = nltk.pos_tag(tweet_tokens)\n tags_word_net = [get_wordnet_pos(w[1]) for w in tags]\n lem_result = [] \n for i in range(len(tags_word_net)):\n if tags_word_net[i]: \n lem_result.append(lemmatizer.lemmatize(tags[i][0],tags_word_net[i]))\n else:\n lem_result.append(tags[i][0])\n return lem_result", "title": "" }, { "docid": "fd1b33992da6183d994dd8864d2e4b68", "score": "0.6552774", "text": "def tokenize(text):\n\n tokens = nltk.word_tokenize(text)\n lemmatizer = nltk.WordNetLemmatizer()\n clean_tokens = [lemmatizer.lemmatize(token).lower().strip() for token in tokens]\n return clean_tokens", "title": "" }, { "docid": "50430d3127f89a07c04aeca3124e69de", "score": "0.6552474", "text": "def process(self, text):\n \n return self.my_lemmatize(self.remove_stopword(self.clean_text(text)))", "title": "" }, { "docid": "62999b2b8f5e675620980001a58770d6", "score": "0.65255034", "text": "def __lemmatize(self, text):\n return [self.__lemmatizer.lemmatize(w) for w in text]", "title": "" }, { "docid": "b444273200178788bc0359908a0bb8ef", "score": "0.6523032", "text": "def tokenize(text):\n\n #### Normalize (convert to lower case and remove punctuation) text\n text = re.sub(\"[^a-z,A-Z,0-9]\", \" \", text.lower().strip())\n\n #### Tokenize text to words\n text = word_tokenize(text)\n\n #### Remove stop words\n text = [i for i in text if i not in stopwords.words('english') ]\n\n #### Lemmatize\n text = [WordNetLemmatizer().lemmatize(x, pos = 'n') for x in text]\n text = [WordNetLemmatizer().lemmatize(x, pos = 'v') for x in text]\n\n return text", "title": "" }, { "docid": "c02227e3fc4dc0f1bec32c432ac97b1f", "score": "0.65064806", "text": "def tokenize(text): \n \n stop_words = stopwords.words(\"english\")\n lemmatizer = WordNetLemmatizer()\n \n\n # normalize case and remove punctuation\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n \n # tokenize text\n tokens = word_tokenize(text)\n \n # lemmatize andremove stop words\n tokens = 
[lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]\n\n return tokens", "title": "" }, { "docid": "2f351bf2dffa7fe7d1b6319c1a3cf985", "score": "0.6475991", "text": "def tokenize(text):\n # remove punctuation\n text = re.sub(r'[^a-zA-Z0-9]', ' ',text)\n \n # tokenize text\n tokens = word_tokenize(text)\n \n # initiate lemmatizer\n lemmatizer = WordNetLemmatizer()\n \n # iterate through each token\n clean_tokens = []\n for tok in tokens:\n \n # lemmatize, normalize case, and remove leading/trailing white space\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "title": "" }, { "docid": "99c9517edaeabbbe322d9e4115c27845", "score": "0.64716494", "text": "def tokenize(text):\n stop_words = stopwords.words(\"english\")\n lemmatizer = WordNetLemmatizer()\n # normalize case and remove punctuation\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n \n # tokenize text\n tokens = word_tokenize(text)\n \n # lemmatize and remove the stop words\n tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words] \n \n return tokens", "title": "" }, { "docid": "3c23b40ee44a76342771ae3d58a8cb66", "score": "0.6456129", "text": "def tokenize(text):\n\n lemm = WordNetLemmatizer()\n \n text = word_tokenize(text)\n text = [lemm.lemmatize(x).lower().strip() for x in text]\n text = filter(lambda x: x not in skip_tokens, text)\n \n return \" \".join(text)", "title": "" }, { "docid": "d80792104cd594925497e1effa3c2e0c", "score": "0.645452", "text": "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "title": "" }, { "docid": "4b4b465a5e03b9e0762e76ab4ffe6963", "score": "0.64461315", "text": "def normalise(self,word):\n word = word.lower()\n # word = stemmer.stem_word(word) #if we consider stemmer then results comes with stemmed word, but in this case word will not match with comment\n word = self.lemmatizer.lemmatize(word)\n return str(word).strip()", "title": "" }, { "docid": "79358ab0f2032f042034cc197e48f383", "score": "0.6440574", "text": "def mp_spacy_lemmatize(input_t):\n if input_t[2]:\n\n doc = NLP(input_t[2])\n input_t[2] = \" \".join([token.lemma_ for token in doc])\n input_t[2] = re.sub(\n r'\\w+:\\/{2}[\\d\\w-]+(\\.[\\d\\w-]+)*(?:(?:\\/[^\\s/]*))*',\n '',\n input_t[2])\n input_t[2] = re.sub(r'\\W+', ' ', input_t[2]).strip().lower()\n return input_t[2]", "title": "" }, { "docid": "db83f33dcb6ac948cc1dee4c0f710101", "score": "0.6433062", "text": "def tokenize(text):\n\n # use nltk library \n tokens = word_tokenize(text)\n\n # use nltk lemmatizer \n lemma = WordNetLemmatizer()\n \n # lemmatize, make it lower case, and remove spaces\n tokens = [lemma.lemmatize(t).lower().strip() for t in tokens]\n\n return tokens", "title": "" }, { "docid": "95b8bf3dcee81bb965f2d31adafa76b7", "score": "0.641855", "text": "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens", "title": "" }, { "docid": "5c3d6cf5fb48c5bcafcfb1a54fcd9648", "score": "0.64020586", "text": "def tokenize(text): \n # remove punctuation\n text = re.sub(r\"[^\\w\\s]\", \"\", text)\n \n # tokenize text\n tokens = word_tokenize(text)\n \n # initiate lemmatizer\n lemmatizer = 
WordNetLemmatizer()\n \n # stopping word\n stop_words = set(stopwords.words('english')) \n \n # iterate through each token\n clean_tokens = []\n for tok in tokens:\n # lemmatize, normalize case, and remove leading/trailing white space\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n if clean_tok not in stop_words:\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "title": "" }, { "docid": "1884b433a05208f82b874c67a9a0c06e", "score": "0.63928205", "text": "def normalize(document, stopwords=STOPWORDS):\n\n for token, tag in document:\n token = token.lower().strip()\n\n if is_punct(token) or (token in stopwords):\n continue\n\n yield lemmatizer.lemmatize(token, wnpos(tag))", "title": "" }, { "docid": "a9740b4931568d9e999a55ec41b3ca7c", "score": "0.6383255", "text": "def lemmatize_tokens(tokens):\n lemmatizer = nltk.WordNetLemmatizer()\n lemmatized_tokens = [lemmatizer.lemmatize(token) for token in tokens]\n return lemmatized_tokens", "title": "" }, { "docid": "5904031b83c0e1e91afe66868e833d20", "score": "0.637792", "text": "def apply(cls,\n input,\n stemmer) -> str:\n \"\"\"\n Choose the available algorithm, which user has entered\n \"\"\"\n algorithm = cls.__choose_stemmer(\n stemmer=stemmer\n )()\n \"\"\"\n Lemmatize received tokens from function self.__tokenize()\n \"\"\"\n stems = [\n algorithm.stem(\n word=token\n )\n for token in cls.__tokenize(input)\n ]\n \"\"\"\n Extract the text from the lemmatized tokens\n \"\"\"\n return ' '.join(stems)", "title": "" }, { "docid": "09840688be52cf820bfc07433727ef61", "score": "0.6369183", "text": "def lemmatizing(self, text):\n if self.lang not in self.languages:\n raise LangDependencyError(\"Lemmatizing - language not defined\")\n \n if self.lang == \"portuguese\":\n text = self.portuguese_lemmatizing(text)\n elif self.lang == \"spanish\":\n raise LangDependencyError(\"Lemmatizing - language not implemented for lemmatizing\")\n elif self.lang == \"english\":\n raise LangDependencyError(\"Lemmatizing - language not implemented for lemmatizing\")\n elif self.lang == \"italian\":\n raise LangDependencyError(\"Lemmatizing - language not implemented for lemmatizing\")\n\n return text", "title": "" }, { "docid": "95cfe9e375d8c094a40025d495016c33", "score": "0.6334536", "text": "def tokenize(text):\n # splitting the sentence(s) into tokens/words\n tokens = word_tokenize(text)\n\n # initializing the lemmatizer\n lemmatizer = WordNetLemmatizer()\n\n # iterate through each token\n clean_tokens = []\n for token in tokens:\n clean_token = lemmatizer.lemmatize(token.lower().strip())\n clean_tokens.append(clean_token)\n\n return clean_tokens", "title": "" }, { "docid": "8e7c27012d4b1d79dfc6b32a785ea975", "score": "0.63303983", "text": "def lemmatizer(tagger, document, stopwords):\n tags = tagger.tag_text(document)\n lemmatized_doc = []\n\n for elt in tags:\n clean = elt.split('\\t')\n try:\n if clean[2] not in stopwords:\n lemmatized_doc.append(str(clean[2]))\n except:\n pass\n return \" \".join(lemmatized_doc)", "title": "" }, { "docid": "41e8a23b9394caeafbc316943807f42a", "score": "0.632665", "text": "def lemmatize_as_string(document):\n try:\n doc = nlp(text=document)\n lemmatized_words = []\n for token in doc:\n lemmatized_words.append(token.lemma_)\n lemmatized_document = \" \".join(lemmatized_words)\n return lemmatized_document\n except:\n return \"\"", "title": "" }, { "docid": "8898306da707337b3ef5fe876fc34498", "score": "0.63158315", "text": "def tokenize(text):\n\n stop_words = stopwords.words(\"english\")\n lemmatizer = 
WordNetLemmatizer()\n\n # normalize case and remove punctuation\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n\n # tokenize text\n tokens = word_tokenize(text)\n\n # lemmatize and remove stop words\n tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]\n\n return tokens", "title": "" }, { "docid": "aa8ce10d81be0418462dace5e4176e3b", "score": "0.6315783", "text": "def _lemmatization(self, texts):\n lemmatized = [[token.lemma_ for token in self._nlp(' '.join(sent)) if token.pos_ in self._allowed_pos]\n for sent in texts]\n return lemmatized", "title": "" }, { "docid": "d9929c075b3658f8a99815dc0f6ea652", "score": "0.63129747", "text": "def token_lemma(data):\n tokenizer = nltk.tokenize.TreebankWordTokenizer()\n lemmatizer = nltk.stem.WordNetLemmatizer()\n return [lemmatizer.lemmatize(word) for word in tokenizer.tokenize(data)]", "title": "" }, { "docid": "43f2d5eaf2b6e141a29cb5ec154b7d67", "score": "0.6306946", "text": "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "title": "" }, { "docid": "2a510b0e31d98f063c7ef5ea228afd7e", "score": "0.6301819", "text": "def lem(words: list, lemmatiser_cls=WordNetLemmatizer) -> list:\n\n lemmatizer = lemmatiser_cls()\n return seq(words).map(partial(lemmatizer.lemmatize, pos='v')).list()", "title": "" }, { "docid": "1530f130e8d10729a5cd5eea1c9b312f", "score": "0.6286428", "text": "def lemmatise_text(token_list: list):\n wordnet_lemmatizer = WordNetLemmatizer()\n lemma_list = []\n # Convert each word to each lemma and append to a new list\n for word in token_list:\n lemma_list.append(wordnet_lemmatizer.lemmatize(word))\n return lemma_list", "title": "" }, { "docid": "eae2ca22367053b4c48674552dc797e4", "score": "0.62861496", "text": "def lemmatize(self, _func=None):\n\n def lemmatize_decorator(func):\n self._check_id(func)\n self.chain[self.id] += func.__name__ + \"-\"\n\n @nldmethod\n def lemmatize_wrapper(_input=None):\n lemmatizer = WordNetLemmatizer()\n result = func(_input) if _input else func()\n if isinstance(result, list):\n if len(result) > 0 and isinstance(result[0], tuple):\n if self.logger:\n self.logger.info('Lemmatize : input is tuple')\n for i in range(len(result)):\n result[i] = list(result[i])\n result[i][0] = lemmatizer.lemmatize(result[i][0])\n result[i] = tuple(result[i])\n return result\n return [lemmatizer.lemmatize(word) for word in result]\n return lemmatize_wrapper\n if not _func:\n return lemmatize_decorator\n else:\n return lemmatize_decorator(_func)", "title": "" }, { "docid": "4f7e01171444000e3c1f659e35c3bb9c", "score": "0.6277009", "text": "def tokenize(text, lemmatizer=WordNetLemmatizer()):\n url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n\n # Detect URLs\n detected_urls = re.findall(url_regex, text)\n for url in detected_urls:\n text = text.replace(url, 'urlplaceholder')\n\n # Normalize and tokenize\n tokens = nltk.word_tokenize(re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower()))\n\n tokens = [t for t in tokens if t not in stopwords.words('english')]\n\n tokens = [lemmatizer.lemmatize(t) for t in tokens]\n\n return tokens", "title": "" }, { "docid": "06be923fc8dcaff86cb8a7bc15150f8f", "score": "0.6270527", "text": "def tokenize(text):\n \n # remove url place holder\n \n url_regex= r'(https?://\\S+)'\n text = re.sub(url_regex, 'urlplaceholder',text)\n 
\n #tokenize message into words \n \n tokens=word_tokenize(text)\n \n #remove the stop words \n \n filtered_tokens=[w for w in tokens if not w in stopwords.words('english')]\n \n #remove punctuation and tokens containing non alphabetic symbols\n \n alpha_tokens=[token.lower() for token in filtered_tokens if token.isalpha()]\n \n # make a default dictionary for the pos tagging \n tag_map = defaultdict(lambda : wordnet.NOUN)\n tag_map['J'] = wordnet.ADJ\n tag_map['V'] = wordnet.VERB\n tag_map['R'] = wordnet.ADV\n\n #lemmatize tokens using pos tags from defaulct dict\n \n clean_tokens=[]\n lmtzr = WordNetLemmatizer()\n for token, tag in pos_tag(alpha_tokens):\n clean_tokens.append(lmtzr.lemmatize(token, tag_map[tag[0]]))\n \n \n return clean_tokens", "title": "" }, { "docid": "512c0963c8b7289b2ba4ec60e49bd6ff", "score": "0.6242194", "text": "def lemmatize(sentences):\n\tlemmatizer = WordNetLemmatizer()\n\tlemmatized_sentences = []\n\tfor s in sentences:\n\t\tlemmatized_s = []\n\t\tfor word in s:\n\t\t\tlemmatized_s.append(lemmatizer.lemmatize(word))\n\t\tlemmatized_sentences.append(lemmatized_s)\n\n\treturn lemmatized_sentences", "title": "" }, { "docid": "861d17b891715ae6fe02dab8fc9e258c", "score": "0.62417716", "text": "def pos_lemmas(self, wn_format=False, wn_lemmatize=False):\n pos = self.pos\n pos = pos.strip()\n word_tag = list(map((lambda x : tuple(x.split(\"/\"))), re.split(r\"\\s+\", pos)))\n word_tag = [x for x in word_tag if len(x) == 2]\n word_tag = self.wn_lemmatizer(\n word_tag, wn_format=wn_format, wn_lemmatize=wn_lemmatize)\n return word_tag", "title": "" }, { "docid": "a5f4a528fc2125c50be4855043400c1b", "score": "0.62313986", "text": "def clean_up(text):\n # lemma = WordNetLemmatizer()\n lemmatizer = nltk.WordNetLemmatizer().lemmatize\n text = re.sub('\\W+', ' ', str(text))\n # print(\"step1:\", text)\n text = re.sub(r'[0-9]+', '', text.lower())\n # correcting spellings of words - user complaints are bound to have spelling mistakes\n # text = TextBlob(text).correct()\n # print(\"step2:\", text)\n word_pos = nltk.pos_tag(nltk.word_tokenize(text))\n normalized_text_lst = [lemmatizer(x[0], get_wordnet_pos(x[1])).lower() for x in word_pos]\n # print(\"step3:\", normalized_text_lst)\n stop_words_free = [i for i in normalized_text_lst if i not in english_stopwords and len(i) > 3]\n # print(\"step4:\", stop_words_free)\n stop_words_free = list(set(stop_words_free))\n return (stop_words_free)", "title": "" }, { "docid": "331da77597ad1df38b7959126d3643dc", "score": "0.62291944", "text": "def clean_lemmatize_token(tweet):\n stop_words = set(stopwords.words('english'))\n cleaned = tweet.translate(str.maketrans('', '', string.punctuation)).lower()\n tokenized = word_tokenize(cleaned)\n filtered = [w for w in tokenized if not w in stop_words]\n lemmatizer = WordNetLemmatizer()\n lemmatized = []\n for word in filtered:\n lemmatized.append(lemmatizer.lemmatize(word))\n to_remove = ['rt','mention','sxsw','link']\n lemmatized = [w for w in lemmatized if w not in to_remove]\n lemmatized = ' '.join(lemmatized)\n return lemmatized", "title": "" }, { "docid": "6b6576f4bfaf18cbedca083ed82728c2", "score": "0.6224641", "text": "def lemmatize_strings(body_text, language = 'da', remove_stopwords_ = True):\n \n if isinstance(body_text, str):\n body_text = [body_text] #Convert whatever passed to a list to support passing of single string\n \n if not hasattr(body_text, '__iter__'):\n raise TypeError('Passed argument should be a sequence.')\n \n lemmatizer = lemmy.load(language) #load lemmatizing 
dictionary\n \n lemma_list = [] #list to store each lemmatized string \n\n word_regex = re.compile('[a-zA-Z0-9æøåÆØÅ]+') #All charachters and digits i.e. all possible words\n\n for string in body_text:\n #remove punctuation and split words\n matches = word_regex.findall(string)\n\n #split words and lowercase them unless they are all caps\n lemmatized_string = [word.lower() if not word.isupper() else word for word in matches]\n \n #remove words that are in the stopwords file\n if remove_stopwords_:\n lemmatized_string = remove_stopwords(lemmatized_string)\n \n #lemmatize each word and choose the shortest word of suggested lemmatizations\n lemmatized_string = [min(lemmatizer.lemmatize('', word), key=len) for word in lemmatized_string]\n\n #remove words that are in the stopwords file\n if remove_stopwords_:\n lemmatized_string = remove_stopwords(lemmatized_string)\n\n lemma_list.append(' '.join(lemmatized_string))\n\n return lemma_list if len(lemma_list) > 1 else lemma_list[0] #return list if list was passed, else return string", "title": "" }, { "docid": "8d4f29574c92adad5260091cf9db7d28", "score": "0.6223193", "text": "def tokenize(text):\n \n # text transformation\n text = text.lower()\n text = re.sub(r\"[^a-zA-Z0-9]\",\" \",text)\n text = text.strip()\n \n #stop ords\n stop_words = stopwords.words(\"english\")\n \n #tokenize and lematize\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n \n #detele stop words\n clean_tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]\n\n return clean_tokens", "title": "" }, { "docid": "e078b420af0e13701fc1a543d5957fdc", "score": "0.61953324", "text": "def tokenize(text):\n # remove non-numerical non-character types and tokenize text\n tokens = word_tokenize(re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower()))\n\n # Instantiate lemmatizer and stemmer\n wnl = WordNetLemmatizer()\n ps = PorterStemmer()\n\n # lemmatize tokens of nouns and verbs and stemming\n clean_tokens = []\n for tok in tokens:\n clean_tok = wnl.lemmatize(tok)\n clean_tok = wnl.lemmatize(clean_tok, 'v')\n clean_tok = ps.stem(clean_tok)\n clean_tokens.append(clean_tok.strip())\n\n return clean_tokens", "title": "" }, { "docid": "64db3e1fd6cb01bae3f3e936a4a91d1c", "score": "0.61664844", "text": "def normalize_en(tokens, type, analyzer):\n process = True\n normalized_tokens = []\n for t in tokens:\n if t in en_proc_stop_words:\n process = False\n if process:\n normalized_tokens.append(analyzer.lemmatize(t))\n else:\n normalized_tokens.append(t)\n return normalized_tokens\n\n # return [analyzer.lemmatize(t) for t in tokens]", "title": "" }, { "docid": "e10b4622ad5e743a2b38db6cdcbfddf1", "score": "0.6159294", "text": "def lemmatize(self, sentence):\n lang = self.lang.lower()\n if lang == 'es' or lang == 'spanish':\n return [lemma_es(word) for word in word_tokenize(sentence)]\n elif lang == 'fr' or lang == 'french':\n return [stem_fr.stem(word) for word in word_tokenize(sentence)]\n elif lang == 'de' or lang == 'german':\n result = list()\n if use_compound_split_german:\n sentence, _ = LanguageDetection.split_compound(sentence, 'de')\n for word in word_tokenize(sentence):\n lemma_word = stem_de.stem(word)\n if word and word[0].isupper():\n first_char = lemma_word[0]\n remaining_str = lemma_word[1:] if len(lemma_word) > 1 else ''\n result.append(first_char.upper() + remaining_str)\n else:\n result.append(lemma_word)\n return result\n elif lang in ['nl','dutch']:\n return self.dutch_lemmatizer(sentence)\n elif lang == 'ar' or lang == 'arabic':\n return 
[stem_ar.stem(word) for word in word_tokenize(sentence)]\n elif lang == 'ru' or lang == 'russian':\n return [stem_ru.stem(word) for word in word_tokenize(sentence)]\n elif lang == 'sv' or lang == 'swedish':\n return [stem_sv.stem(word) for word in word_tokenize(sentence)]\n elif lang == 'zh' or lang == 'chinese':\n return self.chinese_tokenize(sentence)\n elif lang in [\"id\",\"bhasa\",\"ms\",\"malay\",\"indonesian\"]:\n return sastrawi_stemmer.stem(str(sentence)).split(\" \")\n elif lang in [\"ja\",\"japanese\"]:\n return stem_ja.tokenize(sentence)\n elif lang == 'ko' or lang == 'korean':\n return self.getKoLemmaTokens(sentence)\n elif lang == 'fi' or lang == 'finnish':\n return word_tokenize(sentence)\n elif lang == 'pl' or lang == 'polish':\n return word_tokenize(sentence)\n elif lang == 'uk' or lang == 'ukranian':\n return word_tokenize(sentence)\n elif lang == 'kk' or lang == 'kazakh':\n return kazakh_lemma_tokenizer(sentence)\n else:\n return [lemma_en(word) for word in word_tokenize(sentence)]", "title": "" }, { "docid": "5240d78445521278906ef327fe838c5c", "score": "0.6154293", "text": "def _tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "title": "" }, { "docid": "b675273dc8887edd469a05e656a318bc", "score": "0.6141228", "text": "def tokenize(text):\n # regex pattern for detecting a url\n url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n\n # get list of all urls using regex\n detected_urls = re.findall(url_regex,text)\n\n # replace each url in text string with placeholder\n for url in detected_urls:\n text = text.replace(url,'urlplaceholder')\n\n # tokenize text\n tokens = word_tokenize(text)\n\n # initiate lemmatizer\n lemmatizer = WordNetLemmatizer()\n\n # iterate through each token\n clean_tokens = []\n for tok in tokens:\n # lemmatize, normalize case, and remove leading/trailing white space\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "title": "" }, { "docid": "cbb5b2907e63f76ef78d1d20d4de2492", "score": "0.6138159", "text": "def pos_words(self, wn_lemmatize=False):\n lemmas = self.pos_lemmas(wn_lemmatize=wn_lemmatize)\n return [x[0] for x in lemmas]", "title": "" }, { "docid": "265a7c0c4d5418119a0922c510b1d94e", "score": "0.61237025", "text": "def tokenize(text):\n # get tokens from text\n tokens= WhitespaceTokenizer().tokenize(text)\n lemmatizer= WordNetLemmatizer()\n \n # clean tokens\n processed_tokens=[]\n for token in tokens:\n token=lemmatizer.lemmatize(token).lower().strip('!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~')\n token=re.sub(r'\\[[^.,;:]]*\\]','', token)\n \n # add token to compiled list if not empty\n if token !='':\n processed_tokens.append(token)\n return processed_tokens", "title": "" }, { "docid": "b35c610690947c2d0896e05a3740aa69", "score": "0.6120525", "text": "def lemmatize_with_pos(lemmatizer, words):\n pos_tagged_words = nltk.pos_tag(words)\n return [lemmatizer.lemmatize(w, pos=Fine2CoarsePosTags.get(pos, 'n')) for w, pos in pos_tagged_words]", "title": "" }, { "docid": "0621a3c36d1714145f0436824cb1cd1f", "score": "0.61138994", "text": "def softmax_words(self, output):", "title": "" }, { "docid": "df779b915cdd7032d10f027a8e9e996f", "score": "0.6106239", "text": "def lemmatize(tokens):\n\treturn [lmtzr.lemmatize(w) for w in tokens]", "title": "" }, { 
"docid": "e8cff7cf6adad200eb1e919a0bad7040", "score": "0.610595", "text": "def lemmatize_document_list(documents):\n # Lemmatize\n lemmatized_berichte = [lemmatize_as_string(x) for x in documents]\n # Make everything lowercase\n lemmatized_berichte = [x.lower() for x in lemmatized_berichte]\n # Concatenate as a long string\n lemmatized_berichte = [' '.join(x.split(\" \")) for x in lemmatized_berichte]\n return lemmatized_berichte", "title": "" }, { "docid": "bcdaff3c3cfd1a4c1149d26b0ab4ce5f", "score": "0.6105473", "text": "def pre_process(character_text):\n character_text = expand_contractions(character_text) #xall the function to expand contractions\n lemmatizer = WordNetLemmatizer()\n character_text = re.sub('\\W', ' ', character_text) #remove all special character\n character_text = re.sub(r'\\s+[a-zA-Z]\\s+', ' ', character_text) #remove single character like 's\n character_text = re.sub(r'\\s+[a-zA-Z]\\s+', ' ', character_text) #remove single character from the start\n character_text.lower()\n character_text = re.sub('[\\s]+', ' ', character_text, flags=re.I)\n character_text = re.sub('[\\n]+', ' ', character_text)\n character_text = ' '.join(character_text.split()) \n #character_text.dropna(inplace=True)\n tokens=[]\n lemmatised_tokens = []\n stop_words = set(stopwords.words('english'))\n text = character_text.translate(table)\n for w in text.split(\" \"):\n if w not in stop_words:\n lemmatised_tokens.append(lemmatizer.lemmatize(w.lower()))\n# tokens = [' '.join(l) for l in nltk.bigrams(lemmatised_tokens)] + lemmatised_tokens #usebigrams \n \n \"\"\"\n posttagger = CRFTagger()\n posttagger.set_model_file(\"crf_pos.tagger\")\n tokens = posttagger.tag(tokens)\n tokens = list('@'.join(w) for w in tokens)\n tokens = [' '.join(l) for l in nltk.bigrams(tokens)] + tokens #usebigrams \n \"\"\"\n return lemmatised_tokens", "title": "" }, { "docid": "65009f45c8631ee90d2cf7a21f64b083", "score": "0.61020064", "text": "def preprocess_df(df):\n df['sk'] = df.text.str.split(' ').apply(\\\n lambda x:' '.join([w for w in x if w not in text.ENGLISH_STOP_WORDS])) \n df['nltk'] = df.text.str.split(' ').apply(\\\n lambda x:' '.join([w for w in x if w not in stopwords.words('english')])) \n # initialize wordnet lemmatizer and pre-create lemmatized words\n wnl = WordNetLemmatizer()\n df['text_lem'] = df.text.str.split(' ').apply(lambda x:\\\n ' '.join([wnl.lemmatize(w) for w in x]))\n df['sk_lem'] = df.sk.str.split(' ').apply(lambda x:\\\n ' '.join([wnl.lemmatize(w) for w in x]))\n df['nltk_lem'] = df.nltk.str.split(' ').apply(lambda x:\\\n ' '.join([wnl.lemmatize(w) for w in x]))\n return df", "title": "" }, { "docid": "0e0b6294f7c509099d02e414d1a17b1f", "score": "0.6089804", "text": "def get_processed_text(text=\"\"):\n clean_text = re.sub('[^a-zA-Z0-9 \\n\\.]', ' ', text)\n tokens = tokenizer.tokenize(clean_text)\n tokens = [lemmatizer.lemmatize(token.lower().strip()) for token in tokens\n if token not in stopwords and len(token) >= 2]\n return tokens", "title": "" }, { "docid": "9152570938b6c764b97b0f5fd4eb086e", "score": "0.60859346", "text": "def make_wordle_from_mallet(word_weights_file,topics,words,outfolder, \n font_path, dpi):\n print(\"\\nLaunched make_wordle_from_mallet.\")\n\n from wordcloud import WordCloud\n import random\n\n if not os.path.exists(outfolder):\n os.makedirs(outfolder)\n \n def read_mallet_output(word_weights_file):\n \"\"\"Reads Mallet output (topics with words and word weights) into dataframe.\"\"\" \n word_scores = pd.read_table(word_weights_file, header=None, 
sep=\"\\t\")\n word_scores = word_scores.sort(columns=[0,2], axis=0, ascending=[True, False])\n word_scores_grouped = word_scores.groupby(0)\n #print(word_scores.head())\n return word_scores_grouped\n\n def get_wordlewords(words,topic):\n \"\"\"Transform Mallet output for wordle generation.\"\"\"\n topic_word_scores = read_mallet_output(word_weights_file).get_group(topic)\n top_topic_word_scores = topic_word_scores.iloc[0:words]\n topic_words = top_topic_word_scores.loc[:,1].tolist()\n word_scores = top_topic_word_scores.loc[:,2].tolist()\n wordlewords = \"\"\n j = 0\n for word in topic_words:\n word = word\n score = word_scores[j]\n j += 1\n wordlewords = wordlewords + ((word + \" \") * score)\n return wordlewords", "title": "" }, { "docid": "5c60441b2dce7d5be2fef65d916f822d", "score": "0.6073282", "text": "def clean_up_sentence(sentence):\r\n\r\n lemmatizer = WordNetLemmatizer()\r\n sentence_words = nltk.word_tokenize(sentence)\r\n return [lemmatizer.lemmatize(word.lower()) for word in sentence_words]", "title": "" }, { "docid": "51da43b0b7189f7824053e4f3ba250df", "score": "0.60597473", "text": "def tokenize(text):\n \n #Detect url \n url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n \n # get list of all urls using regex\n detected_urls = re.findall(url_regex, text)\n \n # replace each url in text string with placeholder\n for url in detected_urls:\n text = text.replace(url, \"urlplaceholder\")\n\n # tokenize text and initiate lemmatizer\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n \n # Remove stopwords\n tokens = [w for w in tokens if w not in stopwords.words(\"english\")]\n\n # iterate through each token\n clean_tokens = []\n for tok in tokens:\n \n # lemmatize, normalize case, and remove leading/trailing white space\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "title": "" }, { "docid": "63d48fbe5662732ac6f6208a5b85ca72", "score": "0.6031939", "text": "def tokenize(text):\n \n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n stop_words = stopwords.words(\"english\")\n\n clean_tokens = []\n for tok in tokens:\n if tok not in stop_words:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "title": "" }, { "docid": "725444c9cdbbad0344e87558fac6b9ac", "score": "0.60298574", "text": "def process(text, max_length=None):\n nlp = spacy.load('en', max_length=max_length)\n doc = nlp(text)\n\n sentences = []\n for sent in doc.sents:\n sentences.append({\n \"words\": [token.text for token in sent],\n \"lemmas\": [token.lemma_ for token in sent],\n \"pos\": [token.pos_ for token in sent],\n })\n\n return Document(sentences=[Sentence(words=s['words'], pos=s['pos'], lemmas=s['lemmas'], length=len(s['words']))\n for s in sentences])", "title": "" }, { "docid": "1d63054c2d2e9661c5c3ae6674a46f7f", "score": "0.6028903", "text": "def lemmatise_text(txt: str, lib_l: str = None) -> str:\n if lib_l is None:\n pass\n elif lib_l == 'nltk':\n txt_list = word_tokenize(txt)\n return \" \".join([lemmatiser_nltk.lemmatize(word=w) for w in txt_list])\n elif lib_l == 'spacy':\n txt = nlp(txt)\n return \" \".join(token.lemma_ for token in txt)\n else:\n raise Exception(f\"Sorry, entered library, {lib_l}, is not recognised.\\n\"\n + \"Please enter one from [None, 'nltk', 'spacy']\")", "title": "" }, { "docid": "585b1321cb97b8d9282ba9536537af3a", "score": "0.60182667", "text": "def tokenize(text):\n # 
Remove punctuation characters \n punctuation_characters = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n detected_characters = re.findall(punctuation_characters, text)\n for ch in detected_characters :\n text = text.replace(ch, \"\") \n #convert to lower\n text = text.lower()\n # Extract the word tokens from the provided text\n tokens = word_tokenize(text)\n #Lemmanitizer to remove inflectional and derivationally related forms of a word\n lemmatizer = WordNetLemmatizer()\n # List of clean tokens\n clean_tokens =[]\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens", "title": "" }, { "docid": "f50ff1715c9162ffb9e37f1a056d7480", "score": "0.6016047", "text": "def lemmatizer(wordform, pos_tag):\n\n if pos_tag[0] == \"j\": #i.e. the Penn tag for adjectives\n pos_tag=wn.ADJ #i.e. \"a\"\n\n lemma = wnl.lemmatize(wordform, pos_tag[0])\n return lemma", "title": "" }, { "docid": "63dabdd2253e53716b97df8a7ecaa960", "score": "0.5996638", "text": "def word_tokenizer(self):\n return", "title": "" }, { "docid": "671fa51e6c71b238090ca54256ebcb68", "score": "0.59899294", "text": "def tokenize_text_en(sample):\n # get the tokens using spaCy\n tokens = nlp_en(sample) # parser(sample)\n\n # lemmatize\n lemmas = []\n for tok in tokens:\n lemmas.append(tok.lemma_.lower().strip() if tok.lemma_ != \"-PRON-\" else tok.lower_)\n tokens = lemmas\n\n # stoplist the tokens\n tokens = [tok for tok in tokens if tok not in STOPLIST]\n\n # stoplist symbols\n tokens = [tok for tok in tokens if tok not in SYMBOLS]\n\n # remove large strings of whitespace\n while \"\" in tokens:\n tokens.remove(\"\")\n while \" \" in tokens:\n tokens.remove(\" \")\n while \"\\n\" in tokens:\n tokens.remove(\"\\n\")\n while \"\\n\\n\" in tokens:\n tokens.remove(\"\\n\\n\")\n while \"nan\" in tokens:\n tokens.remove(\"nan\")\n while \"..\" in tokens:\n tokens.remove(\"..\")\n # print(tokens)\n return tokens", "title": "" }, { "docid": "04e3e7e3b961a33ba962b6be1d580a46", "score": "0.5989709", "text": "def process_words(texts, stop_words=stop_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\n texts = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]\n texts = [bigram_mod[doc] for doc in texts]\n texts = [trigram_mod[bigram_mod[doc]] for doc in texts]\n texts_out = []\n nlp = spacy.load('en', disable=['parser', 'ner'])\n for sent in texts:\n doc = nlp(\" \".join(sent)) \n texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])\n # remove stopwords once more after lemmatization\n texts_out = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts_out] \n return texts_out", "title": "" }, { "docid": "8f5ddcf7aa2c2e1b60b8d96281969442", "score": "0.59877634", "text": "def preprocess_string(text):\n\treturn lemmatize(stopword_removal(tokenize(text)))", "title": "" }, { "docid": "1c89783ac02c49722e4675560bb44afc", "score": "0.5982832", "text": "def extract_tokens(self, text, target):\n stoplist = stopwords.words('english')\n lemmatizer = nltk.WordNetLemmatizer()\n corpus=[]\n for txt, label in zip(text, target):\n tokenized_text = [lemmatizer.lemmatize(word.lower()) for word in word_tokenize(txt[0]) if (not word in stoplist) and (word.isalpha()) and (len(word)>=3)]\n corpus.append((tokenized_text, label))\n return corpus", "title": "" }, { "docid": "1969f34c4a0139078fb6365a4a738191", "score": "0.5979593", "text": "def 
lemmatize(word):\n forms = [word for pos_form in get_word_forms(word).values() for word in pos_form]\n forms.sort()\n forms.sort(key=len)\n try:\n return forms[0]\n except IndexError:\n raise ValueError(\"{} is not a real word\".format(word))", "title": "" }, { "docid": "23a7c23d62fded042ccfe71ca5b71058", "score": "0.5977578", "text": "def lemmatize_verbs(word_list):\n lemmatizer = WordNetLemmatizer()\n return [lemmatizer.lemmatize(word, pos='v') for word in word_list]", "title": "" }, { "docid": "8f201b9e342f23648e9059def6eeffc4", "score": "0.5977086", "text": "def lemmatizeSentence(inputSentence):\n \n outputSentence = []\n inputSentence = word_tokenize(inputSentence)\n\n for token, tag in pos_tag(inputSentence):\n lemma = lemmatizer.lemmatize(token, tag_map[tag[0]])\n outputSentence.append(lemma)\n\n return \" \".join(outputSentence)", "title": "" }, { "docid": "0dd178023e8c66e9145cba604151d018", "score": "0.5963174", "text": "def tokenize(text):\n\n # we convert the text to lower case\n text = text.lower()\n\n # we remove any url contained in the text\n\n url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n url_in_msg = re.findall(url_regex, text)\n for url in url_in_msg:\n text = text.replace(url, \"urlplaceholder\")\n\n # we remove the punctuation\n text = re.sub(r\"[^a-z0-9\\s]\", \" \", text)\n\n # we tokenize the text\n words = word_tokenize(text)\n\n # we lemmatize and remove the stop words\n words = [lemmatizer.lemmatize(word) for word in words if word not in stopwords.words('english')]\n\n return words", "title": "" } ]
1fb062405dd05b13bd0419651eb3cbe4
Function that makes and save the figure with the local power spectrum
[ { "docid": "4a5d367a67fff5a98d7a0e2bc5885895", "score": "0.58250314", "text": "def makeAveragePSLocalFigure(averagePSLocal,figureFileName,gridSize): \r\n pylab.figure()\r\n for i in range(gridSize[0]):\r\n for j in range(gridSize[1]):\r\n pylab.subplot(gridSize[0],gridSize[1],i*gridSize[1]+j+1)\r\n pylab.imshow(np.log(averagePSLocal[i,j]),cmap = \"gray\")\r\n pylab.contour(np.log(averagePSLocal[i,j]))\r\n pylab.axis(\"off\")\r\n pylab.savefig(figureFileName)", "title": "" } ]
[ { "docid": "5c6401c84a8eda78f36dc21df5ceb618", "score": "0.6694815", "text": "def plotter_saver():\n xdat = get_xdat()\n FFCache.xdat = xdat\n\n p0 = [\n 2.14928399e+00, 6.30221940e-01, 2.75166059e+00, -1.96259549e+00,\n 7.03808465e-01, 5.26047000e-03, 1.49757697e-02, 5.81073410e-01,\n 1.59915284e-01, 5.60375907e+04, 9.44655438e-01, 5.89994829e-03\n ]\n # low plx solution\n p0 = [\n 1.96702792e+00, 7.26357060e-01, 2.51915752e+00, -8.37697825e-01,\n 6.87868987e-01, 1.18811095e-02, 1.46097125e-02, 6.77618428e-01,\n 2.33218856e-01, 5.60220478e+04, 1.46077603e-04, 1.94347621e-05\n ]\n if True:\n M = like(p0, xdat, -0.5, getModel=True)\n print(wrap_post(p0))\n tab = atpy.Table()\n tab['times'] = xdat['mjd']\n tab['flux'] = xdat['flux']\n tab['eflux'] = xdat['eflux']\n tab['xplx'] = xdat['plxs'][0]\n tab['yplx'] = xdat['plxs'][1]\n tab['mod'] = M\n tab.write('xx.fits', overwrite=True)\n # write the data", "title": "" }, { "docid": "ba78965c2580d796b0bc69c29a0ee14c", "score": "0.6406317", "text": "def save_spectrum(freqs, waveform_freq_space, output_path, figsize=(10, 4), xlabel='Frequency (Hz)', ylabel='Magnitude', title=None, color='b'):\n\n fig = plot_spectrum(freqs, waveform_freq_space, show=False, figsize=figsize, xlabel=xlabel, ylabel=ylabel, title=title, color=color)\n fig.savefig(output_path, bbox_inches='tight')", "title": "" }, { "docid": "fba762fb3f6b7530cd174a54f969ae30", "score": "0.6302121", "text": "def save_waveform(timesteps, waveform, output_path, figsize=(10, 4), xlabel='Time (s)', ylabel='y', title=None, color='b'):\n\n fig = plot_waveform(timesteps, waveform, show=False, figsize=figsize, xlabel=xlabel, ylabel=ylabel, title=title, color=color)\n fig.savefig(output_path, bbox_inches='tight')", "title": "" }, { "docid": "b128560d986e616a70e3322b82dbc3a7", "score": "0.6191265", "text": "def savePlotHysteresis(self, Amp, delay):\n plt.figure(figsize=(10, 6))\n plt.suptitle('Normvalue: ' + str(Amp * 1000) + \" mV\")\n plt.title(str(self.Power) + ' mW ' + str(int(delay)) + \" ps\")\n plt.plot(self.resultList[:, 0], self.resultList[:, 1] * 1000, 'o-b',\n lw=2, ms=2,\n label='Not pumped')\n plt.plot(self.resultList[:, 0], self.resultList[:, 2] * 1000, 's-r',\n lw=2, ms=2,\n label='Pumped')\n plt.xlabel(\"Current (A)\")\n plt.ylabel(\"Balanced Diode Signal (mV)\")\n plt.grid()\n plt.legend()\n plt.savefig(\n str(SaveImageFile) + 'SampleHysteresisCurrent' + str(int(delay)) +\n '.png', bbox_inches=\"tight\", dpi=300)", "title": "" }, { "docid": "0e59fe9967fbc0e899909d5d47ece0bb", "score": "0.61460745", "text": "def figure_exp_2(fig_type = '.png', save_traces = False, show_traces = False):\n temp, save_folder, specific_folder, temp, file_save = get_figure_data_settings(fig_no = 2)\n folder_save = save_folder + specific_folder + '_results/'\n file2save_npz = 'fig2_' + '.npz'\n file2save_filtnpz = 'fig2_filt_' + '.npz'\n file2save = 'fig2_' + fig_type\n \n filt_for_fig = 'high_pass2.npz'\n dh.filter_data(folder_save, file_save, folder_save, filt_for_fig, 2.0, \n electrodes = [2],N = 100, filter_type = 'high_pass')\n display.plot_data(folder_save,file2save,folder_save,filt_for_fig,x_scale = 'ms', time_range = [4050,4800],electrodes=[2])\n \n if save_traces:\n \"\"\" it must be done only if the recording was gap free but \n it was not saved in the trace format yet\"\"\"\n\n #display.plot_data(folder_save,file2save,folder_save,file_save,x_scale = 'ms')#,time_range=time_range)\n new_file = 'filtered_data_above500.npz'\n #dh.filter_data(folder_save, file_save, folder_save, new_file, 500.0, \n 
# electrodes = [2],N = 100, filter_type = 'high_pass')\n \n #dh.trigger_on_spike(folder_save, new_file, folder_save, file2save_filtnpz, thresh = -30, el=0,\n # time_range=[-15,50])\n \n #display.plot_data(folder_save,file2save,folder_save,file2save_filtnpz,\n # x_scale = 'ms', y_range = [-20,20],electrodes = [0,2])\n # #time_range = [15,23])\n \n all_times1, all_width1, all_depth1 = dh.calculate_spike_width_in_traces(folder_save,\n file2save_filtnpz, traces = [2, 5, 9, 13, 16, 17, 18, 21], time_range = [16.9,18.7])\n all_times2, all_width2, all_depth2 = dh.calculate_spike_width_in_traces(folder_save,\n file2save_filtnpz, traces = [6, 19, 48, 73, 88, 103, 110, 119, 126], time_range = [19.0,22.0])\n \n print 'half width of spike 1 (calculated on some traces): ' + str(np.mean(all_width1)) + ' ms'\n print 'depth of spike 1 (calculated on some traces): ' + str(np.mean(all_depth1)) + ' ms'\n print 'half width of spike 2 (calculated on some traces): ' + str(np.mean(all_width2)) + ' ms'\n print 'depth of spike 2 (calculated on some traces): ' + str(np.mean(all_depth2)) + ' ms' \n\n file_save = 'IPSP_traces2_' + fig_type\n time_range = [5,40] # in ms\n y_range = [-20, 35]\n data_details = dh.read_npzdata(folder_save, file2save_npz, \"data\", \"scale\", \"fs\")\n display.plot_data_one_mean(data_details, folder_save, file_save, x_scale = 'ms',\n title = 'normal ringer', sweeps = [60, 66, 72, 86, 91], #, 51, 57, 66, 67, 72, 77, 86, 91, 113, 122, 128], #[8,25, 29,32,33],\n electrodes = [0,2], y_range = y_range, time_range = time_range,\n remove_avg = True) \n \n file_save = 'IPSP_spike2_' + fig_type\n display.plot_data_one_mean(data_details, folder_save, file_save, x_scale = 'ms',\n title = 'normal ringer', sweeps = [23, 26, 62, 89, 100], #, 17, 18, 26, 39, 45, 49, 53, 62, 71, 84, 89, 100, 103, 108, 124],#[4,10, 11, 14, 18],\n electrodes = [0,2], y_range = y_range, time_range = time_range,\n remove_avg = True) \n if show_traces:\n plt.show()", "title": "" }, { "docid": "bf2a7cac6aac110470d561a6d3a47727", "score": "0.6104321", "text": "def createSpectrumPlot(self):\n\n fig = plt.figure(figsize=(8,6),dpi=80)\n fig.set_facecolor('#ededed')\n \n # Format plot\n ax = plt.subplot(111)\n\n ax.set_xlabel(\"Frequency [MHz]\")\n ax.set_ylabel(\"Power [-]\")\n ax.set_xlim(1000,1500)\n ax.set_ylim(0,1000)\n \n fig.canvas.draw()\n \n return fig, ax", "title": "" }, { "docid": "bcdc9a79e7aaceaebc54caf226176b8a", "score": "0.6049133", "text": "def saveSpectrum(self):\n filename, __ = _getSaveFileName(\n self,\n u\"Save Spectrum\",\n self._settings.value(\"imagePath\"),\n \"plain text (*.txt)\")\n if not filename:\n return\n if \".\" not in filename:\n filename += \".txt\"\n self.spectrumPlotter.save_spectrum(filename)", "title": "" }, { "docid": "abfe7633c5b625ad0f94a5fc74a1cb54", "score": "0.6027024", "text": "def save_spectrum(fname,tobs,params,pnames,stdp,l,f,res):\n\ttbhdu = fits.BinTableHDU.from_columns([fits.Column(name='wavelength', format='E', array=l), fits.Column(name='flux', format='E', array=f), fits.Column(name='residuals', format='E', array=res)])\n\tprihdr = fits.Header()\n\t\n\tfor i in range(len(pnames)):\n\t\tprihdr[pnames[i]] = params[i] # best fit params\n\t\tprihdr[pnames[i]+'_std'] = stdp[i] # \n\tprihdr['obs_time'] = tobs\n\tprihdu = fits.PrimaryHDU(header=prihdr)\n\tthdulist = fits.HDUList([prihdu, tbhdu])\n\tif os.path.isfile('../DATA/FTS_processed/%s' %fname):\n\t\tos.system('rm ../DATA/FTS_processed/%s' %fname)\n\t\t\n\tthdulist.writeto('../DATA/FTS_processed/%s' %fname)", "title": "" }, { 
"docid": "71bec9ca523cd03d2799e245463b4878", "score": "0.60255283", "text": "def main05_gain_scheduling_modified_steep_ramp():\n x10, x20 = 0, 0\n eta0 = 0\n xhat10, xhat20 = 0, 0\n x0 = np.array([x10, x20, eta0, xhat10, xhat20])\n r = lambda t : (t<10)*1/10*t + (t>=10)*1\n alpha = lambda t : r(t)\n t_start, t_end = 0, 120\n\n t,x1,x2,eta,xhat1,xhat2,r,y = simulate_modified(r, alpha, x0, t_start, t_end)\n\n fig, ax = fc.new_figure()\n ax.plot(t, r, 'r--', label = '$r$')\n ax.plot(t, y, label = '$y$')\n ax.set_xlabel('$t$')\n ax.grid()\n ax.legend()\n\n Path('./figures').mkdir(parents=True, exist_ok=True)\n plt.savefig('figures/gain_scheduling_modified_steep_ramp.pdf', pad_inches=0.0)", "title": "" }, { "docid": "1bf84ac760d1d73d16f9b1103f5eb871", "score": "0.6023966", "text": "def figure_exp_0(fig_type = '.png',save_traces=False, show_traces = True):\n temp, save_folder, specific_folder, temp, file_save = get_figure_data_settings(fig_no = 0)\n file2save_npz = 'fig0_' + '.npz'\n \n file2save = 'DC_seizures' + fig_type\n file2save_example = 'egDC_seizures' + fig_type\n folder_save = save_folder + specific_folder + '_results/'\n y_range = [-200,200]\n #display.plot_data(folder_save, file2save_example,folder_save,file_save,x_scale = 'sec', \n # y_range=y_range, time_range = [90000,160000], electrodes=[],\n # y_range_intra = [-2,0]) \n\n if save_traces:\n #it must be done only if the recording was gap free but \n #it was not saved in the trace format yet\n\n #display.plot_data(folder_save,file2save,folder_save,file_save,x_scale = 'ms')#,time_range=time_range)\n \n #dh.trigger_on_spike(folder_save, file_save, folder_save, file2save_npz, thresh = -1.1, el=0,\n # time_range=[-25000,60000], up = False, center_on_peak = False)\n \n display.plot_data(folder_save,file2save,folder_save,file2save_npz,x_scale = 'ms', \n electrodes = [0,1,2,3],y_range =y_range)\n #import pdb; pdb.set_trace() \n\n time_range = [] # in ms\n data_details = dh.read_npzdata(folder_save, file2save_npz, \"data\", \"scale\", \"fs\")\n file_save = 'DC_shift_all' + fig_type\n #import pdb; pdb.set_trace()\n display.plot_data_one_mean(data_details, folder_save, file_save, x_scale = 'ms',\n title = 'interictal events', sweeps = [0,1,2],\n electrodes = [0,1,2,3], y_range = y_range, time_range = time_range,\n remove_avg = True) \n if show_traces:\n plt.show()", "title": "" }, { "docid": "001d495683f96441317f801353054b10", "score": "0.60154146", "text": "def figure_exp_26(fig_type = '.eps',save_traces = False, show_traces = False):\n temp, save_folder, specific_folder, temp, file_save = get_figure_data_settings(fig_no = 26)\n #file2save_npz = 'fig25_' + '.npz'\n \n \n folder_save = save_folder + specific_folder + '_results/'\n file2save = 'inh_synch_1' + fig_type\n \n #display.plot_data(folder_save, file2save,folder_save,file_save,x_scale = 'sec', \n # y_range=[], time_range = [], electrodes=[0,1,2,3],\n # y_range_intra = []) \n\n\n y_range = [-120,100]\n #time_range = [] # in ms\n data_details = dh.read_npzdata(folder_save, file_save, \"data\", \"scale\", \"fs\")\n file_save = 'gaba_a_synch_1' + fig_type\n #import pdb; pdb.set_trace()\n display.plot_data_one_mean(data_details, folder_save, file_save, x_scale = 'ms',\n title = 'inhibitory synchrony', sweeps = [27,40,43,21,16], #2,8,16,21,23], #,27,40,43,48],\n electrodes = [0,2,3], y_range = y_range, time_range = [],\n remove_avg = True) \n plt.show()", "title": "" }, { "docid": "05666e9c3b49aa1ae7c7bd651aea1efe", "score": "0.6001972", "text": "def 
main04_gain_scheduling_modified_ramp():\n x10, x20 = 0, 0\n eta0 = 0\n xhat10, xhat20 = 0, 0\n x0 = np.array([x10, x20, eta0, xhat10, xhat20])\n r = lambda t : (t<100)*1/100*t + (t>=100)*1\n alpha = lambda t : r(t)\n t_start, t_end = 0, 120\n\n t,x1,x2,eta,xhat1,xhat2,r,y = simulate_modified(r, alpha, x0, t_start, t_end)\n\n fig, ax = fc.new_figure()\n ax.plot(t, r, 'r--', label = '$r$')\n ax.plot(t, y, label = '$y$')\n ax.set_xlabel('$t$')\n ax.grid()\n ax.legend()\n\n Path('./figures').mkdir(parents=True, exist_ok=True)\n plt.savefig('figures/gain_scheduling_modified_ramp.pdf', pad_inches=0.0)", "title": "" }, { "docid": "1013ebc2a97b308f1fe1c2c0a077e73f", "score": "0.59988344", "text": "def figure_exp_15_1(fig_type = '.png', show_traces = False):\n temp, save_folder, specific_folder, temp, file_save = get_figure_data_settings(fig_no = 15.1)\n folder_save = save_folder + specific_folder + '_results/' \n \n data_details = dh.read_npzdata(folder_save, file_save, \"data\", \"scale\", \"fs\")\n file_save = '_noEPSP' + fig_type\n \n display.plot_data_one_mean(data_details, folder_save, file_save, x_scale = 'ms',\n title = 'No EPSP induced', sweeps = [1,3,5,7,9,10],\n electrodes = [0,1,2, 3], y_range = [-10, 10], remove_avg = True,\n time_range = [23, 48]) \n if show_traces:\n plt.show() \n \n #folder_save,file_save,folder, file, x_scale = 'sec',\n # title = 'Data', time_range = [], y_range =[-30, 70],\n # electrodes = [],y_range_intra = []\n #display.plot_data(folder_save, file_save + fig_type, folder_save,file_save, x_scale = 'ms',\n # y_range =[-10, 10])\n \n \n \n #import pdb; pdb.set_trace() ", "title": "" }, { "docid": "7d43da31d57742040d50e4608f9c1dcf", "score": "0.5987321", "text": "def figure_exp_1(fig_type = '.png'):\n temp, save_folder, specific_folder, temp, file_save = get_figure_data_settings(fig_no = 1)\n folder_save = save_folder + specific_folder + '_results/'\n \n # plot trace of few waves\n file2save = 'seizure_like' + fig_type\n tit = 'seizure like activity in human subiculum'\n time_range = [140000,180000]# in ms\n y_range = [-100,100]\n display.plot_data(folder_save,file2save,folder_save,file_save,x_scale = 'ms',\n title=tit,time_range=time_range,y_range = y_range)\n \n # plot only one wave\n file2save = 'one_wave' + fig_type\n tit = 'seizure like activity in human subiculum'\n time_range = [149900,151500]# in ms\n y_range = [-100,100]\n display.plot_data(folder_save,file2save,folder_save,file_save,x_scale = 'ms',\n title=tit,time_range=time_range,y_range = y_range)", "title": "" }, { "docid": "b8bda6d882aa8b2cf053676d6beb0b29", "score": "0.5970684", "text": "def figure_exp_27(fig_type = '.png', save_traces = False, show_traces = False):\n temp, save_folder, specific_folder, temp, file_save = get_figure_data_settings(fig_no = 27)\n file2save_npz = 'fig27_' + '.npz'\n \n folder_save = save_folder + specific_folder + '_results/'\n file2save = 'gaba_a_synch' + fig_type\n #y_range = [-500,700] #[-15,15]\n #time_range =[3480,3510] \n #display.plot_data(folder_save, file2save,folder_save,file_save,x_scale = 'sec', \n # y_range=[], time_range = [], electrodes=[0,1,2,3],\n # y_range_intra = [-50,0]) \n #import pdb; pdb.set_trace()\n\n if save_traces:\n #it must be done only if the recording was gap free but \n #it was not saved in the trace format yet\n\n #display.plot_data(folder_save,file2save,folder_save,file_save,x_scale = 'ms')#,time_range=time_range)\n dh.trigger_on_spike(folder_save, file_save, folder_save, file2save_npz, thresh = -25, el=3,\n time_range=[-200,1000], up = 
False, center_on_peak = False)\n display.plot_data(folder_save,file2save,folder_save,file2save_npz,x_scale = 'ms', \n electrodes = [0,2,3],y_range =[])\n\n #time_range = [] # in ms\n data_details = dh.read_npzdata(folder_save, file2save_npz, \"data\", \"scale\", \"fs\")\n file_save = 'gabaA_synch_fig27_group1' + fig_type\n #import pdb; pdb.set_trace()\n display.plot_data_one_mean(data_details, folder_save, file_save, x_scale = 'ms',\n title = 'GABAa synch, cell2', sweeps = [0,4,10,13],\n electrodes = [0,2,3], y_range = [], time_range = [],\n remove_avg = True) \n file_save = 'gabaA_synch_fig27_group2' + fig_type \n display.plot_data_one_mean(data_details, folder_save, file_save, x_scale = 'ms',\n title = 'GABAa synch, cell2', sweeps = [2,8,15,18,21],\n electrodes = [0,2,3], y_range = [], time_range = [],\n \n remove_avg = True)", "title": "" }, { "docid": "53d21d1b575e1a14797c46f00c88259b", "score": "0.59470975", "text": "def plot_stave(self):\n plt.figure(figsize=(self.modules*(ALPIDE_COLS//ALPIDE_ROWS)*STAVE_SIZE_MODIFIER,QUAD_STAVES*STAVE_SIZE_MODIFIER))\n stave=self.remap_to_stave()\n plt.imshow(stave, interpolation='bilinear')\n plt.clim(0, 10*(self.max_charge+1))\n plt.xticks(list(range(0,ALPIDE_COLS*CHIP_PER_LANE*self.modules,ALPIDE_COLS*CHIP_PER_LANE))+[ALPIDE_COLS*self.modules*CHIP_PER_LANE-1])\n plt.xlabel('Pixel')\n plt.yticks(list(range(0,ALPIDE_ROWS*QUAD_STAVES +1,ALPIDE_ROWS)))\n plt.ylabel('Pixel')\n cbar = plt.colorbar()\n cbar.set_label(\"Threshold (electrons)\")\n filename = f\"{self.basename}stave{self.feeid}.{self._plot_extension}\"\n print(f\"Image stored in {filename}\")\n plt.savefig(filename, bbox_inches='tight', dpi=1200)\n plt.close()", "title": "" }, { "docid": "f4465088112562ca02dd580ef25efeb3", "score": "0.59387684", "text": "def dataPlot_savebutton_cmd(self):\n plotname = self.dir+str(\"%.5d\" % self.currentobj)+'_MiG1D_specplot.pdf'\n self.dataPlot_fig.savefig(plotname)\n print(' - Saved plot window to \\n '+plotname)", "title": "" }, { "docid": "442f6a85fb8e757652a9af84b928412e", "score": "0.5914415", "text": "def spectrum(spectrum_data, save=None, show=True, xlim=None, ylim=(0, 100)):\n if save is not None or show:\n (x, to_plot) = (spectrum_data[0], spectrum_data[1:])\n pyplot.clf()\n pyplot.gca().set_autoscale_on(False)\n pyplot.xlim(x[1], x[-1])\n if xlim is not None: pyplot.xlim(*xlim)\n pyplot.ylim(ylim[0], ylim[1])\n pyplot.xscale('log')\n pyplot.xlabel('frequency')\n pyplot.ylabel('power (dB)')\n for y in to_plot: pyplot.plot(x, y)\n if save is not None: pyplot.savefig(save + \"spectrum.png\")\n if show: pyplot.show()", "title": "" }, { "docid": "bbefaadf53e7029959e0bb015441b03d", "score": "0.58985144", "text": "def compute(A, b, w, T, resolution=500):\n t = linspace(0, T, resolution+1)\n u = damped_vibrations(t, A, b, w)\n plt.figure() # needed to avoid adding curves in plot\n plt.plot(t, u)\n plt.title('A=%g, b=%g, w=%g' % (A, b, w))\n '''\n if not os.path.isdir('static'):\n os.mkdir('static')\n else:\n # Remove old plot files\n for filename in glob.glob(os.path.join('static', '*.png')):\n os.remove(filename)\n # Use time since Jan 1, 1970 in filename in order make\n # a unique filename that the browser has not chached\n plotfile = os.path.join('static', str(time.time()) + '.png')\n plt.savefig(plotfile)\n return plotfile\n '''\n\n # Make Matplotlib write to BytesIO file object and grab\n # return the object's string\n from io import BytesIO\n figfile = BytesIO()\n plt.savefig(figfile, format='png')\n figfile.seek(0) # rewind to beginning of file\n 
import base64\n figdata_png = base64.b64encode(figfile.getvalue())\n print figdata_png\n return figdata_png", "title": "" }, { "docid": "8d10da21a2c7214236028be1452ee9bc", "score": "0.58963424", "text": "def mkExtinctionDemoFigSmall( z=2.0 ):\n import numpy as np\n import sncosmo\n # from sncosmost import hstbandpasses, ccsnmodels\n from matplotlib import rc\n rc('text',usetex=True)\n rc('text.latex', preamble='\\usepackage[usenames]{xcolor}')\n from matplotlib import pyplot as pl\n from matplotlib import ticker\n from pytools import plotsetup\n from scipy import interpolate as scint\n\n\n fig = plotsetup.fullpaperfig( 1, [8,3] )\n\n\n # load the O'Donnell 1994 dust model\n dust = sncosmo.OD94Dust()\n snIa = sncosmo.Model( source='hsiao', effects=[dust],\n effect_names=['host'], effect_frames=['rest'])\n\n ax1 = pl.gca()\n\n\n f127m = sncosmo.get_bandpass( 'f127m' )\n f139m = sncosmo.get_bandpass( 'f139m' )\n f153m = sncosmo.get_bandpass( 'f153m' )\n\n f125w = sncosmo.get_bandpass( 'f125w' )\n f140w = sncosmo.get_bandpass( 'f140w' )\n f160w = sncosmo.get_bandpass( 'f160w' )\n\n wf127m = f127m.wave / 10000.\n wf139m = f139m.wave / 10000.\n wf153m = f153m.wave / 10000.\n\n wf125w = f125w.wave / 10000.\n wf140w = f140w.wave / 10000.\n wf160w = f160w.wave / 10000.\n\n # ax2 = ax1.twinx()\n ax2 = ax1\n ax2.plot( wf127m, f127m.trans, color='darkmagenta', ls='-', lw=2)\n ax2.plot( wf153m, f153m.trans, color='darkorange', ls='-', lw=2)\n\n ax2.plot( wf125w, f125w.trans, color='darkmagenta', ls='--', lw=2)\n ax2.plot( wf160w, f160w.trans, color='darkorange', ls='--', lw=2)\n\n intf127m = scint.interp1d( wf127m, f127m.trans, bounds_error=False, fill_value=0 )\n intf153m = scint.interp1d( wf153m, f153m.trans, bounds_error=False, fill_value=0 )\n\n colorlist1, colorlist2 = [], []\n for Av,ls, alpha in zip([2,1,0],[':','--','-'],[0.1,0.3,0.5]):\n snIa.set( z=z, t0=0, hostr_v=3.1, hostebv=Av/3.1 )\n colorlist1.append( snIa.bandmag( 'f127m','ab', 0) - snIa.bandmag( 'f125w','ab', 0) )\n colorlist2.append( snIa.bandmag( 'f153m','ab', 0) - snIa.bandmag( 'f160w','ab', 0) )\n\n snwave = np.arange( 6000., 20000., 10. 
)\n snflux = snIa.flux( 0, snwave )\n snwave = snwave / 10000.\n snflux = 0.12 * snflux / snflux[400]\n ax1.plot( snwave, snflux, color='k', ls=ls, lw=1, label='%.1f'%Av )\n overlap127 = np.min( [snflux, intf127m(snwave)], axis=0 )\n ax2.fill_between( snwave, np.zeros(len(snwave)), overlap127, color='darkmagenta', alpha=alpha )\n overlap153 = np.min( [snflux, intf153m(snwave)], axis=0 )\n pl.fill_between( snwave, np.zeros(len(snwave)), overlap153, color='darkorange', alpha=alpha )\n\n\n ax1.legend(loc='upper left', bbox_to_anchor=(0.0,0.9),frameon=False,fontsize=11 )\n ax1.text( 0.08, 0.88, 'A$_V$', transform=ax1.transAxes, ha='left',va='bottom',fontsize=11 )\n ax1.text( 0.13, 0.88, '$\\Delta$m$_{127}$', color='darkmagenta',transform=ax1.transAxes, ha='left',va='bottom',fontsize=11 )\n ax1.text( 0.23, 0.88, '$\\Delta$m$_{153}$', color='darkorange',transform=ax1.transAxes, ha='left',va='bottom',fontsize=11 )\n\n ax1.text( 0.14, 0.78, '%.3f'%colorlist1[0], color='darkmagenta',transform=ax1.transAxes, ha='left',va='bottom',fontsize=11 )\n ax1.text( 0.23, 0.78, '%.3f'%colorlist2[0], color='darkorange',transform=ax1.transAxes, ha='left',va='bottom',fontsize=11 )\n\n ax1.text( 0.14, 0.68, '%.3f'%colorlist1[1], color='darkmagenta',transform=ax1.transAxes, ha='left',va='bottom',fontsize=11 )\n ax1.text( 0.23, 0.68, '%.3f'%colorlist2[1], color='darkorange',transform=ax1.transAxes, ha='left',va='bottom',fontsize=11 )\n\n ax1.text( 0.14, 0.58, '%.3f'%colorlist1[2], color='darkmagenta',transform=ax1.transAxes, ha='left',va='bottom',fontsize=11 )\n ax1.text( 0.23, 0.58, '%.3f'%colorlist2[2], color='darkorange',transform=ax1.transAxes, ha='left',va='bottom',fontsize=11 )\n\n # title=+\n # '\\\\textcolor{DarlMagenta}{W}' +\n # '\\\\textcolor{F153M-F160W')#, handlelength=0.5, numpoints=3)\n # ax1.text( 0.15,0.95,,ha='left',va='bottom')\n\n ax1.yaxis.set_major_locator( ticker.MultipleLocator( 0.1 ) )\n ax1.yaxis.set_minor_locator( ticker.MultipleLocator( 0.05 ) )\n ax1.xaxis.set_major_locator( ticker.MultipleLocator( 0.2 ) )\n ax1.xaxis.set_minor_locator( ticker.MultipleLocator( 0.1 ) )\n ax1.set_xlabel('wavelength ($\\mu$m)')\n ax1.set_ylabel('SN Flux or Filter Transmission\\n (arbitrary units)')\n\n ax1.set_xlim(0.6,2.0)\n ax1.set_ylim( 0.0, 0.7 )\n ax1.set_yticklabels([])\n\n ax1.text(1.27,0.6,'F127M,F125W',color='darkmagenta',fontsize=9, ha='center',va='center')\n ax1.text(1.53,0.6,'F153M,F160W',color='darkorange',fontsize=9, ha='center',va='center')\n\n fig.subplots_adjust( left=0.12, right=0.95, bottom=0.18, top=0.92 )", "title": "" }, { "docid": "ae5d60f574e3a55a887e434c7f27ecd2", "score": "0.58927673", "text": "def save_show(full_name, save, show, ext):\n plt.tight_layout()\n if save:\n plt.savefig(full_name+ext, bbox_inches='tight', transparent=True,\n pad_inches=0.1)\n if show:\n plt.show()\n else:\n plt.close()", "title": "" }, { "docid": "c010dbeda7604c2137b02fc1663b63f0", "score": "0.5888778", "text": "def glt_figure():\n r = r_input\n # first Gamma function\n gamma = 0.4\n s = transform(gamma)\n label = '$\\\\gamma$ = ' + str(gamma)\n data_plot(r, s, 'red', label)\n # second Gamma function\n gamma = 2.5\n s = transform(gamma)\n label = '$\\\\gamma$ = ' + str(gamma)\n data_plot(r, s, 'blue', label)\n matplotlib.pyplot.savefig(output + 'Power_Law_GLTs.jpg')\n matplotlib.pyplot.close()\n return", "title": "" }, { "docid": "514bdf8ac99f5611882a7b4d97bcc34e", "score": "0.588317", "text": "def save_plot(self, filename, a, title=\"\", xlabel=\"\", ylabel=\"\", color=plt.cm.hot, vmin=None, 
vmax=None):\n interactive = plt.isinteractive()\n fig = self.make_plot(a, title, xlabel, ylabel, color, vmin, vmax)\n plt.savefig(filename)\n if interactive:\n plt.ion()\n return fig\n else:\n plt.close()", "title": "" }, { "docid": "5cb8cae77d42a6683920367dc3ac8407", "score": "0.5866776", "text": "def dataPlot_plot(self,verbose=False,refresh=False,newobj=False,fullzoom=False):\n self.dataPlot_fig.canvas.set_window_title('1D spectrum of object '+str(self.currentobj))\n xlow, xhigh, ylow, yhigh = self.dataPlot_getwindowinfo()\n if fullzoom:\n xlow, xhigh, ylow, yhigh = self.DPxlow_full, self.DPxhigh_full, self.DPylow_full, self.DPyhigh_full\n #----------------- Define emission line list -----------------\n llist = MiGs.linelistdic(listversion='full')\n linelist = np.asarray([llist[key][1] for key in llist.keys()])\n linename = [llist[key][0] for key in llist.keys()]\n #----------------- Refreshing plot window-----------------\n if refresh:\n self.dataPlot_fig.clf() # clearing figure\n self.dataPlot_ax = self.dataPlot_fig.add_subplot(111)\n #----------------- Grab info from sliders -----------------\n smoothlevel = float(self.varslidersmooth.get())\n if verbose: print(' - Grabbed the Gauss smooth level ',smoothlevel,' from the slider')\n redshift = float(self.varsliderz.get())\n if verbose: print(' - Grabbed the redshift '+str(\"%.3f\" % redshift)+' from the slider')\n\n try:\n zbyhand = float(self.byhandz.get())\n if type(zbyhand) == float:\n redshift = zbyhand\n if verbose: print(' But the redshift',zbyhand,'was found in \"by-hand\" field so using that instead ')\n self.varsliderz.set(zbyhand)\n except:\n pass\n\n #----------------- Flambda spec -----------------\n xrangeflam = self.DPxrange\n contamplotted = False\n ymax = []\n ymin = []\n\n for ii in range(len(self.fits1Dfound)):\n color = self.DPcolor[ii]\n wave1D = self.DP_wave_all[ii]/self.DPxscale # wavelengths converted from A to micron\n flux1D = self.DP_flux_all[ii]\n flux1Derr = self.DP_fluxerr_all[ii]\n contam = self.DP_contam_all[ii]\n\n if (len(flux1D) >= 1):\n ymin.append(np.min(flux1D))\n ymax.append(np.max(flux1D))\n labstr = self.fits1Dfound[ii].split('/')[-1]\n if self.latex:\n labstr = labstr.replace('_','\\_')\n # - - - - - - - - - - Spectrum itself - - - - - - - - - -\n self.dataPlot_ax.plot(wave1D, flux1D, color=color,linestyle='-',\n linewidth=self.DPlwidth*1.5, alpha=0.40)\n\n # - - - - - - - - - - Smoothed spectrum - - - - - - - - - -\n filtersigma = smoothlevel\n flux1D_smooth = scipy.ndimage.filters.gaussian_filter1d(flux1D, filtersigma,cval=0.0)\n self.dataPlot_ax.plot(wave1D, flux1D_smooth, color=color,linestyle='-',\n linewidth=self.DPlwidth*1.5, alpha=1.0,label=labstr)\n\n # - - - - - Shaded error region around curve if requested - - - - -\n if (self.err1Dboxvar.get() != '0'):\n xwinmin, xwinmax, ywinmin, ywinmax = self.dataPlot_getwindowinfo()\n serr = flux1Derr\n serr[flux1Derr > 1e3] = 1e3 # fix errors to prevent \"OverflowError: Allocated too many blocks\"\n filllow = np.clip(flux1D,ywinmin,ywinmax)-serr\n fillhigh = np.clip(flux1D,ywinmin,ywinmax)+serr\n plt.fill_between(wave1D,filllow,fillhigh,alpha=0.20,color=color)\n\n # - - - - - - - - - - Contam curve is present - - - - - - - - - -\n if (contam != -99).any():\n self.dataPlot_ax.plot(wave1D, contam, color=color,linestyle='--',\n linewidth=self.DPlwidth, alpha=0.40)\n contam_smooth = scipy.ndimage.filters.gaussian_filter1d(contam, filtersigma,cval=0.0)\n self.dataPlot_ax.plot(wave1D, contam_smooth, color=color,linestyle='--',\n 
linewidth=self.DPlwidth, alpha=1.0)\n contamplotted = True\n\n # - - - - - - - - - - Sky spectrum - - - - - - - - - -\n if (self.skyboxvar.get() != '0'):\n objinfo = MiGs.get_objinfo(self.infofile,self.currentobj,self.col_infoid)\n if (np.max(wave1D) < 1.0) & (len(objinfo) == 1):\n fieldno = objinfo['FIELD_ID']\n skyMUSEfilename = glob.glob(self.MUSEskydatdir+'SKY*cdfs*-'+str(\"%.2d\" % fieldno)+'*av.fits')\n skyMUSE = afits.open(skyMUSEfilename[0])[1].data\n skywave = skyMUSE['lambda']/self.DPxscale\n skyent = np.where((skywave > np.min(wave1D)) & (skywave < np.max(wave1D)))[0]\n skywave = skywave[skyent]\n skylow = np.zeros(len(skywave))\n skyflux = skyMUSE['data'][skyent]\n skyhigh = skyflux/1.0\n elif self.skyspectrum:\n skywave = self.skydat['lam']\n skyent = np.where((skywave > np.min(wave1D)) & (skywave < np.max(wave1D)))[0]\n skywave = skywave[skyent]\n skylow = np.zeros(len(skywave))\n skyflux = self.skydat['flux'][skyent]\n skymax = np.sort(flux1D)[np.round(len(flux1D)*0.95)]\n skyhigh = skyflux/np.max(skyflux)*skymax\n else:\n skywave = wave1D\n skylow = np.zeros(len(skywave))-100.\n skyhigh = np.zeros(len(skywave))+100.\n\n plt.fill_between(skywave,skylow,skyhigh,alpha=0.3,color='black')\n skyhigh_smooth = scipy.ndimage.filters.gaussian_filter1d(skyhigh, filtersigma,cval=0.0)\n plt.fill_between(skywave,skylow,skyhigh_smooth,alpha=0.8,color='black')\n\n # set ranges based on spectra\n if (len(ymin) != 0) & (len(ymax) != 0):\n yrangeflam = [0.95*min(ymin), 1.05*max(ymax)]\n # if yrangeflam[0] < -0.01: yrangeflam[0] = -0.01\n # if yrangeflam[1] > 10.0: yrangeflam[1] = 10.0\n else:\n yrangeflam = 0.0, 1000.0\n\n if not newobj: # only check window if not plotting new object\n if (ylow != yrangeflam[0]) or (yhigh != yrangeflam[1]):\n yrangeflam = [ylow,yhigh]\n Dyrange = yrangeflam[1]-yrangeflam[0]\n self.dataPlot_ax.set_ylim(yrangeflam)\n\n if not newobj: # only check window if not plotting new object\n if (xlow != xrangeflam[0]) or (xhigh != xrangeflam[1]):\n xrangeflam = [xlow,xhigh]\n self.dataPlot_ax.set_xlim(xrangeflam)\n\n if self.latex:\n xlab = '$\\lambda / [\\mu\\mathrm{m}]$'\n ylab = '$f_\\\\lambda$ / ['+self.fluxunit+']'\n else:\n xlab = 'lambda / [micron]'\n ylab = 'f_lambda / ['+self.fluxunit+']'\n\n self.dataPlot_ax.set_xlabel(xlab)\n self.dataPlot_ax.set_ylabel(ylab)\n\n self.dataPlotManager.canvas.draw()\n\n # === plot emission lines for scale ===\n for ii in range(len(linelist)):\n lineposition = linelist[ii]/self.DPxscale*(redshift+1.0)\n self.dataPlot_ax.plot(np.zeros(2)+lineposition,yrangeflam,color='#006600',alpha=0.7,\n linestyle='-',linewidth=self.DPlwidth*2)\n\n if self.lineuncertainty:\n if (self.lineuncertainty <= 1.0) & (self.lineuncertainty > 0.0): # treat input as Delta z uncertainty\n zoffset = self.lineuncertainty\n elif self.lineuncertainty > 1.0: # treat input as Delta v uncertainty\n zoffset = self.lineuncertainty*(redshift+1.0) / 299792.458\n else:\n if self.vb: print(' WARNING: Invalid value of \"lineuncertainty\" using dz=0.1')\n zoffset = 0.1\n\n linexmin = ( (redshift-zoffset) +1) * linelist[ii]/self.DPxscale\n linexmax = ( (redshift+zoffset) +1) * linelist[ii]/self.DPxscale\n lineymin = yrangeflam[0]\n lineymax = yrangeflam[1]\n\n plt.fill_between(np.asarray([linexmin,linexmax]),np.zeros(2)+lineymin,np.zeros(2)+lineymax,\n alpha=0.2,color='#006600')\n\n voffset = zoffset * 299792.458 / (redshift+1.0)\n\n textpos = linelist[ii]/self.DPxscale*(redshift+1.0)\n if (textpos > xrangeflam[0]) & (textpos < xrangeflam[1]):\n 
self.dataPlot_ax.text(textpos,yrangeflam[0]+Dyrange*0.05,\n linename[ii],color='#006600',size=self.DPFsize-3.,rotation='vertical',\n horizontalalignment='right',verticalalignment='bottom')\n\n # === position legend ===\n box = self.dataPlot_ax.get_position()\n self.dataPlot_ax.set_position([box.x0, box.y0, box.width, box.height * 0.83])\n\n\n\n if (self.skyboxvar.get() != '0'):\n self.dataPlot_ax.plot(0,0,'black',alpha=0.8,label='Sky spectrum',linewidth=self.DPlwidth*2)\n if contamplotted:\n self.dataPlot_ax.plot(0,0,'black',alpha=0.8,label='Contamination',linewidth=self.DPlwidth,ls='--')\n self.dataPlot_ax.plot(0,0,'green',label='Lines at $z$ = '+str(\"%.3f\" % redshift),linewidth=self.DPlwidth*2)\n if self.lineuncertainty:\n linelab = 'Line uncertainty $z$ +/- '+str(\"%.4f\" % zoffset)+' (+/- '+str(\"%.f\" % voffset)+' km/s)'\n self.dataPlot_ax.plot(0,0,'#006600',alpha=0.4,label=linelab,linewidth=self.DPlwidth*5)\n\n leg = self.dataPlot_ax.legend(fancybox=True, loc='upper center',numpoints=1,prop={'size':self.DPFsize-3.},\n ncol=2,bbox_to_anchor=(0.5, 1.27))\n #leg.get_frame().set_alpha(0.7)\n\n self.dataPlotManager.canvas.draw()", "title": "" }, { "docid": "90d8c06e4b52fcc68f06707d44542f12", "score": "0.5853634", "text": "def plot_device_power_energy(sim_result, optimisation_model: pyo.Model, dev, filename=None, energy_fill_opacity=None):\n res = sim_result\n optimiser = optimisation_model\n dev_data = optimiser.all_devices[dev].dev_data\n device_name = \"{}:{}\".format(dev, dev_data.name)\n\n if dev_data.model == \"storagehydrogen\": # Fixme: replace with isinstance class type\n # isinstance(dev_data.model,oogeso.dto.DeviceStorageHydrogenData)\n carrier = \"hydrogen\"\n flow_title = \"Flow (Sm3/s)\"\n energy_storage_title = \"Energy storage( Sm3)\"\n else:\n carrier = \"el\"\n flow_title = \"Power (MW)\"\n energy_storage_title = \"Energy storage (MWh)\"\n # Power flow in/out\n df_flow = res.device_flow[dev, carrier].unstack(\"terminal\")\n if res.device_storage_energy is None:\n df_storage_energy = pd.DataFrame()\n else:\n df_storage_energy = res.device_storage_energy.unstack(\"device\")\n if dev in df_storage_energy:\n df_storage_energy = df_storage_energy[dev]\n df_storage_energy.index = df_storage_energy.index + 1\n\n if plotter == \"plotly\":\n fig = plotly.subplots.make_subplots(specs=[[{\"secondary_y\": True}]])\n for col in df_flow.columns:\n fig.add_scatter(\n x=df_flow.index,\n y=df_flow[col],\n line_shape=\"hv\",\n name=col,\n secondary_y=True,\n fill=\"tozeroy\",\n )\n if not df_storage_energy.empty:\n fig.add_scatter(\n x=df_storage_energy.index,\n y=df_storage_energy,\n name=\"storage\",\n secondary_y=False,\n fill=\"tozeroy\",\n ) # ,line=dict(dash='dot'))\n if energy_fill_opacity is not None:\n k = len(fig[\"data\"]) - 1\n linecol = plotly.colors.DEFAULT_PLOTLY_COLORS[k]\n opacity = energy_fill_opacity\n fillcol = \"rgba({}, {})\".format(linecol[4:][:-1], opacity)\n fig[\"data\"][k][\"fillcolor\"] = fillcol\n fig[\"data\"][k][\"fill\"] = \"tozeroy\"\n fig.update_yaxes(title_text=energy_storage_title, secondary_y=False, side=\"right\")\n fig.update_xaxes(title_text=\"Timestep\")\n fig.update_yaxes(title_text=flow_title, secondary_y=True, side=\"left\")\n\n elif plotter == \"matplotlib\":\n fig = plt.figure(figsize=(12, 4))\n plt.title(device_name)\n ax = plt.gca()\n df_flow.plot(ax=ax, drawstyle=\"steps-post\", marker=\".\")\n ax.set_xlabel(\"Timestep\")\n ax.set_ylabel(flow_title)\n tmin = df_flow.index.get_level_values(\"time\").min()\n tmax = 
df_flow.index.get_level_values(\"time\").max() + 1\n ax.set_ylim(0, dev_data.flow_max)\n ax.legend(loc=\"upper left\") # , bbox_to_anchor =(1.01,0),frameon=False)\n\n if not df_storage_energy.empty:\n ax2 = ax.twinx()\n ax2.grid(None)\n df_storage_energy.plot(ax=ax2, linestyle=\":\", color=\"black\")\n ax2.set_ylabel(\"Energy (MWh)\") # ,color=\"red\")\n if dev_data.model in [\"storage_el\"]:\n ax2.set_ylim(0, dev_data.max_E)\n elif dev_data.model in [\"well_injection\"]:\n ax2.set_ylim(-dev_data.max_E / 2, dev_data.max_E / 2)\n # ax2.tick_params(axis='y', labelcolor=\"red\")\n ax2.legend(loc=\"upper right\")\n ax.set_xlim(tmin, tmax)\n if filename is not None:\n plt.savefig(filename, bbox_inches=\"tight\")\n else:\n raise ValueError(f\"Plotter: {plotter} has not been implemented for plot device power energy.\")\n return fig", "title": "" }, { "docid": "2060cd57d78865d9d9ce45c8021331d8", "score": "0.5845508", "text": "def snapshot_figure(self,**kwargs):\n fig=plt.figure(1)\n fig.clf()\n ax=fig.add_subplot(1,1,1)\n\n self.plot_cell_scalar(ax, self.zi_agg['min'],'b-')\n self.plot_cell_scalar(ax, self.ei, 'g-')\n ax.plot(self.grd.cells_center()[:,0],\n np.maximum( self.eta_fn(self.grd.cells_center(),self.t),\n self.zi_agg['min']),\n color='orange')\n return fig", "title": "" }, { "docid": "45d6878eb5626ac55f0ec53e37eb0ff6", "score": "0.5827475", "text": "def save_and_show(\n fig, ax, save, show, close, filename, file_format, dpi, axis_off, extent\n):\n # save the figure if specified\n\n if save:\n start_time = time.time()\n\n # create the save folder if it doesn't already exist\n if not os.path.exists(settings.imgs_folder):\n os.makedirs(settings.imgs_folder)\n path_filename = os.path.join(\n settings.imgs_folder, os.extsep.join([filename, file_format])\n )\n\n if not isinstance(ax, (np.ndarray, list)):\n ax = [ax]\n if file_format == \"svg\":\n for ax in ax:\n # if the file_format is svg, prep the fig/ax a bit for saving\n ax.axis(\"off\")\n ax.set_position([0, 0, 1, 1])\n ax.patch.set_alpha(0.0)\n fig.patch.set_alpha(0.0)\n fig.savefig(\n path_filename,\n bbox_inches=0,\n format=file_format,\n facecolor=fig.get_facecolor(),\n transparent=True,\n )\n else:\n if extent is None:\n if len(ax) == 1:\n if axis_off:\n for ax in ax:\n # if axis is turned off, constrain the saved\n # figure's extent to the interior of the axis\n extent = ax.get_window_extent().transformed(\n fig.dpi_scale_trans.inverted()\n )\n else:\n extent = \"tight\"\n fig.savefig(\n path_filename,\n dpi=dpi,\n bbox_inches=extent,\n format=file_format,\n facecolor=fig.get_facecolor(),\n transparent=True,\n )\n log(\n \"Saved the figure to disk in {:,.2f} seconds\".format(\n time.time() - start_time\n )\n )\n\n # show the figure if specified\n if show:\n start_time = time.time()\n plt.show()\n # fig.show()\n log(\"Showed the plot in {:,.2f} seconds\".format(time.time() - start_time))\n # if show=False, close the figure if close=True to prevent display\n elif close:\n plt.close()\n\n return fig, ax", "title": "" }, { "docid": "ec78324954dfdb075c20d1a2561756bf", "score": "0.5824394", "text": "def _visualize_data(self, save_figure, show_figure):\n plt.figure()\n for i in range(1, len(self.output_ports) + 1):\n if i < len(self.OSCOPE_CHANNEL_COLORS) + 1:\n color = self.OSCOPE_CHANNEL_COLORS[i - 1]\n else:\n color = _generate_random_color_hexcode()\n plt.plot(self.result_data[0, :], self.result_data[i, :], color)\n\n legends = []\n for i in range(len(self.output_ports)):\n legends.append(self.output_ports[i])\n\n plt.legend(legends)\n 
plt.xlabel('Wavelength (nm)')\n plt.ylabel('Voltage (au)')\n plt.grid(True)\n plt.tight_layout()\n\n if save_figure:\n plt.savefig(self.path + \"graph\")\n if show_figure:\n plt.show()", "title": "" }, { "docid": "d616eec5545b9a7763b70eb0c448eb50", "score": "0.5822541", "text": "def save_plot(self, filename) -> None: \n #Save the results as an image\n filename = str(self.dimension) + 'd_model_results_'+str(self.layermax)+'_layers'\n self.plot().savefig(filename+'.png', dpi=300)\n pass", "title": "" }, { "docid": "3cba2a8dbdafe34fedc4e2ed915928c3", "score": "0.58088595", "text": "def plotPowerSpectra(self, iids, maxl=500., removeDipole=True,\n title=None, legendloc='upper left', color=None, labels=None,\n savefig=False, outfileRoot=None, plotkwargs=None):\n iids = self._checkPlottable(iids)\n # Check if the slicer has a power spectrum visualization.\n for iid in iids:\n slicer = self.slicers[iid]\n if (not hasattr(slicer, 'plotPowerSpectrum')):\n iids.remove(iid)\n if len(iids) == 0:\n warnings.warn('Removed all iids')\n return\n # Build a plot title.\n if title is None:\n title = self._buildPlotTitle(iids)\n if labels is None:\n labels = self._buildLegendLabels(iids)\n # Plot the data.\n fignum = None\n addLegend = False\n for i, iid in enumerate(iids):\n # If we're at the end of the list, add the legend.\n if i == len(iids) - 1:\n addLegend = True\n label = labels[i]\n # Set up plotDict.\n plotDict = {'title':title, 'label':label, 'addLegend':addLegend,\n 'legendloc':legendloc, 'color':color, 'maxl':maxl,\n 'removeDipole':removeDipole}\n if plotkwargs is not None:\n plotDict.update(plotkwargs[i])\n # Plot data.\n fignum = self.slicers[iid].plotPowerSpectrum(self.metricValues[iid],\\\n fignum=fignum, **plotDict)\n if savefig:\n if outfileRoot is not None:\n outroot = outfileRoot + title\n else:\n outroot = title\n outfile = self._buildOutfileName(self.iid_next, outfileRoot=outroot + title, plotType='ps')\n plt.savefig(os.path.join(self.outDir, outfile), figformat=self.figformat, dpi=self.dpi)\n if self.thumbnail:\n thumbname = self._getThumbName(outfile)\n thumbfile = os.path.join(self.outDir, thumbname)\n plt.savefig(thumbfile, dpi=72)\n if self.resultsDb:\n # Don't have a metricID corresonding to this combo of metrics, add to metric table.\n metricNames = self.joinMetricNames(iids)\n slicerNames = ' '.join(list(self.uniqueSlicerNames(iids)))\n simDataNames = ' '.join(list(self.uniqueSimDataNames(iids)))\n metadata = self.combineMetadata(iids)\n metricId = self.resultsDb.updateMetric(metricNames, slicerNames, simDataNames, None, metadata,\n None)\n displayDict = {}\n displayDict.update(self.displayDicts[iids[-1]])\n displayDict['order'] += 1\n displayDict['caption'] = self.captionFigure(iids, 'Combined power spectrum')\n if displayDict['subgroup'] is None:\n displayDict['subgroup'] = 'Combo PS'\n self.resultsDb.updateDisplay(metricId, displayDict)\n self.resultsDb.updatePlot(metricId, 'ComboPowerSpectrum', outfile)\n else:\n outfile = None\n return fignum, title, outfile", "title": "" }, { "docid": "3f3ea28abe867d72a4d959ced7040043", "score": "0.57929623", "text": "def onSave(self,event):\n x,y,e = self.calcSpec()\n dialog=wx.FileDialog(self,\"Name the spectm file\",\n wildcard=\"ASCII (dat)|*.dat\",\n style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)\n if dialog.ShowModal()==wx.ID_OK:\n with open(dialog.GetPath(),\"w\") as stream:\n for i in range(len(x)):\n stream.write(\"%f\\t%f\\t%f\\n\"%(x[i],y[i],e[i]))\n self.Show(False)", "title": "" }, { "docid": "f9f4fac216d2939119cbcd3f7359513c", 
"score": "0.5758722", "text": "def figure_exp_21(fig_type = '.png', save_traces = False, show_traces = False):\n temp, save_folder, specific_folder, temp, file_save = get_figure_data_settings(fig_no = 21)\n file2save_npz = 'fig21_' + '.npz'\n \n file2save = 'PTX_block' + fig_type\n folder_save = save_folder + specific_folder + '_results/'\n y_range = [-15,90]\n display.plot_data(folder_save, file2save,folder_save,file_save,x_scale = 'sec', \n y_range=y_range, time_range = [68000,76000], electrodes=[1,2,3],\n y_range_intra = y_range) \n if show_traces:\n plt.show()", "title": "" }, { "docid": "7f5c1ff0a56c56c29c8a567a018a547b", "score": "0.57454187", "text": "def plotSpectra(self, opening_path=None, save_fig=True, verbose=True, x=None, y=None):\n\n import matplotlib.pyplot as plt\n from astropy.visualization import astropy_mpl_style\n plt.style.use(astropy_mpl_style)\n from astropy.io import fits\n\n def wavelength(pixel, w1, dw):\n wave = w1 + (pixel - 1) * dw\n return wave\n\n def pixel(wave, w1, dw):\n pixel = old_div((wave - w1), dw) + 1\n return pixel\n\n fig, ax = plt.subplots(self.n_apertures + 1, 1)\n fig.set_size_inches(15, 5 * (self.n_apertures + 1))\n\n axall = ax[self.n_apertures].twiny()\n\n# defined self.spectra\n\n # reading in spectra files\n self.spectra = {}\n for i in range(1, self.n_apertures + 1):\n open_image = fits.open(\n self.path + self.image + '.{:04}'.format(i) + '.fits')\n if verbose:\n print(open_image.info())\n\n image_data = open_image[0].data\n\n self.spectra[i] = image_data\n if len(image_data) == 2:\n self.spectra[i] = image_data[0]\n\n open_image.close()\n\n image_data = self.spectra[i]\n\n if self.direction == 0:\n n_pix = len(image_data[1][0])\n else:\n n_pix = len(image_data)\n\n if opening_path != None:\n n_pix = len(image_data[opening_path])\n\n x_pix = np.linspace(1, n_pix, n_pix)\n # self.x_pix = x_pix\n x_wav = wavelength(x_pix, self.w1, self.dw)\n\n if self.direction == 0:\n y_val = image_data[1][0]\n else:\n y_val = image_data\n\n if verbose:\n print('x pixels: ', n_pix)\n print('x wavelengths:', x_wav[0], x_wav[-1])\n\n axi = ax[i - 1].twiny()\n ax[i - 1].plot(x_wav, y_val, linewidth=.5)\n axi.plot(x_pix, y_val, linewidth=.5)\n\n if x == None:\n # could be done much better; also fix i vs. 
i-1\n x = [x_wav.min(), x_wav.max()]\n\n ax[i - 1].set_xlim(x[0], x[1])\n axi.set_xlim(pixel(x[0], self.w1, self.dw),\n pixel(x[1], self.w1, self.dw))\n\n axi.set_title(self.name[i - 1], y=1.18)\n\n ax[i - 1].set_ylabel('Raw Flux Value')\n ax[i - 1].set_xlabel('Wavelength')\n axi.set_xlabel('X pixel value')\n\n # all plot\n ax[self.n_apertures].plot(x_wav, y_val, linewidth=.5)\n axall.plot(x_pix, y_val, linewidth=.5, label=self.name[i - 1])\n\n # all plot\n ax[self.n_apertures].set_xlim(x[0], x[1])\n axall.set_xlim(pixel(x[0], self.w1, self.dw),\n pixel(x[1], self.w1, self.dw))\n axall.set_title('all', y=1.18)\n ax[self.n_apertures].set_ylabel('Raw Flux Value')\n ax[self.n_apertures].set_xlabel('Wavelength')\n axall.set_xlabel('X pixel value')\n axall.legend(shadow=True, title='Spectra', fancybox=True,\n loc=\"upper left\", bbox_to_anchor=(1, 1))\n\n fig.show()\n\n del plt\n del astropy_mpl_style\n del fits\n\n if save_fig:\n import os\n directory = self.path + '/plots/'\n if not os.path.exists(directory):\n os.makedirs(directory)\n filenameend = '_x' + str(x[0]) + ':' + str(x[1]) + '.jpg'\n fig.savefig(directory + self.image + '_spectraplot' +\n filenameend, bbox_inches='tight')\n del os", "title": "" }, { "docid": "d367f865a4787ca58639ed0ec83655f7", "score": "0.5740698", "text": "def example():\n #Create test image\n xg,yg = np.meshgrid(np.linspace(-1,1,100),np.linspace(-1,1,100))\n img = np.polynomial.legendre.legval2d(xg,yg,[[0,1,0],[0,.5,0],[1,0,0]])\n #Take gradients\n gx,gy = np.gradient(img)\n #Create a boundary region\n rad = np.sqrt(xg**2+yg**2)\n img[rad>1] = np.nan\n gx[rad>1] = np.nan\n gy[rad>1] = np.nan\n #Reconstruct wavefront\n recon = southwell(gx,gy,1e-12,1.)\n\n #Plot results\n fig = plt.figure()\n fig.add_subplot(1,3,1)\n plt.imshow(img)\n plt.title('Original')\n plt.colorbar()\n fig.add_subplot(1,3,2)\n plt.imshow(recon)\n plt.title('Reconstructed')\n plt.colorbar()\n fig.add_subplot(1,3,3)\n plt.imshow(img-recon)\n plt.title('Residual')\n plt.colorbar()\n return recon", "title": "" }, { "docid": "a9b9e2597857c21f6d8ff3cfaeb1da26", "score": "0.57378095", "text": "def powerspectrum(self, eigenmodes=False):\n\n spacing = (np.max(self.times) - np.min(self.times))/len(self.times)\n frequencies = rfftfreq(len(self.times), spacing)\n\n\n markerfreqs = [sp.sqrt(self.D[i,i])/(np.pi*2) for i in range(self.n)]\n\n fig, ax = plt.subplots()\n fig.set_figwidth(12)\n fig.set_figheight(6)\n\n if eigenmodes:\n pseries = self.eigts\n labels = [r\"\\phi_{}\".format(i) for i in range(self.n)]\n title = \"Frequency Power Spectrum, eigenmode basis\"\n else:\n pseries = self.timeseries\n labels = [sp.latex(label) for label in self.q]\n title = \"Frequency Power Spectrum, coordinate basis\"\n\n for i in range(self.n):\n amplitude = rfft(pseries[:,i])\n power = np.abs(amplitude)**2\n ax.plot(frequencies, power, label=\"${}$\".format(labels[i]))\n\n ax.set_ylabel(\"Power\")\n ax.set_xlabel(\"Frequency ($s^{-1}$)\")\n ax.loglog()\n ax.set_title(title)\n ylim = ax.get_ylim()\n for freq in markerfreqs:\n line, = ax.plot([freq, freq], ylim, \":r\")\n ax.text(freq, ylim[0], \"{:.2f}\".format(freq), color=\"red\", va=\"bottom\", ha=\"center\",\n bbox=dict(facecolor='white', edgecolor='white'))\n line.set_label(\"Linear eigenmode frequencies\")\n ax.set_ylim(ylim)\n ax.legend()\n return fig", "title": "" }, { "docid": "1ea29a0e2565b3bfbdc396b8eed89850", "score": "0.57320136", "text": "def figure_exp_25(fig_type = '.png', show_traces = False):\n temp, save_folder, specific_folder, temp, file_save = 
get_figure_data_settings(fig_no = 25)\n #file2save_npz = 'fig25_' + '.npz'\n \n \n folder_save = save_folder + specific_folder + '_results/'\n file2save = 'exc_synch_zoom' + fig_type\n y_range = [-500,700] #[-15,15]\n time_range =[3480,3510] \n display.plot_data(folder_save, file2save,folder_save,file_save,x_scale = 'sec', \n y_range=[], time_range = time_range, electrodes=[0,1,2,3],\n y_range_intra = [-50,0]) \n \n file2save = 'exc_synch' + fig_type\n y_range = [-1000,1300] #[-15,15]\n time_range =[2800,3900]\n display.plot_data(folder_save, file2save,folder_save,file_save,x_scale = 'sec', \n y_range=y_range, time_range = time_range, electrodes=[0,1,2,3],\n y_range_intra = []) \n\n\n #time_range = [] # in ms\n data_details = dh.read_npzdata(folder_save, file_save, \"data\", \"scale\", \"fs\")\n file_save = 'exc_synch_' + fig_type\n #import pdb; pdb.set_trace()\n display.plot_data_one_mean(data_details, folder_save, file_save, x_scale = 'sec',\n title = 'excitatory synchrony', sweeps = [],\n electrodes = [0,1,2,3], y_range = y_range, time_range = time_range,\n remove_avg = True) \n if show_traces:\n plt.show()", "title": "" }, { "docid": "49f58631ed9d9c2e8be5b20cc2601f8b", "score": "0.5731807", "text": "def make_power_figure(\n resource_display_name: str,\n data: pd.DataFrame,\n forecast_data: Optional[pd.DataFrame],\n schedule_data: Optional[pd.DataFrame],\n show_consumption_as_positive: bool,\n shared_x_range: Range1d,\n tools: List[str] = None,\n sizing_mode=\"scale_width\",\n) -> Figure:\n if show_consumption_as_positive:\n title = \"Electricity consumption of %s\" % resource_display_name\n else:\n title = \"Electricity production from %s\" % resource_display_name\n if data.empty:\n title = title.replace(\"Electricity\", \"Prognosed\")\n\n return create_graph(\n data,\n unit=\"MW\",\n legend_location=\"top_right\",\n legend_labels=(\"Actual\", \"Forecast\", None)\n if schedule_data is None or schedule_data[\"event_value\"].isnull().all()\n else (\"Actual\", \"Forecast\", \"Schedule\"),\n forecasts=forecast_data,\n schedules=schedule_data,\n title=title,\n x_range=shared_x_range,\n x_label=\"Time (resolution of %s)\"\n % determine_resolution(data, forecast_data, schedule_data),\n y_label=\"Power (in MW)\",\n show_y_floats=True,\n tools=tools,\n sizing_mode=sizing_mode,\n )", "title": "" }, { "docid": "37a20063aa23c811d51ecf216ce035e7", "score": "0.5729813", "text": "def plot_datamap(self):\n plt.imshow(self.data)\n plt.clim(0, self.max_charge+1)\n cbar = plt.colorbar()\n cbar.set_label(\"Threshold (electrons)\")\n filename = f\"{self.basename}datamap{self.feeid}.{self._plot_extension}\"\n print(f\"Image stored in {filename}\")\n plt.savefig(filename, bbox_inches='tight')\n plt.close()", "title": "" }, { "docid": "884f95b900d4ddeeeea5460e77083bb1", "score": "0.5714797", "text": "def save_fig(self, filename, ext='png', close=True, verbose=False):\n import os.path\n \n if self.minor == 'minorxy': \n path=('/Users/vprzybylo/Desktop/icefiles/agg_model/agg_notes/graphics/python/'+\n str(len(self.clusters))+'crystals/'+str(len(self.clusters))+'xtals_hist/minorxy/')\n\n else:\n path=('/Users/vprzybylo/Desktop/icefiles/agg_model/agg_notes/graphics/python/'+\n str(len(self.clusters))+'crystals/'+str(len(self.clusters))+'xtals_hist/depth/')\n\n filename = \"%s.%s\" % (filename, ext)\n if path == '':\n path = '.'\n\n # If the directory does not exist, create it\n if not os.path.exists(path):\n os.makedirs(path)\n\n # The final path to save to\n savepath = os.path.join(path, filename)\n\n if 
verbose:\n print(\"Saving figure to '%s'...\" % savepath),\n\n # Actually save the figure\n plt.savefig(savepath)\n\n # Close it\n if close:\n plt.close() \n \n \n \"\"\"Utilities for running ice particle simulations.\"\"\"", "title": "" }, { "docid": "f4cbd6649fcb1b72581321721ae60d7e", "score": "0.569858", "text": "def plot_spectrum(self, output_dir='output/', units='si', verbose=True):\n if units == 'si':\n flux = self.flux_si\n fluxerr = self.fluxerr_si\n elif units == 'jy':\n flux = self.flux_jy\n fluxerr = self.fluxerr_jy\n else:\n raise ValueError('Units must be one of (\"si\", \"jy\")')\n\n \"\"\"\n Smooth spectrum? Well, a window length of 5 is basically no smoothing\n for this resolution of spectrum. Might as well skip it.\n\n # ax.plot(self.wave, flux, '-', lw=2, label=self.basename)\n # smoothed_flux = helpers.smooth(flux, window_len=5)\n # ax.plot(self.wave, smoothed_flux, '--', lw=1, label='smoothed')\n \"\"\"\n\n # Plot full cont subtracted spectrum\n fig, ax = plt.subplots(figsize=(9, 6))\n ax.errorbar(self.wave, flux, fluxerr, color='r', ecolor='0.45',\n lw=2, elinewidth=1)\n\n # Set plot parameters.\n ax.axhline(y=0, color='k', ls='-', zorder=-10, lw=1)\n ax.set_xlabel('Wavelength (micron)', fontsize=12)\n ax.set_ylabel(r'Flux density (${\\rm W}/{\\rm m}^2$)', fontsize=12)\n ax.set_title(self.basename + ' -- Full continuum-subtracted spectrum')\n ax.grid(ls=':')\n ax.minorticks_on()\n ax.tick_params(direction='in', which='both', right=True, top=True)\n\n # Save and close.\n pdf_filename = output_dir + self.basename + '.pdf'\n fig.savefig(pdf_filename, format='pdf', bbox_inches='tight')\n plt.close()\n fig.clear()\n\n if verbose:\n print('Saved: ', pdf_filename)", "title": "" }, { "docid": "8d8478fc1d363bf179485463d7056083", "score": "0.5696789", "text": "def __call__(self) -> None:\n self.save_figure()\n print(self.export_string)\n return", "title": "" }, { "docid": "bd8f7fbac57b8f8bd4731b5f4d60eb8d", "score": "0.5694435", "text": "def figure_exp_20(fig_type = '.png', save_traces = False, show_traces = False):\n temp, save_folder, specific_folder, temp, file_save = get_figure_data_settings(fig_no = 20)\n file2save_npz = 'fig20_' + '.npz'\n \n file2save = 'interictal' + fig_type\n folder_save = save_folder + specific_folder + '_results/'\n y_range = [-15,90]\n display.plot_data(folder_save, file2save,folder_save,file_save,x_scale = 'sec', \n y_range=y_range, time_range = [51000,59000], electrodes=[1,2,3],\n y_range_intra = y_range) \n\n if save_traces:\n #it must be done only if the recording was gap free but \n #it was not saved in the trace format yet\n\n #display.plot_data(folder_save,file2save,folder_save,file_save,x_scale = 'ms')#,time_range=time_range)\n dh.trigger_on_spike(folder_save, file_save, folder_save, file2save_npz, thresh = 25, el=3,\n time_range=[-200,200], up = True, center_on_peak = True)\n display.plot_data(folder_save,file2save,folder_save,file2save_npz,x_scale = 'ms', \n electrodes = [1,2,3],y_range =y_range)\n #import pdb; pdb.set_trace() \n\n time_range = [100,300] # in ms\n data_details = dh.read_npzdata(folder_save, file2save_npz, \"data\", \"scale\", \"fs\")\n file_save = 'interictal' + fig_type\n #import pdb; pdb.set_trace()\n display.plot_data_one_mean(data_details, folder_save, file_save, x_scale = 'ms',\n title = 'interictal events', sweeps = [5,6,4,7,8,9,10,11,12,13],\n electrodes = [1,2,3], y_range = [], time_range = time_range,\n remove_avg = True) \n if show_traces:\n plt.show()", "title": "" }, { "docid": 
"d30dfb5d718e491f833b47b8ce2c709a", "score": "0.5688892", "text": "def power_map(ice_power_grid_file, floorplan_file, output_format, pd_min, pd_max):\n ptrace = load_3DICE_grid_file(ice_power_grid_file, convert_K_to_C=False)\n \n flp = Floorplan.from_file(floorplan_file)\n px_height_mm = _get_px_size_mm_from_flp_ttrace(flp, ptrace)\n px_height_cm = 10 * px_height_mm\n px_area_cm_sq = px_height_cm * px_height_cm\n \n axes = plot_power_density(ptrace / px_area_cm_sq, pd_min=pd_min, pd_max=pd_max)\n \n for step, ax in enumerate(axes):\n fig = ax.get_figure()\n figname = output_format.format(step=step)\n fig.savefig(figname)", "title": "" }, { "docid": "380d0c5d22d53347394598f13d81b505", "score": "0.56859505", "text": "def saveplot_energy_resolution(SimuE, RecoE, Outfile=\"EnergyResolution.png\", cta_site=None):\n\n ax = plot_energy_resolution(SimuE, RecoE)\n\n if cta_site != None:\n ax = plot_energy_resolution_cta_requirements(cta_site, ax=ax)\n\n plt.savefig(Outfile, bbox_inches=\"tight\", format='png', dpi=200)\n plt.close()\n return ax", "title": "" }, { "docid": "43486701220f331f3dc8067926c43edb", "score": "0.56852216", "text": "def _save_raster(self, dpi=300):\n self.component.do_layout(force=True)\n # NOTE saving only works properly when dpi is a multiple of 72\n gc = PlotGraphicsContext((int(self.component.outer_width),\n int(self.component.outer_height)),\n dpi=np.ceil(dpi / 72.0)*72)\n gc.render_component(self.component)\n gc.save(self.filename)", "title": "" }, { "docid": "0f0be408f56b19f07783a47300234c34", "score": "0.5676913", "text": "def figure_exp_8(fig_type = '.png', save_traces = False):\n temp, save_folder, specific_folder, temp, file_save = get_figure_data_settings(fig_no = 8)\n file2save_npz = 'fig8_' + '.npz'\n file2save = 'part_of_trace' + fig_type\n \n folder_save = save_folder + specific_folder + '_results/'\n #display.plot_data(folder_save, file2save,folder_save,file_save,x_scale = 'sec', \n # electrodes = [1,2,3], y_range=[-100,100])\n\n if save_traces:\n #it must be done only if the recording was gap free but \n #it was not saved in the trace format yet\n\n #display.plot_data(folder_save,file2save,folder_save,file_save,x_scale = 'ms')#,time_range=time_range)\n dh.trigger_on_spike(folder_save, file_save, folder_save, file2save_npz, thresh = -30, el=2,\n time_range=[-200,200], up = False, center_on_peak = True)\n #display.plot_data(folder_save,file2save,folder_save,file2save_npz,x_scale = 'ms', \n # electrodes = [1,2,3],y_range =[-100, 100])\n\n\n time_range = [100,400] # in ms\n data_details = dh.read_npzdata(folder_save, file2save_npz, \"data\", \"scale\", \"fs\")\n file_save = 'interictal' + fig_type\n #import pdb; pdb.set_trace()\n display.plot_data_one_mean(data_details, folder_save, file_save, x_scale = 'ms',\n title = 'interictal events', sweeps = [0,1,2,3,4,5,6],\n electrodes = [1,2,3], y_range = [-100,100], time_range = time_range,\n remove_avg = True) \n plt.show()", "title": "" }, { "docid": "c5790bdfab8dc8d330667ffc9f4afad6", "score": "0.566134", "text": "def savefigure(title, xlabel, ylabel, Y, labels, x, path_save_dir, type = \"linear\"):\n plt.figure(figsize = (10,8))\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.grid(True)\n plt.ylim((0,16))\n if len(Y) != len(labels):\n print(\"Number of labels and plots don't match!\")\n return\n\n for i in range(len(Y)):\n y = Y[i]\n if type == \"linear\":\n plt.plot(x, y, label = labels[i])\n elif type == \"semilogx\":\n plt.semilogx(x, y, label = labels[i])\n plt.legend()\n try:\n 
os.mkdir(path_save_dir)\n print(\"folder created at \" + path_save_dir)\n except FileExistsError:\n print(\"folder already exists at \" + path_save_dir)\n path_save_fig = join(path_save_dir, title + \".png\")\n plt.savefig(path_save_fig)\n print(title + \".png saved at \" + path_save_fig)\n plt.close()\n\n return", "title": "" }, { "docid": "bdffa974dfa1303823d5845e18baa613", "score": "0.56549466", "text": "def makeAveragePSRadialFigure(radialFreq,averagePSRadial,figureFileName): \r\n pylab.figure()\r\n pylab.loglog(radialFreq,averagePSRadial,'.')\r\n pylab.xlabel(\"Frequecy\")\r\n pylab.ylabel(\"Radial Power Spectrum\")\r\n pylab.savefig(figureFileName)", "title": "" }, { "docid": "4d8fcd69d7a9718b8a50bf7dc91282ef", "score": "0.5653953", "text": "def save_and_show(fig, ax, save, show, filename, file_format, dpi):\n # save the figure if specified\n if save:\n start_time = time.time()\n \n # create the save folder if it doesn't already exist\n if not os.path.exists(_imgs_folder):\n os.makedirs(_imgs_folder)\n path_filename = '{}/{}.{}'.format(_imgs_folder, filename, file_format)\n if file_format == 'svg':\n # if the file_format is svg, prep the fig/ax a bit for saving\n ax.axis('off')\n ax.set_position([0, 0, 1, 1])\n ax.patch.set_alpha(0.)\n fig.patch.set_alpha(0.)\n fig.savefig(path_filename, bbox_inches=0, transparent=True, format=file_format)\n else:\n fig.savefig(path_filename, dpi=dpi, bbox_inches='tight', format=file_format)\n log('Saved the figure to disk in {:,.2f} seconds'.format(time.time()-start_time))\n \n # show the figure if specified\n if show:\n start_time = time.time()\n plt.show()\n log('Showed the plot in {:,.2f} seconds'.format(time.time()-start_time))\n \n return fig, ax", "title": "" }, { "docid": "7f7689a095a06ff6fd609436f134c978", "score": "0.5650501", "text": "def figure_exp_7(fig_type = '.png', save_traces = False,show_traces = False):\n temp, save_folder, specific_folder, temp, file_save = get_figure_data_settings(fig_no = 7)\n file2save_npz = 'fig6_' + '.npz'\n file2save = 'part_of_trace' + fig_type\n \n folder_save = save_folder + specific_folder + '_results/'\n #display.plot_data(folder_save, file2save,folder_save,file_save,x_scale = 'sec', \n # electrodes = [1,2,3], y_range=[-1000,1000])\n \n if save_traces:\n #it must be done only if the recording was gap free but \n #it was not saved in the trace format yet\n\n #display.plot_data(folder_save,file2save,folder_save,file_save,x_scale = 'ms')#,time_range=time_range)\n # if aligned on the second electrode:\n #dh.trigger_on_spike(folder_save, file_save, folder_save, file2save_npz, thresh = -100, el=2,\n # time_range=[-600,600], up = False, center_on_peak = True)\n \n display.plot_data(folder_save,file2save,folder_save,file2save_npz,x_scale = 'ms', \n electrodes = [0,1,2,3],y_range =[-1000, 1000])\n \n \n different_events = [[1,4,5,7,16], [9,10],[2,6,8]]\n files_save = [\"synch_I\", \"synch_II\", \"synch_III\"] \n titles = [\"synch I, 6 events\", \"synch II, 2 events\", \"synch III, 3 events\"]\n #y_ranges = [[-500,1200],[-300,800],[-100,100]]\n \n time_range = [0,1500] # in ms\n data_details = dh.read_npzdata(folder_save, file2save_npz, \"data\", \"scale\", \"fs\")\n \n y_range = [] #[-500,500]\n \n for next_id in range(len(different_events)):\n\n file_save = files_save[next_id] + fig_type\n\n #import pdb; pdb.set_trace()\n display.plot_data_one_mean(data_details, folder_save, file_save, x_scale = 'ms',\n title = titles[next_id], sweeps = different_events[next_id],\n electrodes = [0,1,2,3], y_range = y_range, 
time_range = time_range,\n remove_avg = True) \n if show_traces:\n plt.show()", "title": "" }, { "docid": "498a15bb122e6b40944eacf433c105c0", "score": "0.56395435", "text": "def figure_exp_6_1(fig_type = '.png', save_traces = False,show_traces = False):\n temp, save_folder, specific_folder, temp, file_save = get_figure_data_settings(fig_no = 6.1)\n file2save_npz = 'fig6_1_' + '.npz'\n file2save = 'part_of_trace' + fig_type\n \n folder_save = save_folder + specific_folder + '_results/'\n #display.plot_data(folder_save, file2save,folder_save,file_save,x_scale = 'sec', \n # electrodes = [0,2,3], y_range=[-1000,1000])\n \n if save_traces:\n #it must be done only if the recording was gap free but \n #it was not saved in the trace format yet\n\n #display.plot_data(folder_save,file2save,folder_save,file_save,x_scale = 'ms')#,time_range=time_range)\n dh.trigger_on_spike(folder_save, file_save, folder_save, file2save_npz, thresh = -500, el=2,\n time_range=[-300,1500], up = False, center_on_peak = False)\n display.plot_data(folder_save,file2save,folder_save,file2save_npz,x_scale = 'ms', \n electrodes = [0,2,3],y_range =[-300, 1500])\n \n \n fig_save = 'large_synch_2' + fig_type\n time_range = [0,800] # in ms\n\n data_details = dh.read_npzdata(folder_save, file2save_npz, \"data\", \"scale\", \"fs\")\n display.plot_data_one_mean(data_details, folder_save, fig_save, x_scale = 'ms',\n title = 'large Gabaa synchronised events', sweeps = [8, 6, 7,9,10],\n y_range_intra = [-10, 20], electrodes = [0,2], y_range = [-1000,1000],\n remove_avg = True, time_range = time_range) \n if show_traces:\n plt.show()", "title": "" }, { "docid": "cb719770867a670256fccb681d4a7e18", "score": "0.5635164", "text": "def simple_plot():\n # https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/simple_plot.html#sphx-glr-gallery-lines-bars-and-markers-simple-plot-py\n # Data for plotting\n # Modified to write only to a file and close the figure.\n t = np.arange(0.0, 2.0, 0.01)\n s = 1 + np.sin(2 * np.pi * t)\n\n\n fig, ax = plt.subplots()\n ax.plot(t, s, color= \"green\")\n\n ax.set(xlabel='time (s)', ylabel='voltage (mV)',\n title='About as simple as it gets, folks')\n ax.grid()\n print('Writing out figure to simple_plot.png')\n fig.savefig(\"simple_plot.png\")\n plt.close()", "title": "" }, { "docid": "379b205a96acc6e7f192e64206e9851e", "score": "0.56266963", "text": "def savePNG(self):\n pylab.savefig(self.outPathEntry.get())", "title": "" }, { "docid": "b630d43f26dcab246076ce0c1e541d36", "score": "0.5624538", "text": "def save_fig(location=\"./\", name=\"plot\"):\n plt.savefig(location + name + \".pdf\")\n plt.savefig(location + name + \".png\")", "title": "" }, { "docid": "bf29bade97a9d33473b47f07ada86cef", "score": "0.5624398", "text": "def _save_power_outputs(self, model):\n\n for i in model.SCENARIOS:\n # get the power output\n power_output_name = self.bidding_model_object.power_output\n model.fs[i].power_output_ref = pyo.Reference(\n getattr(model.fs[i], power_output_name)\n )\n\n return", "title": "" }, { "docid": "a5bb0e7294d2693cfeb2ddc3a7e0ca2c", "score": "0.5618493", "text": "def make_fig(self):\n\n self.fig, ax = plt.subplots()\n self.fig.show()\n\n self.im = ax.imshow(self.numpy_rast)\n\n if self.title:\n self.fig.suptitle(self.title, fontsize = 20)\n\n self.im.set_data(self.numpy_rast)\n self.fig.canvas.draw()\n return", "title": "" }, { "docid": "3f52d65408467a8a45a578c46574dbea", "score": "0.56108814", "text": "def plot_stave(self, force_binary):\n 
plt.figure(figsize=(self.modules*(ALPIDE_COLS//ALPIDE_ROWS)*STAVE_SIZE_MODIFIER,QUAD_STAVES*STAVE_SIZE_MODIFIER))\n stave = self.remap_to_stave()\n assert np.count_nonzero(stave<0) == 0\n stave = stave/float(self.n_events)\n stave = self.interpolate(stave,scale=20)\n plt.imshow(stave, cmap=\"tab20c\", norm=mpl.colors.LogNorm(vmin=1./float(self.n_events), vmax=1.))#plt.cm.gray)#\"binary\")\n plt.xticks(list(range(0,ALPIDE_COLS*CHIP_PER_LANE*self.modules,ALPIDE_COLS*CHIP_PER_LANE))+[ALPIDE_COLS*self.modules*CHIP_PER_LANE-1])\n plt.xlabel('Pixel')\n plt.yticks(list(range(0,ALPIDE_ROWS*QUAD_STAVES +1,ALPIDE_ROWS)))\n plt.ylabel('Pixel')\n cbar = plt.colorbar()\n cbar.set_label(\" Hits / Event\")\n filename = f\"{self.basename}stave{self.feeid}.{self._plot_extension}\"\n print(f\"Image stored in {filename}\")\n plt.savefig(filename, bbox_inches='tight', dpi=1200)\n plt.close()", "title": "" }, { "docid": "e42c6f100d80eaa3e313d36f526015a7", "score": "0.56107545", "text": "def construct_powerspectrum(self, recalculate: bool = False) -> None:\n print('Constructing the power spectrum...')\n start_time = time.time()\n self.powerspectrum.construct_data(recalculate=recalculate)\n end_time = time.time()\n print(f' Done in {end_time - start_time:0.2f}s')", "title": "" }, { "docid": "55b7e5a239aa2a1f6b7b4cdab910544b", "score": "0.5601454", "text": "def plot_spec(spec,transition_name, \n\t\t\t\tdwave = 10., dflux_window_up = 0.0, dflux_window_down=0.0):\n\twave,flux,error,dfp,dfm = spec\n\tline_region = np.median(wave)\n\n\tfig1 = pl.figure(figsize=(16,8))\n\tax1 = fig1.add_subplot(111)\n\tax1.clear()\n\tax1.set_xlabel(r'$\\lambda$ ($\\AA$)')\n\tax1.set_ylabel(r'$\\rm Flux$')\n\n\t# Rest central wavelength vertical line\n\trest_central_wave = transition_dict[transition_name].wave\n\tpl.axvline(rest_central_wave,lw=2,ls='--',color = 'b')\n\n\t# Original data. 
\n\tpl.step(wave,flux,color = 'k')\n\tpl.step(wave,error,color = 'r')\n\tpl.xlim(Utilities.zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0])\n\n\t# Two points setting boundary of data for calculation \n\tx_record = []; y_record = []\n\tpoints_to_fit, = pl.plot(x_record,y_record,'bo',ms = 8)\n\n\t# Gaussian Lines\n\tgauss_wave = []; gauss_flux = []\n\tgauss_fit, = pl.plot(gauss_wave,gauss_flux,'b',lw = 1.5)\n\n\t# define dummy variables\n\tdwave = 10;\n\tbig_shift = 0.5; \t small_shift = 0.1\n\tbig_zoom = 0.5; \t zoom = 0.1\n\tdflux_window_up = 0.0; dflux_window_down = 0.0\n\tflux_zoom = 2e-15; \t big_flux_zoom = 5e-15\n\n\t########################################################################################\n\t\n\tdef shift_spec(event):\n\t\t\"\"\"\n\t\tInteractive click/plotting Event \n\n\t\tParameters\n\t\t---------------------------------------------------------------------------\n\t\tevent: obj\n\t\t\tMouse clicks or button press when focus on the plotting window\n\n\t\tReturns\n\t\t---------------------------------------------------------------------------\n\t\tcentroid_wave: float\n\t\t\tFitted gaussian centroid of observed wavelength for the input transition; it's \n\t\t\tused for calculating offset against the rest wavelength\t\t\n\t\t\"\"\"\n\t\tglobal transition_name; global centroid_wave\n\t\tglobal line_region; global dwave\n\t\tglobal dflux_window_up; global dflux_window_down\n\t\t\n\t\tix, iy = event.xdata, event.ydata\n\t\t#######################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t\t\t #\n\t\t#\tWINDOW Control \t\t\t\t\t \t\t\t\t\t \t\t\t #\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t#######################################################################\n\t\tif event.key == '}':\n\t\t\t# Move plotting and spec to right by amount 'big_shift'\n\t\t\tline_region += big_shift\n\t\t\tpl.xlim(Utilities.zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0])\n\t\t\n\t\telif event.key == '{':\n\t\t\t# Move plotting and spec to left by amount 'big_shift'\n\t\t\tline_region -= big_shift\n\t\t\tpl.xlim(Utilities.zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0])\n\t\t\n\t\telif event.key == ']':\n\t\t\t# Move plotting and spec to right by amount 'small_shift'\n\t\t\tline_region += small_shift\n\t\t\tpl.xlim(Utilities.zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0])\n\t\t\n\t\telif event.key == '[':\n\t\t\t# Move plotting and spec to left by amount 'small_shift'\n\t\t\tline_region -= small_shift\n\t\t\tpl.xlim(Utilities.zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0])\n\t\t\n\t\telif event.key == '-':\n\t\t\t# Zoom in horizontally in wavelength by amount 'zoom'\n\t\t\tdwave += zoom\n\t\t\tpl.xlim(Utilities.zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0])\n\t\t\n\t\telif event.key == '=':\n\t\t\t# Zoom out horizontally in wavelength by amount 'zoom'\n\t\t\tdwave -= zoom\n\t\t\tpl.xlim(Utilities.zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0])\n\t\t\n\t\telif event.key == '_':\n\t\t\t# Zoom in horizontally in wavelength by amount 'big_zoom'\n\t\t\tdwave += big_zoom\n\t\t\tpl.xlim(Utilities.zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0])\n\t\t\n\t\telif event.key == '+':\n\t\t\t# Zoom out horizontally in wavelength by amount 'big_zoom'\n\t\t\tdwave -= big_zoom\n\t\t\tpl.xlim(Utilities.zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0])\n\t\t\n\t\telif event.key 
=='b':\n\t\t\t# Zoom (b)ottom in flux by amount 'flux_zoom'\n\t\t\tdflux_window_up += flux_zoom\n\t\t\tpl.ylim(Utilities.zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[1])\n\t\t\n\t\telif event.key =='B':\n\t\t\t# UN-Zoom (b)ottom in flux by amount 'flux_zoom'\n\t\t\tdflux_window_up -= flux_zoom\n\t\t\tpl.ylim(Utilities.zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[1])\n\t\t\n\t\telif event.key =='t':\n\t\t\t# Zoom in the (t)op in flux by amount 'flux_zoom'\n\t\t\tdflux_window_down -= flux_zoom\n\t\t\tpl.ylim(Utilities.zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[1])\t\t\t\n\t\t\n\t\telif event.key =='T':\n\t\t\t# Zoom out the (t)op in flux by amount 'flux_zoom'\n\t\t\tdflux_window_down += flux_zoom\n\t\t\tpl.ylim(Utilities.zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[1])\n\t\t\n\t\telif event.key =='m':\n\t\t\t# Zoom in the bottom in flux by amount 'big_flux_zoom'\n\t\t\tdflux_window_up += big_flux_zoom\n\t\t\tpl.ylim(Utilities.zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[1])\n\t\t\n\t\telif event.key =='M':\n\t\t\t# Zoom out the bottom in flux by amount 'big_flux_zoom'\n\t\t\tdflux_window_up -= big_flux_zoom\n\t\t\tpl.ylim(Utilities.zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[1])\n\t\t\n\t\telif event.key =='u':\n\t\t\t# Zoom in the top flux by amount 'big_flux_zoom'\n\t\t\tdflux_window_down -= big_flux_zoom\n\t\t\tpl.ylim(Utilities.zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[1])\t\t\t\n\t\t\n\t\telif event.key =='U':\n\t\t\t# Zoom out the top in flux by amount 'big_flux_zoom'\n\t\t\tdflux_window_down += big_flux_zoom\n\t\t\tpl.ylim(Utilities.zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[1])\n\t\t\n\t\telif event.key =='r':\n\t\t\t# (r)eplot the spectral window from the original starting point\n\t\t\tdwave = 10; dflux_window_up = 0.0; dflux_window_down = 0.0\n\t\t\tline_region = np.median(wave)\n\t\t\tpl.xlim(Utilities.zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0])\n\t\t\tpl.ylim(Utilities.zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[1])\n\t\telif event.key == '?':\n\t\t\tprint '\\n'\n\t\t\tUtilities.printLine()\n\t\t\tprint '?\tShow keys map (What is shown here)'\n\n\t\t\tUtilities.printLine()\n\t\t\tprint 'WINDOW CONTROL KEYS:'\n\t\t\tprint '}\t\tshift to right with 0.5 Angstrom'\n\t\t\tprint '{\t\tshift to left with 0.5 Angstrom'\n\t\t\tprint ']\t\tshift to right with 0.1 Angstrom'\n\t\t\tprint '[\t\tshift to left with 0.1 Angstrom'\n\t\t\tprint 'shift +/-\tZoom in/out by 0.5 Angstrom'\n\t\t\tprint '+/-\t\tZoom in/out by 0.1 Angstrom'\n\t\t\tprint 'T/t\t\tZoom top by 1e-15'\n\t\t\tprint 'B/b\t\tZoom bottom by 1e-15'\n\t\t\tprint 'U/u\t\tZoom top by 5e-15'\n\t\t\tprint 'M/m\t\tZoom bottom by 5e-15'\n\t\t\tprint 'r\t\treplot'\n\n\t\t\tUtilities.printLine()\n\t\t\tprint 'FITTING SPEC KEYS:'\n\t\t\tprint 'a\tAdd points'\n\t\t\tprint 'shift+g\tFit Gaussian'\n\t\t\tUtilities.printLine()\n\n\t\t#######################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t\t\t #\n\t\t#\tFitting Control \t\t\t\t\t \t\t\t\t\t \t\t #\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t#######################################################################\n\t\telif event.key == 'a':\n\t\t\t# Add 2 Points setting boundary for fitting data\n\n\t\t\tif len(x_record) < 2:\n\t\t\t\tx_record.append(ix); y_record.append(iy)\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tdel x_record[:]; del 
y_record[:]\n\t\t\t\tx_record.append(ix); y_record.append(iy)\n\t\t\t\t\n\t\telif event.key == 'G':\n\t\t\t# Fitt a Gaussian based on the data selected by the two points \n\t\t\t# in spectrum\n\n\t\t\tif not x_record: \n\t\t\t\tprint 'No data selected to fit.'\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tp1,p2 = np.transpose(np.array([x_record,y_record]))\n\n\t\t\t\tx1,y1 = p1; x2,y2 = p2\n\t\t\t\ttemp_spec = Utilities.Select_Data(spec,[x1,x2])\n\t\t\t\testimated_cont_level = np.mean((y1,y2))\n\t\t\t\tgauss_params = Utilities.Fit_Gaussian(temp_spec,estimated_cont_level)\n\n\t\t\t\tif gauss_params:\n\t\t\t\t\tamplitude,centroid_wave,sigma_width = gauss_params\n\n\t\t\t\t\t# Apparent column density \n\t\t\t\t\tlogN = Utilities.ComputeAppColumn(temp_spec,transition_name)\n\n\t\t\t\t\t# Print out results of gaussian fit \n\t\t\t\t\tUtilities.Print_LineInfo(gauss_params,logN,transition_name)\n\n\t\t\t\t\t# Make the plot to show goodness of fit\n\t\t\t\t\tgauss_flux = Utilities.Gaussian_function(temp_spec[0],amplitude,centroid_wave,sigma_width)\n\t\t\t\t\tgauss_wave = temp_spec[0];\n\t\t\t\t\tgauss_fit.set_xdata(gauss_wave)\n\t\t\t\t\tgauss_fit.set_ydata(gauss_flux + estimated_cont_level)\n\t\t\t\telse:\n\t\t\t\t\tpass\n\n\t\tpoints_to_fit.set_xdata(x_record)\n\t\tpoints_to_fit.set_ydata(y_record)\n\n\t\tpl.draw() # needed for instant response. \n\n\tciv = fig1.canvas.mpl_connect('key_press_event', shift_spec)\n\tpl.show()\n\n\t# Exit function properly\n\ttry:\n\t\t# Test if centroid_wave exits\n\t\tcentroid_wave\n\texcept NameError:\n\t\tprint 'No Gaussian fitted to any lines; returning...'\n\t\treturn np.nan\n\telse:\n\t\tif abs(centroid_wave - transition_dict[transition_name].wave) > 2: \n\t\t\tprint 'Note: %f has a large deviation > 2 angstrom' % centroid_wave\n\t\t\tprint 'Results not recorded.'\n\t\t\treturn np.nan\n\t\telse:\n\t\t\treturn centroid_wave", "title": "" }, { "docid": "6d74a791d2b7fabcf77ab0a069a7ce50", "score": "0.5599773", "text": "def plot_save(plt, country: str, city: str, output_folder: str) -> None:\n plot_path = Path(f\"{output_folder}/{country}/{city}\")\n plot_path.mkdir(exist_ok=True, parents=True)\n plt.savefig(fname=plot_path / f\"min_max_temperature dependence in {city}.png\", dpi=150)\n print(f\"File 'Plot of min_max_temperature dependence in {city}.png' was created.\")\n plt.close()", "title": "" }, { "docid": "5e9b0dc55be6b1cfb15c772eeacfc559", "score": "0.55877715", "text": "def plotImageWavelength(self, save_fig=False, verbose=True, x=None, y=None, vmin=0, vmax=250, blackout=False):\n\n import matplotlib.pyplot as plt\n from astropy.visualization import astropy_mpl_style\n plt.style.use(astropy_mpl_style)\n from astropy.io import fits\n\n open_image = fits.open(self.path + self.image + '.fits')\n if verbose:\n print(open_image.info())\n image_data = open_image[0].data\n open_image.close()\n\n # determining where end of markings will be\n center_end = 50\n back_end = 40\n\n if x != None:\n center_end = int(x[0] + (x[1] - x[0]) * .1)\n back_end = int(x[0] + (x[1] - x[0]) * .05)\n\n # marking trace_center\n trace = self.trace_center\n image_data[:, int(trace - 1):int(trace + 1)] *= .8\n\n if blackout:\n image_data[:, int(trace - 1):int(trace + 1)] = 0\n# HERE\n # marking image with centers\n for i in range(self.n_apertures):\n cen = self.center[i]\n wid = self.width[i]\n image_data[0:center_end + 15 * (self.n_apertures - i), int(\n cen - old_div(wid, 2)):int(cen + old_div(wid, 2))] *= .7\n\n b1, b2, b3, b4 = [b + trace for b in self.background[i]]\n 
image_data[0:back_end, int(b1):int(b2)] *= .6\n image_data[0:back_end, int(b3):int(b4)] *= .6\n\n if blackout:\n image_data[0:center_end + 15 * (self.n_apertures - i), int(\n cen - old_div(wid, 2)):int(cen + old_div(wid, 2))] = 50\n image_data[0:back_end, int(b1):int(b2)] = 100\n image_data[0:back_end, int(b3):int(b4)] = 100\n\n fig, ax = plt.subplots(1, 1)\n fig.set_size_inches(10, 10)\n ax.imshow(image_data, cmap='gray', vmin=vmin, vmax=vmax)\n if x != None:\n ax.set_xlim(x[0], x[1])\n if y != None:\n ax.set_ylim(y[0], y[1])\n ax.grid() # takes away grid\n ax.set_title(self.image)\n fig.show()\n\n del plt\n del astropy_mpl_style\n del fits\n\n if save_fig:\n import os\n directory = self.path + '/plots/'\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n filenameend = '.jpg'\n if x != None:\n filenameend = '_x' + str(x[0]) + ':' + str(x[1]) + '.jpg'\n\n fig.savefig(directory + self.image + '_apertureplot' +\n filenameend, bbox_inches='tight')\n del os", "title": "" }, { "docid": "e42dab4d86dc6631b18974195603348e", "score": "0.55863166", "text": "def _save_raster(self):\r\n from chaco.api import PlotGraphicsContext\r\n gc = PlotGraphicsContext((int(self.component.outer_width), int(self.component.outer_height)))\r\n self.component.draw(gc, mode=\"normal\")\r\n gc.save(self.filename)\r\n return", "title": "" }, { "docid": "79c4f60dd68ef2a9bdb6627df83cca06", "score": "0.5585067", "text": "def saveRadon2DImage(self):\n self.ax1.set_title(\"Radon transform\\n(Sinogram)\")\n self.ax1.set_xlabel(\"Projection angle (deg)\")\n self.ax1.set_ylabel(\"Projection position (pixels)\")\n self.ax1.imshow(self.radonOutput, cmap=plt.cm.Greys_r, extent=(0, 180, 0, self.radonOutput.shape[0]), aspect='auto')\n self.fig.savefig('radon2D_Image.png')\n self.ax1.cla()", "title": "" }, { "docid": "a5ea8fd82b7e1a70a849e5efa0a4cae3", "score": "0.5559849", "text": "def figure_exp_6(fig_type = '.png', save_traces = False,show_traces = False):\n temp, save_folder, specific_folder, temp, file_save = get_figure_data_settings(fig_no = 6)\n file2save_npz = 'fig6_' + '.npz'\n file2save = 'part_of_trace' + fig_type\n \n folder_save = save_folder + specific_folder + '_results/'\n #display.plot_data(folder_save, file2save,folder_save,file_save,x_scale = 'sec', \n # electrodes = [0,2,3], y_range=[-1000,1000])\n \n if save_traces:\n #it must be done only if the recording was gap free but \n #it was not saved in the trace format yet\n\n #display.plot_data(folder_save,file2save,folder_save,file_save,x_scale = 'ms')#,time_range=time_range)\n dh.trigger_on_spike(folder_save, file_save, folder_save, file2save_npz, thresh = -500, el=2,\n time_range=[-300,1500], up = False, center_on_peak = False)\n display.plot_data(folder_save,file2save,folder_save,file2save_npz,x_scale = 'ms', \n electrodes = [0,2,3],y_range =[-300, 1500])\n \n \n fig_save = 'large_synch_20' + fig_type\n #time_range = [50,400] # in ms\n data_details = dh.read_npzdata(folder_save, file2save_npz, \"data\", \"scale\", \"fs\")\n display.plot_data_one_mean(data_details, folder_save, fig_save, x_scale = 'ms',\n title = 'large Gabaa synchronised events', sweeps = [0, 1,2,4,5,6],\n y_range_intra = [-10, 20], electrodes = [0,2,3], y_range = [-1000,1000],\n remove_avg = True) \n if show_traces:\n plt.show()", "title": "" }, { "docid": "cc2f3512db936a12ee167c663775a91d", "score": "0.55588657", "text": "def outdoor_windspeed_plot(self, save = False):\n global df1\n df1 = pd.read_sql_query(\"SELECT * FROM {}.{} WHERE datetime BETWEEN '{}' AND \\\n 
'{}'\".format(\"weather\", \"weather_all\", self.t0,\\\n self.tn), con = self.engine)\n df1 = df1.loc[:,['datetime', 'Wind Speed, m/s', 'Gust Speed, m/s', 'Wind Direction']]\n u = df1['Wind Direction'].to_numpy()\n \n U = np.sin(np.radians(u))\n V = np.cos(np.radians(u))\n wdf_plot = df1.set_index(\"datetime\")\n wdf_plot['u'] = U\n wdf_plot['v'] = V\n wdf_plot['y'] = 0\n \n \n converter = mdates.ConciseDateConverter()\n munits.registry[np.datetime64] = converter\n munits.registry[datetime.date] = converter\n munits.registry[datetime.datetime] = converter\n \n fig, ax1 = plt.subplots()\n ax1.plot(wdf_plot['Gust Speed, m/s'],color = 'silver', label = 'Gust Speed', zorder=1)\n \n \n \n ax1.set_ylabel('Gust speed (m/sec)')\n ax1.set_xlabel('Time')\n # ax2 = ax1.twinx()\n ax1.plot(wdf_plot['Wind Speed, m/s'], label = 'Wind Speed', zorder=2)\n ax1.quiver(wdf_plot.index, wdf_plot['Wind Speed, m/s'], U, V , width = 0.001, zorder=3)\n ax1.set_ylabel('wind speed (m/sec) and direction (up is north)')\n \n plt.ylim(bottom=-0.1)\n title = \"Wind and Gust speed during {}\".format(self.experiment)\n\n plt.legend( loc='upper right')\n plt.title(title)\n if save:\n plt.savefig(title + '.png', bbox_inches='tight', dpi=400)\n plt.show() \n \n return fig", "title": "" }, { "docid": "6b4ff6ca95efd2c295eb4e3c5d485a5b", "score": "0.55561817", "text": "def main03_gain_scheduling_modified():\n x10, x20 = 0, 0\n eta0 = 0\n xhat10, xhat20 = 0, 0\n x0 = np.array([x10, x20, eta0, xhat10, xhat20])\n r = lambda t : 0.2 + (t>30)*0.2 + (t>60)*0.2 + (t>90)*0.2\n alpha = lambda t : r(t)\n t_start, t_end = 0, 140\n\n t,x1,x2,eta,xhat1,xhat2,r,y = simulate_modified(r, alpha, x0, t_start, t_end)\n\n fig, ax = fc.new_figure()\n ax.plot(t, r, 'r--', label = '$r$')\n ax.plot(t, y, label = '$y$')\n ax.set_xlabel('$t$')\n ax.grid()\n ax.legend()\n\n Path('./figures').mkdir(parents=True, exist_ok=True)\n plt.savefig('figures/gain_scheduling_modified.pdf', pad_inches=0.0)", "title": "" }, { "docid": "4bacc60a5ea1e22df0754b455b00bf74", "score": "0.5533225", "text": "def figure_statespace(name, image, ext=ext, N_blur=N_blur, N_noise=N_noise, N_trials=N_trials, D_x=D_x, D_V=D_V, sigma_noise=sigma_noise, range_blur=range_blur, range_noise=range_noise,X_0_statespace=X_0_statespace, Y_0_statespace=Y_0_statespace, V_X_statespace=V_X_statespace, V_Y_statespace=V_Y_statespace, N_step=N_step, N_show=N_show_statespace, N_show_step=N_show_step, width=width, progress=PROGRESS, mode='noise', figures=True):\n\n # HINT : D_V =-1 means we do condensation without prediction\n v_D_V = np.logspace(-range_blur, range_blur, num=N_blur, base=10)*D_V\n v_D_x = np.logspace(-range_blur, range_blur, num=N_blur, base=10)*D_x\n v_noise = np.logspace(-range_noise, range_noise, num=N_noise, base=10)*noise\n# v_D_V[-1] = -1\n\n\n figname_statespace_precision = os.path.join(figpath, name) + '_statespace_precision.pdf'\n figname_statespace_bias = os.path.join(figpath, name) + '_statespace_bias.pdf'\n figname_statespace_precision_init = os.path.join(figpath, name) + '_statespace_precision_init.pdf'\n figname_statespace_bias_init = os.path.join(figpath, name) + '_statespace_bias_init.pdf'\n figname_statespace_phases = os.path.join(figpath, name) + '_statespace_phases.pdf'\n figname_statespace = os.path.join(figpath, name) + '_statespace.pdf'\n# figures = [figname_statespace_precision, figname_statespace_bias, figname_statespace_phases, figname_statespace]\n figures_statespace = 'statespace_figures/'\n mats_statespace = 'statespace_mat/'\n matname_statespace = 
mats_statespace + name + '_statespace.npy'\n\n def fname(i_blur, i_noise, i_trial):\n for folder in (mats_statespace, figures_statespace):\n if not(os.path.isdir(folder)):os.mkdir(folder)\n if mode=='noise':\n figname = figures_statespace + name + '_D_x-' + str(i_blur) + '_noise-' + str(i_noise)\n else:\n figname = figures_statespace + name + '_D_V-' + str(i_blur) + '_D_x-' + str(i_noise)\n\n if i_trial > 0: figname += '_trial-' + str(i_trial)\n matname = figname.replace(figures_statespace, mats_statespace) + '.npy'\n return matname, figname\n#\n# for i_noise, noise_ in enumerate(v_noise):\n# for i_trial in range(N_trials):\n# for i_blur, D_x_ in enumerate(v_D_x):\n# matname, figname = fname(i_blur, i_noise, i_trial)\n# try:\n# if i_trial > 0: os.rename(figname.replace('D_x', 'D_V'), figname)\n# os.rename(matname.replace('D_x', 'D_V'), matname)\n# except:\n# print matname.replace('D_x', 'D_V')\n figname = name +'-'+ str(parameter)+'='+ str(value).replace('.', '_')\n\n def generate_latex_table(N_blur, N_noise, N_step, name, show='all', empty_line=False):\n fig_width = .7/N_blur\n table = '\\\\begin{tabular}{' + N_blur*'c' + '}%\\n'\n # v_D_V, v_noise = [4, 2, 0], [8, 6, 4, 2, 0] #range(N_blur-1, 0, -3), range(N_blur-1, 0, -2)\n # v_D_V, v_noise = range(N_blur-1, -1, -1), range(N_blur-1, -1, -1)\n v_D_V, v_noise = range(N_blur-1, -1, -N_step), range(0, N_noise, N_step)\n\n for i_blur in v_D_V:\n for i_noise in v_noise:\n if (show == 'col' and i_noise == v_noise[len(v_noise)/2]) or (show == 'row' and i_blur == v_D_V[len(v_D_V)/2]) or (show == 'all'):\n if mode=='noise':\n table += '\\\\includegraphics[width=' + str(fig_width) + '\\\\textheight]{' + name + '_D_x-' + str(i_blur) + '_noise-' + str(i_noise) + '.png}'\n else:\n table += '\\\\includegraphics[width=' + str(fig_width) + '\\\\textheight]{' + name + '_D_V-' + str(i_blur) + '_D_x-' + str(i_noise) + '.png}'\n\n if not(i_noise == v_noise[-1]):\n table += '&%\\n' # to the next cell\n\n if not(i_blur == v_D_V[-1]):\n table += '\\\\\\\\%\\n' # to the next row\n if empty_line: table += (N_blur-1)*'&' + '\\\\\\\\%\\n' # an empty line\n\n table += '\\n\\\\end{tabular}%\\n'\n fic = open(figures_statespace + name + 'table_' + show + '.tex', 'w')\n fic.write(table)\n fic.close()\n\n\n # First, do all individual simulations statespace analysis\n switch_break = False\n if not(os.path.isfile(matname_statespace + LOCK)):# and not(os.path.isfile(matname_statespace)):\n # study over 2-3 orders of magnitude\n # main loop\n N_X, N_Y, N_frame = image.shape\n if progress:\n pbar = pyprind.ProgBar(N_blur*N_noise*N_trials, title=\"State-space\")\n shuffle_D_V = np.random.permutation(np.arange(N_blur))\n shuffle_D_x = np.random.permutation(np.arange(N_blur))\n shuffle_noise = np.random.permutation(np.arange(N_noise))\n if mode=='noise':\n for i_noise in range(N_noise):\n image_ = image.copy()\n image_ += v_noise[shuffle_noise[i_noise]] * np.random.randn(N_X, N_Y, N_frame)\n for i_blur in range(N_blur):#enumerate(v_D_x[shuffle_D_x]):\n for i_trial in range(N_trials):\n# for i_blur, D_V_ in enumerate(v_D_V[shuffle_D_V]):\n matname, figname = fname(shuffle_D_x[i_blur], shuffle_noise[i_noise], i_trial)\n if not(os.path.isfile(matname + LOCK)) and not(os.path.isfile(matname)):\n# mat_condensation(matname, image_, D_V=D_V_, D_x=v_D_x[shuffle_D_V[i_blur]], progress=False)\n mat_condensation(matname, image_, D_x=v_D_x[shuffle_D_x[i_blur]], D_V=v_D_V[shuffle_D_x[i_blur]], progress=False)\n# mat_condensation(matname, image_, D_x=D_x_, progress=False)\n# else:\n# # TODO 
: no prediction = condensation with D_V=0, D_x = inf, resampling = 0\n# mat_condensation(matname, image_, D_V=0, progress=False)\n # if we perform a novel individual run, we should remoe forward dependencies, that is global evaluation of the tracking\n if os.path.isfile(matname_statespace): os.remove(matname_statespace)\n if i_trial == 0:\n if os.path.isfile(matname) and not(os.path.isfile(figname + '.png')):\n particles = np.load(matname)\n show_particles(particles[:, :, ::N_show_step], image=image_[:, :, 0]+image_[:, :, -1])\n pylab.savefig(figname + '.png')\n pylab.close('all')\n\n if progress: pbar.update(i_noise*N_trials*N_blur+i_blur*N_trials+i_trial)\n else:\n for i_D_x in range(N_blur):\n image_ = image.copy()\n image_ += noise * np.random.randn(N_X, N_Y, N_frame)\n for i_trial in range(N_trials):\n for i_blur in range(N_blur):\n matname, figname = fname(shuffle_D_x[i_D_x], shuffle_D_V[i_blur], i_trial)\n if not(os.path.isfile(matname + LOCK)) and not(os.path.isfile(matname)):\n mat_condensation(matname, image_, D_x=v_D_x[shuffle_D_x[i_blur]], D_V=v_D_V[shuffle_D_V[i_blur]], progress=False)\n if os.path.isfile(matname_statespace): os.remove(matname_statespace)\n if i_trial == 0:\n if os.path.isfile(matname) and not(os.path.isfile(figname + '.png')):\n particles = np.load(matname)\n show_particles(particles[:, :, ::N_show_step], image=image_[:, :, 0]+image_[:, :, -1])\n pylab.savefig(figname + '.png')\n pylab.close('all')\n\n if progress: pbar.update()#i_D_x*N_trials*N_blur+i_trial*N_blur+i_blur)\n\n generate_latex_table(N_blur, N_noise, N_step, name)\n generate_latex_table(N_blur, N_noise, N_step, name, show='row')\n generate_latex_table(N_blur, N_noise, N_step, name, show='col')\n\n\n # routine to evaluate tracking\n def tracker(particles, frame):\n # TODO: computing variability for poisition should use a circular gaussian = von mises\n # TODO: use circular variance of P_i defined over angles \\theta_i = 1 - |R| with R = \\sum_i P_i e^{i 2 \\theta_i} / \\sum_i P_i\n# print particles_.shape, N_frame, frame\n w = particles[4, :, frame]\n w /= w.sum()\n # TODO check the formula for the trajectory... 
@ frame=-1 we are not yet to the same point as in frame 0!\n particle_phys = [torus((X_0_statespace+frame/float(N_frame)*V_X_statespace*width), width),\n torus((Y_0_statespace+frame/float(N_frame)*V_Y_statespace*width), width),\n V_X_statespace, V_Y_statespace]\n diff = particles_[:4, :, frame]-np.array([particle_phys]).T\n tracking = np.sqrt((diff*w).sum(axis=1)**2) # bias = squared of the mean difference\n sigma = np.sqrt((diff**2*w).sum(axis=1)) # precision = mean squared error\n return np.array([tracking, sigma])\n\n\n # Then, evaluate tracking\n\n if os.path.isfile(matname_statespace):\n tracking = np.load(matname_statespace)\n else:\n if not(os.path.isfile(matname_statespace + LOCK)):\n # check that everything was computed in the first step\n # this is already checked above: we do it again because the individual simulations may have been launched in different runs\n if mode=='noise':\n for i_noise, noise_ in enumerate(v_noise):\n for i_trial in range(N_trials):\n for i_blur, D_x_ in enumerate(v_D_x):\n matname, figname = fname(i_blur, i_noise, i_trial)\n # print matname, os.path.isfile(matname + LOCK), not(os.path.isfile(matname))\n if os.path.isfile(matname + LOCK) or not(os.path.isfile(matname)):\n switch_break = True\n else:\n for i_D_V, D_V_ in enumerate(v_D_V):\n for i_trial in range(N_trials):\n for i_D_x, D_x_ in enumerate(v_D_x):\n matname, figname = fname(i_D_x, i_D_V, i_trial)\n # print matname, os.path.isfile(matname + LOCK), not(os.path.isfile(matname))\n if os.path.isfile(matname + LOCK) or not(os.path.isfile(matname)):\n switch_break = True\n\n # Now, do it\n if not(switch_break) and not(os.path.isfile(matname_statespace + '_lock')):\n # locking\n touch(matname_statespace + LOCK)\n touch(matname_statespace + '_lock')\n\n\n # remove forward dependencies, that is figures showing tracking\n for figname in [figname_statespace_precision, figname_statespace_bias, figname_statespace_phases, figname_statespace,\n figname_statespace_precision_init, figname_statespace_bias_init]:\n if os.path.isfile(figname): os.remove(figname)\n\n # tracking : [i_blur, i_noise, bias - precision, x-y-u-v , init or last]\n tracking = np.zeros((N_blur, N_noise, 2, 4, 2))#\n if progress:\n pbar = pyprind.ProgBar(N_blur*N_noise*N_trials, title=\"State-space\")\n if mode=='noise':\n for i_noise in range(N_noise):\n for i_trial in range(N_trials):\n for i_blur in range(N_blur):\n matname, figname = fname(i_blur, i_noise, i_trial)\n particles_ = np.load(matname)\n # doing everything on the last set of particles = particles_[:, :, -1]\n tracking[i_blur, i_noise, : , :, 0] += tracker(particles_, frame=0)/N_trials\n tracking[i_blur, i_noise, : , :, 1] += tracker(particles_, frame=N_frame-1)/N_trials\n\n if progress: pbar.update()#i_noise*N_trials*N_blur+i_trial*N_blur+i_blur)\n else:\n for i_D_V, D_V_ in enumerate(v_D_V):\n\n for i_trial in range(N_trials):\n for i_blur in range(N_blur):\n matname, figname = fname(i_blur, i_D_V, i_trial)\n particles_ = np.load(matname)\n # doing everything on the last set of particles = particles_[:, :, -1]\n tracking[i_blur, i_D_V, : , :, 0] += tracker(particles_, frame=0)/N_trials\n tracking[i_blur, i_D_V, : , :, 1] += tracker(particles_, frame=N_frame-1)/N_trials\n\n if progress: pbar.update()#i_noise*N_trials*N_blur+i_trial*N_blur+i_blur)\n\n np.save(matname_statespace, tracking)\n os.remove(matname_statespace + LOCK)\n os.remove(matname_statespace + '_lock')\n print('Evaluated tracking')# just did it\n\n else:\n switch_break = True\n print(matname_statespace, ' 
locked')# not finished in another process\n\n v_D_V_text = ['%.3f' % D_V for D_V in v_D_V]\n v_D_x_text = ['%.3f' % D_x for D_x in v_D_x]\n v_noise_text = ['%0.2f' % noise for noise in v_noise]\n\n def figure_4panels(mat, titles):\n vmax = 0#log10(tracking[: , :, 0, 0].T).max()\n vmin = mat[: , :, 0].min()\n fig = pylab.figure(figsize=(12, 8))\n a1 = fig.add_subplot(221)\n mapable = a1.pcolormesh(mat[: , :, 0], vmin=vmin, vmax=vmax, shading='auto')#, edgecolors='k')\n pylab.axis('tight')\n a2 = fig.add_subplot(222)\n a2.pcolormesh(mat[: , :, 1], vmin=vmin, vmax=vmax, shading='auto')#, edgecolors='k')\n pylab.axis('tight')\n a3 = fig.add_subplot(223)\n a3.pcolormesh(mat[: , :, 2], vmin=vmin, vmax=vmax, shading='auto')#, edgecolors='k')\n pylab.axis('tight')\n a4 = fig.add_subplot(224)\n a4.pcolormesh(mat[: , :, 3], vmin=vmin, vmax=vmax, shading='auto')#, edgecolors='k')\n pylab.axis('tight')\n\n for i_a, ax in enumerate([a1, a2, a3, a4]):\n ax.set_yticks(np.arange(0, N_noise, 2))\n ax.set_xticks(np.arange(0, N_blur, 2))\n ax.set_title(titles[i_a])\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n if ax in [a3, a4]:\n ax.set_xticklabels(v_noise_text[::2])\n ax.set_xlabel('external noise')\n if ax in [a1, a3]:\n ax.set_ylabel('internal noise')\n ax.set_yticklabels(v_D_V_text[::2])\n ax.axis('tight')\n\n# for ax in [a1, a2]:\n# ax.set_xticklabels([])\n# ax.set_xlabel('')\n# for ax in [a2, a4]:\n# ax.set_yticklabels([])\n# ax.set_ylabel('')\n\n height = pylab.rcParams['figure.subplot.top']-pylab.rcParams['figure.subplot.bottom']\n# print [0.91, pylab.rcParams['figure.subplot.bottom'] + height/4., .025, height/2. ]\n a5 = pylab.axes([0.91, pylab.rcParams['figure.subplot.bottom'] + height/4., .025, height/2. ], facecolor='w') # [l, b, w, h]\n pylab.colorbar(mapable, cax=a5, ax=a1, format='%.2f')\n return fig\n# print tracking.min(), tracking.max(), tracking.mean()\n# print tracking[: , :, 0, :, 0].min(), tracking[: , :, 0, :, 0].max(), tracking[: , :, 0, :, 0].mean()\n# print tracking[: , :, 0, :, 1].min(), tracking[: , :, 0, :, 1].max(), tracking[: , :, 0, :, 1].mean()\n#\n # Finally, make figures\n if not(switch_break) and figures:\n # tracking : [i_blur, i_noise, bias - precision, x-y-u-v , init or last]\n if not(os.path.isfile(figname_statespace_bias_init)):\n fig = figure_4panels(np.log10(tracking[: , :, 0, :, 0]), ['tracking_X', 'tracking_Y', 'tracking_U', 'tracking_V'])\n fig.savefig(figname_statespace_bias_init)\n\n if not(os.path.isfile(figname_statespace_precision_init)):\n fig = figure_4panels(np.log10(tracking[: , :, 1, :, 0]), ['sigma_X', 'sigma_Y', 'sigma_U', 'sigma_V'])\n pylab.savefig(figname_statespace_precision_init)\n\n if not(os.path.isfile(figname_statespace_bias)):\n fig = figure_4panels(np.log10(tracking[: , :, 0, :, 1]), ['tracking_X', 'tracking_Y', 'tracking_U', 'tracking_V'])\n fig.savefig(figname_statespace_bias)\n\n if not(os.path.isfile(figname_statespace_precision)):\n fig = figure_4panels(np.log10(tracking[: , :, 1, :, 1]), ['sigma_X', 'sigma_Y', 'sigma_U', 'sigma_V'])\n pylab.savefig(figname_statespace_precision)\n\n#\n# if not(os.path.isfile(figname_statespace_phases)):\n# fig = figure_4panels(np.log10(tracking_pos_vel), ['tracking_pos', 'tracking_vel', 'sigma_pos','sigma_vel'])\n# pylab.savefig(figname_statespace_phases)\n#\n if not(os.path.isfile(figname_statespace_phases)):\n # tracking : [i_blur, i_noise, bias - precision, x-y-u-v , init or last]\n# tracking_pos_vel = np.zeros(tracking[: , :, 0, :, 1].shape) # precision at end\n# tracking_pos_vel[: , :, 0:2] 
= np.sqrt(.5*(tracking[: , :, :, 0, 1]**2+tracking[: , :, :, 1, 1]**2))\n# tracking_pos_vel[: , :, 2:4] = np.sqrt(.5*(tracking[: , :, :, 2, 1]**2+tracking[: , :, :, 3, 1]**2))\n# # TODO this is a HACK just showing precision\n# mat = np.log10(np.sqrt(tracking[: , :, 1, 0, 1]**2+tracking[: , :, 1, 1, 1]**2))\n# # U,V mat = np.log10(np.sqrt(tracking[: , :, 1, 2, 1]**2+tracking[: , :, 1, 3, 1]**2))\n# vmax = 0#log10(tracking[: , :, 0, 0].T).max()\n# vmin = mat.min()\n# fig = pylab.figure(figsize=(12, 8))\n# ax = fig.add_subplot(111)\n# mapable = ax.pcolormesh(mat, vmin=vmin, vmax=vmax, edgecolors='k')#\n## pylab.contour(v_D_V, v_noise, tracking_pos_vel[: , :, 0].T**2/2.+tracking_pos_vel[: , :, 1].T**2, N=1)\n## pylab.contourf(v_D_V, v_noise, tracking_pos_vel[: , :, 2].T**2/2.+tracking_pos_vel[: , :, 3].T**2, N=1)\n## # pylab.contour(v_D_V, v_noise, tracking_X[:-1, 1:].T**2/2.+tracking_V[:-1, 1:].T**2, N=1)\n## # pylab.contourf(v_D_V, v_noise, np.sqrt(sigma_X[:-1, 1:].T**2/4.+sigma_V[:-1, 1:].T**2), N=1)\n## pylab.axis('tight')\n## pylab.text(0.01, 0.012, 'Tracking', fontsize=12, weight='bold')\n## pylab.text(0.01, 0.027, 'No tracking', fontsize=12, weight='bold')\n## pylab.text(0.05, 0.012, 'False\\n tracking', fontsize=12, weight='bold')\n## ax.set_xticklabels([])\n## ax.set_yticklabels([])\n# ax.axis('tight')\n## pylab.colorbar(mapable, ax=ax, format='%.2f')\n\n tracking_vel = np.zeros(tracking[: , :, 0, 0, 1].shape) # precision at end\n tracking_vel = np.sqrt(.5*(tracking[: , :, 0, 2, 1]**2+tracking[: , :, 0, 3, 1]**2))\n bias_vel = np.zeros(tracking[: , :, 0, 0, 1].shape) # precision at end\n bias_vel = np.sqrt(.5*(tracking[: , :, 1, 2, 1]**2+tracking[: , :, 1, 3, 1]**2))\n\n fig = pylab.figure(figsize=(5, 10))\n a1 = fig.add_subplot(211)\n mapable = a1.pcolormesh(tracking_vel, shading='auto')#, edgecolors='k')\n pylab.axis('tight')\n a2 = fig.add_subplot(212)\n a2.pcolormesh(bias_vel, shading='auto')#, edgecolors='k')\n pylab.axis('tight')\n\n for i_a, ax in enumerate([a1, a2]):\n# ax.set_yticks(.5 + np.arange(0, N_noise, 2))\n# ax.set_xticks(.5 + np.arange(0, N_blur, 2))\n# ax.set_title(titles[i_a])\n# ax.set_xticklabels([])\n# ax.set_yticklabels([])\n if ax in [a1]:\n ax.set_xticklabels(v_noise_text[::2])\n ax.set_xlabel('external noise')\n if ax in [a1, a2]:\n ax.set_ylabel('internal noise')\n ax.set_yticklabels(v_D_V_text[::2])\n ax.axis('tight')\n\n # for ax in [a1, a2]:\n # ax.set_xticklabels([])\n # ax.set_xlabel('')\n # for ax in [a2, a4]:\n # ax.set_yticklabels([])\n # ax.set_ylabel('')\n\n height = pylab.rcParams['figure.subplot.top']-pylab.rcParams['figure.subplot.bottom']\n # print [0.91, pylab.rcParams['figure.subplot.bottom'] + height/4., .025, height/2. ]\n a5 = pylab.axes([0.91, pylab.rcParams['figure.subplot.bottom'] + height/4., .025, height/2. 
], facecolor='w') # [l, b, w, h]\n pylab.colorbar(mapable, cax=a5, ax=a1, format='%.2f')\n pylab.savefig(figname_statespace_phases)", "title": "" }, { "docid": "899d52177b06c2e9c67d3df7b79b44b8", "score": "0.55327755", "text": "def plot_spectra(wl, spec, colspec=\"k.-\", label=None, title=\"Spectrum\"):\n plt.plot(wl, spec, colspec, label=label)\n plt.title(title)\n plt.legend()\n plt.show(block=False)\n return None", "title": "" }, { "docid": "bff59197a24d1ae4be79c179a33abdfc", "score": "0.55288994", "text": "def save_map_fig(map_trg,title,output_path,radius_cut=850,calib_type=\"point\"):\n xx = np.linspace(0,radius_cut,50)\n yy = np.linspace(0,180,50)\n xx,yy = np.meshgrid(xx,yy)\n zz = map_trg.func(xx,yy)\n fig = plt.figure(figsize=(10,6))\n plt.pcolormesh(xx,yy,zz)\n plt.colorbar()\n if calib_type == \"point\":\n plt.scatter(np.array(map_trg.radius),np.array(map_trg.theta),color=\"red\")\n else:\n plt.plot(np.array(map_trg.radius),np.array(map_trg.theta),color=\"red\")\n plt.xlabel(\"$R [mm]$\",fontsize=14)\n plt.ylabel(\"$\\\\theta [\\degree]$\",fontsize=14)\n plt.title(title,fontsize=16)\n plt.savefig(output_path)", "title": "" }, { "docid": "e2ebf7236061d1e44d7028e1cf16aa24", "score": "0.5527501", "text": "def PlotSpectrum():\n\tplt.title(r\"Solar NA I D lines \")\n\tplt.plot(wave_vac,S_spec, color = \"royalblue\")\n\tplt.grid(linestyle = \"--\")\n\tplt.xlabel(r\"Wavelength $\\lambda$ [$\\mathrm{\\AA}$]\")\n\t# plt.subplots_adjust(bottom = 0.12, left = 0.15)\n\t# plt.savefig(savepath + \"solarspectran.pdf\")\n\tplt.show()", "title": "" }, { "docid": "bd9667a048ecff2cfff49a7c2b3f8676", "score": "0.5508835", "text": "def plotOutput(X,Y,spectrumLabel,ydata,PBool=True,ydataBool=False,residuals=False,path=False):\n import matplotlib\n if PBool == False:\n matplotlib.use('Agg') #non-interactive backend\n import pylab as P\n P.ioff() #Ensure interactivity mode is off so that graph does not dissapear immediately\n fig = P.figure()\n maxYval = amax(Y)\n minYval = amin(Y)\n DynamicRange = maxYval - minYval\n if not ydataBool:\n P.plot(X,Y,'g', linewidth = 2.0)\n P.xlabel(r'Detuning (GHz)')\n P.ylabel(spectrumLabel)\n P.xlim(X[0],X[-1])\n P.ylim(minYval-0.02*DynamicRange,maxYval+0.02*DynamicRange)\n else:\n ax1 = fig.add_axes([0.15,0.30,0.75,0.65])\n ax1.plot(X,ydata,'k')\n ax1.plot(X,Y,'r--', linewidth=1.8)\n ax1.set_xlim(X[0],X[-1])\n ax1.set_ylim(minYval-0.02*DynamicRange,maxYval+0.02*DynamicRange)\n ax1.set_xticklabels([])\n P.ylabel(spectrumLabel)\n ax2 = fig.add_axes([0.15,0.10,0.75,0.15])\n ax2.plot(X,residuals*100.0,'k')\n ax2.set_xlim(X[0],X[-1])\n ax2.axhline(color='r', linestyle = '--', linewidth=1.8)\n P.xlabel(r'Detuning (GHz)')\n P.ylabel(r'Residuals $(\\times 100)$')\n if path:\n P.savefig(path)\n if PBool:\n P.show()", "title": "" }, { "docid": "59de18a6d4347fb40cba96f7adcf158b", "score": "0.549675", "text": "def plotter(dataframe,filename):\n #Create a folder to save the files separetely\n folder_name = filename + \"_Plots\"\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)\n \n print \"\\nSmoothing and Plotting\"\n for i in range(10):\n print \"\\n Channel \"+ str(i+1)\n df_temp = dataframe[[i]].dropna()\n #Exponential Moving Average\n df_temp['EMV'] = df_temp.ewm(span=100,min_periods=0,adjust=True).mean()\n #print \"Smoothened\"\n \n #Figures!\n #Fig 1 - initialization\n #plot = plt.figure()\n #plt.plot()\n plot = df_temp.plot(figsize=(25,10))\n fig = plot.get_figure()\n #print \"Plotted\"\n \n #Saving the plots in a subfolder\n \n fig.savefig(folder_name 
+'/'+ filename +\"_Channel_\"+str(i)+\".tif\",orientation='portrait',papertype='letter')\n print\n print \"Plotting Successfull!\"\n print \n print \"==================================================================================\"\n return df_temp", "title": "" }, { "docid": "41e7b817c61b6cc3fd0451617e37fd79", "score": "0.54948515", "text": "def save_spectrogram(spec, hop_length, output_path, x_axis='time', y_axis='log', figsize=(10, 4), title=None):\n\n fig = plt.figure()\n plt.style.use('dark_background')\n plt.suptitle(title)\n librosa.display.specshow(spec, hop_length=hop_length, x_axis=x_axis, y_axis=y_axis)\n plt.gca().set_axis_off()\n plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)\n fig.savefig(output_path)\n fig.clear()\n plt.close('all')", "title": "" }, { "docid": "79455ca82d0a2871f6a9250b4a366589", "score": "0.5493766", "text": "def plot_spectrum(wavelength, flux, error, rootname):\n flux = convolve(flux,Box1DKernel(5))\n plt.figure(rootname)\n plt.plot(wavelength, flux, '0.5', label='spectrum')\n plt.plot(wavelength, error, label='error') \n plt.legend()\n plt.show()\n plt.close()", "title": "" }, { "docid": "29bdc2581b9628094cdaa541f20dd911", "score": "0.54908895", "text": "def generate_stopping_power_and_particle_energy_curve(file, save_figure=False, energy_unit='keV'):\n f = open(file, \"r\")\n lines = f.readlines()\n\n ## Delete header lines\n del lines[:4]\n\n list_eletronic_stopping_power = []\n list_nuclear_stopping_power = []\n list_particle_energy = []\n\n for line in lines:\n ## Split energy columns\n energy = re.split(r'\\s', line)\n\n particle_energy = replace_scientific_notation(energy[0])\n if energy_unit.lower() =='kev':\n particle_energy = particle_energy * 10 ** -3\n eletronic_stopping_power = replace_scientific_notation(energy[3])\n nuclear_stopping_power = replace_scientific_notation(energy[5])\n\n list_eletronic_stopping_power.append(eletronic_stopping_power)\n list_nuclear_stopping_power.append(nuclear_stopping_power)\n list_particle_energy.append(particle_energy)\n\n x = np.array(list_particle_energy)\n\n y1 = np.array(list_eletronic_stopping_power)\n y2 = np.array(list_nuclear_stopping_power)\n\n legend = re.search(r'\\.\\/SRIM_files\\/(.*)', file)\n if legend:\n legend = f'Stopping power of {legend.group(1)}'\n plt.plot(x, y2, color='r', label='eletronic')\n #plt.xscale('log')\n plt.yscale('log')\n # plt.plot(x, y2, color='g', label='nuclear')\n\n plt.xlabel(\"Particle energy [MeV]\")\n plt.ylabel(\"Stopping Power [MeV/mm]\")\n plt.title(f\"{legend}\")\n\n plt.yscale('log')\n plt.tight_layout(pad=2)\n\n if save_figure:\n plt.savefig(f'./images/{legend.replace(\" \", \"_\")}.png')\n\n else:\n plt.show()", "title": "" }, { "docid": "94f64bec45cac96b2cac3c11b86f1180", "score": "0.54878795", "text": "def get_power_spectrum(p, f1, f2, jk_region=None, save_windows=True):\n try:\n fname = p.get_fname_cls(f1.name, f2.name, jk_region=jk_region)\n Cls = Spectrum.from_file(fname, f1.name, f2.name)\n except FileNotFoundError:\n bpw = p.get_bandpowers()\n wsp = get_mcm(p, f1, f2, jk_region=jk_region)\n Cls = Spectrum.from_fields(f1, f2, bpw, wsp, save_windows=save_windows)\n Cls.to_file(fname)\n return Cls", "title": "" }, { "docid": "cb040ef454519939885f659e63524920", "score": "0.5481449", "text": "def visualize_spectrum(y):\n global _prev_spectrum, colorThisTime, count0\n #y = np.copy(interpolate(y, local_N_PIXELS))\n _prev_spectrum = np.copy(y)\n y = y*noHighStuff\n # Color channel mappings\n count0+=1\n 
keyObj.update(y)\n chordObj.update(y, keyObj.getKeyNum())\n beatObj.update(y)\n temp1 = rawFilt.update(y)\n temp2 = ledFilt.update(y)\n countEff = count0%nFramesCycle\n if count0%10==0:\n chordObj.printChord()\n\n if 0.0*nFramesCycle <= countEff <= 0.1*nFramesCycle:\n a1 = [1, 0, 0]\n a2 = [0, 0, 1]\n elif 0.1*nFramesCycle < countEff <= 0.4*nFramesCycle:\n temp = (1/0.3)*(countEff-0.1*nFramesCycle)/nFramesCycle\n a1 = [1-temp, temp, 0] \n a2 = [temp, 0, 1-temp]\n elif 0.4*nFramesCycle < countEff <= 0.5*nFramesCycle:\n a1 = [0, 1, 0]\n a2 = [1, 0, 0]\n elif 0.5*nFramesCycle < countEff <= 0.7*nFramesCycle:\n temp = (1/0.2)*(countEff-0.5*nFramesCycle)/nFramesCycle\n a1 = [0, 1-temp, temp] \n a2 = [1-temp, temp, 0]\n elif 0.7*nFramesCycle < countEff <= 0.8*nFramesCycle:\n a1 = [0, 0, 1]\n a2 = [0, 1, 0]\n elif 0.8*nFramesCycle < countEff <= 1.0*nFramesCycle:\n temp = (1/0.2)*(countEff-0.8*nFramesCycle)/nFramesCycle\n a1 = [temp, 0, 1-temp] \n a2 = [0, 1-temp, temp]\n \n #r = temp2 * 0.0\n #g = temp2 * 0.0\n #b = temp2 * 1.0\n \n r=a1[0]*temp2\n g=a1[1]*temp2;\n b=a1[2]*temp2;\n \n '''\n # tonic is blue\n if chordObj.getChordNum()==0:\n r = temp2 * 0.0\n g = temp2 * 0.0\n b = temp2 * 1.0\n # ii is yellow\n elif chordObj.getChordNum()==1:\n r = temp2 * 0.5\n g = temp2 * 0.5\n b = temp2 * 0.0\n # iii is orange\n elif chordObj.getChordNum()==2:\n r = temp2 * 0.66\n g = temp2 * 0.33\n b = temp2 * 0.0\n # IV is green\n elif chordObj.getChordNum()==3:\n r = temp2 * 0.0\n g = temp2 * 1.0\n b = temp2 * 0.0\n # V is red\n elif chordObj.getChordNum()==4:\n r = temp2 * 1.0\n g = temp2 * 0.0\n b = temp2 * 0.0\n # vi is purple\n elif chordObj.getChordNum()==5:\n r = temp2 * 0.5\n g = temp2 * 0.0\n b = temp2 * 0.5\n elif chordObj.getChordNum()==6:\n r = temp2 * 0.5\n g = temp2 * 0.5\n b = temp2 * 0.5\n '''\n \n \n '''\n if beatObj.beatRightNow():\n colorThisTime = (colorThisTime + 1)%3\n print(\"BEAT!!!!\")\n if colorThisTime == 0:\n r = temp2 * 1.0\n g = temp2 * 0.0\n b = temp2 * 0.0\n elif colorThisTime == 1:\n r = temp2 * 0.0\n g = temp2 * 1.0\n b = temp2 * 0.0\n if colorThisTime == 2:\n r = temp2 * 0.0\n g = temp2 * 0.0\n b = temp2 * 1.0\n '''\n output = np.array([r,g,b]) * 255\n #output = np.array([np.ones(local_N_PIXELS), np.ones(local_N_PIXELS), np.ones(local_N_PIXELS)]) * 255\n output2 = np.zeros([3, 2*local_N_PIXELS])\n output2[..., 0:local_N_PIXELS] = output[...,::-1]\n output2[..., local_N_PIXELS:2*local_N_PIXELS] = output\n return output2", "title": "" }, { "docid": "292b6fc354491cf2c43a7dfe74c825b7", "score": "0.54811364", "text": "def imwrite_wrapper(show_func):\n def imwrite_func(model, dpath=None, dpi=180, asdiagnostic=True,\n ascheckpoint=None, verbose=1, **kwargs):\n import plottool as pt\n # Resolve path to save the image\n if dpath is None:\n if ascheckpoint is True:\n history_hashid = model.get_model_history_hashid()\n dpath = model._get_model_dpath(checkpoint_tag=history_hashid)\n elif asdiagnostic is True:\n dpath = model.get_epoch_diagnostic_dpath()\n else:\n dpath = model.training_dpath\n # Make the image\n fig = show_func(model, **kwargs)\n # Save the image\n output_fpath = pt.save_figure(fig=fig, dpath=dpath, dpi=dpi,\n verbose=verbose)\n return output_fpath\n return imwrite_func", "title": "" }, { "docid": "ffe6ae48625f697a6cb0280d2cfc0800", "score": "0.5479929", "text": "def plot_spectra(eigval_col, savename=\"spectrum_onetrial.jpg\", figdir=savedir, fig=None, label=\"BP\"):\n eigmean = eigval_col.mean(axis=0)\n eiglim = np.percentile(eigval_col, [5, 95], axis=0)\n sortidx = 
np.argsort(-np.abs(eigmean))\n eigmean = np.abs(eigmean[sortidx])\n eiglim = eiglim[:, sortidx]\n eigN = len(eigmean)\n if fig is None:\n fig, axs = plt.subplots(1, 2, figsize=[10, 5])\n else:\n # plt.figure(fig.number)\n plt.figure(num=fig.number)\n axs = fig.axes\n plt.sca(axs[0])\n plt.plot(range(eigN), eigmean, alpha=0.6)\n plt.fill_between(range(eigN), eiglim[0, :], eiglim[1, :], alpha=0.3, label=label)\n plt.ylabel(\"eigenvalue\")\n plt.xlabel(\"eig id\")\n plt.legend()\n plt.sca(axs[1])\n plt.plot(range(eigN), np.log10(eigmean), alpha=0.6)\n plt.fill_between(range(eigN), np.log10(eiglim[0, :]), np.log10(eiglim[1, :]), alpha=0.3, label=label)\n plt.ylabel(\"eigenvalue(log)\")\n plt.xlabel(\"eig id\")\n plt.legend()\n st = plt.suptitle(\"Hessian Spectrum of StyleGAN\\n (error bar for [5,95] percentile among all samples)\")\n plt.savefig(join(figdir, savename), bbox_extra_artists=[st]) # this is working.\n # fig.show()\n return fig", "title": "" }, { "docid": "1363eea180fea57c84355f68d832001b", "score": "0.54797584", "text": "def makeAveragePSFigure(averagePS, figureFileName): \r\n pylab.imshow(np.log(averagePS),cmap = \"gray\")\r\n pylab.contour(np.log(averagePS))\r\n pylab.axis(\"off\")\r\n pylab.savefig(figureFileName)", "title": "" }, { "docid": "2b4ee1d0978acda14ddf77c00949f73e", "score": "0.5479389", "text": "def save_plot(base_path, rel_path, filename):\n dir_path = os.path.join(base_path, rel_path)\n os.makedirs(dir_path, exist_ok=True)\n plot_path = os.path.join(dir_path, filename)\n pl.savefig(plot_path)\n pl.close()\n\n return os.path.join(rel_path, filename)", "title": "" }, { "docid": "a07e0976a889e28e70d21ca074bcb955", "score": "0.54790896", "text": "def save(self, filename):\n\n self.update_title()\n self.update_legend()\n self.figure.savefig(filename + '.' 
+ self.outformat)", "title": "" }, { "docid": "063098ef98257050ca5cb45636dfb358", "score": "0.5477221", "text": "def save_structure(mo, title: str = None, output: str = \"\", azi=None, ele=None):\n matplotlib.use('agg')\n\n fig, ax = generate_figure(mo)\n\n ax.view_init(elev=ele, azim=azi)\n\n directory = f\"{make_output_folder(output)}/{title if title else mo.name}.png\"\n\n plt.title(title if title else mo.name, fontsize=5)\n plt.savefig(directory)\n plt.close('all')", "title": "" }, { "docid": "8dae707ae94cc3369b5c43340d66d534", "score": "0.54718155", "text": "def splat_assign_spectrum(self, auto=False):\n self.process_splatalogue(auto=auto)", "title": "" }, { "docid": "0be51b10a2ca5bc70d97b212de39e2df", "score": "0.54596823", "text": "def save_champs_res(dom, x, y, champs, time, cmax, cmin, j):\n plt.figure()\n # define grid.\n xi = np.linspace(-5.1, 5.1, 800)\n yi = np.linspace(-.1, 10.1, 10000)\n norm = mplc.Normalize(cmin, cmax)\n #~ v = np.linspace(cmin, cmax, 100, endpoint=True)\n bounds=np.linspace(cmin,cmax,100)\n # grid the data.\n zi = griddata(x,y, champs, xi,yi, interp='linear')\n # contour the gridded data, plotting dots at the nonuniform data points.\n CS = plt.contourf(xi, yi, zi,100, vmax=cmax, vmin=cmin, norm=norm, levels=bounds, extend='both')\n #~ plt.autumn()\n cbar = plt.colorbar(CS)\n plt.clim(vmin=cmin, vmax=cmax)\n cbar.ax.set_ylabel('Temperature')\n #~ plt.plot(xi,dom.height*np.ones(len(xi)), '-k')\n # ~ plt.plot(xi,dom.height*np.ones(len(xi)) + dom.dz, '-g')\n # ~ plt.plot(xi,dom.height*np.ones(len(xi)) - dom.dz, '-g')\n\t\n # plot data points.\n #~ plt.scatter(x, y, marker='o', s=5, zorder=10)\n #~ plt.xlim(-0.1, 0.6)\n #~ plt.ylim(-0.1, 1.1)\n\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title('Temperature au bout de %(g)s secondes'%{'g' : time})\n plt.savefig('res_%(g)s.png'%{'g' : '%06d' % j})\n plt.close()", "title": "" }, { "docid": "d6e752852aefae96a28f9a30a2f7a75a", "score": "0.5449565", "text": "def save_spectrum(velocity, opacity, flux, em_mean, em_std, filename, \n sigma_tau, smoothed_od, sigma_smoothed_od, metadata=None):\n table = Table(meta={'name': filename, 'id': 'optical_depth'})\n table.add_column(Column(name='velocity', data=velocity, unit='m/s', description='LSRK velocity'))\n table.add_column(Column(name='optical_depth', data=opacity, description='Absorption as exp(-tau)'))\n table.add_column(Column(name='flux', data=flux, unit='Jy', description='Flux per beam'))\n table.add_column(Column(name='sigma_od', data=sigma_tau, description='Noise in the absorption profile, relative to exp(-tau)'))\n table.add_column(Column(name='em_mean', data=em_mean, unit='K', description='Mean brightness temperature around the source'))\n table.add_column(Column(name='em_std', data=em_std, unit='K', description='Noise level in the brightness temperature'))\n table.add_column(Column(name='smoothed_od', data=smoothed_od, description='Absorption as exp(-tau) smoothed using a 5 point Hanning window'))\n table.add_column(Column(name='sigma_smoothed_od', data=sigma_smoothed_od, description='1-sigma noise level in the absorption profile, relative to exp(-tau)'))\n\n votable = from_table(table)\n if metadata is not None:\n for key in metadata:\n votable.infos.append(Info(name=key, value=metadata[key]))\n writeto(votable, filename)", "title": "" }, { "docid": "0542ce4df52cffe1dd24dab23599ea87", "score": "0.54450804", "text": "def figure_exp_24(fig_type = '.png'):\n temp, save_folder, specific_folder, temp, file_save = get_figure_data_settings(fig_no = 24)\n file2save_npz = 
'fig24_' + '.npz'\n \n file2save = 'NBQX_block' + fig_type\n folder_save = save_folder + specific_folder + '_results/'\n y_range = [-30,30]\n display.plot_data(folder_save, file2save,folder_save,file_save,x_scale = 'sec', \n y_range=y_range, time_range = [11000,19000], electrodes=[1,2,3],\n y_range_intra = y_range)", "title": "" }, { "docid": "cea6488197bf5ef2f51449faf3a016cc", "score": "0.54408354", "text": "def powerspectrum(self, max_cycles=50, sampling=5, vmax=100, iterate=False):\n\n # import SFT\n\n def tvcircle(radius=1, xcen=0, ycen=0, center=None, **kwargs):\n \"\"\"\n draw a circle on an image.\n\n radius\n xcen\n ycen\n center= tuple in (Y,X) order.\n \"\"\"\n if center is not None:\n xcen = center[1]\n ycen = center[0]\n t = np.arange(0, np.pi * 2.0, 0.01)\n t = t.reshape((len(t), 1))\n x = radius * np.cos(t) + xcen\n y = radius * np.sin(t) + ycen\n plt.plot(x, y, **kwargs)\n\n cmap = copy.copy(matplotlib.cm.get_cmap(poppy.conf.cmap_diverging))\n cmap.set_bad('0.3')\n\n plt.clf()\n plt.subplot(231)\n self.display(title='full wavefront', clear=False, colorbar=False, vmax=vmax)\n\n ps_pixel_size = 1. / sampling # how many cycles per pixel\n trans = SFT.SFT3(self.data, max_cycles * 2, max_cycles * 2 * sampling)\n\n abstrans = np.abs(trans)\n\n extent = [-max_cycles, max_cycles, -max_cycles, max_cycles]\n\n plt.subplot(233)\n plt.imshow(abstrans, extent=extent)\n plt.title(\"Power Spectrum of the phase\")\n plt.ylabel(\"cycles/aperture\")\n tvcircle(radius=5, color='k', linewidth=1) # , ls='--')\n tvcircle(radius=30, color='k', linewidth=1) # 2, ls='--')\n plt.gca().set_xbound(-max_cycles, max_cycles)\n plt.gca().set_ybound(-max_cycles, max_cycles)\n\n y, x = np.indices(abstrans.shape)\n y -= abstrans.shape[0] / 2.\n x -= abstrans.shape[1] / 2.\n r = np.sqrt(x ** 2 + y ** 2) * ps_pixel_size\n\n mask = np.ones_like(self.data)\n mask[np.where(self.amplitude == 0)] = np.nan\n wgood = np.where(self.amplitude != 0)\n\n components = []\n for i, label in enumerate(['low', 'mid', 'high']):\n plt.subplot(2, 3, i + 4)\n if label == 'low':\n condition = r <= 5\n elif label == 'mid':\n condition = (r > 5) & (r <= 30)\n else:\n condition = (r > 30)\n filtered = trans * condition\n\n inverse = SFT.SFT3(filtered, max_cycles * 2, self.opd.shape[0], inverse=True)\n inverse = inverse[::-1, ::-1] # I thought SFT did this but apparently this is necessary to get the high freqs right...\n\n plt.imshow(inverse.real * mask, vmin=(-vmax) / 1000., vmax=vmax / 1000,\n cmap=cmap) # vmax is in nm, but WFE is in microns, so convert\n plt.title(label + \" spatial frequencies\")\n rms = (np.sqrt((inverse.real[wgood] ** 2).mean()) * 1000)\n\n components.append(rms)\n plt.xlabel(\"%.3f nm RMS WFE\" % rms)\n\n return np.asarray(components)", "title": "" }, { "docid": "aa031d6616902bbb71cafffe271e0834", "score": "0.5439782", "text": "def saveAllPosPlots():\n plt.clf(); makeRadPlot(); plt.savefig('plots/posDepRad.pdf');\n plt.clf(); makeRadPlotZoom(); plt.savefig('plots/posDepRadZoom.pdf');\n plt.clf(); makePolPlot(); plt.savefig('plots/posDepPol.pdf');\n plt.clf(); makePolPlotZoom(); plt.savefig('plots/posDepPolZoom.pdf');", "title": "" }, { "docid": "60a6165722227a8d22b10b8be82d98bd", "score": "0.5436176", "text": "def save_waveform(self,filename):\n self.scope.save_waveform(self.n,filename)", "title": "" }, { "docid": "f46c5bee83c8328198cb553ab111a1d3", "score": "0.5429792", "text": "def plot(self,output_folder= os.getcwd):\n fig1 = plt.figure()\n plt.plot(self.x,self.y,'bo')\n plt.plot(self.x , 
lorentzian(self.x,self.popt[0],self.popt[1],self.popt[2],self.popt[3]))\n plt.xlabel('Wave number $cm^{-1}$')\n plt.ylabel('Counts per second')\n plt.show()", "title": "" }, { "docid": "a69d83f40a878a4471fd824561fb7d24", "score": "0.5416961", "text": "def oned_spectrum(self, verbose=True):\n\n #### Paremters from the full model\n self.grism_sci = np.cast[np.float64](self.im['SCI'].data)\n self.full_model = np.cast[np.float64](self.im['CONTAM'].data + self.im['MODEL'].data)\n self.the_object = np.cast[np.float64](self.im['MODEL'].data)\n self.grism_wht = np.cast[np.float64](self.im['WHT'].data)\n self.wave = np.cast[np.float64](self.im['WAVE'].data)\n self.ytrace = np.cast[np.float64](self.im['YTRACE'].data)\n self.sens = np.cast[np.float64](self.im['SENS'].data)\n self.root = self.file.split('_%05d' %(self.id))[0]\n \n t0 = time.time()\n \n obj_cleaned = self.grism_sci - (self.full_model - self.the_object)\n contam_cleaned = self.grism_sci - self.the_object\n variance = self.grism_wht**2\n \n #### Get extraction window where profile is > 10% of maximum \n osh = self.the_object.shape\n xarr = np.arange(osh[1])\n # xint14 = int(np.interp(1.3e4, self.wave, xarr))\n # if self.grism_element == 'G102':\n # xint14 = int(np.interp(0.98e4, self.wave, xarr))\n # #\n # if self.grism_element == 'G800L':\n # xint14 = int(np.interp(0.75e4, self.wave, xarr))\n #wlim = grism_wlimit[self.grism_element]\n wlim = self.wlim\n xint14 = int(np.interp(wlim[3], self.wave, xarr))\n \n yprof = np.arange(osh[0])\n profile = np.sum(self.the_object[:,xint14-10:xint14+10], axis=1)\n profile = profile/profile.sum()\n \n #profile2D = np.dot(profile.reshape((-1,1)), np.ones((1,osh[1])))\n window = profile >= 0.1*profile.max()\n \n #### Use object as the profile since it contains the y shift along the trace\n prof_x = np.sum(self.the_object, axis=0)\n profile2D = self.the_object/np.dot(np.ones((osh[0],1)), prof_x.reshape((1,-1)))\n \n if (1 == 0):\n plt.plot(yprof, profile, color='blue')\n plt.plot(yprof[window], profile[window], color='red', linewidth=3)\n print np.trapz(profile[window], xprof[window]) / np.trapz(profile, xprof)\n \n obj_cleaned[variance == 0] = 0\n contam_cleaned[variance == 0] = 0\n \n #### Simple sums, also weighted\n weights = 1./variance\n weights[variance == 0] = 0\n \n var_sum = np.sum(weights[window,:], axis=0)\n \n weighted_sum = np.sum((obj_cleaned*weights)[window,:], axis=0) / var_sum\n weighted_var = 1./np.sum(weights[window,:], axis=0)\n weighted_sig = np.sqrt(weighted_var)\n \n simple_sum = np.sum(obj_cleaned[window,:], axis=0)\n simple_var = np.sum(variance[window,:], axis=0)\n simple_sig = np.sqrt(simple_var)\n \n #### Optimal extraction\n opt_variance = variance.copy()\n opt_variance[opt_variance == 0] = 1.e6\n \n num = np.sum(profile2D*obj_cleaned/opt_variance, axis=0)\n #num_contam = np.sum(profile2D*contam_cleaned/opt_variance, axis=0)\n num_contam = np.sum(profile2D*(self.full_model - self.the_object)/opt_variance, axis=0)\n num_full = np.sum(profile2D*self.grism_sci/opt_variance, axis=0)\n \n denom = np.sum(profile2D**2 / opt_variance, axis=0)\n \n optimal_sum = num / denom\n optimal_sum_contam = num_contam / denom\n optimal_sum_full = num_full / denom\n \n optimal_var = 1./np.sum(profile2D**2/opt_variance, axis=0)\n optimal_sig = np.sqrt(optimal_var)\n \n trace_pix = np.cast[int](np.round(self.ytrace))\n #trace_spec = self.grism_sci[trace_pix,:]\n trace_spec = optimal_sum*0.\n trace_sig = optimal_sum*0.\n for i in range(osh[1]):\n trace_spec[i] = self.grism_sci[trace_pix[i],i]\n 
trace_sig[i] = self.grism_wht[trace_pix[i],i]\n \n scale_to_total = 1./np.max(profile) \n if not np.isfinite(scale_to_total):\n scale_to_total=-1\n \n c1 = pyfits.Column(name='wave', format='D', unit='ANGSTROMS', array=self.wave)\n c2 = pyfits.Column(name='flux', format='D', unit='ELECTRONS/S', array=optimal_sum_full)\n c3 = pyfits.Column(name='error', format='D', unit='ELECTRONS/S', array=optimal_sig)\n c4 = pyfits.Column(name='contam', format='D', unit='ELECTRONS/S', array=optimal_sum_contam)\n c5 = pyfits.Column(name='trace', format='D', unit='ELECTRONS/S', array=trace_spec)\n c6 = pyfits.Column(name='etrace', format='D', unit='ELECTRONS/S', array=trace_sig)\n c7 = pyfits.Column(name='sensitivity', format='D', unit='E/S / 1E-17 CGS', array=self.sens*(self.growx*self.growy))\n #print 'MAX SENS: %.f' %(self.sens.max())\n \n coldefs = pyfits.ColDefs([c1,c2,c3,c4,c5,c6,c7])\n head = pyfits.Header()\n head.update('ATRACE', scale_to_total, comment='Factor to scale trace to total flux')\n \n #ii = np.where(np.cast[int](self.cat.id) == self.id)[0][0]\n try:\n head.update('RA', self.im[0].header['RA'], comment='Target R.A.')\n head.update('DEC', self.im[0].header['DEC'], comment='Target Dec.')\n except:\n head.update('RA', 0., comment='Target R.A.')\n head.update('DEC', 0., comment='Target Dec.')\n #\n head.update('X_PIX', self.x_pix, comment='X pixel in interlaced image')\n head.update('Y_PIX', self.y_pix, comment='Y pixel in interlaced image')\n head.update('MAG', self.im[0].header['MAG'], comment='MAG_AUTO from interlaced catalog')\n \n tbHDU = pyfits.new_table(coldefs, header=head)\n tbHDU.writeto(self.root+'_%05d.1D.fits' %(self.id), clobber=True)\n \n if verbose:\n t1 = time.time(); dt = t1-t0; t0=t1\n print '1D spectrum (%.3f)' %(dt)", "title": "" }, { "docid": "9da54d485cfbebc39986c765199afa50", "score": "0.5416946", "text": "def save_and_show_figs(self, save_figs=False, show_figs=True, fig_path=None):\n\n if self.reward:\n self.ax1.set_xlabel('Steps', fontsize=22)\n self.ax1.set_ylabel('Average Reward', fontsize=22)\n self.ax1.set_title('Bandit Testbed of Strategies', fontsize=22)\n self.ax1.legend(bbox_to_anchor=(1, 1), loc='upper left', fancybox=True, fontsize=20)\n self.ax1.set_xlim([0, len(self.average_rewards)])\n self.ax1.tick_params(axis='both', which='major', labelsize=20)\n self.ax1.tick_params(axis='both', which='minor', labelsize=20)\n\n if save_figs:\n if fig_path is None:\n fig_path = os.getcwd() + '/../figs'\n self.fig1.savefig(os.path.join(fig_path, 'bandit_rewards.png'), bbox_inches='tight')\n\n if self.regret:\n self.ax2.set_xlabel('Steps', fontsize=22)\n self.ax2.set_ylabel('Cumulative Regret', fontsize=22)\n self.ax2.set_title('Bandit Testbed of Strategies', fontsize=22)\n self.ax2.legend(bbox_to_anchor=(1, 1), loc='upper left', fancybox=True, fontsize=20)\n self.ax2.set_xlim([0, len(self.average_regret)])\n self.ax2.tick_params(axis='both', which='major', labelsize=20)\n self.ax2.tick_params(axis='both', which='minor', labelsize=20)\n\n if save_figs:\n if fig_path is None:\n fig_path = os.getcwd() + '/../figs'\n self.fig2.savefig(os.path.join(fig_path, 'bandit_regret.png'), bbox_inches='tight')\n\n if self.optimal:\n self.ax3.set_xlabel('Steps', fontsize=22)\n self.ax3.set_ylabel(r'$\\%$ Optimal Action', fontsize=22)\n self.ax3.set_title('Bandit Testbed of Strategies', fontsize=22)\n self.ax3.legend(bbox_to_anchor=(1, 1), loc='upper left', fancybox=True, fontsize=20)\n self.ax3.set_xlim([0, len(self.average_optimal)])\n self.ax3.tick_params(axis='both', 
which='major', labelsize=20)\n self.ax3.tick_params(axis='both', which='minor', labelsize=20)\n\n if save_figs:\n if fig_path is None:\n fig_path = os.getcwd() + '/../figs'\n self.fig3.savefig(os.path.join(fig_path, 'bandit_optimal_action.png'), bbox_inches='tight')\n\n if show_figs:\n plt.show()\n\n sns.reset_orig()", "title": "" }, { "docid": "13a0d6bfbd0337c6befe6499234a16b3", "score": "0.54161906", "text": "def model_save_to_fits(data, metadata, savepath, version):\n if os.path.exists(savepath) == False:\n os.mkdir(savepath)\n file_name = make_component_filename(metadata, version)\n hdr = fits.Header(metadata)\n primary_hdu = fits.PrimaryHDU(header=hdr)\n hdu = fits.table_to_hdu(Table(data))\n descriptions =['midpoint of the wavelength bin', 'left/blue edge of the wavelength bin','right/red edge of the wavelength bin','average flux over the bin']\n hdu.header.insert(8, ('EXTNAME','SPECTRUM'))\n hdu.header.insert(9, ('EXTNO',2))\n [hdu.header.insert(i[0]+10, ('TDESC%s' %(i[0]), i[1])) for i in enumerate(descriptions)]\n hdul = fits.HDUList([primary_hdu, hdu])\n hdul.writeto(savepath+file_name+'.fits', overwrite=True)\n print('Spectrum saved as '+file_name+'.fits')", "title": "" }, { "docid": "aaff2a6a9e8a1403ea28312c3030d7f9", "score": "0.54116267", "text": "def plot_topo_power(epochs, power, freq, layout=None, baseline=None,\r\n mode='mean', decim=1, colorbar=True, vmin=None, vmax=None,\r\n cmap=None, layout_scale=0.945, dB=True, title=None):\r\n times = epochs.times[::decim].copy()\r\n if mode is not None:\r\n if baseline is None:\r\n baseline = epochs.baseline\r\n power = rescale(power.copy(), times, baseline, mode)\r\n times *= 1e3\r\n if dB:\r\n power = 20 * np.log10(power)\r\n if vmin is None:\r\n vmin = power.min()\r\n if vmax is None:\r\n vmax = power.max()\r\n if layout is None:\r\n from .layouts.layout import find_layout\r\n layout = find_layout(epochs.info)\r\n\r\n power_imshow = partial(_imshow_tfr, tfr=power.copy(), freq=freq)\r\n\r\n fig = _plot_topo(info=epochs.info, times=times,\r\n show_func=power_imshow, layout=layout, decim=decim,\r\n colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,\r\n layout_scale=layout_scale, title=title, border='w',\r\n x_label='Time (s)', y_label='Frequency (Hz)')\r\n\r\n return fig", "title": "" }, { "docid": "fcf920cdfb8fd0932c195df145d3bec6", "score": "0.53973335", "text": "def save_hard_drive(self):\n self.dialog_box.clear()\n self.dialog_box.textCursor().insertText(\n \"Salvando nuvem de pontos...\\n\"\n )\n self.repaint()\n # path_pcd = (\n # \"/var/tmp/trms/crops\"\n # + self.mission_id\n # + \"/\"\n # + self.mission_id\n # + \"_\"\n # + self.current_stock\n # + \".pcd\"\n # )\n path_txt = (\n \"/var/tmp/trms/crops\"\n + self.mission_id\n + \"/\"\n + self.mission_id\n + \"_\"\n + self.current_stock\n + \".txt\"\n )\n path_png = (\n \"/var/tmp/trms/crops\"\n + self.mission_id\n + \"/\"\n + self.mission_id\n + \"_\"\n + self.current_stock\n + \".png\"\n )\n with open(self.path_to_cached_pc, \"r\", encoding=\"utf-8\") as cache_file:\n text = cache_file.read()\n with open(path_txt, \"w\", encoding=\"utf-8\") as txt_file:\n txt_file.write(text)\n\n os.system(\n \"./extconverter \"\n + path_txt\n + \" -D /var/tmp/trms/crops\"\n + self.mission_id\n + \"/\"\n )\n\n xyz_points = np.loadtxt(path_txt, delimiter=\" \")\n xyz_points = xyz_points[:, :3]\n z_points = xyz_points[:, 2]\n viewer = pptk.viewer(xyz_points, z_points)\n viewer.set(phi=-(np.pi / 2 - 0.1933), theta=np.pi / 2)\n if self.current_stock in [\"1A\", \"3A\"]:\n viewer.w_Resize()\n 
viewer.set(r=75)\n elif self.current_stock in [\"1B\", \"3B\"]:\n viewer.w_Resize()\n viewer.set(r=90)\n else:\n viewer.w_resize()\n viewer.set(r=150)\n box_plot = plt.boxplot(z_points)\n min_value = box_plot[\"whiskers\"][0]._y[\n 0\n ] # Minimum value of the minimum range\n max_value = box_plot[\"whiskers\"][1]._y[\n 1\n ] # Maximum value of the maximum range\n viewer.color_map(\"jet\", scale=[min_value, max_value])\n viewer.set(show_info=False)\n sleep(1.5)\n viewer.capture(path_png)\n sleep(0.5)\n viewer.close()\n volume = float(self.calc_click())\n modification_file_path = os.path.join(\n \"/var/tmp/trms/crops\" + self.mission_id, \"changes\"\n )\n with open(modification_file_path, \"w\", encoding=\"utf-8\") as changes:\n changes.write(self.current_stock + \" \" + str(volume))\n\n if self.review_mode:\n self.dialog_box.clear()\n self.dialog_box.textCursor().insertText(\n \"Nuvem de pontos revisada!\\n\"\n )\n else:\n self.dialog_box.clear()\n self.dialog_box.textCursor().insertText(\"Nuvem de pontos salva!\\n\")\n self.flag_modification = False\n self.repaint()", "title": "" }, { "docid": "105b1504378d7becbec4eb135eb76c05", "score": "0.5395983", "text": "def plotCombinedFigures(x, y, w, s, title=\"\", left=400, right=800):\n fig, (axes, axesFFT) = plt.subplots(2, 1, figsize=(10, 10))\n plt.subplots_adjust(hspace=0.31)\n axes.plot(x, y, '-')\n axes.set_title(\"Interferogramme\")\n axes.set_xlabel(\"Déplacement du miroir 1 [µm]\")\n axes.set_ylabel(\"Voltage [mV]\")\n axesFFT.plot(w * 1000, abs(s), '.-')\n axesFFT.set_xlim(left=left, right=right)\n axesFFT.set_xlabel(\"Longueur d'onde [nm]\")\n axesFFT.set_ylabel(\"Intensité [-]\")\n axesFFT.set_title(title)\n plt.savefig(f\"FiguresIM/{title.replace(' ', '').replace(',', '').replace('.', '').replace('è', 'e')}.png\", dpi=300)", "title": "" }, { "docid": "a9763d78a45fae090fadccac2eb0c516", "score": "0.5393554", "text": "def plot_power_spectrum(ax, modes, spectrum,\n label=\"\",\n dimensionless=True, mode=\"Pofk\", n_dim=3,\n x_units=None, y_units=None, h_factors=True,\n x_axis_label=True, y_axis_label=True,\n x_scale=\"log\", y_scale=\"log\",\n **kwargs):\n\n if mode.lower() == \"cl\":\n x_label = r\"$\\ell$\"\n if dimensionless:\n y_label = r\"$\\ell(\\ell+1)/2\\pi\\ C_\\ell$\"\n u = modes*(modes+1)/(2*pi)\n else:\n y_label = r\"$C_\\ell$\"\n u = 1\n elif mode.lower() == \"pofk\":\n x_label = r\"$k$\"\n if x_units is None:\n h_term = \"$h$\" if h_factors else \"\"\n x_units = \"[\" + h_term + \" \" + \"Mpc$^{-1}$\" + \"]\"\n x_label += \" \" + x_units\n if y_units is None:\n if h_factors:\n h_term = f\"$h^{{-{n_dim}}}$\"\n else:\n h_term = \"\"\n mpc_term = \"Mpc\" if n_dim == 1 else f\"Mpc$^{{{n_dim}}}$\"\n y_units = \"[\" + h_term + \" \" + mpc_term + \"]\"\n\n if dimensionless:\n y_label = r\"$\\Delta^2(k)$\"\n u = modes**n_dim / (2*pi)**n_dim * 4*pi\n else:\n y_label = r\"$P(k)$\"\n y_label += \" \" + y_units\n u = 1\n else:\n raise ValueError(f\"Mode {mode} not supported.\")\n\n ax.plot(modes, u*spectrum, label=label, **kwargs)\n\n if x_axis_label:\n ax.set_xlabel(x_label)\n if y_axis_label:\n ax.set_ylabel(y_label)\n\n ax.set_xscale(x_scale)\n ax.set_yscale(y_scale)", "title": "" }, { "docid": "74d980d0458a894990ca5134c49aa464", "score": "0.5389359", "text": "def inoutplot(d,zparams={}):\n\n if 'fignr' in zparams.keys():\n plt.figure(zparams['fignr'])\n else:\n plt.figure(99)\n ax1=plt.subplot(221)\n plt.plot( d['tvec'], np.abs(d['tfield2'])**2)\n plt.plot( d['tvec'], np.abs(d['tfield1'])**2,linewidth=1)\n 
plt.legend([\"out\",\"in\"],loc=0)\n\n\n ax2=plt.subplot(222)\n plt.plot( d['omvec']/2.0/np.pi, db_abs2( d['ffield2']))\n plt.plot( d['omvec']/2.0/np.pi, db_abs2( d['ffield1']),linewidth=1)\n if 'fylim' in zparams.keys():\n plt.ylim(zparams['fylim'])\n plt.legend([\"out\",\"in\"],loc=0)\n ax3=plt.subplot(223)\n plt.imshow( np.abs(d['timefield'])**2,\n aspect='auto',\n origin='lower',\n extent=( np.min(d['tvec']), np.max(d['tvec']),\n np.min(d['zvec']), np.max(d['zvec'])))\n plt.colorbar()\n\n ax4=plt.subplot(224)\n ax=plt.imshow( db_abs2(d['freqfield']),\n aspect='auto',\n origin='lower',\n extent=( np.min(d['omvec'])/2.0/np.pi, np.max(d['omvec'])/2.0/np.pi,\n np.min(d['zvec']), np.max(d['zvec'])))\n plt.colorbar()\n if 'clim' in zparams.keys():\n ax.set_clim(zparams['clim'])\n\n ax1.set_xlabel(\"time / s\")\n ax1.set_ylabel(\"power / W\")\n ax2.set_xlabel(\"frequency / Hz\")\n ax2.set_ylabel(\"spectral energy density / dB\")\n ax3.set_xlabel(\"time / s\")\n ax3.set_ylabel(\"z / m\")\n ax4.set_xlabel(\"frequency / Hz\")\n ax4.set_ylabel(\"z / m\") \n\n return [ax1,ax2,ax3,ax4]", "title": "" }, { "docid": "a0a76154d50560ec46e7fcc06d6b7585", "score": "0.53862613", "text": "def compute_powerspectrum(self):\n self.bk_directsample(psonly=True)", "title": "" } ]
d7721b2c84a1088f629f9e0fab2d0634
Video streaming home page.
[ { "docid": "a905fb886ac06831242b1eb30e3c08d3", "score": "0.0", "text": "def index():\n return render_template('index.html')", "title": "" } ]
[ { "docid": "f8fc737c05951ba93953b96f7af39dfa", "score": "0.7695669", "text": "def index():\r\n return render_template('video.html')", "title": "" }, { "docid": "b609a4c6e85aa233bafd14bfcf25d5aa", "score": "0.6744941", "text": "def stream(request):\n # Get a list of rooms, ordered alphabetically\n rooms = Room.objects.order_by(\"title\")\n\n @gzip.gzip_page\n def video_feed(request):\n try:\n return StreamingHttpResponse(gen(camera_open()),content_type='multipart/x-mixed-replace; boundary=frame')\n except HttpResponseServerError as e:\n print(\"aborted\")\n \n return render(request, \"api.html\", {\n \"rooms\": rooms,\n\n })", "title": "" }, { "docid": "04c2b10d54faf0f231a80bc0d9aca2ca", "score": "0.66371745", "text": "def serve(request, video_id):\n \n\n Viewed.log_view(request, video_id)\n video = Video.objects.get(pk=video_id)\n filename = video.uploaded_video.name.split('/')[-1]\n response = StreamingHttpResponse(video.uploaded_video, content_type='video/mp4')\n response['Content-length'] = video.uploaded_video.file.size\n return response", "title": "" }, { "docid": "6c2fea7a8d2e1e347c9615cc6caef5bf", "score": "0.65382284", "text": "def index():\n return redirect(\n \"https://app.swaggerhub.com/apis-docs/kahache/VideoPackagingPlatform\"\n \"/1.0.0\",\n code=302)", "title": "" }, { "docid": "ecaeb93e8ceae094dc29755a5e65c002", "score": "0.64452755", "text": "def video_feed():\n\n return Response(get_video_stream(topic = 1), \n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "5bc58fc6a2ae8562c6cd45f5790bbb12", "score": "0.64205784", "text": "def prepareVideo():\r\n pass", "title": "" }, { "docid": "59d98a3d9319f6994e982a40c2ba1d4c", "score": "0.6398351", "text": "def get(self):\n videos = loadVideos()\n name = self.get_secure_cookie(\"user\")\n self.render('data-list.html', title=\"video\",datalist=videos, dataType=\"video\", name=name, page=1)", "title": "" }, { "docid": "fdbebb562a70f53697cd9fd1cf9fe53e", "score": "0.6384763", "text": "def continue_video(self):\n \n\n\n print(\"continue_video needs implementation\")", "title": "" }, { "docid": "e866c85ab8c6ecaccb63dbf8229e8f59", "score": "0.6384676", "text": "def show_video(request):\n user = request.user\n context = {}\n board = UserBoard.objects.filter(user=user).order_by(\"id\").last()\n if board:\n image_link = board.board.image_link()\n context[\"image_link\"] = image_link\n context[\"mid\"] = board.board.mid\n return render(request, 'webcam/show_video.html',context)", "title": "" }, { "docid": "a932fd9a6a5e91de4ec69753ab61cfac", "score": "0.63807976", "text": "def homepage():\n\n payload = {\n 'api_key': API_KEY,\n 'page': 1\n }\n endpoint_url = MOVIE_URL + \"/movie/popular\"\n resp = requests.get(\n endpoint_url,\n params=payload)\n\n results = resp.json()\n # print(results['results'])\n\n return render_template(\"homepage.html\", movies=results['results'], imgUrl=IMG_URL, searched=False)", "title": "" }, { "docid": "c4469ee901c9a7b238d9de54254eae80", "score": "0.6349879", "text": "def video_feed():\n return Response(gen(main_camera),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "c44d561377ff4b6b580289cb21d39997", "score": "0.63485765", "text": "def video_feed():\n global cam\n cam = Camera()\n return Response(gen(cam),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "cd5d5598813b4d17372bc284f0fcf3ca", "score": "0.63460577", "text": "def video_feed():\n return Response(gen(VideoCamera()),\n 
mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "70cf36080022418d94d9ef346185e55d", "score": "0.6327309", "text": "def video_feed():\n return Response(gen(Camera()),mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "11cbb0f19af9d42963d492468a2c35fe", "score": "0.6293855", "text": "def video_feed():\n\n return Response(\n gen(Camera()), \n mimetype=\"multipart/x-mixed-replace; boundary=frame\")", "title": "" }, { "docid": "c5c54bb0d921b4c6f8830c758ef92b69", "score": "0.62903476", "text": "def video_feed():\n return Response(gen(Camera()),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "c5c54bb0d921b4c6f8830c758ef92b69", "score": "0.62903476", "text": "def video_feed():\n return Response(gen(Camera()),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "c5c54bb0d921b4c6f8830c758ef92b69", "score": "0.62903476", "text": "def video_feed():\n return Response(gen(Camera()),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "c5c54bb0d921b4c6f8830c758ef92b69", "score": "0.62903476", "text": "def video_feed():\n return Response(gen(Camera()),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "c5c54bb0d921b4c6f8830c758ef92b69", "score": "0.62903476", "text": "def video_feed():\n return Response(gen(Camera()),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "c5c54bb0d921b4c6f8830c758ef92b69", "score": "0.62903476", "text": "def video_feed():\n return Response(gen(Camera()),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "c5c54bb0d921b4c6f8830c758ef92b69", "score": "0.62903476", "text": "def video_feed():\n return Response(gen(Camera()),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "c5c54bb0d921b4c6f8830c758ef92b69", "score": "0.62903476", "text": "def video_feed():\n return Response(gen(Camera()),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "c5c54bb0d921b4c6f8830c758ef92b69", "score": "0.62903476", "text": "def video_feed():\n return Response(gen(Camera()),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "368bf02cc442845f02f552589299a348", "score": "0.6280458", "text": "def play_random_video(self):\n\n print(\"play_random_video needs implementation\")", "title": "" }, { "docid": "1d6f1b2d4a08249ac563e48bdf130875", "score": "0.6272033", "text": "def video_feed():\r\n return Response(gen(Camera()),\r\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "9a7879a692f1437350eaf46e8cdd4b68", "score": "0.62379384", "text": "def video_feed():\n return Response(gen(robot_vision),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "54e308db6f776355eae8c04384c687fe", "score": "0.62353545", "text": "def continue_video(self):\n\n print(\"continue_video needs implementation\")", "title": "" }, { "docid": "54e308db6f776355eae8c04384c687fe", "score": "0.62353545", "text": "def continue_video(self):\n\n print(\"continue_video needs implementation\")", "title": "" }, { "docid": "50ec466a016280f330fab6662d15c255", "score": "0.6224935", "text": "def main():\n initial_params = set_params()\n raw_response = get_raw_res(initial_params)\n\n if is_invalid_res(raw_response):\n return\n result, next_page_token = insert_items(raw_response)\n\n while next_page_token:\n req_params = 
set_params(next_page_token)\n raw_response = get_raw_res(req_params)\n if is_invalid_res(raw_response):\n return\n result, next_page_token = insert_items(raw_response, result)\n print(f\"\"\"\n*********RESULT*********\nALL VIDEOS : {len(result)}\n \nVIDEO DATA TITLE & CHANNEL\n \"\"\")\n titles = [x.__dict__['title'] for x in result]\n channels = [x.__dict__['channel'] for x in result]\n for idx in range(len(titles)):\n print(f\"TITLE : {titles[idx]}\")\n print(f\"CHANNEL : {channels[idx]}\")", "title": "" }, { "docid": "11da5c5f205fd44cf793ecafad1b9826", "score": "0.618488", "text": "def video_feed():\n config = app.config\n camera = config['camera']\n return Response(gen(camera),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "cf778c61da7acca403ade4b2a7f75fec", "score": "0.6152446", "text": "def yourvideos(request, template_name=\"videostream/yourvideos.html\", group_slug=None, bridge=None):\n \n if bridge:\n try:\n group = bridge.get_group(group_slug)\n except ObjectDoesNotExist:\n raise Http404\n else:\n group = None\n \n videos = VideoStream.objects.filter(author=request.user)\n \n if group:\n videos = group.content_objects(videos)\n \n videos = videos.order_by(\"-date_added\")\n \n return render_to_response(template_name, {\n \"group\": group,\n \"videos\": videos,\n }, context_instance=RequestContext(request))", "title": "" }, { "docid": "b227d01b4ce9009eb1cd9700411d5607", "score": "0.61380315", "text": "def live_preview():\n if request.method == 'GET':\n video = CameraVideo()\n setattr(g, 'video', video)\n filename = video.start_live_preview()\n #client = get_dropbox_session(session['username'])\n #client.upload(filename)\n elif request.method == 'DELETE' and hasattr(g,'video'):\n video = g.video\n video.end_live_preview()", "title": "" }, { "docid": "abf78a88e9bdee6ea3f9be227c38cd12", "score": "0.61281693", "text": "def video_feed():\n return Response(gen(),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "abf78a88e9bdee6ea3f9be227c38cd12", "score": "0.61281693", "text": "def video_feed():\n return Response(gen(),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "abf78a88e9bdee6ea3f9be227c38cd12", "score": "0.61281693", "text": "def video_feed():\n return Response(gen(),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "abf78a88e9bdee6ea3f9be227c38cd12", "score": "0.61281693", "text": "def video_feed():\n return Response(gen(),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "abf78a88e9bdee6ea3f9be227c38cd12", "score": "0.61281693", "text": "def video_feed():\n return Response(gen(),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "abf78a88e9bdee6ea3f9be227c38cd12", "score": "0.61281693", "text": "def video_feed():\n return Response(gen(),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "abf78a88e9bdee6ea3f9be227c38cd12", "score": "0.61281693", "text": "def video_feed():\n return Response(gen(),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "0d4f375e9229e1fa298fe1c217592d8d", "score": "0.6128097", "text": "def get_videos():\n os.chdir(upload_dir)\n p = subprocess.run(['python3 -m http.server 8080'], shell=True)\n os.chdir(working_dir)", "title": "" }, { "docid": "954081f729d61562b26b703dc8e427d1", "score": "0.6126951", "text": "def video_feed(self):\n return 
Response(response=stream_with_context(\n self.generate_frame(Camera())),\n mimetype=self.headers['Content-Type'],\n headers=self.headers,\n status=200,\n )", "title": "" }, { "docid": "5c8d39a8c33d50044407cf5292782f3f", "score": "0.61148876", "text": "def videopost(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/videopost.html',\n {\n 'title':'Видео',\n 'message':'Видео по теме сайта.',\n 'year':datetime.now().year,\n }\n )", "title": "" }, { "docid": "3717b5ed02370ab6bd06b548bc30b8b8", "score": "0.61037385", "text": "def format_serve(request, video_format_id):\n\n vf = VideoFormat.objects.get(pk=video_format_id)\n courses = request.user.course.all()\n if not set(courses) & set(vf.video.lecture_set.all()[0].module.courses.all()):\n print \"sdfdf\"\n raise PermissionDenied()\n\n \n filename = vf.file.name.split('/')[-1]\n response = StreamingHttpResponse(vf.file)\n response['Content-Type'] = \"video/%s\" % (vf.format)\n response['Content-length'] = vf.file.file.size\n return response", "title": "" }, { "docid": "960a51b0e76d8c1eedc3b5391f26a5de", "score": "0.6089764", "text": "def video_feed():\r\n return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "9156e211d54c1cbae063acbcc219ce04", "score": "0.60846335", "text": "def video_feed(self):\n\n # Check that video has been started\n if self.out == \"\":\n return\n\n # Loop until a new session is created\n while self.allow_stream:\n # Save start time to synchronise framerate\n start_time = time.time()\n\n # Encode image in .jpg format\n (flag, encodedImage) = cv2.imencode(\".jpg\", self.out)\n\n # Ensure the frame was successfully encoded\n if not flag:\n continue\n\n # Yield the output frame byte format\n yield(b'--frame\\r\\n' b'Content-Type: image/jpeg\\r\\n\\r\\n' +\n bytearray(encodedImage) + b'\\r\\n')\n\n # Wait until the next frame is required\n end_time = time.time()\n time_remain = start_time + 1 / settings.FRAMERATE - end_time\n\n if time_remain > 0:\n time.sleep(time_remain)", "title": "" }, { "docid": "2b50e827330e127413f9a7f641aaa176", "score": "0.60312927", "text": "def video_feed():\n app.logger.info(f'starting video feed')\n return Response(cam.get_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "05421ef12a4e394e07d71c224b2b30c5", "score": "0.59811556", "text": "def th_vs(self):\n threadd = threading.Thread(target=self.video_start, args=())\n threadd.daemon = True\n threadd.start()", "title": "" }, { "docid": "4fd7a54af2c5f8523934e41245c1be13", "score": "0.5975253", "text": "def video_feed1():\n return Response(gen1(Camera1()), mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "1181faeea096a0965b4efb724edceeed", "score": "0.5963422", "text": "def process_video(tag, url):\n return [( cat_alt, \"video\" )]", "title": "" }, { "docid": "05a67d65ea758c94e6ed259082cad22e", "score": "0.59524226", "text": "def view():\n return CourseVideoListView.as_view()", "title": "" }, { "docid": "14ba48a877db8da86699c3fc6958aefb", "score": "0.59497786", "text": "def playVideoPreview(self):\n err = OpticstarDLL.video_preview(True, 0, -1)\n if err != 0:\n raise Exception(\"Could not start video preview\")", "title": "" }, { "docid": "74a55baf7f11828336ad60bd00e1f135", "score": "0.5936489", "text": "def details(request, slug, template_name=\"videostream/details.html\", group_slug=None, bridge=None):\n \n if bridge:\n try:\n group = bridge.get_group(group_slug)\n except 
ObjectDoesNotExist:\n raise Http404\n else:\n group = None\n \n videos = VideoStream.objects.all()\n \n if group:\n videos = group.content_objects(videos)\n \n video = get_object_or_404(videos, slug=slug)\n \n # check if public or owned by self\n if not video.is_public and request.user != video.author:\n raise Http404\n \n if video.author == request.user:\n is_me = True\n else:\n is_me = False\n \n if video.flvfile:\n video.increment_count()\n \n return render_to_response(template_name, {\n \"group\": group,\n \"video\": video,\n \"is_me\": is_me,\n }, context_instance=RequestContext(request))", "title": "" }, { "docid": "f3da20fcd7444a1b112bcefdae31459f", "score": "0.59124213", "text": "def index():\n if request.headers.get('accept') == 'text/event-stream':\n return Response(watch(source), content_type='text/event-stream')\n else:\n return render_template(\n 'index.html',\n body=exports['body'],\n inlining=exports['inlining']\n )", "title": "" }, { "docid": "8b49811a4fcae506e5be21eb745d8966", "score": "0.58848244", "text": "def membervideos(request, username, template_name=\"videostream/membervideos.html\", group_slug=None, bridge=None):\n \n if bridge:\n try:\n group = bridge.get_group(group_slug)\n except ObjectDoesNotExist:\n raise Http404\n else:\n group = None\n \n user = get_object_or_404(User, username=username)\n \n videos = VideoStream.objects.filter(\n author__username = username,\n is_public = True,\n )\n \n if group:\n videos = group.content_objects(videos)\n \n videos = videos.order_by(\"-date_added\")\n \n return render_to_response(template_name, {\n \"group\": group,\n \"videos\": videos,\n }, context_instance=RequestContext(request))", "title": "" }, { "docid": "3026a4200f32a29a46164ec2c61f7539", "score": "0.58806336", "text": "def run(self):\n env = self.state.document.settings.env\n conf = env.app.config.videos_config\n docname = None if env is None else env.docname\n if docname is not None:\n docname = docname.replace(\"\\\\\", \"/\").split(\"/\")[-1]\n else:\n docname = '' # pragma: no cover\n\n source = self.state.document.current_source\n filename = self.arguments[0]\n\n if '://' in filename:\n logger = getLogger(\"video\")\n logger.warning(\n \"[video] url detected %r in docname %r - line %r.\",\n filename, docname, self.lineno)\n is_url = True\n else:\n is_url = False\n\n if not is_url:\n env.videos.add_file('', filename)\n\n srcdir = env.srcdir\n rstrel = os.path.relpath(source, srcdir)\n rstfold = os.path.split(rstrel)[0]\n cache = os.path.join(srcdir, conf['cache_path'])\n vid = os.path.join(cache, filename)\n abspath = None\n relpath = None\n\n if os.path.exists(vid):\n abspath = vid\n relpath = cache\n else:\n last = rstfold.replace('\\\\', '/')\n vid = os.path.join(srcdir, last, filename)\n if os.path.exists(vid):\n relpath = last\n abspath = vid\n\n if abspath is None:\n logger = getLogger(\"video\")\n logger.warning(\n \"[video] Unable to find %r in docname %r - line %r - srcdir=%r.\",\n filename, docname, self.lineno, srcdir)\n else:\n abspath = None\n relpath = None\n\n width = self.options.get('width', conf['default_video_width'])\n height = self.options.get('height', conf['default_video_height'])\n latex = self.options.get('latex', False) in (\n 'True', 'true', True, 1, \"1\")\n\n # build node\n node = self.__class__.video_class(uri=filename, docname=docname, lineno=self.lineno,\n width=width, height=height, abspath=abspath,\n relpath=relpath, is_url=is_url)\n node['classes'] += [\"place-video\"]\n node['video'] = filename\n node['latex'] = latex\n ns = 
[node]\n return ns", "title": "" }, { "docid": "e8be85585dbc9df9a270826e16cf561a", "score": "0.5860709", "text": "def _api_url(self):\n return \"/api/videos/\"", "title": "" }, { "docid": "5709c3189b4beef9bb9877d98faae403", "score": "0.5852711", "text": "def OnRunningVid(self, event): # wxGlade: wxVidFrame.<event_handler>\n if self.button_1.GetValue():\n self.vidWindow.StartLiveVideo()\n else:\n self.vidWindow.StopLiveVideo()\n event.Skip()", "title": "" }, { "docid": "acf9cb763824965a5997a91f39d48386", "score": "0.5837953", "text": "def video_feed1():\n return Response(gen1(),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "f2bf93f2eae75041ccc69974a7bb089b", "score": "0.5830965", "text": "def show_video_to_moderator(request,mid):\n user = request.user\n context = {}\n if not is_moderator(user):\n raise Http404(\"You are not allowed to view this page.\")\n else:\n board = Board.objects.filter(mid=mid).order_by(\"id\").last()\n if board:\n image_link = board.image_link()\n context[\"image_link\"] = image_link\n context[\"mid\"] = mid\n return render(request, 'webcam/show_video.html',context)", "title": "" }, { "docid": "dd7a3ea1d2e09597b9e0cc8aa0a79d7a", "score": "0.5827774", "text": "def video_feed():\n return Response(gen(detector),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "50ba7c9ea45a6580f483056ce01a44a2", "score": "0.5827373", "text": "def list_videos(self, request):\n videos = Video.objects.all().order_by(\"-published_at\")\n if videos.exists():\n page = self.paginate_queryset(videos)\n if page is not None:\n serialized = VideoSerializer(page, many=True)\n return self.get_paginated_response(serialized.data)\n return Response(status=http_status.HTTP_404_NOT_FOUND)", "title": "" }, { "docid": "b7574321fec482df01cea7f728b1ab0d", "score": "0.5819272", "text": "def index():\n # Getting all sources\n all_sources = get_sources(None)\n title = 'Welcome to The best News site'\n\n return render_template('index.html', title = title, sources = all_sources)", "title": "" }, { "docid": "dee957f3441b9f7b392a5887a10ce432", "score": "0.58162266", "text": "def video_feed_1():\n try:\n if cam0.isOpened() == False:\n print(\"Camera 1 Closed\")\n return Response(genBlank(),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n else:\n return Response(gen1(),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n except Exception as e:\n print(str(e))", "title": "" }, { "docid": "b3d605867a618806e54b963d38d98e03", "score": "0.57854414", "text": "def add_video_view(request):\n return render(request, 'videos/add_video.html')", "title": "" }, { "docid": "4bc54809f859161c6b99cbb067898621", "score": "0.5760212", "text": "def video_feed2():\n return Response(gen2(Camera2()),mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "1f218cd3b24adbdd4572ab896ea1f9c5", "score": "0.57557935", "text": "def main_feed():\n return Response(streaming_gen('streaming'),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "b8fed90d5c93ab718937e9d3119de56d", "score": "0.5724387", "text": "def randomVideoHandler(msg):\n c = Command(None)\n videoLink = c.searchForLinks(\"VIDEO\", None)[1].encode('ascii', 'ignore')\n logger.info('Responding with video message {0}'.format(videoLink))\n msg.Chat.SendMessage(videoLink)", "title": "" }, { "docid": "a91882baa1bd0ed4f2d05a5de7bf1154", "score": "0.57091963", "text": "def start_live_view(self):", "title": "" }, { "docid": 
"7a75d3c339c11f57d82dea77d7e3aaea", "score": "0.5688304", "text": "def show_search(self, motcle, th_id, subth_id):\n self.show_videos(theme_id=th_id,subtheme_id=subth_id,referer=\"Resultats pour '%s'\"%motcle,keyword=motcle)", "title": "" }, { "docid": "82ae14388ac3585802dc0f0256566da6", "score": "0.56769085", "text": "def IntroVideo(self):\n\n i = 433 \n while i <= 697:\n video = self.LoadImagesHelper(\"Pictures/start/Pictures\" + str(i) + \".jpg\", 1280, 715)\n i += 1\n self.__screen.blit(video, (0,0))\n self.__clock.tick(30)\n pygame.display.flip()\n self.__screen.fill(BLACK)", "title": "" }, { "docid": "d94f3e75d2ffde73d435d26f5f899859", "score": "0.5674983", "text": "def list_videos_view(request):\n video_list = Video.objects.filter(user_key=User.objects.get(user_name=request.COOKIES['user_name']).id)\n context = {'video_list': video_list }\n return render(request, 'videos/index.html', context)", "title": "" }, { "docid": "96f900b8abea00a5697d9cc2e1b6cf42", "score": "0.56694424", "text": "def video_feed2():\n return Response(gen2(),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "title": "" }, { "docid": "678d922974272412f68fa98425ba74aa", "score": "0.5666339", "text": "def load_video(self, dir:str):\n return None", "title": "" }, { "docid": "258a7962c1cdbcd6673594b4793b3133", "score": "0.566409", "text": "def index():\n news_sources= get_sources('general')\n return render_template('index.html',general=news_sources)", "title": "" }, { "docid": "7fdc8605ff8ccc2ba89f2ba252bb64aa", "score": "0.5659364", "text": "def get_video_stream_url(self,attrs):\n url = \"https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s\" % (attrs['acct'],attrs['video_id'])\n q = Request(url)\n q.add_header('Accept',\"application/json;pk=%s\" % attrs['policy_key'])\n out = urlopen(q).read()\n return json.loads(out)", "title": "" }, { "docid": "e32ab77e9cc69972c6b8404d837b2190", "score": "0.56361955", "text": "def page(self):\n\t\treturn Page(\n\t\t\tself.title, \n\t\t\tf'[Watch here!](https://www.youtube.com/watch?v={self.id})\\n\\n{self.description}', \n\t\t\tcolour=self.colour, \n\t\t\timage=self.thumbnail,\n\t\t\tthumbnail=self.icon,\n\t\t\tfooter=f'Uploaded {self.uploaded}'\n\t\t)", "title": "" }, { "docid": "3a46b4a2e921e1dde6df2c0ac786eda9", "score": "0.5621051", "text": "def pause_video(self):\n\n print(\"pause_video needs implementation\")", "title": "" }, { "docid": "5d8848e53512250c749287eab5675772", "score": "0.56178194", "text": "def list_videos():\n videos = get_videos()\n\n if not videos:\n xbmc.executebuiltin(\"Notification(BaivaruTV,Unauthorized. 
Please use a valid token)\")\n else:\n listing = []\n for video in videos:\n list_item = xbmcgui.ListItem(label=video['name'], thumbnailImage=video['thumb'])\n list_item.setProperty('fanart_image', video['thumb'])\n list_item.setInfo('video', {'title': video['name'], 'genre': video['genre'], 'cast': list(video['cast']),\n 'director': video['director'], 'tagline': video['tagline'],\n 'year': video['year'], 'rating': video['rating']})\n list_item.setProperty('IsPlayable', 'true')\n\n # Example: plugin://plugin.video.example/?action=play&video=http://www.vidsplay.com/vids/crab.mp4\n # url = '{0}?action=play&video={1}'.format(__url__, video['video'])\n url = video['video']\n is_folder = False\n\n listing.append((url, list_item, is_folder))\n\n xbmcplugin.addDirectoryItems(__handle__, listing, len(listing))\n xbmcplugin.setContent(__handle__, 'Movies')\n\n xbmcplugin.addSortMethod(__handle__, xbmcplugin.SORT_METHOD_VIDEO_TITLE)\n xbmcplugin.addSortMethod(__handle__, xbmcplugin.SORT_METHOD_VIDEO_RATING)\n xbmcplugin.addSortMethod(__handle__, xbmcplugin.SORT_METHOD_VIDEO_YEAR)\n\n xbmcplugin.endOfDirectory(__handle__)", "title": "" }, { "docid": "90111b01d0d8044913b21e9e11d3dd32", "score": "0.56165355", "text": "def videos_index_html(course, pagination_conf=None):\n is_video_transcript_enabled = VideoTranscriptEnabledFlag.feature_enabled(course.id)\n previous_uploads, pagination_context = _get_index_videos(course, pagination_conf)\n context = {\n 'context_course': course,\n 'image_upload_url': reverse_course_url('video_images_handler', str(course.id)),\n 'video_handler_url': reverse_course_url('videos_handler', str(course.id)),\n 'encodings_download_url': reverse_course_url('video_encodings_download', str(course.id)),\n 'default_video_image_url': _get_default_video_image_url(),\n 'previous_uploads': previous_uploads,\n 'concurrent_upload_limit': settings.VIDEO_UPLOAD_PIPELINE.get('CONCURRENT_UPLOAD_LIMIT', 0),\n 'video_supported_file_formats': list(VIDEO_SUPPORTED_FILE_FORMATS.keys()),\n 'video_upload_max_file_size': VIDEO_UPLOAD_MAX_FILE_SIZE_GB,\n 'video_image_settings': {\n 'video_image_upload_enabled': WAFFLE_SWITCHES.is_enabled(VIDEO_IMAGE_UPLOAD_ENABLED),\n 'max_size': settings.VIDEO_IMAGE_SETTINGS['VIDEO_IMAGE_MAX_BYTES'],\n 'min_size': settings.VIDEO_IMAGE_SETTINGS['VIDEO_IMAGE_MIN_BYTES'],\n 'max_width': settings.VIDEO_IMAGE_MAX_WIDTH,\n 'max_height': settings.VIDEO_IMAGE_MAX_HEIGHT,\n 'supported_file_formats': settings.VIDEO_IMAGE_SUPPORTED_FILE_FORMATS\n },\n 'is_video_transcript_enabled': is_video_transcript_enabled,\n 'active_transcript_preferences': None,\n 'transcript_credentials': None,\n 'transcript_available_languages': get_all_transcript_languages(),\n 'video_transcript_settings': {\n 'transcript_download_handler_url': reverse('transcript_download_handler'),\n 'transcript_upload_handler_url': reverse('transcript_upload_handler'),\n 'transcript_delete_handler_url': reverse_course_url('transcript_delete_handler', str(course.id)),\n 'trancript_download_file_format': Transcript.SRT\n },\n 'pagination_context': pagination_context\n }\n\n if is_video_transcript_enabled:\n context['video_transcript_settings'].update({\n 'transcript_preferences_handler_url': reverse_course_url(\n 'transcript_preferences_handler',\n str(course.id)\n ),\n 'transcript_credentials_handler_url': reverse_course_url(\n 'transcript_credentials_handler',\n str(course.id)\n ),\n 'transcription_plans': get_3rd_party_transcription_plans(),\n })\n context['active_transcript_preferences'] = 
get_transcript_preferences(str(course.id))\n # Cached state for transcript providers' credentials (org-specific)\n context['transcript_credentials'] = get_transcript_credentials_state_for_org(course.id.org)\n\n return render_to_response('videos_index.html', context)", "title": "" }, { "docid": "5a27bbe9bfab9a4f598db28f6a3c287d", "score": "0.5611529", "text": "def get(self, *args, **kwargs):\r\n self.render(\"home.html\", listret=G_WEBAPP.list_channels())", "title": "" }, { "docid": "003c4480238229cba3c93de17a05489c", "score": "0.56037545", "text": "def VideoStream():\n\tglobal Finished\n\t#display live video\n\tcv.NamedWindow(\"Badass video window\", cv.CV_WINDOW_AUTOSIZE)\n\t#peripheral devices begin at > 0\n\tglobal camera_index\n\tcapture = cv.CaptureFromCAM(camera_index)\n\tframe = cv.QueryFrame(capture)\n\twriter = cv.CreateVideoWriter(\"Stream.avi\", 0, 20, cv.GetSize(frame), 1) #\"filename\", codec,fps, frame_size, is_color=true\n\t#isight can't handle 30 fps so changed it to 15\n\tprint \"Calling thread at: \", time.time()\n\tThread(target = AudioStream).start()\n\ti = 1\n\twhile True:\n\t\tprint \"Recording Video Frame: \",i,\" At: \", time.time()\n\t\tframe = cv.QueryFrame(capture)\n\t\tcv.WriteFrame(writer, frame)\n\t\tcv.ShowImage(\"Badass video window\", frame)\n\t\tk = cv.WaitKey(10) #milliseconds\n\t\ti+=1\n\t\tif k == 0x1b: #ESC\n\t\t\tprint 'ESC pressed. Exiting ... Time is: ', time.time()\n\t\t\tbreak\n\tFinished = True\n\tcv.DestroyWindow(\"Baddass video window\")\n\tsys.exit(1)\n\treturn", "title": "" }, { "docid": "b10a80736b640a5718a3242950780d5e", "score": "0.5595902", "text": "def return_video():\n if flask.request.method == \"GET\":\n #try:\n text = flask.request.args['text']\n video = EmotionalVideos(text).create_video_link()\n if video == \"Glad to see that you're doing fine\":\n return flask.render_template('congrats.html', title='Home')\n else:\n return flask.render_template('videosss.html', link=video)\n\n #except Exception as e:\n # return 'oops... 
something happened'\n\n return 'do a post request'", "title": "" }, { "docid": "55dce06cfc1c9979894fd9aafab80b49", "score": "0.559589", "text": "def get(self, videoId):\n video = getVideo(videoId)\n name = self.get_secure_cookie(\"user\")\n self.render('data-detail.html', title=\"Video-detail\", data = video, dataType=\"video\", name=name, page=1)", "title": "" }, { "docid": "82cc76e5cb3a822e46874a9c31681942", "score": "0.55913323", "text": "def show_playing(self):\n\n if self._isPaused:\n videoStatus = \"- PAUSED\"\n else:\n videoStatus = \"\"\n if self._currentVideo is None:\n print(\"No video is currently playing\")\n return\n video = f\"{self._currentVideo.title} ({self._currentVideo.video_id}) [{self._currentVideo.tags_finale}]\"\n print(f\"Currently playing: {video} {videoStatus}\")", "title": "" }, { "docid": "724b4fff3cdfe18594da9064e3f7864c", "score": "0.55840176", "text": "def index(request):\n \n # get the highest ranked submissions\n top_ranked_videos = cache.get('top_ranked_videos')\n if not top_ranked_videos:\n top_ranked_videos = []\n for category in VoteCategory.objects.all():\n # for now, calculate an average for each video\n top_ranked_videos.append({\n 'vote_category': category, \n 'submissions': Submission.objects.filter(votes__v_category=category).annotate(average_rating=Avg('votes__rating')).order_by('-average_rating')[:5],\n })\n cache.set('top_ranked_videos', top_ranked_videos, 60*10)\n\n t = loader.get_template('home/index.html')\n c = RequestContext(request, {\n 'breadcrumbs': [{'url': reverse('home'), 'title': 'Home'}],\n 'parent_categories': Category.objects.filter(parent=None),\n 'top_ranked_videos': top_ranked_videos,\n 'vote_categories': VoteCategory.objects.all(),\n })\n return HttpResponse(t.render(c))", "title": "" }, { "docid": "b2ce5a952839830acd1e1d166d64acfb", "score": "0.5581194", "text": "def _VV_Video(self,tw=None):\n if self._state == \"start\":\n return\n if self._state == \"end\":\n return\n if tw['user']['screen_name'] not in self._usrset: return 0\n ## Here we check video!\n if 'extended_entities' not in tw: return 0\n types = set(et['type'] for et in tw['extended_entities']['media'])\n if 'video' not in types and 'animated_gif' not in types: return 0\n return 1", "title": "" }, { "docid": "749bb8090ce2e5074de968d8c6d61653", "score": "0.5565033", "text": "def index(request):\n shows = Show.objects.all()\n return render(request, \"voter/home.html\", {\"shows\": shows})", "title": "" }, { "docid": "3c9c3d9a8385a6458c747c6b7ab415fb", "score": "0.5564515", "text": "def show_trailer(self):\n\t\twebbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": "3c9c3d9a8385a6458c747c6b7ab415fb", "score": "0.5564515", "text": "def show_trailer(self):\n\t\twebbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": "3c9c3d9a8385a6458c747c6b7ab415fb", "score": "0.5564515", "text": "def show_trailer(self):\n\t\twebbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": "8fb2bb7fd2c2c3e059ab53770e85025b", "score": "0.556208", "text": "def ride_videos(self):\n print(self.index_list)\n for key in self.index_list:\n stream_imgs = []\n stream_range = pd.DataFrame()\n\n if len(self.data) <= 240:\n stream_range = self.data\n elif key - 120 <= 0:\n stream_range = self.data.loc[0:key + 240]\n elif key + 120 >= len(self.data):\n stream_range = self.data.loc[key - 240:]\n else:\n stream_range = self.data.loc[key - 120: key + 120]\n\n\n for image_path in sorted_alphanumeric(stream_range[self.config['col_list'][1]]):\n 
stream_imgs.append(image_path)\n\n\n video_writer = ffmpeg.input(\n 'pipe:',r='24', f='jpeg_pipe'\n ).output(\n self.config[\"ride_path\"]+'/'+self.integer_key+'_'+self.index_list[key],\n vcodec='libx264'\n ).overwrite_output().run_async(pipe_stdin=True)\n\n\n for image_dir in stream_imgs:\n with open(image_dir,'rb') as f:\n image_data = f.read()\n video_writer.stdin.write(image_data)\n\n video_writer.stdin.close()\n video_writer.wait()", "title": "" }, { "docid": "751704a43e4102cad87868ff0c710746", "score": "0.5551908", "text": "def get_videos(genre):\n if genre == 'search':\n kb = xbmc.Keyboard('', 'Please enter your search')\n kb.doModal()\n if not kb.isConfirmed():\n vid = []\n return vid\n query = kb.getText().replace(' ', '+')\n url = HOME_PAGE + '/movie/search/' + query\n elif genre == 'series':\n url = HOME_PAGE + '/movie/filter/series'\n else:\n url = HOME_PAGE + genre\n\n page = requests.get(url, headers=HEADERS)\n bs = BeautifulSoup(page.text, 'html.parser')\n vid = []\n indx = 0\n threads = []\n for a in bs.find('div', class_=\"movies-list movies-list-full\").find_all('a'):\n quality = a.find('span', class_='mli-quality')\n thumb = a.img['data-original']\n mid = a['data-url'].split('/')[-1]\n name = a['title']+' ['+quality.string+']' if quality else a['title']\n # try the cache\n data = _cache.get('%s.%s' % (APP_ID, mid))\n if data:\n if 'plot' in data:\n vid.append({'name':data['name'], 'mid':mid, 'thumb':data['thumb'], 'fanart':data['fanart'],\n 'plot':data['plot']})\n else:\n vid.append({'name':data['name'], 'mid':mid, 'thumb':data['thumb'], 'fanart':data['fanart']})\n else:\n vid.append({'name':name, 'mid':mid, 'thumb':thumb, 'fanart':thumb.replace('/poster/','/cover/')})\n # ajax call for the plot\n #t = threading.Thread(target=get_plot, args=(a['data-url'], vid[indx]))\n #threads.append((t, indx))\n #t.start()\n indx += 1\n\n #for t,i in threads:\n # t.join()\n # # cache data\n # data = {'name':vid[i]['name'], 'thumb': vid[i]['thumb'], 'fanart':vid[i]['fanart'], 'plot':vid[i]['plot']}\n # _cache.set('%s.%s' % (APP_ID, vid[i]['mid']), data)\n\n # next page\n n = bs.find('a', rel='next')\n if n is not None:\n url = n['href'].replace(HOME_PAGE, '')\n vid.append({'name':'Next', 'url':url})\n return vid", "title": "" }, { "docid": "410b7151edbc3a5d84a34305658d2501", "score": "0.55517054", "text": "def do_GET(self):\n if self.path.endswith('/cam.mjpg'):\n # Image stream\n self.send_response(200)\n self.send_header('Content-type', 'multipart/x-mixed-replace; boundary=--jpgboundary')\n self.end_headers()\n\n # Continue while stream is not finished\n while not shared_boolean.value:\n try:\n # Get the image\n imgRGB = cv2.cvtColor(shared_frame, cv2.COLOR_BGR2RGB)\n jpg = Image.fromarray(imgRGB)\n # Temporary file for JPEG\n tmpFile = StringIO.StringIO()\n jpg.save(tmpFile, 'JPEG')\n # Write the output\n self.wfile.write(\"--jpgboundary\")\n self.send_header('Content-type', 'image/jpeg')\n self.send_header('Content-length', str(tmpFile.len))\n self.end_headers()\n # Write jpeg image into the stream\n jpg.save(self.wfile, 'JPEG')\n # Delay before getting new frame\n time.sleep(0.1)\n except Exception as e:\n print(\"Exception\", e)\n break\n return\n if self.path.endswith('/main.html'):\n # Return main page\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write('<html><head></head><body>')\n self.wfile.write('<img src=\"http://{}:8080/cam.mjpg\"/>'.format(ip_address))\n self.wfile.write('</body></html>')\n return\n if 
self.path.endswith('/exit'):\n # Let's exit\n self.send_response(200)\n shared_boolean.value = 1\n server.shutdown()\n return\n if self.path.endswith('/fps'):\n # Return fps value\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(\"{:.2f}\".format(shared_float.value))\n return", "title": "" }, { "docid": "ca224b5cea8a3b806cddff7608aaa4af", "score": "0.55454564", "text": "async def stream(self, ctx, streamer=None, *, stream_title=None):\n\n server = ctx.message.server\n\n current_status = server.me.status if server is not None else None\n\n if stream_title:\n stream_title = stream_title.strip()\n if \"twitch.tv/\" not in streamer:\n streamer = \"https://www.twitch.tv/\" + streamer\n game = discord.Game(type=1, url=streamer, name=stream_title)\n await self.bot.change_presence(game=game, status=current_status)\n log.debug('Owner has set streaming status and url to \"{}\" and {}'.format(stream_title, streamer))\n elif streamer is not None:\n await self.bot.send_cmd_help(ctx)\n return\n else:\n await self.bot.change_presence(game=None, status=current_status)\n log.debug('stream cleared by owner')\n await self.bot.say(\"Done.\", delete_after=self.settings[server.id][\"delete_delay\"])", "title": "" }, { "docid": "d512514fbf9fb7eccbf163f92faaaf15", "score": "0.55423", "text": "def download_videos():\n driver = webdriver.Firefox()\n driver.get('https://godjango.com/browse/')\n WebDriverWait(driver, 30).until(EC.element_to_be_clickable((By.CSS_SELECTOR, GoDjango.first_video_title)))\n while True:\n for element in range(0, 10):\n all_video_elements = driver.find_elements_by_css_selector(GoDjango.first_video_title)\n all_video_elements[element].click()\n try:\n print driver.find_element_by_css_selector(GoDjango.video_download_link).get_attribute('src')\n except:\n print \"Video is private\"\n driver.execute_script(\"window.history.go(-1)\")\n driver.execute_script(\"window.scrollTo(0, 465);\")\n driver.execute_script(\"window.scrollTo(0, 4650);\")\n sleep(1)\n driver.find_element_by_css_selector(GoDjango.next_button_click).click()\n driver.quit()", "title": "" }, { "docid": "ca0d40fe87cd732ff421dae2d25a93ca", "score": "0.5540984", "text": "def LiveTVMenu(sender):\n dir = MediaContainer(viewGroup='Details', title2=sender.itemTitle)\n \n for station in LIVE_TV_STATIONS:\n url = station['url'] % Prefs.Get('livetv_quality')\n Log('Added %s' % url)\n \n dir.Append(WindowsMediaVideoItem(url, title=station['title'], summary=station['desc'], thumb=R('nrk-nett-tv.png'), width=768, height=432)) # TODO thumb=R(station['img'])\n \n return dir", "title": "" }, { "docid": "db6e4972be40081b2cd18ddaf1ccb96b", "score": "0.5536515", "text": "def play_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": "1c6dcdd43c38a1c73f8b9ed5fca2d2a4", "score": "0.552755", "text": "def video_feed_3():\n\n try:\n if cam2.isOpened() == False:\n print(\"Camera 3 Closed\")\n return Response(genBlank(),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n else:\n return Response(gen3(),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n except Exception as e:\n print(str(e))", "title": "" }, { "docid": "4708cd90740bf8aa82708b104f7bd62a", "score": "0.5521038", "text": "def serve(self): #{\n pass", "title": "" }, { "docid": "480f22234b47c3290206b4cda3445b5f", "score": "0.5515588", "text": "def show_playing(self):\n videos = self._video_library.get_all_videos()\n\n if self.status is not None: # video playing\n now_vid = next(video 
for video in videos if video.title == self.status)\n print(f\"Currently playing: {now_vid.title} ({now_vid.video_id}) [{' '.join([tag for tag in now_vid.tags])}]{' - PAUSED' if self.paused is True else ''}\")\n elif self.status is None: # no video playing\n print(\"No video is currently playing\")", "title": "" } ]
6c0e12b0634e743b2e8588efbc847419
Make Windows and POSIX compatible absolute paths automatically.
[ { "docid": "a35283b24e3488954e811d6e8f32c2f4", "score": "0.6681356", "text": "def makePath(path):\n\n compatPath = os.path.abspath(os.path.expanduser(path))\n\n return compatPath", "title": "" } ]
[ { "docid": "08c7a8fd11865ea66f5944d77bc7b886", "score": "0.71546054", "text": "def win2unix(a_path, use_abs=1):\r\n if use_abs:\r\n a_path = os.path.abspath(a_path)\r\n return re.sub(r\"\\\\\", \"/\", a_path)", "title": "" }, { "docid": "08c7a8fd11865ea66f5944d77bc7b886", "score": "0.71546054", "text": "def win2unix(a_path, use_abs=1):\r\n if use_abs:\r\n a_path = os.path.abspath(a_path)\r\n return re.sub(r\"\\\\\", \"/\", a_path)", "title": "" }, { "docid": "8d78036d9459c4b464812d3cb93c38c3", "score": "0.7055007", "text": "def make_posix_path(windows_path):\n for regex, sub in [\n (re.compile(r'\\\\'), '/'),\n (re.compile('^[Cc]:'), '/c'),\n ]:\n windows_path = regex.sub(sub, windows_path)\n return windows_path", "title": "" }, { "docid": "dd9731aaf2dd0033c0ca44debc279282", "score": "0.7039325", "text": "def fix_windows_path_limit(path):\n if platform.system() == 'Windows':\n if path.startswith('\\\\\\\\'):\n # UNC network path\n return '\\\\\\\\?\\\\UNC\\\\' + path[2:]\n elif os.path.isabs(path):\n # local absolute path\n return '\\\\\\\\?\\\\' + path\n else:\n # relative path, don't alter\n return path\n else:\n return path", "title": "" }, { "docid": "151e74f590f9208bb7b12b3729526c95", "score": "0.6760035", "text": "def makeRelativePathsAbsolute(cmdargs):\n for i in range(len(cmdargs)):\n if relativePathRE.match(cmdargs[i]):\n cmdargs[i]=os.path.abspath(cmdargs[i])", "title": "" }, { "docid": "e65abb8042c9bb5b7fdffeb93b9f03b6", "score": "0.67013", "text": "def path_creator(rel_path=''):\n if platform.system() != 'Windows':\n if rel_path == '':\n path_list=sys.argv[0].split('/')[:-1]\n return '/'.join(path_list)\n else:\n path_list = sys.argv[0].split('/')[:-1]\n return '/'.join(path_list) + '/' + rel_path\n else:\n if rel_path == '':\n path_list=sys.argv[0].split('\\\\')[:-1]\n path_res='\\\\'.join(path_list)\n return path_res\n else:\n path_list = sys.argv[0].split('\\\\')[:-1]\n rel_path=rel_path.split('/')\n path_res='\\\\'.join(path_list) + '\\\\' + '\\\\'.join(rel_path)\n return path_res", "title": "" }, { "docid": "40fbe25467db166e4a2449d6ab5ec6fb", "score": "0.6660485", "text": "def system_path(path):\n if is_windows(): return path.replace('/', '\\\\')\n else: return path.replace('\\\\', '/')", "title": "" }, { "docid": "8a7e9a34458c9bffd1d15f925b4905f0", "score": "0.6643326", "text": "def conditional_abspath (filename):\n if sys.platform.find('cygwin') != -1:\n return filename\n else:\n return os.path.abspath(filename)", "title": "" }, { "docid": "1216777214f4db11b7ae511ff020b255", "score": "0.65816987", "text": "def _make_abspath(value):\n value = value.strip()\n if not os.path.isabs(value):\n value = os.path.abspath(os.path.join(os.getcwd(), value))\n return value", "title": "" }, { "docid": "7ea13bc00712c2f530cee4e302e3fcc5", "score": "0.65658075", "text": "def _makeAbsolute(fname):\n if fname[0] != '/':\n return os.path.join(os.getcwd(), fname)\n else:\n return fname", "title": "" }, { "docid": "be0732868a111a87534833929063fbed", "score": "0.6497008", "text": "def force_absolute(base, path):\n if os.path.abspath(path) and os.path.exists(path):\n return path\n else:\n return path_format(base + path)", "title": "" }, { "docid": "9c08bfc9f52e2c7b57168d08bfd53a52", "score": "0.64913964", "text": "def to_posix(fname):\n import sys\n if sys.platform == 'win32': # pragma: nocover\n import os.path\n if os.path.isabs(fname):\n fname = '/' + fname\n fname = fname.replace('\\\\', '/')\n return fname", "title": "" }, { "docid": "796bbda9c3077ef9ef1ddc967ebbc93a", "score": 
"0.62997556", "text": "def convertString(path):\n if (\"win\" in sys.platform):\n return path.replace(\"/\",\"\\\\\")\n elif (\"linux\" in sys.platform):\n return path.replace(\"\\\\\",\"/\")", "title": "" }, { "docid": "6a4d6ed4a42c33906b3bdecb26f8b689", "score": "0.62365884", "text": "def normalized_file_path(path: str) -> str:\n # Convert Unix path to Windows path for WSL\n if PLATFORM == \"WSL\":\n return path.replace(\"/\", \"\\\\\")\n\n return path", "title": "" }, { "docid": "723242f7563cb2264a670ac9eda3048b", "score": "0.6227401", "text": "def _fixpath(p):\n return os.path.abspath(os.path.expanduser(p))", "title": "" }, { "docid": "f22aed0538d3847017028af46b9e42f0", "score": "0.6193982", "text": "def AbsoluteCanonicalPath(*path):\n\n file_path = os.path.join(*path)\n return os.path.realpath(os.path.abspath(os.path.expanduser(file_path)))", "title": "" }, { "docid": "6663df5291353ae609a8bb9681e307f4", "score": "0.6186717", "text": "def _path(unix_path):\n return unix_path.replace(\"/\", os.path.sep)", "title": "" }, { "docid": "c0143e59eb08ed7dabd0b3770ba6710d", "score": "0.61507773", "text": "def fix_path(path):\n return os.path.abspath(os.path.expanduser(path))", "title": "" }, { "docid": "b5aa30749b8c0e661602dbc1bca26226", "score": "0.61474043", "text": "def _normalized_path(path):\n return os.path.abspath(os.path.expanduser(path))", "title": "" }, { "docid": "51a08c601ee2554a315c0dcc0122228c", "score": "0.6121589", "text": "def qualify(path):\n if not absoluteRegexp.search(path):\n path = os.path.join(cwd, path)\n return path", "title": "" }, { "docid": "2f72fbcd6500ac59a665b2e7a0fc1c0c", "score": "0.61142534", "text": "def canonical_path(path, *paths, **kwargs):\n resolve_link = kwargs.pop('resolve_link', True)\n path = os.path.join(path, *paths)\n path = os.path.expanduser(path)\n if resolve_link:\n path = os.path.realpath(path)\n else:\n path = os.path.abspath(path)\n if os.path.isdir(path):\n path = os.path.join(path, '')\n return path", "title": "" }, { "docid": "44ba736082371dba8e236889edfd925b", "score": "0.61123097", "text": "def win_path_check(path):\n if IS_WIN:\n return path.replace(\"\\\\\", \"/\").replace(\":\", \"\\\\:\")\n return path", "title": "" }, { "docid": "e77dc6de75bd3d1c38a61b6e0397669d", "score": "0.6105689", "text": "def WindowsPath(path):\n # TODO(pamg): make this work for other drives too.\n if path.startswith('/cygdrive/c/'):\n return path.replace('/cygdrive/c/', 'C:/')\n return path", "title": "" }, { "docid": "14b5de5c99859bf67805cdc44adb92a7", "score": "0.60867196", "text": "def change_path_to_windows_style(input):\n\n try:\n new_output_path = re.sub(\"^/cygdrive/c/\", \"C:/\", input)\n except Exception as e:\n print e\n new_output_path = input\n\n return new_output_path", "title": "" }, { "docid": "59d01495300a5196ce5d7f12ba03e352", "score": "0.60763323", "text": "def _norm_path(filepath):\n return Path(os.path.abspath(os.path.normpath(\n os.path.expandvars(os.path.expanduser(str(filepath))))))", "title": "" }, { "docid": "ec7e1778a7b61a9f546ced88ba02b592", "score": "0.6070201", "text": "def to_absolute_path(path):\n if not os.path.isabs(path):\n return os.path.join(os.getcwd(), path)\n else:\n return path", "title": "" }, { "docid": "b10cf5cc744e1224a6b60adf2575ff2b", "score": "0.60413927", "text": "def abspath(path):\n\n return os.path.abspath(path).replace(\"\\\\\", \"/\")", "title": "" }, { "docid": "b6e261b4b77e62f3f1b8be1aac1b3a54", "score": "0.59982187", "text": "def abspath(filename, relative_to = None):\n # Create filename relative to 
the reference, if it exists.\n import os.path\n fname = from_posix(filename)\n if relative_to and not os.path.isabs(fname):\n relative_to = from_posix(relative_to)\n if os.path.isdir(relative_to):\n fname = os.path.join(relative_to, fname)\n else:\n fname = os.path.join(os.path.dirname(relative_to), fname)\n\n # Make the result canonical\n fname = canonical_filename(fname)\n return to_posix(fname)", "title": "" }, { "docid": "3f83a96413ab66516d18f1f062471bfc", "score": "0.5958428", "text": "def abspath(path):\n if not os.path.isabs(path):\n cwd = os.getcwdu()\n path = os.path.join(cwd, path)\n return os.path.normpath(path)", "title": "" }, { "docid": "e20a65b0d6c975fe634a25ce28bbf71b", "score": "0.59544873", "text": "def absolute_path(path):\n path = re.sub('~', os.environ['HOME'], str(path))\n if path[0] != '/':\n path = str(sh.pwd()).strip() + '/' + path\n return path", "title": "" }, { "docid": "7a59484167ae68e648d8bdfd877322ec", "score": "0.5947441", "text": "def absolute_path(path):\n return os.path.abspath(os.path.normpath(path))", "title": "" }, { "docid": "eb7c1ef01e08b5e1b6b15a6279b3d31a", "score": "0.59378546", "text": "def windows2msys(path):\n if not sys.platform.startswith('win32'):\n return path\n (drive, path) = os.path.splitdrive(os.path.abspath(path))\n return \"/\" + drive[0] + path.replace('\\\\', '/')", "title": "" }, { "docid": "cd80c9898eb5fcf65da29486c5318fd0", "score": "0.5937277", "text": "def get_absolute_path(*args):\n directory = os.path.dirname(os.path.abspath(__file__))\n return os.path.join(directory, *args)", "title": "" }, { "docid": "e98ad0425d4e839d945f3ec26b501dc1", "score": "0.59354055", "text": "def expand_config_path(path):\n if path == DEFAULT_LINUX_PATH and os.name == \"nt\":\n path = DEFAULT_WINDOWS_PATH\n return os.path.expanduser(path)", "title": "" }, { "docid": "c1d3f1e9256a96ec1ed6e966d4167eb7", "score": "0.59322435", "text": "def _config_absolute_paths(path):\n\n # Since I am calling the script from elsewhere these must be\n # absolute paths\n prepend = \"rpki.validator.data.path=\"\n replace = \".\"\n # Must remove trailing backslash at the end\n replace_with = RPKI_Validator_Wrapper.rpki_package_path[:-1]\n utils.replace_line(path, prepend, replace, replace_with)\n\n prepend = \"rpki.validator.preconfigured.trust.anchors.directory=\"\n replace = \"./preconfigured-tals\"\n replace_with = (f\"{RPKI_Validator_Wrapper.rpki_package_path}\"\n \"preconfigured-tals\")\n utils.replace_line(path, prepend, replace, replace_with)\n\n prepend = \"rpki.validator.rsync.local.storage.directory=\"\n replace = \"./rsync\"\n replace_with = f\"{RPKI_Validator_Wrapper.rpki_package_path}rsync\"\n utils.replace_line(path, prepend, replace, replace_with)", "title": "" }, { "docid": "6bdbc4155ee447ca7a2ad15fd00ad752", "score": "0.59170175", "text": "def nt_path_to_posix_path(path):\r\n path = path.replace(\"\\\\\", \"/\")\r\n parts = path.split(\":\")\r\n if len(parts) > 1:\r\n return \"/\" + parts[0].lower() + parts[1]\r\n return path", "title": "" }, { "docid": "10113ef5dc8f7ecdd7ba414321b6d4d4", "score": "0.59102654", "text": "def format_path (in_path):\n return os.path.realpath(os.path.expanduser(in_path))", "title": "" }, { "docid": "148645ed3ca56a021ef7989b20639df0", "score": "0.58967364", "text": "def _real_paths(config):\n for key in ('--config', '--ffmpeg-bin', '--log', '--music-source', '--working-dir'):\n if not config[key]:\n continue\n config[key] = os.path.realpath(os.path.expanduser(config[key]))", "title": "" }, { "docid": 
"347847ffdfc19096e41cae7574676d56", "score": "0.58937633", "text": "def relativeToAbsoluteHomePath(path):\n if \"~\" in path:\n return path.replace(\"~\",expanduser(\"~\"))\n else:\n return path", "title": "" }, { "docid": "cf4211e1f22ab7bd86798493db89673e", "score": "0.5884668", "text": "def _escape_path(path):\n path = path.strip()\n return '\"{0}\"'.format(path) if _platform_windows else path.replace(\" \", \"\\ \")", "title": "" }, { "docid": "ad62aa974ef221646e704d8eecf9d094", "score": "0.58800507", "text": "def robust_abspath(p):\n try:\n return abspath(p)\n except OSError as exc:\n if not isabs(p):\n try:\n os.getcwd()\n # if no exception raised it was not the reason, raise original\n raise\n except:\n return normpath(join(getpwd(), p))\n raise", "title": "" }, { "docid": "47a54a01283f89958964ba331cfd08d2", "score": "0.58489335", "text": "def native(path):\n path = _os.path.sep.join(path.split('/'))\n return _os.path.normpath(_os.path.join(root, path))", "title": "" }, { "docid": "1cb6001763cc6506a8152befa7c0d133", "score": "0.5829471", "text": "def _get_absolute(self, path: Path) -> Path:\n return path.expanduser().absolute()", "title": "" }, { "docid": "f5491bc03eb1475e459a740939ad429b", "score": "0.58250874", "text": "def path_normalize(path, target_os=None):\n if target_os and target_os is not os.name:\n raise NotImplementedError('Cannot join path with \"{target}\" style. '\n 'Host OS is \"{host}\".'.format(\n target=target_os,\n host=os.name))\n return os.path.normpath(path)", "title": "" }, { "docid": "e62c3a82d44540b9648ca6a37eaef83a", "score": "0.5814623", "text": "def _normalize_path(path):\n if path is None:\n return None\n return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))", "title": "" }, { "docid": "df1c7b09d9fb62b745e16a147d609da8", "score": "0.5805467", "text": "def cnormpath (path):\n path = normpath(path)\n if os.name == 'nt':\n # replace slashes with backslashes\n path = path.replace(\"/\", \"\\\\\")\n if not os.path.isabs(path):\n path = normpath(os.path.join(sys.prefix, path))\n return path", "title": "" }, { "docid": "1c5923946d6db5fef307e7d0e1d003ad", "score": "0.57970005", "text": "def normalizeWindowsPath(path):\n ret = libxml2mod.xmlNormalizeWindowsPath(path)\n return ret", "title": "" }, { "docid": "bce2ac196ffa7c78b132c99be2d6f08e", "score": "0.5794224", "text": "def windows_path(self, **kw):\n with_drive_letter = kw.get(\"with_drive\", True)\n return self._construct_path(\"\\\\\", with_drive_letter)", "title": "" }, { "docid": "fcc0633ee8bb8a95ed7cddc136d5bbd9", "score": "0.5782011", "text": "def test_realpath(self):\n print real_upath(\"ref with space\")\n self.assertTrue(real_upath(\"ref with space\").endswith(\"ref\\ with\\ space\"))\n self.assertTrue(real_upath(\"ref\\ with\\ space\").endswith(\"ref\\ with\\ space\"))\n self.assertTrue(real_ppath(\"ref with space\").endswith(\"ref with space\"))\n self.assertTrue(real_ppath(\"ref\\ with\\ space\").endswith(\"ref with space\"))", "title": "" }, { "docid": "cf505ea3035efb0da3220c0564eaaba4", "score": "0.57599384", "text": "def posix_path(self, **kw):\n with_drive_letter = kw.get(\"with_drive\", True)\n return self._construct_path(\"/\", with_drive_letter)", "title": "" }, { "docid": "4e4c982a18750b4706c305ce7eccab2a", "score": "0.5750479", "text": "def canonical_filename(filename):\n import os, os.path\n\n path = from_posix(filename)\n while True:\n path = os.path.abspath(path)\n try:\n p = os.path.dirname(path)\n # os.readlink doesn't exist in windows python2.7\n try:\n deref_path = 
os.readlink(path)\n except AttributeError: # pragma: no cover\n return path\n path = os.path.join(p, deref_path)\n except OSError:\n return path", "title": "" }, { "docid": "cd48894634bbab605109d249d0fca3c7", "score": "0.57480603", "text": "def real_absolute_path(path):\n return os.path.realpath(absolute_path(path))", "title": "" }, { "docid": "33e6639ad4b9434b61b6803b118d1ab7", "score": "0.57284135", "text": "def normalizeNativePath(path: unicode) -> unicode:\n ...", "title": "" }, { "docid": "438e16ea7832ce3427e626c4bf996af3", "score": "0.57243484", "text": "def _abs_path(fn):\n return os.path.join(os.path.dirname(__file__), fn)", "title": "" }, { "docid": "7fcfd154529630e577685b655c732d8f", "score": "0.5702541", "text": "def from_posix(fname):\n import sys\n if sys.platform == 'win32': # pragma: nocover\n if fname[0] == '/':\n fname = fname[1:]\n fname = fname.replace('/', '\\\\')\n return fname", "title": "" }, { "docid": "7140c77dc8b37816850d751dee64375a", "score": "0.569107", "text": "def get_abspath(path: str) -> str:\n if os.path.isabs(path):\n return path\n\n return os.path.join(os.path.dirname(__file__), path)", "title": "" }, { "docid": "c66ad2a9c173f74afb53aefcee167163", "score": "0.5689126", "text": "def normalizePath(p: str, *pathParts: List[str]) -> str:\n p1 = os.path.abspath(os.path.expanduser(p))\n if len(pathParts)>0:\n allPathParts = [ p1 ]\n allPathParts.extend(pathParts)\n p1 = os.path.join(*allPathParts)\n p2 = os.path.abspath(p1)\n return p2", "title": "" }, { "docid": "1daba0cef3718f896da9bddb3bb685cb", "score": "0.56857973", "text": "def ExpandPath(path):\n return os.path.realpath(os.path.expanduser(path))", "title": "" }, { "docid": "6d6233ce777cacaf6df3ac4f526986e1", "score": "0.56846035", "text": "def ospathjoin(*args, **kwargs):\n def build_value(*args, **kwargs):\n platform = kwargs.get('platform', None)\n if platform is None:\n return os.path.join(*args)\n elif platform.startswith(\"win\"):\n return \"\\\\\".join(args)\n return \"/\".join(args)\n\n value = build_value(*args, **kwargs)\n if value == \"/$PYINT\":\n raise RuntimeError( # pragma: no cover\n f\"Impossible values {args} - {kwargs}.\")\n return value", "title": "" }, { "docid": "39f8f5b7db6fcb33c4252cdd5eae47ec", "score": "0.56689423", "text": "def check_absolute_path(path):\n current_dir = os.getcwd()\n if os.path.isabs(path) is False:\n if str(path).startswith(\"./\"):\n return current_dir + path[1:]\n else:\n return current_dir + \"/\" + path\n else:\n return path", "title": "" }, { "docid": "5adb56b3b62838c0f2700a44eb515fc6", "score": "0.5656998", "text": "def _ensure_path_absolute(maybe_relpath, cfg_path):\n if not isinstance(maybe_relpath, str):\n raise TypeError(\n \"Attempting to ensure non-text value is absolute path: {} ({})\".\n format(maybe_relpath, type(maybe_relpath)))\n if os.path.isabs(maybe_relpath) or is_url(maybe_relpath):\n _LOGGER.debug(\"Already absolute\")\n return maybe_relpath\n # Maybe we have env vars that make the path absolute?\n expanded = os.path.expanduser(os.path.expandvars(maybe_relpath))\n if os.path.isabs(expanded):\n _LOGGER.debug(\"Expanded: {}\".format(expanded))\n return expanded\n # Set path to an absolute path, relative to project config.\n config_dirpath = os.path.dirname(cfg_path)\n _LOGGER.debug(\"config_dirpath: {}\".format(config_dirpath))\n abs_path = os.path.join(config_dirpath, maybe_relpath)\n _LOGGER.debug(\"Expanded and/or made absolute: {}\".format(abs_path))\n return abs_path", "title": "" }, { "docid": "37154be3ec3ed3237dd04ad78a8c4564", 
"score": "0.56507784", "text": "def abs_fname_(fname):\n if os.path.isabs(fname):\n pass\n elif '~' in fname:\n fname = os.path.expanduser(fname)\n else:\n fname = os.path.abspath(fname)\n\n return fname", "title": "" }, { "docid": "f2f679d3ab3be5693c8b1f14898ec3b0", "score": "0.56437653", "text": "def resolved(rpath):\r\n return realpath(abspath(rpath))", "title": "" }, { "docid": "271f494587af57a1f2f8e2a977f1a4e5", "score": "0.5634793", "text": "def _sanitize_relative_path(self, path):\n last = None\n path = os.path.normpath(path)\n while path != last:\n last = path\n # Note: os.path.join treats '/' as os.sep on Windows\n path = path.lstrip(os.sep).lstrip('/')\n path = path.lstrip(os.pardir).lstrip('..')\n drive, path = os.path.splitdrive(path) # for Windows\n return path", "title": "" }, { "docid": "fe31a4f9dc1aab6888306c558467806f", "score": "0.5630288", "text": "def rel_resolve(path):\n if os.path.isabs(path):\n return os.path.abspath(path)\n else:\n return os.path.join(SCRIPTDIR, path)", "title": "" }, { "docid": "588bf358c2f2f3b2793c59f9c54ad285", "score": "0.5628618", "text": "def make_path_safe(path):\n if path is not None:\n return os.path.abspath(os.path.expanduser(path))\n else:\n return None", "title": "" }, { "docid": "27d61a791328647e6627814d917094dc", "score": "0.5615828", "text": "def mangle_path(path):\n # Remove assigns\n path = servers.get_file_server().manglepath( str(path) )\n # Remove parent special directories\n path = os.path.abspath( path )\n # Convert path to Nebula format (slashes instead of backslashes)\n path = servers.get_file_server().manglepath( str(path) )\n # Convert drive letter to lowercase\n if len(path) > 1:\n if path[1] == ':':\n path = path[:1].lower() + path[1:]\n return path", "title": "" }, { "docid": "74981d7f57d2e48a18d6f5a6d56ed0ed", "score": "0.56110865", "text": "def make_fs_path(parts):\n return '/'.join(parts)", "title": "" }, { "docid": "a3a5886e5b6049f4dc945c8131f1f95f", "score": "0.5601287", "text": "def _absPath(self, relpath):\n\n # Pass through URIs and absolute paths.\n if self.isUrl(relpath) or relpath[0] == '/':\n return relpath\n\n # This won't deal with ~user/ syntax, but it's much less\n # common anyway.\n if relpath.startswith('~/') and 'HOME' in os.environ:\n return os.path.join(os.environ['HOME'], relpath[2:])\n\n if self._configFileStack:\n relativeTo = os.path.dirname(self._configFileStack[-1])\n else:\n relativeTo = os.getcwd()\n\n if self.isUrl(relativeTo):\n parts = urlparse.urlsplit(relativeTo)\n return urlparse.urlunsplit((parts.scheme, parts.netloc, os.path.normpath(os.path.join(parts.path, relpath)), parts.query, parts.fragment))\n return os.path.normpath(os.path.join(relativeTo, relpath))", "title": "" }, { "docid": "006e402219793008e927594d42b73da5", "score": "0.5591167", "text": "def abspath(path):\n path = os.fspath(path)\n if not os.path.isabs(path):\n path = os.path.join(get_app_root(), path)\n return os.path.normpath(path)", "title": "" }, { "docid": "229c59d82d8cb587909ca322eb163022", "score": "0.55806446", "text": "def realpath(path: str) -> str:\n pass", "title": "" }, { "docid": "273b82b3853a5d28f78bb2edd5b9f76b", "score": "0.5580259", "text": "def ConvertToCygpath(path):\n if sys.platform == 'cygwin':\n p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)\n path = p.communicate()[0].strip()\n return path", "title": "" }, { "docid": "c5f80d25c3cad955de1ba1bc33d8b7c6", "score": "0.5571355", "text": "def _abs_path(rel_path):\n return os.path.join(BASE_DIR, rel_path)", "title": "" }, { "docid": 
"08b482ba2220ca689b45a1bea4093892", "score": "0.55642563", "text": "def expandpath(path):\n return os.path.abspath(os.path.expanduser(path))", "title": "" }, { "docid": "faa4cc5aa23ac02e0c77547badd7d4ec", "score": "0.55602455", "text": "def ensure_file_abs_path_valid(file_abs_path: Text) -> Text:\n project_meta = load_project_meta(file_abs_path)\n raw_abs_file_name, file_suffix = os.path.splitext(file_abs_path)\n file_suffix = file_suffix.lower()\n\n raw_file_relative_name = convert_relative_project_root_dir(raw_abs_file_name)\n if raw_file_relative_name == \"\":\n return file_abs_path\n\n path_names = []\n for name in raw_file_relative_name.rstrip(os.sep).split(os.sep):\n\n if name[0] in string.digits:\n # ensure file name not startswith digit\n # 19 => T19, 2C => T2C\n name = f\"T{name}\"\n\n if name.startswith(\".\"):\n # avoid \".csv\" been converted to \"_csv\"\n pass\n else:\n # handle cases when directory name includes dot/hyphen/space\n name = name.replace(\" \", \"_\").replace(\".\", \"_\").replace(\"-\", \"_\")\n\n path_names.append(name)\n\n new_file_path = os.path.join(\n project_meta.RootDir, f\"{os.sep.join(path_names)}{file_suffix}\"\n )\n return new_file_path", "title": "" }, { "docid": "dc4b8089107fb5fb49c94531c988fe2a", "score": "0.55459946", "text": "def fix_path(path):\n path = os.path.normpath(path)\n os.makedirs(path, exist_ok=True)\n return path", "title": "" }, { "docid": "6f7a60a0f8c1e10700cce7ad37a5019b", "score": "0.55445653", "text": "def realpath(path):\n\n if path.startswith('//'):\n path = bpy.path.abspath(path)\n else:\n path = os.path.realpath(path)\n\n path = path.replace('\\\\', '/')\n path = os.path.realpath(path)\n\n return path", "title": "" }, { "docid": "4a03202ed0668f6b6874d71290ac02ba", "score": "0.55416316", "text": "def make_local_path(self, *args):\n return os.path.normpath(os.path.join(\n os.path.dirname(api.env.real_fabfile), *args).rstrip(os.path.sep))", "title": "" }, { "docid": "142d8b03fd52086dbff63d1980a61f0b", "score": "0.5540294", "text": "def abspath(*segments):\n return os.path.normpath(os.path.join(root, *segments))", "title": "" }, { "docid": "8d43843e450e7dc0d05f66da6102ffbb", "score": "0.5524549", "text": "def get_abs_path(path):\r\n abs_path = lib_path.abspath(path)\r\n return abs_path", "title": "" }, { "docid": "dbcb9b147178cb3a06a6414973eb6fed", "score": "0.5521073", "text": "def GetWindowsPathWithUNCPrefix(path):\n path = path.strip()\n\n # No need to add prefix for non-Windows platforms.\n # And \\\\?\\ doesn't work in python 2 or on mingw\n if not IsWindows() or sys.version_info[0] < 3:\n return path\n\n # Starting in Windows 10, version 1607(OS build 14393), MAX_PATH limitations have been\n # removed from common Win32 file and directory functions.\n # Related doc: https://docs.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation?tabs=cmd#enable-long-paths-in-windows-10-version-1607-and-later\n import platform\n if platform.win32_ver()[1] >= '10.0.14393':\n return path\n\n # import sysconfig only now to maintain python 2.6 compatibility\n import sysconfig\n if sysconfig.get_platform() == 'mingw':\n return path\n\n # Lets start the unicode fun\n unicode_prefix = '\\\\\\\\?\\\\'\n if path.startswith(unicode_prefix):\n return path\n\n # os.path.abspath returns a normalized absolute path\n return unicode_prefix + os.path.abspath(path)", "title": "" }, { "docid": "5a73f161a75ac9732ce6b3c3b4db097e", "score": "0.5517022", "text": "def get_abs(s):\n return os.path.abspath(s)", "title": "" }, { "docid": 
"6c1b59085c0cb23a84db665d77900642", "score": "0.55168426", "text": "def _set_rel_paths(self):\n if self.working_dir is not None:\n self._rel_working_dir = os.path.relpath(self.working_dir)\n if self.alignment is not None:\n self._rel_alignment = os.path.relpath(self.alignment, \n self.working_dir)\n if self.out_file is not None:\n self._rel_out_file = os.path.relpath(self.out_file, \n self.working_dir)", "title": "" }, { "docid": "f56959f597324aa9ab7810945093c1ce", "score": "0.5507602", "text": "def get_abs_path(self, value):\n return os.path.abspath(os.path.expanduser(os.path.expandvars(value)))", "title": "" }, { "docid": "bb8d3b285d1009d7bd43cc03477aaff4", "score": "0.55069214", "text": "def _windows_seps(path: str) -> str:\n\n if not path:\n return None\n elif os.sep != ntpath.sep:\n return path.replace(os.sep, ntpath.sep)\n else:\n return path", "title": "" }, { "docid": "0db16acc40da6834d67322dab00a77bd", "score": "0.5502486", "text": "def normpath_with_actual_case(name):\r\n assert os.path.isabs(name) or os.path.ismount(name), \"Not abs nor mount: \" + name\r\n assert os.path.exists(name), \"Not exists: \" + name\r\n if os.name == \"nt\":\r\n name = os.path.realpath(name)\r\n from ctypes import create_unicode_buffer, windll\r\n buf = create_unicode_buffer(512)\r\n windll.kernel32.GetShortPathNameW(name, buf, 512) # @UndefinedVariable\r\n windll.kernel32.GetLongPathNameW(buf.value, buf, 512) # @UndefinedVariable\r\n if len(buf.value):\r\n result = buf.value\r\n else:\r\n result = name\r\n assert isinstance(result, str)\r\n if result[1] == \":\":\r\n # ensure drive letter is capital\r\n return result[0].upper() + result[1:]\r\n else:\r\n return result\r\n else:\r\n return os.path.normpath(name)", "title": "" }, { "docid": "c7274af4810560519a97f4ace480147a", "score": "0.5499009", "text": "def presentation(self, value):\r\n realpath = os.path.expanduser(value)\r\n if self.auto_create:\r\n if not os.path.exists(realpath):\r\n os.makedirs(realpath)\r\n return realpath", "title": "" }, { "docid": "8d51cf2df8c6dc05159f425b2322465d", "score": "0.54973257", "text": "def resolve(fname):\n if os.path.dirname(__file__):\n return os.path.dirname(__file__) + \"/../common/\" + fname\n else:\n return \"/common/\" + fname", "title": "" }, { "docid": "43d2f51517bce81b67e4f684181c458d", "score": "0.54943675", "text": "def get_absolute_root_path(self) -> PureWindowsPath:\n root_path = self.get_param_by_type(RootSourcePath)\n if not root_path:\n raise NoAbsoluteRootPathException(\n \"No absolute root root_path defined\"\n )\n elif not root_path.path.is_absolute():\n raise NoAbsoluteRootPathException(\n f\"Root root_path {root_path} is not absolute\"\n )\n else:\n return root_path.value", "title": "" }, { "docid": "8636611481c176f2cba580f4df08975b", "score": "0.547934", "text": "def full_path(path):\n return os.path.realpath(os.path.expandvars(os.path.expanduser(path)))", "title": "" }, { "docid": "9dfeafc01287fc59868a97bd71eabce0", "score": "0.5479139", "text": "def abspath(path: str) -> str:\n pass", "title": "" }, { "docid": "7d67bc3481666cbc16ac2e33e740968f", "score": "0.54786444", "text": "def absolute(self):\n if self.relative == '':\n return self.root # don't join in this case as that appends trailing '/'\n return os.path.join(self.root, self.relative)", "title": "" }, { "docid": "2cf0c32620d18fbf8b264f9507709874", "score": "0.547711", "text": "def setfilepaths():\n\n if gethostname() in ['stable', 'challenger', 'p', 'fog']:\n ncDir = '/home/disk/eos9/woelfle/cesm/nobackup/cesm1to2/'\n ncSubDir = 
'0.9x1.25/'\n saveDir = ('/home/disk/user_www/woelfle/cesm1to2/')\n\n elif gethostname() == 'woelfle-laptop':\n ncDir = 'C:\\\\Users\\\\woelfle\\\\Documents\\\\UW\\\\CESM\\\\hist\\\\'\n ncSubDir = ''\n saveDir = 'C:\\\\Users\\\\woelfle\\\\Documents\\\\UW\\\\CESM\\\\figs\\\\'\n\n elif gethostname()[0:6] in ['yslogi', 'geyser']:\n ncDir = '/glade/p/cgd/amp/people/hannay/amwg/climo/'\n ncSubDir = '0.9x1.25/'\n saveDir = '/glade/p/work/woelfle/figs/cesm1to2/'\n\n return (ncDir, ncSubDir, saveDir)", "title": "" }, { "docid": "d705359d30d20251a3f6d2ffb7e2e229", "score": "0.54744196", "text": "def _relpath_posix(path, start=os.path.curdir):\r\n\r\n if not path:\r\n raise ValueError(\"no path specified\")\r\n \r\n start_list = os.path.abspath(start).split(os.path.sep)\r\n path_list = os.path.abspath(path).split(os.path.sep)\r\n\r\n # Work out how much of the filepath is shared by start and path.\r\n i = len(os.path.commonprefix([start_list, path_list]))\r\n\r\n rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]\r\n if not rel_list:\r\n return os.path.curdir\r\n return os.path.join(*rel_list)", "title": "" }, { "docid": "e457a107e93d3cc8a0c9ad289315d8a9", "score": "0.5471526", "text": "def osnorm(self):\n import os\n if os.sep=='/' and \"\\\\\" in str(self):\n return Path(os.path.normpath(str(self).replace('\\\\','/' )))\n elif os.sep=='\\\\' and \"/\" in str(self):\n return Path(os.path.normpath(str(self).replace('/','\\\\' )))\n else:\n return self.norm()", "title": "" }, { "docid": "f2702f6dc8859faf7b5605e84980fed6", "score": "0.546702", "text": "def getFulldirAddress(x):\n x_first10 = x[:10]\n if x_first10.find(\":\\\\\") >=0 or x_first10.startswith(\"/\") or x_first10.find(\":/\") >=0:\n return x\n else:\n return os.path.join(os.getcwd(),x)", "title": "" }, { "docid": "5e96ce175b51b6ff61752241bf9c0adf", "score": "0.5459551", "text": "def test_fix_path(self):\n\n expected = \"hello\" + PyFunceble.directory_separator + \"world\" + PyFunceble.directory_separator # pylint: disable=line-too-long\n actual = Directory(\"/hello/world\").fix_path()\n\n self.assertEqual(expected, actual)\n\n actual = Directory(\"\\\\hello\\\\world\").fix_path()\n self.assertEqual(expected, actual)\n\n actual = Directory(\"hello\\\\world\").fix_path()\n self.assertEqual(expected, actual)\n\n actual = Directory(r\"hello\\world\").fix_path()\n self.assertEqual(expected, actual)\n\n actual = Directory(r\"hello/world/\").fix_path()\n self.assertEqual(expected, actual)", "title": "" }, { "docid": "dd9f404d5e0029450ccb05cba2481a08", "score": "0.5452528", "text": "def abs_path_or_uri(path_or_uri, relative_to):\n is_uri = \"://\" in path_or_uri\n if not is_uri and not os.path.isabs(path_or_uri):\n path_or_uri = os.path.join(relative_to, path_or_uri)\n if not is_uri:\n _ensure_file_exists(path_or_uri)\n return path_or_uri", "title": "" }, { "docid": "d45a5cdb7a30fa85668d788278ab9024", "score": "0.5445164", "text": "def mkpath(*paths):\n return os.sep + os.sep.join(paths)", "title": "" }, { "docid": "549087eedd1c21c77e2bd7697e98b29e", "score": "0.54420483", "text": "def join(self, *args: os.PathLike[str], abs: bool = False) -> LocalPath:\n sep = self.sep\n strargs = [os.fspath(arg) for arg in args]\n strpath = self.strpath\n if abs:\n newargs: list[str] = []\n for arg in reversed(strargs):\n if isabs(arg):\n strpath = arg\n strargs = newargs\n break\n newargs.insert(0, arg)\n # special case for when we have e.g. 
strpath == \"/\"\n actual_sep = \"\" if strpath.endswith(sep) else sep\n for arg in strargs:\n arg = arg.strip(sep)\n if iswin32:\n # allow unix style paths even on windows.\n arg = arg.strip(\"/\")\n arg = arg.replace(\"/\", sep)\n strpath = strpath + actual_sep + arg\n actual_sep = sep\n obj = object.__new__(self.__class__)\n obj.strpath = normpath(strpath)\n return obj", "title": "" } ]
2f4f0d8423f6ecc13377132d7754a67e
Turn selected bars on, others off
[ { "docid": "92d9f575597c61a0ce457e38318d744d", "score": "0.64476544", "text": "def selectedBarsOn(strip, color):\n for i in range(strip.numPixels()):\n if leds[i] == True:\n strip.setPixelColor(i, color)\n else:\n strip.setPixelColor(i, Color(0, 0, 0))\n strip.show()", "title": "" } ]
[ { "docid": "fca8ef6d118cf293f5681412d3b06a9c", "score": "0.68102723", "text": "def allBarsOff(strip):\n setAll(strip, Color(0, 0, 0))\n strip.show()", "title": "" }, { "docid": "43c8c3bc1f6fd947ad5058d5a9316b9e", "score": "0.6350631", "text": "def deselect(self):\n self.dock_draw_color = Port.brown", "title": "" }, { "docid": "bef3d7842127e4e9832b0e26dc01db78", "score": "0.6304234", "text": "def deselectAll(self):\n print \"deselecting all!!\"\n self.plotFilter=True\n for item in self.antListItems:\n item.setSelected(False)\n self.plotFilter=False\n self.plot()", "title": "" }, { "docid": "5bb40d1c04e1e808645c2c0cd8f0e2fd", "score": "0.6219368", "text": "def del_selected_tickers(self):\n for lbl in self.lbls: lbl.destroy()\n for txt in self.txts: txt.destroy()\n if (self.lstbx[\"state\"] == \"normal\"):\n self.b1[\"state\"] = \"normal\"\n else:\n self.b1[\"state\"] = \"disabled\"\n self.b2[\"state\"] = \"disabled\"\n self.b3[\"state\"] = \"disabled\"\n self.b4[\"state\"] = \"disabled\"\n self.past_state = []\n self.count = 0.0\n self.wdgt_trackr = {}\n self.percent.set(\"0.0\")\n self.header3.config(bg=\"red\")", "title": "" }, { "docid": "30215d55200474d80e84529ebf8df09d", "score": "0.6074427", "text": "def UnselectHiddenSegs(self):\n return _MaxPlus.BezierShape_UnselectHiddenSegs(self)", "title": "" }, { "docid": "8949742eac5caea9e2a6f7ad8492ab4d", "score": "0.6031566", "text": "def changeCrossRebarType(self):\n self.cross_rebars_type = (\n self.cross_rebars_widget.cross_rebars_type.currentText()\n )\n self.cross_rebars_widget.cross_l_shapeHookOrientation.hide()\n self.cross_rebars_widget.cross_rounding.hide()\n\n self.cross_rebars_widget.cross_l_shapeHookOrientationLabel.hide()\n self.cross_rebars_widget.cross_roundingLabel.hide()\n\n if self.cross_rebars_type == \"StraightRebar\":\n pass\n elif self.cross_rebars_type == \"LShapeRebar\":\n self.cross_rebars_widget.cross_l_shapeHookOrientation.show()\n self.cross_rebars_widget.cross_l_shapeHookOrientationLabel.show()\n self.cross_rebars_widget.cross_rounding.show()\n self.cross_rebars_widget.cross_roundingLabel.show()\n\n elif self.cross_rebars_type == \"UShapeRebar\":\n self.cross_rebars_widget.cross_rounding.show()\n self.cross_rebars_widget.cross_roundingLabel.show()", "title": "" }, { "docid": "10fe207a553c2d6fefa129f4852c2a16", "score": "0.60237634", "text": "def allBarsOn(strip, color):\n setAll(strip, color)\n strip.show()", "title": "" }, { "docid": "4554e4611ae8c647dc9538992a84be89", "score": "0.59816223", "text": "def toggle_off(self):\n self.config(highlightbackground=self.non_selected_color)\n self.select_button.config(fg=self.non_selected_color)\n self.remove_button.pack_forget() # Remove option to remove this program", "title": "" }, { "docid": "04dbcb2c070c7ad751455da0d4696ace", "score": "0.5951433", "text": "def resetAllBands(self):\n try:\n for slider in self.sliders:\n slider.setValue(1)\n self.sliderChangedGraph.plotItem.clear()\n self.plotUsingDimension()\n for slider, value in self.sliderValuesClicked.items():\n self.sliderValuesClicked[slider] = ...\n self.signalModification = ...\n self.signalModificationInv = self.signalFile['data']\n except :\n pass", "title": "" }, { "docid": "51ad9e049ed774de09cb070313fa71f6", "score": "0.5868103", "text": "def hide_bar(self):\n self.bar.setVisible(False)", "title": "" }, { "docid": "32e14d7d03bb49cf7cb32aff07013bab", "score": "0.58570886", "text": "def deactivate(self):\r\n self.active = False\r\n self.label.setFill('darkgray')\r\n self.rect.setWidth(1)", "title": "" }, { 
"docid": "b039120bb531ee37e48abe97a3654219", "score": "0.5844637", "text": "def checkAll(self):\n if self.toPlot.get():\n for checkBut in self.checkButList:\n checkBut.select()\n else:\n for checkBut in self.checkButList:\n checkBut.deselect()", "title": "" }, { "docid": "be477d0fffce44f682dfdd46c133dca1", "score": "0.57926685", "text": "def set_deselected(self):\n pg.sprite.Group.remove(self.game.g.selected_units, self)\n self.is_selected = False\n self.image = self.image_deselected", "title": "" }, { "docid": "249e0f042fddfa75345f5d2d6ab57439", "score": "0.5774625", "text": "def select(self):\n self.dock_draw_color = WHITE", "title": "" }, { "docid": "cfef70fe1cc5229ba7afdb97a7ad2319", "score": "0.57719696", "text": "def markfordynselection(self):\n self.origselection = self.selectedlights[:]", "title": "" }, { "docid": "a845cbf9d91c525bd0e3dfd9d3dc6aec", "score": "0.5762742", "text": "def click_plot(self):\n self.plot_choice0.set(0) # clear None button\n self.plot_choice_all = 0\n if self.plot_choice5.get() == 1:\n self.plot_choice_all = self.plot_choice_all | 1\n elif (self.prev_plot_choice & 1) == 1: # then this guy was deselected just now\n self.remove_from_list_by_bit_flag(1)\n if self.plot_choice11.get() == 1:\n self.plot_choice_all = self.plot_choice_all | 2\n elif (self.prev_plot_choice & 2) == 2:\n self.remove_from_list_by_bit_flag(2)\n if self.plot_choice21.get() == 1:\n self.plot_choice_all = self.plot_choice_all | 4\n elif (self.prev_plot_choice & 4) == 4:\n self.remove_from_list_by_bit_flag(4)\n if self.plot_choice41.get() == 1:\n self.plot_choice_all = self.plot_choice_all | 8\n elif (self.prev_plot_choice & 8) == 8:\n self.remove_from_list_by_bit_flag(8)\n if self.plot_choice81.get() == 1:\n self.plot_choice_all = self.plot_choice_all | 16\n elif (self.prev_plot_choice & 16) == 16:\n self.remove_from_list_by_bit_flag(16)\n if self.plot_choice161.get() == 1:\n self.plot_choice_all = self.plot_choice_all | 32\n elif (self.prev_plot_choice & 32) == 32:\n self.remove_from_list_by_bit_flag(32)\n if self.plot_choice251.get() == 1:\n self.plot_choice_all = self.plot_choice_all | 64\n elif (self.prev_plot_choice & 64) == 64:\n self.remove_from_list_by_bit_flag(64)\n\n # if all other plots are deselected, select the \"none\" checkbox\n if self.plot_choice_all == 0:\n self.plot_choice0.set(1)\n\n elif self.prev_plot_choice <= self.plot_choice_all:\n if self.par5 is None:\n self.par5 = self.host.twinx()\n elif self.par5 is not None: # temp change to fix zorder issues\n self.par5.remove()\n self.par5 = self.host.twinx()\n\n if self.plot_choice_all > 0:\n len_closes = len(self.controller.model.closes)\n just_ints = np.arange(len_closes)\n\n # For color names, see:\n # http://stackoverflow.com/questions/22408237/named-colors-in-matplotlib\n\n #optParam3 = self.controller.model.getOptParam3()\n this_choice = self.plot_choice_all\n if (this_choice & 0x1) != 0 and len_closes > 5:\n temp, = self.par5.plot(just_ints, self.controller.model.ave_5_days, \"r-\", zorder=11)\n self.p5.insert(0, (temp, 0x1))\n if (this_choice & 0x2) != 0 and len_closes > 11:\n temp, = self.par5.plot(just_ints, self.controller.model.ave_11_days, \"r-\", zorder=11)\n self.p5.insert(0, (temp, 0x2))\n if (this_choice & 0x4) != 0 and len_closes > 21:\n temp, = self.par5.plot(just_ints, self.controller.model.ave_21_days, \"r-\")\n self.p5.insert(0, (temp, 0x4))\n if (this_choice & 0x8) != 0 and len_closes > 41:\n temp, = self.par5.plot(just_ints, self.controller.model.ave_41_days, \"r-\")\n self.p5.insert(0, (temp, 
0x8))\n if (this_choice & 0x10) != 0 and len_closes > 81:\n temp, = self.par5.plot(just_ints, self.controller.model.ave_81_days, \"r-\")\n self.p5.insert(0, (temp, 0x10))\n if (this_choice & 0x20) != 0 and len_closes > 161:\n temp, = self.par5.plot(just_ints, self.controller.model.ave_161_days, \"r-\")\n self.p5.insert(0, (temp, 0x20))\n if (this_choice & 0x40) != 0 and len_closes > 251:\n temp, = self.par5.plot(just_ints, self.controller.model.ave_251_days, \"r-\")\n self.p5.insert(0, (temp, 0x40))\n View.make_patch_spines_invisible(self.par5)\n self.par5.spines[\"right\"].set_visible(False)\n self.par5.get_yaxis().set_visible(False)\n self.par5.set_ylim(self.host.get_ylim())\n self.host.set_xlim(0, len_closes-1)\n\n self.data_plot.draw()\n self.prev_plot_choice = self.plot_choice_all", "title": "" }, { "docid": "33958d67348bfd5367a9b91f07bc7500", "score": "0.57620925", "text": "def selection_toggle(self, items):\n\t\tpass", "title": "" }, { "docid": "0316e5f5574a8cf3f7e6f82a38e5ae20", "score": "0.5747883", "text": "def HideSelectedSegs(self):\n return _MaxPlus.BezierShape_HideSelectedSegs(self)", "title": "" }, { "docid": "ec45cd06970c37276301678bfb5e6aa7", "score": "0.571863", "text": "def reset_selected(self) -> None:\r\n for i, j in itertools.product(range(self.row), range(self.col)):\r\n self.squares[i][j].selected = False", "title": "" }, { "docid": "0ae26084f3d62d5f579906cdc4f17405", "score": "0.57094073", "text": "def UnselectHiddenVerts(self):\n return _MaxPlus.BezierShape_UnselectHiddenVerts(self)", "title": "" }, { "docid": "98936c9097302311519e505a3c6d2be3", "score": "0.5702747", "text": "def toggleSelected(self, point, layers, series, axes):\n # Do we have an empty set to start with?\n index = self.data_CCS.index[point]\n self.data_CCS.loc[index, 'Selected'] = not self.data_CCS.loc[index, 'Selected']\n if layers[series] is not None :\n layers[series].remove()\n layers[series] = self.plotSelectedOnTop(layers, series, axes)\n axes.figure.canvas.draw_idle()", "title": "" }, { "docid": "c9e2c1cc7d6d5925ca240991b87e0c91", "score": "0.57000417", "text": "def on_bar(self, bar: BarData):\n self.bg.update_bar(bar)\n self.bg_xsmall.update_bar(bar)", "title": "" }, { "docid": "7b5ece9d787aeafd7a715b9268197089", "score": "0.5681529", "text": "def deselect(self):\n self.cell_color = stg.colors[\"white\"]\n self.state = False\n self.focus = False", "title": "" }, { "docid": "8ef3045a769b272950f08d6037420b6c", "score": "0.56812847", "text": "def UnhideSegs(self):\n return _MaxPlus.BezierShape_UnhideSegs(self)", "title": "" }, { "docid": "e0bff93a6491836ffb23b90b4ff6203a", "score": "0.567959", "text": "def deselect(self, i):\n characters[i].photo = ImageTk.PhotoImage(characters[i].image.point(lambda p: p * DARKNESS_LEVEL))\n characters[i].icon.configure(image=characters[i].photo)\n characters[i].dark = True", "title": "" }, { "docid": "75f3fe5cfabef92111c9bf489e165326", "score": "0.56729466", "text": "def _update_axes(self):\n super()._update_axes()\n self.clear_marks()", "title": "" }, { "docid": "4b1df810f5d031dfcd7d5e4d4b0b323f", "score": "0.56434256", "text": "def reset_selection():\n for node in nuke.selectedNodes():\n node[\"selected\"].setValue(False)", "title": "" }, { "docid": "9b0731d4522c72f1ce21a7d9f0fd6286", "score": "0.56415325", "text": "def unselect_all(self):\n self.queue_draw_item(*self._selected_items)\n self._selected_items.clear()\n self.focused_item = None\n self.emit('selection-changed', self._selected_items)", "title": "" }, { "docid": 
"6013d3a3bf700129f170840ac2d8adcf", "score": "0.56387824", "text": "def unshow_value(self):\r\n self.chart.delete(self.mark)\r\n self.chart.delete(self.value)\r\n self.value = 0\r\n self.mark = 0", "title": "" }, { "docid": "073251240e37bdc545743b25c32cce63", "score": "0.5636258", "text": "def _setBarGraphItem(self):\n if self.y_waveform is None:\n return\n\n brushes = np.array([self.color] * len(self.y_waveform))\n if self.threshold_color is not None:\n if self.upper_threshold is not None:\n brushes[np.argwhere(self.y_waveform > self.upper_threshold)] = self.threshold_color\n if self.lower_threshold is not None:\n brushes[np.argwhere(self.y_waveform < self.lower_threshold)] = self.threshold_color\n\n if self.x_waveform is None:\n self.bar_graph_item.setOpts(x=np.arange(len(self.y_waveform)),\n height=self.y_waveform,\n brushes=brushes)\n return\n\n self.bar_graph_item.setOpts(x=self.x_waveform,\n height=self.y_waveform,\n brushes=brushes)", "title": "" }, { "docid": "13c2626f0b69cb88e7797a8d3dabd1d1", "score": "0.56362545", "text": "def _change_selection_vscroll(self, event):\n buttons = self.mne.fig_selection.mne.radio_ax.buttons\n labels = [label.get_text() for label in buttons.labels]\n offset = 0\n selections_dict = self.mne.ch_selections\n for idx, label in enumerate(labels):\n offset += len(selections_dict[label])\n if event.ydata < offset:\n with _events_off(buttons):\n buttons.set_active(idx)\n self.mne.fig_selection._radiopress(event)\n return", "title": "" }, { "docid": "d69ceee3a7b097f02842747670191483", "score": "0.5619699", "text": "def change_inactive(self, event):\n self._canvas.configure(bg=\"#4B2E49\")\n self._bind.configure(bg=\"#4B2E49\")\n self._tower_name.configure(bg=\"#4B2E49\")\n self._tower_price.configure(bg=\"#4B2E49\")", "title": "" }, { "docid": "d2a9bea6b7d59a9f4ce1c3691c591bb2", "score": "0.5597312", "text": "def remove_barrier(self): \n self.colour = WHITE\n self.animator = 8", "title": "" }, { "docid": "4d2769242318e107806343c3fa2f8cfc", "score": "0.5592031", "text": "def reset_selected(self):\n self.selected = []", "title": "" }, { "docid": "e114b08a2b8183bc23516053983ded1c", "score": "0.5489594", "text": "def selectAll(self):\n print \"selecting all!!\"\n self.plotFilter=True\n for item in self.antListItems:\n item.setSelected(True)\n self.plotFilter=False\n self.plot()", "title": "" }, { "docid": "ba7b475bb3313910c520365f8410cc84", "score": "0.54488504", "text": "def SetUseSoftSelections(self, *args):\n return _MaxPlus.BezierShape_SetUseSoftSelections(self, *args)", "title": "" }, { "docid": "fe38e92972cf0affe8b0bcdfb85d4c65", "score": "0.544682", "text": "def toggle_selected_visibility(self):\n for layer in self:\n if layer.selected:\n layer.visible = not layer.visible", "title": "" }, { "docid": "87b069abada6bdf03825158e3e0c62e7", "score": "0.543815", "text": "def _update_highlighted_sensors(self):\n inds = np.in1d(\n self.mne.fig_selection.lasso.ch_names, self.mne.ch_names[self.mne.picks]\n ).nonzero()[0]\n self.mne.fig_selection.lasso.select_many(inds)", "title": "" }, { "docid": "92d83bc328d616ab45965e02c7db56b1", "score": "0.54369575", "text": "def _hide_scalebars(self):\n for bar in self.mne.scalebars.values():\n bar.remove()\n for text in self.mne.scalebar_texts.values():\n text.remove()\n self.mne.scalebars = dict()\n self.mne.scalebar_texts = dict()", "title": "" }, { "docid": "d771e06b796cb9bb49aa824b8a972df6", "score": "0.54333687", "text": "def rectangular_selection(self):\n self.rectangular_selection_flag = not 
self.rectangular_selection_flag", "title": "" }, { "docid": "1a1c8a51b7c5ba64c6eadce32a683f52", "score": "0.5430333", "text": "def deselect(self):\n self.set_label(('reversed',self.display_label))", "title": "" }, { "docid": "ce84e4c0ababe2b5d14829bd1ba2bccf", "score": "0.54106873", "text": "def dec_selected(self):\n if self.selected > 0:\n self.selected -= 1\n if self.selected < self.screen_start:\n self.screen_start -= 1\n self.screen_end -= 1\n self.wipe_canvas()\n self.draw_text()", "title": "" }, { "docid": "d687a6070796af803397fc461fb5d033", "score": "0.54096085", "text": "def reset_crosshair(self):\n self.text_dictionary_list = []\n self.current_crosshair_cross_color = self.crosshair_cross_color_default\n self.show_crosshair()", "title": "" }, { "docid": "1cc470d2bc625c6446ee9d43ba562894", "score": "0.5402042", "text": "def on_bar(self, bar: BarData):\n self.bg.update_bar(bar)", "title": "" }, { "docid": "1cc470d2bc625c6446ee9d43ba562894", "score": "0.5402042", "text": "def on_bar(self, bar: BarData):\n self.bg.update_bar(bar)", "title": "" }, { "docid": "1cc470d2bc625c6446ee9d43ba562894", "score": "0.5402042", "text": "def on_bar(self, bar: BarData):\n self.bg.update_bar(bar)", "title": "" }, { "docid": "c268266d910776b50cee44ec360ef9dc", "score": "0.54007953", "text": "def toolbar_checked ( self, toolbar_item ):\r\n for item in self.items:\r\n if item is not toolbar_item:\r\n item.tool_bar.ToggleTool( item.control_id, False )\r\n item.item.action.checked = False", "title": "" }, { "docid": "c268266d910776b50cee44ec360ef9dc", "score": "0.54007953", "text": "def toolbar_checked ( self, toolbar_item ):\r\n for item in self.items:\r\n if item is not toolbar_item:\r\n item.tool_bar.ToggleTool( item.control_id, False )\r\n item.item.action.checked = False", "title": "" }, { "docid": "dcf8cba354895cd0091a5fe78975fbfb", "score": "0.53876555", "text": "def unselect_all(self, ignore=None):\n for layer in self:\n if layer.selected and layer != ignore:\n layer.selected = False", "title": "" }, { "docid": "dcf8cba354895cd0091a5fe78975fbfb", "score": "0.53876555", "text": "def unselect_all(self, ignore=None):\n for layer in self:\n if layer.selected and layer != ignore:\n layer.selected = False", "title": "" }, { "docid": "a84c68884c6be7eb1bb344b80eaaf864", "score": "0.5387134", "text": "def unselect(self):\n\t\tself._selected_elements = []", "title": "" }, { "docid": "228537dfcbbfefe055d8141e62d7e814", "score": "0.53815544", "text": "def changeParallelRebarType(self):\n self.parallel_rebars_type = (\n self.parallel_rebars_widget.parallel_rebars_type.currentText()\n )\n self.parallel_rebars_widget.parallel_l_shapeHookOrientation.hide()\n self.parallel_rebars_widget.parallel_rounding.hide()\n\n self.parallel_rebars_widget.parallel_l_shapeHookOrientationLabel.hide()\n self.parallel_rebars_widget.parallel_roundingLabel.hide()\n\n if self.parallel_rebars_type == \"StraightRebar\":\n pass\n elif self.parallel_rebars_type == \"LShapeRebar\":\n self.parallel_rebars_widget.parallel_l_shapeHookOrientation.show()\n self.parallel_rebars_widget.parallel_l_shapeHookOrientationLabel.show()\n self.parallel_rebars_widget.parallel_rounding.show()\n self.parallel_rebars_widget.parallel_roundingLabel.show()\n\n elif self.parallel_rebars_type == \"UShapeRebar\":\n self.parallel_rebars_widget.parallel_rounding.show()\n self.parallel_rebars_widget.parallel_roundingLabel.show()", "title": "" }, { "docid": "e6c58a870affc8cc0a329292bcb16305", "score": "0.5378483", "text": "def remove_unselected(self):\n clusters = {}\n 
for representative in self.selected:\n clusters[representative] = self.clusters[representative]\n self.clusters_reset(clusters)\n self.history.append('remove_unselected()')\n self.simple_history_store()\n self.remove_unselected_action()\n self.remselect_handler.fire(True)", "title": "" }, { "docid": "8cd098bc3f5c993014fd7508098315e1", "score": "0.53540146", "text": "def selection_sort(self):\r\n for i in range(len(self.data)):\r\n min_idx = i\r\n for j in range(i + 1, len(self.data)):\r\n if self.data[min_idx] > self.data[j]:\r\n min_idx = j\r\n\r\n self.data[i], self.data[min_idx] = self.data[min_idx], self.data[i]\r\n self.colors = ['red' if n == i or n == i + 1 else 'green' for n in range(len(self.data))]\r\n self.bars.drawData(self.colors)\r\n update(self.speed)\r\n\r\n self.colors = ['white' for n in range(len(self.data))]\r\n self.bars.drawData(self.colors)", "title": "" }, { "docid": "aa111a75ebebe96d51b5df5ae4e31e2b", "score": "0.53493565", "text": "def toggle_select(self):\n if self.selected:\n self.deselect()\n else:\n self.select()", "title": "" }, { "docid": "fd3576a6507e4ba4a992b6b7fd09bf6d", "score": "0.5347151", "text": "def __toggle_select(self, id: int) -> None:\n tags = self.__canvas.gettags(id)\n if 'selected' in tags:\n self.__canvas.dtag(id, 'selected')\n self.__canvas.itemconfigure(id, outline=NORMAL_OUTLINE)\n else:\n self.__canvas.itemconfigure(id, tags=['selected'],\n outline=SELECT_OUTLINE)", "title": "" }, { "docid": "9cf5bf8f1db2dec855510e37675b679c", "score": "0.533518", "text": "def _clear_selection( self, box ):\r\n for i in box.GetSelections():\r\n box.SetSelection( i, False )", "title": "" }, { "docid": "e8d761f12dc14cff3df954bd8e0ab7aa", "score": "0.53266144", "text": "def _selected_changed(self, selected):\r\n for page, ui, _, _ in self._uis:\r\n if ui.info and selected is ui.info.object:\r\n self.control.setCurrentWidget(page)\r\n break\r\n deletable = self.factory.deletable\r\n deletable_trait = self.factory.deletable_trait\r\n if deletable and deletable_trait:\r\n enabled = xgetattr(selected, deletable_trait, True)\r\n self.close_button.setEnabled(enabled)", "title": "" }, { "docid": "1b6840aafc3f8b9e686d33471c9c46b2", "score": "0.5326535", "text": "def setShowUnitlessCurves(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "e62cda14516b840ae680c199a5aae810", "score": "0.53224605", "text": "def all_off(self):\n for v in self.values():\n v.off()", "title": "" }, { "docid": "bd0d75f0f4b0a1ccf0003105bdc5edaa", "score": "0.53195703", "text": "def Unhighlight(self):\r\n pass", "title": "" }, { "docid": "4f79d387a64e9c334cccba003a6ab2f1", "score": "0.5318318", "text": "def _style_radio_buttons_butterfly(self):\n # Show all radio buttons as selected when in butterfly mode\n parent = self.mne.parent_fig\n buttons = self.mne.radio_ax.buttons\n color = buttons.activecolor if parent.mne.butterfly else parent.mne.bgcolor\n if _OLD_BUTTONS:\n for circle in buttons.circles:\n circle.set_facecolor(color)\n # when leaving butterfly mode, make most-recently-used selection active\n if not parent.mne.butterfly:\n with _events_off(buttons):\n buttons.set_active(self.mne.old_selection)\n # update the sensors too\n parent._update_highlighted_sensors()", "title": "" }, { "docid": "2a0bd3e7cb8a88a29b8193ddee51e163", "score": "0.53180015", "text": "def _OnAxis( self, ds_name, axis_name, ev ):\n ev.Skip()\n\n if ev.IsChecked():\n other_axis = 'top' if axis_name == 'bottom' else 'bottom'\n\n other_radio = self.fDataSetControls[ ds_name ][ other_axis ]\n if 
other_radio.GetValue():\n other_radio.SetValue( False )\n\n for k in self.fDataSetControls:\n if k != ds_name:\n\t controls = self.fDataSetControls[ k ]\n\t if controls[ axis_name ].GetValue():\n\t controls[ axis_name ].SetValue( False )\n #end for\n #end if\n\n self._UpdateValue()", "title": "" }, { "docid": "d61f9a63bcc061ad076492075b571c68", "score": "0.5315275", "text": "def clearSelectedText(self):\n \n for coordPair in self.coordsToCheck:\n item = self.tableWidget.item(coordPair[0], coordPair[1])\n if item.background() == QtGui.QColor(0,204,204):\n item.setBackground(QtGui.QColor('lightGreen'))\n else:\n item.setBackground(QtGui.QColor('white'))\n \n self.coordsToCheck = [] #selected letters removed from coordsToCheck", "title": "" }, { "docid": "8d7aafbc168c8bc801475b5de66984c5", "score": "0.5310579", "text": "def deactivate(self):\n self._active = False\n self._obj.setBorderColor('black')", "title": "" }, { "docid": "bb0fa32673f1686a3784261f939a36c8", "score": "0.5308559", "text": "def HideSelectedVerts(self):\n return _MaxPlus.BezierShape_HideSelectedVerts(self)", "title": "" }, { "docid": "1c6f61d699bd8dd363831cc1af49641b", "score": "0.53018725", "text": "def on_bar(self, bar: BarData):\n self.bg_xmin.update_bar(bar)\n self.cancel_all()\n\n self.exit_shour,self.exit_long = self.am.donchian(self.dc_length * 60)\n\n if self.pos == 0:\n self.intra_trade_high = bar.high_price\n self.intra_trade_low = bar.low_price\n\n if self.cci_value > self.cci_stop:\n self.buy(self.up, self.fixed_size,True)\n\n elif self.cci_value < -self.cci_stop:\n self.short(self.down, self.fixed_size,True)\n\n elif self.pos > 0:\n self.intra_trade_high = max(self.intra_trade_high,bar.high_price)\n self.intra_trade_low = bar.low_price\n\n self.long_out = self.intra_trade_high * (\n 1 - self.trailing_tax / 100\n )\n\n self.long_stop = max(self.long_entry,self.long_out,self.exit_long)\n self.sell(self.long_stop, abs(self.pos), True)\n\n elif self.pos < 0:\n self.intra_trade_high = bar.high_price\n self.intra_trade_low = min(self.intra_trade_low,bar.low_price)\n\n self.short_out = self.intra_trade_low * (\n 1 + self.trailing_tax / 100\n )\n\n self.short_stop = min(self.short_entry,self.short_out,self.exit_shour)\n self.cover(self.short_stop, abs(self.pos), True)\n\n self.put_event()", "title": "" }, { "docid": "83aabf261dd3fd2109cf49b8e49eefda", "score": "0.5290698", "text": "def toggleselect(self,objectlist):\r\n for i in objectlist[0]:\r\n if i.IsSelected():\r\n i.ClearSelected()\r\n else:\r\n i.SetSelected()", "title": "" }, { "docid": "5b27c8b7b0466b3355436dac47d95d12", "score": "0.52891797", "text": "def crossAmountRadioClicked(self):\n self.cross_rebars_widget.cross_spacing.setEnabled(False)\n self.cross_rebars_widget.cross_amount.setEnabled(True)", "title": "" }, { "docid": "300783aa22732592a5fe524111af831a", "score": "0.5279168", "text": "def reset(self, reset_tranparency = False):\n for f in self.filters.values():\n f.setChecked(False)\n if reset_tranparency:\n self.transparency.setChecked(False)", "title": "" }, { "docid": "294cc218b6a1a0054d29d2eb6a247bc9", "score": "0.5276453", "text": "def reset(self):\n self.side_bar_arm.reset()", "title": "" }, { "docid": "fbfb4309d6e6c7c02a167074c53a00e6", "score": "0.5270375", "text": "def DeleteSelSegs(self, *args):\n return _MaxPlus.BezierShape_DeleteSelSegs(self, *args)", "title": "" }, { "docid": "4395f09e96d4eb1c226834dd8f6d2556", "score": "0.5265872", "text": "def on_bar(self, bopen, bhigh, blow, bclose, time=None):\r\n pass", "title": "" }, { "docid": 
"3ad8fce40ab4086cd2d905e966ac7f1f", "score": "0.52474487", "text": "def on_bar(self, bar: BarData):\n self.bg.update_bar(bar)\n\n if hasattr(self,'kc'):\n self.process_pos(bar)", "title": "" }, { "docid": "56a82a67bad9a376858a0a1fa5802466", "score": "0.5241514", "text": "def _on_selection_set_changed(self, sender, selection_set_id, selection_diff):\n x_axis_key, y_axis_key = self.horizontal_axis_data.keys()[0], self.vertical_axis_data.keys()[0]\n markings = self.horizontal_axis_views[x_axis_key].markings\n markings.update(self.vertical_axis_views[y_axis_key].markings)\n\n for key, marking in markings.iteritems():\n if len(self._selection_data_holder.check_if_selection_in_set([[key]])) != 0:\n marking.variant = \"bold\"\n else:\n marking.variant = \"Regular\"", "title": "" }, { "docid": "df43ff21a83e99ef097d1254abdd552c", "score": "0.5238999", "text": "def toggle_on(self):\n self.config(highlightbackground=self.selected_color)\n self.select_button.config(fg=self.selected_color)\n if self.sim_num > 0:\n self.remove_button.pack(side=LEFT) # Show option to remove this program if it's not the main simulation", "title": "" }, { "docid": "8b06c5fe8731123983fcc7fb94ca24df", "score": "0.5228201", "text": "def after_all(context):\n set_highlight(False)", "title": "" }, { "docid": "29366be906b8ffef62ac83b74dfe60ca", "score": "0.5215062", "text": "def unhovered(self):\r\n\t\tpass", "title": "" }, { "docid": "b22e7fdca82cf88461390b337a19a6c4", "score": "0.52127564", "text": "def select_deselect_object_by_call(self, color='w') :\n self.swap_select_deselect_status() # Swap the isSelected between True and False\n self.set_select_deselect_color(color) # Set the object color depending on isSelected\n self.figure.canvas.draw() # Draw canvas with all current objects on it", "title": "" }, { "docid": "aee02d29323c9ef24816908a3ea0492b", "score": "0.5210572", "text": "def crossSpacingRadioClicked(self):\n self.cross_rebars_widget.cross_spacing.setEnabled(True)\n self.cross_rebars_widget.cross_amount.setEnabled(False)", "title": "" }, { "docid": "aa6f6aca603023d9e88910a1451c0bc2", "score": "0.5207316", "text": "def update_bars(self):\r\n raise NotImplementedError(\"Should implement update_bars()\")", "title": "" }, { "docid": "2ddd131f204002e7c3009c790ed473d5", "score": "0.52058923", "text": "def unsetRoiClicked(self):\n if self.quadFold is not None:\n self.fixedRoi.setEnabled(False)\n self.fixedRoiChkBx.setChecked(False)\n if 'fixed_roi_rad' in self.quadFold.info:\n del self.quadFold.info['fixed_roi_rad']\n if 'roi_rad' in self.quadFold.info:\n del self.quadFold.info['roi_rad']\n self.result_zoom = None\n self.zoomOutClicked = True\n self.default_result_img_zoom = None\n self.processImage()", "title": "" }, { "docid": "69d0bc87fe6f17719c4dbe960a34dbec", "score": "0.52001345", "text": "def setCallback(self, *args, **keys):\n\n #g.trace(self.tree.redrawCount,args,g.callers())\n self.canvas.leo_treeBar.set(*args, **keys)\n\n if self.tree.allocateOnlyVisibleNodes:\n self.tree.setVisibleArea(args)", "title": "" }, { "docid": "3bc772291934bfe92f95d9fe41e130f9", "score": "0.5200052", "text": "def changeYDirRebarsType(self):\n self.ydir_rebars_type = (\n self.sec_ydir_rebars_widget.ydir_rebars_type.currentText()\n )\n if self.ydir_rebars_type == \"LShapeRebar\":\n self.sec_ydir_rebars_widget.ydir_rebars_hookOrientation.setEnabled(\n True\n )\n self.sec_ydir_rebars_widget.ydir_rebars_hookExtension.setEnabled(\n True\n )\n self.sec_ydir_rebars_widget.ydir_rebars_rounding.setEnabled(True)\n else:\n 
self.sec_ydir_rebars_widget.ydir_rebars_hookOrientation.setEnabled(\n False\n )\n self.sec_ydir_rebars_widget.ydir_rebars_hookExtension.setEnabled(\n False\n )\n self.sec_ydir_rebars_widget.ydir_rebars_rounding.setEnabled(False)", "title": "" }, { "docid": "4eed0c97aca30eaf9efcef713c9306cb", "score": "0.5199876", "text": "def setActiveShadingGraph(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "e095f979d8435eda623ec2ab324e6c36", "score": "0.5194979", "text": "def deactivate(self):\n\n self._is_active = False\n # self.unhighlight()\n\n # reset lane visibility\n if self._is_lane_visible_before is not None:\n lanedraws = self.get_drawobj_by_ident('lanedraws')\n if lanedraws:\n lanedraws.set_visible(self._is_lane_visible_before)\n self._canvas.draw()\n\n self.deactivate_select()", "title": "" }, { "docid": "f9923cae4f9b45abf378052efaa5aa42", "score": "0.519212", "text": "def box_off(ax):\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n ax.tick_params(axis='x', direction='out')\n ax.tick_params(axis='y', direction='out')\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')", "title": "" }, { "docid": "6f576a7f8566f4c1ed3d9e8765dcb935", "score": "0.5189551", "text": "def allOff(self):\n for light in self.lightDict.values():\n self.setOff(light)", "title": "" }, { "docid": "7c23ede951bafea07055aa1632ff24e1", "score": "0.5188617", "text": "def do_hide_cyclic_slicers(self):\n # manage slicers by some keys!\n target_name = str(self.ui.lineEdit_target1.text())\n if target_name == '':\n return\n\n self._myParent.remove_slicers_highlights(target_name)\n\n return", "title": "" }, { "docid": "95d2025865c0a7edf4f4e4c0e7c5e490", "score": "0.5186471", "text": "def UpdateSels(self, save=True):\n return _MaxPlus.BezierShape_UpdateSels(self, save)", "title": "" }, { "docid": "857aee0aa275862010a58b4a532052af", "score": "0.51852787", "text": "def setPlotStyle(self):\n if self.mainWidget.plotSingle.isChecked():\n print 'Single plot mode!'\n curr_selected_ant = self.mainWidget.antennaList.selectedItems()\n curr_selected_fpga= self.mainWidget.fpgaList.selectedItems()\n self.mainWidget.antennaList.setSelectionMode(gui.QAbstractItemView.SingleSelection)\n self.mainWidget.fpgaList.setSelectionMode(gui.QAbstractItemView.SingleSelection)\n self.subplots=False\n # If more than one selection was made before entering single selection mode,\n # then highlight the first entry of the current selection\n if len(curr_selected_ant) > 1: \n self.mainWidget.antennaList.setCurrentItem(curr_selected_ant[0])\n if len(curr_selected_fpga) > 1:\n self.mainWidget.fpgaList.setCurrentItem(curr_selected_fpga[0])\n else:\n print 'Multiple plot mode!'\n self.mainWidget.antennaList.setSelectionMode(gui.QAbstractItemView.MultiSelection)\n self.mainWidget.fpgaList.setSelectionMode(gui.QAbstractItemView.MultiSelection)\n if self.mainWidget.plotMultiple.isChecked():\n print 'No subplots!'\n self.subplots=False\n else:\n print 'Subplots!'\n self.subplots=True\n # plot with the new settings\n self.plot()", "title": "" }, { "docid": "b5e89bb2120bf45f7bedc3a837d81d3a", "score": "0.51784855", "text": "def setShowAnimCurvesOnly(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "76b4878c6061f6b61e6ae1551a9c89ef", "score": "0.5175352", "text": "def _set_custom_selection(self):\n chs = self.lasso.selection\n parent = self.mne.parent_fig\n buttons = self.mne.radio_ax.buttons\n if not len(chs):\n return\n labels = [label.get_text() for label in buttons.labels]\n inds = 
np.in1d(parent.mne.ch_names, chs)\n parent.mne.ch_selections[\"Custom\"] = inds.nonzero()[0]\n buttons.set_active(labels.index(\"Custom\"))", "title": "" }, { "docid": "a15ced98fac38e272abeb5f9cf601d01", "score": "0.5173961", "text": "def update_bars(self):\n raise NotImplementedError(\"Should implement update_bars()\")", "title": "" }, { "docid": "a15ced98fac38e272abeb5f9cf601d01", "score": "0.5173961", "text": "def update_bars(self):\n raise NotImplementedError(\"Should implement update_bars()\")", "title": "" }, { "docid": "c9bfa23c754c8f2d22de48bf7f3a3a30", "score": "0.51666516", "text": "def update_bars(self):\n \n raise NotImplementedError(\"Should implement update_bars()\")", "title": "" }, { "docid": "28c3ec781de65d09a901289afdb98f6a", "score": "0.5165645", "text": "def hide_axes_all(self):\n for renderer in self.renderers:\n renderer.hide_axes()", "title": "" }, { "docid": "819a288d40720994b4c88d28e944fa10", "score": "0.516192", "text": "def on_bar(self, bar: BarData):\n pass", "title": "" }, { "docid": "255e50154d5fa0a059e5f637f16b5e8b", "score": "0.51541525", "text": "def off(self):\n self.light.high()", "title": "" }, { "docid": "de2f44a38ef1603282bccd785e281560", "score": "0.5151811", "text": "def UnselectHiddenSplines(self):\n return _MaxPlus.BezierShape_UnselectHiddenSplines(self)", "title": "" } ]
766ceaba5bfcfc6970b6b5c9006dc0f4
Return an update operation string with random values x y z w, in the format UPDATE x y z w
[ { "docid": "ecb0c9b2dd34d88ab35ca778d1f9e9ca", "score": "0.6559078", "text": "def random_update_string(edgesize):\n x, y, z = np.random.randint(edgesize, size=3) + np.array([1, 1, 1])\n w = int(np.random.uniform(-10e9, 10e9))\n return f'UPDATE {x} {y} {z} {w}'", "title": "" } ]
[ { "docid": "b756e050907f15b4c563749e003c97d4", "score": "0.62117904", "text": "def update():\n\n return 0, \"\"", "title": "" }, { "docid": "2668478b44a072326982505a1a41252f", "score": "0.60683626", "text": "def test_10_update_simple(self):\n\n User = \"User.update(\\\"38f22813-2753-4d42-b37c-57a17f1e4f88\\\", \" + \\\n \"\\\"first_name\\\", \\\"Erika\\\")\"\n Place = \"Place.update(\\\"246c227a-d5c1-403d-9bc7-6a47bb9f0f68\\\", \" + \\\n \"\\\"first_name\\\", \\\"Osorio\\\")\"\n State = \"State.update(\\\"5fb793e6-9c5a-4063-9c60-2f3f5a061d95\\\", \" + \\\n \"\\\"first_name\\\", \\\"Edison\\\")\"\n City = \"City.update(\\\"7a6d7852-9368-4138-889b-8b3086a51885\\\", \" + \\\n \"\\\"first_name\\\", \\\"Esteban\\\")\"\n Amenity = \"Amenity.update(\\\"fce4d085-ebc6-4472-ae0e-82e2a0db\\\", \" + \\\n \"\\\"first_name\\\", \\\"Isaza\\\")\"\n Reivew = \"Reivew .update(\\\"7aeaff10-96dd-4754-b429-8e0f7f645e47\\\",\" + \\\n \"\\\"first_name\\\", \\\"John\\\")\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(Place)\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(State)\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(City)\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(Amenity)\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(Reivew)\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(User)", "title": "" }, { "docid": "3e08fa5b97bcf140d9bfb8ff2d6ab83e", "score": "0.5943084", "text": "def build_update(\n self, style: ParamStyleType = DEFAULT_PARAM_STYLE\n ) -> Tuple[str, ArgList]:\n updates = []\n args = ArgList()\n for field in self.fields:\n stmt, args = Filter(field, prefix=\"col\").to_sql(args, style)\n updates.append(stmt)\n updates = \",\\n \".join(updates)\n return f\"UPDATE\\n {self.table_name}\\nSET\\n {updates}\", args", "title": "" }, { "docid": "e1f6dac47d0359b08c1a174adc9dfe33", "score": "0.5905035", "text": "def update(self) -> str:\n # Block template\n tlp_block = Template(\"\"\"\n for(unsigned int i = 0; i< this->post->size; i++){\n for(unsigned int j = 0; i< this->pre->size; i++){\n$update\n }\n }\"\"\")\n\n # Equation template\n tpl_eq = Template(\"\"\"\n // $hr\n $lhs $op $rhs;\n \"\"\")\n\n # Iterate over all blocks of equations\n code = \"\"\n for block in self.parser.update_equations:\n for eq in block.equations:\n\n # Temporary variables\n if eq['type'] == 'tmp':\n code += tpl_eq.substitute(\n lhs = \"double \" + eq['name'],\n op = eq['op'],\n rhs = parser.code_generation(eq['rhs'], self.correspondences),\n hr = eq['human-readable']\n )\n else:\n code += tpl_eq.substitute(\n lhs = \"this->\"+eq['name'] if eq['name'] in self.parser.shared \n else \"this->\"+eq['name'] + \"[i][j]\",\n op = eq['op'],\n rhs = parser.code_generation(eq['rhs'], self.correspondences),\n hr = eq['human-readable']\n )\n\n return tlp_block.substitute(update=code)", "title": "" }, { "docid": "c65b49b2d3fbd828b5a4339ce91de68b", "score": "0.577425", "text": "def for_update_clause(self, select):\r\n return ''", "title": "" }, { "docid": "2b63135918dde00f4bf19df5a21c549b", "score": "0.5720273", "text": "def update( ):", "title": "" }, { "docid": "9fd52aaf9bb796e5f2bfc36d8b4aa5d2", "score": "0.56519884", "text": "def build_update_value_sql(self, column_name):\n clause = (\n '(%(column_name)s | %%(update_%(column_name)s_add)s) & '\n '~%%(update_%(column_name)s_remove)s') % dict(\n column_name=column_name)\n bind_vars = {\n 'update_%s_add' % column_name: self.flags_to_add,\n 
'update_%s_remove' % column_name: self.flags_to_remove}\n return clause, bind_vars", "title": "" }, { "docid": "bad2581dee43043dfc82e8d8ae588a65", "score": "0.54874057", "text": "def test_table_rendering(self):\n us = UpdateStatement('table')\n self.assertTrue(six.text_type(us).startswith('UPDATE table SET'), six.text_type(us))\n self.assertTrue(str(us).startswith('UPDATE table SET'), str(us))", "title": "" }, { "docid": "2d44e586acc21aa683bd3d55a1136ea6", "score": "0.5463413", "text": "def test_squareupdate(self):\n s103 = Square(5)\n self.assertEqual(\n s103.__str__(), \"[Square] ({}) 0/0 - 5\".format(s103.id)) # id 18\n s103.update(103)\n self.assertEqual(\n s103.__str__(), \"[Square] ({}) 0/0 - 5\".format(s103.id))\n s103.update(103, 2)\n self.assertEqual(\n s103.__str__(), \"[Square] ({}) 0/0 - 2\".format(s103.id))\n s103.update(103, 2, 3)\n self.assertEqual(\n s103.__str__(), \"[Square] ({}) 3/0 - 2\".format(s103.id))\n s103.update(103, 2, 3, 4)\n self.assertEqual(\n s103.__str__(), \"[Square] ({}) 3/4 - 2\".format(s103.id))\n s103.update(y=1)\n self.assertEqual(\n s103.__str__(), \"[Square] (103) 3/1 - 2\".format(s103.id))\n s103.update(size=7, id=4000, y=1)\n self.assertEqual(\n s103.__str__(), \"[Square] ({}) 3/1 - 7\".format(s103.id))", "title": "" }, { "docid": "07e970d206b1518e4d307cc25073789f", "score": "0.5433119", "text": "def update():", "title": "" }, { "docid": "8b73ac0ab5977b58068f4c52ca3c5481", "score": "0.5401433", "text": "def update_command() -> None:", "title": "" }, { "docid": "3726d8c00b40638cbeca91179a06b3f2", "score": "0.5399929", "text": "def _make_update_command( bugnumber, param_dic ):\n param_set, check_param, check_value = '', '', ''\n for param in param_dic.keys():\n if re.search( r'(bugnumber|addcomment|comment)', param ) : continue \n \n value = param_dic[param]\n\n if isinstance( value, list ) or isinstance( value, tuple ): \n value = ','.join( value )\n\n if isinstance( value, basestring ):\n value = value.strip(r', ')\n value = re.sub( r',+', ',', value.replace(' ','') )\n value = General.unique( value.split(',') )\n value = ','.join( value )\n\n if not check_param: # -- parameter and value to use in the check\n check_param = param\n check_value = value\n\n param_set = param_set + ' ' + param + \"='\" + str(value) + \"'\"\n\n user = os.getlogin()\n\n try : comment = param_dic['comment']\n except KeyError: comment = 'BATCH PROCESSED'\n\n base_set = \"bugnumber=\"+str(bugnumber) + \" user=\" + user + \" mode=modify ignoreunset=1 addcomment='\" + comment + \"' end=print:\" + check_param\n\n command = '/bugs/modify ' + base_set + ' ' + param_set\n\n return command, check_param, check_value", "title": "" }, { "docid": "1b548d719e3a6601b0459f223e8c25b0", "score": "0.5380551", "text": "def stringify(cls, table, where, values):\n return \"UPDATE `\" + table + \"` SET \" + string_helper.dict_to_sql_query(values) + \" WHERE \" + ' AND '.join(\n string_helper.pairs_to_strings(where)) + ';'", "title": "" }, { "docid": "b1921b4cbde506f11abb3ca3e995ac09", "score": "0.5379543", "text": "def _get_update_sql(self, update_fields: List[str]) -> str:\n key = \",\".join(update_fields)\n if key in self.update_cache:\n return self.update_cache[key]\n\n table = self.model._meta.table()\n query = self.db.query_class.update(table)\n count = 0\n\n for field_name in update_fields:\n field_object = self.model._meta.fields_map[field_name]\n if not field_object.primary_key:\n query = query.set(field_object.db_column, self.parameter(count))\n count += 1\n\n query = 
query.where(table[self.model._meta.pk_db_column] == self.parameter(count))\n\n sql = self.update_cache[key] = query.get_sql()\n return sql", "title": "" }, { "docid": "0ea5ca402d37b4820ab1432f819f6e28", "score": "0.53761333", "text": "def get_update_query(self, row):\n\n columns = self.get_query_columns()\n values = self.get_query_values(row)\n query = \"UPDATE \" + self.table\n i = 0\n query += self.get_update_query_line(row) + \";\"\n return query", "title": "" }, { "docid": "9ee0fbb5f6c945c1a18aacd828b209d1", "score": "0.529451", "text": "def test_update_and_query(self):\n m = Matrix(3)\n for x in range(1, 4):\n for y in range(1, 4):\n for z in range(1, 4):\n # Check support of negative values.\n value = -((x - 1) * 3 * 3 + (y - 1) * 3 + (z - 1) + 1)\n # Check that the query format has some degree of tolerance.\n query = ' update {} {} {} {} '.format(x, y, z, value)\n self.assertEqual(m.execute(query), ('SUCCESS', None))\n self.assertEqual(m.execute(' query 3 3 3 3 3 3 '), ('SUCCESS', -27))\n self.assertEqual(m.execute('QUERY 1 1 1 3 3 3'), ('SUCCESS', -378))", "title": "" }, { "docid": "26adad678e40065487b2d151837e36c6", "score": "0.52926016", "text": "def test_update(self):\n oper = yield self.getOperator()\n pool = yield self.getPool()\n\n obj = Empty()\n yield pool.runInteraction(oper.insert, obj)\n\n obj.name = 'new name'\n obj.uni = u'unicycle'\n obj.date = date(2000, 1, 1)\n yield pool.runInteraction(oper.update, obj)\n\n obj2 = Empty()\n obj2.id = obj.id\n yield pool.runInteraction(oper.refresh, obj2)\n self.assertEqual(obj2.name, 'new name')\n self.assertEqual(obj2.uni, u'unicycle')\n self.assertEqual(obj2.date, date(2000, 1, 1))", "title": "" }, { "docid": "d2744a7c0c40d1ceeca43ce2f71c545a", "score": "0.5287537", "text": "def build_codes_string(self, update_id):\n model_string = self.build_model_str()\n return model_string+\"_codes/\"+update_id", "title": "" }, { "docid": "42e1433ce749544a15aa95c21da63a5f", "score": "0.5279717", "text": "def test_update(self):\n square = Square(5)\n square.update()\n square.update(1, 2, y=3)\n square.update(1, 12, id=4)\n square.update(size=2, y=5)", "title": "" }, { "docid": "8bd6c616c728cee1ed5d80704167ef65", "score": "0.5240214", "text": "def update(self):\n f = [ ]\n #variable declarations\n for var in self.variables:\n domainList = \",\".join([str(i) for i in var.domain])\n f.append( \"variable %s in { %s } \\n\" % (var.name, domainList) )\n #target value declaration\n f.append( \"target = %s \\n\" % str(self.Params.Target) )\n #constraint declarations\n for con in self.constraints:\n f.append(str(con))\n return \" \".join(f)", "title": "" }, { "docid": "4004aea5f2b4d395867ce5076805f061", "score": "0.52381516", "text": "def test_update_invalid_params(self):\n m = Matrix(3)\n self.assertEqual(m.execute('UPDATE'), ('ERROR', None))\n self.assertEqual(m.execute('UPDATE 1'), ('ERROR', None))\n self.assertEqual(m.execute('UPDATE a'), ('ERROR', None))\n self.assertEqual(m.execute('UPDATE 1 1 1 127'), ('ERROR', None))\n self.assertEqual(m.execute('UPDATE 1 1 1 -127'), ('ERROR', None))\n self.assertEqual(m.execute('UPDATE 0 1 1 -127'), ('ERROR', None))\n self.assertEqual(m.execute('UPDATE 4 1 1 -127'), ('ERROR', None))", "title": "" }, { "docid": "da0b0ec15d47e38ce42739a9ff63304b", "score": "0.52327245", "text": "def help_update(self):\n print(\n \"\"\"Updates an instance based on the class name and id by adding or\n updating attribute (save the change into the JSON file).\n Ex: $ update BaseModel 1234-1234-1234\n email \"[email 
protected]\"\"\")", "title": "" }, { "docid": "7a192b93900b4667cb070551775b4236", "score": "0.5228067", "text": "def test_update_expression(self):\n engine = yield self.engine()\n crud = Crud(Readset(families), Sanitizer(families))\n yield crud.create(engine, {'surname': 'Jones', 'location': 'anvilania'})\n yield crud.create(engine, {'surname': 'James', 'location': 'gotham'})\n\n fams = yield crud.update(engine, {'location': 'middle earth'},\n families.c.surname == 'James')\n self.assertEqual(len(fams), 1)\n\n fams = yield crud.fetch(engine, families.c.surname == u'Jones')\n self.assertEqual(fams[0]['location'], 'anvilania')\n\n fams = yield crud.fetch(engine, families.c.surname == u'James')\n self.assertEqual(fams[0]['location'], 'middle earth')", "title": "" }, { "docid": "7f57172ef333aac84ff21496996feb03", "score": "0.5226005", "text": "def update_fields_sql(row):\n # NB: If we ever read column names from the card (aka unsafe user input),\n # be sure to convert colnames to ''.join(c for c in colname if c.isalnum())\n return (\"han=?,pinyin=?,english=?,pack_name=?,errors=?\",\n (row['han'], row['pinyin'], row['english'], row['pack_name'], row['errors']))", "title": "" }, { "docid": "6336ae88c596954bd64525315c49c4db", "score": "0.52183926", "text": "def update(self, flags=''):\n return (0,'')", "title": "" }, { "docid": "e1d55c29645621feb4d81d3609f3520b", "score": "0.520957", "text": "def test_23_update_method_kwargs(self):\n\n rect = Rectangle(1, 1, 1, 1, 30)\n str_test = \"[Rectangle] (30) 1/1 - 1/1\"\n str_org = rect.__str__()\n self.assertEqual(str_org, str_test)\n\n \"\"\"test update 1 *arg, 1 **kwarg\"\"\"\n rect.update(31, height=2)\n str_test = \"[Rectangle] (31) 1/1 - 1/1\"\n str_org = rect.__str__()\n self.assertEqual(str_org, str_test)\n\n \"\"\"test update 5 **kwarg\"\"\"\n rect.update(height=2, id=32, width=3, y=4, x=5)\n str_test = \"[Rectangle] (32) 5/4 - 3/2\"\n str_org = rect.__str__()\n self.assertEqual(str_org, str_test)", "title": "" }, { "docid": "56f2df1d1e6347c467c5eb77faf0a320", "score": "0.5186159", "text": "def test_t_update_args(self):\n r = Rectangle(1, 1, 0, 0, 1)\n self.assertEqual(str(r), \"[Rectangle] (1) 0/0 - 1/1\")\n r.update(89)\n self.assertEqual(str(r), \"[Rectangle] (89) 0/0 - 1/1\")\n r.update(89, 2)\n self.assertEqual(str(r), \"[Rectangle] (89) 0/0 - 2/1\")\n r.update(89, 2, 3)\n self.assertEqual(str(r), \"[Rectangle] (89) 0/0 - 2/3\")\n r.update(89, 2, 3, 4)\n self.assertEqual(str(r), \"[Rectangle] (89) 4/0 - 2/3\")\n r.update(89, 2, 3, 4, 5)\n self.assertEqual(str(r), \"[Rectangle] (89) 4/5 - 2/3\")", "title": "" }, { "docid": "cd86b56263f7126b8e9bd424dcac6ba1", "score": "0.5183953", "text": "def test_Update(self):\n\n s = Square(1, 2, 3)\n arguments = (\n ('id', 'id'), ('size', 20), ('x', 30), ('y', 40), ('extra', 0)\n )\n d = s.to_dictionary()\n for i in range(len(arguments)):\n args = arguments[:i + 1]\n if i < len(arguments) - 1:\n d.update(args)\n with self.subTest():\n s.update(*(val for _, val in args))\n self.assertEqual(s.to_dictionary(), d)\n s.update('new', width=5)\n d['id'] = 'new'\n with self.subTest():\n self.assertEqual(s.to_dictionary(), d)\n s.update('new', 1, 2, 3, 4)\n d = s.to_dictionary()\n for i in range(len(arguments)):\n args = arguments[:i + 1]\n if i < len(arguments) - 1:\n d.update(args)\n with self.subTest():\n s.update(**dict(args))\n self.assertEqual(s.to_dictionary(), d)", "title": "" }, { "docid": "6b57b4b78e589f9ad246c02f49bade18", "score": "0.51742065", "text": "def test_update(self):\n r1 = Rectangle(10, 10, 
10, 10)\n\n r1.update(89)\n self.assertEqual(str(r1), '[Rectangle] (89) 10/10 - 10/10')\n\n r1.update(89, 2)\n self.assertEqual(str(r1), '[Rectangle] (89) 10/10 - 2/10')\n\n r1.update(89, 2, 3, 4, 5)\n self.assertEqual(str(r1), '[Rectangle] (89) 4/5 - 2/3')", "title": "" }, { "docid": "0a9892031bf29a4cd23309dcd92a2739", "score": "0.51637274", "text": "def test_update(self):\n ins1 = Rectangle(5, 5)\n ins1.update(7, 2)\n self.assertEqual(ins1.__str__(), '[Rectangle] (7) 0/0 - 2/5')", "title": "" }, { "docid": "46907bba197e8a35c0e16ad0144d2eb8", "score": "0.5142938", "text": "def visit_update(self, update_stmt, **kw):\n\n # [10] CrateDB patch.\n if not update_stmt.parameters and \\\n not hasattr(update_stmt, '_crate_specific'):\n return super().visit_update(update_stmt, **kw)\n\n self.isupdate = True\n\n extra_froms = update_stmt._extra_froms\n\n text = 'UPDATE '\n\n if update_stmt._prefixes:\n text += self._generate_prefixes(update_stmt,\n update_stmt._prefixes, **kw)\n\n table_text = self.update_tables_clause(update_stmt, update_stmt.table,\n extra_froms, **kw)\n\n dialect_hints = None\n if update_stmt._hints:\n dialect_hints, table_text = self._setup_crud_hints(\n update_stmt, table_text\n )\n\n # [10] CrateDB patch.\n crud_params = _get_crud_params(self, update_stmt, **kw)\n\n text += table_text\n\n text += ' SET '\n\n # [10] CrateDB patch begin.\n include_table = \\\n extra_froms and self.render_table_with_column_in_update_from\n\n set_clauses = []\n\n for k, v in crud_params:\n clause = k._compiler_dispatch(self,\n include_table=include_table) + \\\n ' = ' + v\n set_clauses.append(clause)\n\n for k, v in update_stmt.parameters.items():\n if isinstance(k, str) and '[' in k:\n bindparam = sa.sql.bindparam(k, v)\n set_clauses.append(k + ' = ' + self.process(bindparam))\n\n text += ', '.join(set_clauses)\n # [10] CrateDB patch end.\n\n if self.returning or update_stmt._returning:\n if not self.returning:\n self.returning = update_stmt._returning\n if self.returning_precedes_values:\n text += \" \" + self.returning_clause(\n update_stmt, self.returning)\n\n if extra_froms:\n extra_from_text = self.update_from_clause(\n update_stmt,\n update_stmt.table,\n extra_froms,\n dialect_hints,\n **kw)\n if extra_from_text:\n text += \" \" + extra_from_text\n\n if update_stmt._whereclause is not None:\n t = self.process(update_stmt._whereclause)\n if t:\n text += \" WHERE \" + t\n\n limit_clause = self.update_limit_clause(update_stmt)\n if limit_clause:\n text += \" \" + limit_clause\n\n if self.returning and not self.returning_precedes_values:\n text += \" \" + self.returning_clause(\n update_stmt, self.returning)\n\n return text", "title": "" }, { "docid": "f33527efd2036df88dc6af90db9c1bee", "score": "0.5136234", "text": "async def test_update_string(ref_obj):\n new_string = bytearray(\n b\"802,T=6.3596289|5.139203|342.67|||7.3|729234.25|-58312.28|,\"\n b\"Type=Ground+Static+Aerodrome,Name=FARP,Color=Blue,\"\n b\"Coalition=Enemies,Country=us\"\n )\n cyfuns.proc_line(raw_line=new_string, ref=ref_obj)\n\n update_string = bytearray(b\"802,T=123.45|678.09|234.2||\")\n correct_resp = {\"id\": int(\"802\", 16), \"lat\": 678.09, \"lon\": 123.45, \"alt\": 234.2}\n parsed = cyfuns.proc_line(raw_line=update_string, ref=ref_obj)\n for key, value in correct_resp.items():\n if key == \"id\":\n continue\n assert value == getattr(parsed, key)", "title": "" }, { "docid": "9c16f3c2d7548999700487106edd6349", "score": "0.51317805", "text": "def test_ops_v1_adhoc_partial_update(self):\n pass", "title": "" }, { "docid": 
"81c9e044ed399e0f3d12a5860e40c14e", "score": "0.51080877", "text": "def _update_test_row(\n self,\n mechanism,\n initial_temp,\n initial_press,\n fuel,\n oxidizer,\n equivalence,\n diluent,\n diluent_mol_frac,\n inert,\n cj_speed,\n ind_len_west,\n ind_len_gav,\n ind_len_ng,\n cell_size_west,\n cell_size_gav,\n cell_size_ng,\n ):\n with self.con as con:\n cur = con.cursor()\n cur.execute(\n \"\"\"\n UPDATE {:s} SET \n date_stored = datetime('now', 'localtime'),\n cj_speed = :cj_speed, \n ind_len_west = :ind_len_west,\n ind_len_gav = :ind_len_gav,\n ind_len_ng = :ind_len_ng,\n cell_size_west = :cell_size_west,\n cell_size_gav = :cell_size_gav,\n cell_size_ng = :cell_size_ng\n WHERE\n mechanism = :mechanism AND\n initial_temp = :initial_temp AND\n initial_press = :initial_press AND\n equivalence = :equivalence AND\n fuel = :fuel AND\n oxidizer = :oxidizer AND\n diluent = :diluent AND\n diluent_mol_frac = :diluent_mol_frac AND\n inert = :inert\n \"\"\".format(self.table_name),\n {\n 'mechanism': mechanism,\n 'initial_temp': initial_temp,\n 'initial_press': initial_press,\n 'fuel': fuel,\n 'oxidizer': oxidizer,\n 'equivalence': equivalence,\n 'diluent': diluent,\n 'diluent_mol_frac': diluent_mol_frac,\n 'inert': inert,\n 'cj_speed': cj_speed,\n 'ind_len_west': ind_len_west,\n 'ind_len_gav': ind_len_gav,\n 'ind_len_ng': ind_len_ng,\n 'cell_size_west': cell_size_west,\n 'cell_size_gav': cell_size_gav,\n 'cell_size_ng': cell_size_ng,\n }\n )", "title": "" }, { "docid": "60a5b1d05b97aa12512326118788660e", "score": "0.5088329", "text": "def update(d):", "title": "" }, { "docid": "8aa9affa6be7fd1927a83a4930cbfd1e", "score": "0.50853604", "text": "def do_update(self, *args):", "title": "" }, { "docid": "b374d2ed4f6bc0f11f892aff9632ecfd", "score": "0.50775504", "text": "def test_update_extra_args(self):\n classes = [\"BaseModel\", \"User\", \"State\", \"City\",\n \"Amenity\", \"Place\", \"Review\"]\n attr = [\"name\", \"code\"]\n value = [\"Holberton\", \"123\"]\n typeval = [str, str]\n\n for i in classes:\n with patch('sys.stdout', new=io.StringIO()) as f:\n HBNBCommand().onecmd(\"create \" + i)\n id_st = f.getvalue()\n alldic = storage.all()\n self.assertTrue((i + '.' + id_st[:-1]) in alldic.keys())\n for j, k in zip(attr, value):\n with patch('sys.stdout', new=io.StringIO()) as f:\n HBNBCommand().onecmd(\"update \" + i + \" \" + id_st +\n \" \" + j + \" \" + \"\\\"\" + k + \"\\\"\" +\n \"Hey_name \\\"Betty\\\"\")\n alldic = storage.all()\n ins = alldic[i + '.' 
+ id_st[:-1]]\n for j, k, m in zip(attr, value, typeval):\n gattr = getattr(ins, j, False)\n self.assertEqual(gattr, k)\n self.assertEqual(m, type(gattr))\n self.assertFalse(getattr(ins, \"Hey_name\", False))", "title": "" }, { "docid": "2b78f2b1baa10543b365e9d938d79f21", "score": "0.5068736", "text": "def build_update_statement(table, statement_dict):\n\tquery = \"UPDATE %s SET\" % table\n\tfor key in statement_dict:\n\t\tif isinstance(statement_dict[key], str):\n\t\t\tquery += \" {} = '{}',\".format(key, statement_dict[key])\n\t\telif statement_dict[key] is None:\n\t\t\tquery += \" {} = NULL,\".format(key)\n\t\telse:\n\t\t\tquery += \" {} = {},\".format(key, statement_dict[key])\n\treturn query[:-1]", "title": "" }, { "docid": "fd4b4b8557de4e97c8fdafb67803d8f4", "score": "0.50271964", "text": "def build_update_value_sql(self, column_name):\n _ = column_name\n if not self.update_value_expr:\n raise ValueError('update_value_expr must be defined.')\n return self.update_value_expr, self.bind_vars", "title": "" }, { "docid": "073dc6934d9d9bf0e423fc7d8c6be552", "score": "0.50180775", "text": "def test_update_State(self):\n i = \"State\"\n attr = [\"name\", \"code\"]\n value = [\"Holberton\", \"123\"]\n typeval = [str, str]\n with patch('sys.stdout', new=io.StringIO()) as f:\n HBNBCommand().onecmd(\"create \" + i)\n id_st = f.getvalue()\n alldic = storage.all()\n self.assertTrue((i + '.' + id_st[:-1]) in alldic.keys())\n for j, k in zip(attr, value):\n with patch('sys.stdout', new=io.StringIO()) as f:\n HBNBCommand().onecmd(\"update \" + i + \" \" + id_st +\n \" \" + j + \" \" + k)\n alldic = storage.all()\n ins = alldic[i + '.' + id_st[:-1]]\n for j, k, m in zip(attr, value, typeval):\n gattr = getattr(ins, j, False)\n self.assertEqual(gattr, k)\n self.assertEqual(m, type(gattr))", "title": "" }, { "docid": "b29a3251b3df7d4acd1a0f863012eea6", "score": "0.5010128", "text": "def col_update_params(self, func_ref: FunctionTemplate):\n\t\tout = \"/* Generated using col_update_params() */\\n\"\n\t\tout += \"MYSQL_BIND param[PARAM_COUNT];\\n\"\n\t\tout += \"memset(&param, 0, sizeof(param));\\n\"\n\t\tmemb = self.members.copy()\n\t\tmemb.append(memb.pop(0))\n\t\tfor prop in memb:\n\t\t\tif prop.proptype in [SqlType.VARCHAR, SqlType.TEXT]:\n\t\t\t\tout += self.col_param_length(prop, func_ref)\n\n\t\tfor i, prop in enumerate(memb):\n\t\t\tout += self.col_param_from_prop(i, prop, self.name)\n\t\treturn out", "title": "" }, { "docid": "fb1fed21a61815705d9cbfcf9a41fed8", "score": "0.500608", "text": "def randomize_numbers(update_obj: Update, context: CallbackContext):\n # store the numbers in the context\n context.user_data['rand_x'], context.user_data['rand_y'] = randint(1,1000), randint(1,1000)\n # send the question\n update_obj.message.reply_text(f\"Calculate {context.user_data['rand_x']} + {context.user_data['rand_y']} (20 sec)\")", "title": "" }, { "docid": "40053223189dd79348e0e202f69d3b75", "score": "0.49650937", "text": "def test_update_complaint(self):\n pass", "title": "" }, { "docid": "705d578333836665d6f5d60426ce2af2", "score": "0.4962902", "text": "def test_update(self):\n # Status codes\n n, m = 2, 2\n snake = Snake([(0, 1), (0, 0)])\n snake.direction = 'R'\n apple = (1, 1)\n # Normal status = 0\n status = snake.update(n, m, apple)\n self.assertEqual(status, 'normal')\n # Ate apple status = 1\n snake.direction = 'D'\n status = snake.update(n, m, apple)\n self.assertEqual(status, 'ate_apple')\n # Died status = 2\n status = snake.update(n, m, apple)\n self.assertEqual(status, 'died')", "title": 
"" }, { "docid": "103708eecf98dbf2302ea546a2417333", "score": "0.49500352", "text": "def test_22_update_method(self):\n rect = Rectangle(1, 1, 1, 1, 20)\n str_test = \"[Rectangle] (20) 1/1 - 1/1\"\n str_org = rect.__str__()\n self.assertEqual(str_org, str_test)\n\n \"\"\"test update 1 *arg\"\"\"\n rect.update(21)\n str_test = \"[Rectangle] (21) 1/1 - 1/1\"\n str_org = rect.__str__()\n self.assertEqual(str_org, str_test)\n\n \"\"\"test update 2 *arg\"\"\"\n rect.update(22, 2)\n str_test = \"[Rectangle] (22) 1/1 - 2/1\"\n str_org = rect.__str__()\n self.assertEqual(str_org, str_test)\n\n \"\"\"test update full *arg\"\"\"\n rect.update(23, 1, 2, 3, 4)\n str_test = \"[Rectangle] (23) 3/4 - 1/2\"\n str_org = rect.__str__()\n self.assertEqual(str_org, str_test)\n\n \"\"\"test update full *arg wrong arg\"\"\"\n with self.assertRaises(TypeError):\n rect.update(23, 'a', 2, 3, 4)\n\n \"\"\"test update full *arg wrong arg\"\"\"\n with self.assertRaises(ValueError):\n rect.update(23, -5, 2, 3, 4)\n\n \"\"\"test update full *arg wrong second arg\"\"\"\n with self.assertRaises(TypeError):\n rect.update(23, 1, 'a', 3, 4)", "title": "" }, { "docid": "7aadf329d5ab6effa22ba07fcbba22aa", "score": "0.49490106", "text": "def make_update() -> dict:\n response = requests.post(\n url=os.getenv(\"HASURA_ENDPOINT\"),\n headers={\n \"Accept\": \"*/*\",\n \"content-type\": \"application/json\",\n \"x-hasura-admin-secret\": os.getenv(\"HASURA_ADMIN_KEY\")\n },\n json={\n \"type\": \"run_sql\",\n \"args\": {\n \"sql\": \"\"\"\n /*\n This query reassociates any locations that belong in the wrong location_id\n */\n UPDATE\n atd_txdot_crashes\n SET\n location_id = find_location_id_for_cr3_collision(crash_id),\n updated_by = 'SYSTEM'\n WHERE\n crash_id IN (\n SELECT\n DISTINCT (atc.crash_id)\n FROM atd_txdot_crashes AS atc\n LEFT OUTER JOIN find_location_for_cr3_collision(atc.crash_id) AS loc ON TRUE\n WHERE 1=1\n AND atc.location_id IS NOT NULL\n AND (atc.austin_full_purpose = 'Y' OR (atc.city_id = 22 AND atc.position IS NULL))\n AND atc.location_id != loc.location_id\n );\n \"\"\"\n }\n }\n )\n response.encoding = \"utf-8\"\n return response.json()", "title": "" }, { "docid": "e7e22bcc4211599fc162b1f34490a274", "score": "0.49370617", "text": "def test_ops_v1_adhoc_update(self):\n pass", "title": "" }, { "docid": "36c363bf3af5bf4cb114bbb734c039d8", "score": "0.49262986", "text": "def update_parameter():\n pass", "title": "" }, { "docid": "304d8a5457632ef8a319c73106d34e85", "score": "0.49188325", "text": "async def intent_update_stuff(self,minfo): \n LOGGER.debug('BgxTeleBot: update stuff FOR=%s',minfo) \n if minfo.batch_id: \n LOGGER.debug('BgxTeleBot: CHECK=%s CREATE stuff',minfo.batch_id) \n batch = await self.check_batch_status(minfo.batch_id,minfo) \n return \n args = self.get_args_from_request(minfo.result.parameters) if minfo.result else {'name' : minfo.user_first_name} \n LOGGER.debug('BgxTeleBot: update stuff args=%s',args) \n if 'number' in args: \n new_stuff = user_stuff_name(args['number']) \n upd = {}\n for nm,val in args.items():\n if nm == 'number':\n continue\n if nm[:4] == 'name':\n nnum = 'number'+nm[4:]\n LOGGER.debug('upd : %s=%s',val,args[nnum] if nnum in args else 'undef')\n if nnum in args:\n upd[val] = args[nnum]\n if upd != {}:\n await self.make_stuff_transaction('upd',new_stuff,upd,minfo=minfo) \n self.send_message(minfo.chat_id, 'Изменяю описание детали {} от имени {}.'.format(new_stuff,minfo.user_first_name)) \n else:\n self.send_message(minfo.chat_id, 'Не заданы новые параметры детали 
{}.'.format(new_stuff))", "title": "" }, { "docid": "0a63606d4549df4544e35106f875bfc4", "score": "0.4918445", "text": "def test_w_update_too_many_args(self):\n r = Rectangle(1, 1, 0, 0, 1)\n r.update(1, 1, 1, 1, 1, 2)\n self.assertEqual(str(r), \"[Rectangle] (1) 1/1 - 1/1\")", "title": "" }, { "docid": "f195676de2ede2b714e812b27e0aa198", "score": "0.49147543", "text": "def update_one_indemnity(update_dict,id,indemnity_id,con,cur):\n psql=\"update indemnities set \"\n psql_update=\"\"\n for (key,value) in update_dict.items():\n psql_update=f\"{key}='{value}',\"+psql_update\n\n condition=f\" where extern_id='{indemnity_id}' and extern_client_id='{id}';\"\n psql=psql+psql_update[:-1]+condition\n print(psql)\n cur.execute(psql)\n con.commit()", "title": "" }, { "docid": "8a7c66d94f2cf625a7f140a5a1901ccf", "score": "0.49147314", "text": "def cmd_update(self, text):\n self.update(text)", "title": "" }, { "docid": "aa0364924fd03feb08cbf54cb3fd0d6b", "score": "0.49105972", "text": "def get_update_op():\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n if update_ops:\n return tf.group(*update_ops)\n return None", "title": "" }, { "docid": "27c24728f3563e3ba561659dec917967", "score": "0.48964286", "text": "def update_module(sql_str):\n update_dict = {'update': [], 'set': [], \"where\": [], }\n try:\n update_dict = sql_format(sql_str, update_dict)\n db = update_dict['update'][0].split('.')\n update_dict['set'] = set_module(update_dict['set'], db[1])\n with open(\"%s/%s\" % (db_path, db[1]), 'r', encoding='utf-8') as f, \\\n open(\"%s/%s\" % (db_path, db[1]) + '~', \"w\", encoding=\"utf-8\") as write_f:\n write_str = ','\n for line in f:\n db_dict = dict(zip(db_dic[db[1]].split(','), line.split(',')))\n if screen_print(update_dict['where'], db_dict):\n lines = []\n line = line.split(',')\n for x in range(len(line)):\n if x in update_dict['set']:\n lines.append(update_dict['set'][x])\n else:\n lines.append(line[x])\n write_f.write(write_str.join(lines))\n logger.warning(\"数据修改成功\")\n else:\n write_f.write(line)\n os.remove(\"%s/%s\" % (db_path, db[1]))\n os.rename(\"%s/%s\" % (db_path, db[1]) + '~', \"%s/%s\" % (db_path, db[1]))\n except:\n logger.error(\"语法有误请重新输入\")\n return", "title": "" }, { "docid": "82c606def06f484c90ea6924f8f56e64", "score": "0.48963046", "text": "def update_function(self, function_id, values):", "title": "" }, { "docid": "d8a3c69ffd3ceb774608aad27218b9d3", "score": "0.48959485", "text": "def putOp(op, a, b, c):\n putInstr(op + ' ' + a + ', ' + b + ', ' + str(c))", "title": "" }, { "docid": "2daad55de023f96a0b0319f6a08e99f4", "score": "0.4875336", "text": "def proprietors_update(name_current, name_new, phone_number, address_street, city, state, zip_code, country, email):\n query = f\"\"\"\n UPDATE Proprietors\n SET\n name = '{name_new}',\n phone_number = '{phone_number}',\n address_street = '{address_street}',\n city = '{city}',\n state = '{state}',\n zip_code = '{zip_code}',\n country = '{country}',\n email = '{email}'\n WHERE\n proprietor_id = (SELECT proprietor_id from Proprietors WHERE name = '{name_current}');\n \"\"\"\n return query", "title": "" }, { "docid": "40197e86d5711ccdcd17e754db899b3a", "score": "0.48747063", "text": "def update_func(*args):\n return _ida_funcs.update_func(*args)", "title": "" }, { "docid": "b4260f6d0edd84ace00c8b9bed948e0a", "score": "0.48632845", "text": "def test_help_update(self):\n msg = \"Usage: update <class name> <id> <attribute name> \" \\\n \"<attribute value>\\n\"\n with patch('sys.stdout', new=io.StringIO()) as f:\n 
HBNBCommand().onecmd(\"help update\")\n st = f.getvalue()\n self.assertEqual(msg, st)", "title": "" }, { "docid": "321674674510b4cc7fe0d8ebf07e629d", "score": "0.48587358", "text": "def updateName(act):\n if (act.getDescription() == 'Calculate' or\n act.getDescription() == 'Conditional interrupt'):\n act.replaceStringInInput(0, columnData[0], columnData[1])", "title": "" }, { "docid": "9e3e323476b906c842ab1483a0f5a136", "score": "0.4837574", "text": "def test19(self):\n rec = Rectangle(10, 10, 10, 10, 1)\n rec.update(width=12, x=2)\n self.assertEqual(rec.__str__(), \"[Rectangle] (1) 2/10 - 12/10\")\n rec.update(y=1, width=2, x=3, id=112)\n self.assertEqual(rec.__str__(), \"[Rectangle] (112) 3/1 - 2/10\")", "title": "" }, { "docid": "0e27f945da889e750021de75fbf872c4", "score": "0.4836721", "text": "def update_parameters(self, updates):\n i = 0\n for p in self.get_parameters():\n p += updates[i]\n i += 1", "title": "" }, { "docid": "c9b057771de5a361818f771cd4f972eb", "score": "0.48348036", "text": "def test_update_Review(self):\n i = \"Review\"\n attr = [\"place_id\", \"user_id\", \"text\", \"name\", \"code\"]\n value = [\"985\", \"7621\", \"Random Text\", \"Holberton\", \"123\"]\n typeval = [str, str]\n with patch('sys.stdout', new=io.StringIO()) as f:\n HBNBCommand().onecmd(\"create \" + i)\n id_st = f.getvalue()\n alldic = storage.all()\n self.assertTrue((i + '.' + id_st[:-1]) in alldic.keys())\n for j, k in zip(attr, value):\n with patch('sys.stdout', new=io.StringIO()) as f:\n HBNBCommand().onecmd(\"update \" + i + \" \" + id_st +\n \" \" + j + \" \" + k)\n alldic = storage.all()\n ins = alldic[i + '.' + id_st[:-1]]\n for j, k, m in zip(attr, value, typeval):\n gattr = getattr(ins, j, False)\n self.assertEqual(gattr, k)\n self.assertEqual(m, type(gattr))", "title": "" }, { "docid": "ac0721bc1d14b90bd5252bbf6aae962d", "score": "0.48301694", "text": "def test_update_input(self):\n pass", "title": "" }, { "docid": "0995ebb4b379fecd11e6e80cd0606981", "score": "0.48274854", "text": "def test_mixed_ops(self):\n storage = SQLBalanceStorage(self.db_session)\n insert = {str(x): x for x in range(100)}\n storage.update(insert=insert)\n\n for a in range(100):\n self.assertEqual(storage.get(str(a)), a)\n \n # Insert and update\n insert = {str(x): x for x in range(100, 110)}\n update = {str(x): 5 for x in range(100)}\n storage.update(insert=insert, update=update)\n\n for a in range(100):\n self.assertEqual(storage.get(str(a)), 5)\n\n for a in range(100, 110):\n self.assertEqual(storage.get(str(a)), a)\n\n # Insert udpate and delete\n insert = {str(x): x for x in range(200, 300)}\n update = {str(x): 7 for x in range(50)}\n delete = [str(x) for x in range(50, 100)]\n storage.update(insert=insert, update=update, delete=delete, height=10)\n\n for a in range(50):\n self.assertEqual(storage.get(str(a)), 7)\n\n for a in range(200, 300):\n self.assertEqual(storage.get(str(a)), a)\n\n for a in range(50, 100):\n self.assertEqual(storage.get(str(a), 99999), 99999)", "title": "" }, { "docid": "02c4400482e66865c4af1ca1348f889e", "score": "0.4804673", "text": "def add_update_values(self):\n self.add_macro('classoperation Main \"Update Values\"')", "title": "" }, { "docid": "b24e6c69f0f4107d39f4aab700d2be3f", "score": "0.48040476", "text": "def data_edit(passed_column,passed_input,passed_id, passed_table):\n if passed_table == \"crm_data\":\n statement = (\"UPDATE \" + passed_table + \" SET \"+ passed_column + \" = \\\" \" + passed_input + \"\\\"WHERE crm_ID =\\\" \" + passed_id + \"\\\"\")\n 
my_db.executeQuery(statement)\n my_db.conn.commit()\n elif passed_table.title() == \"Mailings\":\n statement = (\"UPDATE \" + passed_table + \" SET \"+ passed_column + \" = \\\" \" + passed_input + \"\\\"WHERE mail_ID =\\\" \" + passed_id + \"\\\"\")\n my_db.executeQuery(statement)\n my_db.conn.commit()\n print(\"\\nDATA EDITED\\n\")", "title": "" }, { "docid": "81609b96725a2b6254d3d353f9d79c12", "score": "0.47910562", "text": "def test_wb_update_kwargs(self):\n r = Rectangle(1, 1, 0, 0, 1)\n self.assertEqual(str(r), \"[Rectangle] (1) 0/0 - 1/1\")\n r.update(height=10)\n self.assertEqual(str(r), \"[Rectangle] (1) 0/0 - 1/10\")\n r.update(width=11, x=2)\n self.assertEqual(str(r), \"[Rectangle] (1) 2/0 - 11/10\")\n r.update(y=3, width=4, x=5, id=89)\n self.assertEqual(str(r), \"[Rectangle] (89) 5/3 - 4/10\")\n r.update(x=6, height=7, y=8, width=9)\n self.assertEqual(str(r), \"[Rectangle] (89) 6/8 - 9/7\")", "title": "" }, { "docid": "5cf1cb5f5492d49eea1bed73d2e8e28c", "score": "0.47874764", "text": "def random_update_rows(self):\n sql_command = sql.SQL(\"\"\"\n update {0}.{1}\n set transaction_amount = round(random()::numeric, 2)\n where ctid = any(array(\n select ctid\n from {0}.{1}\n tablesample bernoulli (1) ))\"\"\").format(sql.Identifier(self.schema_raw),\n sql.Identifier(self.raw_table_name))\n try:\n rows = self.database.execute(sql_command)\n m.info('Has been updated [%s rows] from table %s' % (rows, self.raw_table_name))\n except psycopg2.Error as err:\n m.error('Oops! Delete random rows has been FAILED. Reason: %s' % err.pgerror)", "title": "" }, { "docid": "a62ab1716771c9f2aab0587d4b967231", "score": "0.4787274", "text": "def string(self):\n return f'y = {self.a.item()} + {self.b.item()} x + {self.c.item()} x^2 + {self.d.item()} x^3'", "title": "" }, { "docid": "a62ab1716771c9f2aab0587d4b967231", "score": "0.4787274", "text": "def string(self):\n return f'y = {self.a.item()} + {self.b.item()} x + {self.c.item()} x^2 + {self.d.item()} x^3'", "title": "" }, { "docid": "19c0e2a1a38f20f30955b004c4f8c629", "score": "0.47824582", "text": "def write_updated(self, txt, place):\n pass", "title": "" }, { "docid": "3e133759a6c178eb4d19df376b4bd87e", "score": "0.478127", "text": "def random_function_sql(self):\n return 'RAND()'", "title": "" }, { "docid": "9195b65fb8bf6e2b1cfb77bb1d3af564", "score": "0.477829", "text": "def q_update(session, q_nn_update, s, s_t, a, a_t, y, y_t):\n session.run(q_nn_update, feed_dict={s: s_t, a: a_t, y: y_t})", "title": "" }, { "docid": "1f7e7666a8d019c7b7d540e8350e2d35", "score": "0.4776224", "text": "def print_update(data):\n sys.stdout.write(\"\\r\\x1b[K\"+data.__str__())\n sys.stdout.flush()", "title": "" }, { "docid": "adf154c9fbdf27148126e14a75f82b71", "score": "0.47674385", "text": "def update_state(xk, sk, wx, wRec):\n return xk * wx + sk * wRec", "title": "" }, { "docid": "4a5a668b1c49f305b84c78322b282094", "score": "0.4766852", "text": "def test_update_output(self):\n pass", "title": "" }, { "docid": "50a73a76a613256411ac5b6ba1e27b40", "score": "0.4764053", "text": "def edit(**kwargs):\n return PATH.format(**kwargs), kwargs", "title": "" }, { "docid": "ae9d58195b2e0e80525cb8309991f730", "score": "0.47601542", "text": "def test_update_User(self):\n i = \"User\"\n attr = [\"name\", \"code\", \"email\", \"password\", \"first_name\", \"last_name\"]\n value = [\"Holberton\", \"123\", \"[email protected]\", \"pswd\", \"Larry\", \"Page\"]\n typeval = [str, str, str, str, str, str]\n with patch('sys.stdout', new=io.StringIO()) as f:\n 
HBNBCommand().onecmd(\"create \" + i)\n id_st = f.getvalue()\n alldic = storage.all()\n self.assertTrue((i + '.' + id_st[:-1]) in alldic.keys())\n for j, k in zip(attr, value):\n with patch('sys.stdout', new=io.StringIO()) as f:\n HBNBCommand().onecmd(\"update \" + i + \" \" + id_st +\n \" \" + j + \" \" + k)\n alldic = storage.all()\n ins = alldic[i + '.' + id_st[:-1]]\n for j, k, m in zip(attr, value, typeval):\n gattr = getattr(ins, j, False)\n self.assertEqual(gattr, k)\n self.assertEqual(m, type(gattr))", "title": "" }, { "docid": "90c5709d603231079009f782d07a01ec", "score": "0.47599232", "text": "def test_do_resource_update(self):\n json_data = {'operation_id': 'test_user-414b-aa5e-dedbeef00101',\n 'IaaS_tenant_id': 'tenantid-414b-aa5e-dedbeef00101',\n 'IaaS_tenant_name': 'tanant_name',\n 'IaaS_region_id': 'regionid-414b-aa5e-dedbeef00101'}\n input = {'type': 'globalip',\n 'json_data': json.dumps(json_data)}\n args = self._make_args(input)\n with mock.patch.object(self.gc.resource,\n 'update') as mocked_func:\n mocked_func.return_value = return_status\n v1shell.do_resource_update(self.gc, args)\n\n fields = json_data\n fields['function_type'] = 'globalip'\n\n mocked_func.assert_called_once_with(fields)", "title": "" }, { "docid": "70dc04cddc7e9cd36f333420d0bab203", "score": "0.47571367", "text": "def test_update_Place(self):\n i = \"Place\"\n attr = [\"city_id\", \"user_id\", \"name\", \"description\", \"number_rooms\",\n \"number_bathrooms\", \"max_guest\", \"price_by_night\", \"latitude\",\n \"longitude\", \"code\"]\n value = [\"686\", \"123\", \"Larry\", \"Nice\", 5, 2, 15, 136,\n 8.7, 9.4, \"988\"]\n typeval = [str, str, str, str, int, int, int, int, float, float, str]\n with patch('sys.stdout', new=io.StringIO()) as f:\n HBNBCommand().onecmd(\"create \" + i)\n id_st = f.getvalue()\n alldic = storage.all()\n self.assertTrue((i + '.' + id_st[:-1]) in alldic.keys())\n for j, k in zip(attr, value):\n with patch('sys.stdout', new=io.StringIO()) as f:\n HBNBCommand().onecmd(\"update \" + i + \" \" + id_st +\n \" \" + j + \" \" + str(k))\n alldic = storage.all()\n ins = alldic[i + '.' + id_st[:-1]]\n for j, k, m in zip(attr, value, typeval):\n gattr = getattr(ins, j, False)\n self.assertEqual(gattr, k)\n self.assertEqual(m, type(gattr))", "title": "" }, { "docid": "4ec51962b416da5fdc990b63f23ef18d", "score": "0.4755857", "text": "def string(self):\n return f'y = {self.a.item()} + {self.b.item()} x + {self.c.item()} x^2 + {self.d.item()} x^3 + {self.e.item()} x^4 ? 
+ {self.e.item()} x^5 ?'", "title": "" }, { "docid": "4f785871136405c03fe2b5df030e3008", "score": "0.47536784", "text": "def _emit_post_update_statements(\n base_mapper, uowtransaction, mapper, table, update\n):\n\n execution_options = {\"compiled_cache\": base_mapper._compiled_cache}\n\n needs_version_id = (\n mapper.version_id_col is not None\n and mapper.version_id_col in mapper._cols_by_table[table]\n )\n\n def update_stmt():\n clauses = BooleanClauseList._construct_raw(operators.and_)\n\n for col in mapper._pks_by_table[table]:\n clauses._append_inplace(\n col == sql.bindparam(col._label, type_=col.type)\n )\n\n if needs_version_id:\n clauses._append_inplace(\n mapper.version_id_col\n == sql.bindparam(\n mapper.version_id_col._label,\n type_=mapper.version_id_col.type,\n )\n )\n\n stmt = table.update().where(clauses)\n\n return stmt\n\n statement = base_mapper._memo((\"post_update\", table), update_stmt)\n\n if mapper._version_id_has_server_side_value:\n statement = statement.return_defaults(mapper.version_id_col)\n\n # execute each UPDATE in the order according to the original\n # list of states to guarantee row access order, but\n # also group them into common (connection, cols) sets\n # to support executemany().\n for key, records in groupby(\n update,\n lambda rec: (rec[3], set(rec[4])), # connection # parameter keys\n ):\n rows = 0\n\n records = list(records)\n connection = key[0]\n\n assert_singlerow = connection.dialect.supports_sane_rowcount\n assert_multirow = (\n assert_singlerow\n and connection.dialect.supports_sane_multi_rowcount\n )\n allow_executemany = not needs_version_id or assert_multirow\n\n if not allow_executemany:\n check_rowcount = assert_singlerow\n for state, state_dict, mapper_rec, connection, params in records:\n c = connection.execute(\n statement, params, execution_options=execution_options\n )\n\n _postfetch_post_update(\n mapper_rec,\n uowtransaction,\n table,\n state,\n state_dict,\n c,\n c.context.compiled_parameters[0],\n )\n rows += c.rowcount\n else:\n multiparams = [\n params\n for state, state_dict, mapper_rec, conn, params in records\n ]\n\n check_rowcount = assert_multirow or (\n assert_singlerow and len(multiparams) == 1\n )\n\n c = connection.execute(\n statement, multiparams, execution_options=execution_options\n )\n\n rows += c.rowcount\n for state, state_dict, mapper_rec, connection, params in records:\n _postfetch_post_update(\n mapper_rec,\n uowtransaction,\n table,\n state,\n state_dict,\n c,\n c.context.compiled_parameters[0],\n )\n\n if check_rowcount:\n if rows != len(records):\n raise orm_exc.StaleDataError(\n \"UPDATE statement on table '%s' expected to \"\n \"update %d row(s); %d were matched.\"\n % (table.description, len(records), rows)\n )\n\n elif needs_version_id:\n util.warn(\n \"Dialect %s does not support updated rowcount \"\n \"- versioning cannot be verified.\"\n % c.dialect.dialect_description\n )", "title": "" }, { "docid": "1f6999a99027eb76444ac33228bf5fae", "score": "0.4751669", "text": "def q_update(self, s, a, r, sp, done):\n # TODO: 1 lines missing.\n raise NotImplementedError(\"Implement function body\")", "title": "" }, { "docid": "1e8c2e898e4f9ca20f3d3443de2e6ea5", "score": "0.47502673", "text": "def increment_msg(self):\n return '<increment command=\"%s\" step=\"%s\" steps=\"%s\"/>\\n' % (self.command, self.step, self.steps)", "title": "" }, { "docid": "b41d1dd6fbb819e0cf2550f9e54f7959", "score": "0.4741742", "text": "def test_update_City(self):\n i = \"City\"\n attr = [\"state_id\", \"name\", \"code\"]\n 
value = [\"568\", \"Holberton\", \"123\"]\n typeval = [str, str, str]\n with patch('sys.stdout', new=io.StringIO()) as f:\n HBNBCommand().onecmd(\"create \" + i)\n id_st = f.getvalue()\n alldic = storage.all()\n self.assertTrue((i + '.' + id_st[:-1]) in alldic.keys())\n for j, k in zip(attr, value):\n with patch('sys.stdout', new=io.StringIO()) as f:\n HBNBCommand().onecmd(\"update \" + i + \" \" + id_st +\n \" \" + j + \" \" + k)\n alldic = storage.all()\n ins = alldic[i + '.' + id_st[:-1]]\n for j, k, m in zip(attr, value, typeval):\n gattr = getattr(ins, j, False)\n self.assertEqual(gattr, k)\n self.assertEqual(m, type(gattr))", "title": "" }, { "docid": "b699f0463526e0bdddd745b428c42ea6", "score": "0.47297275", "text": "def test_update_operator_coeff(self):\n coeff = numpy.ones(6, dtype=numpy.complex64)\n test = FermionOperator('1^', 1. + .0j)\n ops = FermionOperator('1^', 1. + .0j)\n for i in range(2, 7):\n ops += FermionOperator(str(i) + '^', i * (1. + .0j))\n test += FermionOperator(str(i) + '^', 1. + .0j)\n\n openfermion_utils.update_operator_coeff(ops, coeff)\n self.assertEqual(ops, test)", "title": "" }, { "docid": "e668c778743d85ea02253a3b1e893aaf", "score": "0.4723034", "text": "def update():\n\tsafeUpdate()", "title": "" }, { "docid": "2489ee6204b762583404f36cddfd86e0", "score": "0.4720609", "text": "def update_data(msid, data):\n cmd = 'UPDATE v_table SET '\n cmd = cmd + ' yl_time=' + str(data[0]) + ', '\n cmd = cmd + ' yt_time=' + str(data[1]) + ', '\n cmd = cmd + ' rl_time=' + str(data[2]) + ', '\n cmd = cmd + ' rt_time=' + str(data[3])\n cmd = cmd + ' WHERE msid=\"' + msid + '\"'\n\n cursor = db.cursor()\n cursor.execute(cmd)\n db.commit()", "title": "" }, { "docid": "6307efdab438afaf37a43bb462728943", "score": "0.47141886", "text": "def update():\n emitUpdatedData()\n return \"Emitted\"", "title": "" }, { "docid": "d64305faddf9420fdab778ab502f9311", "score": "0.47117412", "text": "def test_update_no_value(self):\n msg = \"** value missing **\\n\"\n classes = [\"BaseModel\", \"User\", \"State\", \"City\",\n \"Amenity\", \"Place\", \"Review\"]\n for i in classes:\n with patch('sys.stdout', new=io.StringIO()) as f:\n HBNBCommand().onecmd(\"create \" + i)\n id_st = f.getvalue()\n with patch('sys.stdout', new=io.StringIO()) as f:\n HBNBCommand().onecmd(\"update \" + i + \" \" + id_st + \" name\")\n st = f.getvalue()\n self.assertEqual(msg, st)", "title": "" }, { "docid": "e1281ffd635b6c10063e4b944a26b2d6", "score": "0.47114664", "text": "def test_help_update(self):\n\n out_put = (\"Updates an object with new information\\n\"\n \"Usage: update <className> <id> <attName> <attVal>\")\n with patch('sys.stdout', new=StringIO()) as f:\n self.assertFalse(HBNBCommand().onecmd(\"help update\"))\n self.assertEqual(out_put, f.getvalue().strip())", "title": "" }, { "docid": "a44362b37f275ed528eb1044ef72448f", "score": "0.47098", "text": "def d22r ( state ) :\n byte = state[\"byte\"]\n\n r = byte & 0b00000111\n\n reg = registertable[\"r32\"][r]\n\n return f\"XCHG EAX, {reg}\"", "title": "" }, { "docid": "067f6a8a91d2f96f5dd357cc0177be81", "score": "0.47071573", "text": "def inc_update(e_domain,e_port,module,data,full_timestamp):\n try:\n del(data[\"source_head\"])\n if data.has_key(\"@info\"):\n del(data[\"@info\"])\n index_type = data.pop(\"@type\")\n \n #add modify_ for modify request\n for k,v in data.items():\n if k.find(\"@\") == 0:\n nk = k[1:]\n nv = v\n del(data[k])\n data.setdefault(nk,nv)\n elif index_type == \"modify\":\n nk = \"_modify_\"+k\n nv = v\n del(data[k])\n 
data.setdefault(nk,nv)\n\n #add some other info for response url\n if index_type == \"delete\":\n data.setdefault(\"_index_del\",\"1\")\n else:\n data.setdefault(\"_index_\"+index_type,\"1\")\n\n data.setdefault(\"_inc_num\",data[\"stamp\"])\n del(data[\"stamp\"])\n data.setdefault(\"_full_timestamp\",full_timestamp)\n \n #add modify_ for modify request\n inc_par = urlencode(data)\n #logger.info(\"inc_update: http://%s:%s/?%s\"%(e_domain,e_port,inc_par))\n inc_data = query(\"http://%s:%s/?%s\"%(e_domain,e_port,inc_par))\n #if inc_data is None: return \"\"\n #if inc_data !=\"<result>OK</result>\":\n # url = \"inc stamp:http://%s:%s/?%s\"%(e_domain,e_port,inc_par)\n # logger.warning(\"module[%s] inc update failed:%s. URL: %s\"%(module,inc_data,url))\n cycleCnt =1\n url = \"inc stamp:http://%s:%s/?%s\"%(e_domain,e_port,inc_par)\n # it always does inc update util inc update succeeded.\n while True:\n if inc_data is not None and inc_data == \"<result>OK</result>\":\n break;\n if cycleCnt%10 == 0:\n cycleCnt = 1\n logger.warning(\"module[%s] inc update failed:%s. URL: %s\"%(module,inc_data,url))\n else: cycleCnt = cycleCnt +1\n inc_data = query(\"http://%s:%s/?%s\"%(e_domain,e_port,inc_par))\n return inc_data\n except Exception,e:\n logger.exception(e)\n return e", "title": "" }, { "docid": "238bbeda30422e9906824668c0fe29f0", "score": "0.47068277", "text": "def __master_string(self, tablename, key):\r\n return 'ty_name=\"%s\" name=\"%s\" changed_entry=\"true\"' % (\r\n tablename, key)", "title": "" }, { "docid": "1596c96053d5d401568fd85ffec4236a", "score": "0.469719", "text": "def test_update_evaluation(self):\n pass", "title": "" }, { "docid": "0dfb8bb00c46b3e26d4f28e6d7bba6e8", "score": "0.46927968", "text": "def test_update_BaseModel(self):\n i = \"BaseModel\"\n attr = [\"name\", \"code\"]\n value = [\"Holberton\", \"123\"]\n typeval = [str, str]\n with patch('sys.stdout', new=io.StringIO()) as f:\n HBNBCommand().onecmd(\"create \" + i)\n id_st = f.getvalue()\n alldic = storage.all()\n self.assertTrue((i + '.' + id_st[:-1]) in alldic.keys())\n for j, k in zip(attr, value):\n with patch('sys.stdout', new=io.StringIO()) as f:\n HBNBCommand().onecmd(\"update \" + i + \" \" + id_st +\n \" \" + j + \" \" + k)\n alldic = storage.all()\n ins = alldic[i + '.' 
+ id_st[:-1]]\n for j, k, m in zip(attr, value, typeval):\n gattr = getattr(ins, j, False)\n self.assertEqual(gattr, k)\n self.assertEqual(m, type(gattr))", "title": "" }, { "docid": "d916bca36ee5f6484480566e766b9683", "score": "0.46829998", "text": "def update(self, info):", "title": "" }, { "docid": "0d446e6eb1da9e6526927535ec501c38", "score": "0.46774003", "text": "def test_update(self):\n doc = {\"a\": 1, \"b\": 2}\n self.conn.test.test.insert_one(doc)\n selector = {\"_id\": doc['_id']}\n\n def update_and_retrieve(update_spec, replace=False):\n if replace:\n self.conn.test.test.replace_one(selector, update_spec)\n else:\n self.conn.test.test.update_one(selector, update_spec)\n\n # self.conn.test.test.update(selector, update_spec)\n # Give the connector some time to perform update\n time.sleep(1)\n return self.synchronizer._search()[0]\n\n # Update whole document\n doc = update_and_retrieve({\"a\": 1, \"b\": 2, \"c\": 10}, replace=True)\n self.assertEqual(doc['a'], 1)\n self.assertEqual(doc['b'], 2)\n self.assertEqual(doc['c'], 10)\n\n # $set only\n doc = update_and_retrieve({\"$set\": {\"b\": 4}})\n self.assertEqual(doc['a'], 1)\n self.assertEqual(doc['b'], 4)\n\n # $unset only\n doc = update_and_retrieve({\"$unset\": {\"a\": True}})\n self.assertNotIn('a', doc)\n self.assertEqual(doc['b'], 4)\n\n # mixed $set/$unset\n doc = update_and_retrieve({\"$unset\": {\"b\": True}, \"$set\": {\"c\": 3}})\n self.assertEqual(doc['c'], 3)\n self.assertNotIn('b', doc)\n\n # ensure update works when fields are given\n opthread = self.connector.shard_set[0]\n opthread.fields = ['a', 'b', 'c']\n try:\n doc = update_and_retrieve({\"$set\": {\"d\": 10}})\n self.assertEqual(self.conn.test.test.find_one(doc['_id'])['d'], 10)\n self.assertNotIn('d', doc)\n doc = update_and_retrieve({\"$set\": {\"a\": 10}})\n self.assertEqual(doc['a'], 10)\n finally:\n # cleanup\n opthread.fields = None", "title": "" }, { "docid": "a362249a0ba46887801719fc017d6771", "score": "0.467708", "text": "def test18(self):\n rec = Rectangle(10, 7, 8, 3, 1)\n rec.update(24)\n self.assertEqual(rec.__str__(), \"[Rectangle] (24) 8/3 - 10/7\")", "title": "" } ]
3b2ca89911346bae817b9127aabd70e3
plots the baselined data
[ { "docid": "1a5e5a9858fa3e8ce25ccbdc3b39dd32", "score": "0.0", "text": "def plot_data_bas(save_folder, electrode = 'all', save_name = 'data_bas', save_ext = '.png'):\n data_bas, fs = reader.read_databas(save_folder)\n plot_data(data_bas, fs, save_folder, electrode = 'all', save_name = save_name, save_ext = save_ext)", "title": "" } ]
[ { "docid": "c53b6e855fb9c1e6c35c8eb1a4de3a95", "score": "0.7291515", "text": "def stat_plot():", "title": "" }, { "docid": "b15382ff7e5413c58f939ddcb600f9cc", "score": "0.72905284", "text": "def plot(self):", "title": "" }, { "docid": "a4240a5aadf84ac63cc47713c81c8741", "score": "0.7153346", "text": "def plot(self):\n pass", "title": "" }, { "docid": "a4240a5aadf84ac63cc47713c81c8741", "score": "0.7153346", "text": "def plot(self):\n pass", "title": "" }, { "docid": "e7b1f483929ac8139b7234ce53b1887c", "score": "0.7001416", "text": "def CreatePlot():", "title": "" }, { "docid": "3bd7eee84ddf415145756a499a303fd7", "score": "0.6907235", "text": "def plot(self, ax):\n pass", "title": "" }, { "docid": "01e589792f32c8cc39dd88623ce3f2da", "score": "0.6877346", "text": "def plot(self):\n self.ss.plot()", "title": "" }, { "docid": "01f7fb6c04e21dce7bd977dc80a12ab2", "score": "0.68725234", "text": "def _createBasePlot(self):\n\t\tfor i in xrange(0,self.N+1):\n\t\t\tix = self._baseN(i,self.base)\n\t\t\tself.x_dict[ix] = round(math.cos(math.pi/2 - i*self.angle), 5)\n\t\t\tself.y_dict[ix] = round(math.sin(math.pi/2 - i*self.angle), 5)\n\n\t\tself.fig, ax = plt.subplots()\n\t\tax.set_xlim((-1,1))\n\t\tax.set_ylim((-1,1))\n\t\tax.plot(self.x_dict.values(), self.y_dict.values(), 'o', color='black')\n\t\tself.fig.suptitle('Modulo %i, multiplier %i' % (self.base, self.multiplier), fontsize=20)", "title": "" }, { "docid": "3439bef379f8d34b9cad61cd6b306eef", "score": "0.6718292", "text": "def plot_bases( arr ):\n\treadlen = len(arr)\n\tprint readlen\n\txg = np.arange( readlen ) \n\tpyplot.plot( xg, arr[ : , 0 ], marker='.', color='red')\n\tpyplot.plot( xg, arr[ : , 1 ], marker='.', color='darkgreen')\n\tpyplot.plot( xg, arr[ : , 2 ], marker='.',color='lightgreen')\n\tpyplot.plot( xg, arr[ : , 3 ], marker='.',color='orange')\n\tpyplot.plot( xg, arr[ : , 4 ], marker='.',color='grey')\n\tpyplot.axis( (0, readlen-1, 0, 1 ) )\n\tpyplot.text( readlen*.70, .9, \"A\", color=\"red\" )\n\tpyplot.text( readlen*.75, .9, \"C\", color=\"darkgreen\" )\n\tpyplot.text( readlen*.80, .9, \"G\", color=\"lightgreen\" )\n\tpyplot.text( readlen*.85, .9, \"T\", color=\"orange\" )\n\tpyplot.text( readlen*.90, .9, \"N\", color=\"grey\" )", "title": "" }, { "docid": "5d23cdf556805f098d4bfeb6e0c66524", "score": "0.6697805", "text": "def main():\n\n x = sorted(read())\n y = [0, 1, 2, 3, 5, 7, 10, 20, 30, 50, 70, 100, 200, 300, 500, 700, 1000, 2000, 3000, 5000]\n plt.subplot(211)\n plt.plot(y[:7], x[:7])\n plt.ylabel('Q')\n plt.grid(True)\n plt.subplot(212)\n plt.plot(y[3:], x[3:])\n plt.xlabel('A')\n plt.ylabel('Q')\n plt.grid(True)\n plt.show()", "title": "" }, { "docid": "752f8adde0fbd639945e177a26989f80", "score": "0.65748715", "text": "def plot() -> None:\n raise NotImplementedError()", "title": "" }, { "docid": "ce7814dba497631bf575da1404a6456b", "score": "0.65513206", "text": "def plot_raw_data(self, axes_hdl):\r\n pass", "title": "" }, { "docid": "c8bae29bacb1870c7e9132e0d575dd06", "score": "0.6535789", "text": "def plot_raw(self,data,Y):\n\n if self.decodeLable:\n lable=self.decoding[Y==True]\n else:\n lable = Y\n\n fig, axs=plt.subplots(len(self.CH_names),1,sharey=True,figsize=(6,20))\n for n,CH in enumerate(self.CH_names):\n axs[n].plot(data[n])\n axs[n].set_ylabel(CH)\n fig.suptitle(f\"lable {lable}\")\n plt.show()", "title": "" }, { "docid": "187884bdce68ebff919c0d94e2d1708a", "score": "0.6515786", "text": "def plotAlgo(self):\r\n plot_total = len(self.states)\r\n _, ax = plt.subplots(nrows = plot_total, ncols = 1)\r\n i = 
0\r\n for row in ax:\r\n curr_plot = np.array([k[0] for k in self.states[i]])\r\n index = np.arange(len(curr_plot))\r\n row.bar(index, curr_plot)\r\n row.set_xticks(list(range(2**self.n)))\r\n row.set_xlabel(\"States\")\r\n row.set_ylabel(\"Amplitudes\")\r\n i+=1\r\n plt.show()", "title": "" }, { "docid": "6996d76fa5adaaffe907df1163785ffc", "score": "0.6483892", "text": "def make_plot(self,other_params):\n \n pass", "title": "" }, { "docid": "bf3ba3a7f11399273c4a2f09c1c2d928", "score": "0.6457584", "text": "def plot_raw(self, datslice):\n plt.plot(self.sdat_list[datslice])\n plt.show()", "title": "" }, { "docid": "0df3ac0ee7ec66d0692d9e736706c4ae", "score": "0.64543104", "text": "def plot(self,typ='brain'):\n if typ=='train' or typ=='both':\n for i in np.unique(self.y_train):\n plt.plot(self.X_train[self.y_train==i,0],self.X_train[self.y_train==i,1],'.')\n if typ=='test' or typ=='both':\n for i in np.unique(self.y_test):\n plt.plot(self.X_test[self.y_test==i,0],self.X_test[self.y_test==i,1],'.')\n plt.show()\n plt.close()", "title": "" }, { "docid": "ba8b22ffcdbe7a35750bb0d4f4bc260b", "score": "0.64490503", "text": "def plot(self):\n fig = plt.figure(figsize=(2, 0.5))\n ax = fig.add_subplot(111)\n ax.plot(self.ts, 'r-')\n for _, v in ax.spines.items():\n v.set_visible(False)\n ax.set_xticks([])\n ax.set_yticks([])\n return ax\n # here", "title": "" }, { "docid": "94b984bfbcd0b34ba60808f045989300", "score": "0.6441489", "text": "def plot_raw(self):\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n fig.suptitle(r'Four Possible a vs. $\\hat{a}$ Models')\n csfont = {'fontname': 'Times New Roman', 'fontsize': 16}\n\n ax1.plot(self.a, self.a_hat, 'ks', markersize=0.5)\n ax1.set_xlabel('Size, a (mm) \\n (a)')\n ax1.set_ylabel(r'Response, $\\hat{a}$ (mV)')\n\n ax2.plot(self.a, self.a_hat, 'ks', markersize=0.5)\n ax2.set_xscale('log')\n # ax2.set_xticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])\n # ax2.set_xticklabels(['$10^{-1}$', 2, 3, 4, 5, 6, 7, 8, 9, '$10^0$'])\n ax2.set_xlabel('Size, $\\log(a)$ (mm) \\n (b)')\n ax2.set_ylabel(r'Response, $\\hat{a}$ (mV)')\n\n ax3.plot(self.a, self.a_hat, 'ks', markersize=0.5)\n ax3.set_yscale('log')\n # ax3.set_yticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])\n # ax3.set_yticklabels(['$10^{-1}$', 2, 3, 4, 5, 6, 7, 8, 9, '$10^0$'])\n ax3.set_xlabel('Size, a (mm) \\n (c)')\n ax3.set_ylabel(r'Response, $\\log(\\hat{a})$ (mV)')\n\n ax4.plot(self.a, self.a_hat, 'ks', markersize=0.5)\n ax4.set_xscale('log')\n ax4.set_yscale('log')\n # ax4.set_xticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])\n # ax4.set_xticklabels(['$10^{-1}$', 2, 3, 4, 5, 6, 7, 8, 9, '$10^0$'])\n # ax4.set_yticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])\n # ax4.set_yticklabels(['$10^{-1}$', 2, 3, 4, 5, 6, 7, 8, 9, '$10^0$'])\n ax4.set_xlabel('Size, $\\log(a)$ (mm) \\n (d)')\n ax4.set_ylabel(r'Response, $\\log(\\hat{a})$ (mV)')\n\n # plt.tight_layout()\n plt.show()", "title": "" }, { "docid": "fb71583bf541d57d8e7bc9b3087f246b", "score": "0.6410512", "text": "def plot_linechart(ax, database, infobase, start_date, end_date, mode='preview'):\r\n ax2 = ax.twinx()\r\n\r\n if len(database)<glc.NUM_OF_FILENAME_DROPDOWN:\r\n database += [None for j in range(glc.NUM_OF_FILENAME_DROPDOWN - len(database))]\r\n\r\n if len(infobase)<glc.NUM_OF_FILENAME_DROPDOWN:\r\n infobase += [None for j in range(glc.NUM_OF_FILENAME_DROPDOWN - len(infobase))]\r\n\r\n # colorlist = ['red', 'green', 'blue', 'chocolate', 'gold']\r\n units_lst = tools.get_units_lst(infobase)\r\n 
curvelist = []\r\n labellist = []\r\n\r\n if len(units_lst) == 1:\r\n ax.set_ylabel(units_lst[0], fontsize=12)\r\n ax.axis('on')\r\n ax2.axis('off')\r\n elif len(units_lst) == 2:\r\n ax.set_ylabel(units_lst[0], fontsize=12)\r\n ax2.set_ylabel(units_lst[1], fontsize=12)\r\n ax.axis('on')\r\n ax2.axis('on')\r\n\r\n for i in range(glc.NUM_OF_FILENAME_DROPDOWN):\r\n if database[i] is not None and infobase[i] is not None :\r\n title = infobase[i]['title']\r\n dim = infobase[i]['dim']\r\n color = tools.convert_rgb2mcolor(infobase[i]['color'])\r\n alpha = infobase[i]['alpha'] /100\r\n style = infobase[i]['style']\r\n width = infobase[i]['width'] / 2\r\n if dim == units_lst[0]:\r\n curvelist += ax.plot(database[i].index, database[i], c=color, alpha=alpha, linestyle=style, linewidth=width)\r\n elif dim == units_lst[1]:\r\n curvelist += ax2.plot(database[i].index, database[i], c=color, alpha=alpha, linestyle=style, linewidth=width)\r\n labellist.append(infobase[i]['title'])\r\n \r\n if mode=='preview':\r\n ax.legend(curvelist, labellist, loc=3, prop={'size': 10}, bbox_to_anchor=(0.0, 1.0), ncol=5)\r\n elif mode=='annually' or mode=='sequentially':\r\n ax.set_title(title)\r\n elif mode=='monthly':\r\n ax.set_title(tools.convert_number_to_month(title))\r\n xticklabel, xtick = tools.convert_date_to_ticklabel_monthly(database[0].index)\r\n ax.set_xticks(xtick)\r\n ax.set_xticklabels(xticklabel, rotation='horizontal')\r\n else:\r\n raise NotImplementedError\r\n print(end_date, type(end_date))\r\n ax.set_xlim(start_date, end_date+pd.Timedelta('23 hour 59 min 59 s'))\r\n return ax, ax2", "title": "" }, { "docid": "9eedaf2846f8d774c383087c8ebcf732", "score": "0.640053", "text": "def plot(self):\n data, cidx = self.center()\n # beams = range(self.header[-1])\n # samples = range(-1*cidx, len(data)-cidx)\n # X,Y = np.meshgrid(beams, samples)\n # plt.pcolormesh(X,Y,data, cmap = 'gray')\n plt.imshow(data, aspect='auto', cmap='gray', interpolation='none')\n plt.clim((-80, 0))", "title": "" }, { "docid": "36d898cda6c0566c165400aa202c2ed2", "score": "0.639997", "text": "def plot(self, gen_params):\n import pylab as plt\n\n fig, ax = plt.subplots(ncols=2, nrows=2, figsize=(10, 9))\n\n todraw = ['z', 'x1', 'color', 'daymax']\n idx = dict(zip(todraw, [(0, 0), (0, 1), (1, 1), (1, 0)]))\n\n for name in todraw:\n i = idx[name][0]\n j = idx[name][1]\n ax[i][j].hist(gen_params[name], histtype='step')\n ax[i][j].set_xlabel(name)\n ax[i][j].set_ylabel('Number of entries')\n\n plt.show()", "title": "" }, { "docid": "ca1dac7817aba86d04fd9190a3e3eec6", "score": "0.6384763", "text": "def base_tech():\n plot('tech_base', normalize=True, title='Base Tech Distribution',\n xlabel='Tech Given', ylabel='Probability', axis=[-1, 11, 0, 0.1],\n save='base_tech', color='c', bar=True)", "title": "" }, { "docid": "8ad79ff3cc2b34741b812f0b0a140422", "score": "0.6384625", "text": "def __plot_original_data__(self, X,y): \n imbalance_cl = plot_data.PlotDataClasses(X,y)\n imbalance_cl.num_lab_medications()\n imbalance_cl.medication_comparison()\n imbalance_cl.change_med_and_readmission()\n imbalance_cl.readmitted()\n #exit()", "title": "" }, { "docid": "f0bf12d2a9c60465c44eadbfe72901cd", "score": "0.6375226", "text": "def plotBinning_n(bin_info_n,var_name):\n \n tbl=bin_info_n[bin_info_n.var_name==var_name]\n IV=tbl.IV.sum()\n print \"IV= \",IV\n plt.figure(figsize=(9, 4))\n plt.subplot(121)\n plt.plot(tbl.clus_num,tbl[\"BadRate\"],label='BadRate',linewidth=1.5,color='r',marker='o', markerfacecolor='blue',markersize=6) \n 
plt.xticks(tbl.clus_num, tbl.bucket, rotation=90)\n #ax1 = tbl[\"BadRate\"].plot.line(style='r',grid=True,title=var_name)\n #ax1.set_xticklabels(tbl.bucket)\n plt.subplot(122)\n ax1 = tbl['PctTotal'].plot.bar( alpha=0.3,grid=True, title=var_name)\n ax1.set_xticklabels(tbl.bucket)\n plt.show()", "title": "" }, { "docid": "0678e2706afbd8a681affe70fd217781", "score": "0.63594794", "text": "def plot_hrc_dose(acc_data, dff_data, ofile):\n#\n#--- open the data lists\n#\n [date, amean, amin, amax, accs1, accs2, accs3] = acc_data\n [date, dmean, dmin, dmax, dffs1, dffs2, dffs3] = dff_data\n#\n#---- set a few parameters\n#\n plt.close('all')\n mpl.rcParams['font.size'] = 9\n props = font_manager.FontProperties(size=6)\n plt.subplots_adjust(hspace=0.05)\n plt.subplots_adjust(wspace=0.12)\n#\n#--- mean\n#\n ax1 = plt.subplot(3,2,1)\n plot_panel(date, dmean, 'Average', ax1)\n#\n#--- mean cumulative\n#\n ax2 = plt.subplot(3,2,2)\n plot_panel(date, amean, 'Average Cumulative', ax2)\n#\n#--- max\n#\n ax3 = plt.subplot(3,2,3)\n plot_panel(date, dmax, 'Maximum', ax3)\n#\n#--- max cumulative\n#\n ax4 = plt.subplot(3,2,4)\n plot_panel(date, amax, 'Maximum Cumulative', ax4)\n#\n#--- 68, 95, and 99.6% levels\n#\n labels = [\"68% Value \", \"95% Value\", \"99.7% Value\"]\n ax5 = plt.subplot(3,2,5)\n plot_panel2(date, dffs1, dffs2, dffs3, labels, ax5)\n#\n#--- 68, 95, and 99.6% cumulative\n#\n ax6 = plt.subplot(3,2,6)\n plot_panel2(date, accs1, accs2, accs3, labels, ax6)\n#\n#--- plot x axis tick label only at the bottom ones\n#\n for ax in ax1, ax2, ax3, ax4, ax5, ax6:\n if ax != ax5 and ax != ax6:\n for label in ax.get_xticklabels():\n label.set_visible(False)\n else:\n pass\n#\n#--- putting axis names\n#\n ax3.set_ylabel('Counts per Pixel')\n ax5.set_xlabel('Year')\n ax6.set_xlabel('Year')\n#\n#--- set the size of the plotting area in inch (width: 10.0in, height 10.0in)\n# \n fig = matplotlib.pyplot.gcf()\n fig.set_size_inches(10.0, 10.0)\n#\n#--- save the plot in png format\n# \n plt.savefig(ofile, format='png', dpi=200)\n\n plt.close('all')", "title": "" }, { "docid": "bb83c7686fbcb8fb252d0038dd37cf71", "score": "0.6356733", "text": "def plot(self,nrows=1,ncols=1,iplot=1,ttitle=''):\n import matplotlib.pyplot as plt\n if iplot==1: plt.clf()\n plt.subplot(nrows,ncols,iplot)\n plt.scatter([0],[0])\n if self!='T': self._plot_subtree(0,0,1.)\n\n fs=int(np.ceil(20./nrows))\n plt.title(ttitle,{'fontsize': fs})\n plt.xticks([])\n plt.yticks([])\n plt.axis('off')", "title": "" }, { "docid": "96a8341ad999172638e28f4d7d2150b5", "score": "0.63548684", "text": "def plot(self):\n fig1, ax1 = plt.subplots(1, 1, facecolor='white', figsize=(16, 9))\n fig1.canvas.set_window_title('Data Plot')\n\n ax1.plot(self.x, self.y, '-b^', markersize=8, label='data')\n ax1.plot(self.x[1:-1], self.dy[1:-1], '-.ro', markersize=5,\n label=r'$\\mathbf{\\frac{\\delta y}{\\delta x}}$')\n ax1.plot(self.x[2:-2], self.ddy[2:-2], '--md', markersize=5,\n label=r'$\\mathbf{\\frac{\\delta^2 y}{\\delta x^2}}$')\n\n ax1.set_title(self.title, fontsize=self.title_size, fontweight='bold')\n ax1.legend(loc='upper right', fontsize=20)\n ax1.set_xlabel(self.x_label, fontsize=self.label_size,\n fontweight='bold')\n ax1.set_ylabel(self.y_label, fontsize=self.label_size,\n fontweight='bold')\n plt.grid(True)\n\n save_plot(name=self.plot_name)", "title": "" }, { "docid": "2c651cf0b66afe0710ebb17d06752a34", "score": "0.6345417", "text": "def plot(self):\n super(LightCurve, self).plot()\n\n # Load the data.\n with self.open() as f:\n data = f[1].data\n time, 
sapflux, pdcflux, qual = (data[\"time\"], data[\"sap_flux\"],\n data[\"pdcsap_flux\"],\n data[\"sap_quality\"])\n\n # Set up the figure.\n fig, axes = pl.subplots(2, 1, figsize=(6, 6))\n fig.subplots_adjust(left=0.17, top=0.99, right=0.99,\n wspace=0.0, hspace=0.0)\n\n # Plot the data.\n m = np.isfinite(time)\n xlim = [np.min(time[m]), np.max(time[m])]\n for i, (f, nm) in enumerate(zip([sapflux, pdcflux],\n [\"sap flux\", \"pdc flux\"])):\n ax = axes[i]\n m = np.isfinite(time) * np.isfinite(f)\n m1 = m * (qual == 0)\n m2 = m * (qual != 0)\n mu = np.median(f[m1])\n f1 = (f[m1] / mu - 1) * 1e6\n ax.plot(time[m1], f1, \".k\", ms=3, alpha=0.6)\n ax.plot(time[m2], (f[m2] / np.median(f[m2]) - 1) * 1e6, \".r\", ms=3,\n alpha=0.6)\n ax.set_xlim(xlim)\n ax.set_ylim(np.min(f1), np.max(f1))\n ax.annotate(\"relative \" + nm + \" [ppm]\",\n xy=(1, 1), xycoords=\"axes fraction\",\n xytext=(-3, -3), textcoords=\"offset points\",\n horizontalalignment=\"right\", verticalalignment=\"top\")\n\n axes[0].set_xticklabels([])\n axes[1].set_xlabel(\"time [KBJD]\")\n\n return fig", "title": "" }, { "docid": "c0a3d01495584a1580027afd27c9772a", "score": "0.63360786", "text": "def plotTraining(self, p):", "title": "" }, { "docid": "7db117549409ae400cc8625fb16e0896", "score": "0.6327995", "text": "def plot_vs(nombre,fecha_inical,fecha_final,lim,ext):\n \n model_CO , name = leerModel() # Carga los datos y los nombres del Modelo en un Datafame \n obs_CO , name = leerObs() # Carga los datos y los nombres de las Obs en un Dataframe \n\n aux = ['CHR', 'RAP', 'SMO', 'POCN15', 'POCN10', 'POCN05', 'POC000', 'POCS05',\n 'POCS10', 'POCS15', 'POCS20', 'POCS25', 'POCS30', 'GUAM']\n \n indice = aux.index(nombre) # Este es pora llamar los plots con el nombre lat lon completo\n\n ax= plt.subplot(1,1,1)\n \n ####### Define Variables \n var_model = model_CO[nombre].loc[fecha_inical : fecha_final] # Esto busca las fechas para plotiar\n var_obs = obs_CO[nombre].loc[fecha_inical : fecha_final] # Esto busca las fechas para plotiar \n \n \n ####### Parametros para estilo del Plot\n plt.rcParams[\"font.family\"] = \"Times New Roman\" # Parametros para letras y tamaños\n plt.rcParams['xtick.labelsize'] = 18\n plt.rcParams['ytick.labelsize'] = 18\n \n plt.plot(var_model,'-',color='black',linewidth=1,markersize=5) # Plot del modelo\n plt.plot(var_obs,'ob',color='red',linewidth=2,markersize=3) # Plot de la obs\n plt.xlabel('Months', fontsize=14) ; plt.ylabel('CO [ppbv]', fontsize=14 ) # label x \"meses\"\n plt.title(name[indice], fontsize=15) # Titulo busca el nombre con las ids CHR,RAP,SOMO.. 
\n plt.legend(['Mod','Obs'],loc='upper center',ncol=3,frameon=False\n , fontsize=12 ) # Legend\n plt.ylim(lim) # lim es definido en la funcion pone los extremos del eje y\n \n\n \n ####### Define de Estadisticos según Borrego et al 2008 (https://doi.org/10.1016/j.envint.2007.12.005)\n\n df = pd.DataFrame() \n df['obs'] = var_obs\n df['mod'] = var_model #Se define en un solo Dataframe para facilitar calculo \n \n # Corellacion Pearson\n Corr = df.corr(method ='pearson') ; Corr = Corr.values[0,1]\n \n # Error Cuadratico Medio\n Rms = ((df['obs'] - df['mod']) ** 2).mean() ** .5\n \n #Indice de coincidencia\n Index = 1-np.sum(((df['obs']-df['mod'])**2)) / np.sum((abs(df['obs'] -np.mean(df['mod']))+ abs(df['mod']-np.mean(df['mod'])))**2 )\n \n #Added by LGK: Fractional Bias (Sesgo fraccional)\n aa=df['obs'].mean()\n bb=df['mod'].mean()\n FB=((aa-bb)/((aa+bb)*0.5))\n # Cuadro de texto modo String para enmcaren en el plot\n anchored_text = AnchoredText( \" R=\"+str(round(Corr,2))+ \n \"\\n RMS=\" + \n str(round(Rms,1)) + \n \"\\n FB=\" + \n str(round(FB,2))\n + \"\\n Index=\" \n + str(round(Index,2)),\n prop=dict(size=12), loc=1) \n ax.add_artist(anchored_text)\n \n plt.savefig(nombre+'_modelVsObs.'+ ext,dpi=600) # Guardar figura eslilo nombre def por RAP SMO etc.. puedes cambiar \n \n \n return(plt.show()) # Retonar la figura ", "title": "" }, { "docid": "43a3edaaba93b0b59ea471be83c8d98c", "score": "0.63253176", "text": "def draw_plots():\r\n matplotlib.rcParams['savefig.dpi'] = 600\r\n matplotlib.rcParams[\"figure.dpi\"] = 100\r\n \r\n # plot configuration\r\n fig, ax = plt.subplots()\r\n \r\n ax.set_xlabel('time, fs')\r\n ax.set_ylabel('species count')\r\n\r\n x_axis = np.asarray(range(len(data[\"h2o\"]))) * ts\r\n\r\n if sum(data[\"h2\"]) > 0:\r\n ax_h2, = ax.plot(x_axis, data[\"h2\"], marker='', color='y',\r\n linewidth=1.5, label=\"$H_2$\")\r\n if sum(data[\"h2o2\"]) > 0: \r\n ax_h202, = ax.plot(x_axis, data[\"h2o2\"], \"-\", color='m',\r\n linewidth=2, label=\"$H_2O_2$\")\r\n if sum(data[\"h_diss\"]) > 0: \r\n ax_h_diss, = ax.plot(x_axis, data[\"h_diss\"], \":\", color='g',\r\n linewidth=2.5, label=\"$H^*$\")\r\n \r\n if sum(data[\"o_diss\"]) > 0: \r\n ax_o_diss, = ax.plot(x_axis, data[\"o_diss\"], \"--\", color='r',\r\n linewidth=1, label=\"$O^*$\")\r\n \r\n if sum(data[\"oh_diss\"]) > 0: \r\n ax_oh_diss, = ax.plot(x_axis, data[\"oh_diss\"], \":\", color='b',\r\n linewidth=1.5, label=\"$OH^*$\")\r\n \r\n if sum(data[\"h3o\"]) > 0: \r\n ax_h3o, = ax.plot(x_axis, data[\"h3o\"], marker='', color='c',\r\n linewidth=1.5, label=\"$H_3O^*$\")\r\n \r\n if sum(data[\"ho2\"]) > 0: \r\n ax_ho2, = ax.plot(x_axis, data[\"ho2\"], marker='', color='k',\r\n linewidth=1, label=\"$HO_2^*$\")\r\n\r\n # use 2nd axis for water count\r\n ax_water = ax.twinx()\r\n ax_h2o, = ax_water.plot(x_axis, data[\"h2o\"], \":\", marker='', color='r',\r\n linewidth=1, label=\"$H_2O$\")\r\n ax_water.set_ylabel('$H_2O$ count', color=\"r\")\r\n\r\n ax.legend(bbox_to_anchor=(1.13, 1))\r\n ax_water.legend(loc=2)\r\n\r\n # draw plot\r\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\r\n ax_water.yaxis.set_major_locator(MaxNLocator(integer=True))\r\n \r\n plt.show()", "title": "" }, { "docid": "55fda6c751ef9de557d5f2311b29f5d8", "score": "0.63213766", "text": "def setup_plot(self):\n data = next(self.stream)\n x, y = data[0]\n t = data[1]\n\t# self.backdrop = self.ax.plot([self.xbounds[0], self.xbounds[1]], [self.ybounds[0], self.ybounds[1]])\n for i in aa_geom.index:\n self.ax.plot(aa_geom.iloc[i].exterior.xy[0],\n\t\t\t 
aa_geom.iloc[i].exterior.xy[1], color='green')\n self.scat = self.ax.scatter(x, y, animated=True)\n self.ax.axis([self.xbounds[0], self.xbounds[1],\n\t\t self.ybounds[0], self.ybounds[1]])\n\n self.text = self.ax.text(self.xbounds[0], self.ybounds[0], '', horizontalalignment='left', verticalalignment='bottom')\n # For FuncAnimation's sake, we need to return the artist we'll be using\n # Note that it expects a sequence of artists, thus the trailing comma.\n return self.scat,", "title": "" }, { "docid": "830e9c9cc6874080ef2cb81eae43d8c0", "score": "0.62936383", "text": "def plot_data(x,y,train = True):\n plt.figure(figsize=(8,8))\n radius=(1/(2*math.pi))**(1/2)\n circle=plt.Circle((0.5,0.5),radius,color='g')\n plt.gca().add_artist(circle)\n for i in range(len(x)):\n if y[i][0].item()==1:\n plt.plot(x[i][0],x[i][1],'bo')\n else:\n plt.plot(x[i][0],x[i][1],'r+')\n \n plt.xlim([0,1])\n plt.ylim([0,1])\n plt.xlabel(\"x\",size = 18)\n plt.ylabel(\"y\",size = 18)\n if train :\n plt.title('Distribution of the train set',size = 20)\n plt.savefig(\"Train set distribution.jpg\") ;\n else : \n plt.title('Distribution of the test set',size = 20)\n plt.savefig(\"Test set distribution.jpg\") ;\n\n plt.show()", "title": "" }, { "docid": "5bf1417a71c217e308cdbb7b14a1e387", "score": "0.6292867", "text": "def plot_all(self):\r\n\r\n axes = self.data.plot(subplots=True, figsize=(plt.rcParams['figure.figsize'][0], self.data.shape[1] * 4))\r\n return axes", "title": "" }, { "docid": "88e975fdff529fbff207c9715c67a5f4", "score": "0.6281861", "text": "def plot_error_total_9(trt, vat):\r\n\r\n #fig, axs = plt.subplots(2, 2, sharex=True)\r\n\r\n\r\n\r\n #plt.subplots(2, 2)\r\n #plt.subplots_adjust(left=0.08, right=0.98, wspace=0.3)\r\n #trt = tetotal\r\n #vat = vetotal\r\n\r\n plt.figure(dpi=200)\r\n\r\n# for i in range(9):\r\n#\r\n# plt.subplot(331 + i)\r\n# plt.plot(range(len(trt[i])), trt[i], c='r', label='Training Error')\r\n# plt.plot(range(len(vat[i])), vat[i], c='b', linewidth=1, linestyle='dashed', label='Validation Error')\r\n# plt.xticks(fontsize=8)\r\n# plt.title('Experimento '+str(1+i), fontsize=8)\r\n# plt.xlabel('Number of Epochs', fontsize=8)\r\n# plt.ylabel('Error value', fontsize=7)\r\n\r\n\r\n for i in range(4):\r\n\r\n plt.subplot(221 + i)\r\n plt.plot(range(len(trt[i])), trt[i], c='r', label='Training Error')\r\n plt.plot(range(len(vat[i])), vat[i], c='b', linewidth=1, linestyle='dashed', label='Validation Error')\r\n plt.xticks(fontsize=8)\r\n plt.title('Experimento '+str(1+i), fontsize=8)\r\n plt.xlabel('Number of Epochs', fontsize=8)\r\n plt.ylabel('Error value', fontsize=7)\r\n\r\n plt.tight_layout()\r\n plt.show()", "title": "" }, { "docid": "17f8ca081f4708a7809971b0f5deb507", "score": "0.62786514", "text": "def baseline_plot(subset_dates, baseline_table, supermaster={}, dates=True):\n\n # Check for supermaster; set to empty if none is provided\n if len(supermaster) == 0:\n supermaster['dates'] = None\n supermaster['Bp'] = None\n\n # Initialize plot\n fig, ax = plt.subplots(figsize=(14, 8.2))\n\n # Plot pairs\n colors = ['k', 'steelblue', 'red', 'gold', 'green', 'mediumpurple']\n\n for i, key in enumerate(subset_dates.keys()):\n for j, date_pair in enumerate(subset_dates[key]):\n # Get corresponding baselines\n Bp_pair = [baseline_table[baseline_table['date'] == date]['Bp'].values for date in date_pair]\n\n if j == 0:\n label = key\n else:\n label = None\n\n ax.plot(date_pair, Bp_pair, c=colors[i], linewidth=1.5, zorder=0, label=label)\n\n\n # Plot nodes\n for i in 
range(len(baseline_table)):\n\n # Change settings if master\n if baseline_table['date'][i] == supermaster['date']:\n c = 'red'\n c_text = 'red'\n s = 30\n else:\n # c = 'C0'\n c = 'k'\n c_text = 'k'\n s = 20\n\n ax.scatter(baseline_table['date'][i], baseline_table['Bp'][i], marker='o', c=c, s=20)\n\n # Offset by 10 days/5 m for readability\n if dates:\n ax.text(baseline_table['date'][i] + 0.005*(baseline_table['date'].iloc[-1] - baseline_table['date'].iloc[0]), \n baseline_table['Bp'][i] + 0.01*(baseline_table['Bp'].iloc[-1] - baseline_table['Bp'].iloc[0]), \n #baseline_table['date'][i].strftime('%Y/%m/%d'), \n baseline_table['date'][i].strftime('%m/%d'),\n size=7, color=c_text, \n # bbox={'facecolor': 'w', 'pad': 0, 'edgecolor': 'w', 'alpha': 0.7}\n )\n \n ax.legend()\n ax.set_ylabel('Perpendicular baseline (m)')\n ax.set_xlabel('Date')\n ax.tick_params(direction='in')\n plt.savefig('baseline_plot.eps')\n plt.show()", "title": "" }, { "docid": "fd87c249470366dccdcb32d61f249b61", "score": "0.6275113", "text": "def plot_orb_all(self):\n _pl.clf()\n for bb in [1,2]:\n for pp in 'h','v':\n _pl.subplot(int(210+bb))\n _pl.plot(self.orb()['b'+str(bb)+pp],label=('b'+str(bb)+pp).upper()) \n self.opt_plot_orb()\n _pl.legend()", "title": "" }, { "docid": "9a6284af2df94b7f8440c556aa82206b", "score": "0.62643147", "text": "def plot_result(self):\n\n ax1 = plt.subplot(131)\n plt.title(\"Position [m]\")\n plt.xlabel(\"time [s]\")\n plt.plot(self.data.time, self.data.position, \"-\")\n for point in self.list_point:\n plt.scatter(point.time, point.position)\n\n plt.subplot(132, sharex=ax1)\n plt.title(\"Speed [m/s]\")\n plt.xlabel(\"time [s]\")\n plt.plot(self.data.time, self.data.speed, \"-\")\n for point in self.list_point:\n plt.scatter(point.time, point.speed)\n\n plt.subplot(133, sharex=ax1)\n plt.title(\"Acceleration [m/s2]\")\n plt.xlabel(\"time [s]\")\n plt.plot(self.data.time, self.data.acceleration, \"-\")\n for point in self.list_point:\n plt.scatter(point.time, point.acceleration)\n\n plt.show()", "title": "" }, { "docid": "db7c486e21f3b896eaca05004230f8fe", "score": "0.626202", "text": "def plot(self):\n labels=[]\n #fig_size[0] = 12\n #fig_size[1] = 9\n #plt.rcParams[\"figure.figsize\"] = fig_size\n fig = plt.gcf()\n fig.set_size_inches(8, 6)\n xmin, xmax = None, None\n ymin, ymax = None, None\n if self.rows is not None:\n for i, t in enumerate(self.rows):\n x, y = self.F[i,0], self.F[i,1]\n print (x,'|',y,'|',t,'|',i)\n labels.append(t)\n plt.text(x, y, t, va='center', ha='center', color='r', fontsize=8)\n xmin = min(x, xmin if xmin else x)\n xmax = max(x, xmax if xmax else x)\n ymin = min(y, ymin if ymin else y)\n ymax = max(y, ymax if ymax else y)\n else:\n plt.plot(self.F[:, 0], self.F[:, 1], 'ro')\n import mpld3 \n if self.cols is not None:\n for i, t in enumerate(self.cols):\n x, y = self.G[i,0], self.G[i,1]\n # if (t) in \"Repair\":\n print (x,'|',y,'|',t,'|',i)\n plt.text(x, y, t, va='center', ha='center', color='b', fontsize=14)\n xmin = min(x, xmin if xmin else x)\n xmax = max(x, xmax if xmax else x)\n ymin = min(y, ymin if ymin else y)\n ymax = max(y, ymax if ymax else y)\n else:\n plt.plot(self.G[:, 0], self.G[:, 1], 'bs')\n\n if xmin and xmax:\n pad = (xmax - xmin) * 0.1\n plt.xlim(xmin - pad, xmax + pad)\n if ymin and ymax:\n pad = (ymax - ymin) * 0.1\n plt.ylim(ymin - pad, ymax + pad)\n\n plt.grid()\n plt.xlabel('Dim 1')\n plt.ylabel('Dim 2')\n #tooltip = mpld3.plugins.PointLabelTooltip(plt, labels=labels)\n #mpld3.plugins.connect(fig, tooltip)\n #mpld3.show()", "title": "" }, 
{ "docid": "8e8b4fec4ebd085cd5b0f77f8eea8965", "score": "0.6261496", "text": "def plot_data(self, **kwargs):\n predictions = self.predict(self.x)\n try:\n fig, ax = plt.subplots(figsize=kwargs['figsize'])\n except:\n fig, ax = plt.subplots()\n ax.scatter(self.x, self.y, label=\"Data Points\")\n ax.plot(self.x, predictions, label=\"Regression Line\")\n try:\n ax.set_xlabel(kwargs['xlabel'])\n ax.set_ylabel(kwargs['ylabel'])\n ax.set_title(kwargs['title'])\n except:\n pass\n ax.legend()\n return fig", "title": "" }, { "docid": "b3d3296554bf5b90b6e0f6596f2e9cf8", "score": "0.6253059", "text": "def plot_heredity(indiv, num_cp):\n box = []\n parents = self.lineage[indiv]['parents']\n for key in [parents[0], indiv, parents[1]]:\n gene = self.lineage[key]['generation']\n blood = self.lineage[key]['blood']\n box.append([gene, blood])\n h_lines = np.array(box)\n ax1.plot(h_lines[:, 0], h_lines[:, 1], '-',\n color=color_p[num_cp][0])\n box = []\n gene = self.lineage[indiv]['generation']\n blood = self.lineage[indiv]['blood']\n box.append([gene, blood])\n h_point = np.array(box)\n ax1.plot(h_point[:, 0], h_point[:, 1], 'o',\n color=color_p[num_cp][0])\n\n #ax1.plot(arrayB[:, 0], arrayB[:, 1], 'o')", "title": "" }, { "docid": "da6a496ee4eaeef6bbdd8ff7f922e798", "score": "0.6241577", "text": "def plot(self, data = None, color = 'r'):\n if data is not None:\n x, y = data\n plt.plot(x[:], y, 'bo') \n plt.axis('equal') \n plt.title(' Analytical Solution for Least Squares Linear Regression') \n plt.title('Amazon Employee Compensation (Linear) Dataset') \n plt.xlabel('Years of experience of the employee.')\n plt.ylabel('Compensation in $100,000') \n grid = np.asarray([0, 10])[:,np.newaxis]\n predictions = self.get_predictions(grid)\n plt.plot(grid, predictions, color)\n plt.show()", "title": "" }, { "docid": "f34bf693acf96dbf693ed55edf46cf27", "score": "0.6225802", "text": "def plot_showbaselines(self):\n for prof in self.profiles:\n for spec in prof.spectra_list:\n spec.plot_showbaseline()", "title": "" }, { "docid": "d0beb6babfb4f71d241f34e7341717da", "score": "0.62046057", "text": "def plot(data,typ = 'line',title='',xlabel='',ylabel='',legend = []):\r\n if typ == 'line':\r\n plt.plot(data)\r\n elif typ == 'histogram':\r\n plt.hist(data)\r\n\r\n plt.title(title)\r\n plt.xlabel(xlabel)\r\n plt.ylabel(ylabel)\r\n plt.legend(legend)\r\n \r\n plt.show()", "title": "" }, { "docid": "7fb545f51658407b57ce89215e135b05", "score": "0.62013537", "text": "def plot_all():\n\n plot_fission_rates()\n #plot_activities()\n #plot_au_rfs_and_unfolded()\n\n return", "title": "" }, { "docid": "0aea74b5704b404fba161fe20e8b393a", "score": "0.61829036", "text": "def plot(self, ax=None):\n if ax is None:\n _, ax = plt.subplots(len(self.data), 1, figsize=(7, (7 * len(self.data))))\n if len(self.data) == 1:\n self.data[0].plot(ax=ax)\n else:\n for i, tpf in enumerate(self.data):\n tpf.plot(ax=ax[i])\n return ax", "title": "" }, { "docid": "feafea3aba292345d8131b516a8fcebf", "score": "0.6176923", "text": "def figure(self):\r\n figure = Figure(figsize=(5, 4), dpi=100)\r\n ax = figure.add_subplot(111)\r\n ax.plot(self.t, self.datapoints, 'ro')\r\n ax.set_xlabel('Time (hours)')\r\n ax.set_ylabel('Bacteria Density (OD)')\r\n return figure", "title": "" }, { "docid": "b79691eec6c5367c2a2aea85e95718a8", "score": "0.6173775", "text": "def plot_hrd():\n # a = [np.log10(0.01),np.log10(0.0125),np.log10(0.0156),np.log10(0.0194),np.log10(0.0240)] #KIC5786154\n # a = 
[np.log10(0.0090),np.log10(0.0072),np.log10(0.0057),np.log10(0.0046),np.log10(0.0036)] #KIC8430105\n # a = [np.log10(0.0125),np.log10(0.0100),np.log10(0.0080),np.log10(0.0064),np.log10(0.0051)] #KIC9970396\n # a = [np.log10(0.0156),np.log10(0.0125),np.log10(0.0100),np.log10(0.0080),np.log10(0.0064)] #KIC7037405\n # a = [np.log10(0.0194),np.log10(0.0240),np.log10(0.0297),np.log10(0.0365),np.log10(0.0156)] #KIC8410637\n # a = [np.log10(0.0267),np.log10(0.0297),np.log10(0.0329),np.log10(0.0365),np.log10(0.0404),np.log10(0.0446),np.log10(0.0492)] #NGC6791\n a = [np.log10(0.0090),np.log10(0.0112),np.log10(0.0140),np.log10(0.0174),np.log10(0.0216),np.log10(0.0267),np.log10(0.0329)] #NGC6819\n for k in a:\n idx = []\n fig = plt.figure()\n for i in xrange(len(results_age[0])):\n for j in xrange(len(results_age[0][i][:,0])):\n if results_age[0][i][j,ndim-2] == k:\n idx = np.append(idx,results_age[0][i][j,:])\n\n plt.scatter(idx[20::ndim+nglb+6+5],np.log10(idx[21::ndim+nglb+6+5]/3.828e33),c=np.log10(idx[ndim::ndim+nglb+6+5]),label=r'Z = %s '%(10**idx[1]))\n cb = plt.colorbar()\n cax = cb.ax\n cax.text(3.5,0.7,r\"$\\log_{10}$(Max. error)\",rotation=270,fontsize=20)\n cax.tick_params(labelsize=15)\n plt.gca().invert_xaxis()\n plt.legend(loc=3)\n # plt.savefig('/home/bmr135/AIMS/AIMS_BEN/KIC5786154/HRD_KIC5786154_Z'+str(10**idx[1])+'.png')\n # plt.show()", "title": "" }, { "docid": "c8111ab025433b9e5035e4160aabd76b", "score": "0.6162633", "text": "def basisplot(self):\n for i in range(len(self.N)):\n for j in range(len(self.N[i])):\n if self.N[i][j]!= 0:\n x = linspace(self.points[i+j],self.points[i+j+1],50)\n y = self.coeff[i]*self.N[i][j](x)\n plt.plot(x,y)\n plt.title(\"Plot of the basic functions for the splines\")\n plt.show()", "title": "" }, { "docid": "8c45f38c171ab8be2379fccbc2815add", "score": "0.6140964", "text": "def plotLine(values,title,x=\"Round No.\",y=\"Advantage\"):\n global figureNum\n figureNum+=1\n plt.figure(figureNum)\n plt.plot(values)\n plt.title(title)\n plt.xlabel(x)\n plt.ylabel(y)\n plt.show()", "title": "" }, { "docid": "ed4de85498baf0f3003fdadc175152eb", "score": "0.61356056", "text": "def plot_glottal(self, data_audio,fs,GCI, glottal_flow, glottal_sig):\n\n fig, ax=plt.subplots(3, sharex=True)\n t=np.arange(0, float(len(data_audio))/fs, 1.0/fs)\n if len(t)>len(data_audio):\n t=t[:len(data_audio)]\n elif len(t)<len(data_audio):\n data_audio=data_audio[:len(t)]\n ax[0].plot(t, data_audio, 'k')\n ax[0].set_ylabel('Amplitude', fontsize=12)\n ax[0].set_xlim([0, t[-1]])\n ax[0].grid(True)\n\n ax[1].plot(t, glottal_sig, color='k', linewidth=2.0, label=\"Glottal flow signal\")\n amGCI=[glottal_sig[int(k-2)] for k in GCI]\n\n GCI=GCI/fs\n ax[1].plot(GCI, amGCI, 'bo', alpha=0.5, markersize=8, label=\"GCI\")\n GCId=np.diff(GCI)\n ax[1].set_ylabel(\"Glottal flow\", fontsize=12)\n ax[1].text(t[2],-0.8, \"Avg. time consecutive GCI:\"+str(np.round(np.mean(GCId)*1000,2))+\" ms\")\n ax[1].text(t[2],-1.05, \"Std. 
time consecutive GCI:\"+str(np.round(np.std(GCId)*1000,2))+\" ms\")\n ax[1].set_xlabel('Time (s)', fontsize=12)\n ax[1].set_xlim([0, t[-1]])\n ax[1].set_ylim([-1.1, 1.1])\n ax[1].grid(True)\n\n ax[1].legend(ncol=2, loc=2)\n\n ax[2].plot(t, glottal_flow, color='k', linewidth=2.0)\n ax[2].set_ylabel(\"Glotal flow derivative\", fontsize=12)\n ax[2].set_xlabel('Time (s)', fontsize=12)\n ax[2].set_xlim([0, t[-1]])\n ax[2].grid(True)\n\n plt.show()", "title": "" }, { "docid": "08264b03540695fa8002852efa75390b", "score": "0.6133603", "text": "def plot(n_plot, title, position, x, y):\n ax = plt.subplot(1, n_plot, position)\n ax.set_title(title)\n ax.plot(x[y == 0][:, 0], x[y == 0][:, 1], 'ro')\n ax.plot(x[y == 1][:, 0], x[y == 1][:, 1], 'bo')\n return ax", "title": "" }, { "docid": "bc4f4d469aa5e4a461ba026b5f6505e1", "score": "0.61230665", "text": "def plot_distribution(x_raw, y_raw, x_test, y_test):\n df_raw = pd.DataFrame(dict(x=x_raw[:,0], y=x_raw[:,1], label=y_raw))\n df_test = pd.DataFrame(dict(x=x_test[:,0], y=x_test[:,1], label=y_test))\n colors = {0:'#ef8a62', 1:'#67a9cf'}\n fig, ax = plt.subplots(figsize=(10,4), dpi=100, nrows=1, ncols=2)\n grouped_raw = df_raw.groupby('label')\n grouped_test = df_test.groupby('label')\n for key, group in grouped_raw:\n group.plot(ax=ax[0], kind='scatter', x='x', y='y', label=key, color=colors[key])\n for key, group in grouped_test:\n group.plot(ax=ax[1], kind='scatter', x='x', y='y', label=key, color=colors[key])\n ax[0].set_title('Dados Bruto')\n ax[0].set_xlabel('X1')\n ax[0].set_ylabel('X2')\n ax[1].set_title('Dados de Teste')\n ax[1].set_xlabel('X1')\n ax[1].set_ylabel('X2')\n fig.tight_layout()\n plt.show()", "title": "" }, { "docid": "41ebfcc810a882f5004e20156d3ea0bf", "score": "0.6110879", "text": "def plotData(self ,**kwargs):\n\t\traise NotImplementedError(\"\")", "title": "" }, { "docid": "33f2b9eaa5d5e9e57d2655f645032404", "score": "0.61041296", "text": "def plot(self):\n layout = self.__setupLayout()\n layout.plot()", "title": "" }, { "docid": "29cb5c246d05847a35005fddb9b97508", "score": "0.6102824", "text": "def addline(self):\n for subplot in self.subplots.values():\n subplot.plot([], [])", "title": "" }, { "docid": "180953148c54b46d0a6d48642cea2eb4", "score": "0.6101339", "text": "def plot(data_list):\n plt.style.use('ggplot')\n plt.close('all')\n for data in data_list:\n plt.plot(data)\n plt.show()", "title": "" }, { "docid": "76c399efebc79f4519383fbf42aecd31", "score": "0.6096881", "text": "def plot(self):\n plt.rcParams['font.size'] = 12\n fig = plt.figure(figsize=(8, 7)) \n plt.plot(self.envelope, color='k', linewidth=1)\n #plt.xticks(np.arange(0, 1000, step=200))\n plt.xlim(0, self.Nx)\n #plt.yticks(np.arange(-1, 5, step=1))\n plt.ylim(0, 3)\n plt.axvline(3200, color='b')\n plt.axvline(3500, color='b')", "title": "" }, { "docid": "b2a6b2c453aae7e83eedb7aea35815b7", "score": "0.60961324", "text": "def plot_tvis_lines( snIadatfile='snIa_tvis.dat', snIIdatfile='snII_tvis.dat' ):\n dat1 = ascii.read( snIadatfile, header_start=-1 )\n dat2 = ascii.read( snIIdatfile, header_start=-1 )\n\n mulist = [2,4,6,10,15,20]\n mucolorlist = ['m','b','c','g','r','k']\n\n pl.clf()\n ax1 = pl.subplot( 1,2,1 )\n ax2 = pl.subplot( 1,2,2, sharex=ax1, sharey=ax1 )\n\n for dat,ax,sntype in zip( [dat1,dat2], [ax1,ax2], ['Ia','II']) :\n\n z = np.array( dat['z'] )\n for mu, mucolor in zip(mulist,mucolorlist) :\n tvis = np.array( dat['tvis%02i'%mu] )\n err = np.array( dat['err%02i'%mu] )\n tvismax = tvis + err\n tvismin = np.max( [np.zeros(len(tvis)), tvis-err], 
axis=0 )\n # ax.fill_between( z, tvismin, tvismax, color=mucolor, alpha=0.3 )\n ax.plot( z, tvis, marker=' ', ls='-', color=mucolor, label='%i'%mu )\n z10 = z[ np.where(tvis<12)[0][0] ]\n ax.text( z10,10, '%i'%mu, color=mucolor, ha='center', va='center',\n backgroundcolor='w' )\n\n # ax.legend(loc='upper right')\n ax.set_xlabel('Redshift')\n ax.set_ylabel('Visibility Time [days]')\n ax.text(0.95,0.95,'Type %s SN'%sntype, ha='right',va='top',\n transform=ax.transAxes, fontsize='large' )\n ax.set_ylim( 0, 140 )\n ax.set_xlim( 0.8, 3.2 )\n ax.text(1.0,10,'$\\mu$=',ha='right',va='center',\n backgroundcolor='w' )\n\n fig = pl.gcf()\n fig.subplots_adjust( left=0.12, right=0.88, bottom=0.12, top=0.95, wspace=0 )\n\n ax2.yaxis.set_ticks_position('right')\n ax2.yaxis.set_label_position('right')\n ax2.yaxis.set_ticks_position('both')\n ax2.set_ylabel('Visibility Time [years]', rotation=-90 )\n\n # ax1.set_xlim(0.9,3.2)\n\n ax2.set_yticks( np.array([0.1,0.2,0.3])*365 )\n\n ax2.set_yticklabels( [0.1,0.2,0.3] )\n\n ax1.set_ylim(0,120)\n\n return( dat1, dat2 )", "title": "" }, { "docid": "8b9ab82273b1a0acb0f584b5f40f6c27", "score": "0.6093382", "text": "def plot_indicies(ds):\n plt.style.use('stylesheets/timeseries.mplstyle')\n\n\n fig = plt.figure(figsize=[5,2*len(ds)])\n i = 0\n for index in tqdm(ds):\n data = ds[index].copy()\n data = data.dropna(dim='time')\n data_m, data_b, data_r_value, data_p_value, data_std_err = scipy.stats.linregress(data.time.values.astype(float), data)\n yfit = data_m * data.time.values.astype(float) + data_b\n ax = fig.add_subplot(len(ds),1,i+1)\n ax.plot(data.time, data)\n ax.plot(data.time, yfit, color = '#177E89')\n print(f'{data_m:.2e}',max(data.values))\n ax.set_title(index)\n # ax.set_ylabel(index)\n i += 1\n # fig.suptitle(\"Index timeseries\")\n plt.tight_layout()\n plt.savefig('images/week1/index_timeseries.pdf',bbox_inches='tight')\n plt.show()", "title": "" }, { "docid": "5e69361592102cfd4c25db331b97d008", "score": "0.60773265", "text": "def mini_plot(fig_num, x, y, xl, yl, clr, lbl):\n py.figure(fig_num)\n py.xlabel(xl)\n py.ylabel(yl)\n return py.plot(x, y, clr, linewidth=1.0, label=lbl)", "title": "" }, { "docid": "d16e06a1cdea0ca2428af3e5b609aa30", "score": "0.60758823", "text": "def plot_result(self):\n fig, axarr = plt.subplots(2, 1, sharex=True)\n plt.tight_layout()\n fig.subplots_adjust(hspace=0)\n\n axarr[0].plot(\n self.data['time'], self.data['ctlbrake'], label='Brake CMD')\n axarr[0].plot(\n self.data['time'],\n self.data['brake_percentage'],\n label='Brake Output')\n axarr[0].plot(\n self.data['time'], self.data['ctlthrottle'], label='Throttle CMD')\n axarr[0].plot(\n self.data['time'],\n self.data['throttle_percentage'],\n label='Throttle Output')\n axarr[0].plot(\n self.data['time'],\n self.data['engine_rpm'] / 100,\n label='Engine RPM')\n axarr[0].legend(fontsize='medium')\n axarr[0].grid(True)\n axarr[0].set_title('Command')\n\n axarr[1].plot(\n self.data['time'],\n self.data['vehicle_speed'],\n label='Vehicle Speed')\n\n for i in range(len(self.timesection)):\n axarr[1].plot(\n self.timesection[i],\n self.speedsection[i],\n label='Speed Segment')\n axarr[1].plot(\n self.timesection[i], self.accsection[i], label='IMU Segment')\n\n axarr[1].legend(fontsize='medium')\n axarr[1].grid(True)\n axarr[1].set_title('Speed')\n\n mng = plt.get_current_fig_manager()\n mng.full_screen_toggle()\n\n #plt.tight_layout(pad=0.20)\n fig.canvas.mpl_connect('key_press_event', self.press)\n plt.show()", "title": "" }, { "docid": "06c82bd364cd01d8219f06bcef90f5b6", 
"score": "0.60700345", "text": "def plot_data(self, show=False):\n\n fig, ax = plt.subplots(2, 1, figsize = (10, 6))\n\n # plot one random row from the simulated train data \n if self.flatten:\n print ('Plotting data... reshaping the flattened data to %s'%str(input_shape))\n temp = self.data['x_central'][np.random.randint(self.n_train * self.n_s)].reshape(input_shape)\n x, y = temp.T[:,0]\n else:\n print ('Plotting data...')\n temp = self.data['x_central'][np.random.randint(self.n_train * self.n_s)].reshape(55,len(ells))\n Cl = temp[0] # plot the (0,0) autocorrelation bin\n\n ax[0].loglog(ells, ells*(ells+1)*Cl)\n ax[0].set_title('Training data, bin (0,0)')\n ax[0].set_xlabel('$\\ell$')\n ax[0].set_ylabel('$\\ell(\\ell+1) C_\\ell$')\n \n # plot one random row from the simulated test data \n if self.flatten:\n temp = self.data['x_central_test'][np.random.randint(self.n_s)].reshape(input_shape)\n x, y = temp.T[:,0]\n else:\n temp = self.data['x_central_test'][np.random.randint(self.n_train * self.n_s)].reshape(55,len(ells))\n Cl = temp[0] # plot the (0,0) autocorrelation bin\n\n ax[0].loglog(ells, ells*(ells+1)*Cl)\n ax[0].set_title('Test data, bin (0,0)')\n ax[0].set_xlabel('$\\ell$')\n ax[0].set_ylabel('$\\ell(\\ell+1) C_\\ell$')\n\n plt.legend()\n\n plt.savefig(f'{self.figuredir}data_visualization_{self.modelversion}.png')\n if show: plt.show()\n plt.close()", "title": "" }, { "docid": "0415e3e03ed64f3f2964d9464a661f1d", "score": "0.6067389", "text": "def plot_p_value(self, \n plot_name=\"BEAGLE_p_value.pdf\", broken_axis=False): \n\n xdata = self.data['p_value']\n n_data = len(xdata)\n min_x = 0.\n max_x = 1.\n\n fig = plt.figure()\n \n if broken_axis:\n fig, axs = plt.subplots(2, 1, sharex=True)\n fig.subplots_adjust(left=0.13, bottom=0.10)\n else:\n fig, axs = plt.subplots(1, 1)\n axs = (axs,)\n\n ylabel = \"Number of galaxies\"\n xlabel = \"$p$-value\"\n\n fig.text(0.5, 0.02, xlabel, ha='center')\n fig.text(0.03, 0.5, ylabel, va='center', rotation='vertical')\n\n # Plot the histogram of the average chi-square\n kwargs = {'alpha':0.7, 'linewidth':0.5}\n for ax in axs:\n n, bins, patches = ax.hist(xdata, \n bins=50, \n range=(min_x, max_x),\n color='gray',\n **kwargs)\n\n ax.set_xlim((min_x, max_x))\n\n\n if broken_axis:\n # Set the correct number of major and mnor tick marks\n set_plot_ticks(axs[0], n_x=4, n_y=3, prune_y='lower')\n set_plot_ticks(axs[1], n_x=4, n_y=3, prune_y='both')\n\n max_y = np.max(n[1:])\n axs[1].set_ylim((0, max_y*1.12))\n axs[0].set_ylim((n[0]*0.8, n[0]*1.04))\n\n # hide the spines between ax and ax2\n axs[0].spines['bottom'].set_visible(False)\n axs[1].spines['top'].set_visible(False)\n axs[0].xaxis.tick_top()\n axs[0].tick_params(labeltop='off') # don't put tick labels at the top\n axs[1].xaxis.tick_bottom()\n\n d = .015 # how big to make the diagonal lines in axes coordinates\n # arguments to pass plot, just so we don't keep repeating them\n kwargs = dict(transform=axs[0].transAxes, color='k', clip_on=False)\n axs[0].plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal\n axs[0].plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal\n\n kwargs.update(transform=axs[1].transAxes) # switch to the bottom axes\n axs[1].plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal\n axs[1].plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal\n else:\n set_plot_ticks(axs[0], n_x=4, n_y=4, prune_y='lower')\n axs[0].set_ylim((0, np.max(n)*1.1))\n\n\n\n # y0, y1 = ax.get_ylim()\n levels = (0.01, 0.05)\n for lev in levels:\n l = lev\n frac = 
1.*len(np.where(xdata <= l)[0])/n_data\n print(\"Fraction of galaxies with p-value < \" + \"{:.2f}\".format(lev) + \" = {:.2f}\".format(frac))\n # ax.plot((l, l),\n # (y0, y1),\n # color='black',\n # linestyle='--')\n\n name = prepare_plot_saving(plot_name)\n fig.savefig(name, dpi=None, facecolor='w', edgecolor='w',\n orientation='portrait', format=\"pdf\",\n transparent=False, bbox_inches=\"tight\", pad_inches=0.1)\n\n plt.close(fig)", "title": "" }, { "docid": "31b888e07e92d13058b30314a327d0f2", "score": "0.6067353", "text": "def figS2():\n df = pd.read_table('epsilon.txt')\n fig = plt.figure(figsize=(8, 8))\n columns = ['valid', 'desired', 'unique', 'diversity']\n labels = ['Valid SMILES (%)', 'Desired SMILES (%)', 'Unique desired SMILES (%)', 'Diversity']\n for i, column in enumerate(columns):\n ax = fig.add_subplot(221 + i)\n for a in ['Pre-trained', 'Fine-tuned']:\n data = df[df['exploration'] == a]\n baselines = data.groupby('baseline')\n for b, baseine in baselines:\n ax.plot(baseine.epsilon, baseine[column], label='%s(β = %.1f)' % (a, b))\n ax.legend(loc='lower left')\n ax.set(ylabel=labels[i], xlabel='Epsilon', xlim=[0, 0.26], ylim=[0.65, 0.85] if column == 'diversity' else [30, 100])\n fig.tight_layout()\n fig.savefig('Figure_S2.tif', dpi=300)", "title": "" }, { "docid": "91ed43ce8fd47f17cab6090e696c166f", "score": "0.60664845", "text": "def plot(self, ax):\n stack = np.stack((self.A-0.2*self._normal, self.A, self.B, self.B-0.2*self._normal))\n ax.plot(stack[:, 0], stack[:, 1], \":\")", "title": "" }, { "docid": "5deda6f97464186f53c8cdadaf8a2c1c", "score": "0.60651624", "text": "def _create_line(plots, labels, plot_info):\r\n # when we're doing any kind of normalization, all series get put into a\r\n # single plot\r\n single = bool(plot_info.normalize_to)\r\n\r\n area_data = []\r\n lines = []\r\n if single:\r\n plot_height = _SINGLE_PLOT_HEIGHT\r\n else:\r\n plot_height = _MULTIPLE_PLOT_HEIGHT_PER_PLOT * len(plots)\r\n figure, height = _create_figure(plot_height)\r\n\r\n if single:\r\n subplot = figure.add_subplot(1, 1, 1)\r\n\r\n # Plot all the data\r\n for plot_index, (plot, color) in enumerate(zip(plots, _colors(len(plots)))):\r\n needs_invert = (plot['label'] in plot_info.inverted_series)\r\n\r\n # Add a new subplot, if user wants multiple subplots\r\n # Also handle axis inversion for subplots here\r\n if not single:\r\n subplot = figure.add_subplot(len(plots), 1, plot_index + 1)\r\n subplot.set_title(plot['label'])\r\n if needs_invert:\r\n # for separate plots, just invert the y-axis\r\n subplot.set_ylim(1, 0)\r\n elif needs_invert:\r\n # for a shared plot (normalized data), need to invert the y values\r\n # manually, since all plots share a y-axis\r\n plot['y'] = [-y for y in plot['y']]\r\n\r\n # Plot the series\r\n subplot.set_xticks(range(0, len(labels)))\r\n subplot.set_xlim(-1, len(labels))\r\n if single:\r\n lines += subplot.plot(plot['x'], plot['y'], label=plot['label'],\r\n marker=_MULTIPLE_PLOT_MARKER_TYPE,\r\n markersize=_MULTIPLE_PLOT_MARKER_SIZE)\r\n error_bar_color = lines[-1].get_color()\r\n else:\r\n lines += subplot.plot(plot['x'], plot['y'], _SINGLE_PLOT_STYLE,\r\n label=plot['label'])\r\n error_bar_color = _SINGLE_PLOT_ERROR_BAR_COLOR\r\n if plot['errors']:\r\n subplot.errorbar(plot['x'], plot['y'], linestyle='None',\r\n yerr=plot['errors'], color=error_bar_color)\r\n subplot.set_xticklabels([])\r\n\r\n # Construct the information for the drilldowns.\r\n # We need to do this in a separate loop so that all the data is in\r\n # matplotlib before we start 
calling transform(); otherwise, it will return\r\n # incorrect data because it hasn't finished adjusting axis limits.\r\n for line in lines:\r\n\r\n # Get the pixel coordinates of each point on the figure\r\n x = line.get_xdata()\r\n y = line.get_ydata()\r\n label = line.get_label()\r\n icoords = line.get_transform().transform(zip(x, y))\r\n\r\n # Get the appropriate drilldown query\r\n drill = plot_info.query_dict['__' + label + '__']\r\n\r\n # Set the title attributes (hover-over tool-tips)\r\n x_labels = [labels[x_val] for x_val in x]\r\n titles = ['%s - %s: %f' % (label, x_label, y_val)\r\n for x_label, y_val in zip(x_labels, y)]\r\n\r\n # Get the appropriate parameters for the drilldown query\r\n params = [dict(query=drill, series=line.get_label(), param=x_label)\r\n for x_label in x_labels]\r\n\r\n area_data += [dict(left=ix - 5, top=height - iy - 5,\r\n right=ix + 5, bottom=height - iy + 5,\r\n title=title,\r\n callback=plot_info.drilldown_callback,\r\n callback_arguments=param_dict)\r\n for (ix, iy), title, param_dict\r\n in zip(icoords, titles, params)]\r\n\r\n subplot.set_xticklabels(labels, rotation=90, size=_LINE_XTICK_LABELS_SIZE)\r\n\r\n # Show the legend if there are not multiple subplots\r\n if single:\r\n font_properties = matplotlib.font_manager.FontProperties(\r\n size=_LEGEND_FONT_SIZE)\r\n legend = figure.legend(lines, [plot['label'] for plot in plots],\r\n prop=font_properties,\r\n handlelen=_LEGEND_HANDLE_LENGTH,\r\n numpoints=_LEGEND_NUM_POINTS)\r\n # Workaround for matplotlib not keeping all line markers in the legend -\r\n # it seems if we don't do this, matplotlib won't keep all the line\r\n # markers in the legend.\r\n for line in legend.get_lines():\r\n line.set_marker(_LEGEND_MARKER_TYPE)\r\n\r\n return (figure, area_data)", "title": "" }, { "docid": "e8e61f39c7fd5c5e959b27acaac25885", "score": "0.6062538", "text": "def makePlot(self):\n rec = self.grid.getReceptor(self.receptor)\n\n data = self.getData(self.receptor, self.odor)\n # raise BadOption\n\n self.plot = MeshLinePlot(color=[1, 0, 0, 1])\n # print(self.grid.odors)\n # plot.points = self.grid.getConcPointsOcc(0, 'odorlog_5-4-100a.odo') #[(x, -0.1*x) for x in range(-10, 0)]\n self.plot.points = data\n # print(plot.points)\n self.graph.add_plot(self.plot)\n self.conc_line = MeshLinePlot(color=[0, 0, 1, 1])\n self.conc_line.points = [(self.conc, y/10) for y in range(0,11)]\n self.graph.add_plot(self.conc_line)", "title": "" }, { "docid": "5b9efc75eec92732c4fdcaedff8209e1", "score": "0.6044267", "text": "def plot(self, x=None, y=None, ax=None):\n\t\tif ax == None:\n\t\t\tfig, ax = plt.subplots()\n\t\t\treturn_fig = True\n\t\telse:\n\t\t\treturn_fig = False\n\n\t\t\n\t\tcolors = [cm.viridis(x) for x in np.linspace(0, 1, len(self.keys()))]\n\n\t\tfor color, index in zip(colors, self.keys()):\n\t\t\tif x == None:\n\t\t\t\tdata_keys_to_plot = list(self[index]['data'].keys())\n\t\t\telse:\n\t\t\t\txs = self[index]['data'][x]\n\t\t\t\tdata_keys_to_plot = set(self[index]['data'].keys()) - set({x})\n\n\t\t\tif type(y) == type(None):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tdata_keys_to_plot = set(np.array([y]).flatten())\n\t\t\t\t\n\n\t\t\tfor plotkey in data_keys_to_plot:\n\t\t\t\tto_plot = self[index]['data'][plotkey]\n\t\t\t\t\n\t\t\t\tif len(to_plot.shape) == 1: #1d data\n\t\t\t\t\tif x == None:\n\t\t\t\t\t\tax.plot(to_plot, color = color)\n\t\t\t\t\telse:\n\t\t\t\t\t\tax.plot(xs, to_plot, color = color)\n\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\tfor i in range(to_plot.shape[0]):\n\t\t\t\t\tif x == 
None:\n\t\t\t\t\t\tax.plot(to_plot[i,:], color = color)\n\t\t\t\t\telse:\n\t\t\t\t\t\tax.plot(xs[i,:], to_plot[i,:], color = color)\n\n\t\tif return_fig:\n\t\t\treturn fig, ax\n\t\telse:\n\t\t\treturn ax", "title": "" }, { "docid": "90949c69b22682e3e3954aaf8eb2f0a9", "score": "0.60441655", "text": "def view_with_pyplot():\n df = pd.DataFrame(create.get_numeric_matrix())\n #df.plot()\n #df.plot.area()\n #df.plot.bar()\n df.plot.barh()\n plt.show() \n plt.close()", "title": "" }, { "docid": "e70d69f614c9ecee8a654b335fe00e71", "score": "0.6037614", "text": "def plot_experiment_1_2():\n\n trec_name = 'TREC-5'\n percentage = 0.1\n\n measures = ['ap', 'rp', 'p30']\n subtitles = ['MAP', 'RP', 'P@30']\n\n stats = ['mse', 'bias', 'variance']\n ylabels = ['MSE', 'Bias', 'Variance']\n\n models = ['mtf', 'mab', 'importance', 'activewresap']\n labels = ['MTF', 'MAB', 'Stratif', 'Active']\n colors = ['#0088A8', '#B8860B', '#483D8B', '#DC143C']\n markers = ['^', 'v', '*', '.']\n\n f, axarr = plt.subplots(len(stats), len(measures))\n\n for k, model in enumerate(models):\n for j, measures_ in enumerate(measures):\n\n # read data\n df = pd.read_csv(os.path.join(os.path.join(EXP_DIR, trec_name), '{}.exp1.csv'.format(model)))\n df = df.loc[(df['percentage'] == percentage) & (df['measure'] == measures_)]\n systems = df['system'].unique()\n # print(systems.values)\n\n # extract statistics\n array_stats = np.full((len(systems), len(stats)), fill_value=0, dtype=float)\n for s, sys in enumerate(systems):\n for i, stats_ in enumerate(stats):\n array_stats[s][i] = df.loc[df['system'] == sys].ix[:, stats_].values[0]\n\n # plot bias/variance/mse\n for i, stats_ in enumerate(stats):\n if not (('mtf' == model ) and ('variance' == stats_)):\n\n axarr[i, j].plot(range(len(systems)), array_stats[:, i].flatten(), label=model, linestyle='--',\n linewidth=0.5, color=colors[k], marker=markers[k], markersize=3.5)\n\n # tick fontsize & spacing\n axarr[i, j].xaxis.set_tick_params(pad=1, labelsize=6)\n axarr[i, j].yaxis.set_tick_params(pad=1, labelsize=6)\n\n # grid\n axarr[i, j].grid(b=True, which='major', color='gray', linestyle='-',\n alpha=0.25, zorder=1, lw=0.5)\n\n first_row = (i == 0)\n last_row = (i == (len(stats) - 1))\n first_in_row = (j == 0)\n middle_in_row = (j == int(len(measures)/2))\n\n if first_row:\n axarr[i, j].set_title(subtitles[j], fontsize=10)\n if last_row & middle_in_row:\n axarr[i, j].set_xlabel('System run', labelpad=8, fontsize=10)\n if first_in_row:\n axarr[i, j].set_ylabel(ylabels[i], labelpad=8, fontsize=10)\n\n # f.suptitle(r\"Bias & variance\", fontsize=11)\n\n patch0 = mlines.Line2D([], [], color=colors[0], marker=markers[0], markersize=4, label=labels[0])\n patch1 = mlines.Line2D([], [], color=colors[1], marker=markers[1], markersize=4, label=labels[1])\n patch2 = mlines.Line2D([], [], color=colors[2], marker=markers[2], markersize=4, label=labels[2])\n patch3 = mlines.Line2D([], [], color=colors[3], marker=markers[3], markersize=4, label=labels[3])\n plt.legend(handles=[patch0, patch1, patch2, patch3])\n\n plt.show()\n return", "title": "" }, { "docid": "b9626b0fa70f32c519782c4a6e963c0b", "score": "0.60369813", "text": "def plotOpen(mydata):\n plt.plot(mydata['Open'])\n plt.show()", "title": "" }, { "docid": "b668062b7623a19f5eea6ad3acd8fd69", "score": "0.6031428", "text": "def plot_data_population(self):\n plt.figure()\n plt.plot(self.data_population, \"k--\")\n plt.plot(self.model.population, \"k-\")\n plt.legend([\"Data\", \"Model\"])\n plt.xlabel(\"Step #\")\n plt.ylabel(\"Population\")\n 
plt.title(\"Comparision of Population Time Series for Data and Model\")", "title": "" }, { "docid": "73d9167a3e45107eabd5f8d81c7376fe", "score": "0.6028835", "text": "def make_plot(self):\n fig = plt.figure()\n print(\"Making the plots!\")\n plt.plot(self.__x, self.__y1)\n plt.ylabel('Number of fruit catch')\n plt.xlabel('Number of episodes')\n plt.savefig('fruits.png')\n ######\n fig = plt.figure()\n plt.plot(self.__x, self.__y2)\n plt.ylabel('Total reward')\n plt.xlabel('Number of episodes')\n plt.savefig('reward.png')\n print(\"done\")", "title": "" }, { "docid": "3a4584e9b94bf2309d182da1825b4a57", "score": "0.6027923", "text": "def plots_type_cy(self):\n\n for name in self.data.names:\n maxval = 0\n try:\n if self.printing[name][0]:\n fig = plt.figure(figsize=self.figsize)\n lines = []\n label = []\n plt.title(name)\n plt.grid(True, which=\"both\", axis=\"both\")\n for fr in range(len(self.data.memory_dict[name])):\n fr_id = self.data.frame_id[name][fr]\n lines += plt.plot(self.moving_mean(self.data.get_values(name, fr_id), self.window),\n color=self.color, alpha=(fr + 1.0) / (len(self.data.memory_dict[name]) + 1))\n if self.printing[name][2] == -1:\n maxval = max(max(self.data.get_values(name, fr_id)), maxval)\n else:\n maxval = self.printing[name][2]\n label.append(\"%s\" % fr_id)\n plt.ylim(self.printing[name][1], maxval)\n plt.xlabel(\"cycles\")\n plt.legend(label, bbox_to_anchor=(1, 0.5), loc=\"center left\")\n self.plots[0].append(name)\n self.plots[1].append(fig)\n if self.showing:\n plt.show()\n else:\n plt.close(fig)\n except:\n pass", "title": "" }, { "docid": "e5f3f5bded3312355c9ce74c3e989881", "score": "0.60256886", "text": "def plot_elbo(self):\n sns.set_style(\"white\")\n plt.plot(-self.advi_hist)\n plt.ylabel('ELBO')\n plt.xlabel('iteration')\n sns.despine()", "title": "" }, { "docid": "7aa0f97be16787238c89b0f109347271", "score": "0.60165954", "text": "def _draw_one_plot(\n df: pd.DataFrame,\n time_column_name: str,\n grain_column_names: List[str],\n pdf: PdfPages,\n) -> None:\n fig, _ = plt.subplots(figsize=(20, 10))\n df = df.set_index(time_column_name)\n plt.plot(df[[ACTUALS, PREDICTIONS]])\n plt.xticks(rotation=45)\n iteration = df[BACKTEST_ITER].iloc[0]\n if grain_column_names:\n grain_name = [df[grain].iloc[0] for grain in grain_column_names]\n plt.title(f\"Time series ID: {_format_grain_name(grain_name)} {iteration}\")\n plt.legend([\"actual\", \"forecast\"])\n plt.close(fig)\n pdf.savefig(fig)", "title": "" }, { "docid": "b8a67de17c317a15771bb8ca18fd5f1a", "score": "0.601435", "text": "def plotData(i):\n global count, ys \n count += 1\n data = readFromSerial()\n # The data must me plotted as float not as a string!\n xs.append(float(count))\n ys.append(float(data))\n\n # Clean everything before we plot\n ax1.clear()\n ax1.plot(xs,ys)\n\n\t# Adding labels and title\n ax1.set_xlabel('milliseconds (ms)')\n ax1.set_ylabel('Volts (V)')\n ax1.set_title('Sensor Acquistion')", "title": "" }, { "docid": "47779d2315aa3a3fadfc93eb10538962", "score": "0.6011693", "text": "def plot_multi_entries(xmin, xmax, ymin, ymax, xSets, ySets, xname, yname, entLabels, outname, yerror = 0,fsize = 9, psize = 2.0,lsize=0, resolution=100, linefit=0, connect=0):\n\n colorList = ('green', 'blue', 'red', 'lime', 'line', 'yellow', 'maroon', 'black', 'fushia', 'olive')\n\n markerList = ('o', '*', '+', '^', 's', 'D', '1', '2', '3', '4')\n plt.close('all')\n#\n#---- set a few parameters\n#\n mpl.rcParams['font.size'] = fsize\n props = font_manager.FontProperties(size=9)\n 
plt.subplots_adjust(hspace=0.08)\n\n#\n#---- set a panel\n#\n ax = plt.subplot(111)\n ax.set_autoscale_on(False) #---- these three may not be needed for the new pylab, but \n ax.set_xbound(xmin,xmax) #---- they are necessary for the older version to set\n\n ax.set_xlim(xmin=xmin, xmax=xmax, auto=False)\n ax.set_ylim(ymin=ymin, ymax=ymax, auto=False)\n\n tot = len(entLabels)\n#\n#--- start plotting each data set\n#\n lnamList = []\n for i in range(0, tot):\n xdata = xSets[i]\n ydata = ySets[i]\n color = colorList[i]\n marker = markerList[0]\n label = entLabels[i]\n\n if tot > 1:\n lnam = 'p' + str(i)\n lnamList.append(lnam)\n exec('%s, = plt.plot(xdata, ydata, color=\"%s\", lw =lsize , marker=\"%s\", markersize=3, label=entLabels[i])' %(lnam, color, marker))\n\n else:\n#\n#--- if there is only one data set, ignore legend\n#\n plt.plot(xdata, ydata, color=color, lw =connect , marker='o', markersize=psize)\n\n if yerror != 0:\n p, = plt.errorbar(xdata, ydata, yerr=yerror[i], lw = 0, elinewidth=1)\n\n if linefit > 0:\n (sint, slope,serror) = robust.robust_fit(xdata, ydata)\n start = sint + slope * xmin\n stop = sint + slope * xmax\n plt.plot([xmin, xmax],[start,stop], color=color, lw =lsize )\n\n#\n#--- add legend\n#\n if tot > 1:\n line = '['\n for ent in lnamList:\n if line == '[':\n line = line + ent\n else:\n line = line +', ' + ent\n line = line + ']'\n\n leg = eval(\"legend(%s, entLabels, prop=props)\" % (line))\n leg.get_frame().set_alpha(0.5)\n\n ax.set_xlabel(xname, size=fsize)\n ax.set_ylabel(yname, size=fsize)\n\n\n#\n#--- set the size of the plotting area in inch (width: 10.0in, height 5.0in)\n#\n fig = matplotlib.pyplot.gcf()\n fig.set_size_inches(10.0, 5.0)\n#\n#--- save the plot in png format\n#\n plt.savefig(outname, format='png', dpi=resolution)", "title": "" }, { "docid": "70992bcc5ce3687528a7aafcacfaa4e6", "score": "0.60112303", "text": "def plot_fitres(self):\r\n\r\n f, a = plt.subplots()\r\n data = self.data_fit_normal.dropna()\r\n a.fill_between(data.index, data.Sigma_high, data.Sigma_low,\r\n color=plt_tools.color_cycle[0],\r\n alpha=0.5,\r\n )\r\n a.plot(data.index.values, data.Pos.values, color=plt_tools.color_cycle[0], linewidth=2, label='center')\r\n # data.Pos.plot(ax=a, color=plt_tools.color_cycle[0], linewidth=2, label='center')\r\n a.legend(loc=2)\r\n a.set_ylabel('Particle diameter (nm)')\r\n a.set_xlabel('Altitude (m)')\r\n\r\n a2 = a.twinx()\r\n # data.Amp.plot(ax=a2, color=plt_tools.color_cycle[1], linewidth=2, label='amplitude')\r\n a2.plot(data.index.values, data.Amp.values, color=plt_tools.color_cycle[1], linewidth=2, label='amplitude')\r\n a2.legend()\r\n a2.set_ylabel('Amplitude - %s' % (get_label(self.distributionType)))\r\n f.autofmt_xdate()\r\n return f, a, a2", "title": "" }, { "docid": "d1b92e4dc99401aea6feec9ee5772e92", "score": "0.60071975", "text": "def plot_experiment_2_1():\n\n trecs = ['TREC-5', 'TREC-6', 'TREC-7', 'TREC-8', 'TREC-9', 'TREC-10', 'TREC-11']\n\n measures = ['ap', 'rp', 'p30']\n subtitles = ['MAP', 'RP', 'P@30']\n\n models = ['mtf', 'mab', 'importance', 'activewresap']\n labels = ['MTF', 'MAB', 'Stratif', 'Active']\n colors = ['#0088A8', '#B8860B', '#483D8B', '#DC143C']\n markers = ['^', 'v', '*', '.']\n\n f, axarr = plt.subplots(len(trecs), len(measures))\n\n for i, trec_name in enumerate(trecs):\n for j, measure in enumerate(measures):\n ax_twin = axarr[i, j].twinx()\n for k, model in enumerate(models):\n df = pd.read_csv(os.path.join(os.path.join(EXP_DIR, trec_name), '{}.exp2.csv'.format(model)))\n perc = df.loc[df['measure'] 
== measure].ix[:, 'percentage']\n\n # left y-axis\n rms = df.loc[df['measure'] == measure].ix[:, 'rms']\n rms_var = df.loc[df['measure'] == measure].ix[:, 'rms_var']\n\n axarr[i, j].plot(perc, rms, linestyle='-', label=model,\n linewidth=0.4, color=colors[k], marker=markers[k], markersize=2)\n\n # axarr[i, j].fill_between(perc, rms - rms_var, rms + rms_var, color=colors[k], alpha=0.8)\n\n # right y-axis\n tau = df.loc[df['measure'] == measure].ix[:, 'tau']\n tau_var = df.loc[df['measure'] == measure].ix[:, 'tau_var']\n ax_twin.plot(perc, tau, linestyle='--', label=model,\n linewidth=0.4, color=colors[k], marker=markers[k], markersize=2)\n\n # ax_twin.fill_between(perc, tau - tau_var, tau + tau_var, color=colors[k], alpha=0.8)\n\n # tick fontsize & spacing\n axarr[i, j].xaxis.set_tick_params(pad=1, labelsize=4)\n axarr[i, j].yaxis.set_tick_params(pad=1, labelsize=4)\n ax_twin.yaxis.set_tick_params(pad=1, labelsize=4)\n\n # grid\n axarr[i, j].grid(b=True, which='major', color='gray', linestyle='-',\n alpha=0.25, zorder=1, lw=0.2)\n\n first_row = (i == 0)\n last_row = (i == len(trecs)-1)\n first_in_row = (j == 0)\n middle_in_row = (j == int(len(measures)/2))\n\n if first_row:\n axarr[i, j].set_title(subtitles[j], fontsize=4)\n if last_row & middle_in_row:\n axarr[i, j].set_xlabel('Percentage', labelpad=8, fontsize=4)\n if first_in_row:\n axarr[i, j].set_ylabel(trecs[i], labelpad=8, fontsize=4)\n\n # f.suptitle(r\"RMS (left y-axis) & Kendall's $\\tau$ (right y-axis)\", fontsize=11)\n\n # Legend\n patch0 = mlines.Line2D([], [], color=colors[0], linewidth=0.4, marker=markers[0], markersize=2, label=labels[0])\n patch1 = mlines.Line2D([], [], color=colors[1], linewidth=0.4, marker=markers[1], markersize=2, label=labels[1])\n patch2 = mlines.Line2D([], [], color=colors[2], linewidth=0.4, marker=markers[2], markersize=2, label=labels[2])\n patch3 = mlines.Line2D([], [], color=colors[3], linewidth=0.4, marker=markers[3], markersize=2, label=labels[3])\n\n rms_line = mlines.Line2D([], [], linestyle='-', linewidth=0.4, color='black', label='$RMS$')\n tau_line = mlines.Line2D([], [], linestyle='--', linewidth=0.4, color='black', label=r'$\\tau$')\n\n plt.legend(handles=[patch0, patch1, patch2, patch3, rms_line, tau_line], fontsize=4)\n\n plt.show()\n\n return", "title": "" }, { "docid": "5c8081a2edd6f328b53782958aa0ac62", "score": "0.6006711", "text": "def __init__(self):\n self.fig = plt.figure()\n self.data = [[] for _ in range(7)]\n self.plots = []\n for i in range(7):\n self.plots.append(self.fig.add_subplot(2, 4, i+1))\n plt.ion()\n self.fig.show()\n self.fig.canvas.draw()", "title": "" }, { "docid": "717201975eb53f90807d691475cac282", "score": "0.60055894", "text": "def plot(self):\n plt.figure()\n self.data[['Open', 'Close', 'High', 'Low']].plot(figsize=(16, 4), title='{} OCHL Price'.format(self.symbol.upper()))\n\n plt.figure()\n self.data[['Volume']].plot(figsize=(16, 4), title='{} Volume'.format(self.symbol.upper()))\n\n plt.figure()\n ax3 = (100.0 * self.returns()).hist(figsize=(16, 4), bins=100, normed=1)\n (100.0 * self.returns()).plot(kind='kde', ax=ax3)\n ax3.set_title('{} Daily Return Distribution'.format(self.symbol.upper()))\n\n plt.figure()\n ax4 = (100.0 * self.returns(freq='M')).hist(figsize=(16, 4), bins=100, normed=1)\n (100.0 * self.returns(freq='M')).plot(kind='kde', ax=ax4)\n ax4.set_title('{} Monthly Return Distribution'.format(self.symbol.upper()))", "title": "" }, { "docid": "4991535cfe90fcac6e482601565d31e8", "score": "0.60024863", "text": "def plot_experiment_3_1():\n\n 
trecs = ['TREC-5', 'TREC-6', 'TREC-7', 'TREC-8', 'TREC-9', 'TREC-10', 'TREC-11']\n\n measures = ['ap', 'rp', 'p30']\n subtitles = ['MAP', 'RP', 'P@30']\n\n models = ['mtf', 'mab', 'importance', 'activewresap']\n labels = ['MTF', 'MAB', 'Stratif', 'Active']\n colors = ['#0088A8', '#B8860B', '#483D8B', '#DC143C']\n markers = ['^', 'v', '*', '.']\n\n stats = ['rms', 'tau']\n linestyles = ['-', '--']\n\n types = ['null']\n\n f, axarr = plt.subplots(len(trecs), len(measures))\n\n for i, trec_name in enumerate(trecs):\n for j, measure in enumerate(measures):\n ax_twin = axarr[i, j].twinx()\n for k, model in enumerate(models):\n df = pd.read_csv(os.path.join(os.path.join(EXP_DIR, trec_name), '{}.exp3.group.csv'.format(model)))\n for l, type in enumerate(types):\n # Left y-axis\n axarr[i, j].plot(df.loc[(df['measure'] == measure) & (df['type'] == type)].ix[:,'percentage'],\n df.loc[(df['measure'] == measure) & (df['type'] == type)].ix[:, stats[0]],\n linestyle=linestyles[0], label=model, linewidth=0.4, color=colors[k], marker=markers[k], markersize=2)\n\n # Right y-axis\n ax_twin.plot(df.loc[(df['measure'] == measure) & (df['type'] == type)].ix[:,'percentage'],\n df.loc[(df['measure'] == measure) & (df['type'] == type)].ix[:, stats[1]],\n linestyle=linestyles[1], label=model, linewidth=0.4, color=colors[k], marker=markers[k], markersize=2)\n\n # Tick fontsize & spacing\n axarr[i, j].xaxis.set_tick_params(pad=2, labelsize=4)\n axarr[i, j].yaxis.set_tick_params(pad=2, labelsize=4)\n ax_twin.yaxis.set_tick_params(pad=2, labelsize=4)\n\n # Grid\n axarr[i, j].grid(b=True, which='major', color='gray', linestyle='-',\n alpha=0.25, zorder=1, lw=0.2)\n\n first_row = (i == 0)\n last_row = (i == len(trecs)-1)\n first_in_row = (j == 0)\n middle_in_row = (j == int(len(measures)/2))\n\n if first_row:\n axarr[i, j].set_title(subtitles[j], fontsize=4)\n if last_row & middle_in_row:\n axarr[i, j].set_xlabel('Percentage', labelpad=8, fontsize=4)\n if first_in_row:\n axarr[i, j].set_ylabel(trecs[i], labelpad=8, fontsize=4)\n\n # f.suptitle(r\"RMS (left y-axis) & Kendall's $\\tau$ (right y-axis)\", fontsize=11)\n\n # Legend\n patch0 = mlines.Line2D([], [], color=colors[0], linewidth=0.4, marker=markers[0], markersize=2, label=labels[0])\n patch1 = mlines.Line2D([], [], color=colors[1], linewidth=0.4, marker=markers[1], markersize=2, label=labels[1])\n patch2 = mlines.Line2D([], [], color=colors[2], linewidth=0.4, marker=markers[2], markersize=2, label=labels[2])\n patch3 = mlines.Line2D([], [], color=colors[3], linewidth=0.4, marker=markers[3], markersize=2, label=labels[3])\n\n rms_line = mlines.Line2D([], [], linestyle='-', linewidth=0.4, color='black', label='$RMS$')\n tau_line = mlines.Line2D([], [], linestyle='--', linewidth=0.4, color='black', label=r'$\\tau$')\n\n plt.legend(handles=[patch0, patch1, patch2, patch3, rms_line, tau_line], fontsize=4)\n\n plt.show()\n return", "title": "" }, { "docid": "13f30e10d8d682818b4db42881aaa9cb", "score": "0.6000625", "text": "def plot_experiment_1_1():\n\n trec = 'TREC-5'\n percentage = 'percentage10'\n\n measures = ['ap', 'rp', 'p30']\n subtitles = ['MAP', 'RP', 'P@30']\n\n models = ['mtf', 'mab', 'importance', 'activewresap']\n labels = ['MTF', 'MAB', 'Stratif', 'Active']\n colors = ['#0088A8', '#B8860B', '#483D8B', '#DC143C']\n markers = ['^', 'v', 'x', 'o']\n f, axarr = plt.subplots(1, len(measures), sharey=True)\n\n for k, model in enumerate(models):\n # read one sample\n sample_index = '2'\n sample_dir = '{}{}'.format('sample', sample_index)\n ret_dir = 
os.path.join(RESULT_DIR, trec, sample_dir, percentage)\n file_name = '{}.csv'.format(model)\n df = pd.read_csv(os.path.join(ret_dir, file_name))\n\n # plot scatters\n for i, m in enumerate(measures):\n actu_m = 'actu_' + m\n estm_m = 'estm_' + m\n\n # # pearson rho\n # rho_, p_value = pearsonr(df[estm_m].values, df[actu_m].values)\n # axarr[i].text(0.02+0.02, 0.3, r'$Pearson \\ \\rho$', fontsize=5)\n # axarr[i].text(0.02, 0.3-(k+1)*0.012, '{}'.format(labels[k]), fontsize=5, family='serif')\n # axarr[i].text(0.02+0.05, 0.3-(k+1)*0.012, ': {:<.4f}'.format(rho_), fontsize=5, family='serif')\n\n # scatters\n axarr[i].scatter(df[actu_m].values, df[estm_m].values, marker=markers[k], s=16, color=colors[k],\n alpha=0.8, label=labels[k])\n axarr[i].plot([0, 1], [0, 1], color='black', linestyle='--', linewidth=0.5)\n\n axarr[i].set_ylim(0, 0.5)\n axarr[i].set_xlim(0, 0.5)\n\n axarr[i].xaxis.set_tick_params(pad=1, labelsize=10)\n axarr[i].yaxis.set_tick_params(pad=1, labelsize=10)\n\n axarr[i].set_title(subtitles[i], fontsize=12)\n\n # grid\n axarr[i].grid(b=True, which='major', color='gray', linestyle='-', alpha=0.25, zorder=1, lw=0.5)\n\n first_in_row = (i == 0)\n middle_in_row = (i == int(len(measures) / 2))\n\n if first_in_row:\n axarr[i].set_ylabel('Estimated value', fontsize=12)\n if middle_in_row:\n axarr[i].set_xlabel('Actual value', labelpad=8, fontsize=12)\n\n plt.legend()\n plt.show()\n return", "title": "" }, { "docid": "beb0761e0e7fe784b963d627c07e9182", "score": "0.5993595", "text": "def plot_POD_results(self, axes_hdl):\r\n pass", "title": "" }, { "docid": "92827d786f4040cd2baa23eb3e372221", "score": "0.5988429", "text": "def plot_da(self, data):\n return data.hvplot(\n x=\"y\",\n y=\"x\",\n rasterize=True,\n cmap=\"gray\",\n width=1000,\n height=400,\n title=self.pid[:15],\n )", "title": "" }, { "docid": "eef7534dc0c1db0e353398e6582f2eb8", "score": "0.59882176", "text": "def plot_data(dataframe, yname):\n number_of_epochs = np.size(dataframe, axis=0)\n number_of_plots = np.size(dataframe, axis=1)-1\n x_axis = np.arange(0,number_of_epochs)\n\n matplotlib.rcParams.update({'font.size': 20})\n matplotlib.rcParams['font.family'] = \"serif\"\n fig = plt.figure(figsize=(11,8))\n count = 0\n for col, item in dataframe.iteritems():\n if col != 'epoch':\n plt.plot(x_axis, dataframe[col], label='Validation = Fold '+str(count))\n count += 1\n\n plt.xlabel('Epoch')\n plt.ylabel(yname)\n plt.ylim(0,1)\n #plt.title('Validation')\n plt.legend()\n fig.tight_layout()\n plt.show()", "title": "" }, { "docid": "b5af3789619fe8f2858bcbdba39c137e", "score": "0.59875983", "text": "def show_plots_of_dataset(self):\n\n fig1 = plt.figure(figsize=(15, 15))\n x = 0\n r = 10\n c = 10\n for i in range(x, r * c):\n img, cat = self.dataset['train']['VAE'][i]\n img = img.view(28, 28).data\n img = img.numpy()\n ax = fig1.add_subplot(r, c, i - x + 1)\n ax.axis('off')\n ax.imshow(img,cmap='gray')\n plt.ioff()\n plt.show()", "title": "" }, { "docid": "20e6bcff922d77fad76d0a7279e5d9fd", "score": "0.5987113", "text": "def generate_plots(self):\n x = np.arange(len(self.classes))\n y = self.stats\n plt.figure()\n#plt.subplot(211)\n plt.bar(x,y,edgecolor='k',linewidth=2,color=['black','red','green','blue','yellow'])\n plt.tick_params(axis='both',which='major',labelsize=12)\n plt.xticks(x,self.classes,fontsize=8)\n plt.ylabel(\"Appearances\",fontsize=14)\n plt.savefig(\"flaskapp/static/Cumulative_Bar.png\")\n\n x2 = np.array(range(self.NUM_FRAMES))/45.0\n plt.figure()\n#plt.subplot(212)\n cs = 
['black','red','green','blue','yellow']\n for i in range(len(self.classes)):\n plt.plot(x2,self.running[:,i],color=cs[i],linewidth=2)\n\n plt.legend(self.classes)\n plt.xlim(0.0,float(self.NUM_FRAMES)/45.0)\n plt.tick_params(axis='both',which='major',labelsize=12)\n plt.xlabel(\"Time (seconds)\",fontsize=14)\n plt.ylabel(\"Appearances\")\n plt.savefig(\"flaskapp/static/Run_Chart.png\")\n return", "title": "" }, { "docid": "a7fe21b8571e739d22ebbcbea6e3dd1d", "score": "0.5972485", "text": "def plot_input_data(self, axes_hdl):\r\n if self.is_data_loaded is False:\r\n return\r\n axes_hdl.cla()\r\n axes_hdl.plot(self.input_sizes,self.input_results,'b.')\r\n axes_hdl.set_title(self.title)\r\n axes_hdl.set_xlabel(self.title_sizes)\r\n axes_hdl.set_ylabel(self.title_results)\r\n return", "title": "" }, { "docid": "4ef5200205352075b4f9c0db2c08d16b", "score": "0.59701204", "text": "def visual_charts(train_y_orig, dev_y_orig, test_y_orig):\n datasets = {\"Training Set\":train_y_orig,\"Dev Set\": dev_y_orig,\"Test Set\": test_y_orig}\n \n #setting the plot style\n# plt.style.use('seaborn')\n \n #creating subplots\n fig, axes = plt.subplots(nrows=3, ncols=1,figsize=(10,15))\n fig.subplots_adjust(hspace=.2)\n i = 0\n \n #plotting the bar graph for each dataset labels\n for dataset,datalabel in datasets.items():\n unique, counts = np.unique(datalabel, return_counts=True) \n axes[i].bar(unique, counts)\n max_value = np.max(counts)\n axes[i].set(xticks = unique, ylim = (0,max_value + max_value // 10))\n axes[i].set_title(\"Number of Examples in \" + dataset , fontsize = 16)\n axes[i].set_xlabel(\"Classes\", fontsize = 12)\n axes[i].set_ylabel(\"Number of Examples\", fontsize = 12)\n i += 1\n\n plt.show()", "title": "" }, { "docid": "f6b19fc94ccc2feb1a2290194d9cacf3", "score": "0.596995", "text": "def initial_plot(data):\n fig, ax_val = plt.subplots(figsize=(18, 8))\n\n ax_val.plot(data.index.values, data['count'] / 100000,\n lw=2, color='mediumseagreen')\n\n ax_val.set_xlabel('Year', fontsize=20)\n ax_val.set_ylabel('Ride Count (100k)', fontsize=20)\n\n ax_val.xaxis.set_major_locator(mdates.YearLocator())\n ax_val.xaxis.set_major_formatter(DateFormatter(\"%Y\"))\n plt.title('Number of Bike Rentals per Month, Time Series', fontsize=30)\n plt.show()", "title": "" }, { "docid": "e87e2052fc2bb99ac5e568c88c1328e9", "score": "0.59695923", "text": "def plot_2d(self):\n pass", "title": "" }, { "docid": "8bd4f452b595c62c542652b84af44dca", "score": "0.596561", "text": "def main():\n\n # Parse args\n parser = argparse.ArgumentParser(description=main.__doc__)\n parser.add_argument('--delimiter', '-d', default=None,\n help=\"Set the delimiter, by default use any whitespace.\")\n parser.add_argument('--header', action=\"store_true\",\n help=\"Does the file have a header line?\")\n parser.add_argument('filenames', nargs=\"*\", default=[\"-\"],\n help=\"Files to plot, - reads from stdin, by default read from stdin\")\n parser.add_argument('--transpose', '-t', action=\"store_true\",\n help=\"Transpose data before plotting\")\n parser.add_argument('--stretch-x', action = \"store_true\",\n help = \"Should subsequent files be mapped onto the x range of the first?\")\n parser.add_argument('--col', default = None, type = int,\n help = \"Column of the data to plot\")\n parser.add_argument('--use-x', action = \"store_true\",\n help = \"Use first column of data as x values\")\n parser.add_argument('--scatter', action = \"store_true\",\n help = \"Create a scatter plot\")\n args = parser.parse_args()\n\n\n # Make plot\n fig, ax = 
subplots(1, 1)\n\n xmin = None\n xmax = None\n\n # Read and plot data\n for name in args.filenames:\n with open_or_stdin(name, 'r') as f:\n\n # Check the first line, if it's text then treat it as a header\n maybe_header = f.readline()\n ncol = len(maybe_header.strip().split(args.delimiter))\n\n # Get headers\n if args.transpose:\n # Fill in later\n headers = []\n start_line = [maybe_header]\n\n elif re.match('^[A-Za-z]', maybe_header) or args.header:\n headers = maybe_header.strip().split(args.delimiter)\n start_line = []\n\n else:\n headers = [str(i) for i in range(0, ncol)]\n start_line = [maybe_header]\n\n\n # Read data\n data = []\n for line in start_line + f.readlines():\n line = line.strip().split(args.delimiter)\n\n if args.transpose and args.header:\n headers.append(line[0])\n line = line[1:]\n\n l = [float(x) for x in line]\n data.append(l)\n\n data = np.array(data)\n if args.transpose:\n data = np.transpose(data)\n\n # If we said to, then use the first col as x values\n if args.use_x:\n x_data = data[:, 0]\n x_label = headers[0]\n y_data = data[:, 1:]\n y_label = headers[1]\n else:\n x_label = \"Number\"\n x_data = list(range(0, data.shape[0]))\n y_data = data\n y_label = headers[0]\n\n # Maybe pick some specific cols for the y values\n if args.col is not None:\n y_data = y_data[:, args.col]\n y_label = headers[args.col]\n\n\n # Maybe stretch the data to fit the first files x scale\n if args.stretch_x and xmin is None and xmax is None:\n xmin = min(x_data)\n xmax = max(x_data)\n elif args.stretch_x:\n x_data = rescale(x_data, xmin, xmax)\n\n for y, name in zip(zip(*list(y_data)), headers):\n\n if args.scatter:\n ax.scatter(x_data, y, label = name)\n else:\n ax.plot(x_data, y, label = name)\n\n ax.legend(loc=0)\n\n pltshow()", "title": "" }, { "docid": "f7c7870c83fc86dfdc34ecd431a4e44a", "score": "0.5953242", "text": "def plot_example(xv, yv, b):\n matplotlib.rcParams['font.family'] = \"serif\"\n matplotlib.rcParams['font.sans-serif'] = \"Times\"\n matplotlib.rcParams[\"legend.edgecolor\"] = \"None\"\n matplotlib.rcParams[\"axes.spines.top\"] = False\n matplotlib.rcParams[\"axes.spines.bottom\"] = True\n matplotlib.rcParams[\"axes.spines.left\"] = True\n matplotlib.rcParams[\"axes.spines.right\"] = False\n matplotlib.rcParams['axes.grid'] = True\n matplotlib.rcParams['axes.grid.axis'] = 'both'\n matplotlib.rcParams['axes.grid.which'] = 'major'\n matplotlib.rcParams['legend.edgecolor'] = '1.0'\n plt.plot(xv[:, 113], yv[:, 113], 'ko')\n plt.plot(xv[:, 113], xv[:, 113]*b[113, 1] + b[113, 0], 'k')\n #plt.plot(x[113], x[113]*b[113, 1] + b[113, 0], 'ro')\n plt.grid(True)\n plt.xlabel('Radiance, $\\mu{W }nm^{-1} sr^{-1} cm^{-2}$')\n plt.ylabel('Reflectance')\n plt.show(block=True)\n plt.savefig('empirical_line.pdf')", "title": "" }, { "docid": "663ac9d9c69e07da5584839bc5a18800", "score": "0.5953058", "text": "def pr_plotter(self, pr):\n \n fig, axs = plt.subplots(self.nrows, self.ncols, sharex=True, sharey='row')\n axs[np.int(self.nrows//2), 0].set_ylabel('Power ' r'[$\\frac{Hz^2}{Hz}$]') #!these labels are adapted for 5x5 subplot.\n axs[-1, np.int(self.ncols//2)].set_xlabel('Frequency [Hz]')\n axs = axs.flatten()\n skipidx = len(self.contrasts) // len(axs)\n for pidx, ax in enumerate(axs):\n pidx *= skipidx\n ax.set_title('[%.4f]' %(self.contrasts[pidx]))\n ax.set_xticks(np.linspace(*self.xticks, 5))\n ax.plot(self.frange, pr[pidx], self.colstr+'-', label='response')\n ax.plot([self.bfr, self.bfr], [0,np.max(pr[pidx])], \n 'k--', label='baseline')\n ax.plot([self.sfr[pidx], 
self.sfr[pidx]], [0,np.max(pr[pidx])], \n 'k.-', label='contrast avg')\n plt.subplots_adjust(**self.sadj)\n return fig", "title": "" }, { "docid": "3db6be11a995ac7fa9b5cb6ac3da969a", "score": "0.5951082", "text": "def plot_bias_data():\n for ccd in range(0, 10):\n#\n#--- set arrays\n#\n yMinSets1 = []\n yMaxSets1 = []\n xSets1 = []\n ySets1 = []\n entLabels1 = []\n\n yMinSets2 = []\n yMaxSets2 = []\n xSets2 = []\n ySets2 = []\n entLabels2 = []\n\n yMinSets3 = []\n yMaxSets3 = []\n xSets3 = []\n ySets3 = []\n entLabels3 = []\n\n for quad in range(0, 4):\n#\n#--- read data in\n#\n file = data_dir + 'Bias_save/CCD' + str(ccd) + '/quad' + str(quad)\n f = open(file, 'r')\n data = [line.strip() for line in f.readlines()]\n f.close()\n\n dtime = []\n bias = []\n overclock = []\n bdiff = []\n scnt = 0.0\n sum1 = 0.0\n sum2 = 0.0\n sum3 = 0.0\n\n for ent in data:\n try:\n atemp = re.split('\\s+|\\t+', ent)\n stime = float(atemp[0])\n\n bval = float(atemp[1])\n oval = float(atemp[3])\n bmo = bval - oval \n\n dtime.append(stime)\n bias.append(bval)\n overclock.append(oval)\n bdiff.append(bmo)\n\n sum1 += bval\n sum2 += oval\n sum3 += bmo\n scnt += 1.0\n except:\n pass\n#\n#--- put x and y data list into the main list\n#\n title = 'CCD' + str(ccd) + ' Quad' + str(quad)\n dtime = convert_stime_to_ytime(dtime)\n\n xSets1.append(dtime)\n ySets1.append(bias)\n entLabels1.append(title)\n\n xSets2.append(dtime)\n ySets2.append(overclock)\n entLabels2.append(title)\n\n xSets3.append(dtime)\n ySets3.append(bdiff)\n entLabels3.append(title)\n#\n#--- set plotting range\n#\n xmin = min(dtime)\n xmax = max(dtime)\n diff = xmax - xmin\n xmin = int(xmin - 0.05 * diff)\n if xmin < 0:\n xmin = 0\n\n xmax = int(xmax + 0.05 * diff)\n#\n#-- plotting range of bias\n#\n avg = float(sum1) / float(scnt)\n ymin = int(avg - 200.0)\n ymax = int(avg + 200.0)\n\n yMinSets1.append(ymin)\n yMaxSets1.append(ymax)\n#\n#-- plotting range of overclock\n#\n avg = float(sum2) / float(scnt)\n ymin = int(avg - 200.0)\n ymax = int(avg + 200.0)\n\n yMinSets2.append(ymin)\n yMaxSets2.append(ymax)\n#\n#-- plotting range of bias - overclock\n#\n ymin = -1.0\n ymax = 2.5\n if ccd == 7:\n ymin = 2.5\n ymax = 6.0\n\n yMinSets3.append(ymin)\n yMaxSets3.append(ymax)\n\n xname = \"Time (Year)\"\n#\n#--- plotting bias \n#\n yname = 'Bias'\n ofile = web_dir + 'Plots/Bias_bkg/ccd' + str(ccd) +'.png'\n plotPanel(xmin, xmax, yMinSets2, yMaxSets1, xSets1, ySets1, xname, yname,\\\n entLabels1, ofile, mksize=1.0, lwidth=0.0)\n#\n#--- plotting overclock\n#\n yname = 'Overclock Level'\n ofile = web_dir + 'Plots/Overclock/ccd' + str(ccd) +'.png'\n plotPanel(xmin, xmax, yMinSets2, yMaxSets2, xSets2, ySets2, xname, yname,\\\n entLabels2, ofile, mksize=1.0, lwidth=0.0)\n#\n#--- plotting bias - overclock\n#\n yname = 'Bias'\n ofile = web_dir + 'Plots/Sub/ccd' + str(ccd) +'.png'\n plotPanel(xmin, xmax, yMinSets3, yMaxSets3, xSets3, ySets3, xname, yname,\\\n entLabels3, ofile, mksize=1.0, lwidth=0.0)", "title": "" }, { "docid": "c0fa09b885e5635a2bedbbefaed77e4d", "score": "0.594781", "text": "def plot_data(df_data):\n ax = df_data.plot(title=\"Stock Data\", fontsize=2)\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(\"Price\")\n plt.show()", "title": "" }, { "docid": "5053742d4f1e6f49bfcfb2d42db83726", "score": "0.59426576", "text": "def plots(self, data, y_pred_train):\n plt.plot(self.features_scores)\n plt.plot(self.pca_scores)\n sns.scatterplot(x=\"sum_axis_1_50\", y=\"variance\", data=data, hue=y_pred_train)\n plt.plot()", "title": "" } ]
7fab4685d4907d969bd93e4e11fe13da
Deletes an index and returns the Response.
[ { "docid": "9ae801eac2ec0c7b235c50e123742490", "score": "0.70268214", "text": "def delete(self,index_name):\r\n try:\r\n return self.client.delete_edge_index(index_name)\r\n except LookupError:\r\n return None", "title": "" } ]
[ { "docid": "20c4a6d50b9d9e8a7160621773fc11d0", "score": "0.81049603", "text": "def delete_index(self, name):\r\n path = build_path(index_path,name)\r\n return self.request.delete(path,params=None)", "title": "" }, { "docid": "48fc0bcbd5e887e6669b330ad32e82f5", "score": "0.7816265", "text": "def delete(self, index, params=None):\r\n _, data = self.transport.perform_request('DELETE', _make_path(index),\r\n params=params)\r\n return data", "title": "" }, { "docid": "e7ecf7798953c3a7893b5d3460165eb5", "score": "0.7806298", "text": "def delete_index():\n requests.delete(INDEX_URL)", "title": "" }, { "docid": "3d790f3db8464be24a98dd6c60d4c620", "score": "0.77793753", "text": "def DeleteIndex(index):\n _Call('DeleteIndex', index, api_base_pb.VoidProto())", "title": "" }, { "docid": "d7eb889433da06853f63d2dcae42f06f", "score": "0.74449074", "text": "def delete_index(self, index, docs=None, queries=None):\n raise NotImplementedError", "title": "" }, { "docid": "f26cc94c2c510163fa2d5557358614fa", "score": "0.7440584", "text": "def delete(self, index_name):\r\n raise NotImplementedError", "title": "" }, { "docid": "f26cc94c2c510163fa2d5557358614fa", "score": "0.7440584", "text": "def delete(self, index_name):\r\n raise NotImplementedError", "title": "" }, { "docid": "f26cc94c2c510163fa2d5557358614fa", "score": "0.7440584", "text": "def delete(self, index_name):\r\n raise NotImplementedError", "title": "" }, { "docid": "ab2d6331b23fb8135db9a30a2789910c", "score": "0.74283904", "text": "def delete_index(ddoc, name):\n assert self.server.version >= \"2.0\"\n self.server._DELETE(self.name, \"_index\", ddoc, \"json\", name)", "title": "" }, { "docid": "edd0e9a3b85c0b62b35ca203fd463eb5", "score": "0.7365272", "text": "def delete(self, index: int):\n try:\n self._stack.cancel(int(index))\n except ValueError as err:\n raise tornado.web.HTTPError(404, reason=str(err))\n else:\n self.set_status(204)\n self.finish()", "title": "" }, { "docid": "cee6650ea3bf52daca579eb9097e1578", "score": "0.7286008", "text": "def delete(self, index_name):\r\n return self.client.delete_edge_index(index_name)", "title": "" }, { "docid": "a51a741c13fcaadfc42d68d9e7b5cdf9", "score": "0.72569835", "text": "def delete(self):\n\n return HttpRequests.delete(self.config, '{}/{}'.format(self.index_path, self.uid))", "title": "" }, { "docid": "8d41d7fc5975a7825019dc8845a6a61a", "score": "0.7247449", "text": "def delete(self,index_name):\r\n raise NotImplementedError", "title": "" }, { "docid": "e1df7aaa8722505340b50958e0041b46", "score": "0.7238563", "text": "def delete(self, index_name):\n return self.client.delete_edge_index(index_name)", "title": "" }, { "docid": "ff86fe25ceba2d34c2fdac100ddc3fe9", "score": "0.7056827", "text": "def delete_index(name=app.config['ES_INDEX_NAME']):\n _delete_index(name)", "title": "" }, { "docid": "65309611c5360e359a2b0b38b3001427", "score": "0.7056389", "text": "def delete(self, index_name):\r\n return self.client.delete_vertex_index(index_name)", "title": "" }, { "docid": "af4add700bee6667d76c36880e09cb2e", "score": "0.69564444", "text": "def delete(self, index_name):\n return self.client.delete_vertex_index(index_name)", "title": "" }, { "docid": "5b02610b4768d2ade71b6493ccc3bff7", "score": "0.69370896", "text": "def index_exists(cls, index_name):\n url = 'http://%s/%s/' % (cls.get_host(), index_name)\n exists = requests.head(url).status_code\n if exists == 200:\n url = 'http://%s/%s/?pretty' % (cls.get_host(), index_name)\n response = requests.delete(url)\n r = response.json()\n acknowledged = 
r.get('acknowledged', False)\n if acknowledged:\n print 'DELETE http://%s/%s/' % (cls.get_host(), index_name), response.text\n else:\n error = r.get('error', 'unknown error')\n raise Exception(error)", "title": "" }, { "docid": "80ddaee077823e01df628ad8d20556be", "score": "0.6911914", "text": "def delete_edge_index(self, index_name):\r\n path = build_path(index_path, edge_path, index_name)\r\n params = None\r\n return self.request.delete(path, params)", "title": "" }, { "docid": "75900ce086c10c085d753cba3e7a71ce", "score": "0.6902261", "text": "def delete_search_index(self, index):\n if not self.yz_wm_index:\n raise NotImplementedError(\"Search 2.0 administration is not \"\n \"supported for this version\")\n\n url = self.search_index_path(index)\n\n # Run the request...\n status, _, _ = self._request('DELETE', url)\n\n if status != 204:\n raise RiakError('Error setting Search 2.0 index.')\n return True", "title": "" }, { "docid": "26af4bd47464a00fef1f7e6b3b831dfb", "score": "0.68732905", "text": "def delete_index(client: Elasticsearch, index_name: str, logger: StructLogger) -> None:\n logger.info(f\"attempting to delete index: {index_name}\")\n\n client.indices.delete(index=index_name, ignore=[404])\n logger.info(f\"deleted index: {index_name}\")", "title": "" }, { "docid": "0b0feaf6a98df27741dd07e488fea714", "score": "0.685596", "text": "def delete_index(es_object, index_name):\n try:\n es_object.indices.delete(index_name)\n print(f\"Deleted Index '{index_name}'.\")\n except Exception as ex:\n print(f\"Index '{index_name}' DNE.\")\n pass", "title": "" }, { "docid": "2df403cd0105ce4de5140cc1dff679a7", "score": "0.67597914", "text": "def delete(self, searchindex_id):\n searchindex = SearchIndex.query.get_with_acl(searchindex_id)\n if not searchindex:\n abort(HTTP_STATUS_CODE_NOT_FOUND, \"No searchindex found with this ID.\")\n\n if not searchindex.has_permission(current_user, \"delete\"):\n abort(\n HTTP_STATUS_CODE_FORBIDDEN,\n (\n \"User does not have sufficient access rights to \"\n \"delete the search index.\"\n ),\n )\n\n if searchindex.get_status.status == \"deleted\":\n abort(HTTP_STATUS_CODE_BAD_REQUEST, \"Search index already deleted.\")\n\n timelines = Timeline.query.filter_by(searchindex=searchindex).all()\n sketches = [\n t.sketch\n for t in timelines\n if t.sketch and t.sketch.get_status.status != \"deleted\"\n ]\n\n if sketches:\n error_strings = [\"WARNING: This timeline is in use by:\"]\n for sketch in sketches:\n error_strings.append(\" * {0:s}\".format(sketch.name))\n abort(HTTP_STATUS_CODE_FORBIDDEN, \"\\n\".join(error_strings))\n\n searchindex.set_status(status=\"deleted\")\n db_session.commit()\n\n other_indexes = SearchIndex.query.filter_by(\n index_name=searchindex.index_name\n ).all()\n if len(other_indexes) > 1:\n logger.warning(\n \"Search index: {0:s} belongs to more than one \"\n \"db entry.\".format(searchindex.index_name)\n )\n return HTTP_STATUS_CODE_OK\n\n try:\n self.datastore.client.indices.close(index=searchindex.index_name)\n except opensearchpy.NotFoundError:\n logger.warning(\n \"Unable to close index: {0:s}, the index wasn't \"\n \"found.\".format(searchindex.index_name)\n )\n\n return HTTP_STATUS_CODE_OK", "title": "" }, { "docid": "50bc3319eb93367080a97e63abb963c7", "score": "0.6732665", "text": "def delete(self, generic_index, generic_type, guid):\n GEN.delete(generic_index, generic_type, guid)\n return \"\", 204", "title": "" }, { "docid": "f95a88aca1f90c6b8a10a8b9d111b5e3", "score": "0.67308986", "text": "def delete(self, index_name):\r\n try:\r\n 
return self.client.delete_vertex_index(index_name)\r\n except LookupError:\r\n return None", "title": "" }, { "docid": "293fe26f9c765671800d29b631e9492d", "score": "0.6718106", "text": "def delete_index(self, table_name, index_name):\n self.otsclient.delete_search_index(table_name, index_name)", "title": "" }, { "docid": "da3424be9a576c98bf4fb3b055acce30", "score": "0.6638721", "text": "def delete_index(self, index_name):\n result = None\n for attempt in range(1, self.RETRY_ATTEMPTS + 1):\n try:\n result = self.connection.indices.delete(index=index_name)\n break\n except es_exceptions.NotFoundError: # pragma: no cover\n result = False\n break\n except (es_exceptions.ConnectionTimeout, es_exceptions.ConnectionError): # pragma: no cover\n logging.warning(\"ESClient.delete_index connection timeout\") # Retry on timeout\n self.connect() # Not sure if this is helpful, or connection is lazy?\n continue\n\n if not result: # pragma: no cover\n logging.warning(\"ESClient.delete_index failed for {}\".format(index_name))\n return result", "title": "" }, { "docid": "bc7338931dd2d83251bd1cca7995a193", "score": "0.6628879", "text": "def index_delete():\n db.delete_mappings()\n print(\"All URLs deleted\")\n return \"\", 204", "title": "" }, { "docid": "930531d84ec47b7048185ee174971b29", "score": "0.652851", "text": "def delete_doc(self, index, doc_id):\n body = self.bulk_body[index]\n body.write(json.dumps({'delete': {'_id': doc_id}}))\n body.write('\\n')", "title": "" }, { "docid": "a2eee6d95e9cb9cbf13e8450c8b41654", "score": "0.6515346", "text": "def delete(cls, dataset, index):\n query = {INDEX: index,\n DATASET_ID: dataset.dataset_id}\n query = cls.encode(query, dataset=dataset)\n\n super(cls, cls()).delete(query)", "title": "" }, { "docid": "ccfaf486f924d803a05db26516f16d62", "score": "0.6425481", "text": "def delete_index(self, model_or_type):\n index = self._index(model_or_type)\n self.es.indices.delete(index=index, ignore=400)", "title": "" }, { "docid": "da063eef9ab4b712d2fc3e99193f77be", "score": "0.64055234", "text": "def delete_vertex_index(self, index_name):\r\n path = build_path(index_path, vertex_path, index_name)\r\n params = None\r\n return self.request.delete(path, params)", "title": "" }, { "docid": "8f0d0c41eb3aac508009bd80dc6a11df", "score": "0.6259716", "text": "def remove_index(self):\n\t\ttry:\n\t\t\tconnection.indices.delete_index(self.colName)\n\t\t\tprint \"index\", self.colName, \"removed from ElasticSearch.\"\n\t\texcept:\n\t\t\tpass", "title": "" }, { "docid": "c709ca37bd6fe7c989091e5a22efbc7b", "score": "0.6230911", "text": "def delete(self, *args, **kwargs):\n return self.invoke(\"DELETE\", *args, **kwargs)", "title": "" }, { "docid": "42481b423de005d791f99bc9ea95bba3", "score": "0.61892015", "text": "def DELETE(self, req):\n return req.get_response(self.app)", "title": "" }, { "docid": "a799030c7e95afee955192f231558698", "score": "0.6158135", "text": "def delete(self):\r\n url = self._url + \"/delete\"\r\n params = {\r\n \"f\" : \"json\",\r\n }\r\n return self._con.post(path=url,\r\n postdata=params)", "title": "" }, { "docid": "ebe8f6f01c6260cd0c9ee299aefe9874", "score": "0.6154963", "text": "def clear_index(self, index_name):\n try:\n if self.es_client is None or not self.es_client.ping():\n self.connect()\n\n if self.es_client.indices.exists(index_name):\n self.es_client.indices.delete(index=index_name)\n logger.info('Deleted existing index')\n else:\n logger.info('Index does not exist')\n except:\n logger.exception(\"could not clear index\")", "title": "" }, { 
"docid": "7201c4164be6498bdf7bb3ce90cd2a4b", "score": "0.613936", "text": "def delete_row_by_index(self, index):\n entry = self._get_row_entries(self.query)[index]\n self.gd_client.DeleteRow(entry)\n del self.entries[index]", "title": "" }, { "docid": "b5d97fa2bed127e3f75bdb0d9189293e", "score": "0.612475", "text": "def delete_index(sender, instance, **kwargs):\n if not hasattr(instance, \"fields_to_index\"):\n return\n content_type = ContentType.objects.get_for_model(instance)\n search = Search.objects.get(content_type__pk=content_type.id, object_id=instance.id)\n search.delete()", "title": "" }, { "docid": "4c14b18105b269478e3ce60d80696b12", "score": "0.6120686", "text": "def delete_edge_index(self, index_name):\r\n raise NotImplementedError", "title": "" }, { "docid": "ec3bbc9dac5d3711223187263668cbb1", "score": "0.61012095", "text": "def delete_vertex_index(self, index_name):\r\n raise NotImplementedError", "title": "" }, { "docid": "fb8f4d793d5b837ff35d91aae85fc710", "score": "0.60887855", "text": "def test_delete_no_index_id():\n config = CORTXS3Config()\n response = CORTXS3KVApi(config, CONNECTION_TYPE_PRODUCER).delete(None, \"test_key1\")\n if (response is not None):\n assert response[0] is False\n assert response[1] is None", "title": "" }, { "docid": "b42750be30eaaf602d22d5272af815e8", "score": "0.60624003", "text": "def drop_index(self, timeout=None, **kwargs):\n if self.has_index() is False:\n raise IndexNotExistException(0, ExceptionsMessage.IndexNotExist)\n conn = self._get_connection()\n tmp_index = conn.describe_index(self._name, \"\")\n if tmp_index is not None:\n index = Index(self, tmp_index['field_name'], tmp_index, construct_only=True)\n index.drop(timeout=timeout, **kwargs)", "title": "" }, { "docid": "27304f24eb35bda7e924925174e29243", "score": "0.605294", "text": "def delete(self, index, doc_type=None, id=None, *,\n consistency=default, parent=default, refresh=default,\n replication=default, routing=default, timeout=default,\n version=default, version_type=default):\n params = {}\n if consistency is not default:\n if not isinstance(consistency, str):\n raise TypeError(\"'consistency' parameter is not a string\")\n elif consistency.lower() in ('one', 'quorum', 'all'):\n params['consistency'] = consistency.lower()\n else:\n raise ValueError(\n \"'consistency' parameter should be one of \"\n \"'one', 'quorum', 'all'\")\n if replication is not default:\n if not isinstance(replication, str):\n raise TypeError(\"'replication' parameter is not a string\")\n elif replication.lower() in ('async', 'sync'):\n params['replication'] = replication.lower()\n else:\n raise ValueError(\"'replication' parameter should be one of \"\n \"'async', 'sync'\")\n if timeout is not default:\n params['timeout'] = timeout\n if parent is not default:\n params['parent'] = parent\n if refresh is not default:\n if refresh != 'wait_for':\n refresh = str(bool(refresh)).lower()\n params['refresh'] = refresh\n if routing is not default:\n params['routing'] = routing\n if version is not default:\n params['version'] = int(version)\n\n if version_type is not default:\n if not isinstance(version_type, str):\n raise TypeError(\"'version_type' parameter is not a string\")\n elif version_type.lower() in ('internal', 'external',\n 'external_gt', 'external_gte',\n 'force'):\n params['version_type'] = version_type.lower()\n else:\n raise ValueError(\"'version_type' parameter should be one of \"\n \"'internal', 'external', 'external_gt', \"\n \"'external_gte', 'force'\")\n\n _, data = yield from 
self.transport.perform_request(\n 'DELETE',\n _make_path(index, doc_type, id),\n params=params)\n\n return data", "title": "" }, { "docid": "afe129d957ee103a7f4972f5733e4cc8", "score": "0.6036717", "text": "def delete(self):\n self.connection.delete(self._get_endpoint(), expected_status=204)", "title": "" }, { "docid": "66214878602303fa696faa2c6e79f863", "score": "0.6029086", "text": "async def delete(self, *args, **kwargs):\n raise HTTPError(405)", "title": "" }, { "docid": "9a8cd0dfc0d6c9482a66d5cb507d8aa9", "score": "0.60136527", "text": "def remove_from_index(index, model):\r\n if not current_app.elasticsearch:\r\n return\r\n current_app.elasticsearch.delete(index=index, id=model.id)", "title": "" }, { "docid": "97b3028874d2f7100b89287a37dbda56", "score": "0.59986424", "text": "def delete_vertex_index(self, name):\r\n return self.delete_index(name)", "title": "" }, { "docid": "e4cd091ff3fe002770b6694b02813387", "score": "0.5991627", "text": "def close(self, index, params=None):\r\n _, data = self.transport.perform_request('POST', _make_path(index, '_close'),\r\n params=params)\r\n return data", "title": "" }, { "docid": "10fc3cd5c91f39f7a66c8f9012483132", "score": "0.598831", "text": "def delete(self, result, pk=None):\r\n return Response({\"message\": \"Delete\"})", "title": "" }, { "docid": "b617ddb7a0d25aeb401389487391765d", "score": "0.59695095", "text": "def delete_by_expired_time(self, index: str):\n return self.client.query(\n q.map_(\n lambda _, ref: q.delete(ref),\n q.paginate(\n q.range(\n q.match(q.index(index)), q.time(\"2020-01-01T00:00:00Z\"), q.time_add(q.now(), 1, \"minutes\")\n )\n )\n )\n )", "title": "" }, { "docid": "651a3bdb95eb81201794bef2a8ce828a", "score": "0.5954555", "text": "async def delete_alert(self, alert_index: int):\n if not self._alerts_loaded:\n raise ValueError(\"Alerts not loaded, please run update_alerts first.\")\n alert_id = self._get_alert_by_index(alert_index)\n if alert_id:\n return await self._request(Methods.DELETE, f\"alerts/{alert_id}/\")", "title": "" }, { "docid": "0bd075e90a9cbcc5b8f4bc2d6748188b", "score": "0.59456956", "text": "def deleted(self, obj):\n obj.delete(commit=True)\n return '', HTTPStatus.NO_CONTENT", "title": "" }, { "docid": "75c8829379f433ea71df79f22705438e", "score": "0.5917876", "text": "def delete(self, del_index):\n #print(\"len() == \" + str(len(self.messages)))\n del self.messages[del_index]", "title": "" }, { "docid": "9ee40c7b0ffed1bc5b9d7239f3409345", "score": "0.5910536", "text": "def random_index(app):\n test_index = Index(uuid4().hex)\n test_index.create()\n app.cluster.health(wait_for_status='yellow')\n\n yield test_index\n\n test_index.delete()", "title": "" }, { "docid": "44180fe385cca531fe7ad7caeb93ca72", "score": "0.59082115", "text": "def delete(self):\n self.request().delete()", "title": "" }, { "docid": "44180fe385cca531fe7ad7caeb93ca72", "score": "0.59082115", "text": "def delete(self):\n self.request().delete()", "title": "" }, { "docid": "6ea97c881a067e18b39dbf40f516c0ec", "score": "0.58988196", "text": "def test_delete_error(client, indexd, entities):\n\n response_mock = MagicMock()\n response_mock.status_code = 500\n response_mock.json.return_value = {'error': 'fake error message'}\n\n def exc():\n raise HTTPError()\n response_mock.raise_for_status = exc\n indexd.delete.return_value = response_mock\n\n init = GenomicFile.query.count()\n\n r = _new_genomic_file(client)\n kf_id = r['results']['kf_id']\n\n response = client.delete(url_for(GENOMICFILE_URL,\n kf_id=kf_id),\n headers={'Content-Type': 
'application/json'})\n\n resp = json.loads(response.data.decode(\"utf-8\"))\n\n assert indexd.delete.call_count == 1\n assert 'fake error message' in resp['_status']['message']\n assert GenomicFile.query.count() == init + 1", "title": "" }, { "docid": "4d3811d3165ac51f93f6620b559ad868", "score": "0.58864546", "text": "def deleteEntity(self, index):\n\t\tself._app.deleteSpecies(str(self.item(index, 2).text()))\n\t\tself.entityDeleted.emit('species')", "title": "" }, { "docid": "f92342752b7a5febc2b64fdba1379ec3", "score": "0.5874578", "text": "def delete(self, request, pk=None):\n return Response({'method': 'DELETE'})", "title": "" }, { "docid": "8f9d2ffd19342f985c5929fdc2cef150", "score": "0.58699054", "text": "def delete(self) -> Tuple[str, int]:\n redis_client.flushall()\n return \"\", 204", "title": "" }, { "docid": "d4efb0b31a584750ca39ea9e558b1783", "score": "0.5860465", "text": "def delete(self):\r\n url = self._url\r\n\r\n response = self.http_request(url, 'DELETE')\r\n if response.status != 204:\r\n self.raise_http_error(response)", "title": "" }, { "docid": "71939c599b73b9f4fd73f1a9980914a9", "score": "0.5860137", "text": "def get_drop_index_sql(self, model, index_name):\n return SQLResult(sql_delete_index(connection=self.connection,\n model=model,\n index_name=index_name))", "title": "" }, { "docid": "3c5c1ceba1e5fd032c154bb61ef83ab9", "score": "0.5856297", "text": "def delete(self):\n return self.set_method('DELETE').send()", "title": "" }, { "docid": "f2d2b195624bf0a20abdd7f4dec637d3", "score": "0.5853538", "text": "def delete_mapping(self, index, doc_type, params=None):\r\n _, data = self.transport.perform_request('DELETE', _make_path(index, '_mapping', doc_type),\r\n params=params)\r\n return data", "title": "" }, { "docid": "ceb5062d47d0ab08a1a2b2392e62c408", "score": "0.5850006", "text": "def delete(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"delete\"), kwargs)", "title": "" }, { "docid": "92e3148bff6f98f055cdc9632c929dfe", "score": "0.58487797", "text": "def remove_client_by_index(self, index):\n del self.__clients[index]\n return", "title": "" }, { "docid": "a0c790d42f8dfd21e44e4836c64edff5", "score": "0.5834356", "text": "def delete(url: str, kwargs=None, result_key: str = None, result_index: int = None):\n request = lambda: __local__.make_call(\"DELETE\", url, kwargs, result_key, result_index)\n return request()", "title": "" }, { "docid": "8070a417c556c806cf21e7eb3e2b4a01", "score": "0.5832157", "text": "def delete(cls, doc_type, doc_id, **request_params):\n\n response = cls.connection.delete(cls.index_name, doc_type, doc_id,\n **request_params)\n if response[const.OK]:\n return True\n else:\n raise exceptions.DeleteDocumentError(\"Failed to delete doc %s: %s\" %\n (doc_id, str(response)))", "title": "" }, { "docid": "d771f6c5d9d39e7c52d310f1ec14fa6e", "score": "0.5804419", "text": "def deleteRuleIndex(Self,Index):\n Status, Output = commands.getstatusoutput('iptables -t \"%s\" -D \"%s\" %i'\n % (Self.Table, Self.Chain, Index+1))\n if Status:\n raise IPTablesError(Output)", "title": "" }, { "docid": "59672c0c274b88fd4e67ce967508693f", "score": "0.57850677", "text": "def test_delete_success():\n httpconnection = Mock(spec=HTTPConnection)\n httpresponse = Mock(spec=HTTPResponse)\n httpresponse.status = 204\n httpresponse.getheaders.return_value = \\\n 'Content-Type:text/html;Content-Length:14'\n httpresponse.read.return_value = b'{}'\n httpresponse.reason = 'NO CONTENT'\n httpconnection.getresponse.return_value = httpresponse\n\n config = 
CORTXS3Config()\n response = CORTXS3KVApi(config, CONNECTION_TYPE_PRODUCER, connection=httpconnection).delete(\"test_index1\", \"test_key1\")\n if (response is not None):\n assert response[0] is True", "title": "" }, { "docid": "e7539fe09af17b677e4942508278989c", "score": "0.578451", "text": "def delete(self, *args, **kw):\r\n kw['method'] = 'DELETE'\r\n return self.open(*args, **kw)", "title": "" }, { "docid": "159cc419ba5a76649ebd71f775581ed6", "score": "0.57576215", "text": "def __delitem__(self, index):\n return self._list.__delitem__(index)", "title": "" }, { "docid": "fe6334a615ecd4c4f062232ebd5f8916", "score": "0.5754161", "text": "def delete_product(request):\r\n if request.method == 'POST':\r\n post_body = json.loads(request.body)\r\n work_id = post_body['workID']\r\n try:\r\n product = Product.objects.get(id = work_id)\r\n product.delete()\r\n # ES delete start\r\n # response = requests.post(settings.ES_DELETE_URL, data=json.loads({\"query\":{\"match\":{\"ID\": work_id}}}), headers={\"content-type\":\"application/json\"})\r\n # if response.status_code != 200:\r\n # raise RuntimeError('Index has not been deleted!')\r\n # ES delete end\r\n return HttpResponse(json.dumps(\"succeed\"), status = 200, content_type = \"application/json\")\r\n except:\r\n return HttpResponse(json.dumps(\"error\"), status = 400, content_type = \"application/json\")", "title": "" }, { "docid": "abce249be9fa576245e45e26aef7aa9f", "score": "0.5749975", "text": "def test_delete(client, indexd, entities):\n init = GenomicFile.query.count()\n\n r = _new_genomic_file(client)\n kf_id = r['results']['kf_id']\n\n response = client.delete(url_for(GENOMICFILE_URL,\n kf_id=kf_id),\n headers={'Content-Type': 'application/json'})\n\n resp = json.loads(response.data.decode(\"utf-8\"))\n\n assert 'genomic_file' in resp['_status']['message']\n assert 'deleted' in resp['_status']['message']\n assert GenomicFile.query.count() == init\n assert indexd.delete.call_count == 1", "title": "" }, { "docid": "d50d1cb16eec2884c76ad106aef3383d", "score": "0.574983", "text": "def random_index():\n index = Index(uuid4().hex)\n\n yield index\n\n if index.exists():\n index.delete()", "title": "" }, { "docid": "5b6836fce28d752bcd6e4ff68dfae2ad", "score": "0.57296926", "text": "def _delete_request(self, key: str) -> Response:\n\n try:\n del self._store[key]\n except KeyError:\n return Response(ResponseEnum.NOT_OK.name)\n else:\n return Response(ResponseEnum.OK.name)", "title": "" }, { "docid": "e3e75c9ff2ca93ca1ecfb5051fc3be5a", "score": "0.5703314", "text": "def delete_warmer(self, index, name, params=None):\r\n _, data = self.transport.perform_request('DELETE', _make_path(index, '_warmer', name),\r\n params=params)\r\n return data", "title": "" }, { "docid": "3009b4bc09c74d17beb39e72fee7613a", "score": "0.5700128", "text": "def delete(self,request, pk=None):\n return Response({'method':'DELETE'})", "title": "" }, { "docid": "d801ec24f2a33b3667d08fbe51d91b2e", "score": "0.56980693", "text": "def delete(self,request,pk=None):\n return Response({'method':'delete'})", "title": "" }, { "docid": "b32177a3244d08dc332701a6829d4876", "score": "0.56814003", "text": "def delete_vertex_index(self, name):\r\n raise NotImplementedError", "title": "" }, { "docid": "a2608c2eaa486dd673d61e5e6dcb3874", "score": "0.56675255", "text": "def delete_from_index(self, index = -1, noCopy = False):\n Validator.validate_index(self.sequence_store.sequence, index)\n self.sequence_store.sequence.pop(index)\n if(not noCopy):\n self._save_sequence()\n return 
self.sequence_store.sequence\n # Another possible solution: return sequence[0:index] + sequence[index + 1: len(sequence)]", "title": "" }, { "docid": "23d146854512adbf9c08693cd0e74f72", "score": "0.5661111", "text": "def __delitem__(self, index):\n if self.__data.has_key(index):\n del (self.__data[index])", "title": "" }, { "docid": "b2f5719779ebcad677d740da3dfe8e62", "score": "0.5657717", "text": "def cmd__delete(self, index):\n\n entry = self.main_list.entries[int(index)]\n obj = entry.data['object']\n\n self.info(f'Marking \"{obj.title}\" as Deleted...')\n self.client._mailer.store(f'{obj.uid}', '+FLAGS', '\\\\Deleted')\n self.info('Expunge...')\n self.client._mailer.expunge()\n self.info('Message deleted!')\n self.enqueue(self.reload)", "title": "" }, { "docid": "0f80cdf3e5ec39b902a2e88239113e62", "score": "0.56442374", "text": "def test_delete_failure():\n httpconnection = Mock(spec=HTTPConnection)\n httpresponse = Mock(spec=HTTPResponse)\n httpresponse.status = 404\n httpresponse.getheaders.return_value = \\\n 'Content-Type:text/html;Content-Length:14'\n httpresponse.read.return_value = b'{}'\n httpresponse.reason = 'NO CONTENT'\n httpconnection.getresponse.return_value = httpresponse\n\n config = CORTXS3Config()\n response = CORTXS3KVApi(config, CONNECTION_TYPE_PRODUCER, connection=httpconnection).delete(\"test_index1\", \"test_key1\")\n if (response is not None):\n assert response[0] is False", "title": "" }, { "docid": "88976ba4d86c5331bf81ed2dfd47abdd", "score": "0.5641066", "text": "def delete(self, **kwargs):\n return self.session.delete(self.uri, params={**self._constraints, **kwargs})", "title": "" }, { "docid": "4ed53f16322088aa9d9166aef0c9c0e9", "score": "0.5633039", "text": "def delete(self):\n from esify import session\n if(session.has_key('logged_in') != True):\n return \"Forbidden accesss\", 403\n if delete_account(session[\"X-Auth-Token\"]):\n return 'Deleted account', 200\n else:\n return 'Bad Request', 400", "title": "" }, { "docid": "417a6b2b667e2ddf10ffd54140e60b06", "score": "0.56330204", "text": "def RemoveRowByIndex(self, index):\n del self._rows[index]", "title": "" }, { "docid": "dd376005e5b310a375927863672e62e4", "score": "0.5632864", "text": "def delete(self):\n self.res.delete()\n log.debug(f\"Deleted object {self.url}\")", "title": "" }, { "docid": "49e5b6c3066402aec1e0d2578ec77d2a", "score": "0.56307876", "text": "def unindex(self, obj):\n\n records = self._get_records(obj)\n for record in list(records):\n try:\n record.delete()\n except AssertionError:\n logging.exception(\"Something went wrong while unindexing an index record.\")", "title": "" }, { "docid": "35e5fbf1d5ae28fb0174434b6262d5ec", "score": "0.55985737", "text": "def __delitem__(self, index):\n pos, idx = self._fen_findkth(self._len + index if index < 0 else index)\n self._delete(pos, idx)", "title": "" }, { "docid": "bb583837c5c4b786d801a3b4e7da862d", "score": "0.5593044", "text": "def remove(self, index):\n del self._list[index]", "title": "" }, { "docid": "15f2b2b223d4b47bc0ec8a69b0c8381c", "score": "0.5583165", "text": "def do_dry_run(self):\n show_dry_run(self.index_list, 'delete_indices')", "title": "" }, { "docid": "a4e72bdd0c31b77ea530eaa915e63945", "score": "0.5581254", "text": "def delete( request ) :\n\n log = logging.getLogger(__name__)\n\n log.debug( 'delete entry from the database' )\n\n idx = request.GET.get('id', 'None')\n\n if idx == 'None' : return JsonResponse({'error_flag':True})\n\n category_obj = models.Document( id=int(idx) )\n category_obj.delete()\n\n context = { 'id' 
: idx,\n 'error_flag' : False}\n\n\n return JsonResponse(context)", "title": "" }, { "docid": "e9641abde9759d45da652d983562c7e5", "score": "0.55810785", "text": "def uiFormDelete(form, index):\n\n clibui.uiFormDelete(form, index)", "title": "" }, { "docid": "5ef52efe3f122ef6d106e56f3744f669", "score": "0.5577493", "text": "def delete(self, loc):\n return Index(np.delete(self._data, loc), name=self.name)", "title": "" }, { "docid": "c0b79eb665e25b34f0b41d0ff95651ee", "score": "0.55724984", "text": "def delete(*args, **kwargs):\n return __delete(*args, **kwargs)", "title": "" }, { "docid": "3cbef3bc9f3f21401de7b284d4b875cd", "score": "0.5568585", "text": "def delete(self, id):\n return make_response(DAO.delete_by_id(id), 204)", "title": "" }, { "docid": "29f3661903a92546bef2f07223bf8481", "score": "0.5566772", "text": "def delete(self):\n self.method = \"DELETE\"\n self.send()", "title": "" }, { "docid": "29f3661903a92546bef2f07223bf8481", "score": "0.5566772", "text": "def delete(self):\n self.method = \"DELETE\"\n self.send()", "title": "" } ]
69e1b9bb94dfdc0b51ac7e8fb8227fdb
Generates reads with a given error rate. >>> from Bio.Seq import Seq >>> contigs = dict() >>> contigs['1'] = Seq('ACGATGAGTAGAGACAGAGATAGAGAGATAGAACGATGAGTAGAGAG') >>> rec = None
[ { "docid": "6ac9ac1b7ee9747624ea4b8ba384f9b0", "score": "0.5562005", "text": "def gen_reads(contigs, n, read_length, sub_erate):\n \n contig_names = list(contigs.keys())\n contig_lens = dict()\n\n for i in contigs:\n contig_lens[i] = len(contigs[i])\n \n nread = 0\n done = False\n \n while not done:\n contig_name = random.choice(contig_names)\n loc = random.randint(0, contig_lens[contig_name] - read_length + 1)\n read_seq = contigs[contig_name][loc:loc+read_length].upper()\n\n if 'N' in read_seq:\n continue\n\n nread += 1\n done = True if nread == n else False\n\n read_seq_m = introduce_error(read_seq, sub_erate)\n read_qual = gen_read_quals(read_length=read_length)\n \n yield 'READ' + str(nread), contig_name, loc, str(read_seq_m), read_qual", "title": "" } ]
[ { "docid": "8a9afa899340c5163c92f8b1efa1fa13", "score": "0.52502173", "text": "def iter_record(record: SeqRecord) -> Generator[Tuple, None, None]:\n for nuc, qual in zip(record, record.letter_annotations[\"phred_quality\"]):\n # prob of error\n prob = 10 ** -(qual / 10)\n yield nuc, prob", "title": "" }, { "docid": "1090d1fd744c4a27da0306fcae3e40c0", "score": "0.5024106", "text": "def introduce_error(read_seq, sub_erate):\n \n pos_to_mutate = []\n for idx, val in enumerate(np.random.uniform(0, 1/sub_erate, len(read_seq))):\n if int(val) == SUCCESS:\n pos_to_mutate.append(idx)\n \n read_seq_m = read_seq.tomutable()\n \n for idx in pos_to_mutate:\n read_seq_m[idx] = random.choice(ECHOICE[read_seq_m[idx]])\n \n return read_seq_m", "title": "" }, { "docid": "98482a5df10fa2f35bce01b895794382", "score": "0.50224924", "text": "def character_error_rate(reference, compared) -> float:\n cer, _ = character_error_rate_n(reference, compared)\n return cer", "title": "" }, { "docid": "4ce653aae03ba900b80a784ce70b0113", "score": "0.50088036", "text": "def rate(r):\n ...", "title": "" }, { "docid": "b8d2d02c0efff6de19aa234e2c25f4f1", "score": "0.4937345", "text": "def get_errors(ref, read, is_seq=False):\n if len(read) == 0:\n print(\"LOST SEQUENCE: ignoring errors\")\n return 0, 0, 0, 0\n ins_pos, dels_pos, subs_pos, ref_align, read_align = get_error_summary(ref, read, is_seq)\n ref_length = len(ref)\n ins_count = np.sum(ins_pos)\n dels_count = np.sum(dels_pos)\n subs_count = np.sum(subs_pos)\n return ins_count / ref_length * 100, dels_count / ref_length * 100, subs_count / ref_length * 100, (ins_count + dels_count + subs_count) / ref_length * 100", "title": "" }, { "docid": "9f6e2e1203f2e36f2acbe713bde65071", "score": "0.48749697", "text": "def get_error_rates():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n # fixed as per review, displays only the ones which are greater than 1.5\n # (stated in project built it chapter)\n c.execute(\"SELECT date, rate \"\n \"FROM \"\n \"(SELECT e.date, \"\n \"((e.errors*100)::DECIMAL/d.total_activity)::DECIMAL as rate \"\n \"FROM error_stats e, day_acitvity d \"\n \"WHERE d.date = e.date \"\n \"ORDER BY rate DESC) \"\n \"AS r \"\n \"WHERE r.rate >1.5\")\n errors = c.fetchall()\n db.commit()\n db.close()\n return errors", "title": "" }, { "docid": "f6f837f84aea6051e0357f18aeef14f3", "score": "0.48508462", "text": "def error_rate_estimation(self) -> float:\n # Calculate error rate of all generated keys\n num_errors = 0\n for i in range(self.key_num):\n if self.request_to is not None:\n key_xor = self.keys[i] ^ self.request_to.keys[self.node][i] # in decimal\n elif self.request_from is not None:\n key_xor = self.keys[i] ^ self.request_from.keys[self.node][i] # in decimal\n num_errors += bin(key_xor).count('1') # count the number of errors\n error_rate = num_errors / (self.key_num * self.key_length)\n self.node.env.logger.info(f\"End-to-end error rate: {error_rate:.4f}\")\n print(f\"End-to-end error rate: {error_rate:.4f}\")\n return error_rate", "title": "" }, { "docid": "fb3c0bdef6d838162ab6390a68893d85", "score": "0.48150572", "text": "def uniform_reads(mat, pat, ref, depth, read_len):\n len_seq = len(ref)\n iterations = (len_seq - read_len + 1) * depth #to get average depth\n reads = []\n positions = []\n\n for i in range(iterations):\n mat_or_pat = randint(0, 2) #random stuff\n start_ind = randint(0, len_seq - read_len + 1)\n if mat_or_pat == 0:\n initial_read = mat[start_ind : start_ind + read_len]\n error_read = rr.rand_variant(initial_read, 30000) 
#adds errors\n reads.append(error_read)\n positions.append(start_ind)\n else:\n initial_read = pat[start_ind : start_ind + read_len]\n error_read = rr.rand_variant(initial_read, 30000) #adds errors\n reads.append(error_read)\n positions.append(start_ind)\n\n return reads, positions", "title": "" }, { "docid": "5b2b2b971b9b088d967fb2025445f49a", "score": "0.4757518", "text": "def interpolated(self, precisionAtRecall):\n\n interpolatedPatR = {'pAtR0': [0], 'pAtR10': [0], 'pAtR20': [0], 'pAtR30': [0],\n 'pAtR40': [0], 'pAtR50': [0], 'pAtR60': [0], 'pAtR70': [0], 'pAtR80': [0], 'pAtR90': [0], 'pAtR100': [0]}\n for recall in precisionAtRecall.keys():\n if recall == 100:\n interpolatedPatR['pAtR100'].append(precisionAtRecall[recall])\n if recall >= 90 and recall <= 100:\n interpolatedPatR['pAtR90'].append(precisionAtRecall[recall])\n if recall >= 80 and recall <= 100:\n interpolatedPatR['pAtR80'].append(precisionAtRecall[recall])\n if recall >= 70 and recall <= 100:\n interpolatedPatR['pAtR70'].append(precisionAtRecall[recall])\n if recall >= 60 and recall <= 100:\n interpolatedPatR['pAtR60'].append(precisionAtRecall[recall])\n if recall >= 50 and recall <= 100:\n interpolatedPatR['pAtR50'].append(precisionAtRecall[recall])\n if recall >= 40 and recall <= 100:\n interpolatedPatR['pAtR40'].append(precisionAtRecall[recall])\n if recall >= 30 and recall <= 100:\n interpolatedPatR['pAtR30'].append(precisionAtRecall[recall])\n if recall >= 20 and recall <= 100:\n interpolatedPatR['pAtR20'].append(precisionAtRecall[recall])\n if recall >= 10 and recall <= 100:\n interpolatedPatR['pAtR10'].append(precisionAtRecall[recall])\n if recall >= 0 and recall <= 100:\n interpolatedPatR['pAtR0'].append(precisionAtRecall[recall])\n\n for key in interpolatedPatR:\n interpolatedPatR[key] = max(interpolatedPatR[key]) #taking the maximum value of all the precision at each level\n\n return interpolatedPatR", "title": "" }, { "docid": "cc9fd8eaf6a083cb91343e14a0eae1d9", "score": "0.4749846", "text": "def get_error_summary(ref, read, is_seq=False):\n if not is_seq:\n # Read from files\n with open(os.path.join(ref)) as ref_f:\n ref = ref_f.readlines()[1].strip()\n with open(os.path.join(read)) as read_f:\n read = read_f.readlines()[1].strip()\n\n error_summary = sm_align(ref, read)\n\n ref_align, read_align, _, ins_pos, dels_pos, subs_pos = error_summary\n ref_length = len(ref)\n ins_count = np.sum(ins_pos)\n dels_count = np.sum(dels_pos)\n subs_count = np.sum(subs_pos)\n print(\"Insertion errors %.3f%%, deletion errors %.3f%%, substitution errors %.3f%%, error rate %.3f%%\"\n % (ins_count / ref_length * 100, dels_count / ref_length * 100, subs_count / ref_length * 100, (ins_count + dels_count + subs_count) / ref_length * 100))\n\n return ins_pos, dels_pos, subs_pos, ref_align, read_align", "title": "" }, { "docid": "f9c16d3785dc1fda656de23a3a09546d", "score": "0.46689016", "text": "def genCRN(crn, reversible = True, rates = True, interpretation = None):\n if reversible:\n pcrn = combine_reversible_rxns(crn)\n else:\n pcrn = split_reversible_rxns(crn)\n\n if interpretation:\n icrn = []\n for rxn in pcrn:\n R = interpret(rxn.reactants, interpretation)\n P = interpret(rxn.products, interpretation)\n icrn.append(Reaction(R, P, rxn.k_fwd, rxn.k_rev))\n pcrn = remove_duplicate_rxns(remove_trivial_rxns(icrn))\n\n for rxn in pcrn:\n R = natsorted(rxn.reactants)\n P = natsorted(rxn.products)\n if rxn.k_rev == 0:\n rate = ' [k = {:g}]'.format(rxn.k_fwd) if rates else ''\n yield '{} -> {}{}'.format(' + '.join(R), ' + '.join(P), 
rate)\n else:\n if natsorted([R, P]) == [R, P]: \n rate = ' [kf = {:g}, kr = {:g}]'.format(rxn.k_fwd, rxn.k_rev) if rates else ''\n yield '{} <=> {}{}'.format(' + '.join(R), ' + '.join(P), rate)\n else:\n rate = ' [kf = {:g}, kr = {:g}]'.format(rxn.k_rev, rxn.k_fwd) if rates else ''\n yield '{} <=> {}{}'.format(' + '.join(P), ' + '.join(R), rate)", "title": "" }, { "docid": "cdf2206e19b49c5598b75016e3ea0fe5", "score": "0.45707858", "text": "def gen_read_quals(read_length):\n \n quals = 'I' * read_length\n return quals", "title": "" }, { "docid": "91475ba9762868f02115e492c70eea78", "score": "0.45671505", "text": "def check_read_quality(sam_record: pysam.AlignedSegment, struct_collection):\n read_ID = sam_record.query_name\n flag = sam_record.flag\n cigar = sam_record.cigarstring\n seq = sam_record.query\n read_length = sam_record.query_length\n\n # Only use uniquely mapped transcripts\n if flag not in [0, 16]:\n return [read_ID, 0, 0, read_length, \"NA\", \"NA\"]\n\n # Only use reads that are greater than or equal to length threshold\n if read_length < struct_collection.run_info.min_length:\n return [read_ID, 0, 1, read_length, \"NA\", \"NA\"]\n\n # Locate the MD field of the sam transcript\n try:\n md_tag = sam_record.get_tag('MD')\n except KeyError:\n raise ValueError(\"SAM transcript %s lacks an MD tag\" % read_ID)\n\n # Only use reads where alignment coverage and identity exceed\n # cutoffs\n coverage = tutils.compute_alignment_coverage(cigar)\n identity = tutils.compute_alignment_identity(md_tag, seq)\n\n if coverage < struct_collection.run_info.min_coverage or \\\n identity < struct_collection.run_info.min_identity:\n return [read_ID, 0, 1, read_length, coverage, identity]\n\n # At this point, the read has passed the quality control\n return [read_ID, 1, 1, read_length, coverage, identity]", "title": "" }, { "docid": "193c38f95ce24a81246f3565a46289c0", "score": "0.45529452", "text": "def from_mapped_read_collections(cls, error_rate, references, *args):\n obj = cls(references)\n\n for mrc in args:\n pileup = mrc.pileup()\n rid = mrc.reference.name\n\n coverage = mrc.coverage(pileup)\n\n for pos in range(0, len(pileup)):\n for event, event_count in pileup[pos].items():\n alt_allele = event.lower()\n if len(event) > 1:\n alt_allele = event[:1].lower()\n\n if alt_allele != '-' and alt_allele != \\\n mrc.reference.sub_seq(pos, pos).lower():\n\n if rid in obj.variants and pos+1 in obj.variants[rid] \\\n and alt_allele in obj.variants[rid][pos+1]:\n\n variant_obj = obj.variants[rid][pos+1][alt_allele]\n\n new_allele_count = \\\n event_count + variant_obj.info['AC']\n variant_obj.info['AC'] = new_allele_count\n variant_obj.info['AF'] = \\\n float(new_allele_count) / coverage[pos]\n\n else:\n event_frequency = \\\n float(event_count) / coverage[pos]\n\n variant_obj = NTVariant(chrom=mrc.reference.name,\n pos=pos+1,\n ref=mrc.reference.sub_seq(\n pos, pos).lower(),\n alt=alt_allele,\n info={\n 'DP': coverage[pos],\n 'AC': event_count,\n 'AF': event_frequency\n })\n\n obj.variants[rid][pos+1][alt_allele] = variant_obj\n\n for alt_allele, variant in obj.variants[rid][pos+1].items():\n variant.qual = obj.__calculate_variant_qual(\n error_rate, variant.info['AC'], variant.info['DP'])\n\n return obj", "title": "" }, { "docid": "cc7fb65da3eeaa7fc2f9d7a7b9d7796c", "score": "0.45317575", "text": "def calculate_error_rate(self, records):\r\n errors = 0\r\n for cur_record in records:\r\n hypo_illness = self.diagnose(cur_record.symptoms)\r\n if hypo_illness != cur_record.illness:\r\n errors += 1\r\n 
return errors / len(records)", "title": "" }, { "docid": "d91b50f9f320b730fa93a2d22eaa13e7", "score": "0.4528298", "text": "def error_rate_estimation(self) -> float:\n # Find the peer protocol\n for proto in self.peer.protocol_stack.protocols:\n if isinstance(proto, KeyGeneration) and proto.peer == self.node:\n peer_proto = proto\n\n # Calculate error rate of all sifted keys\n num_errors = 0\n for i in range(self.key_num):\n key_xor = self.sifted_keys[i] ^ peer_proto.sifted_keys[i] # in decimal\n num_errors += bin(key_xor).count('1') # count the number of errors\n self.error_rate = num_errors / (self.key_num * self.key_length)\n self.node.env.logger.info(f\"Key error rate: {self.error_rate:.4f}\")\n return self.error_rate", "title": "" }, { "docid": "5aca1316e4260f5109ff92b1a349dd9f", "score": "0.45107883", "text": "def testRaisesSamplingErrors(self):\n number_dict = {\"fragment1\": 3000000, \"fragment2\": 10}\n with self.assertRaises(KeyError):\n self.assertEqual(\n 13, self.tree.sample_landscape_richness(number_of_individuals=number_dict, n=1, community_reference=2)\n )\n number_dict2 = {\"fragment\": 10, \"fragment2\": 10}\n with self.assertRaises(KeyError):\n self.assertEqual(\n 13, self.tree.sample_landscape_richness(number_of_individuals=number_dict2, n=1, community_reference=2)\n )", "title": "" }, { "docid": "5a011c72fba6055f7384e0e1293a0a56", "score": "0.45050377", "text": "def tcperrfirstretransmissionsrate(self) :\n try :\n return self._tcperrfirstretransmissionsrate\n except Exception as e:\n raise e", "title": "" }, { "docid": "76925b77c703b57d00340db5e6b543a4", "score": "0.4498821", "text": "def rwRate(self, tn, rn):\n self.nrs = np.random.normal(0,self.rw_std, 3) # update normal random sample\n rate = np.zeros(3)\n for i in range(0,3):\n if rn[i]>self.rw_limit:\n rate[i] = -abs(self.nrs[i])\n elif rn[i]<-self.rw_limit:\n rate[i] = abs(self.nrs[i])\n else:\n rate[i] = self.nrs[i]\n return rate", "title": "" }, { "docid": "db75f7eea85f9268afe01d2e0cd9e829", "score": "0.44955212", "text": "def rerror(msg, record):\n if record.json and 'control_number' in record.json:\n recid = record.json.get('control_number')\n else:\n recid = record.id\n\n error('%s: %s' % (recid, msg))", "title": "" }, { "docid": "c320e7c55b566a60d72b98bb2b371968", "score": "0.44690904", "text": "def estimate_read_distribution(file_in, num_seq, n_chromosomes=None):\n try:\n with pysam.FastaFile(file_in) as fa:\n print(\"The file contains\", fa.nreferences, \"sequences\", file=sys.stderr)\n if num_seq:\n if n_chromosomes:\n print(\"Working only with the first\", n_chromosomes, file=sys.stderr)\n full_size = sum(fa.lengths[:n_chromosomes])\n reads_per_chrom = [\n int(s / full_size * num_seq) + 1\n for s in fa.lengths[:n_chromosomes]\n ]\n else:\n n_chromosomes = len(fa.lengths)\n full_size = sum(fa.lengths)\n reads_per_chrom = [\n int(s / full_size * num_seq) + 1 for s in fa.lengths\n ]\n except OSError: # fasta is not indexed do a naive fallback\n print(\"Error, fasta file not indexed, doing naive sampling...\", file=sys.stderr)\n if n_chromosomes:\n reads_per_chrom = [int(num_seq / n_chromosomes) + 1] * n_chromosomes\n else:\n print(\n \"Naive sampling needs the option --chromosomes...Aborting...\",\n file=sys.stderr,\n )\n sys.exit(1)\n # our samplig tends to slightly over sample, readjust by removing reads on random chromosomes\n extra_samples = sum(reads_per_chrom) - num_seq\n for idx in [random.randint(0, n_chromosomes - 1) for _ in range(extra_samples)]:\n # some scaffolds are so small that the don't 
even get a read to sample.\n while reads_per_chrom[idx] == 0:\n idx = random.randint(0, n_chromosomes - 1)\n reads_per_chrom[idx] -= 1\n return reads_per_chrom", "title": "" }, { "docid": "fca544178f0646853b98e9a50353f102", "score": "0.44323528", "text": "def error_rate(self):\n return self.item_counts['FileError'] / (self.item_counts['File'] + self.item_counts['FileError'])", "title": "" }, { "docid": "46cc1255d11f0fdd3a6871f300665142", "score": "0.44322392", "text": "def tcperrseventhretransmissionsrate(self) :\n try :\n return self._tcperrseventhretransmissionsrate\n except Exception as e:\n raise e", "title": "" }, { "docid": "e290d9ead6d4dd1a270fcc1ff6fc5178", "score": "0.44181174", "text": "def tcperrcookiepktseqrejectrate(self) :\n try :\n return self._tcperrcookiepktseqrejectrate\n except Exception as e:\n raise e", "title": "" }, { "docid": "d7c2d47149da6d9c16cef8290675a0fb", "score": "0.44157565", "text": "def error_analysis_and_figs(self,\n Imin_max_names_tup,\n seq=None,\n num_bootstraps=100,\n conf_pct=90,\n min_reads=5,\n out_dir=None,\n out_bname=None):\n if seq is None:\n seq = self.target\n\n fit_func = self.fit_func_given_Imin_max_names[Imin_max_names_tup]\n Imin, Imax = self.Imin_max_pairs_given_names[Imin_max_names_tup]\n Imin_name, Imax_name = Imin_max_names_tup\n\n ref_Kd = fit_func(seq, Imin=Imin, Imax=Imax, Imin_name=Imin_name)\n ref_dG = np.log(ref_Kd)\n\n read_names = self.IA.read_names_given_seq[seq]\n nclusters = range(3, min(100, len(read_names)))\n Kd_avg_errors, dG_avg_errors = [], []\n Kd_conf_errors, dG_conf_errors = [], []\n for n in nclusters:\n sys.stdout.write('.')\n sys.stdout.flush()\n bs_Kds = [fit_func(seq, Imin=Imin, Imax=Imax, Imin_name=Imin_name, max_clust=n, bootstrap=True)\n for _ in range(num_bootstraps)]\n bs_Kd_errors = [abs(ref_Kd - Kd) / 1000.0 for Kd in bs_Kds]\n bs_dG_errors = [abs(ref_dG - np.log(Kd)) for Kd in bs_Kds]\n Kd_avg_errors.append(np.average(bs_Kd_errors))\n Kd_conf_errors.append(np.percentile(bs_Kd_errors, conf_pct))\n dG_avg_errors.append(np.average(bs_dG_errors))\n dG_conf_errors.append(np.percentile(bs_dG_errors, conf_pct))\n print\n\n def c_over_sqrt_n(n, c):\n return c / np.sqrt(n)\n\n def fit_c_over_sqrt_n(ns, data):\n new_ns = [n for n, dd in zip(ns, data) if np.isfinite(n) and np.isfinite(dd) and n > 10]\n new_data = [dd for n, dd in zip(ns, data) if np.isfinite(n) and np.isfinite(dd) and n > 10]\n popt, pcov = curve_fit(c_over_sqrt_n, new_ns, new_data, maxfev=10000)\n return popt[0]\n\n fig, axes = plt.subplots(1, 2, figsize=(14, 6))\n for ax, label, units, avg_errors, conf_errors in zip(axes,\n ('$K_d$', 'ABA'),\n ('nM', '$k_B T$'),\n (Kd_avg_errors, dG_avg_errors),\n (Kd_conf_errors, dG_conf_errors)):\n fit_ns = np.linspace(1, max(nclusters), 300)\n c_avg = fit_c_over_sqrt_n(nclusters, avg_errors)\n c_conf = fit_c_over_sqrt_n(nclusters, conf_errors)\n avg_fit_vals = [c_over_sqrt_n(n, c_avg) for n in fit_ns]\n conf_fit_vals = [c_over_sqrt_n(n, c_conf) for n in fit_ns]\n\n min_reads_avg_fit = c_over_sqrt_n(min_reads, c_avg)\n\n ax.plot(nclusters, avg_errors, '.', label='Average Error')\n ax.plot(fit_ns, avg_fit_vals, label='Average Fit = $%.2f / \\sqrt{n}$' % c_avg)\n ax.plot(nclusters, conf_errors, '.', label='90% Confidence Interval')\n ax.plot(fit_ns, conf_fit_vals, '--', label='90%% Conf Interval Fit = $%.2f / \\sqrt{n}$' % c_conf)\n ax.plot([0, min_reads, min_reads], [min_reads_avg_fit, min_reads_avg_fit, 0], ':k')\n ax.set_xlim((0, 100))\n ax.set_xlabel('Number of clusters', fontsize=18)\n ax.set_ylabel('{} 
Error ({})'.format(label, units), fontsize=18)\n ax.legend(fontsize=14)\n ax.get_legend().get_frame().set_facecolor('white')\n ax.set_axis_bgcolor('white')\n ax.grid(False)\n\n for item in ax.get_xticklabels() + ax.get_yticklabels():\n item.set_fontsize(16)\n\n if out_dir:\n out_bname = '{}_{}_{}_error_analysis'.format(out_bname,\n Imin_name.replace(' ', '_'),\n Imax_name.replace(' ', '_'))\n fig.savefig(os.path.join(out_dir, out_bname + '.png'), dpi=300)\n fig.savefig(os.path.join(out_dir, out_bname + '.eps'))", "title": "" }, { "docid": "eafb96359d83001e91d56e06219cd70f", "score": "0.4408603", "text": "def error_rate(self):\n error = 0.0\n rows, cols = self.reviews.shape\n for idx in xrange(rows):\n for jdx in xrange(cols):\n if self.reviews[idx, jdx] > 0:\n error += (self.model[idx, jdx] - self.reviews[idx, jdx]) ** 2\n print error\n return error", "title": "" }, { "docid": "29da583e2521648401c3d55e84f7bbe6", "score": "0.4396956", "text": "def tcperrsecondretransmissionsrate(self) :\n try :\n return self._tcperrsecondretransmissionsrate\n except Exception as e:\n raise e", "title": "" }, { "docid": "015ac6df8b0a1f3b63b8211474439541", "score": "0.4392933", "text": "def tcperrfastretransmissionsrate(self) :\n try :\n return self._tcperrfastretransmissionsrate\n except Exception as e:\n raise e", "title": "" }, { "docid": "ef6f14cb469c57d4c2372149ac44833d", "score": "0.43763864", "text": "def q5_error_rate_on_test_set(self):\n test_data = [zip(*sentence) for sentence in self.test.tagged_sentences]\n self.model_tester = ModelTester(lambda x: self.model.decode(x), self.model.states, test_data)\n print(\"Q5: Error rate on model for test set:\", self.model_tester.get_error_rate())", "title": "" }, { "docid": "89bd4a2d9926a87406fed9f03ae0b2b0", "score": "0.43701538", "text": "def picard_rnaseq_metrics(picard, align_bam, ref, ribo=\"null\", out_file=None):\r\n base, ext = os.path.splitext(align_bam)\r\n if out_file is None:\r\n out_file = \"%s.metrics\" % (base)\r\n if not file_exists(out_file):\r\n with curdir_tmpdir() as tmp_dir:\r\n with file_transaction(out_file) as tx_out_file:\r\n opts = [(\"INPUT\", align_bam),\r\n (\"OUTPUT\", tx_out_file),\r\n (\"TMP_DIR\", tmp_dir),\r\n (\"REF_FLAT\", ref),\r\n (\"STRAND_SPECIFICITY\", \"NONE\"),\r\n (\"ASSUME_SORTED\", \"True\"),\r\n (\"RIBOSOMAL_INTERVALS\", ribo)]\r\n\r\n picard.run(\"CollectRnaSeqMetrics\", opts)\r\n return out_file", "title": "" }, { "docid": "a69be648a01718ad8e6c43cc6d7fa99a", "score": "0.4362417", "text": "def get_REACLIB(reader):\n\n # Create a list over reactions compatible with REACLIB\n reactions = []\n for element in reader.keywords[\"element\"]:\n for mass in reader.keywords[\"mass\"][element]:\n reactions.append(\"{}{}(n,g)\".format(element, mass))\n\n for reaction in reactions:\n # Get the rate index from the html-page\n write(\"Attempting to open \"+address_search+reaction+\"...\")\n res = scrape(address_search+reaction)\n if res is None:\n continue\n print(\"ok\")\n\n # Find an occurence of the rate index and use it \n pattern = re.compile(\"rateindex=(\\d+)\")\n rateindex = re.search(pattern, res.text)\n if rateindex is None:\n print(\"Could not find the rate index. Probably unexpected HTML-encoding. Skipping \", reaction)\n continue\n\n # Get the data\n print(\"Found the rate index. 
Attempting to download data\")\n link = address_data.format(rateindex.group(1))\n filename = \"{}_reaclib.txt\".format(reaction[:-5])\n if not save_data(filename, link):\n continue", "title": "" }, { "docid": "8868cb9832652039e522070923c4e249", "score": "0.43530092", "text": "def _roc_error(self):\n s = self\n # r1 is number of negatives with each score,\n # r2 is number of positives rated higher than each score\n # r3 is number of positives with each score\n # r4 is number of negatives rated lower than each score\n r1 = s.NE\n r2 = s.TP - s.PE\n r3 = s.PE\n r4 = s.TN\n r5 = r1 * r2 + 0.5 * r1 * r3\n r6 = r3 * (r4**2 + r4*r1 + (r1**2)/3)\n r7 = r1 * (r2**2 + r2*r3 + (r3**2)/3)\n N = float(len(s.nscores))\n P = float(len(s.pscores))\n W = r5.sum() / (N*P)\n Q2 = r6.sum() / (P * N**2)\n Q1 = r7.sum() / (N * P**2)\n W_stderr = nx.sqrt((W*(1-W)+(P-1)*(Q1-W**2)+(N-1)*(Q2-W**2))/(P*N))\n #print W, Q1, Q2, W_stderr\n self.W = W\n self.W_stderr = W_stderr", "title": "" }, { "docid": "85cf9c107101144b3fa90781015fc96f", "score": "0.43477654", "text": "def sample_rate(self, value: float) -> None:", "title": "" }, { "docid": "e18ebc4d060a1ca266f355884de6fbf5", "score": "0.43437263", "text": "def tcperrforthretransmissionsrate(self) :\n try :\n return self._tcperrforthretransmissionsrate\n except Exception as e:\n raise e", "title": "" }, { "docid": "a51a91c76c23b78c7ddc01deb3637ab7", "score": "0.43271446", "text": "def tcperrsixthretransmissionsrate(self) :\n try :\n return self._tcperrsixthretransmissionsrate\n except Exception as e:\n raise e", "title": "" }, { "docid": "d8ae5f90b92658825976eb6f27f2e9e7", "score": "0.43194607", "text": "def learing_rate(self, error_est):\r\n return 0.008*np.sqrt(error_est)+1", "title": "" }, { "docid": "b50f050ff779911f6daeb02f987593a9", "score": "0.43171683", "text": "def upsample_dem_rsc(rate=None, rsc_dict=None, rsc_filepath=None):\n if rsc_dict and rsc_filepath:\n raise TypeError(\"Can only give one of rsc_dict or rsc_filepath\")\n elif not rsc_dict and not rsc_filepath:\n raise TypeError(\"Must give at least one of rsc_dict or rsc_filepath\")\n elif not rate:\n raise TypeError(\"Must supply rate for upsampling\")\n\n if rsc_filepath:\n rsc_dict = sario.load_dem_rsc(rsc_filepath)\n\n outstring = \"\"\n for field, value in rsc_dict.items():\n # Files seemed to be left justified with 13 spaces? 
Not sure why 13\n # TODO: its 14- but fix this and previous formatting to be DRY\n if field.lower() in ('width', 'file_length'):\n new_size = _up_size(value, rate)\n outstring += \"{field:<14s}{val}\\n\".format(field=field.upper(), val=new_size)\n elif field.lower() in ('x_step', 'y_step'):\n # New is 1 + (size - 1) * rate, old is size, old rate is 1/(size-1)\n value /= rate\n # Also give step floats proper sig figs to not output scientific notation\n outstring += \"{field:<14s}{val:0.12f}\\n\".format(field=field.upper(), val=value)\n else:\n outstring += \"{field:<14s}{val}\\n\".format(field=field.upper(), val=value)\n\n return outstring", "title": "" }, { "docid": "59c19ceda683a072151c3e1c4c631c2d", "score": "0.4310556", "text": "def errorRate(self, rules):\n\n myErrorRate = 0\n for value in self.information:\n isValid = False\n for key in rules:\n for(minBound, maxBound) in rules[key]:\n if value >= minBound and value <= maxBound:\n isValid = True\n break\n\n if isValid:\n break\n\n if not isValid:\n myErrorRate = myErrorRate + value\n\n return myErrorRate", "title": "" }, { "docid": "4732d35a2f423e761ede8771cdef9139", "score": "0.43096858", "text": "def sample_rate(self) -> float:\n ...", "title": "" }, { "docid": "c5981da3b425b92ba4cc55a5eaa1a9f2", "score": "0.42989045", "text": "def testSaveAsFASTQFailsOnReadWithNoQuality(self):\n reads = Reads()\n read1 = Read('id1', 'AT', '!!')\n read2 = Read('id2', 'AC')\n reads.add(read1)\n reads.add(read2)\n error = \"Read 'id2' has no quality information\"\n self.assertRaisesRegexp(ValueError, error, reads.save, 'file', 'fastq')", "title": "" }, { "docid": "e89574573a2158fbc06fd1330d267fde", "score": "0.42934057", "text": "def recalibrate_fastq(fastq, infer_rg = False):\n if infer_rg is False:\n rgfun = lambda x: 0 #since we currently only support 1 fastq at a time\n else:\n rgfun = utils.fastq_infer_rg\n rg_to_int = dict()\n nrgs = 0\n\n meanq, *vectors = fastq_to_covariate_arrays(fastq, infer_rg)\n dqs = applybqsr.get_delta_qs(meanq, *vectors)\n with pysam.FastxFile(fastq[0]) as fin:\n for read in fin:\n rg = rgfun(read)\n rgint = rg_to_int.get(rg)\n if rgint is None:\n rgint = nrgs\n rg_to_int[rg] = rgint\n nrgs = nrgs + 1\n recalibrated_quals = utils.recalibrate_fastq(read, meanq, *dqs,\n rg = rgint, dinuc_to_int = utils.Dinucleotide.dinuc_to_int,\n secondinpair = utils.fastq_infer_secondinpair(read))\n strquals = ''.join((recalibrated_quals + 33).astype(np.uint32).view('U1'))\n print('@' + read.name)\n print(read.sequence)\n print('+')\n print(strquals)", "title": "" }, { "docid": "9bff7f3a5b7638e86478991a063393d4", "score": "0.42892268", "text": "def sample_rate(self) -> float:", "title": "" }, { "docid": "9f98ccdbe07d1fd75bf220b224b9ded8", "score": "0.42884278", "text": "def test_error_rate(estimator, clusterparams, gentype='default', runs=100):\n error_rate_list = list()\n\n # Do multiple runs\n for _x in range(runs):\n if gentype == 'default':\n samples, labels = make_blobs(**clusterparams)\n elif gentype == 'skewed':\n samples, labels = make_skewed_blobs(**clusterparams)\n elif gentype == 'unequal':\n samples, labels = make_unequal_blobs(**clusterparams)\n\n estimator.fit(samples)\n\n # Actual and estimated labels won't match so we must map them.\n # Each possibile mapping is tested and the lowest error rate is used.\n # e.g. 
for mapping, if there are three labels 0,1,2\n # -> mapping = (0,1,2), (0,2,1), (2,1,0), etc.\n # So if estimator label = 1 then we pretend it is the number at index 1 of mapping\n error_rate = np.inf\n for mapping in permutations(range(len(estimator.cluster_centers_))):\n error = 0\n for label, result in zip(labels, estimator.labels_):\n if label != mapping[result]:\n error += 1\n result = error/len(samples)\n if result < error_rate:\n error_rate = result\n error_rate_list.append(error_rate)\n\n mean = np.mean(error_rate_list)\n standard_deviation = np.std(error_rate_list)\n return {'mean': mean,\n 'std': standard_deviation,\n 'elabels': estimator.labels_,\n 'labels': labels,\n 'samples': samples,\n 'clusters': estimator.cluster_centers_}", "title": "" }, { "docid": "6cc3d49dcdcd6bdfe6df7b431e3ad1b4", "score": "0.42870006", "text": "def tcperrfifthretransmissionsrate(self) :\n try :\n return self._tcperrfifthretransmissionsrate\n except Exception as e:\n raise e", "title": "" }, { "docid": "42482b025a0dc4a0249a0378a2e5ae46", "score": "0.42857113", "text": "def Er(i: int, r: int) -> float:\n # if r = 0, then we're out of samples and has a constant value of 0\n return 0 if r <= 0 else capped(rvs[i], cmfs[i], evs[i], Er(i, r - 1))", "title": "" }, { "docid": "3199071d5d0e73e21a68962d0411eac9", "score": "0.4276371", "text": "def set_sample_rate(self, sample_rate=1.0E9):\n self.write('CLOCk:SRATe %d' % (int(sample_rate)))", "title": "" }, { "docid": "b75de98edcf8ec38e5311dc0cc333f8e", "score": "0.42590305", "text": "def RadiativeRecombinationRate(self, species, T):\n \n if self.rec == 0:\n return 0.0\n \n if self.rate_src == 'fk94':\n if self.rec == 'A':\n if species == 0:\n return 6.28e-11 * T**-0.5 * (T / 1e3)**-0.2 * (1. + (T / 1e6)**0.7)**-1.\n elif species == 1:\n return 1.5e-10 * T**-0.6353\n elif species == 2:\n return 3.36e-10 * T**-0.5 * (T / 1e3)**-0.2 * (1. + (T / 4e6)**0.7)**-1.\n elif self.rec == 'B':\n if species == 0:\n return 2.6e-13 * (T / 1.e4)**-0.85 \n elif species == 1:\n return 9.94e-11 * T**-0.6687\n elif species == 2:\n alpha = 3.36e-10 * T**-0.5 * (T / 1e3)**-0.2 * (1. + (T / 4.e6)**0.7)**-1 # To n >= 1\n \n if type(T) in [float, np.float64]:\n if T < 2.2e4:\n alpha *= (1.11 - 0.044 * np.log(T)) # To n >= 2\n else:\n alpha *= (1.43 - 0.076 * np.log(T)) # To n >= 2\n else:\n alpha[T < 2.2e4] *= (1.11 - 0.044 * np.log(T[T < 2.2e4])) # To n >= 2\n alpha[T >= 2.2e4] *= (1.43 - 0.076 * np.log(T[T >= 2.2e4])) # To n >= 2\n \n \n return alpha\n else:\n raise ValueError('Unrecognized RecombinationMethod. 
Should be A or B.')\n \n else:\n name = self.grid.ions[species]\n return self.ions[name]['recombRate'](T)", "title": "" }, { "docid": "e98138f38ee62707a06e36038f1923cc", "score": "0.42490926", "text": "def generate_bug(generation_rate, rate_std, stochastic=True):\n # deal with the std:\n ivl_mean = 1./generation_rate\n rel_std = rate_std / generation_rate\n ivl_std = rel_std * ivl_mean\n if stochastic:\n while 1:\n ivl = np.clip(np.random.normal(\n loc=ivl_mean, scale=ivl_std\n ),\n a_min=0., a_max=None)\n yield ivl\n else:\n while 1:\n yield ivl_mean", "title": "" }, { "docid": "dcbf9c11a22be331a2d971ed1398fc03", "score": "0.42483705", "text": "def LifetimeRatio(self,i):\n if i == 1: \n return self.rtau1\n elif i == 2: \n return self.rtau2\n elif i == 3: \n return self.rtau3\n else:\n print \"index must be 1,2 or 3\"\n sys.exit(0)", "title": "" }, { "docid": "f634d8c3251424cff3992f72600e37ee", "score": "0.4242704", "text": "def testUnequalLengths(self):\n error = 'Invalid read: sequence length \\(4\\) != quality length \\(3\\)'\n with self.assertRaisesRegexp(ValueError, error):\n Read('id', 'ACGT', '!!!')", "title": "" }, { "docid": "5115c4401605603a510178cd4ea5dfa6", "score": "0.42414904", "text": "def getAvgError(self):\n warnings.warn(\"use getRate instead\", DeprecationWarning)\n with self.mutex:\n return self.getError()", "title": "" }, { "docid": "f3f1aba4c71295e6b88c67096263430e", "score": "0.42324334", "text": "def return_rri(self, begsam, endsam):\n interval = endsam - begsam\n dat = empty(interval)\n k = 0\n \n with open(self.filename, 'rt') as f:\n [next(f) for x in range(12)]\n \n for j, datum in enumerate(f):\n \n if begsam <= j < endsam:\n dat[k] = float64(datum[:datum.index('\\t')])\n k += 1\n if k == interval:\n break\n \n return dat", "title": "" }, { "docid": "ecbbfc4d69973a66abef51c784761506", "score": "0.4231564", "text": "def tcperrthirdretransmissionsrate(self) :\n try :\n return self._tcperrthirdretransmissionsrate\n except Exception as e:\n raise e", "title": "" }, { "docid": "ddc22dd06c4f970c0dfa3cadbb58e2d3", "score": "0.42308506", "text": "def perfect_read_hits(sam_file, sam_data = None):\n if not sam_data:\n sam_data = SamData()\n\n gi_pat = re.compile(r'gi\\|(\\d+)\\|')\n align_count = 0\n print >> sys.stderr, 'Scanning SAM file %s to find alignments for each read' % sam_file\n sam_in = open(sam_file, 'r')\n for line in sam_in:\n #if line.startswith('@SQ'):\n # ref_fld = line.rstrip('\\n').split('\\t')[1]\n # gi_match = gi_pat.search(ref_fld)\n # if gi_match:\n # gi = gi_match.group(1)\n # sam_data.add_target(gi)\n if not line.startswith('@'):\n flds = line.rstrip('\\n').split('\\t')\n align_count += 1\n if align_count % 100000 == 0:\n print >> sys.stderr, 'Processed %d alignments' % align_count\n flags = int(flds[1])\n if (flags & bad_read) == 0:\n cigar = flds[5]\n cigar_ops = parse_cigar(cigar)\n if len(cigar_ops) == 1 and cigar_ops[0][0] == 'M':\n # Read aligns to target along its entire length. 
\n # Set the read length if we don't know it already.\n read_len = cigar_ops[0][1]\n sam_data.set_read_length(read_len)\n # Only count reads with zero mismatches\n if mismatch_count(flds) == 0:\n read_id = flds[0]\n target_gi = gi_pat.match(flds[2]).group(1)\n pos = int(flds[3])\n strand = read_strand(flags)\n sam_data.add_target(target_gi)\n sam_data.add_alignment(read_id, target_gi, pos, strand)\n sam_in.close()\n return sam_data", "title": "" }, { "docid": "c56c4d500cdf183961a1c0d6b4bd4420", "score": "0.423043", "text": "def __init__(self, kmer_model, reference_hdf, read_length_model, sample_rate=800., batch_size=1,\n dura_shape=None, dura_rate=None, pad_label=0):\n # save input params:\n self.batch_size = batch_size\n self.kmer_model = kmer_model\n self.reference_hdf = reference_hdf\n self.read_length_model = read_length_model\n self.sample_rate = sample_rate\n self.dura_shape_arg = dura_shape\n self.dura_rate_arg = dura_rate\n self.pad_label = pad_label\n if pad_label != 0: raise ValueError(\"ERR: padding values other than 0 are currently unsupported.\")\n\n # load gaussian model:\n self.num_kmers = 4**5 # total number of 5-mers\n kmer_model_npz = np.load(kmer_model)\n self.kmer_means = kmer_model_npz['means']\n self.kmer_stdvs = kmer_model_npz['stdvs']\n\n # load reference genome as file handle to HDF5:\n self.reference = h5py.File(reference_hdf, 'r')\n self.contigs = list(self.reference.keys())\n\n # hard-coded shape/rate parameters for gamma-distributed duration modelling:\n self.sample_rate = sample_rate\n self.duration_shape = 2.461964 if dura_shape is None else dura_shape\n self.duration_rate = 587.2858 if dura_rate is None else dura_rate\n\n # load read lengths model and normalize:\n if isinstance(read_length_model, tuple):\n self.read_lengths = np.zeros(read_length_model[1])\n for k in range(read_length_model[0], read_length_model[1]):\n self.read_lengths[k] = 1.\n self.read_lengths = self.read_lengths / np.sum(self.read_lengths)\n else:\n self.read_lengths = np.load(read_length_model)\n self.read_lengths = self.read_lengths / np.sum(self.read_lengths)", "title": "" }, { "docid": "23a5b5d1c0832716f263236d3c4bcfb8", "score": "0.42254728", "text": "def psnr_error(gen_frames, gt_frames):\n shape = tf.shape(gen_frames)\n num_pixels = tf.to_float(shape[1] * shape[2] * shape[3])\n gt_frames = (gt_frames + 1.0) / 2.0\n gen_frames = (gen_frames + 1.0) / 2.0\n square_diff = tf.square(gt_frames - gen_frames)\n\n batch_errors = 10 * log10(1 / ((1 / num_pixels) * tf.reduce_sum(square_diff, [1, 2, 3])))\n return tf.reduce_mean(batch_errors)", "title": "" }, { "docid": "7b3a7994e1aadb2574168a40e579652c", "score": "0.42176554", "text": "def irr(cashflows, precision=10**-4, maxrate=10**6):\n binf, bsup = 0, maxrate\n while bsup - binf > precision:\n irr = (binf+bsup)/2.\n if npv(irr, cashflows) < 0:\n bsup = irr\n else:\n binf = irr\n return irr", "title": "" }, { "docid": "d7fcfdeaebfd692a7bb5530d9c15b4ba", "score": "0.41965672", "text": "def tcperrcipallocrate(self) :\n try :\n return self._tcperrcipallocrate\n except Exception as e:\n raise e", "title": "" }, { "docid": "5acca0a105f9147ca04cfd96a4f212c2", "score": "0.4194664", "text": "def run_err_stats():\n d_d = load_dictionary(\"diff_dict.npy\")\n alphas = get_cat_attributes_names()\n for key in d_d.keys():\n print(\"----- \", key, \"-------\")\n for matrix, alpha in zip(d_d[key], alphas):\n print_errs(convert_to_percentage_mat(matrix), alpha)", "title": "" }, { "docid": "60caa1db4822d2d849a6e5172502aba5", "score": "0.4190385", "text": 
"def error_rate(tp, tn, fp, fn) -> float:\n errors = float(fp + fn)\n all_ = (fp + fn + tp + tn)\n return errors / all_", "title": "" }, { "docid": "d8cbecdf2236cbd6178e3bb560337718", "score": "0.41879708", "text": "def retrieval_rate_init(model,r,g,strg_tech,t,tm,s):\n if r in model.STORAGE_R:\n if r in model.STORABLE_CO2:\n # CO2 cannot be retrieved from the storage technologies once after storage.\n model.retrieval_rate[r,g,strg_tech,t,tm,s].fixed = True\n return 0\n else:\n return None\n\n else:\n # No retrieval for resources that are not stored.\n model.retrieval_rate[r,g,strg_tech,t,tm,s].fixed = True\n return 0", "title": "" }, { "docid": "4b9689cacc4427ca75e1cd37a00d4476", "score": "0.4186584", "text": "def test_read_segment_range_is_reasonable(self):\n filename = self.get_local_path('nest/0gid-1time-1256-0.gdf')\n r = NestIO(filenames=filename)\n\n seg = r.read_segment(gid_list=(10, 10), t_start=400. * pq.ms,\n t_stop=500. * pq.ms, lazy=False,\n id_column_gdf=0, time_column_gdf=1)\n self.assertEqual(len(seg.spiketrains), 1)\n with self.assertRaises(ValueError):\n r.read_segment(gid_list=(10, 9), t_start=400. * pq.ms,\n t_stop=500. * pq.ms, lazy=False,\n id_column_gdf=0, time_column_gdf=1)", "title": "" }, { "docid": "30bf72148d19228ca5fbe2ba78200d4c", "score": "0.41759986", "text": "def reverb_ratios(x, rir, reverb_start=20, fs=16000):\n # DRR\n i_peak = np.argmax(rir)\n n_d = int(1e-3 * reverb_start * fs) # Sample where reverberation starts\n h_d = rir[:i_peak + n_d]\n h_r = rir[i_peak + n_d:]\n\n drr = 10*np.log10(sum(h_d**2)/sum(h_r**2))\n\n # SRR\n x_d = np.convolve(x, h_d)\n x_r = np.convolve(x, h_r)\n srr = 10*np.log10(sum(x_d**2)/sum(x_r**2))\n\n return drr, srr", "title": "" }, { "docid": "f61b89a9fc4d0d60d344aa6fb589a864", "score": "0.41651112", "text": "def annotate_read_pattern(\n references, df_cov, bam_fname,\n cache_fname='data/coverage_annos.txt'\n):\n def get_reads(ref, start, end):\n sequences = []\n with tempfile.NamedTemporaryFile(suffix='sam') as tf:\n os.system(f'samtools view -o {tf.name} {bam_fname} {ref}:{start}-{end}')\n\n for line in tf:\n parts = line.split()\n pos = int(parts[3])\n seq = parts[9].decode('utf-8')\n\n if pos == start:\n sequences.append(seq)\n return sequences\n\n def extract_enriched_bases(base_freqs):\n enriched_bases = ''\n score_string = ''\n\n for _, bases in base_freqs.items():\n best_base = max(bases, key=lambda b: bases[b])\n\n total_count = sum(bases.values())\n score = bases[best_base] / total_count\n\n enriched_bases += best_base\n\n cur = str(int(abs(score * 10) % 10))\n score_string += '\\u2714' if cur == '0' else cur\n\n return enriched_bases, score_string\n\n peak_data = annotate_peak_positions(references, df_cov)\n open(cache_fname, 'w').close() # clear old cache\n\n data = {}\n for ref, peak_list in tqdm(peak_data.items()):\n with open(cache_fname, 'a') as fd:\n fd.write(ref + '\\n')\n\n data[ref] = []\n for peak in sorted(peak_list, key=lambda x: x['strand']):\n start, end = peak['start'], peak['end']\n\n reads = get_reads(ref, start, end)\n base_freqs = compute_base_enrichment(reads)\n\n ebases, score = extract_enriched_bases(base_freqs)\n pos_range = f'{start}{\" \"*(len(ebases)-len(str(start))-len(str(end)))}{end}'\n\n annotation = f'{pos_range}\\n{ebases}\\n{score}'\n data[ref].append({\n 'start': start,\n 'end': end,\n 'name': annotation\n })\n\n with open(cache_fname, 'a') as fd:\n fd.write(f'{peak[\"strand\"]} strand\\n' + annotation + '\\n\\n')\n return data", "title": "" }, { "docid": 
"9f1c3dc81153e22e19d563874fc0e29d", "score": "0.4152355", "text": "def sample_restr(self, num_iters, objs, type_opts):\n for i in range(num_iters):\n self.gibbs_scan(True, objs, type_opts)", "title": "" }, { "docid": "4adca43ca1ff74fe54422724a231456f", "score": "0.41493046", "text": "def getBadReadsName(self, verbose=False): #check for bad reads in the contig\n # comparing the actual read start and alignment start pos\n badReadsName=[]\n for read in self.reads:\n actual_read_start=read.read_ref_start\n algn_start=(read.contig_ref_start)+(read.read_align_start)\n if(abs(algn_start-actual_read_start)<=10000):\n \n badReadsName.append(str(read.getReadsName()));\n #contig.errors.append(int(elements[5])) \n \n return badReadsName;", "title": "" }, { "docid": "f9f6e1d69b6a0e783a81ea4b5eca62db", "score": "0.41435444", "text": "def capture_rate(code):\n repo = PokemonRepo(PokemonDataAccess())\n response = repo.get_capture_rate(code)\n\n if (response == None):\n abort(404)\n return response", "title": "" }, { "docid": "3fcfb27225934d6d260454141f809eec", "score": "0.41420305", "text": "def rate(self, rateindex, timeindex):\n pass", "title": "" }, { "docid": "f6368cc2f9bc8d7f2ca6528fee70f764", "score": "0.41413286", "text": "def test_response_with_missing_rates(self):\n responses.add(responses.GET, client.LATEST_URL, body='{\"success\": true}')\n\n with self.assertRaises(exceptions.ValidationError):\n fetch_rate(currencies.EUR, currencies.USD)\n\n self.assertEqual(len(responses.calls), 1)", "title": "" }, { "docid": "aad304a8ccea49ac1cc26a180eeb51a6", "score": "0.4124364", "text": "def test_error_rate(self):\n max_n = 10000\n p = .1\n count = 10000\n last_p = 0.0\n\n bloom_filter = BloomFilter(max_n, p)\n\n for i in (str(i) for i in range(count)):\n try:\n bloom_filter.add(i)\n except BloomFilterExceedsErrorRate:\n fail(\"Bloom filter exceeded error rate.\")\n else:\n rate_success = last_p < bloom_filter.current_p_float\n\n assert rate_success, \"last_p !< current_p\"\n\n last_p = bloom_filter.current_p_float", "title": "" }, { "docid": "6865b1a7051cbf7020f1e9a46e3ce982", "score": "0.41242865", "text": "def __init__(self, error_rate, redis_host=None, redis_port=None, redis_db=None, redis_key=None):\n \n if not (0 < error_rate < 1):\n raise ValueError(\"Error_Rate must be between 0 and 1.\")\n\n # error_rate = 1.04 / sqrt(m)\n # m = 2 ** b\n # M(1)... 
M(m) = 0\n\n b = int(math.ceil(math.log((1.04 / error_rate) ** 2, 2)))\n\n self.alpha = self._get_alpha(b)\n self.b = b\n self.m = 1 << b\n self.M = [ 0 for i in range(self.m) ]\n self.bitcount_arr = [ 1L << i for i in range(160 - b + 1) ]\n\n if redis_host:\n if not redis_port or not redis_key:\n raise ValueError(\"missing redis_information\")\n if not redis_db:\n redis_db = 0\n # redis connection information\n self.redis_host = redis_host\n self.redis_port = redis_port\n self.redis_db = redis_db\n self.redis_key = redis_key\n self.redis = Redis(redis_host, redis_port, redis_db)\n if self.redis.exists(self.redis_key):\n self.restore_from_redis()\n else:\n self.save_to_redis()", "title": "" }, { "docid": "599e8fa54d6625fc046a428d197dfac1", "score": "0.41234636", "text": "def get_risk(self, outputfile=None):\n risk_fields = [[\"alpha1\", \"alpha3\", \"alpha5\", \"alpha10\"],\n [\"beta1\", \"beta3\", \"beta5\", \"beta10\"],\n [\"MAR1\", \"MAR3\", \"MAR5\", \"MAR10\"],\n [\"R2_1\", \"R2_3\", \"R2_5\", \"R2_10\"],\n [\"SD1\", \"SD3\", \"SD5\", \"SD10\"],\n [\"sharpe1\", \"sharpe3\", \"sharpe5\", \"sharpe10\"]]\n\n ticker_count = len(self.tickers)\n risk_list = []\n for ticker_no, ticker in enumerate(self.tickers):\n page = os.path.join(self.fundpages_location, \"%s.html\" % ticker)\n newpage, errors = tidy_document(open(page).read())\n soup = BeautifulSoup(newpage)\n\n # Initialize a dict to hold risk data of this ticker. The data will be added\n # at the ned of the try block\n riskdata_dict = {\"ticker\": ticker}\n\n try:\n # Retrieve the risk table by descending into the DOM\n risktable = \\\n soup\\\n .body\\\n .div(id=\"gf-viewc\")[0]\\\n .findAll(\"div\", class_=\"fjfe-content\")[0]\\\n .findAll(\"div\", class_=\"mutualfund\")[0]\\\n .findAll(\"div\", class_=\"g-section g-tpl-right-1\")[0]\\\n .findAll(\"div\", class_=\"g-unit\")[1]\\\n .findAll(\"div\", class_=\"g-c sfe-break-right\")[0]\\\n .findAll(\"div\", class_=\"sector\")[1]\\\n .findAll(\"div\", class_=\"subsector\")[0].table\n\n riskdata_raw = [\n [col.text.strip() for col in row.findAll(\"td\")]\n for row in risktable.findAll(\"tr\")\n ]\n\n # Convert available fields to float. 
Unavailable fields are presented as '-'\n # in the html, convert them to empty strings.\n riskdata_float = [map(lambda x: float(x) if x != \"-\" else \"\", R[1:])\n for R in riskdata_raw[1:-1]]\n\n # Add the risk data for this ticker to riskdata_dict\n for field_type, field_data in zip(risk_fields, riskdata_float):\n riskdata_dict.update(dict(zip(field_type, field_data)))\n except (IndexError, AttributeError):\n pass\n # print \"page could not be scraped for ticker %s\" % ticker\n\n # Append risk data for the current ticker to the list\n risk_list.append(riskdata_dict)\n\n # Print progress\n if ticker_no % 100 == 0:\n print \"%d of %d\" % (ticker_no, ticker_count)\n\n # Flatten the risk_fields to feed to the CSV writer and add field\n # \"ticker\" at the front\n risk_fields_flattened = [\"ticker\"] + reduce(lambda x, y: x + y, risk_fields)\n\n # Write the CSV file\n super(GfncScraper, self).writecsv(risk_fields_flattened, risk_list, outputfile)", "title": "" }, { "docid": "a778e6695cda71cb59f426d0c308a41b", "score": "0.41213062", "text": "def testDecreasingErg(self):\n ergs = StringIO.StringIO('2\\n1\\n3\\n4')\n self.assertEqual(0,te.read_and_tag_phtn_ergs(ergs, self.sm))", "title": "" }, { "docid": "15f41d37fb8840126a52cfd08e133823", "score": "0.41199923", "text": "def extractInteractionRate(self,filename):\n m = mctal.MCTAL(filename)\n tally = m.tallies[54]\n countRate = tally.data[-1]*2.3E3 \n title = m.header.title\n genome = title.strip('MCNPX Simulation of RPM8 Genome: ')\n return {genome:countRate}", "title": "" }, { "docid": "c680a86837cc6e218cbf34c1d1dc33d3", "score": "0.41174725", "text": "def avg_quality_scores(input_dictionary, max_length_seq):\r\n avg_score = {}\r\n\r\n #a for loop to run till the range of value of max_length_seq\r\n #this will cover the size of all sequences even after trimming\r\n #the position counter for every sequence is taken using the i of first for loop\r\n for i in range(max_length_seq):\r\n #a variable to store the score at every position\r\n total_quality_score = 0\r\n #a variable whose value will increase per every base at that postion\r\n count_pos = 0\r\n\r\n #a for loop to calculate the score for every (10,000) sequences in the dictionary\r\n #at a time only a particular position of the sequences will be accessed\r\n for keys in input_dictionary:\r\n #storing the list of quality scores\r\n individual_quality_score = input_dictionary[keys][1]\r\n\r\n #an if block to calculate the scores only of informative positions\r\n if i < len(individual_quality_score):\r\n #this variable will be incremented till the position runs out of nucleotides\r\n #for that position in all the sequences\r\n count_pos += 1\r\n total_quality_score += int(individual_quality_score[i])\r\n\r\n #finally storing the average score at every position in the dictionary\r\n avg_score[i+1] = total_quality_score/count_pos\r\n\r\n return avg_score", "title": "" }, { "docid": "bd01069f92ff707e71f9d35eae832ca1", "score": "0.41158757", "text": "def rms_error(seq1, seq2):\r\n assert len(seq1) == len(seq2)\r\n return math.sqrt(sum((x - y) ** 2 for x, y in zip(seq1, seq2)) / len(seq1))", "title": "" }, { "docid": "c855ccee5b5796cecc8faf6d9dea2b21", "score": "0.41152087", "text": "def tcperrpartialretrasmitrate(self) :\n try :\n return self._tcperrpartialretrasmitrate\n except Exception as e:\n raise e", "title": "" }, { "docid": "5f0e66255ceec7e582fe323ded4e14a2", "score": "0.41121373", "text": "def DielectricRecombinationRate(self, T):\n \n if self.rate_src == 'fk94':\n return 1.9e-3 * 
T**-1.5 * np.exp(-4.7e5 / T) * (1. + 0.3 * np.exp(-9.4e4 / T))\n else:\n raise NotImplementedError()", "title": "" }, { "docid": "1a00cd51b998e9c4174afd824f41efef", "score": "0.4111168", "text": "def read_file(fn, args):\n database = args.database\n gtf = args.gtf\n sep = \" \" if args.out_format == \"gtf\" else \"=\"\n map_mir = mapper.read_gtf_to_mirna(gtf)\n reads = defaultdict(dict)\n reads_in = 0\n sample = os.path.splitext(os.path.basename(fn))[0]\n hits = _get_hits(fn)\n logger.debug(\"ISOMIRSEA::SAMPLE::%s\" % sample)\n with open(fn) as handle:\n for line in handle:\n cols = line.strip().split(\"\\t\")\n attr = read_attributes(line, \"=\")\n query_name = attr['TS']\n query_sequence = attr['TS'].replace(\"U\", \"T\")\n start = int(cols[3])\n end = int(cols[4])\n isomirseq_iso = attr['ISO']\n if query_name not in reads and query_sequence == None:\n continue\n if query_sequence and query_sequence.find(\"N\") > -1:\n continue\n counts = attr[\"TC\"]\n chrom = cols[0]\n # logger.debug(\"SEQBUSTER:: cigar {cigar}\".format(**locals()))\n cigar = attr['CI'].replace(\"U\", \"T\")\n idu = make_id(query_sequence)\n isoformat = cigar2variants(cigar, query_sequence, attr['ISO'])\n logger.debug(\"\\nISOMIRSEA::NEW::query: {query_sequence}\\n\"\n \" precursor {chrom}\\n\"\n \" name: {query_name}\\n\"\n \" idu: {idu}\\n\"\n \" start: {start}\\n\"\n \" cigar: {cigar}\\n\"\n \" iso: {isoformat}\\n\"\n \" variant: {isoformat}\".format(**locals()))\n source = \"isomiR\" if isoformat != \"NA\" else \"ref_miRNA\"\n strand = \"+\"\n database = cols[1]\n mirName = attr['MIN'].split()[0]\n preName = attr['PIN'].split()[0]\n score = \".\"\n Filter = attr['FILTER']\n isotag = attr['ISO']\n tchrom, tstart = _genomic2transcript(map_mir[mirName],\n chrom, start)\n start = start if not tstart else tstart\n chrom = chrom if not tstart else tchrom\n end = start + len(query_sequence)\n hit = hits[idu]\n fields = {'seq_name': query_sequence, 'idseq': idu,\n 'name': mirName, 'parent': preName,\n 'variant': isoformat, 'cigar': cigar,\n 'counts': counts, 'filter': Filter,\n 'hits': hit, 'chrom': chrom,\n 'start': start, 'end': end,\n 'database': database, 'source': source,\n 'score': score, 'strand': strand}\n # TODO: convert to genomic if args.out_genomic\n line = feature(fields).line\n if args.add_extra:\n extra = variant_with_nt(line, args.precursors, args.matures)\n line = \"%s Changes %s;\" % (line, extra)\n\n line = paste_columns(feature(line), sep=sep)\n if start not in reads[chrom]:\n reads[chrom][start] = []\n if Filter == \"Pass\":\n reads_in += 1\n reads[chrom][start].append([idu, chrom, counts, sample, line])\n\n logger.info(\"Hits: %s\" % reads_in)\n return reads", "title": "" }, { "docid": "eab6fcab756794bd378a215cafefad45", "score": "0.41100007", "text": "def _ect0_gconst(pattern, qrs):\n if pattern.evidence[o.Cardiac_Rhythm]:\n mrr, stdrr = pattern.evidence[o.Cardiac_Rhythm][0].meas[0]\n if mrr > 0:\n ectrr = qrs.time.start - pattern.evidence[o.QRS][0].time.start\n verify(ectrr < 0.9 * mrr)\n verify(ectrr < mrr - stdrr)", "title": "" }, { "docid": "88b5ebe117e1af4beeceffdc996c5716", "score": "0.41061786", "text": "def RoughErrors (self, idx):\n counts = np.zeros(self.k, dtype=np.int_)\n idxList = list(idx)\n #print idxList\n for i in idxList:\n counts[i] = counts[i] + 1\n return sum([abs(55 - i) for i in counts]) / 2", "title": "" }, { "docid": "a49c2c833743a032e1697795be80ca18", "score": "0.41035068", "text": "def recovery_prob(self, _cell) -> float:\n return 
self._registered_recovery[self.get_type_name_by_cell(_cell)].recovery_rate * self.step_period", "title": "" }, { "docid": "5bfec59327d8b5f1003bb420ffd4743f", "score": "0.4102836", "text": "def set_RF_zero(self, protein_rec):\n\t\tprint(\"Init lower bound RF to 0\")\n\t\tprotein_rec.letter_annotations[\"lower_bound_rf\"] = len(protein_rec.seq) * [0.0]\n\t\tprotein_rec.letter_annotations[\"sliding_avg_lower_bound\"] = len(protein_rec.seq) * [0.0]\n\t\treturn protein_rec", "title": "" }, { "docid": "8de2b7a90c81773932d1fcbf109cdacc", "score": "0.4100847", "text": "def Calibrate(SEQ, NUM_SAMPLES = 5000, PRECOMPUTE_DICT = None,\r\n MI_RNA_NAME = None, DUMP_QUEUE = None,\r\n CODING_FILE = 'C:\\\\RNAHybrid\\\\coding_HIV.txt',\r\n RNAHybridPath = 'C:\\\\RNAHybrid\\\\', PICKLE_DICT = None):\r\n\r\n #If a pre-computed dictionary is provided,\r\n #check to see if it has the data we need\r\n try:\r\n outputList = ('junk', 'junk', PRECOMPUTE_DICT[MI_RNA_NAME][0],\r\n PRECOMPUTE_DICT[MI_RNA_NAME][1])\r\n logging.debug('Found Calib Data')\r\n except KeyError:\r\n logging.debug('Making Calib Data')\r\n command = RNAHybridPath + 'RNAcalibrate'\r\n\r\n command += ' -d ' + CODING_FILE\r\n command += ' -k ' + str(NUM_SAMPLES)\r\n\r\n command += ' ' + SEQ\r\n\r\n sys_call = subprocess.Popen(command, shell = True,\r\n stdout = subprocess.PIPE)\r\n\r\n logging.debug('starting waiting')\r\n sys_call.wait()\r\n logging.debug('done waiting')\r\n\r\n output = sys_call.communicate()[0]\r\n outputList = re.split(' |\\n', output)\r\n\r\n if DUMP_QUEUE == None:\r\n return (outputList[2], outputList[3])\r\n else:\r\n DUMP_QUEUE.put((MI_RNA_NAME, SEQ, outputList[2], outputList[3]))\r\n\r\n if PRECOMPUTE_DICT != None:\r\n PRECOMPUTE_DICT[MI_RNA_NAME] = (outputList[2], outputList[3])\r\n\r\n if PICKLE_DICT != None:\r\n logging.debug('pickling Data')\r\n pickHandle = open(RNAHybridPath + 'SavedCalib.pkl', mode = 'w')\r\n pickle.dump(PRECOMPUTE_DICT, pickHandle)\r\n pickHandle.close()", "title": "" }, { "docid": "43551c9f09e57b0395d89c887b3c47a6", "score": "0.41008154", "text": "def reads_for_barcode( self, reads_file ):\n for read in SeqIO.parse( reads_file, 'sff' ):\n # Quit if max_num is reached\n if self.max_num != 'All' and self._processed == self.max_num:\n break\n if self._readMatches( read ):\n logger.debug( \"%s: %s Matched Read %s\" % (self.proc_name, self.id_str, read.id) )\n self._matched_reads += 1\n yield read\n self._processed += 1", "title": "" }, { "docid": "20284abe243962f8e639a312a14ac298", "score": "0.4098606", "text": "def test_realigner_doesnt_create_invalid_intervals(self):\n region = ranges.parse_literal('chr20:63,025,320-63,025,520')\n\n # pylint: disable=g-complex-comprehension\n reads = [\n test_utils.make_read(\n 'ACCGT' * 50,\n start=63025520 - 250,\n cigar='250M',\n quals=list(np.tile(range(30, 35), 50))) for _ in range(20)\n ]\n # pylint: enable=g-complex-comprehension\n self.reads_realigner.realign_reads(reads, region)\n\n # These reads are aligned off the edge of the contig. 
Note that the\n # reference bases in this interval are all Ns as well.\n # pylint: disable=g-complex-comprehension\n reads = [\n test_utils.make_read(\n 'TTATA' * 50,\n start=63025520 - 200,\n cigar='200M50S',\n quals=list(np.tile(range(30, 35), 50))) for _ in range(20)\n ]\n # pylint: enable=g-complex-comprehension\n self.reads_realigner.realign_reads(reads, region)", "title": "" }, { "docid": "d7cb55a7f1a8ee14befd24737cb0350d", "score": "0.40911597", "text": "def write_kmers(gene_dict,k,genome_dict,out_file,make_fastq=False):\n outf = open(out_file,'w')\n gene_counter = 0\n chroms_not_in_genome = []\n for gene in gene_dict:\n\n gene_counter += 1 # Track how many genes\n if gene_counter%2500 == 0:\n print 'Number of genes processed: ', gene_counter\n\n isoforms = gene_dict[gene]\n\n reads_included = set() # This will prevent double-writing reads of identical position/sequence\n\n for tx in isoforms:\n\n gene_name = tx.get_gene()\n chromosome = tx.chrom\n tx_name = tx.get_name()\n if chromosome in chroms_not_in_genome:\n continue\n try:\n seq = tx.get_sequence(genome_dict)\n except(KeyError):\n printer.write('Chromosome/contig %s not found in genome. Skipping... ' % chromosome)\n chroms_not_in_genome.append(chromosome)\n continue\n genome_positions = tx.get_position_list()\n\n strand = tx.strand\n\n if make_fastq == False:\n for ii in range(len(seq) - k):\n read = seq[ii:ii + k]\n if strand == '-':\n pos5pr = genome_positions[-(ii+1)]\n elif strand == '+':\n pos5pr = genome_positions[ii]\n if (pos5pr,read) not in reads_included: # check if pos,seq of read already in file\n outf.write('\\n'+'>%s:%s(%s)' % (chromosome,pos5pr,strand))\n outf.write('\\n'+read)\n reads_included.add((pos5pr,read))\n\n elif make_fastq == True: # TODO: This may give errors - only updated make_fastq=False so far\n for ii in range(len(seq) - k):\n read = seq[ii:ii + k]\n if strand == '-':\n pos5pr = genome_positions[-(ii+1)]\n elif strand == '+':\n pos5pr = genome_positions[ii]\n if (pos5pr,read) not in reads_included: # check if pos,seq of read already in file\n out_file.write('\\n'+'@'+gene_name+'_'+tx_name+'_'+str(pos5pr)+':'+chromosome+'('+strand+')')\n out_file.write('\\n'+read)\n out_file.write('\\n'+'+'+gene_name+'_'+tx_name+'_'+str(pos5pr)+':'+chromosome+'('+strand+')')\n out_file.write('\\n'+'I'*len(read))\n reads_included.add((pos5pr,read))\n outf.close()", "title": "" }, { "docid": "10df3aacc7baf8cd41cb57c5824d2149", "score": "0.4089274", "text": "def __init__(self, name, transmission_rate, recover_rate, recover_type=1, selection_rate=None, ):\n self.name = name\n self.b = transmission_rate\n try:\n self.b_dist = Distro(self.b ** (-1), 10000)\n except ZeroDivisionError:\n self.b_dist = Distro(NO_RATE_FLAG, 10000)\n self.g = recover_rate\n try:\n self.g_dist = Distro(self.g ** (-1), 10000)\n except ZeroDivisionError:\n self.g_dist = Distro(NO_RATE_FLAG, 10000)\n if selection_rate:\n if type(selection_rate) is not dict:\n self.s = {'Default': selection_rate}\n else:\n self.s = selection_rate\n else:\n self.s = {'Default': 0}\n self.recover_type = recover_type", "title": "" }, { "docid": "62d1961ec41c1f25fe5857c681de97dd", "score": "0.40888405", "text": "def get_errors_from_file(filename, outdir='', limit=200):\n ins_pos = np.array([]) # relative\n dels_pos = np.array([]) # relative\n subs_pos = np.array([]) # relative\n ins_rates = []\n dels_rates = []\n subs_rates = []\n error_rates = []\n ref_lengths = []\n read_lengths = [] # relative\n with open(filename, 'r') as f:\n num_samples = 0\n line = 
f.readline()\n while line and num_samples < limit:\n ref = f.readline().strip()\n read = f.readline().strip()\n error_summary = sm_align(ref, read)\n ref_align, read_align, _, ins, dels, subs = error_summary\n with open(os.path.join(outdir, 'error_pos.log'), 'a') as log:\n log.write(\"%s\\n%s\\n%s\\n\" % (ins.tolist(), dels.tolist(), subs.tolist()))\n\n relative_pos = lambda x: x / len(ref)\n ins_rates.append(np.sum(ins) / len(ref) * 100)\n ins = [[i for j in range(int(err_count))] for i, err_count in enumerate(ins)]\n ins = [relative_pos(pos) for sublist in ins for pos in sublist]\n ins_pos = np.concatenate((ins_pos, ins))\n\n dels_rates.append(np.sum(dels) / len(ref) * 100)\n dels = [[i for j in range(int(err_count))] for i, err_count in enumerate(dels)]\n dels = [relative_pos(pos) for sublist in dels for pos in sublist]\n dels_pos = np.concatenate((dels_pos, dels))\n\n subs_rates.append(np.sum(subs) / len(ref) * 100)\n subs = [[i for j in range(int(err_count))] for i, err_count in enumerate(subs)]\n subs = [relative_pos(pos) for sublist in subs for pos in sublist]\n subs_pos = np.concatenate((subs_pos, subs))\n\n error_rates.append(ins_rates[-1] + dels_rates[-1] * subs_rates[-1])\n print(\"%s, %s, %s, %s\" % (ins_rates[-1], dels_rates[-1], subs_rates[-1], ins_rates[-1] + dels_rates[-1] + subs_rates[-1]))\n\n ref_lengths.append(len(ref))\n read_lengths.append(len(read) / len(ref))\n\n line = f.readline()\n num_samples += 1\n\n ins_rate = np.mean(ins_rates)\n dels_rate = np.mean(dels_rates)\n subs_rate = np.mean(subs_rates)\n print(\"Average insertions %.3f%%, deletions %.3f%%, substitutions %.3f%%, errors %.3f%%\"\n % (ins_rate, dels_rate, subs_rate, ins_rate + dels_rate + subs_rate))\n\n # Plotting distribution of insertion, deletion & substitution errors\n plt.figure()\n plt.title(\"Sequence Error Distribution\")\n x, y = np.unique(ins_pos, return_counts=True)\n x, y = bucket_frequencies(x, y, 0, 1, 0.002)\n plt.plot(x, y, label=\"Insertions\")\n x, y = np.unique(dels_pos, return_counts=True)\n x, y = bucket_frequencies(x, y, 0, 1, 0.002)\n plt.plot(x, y, label=\"Deletions\")\n x, y = np.unique(subs_pos, return_counts=True)\n x, y = bucket_frequencies(x, y, 0, 1, 0.002)\n plt.plot(x, y, label=\"Substitutions\")\n plt.xlabel('DNA Sequence Relative Position')\n plt.ylabel('Total Error Count')\n plt.legend()\n plt.savefig(os.path.join(outdir, \"error_distribution.png\"), dpi=800)\n\n # Write statistics\n with open(os.path.join(outdir, 'avg_error_summary.txt'), 'w') as fasta:\n fasta.write(\"Insertions(%%):\\t\\t%.3f\\nDeletions(%%):\\t\\t\\t%.3f\\nSubstitutions(%%):\\t%.3f\\nErrors(%%):\\t\\t\\t\\t%.3f\\n\"\n % (ins_rate, dels_rate, subs_rate, (ins_rate + dels_rate + subs_rate)))\n\n # Plot read length vs error rate\n plt.figure()\n plt.title(\"Sequence Length vs Error Rate\")\n plt.plot(ref_lengths, error_rates, 'o')\n plt.plot(np.unique(ref_lengths), np.poly1d(np.polyfit(ref_lengths, error_rates, 1))(np.unique(ref_lengths)))\n plt.xlabel('Sequence Length (nt)')\n plt.ylim(0,10)\n plt.ylabel('Error Rate (%)')\n plt.savefig(os.path.join(outdir, \"len_vs_errors.png\"), dpi=800)\n\n # Plot distribution of read lengths\n plt.figure()\n x, y = np.unique(read_lengths, return_counts=True)\n x, y = bucket_frequencies(x, y, 0.88, 1.12, 0.005)\n x, y = filter_zero_counts(x, y)\n plt.title(\"Relative Read Length Distribution\")\n plt.scatter(x, y)\n plt.ylabel('Frequency')\n plt.xlabel('Relative Read Length')\n plt.axvline(x=1, label=\"Reference Length\")\n plt.savefig(os.path.join(outdir, 
\"read_len_distribution.png\"), dpi=800)", "title": "" }, { "docid": "693dc11256fee31b2f8cb9dc01ca363a", "score": "0.408772", "text": "def tcperrretransmitrate(self) :\n try :\n return self._tcperrretransmitrate\n except Exception as e:\n raise e", "title": "" }, { "docid": "53a1b7e184ef05fcbdbae8705402f55a", "score": "0.40868095", "text": "def scan(self, seqs, nreport=100, scan_rc=True, zscore=False, gc=False):\n\n if not self.threshold:\n logger.info(\n \"Using default threshold of 0.95. \" \"This is likely not optimal!\"\n )\n self.set_threshold(threshold=0.95)\n\n seqs = as_fasta(seqs, genome=self.genome)\n\n it = self._scan_sequences(seqs.seqs, nreport, scan_rc)\n\n if zscore:\n if gc:\n if len(self.meanstd) <= 1:\n self.set_meanstd(gc=gc)\n else:\n if len(self.meanstd) != 1:\n self.set_meanstd(gc=gc)\n\n gc_seqs = [self.get_seq_bin(seq) for seq in seqs.seqs]\n\n logger.debug(\"Scanning\")\n for result, gc_seq in zip(it, gc_seqs):\n if zscore:\n zresult = []\n for i, mrow in enumerate(result):\n try:\n m_mean, m_std = self.get_motif_mean_std(\n gc_seq, self.motif_ids[i]\n )\n except Exception:\n print(self.meanstd)\n print(gc_seq, self.motif_ids[i])\n raise\n mrow = [((x[0] - m_mean) / m_std, x[1], x[2]) for x in mrow]\n zresult.append(mrow)\n yield zresult\n else:\n yield result", "title": "" }, { "docid": "57d6a4bb723deec009a118d5a08fd2ef", "score": "0.4082775", "text": "def __init__(self, rates):\n n_rates = len(rates)\n if n_rates < 1 :\n raise Exception(\"Rates entered must be >= 1, but is {}\".format(n_rates))\n if n_rates > 11:\n raise Exception(\"Can't have more than 11 hours but {} were submitted\".format(n_rates))\n\n self.start_time = MINIMUM_START\n self.end_time = MAXIMUM_END\n self.default_rate = DEFAULT_MINIMUM_RATE\n self.rate_dict = self.slice_rates(rates)", "title": "" }, { "docid": "51d7e58ff0839d14adddde20c8d36540", "score": "0.40799025", "text": "def _fit_record_gaussian(my_ds, record_number):\n bins = np.arange(0, 100, 1.)\n p0 = []\n chn = 0\n data = my_ds['Data_ch' + str(chn)].values[record_number]\n height = np.nan\n pos = np.nan\n start = np.nan\n error = np.nan\n try:\n data_fit = data\n bins_fit = bins\n p0 = np.array([data_fit.max()-data_fit.min(), np.argmax(data_fit), 20., np.nanmin(data_fit)]).astype(float)\n\n coeff, var_matrix = curve_fit(_gaus, bins_fit, data_fit, p0=p0, method='lm', maxfev=40, ftol=1e-3)\n amplitude = coeff[0]\n peakpos = coeff[1]\n width = coeff[2] * (2.35482)\n base = coeff[3]\n fit_data = _gaus(np.array(bins, dtype=np.float64), *coeff)\n chi2 = chisquare(np.array(data, dtype='float64'), f_exp=np.array(fit_data, dtype='float64'))\n if not (amplitude > 1 and peakpos > 0 and\n peakpos < len(data) and width < len(data) and width < amplitude and width > 0):\n amplitude = np.nan\n width = np.nan\n peakpos = np.nan\n base = np.nan\n chi2 = np.nan\n except RuntimeError:\n amplitude = np.nan\n width = np.nan\n peakpos = np.nan\n base = np.nan\n chi2 = np.nan\n error = 1\n except MemoryError:\n amplitude = np.nan\n width = np.nan\n peakpos = np.nan\n base = np.nan\n chi2 = np.nan\n error = 2\n\n if np.isfinite(base):\n try:\n height = data_fit.max() - base\n pos = np.argmax(data)\n except ValueError:\n height = np.nan\n pos = np.nan\n\n try:\n start = np.where((data - base) < 50)[0]\n if start == []:\n start = np.nan\n else:\n start = start[start <= pos][-1]\n except IndexError:\n start = np.nan\n\n # Filter out bad points\n bad = ~np.logical_and.reduce(\n (height > 1, peakpos > 0, peakpos < len(bins)))\n if bad:\n amplitude = np.nan\n base 
= np.nan\n peakpos = np.nan\n width = np.nan\n chi2 = np.nan\n pos = np.nan\n else:\n height = np.nan\n pos = np.nan\n start = np.nan\n\n fit_coeffs = {'amplitude': amplitude, 'peakpos': peakpos,\n 'width': width, 'base': base, 'chi2': chi2,\n 'height': height, 'pos': pos, 'start': start,\n 'error': error}\n return fit_coeffs", "title": "" }, { "docid": "f319761352c5ef83ae7d99c05a7763f5", "score": "0.4077894", "text": "def G_prefire( r, k, RI, severity):\n x = r*RI\n phi_R = 1 - severity # fraction of biomass remaining\n\n numer = phi_R - np.exp(-x)\n denom = 1 - np.exp(-x)\n G_max = k*numer/denom/phi_R\n\n return greater_than_zero(G_max)", "title": "" }, { "docid": "0ac3a11118a972b3761b279b61676fff", "score": "0.40766227", "text": "def test_1qubit(self):\n\n # Test circuit: ideal outcome \"11\"\n probs = [[1, 0], [0, 1]]\n roerror_dict = {\"type\": \"roerror\", \"operations\": [\"measure\"], \"probabilities\": probs}\n roerror = ReadoutError(probs)\n self.assertEqual(roerror.number_of_qubits, 1)\n self.assertEqual(roerror.probabilities.tolist(), probs)\n self.assertEqual(roerror.to_dict(), roerror_dict)", "title": "" }, { "docid": "34e5bb52ca454c250d4841f0e499e38b", "score": "0.40754494", "text": "def radtime(TR: xarray.Dataset, c1: dict[str, Any], log: bool = False) -> None:\n\n for t in TR.time: # for each time\n irradiance(TR.sel(time=t), c1, log)", "title": "" }, { "docid": "e1b5998868106497bc70640a6a0abb4d", "score": "0.40724123", "text": "def get_rate(self) -> float:\r\n pass", "title": "" }, { "docid": "61fb9bdd7fe3b204230e64666e8c2a69", "score": "0.4067729", "text": "def __init__(self,readsList,consensusThreshold=0.99):\n self.reads = readsList\n reads = [r.seq for r in readsList]\n readLength={len(r) for r in reads}\n if len()!=1: raise Exception('Reads not all of the same length')\n else: readLength=readLength.pop()\n self.consensus=[{} for i in range(readLength)]\n for r in reads:\n for i in range(readLength):\n try: self.consensus[i][r[i]]+=1\n except KeyError: self.consensus[i][r[i]]=1\n readNumber=float(len(reads))\n self.consensus=[{base:c[base]/readNumber for base in c} for c in self.consensus]\n self.seq=''.join([sorted(c,key=lambda x: c[x],reverse=True)[0] if max(c.values()) >= threshold else 'X' \n for c in self.consensus])", "title": "" }, { "docid": "21836f37c2daf9b377ed1d7260f78822", "score": "0.40640733", "text": "def irr(pmts, guess=0.01):\n def _discf(rate, pmts, ):\n \"\"\"used for irr calculation\"\"\"\n dcf=[]\n for i,cf in enumerate(pmts):\n dcf.append(cf*(1+rate)**(-i))\n return np.add.reduce(dcf)\n\n f = lambda x: _discf(x, pmts)\n\n try:\n sys.stderr = NullDevice()\n value = newton(f, guess, maxiter=100, tol=10**(-10))\n sys.stderr = sys.__stderr__\n if value > 1:\n print \"Too large IRR %s . Setting to zero\" % value\n value = 0\n if value < 0.0001:\n print \"Too low IRR %s . 
Setting to zero\" % value\n value = 0\n return value\n except RuntimeError:\n return float('Nan')", "title": "" }, { "docid": "90f3895ad5ef03268367992b6bdd9a88", "score": "0.40570363", "text": "def burst_run_detail(run_number, subrun, sub, level=2):\n server = couchdb.Server(\"http://snoplus:\"+app.config[\"COUCHDB_PASSWORD\"]+\"@\"+app.config[\"COUCHDB_HOSTNAME\"])\n if level==3:\n db = server[\"burst_cleaning\"]\n view_string = \"burst_cleaning\"\n else:\n db = server[\"burst\"]\n view_string = \"burst\"\n\n startkey = [run_number, subrun, sub]\n endkey = [run_number, subrun, sub]\n rows = db.view('_design/'+view_string+'/_view/burst_by_run', startkey=startkey, endkey=endkey, descending=False, include_docs=True)\n for row in rows:\n run_id = row.id\n try:\n result = dict(db.get(run_id).items())\n except KeyError:\n app.logger.warning(\"Code returned KeyError searching for burst_details information in the couchDB. Run Number: %d\" % run_number)\n files = \"%i_%i_%i\" % (run_number,subrun,sub)\n\n return result, files", "title": "" } ]
8043ab3390a3fe7221896f6dbe791964
Retrieve the picture from TwitPic
[ { "docid": "0647cd8cb20444896cbcb30038f69059", "score": "0.83215106", "text": "def get_twit_pic(**kargs):\r\n twitpage = api_call(*urlsplit(kargs['url'])).read()\r\n anchor = '<img class=\"photo\" id=\"photo-display\" src=\"'\r\n start = twitpage.index(anchor) + len(anchor)\r\n end = twitpage.index('\"', start)\r\n imgurl = twitpage[start:end]\r\n return api_call(*urlsplit(imgurl)).read()", "title": "" } ]
[ { "docid": "f03a07b9e9453c460107bcb1bc2c8829", "score": "0.84223866", "text": "def get_twitgoo_pic(**kargs):\r\n host, path, secure = urlsplit(kargs['url'])\r\n pic = api_call(host, path +'/img', secure).read()\r\n return pic", "title": "" }, { "docid": "22e35d95f91d07a40d8079ddd05fe27d", "score": "0.7985925", "text": "def get_tweetphoto_pic(**kargs):\r\n pic_page = api_call(*urlsplit(kargs['url'])).read()\r\n anchor = '\" alt=\"\" id=\"photo\"'\r\n end = pic_page.find(anchor)\r\n start = pic_page.rfind('\"', 0, end) + 1\r\n imgurl = pic_page[start:end]\r\n return api_call(*urlsplit(imgurl)).read()", "title": "" }, { "docid": "d6198e325ea1ba68948bef58d338ff3d", "score": "0.7165673", "text": "def get_profile_pic(twitter_username, size='bigger'):\n t = Twython()\n return t.getProfileImageUrl(twitter_username, size=size)", "title": "" }, { "docid": "1ab571fe5e3d4b9bb28e51e71ea28d44", "score": "0.6742042", "text": "def get_yfrog_pic(**kargs):\r\n host, path, secure = urlsplit(kargs['url'])\r\n pic = api_call(host, path +':iphone', secure).read()\r\n return pic", "title": "" }, { "docid": "a078afe5ccd809e0981f0086e80dc4aa", "score": "0.6643563", "text": "def TweetPicture(twit,file,statustext) -> None:\n photo = open(file, 'rb')\n response = twitter.upload_media(media=photo)\n twit.update_status(status=statustext, media_ids=[response['media_id']])", "title": "" }, { "docid": "10a998016553e0222ee15281d6f7aa6c", "score": "0.66054976", "text": "def trophy_picture(self, id_trophy):\n\n if not self.is_logged:\n return None, None\n\n url = 'https://achievements.etna-alternance.net/api/achievements/%d.png' % (id_trophy,)\n res = self.session.get(url, stream=True)\n res.encoding = 'utf-8'\n\n if (res.status_code == requests.codes.ok):\n return url, res.raw\n else:\n return None, None", "title": "" }, { "docid": "adf856f1b52b53fdd0bc2468a3dd85d2", "score": "0.6516989", "text": "def downloadPic(self):\n L = self.__startInstaloaderInstance()\n shortcode = self.metadata['photo_url'].split('/')[-2]\n profile = Profile.from_username(L.context, self.metadata['photographer_name'])\n post = Post.from_shortcode(L.context, shortcode)\n if post.is_video:\n self.postIsVideo = True\n picture_save_flag = L.download_post(post, target='DB' + profile.username, )\n return picture_save_flag", "title": "" }, { "docid": "c38721ac7bb7eaa11d5bb41924cdfe95", "score": "0.6504063", "text": "def get_pic(**kargs):\r\n urlpart = kargs['url'].split('/')\r\n pic_api = _SERVICEPROVIDERS[urlpart[2]]\r\n return pic_api(**kargs)", "title": "" }, { "docid": "fde4115dc05656197f49c686ae0eac73", "score": "0.64324033", "text": "def find_photo(parsed):\n return parsed['profile_image_url']", "title": "" }, { "docid": "48d240ccccacdfb6efad733c236f1aeb", "score": "0.6398809", "text": "def get_picture(ID):\n pic = PICTURES[ID]\n print(\"Pic is: \" + str(pic))\n filename = pic[\"source\"]\n\n result = {'picture': pic}\n return jsonify(result)", "title": "" }, { "docid": "7b951af84a6994160adb81a9a908ae5d", "score": "0.633708", "text": "def getUserPicture(u_id):\n sanitized_uid = str(u_id)\n (res, code) = req('get', '/api/account/picture/' + sanitized_uid)\n return res, code", "title": "" }, { "docid": "c6876dcc5b9ebac8c283146443a1ea5c", "score": "0.63024235", "text": "def get_profile_picture(self):\n\n try:\n photos = bot.get_user_profile_photos(user_id=self.chat_id, limit=1)\n except TelegramError:\n return None\n\n photo = photos.photos[0]\n photo = sorted(photo, key=lambda x: x.file_size, reverse=True)[0]\n file_name = 
md5(photo.file_id.encode()).hexdigest() + \".png\"\n file_path = \"static/images/profile/\" + file_name\n\n if os.path.exists(file_path):\n return file_name\n\n photo.get_file().download(custom_path=file_path)\n return file_name", "title": "" }, { "docid": "6e3cf7d9bd6beb427ed796568d52ac7e", "score": "0.6266403", "text": "def getProfilePicture(user):\n profilePicResponse = getUserProfilePic(user) #API call\n try:\n url = profilePicResponse['json_data']['business_discovery']['profile_picture_url'] #url endpoint\n\n resp = requests.get(url, stream=True) #get request\n local_file = open('images/users/' + user + '.jpg', 'wb') #location to save the picture in\n resp.raw.decode_content = True\n shutil.copyfileobj(resp.raw, local_file)\n del resp\n except:\n print(\"Error: \" + user + \"\\n\")", "title": "" }, { "docid": "fa06006e84cfa8fdd4e2a4e7c55d2c0b", "score": "0.6252235", "text": "def get_images_from_feed(screen_name, auth_file):\r\n print(\"authorize Twitter...\")\r\n api = twitter_OAuth_login(auth_file)\r\n print(\"start grabbing tweets...\")\r\n # Twitter can only return maximum 3000+ tweets\r\n download_tweets(api, screen_name, 3500)\r\n print(\"start downloading images...\")\r\n urls = extract_images_url(file_name=screen_name+'_tweets.json')\r\n download_images(screen_name, urls)\r\n print(\"..................finish\")", "title": "" }, { "docid": "999608aa5a8c13d38479727e63e343c7", "score": "0.6213378", "text": "def image(self):\n image = self.images.first()\n if image:\n return image.url\n return image", "title": "" }, { "docid": "1096ec38bcaa1228cd0b25f40559f835", "score": "0.61409366", "text": "def snap_picture(self):\n url = f\"{self.sync.urls.base_url}/api/v1/accounts/{self.sync.blink.account_id}/networks/{self.network_id}/owls/{self.camera_id}/thumbnail\"\n return api.http_post(self.sync.blink, url)", "title": "" }, { "docid": "e4412d31eac83d5490640c18ca342503", "score": "0.6132442", "text": "def getPhoto(self):\n response = requests.get(\n Student.STUDENTPHOTO_URL,\n data={},\n headers=Student.HEADERS,\n cookies=self.cookies)\n res = response.content\n\n if response.content is None:\n print(\"Error: \", response.status_code)\n return None\n else:\n self.img_path = self.regdno + \".jpg\"\n with open(self.img_path, \"wb+\") as image:\n image.write(res)\n print(\"File written to {}\".format(self.img_path))\n return self.img_path", "title": "" }, { "docid": "223b494b4acfafaee2c83d077b105fdd", "score": "0.6110817", "text": "def get_image(self):\n file = None\n\n # query Google\n try:\n file = urllib.urlopen(self.google_query)\n except :\n # in case of error retry 5 times\n i = 0\n while i < 5:\n sleep(10)\n try:\n file = urllib.urlopen(self.google_query)\n break\n except URLError:\n # increment attempt count\n i += 1\n continue\n\n # in case of fatal error return None\n if file is None:\n return None\n\n b = BytesIO(file.read())\n img = Image.open(b)\n\n # convert to grey scale and then resize\n img = img.convert('L')\n img.thumbnail((300, 300), Image.ANTIALIAS)\n\n # print stats\n print(\"fetched image for lat: %2.4f long: %2.4f\" % (self.latitude, self.longitude))\n\n return img", "title": "" }, { "docid": "8e77b93fc022c0f01f3dc59c9d6529c0", "score": "0.61024874", "text": "def grab_from_url(url):\n # Simple calls\n rtv = requests.get(url)\n img = Image.open(BytesIO(rtv.content))\n # Return\n return img", "title": "" }, { "docid": "ab1d894c9c50d64be9e6c2e0c118a550", "score": "0.6089832", "text": "def get_image_without_is_url(self, **kwargs):\n relevant_items = [item for item in 
self.contents.get_content_items()\n if isinstance(item, PictureItem)]\n item = relevant_items.pop(0)\n return item.image", "title": "" }, { "docid": "b39355c7162d0d5d3b3f9cc5262bcf9a", "score": "0.6070345", "text": "def picture():\n result = car_send(\"take_picture\")\n return recv_file(result)", "title": "" }, { "docid": "b302f7f8da79a0452ce114b29736e567", "score": "0.60607916", "text": "def get_picture(self):\n self.ret, self.img = self.cam.read()\n return self.ret, self.img", "title": "" }, { "docid": "d71669c4d421f243bfa93f149ea88484", "score": "0.60599416", "text": "def get_picture(self):\n return self.__get__control_command(\"picture\")", "title": "" }, { "docid": "fc2da6a63515e35c095bb5bce8aacc58", "score": "0.6036139", "text": "def download_profile_pic(self):\n url = self.get_img_url()\n if self.profile_has_pic(url):\n full_size_url = ''\n if url[-10:] == 's70-p-k-no':\n full_size_url = url[:-11]\n else:\n full_size_url = url[:url.find('s70') + 3] + '0' + url[url.find('s70') + 3:]\n img = requests.get(full_size_url)\n with open(os.path.dirname(os.path.abspath(__file__)) + '/' + self.target_email + '.png', 'wb') as fp:\n fp.write(img.content)\n print('Downloaded Image!')\n else:\n print('User doesn\\'t a profile picture!')", "title": "" }, { "docid": "9ef845f2e5b86ba8bf6d6c4df28eba7d", "score": "0.60300255", "text": "def image(self):\n if self.photo:\n return self.photo.image\n return self.photo_url", "title": "" }, { "docid": "0a8f42301fa7d2e2820b7d297f33c1e0", "score": "0.6015636", "text": "def get_photo(self):\n return self.photo", "title": "" }, { "docid": "5064d749a7e0f221b26688637588e00c", "score": "0.6010937", "text": "def getPicture (self, id) : \n result = []\n picRef = Picture.objects.filter (answer__id = id)\n for pic in picRef :\n result.append (pic.photo_uri)\n return result", "title": "" }, { "docid": "e5c9384a39a3a8beda6fd1fe88db42cb", "score": "0.59980255", "text": "def get_image(self):\n return self.__data[\"image\"]", "title": "" }, { "docid": "41574ff7d1843985539b44acd6346a1e", "score": "0.59967643", "text": "def get_image_file(self):\n better_link = self.get_better_image_url()\n if not better_link:\n return None\n\n file_res = get(better_link)\n return file_res.content", "title": "" }, { "docid": "46ce80143aa24090e61b1c7475055fd7", "score": "0.5987705", "text": "def get_artist_image(artist_id):\n image = sp.artist(artist_id)['images'][0]['url']\n return image", "title": "" }, { "docid": "bd1de8516d71eb20fbd7a875bd50de4f", "score": "0.5969522", "text": "def get_pic_url(self):\n user = self.find()\n query = '''\n\t MATCH (u:User)\n\t WHERE u.username = {username}\n\t RETURN u.url\n\t '''\n pp_url = graph.run(query, username=self.username).data()\n return pp_url[0][\"u.url\"]", "title": "" }, { "docid": "2271656a885dedbe144840a25af10dfd", "score": "0.59556925", "text": "def get_user_avatar(self):\r\n return self.profile_pic", "title": "" }, { "docid": "11208f1c643a615521edaa8a3b53914c", "score": "0.5941569", "text": "def poster(reference):\n\n url = 'https://www.imdb.com/title/tt0' + str(reference)\n html = requests.get(url).content\n response = HtmlResponse(url=url, body=html)\n\n pic_url = response.css('div.poster img::attr(src)').get()\n # From the url, get pictures\n pic = requests.get(pic_url)\n\n img = Image.open(BytesIO(pic.content))\n \n return img", "title": "" }, { "docid": "3c07327d0716110c1481dde2ccf418a6", "score": "0.59406024", "text": "def user_pic(self):\n if self.username == \"PythonMaster_24\":\n return 
\"https://www.gravatar.com/avatar/ff92a08aae577ceafbdab92bf029ae90\"\n r = self.json\n try:\n if (r[\"icon\"] == None):\n return \"https://repl.it/public/images/evalbot/evalbot_\" + str(\n random.randint(17, 43)) + \".png\"\n return r[\"icon\"][\"url\"]\n except:\n return \"https://repl.it/public/images/evalbot/evalbot_\" + str(\n random.randint(17, 43)) + \".png\"", "title": "" }, { "docid": "5ffec645313384c150a0e3b4a8d6efea", "score": "0.5938949", "text": "def get_image(self):\r\n return self.image", "title": "" }, { "docid": "e933f28d115e62189a89d8542e9df35a", "score": "0.592589", "text": "def camera_image(self):\n # Send the request to snap a picture and return raw jpg data\n # Handle exception if host is not reachable or url failed\n result, response = self._foscam_session.snap_picture_2()\n if result != 0:\n return None\n\n return response", "title": "" }, { "docid": "31c5a587668c8d7f0d1e1de0ba3ca996", "score": "0.59086597", "text": "def get_image_fb(url):\n file_name = url.split('/')[-1]\n location = os.path.join(settings.PROJECT_ROOT, 'uploads/fb_profile')\n if not os.path.exists(location):\n os.makedirs(location)\n file_name_url = os.path.join(location,file_name)\n urllib.urlretrieve (url, file_name_url)\n try:\n im = Image.open(file_name_url)\n except Exception,e:\n return None\n\n return file_name_url", "title": "" }, { "docid": "ca9b8ba703fb36bf863f4f853aaf3c12", "score": "0.5868145", "text": "def _get_image(card):\n image_url = card.find(\"a\", class_=\"photoLink\").img.attrs[\"src\"]\n if \"photo_big.gif\" in image_url:\n return None\n else:\n return \"http:\" + image_url", "title": "" }, { "docid": "d7146f0f904a47f84d0c59d584f2a2dc", "score": "0.584849", "text": "def getimg(card):\n if str(card.multiverseid) in Card.cache :\n return ImageTk.PhotoImage(Card.cache[card.multiverseid])\n if card.image_url is None :\n url = None\n elif card.foreign_names is None :\n url = card.image_url\n else :\n url = None\n for l in card.foreign_names :\n if l[\"language\"] == LANG :\n url = l.get(\"imageUrl\")\n break\n if url is None :\n url = card.image_url\n if url is not None :\n data = urllib.request.urlopen(url)\n img = Image.open(data)\n else :\n img = Image.open(\"./back.jpeg\")\n Card.cache[card.multiverseid] = img\n return ImageTk.PhotoImage(img)", "title": "" }, { "docid": "c45638eab4227b99f5818147250f8bcb", "score": "0.5832882", "text": "def get_image(self):\n return self.send_command('image.cgi')", "title": "" }, { "docid": "96a70f2017105d586a6fa87bf63d6717", "score": "0.58246493", "text": "def get_image(self):\n return self.image", "title": "" }, { "docid": "f8e3652962468402c76df5237aafe3ab", "score": "0.5816383", "text": "def snap_picture(self):\n return api.request_new_image(self.sync.blink, self.network_id, self.camera_id)", "title": "" }, { "docid": "3acb5836a8fa2515de518c248a8c06a3", "score": "0.58110195", "text": "def getImage(self):\n return myImage", "title": "" }, { "docid": "77681f75cf54a6308fbd5651694feca6", "score": "0.5780475", "text": "def get_image_url(self): \n if self.image:\n return self.image.url", "title": "" }, { "docid": "9352c457cf64ce82d473d85f9ad564ad", "score": "0.57747215", "text": "async def random_img(query):\n offset = random.randint(1, 250)\n params = gen_params(query, offset)\n\n result = await httpx.get(url, params=params, headers=headers)\n data = result.json()\n if (data['status'] == 'success'):\n return data['data']['result']['items'][0]['media']\n else:\n return random_img(query)", "title": "" }, { "docid": "c03260207cde7c9a7386950689900f29", 
"score": "0.5767304", "text": "def _get_photo_url(self):\n return self._random_photo_url + self._tags[random.randint(0, len(self._tags) - 1)]", "title": "" }, { "docid": "bfcfeac28307cd9955358bb527dbfd39", "score": "0.57670087", "text": "def test_retrieve_image(self):\n images = self.api.list_images(omit=\"annotations\")\n image = images.results[0]\n image = self.api.retrieve_image(id=image.id) \n pass", "title": "" }, { "docid": "99dcc9bda10ac964a288bab9b7584a69", "score": "0.57624924", "text": "async def dog(self):\r\n def findDog():\r\n global pic\r\n page = BeautifulSoup(urllib2.urlopen(\"https://random.dog/\"), \"lxml\")\r\n img = page.findAll('img')\r\n pic = str(img)\r\n pic = pic.replace('[<img id=\"dog-img\" src=\"', \"\")\r\n pic = pic.replace('\"/>]', \"\")\r\n pic = \"https://random.dog/{}\".format(pic)\r\n\r\n global pic\r\n findDog()\r\n while \"https://random.dog/[]\" in pic:\r\n findDog()\r\n em = discord.Embed(colour=0x260068)\r\n em.set_image(url=pic)\r\n await self.bot.say(embed=em)", "title": "" }, { "docid": "f7f2993b547c2e86a4f7b85faeb5001f", "score": "0.5753861", "text": "def get_image_from_htk_response(resp):\n image_content = BytesIO(resp.content)\n image_content.seek(0)\n image = Image.open(image_content)\n image = image.convert('RGB')\n return np.uint8(image)", "title": "" }, { "docid": "d2a2cb1bb567b2241b7456936b5b0619", "score": "0.575281", "text": "def live_image(self) -> Optional[bytes]:\n res = self.query(\"images\", extra_params={\"channel\": 0})\n\n if not res.headers.get(\"Content-Type\") in (\"image/jpeg\", \"image/png\"):\n return None\n\n return res.content", "title": "" }, { "docid": "3e296efe51fdaed4f126f5e8a391d9ef", "score": "0.5735249", "text": "async def picture(self):\r\n loop = asyncio.get_event_loop()\r\n future = loop.run_in_executor(None, xkcd.getRandomComic) # Permet une réelle parallelisation\r\n comic = await future\r\n return comic.getImageLink()", "title": "" }, { "docid": "5d5c68693464d60824ebd8d733f812f5", "score": "0.573461", "text": "def get_pillow_image(self):", "title": "" }, { "docid": "6bd45c1b3962fe1ea7dd295a92786929", "score": "0.57264954", "text": "def get_image_url(self):\n if self.image:\n return helpers.getMediaUrl(self.image.url)\n else:\n return helpers.getBlankImage()", "title": "" }, { "docid": "b686262f282bc9bd3530a027e58b0150", "score": "0.5723666", "text": "def get_image(self):\n dom = html5lib.parseFragment(self.body, treebuilder=\"etree\", namespaceHTMLElements=False)\n images = dom.findall('.//img')\n if images:\n img = images[0].get('src') # u'https://medor.coop/media/filer_public/cb/1b/cb1b0760-5931-4766-b062-6ea821ba33c6/gent-cropped.png'\n img_path = urlparse(img).path # u'/media/filer_public/cb/1b/cb1b0760-5931-4766-b062-6ea821ba33c6/gent-cropped.png'\n img_filename = basename(img_path) # u'gent-cropped.png'\n for image in Image.objects.filter(original_filename__iexact=img_filename):\n if image.url == img_path:\n return image\n return None", "title": "" }, { "docid": "50142bf7dd40dc73acffbf76bc28ef3e", "score": "0.57195365", "text": "def getContextAvatar(context, request):\n chash = context['hash']\n twitter_username = context['twitterUsername']\n\n base_folder = request.registry.settings.get('avatar_folder')\n avatar_folder = get_avatar_folder(base_folder, 'contexts', chash)\n\n context_image_filename = '%s/%s' % (avatar_folder, chash)\n\n api = get_twitter_api(request.registry)\n if not os.path.exists(context_image_filename):\n download_twitter_user_image(api, twitter_username, context_image_filename)\n\n 
if os.path.exists(context_image_filename):\n # Calculate time since last download and set if we have to re-download or not\n modification_time = os.path.getmtime(context_image_filename)\n hours_since_last_modification = (time.time() - modification_time) / 60 / 60\n if hours_since_last_modification > 3:\n download_twitter_user_image(api, twitter_username, context_image_filename)\n else:\n context_image_filename = '{}/missing-context.png'.format(base_folder)\n\n data = open(context_image_filename).read()\n image = Response(data, status_int=200)\n image.content_type = 'image/png'\n return image", "title": "" }, { "docid": "c7b5ec7ea52f83bf0d6d98003573224d", "score": "0.57084966", "text": "async def nobully(self, ctx):\r\n\t\tauthor = ctx.message.author\r\n\t\twith aiohttp.ClientSession() as session:\r\n\t\t\tasync with session.get(\"http://i.imgur.com/mTfw7bk.jpg\") as resp:\r\n\t\t\t\ttest = await resp.read()\r\n\t\t\t\twith open(\"data/commands/Images/imgres.png\", \"wb\") as f:\r\n\t\t\t\t\tf.write(test)\r\n\t\tawait self.bot.upload(\"data/commands/Images/imgres.png\")", "title": "" }, { "docid": "b1a8b8eeafa0d255f0989251f242a47f", "score": "0.57015187", "text": "def image(self) -> 'outputs.ImageNoteResponse':\n return pulumi.get(self, \"image\")", "title": "" }, { "docid": "c7ac4fdc5e6282f762220f174831d274", "score": "0.5699423", "text": "def pictureDownload():\n redis_client = current_app.config['RDSCXN']\n if request.method == 'POST':\n data = request.get_json()\n if not data:\n error = 'Data Body Required'\n return errorResponse(error)\n uid = data['uid'] if 'uid' in data.keys() else None\n if uid is None:\n error = 'uid is required.'\n return errorResponse(error)\n profilePicUrl = redis_client.get(\"picture{}\".format(uid)) or \"\"\n return jsonResponse({'profilePicUrl': profilePicUrl})\n return jsonResponse()", "title": "" }, { "docid": "9db853f7ab8aa4624b43905a5baf1727", "score": "0.56641495", "text": "def get_random_image(sub):\n client_id = os.environ.get('IMGUR_CLIENT_ID')\n client_secret = os.environ.get('IMGUR_CLIENT_SECRET')\n\n if client_id and client_secret:\n client = ImgurClient(client_id, client_secret)\n\n subreddit_list = get_subreddit_list(sub)\n subreddit = client.subreddit_gallery(random.choice(subreddit_list))\n\n # Uncomment this if you want just a single image to be returned.\n # return random.choice(subreddit).link\n\n # We'll return 15 imgur photo links, each in its own line.\n sample = random.sample(subreddit, 15)\n return \"\\n\".join([item.link for item in sample])", "title": "" }, { "docid": "3410d34f13a08294a1277eaf345574ce", "score": "0.56637585", "text": "def _get_a_image_by_url( path ):\n req = urllib2.Request( url = path )\n f = urllib2.urlopen(req)\n return f.read()", "title": "" }, { "docid": "243a18b94783063b83454588922abb5a", "score": "0.5659159", "text": "async def praiselenny(self, ctx):\r\n\t\tauthor = ctx.message.author\r\n\t\twith aiohttp.ClientSession() as session:\r\n\t\t\tasync with session.get(\"https://images-2.discordapp.net/.eJwFwVEOgyAMANC7cACwmxTxMoQhAxK1hJYvs7vvvUfNcapdVZHOuzFH40Tj0Cw0Ysm6EJUzx95YJ7pMFImpXvkWNuDsa8Vt2Tw6sICABjygtR5gsevboUdrqIbPlEDfkGgOzoHb0P0u6vcH9hAoLA.eXBAkxrHMhUgUqy17NfChORgTOg.png?width=400&height=225\") as resp:\r\n\t\t\t\ttest = await resp.read()\r\n\t\t\t\twith open(\"data/commands/Images/imgres.png\", \"wb\") as f:\r\n\t\t\t\t\tf.write(test)\r\n\t\tawait self.bot.upload(\"data/commands/Images/imgres.png\")", "title": "" }, { "docid": "862dd0736bcf0c8858c0943dc9018b01", "score": "0.56472003", "text": "def 
get_bird_pic_flickr(photo_id):\n\n actual_image_URL = 'https://api.flickr.com/services/rest/'\n photo_url = []\n \n params = { 'method': 'flickr.photos.getSizes',\n 'api_key': os.environ['flickr_api_key'],\n 'photo_id': photo_id,\n 'format': 'json',\n 'nojsoncallback': '1',\n }\n \n r = requests.get(url=actual_image_URL, params=params)\n photo_data = r.json()\n\n bird_src = photo_data['sizes']['size'][3]['source']\n photo_url.append(bird_src)\n\n return photo_url", "title": "" }, { "docid": "2fc876aa6be288253a27843104e627df", "score": "0.56366044", "text": "def media_image_url(self):\n if url := self._track_info.get(\"artwork_url\"):\n url = self.api.full_url(url)\n return url", "title": "" }, { "docid": "88d50c4ad1297c6344a57a17ff8c3049", "score": "0.5636104", "text": "def get_thumb_image(self):\n return self._media_autoconfiguration[\"thumb_url\"] % self.media_params", "title": "" }, { "docid": "2d6325ea81762a558e93de4c24af02c6", "score": "0.56359106", "text": "def get_photo_url(self,*args,**kwargs):\n if self.photo:\n return \"/media/{}\".format(self.photo)\n else:\n return \"/static/images/carrot.jpg/\"", "title": "" }, { "docid": "7675fb9b0d3c0a1f90b3a4cd4ff89ee3", "score": "0.5632322", "text": "def random_dog_url():\n api_url = \"https://dog.ceo/api/breeds/image/random\"\n response = requests.get(api_url)\n if response.status_code == 200:\n js = json.loads(response.content.decode('utf-8'))\n return js['message']\n else:\n # Failsafe Labrador\n return \"https://dcewboipbvgi2.cloudfront.net/cdn/farfuture/F3Jhqj1h8Lw_ZY8KFN4psInhN8vPekhOtFUYDskKWJs/mtime:1496942436/sites/default/files/styles/article_hero_image/public/Puppy_Dog_Labrador_Jerry.jpg\"", "title": "" }, { "docid": "1000be11e5ad44263e71be1bc3a5eab3", "score": "0.5630348", "text": "def picture(self, widget, source, href=None):\n pass", "title": "" }, { "docid": "18c3c6c741daf2604ab7eb4a862161b6", "score": "0.5625596", "text": "def action_picture(self, nick, data=''):\n self.simple_search(nick, data, dclib.DCBot.SEARCH_TYPE_PICTURE)", "title": "" }, { "docid": "079268ef49932d1ff445b502acffa9a0", "score": "0.5622252", "text": "def get_image(self):\n\n return self.image", "title": "" }, { "docid": "b355016645a73c442830efabb9253a2e", "score": "0.56152475", "text": "def get_image(d_card_num, pic_loc=picture_loc):\n l_noPict=[]\n for i in d_card_num.keys():\n r = requests.get(url0+i, headers=headers)\n url1 = get_xpath(r, url1_xapth)\n if len(url1)<1:#if not picture\n print('not found :', i, 'num :', d_card_num[i])\n l_noPict.append((i,d_card_num[i]))\n continue\n if type(url1)==list:#get scd paper\n r1 = requests.get('http://www.orenoturn.com'+url1[0], headers=headers)\n else:\n r1 = requests.get('http://www.orenoturn.com'+url1, headers=headers)\n url_im = get_xpath(r1, url_img_xpath)\n if type(url_im)==list:#normally img_url\n url_im = 'http://' + re.search(r'img.*', url_im[0]).group()\n else:\n url_im = 'http://' + re.search(r'img.*', url_im).group()\n r2 = requests.get(url_im, headers=headers)\n \n im = Image.open(BytesIO(r2.content))\n dpi = cov_cm2dpi(resize_cm, ogi_px=im.size)\n num=d_card_num[i]\n while num>0:\n out=pic_loc+i+'_'+str(num)+\".jpg\"\n im.save(out, dpi=dpi)\n num-=1\n return l_noPict", "title": "" }, { "docid": "e0acfbed381d0e9c14324e802b65281e", "score": "0.5606069", "text": "def getImage(ip):\n try:\n # Here is a portion of the URL\n #######################################################################\n # TODO1 : Send a HTTP GET Request to the WebCam\n # (with Username:'admin' and Password:''). 
\n # We recommend using the httplib package \n h = httplib.HTTP(ip, 80) \n h.putrequest('GET', '/image.jpg')\n h.putheader('Host', ip)\n h.putheader('User-agent', 'python-httplib')\n h.putheader('Content-type', 'image/jpeg')\n h.putheader('Authorization', 'Basic {0}'.format(base64.b64encode(\"{0}:\".format('admin'))))\n h.endheaders()\n\n (returncode, returnmsg, headers) = h.getreply()\n print \"return code:\",returncode\n print \"return message:\",returnmsg\n print \"headers:\",headers\n if returncode != 200:\n print returncode, returnmsg\n sys.exit()\n\n f = h.getfile()\n return StringIO.StringIO(f.read())\n\n except Exception as e:\n print('!! Failed to connect to webcam: %s' % str(e))\n return None", "title": "" }, { "docid": "7d3e912dd77e5042674b423a8b42d85e", "score": "0.5605615", "text": "def get_photo(self):\n photo = self.photo\n if photo is not None:\n return photo\n\n for item in self.items.itervalues():\n if isinstance(item[1], Photo):\n return item[1]", "title": "" }, { "docid": "df881839a54666b4d7fea5c3b166bcca", "score": "0.55924594", "text": "def tweet_image(self, msg, img):\r\n uImg = self.upload_image(img)\r\n if uImg is None:\r\n return False\r\n\r\n r = self.api.request('statuses/update', {'status': msg, 'media_ids': uImg})\r\n if r.status_code == 200:\r\n self.log.log(logger.LogLevel.INFO, 'Tweeted(img): %s | %s' % (msg, img))\r\n return r.json()['id']\r\n else:\r\n self.log.log(logger.LogLevel.ERROR, 'Failed to tweet(img): %s, %s | %s' % (msg, img, r.text))\r\n return False", "title": "" }, { "docid": "b2f460369671c0db6833359509ba6f81", "score": "0.5586691", "text": "def get_album_image(track):\n info = get_track(track)\n album_image = info['album']['images'][0]['url']\n return album_image", "title": "" }, { "docid": "bd324084b27e26e0b012c8298f95f3a5", "score": "0.5584737", "text": "def get_img(self):\n try:\n img = io.imread(IMG_URL)\n #return io.imread(IMG_URL)# cv2.cvtColor(,cv2.COLOR_BGR2RGB)\n except Exception as e:\n print(\"error: No image\")\n img = None\n return img", "title": "" }, { "docid": "fe6d33a328b2b408cc74adc195b0cc9c", "score": "0.5584113", "text": "def fetch_image(url, tweet_id, path, stats_df):\n try:\n response = requests.get(url)\n if response.status_code == 404 or response.status_code == 403:\n remove_bad_image(tweet_id)\n else:\n img = Image.open(BytesIO(response.content))\n width, height = img.size\n img_stats = {'width': width, 'height': height,\n 'pixels': width * height}\n stats_df = stats_df.append(img_stats, ignore_index=True)\n write_size(tweet_id, width, height)\n img = img.resize((400, 400), PIL.Image.ANTIALIAS)\n filename = path + str(tweet_id) + '.jpg'\n img.save(filename)\n except Exception as err:\n print('Error on tweet id: ' + str(tweet_id))\n print(err)\n return stats_df", "title": "" }, { "docid": "65097f878d157d7871343b706305ad96", "score": "0.55828494", "text": "def image(self) -> 'outputs.ImageResponse':\n return pulumi.get(self, \"image\")", "title": "" }, { "docid": "92b4a512fde94dd9a10e78fe897e2abd", "score": "0.55794483", "text": "def get_teacher_pictures(teacher_id):\n\n teacher = db.query(Teacher). \\\n filter(Teacher.id == teacher_id). 
\\\n first()\n\n return jsonify({\n '_client_id': client_id,\n '_count': 1,\n 'pictures': teacher.get_pictures_json(client_id),\n })", "title": "" }, { "docid": "7dc0502f381ccce0c8e5efeb178b14a5", "score": "0.5576203", "text": "def get_image(request, username, imagename):\n master = request.user\n user = get_object_or_404(User, username=username)\n if master == user or master.check_relationship(user) == RELATIONSHIP_FRIENDS:\n file_on_disk = os.path.join(users_media, username, imagename)\n file_size = os.path.getsize(file_on_disk)\n content_type, _ = mimetypes.guess_type(imagename)\n response = HttpResponse()\n response.status_code = 200\n response['X-Accel-Redirect'] = request.path\n response['Content'] = content_type\n response['Content-length'] = file_size\n else:\n response = HttpResponseNotFound(\"File not found:{}\".format(imagename))\n return response", "title": "" }, { "docid": "c2881e020c7c88ecafd2b0003200ade4", "score": "0.5574741", "text": "def get_image(self, name):\n images = self.glance_client.images.list()\n for image in images:\n if image.name == name:\n return image\n return None", "title": "" }, { "docid": "b75544d9ffed53357d5e0fdf5140b48d", "score": "0.5556693", "text": "def get_image(id, target, dir, type, optional_name=\"\"):\n\t\t# Decide upon a filename, needed so dups of symbols wont happen\n\t\tpart_of_path = \"\"\n\t\tif optional_name != \"\":\n\t\t\tpart_of_path = str(optional_name)\n\t\telse:\n\t\t\tpart_of_path = str(id)\n\t\t# Check if photo exists if not SAVE it\n\t\tif os.path.isfile( \"img/\" + str(dir) + \"/c\" + str(part_of_path) + \".\" + str(type) ):\n\t\t\tprint \"Image exists - no-op\"\n\t\telse:\n\t\t\t# write cover photo to file\n\t\t\tresource = requests.get( url_img + target).raw\n\t\t\tr = requests.get(url_img + target, stream=True)\n\t\t\tif r.status_code == 200:\n\t\t\t\twith open(\"img/\" + str(dir) + \"/c\" + str(part_of_path) + \".\" + str(type),\"wb\") as f:\n\t\t\t\t\tfor chunk in r:\n\t\t\t\t\t\tf.write(chunk)", "title": "" }, { "docid": "c4d69f1d1c4e888198f8c321192d78ae", "score": "0.55488616", "text": "def getUserThumbnail(u_id):\n sanitized_uid = str(u_id)\n (res, code) = req('get', '/api/account/thumbnail/' + sanitized_uid)\n return res, code", "title": "" }, { "docid": "1decfe9eba29a4bed55036e0e15accf3", "score": "0.5548805", "text": "def get_img_url(self):\n compose = self.driver.find_elements_by_css_selector('.T-I-KE')[-1]\n compose.click()\n self.wait.until(\n expected_conditions.element_to_be_clickable((By.XPATH, './/textarea[contains(@aria-label, \"To\")]'))\n )\n to = self.driver.find_element_by_xpath('.//textarea[contains(@aria-label, \"To\")]')\n to.send_keys(self.target_email + ' ')\n self.wait.until(\n expected_conditions.visibility_of_element_located((By.CSS_SELECTOR, '.vT'))\n )\n mail_field = self.driver.find_element_by_css_selector('.vT')\n mail_field.click()\n sleep(0.3)\n mail_field = self.driver.find_element_by_css_selector('.vT')\n hover = ActionChains(self.driver).move_to_element(mail_field)\n hover.perform()\n self.wait.until(\n expected_conditions.frame_to_be_available_and_switch_to_it((By.CSS_SELECTOR, '#__HC_94253229 > iframe:nth-child(1)'))\n )\n self.wait.until(\n expected_conditions.visibility_of_element_located((By.XPATH, './/*[@alt= \"Profile Photo\"]'))\n )\n photo = self.driver.find_element_by_xpath('.//*[@alt= \"Profile Photo\"]')\n return photo.get_attribute('src')", "title": "" }, { "docid": "6cb8291275c2b9a61822cf64a6e018c8", "score": "0.5548192", "text": "def get_original_image(self, url):\n # these 
path parts somehow prevent us from changing the rest of media url\n #url = re.sub(r'/vp/[0-9A-Fa-f]{32}/[0-9A-Fa-f]{8}/', '/', url)\n # remove dimensions to get largest image\n #url = re.sub(r'/[sp]\\d{3,}x\\d{3,}/', '/', url)\n # get non-square image if one exists\n #url = re.sub(r'/c\\d{1,}.\\d{1,}.\\d{1,}.\\d{1,}/', '/', url)\n return url", "title": "" }, { "docid": "26500af7175a9adcf935980279636341", "score": "0.55446583", "text": "def get_thumbnail(self, options={}): \n raise NotImplementedError()", "title": "" }, { "docid": "c19085f980361cac9148b7e90a088a37", "score": "0.55324125", "text": "def get_thumbnail(self, obj):\n return obj.get_thumbnail_html(size=(48, 20))", "title": "" }, { "docid": "052c58fdce541f656eaf706e2911beef", "score": "0.55286217", "text": "def _fetch_image(url):\n req = requests.get(url, stream=True)\n img = Image.open(req.raw)\n img.load()\n return img", "title": "" }, { "docid": "11e3966a8bf99ac2cf2960b17a06872c", "score": "0.55189544", "text": "def extract_images_url(file_name):\r\n image_urls = []\r\n with open(file_name) as file:\r\n data = json.loads(file.read())\r\n\r\n for tweet in data:\r\n if 'extended_entities' in tweet.keys():\r\n extended_entities = tweet['extended_entities']['media']\r\n for extended_entity in extended_entities:\r\n if 'photo' == extended_entity['type']:\r\n image_urls.append(extended_entity['media_url'])\r\n\r\n return image_urls", "title": "" }, { "docid": "8f30f02362bc8d8f868af45e53a088fb", "score": "0.5513542", "text": "def get(apic, relative_url):\n # type: (APIC, str) -> Something\n refresh_token(apic)\n res = request(apic, 'GET', relative_url)\n return Option(res.json())['imdata']", "title": "" }, { "docid": "3174531787558bec204840fe7b839ed9", "score": "0.5509331", "text": "def get_video_pic(self, video_id):\n return \"\"\"<img src=\"http://img.youtube.com/vi/{0}/hqdefault.jpg\"></img>\"\"\".format(video_id)", "title": "" }, { "docid": "7eb95bd94f59f2f872d8cb70d91cb592", "score": "0.55081356", "text": "def getPhotoURL(photo, size='s') :\n \n return \"http://farm\" + photo.get('farm') + \".static.flickr.com/\" + photo.get('server') + \\\n \"/\" + photo.get('id') + \"_\"+photo.get('secret')+\"_\"+size+\".jpg\"", "title": "" }, { "docid": "71ebe2079d60146b3336c6530c000807", "score": "0.55079675", "text": "def get_avatar(request, size):\n if request.user.is_authenticated() and request.user.profile.avatar:\n avatar = get_thumbnail(request.user.profile.avatar, size)\n else:\n avatar = get_thumbnail('http://collegeinn.com/images/recipes/recipe-default.jpg', size)\n return avatar", "title": "" }, { "docid": "a257c6adc30bc8974fdf2649e490742b", "score": "0.5498967", "text": "def download_flickr_img(params, url, path_saved):\n r = requests.get(url, params)\n data = r.json()\n if r.status_code == 200 and ('sizes' in r.json()):\n downloader.download_img(\n data['sizes']['size'][-1]['source'], path_saved)", "title": "" }, { "docid": "0f70b91813ddc283024f3034bcd58869", "score": "0.54970235", "text": "def tweet(self):\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n if not is_logged():\n webbrowser.open(auth.get_authorization_url())\n pin = input('Verification pin number from twitter.com: ').strip()\n token = auth.get_access_token(verifier=pin)\n access_token = token[0]\n access_token_secret = token[1]\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth)\n login(api.me()._json.get('name'), token[0], token[1])\n else:\n access_token, access_token_secret = get_credentials()\n 
auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth)\n loqeusea = api.media_upload(f'{self.name}.jpg')\n api.update_status(f'#ASCIIArtPM1',media_ids=[loqeusea.media_id])", "title": "" }, { "docid": "84c216a20295982b1030b6f79da36e3f", "score": "0.5495829", "text": "def get_image(self, args):\n print(\"Getting Image\")\n return self.mmc.getImage()", "title": "" }, { "docid": "27c5b96b7a6fd66ca0bb706d0c0a5f6b", "score": "0.54927284", "text": "def get_image_url():\n baseurl = 'https://apod.nasa.gov/apod/'\n response = urllib.urlopen(baseurl + 'astropix.html')\n parser = ApodHtmlParser()\n parser.feed(response.read())\n response.close()\n return baseurl + parser.full_photo_url", "title": "" }, { "docid": "777c73ab5da0ff0a5c49199edc2cccc5", "score": "0.54908264", "text": "def getImageURL(self):\n img = self.draw()\n img.imagepath = os.path.abspath(dconf['ms_tmp_path'])\n fn = \"%s_%s_%s_%s.%s\" % (self.app, self.mid, self.sess_mid['map'], self.sess.session_id, self.imagetype)\n img_url = \"%s%s%s\" % (dconf['ms_tmp_url'], '' if dconf['ms_tmp_url'].endswith('/') else '/', fn)\n img.save(\"%s/%s\" % (os.path.abspath(dconf['ms_tmp_path']), fn))\n return img_url", "title": "" }, { "docid": "30347a825c4f28fc4f530a704ccd8391", "score": "0.54882044", "text": "def get_image(n):\n def _get_comic_page(n):\n return open_page('http://www.giantitp.com/comics/oots%04d.html' %n)\n\n def _get_img_url(page):\n return 'http://www.giantitp.com' + _img_url_regex.search(page).group(1)\n\n img_url = _get_img_url(_get_comic_page(n))\n log.info('Downloading \"%s\"', img_url)\n img = open_page(img_url)\n log.info('Download finished')\n return img, os.path.splitext(img_url)[1]", "title": "" }, { "docid": "50837ea400ed99268887a60e48b1e4aa", "score": "0.5485925", "text": "def get_meta_image(self):\n\n if self.podcast_image:\n return self.podcast_image\n else:\n return None", "title": "" }, { "docid": "d437f940829491ae413b614ba75dcf45", "score": "0.5481268", "text": "def get_avatar(cls, Uid):\n return cls._get(Uid).avatar", "title": "" } ]
25f31fa66315ff122abe3ecdb28aea6b
Sets the keyguard_disabled of this EnterprisePolicyData. Should KeyGuard be disabled?
[ { "docid": "16f2d6be8939f474fc51d4968abbf9c9", "score": "0.8649095", "text": "def keyguard_disabled(self, keyguard_disabled):\n\n self._keyguard_disabled = keyguard_disabled", "title": "" } ]
[ { "docid": "01e0ff0e55526b4b4f5e9f5c55b1e085", "score": "0.7613828", "text": "def keyguard_disabled(self):\n return self._keyguard_disabled", "title": "" }, { "docid": "a26b8f8bf253dc943f1427bc1a1603c8", "score": "0.59514517", "text": "def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Security.disable\", {})", "title": "" }, { "docid": "d6d472ab2771e5afe0b1d7e467650d0b", "score": "0.58652246", "text": "def safe_boot_disabled(self, safe_boot_disabled):\n\n self._safe_boot_disabled = safe_boot_disabled", "title": "" }, { "docid": "d8a49b064670c11e1de772bdca1fd3eb", "score": "0.5607342", "text": "def _disable_encryption(self):\n # () -> None\n self.encrypt = self._disabled_encrypt\n self.decrypt = self._disabled_decrypt", "title": "" }, { "docid": "430e3aaaa032e1c824653b2effc8883e", "score": "0.56056374", "text": "def testSetDisabled(self):\n self.mgr.setGoProEnabled(False)\n self.assertFalse(self.mgr.enabled)\n self.mockWrite.assert_called_with(\"GoProEnabled\", \"0\")\n self.mgr.setGimbalEnabledParam.assert_called_with()", "title": "" }, { "docid": "347f99ce17cc7611a646e5f0616c0be1", "score": "0.56044734", "text": "def disable(self):\n self.enabled = False\n self.__store(self)", "title": "" }, { "docid": "2a7f0e22790c6ed87f9655b7d80fdd21", "score": "0.5597612", "text": "def _disable(self):\n self.enabled = False", "title": "" }, { "docid": "fb787e949ba0d83d48b473adc38c3f15", "score": "0.55836296", "text": "def set_disabled(self, val):\n self._disabled = val", "title": "" }, { "docid": "fb787e949ba0d83d48b473adc38c3f15", "score": "0.55836296", "text": "def set_disabled(self, val):\n self._disabled = val", "title": "" }, { "docid": "f8c0f02b1283df6fd7561ad5b883f2b3", "score": "0.5575458", "text": "def set_smart_guard_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlVmCfg_SetSmartGuardEnabled', self.handle, bEnabled)", "title": "" }, { "docid": "275e0a3d139a11e61643739cadb3c8f5", "score": "0.55159926", "text": "def disabled(self, disabled):\n self._disabled = disabled", "title": "" }, { "docid": "a0e700847c9392a64855fe8502cf25f6", "score": "0.5496816", "text": "def test_disabled(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'true')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'disabled'\n key.audit(60, 80, 30, 20)\n assert key.audit_state == 'disabled'", "title": "" }, { "docid": "fead498412a7af8c638a1cce32bf9765", "score": "0.5485759", "text": "async def disable_protection(self) -> None:\n await self._request(\n \"dns_config\", method=\"POST\", json_data={\"protection_enabled\": False},\n )", "title": "" }, { "docid": "5583e44d5eb7ed322eb72126262ddfa4", "score": "0.5482145", "text": "def adb_disabled(self, adb_disabled):\n\n self._adb_disabled = adb_disabled", "title": "" }, { "docid": "b9f35bf77790f3cd526b14beee57f87a", "score": "0.544973", "text": "def google_assistant_disabled(self, google_assistant_disabled):\n\n self._google_assistant_disabled = google_assistant_disabled", "title": "" }, { "docid": "65a3357349c88aad4d9399265e782908", "score": "0.54316086", "text": "def disabled(self, disabled):\n\n self._disabled = disabled", "title": "" }, { "docid": "fb363f043443cc4ce8e89413a561ae9f", "score": "0.5424421", "text": "def disabled(self, disabled: bool):\n if disabled is None:\n raise ValueError(\"Invalid value for `disabled`, must not 
be `None`\")\n\n self._disabled = disabled", "title": "" }, { "docid": "8cf6ee3be31e230bdf62b786deb2ca82", "score": "0.5368662", "text": "def disable(self):\n self._enabled = False", "title": "" }, { "docid": "69a125c32ddc1f1c03bea80982631b25", "score": "0.5316436", "text": "def disable(self):\n self.enabled = False", "title": "" }, { "docid": "dfc55310c3e2618343478d27713ab09d", "score": "0.5311405", "text": "def deny_encryption_scope_override(self) -> Optional[bool]:\n return pulumi.get(self, \"deny_encryption_scope_override\")", "title": "" }, { "docid": "34c69ab5772e1db7bbe630cc0080bf2d", "score": "0.53081685", "text": "async def disable(self) -> None:\n try:\n await self.adguard.request(\"parental/disable\", method=\"POST\")\n except AdGuardHomeError as exception:\n raise AdGuardHomeError(\n \"Disabling AdGuard Home parental control failed\"\n ) from exception", "title": "" }, { "docid": "d83d0dba8e0ebe29816477d0b3646ae7", "score": "0.5305532", "text": "def disable_forging():\n if not __pillar__.get('secret'):\n\n return \"No secret set in pillar data\"\n\n else:\n\n secret = __pillar__.get('secret').strip()\n payload = {'secret': secret}\n\n return _get_api().delegates('disable_forging',\n payload)", "title": "" }, { "docid": "f99da02031245a4ce785850d0a5dacfe", "score": "0.5288372", "text": "def set_protection_enabled(self, c, state):\n self.enable_protection = state", "title": "" }, { "docid": "ea905a001a07e42905956253e893e6d4", "score": "0.5285271", "text": "def set_disabled_switch(self, disabled):\n self.disabled = disabled", "title": "" }, { "docid": "a1291968b5bb2d709917f362e0134141", "score": "0.5284216", "text": "def can_be_disabled(self) -> bool:\n return True", "title": "" }, { "docid": "d24c096a320562770ed02a91d2d8141f", "score": "0.52620476", "text": "def testSetDisabled(self):\n self.mgr.enabled = False\n self.mgr.setGimbalEnabledParam()\n self.mgr.shotMgr.vehicle.message_factory.param_set_encode.assert_called_with(0, mavutil.mavlink.MAV_COMP_ID_GIMBAL, # target system, target component\n \"GMB_GP_CTRL\", 0.0, mavutil.mavlink.MAV_PARAM_TYPE_REAL32 )", "title": "" }, { "docid": "c4e044f2524bf9b497b1d59dace50287", "score": "0.5260804", "text": "def disable(self):\n raise NotImplementedError", "title": "" }, { "docid": "50d622b71ea16e8046df71eb4c8f0d89", "score": "0.52356607", "text": "def disabled_not(self, disabled_not):\n\n self._disabled_not = disabled_not", "title": "" }, { "docid": "67db8be0f09c1d36f412577748909f6b", "score": "0.5230787", "text": "def __disable__(self) -> None:\n pass", "title": "" }, { "docid": "2642646f91643396b6e38277ce30f779", "score": "0.52238864", "text": "def disable(self):\n\t\tresponse = self.client.post(self._endpoint + \"/disable\")\n\t\treturn bool(response.json[\"success\"])", "title": "" }, { "docid": "486eb2ad05e9c6fbc43358960ec7817d", "score": "0.52210206", "text": "def disable_bprot(self):\n result = self._lib.NRFJPROG_disable_bprot()\n if result != NrfjprogdllErr.SUCCESS:\n raise APIError(result)", "title": "" }, { "docid": "66fada567faf60ee8dda25b8658b8b89", "score": "0.51893836", "text": "def _disable(self):\n self.debug_log(\"Disabling...\")\n self._unregister_handlers()", "title": "" }, { "docid": "f7067da9af0bc8bcec32274c79ca9389", "score": "0.5182784", "text": "def disable(self):\n self.error_code = 'DISABLED'\n self.running = False", "title": "" }, { "docid": "52fa6bf4a3efa7640dd35ba2e4124e58", "score": "0.5166869", "text": "def get_disable_secret_rotation(self) -> bool:\n return 
self._get_disable_secret_rotation(enable_validation=True)", "title": "" }, { "docid": "c74a052dc481f2fc5e7ea9543fa89a32", "score": "0.5146575", "text": "def disableEditing(self, disable):\n self.disabled = disable", "title": "" }, { "docid": "2b4dbeb8677021c1911b76e03e14e72c", "score": "0.51400113", "text": "def disable(self) -> None:", "title": "" }, { "docid": "4276c163cf37c65075ede8d41895326d", "score": "0.51233983", "text": "def disable(self):\n pass", "title": "" }, { "docid": "bbacef47b782cd9cf645b2771226bdb6", "score": "0.51165205", "text": "def test_no_disable(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'false')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'expire'", "title": "" }, { "docid": "976235fee7d9c7da007d55eeb3825527", "score": "0.5093382", "text": "def disable_play_store(self, disable_play_store):\n\n self._disable_play_store = disable_play_store", "title": "" }, { "docid": "9505ad6093f8255e15619e5f4cfcabb8", "score": "0.5088052", "text": "def disable(self):\n for val in data:\n val.disable()\n self.enabled = False", "title": "" }, { "docid": "746357a9f82f2f5dda093d2c10cf2cb2", "score": "0.5087968", "text": "def disable(self):\n return self.enable(False)", "title": "" }, { "docid": "7f0a5d83f3115fc8cce5bea4c1c6c8a1", "score": "0.50583017", "text": "def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Database.disable\", {})", "title": "" }, { "docid": "bca5f176f8e488838e145cd7c5ebd87c", "score": "0.50551337", "text": "def disable(self):\n super().disable()", "title": "" }, { "docid": "23934ec8a8fa53121d045d79ee8a8a19", "score": "0.5054608", "text": "def is_protected(cls, key, **kwargs):\n setting = cls.get_setting_definition(key, **kwargs)\n\n return setting.get('protected', False)", "title": "" }, { "docid": "f51a750778ca50030ceaafd1357bd6ef", "score": "0.5036245", "text": "def setBandwidthSaveModeDisabled(self, isDisabled):\n _ExceptionUtil.raiseOnError(\n internals.blpapi_SessionOptions_setBandwidthSaveModeDisabled(\n self.__handle,\n isDisabled))", "title": "" }, { "docid": "59ca1e3a37519271f6bf55826be8e29c", "score": "0.5029686", "text": "def screenshot_disabled(self, screenshot_disabled):\n\n self._screenshot_disabled = screenshot_disabled", "title": "" }, { "docid": "4baff9ba8852bc0654ebb031333bd120", "score": "0.5023333", "text": "def disable(self):", "title": "" }, { "docid": "8506c409f19ed6710500f4e131686e4c", "score": "0.50136805", "text": "def get_disable_vpa(self) -> bool:\n return self._get_disable_vpa(enable_validation=True)", "title": "" }, { "docid": "1d957d578f3acfb6a8afdbad5a78c365", "score": "0.50029874", "text": "def disable_emails(self, disable_emails):\n\n self._disable_emails = disable_emails", "title": "" }, { "docid": "2735e7fff0da186aaa567658a77ba4da", "score": "0.49957153", "text": "async def disable(self, **kwargs) -> None: # pylint: disable=unused-argument\r\n await self.set_ena(False)", "title": "" }, { "docid": "0f0c6550735ef27d8fb2078bd50dd6ae", "score": "0.49897465", "text": "def disable():\n boutonPierre[\"state\"] = \"disabled\"\n boutonFeuille[\"state\"] = \"disabled\"\n boutonCiseaux[\"state\"] = \"disabled\"", "title": "" }, { "docid": "dec6cafd557f4b4e4c61cca0c1218d61", "score": "0.49883357", "text": "def disable_pki(client, mount_point=\"pki\"):\n 
client.sys.disable_secrets_engine(mount_point)", "title": "" }, { "docid": "4bee12ff964f061d5519aab4ed0feb9a", "score": "0.4955769", "text": "async def enable_protection(self) -> None:\n await self._request(\n \"dns_config\", method=\"POST\", json_data={\"protection_enabled\": True},\n )", "title": "" }, { "docid": "72befa0fb2ce78127e01a6684687f583", "score": "0.49486887", "text": "def tethering_disabled(self, tethering_disabled):\n\n self._tethering_disabled = tethering_disabled", "title": "" }, { "docid": "147f262e970071dde9ac266b46e7a9fd", "score": "0.4946974", "text": "def purge_protection_enabled(self) -> bool:\n return pulumi.get(self, \"purge_protection_enabled\")", "title": "" }, { "docid": "7a122547762f1008d31e6d3962d957d0", "score": "0.4923745", "text": "def disable(self):\r\n self.update(enabled=False)", "title": "" }, { "docid": "3145daf534a1afb40d12edbb43877783", "score": "0.49220687", "text": "def fill_disable_ssl_verification(self, data):\n disable_ssl_verification = get_optional_value(data, self.DISABLE_SSL, False)\n self.verify_ssl = not bool(disable_ssl_verification)", "title": "" }, { "docid": "8ab9a9cb3104feb36e795ca882f35da4", "score": "0.49192578", "text": "def disabled(self) -> bool:\n return pulumi.get(self, \"disabled\")", "title": "" }, { "docid": "8ab9a9cb3104feb36e795ca882f35da4", "score": "0.49192578", "text": "def disabled(self) -> bool:\n return pulumi.get(self, \"disabled\")", "title": "" }, { "docid": "dd938fdf672084036d3c0f53456890b0", "score": "0.49146986", "text": "def disabledPeriodic(self):\n self.putData()", "title": "" }, { "docid": "7bd05ad11f7688eb99a6ad45c43e67ce", "score": "0.49121195", "text": "def emails_disabled(self, emails_disabled):\n\n self._emails_disabled = emails_disabled", "title": "" }, { "docid": "0565938c9287d482557031af62064b1e", "score": "0.48793617", "text": "def disabled(self) -> bool:\n return self._disabled", "title": "" }, { "docid": "b7a2f6adcff079e8b3c7fa2d12b56501", "score": "0.4855858", "text": "def set_data_protected(self): \n pass", "title": "" }, { "docid": "e531e143e7f96ee80e7090766dbacc95", "score": "0.48465225", "text": "def is_Disable_allowed(self):\n handler = self.get_command_object(\"Disable\")\n return handler.check_allowed()", "title": "" }, { "docid": "9abb23b8e1179c6199921778a2b8e22b", "score": "0.48407775", "text": "def on_disable(self) -> None:\n self._cancel_automation()", "title": "" }, { "docid": "1396181391222504b0a496f645f83a73", "score": "0.48273307", "text": "def safe_boot_disabled(self):\n return self._safe_boot_disabled", "title": "" }, { "docid": "ad4984a50230a15b37bc7d1e544f7248", "score": "0.4819977", "text": "def disable_detector(self):\n detector_id = self.list_detector()\n if detector_id:\n try:\n response = self.client.update_detector(DetectorId=detector_id, Enable=False)\n print(detector_id, 'has been disabled')\n return True\n except ClientError as e:\n print(e.response['Error']['Code'])\n return False\n else:\n print('no detector has been found.')\n return False", "title": "" }, { "docid": "d771c4a21781e51a18bb9d5aacfaec47", "score": "0.48195073", "text": "def disable(self):\n logging.debug(\"Disabling switch %s\" % self.name)\n self.disabled = True", "title": "" }, { "docid": "0a09f8646460ecab9c89f00f87559fb4", "score": "0.48094666", "text": "def nfc_beam_disabled(self, nfc_beam_disabled):\n\n self._nfc_beam_disabled = nfc_beam_disabled", "title": "" }, { "docid": "86ef4b249edd530bebd704a8965dcb1e", "score": "0.48056024", "text": "def disable(self):\n disable_request = 
self._commcell_object._services['DISABLE_SCHEDULE']\n\n request_text = \"taskId={0}\".format(self.schedule_policy_id)\n\n flag, response = self._commcell_object._cvpysdk_object.make_request(\n 'POST', disable_request, request_text)\n\n if flag:\n if response.json():\n error_code = str(response.json()['errorCode'])\n\n if error_code == \"0\":\n return\n else:\n error_message = 'Failed to disable Schedule Policy'\n\n if 'errorMessage' in response.json():\n error_message = \"{0}\\nError: {1}\".format(error_message, response.json()['errorMessage'])\n\n raise SDKException('Schedules', '102', error_message)\n\n else:\n raise SDKException('Response', '102')\n\n response_string = self._commcell_object._update_response_(\n response.text)\n raise SDKException('Response', '101', response_string)", "title": "" }, { "docid": "c4197f525eeecf956aa8c7bce7534859", "score": "0.4798153", "text": "def disable_weapon(self, weapon):\n if weapon == \"nothing\":\n weapon = 0\n elif weapon == \"main\":\n weapon = 1\n elif weapon == \"secondary\":\n weapon = 2\n elif weapon == \"everything\":\n weapon = 3\n cmd = '{}testDisableWeaponMode {}'.format(self.console, weapon)\n self.write_command(cmd)", "title": "" }, { "docid": "9a2a71505c05ced2976613231f0c1aba", "score": "0.4797927", "text": "def disallowed_vehicles(self, disallowed_vehicles):\n\n self._disallowed_vehicles = disallowed_vehicles", "title": "" }, { "docid": "9a2a71505c05ced2976613231f0c1aba", "score": "0.4797927", "text": "def disallowed_vehicles(self, disallowed_vehicles):\n\n self._disallowed_vehicles = disallowed_vehicles", "title": "" }, { "docid": "7daaee9fb7c70c470f75fcecc891af27", "score": "0.47928694", "text": "def on_disable(self) -> None:\n self._on_stop_cycle({})", "title": "" }, { "docid": "a32c7d7d9791eea7ad41a88735eed2e0", "score": "0.47855258", "text": "def DisableByRunIf(self):\n self.run_if = 'False'", "title": "" }, { "docid": "c9c79b765477e4b63a6c7900856f20e0", "score": "0.47838792", "text": "def set_exclusive(self, exclusive):\n self.widget.setExclusive(exclusive)", "title": "" }, { "docid": "a6af6b15d38f1251d5b17da86322573b", "score": "0.4781513", "text": "def setDisableWithLayer( self, state ):\n self._disableWithLayer = state\n self.setDirty()", "title": "" }, { "docid": "7eb684fea48d21a4c5d6eba9cc61ae78", "score": "0.47804755", "text": "def disable(self, subsystem=False):\n self.__dict__[\"enabled\"] = False\n\n if subsystem:\n self.subsystem.disable()", "title": "" }, { "docid": "df3b2b4b03a9eb7d7d9fcaae194d7db9", "score": "0.47713342", "text": "def disable(self):\n self.colour_combo.config(state=tk.DISABLED)\n self.game_name_entry.config(state=tk.DISABLED)\n self.num_tickets_entry.config(state=tk.DISABLED)", "title": "" }, { "docid": "2887940897dec508dc9f5acb24dacb71", "score": "0.47687528", "text": "def is_smart_guard_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsSmartGuardEnabled', self.handle))", "title": "" }, { "docid": "1017e735cda2e0ebc6b0475edfde2f53", "score": "0.4765396", "text": "def is_locking_disabled(self):\n return getattr(self, 'disable_locking', False)", "title": "" }, { "docid": "407b17fc7b03def3e99928a1c6ccfbce", "score": "0.47604686", "text": "def disabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"disabled\")", "title": "" }, { "docid": "0e7b099183b5ffccb5f4c15840821d6b", "score": "0.4758564", "text": "def google_assistant_disabled(self):\n return self._google_assistant_disabled", "title": "" }, { "docid": "1193524b0a288646ba49e8d80fd8fd40", "score": "0.47563398", "text": "def 
disable_cmd_restricted(self, cls):\n whitelist = self.get_availables_cmd(cls)\n if not whitelist:\n return True\n acessmethods = AcessMethods(cls, whitelist)\n setattr(self.cls, \"__getattribute__\", acessmethods.disabled_method)", "title": "" }, { "docid": "40b02878bd203fc566172104e114db5d", "score": "0.4755852", "text": "def deletion_protection_enabled(self) -> bool:\n return pulumi.get(self, \"deletion_protection_enabled\")", "title": "" }, { "docid": "d27416e5c54e2416dd2c8efb88f2a50c", "score": "0.47406423", "text": "def disable(self, modname):\n if self.cfg.blacklist and modname not in self.cfg.blacklist: self.cfg.blacklist.append(modname)\n if self.cfg.loadlist and modname in self.cfg.loadlist: self.cfg.loadlist.remove(modname)\n self.cfg.save()", "title": "" }, { "docid": "20a0c40145ba0c87089127863241d941", "score": "0.47332096", "text": "def disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disabled\")", "title": "" }, { "docid": "20a0c40145ba0c87089127863241d941", "score": "0.47332096", "text": "def disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disabled\")", "title": "" }, { "docid": "00dc750f8162fe33d01ab530e456a986", "score": "0.47329345", "text": "def disable_everything(self):\n zhinst.utils.disable_everything(self.daq, self.device_id)\n self.log.info(\"Disabled everything.\")", "title": "" }, { "docid": "8781476300d04ff883e6b6dcfe7eba93", "score": "0.47299847", "text": "def auth_state_unavailable(auth_state_enabled):\n crypto.CryptKeeper.instance().keys = []\n yield", "title": "" }, { "docid": "3188fdedaf08ef5657dc9a61ef4113d5", "score": "0.4725148", "text": "def adb_disabled(self):\n return self._adb_disabled", "title": "" }, { "docid": "5e11ddb54e22a64f51e4d7566b0f169b", "score": "0.4724051", "text": "def get_disabled(self):\n return self._disabled", "title": "" }, { "docid": "5e11ddb54e22a64f51e4d7566b0f169b", "score": "0.4724051", "text": "def get_disabled(self):\n return self._disabled", "title": "" }, { "docid": "cc448de9551f5a3965a6398e4ab8a538", "score": "0.4712655", "text": "def disable_play_store(self):\n return self._disable_play_store", "title": "" }, { "docid": "df7760920d7efcbcdd15d032ffab96d8", "score": "0.47121733", "text": "def disableDestruction(self):\n self.destructable = False", "title": "" }, { "docid": "8767dd8454d843c24763d45a7e0dde6f", "score": "0.47117078", "text": "def is_vuln_mode_disabled(self):\n # Set this value if you want the vuln data to be collected in the S3 file.\n return os.environ.get('DISABLE_VULN_MODE', 'false').lower() in ('1', 'yes', 'true')", "title": "" }, { "docid": "6094cd8545ebd5b4aca0ba2acd9e7a19", "score": "0.4709241", "text": "def disable_date(self, disable_date):\n\n self._disable_date = disable_date", "title": "" }, { "docid": "13fb89c52fd5032e65c13397aa5cfac0", "score": "0.4704631", "text": "def __setattr__(self, key: str, value: Any):\n if key == 'is_verified' and value is False and self.is_primary is True:\n raise PrimaryElementViolation(\"Can't remove verified status of primary element\")\n\n super().__setattr__(key, value)", "title": "" }, { "docid": "42b3cd1e89f6334dd3e20a208aa373b3", "score": "0.47039595", "text": "def disable_setup(self):\n self.high_ver_entry.config(state=\"disabled\")\n self.low_ver_entry.config(state=\"disabled\")\n self.left_hor_entry.config(state=\"disabled\")\n self.right_hor_entry.config(state=\"disabled\")", "title": "" }, { "docid": "e563960723019e46447f1ae61e5c5836", "score": "0.4699275", "text": "def allow_privilege_escalation(self) 
-> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_privilege_escalation\")", "title": "" } ]
eb71efa1a2962d64023d5d4afc01b0fa
Returns if there is any word in the trie that starts with the given prefix.
[ { "docid": "c41d511488f18931b5996f998378ce6c", "score": "0.7661376", "text": "def startsWith(self, prefix: str) -> bool:\n node = self.root\n for char in prefix:\n if char not in node.children:\n return False\n node = node.children[char]\n\n return True", "title": "" } ]
[ { "docid": "5d1a552ce96f28a17ff02733ba0212b7", "score": "0.84394646", "text": "def startsWith(self, prefix: str) -> bool: \n return(self.searchTrie(self.root, prefix, True))", "title": "" }, { "docid": "0a82b07344eb20900f183e4a08b3d6a7", "score": "0.843329", "text": "def startsWith(self, prefix: str) -> bool:\n t = self.trie\n i = 0\n failed = False\n while i < len(prefix) and not failed:\n failed = prefix[i] not in t\n if not failed:\n t = t[prefix[i]]\n i += 1 \n return not failed", "title": "" }, { "docid": "dadcffb3bf482974c68594b81af4c0b6", "score": "0.8428586", "text": "def startsWith(self, prefix):\n temp = self.trie\n for w in prefix:\n if w in temp:\n temp = temp[w]\n else:\n return False\n return True", "title": "" }, { "docid": "c90848e750e488f323036b7590bb83e9", "score": "0.84256893", "text": "def startsWith(self, prefix: str) -> bool:\n node = self.trie\n for w in prefix:\n if w not in node:\n return False\n node = node[w]\n \n return True", "title": "" }, { "docid": "30e50a38562c29bfa379745288d5cb9a", "score": "0.84208626", "text": "def startsWith(self, prefix: str) -> bool:\n root = self.trie\n for ch in prefix:\n if ch not in root:\n return False\n root = root[ch]\n return True", "title": "" }, { "docid": "ed23fc2f0a62b646333995cb7b15cef6", "score": "0.8417957", "text": "def startsWith(self, prefix):\n node = self.search_trie(prefix, 0)\n return bool(node)", "title": "" }, { "docid": "ff7f6a4d1565386de512cd8d4b67eb07", "score": "0.8405622", "text": "def startsWith(self, prefix):\n\n node = self.trie\n for c in prefix:\n node = node.get(c, None)\n if node is None:\n return False\n return True", "title": "" }, { "docid": "1510a574d511019d15e307f7e9429bff", "score": "0.8395297", "text": "def startsWith(self, prefix: str) -> bool:\n trie = self.trie\n for char in prefix:\n if char not in trie:\n return False\n trie = trie[char]\n return True", "title": "" }, { "docid": "9c2ffd8488ee039e0428ce464d494ff4", "score": "0.82884425", "text": "def startsWith(self, prefix):\n tmp = self.trie\n for charr in prefix:\n if charr not in tmp:\n return False\n tmp = tmp[charr]\n\n return True", "title": "" }, { "docid": "f221b048f2af9c2c1522a42b21ac0a5a", "score": "0.82635236", "text": "def startsWith(self, prefix: str) -> bool:\n curr = self.trie\n for c in prefix: \n if c not in curr.keys():\n return False\n curr = curr[c]\n return True", "title": "" }, { "docid": "fc622229ffd3dae60b19336b63686e4a", "score": "0.82159823", "text": "def startsWith(self, prefix):\n try:\n root = self\n for ch in prefix:\n root = root.trie[ch]\n except KeyError as e:\n return False\n\n return True", "title": "" }, { "docid": "7332aa7edfd326e7fb3fa98b2dfdf5a3", "score": "0.8149632", "text": "def startsWith(self, prefix):\n node = self.tries.get(prefix[0])\n if not node: return False\n for s in prefix[1:]:\n if s not in node.children:\n return False\n node = node.children[s]\n return True\n\n\n\n\n # Your Trie object will be instantiated and called as such:\n # obj = Trie()\n # obj.insert(word)\n # param_2 = obj.search(word)\n # param_3 = obj.startsWith(prefix)", "title": "" }, { "docid": "718570ddb9cc8c23ca8fa37cc243680b", "score": "0.81220734", "text": "def startsWith(self, prefix: str) -> bool:\n return True in [word.startswith(prefix) for word in self.__words]", "title": "" }, { "docid": "fd545dbab21fa068abc9e50b31298770", "score": "0.80960995", "text": "def starts_with(self, prefix: str) -> bool:\n current_trie = self.head\n for index in range(1, len(prefix) + 1):\n node_index = ord(prefix[:index][-1]) - 
ord('a')\n if current_trie.child_nodes[node_index]:\n current_trie = current_trie.child_nodes[node_index]\n else:\n return False\n return True", "title": "" }, { "docid": "5867ef803dc06a6f5ef5af4cc6b1b588", "score": "0.8024399", "text": "def startsWith(self, prefix: str) -> bool:\n curr = self.root\n\n for char in prefix:\n loc = Trie.child_loc(char)\n\n if not curr.children[loc]:\n return False\n\n curr = curr.children[loc]\n\n return True", "title": "" }, { "docid": "b03c9bdc827b93a64515a669b111e77a", "score": "0.79254913", "text": "def startsWith(self, prefix):\n node = self.root\n for i in xrange(0, len(prefix)):\n node = node.search(i, prefix)\n if not node:\n return False\n return True", "title": "" }, { "docid": "43415b0f787c71ba77188c1c34aee099", "score": "0.79068255", "text": "def startsWith(self, prefix):\n if len(prefix) == 0: return True\n found = self.__search__(prefix)\n return found and len(found) > 0", "title": "" }, { "docid": "e64ff64b8765bfc2cca48ecdb32db920", "score": "0.7892173", "text": "def startsWith(self, prefix):\n node = self.root\n for char in prefix:\n if char not in node:\n return False\n node = node[char]\n return True", "title": "" }, { "docid": "d269c1f2f1580f50501267553b4ef0a9", "score": "0.7889207", "text": "def startsWith(self, prefix):\n curr = self._root\n for idx, char in enumerate(prefix):\n if char not in curr['children']:\n return False\n curr = curr['children'][char]\n if idx == len(prefix) - 1:\n return True", "title": "" }, { "docid": "95f36c358954b75cb3640ab12b19fc51", "score": "0.78871316", "text": "def startsWith(self, prefix: str) -> bool:\n p = self.root\n for i in prefix:\n if p.next[ord(i) - ord('a')] == None:\n return False\n p = p.next[ord(i) - ord('a')]\n \n return True", "title": "" }, { "docid": "69530257be2eb7369d7a2a8e639db9b9", "score": "0.78862256", "text": "def startsWith(self, prefix: str) -> bool:\n s = self.root\n for char in prefix:\n if char not in s:\n return False\n else:\n s = s[char]\n return True", "title": "" }, { "docid": "fa82b83f55c0f99c248cb350d8290641", "score": "0.78860176", "text": "def startsWith(self, prefix: str) -> bool:\n node = self.root\n for char in prefix:\n if char not in node:\n return False\n node = node[char]\n return True", "title": "" }, { "docid": "e335f47ece9bd378ca40acd60b0a7453", "score": "0.7869209", "text": "def startsWith(self, prefix: str) -> bool:\r\n node = self.root\r\n for char in prefix:\r\n if char not in node:\r\n return False\r\n node = node[char]\r\n return True", "title": "" }, { "docid": "6737c0f6384bf7ef8ccf0234f9a0272f", "score": "0.7864041", "text": "def trie_prefix(prefix, root):\n\n if not isinstance(root, TrieNode):\n raise TypeError(\"Argument `root` is not a TrieNode object\")\n\n walk = root\n\n for c in prefix:\n if c in walk.children:\n walk = walk.children[c]\n else:\n return False\n\n return walk", "title": "" }, { "docid": "28b1b9668580b16e4137aa79a422c627", "score": "0.7863121", "text": "def startswith(self, prefix: str) -> bool:\n x = self._search(self.root, prefix, 0)\n if x is None:\n return False\n return True", "title": "" }, { "docid": "48d23bc40f23bb0f525212d4ac86bb23", "score": "0.78552794", "text": "def startsWith(self, prefix: str) -> bool:\n node = self.tree\n for a in prefix:\n if a in node.keys():\n node = node[a]\n else:\n return False\n return True", "title": "" }, { "docid": "c97c3a7f8a56bfd8f349e0ed65246620", "score": "0.7827465", "text": "def startsWith(self, prefix: str) -> bool:\n node = self.root\n for ch in prefix:\n if ch not in node:\n 
return False\n node = node[ch]\n return True", "title": "" }, { "docid": "34809accce03ba726af77abc8ec1e1ea", "score": "0.78244233", "text": "def startsWith(self, prefix):\n cur = self.root\n for x in prefix:\n x_index = ord(x) - ord('a')\n if cur.next[x_index] == None:\n return False\n cur = cur.next[x_index]\n return True", "title": "" }, { "docid": "a1e1abd580aa469604bc22b2ede2b31b", "score": "0.7808699", "text": "def startsWith(self, prefix: str) -> bool:\n pcrawl = self.root\n length = len(prefix)\n for level in range(length):\n index = self.chartoindex(prefix[level])\n if pcrawl.children[index]", "title": "" }, { "docid": "ac15351bc6ae1b50f5771b2e83869251", "score": "0.77895415", "text": "def startsWith(self, prefix):\n now = self.root\n for c in prefix:\n idx = ord(c) - ord('a')\n if now.next[idx]:\n now = now.next[idx]\n else:\n return False\n return True", "title": "" }, { "docid": "99156a0e7a2879f2c0d427cd0dd57bae", "score": "0.7788128", "text": "def startsWith(self, prefix: str) -> bool:\n current = self.root\n for w in prefix:\n current = current.children.get(w)\n if current == None:\n return False\n return True", "title": "" }, { "docid": "23222ea917f93e3a814f4a08f54273c3", "score": "0.7783289", "text": "def startsWith(self, prefix):\r\n temp = self.root\r\n for c in prefix:\r\n index = ord(c) - ord('a')\r\n if not temp.next[index]:\r\n return False\r\n temp = temp.next[index]\r\n return True", "title": "" }, { "docid": "f744e7f66bf2fc642ad80fe9fde16481", "score": "0.7776757", "text": "def startsWith(self, prefix: str) -> bool:\n return self.root.search(prefix, prefix=True)", "title": "" }, { "docid": "1f9d1094ed034936ea328403737d8f30", "score": "0.77750313", "text": "def startsWith(self, prefix: str) -> bool:\n node = self.root\n\n for i in prefix:\n\n if node.children[ord(i)-ord('a')] == None:\n return False\n node = node.children[ord(i)-ord('a')]\n\n return True", "title": "" }, { "docid": "07f1f2eb7cadea8ee03928fbd336b630", "score": "0.7770272", "text": "def startsWith(self, prefix: str) -> bool:\n node = self.searchPrefix(prefix)\n return node is not None", "title": "" }, { "docid": "98a89cd0ad35dde1440281cd4455f0ad", "score": "0.77297807", "text": "def startsWith(self, prefix: str) -> bool:\n curr = self.root\n \n for ch in prefix:\n \n if ch not in curr:\n return False\n \n curr = curr[ch]\n \n return True", "title": "" }, { "docid": "17359f1b661e9b481dae9e8a16e847a4", "score": "0.7722284", "text": "def startsWith(self, prefix):\n node = self.root\n for char in prefix:\n if char not in node.children:\n return False\n node = node.children[char]\n return True", "title": "" }, { "docid": "38f0625d42d949f8d9bfcf56d633aa23", "score": "0.77136356", "text": "def startsWith(self, prefix: str) -> bool:\n node = self.root\n for w in prefix:\n if not node.children[w]:\n return False\n node = node.children[w]\n return True", "title": "" }, { "docid": "2c1145793bc3afb816ebd2a8c3caaf4b", "score": "0.77062017", "text": "def startsWith(self, prefix):\r\n currNode = self.root\r\n for i in range(len(prefix)):\r\n char = prefix[i]\r\n if char not in currNode.children:\r\n return False\r\n currNode = currNode.children[char]\r\n return True", "title": "" }, { "docid": "c8f7e6016a7d7473fef0a9703a64e295", "score": "0.7701523", "text": "def startsWith(self, prefix: str) -> bool:\n curr = self.root\n for c in prefix:\n if c not in curr.next:\n return False\n curr = curr.next[c]\n return True", "title": "" }, { "docid": "39bdbe12a566613013c78aa916114ac1", "score": "0.767962", "text": "def 
startsWith(self, prefix):\n ptr = self.root\n for ch in prefix:\n if ch not in ptr['neighbors']:\n return False\n ptr = ptr['neighbors'][ch]\n \n return True", "title": "" }, { "docid": "59cdab4afe4794e58986b9366cab42c3", "score": "0.7674477", "text": "def contains(self, word, prefix=False):\n cur_node = self\n for letter in word:\n if letter in cur_node.children:\n cur_node = cur_node.children[letter]\n else:\n return False\n\n # If the current node doesn't end a word, return False unless prefix==True\n return cur_node.word_end or prefix", "title": "" }, { "docid": "39eda3b44bfea4df3832cf0a2083be7c", "score": "0.7665837", "text": "def starts_with(self, prefix):\n node = self.root\n for c in prefix:\n if c not in node.sons:\n return False\n node = node.sons[c]\n return True", "title": "" }, { "docid": "5478dede2a5a35c33279787b84e5541c", "score": "0.7652525", "text": "def startsWith(self, prefix: str) -> bool:\n i = 0\n node = self.root\n while(i < len(prefix)):\n if prefix[i] in node.children:\n node = node.children[prefix[i]]\n else:\n return False\n i += 1\n return True", "title": "" }, { "docid": "499a0956ea552268fd80805f3f8d353c", "score": "0.76447237", "text": "def starts_with(self, prefix):\n current = self.root\n for character in prefix:\n index = self.char_to_index(character)\n if not current.children[index]:\n return False\n current = current.children[index]\n return True", "title": "" }, { "docid": "83286bc712ccdde74258048d9038bb74", "score": "0.76354855", "text": "def startsWith(self, prefix):\r\n poz = self.root\r\n length = len(prefix)\r\n for level in range(length):\r\n i = self._charToIndex(prefix[level])\r\n if not poz.children[i]:\r\n return False\r\n poz = poz.children[i]\r\n \r\n return True", "title": "" }, { "docid": "72779097e49c00c54e86641eb34222a8", "score": "0.762668", "text": "def startsWith(self, prefix):\n tmp = self._dict\n for c in prefix:\n if not c in tmp:\n return False\n tmp = tmp[c]\n return True", "title": "" }, { "docid": "4bc9104565954347fa5033ddcf2ee33d", "score": "0.7621423", "text": "def prefix_trie_matching(text: str, trie: dict) -> bool:\n node = trie[0]\n for i in range(len(text)):\n symbol = text[i]\n if symbol in node:\n node = node[symbol]\n else:\n break\n if len(node) == 0:\n return True\n else:\n return False", "title": "" }, { "docid": "269d99d4a8ed987e47df281e428235c4", "score": "0.7617441", "text": "def startsWith(self, prefix: str) -> bool:\n node = self\n charList = [char for char in prefix]\n for char in charList:\n if char in node.childList:\n index = node.childList.index(char)\n node = node.children[index]\n else:\n return False\n return True", "title": "" }, { "docid": "416945cc1bd317e495153f847d001c23", "score": "0.7612122", "text": "def startsWith(self, prefix: str) -> bool:\n\n p = self.root\n for i in prefix:\n p = p.children.get(i)\n if p == None:\n return False\n return True", "title": "" }, { "docid": "24edd699647427ff35a36c4e8a4e2736", "score": "0.759234", "text": "def startsWith(self, prefix: str) -> bool:\n node = self.root\n for w in prefix:\n if w in node.children:\n node = node.children[w]\n else:\n return False\n \n return True", "title": "" }, { "docid": "6ba1e8374f6ed04383ca8f477f23df24", "score": "0.7589363", "text": "def starts_with(self, prefix):\n curr = self.root\n for letter in prefix:\n if letter not in curr.children:\n return False\n curr = curr.children[letter]\n return True", "title": "" }, { "docid": "10ab6a612250d7200dd523f7b135e70f", "score": "0.7584371", "text": "def startsWith(self, prefix: str) -> 
bool:\n if not prefix:\n return True\n root = self\n for each_char in prefix:\n idx = int(ord(each_char)) - 97\n if root.next[idx] is not None:\n root = root.next[idx]\n else:\n return False\n return True", "title": "" }, { "docid": "c0d56b7b06d1b7b44dad302775ae7b40", "score": "0.75830305", "text": "def startsWith(self, prefix: str) -> bool:\n node = self.root\n for i in prefix:\n if i not in node.next:\n return False\n node = node.next[i]\n return True", "title": "" }, { "docid": "4d0fdfb1629882b0e0022d06b01d0017", "score": "0.7575522", "text": "def startsWith(self, prefix):\n root = self.root\n for c in prefix:\n if c in root.children:\n root = root.children[c]\n else:\n return False\n return True", "title": "" }, { "docid": "0d9bd9c263ee30451438c551bda78574", "score": "0.7559532", "text": "def startsWith(self, prefix: str) -> bool:\n if prefix == \"\":\n return True\n if prefix[0] not in self.stack.keys():\n return False\n else:\n return self.stack[prefix[0]].startsWith(prefix[1:])", "title": "" }, { "docid": "a89ed3a9b7937a3a778e407ec40547fd", "score": "0.75348955", "text": "def startsWith(self, prefix: str) -> bool:\n cur = self.root\n for c in prefix:\n if c in cur.children:\n cur = cur.children[c]\n else:\n return False\n return True", "title": "" }, { "docid": "a175851807344a121e3ef232460edbf3", "score": "0.7517827", "text": "def startsWith(self, prefix):\n children = self.root.children\n for c in prefix:\n if c not in children:\n return False\n treenode = children[c]\n children = treenode.children\n return True", "title": "" }, { "docid": "35f817fbc94254f6d4f8a266e07deda3", "score": "0.75149447", "text": "def startsWith(self, prefix: str) -> bool:\n tmp = self.node\n for i in prefix:\n if i not in tmp.next:\n return False\n tmp = tmp.next[i]\n return True", "title": "" }, { "docid": "35f817fbc94254f6d4f8a266e07deda3", "score": "0.75149447", "text": "def startsWith(self, prefix: str) -> bool:\n tmp = self.node\n for i in prefix:\n if i not in tmp.next:\n return False\n tmp = tmp.next[i]\n return True", "title": "" }, { "docid": "2fa8597451d7632ee7e7d04ca8a11096", "score": "0.7505829", "text": "def startsWith(self, prefix: str) -> bool:\n return self._search(prefix) != {}", "title": "" }, { "docid": "c8cbd81a8e8df3a52e87e9b854e6242a", "score": "0.7498065", "text": "def prefixExists(self, prefix):\n node = self.root\n for c in prefix:\n if not node.containsChild(c):\n return False\n node = node.getChildNode(c)\n return True", "title": "" }, { "docid": "a185c140db17c98ef94e45bc59722249", "score": "0.74885845", "text": "def _has_tree_prefix(self, prefix: str) -> bool:\n return any(\n prefix in tree.name\n for tree in self.trees\n )", "title": "" }, { "docid": "990d7b7f1557302d030b4bfb2f296d99", "score": "0.7480631", "text": "def startsWith(self, prefix: str) -> bool:\n curr = self.root\n for key in prefix:\n if key in curr.children:\n curr = curr.children[key]\n else:\n # mismatch of prefix, early return\n return False\n\n # reach the desired node that with the given prefix\n return True", "title": "" }, { "docid": "a7719d910ebff037703b0492754a2204", "score": "0.74685484", "text": "def search(self, word: str) -> bool:\n node = self.trie\n for w in word:\n if w not in node:\n return False\n node = node[w]\n return '#' in node # check if word is inserted or just a prefix", "title": "" }, { "docid": "408b662856bf843fadf7517547ae21b5", "score": "0.7459677", "text": "def match_any_prefix(text, prefixes):\n return any([text.startswith(pre) for pre in prefixes])", "title": "" }, { "docid": 
"989ca54117ab751f1fe2106b4bc9f2d5", "score": "0.7451152", "text": "def startsWith(self, prefix):\n if len(prefix) == 0:\n return True\n\n if prefix[0] in self.son:\n return self.son[prefix[0]].startsWith(prefix[1:])\n else:\n return False", "title": "" }, { "docid": "3e3c1c5d0ebe7996de5a70eea7e8fbaf", "score": "0.7419602", "text": "def startsWith(self, prefix):\n node = self.root\n for i in prefix:\n if not i in node.children:\n return False\n node = node.children[i]\n return True", "title": "" }, { "docid": "e76e305535222a8672380810821dac9a", "score": "0.739338", "text": "def startsWith(self, prefix: str) -> bool:\n dic = self.dic\n for i in prefix:\n if i in dic:\n dic = dic[i]\n else:\n return False\n return True", "title": "" }, { "docid": "4f04739f9a51ae0e0d4f6f7e46a71c0d", "score": "0.73907334", "text": "def startsWith(self, prefix):\n current = self.__root\n for c in prefix:\n if current.has_child(c):\n current = current.get_child(c)\n else:\n return False\n return True", "title": "" }, { "docid": "2f2e440306e1739ab23872abbba4e4f3", "score": "0.737301", "text": "def starts_with_prefix(self, prefix):\n prefixed_words = list()\n\n if prefix is None:\n raise ValueError(\"Prefix must not be null/None.\")\n\n top_node = self.head\n for letter in prefix:\n if letter in top_node.children:\n top_node = top_node.children[letter]\n else:\n return prefixed_words\n \n if top_node == self.head:\n prefixes_queue = [node for key, node in top_node.children.items()]\n else:\n prefixes_queue = [top_node]\n\n while prefixes_queue:\n current_node = prefixes_queue.pop()\n if current_node.data is not None:\n prefixed_words.append(current_node.data)\n prefixes_queue = [node for key, node in current_node.children.items()] + prefixes_queue\n\n return prefixed_words", "title": "" }, { "docid": "f60e020f9e12235652e39da5a71bb5d7", "score": "0.73699576", "text": "def is_prefix(self, word):\n word = word.replace('.', '').upper()\n return word in self.prefixes", "title": "" }, { "docid": "7d91ea2d37d50a13b280ec66a60c96a5", "score": "0.73532164", "text": "def startsWith(self, prefix: str) -> bool:\n _, flag = self._end_node(prefix)\n return flag", "title": "" }, { "docid": "339af29c8a943ee87f2b88fd98b2b497", "score": "0.7338736", "text": "def start_with_prefix(self, prefix):\n words = list()\n if prefix == None:\n raise ValueError('Requires not-Null prefix')\n \n # Determine end-of-prefix node\n top_node = self.head\n for letter in prefix:\n if letter in top_node.children:\n top_node = top_node.children[letter]\n else:\n # Prefix not in tree, go no further\n return words\n \n # Get words under prefix\n if top_node == self.head:\n queue = [node for key, node in top_node.children.iteritems()]\n else:\n queue = [top_node]\n \n # Perform a breadth first search under the prefix\n # A cool effect of using BFS as opposed to DFS is that BFS will return\n # a list of words ordered by increasing length\n while queue:\n current_node = queue.pop()\n if current_node.data != None:\n # Isn't it nice to not have to go back up the tree?\n words.append(current_node.data)\n \n queue = [node for key,node in current_node.children.iteritems()] + queue\n \n return words", "title": "" }, { "docid": "73b71f0999d23c1c40d471bdbbf4ea90", "score": "0.7332664", "text": "def startswith(self, prefix):\n\n if not self.parts:\n return False\n else:\n return self.parts[0].startswith(prefix)", "title": "" }, { "docid": "6a981798ff2e572ffd79ae9e778c1553", "score": "0.7321072", "text": "def matches_prefix(self, prefix: list) -> bool:\n # if there 
aren't enough tokens to match the whole prefix, no\n if len(self.tokens) - self.index < len(prefix):\n return False\n\n for i in range(len(prefix)):\n if self.tokens[self.index + i].name != prefix[i]:\n return False\n return True", "title": "" }, { "docid": "6647ad3fcd687a5230643915cfa01c54", "score": "0.7253987", "text": "def find_prefix(root, prefix: str) -> Tuple[bool, int]:\n node = root\n # If the root node has no children, then return False.\n # Because it means we are trying to search in an empty trie\n if not root.children:\n return False, 0\n for char in prefix:\n char_not_found = True\n # Search through all the children of the present `node`\n for child in node.children:\n if child.char == char:\n # We found the char existing in the child.\n char_not_found = False\n # Assign node as the child containing the char and break\n node = child\n break\n # Return False anyway when we did not find a char.\n if char_not_found:\n return False, 0\n # Well, we are here means we have found the prefix. Return true to indicate that\n # And also the counter of the last node. This indicates how many words have this\n # prefix\n return True, node.counter", "title": "" }, { "docid": "91f86d11bc037e81ff616f858ce49c64", "score": "0.72529864", "text": "def startsWith(self, prefix: str) -> bool:\n tmp, idx = self.head, 0\n while idx < len(prefix) and prefix[idx] in tmp: # 找到插入的节点\n tmp = tmp[prefix[idx]][1]\n idx += 1\n return idx == len(prefix)", "title": "" }, { "docid": "b66d06fa64723300c2f056f8711cb2f1", "score": "0.72032166", "text": "def startswith(self, prefix):\n return self.value.startswith(prefix)", "title": "" }, { "docid": "efb6804c871fb9c4fca5ce4dbf5e8ed8", "score": "0.71434295", "text": "def is_match(prefix: str, value: str) -> bool:\n assert prefix != '', 'Cannot match with empty prefix'\n return value.lower().startswith(prefix.lower())", "title": "" }, { "docid": "89cf174a06d0286479f26754a52b2942", "score": "0.7101888", "text": "def search(self, word: str) -> bool:\n node = self.searchPrefix(word)\n return node is not None and node.isEnd()", "title": "" }, { "docid": "a57920d7b639eb09bf0e6629c1912a4c", "score": "0.7095519", "text": "def __some_prefix(s, st):\n if s == '':\n return False\n for e in st:\n if e.startswith(s):\n return True\n return False", "title": "" }, { "docid": "c883a3ce4b8257e39f049ec3096652b2", "score": "0.709494", "text": "def has_tree_prefix(mesh_id, tree_prefix):\n tree_numbers = get_mesh_tree_numbers(mesh_id)\n return any(tn.startswith(tree_prefix) for tn in tree_numbers)", "title": "" }, { "docid": "19c59c4a0db4e86c26c2b62e5912b41e", "score": "0.6931367", "text": "def search(self, word: str) -> bool:\n curr = self.root\n for key in word:\n if key in curr.children:\n curr = curr.children[key]\n else:\n # mismatch of prefix, early return\n return False\n \n # check if the node contains a value\n return curr.hasValue", "title": "" }, { "docid": "a5978eccf7f5642ae8699421d7e37bd2", "score": "0.69043934", "text": "def find_prefix_words(self, prefix):\n\n # Find prefix\n\n # Get list of suffixes from this point downward. 
This doesn't\n # include the prefix itself -- so four our sample trie, above,\n # 'ac' -> [e', 't']\n\n # Return list of words, joining the prefix to each suffix", "title": "" }, { "docid": "a3beedd83ab299ba159455d27cc20e49", "score": "0.6893603", "text": "def is_path(self, prefix: str):\n if not prefix:\n return False\n\n # start exploration from the root node\n current = self.__root\n upper_case_prefix = prefix.upper()\n\n # check that for each character, there exists a child node link from our trie root\n for char in upper_case_prefix:\n child = current.get_child(char)\n if not child:\n # at this point, there's no link from the root to the current character\n # therefore, the provided prefix does not exist in our dictionary, and we can return early\n return False\n # otherwise: if a link (child) is found, continue exploring\n current = child\n\n # when we reach this point, all characters in the prefix have been found in the trie\n return True", "title": "" }, { "docid": "7459587b15f745a9e2b4c764ad574940", "score": "0.6868195", "text": "def find_prefix(root, prefix: str, pos: str) -> Tuple[bool, int, str, list]:\n node = root\n matched = ''\n possibilities = []\n # If the root node has no children, then return False.\n # Because it means we are trying to search in an empty trie\n if not root.children:\n return False, 0, matched, possibilities\n for char in prefix:\n char_not_found = True\n # Search through all the children of the present `node`\n for child in node.children:\n if child.char == char:\n # We found the char existing in the child.\n char_not_found = False\n # Assign node as the child containing the char and break\n node = child\n matched += char\n if node.word_finished:\n possibilities.append(matched)\n break\n # Return False anyway when we did not find a char.\n if char_not_found:\n new_possibilities = []\n if node.word_finished:\n new_possibilities.append((matched, node.pos))\n for n in node.children:\n if n.word_finished:\n new_possibilities.append((matched + n.char, n.pos))\n return False, 0, matched, get_most_matched(new_possibilities, prefix, pos)\n # Well, we are here means we have found the prefix. Return true to indicate that\n # And also the counter of the last node. 
This indicates how many words have this\n # prefix\n return True, node.counter, matched, possibilities", "title": "" }, { "docid": "9baae5de173f1fbdf9a59641437cb468", "score": "0.6792369", "text": "def __contains__(self, prefix):\n\n return self.fetch(prefix) is not None", "title": "" }, { "docid": "13f96fbc883ad1694eaebccc0c15c02d", "score": "0.6789298", "text": "def search(self, word: str) -> bool:\n root = self.trie\n for ch in word:\n if ch not in root:\n return False\n root = root[ch]\n return 'word' in root", "title": "" }, { "docid": "f476345a9f6a18334bbcab2f1f7e21be", "score": "0.67870563", "text": "def has_prefix(sub_s):\n\tglobal dict\n\n\tif sub_s[0] in dict:\n\t\tif len(sub_s) == 1:\n\t\t\treturn True\n\t\tif sub_s[0:2] in dict[sub_s[0]]:\n\t\t\tfor string in dict[sub_s[0]][sub_s[0:2]]:\n\t\t\t\tif string.startswith(sub_s):\n\t\t\t\t\treturn True\n\treturn False", "title": "" }, { "docid": "47194bae76c5320e4e0b854f26f8428b", "score": "0.67664164", "text": "def is_prefix_code(words: List[str]) -> bool:\n for i, w1 in enumerate(words):\n for j, w2 in enumerate(words):\n if i != j and w1.startswith(w2):\n return False\n return True", "title": "" }, { "docid": "7c391d1fa4aea12edf81c0c398d9c0c9", "score": "0.67024386", "text": "def find_count(root, prefix: str):\n node = root\n # If the root node has no children, then return False.\n # Because it means we are trying to search in an empty trie\n if not root.children:\n return False, 0\n for char in prefix:\n char_not_found = True\n # Search through all the children of the present `node`\n for child in node.children:\n if child.char == char:\n # We found the char existing in the child.\n char_not_found = False\n # Assign node as the child containing the char and break\n node = child\n break\n # Return False anyway when we did not find a char.\n if char_not_found:\n return 0\n # Well, we are here means we have found the prefix. Return true to indicate that\n # And also the counter of the last node. 
This indicates how many words have this\n # prefix\n return node.counter", "title": "" }, { "docid": "c16ac616b4c1cdbe042041b0ca76fb02", "score": "0.66946626", "text": "def contains_word(self, word):\n if len(word) < 1:\n return False\n if word is None:\n raise ValueError(\"Trie.contains_word() requires a non-empty string.\")\n\n current_node, does_exist = self.head, True\n\n for letter in word:\n if letter in current_node.children:\n current_node = current_node.children[letter]\n else:\n does_exist = False\n break\n\n if does_exist:\n if current_node.data == None:\n does_exist = False\n\n return does_exist", "title": "" }, { "docid": "20dbe843f09110a16eef746c0695e3a1", "score": "0.66937584", "text": "def is_complete_prefix_code(words: List[str]) -> bool:\n return is_prefix_code(words) and mcmillan_sum(words) == 1", "title": "" }, { "docid": "d760d8344a924d43f86e2934f8d53785", "score": "0.66620356", "text": "def search(self, word: str) -> bool:\n current_trie = self.head\n for index in range(1, len(word) + 1):\n node_index = ord(word[:index][-1]) - ord('a')\n if current_trie.child_nodes[node_index]:\n current_trie = current_trie.child_nodes[node_index]\n else:\n return False\n return current_trie.child_nodes == [None] * 26", "title": "" }, { "docid": "3e1cc68b7ea67dd792926a20cc5b670f", "score": "0.6660197", "text": "def search(self, word):\n try:\n root = self\n for ch in word:\n root = root.trie[ch]\n except KeyError as e:\n return False\n\n return root.word is not None", "title": "" }, { "docid": "51b3c1b11249c833172fe27b90e424e6", "score": "0.6658243", "text": "def __contains__(self, word):\n\t\tcurrent_dict = self.keyword_trie_dict\n\t\tlen_covered = 0\n\t\tfor char in word:\n\t\t\tif char in current_dict:\n\t\t\t\tcurrent_dict = current_dict[char]\n\t\t\t\tlen_covered += 1\n\t\t\telse:\n\t\t\t\tbreak\n\t\treturn self._keyword in current_dict and len_covered == len(word)", "title": "" }, { "docid": "a558d758f50c036ad466cc0bc6c799ab", "score": "0.6637899", "text": "def _has_tree_prefixes(self, prefixes) -> bool:\n return any(\n prefix in tree.name\n for prefix, tree in itt.product(prefixes, self.trees)\n )", "title": "" }, { "docid": "8343044b390120efb637dec502505e13", "score": "0.66144466", "text": "def exists(self, prefix, node = None):\n if not node:\n node = self.root\n\n if not prefix.startswith(node.prefix):\n return False\n elif node.prefix == prefix:\n return True\n elif not node.children:\n return False\n else:\n for child in node.children:\n if prefix.startswith(child.prefix):\n return self.exists(prefix, child)", "title": "" }, { "docid": "f8b4ea925ba7f27cf78b794e0e4d32c6", "score": "0.65974736", "text": "def find_words(self, prefix):\n cur_node = self.root\n for char in prefix:\n if char not in cur_node.next:\n return []\n cur_node = cur_node.next[char]\n ans = []\n if cur_node.is_word:\n ans.append(prefix)\n for key in cur_node.next.keys():\n self.dfs(prefix + key, cur_node.next[key], ans)\n return ans", "title": "" } ]
4ac27fb0f988bff24354ee1a1b987a18
Renders the home page.
[ { "docid": "79a1cec6577b7e28956c3623e2ffc701", "score": "0.0", "text": "def home(request):\n\n #assert savesave_to_bd.save_to_bd()\n save_to_bd.save_to_bd()\n\n assert isinstance(request, HttpRequest)\n\n table, folium_map = plot_map.show()\n\n return render(\n request,\n 'app/index.html',\n {\n 'map': folium_map._repr_html_(),\n 'table': table,\n 'title':'Map',\n 'year':datetime.now().year,\n }\n )", "title": "" } ]
[ { "docid": "93b0d81a4e377798d73883654df1785e", "score": "0.86085397", "text": "def home():\n\treturn render_template('home.html')", "title": "" }, { "docid": "93b0d81a4e377798d73883654df1785e", "score": "0.86085397", "text": "def home():\n\treturn render_template('home.html')", "title": "" }, { "docid": "93b0d81a4e377798d73883654df1785e", "score": "0.86085397", "text": "def home():\n\treturn render_template('home.html')", "title": "" }, { "docid": "07d8ac6a8bb6163b8e1dce951255e3ec", "score": "0.84564984", "text": "def home():\r\n return render_template('home.html')", "title": "" }, { "docid": "32316a16667a76b3a02144f264963832", "score": "0.8441976", "text": "def home():\r\n return render_template(\"home.html\")", "title": "" }, { "docid": "7535d5aa33292749bd14571f01795b34", "score": "0.8303572", "text": "def home():\r\n\r\n return render_template('index.html')", "title": "" }, { "docid": "a5802c3e0cba4018af4c5778253ce4b0", "score": "0.8282502", "text": "def home():\n\n title_page = \"HOME\"\n return render_template(\"home.html\",title_page=title_page)", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { 
"docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "f9e9a047407a7e73406083c92df41cfd", "score": "0.8279762", "text": "def home():\n return render_template('home.html')", "title": "" }, { "docid": "08d5344d382217947cd0c6b57070d987", "score": "0.82759887", "text": "def home():\n return render_template(\"home.html\")", "title": "" }, { "docid": "08d5344d382217947cd0c6b57070d987", "score": "0.82759887", "text": "def home():\n return render_template(\"home.html\")", "title": "" }, { "docid": "08d5344d382217947cd0c6b57070d987", "score": "0.82759887", "text": "def home():\n return render_template(\"home.html\")", "title": "" }, { "docid": "08d5344d382217947cd0c6b57070d987", "score": "0.82759887", "text": "def home():\n return render_template(\"home.html\")", "title": "" }, { "docid": "04ae4a0cebcab769b0ad77439afcb533", "score": "0.82107955", "text": "def home():\n return render_template('HomePage.html',\n title='Home',\n year=datetime.now().year,\n message='Message!.')", "title": "" }, { "docid": "be9ff638e08e8baef80762a11f768c44", "score": "0.8151845", "text": "def homepage():\n return render_template('home.html', title=\"Welcome\")", "title": "" }, { "docid": "19ccb4211e4217d9a9474e2be9cc7d34", "score": "0.81287277", "text": "def home():\n return render_template(\n '/ClientFinePage.html',\n title='Home Page',\n year=current_year,\n )", "title": "" }, { "docid": "ea8ebe28db26fbcb25ae0c8f42cd99af", "score": "0.812513", "text": "def homepage():\n return render_template('home/index.html')", "title": "" }, { "docid": "901f074c1714f113f936e835c20035d8", "score": "0.81234246", "text": "def home(request):\n\n\ttitle = \"Home\"\n\n\tcontext = {'title':title}\n\treturn render(request, 'ChowNowApp/template_home.html', context)", "title": "" }, { "docid": "5468256264451085292fa4bbc35e2e1a", "score": "0.8121039", "text": "def homepage():\n return render_template('home/index.html', title=\"Home\")", "title": "" }, { "docid": "d46ea148099e24d575da5e964ee70952", "score": "0.8091238", "text": "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "title": "" }, { "docid": "db79cdefd5940cf1527f7c064c918d5b", "score": "0.80608845", "text": "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "title": "" }, { "docid": "db79cdefd5940cf1527f7c064c918d5b", "score": "0.80608845", "text": "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "title": "" }, { "docid": "db79cdefd5940cf1527f7c064c918d5b", "score": "0.80608845", "text": "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "title": "" }, { "docid": "db79cdefd5940cf1527f7c064c918d5b", "score": "0.80608845", "text": "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "title": "" }, { "docid": "db79cdefd5940cf1527f7c064c918d5b", "score": "0.80608845", 
"text": "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "title": "" }, { "docid": "db79cdefd5940cf1527f7c064c918d5b", "score": "0.80608845", "text": "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "title": "" }, { "docid": "db79cdefd5940cf1527f7c064c918d5b", "score": "0.80608845", "text": "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "title": "" }, { "docid": "db79cdefd5940cf1527f7c064c918d5b", "score": "0.80608845", "text": "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "title": "" }, { "docid": "db79cdefd5940cf1527f7c064c918d5b", "score": "0.80608845", "text": "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "title": "" }, { "docid": "db79cdefd5940cf1527f7c064c918d5b", "score": "0.80608845", "text": "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "title": "" }, { "docid": "c3ba2ac6a6c67449c2cdaa6219dd32fc", "score": "0.8050424", "text": "def home():\n path = '{}/{}'.format(app.config['PAGE_DIR'], 'home')\n page = flatpages.get_or_404(path)\n return render_template('page.html', page=page)", "title": "" }, { "docid": "03a88c85de015397215f911305d16456", "score": "0.80282336", "text": "def home():\n return render_template(\n \"home.html\",\n profiles=profiles,\n albums=albums,\n projects=projects,\n )", "title": "" }, { "docid": "10c8bf328028bf952daf7adba2d02eed", "score": "0.80130893", "text": "def home():\n\n return render_template(\n 'index.html',\n title='Jarvis'\n )", "title": "" }, { "docid": "458a536e5829ff6e0f2a6e588f8829d9", "score": "0.8012217", "text": "def home():\n\n return render_template(\"index.html\")", "title": "" }, { "docid": "3a920c919fba24c2452893b6f6124cc1", "score": "0.79877025", "text": "def home_page():\n return render_template('home/index.html', title=\"Inicio\")", "title": "" }, { "docid": "3bb10e053f1f9a6c466cea0c9723b581", "score": "0.79865545", "text": "def homepage():\n\n return render_template('homepage.html')", "title": "" }, { "docid": "d310dc548280c5f13080f46bb2c0f917", "score": "0.7979365", "text": "def show_home(request):\n return render(request, 'coreapp/home.html')", "title": "" }, { "docid": "9c88382d0ead7a2c8fe1563f25518fd4", "score": "0.7969911", "text": "def index():\n return render_template(\n 'home.html'\n )", "title": "" }, { "docid": "4cc64628f21a19d92279f7a587498984", "score": "0.7957263", "text": "def home():\n return render_template(\n 'index.jade',\n title='Home Page',\n year=datetime.now().year,\n )", "title": "" }, { "docid": "011a97527c37fd5e9a296c3868f539dc", "score": "0.7955161", "text": "def home():\n return render_template('index.html',\n title='Home Page',\n year=datetime.now().year)", "title": "" }, { "docid": "a96a58f3eb46490ee49bcd8c5952b992", "score": "0.7946966", "text": "def home():\n return render_template('index.html')", "title": "" }, { "docid": "a96a58f3eb46490ee49bcd8c5952b992", "score": "0.7946966", "text": "def home():\n return render_template('index.html')", "title": "" }, { "docid": "a96a58f3eb46490ee49bcd8c5952b992", "score": "0.7946966", "text": "def home():\n return render_template('index.html')", "title": "" }, { "docid": "a96a58f3eb46490ee49bcd8c5952b992", "score": "0.7946966", "text": "def home():\n return render_template('index.html')", 
"title": "" }, { "docid": "6f79a0b01a91ec07b492b3f44ab28f8f", "score": "0.79432386", "text": "def home():\n return render_template('index.html',)", "title": "" }, { "docid": "34e054deae5f6d2541daa8cc51f45f26", "score": "0.79275405", "text": "def home():\n return render_template(\"index.html\")", "title": "" }, { "docid": "34e054deae5f6d2541daa8cc51f45f26", "score": "0.79275405", "text": "def home():\n return render_template(\"index.html\")", "title": "" }, { "docid": "bbc85bd9e51211fae056164cbee5ac8d", "score": "0.79243827", "text": "def home_page():\n return render_template('index.html')", "title": "" }, { "docid": "aff1bb3a7d03b5db8fba22cbba3929e5", "score": "0.7909518", "text": "def home():\n return render_template('home.html',first=True)", "title": "" }, { "docid": "80b74ec6b8c9db74020a1a39dae85207", "score": "0.7869666", "text": "def homepage():\n return render_template('index.html')", "title": "" }, { "docid": "80b74ec6b8c9db74020a1a39dae85207", "score": "0.7869666", "text": "def homepage():\n return render_template('index.html')", "title": "" }, { "docid": "6e3901d01933633e0a26d100f1cf1014", "score": "0.7865287", "text": "def home():\n return render_template(\n 'index.html',\n title='Home Page',\n regions_list = regions,\n selected_region_index = 0,\n provinces_list = provinces[provinces.codice_regione==regions.codice_regione[0]],\n selected_province_index = 0\n )", "title": "" }, { "docid": "1d30805b94099b5001dd0cb751dfc313", "score": "0.78397673", "text": "def home(): \n return render_template(\n 'index.html', title=\"Главная страница\")", "title": "" }, { "docid": "d5dc2fee1595715456ebfc1178a0f1bc", "score": "0.7826738", "text": "def index():\n return render_template('home.html')", "title": "" }, { "docid": "74fe04c56625d726d9e8db8cf20507f8", "score": "0.78032357", "text": "def index():\n return render_template('home/index.html')", "title": "" }, { "docid": "4e6b131e15cb54841b311931f7b7cad4", "score": "0.7797248", "text": "def home():\n\n # If no one is logged in, show the anon home page.\n if not g.user:\n return render_template(\"home-anon.html\")\n\n rooms = Room.query.all()\n\n return render_template(\"home.html\", user=g.user, rooms=rooms)", "title": "" }, { "docid": "144146e4eff32c1c3c2224972a7733f4", "score": "0.7795442", "text": "def home():\n\n print(\"page loaded\")\n return render_template('layouts/panel.html')", "title": "" }, { "docid": "b767f8dc78cb8b18116c7c14a15c1a20", "score": "0.7762826", "text": "def index():\n return render_template(\"app/home.html\")", "title": "" }, { "docid": "d11996f6ff8a9d7f2d7bf4e0223967e4", "score": "0.77391165", "text": "def home():\n return render_template('./index.html')", "title": "" }, { "docid": "95adbf086a69eadd5f94ad3fab0ff4a1", "score": "0.7706899", "text": "def home():\n return render_template('base.html')", "title": "" }, { "docid": "89c5701fa0dc04a321fd59baf587a7bf", "score": "0.7697904", "text": "def showHome():\n return render_template('index.jade',\n title='Welcome',\n year=datetime.now().year)", "title": "" }, { "docid": "085f8cb12df540126c5e6cfc2e4148d0", "score": "0.769332", "text": "def home():\n #return redirect(url_for('Under'))\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )", "title": "" }, { "docid": "9188f150cdd76922c539f094214631b2", "score": "0.7649646", "text": "def home(request):\n context = initialize_context(request)\n\n return render(request, 'home.html', context)", "title": "" }, { "docid": "06cdee22e6b02ea4bac1c2cdf4b78123", "score": "0.764843", "text": 
"def home():\r\n return_403('lecturer_id')\r\n return render_template(\"student/home.html\", title=\"Students Homepage\", home=True, is_student=True,\r\n pid=session['student_id'])", "title": "" }, { "docid": "fdf2e8c089a2b713c2104cbb5403b450", "score": "0.7639184", "text": "def show_homepage():\n\n return render_template(\"homepage.html\")", "title": "" }, { "docid": "1fb3b21593e41a41d1d674d7b1ccbf42", "score": "0.763282", "text": "def home(request):\n return render(request, 'base/home.html')", "title": "" }, { "docid": "190019f66d3db73a5cdc76a495761daa", "score": "0.76165605", "text": "def homepage(request):\n return render(request, \"home.html\")", "title": "" }, { "docid": "fd70e062f61e6e52d32a01c84a87e9ca", "score": "0.76081216", "text": "def home():\n return render_template('home.html', Catagory=Catagory, Item=Item)", "title": "" }, { "docid": "206704651bf4f63e2a400e22a85810b9", "score": "0.75778896", "text": "def home():\n return render_template('gameshome.html')", "title": "" }, { "docid": "eb81ffcfd1863655b22f1a1781861fd5", "score": "0.7569973", "text": "def home(request):\n return render(request, 'home.html')", "title": "" }, { "docid": "fda1103e02ce2cb98a54b5135ca19d4d", "score": "0.7548318", "text": "def home(request):\n\n context = {}\n\n return render(request, 'home.html', context)", "title": "" }, { "docid": "0d96f85097dd896246765cac07003104", "score": "0.7541632", "text": "def home(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/index.html', {\n 'title': 'Home Page',\n 'year': datetime.now().year,\n }\n )", "title": "" }, { "docid": "2751281eff6d70c50f1818fdf8a46d07", "score": "0.7541434", "text": "def home(request):\n assert isinstance(request, HttpRequest)\n return render(\n request, \"app/index.html\", {\"title\": \"Home Page\", \"year\": datetime.now().year}\n )", "title": "" }, { "docid": "06270c3c3ab125fa27f8f913b7d5b89a", "score": "0.7540675", "text": "def home(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/index.html',\n context_instance = RequestContext(request,\n {\n 'title':'Home Page',\n 'year':datetime.now().year,\n })\n )", "title": "" }, { "docid": "26b6b80bee2f2328bc018d6abacbebad", "score": "0.7538162", "text": "def home():\n return render_template('info.html')", "title": "" }, { "docid": "534bdac822a8aead7bc335960c9bfd84", "score": "0.75355184", "text": "def home(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/index.html',\n {\n 'title': 'Home Page',\n 'year': datetime.now().year,\n }\n )", "title": "" }, { "docid": "ae4c3741a9f8d921b12fbd0916e1e341", "score": "0.7534953", "text": "def render_homepage():\n\n return render_template('index.html')", "title": "" }, { "docid": "ae4149903cd519b168d19cd109ab5aec", "score": "0.7528647", "text": "def home(request):\n response = render(request, \"service/home.html\")\n return response", "title": "" }, { "docid": "69608d1f981b04d837b1e76771f2a8df", "score": "0.7521853", "text": "def home():\n\n # if user is loggedIn\n if already_loggedIn():\n return render_template('home.html', user_info = user_info(session['user_id']) , all_users = users.query.all())\n # otherwise\n else:\n return render_template('home.html')", "title": "" }, { "docid": "3d61eea45cd5c4382550877812542e61", "score": "0.7506135", "text": "def index():\n\treturn render_template('index.html', title='Home')", "title": "" }, { "docid": "a1314f888a184581cf504fb1587ee024", "score": "0.7502744", "text": "def home():\n return template('home')", 
"title": "" } ]
93a4e194462b6657722b80220b536963
_main_ provide support for some basic docker operations so that building images can be standardised as part of a workflow
[ { "docid": "ecd643d4ccd428e0fa21ebc05bb2a5ce", "score": "0.64680725", "text": "def main():\n opts = build_parser()\n config = load_configuration()\n if not config.has_section('docker'):\n msg = (\n \"Unable to find docker section in cirrus.conf\"\n #TODO: Link to docs here\n )\n LOGGER.error(msg)\n sys.exit(1)\n\n if not is_docker_connected():\n LOGGER.error(DOCKER_CONNECTION_HELP)\n sys.exit(1)\n\n if opts.command == 'build':\n docker_build(opts, config)\n if opts.command == 'push':\n docker_push(opts, config)\n if opts.command == 'test':\n # Already called above\n pass", "title": "" } ]
[ { "docid": "1f58d9f59126c05bb35db12984aabc71", "score": "0.72338295", "text": "def docker() -> None:\n pass", "title": "" }, { "docid": "6c1687f1282d12b20080c89c4e51a02e", "score": "0.6880054", "text": "def build(ctx):\n ctx.run('docker build -t {} .'.format(cli_image_name))", "title": "" }, { "docid": "96397b5822dc28bad634ae58abf614ba", "score": "0.68744004", "text": "def main():\n\n parser = argparse.ArgumentParser(\n description=\"List and create emulator docker containers ({}).\".format(emu.__version__),\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", action=\"store_true\", help=\"Set verbose logging\")\n\n subparsers = parser.add_subparsers()\n\n list_parser = subparsers.add_parser(\n \"list\", help=\"list all the available the publicly available emulators and system images.\"\n )\n\n list_parser.add_argument(\n \"--arm\",\n action=\"store_true\",\n help=\"Display arm images. Note that arm images are not hardware accelerated and are *extremely* slow.\",\n )\n list_parser.set_defaults(func=list_images)\n\n create_parser = subparsers.add_parser(\n \"create\",\n help=\"Given an emulator and system image zip file, \"\n \"generates a Docker image comprising complete environment in which the Android Emulator runs. \"\n \"After the Docker image is started up, interaction with the emulator is made possible via port forwarding and ADB, \"\n \"or gRPC and WebRTC.\",\n )\n create_parser.add_argument(\n \"emuzip\",\n help=\"Zipfile containing the a publicly released emulator, or [canary|stable] to use the latest canary or stable release.\",\n )\n create_parser.add_argument(\n \"imgzip\",\n help=\"Zipfile containing a public system image that should be launched, or a regexp matching the image to retrieve. \"\n \"The first matching image will be selected when using a regex. \"\n 'Use the list command to show all available images. For example \"P google_apis_playstore x86_64\".',\n )\n create_parser.add_argument(\n \"--extra\",\n default=\"\",\n help=\"Series of additional commands to pass on to the emulator. \"\n + 'For example \"-turncfg \\\\\"curl -s -X POST https://networktraversal.googleapis.com/v1alpha/iceconfig?key=MySec\\\\\"\"',\n )\n create_parser.add_argument(\n \"--dest\", default=os.path.join(os.getcwd(), \"src\"), help=\"Destination for the generated docker files\"\n )\n create_parser.add_argument(\"--tag\", default=\"\", help=\"Docker image name\")\n create_parser.add_argument(\n \"--start\",\n action=\"store_true\",\n help=\"Starts the container after creating it. \"\n \"All exposed ports are forwarded, and your private adbkey (if available) is injected but not stored.\",\n )\n create_parser.set_defaults(func=create_docker_image)\n\n create_inter = subparsers.add_parser(\n \"interactive\",\n help=\"Interactively select which system image and emulator binary to use when creating a docker container\",\n )\n create_inter.add_argument(\n \"--extra\",\n default=\"\",\n help=\"Series of additional commands to pass on to the emulator. \"\n 'For example -turncfg \\\\\"curl -s -X POST https://networktraversal.googleapis.com/v1alpha/iceconfig?key=MySec\\\\\"',\n )\n create_inter.add_argument(\n \"--dest\", default=os.path.join(os.getcwd(), \"src\"), help=\"Destination for the generated docker files\"\n )\n create_inter.add_argument(\n \"--start\",\n action=\"store_true\",\n help=\"Starts the container after creating it. 
\"\n \"All exposed ports are forwarded, and your private adbkey (if available) is injected but not stored.\",\n )\n create_inter.add_argument(\n \"--arm\",\n action=\"store_true\",\n help=\"Display arm images. Note that arm images are not hardware accelerated and are *extremely* slow.\",\n )\n create_inter.add_argument(\"--tag\", default=\"\", help=\"Docker image name\")\n create_inter.set_defaults(func=create_docker_image_interactive)\n\n args = parser.parse_args()\n lvl = logging.DEBUG if args.verbose else logging.WARNING\n logging.basicConfig(level=lvl)\n if hasattr(args, \"func\"):\n args.func(args)\n else:\n parser.print_help()", "title": "" }, { "docid": "fd17b6b296d23b6ff2ecda317494e2b3", "score": "0.6813535", "text": "def multios_build():\n local( 'cd docker ; ./generate_all.sh' )", "title": "" }, { "docid": "ba5222b4169daf5e2a06304106871bb5", "score": "0.6744934", "text": "def standard_docker_args():\n docker_args = []\n # Remove the container immediately when we're done building the docs\n docker_args.append('--rm')\n # Make sure we create files as the current user because that is what\n # folks that use build_docs.pl expect.\n uid = getuid()\n if uid == 0:\n raise ArgError(\"This process isn't likely to suceed if run as root\")\n docker_args.extend(['--user', '%d:%d' % (uid, getgid())])\n # Mount the docs build code so we can run it!\n docker_args.extend(['-v', '%s:/docs_build:cached' % DIR])\n # Seccomp adds a *devestating* performance overhead if you happen\n # to have it installed.\n docker_args.extend(['--security-opt', 'seccomp=unconfined'])\n # Keep stdin open so the docs build can use closing it as a signal that\n # it needs to die.\n docker_args.append('-i')\n # Pass the node name into the docker image if it is set\n if 'NODE_NAME' in environ:\n docker_args.extend(['-e', 'NODE_NAME=%s' % environ['NODE_NAME']])\n # Ritual to make nginx run (even with -t) as the mapped user\n docker_args.extend(['--tmpfs', '/run/nginx',\n '--tmpfs', '/var/log/nginx',\n '--tmpfs', '/var/lib/nginx/body',\n '--tmpfs', '/var/lib/nginx/fastcgi',\n '--tmpfs', '/var/lib/nginx/proxy',\n '--tmpfs', '/var/lib/nginx/uwsgi',\n '--tmpfs', '/var/lib/nginx/scgi'])\n # Mount in a custom gitconfig that treats all directories as safe\n docker_args.extend(['-v', '%s/gitconfig:/.gitconfig' % DIR])\n return docker_args", "title": "" }, { "docid": "0feaf87ef5d25244b4fb4c23ae1f1e7c", "score": "0.6717394", "text": "def ensure_base_images():\n\n origwd = os.getcwd()\n docker_files = os.path.join(os.path.dirname(__file__), \"static\", \"docker\")\n os.chdir(docker_files)\n\n images = [(\"base\", \"Dockerfile.base\", \".\"),\n (\"hacksport\", \"Dockerfile.hacksport\", \"/picoCTF-env\"),\n (\"shellmanager\", \"Dockerfile.config\", \"/opt/hacksports\")]\n\n for build in images:\n name, dockerfile, context = build\n # Use existing DockerChallenge infrastrucutre to consistently build images.\n builder = DockerChallenge()\n builder.image_name = f\"{REPO_NAME}/{name}\"\n\n # Copy Dockerfile into context. 
While the docker cli allows a seperate\n # -f, the SDK would require building a custom context.\n dockerfile_tmp = os.path.join(context, dockerfile)\n clean = False\n try:\n shutil.copyfile(dockerfile, dockerfile_tmp)\n except shutil.SameFileError:\n clean = True\n\n # build the image\n img = builder._build_docker_image(\n build_args={},\n timeout=600,\n labels={},\n dockerfile=dockerfile,\n context=context)\n\n if img is None:\n logger.error(f\"Failed to build base image: {builder.image_name}\")\n return False\n else:\n logger.debug(f\"{builder.image_name} built: {img.id}\")\n\n # Clean up the temporary, in context, Dockerfile.\n if not clean:\n os.remove(dockerfile_tmp)\n\n # Resore working directory\n os.chdir(origwd)", "title": "" }, { "docid": "f35ab7cb5e303c92180f4484956a4b81", "score": "0.66465396", "text": "def run_in_docker(self, cmd, image, threads_needed, stdout=None, stderr=None):\n\t\t\n\t\tif len(cmd)<2:\n\t\t\tcontainer_name = self.sample_name+\"_vt\"\n\t\telse:\n\t\t\tcontainer_name = self.sample_name+\"_\"+cmd[0]\n\t\tcheckContainer(container_name)\n\t\t\n\t\tif len(cmd[0].split(\" \"))>1:\n\t\t\tif cmd[0].split(\" \")[1]==\"STAR\":\n\t\t\t\tdcmd = [\"docker\", \"run\",\"--name\",container_name,\n\t\t\t\t\t\t\t\t\"-v\", \"{}:{}\".format(self.input_folder, self.input_folder),\n\t\t\t\t\t\t\t\t\"-v\", \"{}:{}\".format(output_folder, output_folder),\n\t\t\t\t\t\t\t\t\"-v\", \"{}:{}\".format(reference_folder, reference_folder),\n\t\t\t\t\t\t\t\t'--ipc=\"host\"',\n\t\t\t\t\t\t\t\tself.docker_images_dict[image],\n\t\t\t\t\t\t\t\t\"/bin/bash -c \"\n\t\t\t\t\t\t\t\t]\n\t\telse:\n\t\t\tdcmd = [\"docker\", \"run\",\"--name\",container_name,\n\t\t\t\t\t\t\t\t\"-v\", \"{}:{}\".format(self.input_folder, self.input_folder),\n\t\t\t\t\t\t\t\t\"-v\", \"{}:{}\".format(output_folder, output_folder),\n\t\t\t\t\t\t\t\t\"-v\", \"{}:{}\".format(reference_folder, reference_folder),\n\t\t\t\t\t\t\t\tself.docker_images_dict[image]\n\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t\t\t\t\t\n\t\tdcmd += cmd\n\t\tif stdout is not None:\n\t\t\tstdout = open(stdout,\"w\")\n\t\t\tself.open_files.append(stdout)\n\t\tif stderr is not None:\n\t\t\tstderr = open(stderr,\"w\")\n\t\t\tself.open_files.append(stderr)\n\t\t\t\n\t\t############ LOCK DOWN NEEDED THREADS #############\n\t\tthreads_needed = int(threads_needed)\n\t\twait_go = \"wait\"\n\t\twhile wait_go !='go':\n\t\t\ttime.sleep(10)\n\t\t\twait_go = check_threads(batch_ID, container_name,\"start\",(-1)*threads_needed, max_nr_threads)\t\n\t\t\n\t\t########### RUN COMMAND IN DOCKER #################\n\t\tself.logger.info(\"Running: \"+\" \".join(dcmd))\n\t\tos.system(\" \".join(dcmd))\n\t\tcheckContainer(container_name)\n\t\t\n\t\t########### RELEASE LOCK DOWN CORES ###############\n\t\twait_go = \"wait\"\n\t\twhile wait_go !='go':\n\t\t\ttime.sleep(10)\n\t\t\twait_go = check_threads(batch_ID, container_name,\"finish\", threads_needed, max_nr_threads)", "title": "" }, { "docid": "f56397500b4d2a0b397b708b9a8ede14", "score": "0.6622241", "text": "def test_build_docker_cmd_pure(self, run_command_mock, process_check_mock, _):\n bootstrap = docker_bootstrap.Bootstrap(\n '/docker_folder',\n '/workspace',\n 'test_config.yaml',\n docker_tag='tf_test/framework',\n auth_token_dir='/test/auth_token',\n pure_docker=True)\n\n bootstrap.run_tests()\n # Assumes last call was to kick off the docker image.\n arg0 = run_command_mock.call_args[0][0]\n self.assertEqual(\n arg0, 'docker run --rm '\n '-v /test/auth_token:/auth_tokens -v /workspace:/workspace '\n 'tf_test/framework python 
'\n '/workspace/git/benchmark_harness/oss_bench/harness/controller.py'\n ' --workspace=/workspace --test-config=test_config.yaml'\n ' --framework=tensorflow')\n process_check_mock.assert_called()", "title": "" }, { "docid": "fdf30a4f9ee0ecd554b8bec88ada4ec8", "score": "0.6607228", "text": "def build():\n click.echo('Building docker image...')\n dc.images.build(path=DOCKER_PATH, tag=DOCKER_TAG, rm=True, stream=True)\n click.echo('Finished building docker image!')", "title": "" }, { "docid": "ea845d2b7e8299364dd350bd08f18e96", "score": "0.65985185", "text": "def run(collector, image, **kwargs):\n image.build_and_run(collector.configuration[\"images\"])", "title": "" }, { "docid": "3e9d709e3fa3862e15403ca95481d0f5", "score": "0.65979344", "text": "def docker(c, verbose=False):\n\n if PLATFORM == \"Linux\":\n # Install Docker\n # Install key dependencies first\n if run(\"command -v docker\", hide=not verbose, warn=True).ok:\n run('echo \"===> docker {}\"'.format(MESSAGE_ALREADY_INSTALLED))\n else:\n run(\n \"apt-get install apt-transport-https ca-certificates curl gnupg2 software-properties-common\",\n hide=not verbose,\n warn=True,\n )\n res = run(\"cat /etc/issue\", hide=not verbose, warn=True)\n if \"Debian\" in str(res):\n run(\n \"curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add -\",\n hide=not verbose,\n warn=True,\n )\n run(\"apt-key fingerprint 0EBFCD88\", hide=not verbose, warn=True)\n run(\n 'add-apt-repository \\\n \"deb [arch=amd64] https://download.docker.com/linux/debian \\\n $(lsb_release -cs) \\\n stable\"',\n hide=not verbose,\n warn=True,\n )\n run(\n \"apt install docker-ce docker-ce-cli containerd.io\",\n hide=not verbose,\n warn=True,\n )\n\n if run(\"docker\", hide=not verbose, warn=True).ok:\n run('echo \"===> docker {}\"'.format(MESSAGE_OK))\n else:\n run('echo \"===> docker {}\"'.format(MESSAGE_FAILED))\n elif \"Ubuntu\" in str(res):\n run(\n \"curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -\",\n hide=not verbose,\n warn=True,\n )\n run(\"apt-key fingerprint 0EBFCD88\", hide=not verbose, warn=True)\n run(\n 'add-apt-repository \\\n \"deb [arch=amd64] https://download.docker.com/linux/debian \\\n $(lsb_release -cs) \\\n stable\"',\n hide=not verbose,\n warn=True,\n )\n run(\"apt install docker-ce\", hide=not verbose, warn=True)\n\n if run(\"docker\", hide=not verbose, warn=True).ok:\n run('echo \"===> docker {}\"'.format(MESSAGE_OK))\n else:\n run('echo \"===> docker {}\"'.format(MESSAGE_FAILED))\n else:\n run(\"echo {}\".format(MESSAGE_WRONG_PLATFORM))\n\n elif PLATFORM == \"Darwin\":\n if run(\"command -v docker\", hide=not verbose, warn=True).ok:\n run('echo \"===> docker {}\"'.format(MESSAGE_ALREADY_INSTALLED))\n else:\n if run(\"brew cask install docker\", hide=not verbose, warn=True).ok:\n run('echo \"===> docker {}\"'.format(MESSAGE_OK))\n else:\n run('echo \"===> docker {}\"'.format(MESSAGE_FAILED))\n run(\"open /Applications/Docker.app\")\n\n else:\n run(\"echo {}\".format(MESSAGE_WRONG_PLATFORM))", "title": "" }, { "docid": "b603de705b03b108135fe4897ec6ed3b", "score": "0.6540381", "text": "def create_docker_image_interactive(args):\n img = emu_downloads_menu.select_image(args.arm) or sys.exit(1)\n emulator = emu_downloads_menu.select_emulator() or sys.exit(1)\n\n img_zip = img.download()\n emu_zip = emulator.download(\"linux\")\n device = DockerDevice(emu_zip, img_zip, args.dest)\n device.create_docker_file(args.extra)\n img = device.create_container()\n if img and args.start:\n device.launch(img)", "title": "" }, { 
"docid": "19fdbc39441e37eb21968377ecd096db", "score": "0.65236735", "text": "def main():\n\n # get AWS credentials\n aws_credentials = read_aws_credentials()\n access_key_id = aws_credentials['access_key_id']\n secret_access_key = aws_credentials['secret_access_key']\n aws_region = aws_credentials['region']\n print(access_key_id)\n print(secret_access_key)\n print(aws_region)\n\n # build Docker image\n docker_client = docker.from_env()\n image, build_log = docker_client.images.build(\n path='.', tag=LOCAL_REPOSITORY, rm=True)\n\n # get AWS ECR login token\n ecr_client = boto3.client(\n 'ecr', aws_access_key_id=access_key_id,\n aws_secret_access_key=secret_access_key, region_name=aws_region)\n ecr_credentials = (\n ecr_client\n .get_authorization_token()\n ['authorizationData'][0])\n ecr_username = 'AWS'\n ecr_password = (\n base64.b64decode(ecr_credentials['authorizationToken'])\n .replace(b'AWS:', b'')\n .decode('utf-8'))\n ecr_url = ecr_credentials['proxyEndpoint']\n # get Docker to login/authenticate with ECR\n docker_client.login(\n username=ecr_username, password=ecr_password, registry=ecr_url, reauth=True)\n # tag image for AWS ECR\n ecr_repo_name = '{}/{}'.format(\n ecr_url.replace('https://',''), LOCAL_REPOSITORY)\n image.tag(ecr_repo_name, tag='latest')\n\n # push image to AWS ECR\n push_log = docker_client.images.push(ecr_repo_name, tag='latest')\n print(push_log)\n # force new deployment of ECS service\n ecs_client = boto3.client(\n 'ecs', aws_access_key_id=access_key_id,\n aws_secret_access_key=secret_access_key, region_name=aws_region)\n\n ecs_client.update_service(\n cluster=ECS_CLUSTER, service=ECS_SERVICE, forceNewDeployment=True)\n return None", "title": "" }, { "docid": "e143c298f88e439d2ef23b043640ff8a", "score": "0.6522166", "text": "def __init__(__self__, *,\n image: pulumi.Input[str],\n name: pulumi.Input[str],\n command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n cpu: Optional[pulumi.Input[int]] = None,\n depends_on: Optional[pulumi.Input[Sequence[pulumi.Input['TaskDefinitionContainerDependencyArgs']]]] = None,\n disable_networking: Optional[pulumi.Input[bool]] = None,\n dns_search_domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n dns_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n docker_labels: Optional[Any] = None,\n docker_security_options: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n entry_point: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n environment: Optional[pulumi.Input[Sequence[pulumi.Input['TaskDefinitionKeyValuePairArgs']]]] = None,\n environment_files: Optional[pulumi.Input[Sequence[pulumi.Input['TaskDefinitionEnvironmentFileArgs']]]] = None,\n essential: Optional[pulumi.Input[bool]] = None,\n extra_hosts: Optional[pulumi.Input[Sequence[pulumi.Input['TaskDefinitionHostEntryArgs']]]] = None,\n firelens_configuration: Optional[pulumi.Input['TaskDefinitionFirelensConfigurationArgs']] = None,\n health_check: Optional[pulumi.Input['TaskDefinitionHealthCheckArgs']] = None,\n hostname: Optional[pulumi.Input[str]] = None,\n interactive: Optional[pulumi.Input[bool]] = None,\n links: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n linux_parameters: Optional[pulumi.Input['TaskDefinitionLinuxParametersArgs']] = None,\n log_configuration: Optional[pulumi.Input['TaskDefinitionLogConfigurationArgs']] = None,\n memory: Optional[pulumi.Input[int]] = None,\n memory_reservation: Optional[pulumi.Input[int]] = None,\n mount_points: 
Optional[pulumi.Input[Sequence[pulumi.Input['TaskDefinitionMountPointArgs']]]] = None,\n port_mappings: Optional[pulumi.Input[Sequence[pulumi.Input['TaskDefinitionPortMappingArgs']]]] = None,\n privileged: Optional[pulumi.Input[bool]] = None,\n pseudo_terminal: Optional[pulumi.Input[bool]] = None,\n readonly_root_filesystem: Optional[pulumi.Input[bool]] = None,\n repository_credentials: Optional[pulumi.Input['TaskDefinitionRepositoryCredentialsArgs']] = None,\n resource_requirements: Optional[pulumi.Input[Sequence[pulumi.Input['TaskDefinitionResourceRequirementArgs']]]] = None,\n secrets: Optional[pulumi.Input[Sequence[pulumi.Input['TaskDefinitionSecretArgs']]]] = None,\n start_timeout: Optional[pulumi.Input[int]] = None,\n stop_timeout: Optional[pulumi.Input[int]] = None,\n system_controls: Optional[pulumi.Input[Sequence[pulumi.Input['TaskDefinitionSystemControlArgs']]]] = None,\n ulimits: Optional[pulumi.Input[Sequence[pulumi.Input['TaskDefinitionUlimitArgs']]]] = None,\n user: Optional[pulumi.Input[str]] = None,\n volumes_from: Optional[pulumi.Input[Sequence[pulumi.Input['TaskDefinitionVolumeFromArgs']]]] = None,\n working_directory: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"image\", image)\n pulumi.set(__self__, \"name\", name)\n if command is not None:\n pulumi.set(__self__, \"command\", command)\n if cpu is not None:\n pulumi.set(__self__, \"cpu\", cpu)\n if depends_on is not None:\n pulumi.set(__self__, \"depends_on\", depends_on)\n if disable_networking is not None:\n pulumi.set(__self__, \"disable_networking\", disable_networking)\n if dns_search_domains is not None:\n pulumi.set(__self__, \"dns_search_domains\", dns_search_domains)\n if dns_servers is not None:\n pulumi.set(__self__, \"dns_servers\", dns_servers)\n if docker_labels is not None:\n pulumi.set(__self__, \"docker_labels\", docker_labels)\n if docker_security_options is not None:\n pulumi.set(__self__, \"docker_security_options\", docker_security_options)\n if entry_point is not None:\n pulumi.set(__self__, \"entry_point\", entry_point)\n if environment is not None:\n pulumi.set(__self__, \"environment\", environment)\n if environment_files is not None:\n pulumi.set(__self__, \"environment_files\", environment_files)\n if essential is not None:\n pulumi.set(__self__, \"essential\", essential)\n if extra_hosts is not None:\n pulumi.set(__self__, \"extra_hosts\", extra_hosts)\n if firelens_configuration is not None:\n pulumi.set(__self__, \"firelens_configuration\", firelens_configuration)\n if health_check is not None:\n pulumi.set(__self__, \"health_check\", health_check)\n if hostname is not None:\n pulumi.set(__self__, \"hostname\", hostname)\n if interactive is not None:\n pulumi.set(__self__, \"interactive\", interactive)\n if links is not None:\n pulumi.set(__self__, \"links\", links)\n if linux_parameters is not None:\n pulumi.set(__self__, \"linux_parameters\", linux_parameters)\n if log_configuration is not None:\n pulumi.set(__self__, \"log_configuration\", log_configuration)\n if memory is not None:\n pulumi.set(__self__, \"memory\", memory)\n if memory_reservation is not None:\n pulumi.set(__self__, \"memory_reservation\", memory_reservation)\n if mount_points is not None:\n pulumi.set(__self__, \"mount_points\", mount_points)\n if port_mappings is not None:\n pulumi.set(__self__, \"port_mappings\", port_mappings)\n if privileged is not None:\n pulumi.set(__self__, \"privileged\", privileged)\n if pseudo_terminal is not None:\n pulumi.set(__self__, \"pseudo_terminal\", pseudo_terminal)\n 
if readonly_root_filesystem is not None:\n pulumi.set(__self__, \"readonly_root_filesystem\", readonly_root_filesystem)\n if repository_credentials is not None:\n pulumi.set(__self__, \"repository_credentials\", repository_credentials)\n if resource_requirements is not None:\n pulumi.set(__self__, \"resource_requirements\", resource_requirements)\n if secrets is not None:\n pulumi.set(__self__, \"secrets\", secrets)\n if start_timeout is not None:\n pulumi.set(__self__, \"start_timeout\", start_timeout)\n if stop_timeout is not None:\n pulumi.set(__self__, \"stop_timeout\", stop_timeout)\n if system_controls is not None:\n pulumi.set(__self__, \"system_controls\", system_controls)\n if ulimits is not None:\n pulumi.set(__self__, \"ulimits\", ulimits)\n if user is not None:\n pulumi.set(__self__, \"user\", user)\n if volumes_from is not None:\n pulumi.set(__self__, \"volumes_from\", volumes_from)\n if working_directory is not None:\n pulumi.set(__self__, \"working_directory\", working_directory)", "title": "" }, { "docid": "30d61c6dc30b386935a6d7b50226d3df", "score": "0.65205675", "text": "def build():\n build_docker_images()\n build_network()\n compose_tools()", "title": "" }, { "docid": "445b8c66419ab2ab277cc37729a389c2", "score": "0.6517359", "text": "def docker_build(opts, config):\n tag = tag_name(config)\n latest = latest_tag_name(config)\n helper = BuildOptionHelper(opts, config)\n templ = helper['template']\n path = helper['directory']\n\n if helper['login']:\n check = _docker_login(helper)\n if not check:\n msg = \"Unable to perform docker login due to missing cirrus conf entries\"\n LOGGER.error(msg)\n sys.exit(1)\n if templ is not None:\n ds.run(\n input=templ,\n output=path,\n context=helper['context'],\n defaults=helper['defaults'],\n extend_context=config.configuration_map()\n )\n\n image = _docker_build(path, tag, tag_base(config))\n _docker_tag(image, tag, latest)", "title": "" }, { "docid": "2c5842299d3d6a14bcdabb53a4cae143", "score": "0.6499194", "text": "def build(aws_session, docker_client, image_name=None, tag=None):\n\n service_name = get_service_name()\n print('Looking for the location of the service \"' + service_name + '\" in the AUTOCOMPOSE_PATH...')\n autocompose_config_file = get_first_from_paths(os.path.join('services', service_name), AUTOCOMPOSE_SERVICE_FILE)\n autocompose_config = yaml.load(open(autocompose_config_file, 'r'))\n\n # Get the name of the image\n if AUTOCOMPOSE_IMAGE_KEY not in autocompose_config:\n raise Exception('No Autocompose image specified')\n image = autocompose_config[AUTOCOMPOSE_IMAGE_KEY]\n print('The service \"' + service_name + '\" wants to use the image \"' + image + '\".')\n\n # Find the directory where the image recipe resides\n print('Looking for the location of the image \"' + image + '\" in the AUTOCOMPOSE_PATH...')\n image_path = __get_image_path(image)\n if image_path is None:\n raise Exception('Could not find the image ' + image)\n print('Using the path \"' + image_path + '\"')\n\n # Copy files from the recipe to the current directory\n print('Copying files from \"' + image_path + '\" to your current directory...')\n copied_files = __copy_files(image_path)\n\n # If the Dockerfile.sh file exists, execute it\n if os.path.exists(DOCKERFILE_SH):\n print(DOCKERFILE_SH + ' exists. 
Executing...')\n try:\n subprocess.call(['bash', DOCKERFILE_SH])\n except BaseException as e:\n print(e)\n __fail(copied_files)\n raise Exception('An error occurred while executing Dockerfile.sh')\n print('Dockerfile.sh executed successfully.')\n\n # Execute 'docker build .'\n if image_name is None:\n image_name = service_name\n\n if tag is None:\n repo_tag = image_name\n else:\n repo_tag = image_name + ':' + tag\n\n print('Calling \"docker build .\" (and tagging image with \"' + repo_tag + '\")')\n try:\n __build_docker_image(docker_client, path='.', tag=repo_tag)\n except BaseException as e:\n print(e)\n __fail(copied_files)\n raise Exception('An error occurred when running \"docker build .\". Make sure the Dockerfile is correct.')\n\n print('Image built successfully.')\n\n # Cleanup copied files\n print('Cleaning up copied files...')\n __cleanup(copied_files)\n\n print('Tagging image with ECR repository...')\n tag_to_ecr(aws_session, docker_client, tag)\n print('Image tagged.')", "title": "" }, { "docid": "3aaf72b7c00eefb5b3e16230e1367250", "score": "0.64873606", "text": "def Run(self, args):\n docker_util.AddDockerTag(args)", "title": "" }, { "docid": "10feecd19dc1dc0723b5cb6721be5464", "score": "0.6476252", "text": "def __generate_docker__(self):\n self.__generate_environment__()\n logger.info(\"Generating docker files\")\n sensors = self.config.sensors\n data = {\n \"netflow_sensors\": sensors.netflow_sensors,\n \"sflow_sensors\": sensors.sflow_sensors,\n \"production\": not self.config.dev_queue,\n }\n\n output = os.path.join(self.config.deploy_path, \"docker-compose.yml\")\n self.__render_template__(\"docker/docker-compose.yml\", output, data)\n\n if self.config.dev_generate or self.config.dev_queue:\n self.__generate_docker__dev__(sensors)", "title": "" }, { "docid": "b74037cf1aa8e0d4cfc7baf458577c43", "score": "0.6468963", "text": "def main():\n opts = build_parser(sys.argv)\n\n if opts.command == 'init':\n init_package(opts)\n\n if opts.command == 'container-init':\n init_container(opts)\n\n if opts.command == 'project':\n build_project(opts)\n\n if opts.command == 'update':\n update_package(opts)", "title": "" }, { "docid": "7c350aa02d6753602fce0a709e7d4f2a", "score": "0.6398899", "text": "def main(self, expected_container_name, image, root_password):\n does_container_exist = self.does_container_exist_by_name(expected_container_name)\n if does_container_exist:\n print(\"Container '{c_name}' exists. Starting it.\".format(c_name=expected_container_name))\n self.start_my_container(expected_container_name)\n print(\"***************************************\")\n print(\"Started container '{}'\".format(expected_container_name))\n print(\"***************************************\")\n else:\n print(\"Container '{c_name}' does not exist. Running new container with name '{c_name}'\".format(c_name=expected_container_name))\n\n # check if the images is available on local drive. If not new image will be downloaded (takes time)\n exists = self.does_local_image_exist(image)\n if exists:\n print(\"Image '{}' exist on local drive. Starting a new container using the image.\".format(image))\n else:\n print(\"Image '{}' does not exist on local drive. Downloading.... will take few minutes ......\".format(image))\n\n # Specify volume mapping. 
Volumes are we keep persistent data and custom configs\n volumes = {os.path.join(os.getcwd(), \"mysql_volumes\", \"configurations\"): {'bind': '/etc/mysql/conf.d', 'mode': 'rw'},\n os.path.join(os.getcwd(), \"mysql_volumes\", \"data\"): {'bind': '/var/lib/mysql', 'mode': 'rw'}}\n environment = {\"MYSQL_ROOT_PASSWORD\": root_password}\n ports = {\"3306/tcp\": \"3308\"}\n\n # run the container\n container = self.client.containers.run(image,\n name=expected_container_name,\n environment=environment,\n ports=ports,\n volumes=volumes,\n detach=True,\n stdout=True)\n if self.docker_logs:\n time.sleep(4)\n logs = container.logs()\n logs_split = logs.split('\\n')\n for line in logs_split:\n print(line)\n\n # print useful message to the user\n print(\"**********************************************\")\n print(\"Created container\")\n print(\"Container name: '{}'\".format(container.name))\n print(\"Image used: '{}'\".format(image))\n print(\"Root password: '{}'\".format(root_password))\n print(\"Run command 'docker ps -a' to see all containers and statuses\")\n print(\"Run command 'docker ps' to see running containers and statuses\")\n print(\"**********************************************\")", "title": "" }, { "docid": "e52f98915bcd1fd48e4d55d28616644f", "score": "0.6388636", "text": "def application_Dockerfile(interpreter, version, os, repository, application_name, git_url, port = DEFAULT_PORT):\n return render_template__('app/Dockerfile.tpl', **locals())", "title": "" }, { "docid": "49f542c5cecbef5b5feadc257322a821", "score": "0.63176036", "text": "def start():\n local(\"docker-compose up --build\")", "title": "" }, { "docid": "ebbdeb7b22d5432e37cfa1ee7527478e", "score": "0.63120145", "text": "def build_docker_img(img_tag, filename, context):\n if context is None:\n context = '.'\n local(\"docker build -t %s -f %s --no-cache %s\" % (img_tag, filename, context))", "title": "" }, { "docid": "b56384d285cf4f9ec1104ae0d80f9437", "score": "0.62970835", "text": "def create_docker_file(self, image_type):\r\n\r\n new_docker_file = []\r\n if image_type == 'application':\r\n current_name = self.__application_parent\r\n current_items = self.__application_items\r\n current_directory = self.__application_directory\r\n new_docker_file.append(f'FROM {current_name}\\n')\r\n elif image_type == 'build':\r\n current_name = self.__build_parent\r\n current_items = self.__build_items\r\n current_directory = self.__build_directory\r\n new_docker_file.append(f'FROM {current_name}\\n')\r\n else:\r\n sys.exit(f'You must only choose application or build for image_type')\r\n\r\n selected_items_list = self.__input_to_list__(current_items)\r\n base_name = self.__get_base_name__(current_name)\r\n for selected_ in selected_items_list:\r\n working_directory = f'{os.getcwd()}{os.path.sep}{selected_}{os.path.sep}{base_name}'\r\n if self.__path_exists__(working_directory):\r\n working_directory_contents = self.__get_directory_contents__(working_directory)\r\n if working_directory_contents:\r\n found_docker = False\r\n for file_ in working_directory_contents:\r\n if 'Dockerfile' in file_:\r\n found_docker = True\r\n logging.warning(f'Adding contents to {current_directory}/Dockerfile from {file_}')\r\n with open(file_, 'r') as in_:\r\n in_lines = in_.readlines()\r\n if in_lines:\r\n new_docker_file.extend(self.__update_tokens__(in_lines))\r\n else:\r\n sys.exit(f'{file_} has no content.')\r\n else:\r\n shutil.copy(file_, f'{current_directory}{os.path.sep}')\r\n\r\n if found_docker:\r\n with 
open(f'{current_directory}{os.path.sep}Dockerfile', 'w') as out_:\r\n out_.writelines(new_docker_file)\r\n\r\n else:\r\n sys.exit(f'{working_directory} is missing Dockerfile')\r\n\r\n else:\r\n sys.exit(f'{working_directory} was empty')\r\n\r\n return new_docker_file", "title": "" }, { "docid": "be521d96bcecd05a2e7c1a985ba881d1", "score": "0.62483495", "text": "def build_docker(model_uri, name, install_mlflow, enable_mlserver):\n mlflow_home = os.environ.get(\"MLFLOW_HOME\", None)\n _get_flavor_backend(model_uri, docker_build=True).build_image(\n model_uri,\n name,\n mlflow_home=mlflow_home,\n install_mlflow=install_mlflow,\n enable_mlserver=enable_mlserver,\n )", "title": "" }, { "docid": "3c92d53e3a22ddeb4d37227904c830b5", "score": "0.6194577", "text": "def __build_docker_image(docker_client, path, tag):\n print_docker_output(docker_client.build(path=path, tag=tag, stream=True))", "title": "" }, { "docid": "8fc4aad45d2eb2f912b07b2871ef65df", "score": "0.6179726", "text": "def configure_docker_image(self, img=\"bft:node\"):\n # only provide absolute path for tar image\n # else will build image from default location\n if not self.validate_docker_image(img):\n # only build an image if path is provided from config\n if self.build_img_path:\n self.build_docker_image(img, self.build_img_path)\n assert self.validate_docker_image(\n img), \"Failed to build docker image: %s\" % img", "title": "" }, { "docid": "18e4144c35fb83056b84bc07e0104c74", "score": "0.61745954", "text": "def test_do_images(self, mock_msg, mock_utools, mock_localrepo):\n udocker.Msg = mock_msg\n udocker.conf = udocker.Config()\n mock_localrepo.return_value.cd_imagerepo.return_value = \\\n \"/home/user/.udocker/repos/X/latest\"\n mock_localrepo.return_value.get_imagerepos.return_value = [\n ('iscampos/openqcd', 'latest'), ('busybox', 'latest')]\n t_argv = ['./udocker.py', \"images\"]\n with mock.patch.object(sys, 'argv', t_argv):\n # Unprotected\n mock_localrepo.return_value.isprotected_imagerepo\\\n .return_value = False\n main = udocker.Main()\n main.execute()\n msg_out = (\"busybox:latest\"\n \" .\")\n find_str(self, msg_out, mock_msg.return_value.out.call_args)\n # Protected\n mock_localrepo.return_value.isprotected_imagerepo\\\n .return_value = True\n main.execute()\n msg_out = (\"busybox:latest\"\n \" P\")\n find_str(self, msg_out, mock_msg.return_value.out.call_args)\n t_argv = ['./udocker.py', \"images\", \"-l\"]\n with mock.patch.object(sys, 'argv', t_argv):\n main = udocker.Main()\n main.execute()\n msg_out = \" /home/user/.udocker/repos/X/latest\"\n find_str(self, msg_out, mock_msg.return_value.out.call_args)\n #\n mock_localrepo.return_value.get_imagerepos.return_value = [\n ('busybox', 'latest')]\n mock_localrepo.return_value.get_layers.return_value = [\n ('/home/jorge/.udocker/repos/busybox/latest/' +\n 'sha256:385e281300cc6d88bdd155e0931fbdfbb1801c2b' +\n '0265340a40481ee2b733ae66', 675992),\n ('/home/jorge/.udocker/repos/busybox/latest/' +\n '56ed16bd6310cca65920c653a9bb22de6b235990dcaa174' +\n '2ff839867aed730e5.layer', 675992),\n ('/home/jorge/.udocker/repos/busybox/latest/' +\n '56ed16bd6310cca65920c653a9bb22de6b235990dcaa174' +\n '2ff839867aed730e5.json', 1034),\n ('/home/jorge/.udocker/repos/busybox/latest/' +\n 'bc744c4ab376115cc45c610d53f529dd2d4249ae6b35e5d' +\n '6e7a96e58863545aa.json', 1155),\n ('/home/jorge/.udocker/repos/busybox/latest/' +\n 'bc744c4ab376115cc45c610d53f529dd2d4249ae6b35e5d' +\n '6e7a96e58863545aa.layer', 32),\n ('/home/jorge/.udocker/repos/busybox/latest/' +\n 
'sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633c' +\n 'b16422d00e8a7c22955b46d4', 32)]\n main.execute()\n msg_out = ' /home/jorge/.udocker/repos/busybox/latest/' +\\\n 'sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16' +\\\n '422d00e8a7c22955b46d4 ('\n find_str(self, msg_out, mock_msg.return_value.out.call_args)", "title": "" }, { "docid": "b0aee047ac5e689953aa551c44a0b4dd", "score": "0.6158605", "text": "def run(self):\n # Docker run command with 'interactive' and 'tag' flags\n cmd = 'docker run -i -t'\n\n # Confirm both host_port and container_port are integers\n if all(port != '' and isinstance(int(port), int) for port in (self.host_port, self.container_port)):\n cmd += ' -p {host}:{container}'.format(host=self.host_port, container=self.container_port)\n return cmd + ' {image}'.format(image=self.docker_image)", "title": "" }, { "docid": "4395b2556c8dc296dfc0230249673275", "score": "0.6156641", "text": "def build(**options):\n rebuild = tobool(options.get(\"rebuild\", False))\n version = options.get(\"version\", \"latest\")\n\n tag = f\"{TAG:s}:{version:s}\"\n args = [\"docker\", \"build\", \"-t\", tag, \".\"]\n\n if rebuild:\n args.insert(-1, \"--no-cache\")\n\n with msg(\"Building Image\"):\n local(\" \".join(args))", "title": "" }, { "docid": "1f8543927f25ca2f7cbaa154467273c1", "score": "0.6149534", "text": "def modify_docker_image_metadata(image_path: str,\n output_docker_image: str):\n\n # 1 - Get layers info\n log.debug(\" > Opening docker file\")\n with open_docker_image(image_path) as (\n img, top_layer, _, manifest):\n\n # 2 - Get the last layer in manifest\n old_layer_digest = get_last_image_layer(manifest)\n log.debug(\" > Last layer: {}\".format(old_layer_digest))\n\n with extract_layer_in_tmp_dir(img, old_layer_digest) as d:\n\n # Start trojanizing\n log.info(\" > Starting trojaning process\")\n\n new_layer_path, new_layer_digest = \\\n build_image_layer_from_dir(\"new_layer.tar\", d)\n\n # 5 - Updating the manifest\n new_manifest = build_manifest_with_new_layer(manifest,\n old_layer_digest,\n new_layer_digest)\n\n # Add new enviroment vars with LD_PRELOAD AND REMOTE ADDR\n json_info_last_layer = read_file_from_image(img,\n \"{}/json\".format(\n old_layer_digest))\n\n json_info_last_layer = json.loads(json_info_last_layer.decode())\n\n _, json_info_root_layer = get_root_json_from_image(img)\n\n new_json_data_last_layer, new_json_info_root_layer = None, None\n\n try:\n yield json_info_last_layer, json_info_root_layer\n except Exception as e:\n if e.__class__.__name__ == \"DockerscanReturnContextManager\":\n new_json_data_last_layer, new_json_info_root_layer = e.args\n\n if new_json_data_last_layer is None:\n return\n\n # 6 - Create new docker image\n log.info(\" > Creating new docker image\")\n create_new_docker_image(new_manifest,\n output_docker_image,\n img,\n old_layer_digest,\n new_layer_path,\n new_layer_digest,\n new_json_data_last_layer,\n new_json_info_root_layer)", "title": "" }, { "docid": "58826ca1518e0494f3ca46264560a6ca", "score": "0.6145811", "text": "def create_multi_docker_file(self):\r\n new_docker_file = []\r\n current_directory = self.__multi_build_folder\r\n code_items = self.__input_to_list__(self.__build_items)\r\n app_items = self.__input_to_list__(self.__application_items)\r\n\r\n if self.__build_parent == self.__application_parent:\r\n base_name = self.__get_base_name__(self.__build_parent)\r\n\r\n if code_items:\r\n new_docker_file.append(f'FROM {self.__build_parent} as B\\n')\r\n for ci in code_items:\r\n working_directory = 
f'{os.getcwd()}{os.path.sep}{ci}{os.path.sep}{base_name}'\r\n if self.__path_exists__(working_directory):\r\n working_directory_contents = self.__get_directory_contents__(working_directory)\r\n if working_directory_contents:\r\n found_docker = False\r\n for file_ in working_directory_contents:\r\n if 'Dockerfile' in file_:\r\n found_docker = True\r\n logging.warning(f'Adding contents to {current_directory}/Dockerfile from {file_}')\r\n with open(file_, 'r') as in_:\r\n in_lines = in_.readlines()\r\n if in_lines:\r\n new_docker_file.extend(self.__update_tokens__(in_lines))\r\n else:\r\n sys.exit(f'{file_} has no content.')\r\n else:\r\n shutil.copy(file_, f'{current_directory}{os.path.sep}')\r\n\r\n if not found_docker:\r\n sys.exit(f'{working_directory} is missing Dockerfile')\r\n\r\n else:\r\n sys.exit(f'{working_directory} was empty')\r\n\r\n new_docker_file.append('\\n')\r\n\r\n if app_items:\r\n new_docker_file.append(f'FROM {self.__application_parent} as A\\n')\r\n for ai in app_items:\r\n working_directory = f'{os.getcwd()}{os.path.sep}{ai}{os.path.sep}{base_name}'\r\n if self.__path_exists__(working_directory):\r\n working_directory_contents = self.__get_directory_contents__(working_directory)\r\n if working_directory_contents:\r\n found_docker = False\r\n for file_ in working_directory_contents:\r\n if 'Dockerfile' in file_:\r\n found_docker = True\r\n logging.warning(f'Adding contents to {current_directory}/Dockerfile from {file_}')\r\n with open(file_, 'r') as in_:\r\n in_lines = in_.readlines()\r\n if in_lines:\r\n new_docker_file.extend(self.__update_tokens__(in_lines))\r\n else:\r\n sys.exit(f'{file_} has no content.')\r\n else:\r\n shutil.copy(file_, f'{current_directory}{os.path.sep}')\r\n\r\n if not found_docker:\r\n sys.exit(f'{working_directory} is missing Dockerfile')\r\n\r\n else:\r\n sys.exit(f'{working_directory} was empty')\r\n\r\n new_docker_file.append('\\n')\r\n else:\r\n sys.exit(f'You cannot build code with {self.__build_parent} '\r\n f'and build application with {self.__application_parent} '\r\n f'they have to be the same.')\r\n\r\n if new_docker_file:\r\n new_docker_file.append(f'COPY --from=B '\r\n f'/code/{self.__build_artifacts}/ /opt/code/ \\n')\r\n with open(f'{current_directory}/Dockerfile', 'w') as out_:\r\n out_.writelines(new_docker_file)\r\n\r\n return new_docker_file", "title": "" }, { "docid": "13e5d367f87137761fe860c3db0443b8", "score": "0.61452836", "text": "def build_base_image(\n self,\n directory: str,\n language_name: str,\n language_version: str,\n benchmark: str,\n is_cached: bool,\n ) -> bool:\n\n # We need to retag created images when pushing to registry other\n # than default\n registry_name = self.config.resources.docker_registry\n repository_name = self.system_config.docker_repository()\n image_tag = self.system_config.benchmark_image_tag(\n self.name(), benchmark, language_name, language_version\n )\n if registry_name is not None:\n repository_name = f\"{registry_name}/{repository_name}\"\n else:\n registry_name = \"Docker Hub\"\n\n # Check if we the image is already in the registry.\n if not is_cached:\n if self.find_image(repository_name, image_tag):\n self.logging.info(\n f\"Skipping building OpenWhisk Docker package for {benchmark}, using \"\n f\"Docker image {repository_name}:{image_tag} from registry: \"\n f\"{registry_name}.\"\n )\n return False\n else:\n # image doesn't exist, let's continue\n self.logging.info(\n f\"Image {repository_name}:{image_tag} doesn't exist in the registry, \"\n f\"building OpenWhisk package for 
{benchmark}.\"\n )\n\n build_dir = os.path.join(directory, \"docker\")\n os.makedirs(build_dir, exist_ok=True)\n shutil.copy(\n os.path.join(DOCKER_DIR, self.name(), language_name, \"Dockerfile.function\"),\n os.path.join(build_dir, \"Dockerfile\"),\n )\n\n for fn in os.listdir(directory):\n if fn not in (\"index.js\", \"__main__.py\"):\n file = os.path.join(directory, fn)\n shutil.move(file, build_dir)\n\n with open(os.path.join(build_dir, \".dockerignore\"), \"w\") as f:\n f.write(\"Dockerfile\")\n\n builder_image = self.system_config.benchmark_base_images(self.name(), language_name)[\n language_version\n ]\n self.logging.info(f\"Build the benchmark base image {repository_name}:{image_tag}.\")\n\n buildargs = {\"VERSION\": language_version, \"BASE_IMAGE\": builder_image}\n image, _ = self.docker_client.images.build(\n tag=f\"{repository_name}:{image_tag}\", path=build_dir, buildargs=buildargs\n )\n\n # Now push the image to the registry\n # image will be located in a private repository\n self.logging.info(\n f\"Push the benchmark base image {repository_name}:{image_tag} \"\n f\"to registry: {registry_name}.\"\n )\n ret = self.docker_client.images.push(\n repository=repository_name, tag=image_tag, stream=True, decode=True\n )\n # doesn't raise an exception for some reason\n for val in ret:\n if \"error\" in val:\n self.logging.error(f\"Failed to push the image to registry {registry_name}\")\n raise RuntimeError(val)\n return True", "title": "" }, { "docid": "33af421e378ebd638d41ef98e2d17995", "score": "0.6128888", "text": "def build_container(self, repo_urls, push_to_list, scratch=False):\n\n action = \"build\"\n record = {\n \"dir\": self.distgit_dir,\n \"dockerfile\": \"%s/Dockerfile\" % self.distgit_dir,\n \"image\": self.org_image_name,\n \"version\": self.org_version,\n \"release\": self.org_release,\n \"message\": \"Unknown failure\",\n \"status\": -1,\n # Status defaults to failure until explicitly set by succcess. This handles raised exceptions.\n }\n\n target_image = \"%s:%s-%s\" % (self.org_image_name, self.org_version, self.org_release)\n\n try:\n\n # If this image is FROM another group member, we need to wait on that group member\n if self.config[\"from\"].member is not Missing:\n parent_name = self.config[\"from\"].member\n parent_img = self.runtime.resolve_image(parent_name, False)\n if parent_img is None:\n self.info(\"Skipping parent image build since it is not included: %s\" % parent_name)\n else:\n parent_dgr = parent_img.distgit_repo()\n parent_dgr.wait_for_build(self.metadata.qualified_name)\n\n self.info(\"Building image: %s\" % target_image)\n\n cmd_list = [\"rhpkg\", \"--path=%s\" % self.distgit_dir]\n\n if self.runtime.user is not None:\n cmd_list.append(\"--user=%s\" % self.runtime.user)\n\n cmd_list.append(\"container-build\")\n\n cmd_list.append(\"--nowait\")\n\n if scratch:\n cmd_list.append(\"--scratch\")\n\n if len(repo_urls) > 0:\n cmd_list.append(\"--repo\")\n for repo_url in repo_urls:\n cmd_list.append(repo_url)\n\n # Run the build with --nowait so that we can immdiately get information about the brew task\n rc, out, err = gather_exec(self.runtime, cmd_list)\n\n if rc != 0:\n # Probably no point in continuing.. can't contact brew?\n raise IOError(\"Unable to create brew task: out=%s ; err=%s\" % (out, err))\n\n # Otherwise, we should have a brew task we can monitor listed in the stdout.\n out_lines = out.splitlines()\n\n # Look for a line like: \"Created task: 13949050\" . 
Extract the identifier.\n task_id = next((created_line.split(\":\")[1]).strip() for created_line in out_lines if\n created_line.startswith(\"Created task:\"))\n\n record[\"task_id\"] = task_id\n\n # Look for a line like: \"Task info: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=13948942\"\n task_url = next((info_line.split(\":\", 1)[1]).strip() for info_line in out_lines if\n info_line.startswith(\"Task info:\"))\n\n record[\"task_url\"] = task_url\n\n # Now that we have the basics about the task, wait for it to complete\n rc, out, err = gather_exec(self.runtime, [\"brew\", \"watch-task\", task_id])\n\n # Looking for somethine like the following to conclude the image has already been built:\n # \"13949407 buildContainer (noarch): FAILED: BuildError: Build for openshift-enterprise-base-docker-v3.7.0-0.117.0.0 already exists, id 588961\"\n if \"already exists\" in out:\n self.info(\"Image already built for: %s\" % target_image)\n rc = 0\n\n if rc != 0:\n # An error occurred during watch-task. We don't have a viable build.\n raise IOError(\"Error building image: out=%s ; err=%s\" % (out, err))\n\n self.info(\"Successfully built image: %s ; %s\" % (target_image, task_url))\n record[\"message\"] = \"Success\"\n record[\"status\"] = 0\n self.build_status = True\n\n if scratch:\n # If this is a scratch build, we aren't going to be pushing. We might be able to determine the\n # image name by parsing the build log, but not worth the effort until we need scratch builds.\n # The image name for a scratch build looks something like:\n # brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/openshift3/ose-base:rhaos-3.7-rhel-7-docker-candidate-16066-20170829214444\n return True\n\n except Exception as err:\n record[\"message\"] = \"Exception occurred: %s\" % str(err)\n self.info(\"Exception occurred during build: %s\" % str(err))\n # This is designed to fall through to finally. 
Since this method is designed to be\n # threaded, we should throw and exception; instead return False.\n finally:\n self.runtime.add_record(action, **record)\n # Regardless of success, allow other images depending on this one to progress or fail.\n self.build_lock.release()\n\n if self.build_status:\n if len(push_to_list) > 0:\n # To ensure we don't overwhelm the system building, pull & push synchronously\n with self.runtime.mutex:\n try:\n self.push_image(push_to_list)\n except Exception as push_e:\n self.info(\"Error during push after successful build: %s\" % str(push_e))\n return False\n\n return self.build_status", "title": "" }, { "docid": "0ad3dc79dacc748a2d224dd6b58c73b7", "score": "0.6124601", "text": "def test_correct_image_used(self):\n name = random_name()\n d = self.start_container(name)\n\n def started(_):\n data = subprocess.check_output(\n [b\"docker\", b\"inspect\", name.encode(\"ascii\")])\n self.assertEqual(json.loads(data)[0][u\"Config\"][u\"Image\"],\n u\"openshift/busybox-http-app\")\n d.addCallback(started)\n return d", "title": "" }, { "docid": "53ad8816cd9ada1aab67bca6d4238866", "score": "0.6119147", "text": "def docker_build(image):\n command = [\n 'docker', 'build', '-t', f'{BASE_CIFUZZ_DOCKER_TAG}/{image}', '--file',\n f'{image}.Dockerfile', '.'\n ]\n subprocess.run(command, check=True, cwd=INFRA_DIR)", "title": "" }, { "docid": "9c66390607b85eaa48286d80ef95b4ab", "score": "0.6113113", "text": "def run(self, *args, **kwargs):\n if not args:\n return ['Image name is required.']\n\n if kwargs['remove'] and kwargs['detach']:\n return ['Use either --rm or --detach.']\n\n # Always call external cli for this, rather than figuring out\n # why docker-py throws \"jack is incompatible with use of CloseNotifier in same ServeHTTP call\"\n kwargs['force'] = True\n called, args, kwargs = self.call_external_cli('run', *args, **kwargs)\n if not called:\n kwargs['image'] = args[0]\n kwargs['command'] = args[1:] if len(args) > 1 else []\n\n kwargs = self._add_port_bindings(kwargs)\n kwargs = self._add_exposed_ports(kwargs)\n kwargs = self._add_link_bindings(kwargs)\n kwargs = self._add_volumes_from(kwargs)\n kwargs = self._add_volumes(kwargs)\n kwargs = self._add_network_mode(kwargs)\n\n create_args = allowed_args('create', **kwargs)\n result = self.instance.create_container(**create_args)\n\n if result:\n if \"Warnings\" in result and result['Warnings']:\n return [result['Warnings']]\n if \"Id\" in result and result['Id']:\n self.is_refresh_containers = True\n is_attach = 'detach' not in kwargs or not kwargs['detach']\n start_args = allowed_args('start', **kwargs)\n start_args.update({\n 'container': result['Id'],\n 'attach': is_attach\n })\n return self.start(**start_args)\n return ['There was a problem running the container.']", "title": "" }, { "docid": "7a8d1a5955e24b18eda5acdebdf9417b", "score": "0.6109772", "text": "def build_docker_image(self, image_type):\r\n\r\n if image_type == 'application':\r\n current_directory = self.__application_directory\r\n image_tag = f'{self.__repository_uri}:{self.__application_name}-{self.__application_version_next}'\r\n elif image_type == 'build':\r\n current_directory = self.__build_directory\r\n image_tag = f'{self.__application_name}:{self.__application_version_next}'\r\n else:\r\n sys.exit(f'You must only choose application or build for image_type')\r\n\r\n logging.warning(f'Building {current_directory}/Dockerfile with tag {image_tag}')\r\n results = self.__docker_env.images.build(\r\n path=current_directory,\r\n tag=image_tag,\r\n 
quiet=False,\r\n labels={'Application': self.__application_name,\r\n 'ApplicationVersion': f'{round(self.__application_version_next, 4)}'}\r\n )\r\n logging.warning(f'Completed build of id {results[0].id}')\r\n return image_tag", "title": "" }, { "docid": "6f5843aef072aa8331c959322784f77d", "score": "0.61082304", "text": "def build_image(client: docker.Client, path='./remote-docker', name='noodles-remote'):\n assert os.path.exists(path + '/Dockerfile')\n time = os.stat(path + '/Dockerfile').st_mtime\n\n il = client.images(name=name)\n if len(il) == 0 or il[0]['Created'] < time:\n response = client.build(path, tag=name, rm=True)\n for json_bytes in response:\n line = json.loads(json_bytes.decode())['stream']\n print(line, end='', file=sys.stderr, flush=True)", "title": "" }, { "docid": "d3e6303ba02d215ff2df161c1f2d6602", "score": "0.6084334", "text": "def run(args):\n implementation.docker.check()\n\n with implementation.general.message(\"deployment.\"):\n image = implementation.build.get_image(args)\n if not args.no_run:\n implementation.build.get_package(args, image)", "title": "" }, { "docid": "d912233f44bb21db6d97a0dde8d55871", "score": "0.60573405", "text": "def build_multi_docker_image(self):\r\n current_directory = self.__multi_build_folder\r\n image_tag = f'{self.__repository_uri}:{self.__application_name}-{self.__application_version_next}'\r\n logging.warning(f'Building {current_directory}/Dockerfile with tag {image_tag}')\r\n try:\r\n results = self.__docker_env.images.build(\r\n path=current_directory,\r\n tag=image_tag,\r\n quiet=False,\r\n labels={'Application': self.__application_name,\r\n 'ApplicationVersion': f'{round(self.__application_version_next, 4)}'},\r\n timeout=120,\r\n network_mode=self.__network.name,\r\n )\r\n logging.warning(f'Completed build of id {results[0].id}')\r\n except BuildError as be:\r\n bi = iter(be.build_log)\r\n while True:\r\n try:\r\n i = next(bi)\r\n logging.warning(i)\r\n except StopIteration:\r\n break\r\n sys.exit(bi)\r\n\r\n return image_tag", "title": "" }, { "docid": "10d58d933ac6bc67761f40fe27ddefa9", "score": "0.6055713", "text": "def build_images(ctx, context=None) -> List[str]:\n utils.ensure_context(ctx, context)\n\n images = []\n\n for docker_path in ALL_BUILDS:\n\n docker_path = Path(docker_path)\n\n for docker_file in docker_path.rglob('Dockerfile'):\n # The path to the Dockerfile.\n path = docker_file.parent\n print(f'\\nBuilding docker image in {path}.')\n\n # Get the parent folder name.\n image_name = f'{NAMESPACE}-{docker_file.parts[-2]}'\n ctx.run(f'docker build -t {image_name} {path}/')\n\n images.append(image_name)\n\n return images", "title": "" }, { "docid": "3555d899f1e2e4ccda1155ff41343789", "score": "0.60509145", "text": "def main(create_config, run, verbose):\n if verbose:\n logging.basicConfig(\n format='%(levelname)s %(filename)s: %(message)s',\n level=logging.DEBUG\n )\n else:\n # Log info and above to console\n logging.basicConfig(\n format='%(levelname)s: %(message)s',\n level=logging.INFO\n )\n\n try:\n if create_config:\n create_config_file()\n elif run:\n execute_container()\n except Exception, e:\n click.echo(click.style(e, fg='red'))\n sys.exit(1)", "title": "" }, { "docid": "0d59874160b89c018f3d82f5e3277490", "score": "0.60507786", "text": "def test_docker_executor():\n\n executor_config = {\n \"execution\": {\n \"docker\": {\n \"config\": {\n \"networks\": [\"container:test-postgres-db-docker\"],\n \"env_vars\": [\n \"AWS_ACCESS_KEY_ID\",\n \"AWS_SECRET_ACCESS_KEY\",\n ],\n }\n }\n }\n }\n\n docker_image = 
get_test_project_docker_image()\n if IS_BUILDKITE:\n executor_config[\"execution\"][\"docker\"][\"config\"][\n \"registry\"\n ] = get_buildkite_registry_config()\n else:\n find_local_test_image(docker_image)\n\n run_config = merge_dicts(\n merge_yamls(\n [\n os.path.join(get_test_project_environments_path(), \"env.yaml\"),\n os.path.join(get_test_project_environments_path(), \"env_s3.yaml\"),\n ]\n ),\n executor_config,\n )\n\n with environ({\"DOCKER_LAUNCHER_NETWORK\": \"container:test-postgres-db-docker\"}):\n with docker_postgres_instance() as instance:\n recon_pipeline = get_test_project_recon_pipeline(\"demo_pipeline_docker\", docker_image)\n assert execute_pipeline(\n recon_pipeline, run_config=run_config, instance=instance\n ).success", "title": "" }, { "docid": "f6367fb0d1d8257d37dc8bc776c17b0e", "score": "0.60448277", "text": "def train():\n system(\"docker-compose run --rm foresee\")", "title": "" }, { "docid": "79583c1fe5bbbf1cb8645d71d8419612", "score": "0.6044711", "text": "def _docker_build(path, tag, base_tag):\n command = ['docker', 'build', '-t', tag, path]\n LOGGER.info(\"Executing docker build command: {}\".format(' '.join(command)))\n try:\n stdout = subprocess.check_output(command)\n except subprocess.CalledProcessError as ex:\n LOGGER.error(ex.output)\n raise\n LOGGER.info(stdout)\n image = find_image_id(base_tag)\n LOGGER.info(\"Image ID: {}\".format(image))\n return image", "title": "" }, { "docid": "caf3745795a7c15da312d44d6e53f69a", "score": "0.6042599", "text": "def package(args, config):\n release = generate_release(config['git'])\n print '===> Build package, release: {0}'.format(release)\n\n # prepare actions\n if args.remove:\n clear(args, config)\n\n create_tmpdir()\n if config['prepare']:\n print '===> Run prepare script: {0}'.format(config['prepare'])\n subprocess.check_call(config['prepare'], shell=True)\n\n options = {\n 'release': release,\n 'target': config['target'],\n 'image': config['image'],\n }\n docker_cmd = 'docker run --rm -v $(pwd)/build-env/:/home/builder/build \\\n -e RELEASE={release} -e TARGET={target} \\\n {image}'.format(**options)\n\n print '===> Run container: {0}'.format(' '.join(docker_cmd.split()))\n subprocess.check_call(docker_cmd, shell=True)", "title": "" }, { "docid": "6ee1c3de23c13207dd7af2bb6912fa60", "score": "0.6036361", "text": "def images_foreach(runtime, cmd, message, push):\n runtime.initialize(clone_distgits=True)\n\n # If not pushing, do not clean up our work\n runtime.remove_tmp_working_dir = push\n\n cmd_str = \" \".join(cmd)\n\n for image in runtime.image_metas():\n dgr = image.distgit_repo()\n with Dir(dgr.distgit_dir):\n runtime.logger.info(\"Executing in %s: [%s]\" % (dgr.distgit_dir, cmd_str))\n\n dfp = DockerfileParser()\n dfp.content = image.fetch_cgit_file(\"Dockerfile\")\n\n if subprocess.call(cmd_str,\n shell=True,\n env={\"oit_repo_name\": image.name,\n \"oit_repo_namespace\": image.namespace,\n \"oit_image_name\": dfp.labels[\"name\"],\n \"oit_image_version\": dfp.labels[\"version\"],\n \"oit_group\": runtime.group,\n \"oit_metadata_dir\": runtime.metadata_dir,\n \"oit_working_dir\": runtime.working_dir,\n \"oit_config_filename\": image.config_filename,\n \"oit_distgit_key\": image.distgit_key,\n }) != 0:\n raise IOError(\"Command return non-zero status\")\n runtime.logger.info(\"\\n\")\n\n if message is not None:\n dgr.commit(message)\n\n if push:\n runtime.push_distgits()", "title": "" }, { "docid": "3abdf6d6f28bf40226c492f07f365d2a", "score": "0.6029921", "text": "def images_build_image(runtime, odcs, 
repo_type, repo, push_to_defaults, push_to, scratch):\n # Initialize all distgit directories before trying to build. This is to\n # ensure all build locks are acquired before the builds start and for\n # clarity in the logs.\n runtime.initialize(clone_distgits=True)\n\n items = [m.distgit_repo() for m in runtime.image_metas()]\n if not items:\n runtime.logger.info(\"No images found. Check the arguments.\")\n exit(1)\n\n # Without one of these two arguments, brew would not enable any repos.\n if not repo_type and not repo:\n runtime.logger.info(\"No repos specified. --repo-type or --repo is required.\")\n exit(1)\n\n results = runtime.parallel_exec(\n lambda (dgr, terminate_event): dgr.build_container(\n odcs, repo_type, repo, push_to_defaults, additional_registries=push_to,\n terminate_event=terminate_event, scratch=scratch),\n items)\n results = results.get()\n\n try:\n print_build_metrics(runtime)\n except:\n # Never kill a build because of bad logic in metrics\n traceback.print_exc()\n runtime.logger.error(\"Error trying to show build metrics\")\n\n failed = [m.distgit_key for m, r in zip(runtime.image_metas(), results) if not r]\n if failed:\n runtime.logger.error(\"\\n\".join([\"Build/push failures:\"] + sorted(failed)))\n exit(1)\n\n # Push all late images\n for image in runtime.image_metas():\n image.distgit_repo().push_image([], push_to_defaults, additional_registries=push_to, push_late=True)", "title": "" }, { "docid": "ec57affe1f7d47d7d20d819ea825e403", "score": "0.60257953", "text": "def build_docker_image(self, img=\"bft:node\", path=None):\n if path:\n if any(list(map(lambda x: x in path, [\"http://\", \"https://\"]))):\n self.sendline(\"curl -O %s\" % path)\n self.expect_prompt(timeout=120)\n path = path.split('/')[-1]\n self.sendline(\"docker image load --input %s\" % path)\n else:\n # try to build an image, for first time\n path = os.path.join(\n os.path.dirname(pkgutil.get_loader(\"boardfarm\").path),\n \"bft-node\")\n self.sendline(\"docker build -t %s %s\" % (img, path))\n # will give it a good 10 mins to build image.\n self.expect_prompt(timeout=600)", "title": "" }, { "docid": "936e80a4218ada070dc180ce2d2dd7e2", "score": "0.60196286", "text": "def step_run_container(context, container_name, image_name):\n context.cli.sendline('run --name {0} {1}'.format(\n container_name,\n image_name))\n context.has_containers = True", "title": "" }, { "docid": "dd8775c50b40960c32477f57795b211e", "score": "0.60055095", "text": "def bup(ctx):\n ctx.run(\"docker build -t {}:latest . 
&& docker-compose up -d\".format(CONTAINER_NAME))", "title": "" }, { "docid": "0bc4ff343fb90f7df7bda690a36428da", "score": "0.6005021", "text": "def docker_compose(context, command, **kwargs):\n print(f'Running docker-compose command \"{command}\"')\n return context.run(f\"{COMPOSE_COMMAND} {command}\", env={\"PYTHON_VER\": PYTHON_VER}, **kwargs)", "title": "" }, { "docid": "e6bc4339e8be49906c5b2e9227090661", "score": "0.6004799", "text": "def _run_docker(self,\n docker_stage_slug,\n docker_command,\n on_line=None,\n on_done=None):\n def runnable(handle):\n \"\"\"\n Perform the Docker command given\n \"\"\"\n output = docker_command()\n\n line = ''\n for line in output:\n if isinstance(line, bytes):\n handle.write(line)\n else:\n handle.write(line.encode())\n\n handle.flush()\n\n if on_line:\n on_line(line)\n\n if on_done:\n return on_done(line)\n\n elif line:\n return True\n\n return False\n\n return self._stage('docker_%s' % docker_stage_slug,\n runnable=runnable).returncode", "title": "" }, { "docid": "067d4f403b4641f0e6c4b159d89e1ef7", "score": "0.59859544", "text": "def build_docker_compose (shell, localshell, target_host):\n\n if PROXY == True :\n result = shell.run([\"docker-compose\", \"--file\", \"docker-elk/docker-compose_proxy_settings.yml\", \"up\", \"-d\", \"--build\"])\n \n else :\n result = shell.run([\"docker-compose\", \"--file\", \"docker-elk/docker-compose.yml\", \"up\", \"-d\", \"--build\"])", "title": "" }, { "docid": "e0812c9f788931921f7779677ed76714", "score": "0.59617734", "text": "def od_pipeline():\n\n mount = '/mnt'\n\n vop = dsl.VolumeOp(\n name=\"create_pvc\",\n resource_name=\"my-pvc\",\n size=\"2Gi\",\n modes=dsl.VOLUME_MODE_RWM\n )\n\n def data_collection():\n return dsl.ContainerOp(\n name='Data collection',\n image='jameswong/data-collector:next',\n command=['sh', '-c'],\n arguments=['git show --summary && ls -l $0 && cp /app/*.json $0 && touch /app/results.txt && ls -l $0 ', mount],\n file_outputs={\n 'data': '/app/results.txt',\n },\n pvolumes={mount: vop.volume}\n )\n\n def detection(input):\n return dsl.ContainerOp(\n name=\"Outlier Detection\",\n image='jameswong/outlier-detection:next7',\n command=[\"sh\", \"-c\"],\n arguments=[\n \"git show --summary && ls -al $0 && python start.py --input_path $0/$1 --output_path $0 --job_id 1 && ls -al $0\",\n mount,\n '1212729.json', # will be parameterized\n ],\n pvolumes={mount: vop.volume}\n )\n \n collect_task = data_collection().set_image_pull_policy('Always')\n detection_task = detection(collect_task.output).set_image_pull_policy('Always').after(collect_task)", "title": "" }, { "docid": "6da3813f7c16185c5cc9994927eddcb0", "score": "0.59473616", "text": "def images_print(runtime, short, show_non_release, show_base_only, pattern):\n\n runtime.initialize(clone_distgits=False)\n\n # If user omitted braces, add them.\n if \"{\" not in pattern:\n pattern = \"{%s}\" % pattern.strip()\n\n count = 0\n if short:\n echo_verbose = lambda _: None\n else:\n echo_verbose = click.echo\n\n echo_verbose(\"\")\n echo_verbose(\"------------------------------------------\")\n\n non_release_images = runtime.group_config.non_release.images\n if non_release_images is Missing:\n non_release_images = []\n\n if not show_non_release:\n images = [i for i in runtime.image_metas() if i.distgit_key not in non_release_images]\n else:\n images = list(runtime.image_metas())\n\n for image in images:\n click.echo(image.in_group_config_path)\n count += 1\n continue\n\n # skip base images unless requested\n if image.base_only and not 
show_base_only:\n continue\n\n dfp = DockerfileParser(path=runtime.working_dir)\n try:\n dfp.content = image.fetch_cgit_file(\"Dockerfile\")\n except Exception:\n click.echo(\"Error reading Dockerfile from distgit: {}\".format(image.distgit_key))\n raise\n\n version = dfp.labels[\"version\"]\n\n s = pattern\n s = s.replace(\"{build}\", \"{component}-{version}-{release}\")\n s = s.replace(\"{repository}\", \"{image}:{version}-{release}\")\n s = s.replace(\"{namespace}\", image.namespace)\n s = s.replace(\"{name}\", image.name)\n s = s.replace(\"{component}\", image.get_component_name())\n s = s.replace(\"{image}\", dfp.labels[\"name\"])\n s = s.replace(\"{version}\", version)\n s = s.replace(\"{lf}\", \"\\n\")\n\n release_query_needed = '{release}' in s or '{pushes}' in s\n\n # Since querying release takes time, check before executing replace\n release = ''\n if release_query_needed:\n _, _, release = image.get_latest_build_info()\n\n s = s.replace(\"{release}\", release)\n\n pushes_formatted = ''\n for push_name in image.get_default_push_names():\n pushes_formatted += '\\t{} : [{}]\\n'.format(push_name, ', '.join(image.get_default_push_tags(version, release)))\n\n if pushes_formatted is '':\n pushes_formatted = \"(None)\"\n\n s = s.replace(\"{pushes}\", '{}\\n'.format(pushes_formatted))\n\n if \"{\" in s:\n raise IOError(\"Unrecognized fields remaining in pattern: %s\" % s)\n\n click.echo(s)\n count += 1\n\n echo_verbose(\"------------------------------------------\")\n echo_verbose(\"{} images\".format(count))\n\n # If non-release images are being suppressed, let the user know\n if not show_non_release and non_release_images:\n echo_verbose(\"\\nThe following {} non-release images were excluded; use --show-non-release to include them:\".format(\n len(non_release_images)))\n for image in non_release_images:\n echo_verbose(\" {}\".format(image))", "title": "" }, { "docid": "8c3dce290885be7d2e504c1eb07cecac", "score": "0.59440666", "text": "def run_in_docker():\n parser = ArgumentParser()\n parser.add_argument('command', type=str, nargs='+', help='command to run in docker')\n parser.add_argument('--name', type=str, default=None)\n parser.add_argument('--gpus', type=str, default='')\n args = vars(parser.parse_args())\n if len(args['command']) == 1:\n args['command'] = args['command'][0].split(' ')\n\n runner = DockerRunner()\n gpus = os.getenv('CUDA_VISIBLE_DEVICES', \"\")\n name = os.getenv('NAME', \"\")\n name = runner.run(args['command'], gpus, name)\n subprocess.call(['docker', 'logs', '-f', name])", "title": "" }, { "docid": "3f68f5fe823b897d9fdfbd04c6c84061", "score": "0.5932818", "text": "def docker_snippet(model_name, source=\"kipoi\"):\n src = kipoi.get_source(source)\n docker_container_json = os.path.join(src.local_path, CONTAINER_PREFIX, \"model-to-docker.json\")\n with open(docker_container_json, 'r') as docker_container_json_filehandle:\n model_group_to_image_dict = json.load(docker_container_json_filehandle)\n \n try:\n kw = get_example_kwargs(model_name, source)\n except Exception:\n kw = \"Error\"\n if isinstance(kw, dict):\n for key, value in kw.items():\n if isinstance(value, str):\n kw[key] = value.replace('example', '/app/example')\n ctx = {\"model_name\": model_name,\n \"example_kwargs\": kw,\n \"batch_size\": get_batch_size(model_name, source),\n \"source\": source,\n \"model_name_no_slash\": model_name.replace(\"/\", \"_\"),\n \"output_dir\" : \"example\"\n }\n try:\n if model_name in model_group_to_image_dict: # Special provision for MMSplice/mtsplice, APARENT/veff\n 
docker_image_name = model_group_to_image_dict[model_name]\n else:\n docker_image_name = model_group_to_image_dict[model_name.split('/')[0]]\n slim_docker_image_name = f\"{docker_image_name}-slim\"\n except Exception:\n docker_image_name = \"\"\n slim_docker_image_name = \"\" \n ctx[\"docker_image_name\"] = docker_image_name\n ctx[\"slim_docker_image_name\"] = slim_docker_image_name\n if not slim_docker_image_name and not docker_image_name:\n pull_snippet = \"Get the docker image\", \"Not available yet\"\n pull_snippet_fullsized = \"Get the full sized docker image\", \"Not available yet\"\n activated_snippet = \"Get the activated conda environment inside the container\", \"Not available yet\"\n test_snippet = \"Test the model\", \"Not available yet\"\n predict_snippet = \"Make prediction for custom files directly\", \"Not available yet\"\n else:\n pull_snippet = \"Get the docker image\", \"\"\"docker pull {slim_docker_image_name}\"\"\".format(**ctx)\n pull_snippet_fullsized = \"Get the full sized docker image\", \"\"\"docker pull {docker_image_name}\"\"\".format(**ctx)\n activated_snippet = \"Get the activated conda environment inside the container\", \"\"\"docker run -it {slim_docker_image_name}\"\"\".format(**ctx)\n test_snippet = \"Test the model\", \"docker run {slim_docker_image_name} kipoi test {model_name} --source={source}\".format(**ctx)\n predict_snippet = \"Make prediction for custom files directly\", \"\"\"# Create an example directory containing the data\nmkdir -p $PWD/kipoi-example \n# You can replace $PWD/kipoi-example with a different absolute path containing the data \ndocker run -v $PWD/kipoi-example:/app/ {slim_docker_image_name} \\\\\nkipoi get-example {model_name} -o /app/{output_dir} \ndocker run -v $PWD/kipoi-example:/app/ {slim_docker_image_name} \\\\\nkipoi predict {model_name} \\\\\n--dataloader_args='{example_kwargs}' \\\\\n-o '/app/{model_name_no_slash}.example_pred.tsv' \n# check the results\nhead $PWD/kipoi-example/{model_name_no_slash}.example_pred.tsv\n \"\"\".format(**ctx)\n if model_name == \"Basenji\":\n test_snippet = \"Test the model\", \"docker run {slim_docker_image_name} kipoi test {model_name} --batch_size=2 --source={source}\".format(**ctx)\n predict_snippet = \"Make prediction for custom files directly\", \"\"\"# Create an example directory containing the data\nmkdir -p $PWD/kipoi-example \n# You can replace $PWD/kipoi-example with a different absolute path containing the data \ndocker run -v $PWD/kipoi-example:/app/ {slim_docker_image_name} \\\\\nkipoi get-example {model_name} -o /app/{output_dir} \ndocker run -v $PWD/kipoi-example:/app/ {slim_docker_image_name} \\\\\nkipoi predict {model_name} \\\\\n--dataloader_args='{example_kwargs}' \\\\\n--batch_size=2 -o '/app/{model_name_no_slash}.example_pred.tsv' \n# check the results\nhead $PWD/kipoi-example/{model_name_no_slash}.example_pred.tsv\n \"\"\".format(**ctx)\n return [\n (pull_snippet),\n (pull_snippet_fullsized),\n (activated_snippet),\n (test_snippet),\n (predict_snippet),\n ]", "title": "" }, { "docid": "0bebb0f6193bee3c2c77afce710f94ae", "score": "0.5932524", "text": "def build(self, container):", "title": "" }, { "docid": "5aa5021685fea1c785832e657011efab", "score": "0.59316826", "text": "def build(self):\n logger.info('Building the container image')\n\n subprocess.call([\n self.container_runtime,\n 'build',\n '--layers',\n '--tag', self.container_image,\n '--build-arg', 'OPENSHIFT_VERSION={}'.format(\n self.openshift_version),\n BASE_DIR,\n ])\n\n logger.info('Container image built')", 
"title": "" }, { "docid": "3cffc842af7152d614d414d03fd9eec8", "score": "0.5931192", "text": "def shell(args, config):\n print '===> Run docker interactive shell from image: {0}'.format(config['image'])\n release = generate_release(config['git'])\n create_tmpdir()\n\n options = {\n 'release': release,\n 'target': config['target'],\n 'image': config['image'],\n }\n docker_cmd = 'docker run --rm -v $(pwd)/build-env/:/home/builder/build \\\n -e RELEASE={release} -e TARGET={target} \\\n -it --entrypoint=/bin/bash {image}'.format(**options)\n\n print '===> Run container: {0}'.format(' '.join(docker_cmd.split()))\n subprocess.call(docker_cmd, shell=True)", "title": "" }, { "docid": "bee491ba7bae2f9a8091aae7c2a2cef8", "score": "0.59224653", "text": "def publish_test_images():\n tests = []\n for version in SupportedPythons:\n key = _test_image_step(version)\n tests.append(\n StepBuilder(f\":docker: test-image {version}\", key=key)\n # these run commands are coupled to the way the test-image-builder is built\n # see python_modules/automation/automation/docker/images/buildkite-test-image-builder\n .run(\n # credentials\n \"/scriptdir/aws.pex ecr get-login --no-include-email --region us-west-2 | sh\",\n 'export GOOGLE_APPLICATION_CREDENTIALS=\"/tmp/gcp-key-elementl-dev.json\"',\n \"/scriptdir/aws.pex s3 cp s3://$${BUILDKITE_SECRETS_BUCKET}/gcp-key-elementl-dev.json $${GOOGLE_APPLICATION_CREDENTIALS}\",\n \"export BASE_IMAGE=$${AWS_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com/buildkite-unit:py\"\n + version\n + \"-\"\n + UNIT_IMAGE_VERSION,\n # build and tag test image\n \"export TEST_IMAGE=$${AWS_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com/buildkite-test-image:$${BUILDKITE_BUILD_ID}-\"\n + version,\n \"./python_modules/dagster-test/dagster_test/test_project/build.sh \"\n + version\n + \" $${TEST_IMAGE}\",\n #\n # push the built image\n 'echo -e \"--- \\033[32m:docker: Pushing Docker image\\033[0m\"',\n \"docker push $${TEST_IMAGE}\",\n )\n .on_python_image(\n \"buildkite-test-image-builder:py{python_version}-{image_version}\".format(\n python_version=SupportedPython.V3_8, image_version=TEST_IMAGE_BUILDER_VERSION\n ),\n [\n \"AIRFLOW_HOME\",\n \"AWS_ACCOUNT_ID\",\n \"AWS_ACCESS_KEY_ID\",\n \"AWS_SECRET_ACCESS_KEY\",\n \"BUILDKITE_SECRETS_BUCKET\",\n ],\n )\n .build()\n )\n\n key = _core_test_image_step(version)\n tests.append(\n StepBuilder(f\":docker: test-image-core {version}\", key=key)\n # these run commands are coupled to the way the test-image-builder is built\n # see python_modules/automation/automation/docker/images/buildkite-test-image-builder\n .run(\n # credentials\n \"/scriptdir/aws.pex ecr get-login --no-include-email --region us-west-2 | sh\",\n # set the base image\n \"export BASE_IMAGE=$${AWS_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com/buildkite-unit:py\"\n + version\n + \"-\"\n + UNIT_IMAGE_VERSION,\n # build and tag test image\n \"export TEST_IMAGE=$${AWS_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com/buildkite-test-image-core:$${BUILDKITE_BUILD_ID}-\"\n + version,\n \"./python_modules/dagster-test/build_core.sh \" + version + \" $${TEST_IMAGE}\",\n #\n # push the built image\n 'echo -e \"--- \\033[32m:docker: Pushing Docker image\\033[0m\"',\n \"docker push $${TEST_IMAGE}\",\n )\n .on_python_image(\n \"buildkite-test-image-builder:py{python_version}-{image_version}\".format(\n python_version=SupportedPython.V3_8, image_version=TEST_IMAGE_BUILDER_VERSION\n ),\n [\n \"AWS_ACCOUNT_ID\",\n \"AWS_ACCESS_KEY_ID\",\n \"AWS_SECRET_ACCESS_KEY\",\n \"BUILDKITE_SECRETS_BUCKET\",\n ],\n )\n 
.build()\n )\n return tests", "title": "" }, { "docid": "d4c4796113b8747b4620ac27dfc1a054", "score": "0.5920188", "text": "def _parse_args() -> argparse.Namespace:\n\n # There are multiple ways to invoke finer-grained control over which\n # images are built.\n #\n # (1) How many images to build\n #\n # all: all images\n # default: images required for minimum functionality\n # - excluding metrics images\n # - including postgres, proxy, etc\n #\n # (2) Of the core orc8r images, which modules to build\n #\n # Defaults to all modules, but can be further specified by targeting a\n # deployment type.\n\n parser = argparse.ArgumentParser(description='Orc8r build tool')\n\n # Run something\n parser.add_argument(\n '--tests', '-t',\n action='store_true',\n help='Run unit tests',\n )\n\n parser.add_argument(\n '--mount', '-m',\n action='store_true',\n help='Mount the source code and create a bash shell',\n )\n\n parser.add_argument(\n '--precommit', '-c',\n action='store_true',\n help='Mount the source code and run pre-commit checks',\n )\n\n parser.add_argument(\n '--coverage', '-o',\n action='store_true',\n help='Generate test coverage statistics',\n )\n\n parser.add_argument(\n '--lint', '-l',\n action='store_true',\n help='Run lint test',\n )\n\n parser.add_argument(\n '--health', '-e',\n action='store_true',\n help='Run health test',\n )\n\n # Run something\n parser.add_argument(\n '--git', '-g',\n action='store_true',\n help='Get git info',\n )\n\n # How to do it\n parser.add_argument(\n '--nocache', '-n',\n action='store_true',\n help='Build the images with no Docker layer caching',\n )\n parser.add_argument(\n '--down', '-down',\n action='store_true',\n default=False,\n help='Leave containers up after running tests',\n )\n\n return parser.parse_args()", "title": "" }, { "docid": "b89d23f4bcc8e77bea5943eeb0dc4f69", "score": "0.59019315", "text": "def make_docker_dir(specs):\n \n new_image_path = saved_images_path / specs['proc_name']\n if new_image_path.exists():\n shutil.rmtree(new_image_path)\n\n # Copy docker dir template\n shutil.copytree(docker_base_path, new_image_path)\n\n # Overwrite proc.yml\n with open(new_image_path / \"proc.yml\", \"w+\") as f:\n f.write(yaml.dump(specs))\n\n # Rewrite build and run scripts\n with open(new_image_path / \"build.sh\", \"r+\") as f:\n build_script = f.read().format(IMAGE_NAME=specs['proc_name'])\n with open(new_image_path / \"build.sh\", \"w\") as f:\n f.write(build_script)\n\n with open(new_image_path / \"run.sh\", \"r+\") as f:\n run_script = f.read().format(IMAGE_NAME=specs['proc_name'])\n with open(new_image_path / \"run.sh\", \"w\") as f:\n f.write(run_script)", "title": "" }, { "docid": "5be7f11385927de29c21ca7820627b24", "score": "0.58970785", "text": "def start(ctx):\n try:\n ctx.run('docker volume create {name}'.format(name=LOG_VOLUME))\n except:\n pass\n for i in range(0, INST_NUMBER):\n ctx.run('''docker run -d --net={net} -v logs:/tmp/{app}/ \\\n --name={app}_{ver}_{i} \\\n {img}:latest \\\n sh -c \"python service/main.py\"'''.format(net=NETWORK, img=APP_IMG, app=APP_NAME,\n ver=PACKAGE_VERSION, i=i))", "title": "" }, { "docid": "af74cbc9c251559e489a88340b587f2d", "score": "0.5893835", "text": "def dockerfiles(\n template: Path,\n dev_output: Path,\n prod_output: Path,\n dry_run: bool,\n verbose: int,\n yes: bool,\n port: int,\n ):\n from .generate import Generator\n\n gen = Generator(template, dev_output, prod_output)\n gen.generate(dry_run, verbose, yes, port)", "title": "" }, { "docid": "a7a191d7abe90df87806b5a57f5f61c4", "score": 
"0.588628", "text": "def test_build_docker_cmd_boot_config(self, run_command_mock,\n process_check_mock, _):\n bootstrap = docker_bootstrap.Bootstrap(\n '/docker_folder',\n '/workspace',\n 'test_config.yaml',\n bootstrap_config='bootstrap/test_config/config_multi_mount_points.yaml',\n docker_tag='tf_test/framework',\n auth_token_dir='/test/auth_token')\n\n bootstrap.run_tests()\n # Assumes last call was to kick off the docker image.\n arg0 = run_command_mock.call_args[0][0]\n\n expected_docker_cmd = (\n 'nvidia-docker run --rm '\n '-v /home/user/data/imagenet:/data/imagenet '\n '-v /home/user/data/cifar-10:/data/cifar-10 '\n '-v /test/auth_token:/auth_tokens '\n '-v /workspace:/workspace tf_test/framework python '\n '/workspace/git/benchmark_harness/oss_bench/harness/controller.py '\n '--workspace=/workspace --test-config=test_config.yaml '\n '--framework=tensorflow')\n\n self.assertEqual(arg0, expected_docker_cmd)\n process_check_mock.assert_called()", "title": "" }, { "docid": "2dd89230ed43a5c3fc78235f521e8691", "score": "0.5861163", "text": "def Main(self):\n options = self.ParseArguments()\n self.ParseOptions(options)\n\n self._SetDockerDirectory(self.docker_directory)\n\n\n if options.command == 'mount':\n self.Mount(options.container_id, options.mountpoint)\n\n elif options.command == 'history':\n self.ShowHistory(\n options.container_id, show_empty_layers=options.show_empty)\n\n elif options.command == 'list':\n if options.what == 'all_containers':\n self.ShowContainers()\n elif options.what == 'running_containers':\n self.ShowContainers(only_running=True)\n elif options.what == 'repositories':\n self.ShowRepositories()\n\n else:\n raise ValueError('Unhandled command %s' % options.command)", "title": "" }, { "docid": "d2c44d1dc982c88dc9727c11c0f545a4", "score": "0.5855853", "text": "def docker_run(task, image, pull_image=True, entrypoint=None, container_args=None,\n volumes=None, remove_container=True, **kwargs):\n return _docker_run(\n task, image, pull_image, entrypoint, container_args, volumes,\n remove_container, **kwargs)", "title": "" }, { "docid": "daca60949c6b4e3fb7b69eae26ac6d03", "score": "0.58495396", "text": "def print_dockerfile(collector, image, **kwargs):\n print('\\n'.join(image.docker_file.docker_lines))", "title": "" }, { "docid": "d8aebece7c0dd879aba0a2b9a3e81419", "score": "0.58492506", "text": "def run_task_based_container(\n aci_client,\n resource_group,\n container_group_name,\n container_name,\n container_image_name,\n start_command_line=None,\n):\n # If a start command wasn't specified, use a default\n if start_command_line is None:\n start_command_line = \"-c default/path/from/function\"\n\n # Configure some environment variables in the container which the\n # wordcount.py or other script can read to modify its behavior.\n env_var_1 = EnvironmentVariable(\n name=\"STORAGE_CONNECTION_STRING\", value=\"path/from/function/enviro\"\n )\n env_var_2 = EnvironmentVariable(name=\"MinLength\", value=\"8\")\n\n logging.info(\n f\"Creating container group '{container_group_name}' with start command '{start_command_line}'\"\n )\n\n # Configure the container\n container_resource_requests = ResourceRequests(memory_in_gb=1.5, cpu=1.0)\n container_resource_requirements = ResourceRequirements(\n requests=container_resource_requests\n )\n container = Container(\n name=container_name,\n image=container_image_name,\n resources=container_resource_requirements,\n command=start_command_line.split(),\n environment_variables=[env_var_1, env_var_2],\n )\n\n # Configure the container 
group\n group = ContainerGroup(\n location=resource_group.location,\n containers=[container],\n os_type=OperatingSystemTypes.linux,\n restart_policy=ContainerGroupRestartPolicy.never,\n )\n\n # Create the container group\n result = aci_client.container_groups.begin_create_or_update(\n resource_group.name, container_group_name, group\n )\n\n # Wait for the container create operation to complete. The operation is\n # \"done\" when the container group provisioning state is one of:\n # Succeeded, Canceled, Failed\n while result.done() is False:\n sys.stdout.write(\".\")\n time.sleep(1)\n\n # Get the provisioning state of the container group.\n container_group = aci_client.container_groups.get(\n resource_group.name, container_group_name\n )\n\n if str(container_group.provisioning_state).lower() == \"succeeded\":\n logging.info(\n f\"\\nCreation of container group '{container_group_name}' succeeded.\"\n )\n else:\n logging.info(\n f\"\\nCreation of container group '{container_group_name}' failed. Provisioning state is: {container_group.provisioning_state}\"\n )", "title": "" }, { "docid": "70a80fd06f9666fd5c7e1394338e1bec", "score": "0.5833689", "text": "def main_self(load):\n current_config = config.read_config()\n images = release(registry=ecr_registry(region=current_config[\"region\"]))\n next_config = {}\n next_config.update(current_config)\n next_config[\"server_image\"] = images[\"server\"].with_digest()\n next_config[\"worker_image\"] = images[\"worker\"].with_digest()\n write_config_response = config.write_config(next_config)\n print \"Configuration has been committed to %s\" % json.dumps(\n write_config_response, sort_keys=True, indent=2)\n\n if load:\n subprocess.check_call([\"rbs/images/worker\"])\n subprocess.check_call([\n \"docker\", \"tag\", \"bazel/rbs/images:worker\", images[\"worker\"].full_tag()\n ])", "title": "" }, { "docid": "89b4fcebdf39662153e1cd9202e46513", "score": "0.582965", "text": "def _run_docker(cmd: List[str]) -> None:\n print(\"Running 'docker-compose %s'...\" % \" \".join(cmd))\n try:\n subprocess.run(['docker-compose'] + cmd, check=True)\n except subprocess.CalledProcessError as err:\n exit(err.returncode)", "title": "" }, { "docid": "82ea42f9bf65eb32e090e8b050c81500", "score": "0.58262616", "text": "def build_image_fn(request, prepared_test_build_base, bitbake_image):\n\n def img_builder():\n reset_build_conf(prepared_test_build_base[\"build_dir\"])\n build_image(\n prepared_test_build_base[\"build_dir\"],\n prepared_test_build_base[\"bitbake_corebase\"],\n bitbake_image,\n [\n 'SYSTEMD_AUTO_ENABLE_pn-mender = \"disable\"',\n 'EXTRA_IMAGE_FEATURES_append = \" ssh-server-openssh\"',\n ],\n )\n return prepared_test_build_base[\"build_dir\"]\n\n return img_builder", "title": "" }, { "docid": "2649707e74a07029a091e110b371e904", "score": "0.5821975", "text": "def check() -> None:\n docker_check_impl()", "title": "" }, { "docid": "49ff908565bdcf8c5664ef7c9133fbba", "score": "0.58192223", "text": "def build():\n\n # Setup build directory\n local(\"mkdir -p output\")\n\n # Fabric by godber\n local(\"mkdir -p output/fabric-godber\")\n local(\"cp fabric-godber/fabric-output/index.html output/fabric-godber\")\n\n # Python and MongoDB by wtolson\n local(\"cp -r python-and-mongodb output/\")\n\n # Python and MongoDB by wtolson\n local(\"cp -r rpi-lapdock output/\")\n\n # Godber's virtualenv presentation\n local(\"mkdir -p output/virtualenv-godber\")\n local(\"cp virtualenv-godber/virtualenv.pdf output/virtualenv-godber\")\n\n local(\"mkdir -p 
output/pandas-and-friends-godber\")\n local(\"cp pandas-intro-godber/*.{gif,jpg} output/pandas-and-friends-godber/\", shell=\"/bin/bash\")\n local(\"cp pandas-intro-godber/presentation-deck.html output/pandas-and-friends-godber/index.html\")\n local(\"cp pandas-intro-godber/presentation.pdf output/pandas-and-friends-godber/pandas-and-friends.pdf\")\n\n local(\"cp -r salt-stack-forrest output/\")\n\n # Adding Sara Braden's Feb 2014 talk\n local(\"cp -r image_processing_pillow output/\")\n\n # Adding Austin's March 2014 talk\n local(\"cp -r ipython-godber output/\")\n\n # Adding Trevor's PEP talk from March 2014\n local(\"cp -r pep-428-pathlib-trevor output/\")\n\n # Adding Jerry's talk from April 2014\n local(\"cp -r python3-jerry output/\")\n\n # Adding Sarah's PEP450 talk from May 2014\n local(\"cp -r pep-450-braden output/\")\n\n # Austin's pytest talk, June 2014\n local(\"mkdir -p output/pytest-godber\")\n local(\"cp -r pytest-godber/Pytest_Presentation.slides.html output/pytest-godber/index.html\")\n local(\"cp -r pytest-godber/reveal.js output/pytest-godber/\")\n local(\"cp -r pytest-godber/custom.css output/pytest-godber/\")\n local(\"cp -r pytest-godber/*.png output/pytest-godber/\")\n\n # Adding the Thunderstorm 2014 directory to output\n local(\"cp -r thunderstorm-2014 output\")\n\n # Adding the Exploring Numpy and Python LIRC\n local(\"cp -r exploring-numpy-godber output\")\n local(\"cp -r python-lirc-davis output\")\n\n # Adding Sarah's Machine Learning Talk\n local(\"cp -r machine_learning_braden output\")\n\n # Adding Tim's PyPy talk\n local(\"cp -r pypy-hochberg output\")\n\n local(\"cp -r antlr-preston output\")\n local(\"cp -r pandas-intro-godber-jan-2014 output\")\n\n local(\"cp -r queue-battle output\")\n local(\"cp -r rq-godber output\")\n\n # Adding Michael's GUI talk\n local(\"cp -r GUI_Programming_Wx_and_Kivy-Michael output\")\n\n # Adding Michael's Pyinstaller talk\n local(\"cp -r Pyinstaller_Frozen_Binaries-Michael output\")\n\n # Adding Michael's Win32com talk\n local(\"cp -r Win32com_Automating_Outlook-Michael output\")\n\n # Adding Michael's SQLAlchemy & Pycon 2016 talk\n local(\"cp -r SQLAlchemy_and_Pycon_2016_update-Michael output\")", "title": "" }, { "docid": "8b19fecaf1333075fccd25c06f870fa6", "score": "0.5818368", "text": "def build(self):\n cmd = 'docker build -t {tag} -f {dockerfile}'.format(tag=self.docker_image, dockerfile=self.dockerfile_path)\n if not self.build_cache:\n cmd += ' --no-cache'\n cmd += ' {source}'.format(source=self.source,)\n return cmd", "title": "" }, { "docid": "c1a23dcf37e88d8456a5a8234b10595d", "score": "0.5817774", "text": "def third_party_Dockerfile(os, software, repository, client_url):\n port = ports[software]\n return render_template__('third_party/%s/Dockerfile.tpl' % software, **locals())", "title": "" }, { "docid": "b22b524adbf22c5cefb8b8d0b688377f", "score": "0.58149797", "text": "def _run(self, image, wd, script):\n command_string = \"docker run -d -v /docker_data:/root/docker_data \" + \\\n \"-w {} {} python {}\".format(wd, image, script)\n self.id = subprocess.check_output(command_string.split()).decode('ascii')[:-1]", "title": "" }, { "docid": "8a3f310af9063b5018dff8f5669d2c3f", "score": "0.58072865", "text": "def dcos_docker(verbose: None) -> None:\n # We \"use\" variables to satisfy linting tools.\n for _ in (verbose, ):\n pass", "title": "" }, { "docid": "ffc51186dc1866d871b15c5317c8d34f", "score": "0.5804416", "text": "def _run_cmd_in_docker_container(image_name, command, environment):\n client = 
utils.get_docker_client()\n container = client.create_container(\n image=image_name,\n command=[command],\n environment=environment,\n )\n if 'Id' not in container:\n raise ShubException(\"Create container error:\\n %s\" % container)\n try:\n client.start(container)\n except docker.errors.APIError as e:\n explanation = utils.ensure_unicode(e.explanation or '')\n if 'executable file not found' in explanation:\n # docker.errors.APIError: 500 Server Error:\n # Internal Server Error (\"Cannot start container xxx:\n # [8] System error: exec: \"shub-image-info\":\n # executable file not found in $PATH\")\n return 127, None\n raise\n statuscode = client.wait(container=container['Id'])['StatusCode']\n logs = client.logs(\n container=container['Id'], stream=False, timestamps=False,\n stdout=True, stderr=True if statuscode else False,\n )\n return statuscode, utils.ensure_unicode(logs)", "title": "" }, { "docid": "ce1b968dfe722d688ed955dd60bf0a4e", "score": "0.5797282", "text": "def docker_run(name, workspace, project_src_path):\n command = [\n 'docker', 'run', '--name', name, '--rm', '-e', 'PROJECT_SRC_PATH', '-e',\n 'OSS_FUZZ_PROJECT_NAME', '-e', 'WORKSPACE', '-e', 'REPOSITORY', '-e',\n 'DRY_RUN', '-e', 'CI', '-e', 'SANITIZER', '-e', 'GIT_SHA', '-e',\n 'FILESTORE', '-e', 'NO_CLUSTERFUZZ_DEPLOYMENT'\n ]\n if project_src_path:\n command += ['-v', f'{project_src_path}:{project_src_path}']\n command += [\n '-v', '/var/run/docker.sock:/var/run/docker.sock', '-v',\n f'{workspace}:{workspace}', f'{BASE_CIFUZZ_DOCKER_TAG}/{name}'\n ]\n print('Running docker command:', command)\n subprocess.run(command, check=True)", "title": "" }, { "docid": "2d6ae7a5f2c9284121da147d5de9ff78", "score": "0.57776207", "text": "def checkDocker(self):\n pass", "title": "" }, { "docid": "cd787a5dab9914c8f7564872c6361169", "score": "0.57758427", "text": "def prefect_base_image(pytestconfig: \"pytest.Config\", docker: DockerClient):\n image_name = get_prefect_image_name()\n\n image_exists, version_is_right = False, False\n\n try:\n image_exists = bool(docker.images.get(image_name))\n except ImageNotFound:\n pass\n\n if image_exists:\n output = docker.containers.run(\n image_name, [\"prefect\", \"--version\"], remove=True\n )\n image_version = output.decode().strip()\n version_is_right = image_version == prefect.__version__\n\n if not image_exists or not version_is_right:\n if pytestconfig.getoption(\"--disable-docker-image-builds\"):\n if not image_exists:\n raise Exception(\n \"The --disable-docker-image-builds flag is set, but \"\n f\"there is no local {image_name} image\"\n )\n if not version_is_right:\n raise Exception(\n \"The --disable-docker-image-builds flag is set, but \"\n f\"{image_name} includes {image_version}, not {prefect.__version__}\"\n )\n else:\n CliRunner().invoke(dev_app, [\"build-image\"])\n\n return image_name", "title": "" }, { "docid": "f00d2841aedbdc3131208f8421a4c5bb", "score": "0.576544", "text": "def test_build_docker_cmd_pytorch(self, run_command_mock, process_check_mock,\n _):\n bootstrap = docker_bootstrap.Bootstrap(\n '/docker_folder',\n '/workspace',\n 'test_config.yaml',\n framework='pytorch',\n docker_tag='tf_test/framework',\n auth_token_dir='/test/auth_token')\n\n bootstrap.run_tests()\n # Assumes last call was to kick off the docker image.\n arg0 = run_command_mock.call_args[0][0]\n self.assertEqual(\n arg0, 'nvidia-docker run --ipc=host --rm '\n '-v /test/auth_token:/auth_tokens -v /workspace:/workspace '\n 'tf_test/framework python '\n 
'/workspace/git/benchmark_harness/oss_bench/harness/controller.py'\n ' --workspace=/workspace --test-config=test_config.yaml'\n ' --framework=pytorch')\n process_check_mock.assert_called()", "title": "" }, { "docid": "7d7216b3d17a276011df1dfd89cbe4b6", "score": "0.5757992", "text": "def run_local(ctx, action='up'):\n # 2 Scale up or down\n cmd = f'docker-compose -f docker-compose.yml -p {PROJECT_NAME.lower()}'\n if action == 'up':\n cmd += ' up -d'\n elif action in ['stop', 'down', 'build']:\n cmd += f' {action}'\n elif 'logs' in action:\n parts = action.split('-')\n assert len(parts) == 2\n cmd += ' logs {}'.format(parts[1])\n elif 'exec' in action:\n parts = action.split('-')\n assert len(parts) == 2\n cmd += ' exec {} bash'.format(parts[1])\n elif 'makemigrations' in action:\n cmd += ' run --rm web python manage.py makemigrations'\n elif 'collectstatic' in action:\n cmd += ' run --rm web python manage.py collectstatic --noinput'\n elif 'migrate' in action:\n cmd += ' run --rm web python manage.py migrate'\n elif 'init' in action:\n cmd += ' run --rm web ./server-init.sh'\n else:\n print('action can only be up/stop/down')\n ctx.run(cmd, pty=True)", "title": "" }, { "docid": "271e9762835fccbfbaab9f75fff6f047", "score": "0.5753987", "text": "def images_update_dockerfile(runtime, stream, version, release, repo_type, message, push):\n runtime.initialize(validate_content_sets=True)\n\n # If not pushing, do not clean up our work\n runtime.remove_tmp_working_dir = push\n\n # For each \"--stream alias image\" on the command line, register its existence with\n # the runtime.\n for s in stream:\n runtime.register_stream_alias(s[0], s[1])\n\n # Get the version from the atomic-openshift package in the RPM repo\n if version == \"auto\":\n version = runtime.auto_version(repo_type)\n\n if version and not runtime.valid_version(version):\n raise ValueError(\n \"invalid version string: {}, expecting like v3.4 or v1.2.3\".format(version)\n )\n\n runtime.clone_distgits()\n for image in runtime.image_metas():\n dgr = image.distgit_repo()\n (real_version, real_release) = dgr.update_distgit_dir(version, release)\n dgr.commit(message)\n dgr.tag(real_version, real_release)\n\n if push:\n runtime.push_distgits()", "title": "" }, { "docid": "5e9612a1fa2f06df1fccebe550bf9aff", "score": "0.5749865", "text": "def docker(self) -> aiodocker.Docker:\n return aiodocker.Docker()", "title": "" }, { "docid": "8d44877d2e58e5ca0104ea88be218655", "score": "0.57457834", "text": "def build_parser():\n parser = ArgumentParser(\n description='git cirrus docker command'\n )\n parser.add_argument('command', nargs='?')\n\n subparsers = parser.add_subparsers(dest='command')\n build_command = subparsers.add_parser('build')\n build_command.add_argument(\n '--docker-repo', '-r',\n dest='docker_repo',\n help='docker repository name',\n default=None,\n )\n build_command.add_argument(\n '--login',\n action='store_true',\n dest='login',\n help='perform docker login before command using settings in cirrus.conf',\n default=False\n )\n build_command.add_argument(\n '--directory', '-d',\n dest='directory',\n help='path to directory containing dockerfile to run docker build in',\n default=None,\n )\n build_command.add_argument(\n '--dockerstache-template',\n dest='dockerstache_template',\n default=None,\n help='directory containing dockerstache template to render'\n )\n build_command.add_argument(\n '--dockerstache-context',\n dest='dockerstache_context',\n default=None,\n help='path to dockerstache context file'\n )\n 
build_command.add_argument(\n '--dockerstache-defaults',\n dest='dockerstache_defaults',\n default=None,\n help='path to dockerstache defaults file'\n )\n\n push_command = subparsers.add_parser('push')\n push_command.add_argument(\n '--login',\n action='store_true',\n dest='login',\n help='perform docker login before command using settings in cirrus.conf',\n default=False\n )\n push_command.add_argument(\n '--latest',\n action='store_true',\n dest='latest',\n help='include the image tagged \"latest\" in the docker push command',\n default=False\n )\n\n subparsers.add_parser('test', help='test docker connection')\n opts = parser.parse_args()\n return opts", "title": "" }, { "docid": "4cdabb0d00e94f4266b93af2e7fc6b2f", "score": "0.57452714", "text": "def test_image_build_subcommand(mock_config_source):\n runner = CliRunner()\n\n # Do some mock weirdness to make sure our commands get the right values for the default profile.\n mock_config_source = mock_config_source.return_value\n mock_config_source.load.return_value = {\"default\": {\"provider\": \"mock-aws\", \"credentials\": {}}}\n result = cloudless.profile.load_profile(\"default\")\n assert result == {\"provider\": \"mock-aws\", \"credentials\": {}}\n\n # Remove state from old tests that may have failed.\n if os.path.exists(IMAGE_BUILD_STATE):\n shutil.rmtree(IMAGE_BUILD_STATE)\n\n result = runner.invoke(get_cldls(), ['image-build', 'deploy', IMAGE_BUILD_CONFIGURATION])\n assert result.exception is None\n assert result.output == (pytest_regex(\n r'image group with provider: mock-aws\\n'\n r'Successfully deployed! Log in with:\\n'\n r'ssh -i /.*/tests/.*/.cloudless/id_rsa_image_build cloudless_image_build@.*\\n'))\n assert result.exit_code == 0\n\n result = runner.invoke(get_cldls(), ['image-build', 'configure', IMAGE_BUILD_CONFIGURATION])\n assert result.output == ('image group with provider: mock-aws\\n'\n 'Configure complete!\\n')\n assert result.exception is None\n assert result.exit_code == 0\n\n result = runner.invoke(get_cldls(), ['image-build', 'check', IMAGE_BUILD_CONFIGURATION])\n assert result.output == ('image group with provider: mock-aws\\n'\n 'Check complete!\\n')\n assert result.exception is None\n assert result.exit_code == 0\n\n result = runner.invoke(get_cldls(), ['image-build', 'cleanup', IMAGE_BUILD_CONFIGURATION])\n assert result.output == ('image group with provider: mock-aws\\n'\n 'Cleanup complete!\\n')\n assert result.exception is None\n assert result.exit_code == 0\n\n result = runner.invoke(get_cldls(), ['image-build', 'run', IMAGE_BUILD_CONFIGURATION])\n assert result.output == ('image group with provider: mock-aws\\n'\n 'Build complete!\\n')\n assert result.exception is None\n assert result.exit_code == 0\n\n # Need to test these with the image build commands otherwise they would race in parallel tests.\n result = runner.invoke(get_cldls(), ['image', 'get', 'my-image'])\n assert result.output == (pytest_regex(\n r'image group with provider: mock-aws\\n'\n r'Image Name: .*\\n'\n r'Image Id: .*\\n'\n r'Image Created At: .*\\n'))\n assert result.exception is None\n assert result.exit_code == 0\n\n result = runner.invoke(get_cldls(), ['image', 'list'])\n assert result.output == (pytest_regex(\n r'image group with provider: mock-aws\\n'\n r'Listing all images.\\n'\n r'Image Name: .*\\n'\n r'Image Id: .*\\n'\n r'Image Created At: .*\\n'))\n assert result.exception is None\n assert result.exit_code == 0\n\n result = runner.invoke(get_cldls(), ['image', 'delete', 'my-image'])\n assert result.output == ('image group 
with provider: mock-aws\\n'\n 'Deleted image: my-image\\n')\n assert result.exception is None\n assert result.exit_code == 0", "title": "" }, { "docid": "035ff43cf2f856fcf2e3b9970e01e335", "score": "0.573081", "text": "def docker(name: str, image: str, scripts: List[str], env: Dict[str, str]):\n check_docker()\n\n docker_bash = REPO_ROOT / \"docker\" / \"bash.sh\"\n command = [docker_bash, \"--name\", name]\n for key, value in env.items():\n command.append(\"--env\")\n command.append(f\"{key}={value}\")\n\n SCRIPT_DIR.mkdir(exist_ok=True)\n\n script_file = SCRIPT_DIR / f\"{name}.sh\"\n with open(script_file, \"w\") as f:\n f.write(\"set -eux\\n\\n\")\n f.write(\"\\n\".join(scripts))\n f.write(\"\\n\")\n\n command += [image, \"bash\", str(script_file.relative_to(REPO_ROOT))]\n\n try:\n cmd(command)\n except RuntimeError as e:\n clean_exit(f\"Error invoking Docker: {e}\")\n except KeyboardInterrupt:\n cmd([\"docker\", \"stop\", \"--time\", \"1\", name])", "title": "" }, { "docid": "adb8fd4f74c83d70583b8254a6656660", "score": "0.5728988", "text": "def test_execute_dockerized_benchmark_using_images_only(mock_hashes_for_benchmarks,\n mock_have_build_options, mock_pull_image,\n mock_execute, mock_run_image, mock_symlink):\n # Build a default job control object with images\n job_control = proto_control.JobControl(remote=False, dockerized_benchmark=True)\n generate_test_objects.generate_environment(job_control)\n generate_test_objects.generate_images(job_control)\n\n mock_run_image.return_value = b\"benchmark_http_client output....\"\n mock_execute.return_value = None\n mock_have_build_options.return_value = False\n mock_hashes_for_benchmarks.return_value = {'tag1', 'tag2'}\n\n # Instantiate the BenchmarkRunner so that it prepares the job control\n # objects for each benchmark\n benchmark = run_benchmark.BenchmarkRunner(job_control)\n benchmark.execute()\n\n mock_have_build_options.assert_called()\n mock_pull_image.assert_called()\n mock_symlink.assert_called()\n mock_execute.assert_has_calls([mock.call(), mock.call()])", "title": "" }, { "docid": "7b80438f71901795dfae7c2faaff9148", "score": "0.5724597", "text": "def start_docker(self, image_name, command):\n logger.warning(\"Pulling Docker image. 
This may take a minute.\")\n for line in self._docker_client.pull(image_name, stream=True):\n logger.debug(line)\n\n host_config = docker.utils.create_host_config(\n binds={self._temp_dir: {\"bind\": self._temp_dir, \"mode\": \"rw\"}},\n port_bindings={\n 80: (\"127.0.0.1\", self.http_port),\n 443: (\"127.0.0.1\", self.https_port)},)\n container = self._docker_client.create_container(\n image_name, command, ports=[80, 443], volumes=self._temp_dir,\n host_config=host_config)\n if container[\"Warnings\"]:\n logger.warning(container[\"Warnings\"])\n self._container_id = container[\"Id\"]\n self._docker_client.start(self._container_id)", "title": "" }, { "docid": "8c0aff349421096595bfde91fbf07939", "score": "0.57206416", "text": "def create_docker_container(c_name, veth1, c_cidr, gw, conn, ssh_conn=None, primary=True):\n \n host_c = conn.create_host_config(privileged=True)\n c_id = conn.create_container(image='atandon70/ubuntu_project:loadedUBUNTUimage',\n command='/bin/sleep 3000000',\n host_config=host_c,\n name=c_name)\n container_id = c_id['Id']\n conn.start(container_id)\n c_pid = conn.inspect_container(c_id['Id'])['State']['Pid']\n cmd1 = \"sudo ip link set {0} netns {1}\".format(veth1, c_pid)\n cmd2 = \"sudo docker exec -i --privileged {0} ip addr add {1} dev {2} \".format(\n c_id['Id'], c_cidr, veth1)\n cmd3 = \"sudo docker exec -i --privileged {0} ip link set {1} up \".format(\n c_id['Id'], veth1)\n cmd4 = \"sudo docker exec -i --privileged {0} ip route del default\".format(c_id['Id'])\n cmd5 = \"sudo docker exec -i --privileged {0} ip route add default via {1}\".format(\n c_id['Id'], gw)\n cmd_list = [cmd1, cmd2, cmd3, cmd4, cmd5]\n\n if primary==True:\n print('local:')\n for cmd in cmd_list:\n print(cmd)\n os.system(cmd)\n return container_id\n print(cmd_list)\n ssh_remote(ssh_conn, cmd_list)\n return container_id", "title": "" }, { "docid": "b891693914f974f94c56fc2182e1fbe5", "score": "0.57077736", "text": "def _create_machine_docker(conn, machine_name, image_id,\n script=None, public_key=None,\n docker_env={}, docker_command=None,\n tty_attach=True, docker_port_bindings={},\n docker_exposed_ports={}):\n image = ContainerImage(id=image_id, name=image_id,\n extra={}, driver=conn, path=None,\n version=None)\n try:\n if public_key:\n environment = ['PUBLIC_KEY=%s' % public_key.strip()]\n else:\n environment = []\n\n if isinstance(docker_env, dict):\n # docker_env is a dict, and we must convert it ot be in the form:\n # [ \"key=value\", \"key=value\"...]\n docker_environment = [\"%s=%s\" % (key, value) for key, value in\n docker_env.items()]\n environment += docker_environment\n try:\n container = conn.deploy_container(\n machine_name, image,\n command=docker_command,\n environment=environment,\n tty=tty_attach,\n ports=docker_exposed_ports or {},\n port_bindings=docker_port_bindings or {}\n )\n except Exception as e:\n # if image not found, try to pull it\n if 'No such image' in str(e):\n try:\n conn.install_image(image.name)\n container = conn.deploy_container(\n machine_name, image,\n command=docker_command,\n environment=environment,\n tty=tty_attach,\n ports=docker_exposed_ports,\n port_bindings=docker_port_bindings\n )\n except Exception as e:\n raise Exception(e)\n else:\n raise Exception(e)\n\n except Exception as e:\n raise MachineCreationError(\"Docker, got exception %s\" % e, e)\n\n return container", "title": "" }, { "docid": "e4e4bc06dd08ae82d5388c80be4f8111", "score": "0.5705378", "text": "def os_Dockerfile(os):\n return render_template__('os/%s.tpl' % os, **locals())", 
"title": "" }, { "docid": "0da6ce120cab6ec50ee453687b85e7e7", "score": "0.570271", "text": "def create_docker_image(args):\n imgzip = args.imgzip\n if not os.path.exists(imgzip):\n imgzip = emu_downloads_menu.find_image(imgzip).download()\n\n emuzip = args.emuzip\n if emuzip in [\"stable\", \"canary\"]:\n emuzip = emu_downloads_menu.find_emulator(emuzip).download()\n\n rel = emu_downloads_menu.AndroidReleaseZip(imgzip)\n if not rel.is_system_image():\n raise Exception(\"{} is not a zip file with a system image\".format(imgzip))\n rel = emu_downloads_menu.AndroidReleaseZip(emuzip)\n if not rel.is_emulator():\n raise Exception(\"{} is not a zip file with an emulator\".format(imgzip))\n\n device = DockerDevice(emuzip, imgzip, args.dest, args.tag)\n device.create_docker_file(args.extra)\n img = device.create_container()\n if img and args.start:\n device.launch(img)\n\n return device", "title": "" }, { "docid": "a9787a97c30051d7f316b4ec094b61e0", "score": "0.56917804", "text": "def build_command(cli_args: argparse.Namespace, docker_args: List[str]) -> List[str]:\n dockerfile = str(return_fpath_for_docker_file(\"Dockerfile\"))\n command_list = [\"docker\", \"build\", \"-f\", dockerfile, \"-t\", CONTAINER_TAG] + docker_args + [\".\"]\n return command_list", "title": "" } ]
ac413010644727bcbf79d8d69b379b03
Helper function to calculate the sum of Loggamma functions for calculate the score.
[ { "docid": "584d5b7712ab1a056c6130fa1819055b", "score": "0.59813946", "text": "def bayesian_score_component(M, alpha):\n M_values= [M[i] for i in M.keys()]\n p = np.sum(loggamma(alpha + M_values))\n p -= np.sum(loggamma(alpha))\n p += np.sum(loggamma(np.sum(alpha, axis=1)))\n p -= np.sum(loggamma(np.sum(alpha, axis=1) + np.sum(M_values,axis=1)))\n return p", "title": "" } ]
[ { "docid": "2d9472ac0b40863e837c622553d8e6ac", "score": "0.71171165", "text": "def loggamma(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "7af8e16cfcd828e9e2ad587e0b056dd0", "score": "0.6929556", "text": "def log_Beta(alphas):\n #return product(map(gamma,alphas)) / gamma(sum(alphas))\n return sum(lgamma(alpha) for alpha in alphas) - lgamma(sum(alphas))", "title": "" }, { "docid": "8726e84760d9bc063faa28a306867cae", "score": "0.6904447", "text": "def gamma(x):\n x = np.maximum(np.abs(x), 1e-5)\n return (logistic(x) - .5) / x", "title": "" }, { "docid": "459d20cb4a93d290cf71c2aa1ac7e393", "score": "0.67529845", "text": "def log_pdf(self, xs):\n axis = len(shape(xs))-1\n g1x = sum(self.gamma1*xs, axis)\n g2x = sum(self.gamma2*xs, axis)\n g3x = sum(self.gamma3*xs, axis)\n f = self.kappa*(g1x + 0.5*self.beta*(g2x**2 - g3x**2))\n return f", "title": "" }, { "docid": "5b50ea47c6c6261a670f9ecd6b74787e", "score": "0.6716648", "text": "def Gamma(a, l, x):\n return l / gamma(a) * (l * x) ** (a-1) * np.exp(-l * x)", "title": "" }, { "docid": "b2d19ea517c07bcd1f607bd02c0c0639", "score": "0.66906095", "text": "def gamma(smk, X):\n rawscores = [smk.weighted_match_score(X, X, c) for c in X.words]\n idf_list = np.array(ut.take(smk.wx_to_idf, X.words))\n scores = np.array(rawscores) * idf_list\n score = scores.sum()\n sccw = np.reciprocal(np.sqrt(score))\n return sccw", "title": "" }, { "docid": "e1c4d654c694c4dd24c18be28f5f028a", "score": "0.66881454", "text": "def calc_gamma(self):\n assert isinstance(self.beta, float)\n # upper and lower bounds\n sorted_xv = numpy.array(sorted(self.xv))\n sorted_xg = numpy.array(sorted(self.xg))\n if self.xg.size == 0:\n delta_max = self.xv.max() - self.xv.min()\n delta_min = (sorted_xv[1:] - sorted_xv[:-1]).min()\n else:\n delta_max = max(self.xv.max(), self.xg.max()) - \\\n min(self.xv.min(), self.xg.min())\n delta_min = min((sorted_xv[1:] - sorted_xv[:-1]).min(), \\\n (sorted_xg[1:] - sorted_xg[:-1]).min())\n assert delta_max > delta_min\n gamma_min = 1. 
/ delta_max\n gamma_max = pi / delta_min\n # logorithmic bisection for gamma\n while gamma_max / gamma_min > 1.1:\n if self.verbose > 1:\n print ' bisecting [', gamma_min, ',', gamma_max, '] for gamma...'\n gamma_mid = sqrt(gamma_max * gamma_min)\n res_ratio = self._calc_res_ratio_avg(self.beta, gamma_mid)\n if res_ratio < 1.0:\n gamma_max = gamma_mid\n else:\n gamma_min = gamma_mid\n # final selected gamma\n gamma_mid = sqrt(gamma_max * gamma_min)\n return gamma_mid", "title": "" }, { "docid": "ac9b44a25e12b24296ef92b152797d49", "score": "0.6620471", "text": "def compute_gamma(self, f, w):\n f1 = torch.zeros_like(w)\n f1[f] = 1\n Lf = (self.sigma / self.sigmas[f] * f1.t() @ self.ELn).squeeze()\n Lfw = Lf - self.ELw\n numerator = Lfw @ (self.EL - self.ELw)\n denominator = Lfw @ Lfw\n return numerator / denominator, f1", "title": "" }, { "docid": "efe06c824b3d3db57dfd12b0cec0d0e5", "score": "0.6605369", "text": "def gamma(x):\r\n return exp(gammaln(x))", "title": "" }, { "docid": "0282b373a315050f88192334ebb670aa", "score": "0.649276", "text": "def _lgamma(n):\n return np.log(np.abs(gamma(n)))", "title": "" }, { "docid": "b7ad60c5dcb7a88f1e770ce2dbf0122f", "score": "0.6382976", "text": "def fn(x, data):\n beta, q, delta, gamma_mild, gamma_wild, k = x\n S, I_mild, I_wild, P, N = data\n\n pR_mild = 1 - np.exp(-gamma_mild)\n pR_wild = 1 - np.exp(-gamma_wild)\n\n # log likelihood\n # add epsilon to avoid log 0.\n logC = np.sum(np.log(sp.stats.binom(S, P).pmf(C) + epsilon))\n\n logD_mild = np.sum(np.log(sp.stats.binom(I_mild, pR_mild).pmf(D_mild) + epsilon))\n logD_wild = np.sum(np.log(sp.stats.binom(I_wild, pR_wild).pmf(D_wild) + epsilon))\n\n assert not np.isnan(logC)\n assert not np.isnan(logD_mild)\n assert not np.isnan(logD_wild)\n\n # log prior\n log_prior = 0\n for i in range(len(priors)):\n a, b = priors[i]\n log_prior += np.log(sp.stats.gamma(a, b).pdf(x[i])+epsilon)\n assert not np.isnan(log_prior) \n return logC + logD_mild + logD_wild + log_prior", "title": "" }, { "docid": "17a0306aeecfe0034491c6881221ba32", "score": "0.63240796", "text": "def lgamma(*args, **kwargs):\n\n pass", "title": "" }, { "docid": "96c5a32b51064c70e6f229301bfd0661", "score": "0.62972456", "text": "def KL(P, Q):\n # sum_x P(x) ( logP(x) - logQ(x) )\n res = 0\n for item, prob in P.items():\n res += prob * (log(prob) - log(Q[item]))\n return res", "title": "" }, { "docid": "ea9f9a6846ccb4a5bc9fa613afcb7807", "score": "0.6282555", "text": "def eta_0(self):\n return fsum(np.square(self.gamma))", "title": "" }, { "docid": "5078eec45fbde2742e9c1eaaf116d758", "score": "0.6246137", "text": "def _arcsinh_gamma(self, x):\n x = x.to(dtype=torch.float64)\n log_gammas = torch.ones_like(x) * np.log(self.gamma)\n return torch.log(torch.sqrt(x**2 + self.gamma**2) + x) - log_gammas", "title": "" }, { "docid": "5d8ab6133927fb81a3d4f01eed90a2a3", "score": "0.62426674", "text": "def findLgamma(CalphaGammaDict, PalphaGammaHatDict):\n LgammaDict = {}\n for gene, value in PalphaGammaHatDict.items():\n LgammaValue = 0.0\n for family in value:\n if family in CalphaGammaDict[gene]:\n if all(PalphaGammaHatDict[gene][family][row,col] != 0.0 for row in range(4) for col in range(4)):\n LgammaValue += numpy.sum(numpy.matrix([[CalphaGammaDict[gene][family][0,i]*numpy.log(PalphaGammaHatDict[gene][family][0,i]) for i in range(4)],\n [CalphaGammaDict[gene][family][1,j]*numpy.log(PalphaGammaHatDict[gene][family][1,j]) for j in range(4)], \n [CalphaGammaDict[gene][family][2,k]*numpy.log(PalphaGammaHatDict[gene][family][2,k]) for k in range(4)], 
\n [CalphaGammaDict[gene][family][3,m]*numpy.log(PalphaGammaHatDict[gene][family][3,m]) for m in range(4)]]))\n if LgammaValue != 0.0 and not math.isnan(LgammaValue):\n LgammaDict[gene] = LgammaValue\n return LgammaDict", "title": "" }, { "docid": "9152704be0eb186e3549a36ca6a2c4b8", "score": "0.62294334", "text": "def gamma(self, X):\n m, v = self.UT_g_moments(X)\n \n z = m/np.sqrt(v)\n phi = stats.norm.pdf(z)\n gamma = phi*(1-phi)\n \n return gamma", "title": "" }, { "docid": "76b8494265f03130d57c98bd1147c7ea", "score": "0.6227638", "text": "def _compute_log_a(q, sigma, alpha):\n if float(alpha).is_integer():\n return _compute_log_a_int(q, sigma, int(alpha))\n else:\n return _compute_log_a_frac(q, sigma, alpha)", "title": "" }, { "docid": "ed9518c4110ef909f73cc42ce61899e7", "score": "0.6225094", "text": "def gamma(inDelta,window):\r\n\t\tgamma=(FocalStatistics(Square(inDelta),window,\"MEAN\",\"NODATA\"))/2\r\n\t\treturn gamma", "title": "" }, { "docid": "676e19561a0cb89ef0be0240016c0373", "score": "0.6215262", "text": "def nl(x, gamma):\n return 1.0/(1.0-gamma*x)", "title": "" }, { "docid": "708579bea42cbcc5eadfdc63578481e4", "score": "0.61986774", "text": "def logProb(self, *args):\n return _ndlml.gammaDist_logProb(self, *args)", "title": "" }, { "docid": "2cf3e1dcfdf0ed66779a32830618bf9a", "score": "0.6143721", "text": "def add_log_probs(log_p, log_q):\n if log_p == NEGATIVE_INFINITY:\n return log_q\n elif log_p < log_q:\n log_p, log_q = log_q, log_p\n return log_p + math.log(1 + math.exp(log_q - log_p))", "title": "" }, { "docid": "6da3b8ef75163405859a30e12cbbbb78", "score": "0.61331266", "text": "def calcular_gamma(self,gradiente_1,gradiente_0):\n return np.sum((gradiente_1-gradiente_0)*gradiente_1)/np.sum(gradiente_0*gradiente_0)", "title": "" }, { "docid": "38bff132cf5189be73a9dd177e196386", "score": "0.6115526", "text": "def scoresum(self):\r\n return self.avgscore + N.log(self.numscores)", "title": "" }, { "docid": "31236fb387e14d7f874217f66f9c42b8", "score": "0.60781056", "text": "def updateMeanAndVar(X, log_gamma, varianceFloor=5.0):\n\tgamma = np.exp(log_gamma) # Into regular domain\n\n\tprint()\n\n\tmeans = np.zeros((log_gamma.shape[1], X.shape[1]))\n\tcovars = np.zeros((log_gamma.shape[1], X.shape[1]))\n\n\tfor i in range(log_gamma.shape[1]):\n\t\tgamma_sum = np.sum(gamma[:,i])\n\n\t\tmeans[i] = np.sum(gamma[:,i].reshape(-1, 1) * X, axis = 0) / gamma_sum\n\n\t\tcovars[i] = np.sum(gamma[:,i].reshape(-1, 1) * (X - means[i])**2, axis = 0) / gamma_sum\n\t\tcovars[covars < varianceFloor] = varianceFloor\n\n\treturn (means, covars)", "title": "" }, { "docid": "8df228b4393390e0a4a25c68dd2ca14f", "score": "0.60771775", "text": "def compute_loglike(self, tools: ModelingTools) -> float:", "title": "" }, { "docid": "94fcd5dd413494fdc4d6506e75fc32c6", "score": "0.6075763", "text": "def compute_gamma(self, f, w, cross_prods):\n f1 = np.zeros_like(w)\n f1[f] = 1\n denominator = self.sigma**2 - 2 * self.sigma / self.sigmas[f] * w.T @ cross_prods[f] + w.T @ cross_prods @ w\n numerator = self.sigma / self.sigmas[f] * (1-w).T @ cross_prods[f] - (1-w).T @ cross_prods @ w\n return numerator / denominator, f1", "title": "" }, { "docid": "b38640ff1ed71292dba8077e0433d4a5", "score": "0.60744625", "text": "def calculate_gamma(T, RH, b = 18.678, c = 257.14):\n\n return numpy.log(RH / 100) + b * T / (c + T)", "title": "" }, { "docid": "bf6e543aaed25fdb5e94da7d4dbacd3a", "score": "0.6065267", "text": "def _mp_fn(x):\n #return mp.mpf(2)*mp.j1(x)/x\n return mp.exp(mp.loggamma(x))", "title": "" }, { 
"docid": "08c7cb33008d0c4558cc73fdbc6823d6", "score": "0.60542446", "text": "def log_prob(self):", "title": "" }, { "docid": "42d5c8b271d3c9a3bea40537b4364b1d", "score": "0.6027058", "text": "def gammaAll(myDeltas,window):\r\n\t return [gamma(X,window) for X in myDeltas]", "title": "" }, { "docid": "cdd8c6d3ca7d43db764259ab3d774830", "score": "0.5983377", "text": "def joint_log_lik(doc_counts, topic_counts, alpha, gamma):\n n_docs, n_topics = np.shape(doc_counts)\n _, alphabet_size = np.shape(topic_counts)\n \n add = (n_docs*sp.gammaln(np.sum(alpha)) +\n np.sum(sp.gammaln(doc_counts + alpha.reshape((1,n_topics)))) +\n n_topics*sp.gammaln(np.sum(gamma)) +\n np.sum(sp.gammaln(topic_counts + gamma.reshape((1,alphabet_size)))))\n sub = (n_docs*np.sum(sp.gammaln(alpha)) +\n np.sum(sp.gammaln(np.sum(doc_counts + alpha.reshape((1,n_topics)), axis=1))) +\n n_topics*np.sum(sp.gammaln(gamma)) +\n np.sum(sp.gammaln(np.sum(topic_counts + gamma.reshape((1,alphabet_size)), axis=1))))\n return add - sub\n \n \n \n \n \n #add all terms and do subtraction last to minimize cancellation error", "title": "" }, { "docid": "8c3442579d71f86057aa395221d23f4b", "score": "0.5977998", "text": "def lorentzian(x, gamma):\n return 1/(np.pi * gamma * (1 + np.power(x/gamma, 2)))", "title": "" }, { "docid": "6e2270bdafcb659e8eb950c7d15c17b0", "score": "0.5965604", "text": "def gamma_mle(x):\n \n npos = np.shape(x)[0]\n \n # Initialize a\n a = 0.5 / ( np.log(np.mean(x)) - np.mean(np.log(x)) )\n \n \n # Generalized Newton updates for \"a\"\n while True:\n a_new = 1 / (1/a + (np.mean(np.log(x)) \\\n - np.log(np.mean(x)) \\\n + np.log(a) \\\n - psi(a) ) \\\n / \n (np.power(a,2) \\\n * (1/a - polygamma(1, a))))\n if (abs(a-a_new)/abs(a)) < 1e-6: break\n else: a = a_new\n \n b = np.mean(x)/a\n \n return (a, b)", "title": "" }, { "docid": "81bacaba39fd596d396fe4e9c0f84a17", "score": "0.5954874", "text": "def log_post(self, betagamma, lambdas=lambdas, nus=nus):\n\n\t\tbeta, gamma = betagamma\n\t\tif (beta <= 0.) 
or (gamma <= 0.):\n\t\t\treturn float(\"-inf\")\n\t\tlmbda_bet, lmbda_gam = lambdas\n\t\tnu_bet, nu_gam = nus\n\t\t# Evaluate terms of posterior\n\t\tterm1 = np.log(beta)*(lmbda_bet+self.ni-2)\n\t\tterm2 = -beta*(self.xy_int + nu_bet)\n\t\tterm3 = np.log(gamma)*(lmbda_gam+self.nr-1)\n\t\tterm4 = -gamma*(self.y_int + nu_gam)\n\t\t# Posterior proportional to below\n\t\treturn term1 + term2 + term3 + term4", "title": "" }, { "docid": "edffb6ee96a57f927726d4543c97d6d7", "score": "0.5951274", "text": "def _gamma(\n c: Union[int, float],\n k: int,\n mu: Union[int, float],\n sigma_squared: Union[int, float],\n team: Sequence[BradleyTerryFullRating],\n rank: int,\n) -> Union[int, float]:\n return math.sqrt(sigma_squared) / c", "title": "" }, { "docid": "2ad77230371c526289e28d26622b926b", "score": "0.59405375", "text": "def g(x, mu, alpha, n, func, N_L):\n # First we make each of the Lambda_T entries and the f entries\n L_T = [mv_log_prime(x, mu, alpha, l, n, func) for l in range(N_L)] #mv_log_prime is the derivative of the multivarite logistic function \n f_x = func(x)\n return np.hstack([0, f_x, L_T])", "title": "" }, { "docid": "1fe10fa5d3024b4ac439f1863d19b19d", "score": "0.59383345", "text": "def _get_gamma(self, loss, s_t):\n return loss / (s_t + (1/(2 * self._C)))", "title": "" }, { "docid": "9a35128c407b3a8242552a2301951405", "score": "0.59366035", "text": "def calc_gae(rewards, values, gamma, lam):\n # temporal differences\n tds = rewards - values + np.append(values[1:] * gamma, 0)\n advantages = calc_discount_sum_rewards(tds, gamma * lam)\n return advantages", "title": "" }, { "docid": "27172e84f5103c756caada2625af8105", "score": "0.59350693", "text": "def gamma(self):\n if (self.species, self.isotope) in constants.gamma:\n return constants.gamma[(self.species, self.isotope)]\n else:\n return 0.0", "title": "" }, { "docid": "46e1944b548c7db7d05c47d153c8628a", "score": "0.5932415", "text": "def gamma(flag, F, K, t, r, sigma): \n\n b = 0\n\n return numerical_gamma(flag, F, K, t, r, sigma, b, f)", "title": "" }, { "docid": "2d7079d05d99552b121ff0b2fcd9a5a4", "score": "0.5930024", "text": "def logsum(log_xs):\n return reduce(lambda logx,logy:logx + log(1+exp(logy-logx)),log_xs)", "title": "" }, { "docid": "983c81ba817a73beece8800250e085c5", "score": "0.5929712", "text": "def exponentiated_log(x, gamma=0.1):\n if x < 0:\n raise ValueError(\"value %s not a valid input. 
Must be >= 0\")\n if x == 0:\n # since the below function is undefined at x=0, return 1 if x=0.\n return 1.0\n return 1 / (1 + np.power(np.e, np.log(gamma * x)))", "title": "" }, { "docid": "773bd39a736961f035d988a900503c4e", "score": "0.5929574", "text": "def gae_returns(rollout: StepSequence, gamma: float = 0.99, lamb: float = 0.95):\n\n def _next_value(step: Step) -> float:\n \"\"\" Helper to return `next_value = 0` for last step \"\"\"\n if step.done:\n return 0.\n return step.next_value\n\n deltas = [step.reward + gamma*_next_value(step) - step.value for step in rollout]\n cumsum = discounted_reverse_cumsum(deltas, gamma*lamb)\n return cumsum", "title": "" }, { "docid": "b63074042c754fc70de41c94a6e20f41", "score": "0.5925774", "text": "def gamma(*args, **kwargs):\n\n pass", "title": "" }, { "docid": "9c4114cdfd9c1e96448bfdf69041f6d6", "score": "0.5906143", "text": "def kumaraswamy_kl(prior_alpha, prior_beta,a,b, x):\r\n \r\n q_log_prob = kumaraswamy_log_pdf(a, b, x)\r\n p_log_prob = Beta(prior_alpha, prior_beta).log_prob(x)\r\n\r\n return -(p_log_prob-q_log_prob)", "title": "" }, { "docid": "ef6e956639fdb988ffd9fdff42bd5fb3", "score": "0.59047055", "text": "def log_normconst(self,k,b):\n\n def kfb(j, gam, lam, ta):\n if (j == 1):\n kd = sum(0.5/(lam - ta) + 0.25 * (gam**2/(lam - ta)**2))\n \n if (j > 1):\n kd = sum(0.5 * factorial(j - 1)/(lam - ta)**j + 0.25*factorial(j) * gam**2/(lam - ta)**(j + 1))\n return kd\n\n beta= 0.5*b*k\n gam = array([0,k,0])\n lam = array([0,-beta,beta])\n p = 3.0\n\n lam = sort(lam)\n mina = min(lam)\n if mina <= 0 :\n aaa = abs(mina) + 1\n lam = lam + aaa\n\n low = lam[0] - 0.25*p - 0.5*sqrt(0.25 * p**2 + p*max(gam)**2)\n up = lam[0] - 0.25 - 0.5*sqrt(0.25 + min(gam)**2)\n tau = bisect(lambda ta:sum(0.5/(lam - ta) + 0.25 * (gam**2/(lam - ta)**2)) - 1,a=low, b=up)\n\n rho3 = kfb(3, gam, lam, tau)/kfb(2, gam, lam, tau)**1.5\n rho4 = kfb(4, gam, lam, tau)/kfb(2, gam, lam, tau)**2\n Ta = rho4/8 - 5/24 * rho3**2\n c1 = 0.5 * log(2) + 0.5 * (p - 1) * log(pi) - 0.5 * log(kfb(2, gam, lam, tau)) - 0.5 * sum(log(lam - tau)) - tau + 0.25 * sum(gam**2/(lam - tau))\n c2 = c1 + log1p(Ta)\n c3 = c1 + Ta\n if (mina <= 0):\n c1 = c1 + aaa\n c2 = c2 + aaa\n c3 = c3 + aaa\n return c1", "title": "" }, { "docid": "8c1b34ea5a9d7f9e87bd5e355268a39a", "score": "0.5895625", "text": "def sum_log_probs(log_probs):\n return functools.reduce(add_log_probs, log_probs)", "title": "" }, { "docid": "df4413d44e090934198cfc04dbc77d1a", "score": "0.5894394", "text": "def gamma(input_image, gamma_r, gamma_g=None, gamma_b=None, gamma_a=1.0):\n #\n if not gamma_g:\n gamma_g = gamma_r\n if not gamma_b:\n gamma_b = gamma_r\n\n ImageBufAlgo.pow(\n input_image,\n input_image,\n ((1.0/gamma_r), (1.0/gamma_g), (1.0/gamma_b), gamma_a)\n )\n\n if input_image.has_error:\n print(\"Error gamma:\", input_image.geterror())\n\n return input_image", "title": "" }, { "docid": "bfccc4fd5ee4dc03205c619537efcc43", "score": "0.5883205", "text": "def _m_step(self, X, gamma):\r\n # Compute alpha\r\n alpha = gamma.sum(axis=0) / gamma.sum()\r\n\r\n # Compute beta\r\n weighted_counts = gamma.T.dot(X)\r\n beta = weighted_counts / weighted_counts.sum(axis=-1).reshape(-1, 1)\r\n\r\n return alpha, beta", "title": "" }, { "docid": "909fcb4b0ce4b7a6e61df6fd7e5819a7", "score": "0.5878982", "text": "def calc_ll(alpha0, trans_mat, emissions):\n _, n = fwd_algorithm(alpha0, emissions, trans_mat=trans_mat)\n return np.sum([np.sum(np.log(n_)) for n_ in n])", "title": "" }, { "docid": "f63418fdb97d8e978c870d863249fbcc", "score": 
"0.5878116", "text": "def kl(p, q):\n return np.sum(p * np.log(p / q))", "title": "" }, { "docid": "9122b88b06d29d2d1b3091e81449ba82", "score": "0.5874537", "text": "def compute_log(g, v, delta):\n global nb_users\n for sum in range (0, (nb_users*delta)+1):\n value = pow(g, sum)\n if v == value:\n return sum\n print(\"Error during discrete log computation: no value found\")\n exit()", "title": "" }, { "docid": "f25b0d4481164001a26d7159ed02a474", "score": "0.587415", "text": "def _compute_log_a_int(q, sigma, alpha):\n assert isinstance(alpha, (int, long))\n\n # The first and second terms of A_alpha in the log space:\n log_a1, log_a2 = -np.inf, -np.inf\n\n for i in range(alpha + 1):\n # Compute in the log space. Extra care needed for q = 0 or 1.\n log_coef_i = math.log(special.binom(alpha, i))\n if q > 0:\n log_coef_i += i * math.log(q)\n elif i > 0:\n continue # The term is 0, skip the rest.\n\n if q < 1.0:\n log_coef_i += (alpha - i) * math.log(1 - q)\n elif i < alpha:\n continue # The term is 0, skip the rest.\n\n s1 = log_coef_i + (i * i - i) / (2.0 * (sigma ** 2))\n s2 = log_coef_i + (i * i + i) / (2.0 * (sigma ** 2))\n log_a1 = _log_add(log_a1, s1)\n log_a2 = _log_add(log_a2, s2)\n\n log_a = _log_add(math.log(1 - q) + log_a1, math.log(q) + log_a2)\n if FLAGS.rdp_verbose:\n print(\"A: by binomial expansion {} = {} + {}\".format(\n _log_print(log_a),\n _log_print(math.log(1 - q) + log_a1), _log_print(math.log(q) + log_a2)))\n return float(log_a)", "title": "" }, { "docid": "39e2e992d6aefdc03c8819892c5bcf05", "score": "0.5868323", "text": "def _e_step(self, X, alpha, beta):\r\n # Compute gamma\r\n N = X.shape[0]\r\n K = alpha.shape[0]\r\n weighted_multi_prob = np.zeros((N, K))\r\n for k in range(K):\r\n weighted_multi_prob[:, k] = alpha[k] * self._multinomial_prob(X, beta[k])\r\n\r\n denum = weighted_multi_prob.sum(axis=1)\r\n gamma = weighted_multi_prob / denum.reshape(-1, 1)\r\n\r\n return gamma", "title": "" }, { "docid": "be0fb843f827ded5ae842c6d5e0bf96f", "score": "0.58669674", "text": "def e_logx(self):\n return digamma(self.vi_shape) - np.log(self.vi_rate)", "title": "" }, { "docid": "a3572339c2d8fc21836cef2a225d275e", "score": "0.5866854", "text": "def logistic(x, L, k, x0):\n return L/(1+np.exp(-k*(x-x0)))", "title": "" }, { "docid": "26e841dfb52278b58f6afe36d777c643", "score": "0.5863399", "text": "def e_step(mu_hats, var_hats, pi_hats, data, gamma_iks):\n data = np.expand_dims(data, axis=1)\n log_pis = np.log(pi_hats)\n # print(f\"log_pis.shape: {log_pis.shape}\")\n # print(mu_hats)\n log_norm_pdfs = np.log(ss.norm.pdf(data, mu_hats, np.sqrt(var_hats)))\n # print(f\"log_norm_pdfs.shape: {log_norm_pdfs.shape}\")\n log_pi_norm_pdfs = log_pis + log_norm_pdfs\n # print(f\"log_pi_norm_pdfs.shape: {log_pi_norm_pdfs.shape}\")\n # gamma_iks = gamma_ik(mu_hats, var_hats, pi_hats, data)\n # print(f\"gamma_iks.shape: {gamma_iks.shape}\")\n return np.sum(log_pi_norm_pdfs * gamma_iks)", "title": "" }, { "docid": "8fe1ed799b44029458b4bb4fc312a1a7", "score": "0.58377194", "text": "def gamma_summation(wx2_rvecs, wx2_weight):\n gamma_iter = (wx2_weight.get(wx, 0) * Match_N(vecs, vecs).sum()\n for wx, vecs in six.iteritems(wx2_rvecs))\n return np.reciprocal(np.sqrt(sum(gamma_iter)))", "title": "" }, { "docid": "4c679097943963db8000dbccb9ec4542", "score": "0.582551", "text": "def getGamma(self, held_out):\n # gammas = [(i+1)*10 for i in range(10)]\n self.gamma = 1\n max_log_prob = self.log_probability(held_out) # Candidato a maximo\n\n # Rango de gammas a probar de 100 en 100 hasta 1000\n gammas 
= [i for i in range(100, 1100, 100)]\n\n print(\"Gamma =\", self.gamma, \"==> Log-Prob =\", max_log_prob)\n best_gamma = self.gamma\n for gamma in gammas:\n self.gamma = gamma\n my_log_prob = self.log_probability(held_out)\n\n print(\"Gamma =\", self.gamma, \"==> Log-Prob =\", my_log_prob)\n # Si my_log_prob es mas grande que mi candidato\n # entonces lo seteo como maximo\n if max_log_prob < my_log_prob:\n max_log_prob = my_log_prob\n best_gamma = self.gamma\n\n print(\"\\nMejor Gamma =\", best_gamma)\n\n return best_gamma", "title": "" }, { "docid": "348137cdcff3de954aac3efff4d60f85", "score": "0.582541", "text": "def logliks(self, x):\n x = x.copy()\n\n # Replace exactly 0 and exactly 1 values with a very small number\n # (machine epsilon, the smallest number that this computer is capable\n # of storing) because 0 and 1 are not in the Beta distribution.\n x[x == 0] = VERY_SMALL_NUMBER\n x[x == 1] = 1 - VERY_SMALL_NUMBER\n\n return np.array([np.log(prob) + rv.logpdf(x[np.isfinite(x)]).sum()\n for prob, rv in\n zip(self.prob_parameters, self.rvs)])", "title": "" }, { "docid": "d7da80dcc7c9b4f81769714ccd9b97ca", "score": "0.58195525", "text": "def calculate_gamma(self):\n gamma = 360*(self.time.julian_day-1)/365\n self.gamma = pd.Series(gamma)", "title": "" }, { "docid": "04d1b531df3b310b0bd70f7df7064699", "score": "0.5819409", "text": "def lngamma(z):\n z -= 1\n x = _p[0]\n for i in range(1, _g+2):\n x += _p[i]/(z+i)\n t = z + _g + 0.5\n return 0.9189385332046727 + (z + 0.5) * log(t) - t + log(x) # log(sqrt(2*pi)) = 0.9189385332046727", "title": "" }, { "docid": "ce351fa8c157433fddba9ab0931e8ddb", "score": "0.58109015", "text": "def norm(self):\n # For trigrams\n tot = 0.0\n # Calculating the total number of trigrams\n for tri in self.trigrams:\n tot += self.trigrams[tri]\n ltot = log(tot, 2) \n for tri in self.trigrams:\n self.trigrams[tri] = log(self.trigrams[tri], 2) - ltot\n # For bigrams\n tot = 0.0\n for bi in self.bigrams:\n tot += self.bigrams[bi]\n ltot = log(tot, 2)\n for bi in self.bigrams:\n self.bigrams[bi] = log(self.bigrams[bi], 2) - ltot\n# # For unigrams\n tot = 0.0\n for uni in self.unigrams:\n tot += self.unigrams[uni]\n ltot = log(tot, 2)\n for uni in self.unigrams:\n self.unigrams[uni] = log(self.unigrams[uni], 2) - ltot\n# # For trigram history (context)\n tot = 0.0\n for context_words in self.trigrams_history:\n tot += self.trigrams_history[context_words]\n ltot = log(tot, 2)\n for context_words in self.trigrams_history:\n self.trigrams_history[context_words] = log(self.trigrams_history[context_words], 2) - ltot\n# # For bigram history (context)\n tot = 0.0\n for context_word in self.bigrams_history:\n tot += self.bigrams_history[context_word]\n ltot = log(tot, 2)\n for context_word in self.bigrams_history:\n self.bigrams_history[context_word] = log(self.bigrams_history[context_word], 2) - ltot", "title": "" }, { "docid": "d11f7a28f633810bcedd5237223d078f", "score": "0.5810086", "text": "def score(self, sentence):\n # TODO your code here\n score = 0.0\n for token in sentence:\n freq = self.vocab[token] + self.epsilon\n score += math.log(freq)\n score -=math.log(self.total + self.v * self.epsilon)\n return score", "title": "" }, { "docid": "d08064d9e159c3e89a41be76ec2bdeb5", "score": "0.580868", "text": "def calculate_gamma(df, log=None):\n SP = df.SALNTY / 42.\n pt = df.THETA / 40.\n\n gamma_NAtl = fit_polynomial(SP, pt, fit_north_atlantic)\n gamma_SAtl = fit_polynomial(SP, pt, fit_south_atlantic)\n gamma_Pac = fit_polynomial(SP, pt, fit_pacific)\n gamma_Ind = 
fit_polynomial(SP, pt, fit_indian)\n gamma_SOce = gamma_G_southern_ocean(SP, pt, df.CTDPRS)\n #gamma_Arc = [np.nan] * len(SP)\n\n in_pacific = np.logical_or(in_poly(df.LONGITUDE, df.LATITUDE, pacific_poly), in_poly(df.LONGITUDE, df.LATITUDE, pacific_poly_b))\n in_indian = np.logical_and(in_poly(df.LONGITUDE, df.LATITUDE, indian_poly), np.logical_not(in_pacific))\n in_atlantic = np.logical_not(np.logical_or(in_pacific, in_indian))\n\n c1_sa = df.LATITUDE < -10\n c2_sa = np.logical_and(df.LATITUDE <= 10, df.LATITUDE >= -10)\n weight_sa = c1_sa + c2_sa * (0.5 + 0.5 * np.cos(math.pi * 0.05 * (df.LATITUDE+10)))\n\n c1_so = df.LATITUDE < -40\n c2_so = np.logical_and(df.LATITUDE <= -20, df.LATITUDE >= -40)\n weight_so = c1_so + c2_so * (0.5 + 0.5 * np.cos(math.pi * 0.05 * (df.LATITUDE+40)))\n\n gamma_Atl = (1 - weight_sa) * gamma_NAtl + weight_sa * gamma_SAtl\n\n gamma_middle = in_pacific * gamma_Pac + in_atlantic * gamma_Atl + in_indian * gamma_Ind\n\n gamma_GP = weight_so * gamma_SOce + (1 - weight_so) * gamma_middle\n\n gamma_GP[df.LATITUDE > 66] = np.nan\n\n gamma_GP = 20*gamma_GP - 20\n\n df = df.assign(GAMMA = gamma_GP)\n\n return df", "title": "" }, { "docid": "53cef0736bcea6614e117048389734f1", "score": "0.58017594", "text": "def _compute_log_a_frac(q, sigma, alpha):\n # The four parts of A_alpha in the log space:\n log_a11, log_a12 = -np.inf, -np.inf\n log_a21, log_a22 = -np.inf, -np.inf\n i = 0\n\n z0, _ = _compute_zs(sigma, q)\n\n while True: # do ... until loop\n coef = special.binom(alpha, i)\n log_coef = math.log(abs(coef))\n j = alpha - i\n\n log_t1 = log_coef + i * math.log(q) + j * math.log(1 - q)\n log_t2 = log_coef + j * math.log(q) + i * math.log(1 - q)\n\n log_e11 = math.log(.5) + _log_erfc((i - z0) / (math.sqrt(2) * sigma))\n log_e12 = math.log(.5) + _log_erfc((z0 - j) / (math.sqrt(2) * sigma))\n log_e21 = math.log(.5) + _log_erfc((i - (z0 - 1)) / (math.sqrt(2) * sigma))\n log_e22 = math.log(.5) + _log_erfc((z0 - 1 - j) / (math.sqrt(2) * sigma))\n\n log_s11 = log_t1 + (i * i - i) / (2 * (sigma ** 2)) + log_e11\n log_s12 = log_t2 + (j * j - j) / (2 * (sigma ** 2)) + log_e12\n log_s21 = log_t1 + (i * i + i) / (2 * (sigma ** 2)) + log_e21\n log_s22 = log_t2 + (j * j + j) / (2 * (sigma ** 2)) + log_e22\n\n if coef > 0:\n log_a11 = _log_add(log_a11, log_s11)\n log_a12 = _log_add(log_a12, log_s12)\n log_a21 = _log_add(log_a21, log_s21)\n log_a22 = _log_add(log_a22, log_s22)\n else:\n log_a11 = _log_sub(log_a11, log_s11)\n log_a12 = _log_sub(log_a12, log_s12)\n log_a21 = _log_sub(log_a21, log_s21)\n log_a22 = _log_sub(log_a22, log_s22)\n\n i += 1\n if max(log_s11, log_s21, log_s21, log_s22) < -30:\n break\n\n log_a = _log_add(\n math.log(1. 
- q) + _log_add(log_a11, log_a12),\n math.log(q) + _log_add(log_a21, log_a22))\n return log_a", "title": "" }, { "docid": "f24a20b3f26714cf3e9fa624e50cc0d9", "score": "0.5782719", "text": "def lyap_gamma(c,s,E=0):\n c,s = np.asarray(c), np.asarray(s)\n return (s)**2/(24*(4*c**2-E**2))", "title": "" }, { "docid": "8a493813d3229cfd5046290a64db8d19", "score": "0.5774819", "text": "def E_gamma(self):\n return const.h*self.nu_opt# joule", "title": "" }, { "docid": "917dc8720a39ed82911b9b43ad950168", "score": "0.5773304", "text": "def gamma_G(self):# TODO: change approximation to K?\n return 16*self.N0*self.delta0**3*np.pi*self.T*np.exp(-2*self.delta0/(const.k*self.T))/(self.tau0*const.k**2*self.Tc**3)# per second(?)", "title": "" }, { "docid": "8f4278e00dce2b42194587e9006e0407", "score": "0.5765853", "text": "def gamma(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "604ea3c1428d9f0b06942f82f5ec3a65", "score": "0.5762183", "text": "def start_gamma(gamma):\n\n return np.sqrt(1. - 1 / gamma**2), gamma", "title": "" }, { "docid": "d7db1d3bae37246c06f897c9d8a88917", "score": "0.5761173", "text": "def unorm_lcrp_post(alpha, N, K, log_prior_fun):\n return gammaln(alpha)+float(K)*log(alpha)-gammaln(alpha+float(N))+log_prior_fun(alpha)", "title": "" }, { "docid": "4c23f656f9b07fee5da27dcd9f50e226", "score": "0.57607394", "text": "def _calc_logprob(self, actions, means, logvars):\n exp_item = layers.elementwise_div(\n layers.square(actions - means), layers.exp(logvars), axis=1)\n exp_item = -0.5 * layers.reduce_sum(exp_item, dim=1)\n\n vars_item = -0.5 * layers.reduce_sum(logvars)\n logprob = exp_item + vars_item\n return logprob", "title": "" }, { "docid": "6a8b84e29934621c2edf121bf48b457b", "score": "0.5759556", "text": "def compute_returns(rewards,gamma=1.0): \n T = len(rewards) \n returns = np.array([\n np.sum(np.array(\n rewards[t:])*np.array(\n [gamma**i for i in range(T-t)]\n )) for t in range(T)\n ])\n return returns", "title": "" }, { "docid": "953047445b4663f0b2c783f48bb97ff6", "score": "0.5745926", "text": "def kumaraswamy_log_pdf(a, b, x):\r\n \r\n return tf.log(a) +tf.log(b) + (a-1.)*tf.log(x)+ (b-1.)*tf.log(1.-x**a)", "title": "" }, { "docid": "e24132adf75ad49ff983d65335b3e99e", "score": "0.5742534", "text": "def log_like_iid_gamma(params, n):\n shape, scale = params\n \n if shape <= 0 or scale <= 0:\n return -np.inf\n \n return np.sum(scipy.stats.gamma.logpdf(n, shape, 0, scale))", "title": "" }, { "docid": "4c8d6695536289b559e9804c1363da8e", "score": "0.5735335", "text": "def compute_returns(rewards,gamma=1.0): \n assert BATCHSIZE==1,'return computation'\n T = len(rewards) \n returns = np.array([\n np.sum(np.array(\n rewards[t:])*np.array([\n gamma**i for i in range(T-t)]\n )) for t in range(T)\n ]) ## not sure how to parse this\n return returns", "title": "" }, { "docid": "abf4a0a7530a21404e673089251766c7", "score": "0.5731304", "text": "def B(alpha, beta):\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)", "title": "" }, { "docid": "434775aec251cc2b4d66730c0902b728", "score": "0.57310295", "text": "def kl(p, q):\n p = np.asarray(p, dtype=np.float)\n q = np.asarray(q, dtype=np.float)\n new_q = q[q != 0]\n new_p = p[q != 0]\n return np.sum(np.where(new_p != 0, new_p * np.log(new_p / new_q), 0))", "title": "" }, { "docid": "4466597f717fdba014f4c5ebcb3781e1", "score": "0.5729368", "text": "def smash_log(X, k=10, d=0):\n return 1 / (1 + np.exp(-X * k)) - d", "title": "" }, { "docid": "209ffad89efeb01b9b74a2db884a8199", "score": 
"0.5711811", "text": "def kl_divergence(mu: Tensor, log_var: Tensor) -> Tensor:\n return 0.5 * torch.sum(torch.exp(log_var) + mu ** 2 - 1 - log_var, dim=-1, keepdim=True).float()", "title": "" }, { "docid": "a1073945f4f1080a2edd2c63b7d04852", "score": "0.57091373", "text": "def Gamma(self, Lambda):\n\n if Lambda == 0: return 1 # wild guess\n\n return self.Psi(Lambda)/Lambda", "title": "" }, { "docid": "cf2bbd0bd103e671eae5a9c051580dcf", "score": "0.5708433", "text": "def calc_DirMult_logL(n, alphas):\n #1st two terms for full calculation\n sum_alphas = np.sum(alphas)\n lg_sum_alphas = math.lgamma(sum_alphas)\n sum_lg_alphas = np.sum(calc_lgamma_vect(alphas))\n\n lg_sum_alphas_n = math.lgamma(sum_alphas + np.sum(n))\n sum_lg_alphas_n = np.sum(calc_lgamma_vect(n+alphas))\n\n logL = lg_sum_alphas - sum_lg_alphas - lg_sum_alphas_n + sum_lg_alphas_n\n return logL", "title": "" }, { "docid": "1f0f7595eff50f633e996894b01ecfe1", "score": "0.5705244", "text": "def gamma(x):\n\n if x == 2.0 or x == 1.0:\n return complex(1.0, 0)\n\n g = 607.0 / 128.0\n c = [\n 0.99999999999999709182,\n 57.156235665862923517,\n -59.597960355475491248,\n 14.136097974741747174,\n -0.49191381609762019978,\n .33994649984811888699e-4,\n .46523628927048575665e-4,\n -.98374475304879564677e-4,\n .15808870322491248884e-3,\n -.21026444172410488319e-3,\n .21743961811521264320e-3,\n -.16431810653676389022e-3,\n .84418223983852743293e-4,\n -.26190838401581408670e-4,\n .36899182659531622704e-5\n ]\n\n if np.real(x) < 0:\n xx = x\n x = -x\n x = x - 1\n xh = x + 0.5\n xgh = xh + g\n # Trick for avoiding FP overflow above z=141\n xp = xgh ** (xh * 0.5)\n\n # Evaluate sum\n ss = 0.0\n for pp in range(14, 0, -1):\n ss = ss + c[pp] / (x + pp)\n\n sq2pi = 2.5066282746310005024157652848110\n f = (sq2pi * (c[0] + ss)) * ((xp * np.exp(-1.0 * xgh)) * xp)\n f = -1.0 * np.pi / (xx * f * np.sin(np.pi * xx))\n\n else:\n x = x - 1\n xh = x + 0.5\n xgh = xh + g\n # Trick for avoiding FP overflow above z=141\n xp = xgh ** (xh * 0.5)\n\n # Evaluate sum\n ss = 0.0\n for pp in range(14, 0, -1):\n ss = ss + c[pp] / (x + pp)\n\n sq2pi = 2.5066282746310005024157652848110\n f = (sq2pi * (c[0] + ss)) * ((xp * np.exp(-1.0 * xgh)) * xp)\n\n return f", "title": "" }, { "docid": "1bb754e7adba0c447cdb3fc53b79bc8b", "score": "0.5697919", "text": "def gamma_from_meas(k,r,f,drdt,dfdt):\n \n gamma_rel_sq = (k+1)/((r*dfdt)/(f*drdt) + 1)\n \n if gamma_rel_sq > 0:\n gamma_rel = np.sqrt(gamma_rel_sq)\n else:\n print \"gamma^2 < 0!\"\n sys.exit()\n \n beta_rel = np.sqrt(1-(1/gamma_rel_sq))\n beta_gamma_rel = beta_rel*gamma_rel\n p = 1e-6*PROTON_MASS*beta_gamma_rel\n \n return gamma_rel, p", "title": "" }, { "docid": "25e9b1c41200773520de717b9f995a63", "score": "0.5680434", "text": "def evaluate(self, l, default_to_0=False):\n if default_to_0 and len(normalize(l)) <= 1:\n return 0\n log_prob = 0.0\n transition_ct = 0\n for a, b in ngram(2, l):\n log_prob += self.log_prob_mat[pos[a]][pos[b]]\n transition_ct += 1\n # The exponentiation translates from log probs to probs.\n return math.exp(log_prob / (transition_ct or 1))", "title": "" }, { "docid": "362cb9d30f42d4223f9f1ea01fb5b99c", "score": "0.566134", "text": "def _compute_log_b0(sigma, q, alpha, z1):\n z0, _ = _compute_zs(sigma, q)\n s, log_term, log_b0, k, sign, max_log_term = 0, 1., 0, 0, 1, -np.inf\n # Keep adding new terms until precision is no longer preserved.\n # Don't stop on the negative.\n while (k < alpha or (log_term > max_log_term - 36 and log_term > -30) or\n sign < 0.):\n log_b1 = k * (k - 2 * z0) / (2 * 
sigma ** 2)\n log_b2 = _log_erfc((k - z1) / (math.sqrt(2) * sigma))\n log_term = log_b0 + log_b1 + log_b2\n max_log_term = max(max_log_term, log_term)\n s += sign * math.exp(log_term)\n k += 1\n # Maintain invariant: sign * exp(log_b0) = {-alpha choose k}\n log_b0 += math.log(abs(-alpha - k + 1)) - math.log(k)\n sign *= -1\n\n if s == 0: # May happen if all terms are < 1e-324.\n return -np.inf\n if s < 0 or math.log(s) < max_log_term - 25: # The series failed to converge.\n return None\n c = math.log(.5) - math.log(1 - q) * alpha\n return c + math.log(s)", "title": "" }, { "docid": "e07a6592fb0aca0584aef7988a340412", "score": "0.56603223", "text": "def sensitivity_gamma(self, V):\n dx_ii = np.ones((self.k, self.n, self.k))\n for i in range(self.k):\n idx = list(set(range(self.k)) - set([i]))\n for j, m in enumerate(idx):\n dx_ii[i, :, j] = np.gradient(self.X[:, m])\n self.gamma_i.append((1./V)*(np.sum(self.y*dx_ii[i, :, j])))\n return self.gamma_i", "title": "" }, { "docid": "89da2c6bf0009a10caf58e4cf82ba553", "score": "0.56555945", "text": "def gamma_op(g):\n # TODO: Expand..\n assert (0 <= g <= 1)\n \n def e(a, b):\n def f(z):\n x, y = a(z), b(z)\n return (x * y) ** (1 - g) * ((1 - x) * (1 - y)) ** g\n return f\n return e", "title": "" }, { "docid": "4de1884c0a17f70e20d46e6de8a3bc60", "score": "0.56526405", "text": "def calculate_mutation_score(ref_seq_score, alt_seq_score):\n return np.log10(ref_seq_score/alt_seq_score)", "title": "" }, { "docid": "99acf9a65c7aedc7112eb7bdb87b8706", "score": "0.5651609", "text": "def fn(a,y):\n #print \"using Cross Entropy\"\n #print(\"y = \",y , \" a = \",a)\n #print(\"-y*np.log(a)\",-y*np.log(a))\n #print(\"-(1-y)*np.log(1-a)=\",-(1-y)*np.log(1-a) )\n cost=np.sum(np.nan_to_num(-y*np.log(a)-(1-y)*np.log(1-a)))\n #print(\"cost=\",cost)\n return cost", "title": "" }, { "docid": "33a17e82e0f2c373ae7f030c5b7f41c0", "score": "0.5647718", "text": "def gamma(arr, g):\n if (arr.max() > 1.0 + epsilon) or (arr.min() < 0 - epsilon):\n raise ValueError(\"Input array must have float values between 0 and 1\")\n if g <= 0 or np.isnan(g):\n raise ValueError(\"gamma must be greater than 0\")\n\n return arr ** (1.0 / g)", "title": "" }, { "docid": "55d20988ca89d2e737f56dd4c34ded45", "score": "0.5633212", "text": "def discounted_value(rollout: StepSequence, gamma: float):\n rewards = [step.reward for step in rollout]\n return discounted_reverse_cumsum(rewards, gamma)", "title": "" }, { "docid": "49d98838d9961827f49010a697dc0e1d", "score": "0.5633106", "text": "def log_prior(self):\n lp=np.sum(self.g(self.length),keepdims=True)\n if self.nugget_est==1:\n lp+=self.g(self.nugget)\n return lp", "title": "" }, { "docid": "daeaad7d0949cb1957b3f3cfebe9828c", "score": "0.5630555", "text": "def log_sum_exp_kl_divergence(anchor_embed_mean, pos_embed_mean, neg_embed_mean,\n anchor_embed_var, pos_embed_var, neg_embed_var,\n beta):\n\n # compute symmetric KL divergence between anchor and positive\n mean_term_ap = ((anchor_embed_mean - pos_embed_mean)**2) / 2\n var_term_ap = ((anchor_embed_var**2 + pos_embed_var**2) /\n (2 * anchor_embed_var * pos_embed_var))\n d_pos = tf.reduce_sum(mean_term_ap * var_term_ap - 0.5, [1, 2, 3]) # batch\n\n # compute symmetric KL divergence between anchor and negative\n mean_term_an = (tf.expand_dims(anchor_embed_mean, 1) -\n tf.expand_dims(neg_embed_mean, 0)) ** 2 / 2\n anchor_embed_var_expand = tf.expand_dims(anchor_embed_var, 1)\n neg_embed_var_expand = tf.expand_dims(neg_embed_var, 1)\n var_term_an = ((anchor_embed_var_expand**2 + 
neg_embed_var_expand**2) /\n (2 * anchor_embed_var_expand * neg_embed_var_expand))\n d_pairwise_neg = tf.reduce_sum(mean_term_an * var_term_an - 0.5,\n [2, 3, 4]) # batch x batch_neg\n\n difference = (tf.expand_dims(d_pos/beta, 1) -\n d_pairwise_neg/beta) # postives x negatives\n\n # Option 1(unused) : log-sum-exp loss\n # log(\\sum_j(exp(d+ - dj-)))\n # loss = tf.reduce_sum(beta * tf.reduce_logsumexp(difference, 1), 0)\n\n # Option 2\n # log(1 + \\sum_j(exp(d+ - dj-)))\n difference_padded = tf.pad(difference, [[0, 0], [0, 1]])\n loss = tf.reduce_sum(beta * tf.reduce_logsumexp(difference_padded, 1), 0)\n\n accuracy_tf = tf.reduce_mean(tf.sign(-tf.expand_dims(d_pos, 1) +\n d_pairwise_neg))\n\n return loss, accuracy_tf, d_pos, d_pairwise_neg", "title": "" }, { "docid": "c03d483cc4f2ef8136085818ef9abc90", "score": "0.5628759", "text": "def log_sum_exp(x):\r\n x_max = x.max()\r\n\r\n x = torch.log(torch.sum(torch.exp(x-x_max))) + x_max\r\n #x = F.log_softmax(x, dim=1)\r\n\r\n return x", "title": "" }, { "docid": "ca5136d434d542e870b84e2f60d589cc", "score": "0.5627645", "text": "def _get_test_symbols_to_logits_fn(self):\n def symbols_to_logits_fn(ids, i, cache):\n del ids\n logits = tf.cast(tf.math.log(self.probabilities[:, i, :]), tf.float32)\n return logits, cache\n return symbols_to_logits_fn", "title": "" }, { "docid": "62dcce5530518f6eb62a478acd749cb0", "score": "0.5619232", "text": "def fullObjectiveLog(fz, val, c_W, M):\n temp = c_W*scipy.dot(M, fz)\n idx = temp != 0\n return scipy.sum(pow(scipy.log(val[idx]) - scipy.log(temp[idx]), 2))/2.", "title": "" }, { "docid": "7dca0a3c0c9056e0665e5a06db0d0c73", "score": "0.5612098", "text": "def compute_gamma(self,A,E,zz_tn_prev,zz_tn,gamma_tn):\n # This is the main bottleneck of the code.\n # Would be faster if:\n # - implemented in C\n # - roots() was also implemented in C\n d_z = len(gamma_tn)\n product_matrix_matrix(zz_tn_prev,A.T,self.mat_d_z_d_z)\n product_matrix_matrix(A,self.mat_d_z_d_z,self.mat_d_z_d_z2)\n getdiag(self.mat_d_z_d_z2,self.AzzA_prev)\n G = diag(zz_tn)+2*self.gamma_prior_beta\n H = self.AzzA_prev\n a1 = 2.0*(self.gamma_prior_alpha+1.0)\n a2 = (4.0*self.gamma_prior_alpha+5.0)*E + H - G\n a3 = ((2.0*self.gamma_prior_alpha+3)*E-2.0*G)*E\n a4 = -G*E**2\n Q = ((3.0*a3/a1)-((a2/a1)**2))/9\n R = (9*a1*a2*a3-27*a4*(a1**2)-2*(a2**3))/(54*a1**3)\n ##delta = Q**3+R**2\n #rho = sqrt(-Q**3)\n #theta = arccos(R/rho)\n theta = arccos(sign(R)*minimum(exp(log(abs(R))-3.0/2.0*log(-Q)),1.0))\n #print theta1, theta\n #JJ = pow(rho,1.0/3)\n HH = sqrt(-Q)\n am = 2*HH*cos(theta/3)-a2/(3.0*a1) \n am = maximum(abs(am),0.00001)\n \n gamma_mean_diff = sum((am-gamma_tn)**2)/d_z\n gamma_tn[:] = am\n return gamma_mean_diff", "title": "" } ]
4f20628cc01c7ef652bdba4cc4450d0b
Allow to convert the iso code language list into a list of titles
[ { "docid": "1d7b00379515f47b11a4c7a2df993c20", "score": "0.55227435", "text": "def get_languages(self, iso_codes):\n langList = []\n for isoCode in iso_codes:\n if CONSTANTS.dico_languages.has_key(isoCode):\n langList.append(CONSTANTS.dico_languages[isoCode])\n else:\n logging.error(\"get_languages has not key %s\" % (isoCode))\n return langList", "title": "" } ]
[ { "docid": "6832b39328905e63f1a7613e38e97b4d", "score": "0.67992806", "text": "def translated_titles(self, iso_format=None):\n\n fmt = iso_format or self._iso_format\n\n trans_titles = {}\n if 'v12' in self.data['article']:\n for title in self.data['article']['v12']:\n if 'l' in title:\n language = tools.get_language(title['l'], fmt)\n if language != self.original_language(iso_format=fmt):\n t = title.get('_', '').strip()\n if not t:\n t = title.get('t', '').strip()\n\n trans_titles.setdefault(\n html_decode(language),\n html_decode(t)\n )\n\n if len(trans_titles) == 0:\n return None\n\n return trans_titles", "title": "" }, { "docid": "5bf0c9b6b066fbaf3190fa803ac733c5", "score": "0.637254", "text": "def extract_titles(self, preprocessed_input):\n if self.creative:\n if (len(re.findall('\"([^\"]*)\"', preprocessed_input)) == 0):\n potential_titles = {} # {startIndex: titles}\n input_lower = preprocessed_input.lower().strip(string.punctuation)\n splited_input = re.split(r' ', input_lower)\n \n for movie in self.movieTitles:\n movietitle = movie[0].lower()\n articles = [\"a\", \"an\", \"the\"]\n containsYear = re.findall('\\(\\d{4}\\)', movietitle)\n if len(containsYear) != 0:\n movietitle = movietitle[:-7] \n for article in articles:\n size = len(article)\n if (movietitle[-size-2:] == ', ' + article):\n movietitle = article + \" \" + movietitle[:-size-2] \n splited_movie = re.split(r' ', movietitle)\n startIndex = self.isSubstring(splited_input, splited_movie)\n if startIndex >= 0:\n if startIndex in potential_titles:\n old_title = potential_titles[startIndex]\n if len(old_title) < len(movietitle):\n potential_titles[startIndex] = movietitle\n else:\n potential_titles[startIndex] = movietitle\n # print(list(potential_titles.values()))\n return list(potential_titles.values())\n else:\n return re.findall('\"([^\"]*)\"', preprocessed_input)\n else: \n return re.findall('\"([^\"]*)\"', preprocessed_input)", "title": "" }, { "docid": "ad013a9f1d228728341add9c220c95a1", "score": "0.63016796", "text": "def fill_lang(lang_title):\n error= True\n out= []\n info= HttpObject().get_data_from_http_response('GET', conf.API_URI)\n for lang in info['language']:\n if lang_title:\n if lang_title != lang['title'].lower():\n continue\n out.append(lang['title'].lower())\n error= False\n return out, error", "title": "" }, { "docid": "ec173e8f38dbde53ebcddb65b58f3e3e", "score": "0.61024165", "text": "def original_title(self, iso_format=None):\n\n fmt = iso_format or self._iso_format\n\n if 'v12' in self.data['article']:\n for title in self.data['article']['v12']:\n if 'l' in title:\n language = tools.get_language(title['l'], fmt)\n if language == self.original_language(iso_format=fmt):\n t = title.get('_', '').strip()\n if not t:\n t = title.get('t', '').strip()\n\n return html_decode(t)", "title": "" }, { "docid": "3ba2680a47019f7bf005dc4338ab5ac9", "score": "0.60651493", "text": "def get_titles(json_data):\n titles = []\n for el in json_data:\n titles.append(el['title'])\n return titles", "title": "" }, { "docid": "d7c24e516af4daf5126b037ebd794869", "score": "0.59161985", "text": "def make_title(words):\n return list(map(lambda x: x.title(), words))", "title": "" }, { "docid": "2a4083b8d4073c0bc5aedf2292efc9ab", "score": "0.5898381", "text": "def xml_languages(self, iso_format=None):\n\n if 'v601' in self.data['article']:\n return [i['_'] for i in self.data['article']['v601']]", "title": "" }, { "docid": "75fbf82a89b33ab4058b2ed15fe5ba6c", "score": "0.5890233", "text": "def create_lang_abbr_map():\n r = 
requests.get(\"https://ws.detectlanguage.com/0.2/languages\")\n return {x[\"code\"]: x[\"name\"] for x in r.json()}", "title": "" }, { "docid": "09855d162fe41674ad7c4eaee7185196", "score": "0.5882687", "text": "def get_article_titles(self):\r\n # INFOBAE\r\n if self.name == 'infobae':\r\n self.set_infobae_titles_opt()\r\n return self.news_titles\r\n # PAGINA 12\r\n elif self.name == 'pagina12':\r\n self.set_p12_titles_opt()\r\n return self.news_titles\r\n # MINUTO 1\r\n elif self.name == 'minutouno':\r\n self.set_minutouno_titles()\r\n return self.news_titles\r\n # TN\r\n elif self.name == 'tn':\r\n self.set_tn_titles()\r\n return self.news_titles\r\n # EL LITORAL\r\n elif self.name == 'ellitoral':\r\n self.set_litoral_titles()\r\n return self.news_titles\r\n # CLARIN\r\n elif self.name == 'clarin':\r\n self.set_clarin_titles()\r\n return self.news_titles\r\n # LA NACION\r\n elif self.name == 'lanacion':\r\n self.set_lanacion_titles()\r\n return self.news_titles", "title": "" }, { "docid": "c83cb060e48117b474118afcf21d2d46", "score": "0.5859863", "text": "def _split_locales(locales):\n for code in locales:\n yield code\n yield code.partition('-')[0].lower()", "title": "" }, { "docid": "e425417f6988e2764b230aeb5ecc7cc9", "score": "0.5809677", "text": "def get_titles(book_titles):\n min_word_length = 3\n taboo_words = [\"The\", \"What\", \"Other\", \"For\", \"You\",\n \"That\",\"And\", \"With\"]\n\n alist = []\n for i, raw_title in enumerate(book_titles[1:]):\n tokens = raw_title[0].split(\";\")\n \n if len(tokens) < 2:\n print(\"line {}: {}\".format(i, tokens))\n title = tokens[1].replace('\"', '')\n words = [word.capitalize() for word in re.split(r'\\W+', title)\n if len(word) >= min_word_length]\n alist.append([word for word in words if word not in taboo_words])\n return alist", "title": "" }, { "docid": "4824187798b8dd39defa6f316c7c74f4", "score": "0.58015513", "text": "def from_list_to_texts(lis):\n texts = \"\"\n for string in lis:\n if str(string) == '-':\n texts = texts[:-1]\n texts += str(string)\n else:\n texts += str(string) + \" \"\n return texts", "title": "" }, { "docid": "49db3e7f02fdcfc65a9a2867e2bffcd0", "score": "0.58009356", "text": "def get_post_titles(json_data) -> list:\n posts_title = list()\n for post in json_data:\n post_title = remove_chars(post[\"post_title\"])\n posts_title.append(post_title)\n\n return posts_title", "title": "" }, { "docid": "f2d5f4050945dbba936e4c5bfcc6e7e2", "score": "0.57730085", "text": "def extract_titles(row):\n data = [\n {\"title\":row[0], \"type\": \"OriginalTitle\"}\n ]\n for item in set(row[1:]):\n if item and item != row[0]:\n data.append(\n {\"title\":item,\"type\":\"AlternativeType\"}\n )\n return data", "title": "" }, { "docid": "53372430516e489f93cc8831eb603826", "score": "0.57588756", "text": "def make_titles_shorter(array_of_dict):\n\n def keep_last_three(string):\n my_pattern = '.*?(\\S+\\s\\S+\\s\\S+$)'\n my_group = re.match(my_pattern, string)\n try:\n return my_group.group(1)\n except:\n return string\n \n my_array_of_names = []\n \n for item in array_of_dict:\n item['title'] = keep_last_three(item['title'])\n \n my_array_of_names.append(item)\n \n \n return my_array_of_names", "title": "" }, { "docid": "7394723bd4d465d43c13c6a07d4dc58b", "score": "0.5721789", "text": "def make_title(word_list):\n if not word_list:\n \n return ''\n \n return title[0].upper() + word_list[1:]", "title": "" }, { "docid": "c12effd43d8f4a411c2dc6551a2dae34", "score": "0.5719602", "text": "def task_1_fix_names_start_letter(data: DT) -> DT:\n 
return [{key: value.title() if isinstance(value, str) else value for key, value in dict.items()} for dict in data]", "title": "" }, { "docid": "32ad39bb5003850e47a4439d49fb88b3", "score": "0.56843525", "text": "def _get_titles(body, titles, cont_type):\n\n pages = body['query']['pages']\n links = []\n if cont_type == 'plcontinue':\n link_type = 'links'\n else:\n link_type = 'linkshere'\n\n for page in pages:\n if link_type in pages[page]:\n links.append(pages[page][link_type])\n\n for link in links:\n for sub in link:\n titles.append(sub['title'])", "title": "" }, { "docid": "ff96c60503f177761f2c37b365086e6f", "score": "0.5641115", "text": "def getTitlesList(self,collection):\n titles = collection.find({}, {\"title\": 1})\n tmp = []\n for d in titles:\n tmp.append(d['title'])\n print d\n return tmp", "title": "" }, { "docid": "9e00ed8921f0cd9186e934ec5e8843c6", "score": "0.563004", "text": "def as_list(self):\n return [translate (self.code), translate (self.label), translate (self.duration)]", "title": "" }, { "docid": "0544c0e53a2a54dc6024029ef7da21bc", "score": "0.56208396", "text": "def special_read_language_list():\r\n\r\n path = 'language_list.txt'\r\n\r\n with open(path, 'r') as f:\r\n data = dict()\r\n for line in f.readlines():\r\n words = line.strip().split()\r\n data[words[1]] = words[0]\r\n return data", "title": "" }, { "docid": "2e7c7c18d49d260da7219c69c3d6c531", "score": "0.5613723", "text": "def listify(raw_text,lang=\"en\"):\n\n punctuation_to_replace = [\"---\",\"--\",\"''\"]\n for punctuation in punctuation_to_replace:\n raw_text = raw_text.replace(punctuation,\" \")\n words = [x.lower() for x in findall(r\"[\\w\\@\\#\\'\\&\\]\\*\\-\\/\\[\\=\\;]+\",raw_text,flags=UNICODE)]\n\n return words", "title": "" }, { "docid": "755b1805dd12ff9096dd7684b8482317", "score": "0.5612253", "text": "def extract_titles(self):\n titles = []\n if self.as_json is not None:\n base = json.loads(self.as_json)['SitesLinkingInResult']['Alexa']\n sites_linking_in = base['SitesLinkingIn']\n sites = sites_linking_in.get('Site', [])\n\n if isinstance(sites, list):\n for site in sites_linking_in.get('Site', []):\n title = site.get('Title').get('$')\n url = site.get('Url').get('$')\n titles.append({'title': title, 'url': url})\n else:\n kwargs = {\n 'title': sites.get('Title').get('$'),\n 'url': sites.get('Url').get('$'),\n }\n titles.append(kwargs)\n\n return titles", "title": "" }, { "docid": "363972c2a0813a5c932ce77df873394f", "score": "0.5600133", "text": "def languages(self, show_urls=False, iso_format=None):\n\n languages = set()\n\n if 'fulltexts' in self.data:\n\n if 'pdf' in self.data['fulltexts']:\n for lang in self.data['fulltexts']['pdf'].keys():\n languages.add(lang)\n\n if 'html' in self.data['fulltexts']:\n for lang in self.data['fulltexts']['html'].keys():\n languages.add(lang)\n\n languages.add(self.original_language(iso_format=iso_format))\n\n if len(languages) > 0:\n return [i for i in languages]", "title": "" }, { "docid": "615a215f83d76fb7d46e9c07f2070257", "score": "0.55826217", "text": "def _get_titles(fn, w):\n ptr = _wordindex.get(w)\n if(ptr is None): return []\n with open(fn, 'rb') as f:\n f.seek(ptr)\n nints = struct.unpack('>i', f.read(4))[0]\n arr = array.array(\"i\")\n arr.fromfile(f, nints)\n if _little_endian:\n arr.byteswap()\n return arr.tolist()", "title": "" }, { "docid": "8d936811fda9fb8aac79a6e6669c9892", "score": "0.5548793", "text": "def get_titles(self):\n resDicts = list()\n for item in self.titles:\n resDicts.append(dict(item))\n return resDicts", "title": "" 
}, { "docid": "57b943f9af8788aa403cf0d3e0e2d3ac", "score": "0.5531353", "text": "def pre_process_title(title):\n t = str(title).lower().strip()\n for word in urban_dict:\n if word in t:\n t = t.replace(word, ' '.join(urban_dict[word]))\n for emoji in emoji_dict:\n if emoji in t:\n t = t.replace(emoji, ' ' + ' '.join(emoji_dict[emoji]))\n t = check_whitespace_word(t)\n t = t.split(' ')\n t = [x.strip() for x in t]\n stop_words = ['playlist', 'music']\n t = [remove_punct(x) for x in t]\n return_tokens = []\n for ti in t:\n if ti in urban_dict:\n return_tokens.extend(urban_dict[ti])\n if ti not in stop_words:\n return_tokens.append(ti)\n return return_tokens", "title": "" }, { "docid": "a7cab9a39606c22369358387ef32fd85", "score": "0.55194676", "text": "def augment_text(text_to_tans, list_lang):\r\n translator = Translator()\r\n list_text_lang = []\r\n for lang in list_lang:\r\n tr_str = translator.translate(text_to_tans, src='ru', dest=lang)\r\n rus_tr_str = translator.translate(tr_str.text, src=lang, dest='ru')\r\n list_text_lang.append(rus_tr_str.text)\r\n return list_text_lang", "title": "" }, { "docid": "64c40d16ed33eaa395ab5ae755ff787c", "score": "0.55063915", "text": "def get_titles_from_search_results():\n\n pass", "title": "" }, { "docid": "1b3de167206dd12449aa035eb4b0e02c", "score": "0.5490614", "text": "def simplify_extracted_title(titles):\n def simplify_single_title(title):\n remove_after = [\"|\"] # Add to this list if needed\n for remove_string in remove_after:\n title = title.split(remove_string)[0]\n return title.strip()\n\n return list(map(simplify_single_title, titles))", "title": "" }, { "docid": "7b4a0f061c02c73d98ff622fe1ae7fa6", "score": "0.5473228", "text": "def get_english_posts(posts):\n english_posts = []\n\n for post in posts:\n if langid.classify(post['title'])[0] == 'en':\n english_posts.append(post)\n return english_posts", "title": "" }, { "docid": "d524e02ca8b4a3b88a774ffb3aa502d7", "score": "0.5447971", "text": "def get_player_titles(self) -> ContentList:\n playerTitles = [ContentItemDTO(p) for p in self.playerTitles]\n\n return ContentList(playerTitles)", "title": "" }, { "docid": "1bf15abe7c2593fc50445bd07514852b", "score": "0.5446618", "text": "def simplifyTitle(self):\n simple = self.title.lower()\n remove = [ \n u' ', # FIXME: Hardcoded Lithuanian \n u'„', # symbols.\n u'“',\n u'-',\n u'–',\n u',',\n u'.',\n #u'gimnazija',\n #u'pagrindinė',\n #u'pagrindine',\n #u'vidurinė',\n #u'vidurine',\n #u'pradinė',\n #u'pradine',\n u'mokykla',\n ]\n for i in remove:\n simple = simple.replace(i, u' ')\n replace = [\n (u'ė', u'e'),\n (u'ų', u'u'),\n (u'ž', u'z'),\n\n ]\n for k, v in replace:\n simple = simple.replace(k, v)\n self.simple = simple\n self.words = set(simple.split())", "title": "" }, { "docid": "234f0bd08870024548444d6da1e6ea59", "score": "0.5440797", "text": "def getTitles(self, metadata):\n fallback = [\"english_name\", \"romaji_name\", \"kanji_name\"]\n title = self.getValueWithFallbacks(metadata, titleKey(), *fallback)\n sort = self.getValueWithFallbacks(metadata, sortKey(), *fallback)\n\n return (title, sort)", "title": "" }, { "docid": "dd8f04b5a14c6d2aebe8d7cc4f4b7221", "score": "0.54178447", "text": "def translate(codes: List[str]) -> List[test_extract.Course]:\n courses = []\n for code in codes:\n courses.append(data.filterCourses(lambda c: c.id==code)[0])\n return courses", "title": "" }, { "docid": "23c89bc75cf41be603f4151f37ed18ba", "score": "0.5417205", "text": "def get_category_titles(self):\n\n elist = 
self.find_elements(self.locators['cattitles'])\n return [e.text for e in elist]", "title": "" }, { "docid": "ab5b4c5c1f34dd7e642986e36b3f941c", "score": "0.54081357", "text": "def parse_titles(title_elements):\n titles = [None] * len(title_elements)\n kanji_cnt = Counter()\n for idx, title in enumerate(title_elements):\n clean_title = title.text.strip().replace('\\u3000', '') #strip ideographic space\n titles[idx] = clean_title\n kanji_cnt += Counter(extract_kanji(clean_title))\n return titles, kanji_cnt", "title": "" }, { "docid": "e2a35965786687d1cbf2f84ca92d5eac", "score": "0.53924024", "text": "def bookTitles(self):\n all_titles = self.biblioteca.values()\n unpacked = [j for i in all_titles for j in i]\n return ', '.join(unpacked)", "title": "" }, { "docid": "f6d3e5cd866e46be4c1a9794b8c11ef1", "score": "0.5386279", "text": "def listify(raw_text,lang=\"en\"):\n\n punctuation_to_replace = [\"---\",\"--\",\"''\"]\n for punctuation in punctuation_to_replace:\n raw_text = raw_text.replace(punctuation,\" \")\n # four groups here: numbers, links, emoticons, words\n # could be storing which of these things matched it...but don't need to\n words = [x.lower() for x in re.findall(r\"(?:[0-9][0-9,\\.]*[0-9])|(?:http://[\\w\\./\\-\\?\\&\\#]+)|(?:[\\w\\@\\#\\'\\&\\]\\[]+)|(?:[b}/3D;p)|'\\-@x#^_0\\\\P(o:O{X$\\[=<>\\]*B]+)\",raw_text,flags=re.UNICODE)]\n\n return words", "title": "" }, { "docid": "501ed9add0f6f4280f85a8601eb815da", "score": "0.5381846", "text": "def get_titles(self):\n cursor = self.dbconnect.get_cursor()\n cursor.execute('select * from title')\n titles = list()\n for row in cursor:\n titles.append(row[0])\n return titles", "title": "" }, { "docid": "269627ebe2a4d327f454e56226190474", "score": "0.53777575", "text": "def get_title(data, name):\n for i, item in enumerate(data):\n if name in data[i][\"en_movie\"]:\n return item[\"en_txt\"]", "title": "" }, { "docid": "26d8626fa4dfe91e6fdce7ebea999794", "score": "0.5377615", "text": "def group_titles(release_title: str, alternative_titles: list) -> dict:\n grouped_titles = {}\n for language in VisualNovelDatabase.KNOWN_LANGUAGES:\n grouped_titles[language.__name__.lower()] = []\n\n grouped_titles['english'] = [release_title]\n\n for title in alternative_titles:\n for language in VisualNovelDatabase.KNOWN_LANGUAGES:\n if matches_language(title, language) and title not in grouped_titles[language.__name__.lower()]:\n grouped_titles[language.__name__.lower()].append(title)\n continue\n\n return grouped_titles", "title": "" }, { "docid": "d08f8db74b94cc668ef934dbb80b1ecb", "score": "0.536592", "text": "def title_category():\n\n titles = []\n\n r_url = requests.get(ADDRESS_SITE)\n bs_page = Bs(r_url.content, \"html.parser\")\n\n for list_link in bs_page.find(\"ul\", class_=\"nav-list\")(\"ul\")[0](\"li\"):\n titles.append(list_link.find(\"a\").text.strip())\n\n return titles", "title": "" }, { "docid": "1e520d15871047f593debc18c807799f", "score": "0.5343911", "text": "def test_nexted_with_list_and_dict(self):\n from palisades.i18n.translation import extract_languages\n lang_args = LanguageExtractionTest._basic_args()\n lang_args['list'] = [lang_args.copy(), lang_args.copy()]\n lang_args['list'][0]['zh'] = 'foo'\n lang_args['list'][1]['zh'] = 'foo'\n lang_args['label']['zh'] = 'foo'\n self.assertEqual(extract_languages(lang_args), ['de', 'en', 'es', 'zh'])", "title": "" }, { "docid": "560e7f440058210dd21d8cd3928cbbef", "score": "0.5334523", "text": "def _read_titles(fn):\n with open(fn) as f:\n return [line.strip().replace(r'\\n', '\\n') for 
line in f]", "title": "" }, { "docid": "08c08bb914b92886d67944560a0635aa", "score": "0.5330392", "text": "def get_titles(self):\n \n return self.titles[0], self.titles[1]", "title": "" }, { "docid": "05dcb417c57d52ffcd2b2eafd490a236", "score": "0.5329059", "text": "def get_wiki_texts(self):\n new_title_candidates = {}\n for t in self.title_candidates:\n text, title = self.get_wiki_text(t.encode('utf-8'))\n new_title_candidates[title] = text\n self.title_candidates = new_title_candidates", "title": "" }, { "docid": "371b878df92b70e277021269deb50bfc", "score": "0.5326339", "text": "def extract_titles(self, text):\n self.__clean_text = self.__remove_punctuation(text)\n by_indices = self.__find_by_indices()\n books = []\n for index in by_indices:\n book_author = self.__get_author(index)\n book_title = self.__get_title(index)\n\n if len(book_title) > 1 and len(book_author) > 1:\n books.append(book_title + book_author)\n return books", "title": "" }, { "docid": "d1c64a8a173986a702fa613cd17b17e8", "score": "0.53155994", "text": "def process_captions(self, list_of_items):\r\n processed_list = list()\r\n processed_list.append(self.start_word)\r\n processed_list.extend(list_of_items)\r\n processed_list.append(self.end_word)\r\n if self.pad_caption:\r\n processed_list.extend([self.end_word]*(self.pad_limit - len(list_of_items)))\r\n\r\n return processed_list", "title": "" }, { "docid": "5477fb9d0a850a209ccbdbf098f6b37b", "score": "0.5309655", "text": "def Titles(stub): \n titles = stub.Titles(protob_pb2.TitlesOpt())\n for title in titles:\n yield title", "title": "" }, { "docid": "3d894f5ceaefcd51e89eb64e6aeca4bd", "score": "0.5309131", "text": "def cli_mode(titles_name, category):\n media_titles = [] # Contains key names of titles_and_links dictionary.\n\n for i, x in enumerate(category.find_all_next(\"div\", {\"class\": \"title\"})):\n title = x.text.encode(\"ascii\", \"ignore\").decode(\"utf-8\").strip()\n url = x.a.get('href')\n media_titles.append({'title': title, 'url': SUBSCENE_URL+url})\n\n return media_titles", "title": "" }, { "docid": "2d9979f6c91fced93e8eb3933e659908", "score": "0.53086215", "text": "def read_data(data: Iterable[dict[str, str]]) -> Generator[list[str], None, None]:\n for entry in data:\n text = entry[\"article\"]\n words = split(r\"\\W+\", text)\n yield [word.lower() for word in words]", "title": "" }, { "docid": "eff6e242044e8f239f27649acc824434", "score": "0.530028", "text": "def as_list(self):\n return [translate (self.work_id), translate (self.name), translate (self.surname), translate (self.birthday)]", "title": "" }, { "docid": "698cc82c6b83fa9096d159a2343f97e5", "score": "0.52963525", "text": "def movies_lang(dataset, index_, lang_):\r\n movies_=[]\r\n for i in dataset:\r\n if i[index_]==lang_:\r\n movies_.append(i)\r\n explore_data(movies_,0,2)\r\n\r\n return movies_", "title": "" }, { "docid": "74acccdbd863789e17f4a7afa3ad6cf6", "score": "0.52828765", "text": "def create_menu_text(list_of_items):\n def translate_maybe(txt, lang):\n try:\n return gs.translate(txt, lang)\n except Exception:\n return txt\n\n def create_str_item(item):\n return \"{}: {} ({})\".format(\n item.get(\"category\").replace(\"Angebot \", \"N°\"),\n translate_maybe(item.get(\"name\"), \"en\"),\n \",\".join(item.get(\"notes\"))\n )\n ret = \"\\n\".join(map(create_str_item, list_of_items))\n return ret", "title": "" }, { "docid": "1c5321b39d7480c115e905cb5bd06581", "score": "0.5280264", "text": "def get_title_feature(data):\n\n titles = data['Name'].apply(get_title)\n title_map = {\"Mr\": 1, 
\"Miss\": 2, \"Mrs\": 3, \"Master\": 4, \"Dr\": 5,\n \"Rev\": 6, \"Major\": 7, \"Col\": 7, \"Mlle\": 8, \"Mme\": 8,\n \"Don\": 9, \"Lady\": 10, \"Countess\": 10, \"Jonkheer\": 10,\n \"Sir\": 9, \"Capt\": 7, \"Ms\": 2, \"Dona\": 8}\n\n for title, value in title_map.items():\n titles[titles == title] = value\n\n data['Title'] = titles\n\n return titles", "title": "" }, { "docid": "001e94fe5280a558d75d0b201ed0bd95", "score": "0.52611613", "text": "def make_data(data):\n data = data.lower() #turns all letters in string lowercase\n data = data.translate(None, string.punctuation) #strips all punctuation from string\n listdata = data.split()\n return listdata", "title": "" }, { "docid": "04029ba19c374799c878891c41499979", "score": "0.52486145", "text": "def abbreviated_iso_title(self):\n\n return self.data.get('v151', [{'_': None}])[0]['_']", "title": "" }, { "docid": "c61bf0b2a53d5f76256344cfc79b19ac", "score": "0.521775", "text": "def preprocess(self, entry):\n\n f_entry = entry.lower()\n\n f_entry = f_entry.replace('\\t', '|').strip()\n \n if not isinstance(f_entry, str):\n f_entry = self.strip_accents(str(f_entry, 'utf-8'))\n\n f_entry = self._SPECIAL_CHAR_REGEX.sub(' ', f_entry)\n\n f_entry = self._EXTRA_SPACE_REGEX.sub(' ', f_entry)\n\n book_desc = f_entry.split('|')\n\n book_desc_ = [' '.join(eng_stemmer.stem(k) for k in cys.split(' ')) for cys in book_desc]\n\n return book_desc_", "title": "" }, { "docid": "2df26de8de07cb3b8a596da80e6e6879", "score": "0.5211481", "text": "def get_titles(url=reader.URL):\n articles = _get_feed(url).entries\n return [a.title for a in articles]", "title": "" }, { "docid": "83a10795cb06800ba71d8a4e424d41f3", "score": "0.52097404", "text": "def supported_languages(self) -> list[str]:", "title": "" }, { "docid": "99b0f3dc39bb355bf0a2b10eba0432b5", "score": "0.5208257", "text": "def dedup_and_title_case_names(ip_list):\r\n unique_list = []\r\n\r\n for i in ip_list:\r\n if i not in unique_list:\r\n unique_list.append(i)\r\n\r\n unique_list = [x.title() if isinstance(x,str) else x for x in unique_list]\r\n\r\n return unique_list\r\n\r\n pass", "title": "" }, { "docid": "990af316b52c6ba62f3f83de89322983", "score": "0.5202857", "text": "def video_get_title_description(self):\r\n return track_description_list(libvlc_video_get_title_description(self))", "title": "" }, { "docid": "794c57fd5efea51445f03312d773f7d7", "score": "0.51986563", "text": "def prompt_language(self, sender):\n\n # set languages\n self.lang_list = [\n {'title': 'Arabic (Saudi Arabia)', 'code': 'ar-SA'},\n {'title': 'Czech (Czech Republic)', 'code': 'cs-CZ'},\n {'title': 'Danish (Denmark)', 'code': 'da-DK'},\n {'title': 'Dutch (Belgium)', 'code': 'nl-BE'},\n {'title': 'Dutch (Netherlands)', 'code': 'nl-NL'},\n {'title': 'English (Australian)', 'code': 'en-AU'},\n {'title': 'English (Ireland)', 'code': 'en-IE'},\n {'title': 'English (South Africa)', 'code': 'en-ZA'},\n {'title': 'English (United Kingdom)', 'code': 'en-GB'},\n {'title': 'English (United States)', 'code': 'en-US'},\n {'title': 'Finnish (Finland)', 'code': 'fi-FI'},\n {'title': 'French (Canadian)', 'code': 'fr-CA'},\n {'title': 'French', 'code': 'fr-FR'},\n {'title': 'German (Germany)', 'code': 'de-DE'},\n {'title': 'Greek (Greece)', 'code': 'el-GR'},\n {'title': 'Hindi (India)', 'code': 'hi-IN'},\n {'title': 'Hungarian (Hungary)', 'code': 'hu-HU'},\n {'title': 'Indonesian (Indonesia)', 'code': 'id-ID'},\n {'title': 'Italian (Italy)', 'code': 'it-IT'},\n {'title': 'Japanese (Japan)', 'code': 'ja-JP'},\n {'title': 'Korean (South 
Korea)', 'code': 'ko-KR'},\n {'title': 'Norwegian (Norway)', 'code': 'no-NO'},\n {'title': 'Polish (Poland)', 'code': 'pl-PL'},\n {'title': 'Portuguese (Brazil)', 'code': 'pt-BR'},\n {'title': 'Portuguese (Portugal)', 'code': 'pt-PT'},\n {'title': 'Romanian (Romania)', 'code': 'ro-RO'},\n {'title': 'Russian (Russia)', 'code': 'ru-RU'},\n {'title': 'Slovak (Slovakia) ', 'code': 'sk-SK'},\n {'title': 'Spanish (Mexico)', 'code': 'es-MX'},\n {'title': 'Spanish (Spain)', 'code': 'es-ES'},\n {'title': 'Swedish (Sweden)', 'code': 'sv-SE'},\n {'title': 'Thai (Thailand)', 'code': 'th-TH'},\n {'title': 'Turkish (Turkey)', 'code': 'tr-TR'},\n {'title': 'Chinese (China)', 'code': 'zh-CN'},\n {'title': 'Chinese (Hong Kong SAR China)', 'code': 'zh-HK'},\n {'title': 'Chinese (Taiwan)', 'code': 'zh-Tw'}\n ]\n\n self.prompt_lang = ui.load_view('dialogs/select_language')\n table = self.prompt_lang['tableview1']\n listsource = ui.ListDataSource(self.lang_list)\n table.data_source = listsource\n table.delegate = listsource\n listsource.action = self.set_language\n self.prompt_lang.present('sheet')", "title": "" }, { "docid": "e21090b69b1ff218f6f96dfdfc81207e", "score": "0.5187061", "text": "def get_word_list(file_name):\n\tf = open(file_name,'r')\n\tlines = f.readlines()\n\tcurr_line = 0\n\twhile lines[curr_line].find('CHAPTER I') == -1:\n\t curr_line += 1\n\n\tlines = lines[curr_line+1:]\n\n\tword = \"\"\n\tfor line in lines:\n\t\tword += line\n\n\t#strip punctuation\n\tword = word.translate(string.maketrans(\"\",\"\"), string.punctuation)\n\n\t#strip whitespace and convert to list\n\tword_list = re.sub(\"[^\\w]\", \" \", word).split()\n\n\t#convert each word to lowercase\n\tnew_word_list = []\n\tfor word in word_list:\n\t\tnew_word_list.append(word.lower())\n\n\treturn new_word_list", "title": "" }, { "docid": "e3abaec3e7db6de841b80f2408aa69f6", "score": "0.51823103", "text": "def custom_format(comedian, title):\n # swap comedian and title for special cases\n if comedian in ['The Standups', 'Comedy Central Presents']:\n title_placeholder = comedian\n comedian = title\n title = title_placeholder\n\n formatted = []\n for text in [comedian, title]:\n text = replace_if_title_does_not_begin_with(text, 'In ', 'in ')\n text = replace_if_title_does_not_begin_with(text, 'At ', 'at ')\n text = replace_if_title_does_not_begin_with(text, 'On ', 'on ')\n text = replace_if_title_does_not_begin_with(text, 'Of ', 'of ')\n text = replace_if_title_does_not_begin_with(text, 'To ', 'to ')\n text = replace_if_title_does_not_begin_with(text, 'The ', 'the ')\n text = replace_if_title_does_not_begin_with(text, 'From ', 'from ')\n text = replace_if_title_does_not_begin_with(text, 'For ', 'for ')\n text = replace_if_title_does_not_begin_with(text, 'And ', 'and ')\n text = replace_if_title_does_not_begin_with(text, 'Is ', 'is ')\n text = replace_if_title_does_not_begin_with(text, ' A ', ' a ')\n\n custom_mappings = {\n 'Hbo Comedy Half-hour': 'HBO Comedy Half-Hour',\n 'T.j. Miller': 'T.J. Miller',\n 'Smd': 'SMD',\n 'Ladsladslads': 'LadsLadsLads',\n 'Protected: Katherine Ryan': 'Katherine Ryan',\n 'Live (at the Time)': 'Live (At the Time)',\n 'Gabriel “fluffy” Iglesias': 'Gabriel “Fluffy” Iglesias',\n 'John Leguizamo’s Road to Broadway': 'John Leguizamo',\n 'D.l. Hughley': 'D.L. 
Hughley',\n 'Live Iv – Science': 'Live IV – Science',\n 'Comedy Central Special': 'Comedy Central Presents',\n 'Stand-up Comedian': 'Stand-Up Comedian',\n 'Ricky Gervais Live 2': 'Ricky Gervais',\n 'Politics': 'Live 2: Politics',\n 'Russell Howard Live': 'Russell Howard',\n 'Christina Pazsitzky': 'Christina P',\n 'Live in Madison Square Garden': 'Live at Madison Square Garden',\n 'A Piece of My Mind – Godbless America': 'A Piece of My Mind – God Bless America',\n 'Kill the Messenger – London, New York, Johannesburg': 'Kill the Messenger',\n 'Live from D.c.': 'Live from D.C.',\n 'If I Could Reach Out Through Your Tv and Strangle You I Would': 'If I Could Reach Out Through Your TV and '\n 'Strangle You, I Would',\n 'Live and Smokin’': 'Live & Smokin’',\n '…here and Now': 'Here and Now',\n 'Again!*': 'Again!',\n 'Louis C.k.': 'Louis C.K.',\n 'Smart and Classy': 'Smart & Classy',\n 'Oh Come On': 'Oh, Come On',\n 'Comin’ in Hot': 'Comin’ In Hot',\n 'This is Me Now': \"This Is Me Now\",\n 'Jesus is Magic': 'Jesus Is Magic',\n \"Frankie Boyle Live 2\": \"Frankie Boyle\",\n \"Patrice O’neal\": \"Patrice O’Neal\"\n }\n\n # try to return title mapping. if doesn't exist, return text as is\n try:\n formatted.append(custom_mappings[text])\n except KeyError:\n formatted.append(text)\n\n return formatted[0], formatted[1]", "title": "" }, { "docid": "d75a5ee88aa411b4eb8ffc4a188dad02", "score": "0.516964", "text": "def generate_title_candidates(self):\n for c in '{}[]\\n.':\n self.cf_title = self.cf_title.replace(c, '')\n self.cf_title = self.cf_title.split(':')[0]\n self.cf_title = self.cf_title.split('(')[0]\n if len(self.cf_title) > 1:\n if self.cf_title[0] != self.cf_title[0].upper() or \\\n self.cf_title[1] != self.cf_title[1].lower():\n self.cf_title = self.cf_title[0].upper() +\\\n self.cf_title[1:].lower()\n ce = BeautifulSoup.HTML_ENTITIES\n self.cf_title = BeautifulSoup(self.cf_title, convertEntities=ce)\n self.cf_title = self.cf_title.contents[0]\n self.cf_title = self.cf_title.replace('reg;', '')\n self.cf_title = self.cf_title.replace(';', '')\n self.cf_title = self.cf_title.replace('(R)', '')\n self.cf_title = self.cf_title.replace('(r)', '')\n keys = {self.cf_title.strip()}\n\n # handle prefix/suffix swaps, e.g., \"Haine, La\"\n prefixes = {'The', 'A', 'An', 'La', 'Le', 'Les', 'Die', 'Das', 'Der',\n 'Ein', 'Il', \"L'\", 'Lo', 'Le', 'I', 'El', 'Los', 'Las', 'O'}\n new_keys = set()\n for k in keys:\n parts = k.split(' ')\n if len(parts) > 1 and parts[0].strip() in prefixes:\n new_keys.add(' '.join(parts[1:]))\n keys |= new_keys\n\n # add \"The\" to the beginning, if it is not already there\n new_keys = set()\n for k in keys:\n p = k.split(' ')[0]\n if p not in prefixes:\n new_keys.add('The ' + k)\n keys |= new_keys\n\n # adapt captialization to the Wikipedia Manual of Style\n # (this is only a heuristic)\n new_keys = set()\n minuscles = {'a', 'an', 'the', 'and', 'but', 'or', 'nor', 'for',\n 'yet', 'of', 'to', 'in', 'for', 'on', 'with'}\n\n for k in keys:\n parts = k.split(' ')\n parts = [p for p in parts if p]\n parts_new = [parts[0]]\n for p in parts[1:]:\n if p.lower() not in minuscles:\n parts_new.append(p[0].upper() + p[1:])\n else:\n parts_new.append(p)\n new_keys.add(' '.join(parts_new))\n keys |= new_keys\n\n author_last = self.author.rsplit(' ', 1)[-1]\n book = [k + ' (' + author_last + ' book)' for k in keys]\n booka = [k + ' (book)' for k in keys]\n novel = [k + ' (novel)' for k in keys]\n novela = [k + ' (' + author_last + ' novel)' for k in keys]\n keys.update(set(book), 
set(novel), set(booka), set(novela))\n self.title_candidates = {k: '' for k in keys}", "title": "" }, { "docid": "75d4f0fe76a14d37e57cc891ad6853b0", "score": "0.5168943", "text": "def test_get_languages_grid_list(self):\n pass", "title": "" }, { "docid": "a0710d947d1b92f437a4020950830d03", "score": "0.51644987", "text": "def split_position_name(self, titles: list) -> List[str]:\n return random.choice(titles).split()", "title": "" }, { "docid": "fcc97695141df97852e8e3809d9f384f", "score": "0.51623565", "text": "def get_languages_list():\n return [language[0] for language in settings.LANGUAGES]", "title": "" }, { "docid": "5e50ae23fed0afbc0f4436d558c99c6d", "score": "0.5161475", "text": "def id_to_title(self, wikidata_id: str) -> List[str]:\n\n with sqlite3.connect(self._path_to_db) as conn:\n c = conn.cursor()\n c.execute(\n \"SELECT DISTINCT wikipedia_title FROM mapping WHERE wikidata_id =?\", (wikidata_id,)\n )\n results = c.fetchall()\n if len(results) >= 1:\n return results[0][0] # return the main record only\n return None", "title": "" }, { "docid": "aed8512c8b0d4852a2cf4cda85a16eea", "score": "0.51527727", "text": "def prep_data(self, data) -> list:\n text_list = []\n post_content = ''\n for idx, text in enumerate(data):\n post_content = data[idx]['content']\n text_list.append(post_content)\n return text_list", "title": "" }, { "docid": "e6cca3258fbc1db17e117d5dca378ca6", "score": "0.51463634", "text": "def _compress_ordinal_numbers(title):\n os = ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh', 'eighth', 'ninth', 'tenth', 'eleventh',\n 'twelfth', 'thirteenth', 'fourteenth', 'fifteenth', 'sixteenth', 'seventeenth', 'eighteenth', 'nineteenth',\n 'twentieth', 'thirtieth', 'fortieth', 'fiftieth', 'sixtieth', 'seventieth', 'eightieth', 'ninetieth']\n suffixes = ['st', 'nd', 'rd']\n decimals = ['twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty', 'ninety']\n\n for d in range(0, len(decimals)):\n for n in range(0, 9):\n if n < 3:\n title = re.sub(r'%s-%s' % (decimals[d], os[n]), '%s%s%s' % (d + 2, n + 1, suffixes[n]), title,\n flags=re.IGNORECASE)\n else:\n title = re.sub(r'%s-%s' % (decimals[d], os[n]), '%s%sth' % (d + 2, n + 1), title,\n flags=re.IGNORECASE)\n for n in range(0, 3):\n title = re.sub(r'%s' % os[n], '%s%s' % (n + 1, suffixes[n]), title, flags=re.IGNORECASE)\n for n in range(3, len(os)):\n title = re.sub(r'%s' % os[n], '%sth' % (n + 1), title, flags=re.IGNORECASE)\n\n return title.strip()", "title": "" }, { "docid": "bb95781911fc7e8bfea221dbbf322454", "score": "0.51377594", "text": "def format_list(l, must_sort=True, separator=' '):\n titles = [unicode(v) for v in l]\n if must_sort:\n titles = sorted(titles)\n\n return separator.join(titles)", "title": "" }, { "docid": "3d1aa4452c4e518ea3c102ac7fd335a7", "score": "0.5117662", "text": "def removeTitles(names):\r\n filtered_names = []\r\n for name in names:\r\n name = removeHonorifics(name)\r\n if len(name) > 0:\r\n filtered_names.append(name)\r\n\r\n return filtered_names", "title": "" }, { "docid": "59f48ca70bd22bdc88c8aa51824707e3", "score": "0.511757", "text": "def scrape_data() -> list:\n total_pages = TOTAL_PAGES\n titles = []\n\n print(\"Connecting to website\")\n for page in range(1, total_pages + 1):\n print(f\"Processing page: {page}/{total_pages}\")\n url = f\"https://nyaa.si/?f=0&c=0_0&q=[HorribleSubs]&p={page}\"\n soup = open_url(url)\n tags = soup('a')\n for tag in tags:\n anime_id = tag.get('href', None)\n temp = tag.get('title', None)\n if temp and 
temp.startswith(\"[HorribleSubs]\") and temp.endswith(\"[720p].mkv\"):\n anime_id = re.findall(\"view/([0-9]+)\", anime_id)[0]\n # temp = re.findall(\"\\[HorribleSubs\\] (.*?) - ([0-9]*) \\[720p\\].mkv\", temp)\n titles.append((temp, anime_id))\n print(\"Done!\")\n print(\"Anime retrieval complete!\")\n return titles", "title": "" }, { "docid": "21facbb1f44212dea6310c04b89f1a94", "score": "0.5104132", "text": "def other_titles(self):\n if 'v240' not in self.data:\n return None\n\n return [title['_'] for title in self.data.get('v240') if '_' in title and title['_'] != \"\"]", "title": "" }, { "docid": "0dfbc0a55f1c04ee44c6a9116dca62ab", "score": "0.51038796", "text": "def doi_and_lang(self):\n raw_doi = self.data.get('article', {}).get('v337')\n items = []\n for item in raw_doi or []:\n lang = item.get(\"l\")\n doi = item.get(\"d\")\n if lang and doi:\n if len(DOI_REGEX.findall(lang)) == 1 and len(doi) == 2:\n lang, doi = doi, lang\n if len(DOI_REGEX.findall(doi)) == 1 and len(lang) == 2:\n items.append((lang, doi))\n if self.doi:\n item = (self.original_language(), self.doi)\n if all(item) and item not in items:\n items.insert(0, item)\n return items", "title": "" }, { "docid": "0d12d863488cd5fee5793b52e164135a", "score": "0.5099155", "text": "def getSubTitle(self) -> unicode:\n ...", "title": "" }, { "docid": "117e598c459cc51aeec8a5ca9c3ee059", "score": "0.5096474", "text": "def normalize_date(date_list):\n x_li = list()\n for x in date_list:\n date = x.split(\"-\")\n d = date[1] + \"-\" + date[0]\n x_li.append(d)\n return x_li", "title": "" }, { "docid": "95d4fe8af1a412b88f2bed7ae0cb470c", "score": "0.50896", "text": "def add_nameanddate_title(list_authors,date,title):\n\n\n if len(list_authors)==1:\n add_title = '(' + list_authors[0].split(',')[0]\n elif len(list_authors)==2:\n add_title = '(' + list_authors[0].split(',')[0] + ' & ' + list_authors[1].split(',')[0]\n else:\n add_title = '(' + list_authors[0].split(',')[0] + ' et al'\n\n Year = date[:4]\n\n add_title += ' ' + Year + ')'\n\n new_title = title+ ' ' + add_title\n return new_title", "title": "" }, { "docid": "8809c166eba2035aaf15edaee939f20f", "score": "0.5087959", "text": "def get_audio_languages_from_current_actionmenu(self):\n return self.str_like_list_2_list(self.get_value_by_key(\"audioTitles\"))", "title": "" }, { "docid": "6151b2d1b3070ffe7987807f23d39766", "score": "0.5087666", "text": "def clean_jaspar_names(uncleaned_jaspar_ids):\n\n special_dict = {\"EWSR1-FLI1\" : [\"EWSR1\",\"FLI1\"]}\n names_list = []\n\n # split the combined names\n for uncleaned_jaspar_id in uncleaned_jaspar_ids:\n uncleaned_jaspar_id = uncleaned_jaspar_id.upper()\n split_names = uncleaned_jaspar_id.split(\"::\")\n for name in split_names:\n names_list.append(name)\n\n # replace variants\n for i, name in enumerate(names_list):\n names_list[i] = name.replace(\"(VAR.2)\",\"\").replace(\"(VAR.3)\",\"\")\n\n tmp_list = []\n for i, name in enumerate(names_list):\n if name in special_dict:\n tmp_list += special_dict[name]\n else:\n tmp_list.append(name)\n\n names_list = list(set(tmp_list))\n names_list.sort()\n\n return names_list", "title": "" }, { "docid": "3c10aea0ecdc8dbbcaf0927ade1c7301", "score": "0.5086054", "text": "def asXMLSubjects(self):\n subjects = []\n for key,value in self.data.get('name', dict()).iteritems():\n lang = key.lower().replace('_','-')\n subjects.append(u'<subject xml:lang=\"{0}\">{1}</subject>'.format(lang,value))\n if not subjects:\n subjects.append(u'<subject>{0}</subject>'.format(self.name))\n return subjects", 
"title": "" }, { "docid": "21bec1436aeeb61307004aaca202a4bf", "score": "0.5085983", "text": "def get_language_list(survey):\r\n headers = set()\r\n for header in survey._cell_values[0]:\r\n if '#' in header:\r\n headers.add(header.split('#')[1])\r\n if '::' in header:\r\n headers.add(header.split('::')[1])\r\n return list(headers)", "title": "" }, { "docid": "8a050a9d61196484d5989dfb083d38f3", "score": "0.50828284", "text": "def shouty_list(ls):\r\n for i in range(len(ls)):\r\n ls[i] = ls[i].upper()\r\n return ls", "title": "" }, { "docid": "feddf464ce30e385a7df18be55c4bdb1", "score": "0.5070396", "text": "def lower_text(text_list):\n return [a.lower() for a in text_list]", "title": "" }, { "docid": "c67400297b30560e24bf4e589dc9b24b", "score": "0.506867", "text": "def get_language_list():\n word2lang = os.path.join(psychic_learners_dir, 'data_utils', 'word_to_lang.json')\n with open(word2lang, 'r') as f:\n word2lang = json.load(f)\n language_counts = {}\n for word, lang in word2lang.items():\n if lang in language_counts.keys():\n language_counts[lang] += 1\n else:\n language_counts[lang] = 1\n result_list = sorted([[lang, counts] for lang, counts in language_counts.items()], \n key = lambda x: x[1], reverse=True)\n return result_list #array of [lang, count]", "title": "" }, { "docid": "80fa1de7e4550639d433734ad78608a9", "score": "0.5065887", "text": "def run(self,list_of_strings):\n for i,string in enumerate(list_of_strings):\n list_of_strings[i] = to_lower(remove_non_ascii(replace_nan(string)))\n return list_of_strings", "title": "" }, { "docid": "449db5b9c8ea75928526e2b16ad5cdac", "score": "0.5056463", "text": "def test_title(names):\n return (list(filter(lambda x: x.istitle(), names)),\n list(filter(lambda x: not x.istitle(), names)))", "title": "" }, { "docid": "89fd7615c6c864a25e5e989d3c919033", "score": "0.5040852", "text": "def translated_abstracts(self, iso_format=None):\n fmt = iso_format or self._iso_format\n\n trans_abstracts = {}\n if 'v83' in self.data['article']:\n for abstract in self.data['article']['v83']:\n if 'a' in abstract and 'l' in abstract: # Validating this, because some original 'isis' records doesn't have the abstract driving the tool to an unexpected error: ex. 
S0066-782X2012001300004\n language = tools.get_language(abstract['l'], fmt)\n if language != self.original_language(iso_format=fmt):\n trans_abstracts.setdefault(\n html_decode(language),\n html_decode(abstract['a'])\n )\n\n if len(trans_abstracts) == 0:\n return None\n\n return trans_abstracts", "title": "" }, { "docid": "fb31863090f1d972791745a3d8bc3c4c", "score": "0.50397676", "text": "def test_nested_with_list(self):\n from palisades.i18n.translation import extract_languages\n lang_args = LanguageExtractionTest._basic_args()\n lang_args['list'] = [{'en':'foo'}, {'es', 'foo'}, {'zh': 'foo'}]\n self.assertEqual(extract_languages(lang_args), ['de', 'en', 'es', 'zh'])", "title": "" }, { "docid": "42014003804e979c03f836a7237df1a7", "score": "0.5034766", "text": "def get_codes_to_countries_from_wiki():\n data = {}\n for continent, countries in get_continents_to_countries_from_wiki().items():\n for country_name in countries:\n try:\n country_code = pycountry.countries.get(name=country_name).alpha2\n data[country_code] = country_name\n continue\n except KeyError:\n pass\n\n try:\n country_code = get_country_name_to_2_code_from_wiki(country_name)\n data[country_code] = country_name\n continue\n except KeyError:\n pass\n\n return data", "title": "" }, { "docid": "eee8dd63c48a0259dad9328c7d087af1", "score": "0.5031524", "text": "def getVariantRedirects(rTitle: str) -> List[str]:\n variantTitles = [rTitle]\n replacements = [('Adm.', 'Admin.'),\n ('Animal', 'Anim.'),\n ('Am.', 'Amer.'),\n ('Atmospheric', 'Atmos.'),\n ('Br.', 'Brit.'),\n ('Calif.', 'Cal.'),\n ('Commun.', 'Comm.'),\n ('Contributions', 'Contrib.'),\n ('Entomol.', 'Ent.'),\n ('Investig.', 'Invest.'),\n ('Lond.', 'London'),\n ('Philos.', 'Phil.'),\n ('Political', 'Polit.'),\n ('Radiat.', 'Rad.'),\n ('Royal', 'Roy.'),\n ('Royal', 'R.'),\n ('Special', 'Spec.')]\n for replIso, replVariant in replacements:\n newVariantTitles = variantTitles\n for vTitle in variantTitles:\n if replIso in vTitle:\n newVariantTitles.append(vTitle.replace(replIso, replVariant))\n variantTitles = newVariantTitles\n dotless = [v.replace('.', '') for v in variantTitles]\n variantTitles.extend(dotless)\n return variantTitles", "title": "" }, { "docid": "d2acdced4796b8d6e836398e0fdfb619", "score": "0.50303465", "text": "def _title_from_filename(self, filename):\n for cpl in self.dcp._list_cpl:\n if cpl['FileName'] == filename:\n desc = \"({})\".format(\n cpl['Info']['CompositionPlaylist'].get(\n 'ContentTitleText', ''))\n return desc\n\n for pkl in self.dcp._list_pkl:\n if pkl['FileName'] == filename:\n desc = \"({})\".format(\n pkl['Info']['PackingList'].get('AnnotationText', ''))\n return desc\n\n return ''", "title": "" }, { "docid": "fad10631a5c06d6b34cca6a1fef9e35c", "score": "0.50282204", "text": "def parse(language_header):\n\n if language_header is None: return [] # noqa: E701\n\n # strip whitespaces.\n lh = language_header.translate(*remove_ws)\n\n # if nothing, return\n if lh == \"\": return [] # noqa: E701\n\n # split by commas and parse the quality values.\n pls = [lre.findall(x) for x in lh.split(',')]\n\n # drop uncomformant\n qls = [x[0] for x in pls if len(x) > 0]\n\n # use a heap queue to sort by quality values.\n # the value of each item is 1.0 complement.\n pq = []\n order = 0\n for lang in qls:\n order += 1\n if lang[0] != '':\n heapq.heappush(pq, (0.0, order, lang[0]))\n else:\n heapq.heappush(pq, (1.0-float(lang[2]), order, lang[1]))\n\n # get the languages ordered by quality\n # and replace - by _\n return [heapq.heappop(pq)[2].replace('-', 
'_') for x in range(len(pq))]", "title": "" }, { "docid": "a25572d904c730c634cfab58ea7bf821", "score": "0.50281733", "text": "def titles(self, unique=False):\n if unique:\n return tools.uniqued(title for _, title in self.iterfiles())\n return [title for _, title in self.iterfiles()]", "title": "" }, { "docid": "3ddf13953a25b15184a9577343a316c7", "score": "0.50245535", "text": "def getTitle(self) -> unicode:\n ...", "title": "" }, { "docid": "88f0af829b4a3eb803b95e306a6ff39a", "score": "0.5022029", "text": "def get_title(self, entry):\n titles = self.get_candidates(entry, TITLE_CANDIDATE_JSONPATH)\n return self.pick_longest(titles)", "title": "" }, { "docid": "3e9c4359271615605a12c40a6b381fd7", "score": "0.5019061", "text": "def standardize_list(string_list, group_by_letter_case=None, group_by_slugify=None):\n return [\n Question.standardize(strng, group_by_letter_case, group_by_slugify)\n for strng in string_list\n ]", "title": "" } ]
f7336e3c5843aec041b3eabb6d1a0311
Take integer y (tensor or variable) with n dims and convert it to 1hot representation with n+1 dims.
[ { "docid": "6e5732d85223adf236219a11551c52e0", "score": "0.8346173", "text": "def to_one_hot(y, n_dims=None):\n y_tensor = y.data\n y_tensor = y_tensor.type(torch.LongTensor).view(-1, 1)\n n_dims = n_dims if n_dims is not None else int(torch.max(y_tensor)) + 1\n y_one_hot = torch.zeros(y_tensor.size()[0], n_dims).scatter_(1, y_tensor, 1)\n y_one_hot = y_one_hot.view(*y.shape, -1)\n return y_one_hot", "title": "" } ]
[ { "docid": "16ae1ce18896056ce2bb276fb97081be", "score": "0.8294972", "text": "def to_one_hot(y, N):\n y_tensor = y#.data if isinstance(y, torch.autograd.Variable) else y\n y_tensor = y_tensor.long().view(-1, 1)\n if int(N) <= 0:\n N = torch.max(y_tensor).long() + 1\n y_one_hot = torch.zeros(y_tensor.shape[0], N).to(y_tensor.device).scatter_(1, y_tensor, 1)\n y_one_hot = y_one_hot.view(y.shape + (-1,))\n return y_one_hot", "title": "" }, { "docid": "2e5116cf2bfe7d856906cf1522c43ed0", "score": "0.8252922", "text": "def to_one_hot(y, n_dims=None):\n y_tensor = y.data #if isinstance(y, torch.Variable) else y\n y_tensor = y_tensor.type(torch.LongTensor).view(-1, 1)\n n_dims = n_dims if n_dims is not None else int(torch.max(y_tensor)) + 1\n y_one_hot = torch.zeros(y_tensor.size()[0], n_dims).scatter_(1, y_tensor, 1)\n #y_one_hot = y_one_hot.view(*(y.shape), -1)\n return torch.autograd.Variable(y_one_hot).cuda() #if isinstance(y, Variable) else y_one_hot", "title": "" }, { "docid": "82c42150f42e9e934fca7624d2cd13b8", "score": "0.809198", "text": "def _one_hot(self, y, n_labels, dtype):\n mat = np.zeros((len(y), n_labels))\n for i, val in enumerate(y):\n mat[i, val] = 1\n return mat.astype(dtype)", "title": "" }, { "docid": "a9a7b92e7178a26c9bdb02209aafab46", "score": "0.8059406", "text": "def make_one_hot(y):\n one_hot = np.zeros((len(y), 10))\n for i in range(len(y)):\n one_hot[i, y[i]] = 1.\n return one_hot.transpose(1, 0)", "title": "" }, { "docid": "25a98826aaf43077e327db2859e01137", "score": "0.7912115", "text": "def one_hot(y):\n n_classes = np.max(y) + 1\n return np.eye(n_classes)[y]", "title": "" }, { "docid": "843f02f00e6fe04856ce0fd799d053fa", "score": "0.7885", "text": "def _onehot(self, y, n_classes):\n onehot = np.zeros((n_classes, y.shape[0]))\n for idx, val in enumerate(y):\n onehot[val, idx] = 1.\n \"\"\"\n onehot是10行(y有多少就多少)列,每一行分别代表这10个数字其中一个在这y中的位置\n 转置后onehot.T就变成 y有多少就多少)行 10列,哪个索引上是1就代表这一行的数字是几\n 热编码的意思 https://blog.csdn.net/qq_27825451/article/details/83823665\n \"\"\"\n #\n return onehot.T", "title": "" }, { "docid": "8bb2a5c2c74e46b7f216827fcea1b7c9", "score": "0.78846824", "text": "def to_one_hot(self, y, n_classes):\n\n n_labels = y.shape[0]\n ret = np.zeros((n_classes, n_labels))\n ret[y, np.arange(n_labels)] = 1\n # But this makes 1 whenever y is zero, so we must put zero in all the first row:\n ret[0, :] = 0\n return ret", "title": "" }, { "docid": "917b721299e80c4a25d7ffb2acb59caa", "score": "0.7870014", "text": "def _one_hot(self, y):\n one_hot = np.zeros((y.shape[0], self.num_labels))\n one_hot[np.arange(y.shape[0]), y.T] = 1\n return one_hot", "title": "" }, { "docid": "99e7fa7dbbdc11bbfd35813b8218e9b6", "score": "0.78334177", "text": "def one_hot_encode(y, n_classes=None):\n n_classes = n_classes or numpy.max(y) + 1\n return numpy.eye(n_classes)[y]", "title": "" }, { "docid": "90d262c1a771400f6ded64a74280abb3", "score": "0.7789243", "text": "def to_one_hot(y, nb_class, dtype=None):\n \n fill_vals = cgt.ones((y.shape[0],))\n ret = cgt.zeros((y.shape[0], nb_class), dtype)\n \n d1 = cgt.arange(y.shape[0])\n d2 = cgt.cast(y, 'i1')\n \n ret = cgt.inc_subtensor(ret, [d1, d2], fill_vals)\n \n return ret", "title": "" }, { "docid": "f6f3e1792b5e80ce524c60e54c3a0dff", "score": "0.7789046", "text": "def index_to_one_hot(y, n_classes):\n one_hot = np.eye(n_classes)[y]\n\n return one_hot", "title": "" }, { "docid": "c6f0bf0e1589d8c0c8de4c5ee25c1755", "score": "0.7705924", "text": "def one_hot(y, num_classes=10):\n N = len(y)\n y_where = y[:, :6]\n y_label = y[:, 
6].astype(np.int64)\n y_label_one_hot = np.zeros((N, num_classes))\n y_label_one_hot[np.arange(N), y_label] = 1\n return np.concatenate((y_where, y_label_one_hot), axis=1)", "title": "" }, { "docid": "2e4b91c8ace43c6d717e9e2079ca80f2", "score": "0.76946247", "text": "def one_hot(y, numOfClasses):\n y = np.asarray(y, dtype='int32')\n\n if len(y) > 1:\n y = y.reshape(-1)\n\n if not numOfClasses:\n numOfClasses = np.max(y) + 1\n\n yMatrix = np.zeros((len(y), numOfClasses))\n yMatrix[np.arange(len(y)), y] = 1\n\n return yMatrix", "title": "" }, { "docid": "12e6846145402d63024911b0e6a40be8", "score": "0.7688399", "text": "def one_hot_enc(self, y=None):\n if y is None:\n X, y = self._extract_arrays()\n n_samples = y.shape[0]\n n_targets = len(np.unique(y))\n y_one_hot = np.zeros(shape=(n_samples, n_targets))\n for i in range(n_samples):\n col = int(y[i])\n y_one_hot[i, col] = 1\n return y_one_hot", "title": "" }, { "docid": "e156c530e6189191b35225e97134467c", "score": "0.76331055", "text": "def one_hot(y: np.ndarray, num_classes: int) -> np.ndarray:\n init_flag = 0\n\n for i in y:\n row = np.zeros(num_classes, dtype=int)\n row[int(i)] = 1\n\n if init_flag == 0:\n base = row\n init_flag = 1\n else:\n base = np.vstack((base, row))\n return(base)", "title": "" }, { "docid": "117b3b85dd3b9f8ceb4a6f46df3bc75b", "score": "0.7502489", "text": "def onehot_to_int(y):\n if y.dtype == torch.float and y.shape[-1] == 1000:\n y = torch.argmax(y, dim=-1)\n return y.reshape(-1)", "title": "" }, { "docid": "1d8825bc3c3558db5a7484ec3f3581e6", "score": "0.74925226", "text": "def one_hot(self, x, n):\n x = np.array(x) # 传入的x为list需转为array\n assert x.ndim == 1\n return np.eye(n)[x]", "title": "" }, { "docid": "e83b405b9a17daed6e7955767d0a38e2", "score": "0.7429509", "text": "def one_hot_decision_function(y):\n z = np.zeros_like(y)\n z[np.arange(len(z)), np.argmax(y, axis=1)] = 1\n return z", "title": "" }, { "docid": "cfa8ff3ac939fabaa954a742782235d7", "score": "0.74263453", "text": "def one_hot_np(Yc, cat_dim=None):\n if cat_dim is None:\n cat_dim = np.max(Yc) + 1\n Yoh = np.zeros((Yc.size, cat_dim))\n Yoh[np.arange(Yc.size),Yc.flatten()] = 1.0\n return Yoh", "title": "" }, { "docid": "7e1623bb26120861136c78ef33f18399", "score": "0.73423105", "text": "def to_categorical(x,n_col=None):\n if not n_col:\n n_col = np.amax(x)+1\n one_hot = np.zeros((x.shape[0], n_col))\n one_hot[np.arange(x.shape[0]), x] = 1\n return one_hot", "title": "" }, { "docid": "08ff24d5e80b2a5d9e0bf61a6cc5016f", "score": "0.7336097", "text": "def oneHotEncoder(label, n):\n tmp = np.zeros((len(label), n))\n for number in range(n):\n tmp[:, number] = (label == number)\n tmp = tmp.astype(int)\n\n return tmp", "title": "" }, { "docid": "6cdce0fb0bd17b98e36f9ca9c676f58f", "score": "0.73348695", "text": "def one_hot_encoder(y, m, n_of_class):\n\n\ty_encoded = np.zeros((n_of_class, m))\n\tfor i in range(m):\n\t\ty_encoded[y[i], i] = 1\n\n\treturn y_encoded", "title": "" }, { "docid": "a8bd32ac0711a235f7d21c6411c9983a", "score": "0.73303807", "text": "def one_hot_encode(x, n_classes):\n return np.eye(n_classes)[x]", "title": "" }, { "docid": "622429552dbde50037dab1cdc1813855", "score": "0.7296371", "text": "def to_categorical(x, n_col=None):\r\n if not n_col:\r\n n_col = np.amax(x) + 1\r\n one_hot = np.zeros((x.shape[0], n_col))\r\n one_hot[np.arange(x.shape[0]), x] = 1\r\n return one_hot", "title": "" }, { "docid": "926460357c478b363915840b0515ee2b", "score": "0.72423255", "text": "def onehot_encode(y, num_labels=None):\n y_reshaped = y.ravel()\n\n if 
num_labels is None:\n num_labels = len(set(y_reshaped))\n\n y_onehot = lil_matrix(y_reshaped.shape + (num_labels,), dtype='int16')\n y_onehot[np.arange(len(y_reshaped)), y_reshaped] = 1\n\n #y_onehot_old = np.zeros((len(labels), len(y)), dtype='int16')\n #for label in labels:\n # label_row = y_onehot_old[labels.index(label)]\n # label_row[np.nonzero(y == label)[0]] = 1\n \n return y_onehot.tocsr()", "title": "" }, { "docid": "f7fa12dd341dafd25518752c4e8811b8", "score": "0.7223366", "text": "def onehot2int(y):\n try:\n val = np.where(y == 1)[1]\n # print(y)\n # print(val, val.shape, y.shape)\n except IndexError:\n val = np.where(y == 1)[0]\n # print(y)\n # print(val, val.shape, y.shape)\n return val", "title": "" }, { "docid": "e9a5c4428ddd9ad4548c9c9099b0a509", "score": "0.72217864", "text": "def to_one_hot(y):\n lb = LabelBinarizer()\n lb.fit(y)\n Y = lb.transform(y)\n return (Y.base, lb.classes_)", "title": "" }, { "docid": "c473958b07a3d2163900d45272de9167", "score": "0.7176815", "text": "def one_hot(self, d):\r\n\t\ty = np.zeros((self.num_outputs, 1))\r\n\t\ty[d] = 1\r\n\t\treturn y", "title": "" }, { "docid": "0faffd3ccf8830eb8a48265c605cc2fd", "score": "0.7064721", "text": "def one_hot(x, class_count):\n return torch.eye(class_count)[x,:]", "title": "" }, { "docid": "c927cc17df35dfec3129a059eebe08c9", "score": "0.7039873", "text": "def unhot(y):\n if not isinstance(y, np.ndarray):\n y = np.asarray(y)\n _, n_classes = y.shape\n return y.dot(np.arange(n_classes))", "title": "" }, { "docid": "af5f1b1a426e903403537dcf08b044a3", "score": "0.70305526", "text": "def one_hot_encoder(x):\n ids = x.unique()\n id_dict = dict(list(zip(ids.numpy(), np.arange(len(ids)))))\n one_hot = th.zeros((len(x), len(ids)))\n for i, u in enumerate(x):\n if id_dict[u.item()] == 4:\n pass\n else:\n one_hot[i][id_dict[u.item()]] = 1\n\n return one_hot", "title": "" }, { "docid": "63404f1baf5f5043f615ed183d6a3fdb", "score": "0.7026372", "text": "def onehot(nclasses, dtype=torch.float32):\n def f(ys):\n if isinstance(ys, list):\n ys = np.ndarray(ys, dtype=\"int64\")\n if isinstance(ys, np.ndarray):\n result = np.zeros((len(ys), nclasses))\n result[np.arange(len(ys)), ys] = 1\n return result\n elif isinstance(ys, torch.Tensor):\n result = torch.zeros_like(ys, dtype=dtype)\n result.scatter(1, ys, 1)\n return result\n else:\n raise ValueError(\"unknown dtype\", ys.dtype)\n return f", "title": "" }, { "docid": "a0fe677246740da5f2abdaffa8c99868", "score": "0.7014668", "text": "def one_hot_class(y):\n enc = OneHotEncoder(sparse=False)\n y = y.reshape(-1, 1)\n return enc.fit_transform(y)", "title": "" }, { "docid": "7069547835d2f0646ec3ee9c44ac01e1", "score": "0.70019615", "text": "def _category_to_one_hot(category_id, num_classes, dtype=\"uint\"):\n return torch.from_numpy(np.eye(num_classes, dtype=dtype)[category_id])", "title": "" }, { "docid": "e28bab3b0aa0e51cd9cf6d9f14993e08", "score": "0.6995472", "text": "def to_one_hot(labels, dimension):\n results = np.zeros((len(labels), dimension))\n for i, label in enumerate(labels):\n results[i, label] = 1.\n return results", "title": "" }, { "docid": "32fae5e7d1b6f1e46d6fe0bfe99de1c3", "score": "0.69927055", "text": "def to_one_hot(inp, num_classes, device):\n y_onehot = torch.FloatTensor(inp.size(0), num_classes)\n y_onehot.zero_()\n\n y_onehot.scatter_(1, inp.unsqueeze(1).data.cpu(), 1)\n\n return y_onehot.to(device)", "title": "" }, { "docid": "1db0102f12d81b26928bc36de25cdee7", "score": "0.6976307", "text": "def __one_hot(self, y_train):\n numSamples = len(y_train)\n 
new_responses = np.zeros(numSamples*self.numClasses, np.float32)\n resp_idx = np.int32(y_train + np.arange(numSamples)*self.numClasses)\n new_responses[resp_idx] = 1\n return new_responses", "title": "" }, { "docid": "04de781916a3801c5d1d3ccbf5790ade", "score": "0.6955438", "text": "def one_hot_encode(x):\n if(x==0):\n return [1 , 0 , 0]\n\n if(x==1):\n return [0 , 1 , 0]\n\n if(x==2):\n return [0 , 0 , 1]", "title": "" }, { "docid": "f0010112c7bc49dec9e45b3f8b463538", "score": "0.6955125", "text": "def one_hot_encode(Y, classes):\n if not isinstance(Y, np.ndarray) or len(Y) == 0:\n return None\n if not isinstance(classes, int) or classes < np.max(Y) + 1:\n return None\n arr = np.zeros((classes, Y.shape[0]))\n for cl, m in enumerate(Y):\n arr[m][cl] = 1\n return arr", "title": "" }, { "docid": "7094a7f3173994d1240971ff0cafccd3", "score": "0.6952203", "text": "def one_hot_encode(x: torch.Tensor, size: int, dtype: torch.dtype) -> torch.Tensor:\n n_rows = len(x)\n\n # here we manually add 1 at the end, in order to have each row of the matrix as a\n # matrix, instead of vector, for properly calculating dot products\n # for example, if size = 15, then the each row should have the size - (15, )\n one_hot_encoded = torch.zeros((n_rows, size))\n one_hot_encoded[(torch.arange(n_rows), x)] = 1.\n return one_hot_encoded.type(dtype)", "title": "" }, { "docid": "b0ad8fd7b6a7fe58944e3f6d83d89f5a", "score": "0.69266176", "text": "def _one_hot(y = None):\n\t\tenc = OneHotEncoder()\n\t\treturn enc.fit(y)", "title": "" }, { "docid": "433d54bbaeba617fbc17a41b5e82142e", "score": "0.6906976", "text": "def indices_to_one_hot(data, n_classes): #NUEVO\n targets = np.array(data).reshape(-1)\n return np.eye(n_classes)[targets]", "title": "" }, { "docid": "c9e64c4f6081cdfadd300cc468cebdf4", "score": "0.6903235", "text": "def one_hot_decode(encoding):\n \n return (encoding.T @ np.arange(encoding.shape[0])).astype(int)", "title": "" }, { "docid": "e0a492f7cdf3fe5ac9f84b5d108f6cce", "score": "0.68825454", "text": "def get_one_hot_encoding_from_int(z, n_classes):\n\n z_one_hot = torch.zeros(len(z), n_classes).to(device)\n z_one_hot.scatter_(1, z.view(-1, 1), 1)\n z_one_hot = z_one_hot.view(len(z), n_classes)\n\n return z_one_hot", "title": "" }, { "docid": "d7bedcad2363e5dad2c79d638b8c112f", "score": "0.68796295", "text": "def convert_to_one_hot(Y, C):\r\n\r\n Y = np.eye(C)[Y.reshape(-1)].T\r\n return Y", "title": "" }, { "docid": "768c3d84eb09001471221e66cb62bfab", "score": "0.68472403", "text": "def to_categorical(y, nb_classes):\n y = np.asarray(y, dtype='int32')\n if not nb_classes:\n nb_classes = np.max(y)+1\n Y = np.zeros((len(y), nb_classes))\n for i in range(len(y)):\n Y[i, y[i]] = 1.\n return Y", "title": "" }, { "docid": "37030b593aed5312d20b439d5ed2f27f", "score": "0.68427604", "text": "def make_one_hot(input, num_classes=None):\n if num_classes is None:\n num_classes = input.max() + 1\n shape = np.array(input.shape)\n shape[1] = num_classes\n shape = tuple(shape)\n result = torch.zeros(shape)\n result = result.scatter_(1, input.cpu().long(), 1)\n return result", "title": "" }, { "docid": "09482fc1b5033fb54acb9fa27a3605cc", "score": "0.681917", "text": "def one_hot(X):\n # Reshape Y\n n = len(X)\n hands = X.reshape((n, 5, 2))\n\n out = np.zeros((n, 5 * (4 + 13)))\n\n for i, hand in zip(range(len(hands)), hands):\n for j, card in zip(range(len(hand)), hand):\n suit_slots = np.zeros(4)\n value_slots = np.zeros(13)\n\n suit, value = card - 1\n\n suit_slots[suit] = 1\n value_slots[value] = 1\n\n 
set_one_card(out[i], j, (suit_slots, value_slots))\n\n return out", "title": "" }, { "docid": "59b6b3f3cb59047639880fff36cc94a0", "score": "0.68180555", "text": "def create_one_hot(df):\n \n return ...", "title": "" }, { "docid": "e8100b1c703768161d6865253f991466", "score": "0.6814986", "text": "def one_hot_encode(Y, classes):\n if type(Y) is not np.ndarray or type(classes) is not int:\n return None\n if np.max(Y) > classes or len(Y) == 0:\n return None\n else:\n one = np.transpose(np.eye(classes)[Y])\n return one", "title": "" }, { "docid": "70bfb63eb901a702ba3d6fa9e9d82f7a", "score": "0.6794432", "text": "def onehot(arr, minlength=None):\n length = np.amax(arr) + 1\n if minlength is not None:\n length = max(minlength, length)\n result = np.zeros(arr.shape + (length,), dtype=np.float32)\n result[tuple(list(np.indices(arr.shape)) + [arr])] = 1\n return result", "title": "" }, { "docid": "1202e92872c059b338dc753133d17d25", "score": "0.6787197", "text": "def one_hot_encode(arr, labels=21):\n a = arr\n b = np.zeros((len(a), labels))\n b[np.arange(len(a)), a] = 1\n return b", "title": "" }, { "docid": "1331f2d849642e3d888977877b66f68a", "score": "0.67860436", "text": "def _one_hot(data):\n num_rows, num_cols = np.shape(data)\n encoded = np.array([], dtype=np.int32).reshape((num_rows, 0))\n for i in range(num_cols):\n vocabulary = sorted(list(set(data[:, i])))\n lookup = dict(list(zip(vocabulary, list(range(len(vocabulary))))))\n int_encoded = np.array([lookup[x] for x in data[:, i]])\n new_cols = np.eye(len(vocabulary), dtype=np.int32)[int_encoded]\n encoded = np.append(encoded, new_cols, axis=1)\n return encoded", "title": "" }, { "docid": "bffb1ef5df87e76675cd703158098ae9", "score": "0.6768129", "text": "def _one_hot(x, k, dtype=numpy.float32):\n return numpy.array(x[:, None] == numpy.arange(k), dtype)", "title": "" }, { "docid": "efbe480f74f4e83f69dbfceb22ae7ed3", "score": "0.67647326", "text": "def to_one_hot(labels, num_classes=10):\n return np.eye(num_classes)[labels]", "title": "" }, { "docid": "e82ade26a12a70275b73119bdb1bac41", "score": "0.6759235", "text": "def one_hot_encode(Y, classes):\n\n if not isinstance(Y, np.ndarray):\n return None\n if Y.size is 0:\n return None\n if type(classes) is not int:\n return None\n if classes < Y.max() + 1:\n return None\n\n data = Y\n one_hot = np.zeros((classes, Y.shape[0]))\n rows = np.arange(Y.shape[0])\n one_hot[data, rows] = 1\n return one_hot", "title": "" }, { "docid": "9e4cae793bc0adeb98d81ffd55e4a859", "score": "0.6744475", "text": "def onehot(t, num_classes):\n\tout = np.zeros((t.shape[0], num_classes))\n\tfor row, col in enumerate(t):\n\t\tout[row, col] = 1\n\treturn out", "title": "" }, { "docid": "501774addc8eec82f090c68242454331", "score": "0.67329955", "text": "def one_hot_encode(gt_cats, num_classes):\n return np.eye(num_classes)[gt_cats]", "title": "" }, { "docid": "b5c0451081035ac6833ff98d40621ff3", "score": "0.6705156", "text": "def one_hot_encoder(batch_inds, num_categories):\n\n one_hots = jax.nn.one_hot(batch_inds, num_classes=num_categories)\n \n return one_hots", "title": "" }, { "docid": "b13f87e495c64885510453d2955f087a", "score": "0.67041093", "text": "def one_hot_encode(Y, classes):\n try:\n return np.squeeze(np.eye(classes)[Y.reshape(-1)]).T\n except Exception:\n return None", "title": "" }, { "docid": "d41a24eaa1edb5538edf0110382d74a7", "score": "0.66888875", "text": "def one_hot_encoding(labels, num_classes=10):\n return np.array([[0 for a in range(0,label)]+\n [1]+\n [0 for b in range(label+1,num_classes)]\n for 
label in labels])", "title": "" }, { "docid": "4fab41415466db1f15fd237ee10c05a0", "score": "0.6687711", "text": "def one_hot(number, max_size):\n b = np.zeros(max_size,dtype=float)\n b[number]=1.0\n return b", "title": "" }, { "docid": "4fab41415466db1f15fd237ee10c05a0", "score": "0.6687711", "text": "def one_hot(number, max_size):\n b = np.zeros(max_size,dtype=float)\n b[number]=1.0\n return b", "title": "" }, { "docid": "6428d6a3b16f23948e06aaaae0eecc5e", "score": "0.66831017", "text": "def one_hot(x: TensorType, space: gym.Space) -> TensorType:\n if isinstance(space, Discrete):\n return tf.one_hot(x, space.n, dtype=tf.float32)\n elif isinstance(space, MultiDiscrete):\n if isinstance(space.nvec[0], np.ndarray):\n nvec = np.ravel(space.nvec)\n x = tf.reshape(x, (x.shape[0], -1))\n else:\n nvec = space.nvec\n return tf.concat(\n [tf.one_hot(x[:, i], n, dtype=tf.float32) for i, n in enumerate(nvec)],\n axis=-1,\n )\n else:\n raise ValueError(\"Unsupported space for `one_hot`: {}\".format(space))", "title": "" }, { "docid": "3a54c54f5b3bd9bde4296a54ca750ab3", "score": "0.6674728", "text": "def indices_to_one_hot(number, nb_classes,label_dummy=-1):\n \n if number==label_dummy:\n return np.zeros(nb_classes)\n else:\n return np.eye(nb_classes)[number]", "title": "" }, { "docid": "cd612e55d23e38779a199a9696f12575", "score": "0.66699153", "text": "def to_categorical(y, num_classes):\n\treturn np.eye(num_classes, dtype='uint8')[y]", "title": "" }, { "docid": "38564111ded698e07f68ef1c24125339", "score": "0.6658102", "text": "def indices_to_one_hot(data, nb_classes):\r\n targets = np.array(data).reshape(-1)\r\n return np.eye(nb_classes).astype(int)[targets]", "title": "" }, { "docid": "305eff4a6fc1504ebfd933a37a740eb4", "score": "0.66295415", "text": "def onehot_array():\n n_c = 3\n shape = (10, 10, 10)\n array = np.random.default_rng().normal(size=(n_c,) + shape)\n argmax_channel_idcs = np.argmax(array, axis=0)\n onehot_array = np.zeros(array.shape, dtype=np.int)\n for idx in range(n_c):\n onehot_array[idx, argmax_channel_idcs == idx] = 1\n return onehot_array", "title": "" }, { "docid": "f8a5c04f775f57211443b34de3f5275f", "score": "0.6622304", "text": "def to_categorical(y, num_classes):\n return np.eye(num_classes, dtype='uint8')[y]", "title": "" }, { "docid": "c91ce0491c44e53adedcd9cc60c26ef4", "score": "0.66184795", "text": "def _create_one_hot(self, int_representation: int) -> list:\n\n one_hot_target = np.zeros((self.class_amount))\n one_hot_target[int_representation] = 1\n\n return one_hot_target", "title": "" }, { "docid": "abbb8fb26be4a7d30f4e24ebe5fa48cc", "score": "0.66156", "text": "def to_categorical(y, nb_classes):\n y = np.asarray(y, dtype='int32')\n #high dimensional array warning\n if len(y.shape) > 2:\n warnings.warn('{}-dimensional array is used as input array.'.format(len(y.shape)), stacklevel=2)\n #flatten high dimensional array\n if len(y.shape) > 1:\n y = y.reshape(-1)\n if not nb_classes:\n nb_classes = np.max(y) + 1\n Y = np.zeros((len(y), nb_classes))\n Y[np.arange(len(y)),y] = 1.\n return Y", "title": "" }, { "docid": "f1ba53e8e605e1c7d8d46dffeaf6577e", "score": "0.66131717", "text": "def to_categorical(y, num_classes=None):\n y = np.array(y, dtype='int').ravel()\n if not num_classes:\n num_classes = np.max(y) + 1\n n = y.shape[0]\n categorical = np.zeros((n, num_classes))\n categorical[np.arange(n), y] = 1\n return categorical", "title": "" }, { "docid": "f1ba53e8e605e1c7d8d46dffeaf6577e", "score": "0.66131717", "text": "def to_categorical(y, num_classes=None):\n y = 
np.array(y, dtype='int').ravel()\n if not num_classes:\n num_classes = np.max(y) + 1\n n = y.shape[0]\n categorical = np.zeros((n, num_classes))\n categorical[np.arange(n), y] = 1\n return categorical", "title": "" }, { "docid": "f1ba53e8e605e1c7d8d46dffeaf6577e", "score": "0.66131717", "text": "def to_categorical(y, num_classes=None):\n y = np.array(y, dtype='int').ravel()\n if not num_classes:\n num_classes = np.max(y) + 1\n n = y.shape[0]\n categorical = np.zeros((n, num_classes))\n categorical[np.arange(n), y] = 1\n return categorical", "title": "" }, { "docid": "9b497d76361a7ac91cb5d771862987bd", "score": "0.6609716", "text": "def to_categorical(y, num_classes):\n return torch.eye(num_classes)[y.cpu().data.numpy(), ].to(device)", "title": "" }, { "docid": "c1e789a5d694832350679ab41d528f28", "score": "0.660857", "text": "def to_categorical(y, num_classes):\n new_y = torch.eye(num_classes, device=y.device)[y.cpu().data.numpy(),]\n return new_y", "title": "" }, { "docid": "574abdd6b0fb4698ddaabc7b95ab13a6", "score": "0.6607586", "text": "def one_hot(indices, num_classes):\n return array_ops.one_hot(indices, depth=num_classes, axis=-1)", "title": "" }, { "docid": "18a4ad130f3df6d68f82169a8181a630", "score": "0.65958405", "text": "def make_one_hot(examples, n_values=333):\n arr = np.empty((len(examples), len(examples[0]), n_values), dtype=object)\n for i in range(len(examples)):\n one_hots = to_categorical(examples[i], num_classes=n_values, dtype='float32')\n arr[i] = one_hots\n return arr", "title": "" }, { "docid": "ebc8f9acedae82cf2756bbf71f5f6984", "score": "0.6590285", "text": "def one_hot_decode(one_hot):\n if not isinstance(one_hot, np.ndarray):\n return None\n if len(one_hot.shape) != 2:\n return None\n if np.any((one_hot != 0) & (one_hot != 1)):\n return None\n\n classes, m = one_hot.shape\n y_decoded = np.zeros(m, dtype=int)\n\n for i, arrays in enumerate(one_hot):\n indexes = np.where(arrays == 1)\n y_decoded[indexes] = i\n\n return y_decoded", "title": "" }, { "docid": "a97f9aa8103957f2f822bd11ee2674c4", "score": "0.658177", "text": "def to_categorical(y, num_classes=None):\n y = np.array(y, dtype='int')\n input_shape = y.shape\n if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:\n input_shape = tuple(input_shape[:-1])\n y = y.ravel()\n if not num_classes:\n num_classes = np.max(y) + 1\n n = y.shape[0]\n categorical = np.zeros((n, num_classes))\n categorical[np.arange(n), y] = 1\n output_shape = input_shape + (num_classes,)\n categorical = np.reshape(categorical, output_shape)\n return categorical", "title": "" }, { "docid": "a97f9aa8103957f2f822bd11ee2674c4", "score": "0.658177", "text": "def to_categorical(y, num_classes=None):\n y = np.array(y, dtype='int')\n input_shape = y.shape\n if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:\n input_shape = tuple(input_shape[:-1])\n y = y.ravel()\n if not num_classes:\n num_classes = np.max(y) + 1\n n = y.shape[0]\n categorical = np.zeros((n, num_classes))\n categorical[np.arange(n), y] = 1\n output_shape = input_shape + (num_classes,)\n categorical = np.reshape(categorical, output_shape)\n return categorical", "title": "" }, { "docid": "2f0f2811551f291a7417ef686099d334", "score": "0.6576753", "text": "def to_categorical(y, num_classes=None):\n y = np.array(y, dtype='int')\n input_shape = y.shape\n if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:\n input_shape = tuple(input_shape[:-1])\n y = y.ravel()\n if not num_classes:\n num_classes = np.max(y) + 1\n n = y.shape[0]\n categorical = 
np.zeros((n, num_classes), dtype=np.float32)\n categorical[np.arange(n), y] = 1\n output_shape = input_shape + (num_classes,)\n categorical = np.reshape(categorical, output_shape)\n return categorical", "title": "" }, { "docid": "5606bc33fdc85130791b94391b8f609d", "score": "0.6573783", "text": "def one_hot_encode(labels):\n new_labels = np.zeros((len(labels), len(CATEGORIES)))\n new_labels[np.arange(labels.size), labels] = 1\n return new_labels", "title": "" }, { "docid": "26f153d8b239a48ef518388eaf030d11", "score": "0.6543183", "text": "def convert_to_one_hot_labels(input, target):\n tmp = input.new_zeros(target.size(0), target.max() + 1)\n tmp.scatter_(1, target.view(-1, 1), 1.0)\n return tmp", "title": "" }, { "docid": "b296c512c88c9f55b574237b0ae9c834", "score": "0.65425265", "text": "def to_one_hot(board: np.array) -> np.array:\n oh = np.stack((board == 0, board == 1, board == -1))\n return oh.astype(int)", "title": "" }, { "docid": "c4c87fc880117bd255f1ed1f5003fe0d", "score": "0.65373945", "text": "def to_categorical(y, num_classes=None):\n y = np.array(y, dtype='int')\n input_shape = y.shape\n if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:\n input_shape = tuple(input_shape[:-1])\n y = y.ravel()\n if not num_classes:\n num_classes = np.max(y) + 1\n n = y.shape[0]\n categorical = np.zeros((n, num_classes), dtype=np.float32)\n categorical[np.arange(n), y] = 1\n output_shape = input_shape + (num_classes,)\n categorical = np.reshape(categorical, output_shape)\n return categorical", "title": "" }, { "docid": "63b07dc087806dbe4490dd3f85161a47", "score": "0.6535646", "text": "def indices_to_one_hot(data, nb_classes):\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]", "title": "" }, { "docid": "63b07dc087806dbe4490dd3f85161a47", "score": "0.6535646", "text": "def indices_to_one_hot(data, nb_classes):\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]", "title": "" }, { "docid": "7df63c1bcc1c278e81cccbefc8b18ab8", "score": "0.65278596", "text": "def make_one_hot(input, num_classes):\n shape = np.array(input.shape)\n shape[1] = num_classes\n shape = tuple(shape)\n result = torch.zeros(shape).cuda()\n result = result.scatter_(1, input, 1)\n return result", "title": "" }, { "docid": "d6cc1fbb08f4ae5d1f511f61b68ce729", "score": "0.6524456", "text": "def one_hot_encode(labels):\n n_labels = len(labels)\n n_unique_labels = len(np.unique(labels))\n one_hot_encode = np.zeros((n_labels, n_unique_labels))\n one_hot_encode[np.arange(n_labels), labels - 1] = 1\n return one_hot_encode, n_unique_labels", "title": "" }, { "docid": "2b7316329476c7a2f276778e5c50ea15", "score": "0.6515593", "text": "def to_categorical(y, num_classes):\n y = np.asarray(y)\n y = y.squeeze()\n # print(type(y))\n y = np.eye(num_classes, dtype='uint8')[y]\n return y", "title": "" }, { "docid": "b5ae81b5f101c832d5521e153d5a614a", "score": "0.6514173", "text": "def reverse_onehot(df):\n return df.dot(np.array(range(df.shape[1]))).astype(int)", "title": "" }, { "docid": "c6c20cf91c3a69bab6b9be8a17d40f9e", "score": "0.65051866", "text": "def labels_to_one_hot(self, labels):\n N = len(labels)\n one_hot_labels = np.zeros([N, 4], dtype=int)\n one_hot_labels[np.arange(N), labels.astype(int)] = 1\n return one_hot_labels", "title": "" }, { "docid": "a5a4755d3347d2f8ca37ed23017d0168", "score": "0.6499896", "text": "def convert_to_one_hot_labels(input, target):\n tmp = input.new(target.size(0), target.max() + 1).fill_(-1)\n tmp.scatter_(1, target.view(-1, 1), 1.0)\n return 
tmp", "title": "" }, { "docid": "aabdead5f87ccb008538a64c288bf27e", "score": "0.6499616", "text": "def one_hot(labels, num_classes):\n labels_1hot_size = list(labels.size()) + [num_classes, ]\n labels_unsqueeze = labels.unsqueeze(-1)\n labels_1hot = torch.zeros(labels_1hot_size).scatter_(len(labels_1hot_size) - 1, labels_unsqueeze, 1)\n return labels_1hot", "title": "" }, { "docid": "b9e026464bf0126d86498e12190c526c", "score": "0.64962953", "text": "def one_hot_encode(target, length):\n batch_s = target.size(0)\n one_hot_vec = torch.zeros(batch_s, length)\n\n for i in range(batch_s):\n one_hot_vec[i, target[i]] = 1.0\n\n return one_hot_vec", "title": "" }, { "docid": "ed572e8bee52d9090bea9dde42c291cd", "score": "0.64947873", "text": "def one_hot_encoder(data, number_of_labels):\n data_size = len(data)\n one_hot_matrix = np.zeros(shape=(data_size, number_of_labels))\n for i in range(data_size):\n current_row = np.zeros(shape=(number_of_labels))\n current_number = data[i][0]\n if current_number == 10:\n current_row[0] = 1\n else:\n current_row[current_number] = 1\n one_hot_matrix[i] = current_row\n return one_hot_matrix", "title": "" }, { "docid": "8acaf2ece8af0a9f444a152a5d464edf", "score": "0.64887667", "text": "def one_hot(idx, l):\n ret = [0]*l\n ret[idx] = 1\n return ret", "title": "" }, { "docid": "0d20ec66d8beeb4465d1651a971778ac", "score": "0.6480735", "text": "def one_hot_encoding(labels, num_classes=10):\n shape = (labels.size, num_classes)\n one_hot = np.zeros(shape)\n rows = np.arange(labels.size)\n one_hot[rows, labels] = 1\n return one_hot", "title": "" }, { "docid": "58c5a77db170e679ebd5699bf1a40601", "score": "0.6478949", "text": "def to_categorical(y, num_classes=None, dtype=\"float32\"):\n\n y = np.array(y, dtype=\"int\")\n input_shape = y.shape\n if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:\n input_shape = tuple(input_shape[:-1])\n y = y.ravel()\n if not num_classes:\n num_classes = np.max(y) + 1\n n = y.shape[0]\n categorical = np.zeros((n, num_classes), dtype=dtype)\n categorical[np.arange(n), y] = 1\n output_shape = input_shape + (num_classes,)\n categorical = np.reshape(categorical, output_shape)\n return categorical", "title": "" } ]
3e2a1a41a04d3c65373b88054dd9463f
Decode given bytes object `data` as given type `name` and return the decoded data as a dictionary. >>> foo.decode('Question', b'0\\x0e\\x02\\x01\\x01\\x16\\x09Is 1+1=3?')
[ { "docid": "5a88bf17a05e248dbe34a7f77b98c0b1", "score": "0.7317871", "text": "def decode(self, name, data):\n\n return self._types[name].decode(data)", "title": "" } ]
[ { "docid": "603690964ff48e76ef4b6e52c09673fc", "score": "0.703049", "text": "def decode(data: dict):\n pass", "title": "" }, { "docid": "4047129ee4cf8585707fee8949b61464", "score": "0.7024252", "text": "def decode_data(data):\n return data.decode(encoding='utf-8') if isinstance(data, bytes) else data", "title": "" }, { "docid": "424c1cc1c85588324ae9b620935f32e8", "score": "0.6914742", "text": "def decode(data):\n raise NotImplementedError", "title": "" }, { "docid": "21bacf83b36ddc3bcc0085c2ea2954b6", "score": "0.6626561", "text": "def decode_bytes(name: Union[Text, bytes]) -> Text:\n if isinstance(name, bytes):\n name = name.decode(\"UTF-8\")\n\n return name", "title": "" }, { "docid": "a354fcd575b4d16e3ffbc8dad5f0d6b2", "score": "0.66095215", "text": "def decode(self, data):\n pass", "title": "" }, { "docid": "1a65bddd11c9b9246fbd582cda0f0b2b", "score": "0.65319896", "text": "def Decode(self, encoded_data):", "title": "" }, { "docid": "eacd87199d07ccc15e6db67a18c148f0", "score": "0.6510455", "text": "def decode(self, data):\n raise NotImplementedError", "title": "" }, { "docid": "fc0927980508c00acbbb374c13daccf7", "score": "0.6472989", "text": "def decode_dict(data: Dict[bytes, bytes]) -> Dict[str, str]:\n return {decode(key): decode(value) for key, value in data.items()}", "title": "" }, { "docid": "2587feb05ad04d2682e8ab34ee122d1d", "score": "0.645101", "text": "def mutf8_decode(data, **kwargs):\n return codecs.decode(data, NAME, kwargs.get('errors', 'strict'))", "title": "" }, { "docid": "02cee3dccd4e0fe53b4e0a0f17576b3f", "score": "0.637287", "text": "def _decode(cls, b):", "title": "" }, { "docid": "e4f5400623dbdbdd1dbae985d4a754a4", "score": "0.6369571", "text": "def decode(data: bytes) -> MessageData:\n return _encoder.from_json(data.decode(\"utf-8\"))", "title": "" }, { "docid": "1e7ae7d56d27f6ea3a15fba9bc9f3110", "score": "0.63455486", "text": "def decode(s):", "title": "" }, { "docid": "cd37dfbd6b8bc5384942d4d7ba38004d", "score": "0.63227206", "text": "def decodeData(edata):\n pdata = decodestring(edata)\n data = loads(pdata)\n return data", "title": "" }, { "docid": "ce3f60a70c8a313e3495927563998645", "score": "0.63206375", "text": "def decode(self, data: bytes) -> Optional[str]:\n raise NotImplemented", "title": "" }, { "docid": "2dcf4b030b10e8bec41a3d595a12a271", "score": "0.627883", "text": "def deserialize(data: bytes):\n pass", "title": "" }, { "docid": "806caee1f0ec394f39e71575581bc0f5", "score": "0.62637246", "text": "def _decode_asn1_string(data):\r\n if isinstance(data, BMPString):\r\n return bytes(data).decode(\"utf-16-be\")\r\n else:\r\n return bytes(data).decode(\"utf-8\")", "title": "" }, { "docid": "32ebdebf998c5fe42f31e468f2b3b764", "score": "0.6240461", "text": "def decode(cls, data):\n return cls(**global_encoder.decode(data))", "title": "" }, { "docid": "7b07971f7bb2e14a67bfaf89e51d0dcc", "score": "0.61948645", "text": "def decode(byte_string):\n obj = pickle.loads(byte_string)\n return obj", "title": "" }, { "docid": "86cd05970e6299458513a69c5a068286", "score": "0.61924446", "text": "def decoder(data):\n def next_byte(_it, start, count):\n try:\n return next(_it)[1]\n except StopIteration:\n raise UnicodeDecodeError(\n NAME, data, start, start + count,\n \"incomplete byte sequence\"\n )\n\n it = iter(enumerate(six.iterbytes(data)))\n for i, d in it:\n if d == 0x00: # 00000000\n raise UnicodeDecodeError(\n NAME, data, i, i + 1,\n \"embedded zero-byte not allowed\"\n )\n elif d & 0x80: # 1xxxxxxx\n if d & 0x40: # 11xxxxxx\n if d & 0x20: # 111xxxxx\n if d & 
0x10: # 1111xxxx\n raise UnicodeDecodeError(\n NAME, data, i, i + 1,\n \"invalid encoding character\"\n )\n elif d == 0xed:\n value = 0\n for i1, dm in enumerate(DECODE_MAP[6]):\n d1 = next_byte(it, i, i1 + 1)\n value = dm.apply(d1, value, data, i, i1 + 1)\n else: # 1110xxxx\n value = d & 0x0f\n for i1, dm in enumerate(DECODE_MAP[3]):\n d1 = next_byte(it, i, i1 + 1)\n value = dm.apply(d1, value, data, i, i1 + 1)\n else: # 110xxxxx\n value = d & 0x1f\n for i1, dm in enumerate(DECODE_MAP[2]):\n d1 = next_byte(it, i, i1 + 1)\n value = dm.apply(d1, value, data, i, i1 + 1)\n else: # 10xxxxxx\n raise UnicodeDecodeError(\n NAME, data, i, i + 1,\n \"misplaced continuation character\"\n )\n else: # 0xxxxxxx\n value = d\n # noinspection PyCompatibility\n yield mutf8_unichr(value)", "title": "" }, { "docid": "10f110e23290ae524edd974d06865304", "score": "0.6178719", "text": "def decode(data, encoding):\n\tencoding = encoding.lower()\n\tif encoding in ('utf-8', 'utf-16', 'utf-16be', 'utf-16le', 'utf-32', 'utf-32be', 'utf-32le'):\n\t\tdata = data.encode(encoding)\n\t\tregex = br'(?<!\\\\)(?:\\\\\\\\)*\\\\([nrt]|x[0-9a-f][0-9a-f])'\n\t\tdata = re.sub(regex, _decodestr_repl, data)\n\telif encoding == 'base64':\n\t\tdata = binascii.a2b_base64(data)\n\telif encoding in ('base16', 'hex'):\n\t\tif len(data) > 2 and re.match(r'^[a-f0-9]{2}[^a-f0-9]', data):\n\t\t\tdata = data.replace(data[3], '')\n\t\tdata = binascii.a2b_hex(data)\n\telse:\n\t\traise ValueError('unsupported encoding: ' + encoding)\n\treturn data", "title": "" }, { "docid": "4d42061da35a9b009b5cd57b8d183d5d", "score": "0.6173123", "text": "def decode (cls, bytes, cmddict=None):\n return cls()", "title": "" }, { "docid": "339b2d21c26d1c35ce4beb8520ce3a1f", "score": "0.61384875", "text": "def decode(byte_string):\n raise NotImplementedError(\"This method has to be overwritten!\")", "title": "" }, { "docid": "a56913b8474483788dadab950fa438de", "score": "0.6110796", "text": "def decode_pzem(data: bytes) -> Dict[str, Any]:\n res = {}\n int16 = lambda ix: data[ix] << 8 | data[ix + 1]\n int32 = lambda ix: int16(ix) + int16(ix + 2) * 65536\n\n res['voltage'] = int16(0) * 0.1 # Volts\n # res['current'] = int32(2) * 0.001 # Amps\n res['power'] = int32(6) * 0.1 # Watts\n res['energy'] = int32(10) # Watt-hours\n res['frequency'] = int16(14) * 0.1 # Hz\n res['power_factor'] = int16(16) * 0.01\n\n return res", "title": "" }, { "docid": "ff30b4857282e2a8c391d3229ea00059", "score": "0.61005086", "text": "def main_decoder(data):\n \n master_dictionary = MasterDictionary()\n standard_dictionary, special_characters_dictionary = master_dictionary(load = True)\n\n t = Translator(data)\n \n decoded = t._decode(standard_dictionary, special_characters_dictionary)\n \n return decoded", "title": "" }, { "docid": "5588f2e4e426705222e00660d550089c", "score": "0.6052947", "text": "def cookie_decode(data, key):\n data = tob(data)\n if cookie_is_encoded(data):\n sig, msg = data.split(tob('?'), 1)\n if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())):\n print msg\n return pickle.loads(base64.b64decode(msg))", "title": "" }, { "docid": "1748840e48ac127425d03b5c183bdf67", "score": "0.60516423", "text": "def decode_sdata(encoded_string):\n try:\n return pickle.loads(base64.b16decode(encoded_string))\n except (TypeError, EOFError, pickle.UnpicklingError):\n return None", "title": "" }, { "docid": "6763570ac1886b31f1bb1cd1c8510c94", "score": "0.6022265", "text": "def decode(s):\n if isinstance(s, unicode):\n return s\n elif isinstance(s, bytes):\n return 
s.decode(\"utf-8\")\n raise NotImplementedError(\"decode(): %s not implemented\" % (type(s)))", "title": "" }, { "docid": "cb4f62ae240fc0ebb55da90268b256d9", "score": "0.59927887", "text": "def decode_data(self):\n pass", "title": "" }, { "docid": "4058967aca84a3517d979f195ba1be43", "score": "0.59543186", "text": "def decode(value):\r\n\r\n return Coder.decodings[value]", "title": "" }, { "docid": "a5ef24b5be85e756ae13d17a37a6f9ba", "score": "0.59417355", "text": "def decode(data: Union[str, bytes], *, string_codec: strcodec.Codec = None) -> Track:\n decoded = base64.b64decode(data)\n stream = codec.MessageInput(io.BytesIO(decoded), string_codec=string_codec)\n return TrackDecoder().decode(stream)", "title": "" }, { "docid": "00bdde8b8ce7304d451754a2f3f65fa5", "score": "0.594075", "text": "def _decode_byte(data):\n return struct.unpack('!B', data)[0]", "title": "" }, { "docid": "8136473c2fa444c23eb4686a68463e65", "score": "0.59315395", "text": "def decode_to_string(data_bytes):\n\n\t# Python2.7\n\tif PYTHON_MAJOR_VERSION == 2 and PYTHON_MINOR_VERSION == 7:\n\t\treturn data_bytes\n\t\n\t# Python3\n\telif PYTHON_MAJOR_VERSION == 3: # Pyton3 uses unicde strings\n\t\treturn data_bytes.decode(\"utf-8\")\n\t\n\telse:\n\t\traise RuntimeError(\"Unrecognized Python interpreter.\")", "title": "" }, { "docid": "7854d721eef672bd963de5f00c84a7b1", "score": "0.58877546", "text": "def data_decoder(data):\n if not data.startswith('0x'):\n data = '0x' + data\n\n if len(data) % 2 != 0:\n data = '0x0' + data[2:]\n\n return binascii.unhexlify(data[2:])", "title": "" }, { "docid": "7854d721eef672bd963de5f00c84a7b1", "score": "0.58877546", "text": "def data_decoder(data):\n if not data.startswith('0x'):\n data = '0x' + data\n\n if len(data) % 2 != 0:\n data = '0x0' + data[2:]\n\n return binascii.unhexlify(data[2:])", "title": "" }, { "docid": "cb043ba7ad6e9a32239fb4918f3b72c9", "score": "0.58798975", "text": "def _decode_name(self, name):\n return name", "title": "" }, { "docid": "5518950cc9e0eaaaca0e7c8118d54212", "score": "0.58737576", "text": "def decode_utf8(input):\n if not input:\n return input\n\n print(input)\n try:\n return ast.literal_eval('b\"%s\"' % input).decode(\"utf8\")\n except SyntaxError:\n return ast.literal_eval('\"%s\"' % input)", "title": "" }, { "docid": "d04cda7881e3bd5f6b3100b1b815e8b0", "score": "0.58709097", "text": "def unpack(cls, data: bytes):\n\n base = cls.ENCODER.unpack(data[:cls.ENCODER.size])\n return cls(\n info1=base[1],\n info2=base[2],\n info3=base[3],\n fields_num=base[8],\n bins_num=base[9],\n status_code=base[4],\n generation=base[5],\n )", "title": "" }, { "docid": "4d1fb656686fc22c37bfff5309dac7f2", "score": "0.58645236", "text": "def decode_message(message):", "title": "" }, { "docid": "741fa379f1b38fce7b2114632e273f2c", "score": "0.5847286", "text": "def decode(mail_as_bytes, which=1):\n return mail_decode(mail_as_bytes, which)", "title": "" }, { "docid": "12c7f03a762170db396f07278dda26df", "score": "0.58421886", "text": "def decode_bytes(s):\n if isinstance(s, unicode):\n return s\n elif isinstance(s, bytes):\n return s.decode(\"raw_unicode_escape\")\n else:\n return s", "title": "" }, { "docid": "a68b6af0031654ac09b4608c543a0c23", "score": "0.5812921", "text": "def decode_value(data):\n new_data = {}\n for x, y in data.items():\n value = bytes(y.replace('%', '=').replace(\"+\", \" \"), 'UTF-8')\n value_decode_str = quopri.decodestring(value).decode('UTF-8')\n new_data[x] = value_decode_str\n return new_data", "title": "" }, { "docid": 
"8342929e5a514dd514a855399e77acf8", "score": "0.58058226", "text": "def decode(self, data, key):\n return self.decompress(self.decrypt(base64.b64decode(data), key))", "title": "" }, { "docid": "63b7bc025554044b5e49415dd0d938b3", "score": "0.5775589", "text": "def decode(self, data):\n return json.loads(data)", "title": "" }, { "docid": "9ef79cfabc48a3f982da119bf5f53d33", "score": "0.5767549", "text": "def decode(self, packet):\n packet['data'] = decode_dct(packet['data'])\n return packet", "title": "" }, { "docid": "25f2f81b82b6d3afacf4b5ea9cff24cf", "score": "0.57656986", "text": "def decode(cls, data: bytes) -> \"KNXDPacket\":\n return cls(KNXDPacketTypes(int.from_bytes(data[0:2], byteorder='big')), data[2:])", "title": "" }, { "docid": "59ee574aa4d1eef3d5f6418aa50d6f9e", "score": "0.57641006", "text": "def _decode_dict(data):\n rv = {}\n for key, value in data.iteritems():\n if isinstance(key, unicode):\n key = key.encode('utf-8')\n if isinstance(value, unicode):\n value = value.encode('utf-8')\n elif isinstance(value, list):\n value = _decode_list(value)\n elif isinstance(value, dict):\n value = _decode_dict(value)\n rv[key] = value\n return rv", "title": "" }, { "docid": "30297f9ebbc5999322f985633e20a955", "score": "0.5745714", "text": "def decode(self, obj: bytes, key_or_type: Union[Type, str]):\n key = self._get_key(key_or_type)\n return self.decoders[key](obj)", "title": "" }, { "docid": "530b0bae59f6fb4a83d4899d68e24110", "score": "0.57393855", "text": "def decode_message_part(message_part):\n charset = message_part.get_content_charset()\n if charset is None or charset == '7-bit' or charset == '7bit':\n charset = 'us-ascii'\n elif charset == '8-bit' or charset == '8bit':\n charset = 'iso-8859-15'\n try:\n return message_part.get_payload(decode=True).decode(charset, errors='ignore')\n except LookupError:\n return message_part.get_payload(decode=True).decode('us-ascii', errors='ignore')", "title": "" }, { "docid": "8de399c29e938832f13d07b2189f25f4", "score": "0.5723284", "text": "def decode(data):\n return \"\".join(SquareCodeDecoder(data).stream())", "title": "" }, { "docid": "35c892dab8373fe36bf0136e49d6712d", "score": "0.57231694", "text": "def auto_decode(data):\n # type: (bytes) -> Text\n for bom, encoding in BOMS:\n if data.startswith(bom):\n return data[len(bom):].decode(encoding)\n # Lets check the first two lines as in PEP263\n for line in data.split(b'\\n')[:2]:\n if line[0:1] == b'#' and ENCODING_RE.search(line):\n encoding = ENCODING_RE.search(line).groups()[0].decode('ascii')\n return data.decode(encoding)\n return data.decode(\n locale.getpreferredencoding(False) or sys.getdefaultencoding(),\n )", "title": "" }, { "docid": "c587c82c8458bf888edb5a6ab31e38f9", "score": "0.5707315", "text": "def deserialize(cls, data: bytes):\n pass", "title": "" }, { "docid": "cdfa8f1ffa2eef2fa03c67e48ca91b16", "score": "0.56977594", "text": "def decode(data, alphabet=None):\n validate_alphabet(alphabet or RFC_3548)\n validate_encoded_data(data, alphabet or RFC_3548)\n\n encoded = data\n if alphabet is not None:\n try:\n # Python 2\n translator = string.maketrans(alphabet, RFC_3548)\n except AttributeError:\n # Python 3\n translator = bytes.maketrans(alphabet, RFC_3548)\n encoded = data.translate(translator)\n\n # base64 module / RFC3548 requires padding for decoding\n encoded += b'=' * ((8 - len(data) % 8) % 8)\n\n return base64.b32decode(encoded)", "title": "" }, { "docid": "d4e38d3d8e49da7978d1d45c894f0bd5", "score": "0.56617093", "text": "def decode(self, data, content_type, 
content_encoding):\r\n content_type = content_type or 'application/data'\r\n content_encoding = (content_encoding or 'utf-8').lower()\r\n\r\n # Don't decode 8-bit strings or unicode objects\r\n if content_encoding not in ('binary', 'ascii-8bit') and \\\r\n not isinstance(data, unicode):\r\n data = codecs.decode(data, content_encoding)\r\n\r\n try:\r\n decoder = self._decoders[content_type]\r\n except KeyError:\r\n return data\r\n\r\n return decoder(data)", "title": "" }, { "docid": "7ed8145dd5152864c6373eb0ec80c4c6", "score": "0.5657905", "text": "def base64_to_dict(data):\n\n data = binascii.a2b_base64(data + \"\\n\")\n data = zlib.decompress(data)\n\n if not isinstance(data, basestring): # bytes\n data = data.decode(\"utf-8\")\n\n data = json.loads(data)\n\n return data", "title": "" }, { "docid": "35d30f007a18521e8e53e2fee64ee64a", "score": "0.56314313", "text": "def decode_string(data):\n return data.decode(\"utf-8\").rstrip(\"\\0\")", "title": "" }, { "docid": "01583dbf9389f1071c94ce00c848f024", "score": "0.56291986", "text": "def decode(self):\n binntype = self._buffer.read(1)\n decoder = self._decoders.get(binntype, None)\n if decoder and binntype in (types.BINN_OBJECT, types.BINN_MAP, types.PYBINN_MAP):\n return decoder(binntype)\n\n if decoder:\n return decoder()\n\n # if type was not found, try using custom decoders\n for decoder in self._custom_decoders:\n if not issubclass(type(decoder), CustomDecoder):\n raise TypeError(\"Type {} is not CustomDecoder.\")\n if binntype == decoder.datatype:\n return self._decode_custom_type(decoder)\n\n raise TypeError(f\"Invalid data format: {binntype}\")", "title": "" }, { "docid": "6e1585d6a458b0e19ded35f26ff111b5", "score": "0.5621349", "text": "def _decode_word(data):\n return struct.unpack('!I', data)[0]", "title": "" }, { "docid": "939f5780d2c670a683ef7a9c140e4878", "score": "0.5620982", "text": "def test_decoding(self):\n self.assertEqual(decode('password'), \"b'¥«,Â\\x8aÝ'\")", "title": "" }, { "docid": "7d3df7093ebb90ffd691879b6e2b4ffe", "score": "0.5613214", "text": "def decode(input, errors='strict'):", "title": "" }, { "docid": "744fcd33732b1878555a5f83e6983fe1", "score": "0.5612513", "text": "def decode(s):\n return from_dict(json.loads(s))", "title": "" }, { "docid": "55350763d35a7ff9533af9d4af49514b", "score": "0.5604345", "text": "def decode_message(message_bytes: bytes) -> dict:\n return json.loads(message_bytes.decode('utf-8'))", "title": "" }, { "docid": "9dee63b45632180b9dddce7a400d0033", "score": "0.5597405", "text": "def decode(self, input):\n return self.decoder(input)", "title": "" }, { "docid": "b39b49593faac639dfe7b11611af4b67", "score": "0.558156", "text": "def _decode_message(self, message, data_coding):\n codec = {\n 1: 'ascii',\n 3: 'latin1',\n 8: 'utf-16be', # Actually UCS-2, but close enough.\n }.get(data_coding, None)\n if codec is None or message is None:\n log.msg(\"WARNING: Not decoding message with data_coding=%s\" % (\n data_coding,))\n else:\n try:\n return message.decode(codec)\n except Exception, e:\n log.msg(\"Error decoding message with data_coding=%s\" % (\n data_coding,))\n log.err(e)\n return message", "title": "" }, { "docid": "40d23e383e9e7bd3870f7ce04215798e", "score": "0.5570802", "text": "def decode(self, s, _w=WHITESPACE.match):\r\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\r\n end = _w(s, end).end()\r\n if end != len(s):\r\n raise ValueError(errmsg(\"Extra data\", s, end, len(s)))\r\n return obj", "title": "" }, { "docid": "b20532e95d66e5f13817d845b347f8ef", "score": "0.5564892", 
"text": "def base64_decodestring(data):\n return base64.decodebytes(data.encode()).decode()", "title": "" }, { "docid": "9443962dea3bc7e63b5bd25ff455f1f9", "score": "0.55640435", "text": "def from_bytes(data):\n if type(data) is str:\n return data\n\n return str(data, \"UTF-8\")", "title": "" }, { "docid": "bc8d1821ab3a770b0b45ff9cd7cc1f54", "score": "0.5559787", "text": "def b64_decode(data):\n message_bytes = base64.b64decode(data)\n return message_bytes.decode('utf-8')", "title": "" }, { "docid": "7d225c396c3ee5728f781d40a0eee684", "score": "0.55586195", "text": "def decode(input, encoding='utf-8', errors='strict', detect='utf-8'):\n if isinstance(input, str):\n try:\n r = dec(input, encoding, errors)\n except:\n r = dec(input, det(input)['encoding'], errors)\n\n if isinstance(r, str):\n return decode(r, detect, errors, detect)\n else:\n return r.replace(u'\\ufeff', '', 1)\n elif isinstance(input, unicode):\n if encoding == Py.Enc.default:\n return input\n else:\n return decode(enc(input, 'latin-1'), encoding, errors, detect)\n else:\n return input", "title": "" }, { "docid": "6f789815f260026a799af064f8a00363", "score": "0.5557743", "text": "def bdecode(x):\n def decode_int(x, f):\n f += 1\n newf = x.index('e', f)\n try: n = int(x[f:newf])\n except (OverflowError, ValueError): n = long(x[f:newf])\n if x[f] == '-':\n if x[f + 1] == '0': raise ValueError\n elif x[f] == '0' and newf != f+1: raise ValueError\n return (n, newf+1)\n def decode_string(x, f):\n colon = x.index(':', f)\n try: n = int(x[f:colon])\n except (OverflowError, ValueError): n = long(x[f:colon])\n if x[f] == '0' and colon != f+1: raise ValueError\n colon += 1\n return (x[colon:colon+n], colon+n)\n def decode_list(x, f):\n r, f = [], f+1\n while x[f] != 'e':\n v, f = decode_func[x[f]](x, f)\n r.append(v)\n return (r, f + 1)\n def decode_dict(x, f):\n r, f = {}, f+1\n lastkey = None\n while x[f] != 'e':\n k, f = decode_string(x, f)\n if lastkey >= k: raise ValueError\n lastkey = k\n r[k], f = decode_func[x[f]](x, f)\n return (r, f + 1)\n decode_func = {\n 'l' : decode_list ,\n 'd' : decode_dict,\n 'i' : decode_int}\n for i in range(10): decode_func[str(i)] = decode_string\n if hasattr(x, 'read'): x = x.read()\n try: r, l = decode_func[x[0]](x, 0)\n except (IndexError, KeyError):\n try:\n x = open(x, 'r').read()\n r, l = decode_func[x[0]](x,0)\n except (OSError, IOError, IndexError, KeyError): raise ValueError\n if l != len(x): raise ValueError\n return r", "title": "" }, { "docid": "38f03ac93ecda3c5d0198a69505548d6", "score": "0.5554388", "text": "def decode(self, byte_str):\n return byte_str.decode(self.str_encoding)", "title": "" }, { "docid": "19143921f775c63e129bf98d66a71960", "score": "0.55379367", "text": "def smart_decode(claim_value):\n\n # if already decoded, return\n if isinstance(claim_value, ClaimDict):\n return claim_value\n elif isinstance(claim_value, dict):\n return ClaimDict.load_dict(claim_value)\n\n try:\n claim_value = binascii.unhexlify(claim_value)\n except (TypeError, ValueError):\n pass\n\n if claim_value[0] in ['{', ord('{')]:\n try:\n if isinstance(claim_value, bytes):\n claim_value = claim_value.decode()\n decoded_json = json.loads(claim_value)\n return migrate_json_claim_value(decoded_json)\n except (ValueError, TypeError):\n pass\n try:\n if isinstance(claim_value, str):\n claim_value = claim_value.encode()\n return ClaimDict.deserialize(claim_value)\n except (DecodeError, InvalidAddress, KeyError, TypeError):\n raise DecodeError(claim_value)", "title": "" }, { "docid": 
"82b98819ac3837980b2036b47c84d4ce", "score": "0.5515442", "text": "def decode(self, msg_bytes):\n return msg_bytes.decode()", "title": "" }, { "docid": "61f991314fc23783b17237aac7ff0ffb", "score": "0.55093384", "text": "def _decode(self, value):\n if isinstance(value, list):\n return [self._decode(v) for v in value]\n elif isinstance(value, dict):\n return {self._decode(k): self._decode(v)\n for k, v in value.items()}\n elif isinstance(value, bytes):\n return value.decode('utf-8')\n return value", "title": "" }, { "docid": "f2e21c970c3759cb50a0a870aa6f8960", "score": "0.55089444", "text": "def base64decode(self, data):\n return base64.b64decode(data)", "title": "" }, { "docid": "1d3e05b54c1a487caffddc65a64bfc71", "score": "0.54954004", "text": "def decode_data(self, data: Any, msg: SecureMessage) -> Optional[bytes]:\n raise NotImplemented", "title": "" }, { "docid": "156204ada075ce34dc3ebc6c46c9f7bb", "score": "0.5495152", "text": "def from_bytes(data):\n if type(data) is str:\n return data\n\n return str(data)", "title": "" }, { "docid": "aa9c10b19baaddaa54ccf102e77f9ef6", "score": "0.5494931", "text": "def decode(cls, decodable_string):\n return json.loads(decodable_string)", "title": "" }, { "docid": "e6370326a0e4aebb4a8c87ab5c66bdea", "score": "0.5492694", "text": "def deserialize(self, data):", "title": "" }, { "docid": "51b7aca3972d22c91f5de561ec9f6d2e", "score": "0.54880637", "text": "def decode_json(data):\n \n return json.loads(data.decode('utf8'))", "title": "" }, { "docid": "74efd2700465e3437c5d77b6df1ef1a5", "score": "0.5483528", "text": "def deserialize(message: bytes) -> dict:\n return json.loads(message.decode(\"utf-8\"))", "title": "" }, { "docid": "d393fb5c041e223bb69f10093386a107", "score": "0.5482826", "text": "def deserialize(binary: bytes) -> Dict:\n if isinstance(binary, str):\n binary = unhexlify(binary)\n _, obj = ObjectSerializer().deserialize(binary)\n return obj", "title": "" }, { "docid": "ea6f9f5df3a497bb1391f2814300ed0b", "score": "0.54692763", "text": "def decode(self, s: str) -> [str]:", "title": "" }, { "docid": "1c4dd23d8ee9dfe221ab1cea71ec3518", "score": "0.5461739", "text": "def bytes_encoded_yaml_to_dict(encoded_yaml: str) -> dict:\n decoded_yaml = base64.b64decode(encoded_yaml).decode('utf-8')\n dict_from_yaml = yaml.load(decoded_yaml, Loader=yaml.FullLoader)\n return dict_from_yaml", "title": "" }, { "docid": "0dc394a58ddbd765eb1f3be37fb6ee6c", "score": "0.54572654", "text": "def decode(val):\n try:\n # assume it is an encoded bytes object\n return val.decode('utf-8')\n except AttributeError:\n # it was an already decoded unicode object\n return val", "title": "" }, { "docid": "03b489aaa31488ed3ed7a27d2e853464", "score": "0.5455847", "text": "def decode(self, encoded, parentFieldName=\"\"):\n\n if parentFieldName != \"\":\n fieldName = \"%s.%s\" % (parentFieldName, self.name)\n else:\n fieldName = self.name\n\n return ({fieldName: ([[0, 0]], \"input\")}, [fieldName])", "title": "" }, { "docid": "4d1557136976701eebc00eec3b2fff28", "score": "0.54383487", "text": "def decode(self, data):\r\n try:\r\n if data[0] == 0:\r\n return self.hid [data[2]] \r\n if data[0] == 2:\r\n return self.hid2 [data[2]] \r\n except KeyError:\r\n return '?'", "title": "" }, { "docid": "03abadb38a889cdfc4e9d5c917717200", "score": "0.5436302", "text": "def decode_string(self) -> str:", "title": "" }, { "docid": "6e3f46e4197163e915125925df69e163", "score": "0.5418049", "text": "def _decrypt_request(raw: bytes) -> dict[str, typing.Any]:\n try:\n raw_json = 
encryption.decrypt_message(base64.b64decode(raw))\n except ValueError:\n raise utils.RequestError(3113)\n try:\n return json.loads(raw_json.decode())\n except json.JSONDecodeError:\n raise utils.RequestError(3113)\n except UnicodeDecodeError:\n raise utils.RequestError(3113)", "title": "" }, { "docid": "e451418306c9de9c81619f272c9d7bbf", "score": "0.54029983", "text": "def b64decode(data: Union[str, bytes], urlsafe: bool = False) -> Union[str, bytes]:\n if not isinstance(data, bytes):\n data = data.encode(\"ascii\")\n if urlsafe:\n b64 = base64.urlsafe_b64decode(data)\n else:\n b64 = base64.b64decode(data)\n try:\n return b64.decode(\"ascii\")\n except UnicodeDecodeError:\n return b64", "title": "" }, { "docid": "3789c2d1a6ee7f9029c19b72692d81a5", "score": "0.5389138", "text": "def loads(data,beSilent=False):\n buffer = StringIO(data)\n header = buffer.read(len(HEADER))\n if (not beSilent):\n assert header == HEADER\n option = buffer.read(1)\n decompress = False\n if option == \"Z\":\n buffer = StringIO(zlib.decompress(buffer.read()))\n try:\n value = decoder[buffer.read(1)](buffer)\n except KeyError, e:\n raise DecodeError, \"Type prefix not supported. (%s)\" % e\n\n value = dict2Obj(value)\n return value", "title": "" }, { "docid": "f568f9b35c3d9b0d0650bb32ea1f953e", "score": "0.5361082", "text": "def decode(data: bytes) -> str:\n\n # ignore if the start bytes is missing\n # start the data after the first comma\n start_index = data.find(b',')\n if start_index == -1:\n raise InvalidFormatException(\"could not find a comma\")\n\n end_index = data.find(AnemometerDecoder.END_BYTE)\n\n if end_index == -1:\n raise InvalidFormatException(\"line is missing the end byte\")\n\n # TODO: perform the checksum\n # for now the checksum is ignored\n\n # take only the data - remove the end byte and the comma before it\n data = data[start_index + 1:end_index - 1]\n\n return data.decode('utf-8')", "title": "" }, { "docid": "cced15976fb3085c2f0eb4c64d456d03", "score": "0.53556716", "text": "def decode_email(email):\n return quopri.decodestring(email.get_payload())", "title": "" }, { "docid": "7205e246a0281872b04a34b870b74e40", "score": "0.535378", "text": "def decode(self, z):\n pass", "title": "" }, { "docid": "7071e2c2c93e55607d5da073f68ec60b", "score": "0.5349912", "text": "def _trans_bytes(data):\n if platforms.is_py2():\n return data\n if isinstance(data, bytes):\n try:\n data = bytes.decode(data)\n except Exception:\n data = 'Error to decode result'\n return data", "title": "" }, { "docid": "b2eb44a257baa0927c712754b03ae383", "score": "0.5343873", "text": "def _decode_utf8(s):\r\n return unicode(s, 'utf-8')", "title": "" }, { "docid": "3e74fd6111e86297eeb65f0ec3cb02bc", "score": "0.53425944", "text": "def decode_from_data(data, key, dec_type=\"list\"):\n default_value = list() if dec_type == \"list\" else dict()\n data = data.get(key, default_value)\n\n if data == \"\":\n return default_value\n\n if data and isinstance(data, str):\n try:\n data = json.loads(data)\n except json.decoder.JSONDecodeError:\n return -1\n\n return data", "title": "" }, { "docid": "285e48aa57dac59a983085aaa47a892f", "score": "0.53312343", "text": "def _from_bytes(self, byte_data, key=''):\n\n if not can_encrypt and key:\n result = decode_safely(byte_data)\n elif can_encrypt and key:\n cipher = AESCipher(key)\n result = decode_safely(cipher.decrypt(byte_data))\n else:\n result = decode_safely(byte_data)\n return result", "title": "" }, { "docid": "8c1ad9564bf14209070c8c608e1fa38a", "score": "0.5329614", "text": "def 
decode(msg):\n unpickler = Unpickler(BytesIO(msg))\n unpickler.find_global = find_global\n try:\n unpickler.find_class = find_global # PyPy, zodbpickle, the non-c-accelerated version\n except AttributeError:\n pass\n try:\n return unpickler.load() # msgid, flags, name, args\n except:\n log(\"can't decode message: %s\" % short_repr(msg),\n level=logging.ERROR)\n raise", "title": "" }, { "docid": "dc8709fdff2610bb6e7f42bf3263a141", "score": "0.5325542", "text": "def msgpack_decode(enc):\n decoded = enc\n if not isinstance(enc, dict):\n decoded = msgpack.unpackb(base64.b64decode(enc), raw=False)\n if \"type\" in decoded:\n return transaction.Transaction.undictify(decoded)\n if \"l\" in decoded:\n return transaction.LogicSig.undictify(decoded)\n if \"msig\" in decoded:\n return transaction.MultisigTransaction.undictify(decoded)\n if \"lsig\" in decoded:\n if \"txn\" in decoded:\n return transaction.LogicSigTransaction.undictify(decoded)\n return transaction.LogicSigAccount.undictify(decoded)\n if \"sig\" in decoded:\n return transaction.SignedTransaction.undictify(decoded)\n if \"txn\" in decoded:\n return transaction.Transaction.undictify(decoded[\"txn\"])\n if \"subsig\" in decoded:\n return transaction.Multisig.undictify(decoded)\n if \"txlist\" in decoded:\n return transaction.TxGroup.undictify(decoded)\n if \"t\" in decoded:\n return auction.NoteField.undictify(decoded)\n if \"bid\" in decoded:\n return auction.SignedBid.undictify(decoded)\n if \"auc\" in decoded:\n return auction.Bid.undictify(decoded)", "title": "" }, { "docid": "82bdb26577b9b94fac8e25e96f7ceae1", "score": "0.53171575", "text": "def deserializeData(data:bytes, ct:ContentSerializationType) -> JSON:\n\tif len(data) == 0:\n\t\treturn {}\n\tif ct == ContentSerializationType.JSON:\n\t\treturn cast(JSON, json.loads(data.decode(\"utf-8\")))\n\telif ct == ContentSerializationType.CBOR:\n\t\treturn cast(JSON, cbor2.loads(data))\n\t# except Exception as e:\n\t# \tLogging.logErr(f'Deserialization error: {str(e)}')\n\treturn None", "title": "" } ]
d43e4579041b8be2490d535ce802d27f
The arguments have the same meaning as in the dropout_layer function.
[ { "docid": "39c9b2b3ee640bd9c9519a3485bbd8b8", "score": "0.0", "text": "def enable_dropout(train_premasks, train_postmasks):\n for premask in train_premasks:\n premask.set_value(1)\n for postmask in train_postmasks:\n postmask.set_value(np.cast[floatX](1))", "title": "" } ]
[ { "docid": "c25be25acb9e00e6b9991291b5072f2e", "score": "0.64838076", "text": "def create_dropout_layer(self):\n raise NotImplementedError", "title": "" }, { "docid": "3ce062bbf7af24958e07c6fe63adccf0", "score": "0.6317723", "text": "def push_dropout_layer(self, keep_prob):\n layer_name = 'dropout' + str(self.size)\n self.layers.append(layer_name)\n # self.layers.append({\"type\":\"dropout\",'prob':prob})\n self.push_back(DropoutLayer(keep_prob, layer_name))", "title": "" }, { "docid": "8279f21ea1e11fa40a6491954170a604", "score": "0.61981696", "text": "def get_params(self):\n my_params = super(DropoutLayer, self).get_params()\n my_params[\"dropout_p\"] = self.dropout_p\n return my_params", "title": "" }, { "docid": "c8b98a4f2a35b87c783078ae8abb02a9", "score": "0.60969514", "text": "def handlerDropLayer(self):\n pass", "title": "" }, { "docid": "77fa41c7adeb33fb205bb39125ff0ed4", "score": "0.6075677", "text": "def get_dropout(**kwargs):\n backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)\n\n class FixedDropout(layers.Dropout):\n def _get_noise_shape(self, inputs):\n if self.noise_shape is None:\n return self.noise_shape\n\n symbolic_shape = backend.shape(inputs)\n noise_shape = [symbolic_shape[axis] if shape is None else shape\n for axis, shape in enumerate(self.noise_shape)]\n return tuple(noise_shape)\n\n return FixedDropout", "title": "" }, { "docid": "655e09d5b3ac1acfd05487131b8cb79f", "score": "0.59961367", "text": "def dolly(*args, **kwargs):\n\n pass", "title": "" }, { "docid": "2f7ae02c07a46049787140cca779a08f", "score": "0.59933174", "text": "def dolly(*args, **kwargs):\n pass", "title": "" }, { "docid": "27a13d78a8dfad1ad06d005c18825438", "score": "0.5986018", "text": "def _fix_outputs(self, op_name, outputs):\n if op_name == 'Dropout':\n if len(outputs) == 1:\n return outputs\n # TODO(zhreshold): support dropout mask?\n outputs = outputs[:-1]\n return outputs", "title": "" }, { "docid": "d47e17d126a0ee12b5b1d00de9f5df62", "score": "0.5964656", "text": "def toolDropped(*args, **kwargs):\n pass", "title": "" }, { "docid": "951c2d6b366c9faf62cb0b92ed985dd2", "score": "0.5881264", "text": "def _onnx_crypten_dropout(g, input, p, train):\n r, _ = g.op(\"Dropout\", input, ratio_f=p, outputs=2)\n return r", "title": "" }, { "docid": "1e486f1271ae5a88fc75a7eb49cd6f5c", "score": "0.5874585", "text": "def prepare_layer(self, **kargs):\n pass", "title": "" }, { "docid": "7cd69ee04a87025dfb407243962aa0b5", "score": "0.5851011", "text": "def dropout_layer(inputs, rate, training, type='regular'):\r\n if rate == 0:\r\n return inputs\r\n if type == 'regular':\r\n outputs = tf.keras.layers.Dropout(rate=rate, name='dropout')(inputs, training=training)\r\n elif type == 'spatial':\r\n outputs = tf.keras.layers.SpatialDropout3D(rate=rate, name='dropout')(inputs, training=training)\r\n else:\r\n raise NotImplementedError\r\n return outputs", "title": "" }, { "docid": "8f8ba5e7c8e4fb84540dcfe690ae6fe5", "score": "0.58081007", "text": "def replace_dropout_layers():\n\n pattern_div = re.compile('/dropout/div')\n pattern_mul = re.compile('/dropout/mul')\n op_outputs_old, op_outputs_new = [], []\n for op in tf.get_default_graph().get_operations():\n if re.search(pattern_div, op.name) is not None:\n x = tf.identity(op.inputs[0])\n op_outputs_new += [x]\n if re.search(pattern_mul, op.name) is not None:\n op_outputs_old += [op.outputs[0]]\n\n return op_outputs_old, op_outputs_new", "title": "" }, { "docid": "41ee8449415610c52b1d23635da4dd0c", "score": "0.576443", "text": "def 
OnDrop(self, x, y):", "title": "" }, { "docid": "41ee8449415610c52b1d23635da4dd0c", "score": "0.576443", "text": "def OnDrop(self, x, y):", "title": "" }, { "docid": "41ee8449415610c52b1d23635da4dd0c", "score": "0.5763812", "text": "def OnDrop(self, x, y):", "title": "" }, { "docid": "758df7ace469f1c16fa694b683164485", "score": "0.57514757", "text": "def __init__(self, classifier, input_dim, output_dim):\n super(MTNetworkSingleLayerDropout, self).__init__()\n self.classifier = classifier\n self.layer = nn.Linear(input_dim, output_dim)\n self.dropout1 = nn.Dropout(p=0.1)", "title": "" }, { "docid": "d35e44a4aa821de3d87ddd4b84c4b206", "score": "0.57493645", "text": "def send_drop(self):\n self.display.out_queue.append((self.pack_arguments(4), ()))", "title": "" }, { "docid": "29cfc5aaa58bf7620d3384e2ef43ac64", "score": "0.57493323", "text": "def apply_dropout(inputs, dropout_rate, filterwise_dropout):\n if filterwise_dropout:\n return tf.keras.layers.Dropout(\n dropout_rate, noise_shape=[inputs.shape[0], 1, 1, inputs.shape[3]\n ])(inputs, training=True)\n else:\n return tf.keras.layers.Dropout(dropout_rate)(inputs, training=True)", "title": "" }, { "docid": "3347934d7ca088de9983df7ea57c72b1", "score": "0.57485396", "text": "def dropout_forward(x, dropout_param):\r\n \r\n# Get the current dropout mode, p, and seed\r\n p, mode = dropout_param['p'], dropout_param['mode']\r\n if 'seed' in dropout_param:\r\n np.random.seed(dropout_param['seed'])\r\n \r\n# Initialization of outputs and mask\r\n mask = None\r\n out = None\r\n \r\n if mode == 'train':\r\n # create and apply mask (normally p = 0.5for half of neurons), we scale all\r\n # by p to avoid having to multiply by p on backpropagation, this is called \r\n # inverted dropout\r\n\r\n mask = (np.random.rand(*x.shape) < p) / p\r\n \r\n # Apply mask\r\n out = x * mask\r\n elif mode == 'test':\r\n # during prediction no mask is used\r\n mask = None\r\n out = x\r\n \r\n # Save mask and dropout parameters for backprop\r\n cache = (dropout_param, mask)\r\n \r\n # convert \"out\" type and return ouput and cache\r\n out = out.astype(x.dtype, copy = False)\r\n return out, cache", "title": "" }, { "docid": "b59c943b097dd62a0be14057f63e71d5", "score": "0.57463795", "text": "def dropout(self, dropout):\n\n self._dropout = dropout", "title": "" }, { "docid": "afda3abc6e81e45a8581427f676e49c1", "score": "0.569868", "text": "def MultiDropout(shapes, dropout = 0.):\n return [Dropout(shape, dropout) for shape in shapes]", "title": "" }, { "docid": "fd33eeda01a6e4becfba21c44bb500ba", "score": "0.5673957", "text": "def dropout(x, rate, name=None, **kwargs):\n return activation_ops.dropout(x, rate, name=name, **kwargs)", "title": "" }, { "docid": "93e22aac56c279d898da6cb9a3674436", "score": "0.5672959", "text": "def dummy(self, arg):\r\n print(self._layer)\r\n print(arg)", "title": "" }, { "docid": "a0788038d7161f939f1094bd5df10e69", "score": "0.5668472", "text": "def _onnx_crypten_feature_dropout(g, input, p, train):\n r, _ = g.op(\"DropoutNd\", input, ratio_f=p, outputs=2)\n return r", "title": "" }, { "docid": "b65055a9cd79868dbc7e5a23d86d1baf", "score": "0.56649363", "text": "def relu_dropout_layer(input_tensor, input_dim, output_dim, name):\n with tf.name_scope(name):\n weights = weight_variable(input_dim + output_dim)\n biases = bias_variable(output_dim)\n with tf.name_scope('ReLU'):\n relu_activation = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)\n with tf.name_scope('dropout'):\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n 
dropout_activation = tf.nn.dropout(relu_activation, keep_prob, seed=get_seed())\n return dropout_activation, keep_prob", "title": "" }, { "docid": "49aca02b2cfc4cf16fe8419e246de243", "score": "0.565018", "text": "def dropout(prev_layer, hype_space, for_convolution_else_fc=True):\n if for_convolution_else_fc:\n return keras.layers.core.Dropout(\n rate=hype_space['conv_dropout_drop_proba']\n )(prev_layer)\n else:\n return keras.layers.core.Dropout(\n rate=hype_space['fc_dropout_drop_proba']\n )(prev_layer)", "title": "" }, { "docid": "690f9dd808e3b59339924e4dba409b18", "score": "0.5648344", "text": "def layer(self, t=None):", "title": "" }, { "docid": "09806cb8d3323123ad37cd7bbc11fa56", "score": "0.5646418", "text": "def add_drop_out_layer(self, input_layer):\n return tf.nn.dropout(input_layer, self.keep_prob)", "title": "" }, { "docid": "402ea320b7df4d24152cc93edbaa612d", "score": "0.56430256", "text": "def dropout_backward(dout,cache):\n\t# recover dropout parameters(p,mask,mode)from cache\n\tdropout_param, mask = cache\n\tmode = dropout_param['mode']\n\n\tdx = None\n\t# back propagate (dropout layer has no parameters just input x)\n\tif mode == \"train\":\n\t\t# just back propagate dout from the neurons that were used during dropout\n\t\tdx = dout * mask\n\n\telif mode == \"test\":\n\t\t# disable dropout during prediction/test\n\t\tdx = dout\n\n\treturn dx", "title": "" }, { "docid": "96763f4293d561f6cb8f061064c1f1d9", "score": "0.56397945", "text": "def dropout(dropout_prob: float = 0.1):\n return nn.Dropout2d(p = dropout_prob)", "title": "" }, { "docid": "0e297eb4d1cb21f783c34b1d8d05ed13", "score": "0.5630197", "text": "def _add_outlayer_args(parser):\n parser.add_argument(\n \"--prediction_sample\", default=None,\n help=\"file of predictions for quantile norm\")\n parser.add_argument(\n \"--h5_saver_batch_size\", default=2048, type=int,\n help=\"chunking before saving to h5 (improve I/O)\")\n parser.add_argument(\n \"--sample_size\", default=10000000, type=int,\n help=\"total regions (post filtering) to return\")\n \n return", "title": "" }, { "docid": "191eb2b2fb42844093bfc6a1f6c51a2e", "score": "0.56234145", "text": "def dropout(self, keep):\n \n return self.add(Dropout(keep))", "title": "" }, { "docid": "56848b0d0c93d93397a809ef3b3eee71", "score": "0.56189144", "text": "def dropout_op(node_in, keep_prob, ctx=None):\r\n return DropoutOp(node_in, keep_prob, ctx=ctx)", "title": "" }, { "docid": "9865ea82335bd2175fd7472dea2f55fd", "score": "0.5613924", "text": "def layerButton(*args, **kwargs):\n pass", "title": "" }, { "docid": "29404e888482418de8ee8f2198bc4947", "score": "0.5609234", "text": "def call(self, states):\n\t\t# TODO: implement this!\n\t\thidden_output1 = self.hidden_layer1(states)\n\t\thidden_output2 = self.hidden_layer2(self.dropout1(hidden_output1))\n\t\treturn self.output_layer(self.dropout2(hidden_output2))\n\t\tpass", "title": "" }, { "docid": "2e2b6c1ceb2b774ce035d4c66097f3db", "score": "0.55978787", "text": "def layer5_dropout(self, input):\n\n self.dropout_prob = tf.placeholder(tf.float32)\n do_layer = tf.nn.dropout(input, keep_prob=self.dropout_prob)\n\n W_fc2 = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1))\n b_fc2 = tf.Variable(tf.constant(0.1, shape=[10]))\n\n o_layer = tf.matmul(do_layer, W_fc2) + b_fc2\n output = tf.nn.softmax(o_layer)\n return output", "title": "" }, { "docid": "06425049369b2f28512c7cf5cbe7abe9", "score": "0.5574219", "text": "def dropout_forward(x, dropout_param):\n p, mode = dropout_param[\"p\"], dropout_param[\"mode\"]\n if 
\"seed\" in dropout_param:\n np.random.seed(dropout_param[\"seed\"])\n\n mask = None\n out = None\n\n if mode == \"train\":\n #######################################################################\n # TODO: Implement training phase forward pass for inverted dropout. #\n # Store the dropout mask in the mask variable. #\n #######################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n mask = (np.random.rand(*x.shape) < p ) / p\n out = mask * x\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == \"test\":\n #######################################################################\n # TODO: Implement the test phase forward pass for inverted dropout. #\n #######################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n out = x\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "title": "" }, { "docid": "0f95557373aea823117fe1e962e5d767", "score": "0.55681443", "text": "def dropout_forward(x, dropout_param):\n p, mode = dropout_param['p'], dropout_param['mode']\n mask = None\n \n if mode == 'train':\n # divide p is for inverted dropout to improve test-time performance\n mask = (np.random.rand(*x.shape)<p) /p\n out = x * mask\n\n elif mode == 'test':\n out = x\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "title": "" }, { "docid": "3fd9721b252b07cc92698f9825c9368a", "score": "0.5567754", "text": "def dropout_forward(x, dropout_param):\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n ###########################################################################\n # TODO: Implement the training phase forward pass for inverted dropout. #\n # Store the dropout mask in the mask variable. #\n ###########################################################################\n \n mask = (np.random.rand(*x.shape) < p) / p;\n out = x*mask;\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n elif mode == 'test':\n ###########################################################################\n # TODO: Implement the test phase forward pass for inverted dropout. 
#\n ###########################################################################\n out = x;\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "title": "" }, { "docid": "09ba481d015af065677e0a02e0740c43", "score": "0.556395", "text": "def CallTakeDrop(self, *arg):\r\n self.CallBack(self.varNBTakeDrop)", "title": "" }, { "docid": "6087a204d22dda9148ac6e9157c757d1", "score": "0.5557589", "text": "def dropout_forward(x, dropout_param):\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase forward pass for inverted dropout. #\n # Store the dropout mask in the mask variable. #\n #######################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n \n mask = (np.random.rand(* x.shape) < p) / p\n out = x * mask\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test phase forward pass for inverted dropout. #\n #######################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n mask = None\n out = x\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "title": "" }, { "docid": "ca9a09514413a041ff5342e9c4f18bbd", "score": "0.5539942", "text": "def dropout_forward(x, dropout_param):\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask, out = None, None\n if mode == 'train':\n # TODO: Implement the training phase forward pass for inverted dropout. #\n # Store the dropout mask in the mask variable. #\n mask = (np.random.randn(*x.shape) < p) / p\n out = x * mask\n elif mode == 'test':\n # TODO: Implement the test phase forward pass for inverted dropout. #\n out = x\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "title": "" }, { "docid": "dc45fd1c2d9170bb9f8e2c4bd671779c", "score": "0.55392736", "text": "def dropout_forward(x, dropout_param):\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n ###########################################################################\n # TODO: Implement the training phase forward pass for inverted dropout. #\n # Store the dropout mask in the mask variable. 
#\n ###########################################################################\n # 2 lines of code expected\n mask = (np.random.rand(*x.shape) < p)/p\n out = x*mask\n\n pass\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n elif mode == 'test':\n ###########################################################################\n # TODO: Implement the test phase forward pass for inverted dropout. #\n ###########################################################################\n # 1 line of code expected\n out = x\n\n pass\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "title": "" }, { "docid": "6ca56c2950068975e7ea2373e798cf0e", "score": "0.5537654", "text": "def backward(self, dout, index, args):\n n = len(self.layers)\n layer_d = dout\n for i in range(n-1, -1, -1):\n layer = self.layers[i]\n # print(layer.name)\n if i == n - 1:\n layer_d = layer.backward(layer_d, args, index=index)\n else:\n layer_d = layer.backward(layer_d, args)\n return", "title": "" }, { "docid": "bc09f915eaef9d729909996d1eeb9b78", "score": "0.551224", "text": "def __call__(self, inputs, states):\n output, states = self.base_cell(inputs, states)\n if self.dropout_outputs > 0:\n output = symbol.Dropout(data=output, p=self.dropout_outputs)\n if self.dropout_states > 0:\n states = symbol.Dropout(data=states, p=self.dropout_states)\n return output, states", "title": "" }, { "docid": "43a7d27e1e396113ab72b65973d358d6", "score": "0.55108887", "text": "def dropout_forward(x, dropout_param):\n\n\t# get the current dropout mode, p , and seed\n\tp, mode = dropout_param['p'], dropout_param['mode']\n\tif 'seed' in dropout_param:\n\t\tnp.random.seed(dropout_param['seed'])\n\n\t# initialization of the outputs and mask\n\tmask = None\n\tout = None\n\n\tif mode == \"train\":\n\t\t# Create an apply mask (normally p=0.5 for half of neurons), we scale all\n\t\t# by p to avoid having to multiply by p on backpropagation,\n\t\t# this is called inverted dropout\n\t\tmask = (np.random.randn[*x.shape]<p)/p\n\t\t#apply mask\n\t\tout = x * mask\n\n\telif mode == \"test\":\n\t\t# during prediction no mask is used\n\t\tmask = None\n\t\tout = x\n\n\t# Save mask and dropout parameters for backpropagation\n\tcache = (dropout_param, mask)\n\n\t#convert \"out\" type and return output and cache\t\n\tout = out.astype[x.dtype, copy=false]\n\treturn out,cache", "title": "" }, { "docid": "587a00546b58903c830eb1c8406617ab", "score": "0.5506868", "text": "def dropout_forward(x, dropout_param):\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase forward pass for inverted dropout. #\n # Store the dropout mask in the mask variable. 
#\n #######################################################################\n \n mask = np.random.uniform(0,1,x.shape) \n out = x * (mask < p) / p\n \n# =============================================================================\n# mask = np.random.binomial(1,1-p,size=x.shape)\n# out = x * mask\n# =============================================================================\n \n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test phase forward pass for inverted dropout. #\n #######################################################################\n \n out = x \n \n# =============================================================================\n# out = x * (1-p)\n# =============================================================================\n \n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "title": "" }, { "docid": "147a9dbab7055a874a83f8dbc2718ef0", "score": "0.54839265", "text": "def dropout_forward(x, dropout_param):\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n\n\n mask = (np.random.rand(*x.shape) >= p) / (1 - p)\n # mask = (np.random.rand(x.shape[1]) >= p) / (1 - p)\n out = x * mask\n\n\n elif mode == 'test':\n\n\n out = x\n\n\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "title": "" }, { "docid": "eb114cc844102a424f73daba84c0fe05", "score": "0.54834175", "text": "def dropout_forward(x, dropout_param):\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase forward pass for inverted dropout. #\n # Store the dropout mask in the mask variable. #\n #######################################################################\n mask = (np.random.rand(*x.shape) < p) / p\n out = x*mask\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test phase forward pass for inverted dropout. 
#\n #######################################################################\n out = x\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "title": "" }, { "docid": "82b4c788cfaa4e6fea758b7255aca0e9", "score": "0.5471207", "text": "def _dropout_from_layer(theano_rng, hid_out, p):\n # p=1-p because 1's indicate keep and p is prob of dropping\n return theano_rng.binomial(n=1, p=1-p, size=hid_out.shape,\n dtype=theano.config.floatX) * hid_out", "title": "" }, { "docid": "c28fa8934ebcdadcca9e1d6b900e7549", "score": "0.5463676", "text": "def dropout_forward(x, dropout_param):\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n ##############################################################\n # TODO: Implement the training phase forward pass for #\n # inverted dropout. Store the dropout mask in the mask #\n # variable. #\n ##############################################################\n mask = (np.random.rand(*x.shape) < p) / p\n out = x * mask\n ##############################################################\n # END OF YOUR CODE #\n ##############################################################\n elif mode == 'test':\n ##############################################################\n # TODO: Implement the test phase forward pass for inverted #\n # dropout. #\n ##############################################################\n out = x\n ##############################################################\n # END OF YOUR CODE #\n ##############################################################\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "title": "" }, { "docid": "2810a5b2836a6b545b22ecec3472ed67", "score": "0.54635686", "text": "def dropout(inputs,\n rate=0.5,\n noise_shape=None,\n seed=None,\n training=False,\n name=None):\n warnings.warn('`tf.layers.dropout` is deprecated and '\n 'will be removed in a future version. '\n 'Please use `tf.keras.layers.Dropout` instead.')\n layer = Dropout(rate, noise_shape=noise_shape, seed=seed, name=name)\n return layer.apply(inputs, training=training)", "title": "" }, { "docid": "07fbf24dedde35680e8d926b69d7c993", "score": "0.54571337", "text": "def animLayer(*args, **kwargs):\n pass", "title": "" }, { "docid": "28250943179d33e4992a23be6dcd45b9", "score": "0.54498386", "text": "def apply_dropout(m):\r\n if type(m) == torch.nn.Dropout2d:\r\n m.train()", "title": "" }, { "docid": "e551a89141f8f092ce88b16edadcee63", "score": "0.54472667", "text": "def dropout_backward(dout, cache):\n dropout_param, mask = cache\n mode = dropout_param['mode']\n\n dx = None\n if mode == 'train':\n\n\n dx = dout * mask\n\n\n elif mode == 'test':\n dx = dout\n return dx", "title": "" }, { "docid": "70a5bb4c2ba16801a405a4309909e873", "score": "0.54448617", "text": "def dropout_forward(x, dropout_param):\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase forward pass for inverted dropout. #\n # Store the dropout mask in the mask variable. 
#\n #######################################################################\n # mask = (np.random.rand(np.shape(x)[0], np.shape(x)[1] if len(np.shape(x)) > 1 else 1) < p).astype(int)\n # out = np.multiply(x.reshape(np.shape(mask)), mask)\n mask = (np.random.rand(*x.shape) < p)\n out = x * mask\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test phase forward pass for inverted dropout. #\n #######################################################################\n mask = None\n out = x\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "title": "" }, { "docid": "68c856cc96a3c6020ed97b385182f00a", "score": "0.54391503", "text": "def dropout_backward(dout, cache):\r\n\r\n\t# Recover dropout parameters (p, mask, mode) from cache\r\n\tdropout_param, mask = cache\r\n\tmode = dropout_param['mode']\r\n\r\n\tdx = None\r\n\t# Back propagate (Dropout layer has no parameters just input X)\r\n\tif mode == 'train':\r\n\t\t# Just back propagate dout from the neurons that were used during dropout\r\n\t\tdx = dout * mask\r\n\r\n\telif mode = 'test':\r\n\t\t# Disable dropout during prediction/ test\r\n\t\tdx = dout\r\n\r\n\r\n\t# Return dx\r\n\treturn dx", "title": "" }, { "docid": "63b3479285a7ae3ebcc9d293d9c9343c", "score": "0.543669", "text": "def send_dnd_drop_performed(self):\n self.display.out_queue.append((self.pack_arguments(3), ()))", "title": "" }, { "docid": "974cc6e4d57b1c4cfcb1081cb4b75c64", "score": "0.54326755", "text": "def dropout(x, ratio=.5, **kwargs):\n x, t, l = x\n x = chainer.functions.dropout(x, ratio=ratio, **kwargs)\n return x, t, l", "title": "" }, { "docid": "158bd696ecdeb83fb22cd993f4943fee", "score": "0.54306126", "text": "def dropout_forward(x, dropout_param):\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase forward pass for inverted dropout. #\n # Store the dropout mask in the mask variable. #\n #######################################################################\n \n [N,D] = x.shape\n mask = (np.random.rand(N,D) < (1-p))/(1-p)\n out = x*mask\n \n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test phase forward pass for inverted dropout. 
#\n #######################################################################\n mask = None\n out = x\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "title": "" }, { "docid": "deb28fc618b47dcbbf5b541cdd4cce41", "score": "0.5426072", "text": "def dropout_constr(options, use_noise, trng, sampling):\n\n # if dropout is off, or we don't need it because we're sampling, multiply by 1\n # this is also why we make all arguments optional\n def get_layer(shape=None, dropout_probability=0, num=1):\n if num > 1:\n return theano.shared(numpy.array([1.] * num, dtype=floatX))\n else:\n return theano.shared(numpy_floatX(1.))\n\n if options['use_dropout']:\n # models trained with old dropout need to be rescaled at test time\n if sampling and options['model_version'] < 0.1:\n def get_layer(shape=None, dropout_probability=0, num=1):\n if num > 1:\n return theano.shared(numpy.array([1 - dropout_probability] * num, dtype=floatX))\n else:\n return theano.shared(numpy_floatX(1 - dropout_probability))\n elif not sampling:\n if options['model_version'] < 0.1:\n scaled = False\n else:\n scaled = True\n\n def get_layer(shape, dropout_probability=0, num=1):\n if num > 1:\n return shared_dropout_layer((num,) + shape, use_noise, trng, 1 - dropout_probability, scaled)\n else:\n return shared_dropout_layer(shape, use_noise, trng, 1 - dropout_probability, scaled)\n\n return get_layer", "title": "" }, { "docid": "435f61f628710ba97b72fba04af4ddb2", "score": "0.5419554", "text": "def dropout_forward(x, dropout_param):\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n mask = (np.random.rand(*x.shape) < p) / p # first dropout mask. 
Notice /p!\n out = x * mask\n elif mode == 'test':\n out = x\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "title": "" }, { "docid": "4885fc4682cadb09032d29e88cf25ac8", "score": "0.5419383", "text": "def run(layers):", "title": "" }, { "docid": "05020962be52dccc57b8ef980e27b7bd", "score": "0.5417766", "text": "def create_dropout_predict_function(model, dropout):\r\n \r\n # Load the config of the original model\r\n conf = model.get_config()\r\n # Add the specified dropout to all layers\r\n for layer in conf['layers']:\r\n # Dropout layers\r\n if layer[\"class_name\"]==\"Dropout\":\r\n #print(\"1\")\r\n layer[\"config\"][\"rate\"] = dropout\r\n # Recurrent layers with dropout\r\n elif \"dropout\" in layer[\"config\"].keys():\r\n #print(\"2\")\r\n #print(layer)\r\n #print(layer[\"config\"][\"dropout\"])\r\n layer[\"config\"][\"dropout\"] = dropout\r\n\r\n # Create a new model with specified dropout\r\n if type(model)==Sequential:\r\n # Sequential\r\n model_dropout = Sequential.from_config(conf)\r\n else:\r\n # Functional\r\n model_dropout = Model.from_config(conf)\r\n model_dropout.set_weights(model.get_weights()) \r\n \r\n # Create a function to predict with the dropout on\r\n predict_with_dropout = K.function(model_dropout.inputs+[K.learning_phase()], model_dropout.outputs)\r\n \r\n return predict_with_dropout", "title": "" }, { "docid": "d31618c408b5e80cf65c1a5fcdca2da6", "score": "0.54155517", "text": "def deactivate_dropout(self):\n for layer in self.layers:\n if isinstance(layer, Dropout):\n layer.deactivate()", "title": "" }, { "docid": "c3984c78db9937441b558dd576bd618f", "score": "0.5414331", "text": "def __init__(self, classifier, input_dim, hidden_dim, output_dim):\n super(MTNetworkTwoLayerDoubleDropout, self).__init__()\n self.classifier = classifier\n self.layer1 = nn.Linear(input_dim, hidden_dim)\n self.layer2 = nn.Linear(hidden_dim, output_dim)\n self.dropout1 = nn.Dropout(p=0.1)\n self.dropout2 = nn.Dropout(p=0.1)", "title": "" }, { "docid": "f6641427562f72399e33d15f47593821", "score": "0.5414024", "text": "def __init__(self, size: int, dropout_p: float = 0.0):\n super(SublayerConnection, self).__init__()\n self.norm = nn.LayerNorm(size)\n self.dropout_p = nn.Dropout(dropout_p)", "title": "" }, { "docid": "0e560e9f220eb928ba48e1b4d24bd7c2", "score": "0.5412067", "text": "def dropout(x, rate, training):\n\n return tf.layers.dropout(x, rate, training=training)", "title": "" }, { "docid": "5dd92bb7ad0d55fa03cb9bb2cda46f82", "score": "0.5411963", "text": "def dropout(d, len):\n if dropout_keep_prob < 1:\n prob = (1.0 - dropout_keep_prob) / len\n d = smart_cond(is_training, lambda: tf.nn.dropout(d, rate=prob), lambda: d)\n return d", "title": "" }, { "docid": "de504bf5c29277a0d7565958c44470c2", "score": "0.5404141", "text": "def build_target_drop(self):\n def drop(*args):\n return\n return drop", "title": "" }, { "docid": "ec89abdc2f59c081ab1b938a3d9c44b0", "score": "0.53933126", "text": "def activate_dropout(self):\n for layer in self.layers:\n if isinstance(layer, Dropout):\n layer.activate()", "title": "" }, { "docid": "191c21a7c6f4b014a38859c40987e7b2", "score": "0.5385725", "text": "def dropout_layer(state_before, use_noise, trng):\n proj = tensor.switch(use_noise,\n state_before *\n trng.binomial(state_before.shape, p=0.5, n=1, dtype=state_before.dtype),\n state_before * 0.5)\n return proj", "title": "" }, { "docid": "191c21a7c6f4b014a38859c40987e7b2", "score": "0.5385725", "text": "def dropout_layer(state_before, use_noise, 
trng):\n proj = tensor.switch(use_noise,\n state_before *\n trng.binomial(state_before.shape, p=0.5, n=1, dtype=state_before.dtype),\n state_before * 0.5)\n return proj", "title": "" }, { "docid": "48ec124c6d0d286ddc00cc6d58892f88", "score": "0.5384018", "text": "def __init__(self,\r\n queries_dropout=0.,\r\n keys_dropout=0.,\r\n values_dropout=0.,\r\n causal=True):\r\n super(Attention, self).__init__()\r\n\r\n self.q_dropout = tf.keras.layers.Dropout(queries_dropout)\r\n self.k_dropout = tf.keras.layers.SpatialDropout2D(keys_dropout)\r\n self.v_dropout = tf.keras.layers.SpatialDropout2D(values_dropout)\r\n\r\n # these parameters need to be stored so that\r\n # tf.layers.model.save_model works\r\n self.queries_dropout_rate = queries_dropout\r\n self.keys_dropout_rate = keys_dropout\r\n self.values_dropout_rate = values_dropout\r\n self.causal = causal", "title": "" }, { "docid": "25ad247551d2704a43de8661d3b8ef96", "score": "0.5367822", "text": "def __init__(self, classifier, input_dim, hidden_dim, output_dim):\n super(MTNetworkTwoLayerSingleDropout, self).__init__()\n self.classifier = classifier\n self.layer1 = nn.Linear(input_dim, hidden_dim)\n self.layer2 = nn.Linear(hidden_dim, output_dim)\n self.dropout1 = nn.Dropout(p=0.1)", "title": "" }, { "docid": "5fbb18347344f1216ce3594943c4d68e", "score": "0.5367302", "text": "def dropout(self):\n nodes_lib = []\n for ly in self._layers:\n if ly.is_dropout():\n remaining_nodes = ly.reset_dropout()\n else:\n remaining_nodes = np.arange(ly.get_size())\n nodes_lib.append(remaining_nodes)\n return nodes_lib", "title": "" }, { "docid": "312d82aa2dccf1d64c1e13780eea69ad", "score": "0.53614163", "text": "def dropout_backward(dout, cache):\n dropout_param, mask = cache\n mode = dropout_param['mode']\n\n if mode == 'train':\n dx = dout * mask\n\n elif mode == 'test':\n dx = dout\n \n return dx", "title": "" }, { "docid": "0c5adab142e2e08babf7de3833508286", "score": "0.53486484", "text": "def add_dropout_layer(self, threshold=0.5):\n drop_layer = gl.deeplearning.layers.DropoutLayer(threshold)\n self.layers.append(drop_layer)", "title": "" }, { "docid": "6de20def447d07f6d7470a855ba7d0e7", "score": "0.5348429", "text": "def dropout_backward(dout, cache):\r\n \r\n # recover dropout parameters (p, mask, mode) from cache\r\n dropout_param, mask = cache\r\n mode = dropout_param['mode']\r\n\r\n dx = None\r\n # Back propagate (Dropout laer has not parameters just input X)\r\n \r\n if mode == 'train':\r\n # just back propagate dout from teh neurons that were used during dropout\r\n dx = dout*mask\r\n elif mode =='test':\r\n # disable droput dring prediction / test\r\n dx = dout\r\n \r\n # return dx\r\n return dx", "title": "" }, { "docid": "f3b1bd9146a34959ad438056f94ff9de", "score": "0.53451914", "text": "def lenet2_dropout(graph):\n with graph.as_default():\n is_train = tf.placeholder(tf.bool, shape=(), name='is_train')\n keep_prob = tf.placeholder(tf.float32, shape=(), name='keep_prob')\n with tf.name_scope('Input'):\n with tf.name_scope('X'):\n\n X = tf.placeholder(DTYPE, shape=[None, 32*32*3], name='X')\n X_reshaped = tf.reshape(X, shape=[-1, 32, 32, 3])\n\n with tf.name_scope('y'):\n y = tf.placeholder(tf.int32, shape=[None], name='y')\n with tf.device(_gpu_device_name(0)):\n with tf.name_scope('conv1'):\n conv1 = tf.layers.conv2d(X_reshaped,\n filters=6,\n kernel_size=5,\n strides=1,\n padding='valid',\n activation=tf.nn.relu,\n name='conv1'\n )\n max_pool1 = tf.nn.max_pool(value=conv1,\n ksize=(1, 2, 2, 1),\n strides=(1, 2, 2, 1),\n padding='VALID',\n 
name='max_pool1')\n max_pool1 = tf.nn.dropout(max_pool1, keep_prob)\n with tf.name_scope('conv2'):\n conv2 = tf.layers.conv2d(max_pool1,\n filters=12,\n kernel_size=3,\n strides=1,\n padding='valid',\n activation=tf.nn.relu,\n name='conv2')\n max_pool2 = tf.nn.max_pool(value=conv2,\n ksize=(1, 2, 2, 1),\n strides=(1, 2, 2, 1),\n padding='VALID',\n name='max_pool2')\n max_pool2 = tf.nn.dropout(max_pool2, keep_prob)\n with tf.name_scope('conv3'):\n conv3 = tf.layers.conv2d(max_pool2,\n filters=12,\n kernel_size=3,\n strides=1,\n padding='valid',\n activation=tf.nn.relu,\n name='conv3')\n max_pool3 = tf.nn.max_pool(value=conv3,\n ksize=(1, 2, 2, 1),\n strides=(1, 2, 2, 1),\n padding='VALID',\n name='max_pool3')\n max_pool3 = tf.nn.dropout(max_pool3, keep_prob)\n\n with tf.name_scope('fc1'):\n flatten = tf.layers.Flatten()(max_pool3)\n fc1 = nn_layer(X=flatten, n_neurons=50, name='fc1', activation=tf.nn.relu)\n fc1 = tf.nn.dropout(fc1, keep_prob)\n with tf.name_scope('logits'):\n logits = nn_layer(X=fc1, n_neurons=10, name='logits', activation=None)\n\n return X, y, is_train, logits", "title": "" }, { "docid": "02c108815e496f61dba2f7bea588e1e1", "score": "0.5337546", "text": "def create_vgg(dropout):\n # Convolutional layers\n conv_layers = [\n ConvLayer(name='conv1_1', ksize=3, stride=1, maps=64,\n padding='same', activation='relu'),\n ConvLayer(name='conv1_2', ksize=3, stride=1, maps=64, padding='same',\n pool='max2d', pool_size=2, pool_stride=2,\n pool_padding='same', activation='relu'),\n\n ConvLayer(name='conv2_1', ksize=3, stride=1, maps=128, padding='same',\n activation='relu'),\n ConvLayer(name='conv2_2', ksize=3, stride=1, maps=128, padding='same',\n pool='max2d', pool_size=2, pool_stride=2,\n pool_padding='same', activation='relu'),\n\n ConvLayer(name='conv3_1', ksize=3, stride=1, maps=256, padding='same',\n activation='relu'),\n ConvLayer(name='conv3_2', ksize=3, stride=1, maps=256, padding='same',\n activation='relu'),\n ConvLayer(name='conv3_3', ksize=3, stride=1, maps=256, padding='same',\n pool='max2d', pool_size=2, pool_stride=2,\n pool_padding='same', activation='relu'),\n\n ConvLayer(name='conv4_1', ksize=3, stride=1, maps=512, padding='same',\n activation='relu'),\n ConvLayer(name='conv4_2', ksize=3, stride=1, maps=512, padding='same',\n activation='relu'),\n ConvLayer(name='conv4_3', ksize=3, stride=1, maps=512, padding='same',\n pool='max2d', pool_size=2, pool_stride=2,\n pool_padding='same', activation='relu'),\n\n ConvLayer(name='conv5_1', ksize=3, stride=1, maps=512, padding='same',\n activation='relu'),\n ConvLayer(name='conv5_2', ksize=3, stride=1, maps=512, padding='same',\n activation='relu'),\n ConvLayer(name='conv5_3', ksize=3, stride=1, maps=512, padding='same',\n pool='max2d', pool_size=2, pool_stride=2,\n pool_padding='same', activation='relu')\n ]\n\n # Fully connected layers\n fully_layers = [\n FullyCLayer(name='fc6', hidden=4096,\n dropout=dropout, activation='relu'),\n FullyCLayer(name='fc7', hidden=4096,\n dropout=dropout, activation='relu')\n ]\n\n return CNNConfig(conv_layers=conv_layers, full_layers=fully_layers)", "title": "" }, { "docid": "d1dd54e66091dd7423d3ba6617df267d", "score": "0.53317255", "text": "def backward(self, dout):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n for layer in reversed(self.layers):\n dout = layer.backward(dout)\n ########################\n # END OF YOUR CODE #\n #######################\n\n return", "title": "" }, { "docid": "d63706f75fd8d9cb3067772b0bbc4bbd", "score": "0.5321562", 
"text": "def make_dropout_graphs(dropout, time_dropout, accuracy_dropout, \n dataset_name='Mnist', time_stamp='20180520-132113', data_load = False, dir='./result_data/graphs/'):\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\n # Loading in data\n if data_load:\n dropout = pickle.load(open(\"dropout_\" + time_stamp, \"rb\"))\n accuracy_dropout = pickle.load(open(\"accuracy_dropout_\" + time_stamp, \"rb\"))\n time_dropout = pickle.load(open(\"time_dropout_\" + time_stamp, \"rb\"))\n\n fig = plt.figure()\n ax1 = fig.add_subplot(121)\n # Dropout graphs\n #horiz_accuracy_data = np.array([np.mean(accuracy_dropout[0]) for i in range(len(dropout))])\n #ax1.plot(dropout, horiz_accuracy_data, label='No Dropout')\n ax1.scatter(dropout, accuracy_dropout, label='Dropout')\n ax1.set_title (dataset_name + ' Accuracy vs. Dropout')\n ax1.set_xlabel('Dropout Layer %')\n ax1.set_ylabel('Accuracy')\n ax1.legend()\n fig.savefig(dir + timestr + dataset_name + '_dropout_accuracy.png')\n\n ax2 = fig.add_subplot(122)\n #horiz_time_data = np.array([np.mean(baseline_time) for i in range(len(dropout_dropout))])\n #plt.plot(horiz_time_data, time_list, label='No dropout')\n ax2.scatter(dropout, time_dropout, label='Dropout')\n ax2.set_title (dataset_name + ' Time vs. Dropout')\n ax2.set_xlabel('Dropout Layer %')\n ax2.set_ylabel('Time')\n ax2.legend()\n fig.savefig(dir + timestr + dataset_name + '_dropout_time.png')", "title": "" }, { "docid": "1616795245deb64f8944d8504031834e", "score": "0.53172797", "text": "def dropout(x, keep_prob):\n return tf.nn.dropout(x, keep_prob)", "title": "" }, { "docid": "a1c852a5baf8120ae83edc79c80ada03", "score": "0.5310219", "text": "def dst(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "69bd8d9c8ce4ebfb655179e9ac3e5af1", "score": "0.53088033", "text": "def dropout_create_layer(prev, n, activation, keep_prob):\n init = tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_AVG\")\n reg = tf.layers.Dropout(keep_prob)\n\n layer = tf.layers.Dense(units=n, activation=activation,\n kernel_initializer=init, kernel_regularizer=reg)\n return layer(prev)", "title": "" }, { "docid": "b7d02c26f97031c4a9d5809accdc9887", "score": "0.5296763", "text": "def dropout(self, layer):\n\t\tp=0.5\n\t\tif self.testing:\n\t\t\treturn layer*p\n\t\telse:\n\t\t\trng = np.random.RandomState(99999)\n\t\t\tsrng = theano.tensor.shared_randomstreams.RandomStreams(rng.randint(999999))\n\t\t\t# p=1-p because 1's indicate keep and p is prob of dropping\n\t\t\tmask = srng.binomial(n=1, p=1-p, size=layer.shape)\n\t\t\t# The cast is important because\n\t\t\t# int * float32 = float64 which pulls things off the gpu\n\t\t\treturn layer * T.cast(mask, theano.config.floatX)", "title": "" }, { "docid": "861b8980f4c6d041d894bb7a15bce7fa", "score": "0.52870893", "text": "def __call__(self, inputs, state, time_step, **kwargs):\n if (self._input_keep_prob < 1):\n inputs = tf.nn.dropout(inputs, self._input_keep_prob, seed=self._seed)\n output, new_state = self._cell(inputs, state, time_step, **kwargs)\n if (self._output_keep_prob < 1):\n output = tf.nn.dropout(output, self._output_keep_prob, seed=self._seed)\n return output, new_state", "title": "" }, { "docid": "4a59fbb3ddd4551cb6639b0237dc5662", "score": "0.5280344", "text": "def test_dropout():\n CNN_instance = CNN(layers_info=[[\"conv\", 25, 5, 1, \"valid\"], [\"linear\", 1]],\n hidden_activations=\"relu\", output_activation=\"sigmoid\", dropout=0.9999,\n initialiser=\"xavier\")\n assert CNN_instance.dropout_layer.rate == 0.9999\n assert 
not solves_simple_problem(X, y, CNN_instance)\n CNN_instance = CNN(layers_info=[[\"conv\", 25, 5, 1, \"valid\"], [\"linear\", 1]],\n hidden_activations=\"relu\", output_activation=None, dropout=0.0000001,\n initialiser=\"xavier\")\n assert CNN_instance.dropout_layer.rate == 0.0000001\n assert solves_simple_problem(X, y, CNN_instance)", "title": "" }, { "docid": "dbd91c5b2e98fc40e35a4261c870c705", "score": "0.52694005", "text": "def dropout_forward(x, dropout_param):\n\tp, mode = dropout_param['p'], dropout_param['mode']\n\tif 'seed' in dropout_param:\n\t\tnp.random.seed(dropout_param['seed'])\n\n\tmask = None\n\tout = None\n\n\tif mode == 'train':\n\t\t#######################################################################\n\t\t# TODO: Implement training phase forward pass for inverted dropout. #\n\t\t# Store the dropout mask in the mask variable. #\n\t\t#######################################################################\n\t\tprobs = np.random.rand(*x.shape)\n\t\tmask = probs < p\n\t\t\n\t\tout = mask*x\n\t\t#pass\n\t\t#######################################################################\n\t\t# END OF YOUR CODE #\n\t\t#######################################################################\n\telif mode == 'test':\n\t\t#######################################################################\n\t\t# TODO: Implement the test phase forward pass for inverted dropout. #\n\t\t#######################################################################\n\t\tout = np.copy(x)\n\t\t#pass\n\t\t#######################################################################\n\t\t# END OF YOUR CODE #\n\t\t#######################################################################\n\n\tcache = (dropout_param, mask)\n\tout = out.astype(x.dtype, copy=False)\n\n\treturn out, cache", "title": "" }, { "docid": "bc46f853b39e39a965eca2bb64e0effe", "score": "0.5264848", "text": "def set_dropout_(module):\n Dropout = (nn.Dropout, nn.Dropout2d, nn.Dropout3d)\n if isinstance(module, Dropout):\n module.training = True\n module.train()", "title": "" }, { "docid": "9ca1731c06b94a28d4d697d4f0338470", "score": "0.52593505", "text": "def __init__(self ,size ,dropout_ratio):\n super(SublayerConnection ,self).__init__()\n self.norm = LayerNorm(size)\n self.dropout = nn.Dropout(dropout_ratio)", "title": "" }, { "docid": "2e3198866a8333b0bb2e0fda07856c4e", "score": "0.5257155", "text": "def dropout_v2(circuit, rate, noise_shape=None, seed=None, name=None):", "title": "" }, { "docid": "297a08a20b1db5f9519efd226d36af10", "score": "0.52554154", "text": "def __init__(self, dropout_p, out_features=10):\n\n super(LeNet5MCDropout, self).__init__()\n self.conv = nn.Sequential( \n nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1, padding=2),\n MCDropout(dropout_p),\n nn.Tanh(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n \n nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1),\n MCDropout(dropout_p),\n nn.Tanh(),\n nn.AvgPool2d(kernel_size=2),\n \n nn.Conv2d(in_channels=16, out_channels=120, kernel_size=5, stride=1),\n MCDropout(dropout_p),\n nn.Tanh(),)\n \n self.fc = nn.Sequential(\n nn.Linear(in_features=120, out_features=84),\n MCDropout(dropout_p),\n nn.Tanh(),\n\n nn.Linear(in_features=84, out_features=out_features),\n )", "title": "" }, { "docid": "f52a0479c226deecd1bbdf3056bfb2ce", "score": "0.5248188", "text": "def drag(*args, **kwargs):\n pass", "title": "" }, { "docid": "eb462a2e70ecf0c4c90696b675530313", "score": "0.5247192", "text": "def dropout_create_layer(prev, n, activation, keep_prob):\n init = 
tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_AVG\")\n drop_out = tf.layers.Dropout(rate=keep_prob)\n next_layer = tf.layers.Dense(units=n,\n activation=activation,\n name='layer',\n kernel_initializer=init,\n kernel_regularizer=drop_out\n )\n\n y_pred = next_layer(prev)\n\n return y_pred", "title": "" }, { "docid": "42b226db45a422ea694bb162d818b274", "score": "0.52366596", "text": "def dropout(tensor_in, prob, name=None):\n with ops.op_scope([tensor_in], name, \"dropout\") as name:\n if isinstance(prob, float):\n prob = vs.get_variable(\"prob\", [],\n initializer=init_ops.constant_initializer(prob),\n trainable=False)\n ops.add_to_collection(DROPOUTS, prob)\n return contrib_dropout(tensor_in, keep_prob=prob)", "title": "" }, { "docid": "a794fef60c938cee1ba05cc6e1b6c0cb", "score": "0.52260077", "text": "def dropout_backward(dout, cache):\n dropout_param, mask = cache\n mode = dropout_param['mode']\n\n dx = None\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase backward pass for inverted dropout #\n #######################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n \n dx = dout * mask\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n dx = dout\n return dx", "title": "" }, { "docid": "d898b224062074a286a179e8412e4cc7", "score": "0.52202564", "text": "def shadingNode(*args, **kwargs):\n\n pass", "title": "" }, { "docid": "f97b060b22a32dcce90330f3aa0f5e0c", "score": "0.5218704", "text": "def nn_layer(self,input, input_dim, output_dim, name, act=tf.nn.leaky_relu):\n with tf.variable_scope(None,name):\n with tf.variable_scope(None,'weights'):\n w = self.weight_variable([input_dim, output_dim])\n self.variable_summaries(w)\n with tf.variable_scope(None,\"bias\"):\n b = self.bias_variable([output_dim])\n self.variable_summaries(b)\n with tf.variable_scope(None,\"act\"):\n pre_layer1 = tf.matmul(input, w) + b\n tf.summary.histogram('act', pre_layer1)\n layer1 = act(pre_layer1)\n with tf.variable_scope(None,\"drop_out1\"):\n dropOutLayer1 = tf.contrib.layers.dropout(layer1, self.keep_prob)\n return dropOutLayer1", "title": "" } ]
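For reference, the behaviour described across the dropout passages above reduces to a minimal inverted-dropout forward/backward pair. This is an illustrative sketch in plain NumPy, not drawn from any of the listed docids; `p` is assumed to be the keep probability, as in the cs231n-style `dropout_forward` snippets:

import numpy as np

def dropout_forward_sketch(x, p, mode, seed=None):
    # Inverted dropout: scale kept units by 1/p at train time so test time is a no-op.
    if seed is not None:
        np.random.seed(seed)
    if mode == "train":
        mask = (np.random.rand(*x.shape) < p) / p
        out = x * mask
    else:  # "test"
        mask, out = None, x
    return out, mask

def dropout_backward_sketch(dout, mask, mode):
    # Gradients flow only through the units that were kept during the forward pass.
    return dout * mask if mode == "train" else dout

At test time the mask is skipped entirely, which is why the test branches in the passages simply return the input unchanged.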
fea7c20246b3976198808fa8c7a94e6d
Load image from base64
[ { "docid": "194aa7db8e2d52e9e5fb4d999046ced4", "score": "0.83857167", "text": "def _load_from_base64(self, value):\n search_results = re.search(self.BASE64_REGEX, value)\n data = search_results.group(2)\n if len(data) % 4:\n data += '=' * (4 - len(data) % 4)\n data = base64.decodebytes(data.encode('ascii'))\n file = BytesIO(data)\n img = PYTHON_Image.open(file)\n return img", "title": "" } ]
[ { "docid": "f7722b5e811a9bfbac4a10b63e8fd8ed", "score": "0.80330646", "text": "def _load_image_from_base64_image(base64_image: str) -> np.ndarray:\n return ImageUtilities._pil_to_numpy(\n ImageUtilities._base64_to_pil(base64_image), _format=\"BGR\"\n )", "title": "" }, { "docid": "28d28a545c970a912f2de0a2dde6ef3c", "score": "0.77049154", "text": "def base642img(b64data):\n try:\n data = b64data.split(',', 1)[1] if b64data.startswith('data:') else b64data\n image_string = io.StringIO(base64.b64decode(data))\n image = PIL.Image.open(image_string)\n image.load()\n return image\n except Exception:\n return None", "title": "" }, { "docid": "a468bc4dd2de702a01e96bd115dae8cd", "score": "0.75463647", "text": "def base64_to_img(b64_img: str) -> Image.Image:\n return Image.open(io.BytesIO(base64.b64decode(b64_img)))", "title": "" }, { "docid": "8d0ca970e7c09853a95cc0a5bf031655", "score": "0.71680576", "text": "def _base64_to_pil(base64_image: str) -> Image.Image:\n # Select just the image information if there is more information\n if len(base64_image.split(\",\")) > 1:\n _, base64_image = base64_image.split(\",\")\n pil_image = Image.open(BytesIO(b64decode(base64_image)))\n if pil_image.mode == \"RGBA\":\n pil_image = pil_image.convert(\"RGB\")\n return pil_image", "title": "" }, { "docid": "3743d74dbfdc053f22b4ceeea7e742a0", "score": "0.7140751", "text": "def _base64_to_pil(base64_image: str) -> Image.Image:\n # Select just the image information if ther is more than one\n if len(base64_image.split(\",\")) > 1:\n _, base64_image = base64_image.split(\",\")\n pil_image = Image.open(BytesIO(b64decode(base64_image)))\n if pil_image.mode == \"RGBA\":\n pil_image.convert(\"RGB\")\n return pil_image", "title": "" }, { "docid": "087eea3241b5fcca21122325fdde9349", "score": "0.6988519", "text": "def decode_b64(self, b64string):\n return Image.open(cStringIO.StringIO(base64.b64decode(b64string)))", "title": "" }, { "docid": "2ba92556e8f1718d2d5999ffc7387e01", "score": "0.6955489", "text": "def decode_img(self, encoded_data):\n return Image.open(BytesIO(base64.b64decode(encoded_data)))", "title": "" }, { "docid": "df10d65951666f5c8f0e53895f045e05", "score": "0.6943743", "text": "def uploadedToPil(content):\n string = content.split(';base64,')[-1]\n decoded = base64.b64decode(string)\n buffer = BytesIO(decoded)\n im = Image.open(buffer)\n return im", "title": "" }, { "docid": "59f0838c21b42117dfe071520406c728", "score": "0.686301", "text": "def test_decoding(self):\n\n with open(os.getcwd() + '/Unit Tests/Assets/base64.json', 'r') as base64:\n data = base64.read()\n\n base64_img = json.loads(data)\n\n result = decode_base64(str(base64_img['base64_img']))\n\n self.assertEqual(result.format, \"PNG\")", "title": "" }, { "docid": "1cab48b13a9d80b4a8844c930476a8f6", "score": "0.68327504", "text": "def load_image_data ( image ):\n return image.load()", "title": "" }, { "docid": "58b9c1bee6c962baba94ee1f01c052c4", "score": "0.68058807", "text": "def base64_to_image(data: bytes) -> np.ndarray:\n b64_image = base64.b64decode(data)\n fd = BytesIO(b64_image)\n img = Image.open(fd)\n img_data = np.array(img).astype(\"float32\")\n\n if img_data.shape[-1] == 4:\n # We only support rgb\n img_data = img_data[:, :, :3]\n\n return img_data", "title": "" }, { "docid": "b04022caf635bc44257b00a5b50e9150", "score": "0.6797289", "text": "def load_image_file(self, path: str):", "title": "" }, { "docid": "ceef887644019cd73c3c33f32c267595", "score": "0.6786988", "text": "def base64_to_image_obj(req: dict):\n image_base64 = 
req.get(\"image\", None)\n if not image_base64:\n raise TorchException(\"No image\")\n encoding_regex = re.search(\n r\"^data:image(/(.*))?;base64,(.+)$\", image_base64)\n if not encoding_regex:\n raise TorchException(\"Invalid image format\")\n encoding = encoding_regex.group(3)\n image = base64.b64decode(encoding)\n return image", "title": "" }, { "docid": "ca6a922d7b5d0989bed67af4f8592ffd", "score": "0.6766832", "text": "def decode_photo(base64_text):\n return base64.b64decode(bytes(base64_text, encoding=\"utf-8\"))", "title": "" }, { "docid": "ec3ef2b4312ed34060889bcce318df7a", "score": "0.6711937", "text": "def decode_image(file):\n with open(file, \"rb\") as img:\n data = base64.b64encode(img.read())\n return data.decode('utf-8')", "title": "" }, { "docid": "e0a653fbcc9e457dd7ee9bebfe425727", "score": "0.67056656", "text": "def b64_to_PILImage(b64_string):\n b64_split = b64_string.split(\",\")[1]\n b64_bin = base64.b64decode(b64_split)\n with BytesIO(b64_bin) as b:\n pil_img = Image.open(b).copy().convert('RGB')\n return pil_img", "title": "" }, { "docid": "278ca04562e6343c2b72d775cff70f02", "score": "0.669826", "text": "def __validateBase64Image(base64Img:str)->str:\n try:\n isJpeg = imghdr.tests[0](base64.b64decode(base64Img), None) == 'jpeg'\n if not isJpeg:\n raise ImageBase64DecodeException()\n except:\n raise ImageBase64DecodeException()\n return base64Img", "title": "" }, { "docid": "d33882e605f2393cf178ce6af3d1c9a4", "score": "0.6618903", "text": "def create_image(base64_string, filename):\n img_data = base64.b64decode(base64_string)\n with open(filename, 'wb') as output_file:\n output_file.write(img_data)", "title": "" }, { "docid": "133ff4cc9ae2c4a546037334b82ca2d9", "score": "0.64916855", "text": "def load_image_bytes(fname='example.png'):\n import io\n with io.open(fname, 'rb') as image_file:\n image = image_file.read()\n return image", "title": "" }, { "docid": "3bc7125ddfb29699c91ea54f02114a82", "score": "0.64680034", "text": "def base64_to_binary_for_cv2(image_64_encoded):\n img_base64_binary = image_64_encoded.encode(\"utf-8\")\n img_binary = base64.b64decode(img_base64_binary)\n image = cv2.imdecode(np.frombuffer(img_binary, np.uint8), cv2.IMREAD_COLOR)\n return image", "title": "" }, { "docid": "8408427db2a4f887203c2e6584921b6b", "score": "0.6445457", "text": "def get_image_from_request_body(event):\n \n base64_image = event['body']\n img = Image.open(BytesIO(base64.b64decode(base64_image)))\n imdata = np.asarray(img) \n img = Image.fromarray(np.roll(imdata, 1, axis=-1))\n return img", "title": "" }, { "docid": "7db5987d56be8c0e92c1cb68726155e8", "score": "0.64037466", "text": "def load_image(self, **kwargs):\n ...", "title": "" }, { "docid": "14e54f5def3484b69b7c3599597b9eac", "score": "0.6384909", "text": "def img2base64(filename):\n with open(filename, \"rb\") as image_file:\n b64string = base64.b64encode(image_file.read())\n return b64string.decode()", "title": "" }, { "docid": "f11d85ad2502612391e8d322de9d186b", "score": "0.6369647", "text": "def create_image_from_base64(\n self, # type: Inputs\n base64_bytes, # type: str\n image_id=None, # type: typing.Optional[str]\n concepts=None, # type: typing.Optional[typing.List[str]]\n not_concepts=None, # type: typing.Optional[typing.List[str]]\n crop=None, # type: typing.Optional[BoundingBox]\n metadata=None, # type: typing.Optional[dict]\n geo=None, # type: typing.Optional[Geo]\n allow_duplicate_url=False # type: bool\n ):\n # type: (...) 
-> Image\n\n if crop:\n raise UserError(\n \"The `crop` argument is not used/supported by create_image_from_base64. Please remove \"\n \"it.\")\n\n image = Image(\n base64=base64_bytes,\n image_id=image_id,\n concepts=concepts,\n not_concepts=not_concepts,\n metadata=metadata,\n geo=geo,\n allow_dup_url=allow_duplicate_url)\n return self.create_image(image)", "title": "" }, { "docid": "c5a3276706d8fe35bb537454576b6c9b", "score": "0.6336592", "text": "def decode(binary):\n # create a string buffer from the string (binary data)\n strio = StringIO.StringIO(binary.data)\n # create a PIL image from that string and then convert it to a\n # numpy array\n image = Image.open(strio)\n image = np.asarray(image).copy()\n strio.close()\n return image", "title": "" }, { "docid": "f5a0da8d07279b27fa99a1348f1a9e39", "score": "0.6318242", "text": "def load_image(data_dir, image_file):\n return mpimg.imread(os.path.join(data_dir, image_file.strip()))", "title": "" }, { "docid": "635298dd7ae9ba02c2f62a9abc17fa81", "score": "0.6264914", "text": "def _load_image(image):\n return image.get_file().download()", "title": "" }, { "docid": "af0e6d64e1965de620d671b30ce99026", "score": "0.62355226", "text": "def decode_image(field):\n array = np.frombuffer(base64.b64decode(field), dtype=np.uint8)\n image_array = cv2.imdecode(array, cv2.IMREAD_ANYCOLOR) # BGR\n return image_array", "title": "" }, { "docid": "2ede5076c5a5d4d2f53d6f4a44c9609a", "score": "0.6185093", "text": "def load_image(data_dir, image_file):\n img_path = \"{0}/{1}\".format(data_dir, image_file)\n return mpimg.imread(img_path)", "title": "" }, { "docid": "356370c108ebf57618f94b22d1d2a94f", "score": "0.60786134", "text": "def _load_image_from_bytes_image(bytes_image: bytes) -> np.ndarray:\n pil_image = ImageUtilities._bytes_to_pil(bytes_image)\n pil_image.save(\"/opt/ml/model/image.jpg\")\n return ImageUtilities._pil_to_numpy(\n pil_image, _format=\"BGR\"\n )", "title": "" }, { "docid": "73c6c0f5616a5c2975dea382e70fdd47", "score": "0.6064075", "text": "def test_encoding(self):\n img_path = os.getcwd() + '/Unit Tests/Assets/test_rotate.jpg'\n img = Image.open(img_path)\n\n result = encode_base64(img)\n self.assertEqual(type(result), bytes)", "title": "" }, { "docid": "6e0bba5b7a6f82ca902b176ba1f125bc", "score": "0.604651", "text": "def load_image(img_fpath):\n return Image.open(str(img_fpath))", "title": "" }, { "docid": "d97a513342f874f19a05545912d6db61", "score": "0.603784", "text": "def load_image(self, image_id):\n\t\tinfo = self.image_info[image_id]\n\t\timg_path = info['path']\n\t\t#print(img_path)\n\t\timage = skimage.io.imread(img_path)\n\n\t\treturn image", "title": "" }, { "docid": "cf6cf50a9f44691c1b9c98a709b3835b", "score": "0.6027852", "text": "def reload_data(self):\n with open(self.filename, 'rb') as f:\n data = f.read()\n self.tag.setAttribute('src', 'data:image/{format};base64,{data}'.format(format=self.format, data=base64.b64encode(data).decode('ascii')))\n self.mark_dirty()", "title": "" }, { "docid": "c8c7f0d3b11370228efa559be2a8c7c5", "score": "0.6013168", "text": "def load_image(image_file):\n img = Image.open(image_file)\n return img", "title": "" }, { "docid": "4ad428440da4c89ed424b2f02816a188", "score": "0.60100645", "text": "def load_image(self, image_id):\n info = self.image_info[image_id]\n image_path = info['path']\n image = imageio.imread(image_path)\n image = image[:, :, :3] # discard alpha\n return image", "title": "" }, { "docid": "ca25f640565c462b1497ec67e29113b5", "score": "0.6001796", "text": "def 
string_to_image(img_string: str, format=\"bgr\"):\n img_string = re.sub('^data:image/[a-z]+;base64,', '', img_string)\n imgdata = base64.b64decode(img_string)\n\n image = Image.open(io.BytesIO(imgdata))\n img = np.array(image)\n if 'bgr' == format:\n return img\n elif 'rgb' == format:\n return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)", "title": "" }, { "docid": "95b802cec31a1955d0c120f9237ed9b2", "score": "0.59603", "text": "def load_image(filename):\n # if a bytesio instance is passed in, use it as is.\n if isinstance(filename, BytesIO):\n return filename\n # by default loading from network is allowed for all images\n if filename.startswith((\"http://\", \"https://\")):\n f = BytesIO(urlopen(filename).read())\n else:\n with open(filename, \"rb\") as fl:\n f = BytesIO(fl.read())\n return f", "title": "" }, { "docid": "a55db12facbc747919450acf96dcbd8c", "score": "0.5945072", "text": "def decode_base64_dict(data):\n b64 = base64.b64decode(data[\"__ndarray__\"])\n array = np.copy(np.frombuffer(b64, dtype=data[\"dtype\"]))\n if len(data[\"shape\"]) > 1:\n array = array.reshape(data[\"shape\"])\n return array", "title": "" }, { "docid": "445dc0c17274e1e7fd9ff6e1e19d4213", "score": "0.5936991", "text": "def load_image(self):\n filepath = self.photo_txt.GetValue()\n img = wx.Image(filepath, wx.BITMAP_TYPE_ANY)\n \n self.image_ctrl.SetBitmap(wx.Bitmap(img))\n self.Refresh()", "title": "" }, { "docid": "b0f6b7b43ef1a92a9c5d9e16b1de4c38", "score": "0.59276813", "text": "def load_image(image_path):\n\n image = img.imread(image_path)\n\n return image", "title": "" }, { "docid": "d6c7c8f47fc52271e5ceed9061084cf3", "score": "0.5918374", "text": "def pil_to_base64(img):\n buf = BytesIO()\n img.save(buf, format=\"png\")\n img_base64 = base64.b64encode(buf.getvalue()).decode(\"ascii\")\n return img_base64", "title": "" }, { "docid": "6ee27e1866bccb097a09e688148d8367", "score": "0.5912908", "text": "def load(base64Str: str) -> 'QResult':\n byteStr = base64.b64decode(base64Str.encode())\n obj = pickle.loads(byteStr) # type: QResult\n return obj", "title": "" }, { "docid": "e1e44aa3b3575e0d45a8a7089381a62f", "score": "0.5901844", "text": "def read_img(data_dir, img_id, train_or_test, size):\n img = image.load_img(join(data_dir, train_or_test, '%s.jpg' % img_id), target_size=size)\n img = image.img_to_array(img)\n return img", "title": "" }, { "docid": "30968edbad793347d0fb5e5b6695bb97", "score": "0.5896277", "text": "def load_image_path ( path ):\n return Image.open( path )", "title": "" }, { "docid": "894b1e2e8207895b22b8ea89ce4e3728", "score": "0.58904064", "text": "def retrieve_image_data(self):\n pass", "title": "" }, { "docid": "fa09f5040ae63a03ab367f40e8fafeb6", "score": "0.5884569", "text": "def img_to_base64(img: Image.Image) -> str:\n img = Image.fromarray(img)\n with io.BytesIO() as f:\n img.save(f, format=\"jpeg\")\n return base64.b64encode(f.getvalue()).decode(\"ascii\")", "title": "" }, { "docid": "17c66c7d5fc799099af61f62c7c3c4bb", "score": "0.5879111", "text": "def load_image(image_bytes):\n transform = transforms.Compose(\n [transforms.ToTensor(), transforms.Lambda(lambda x: x.mul(255))])\n image = Image.open(io.BytesIO(image_bytes))\n return transform(image).unsqueeze(0).to(device)", "title": "" }, { "docid": "e7dda43074d33ff36ee4bf1eac8c8c20", "score": "0.5875794", "text": "def b64_str_to_np(base64_str):\r\n\t#base64_str=str(base64_str)\r\n\t#if \"base64\" in base64_str:\r\n\t#\t_, 
base64_str=base64_str.split(',')\r\n\r\n\tbuf=BytesIO()\r\n\tbuf.write(base64.b64decode(base64_str))\r\n\tbuf.seek(0)\r\n\tpimg=Image.open(buf).convert(\"RGBA\")\r\n\tpimg.save('kherya.png', 'png')\r\n\timg=np.array(pimg)\r\n\tprint(img)\r\n\t# Keep only 4th value in 3rd dimension (first 3 are all zeros)\r\n\treturn img[:, :, 3]", "title": "" }, { "docid": "5f3f6e6b1fa724f1f629a1c76aec83aa", "score": "0.5863296", "text": "def load_img(filepath):\n return Image.open(filepath)", "title": "" }, { "docid": "a5e7377653a60f82dca9529cf409a563", "score": "0.58521473", "text": "def b64_str_to_np(base64_str):\n base64_str = str(base64_str)\n if \"base64\" in base64_str:\n _, base64_str = base64_str.split(',')\n\n buf = BytesIO()\n buf.write(base64.b64decode(base64_str))\n buf.seek(0)\n pimg = Image.open(buf)\n img = np.array(pimg)\n\n # Keep only 4th value in 3rd dimension (first 3 are all zeros)\n return img[:, :, 3]", "title": "" }, { "docid": "aa6f571b6d391b3484a497501da9d1de", "score": "0.584215", "text": "def render_picture(data):\n render_pic =base64.b64encode(data).decode('ascii')\n return render_pic", "title": "" }, { "docid": "68ca171372a33f96aeeaa565f9ee7dbd", "score": "0.5826409", "text": "def base64tocv2(s):\n return cv2.imdecode(np.fromstring(base64.decodebytes(str.encode(s.split(',')[-1])), dtype=np.uint8), 1)", "title": "" }, { "docid": "ff1a81cf5284242f5355bf6b9fc662ad", "score": "0.58100235", "text": "def _load_image(p: bytes):\n img_arr = np.array(Image.open(BytesIO(p)))\n if len(img_arr.shape) == 3:\n img_arr = np.dstack((np.zeros(img_arr.shape[:2] + (1,)), img_arr))\n img_arr = img_arr[:, :, ::-1]\n img_arr = img_arr.astype(np.uint8).view(np.uint32)\n img_arr = img_arr.reshape(img_arr.shape[:2]).T\n return img_arr.astype(np.uint32)", "title": "" }, { "docid": "b2088fd02f1ea4e8a8c68ba6b588fdd6", "score": "0.58050895", "text": "def load_data(self, image_id):\n image_path = os.path.join(self.root, 'images', '{}2017'.format(self.split), '{}.jpg'.format(image_id))\n\n return Image.open(image_path).convert('RGB')", "title": "" }, { "docid": "bb9cd496d4d785b3da88a561fb3fa246", "score": "0.5767724", "text": "def img2base64(s):\n parser = Img2Base64Parser()\n parser.feed(s)\n parser.close()\n outtext = u''\n startpos = 0\n for img in parser.imglist:\n outtext += s[startpos:img[1][0]]\n if img[0][1] == 'svg':\n outtext += \"data:image/%s+xml;base64,%s\"% \\\n (img[0][1],str(img[0][0]).lstrip('b\\'').rstrip('\\''))\n elif img[0][1] == 'pdf':\n outtext += \"data:application/%s;base64,%s\"% \\\n (img[0][1],str(img[0][0]).lstrip('b\\'').rstrip('\\''))\n else:\n outtext += \"data:image/%s;base64,%s\"% \\\n (img[0][1],str(img[0][0]).lstrip('b\\'').rstrip('\\''))\n startpos = img[1][1] if len(img)==3 else -1\n outtext += s[startpos:] if startpos != -1 else ''\n return outtext", "title": "" }, { "docid": "3aebdee00b56465c20672a431e003448", "score": "0.5761761", "text": "def load(cls, path_to_file):\n import mimetypes\n mimetypes.init()\n mime = mimetypes.guess_type('file://%s' % path_to_file)[0]\n img_type = ImageTypeEnum.lookup_by_mime_type(mime)\n with open(path_to_file, 'rb') as f:\n data = f.read()\n return Image(data, image_type=img_type)", "title": "" }, { "docid": "3b381a65b08f7edfbf09deea65cff7a4", "score": "0.5752236", "text": "def load_image(self, image_id):\n # Load image\n print(\"loading image: \",image_id)\n image = skimage.io.imread(self.image_info[image_id]['path'])\n # If grayscale. 
Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image", "title": "" }, { "docid": "c90b3feb9109b77481d39509a975797e", "score": "0.5750719", "text": "def _pil_to_base64(pil_image: Image.Image) -> str:\n _buffer = BytesIO()\n if pil_image.mode != \"RGBA\":\n pil_image.save(_buffer, format=\"JPEG\")\n else:\n pil_image.save(_buffer, format=\"PNG\")\n img_str = b64encode(_buffer.getvalue()).decode(\"utf-8\")\n return img_str", "title": "" }, { "docid": "7f403c571da2e7f7360ab2a75da579fc", "score": "0.57461995", "text": "def __from_base64_string(raw_string):\n # type: (str) -> VirgilBuffer\n return VirgilBuffer(base64.b64decode(raw_string))", "title": "" }, { "docid": "3387dc0c7077d3fbefae0975eb926b4c", "score": "0.57117623", "text": "def bytesToImage(imgBytes):\n imgBytesIO = io.BytesIO(imgBytes)\n curImg = Image.open(imgBytesIO)\n return curImg", "title": "" }, { "docid": "b6bfe24a796988665bae0aed6844b590", "score": "0.5705712", "text": "def set_data_file_from_base64(self, content_base64):\n try:\n raw = base64.standard_b64decode(str(content_base64))\n except (TypeError, binascii.Error):\n raise Exception('The provided file to be signed is not '\n 'base64-encoded')\n self.set_signature_file_from_raw(raw)", "title": "" }, { "docid": "a0aaf893b8d44a197c7b43476cd92236", "score": "0.56949294", "text": "def decodeImgString(imgStr: str, dimensions: List) -> np.array:\n buffer = base64.b64decode(imgStr)\n img = np.frombuffer(buffer, dtype= np.uint8).reshape(dimensions)\n return img", "title": "" }, { "docid": "b919a891fb17658384ead443bfaa028d", "score": "0.56933725", "text": "def Base64Decode(jsonDump):", "title": "" }, { "docid": "1961db814208da5d509a755ddafbfdc9", "score": "0.5684188", "text": "def load_img(path):\n if path is None or not is_image(path):\n assert path, '%s is none or is not an image'\n return Image.open(path).convert('RGB')", "title": "" }, { "docid": "0727d080933c1815b9ceab09121b5080", "score": "0.5677947", "text": "def _deserialize64(self, data64):\n return pickle.loads(base64.decodestring(data64))", "title": "" }, { "docid": "f61a6b80f4b890c3e2ca20b250c20eea", "score": "0.56614906", "text": "def encode_base64(file_path):\n\n with open(file_path, \"rb\") as image_file:\n base64_string = base64.b64encode(image_file.read())\n return base64_string", "title": "" }, { "docid": "65decdff8b1ff179ba9896c38086fb31", "score": "0.56531733", "text": "def raw_image(self):\n pass", "title": "" }, { "docid": "9cfcc3771535fa3a66e848ccdc6f0ba0", "score": "0.5652095", "text": "def from_base64(cls: Type[FileBox], base64: bytes, name: str = 'base64.dat') -> FileBox:\n options = FileBoxOptionsBase64(base64=base64, name=name)\n return FileBox(options)", "title": "" }, { "docid": "c1e0f113e1f0eb431f8e9a783f54beca", "score": "0.5649604", "text": "def load_image(filename):\n image = io.imread(os.path.join('images//', filename))\n return image", "title": "" }, { "docid": "9df6d441851a5212776a4fba49d9a85f", "score": "0.5646943", "text": "def encode_image(image_path):\n encoded = None\n with open(image_path, 'rb') as imgfile:\n encoded = base64.b64encode(imgfile.read())\n return encoded.decode('UTF-8')", "title": "" }, { "docid": "d8da3026cecc1d61bc70eda06485cc1a", "score": "0.56376225", "text": "def data_uri_to_cv2_img(uri):\n encoded_data = uri.split(',')[1]\n nparr = np.fromstring(base64.b64decode(encoded_data), np.uint8)\n img = 
cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n return img", "title": "" }, { "docid": "ca7b33eb1c6c18a088c44b66975c852d", "score": "0.56345695", "text": "def _load_from_network(url):\n data = urlopen(url).read()\n if isinstance(data, str):\n data = data.encode('ascii')\n file = BytesIO(data)\n img = PYTHON_Image.open(file)\n return img", "title": "" }, { "docid": "370592c6891fe12de92e9149319dfa8f", "score": "0.56188786", "text": "def from_base64(cls, s, subformat='json', encoding='utf-8', **kwargs):\n kwargs['subformat'] = subformat\n kwargs['encoding'] = encoding\n return cls(s, format='base64', **kwargs)", "title": "" }, { "docid": "bdef57109c8dc755b41973c62eed04fc", "score": "0.5606238", "text": "def load_image(img_path):\n #return img_path\n im = Image.open(img_path).convert('RGB') #as im:\n #print(im.size)\n #s()\n imArr = np.fromstring(im.tobytes(), dtype=np.uint8)\n imArr = imArr.reshape((im.size[1], im.size[0], 3))\n return imArr", "title": "" }, { "docid": "31985cea5fb66288fc89c0a3aaa48b38", "score": "0.55999845", "text": "def get_image_of_element(wrapper):\n image = wrapper.capture_as_image()\n img = image\n im_file = BytesIO()\n img.save(im_file, format=\"JPEG\")\n # img.show()\n im_bytes = im_file.getvalue() # im_bytes: image in binary format.\n im_b64 = base64.b64encode(im_bytes)\n return im_b64", "title": "" }, { "docid": "64f916789995085b562662b1ed0ab6f8", "score": "0.5597849", "text": "def load_image(resource, image_data):\n if image_data:\n image, _ = LearningResourceImage.objects.get_or_create(**image_data)\n\n resource.image = image\n else:\n resource.image = None\n image = None\n resource.save()\n return image", "title": "" }, { "docid": "4cc6fd445560960384131f94380f631c", "score": "0.55964845", "text": "def load_image(self, image_id):\n # Load image\n image = skimage.io.imread(self.image_info[image_id]['path'])\n # If grayscale. Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image", "title": "" }, { "docid": "4cc6fd445560960384131f94380f631c", "score": "0.55964845", "text": "def load_image(self, image_id):\n # Load image\n image = skimage.io.imread(self.image_info[image_id]['path'])\n # If grayscale. Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image", "title": "" }, { "docid": "a7420bb7c9ea61fbeca2d27834fdba80", "score": "0.55889773", "text": "def load_image(self, image_id):\n # Load image\n image = skimage.io.imread(self.image_info[image_id]['path'])\n\n # If 16bit, convert to 8bit\n if image.dtype=='uint16': \n \t image = self.map_uint16_to_uint8(image, lower_bound=None, upper_bound=None)\n\n # If grayscale. 
Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image", "title": "" }, { "docid": "95f901e4f12dc3bbc7bf9cc6e721879b", "score": "0.55854046", "text": "def decode_encoded_compressed_shot(arg: str) -> Image:\n shot_enc = StringCompressor.decompress(bytes.fromhex(arg))\n msg = base64.b64decode(shot_enc)\n buf = BytesIO(msg)\n img = Image.open(buf)\n return img", "title": "" }, { "docid": "87e584aeb61baa5373bac99bf1a3adb0", "score": "0.55837005", "text": "def test_load_image(self):\n assert load_image(TEST_IMAGE_FILE) is not None", "title": "" }, { "docid": "e065c10fd492c713e460fc0bd2abfbdb", "score": "0.55812633", "text": "def save_image(self, obj):\n img_path = self.file_op.get_path(obj['name'])\n with open(img_path, 'wb') as file:\n file.write(base64.b64decode(obj['data']))\n file.close()\n return img_path", "title": "" }, { "docid": "00d119bdfb12709629e6084d44d60983", "score": "0.55809706", "text": "def encodeFromRaw(contents, name, filename, path, public_url, b64=False):\n if not isinstance(contents, (bytes, str)):\n return None\n # ====================================================================#\n # Detect Base64 Images\n if b64 is True:\n image = base64.b64decode(contents)\n else:\n image = contents\n # ====================================================================#\n # Detect Image Dimensions\n dims = ImagesHelper.get_pil_dims(image)\n # ====================================================================#\n # Build Splash Image Data\n return {\n \"name\": name,\n \"filename\": filename,\n \"md5\": ImagesHelper.md5(contents, b64),\n \"path\": path,\n \"size\": len(image),\n \"url\": public_url,\n \"width\": dims[0],\n \"height\": dims[1],\n }", "title": "" }, { "docid": "da1b60a4c8f1433201730ccca55dd281", "score": "0.557907", "text": "def load(path):\n img = plt.imread(path)\n dimensions = f\"{img.shape[0]} x {img.shape[1]}\"\n print(f\"Loaded image at {path} of dimensions {dimensions}\")\n return img", "title": "" }, { "docid": "cc1afb7bd005317a93c4c24ae4c2de22", "score": "0.5573601", "text": "def load_raw_file_to_image(filename):\n with Raw(filename=filename) as raw:\n raw.options.rotation = 0\n w = raw.data.contents.sizes.width\n h = raw.data.contents.sizes.height\n buffered_image = np.array(raw.to_buffer())\n image = Image.frombytes('RGB', (w, h), buffered_image)\n return image", "title": "" }, { "docid": "e0047132855a74315e39d6ed2ca58df1", "score": "0.55721366", "text": "def load_image_url ( url ):\n imagem_remota = requests.get( url )\n return Image.open( BytesIO( imagem_remota.content ) )", "title": "" }, { "docid": "6d90a682f66db32cc066a3e043fc2690", "score": "0.55721223", "text": "def PILImage_to_b64(pil_img):\n with BytesIO() as b:\n pil_img.save(b, format='png')\n b.seek(0)\n img_base64 = base64.b64encode(b.read()).decode('utf-8')\n img_base64 = 'data:image/png;base64,' + img_base64\n return img_base64", "title": "" }, { "docid": "acceb979746b7a1fa007cc3757b83a71", "score": "0.55655575", "text": "def load_image(path, xmin, xmax, ymin, ymax, show=True):\n img_array = plt.imread(path)\n\n img = ImageData(img_array)\n img.set_dimensions(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)\n if show:\n img.show('Raw Data', figsize=(6, 6/img.aspect))\n\n return img", "title": "" }, { "docid": "d542f9eeef16189829efc2dfb172aa87", "score": "0.5565333", "text": "def find_element_by_image(self, img_path):\n with 
open(img_path, 'rb') as i_file:\n b64_data = base64.b64encode(i_file.read()).decode('UTF-8')\n\n return self.find_element(by=MobileBy.IMAGE, value=b64_data)", "title": "" }, { "docid": "7bfe9aaa6caa9e70787da01a3f455e0b", "score": "0.55645835", "text": "def loadImage(name, size=0):\n path = join(PACKAGE_HOME, 'input', name)\n fd = open(path, 'rb')\n data = fd.read()\n fd.close()\n return data", "title": "" }, { "docid": "065280a0f1c2fa38b1beeca0b506b86d", "score": "0.5558007", "text": "def load_image(data):\n image_array = np.fromstring(data, np.uint8)\n image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n return gray_image", "title": "" }, { "docid": "b60b3578f75f42423ef680775fe354f5", "score": "0.55515325", "text": "def get_thumbnail(self, b64string):\n img = self.decode_b64(b64string)\n size = (100, 100)\n img = ImageOps.fit(img, size, Image.ANTIALIAS)\n return self.encode_b64(img)", "title": "" }, { "docid": "d4234e32533b466570264dca23c7660e", "score": "0.55490226", "text": "def conv_img_to_b64(self, image_url):\r\n \r\n returned_object = requests.get(image_url, stream=True)\r\n filename = image_url.split(\"/\")[-1]\r\n result = \"\" \r\n\r\n if returned_object.status_code == 200: \r\n returned_object.raw.decode_content = True\r\n\r\n with open(filename, 'wb') as f: \r\n shutil.copyfileobj(returned_object.raw, f) \r\n f.close() \r\n\r\n with open(filename, \"rb\") as f: \r\n result = base64.b64encode(f.read()).decode('utf-8') \r\n f.close() \r\n\r\n else: \r\n print('Image Couldn\\'t be retrieved')\r\n\r\n return result", "title": "" }, { "docid": "9b04c2fc55f236d0b531db92592615fa", "score": "0.5548839", "text": "async def get_captcha_base64_image(url):\n with start_firefox_driver() as driver:\n driver.get(url)\n\n wait = WebDriverWait(driver, WAITING_TIME)\n\n image_element = wait.until(\n SearchCaptchaDataInElement(\n (By.ID, CAPTCHA_ELEMENT_ID),\n REGEX_SEARCH_CAPTCHA\n )\n )\n\n _ , base64_img = image_element.string.split(',')\n\n\n await asyncio.sleep(0)\n\n return base64_img", "title": "" }, { "docid": "1037bed933585e5837aa21fcf3b0177c", "score": "0.5543789", "text": "def base64_decode(data):\n return b64decode(data.replace('-', '+').replace('_', '/'))", "title": "" }, { "docid": "d6e32c00a4d7cb59c2e0932f6cb6f5fd", "score": "0.5534618", "text": "def image_to_base64(image: np.ndarray) -> bytes:\n # Make the image the correct format\n image = image.astype(\"uint8\")\n pil_image = Image.fromarray(image)\n fd = BytesIO()\n # Save the image as PNG\n pil_image.save(fd, format=\"PNG\")\n return base64.b64encode(fd.getvalue())", "title": "" }, { "docid": "02dd5b3509772e2dce7dbe3adac28165", "score": "0.5531213", "text": "def decode(cls, encoded):\n decoded = base64.b64decode(encoded)\n return decoded", "title": "" }, { "docid": "4b9059d0eba41a895aa89ef9b89eb882", "score": "0.5525381", "text": "def byte_to_image(inp):\r\n MAX_WIDTH = 400\r\n \r\n _bytes = base64.b64decode(inp)\r\n nparr = np.fromstring(_bytes, dtype=np.uint8)\r\n img = cv2.imdecode(nparr, 1)\r\n \r\n if (img.shape[1] > MAX_WIDTH): \r\n ratio = float(MAX_WIDTH) / img.shape[1]\r\n dim = (MAX_WIDTH, int(img.shape[0] * ratio))\r\n img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\r\n \r\n return img", "title": "" }, { "docid": "7549b6f03f404f3587cac8104fa31101", "score": "0.55230165", "text": "def load_image_file(self, sample: CityscapesSample) -> Image:\n\t\tinput_file = glob(os.sep.join([self.input_dir, sample.city, sample.id + \"_leftImg8bit.png\"]))\n\t\tassert 
len(input_file) == 1, \\\n\t\t\tf'Either no image or multiple images found for the ID {sample.id}: {input_file}'\n\n\t\treturn Image.open(input_file[0])", "title": "" } ]
bed5dcbe4383c9988164b8bcee8f4ae3
Returns True if the given string is in Google Music id form.
[ { "docid": "b19cfb950577f0622b596c172db40f15", "score": "0.7482086", "text": "def is_gm_id(s):\r\n return re.match(gm_id_regex, s) is not None", "title": "" } ]
[ { "docid": "12e69c27f1ba64c6839ef55fd6a0a90a", "score": "0.65633464", "text": "def stringcheck_id(self, string):\n string = str(string) # because of 2.7 input problem\n if re.match(\"^[a-zA-Z_][a-zA-Z0-9_]*$\", string) != None:\n return True\n else:\n return False", "title": "" }, { "docid": "a63b169e654e099270475b1ba2747065", "score": "0.6351389", "text": "def is_valid_domain_id(id_str: str) -> bool:\n return re.match('([0-9][a-zA-Z0-9]{3})([a-zA-Z0-9])([0-9]{2})$', id_str)", "title": "" }, { "docid": "d4f34533fc9ea30755afc9dfb78c3954", "score": "0.63081807", "text": "def is_valid_identifier(string):\n return bool(_VALID_IDENTIFIER_RE.search(string))", "title": "" }, { "docid": "2cd3a04be89e7f75b4e6c9be06d98b2b", "score": "0.6273932", "text": "def _is_avid(query_string):\n return query_string.isdigit() and (7 <= len(query_string) <= 10)", "title": "" }, { "docid": "83c236be2aa36de92e8beb83f7a7a048", "score": "0.62563276", "text": "def is_song(d):\r\n #Not really precise, but should be good enough.\r\n return is_gm_id(d[\"id\"])", "title": "" }, { "docid": "7854ca313b948a4134c45a96d8375d72", "score": "0.6221448", "text": "def is_gr_id_token(token: str) -> bool:\r\n return bool(_GR_ID_TOKEN_RE.fullmatch(token))", "title": "" }, { "docid": "5aab38c02f9104bb7775b7448e3da9d6", "score": "0.6131062", "text": "def isidentifier(string):\n return _IDENTIFIERS_RE.match(string) is not None", "title": "" }, { "docid": "a64ef3883c252945cef978710d8dc30e", "score": "0.6108348", "text": "def valid_cve_id_format(cve_id: str) -> bool:\n return bool(re.match(cveRegex, cve_id))", "title": "" }, { "docid": "e3a44f444c5ae4d90a0f0c6b9769585b", "score": "0.6086889", "text": "def check_user_id(string):\n if not string:\n return False\n\n idsplit = string.split(\"-\")\n if len(idsplit) != 2:\n return False\n uid = idsplit[0]\n code = idsplit[1]\n\n if len(code) == 2:\n # Type2 code\n verify = encode_type2_user_id(int(uid))\n else:\n # Type1 code\n verify = encode_type1_user_id(int(uid))\n\n if string == verify:\n return True\n else:\n return False", "title": "" }, { "docid": "752004c24c004fb1a5fc16dacee4227a", "score": "0.603233", "text": "def is_id(self, p_id):\n if isinstance(p_id, str):\n if p_id[0].isalpha():\n return True\n return False", "title": "" }, { "docid": "32342e8f795744dcb11146198a805244", "score": "0.5829441", "text": "def isidentifier(string):\n return string.isidentifier()", "title": "" }, { "docid": "4f096b4f633cf9c8bbb16de8c5d0b864", "score": "0.5779153", "text": "def is_random_string_identifier(sid):\n sid_pattern = app.URLSAFE_CHAR * 12\n return apf.full_match(sid_pattern, sid)", "title": "" }, { "docid": "ab8510d53a10f641b93921cbddd0f087", "score": "0.5743334", "text": "def isIdentifier(id):\n for c in bytearray(id):\n if c < 32:\n return False\n if c == 255:\n return False\n return True", "title": "" }, { "docid": "52d17ec03d5a649561dc975e5bdf0ddc", "score": "0.5599504", "text": "def isvalid_identifier(s):\n # the re module compiles and caches regexs so no need to compile it\n return (s is not None and not iskeyword(s) and\n re.match(r'^[_a-zA-Z][_a-zA-Z0-9]*$', s) is not None)", "title": "" }, { "docid": "129ecdc58603148f9a32e549544048aa", "score": "0.55298334", "text": "def is_new_id(_id):\n return isinstance(_id, text_type) and _id[0] == '_'", "title": "" }, { "docid": "5747eaf1b4b7c63514acfacdf2791936", "score": "0.5509691", "text": "def is_guid(guid: str) -> bool:\n return bool(guid_regex.fullmatch(guid)) # identify if argument in an AliEn GUID", "title": "" }, { "docid": 
"6366f8951c76dc0b1c23bbbbc6cdfa52", "score": "0.54896325", "text": "def flag_music(_, flag):\n return OPTIONS['music_id'] == flag.value", "title": "" }, { "docid": "0d4dfe64d58dfb509b9d52849ec5b2ea", "score": "0.5485219", "text": "def identifier_is_valid(identifier):\n if not isinstance(identifier, basestring):\n raise ValueError('identifier must be a string')\n for c in identifier:\n if c not in string.letters + string.digits + '_':\n return False\n return True", "title": "" }, { "docid": "d7199e274f1041abde11fc92e8616f3d", "score": "0.5474346", "text": "def check_instance_id(self, id):\n regex_expression = re.compile(\"(^i-(\\w{8}|\\w{17})$)|(^mi-\\w{17}$)\")\n if regex_expression.match(id):\n return True\n else:\n return False", "title": "" }, { "docid": "a00e087ecf7f7064ec1d08e80c5b1848", "score": "0.5426727", "text": "def is_valid_nucleotide_string(s):\n s = s.upper().replace('U', 'T')\n return all([_ in DEGENERATE_NUCLEOTIDES for _ in s])", "title": "" }, { "docid": "53a136d44ce50bcc5b2309553e765dd8", "score": "0.54256797", "text": "def is_isogram(string):\n\n normalized_str = string.replace(SPACE, '').replace(HYPHEN, '').lower()\n\n return len(normalized_str) == len(set(normalized_str))", "title": "" }, { "docid": "e2682294b811fdce4f05a787669a964e", "score": "0.5397229", "text": "def cookie_is_encoded(data):\n return bool(data.startswith(tob('!')) and tob('?') in data)", "title": "" }, { "docid": "acef9b84eb6689fffda086060820bfee", "score": "0.5393329", "text": "def is_uuid_string(string: str) -> bool:\n if string is None:\n return False\n if len(string) != 36:\n return False\n UUID_PATTERN = re.compile(r'^[\\da-f]{8}-([\\da-f]{4}-){3}[\\da-f]{12}$', re.IGNORECASE)\n if UUID_PATTERN.match(string):\n return True\n else:\n return False", "title": "" }, { "docid": "72e84cf73f67cd5b4bdaf354ebfb8e49", "score": "0.5380964", "text": "def check_id(cls, _id):\n if len(str(_id)) != 9:\n return False\n sum = 0\n factor = 0\n for i in _id:\n tmp = int(i) * (factor + 1)\n factor = (factor + 1) % 2\n dig = 1 if tmp > 9 else 0\n sum += tmp % 10 + dig\n return False if sum % 10 != 0 else True", "title": "" }, { "docid": "be3b1593cdd37a529a14cdea5c6f95e5", "score": "0.5380194", "text": "def is_phone(string: Text) -> bool:\n try:\n value = int(string)\n regex = \"\\w{10}\"\n if re.search(regex,string):\n return True\n except :\n return False", "title": "" }, { "docid": "9c0bc1e363f2db4d7034947da5bc6181", "score": "0.5348842", "text": "def machine_id(machine_id):\n if not isinstance(machine_id, str):\n raise TypeError('machine_id must be a string.')\n if len(machine_id) != 64:\n raise ValueError('machine_id must be exactly 64 bytes/characters.')\n for letter in machine_id:\n if letter not in '0123456789abcdef':\n raise ValueError('machine_id must be only 0-9, a-f (lowercase)')\n return True", "title": "" }, { "docid": "1b4a5130d7360cbe0fa33117a5d3e7cd", "score": "0.5344987", "text": "def is_channel(string):\n return string and string[0] in \"#&+!\"", "title": "" }, { "docid": "c951f6872df90c653aebbec7ce8832d7", "score": "0.5336667", "text": "def is_synapse_id(cls, value):\n if isinstance(value, str):\n return re.match('^syn[0-9]+$', value.strip(), re.IGNORECASE) is not None\n else:\n return False", "title": "" }, { "docid": "32dddd3722d3b10e360738bb87cedf08", "score": "0.5332616", "text": "def is_valid(self, id_string):\n pass", "title": "" }, { "docid": "d7503690b200377eaefa1b895b99105c", "score": "0.53296936", "text": "def string_conforms_to_base64(string):\n return (len(string) % 4 == 0) and 
re.match('^[A-Za-z0-9+/]+[=]{0,2}$', string)", "title": "" }, { "docid": "21c46dabb0433082e67bde63c8e8f328", "score": "0.53152794", "text": "def parse_id(data: str) -> int:\r\n\r\n if type(data) == int:\r\n return data\r\n\r\n try:\r\n data = data.strip()\r\n except:\r\n return False\r\n\r\n if data.isnumeric() and len(data) == 18:\r\n # User ID is given\r\n user_id = int(data)\r\n elif data[0:3] == \"<@!\" or data[0:3] == \"<@&\":\r\n user_id = int(data[3:-1])\r\n elif data[0:2] == \"<@\" or data[0:2] == \"<#\":\r\n user_id = int(data[2:-1])\r\n else:\r\n return False\r\n\r\n return user_id", "title": "" }, { "docid": "ea191984b4fb2903d83a5e38a27c36c5", "score": "0.5310239", "text": "def check_for_valid_ADRC_ID( string_to_check):\n\tm = adrc_pat_one.match(string_to_check)\n\tm_second_pat = adrc_pat_two.match(string_to_check)\n\tm_third_pat = adrc_pat_three.match(string_to_check)\n\tif m:\n\t\tpatient_id = m.group(1)\n\t\tsection_id = m.group(2)\n\t\tstain = m.group(3)\n#\t\tprint patient_id,section_id,stain\n\t\treturn(True)\n\telif m_second_pat:\n\t\tpatient_id = m_second_pat.group(1)\n\t\tsection_id = m_second_pat.group(2)\n\t\tstain = m_second_pat.group(3)\n#\t\tprint patient_id,section_id,stain\n\t\treturn(True)\n\telif m_third_pat:\n\t\tpatient_id = m_third_pat.group(1)\n\t\tsection_id = m_third_pat.group(2)\n\t\tstain = m_third_pat.group(3)\n\telse:\n\t\tprint \"no match\",string_to_check\n\t\treturn(False)", "title": "" }, { "docid": "d00a24751b8207b521ce8dcd283a53bb", "score": "0.53043175", "text": "def is_zuuid(s):\n return re_zuuid.match(s)", "title": "" }, { "docid": "efdb34d60814ca705d7bf9a0fb357f18", "score": "0.52956474", "text": "def validate_google_user_id(value):\n if value and not re.match(r'^\\d{21}$', value):\n raise ValidationError(_('Google user ID should be 21 digits.'))", "title": "" }, { "docid": "8fe01f805d1a5752570685462a25336b", "score": "0.52877545", "text": "def is_quantifier(string: str) -> bool:\n return string == 'A' or string == 'E'", "title": "" }, { "docid": "7d5131694276a3fad6bb0e7f5e57d3d3", "score": "0.52463144", "text": "def _is_lei(query_string):\n return (query_string.isalnum() and \n len(query_string) == 20 and\n query_string[-2:].isdigit())", "title": "" }, { "docid": "5151b9eed26734153e3dc7624199c100", "score": "0.52236056", "text": "def is_jid(jid):\n ret = False\n jid = six.text_type(jid)\n if len(jid) > 21 and jid[20] == '_':\n try:\n jid, pid = jid.split('_', 1)\n ret = bool(int(jid) and int(pid)) # Pid cannot be 0.\n except ValueError:\n pass\n\n return ret", "title": "" }, { "docid": "18873101627b5bb169a76435be9cfdb2", "score": "0.52035564", "text": "def check_passport_id(val):\n return len(val) == 9 and val.isdigit()", "title": "" }, { "docid": "66bb1a4e692acd618469a8d2e4ecfd10", "score": "0.5192838", "text": "def includes_harlequin(string):\n return re.search('harlequin', string.lower())", "title": "" }, { "docid": "afa9287ca9983170bc0df5de91d8fe58", "score": "0.51866555", "text": "def check_identifier(self, id_, type_, log):\n if id_.startswith(\"$\"):\n id_ = id_[1:]\n\n if id_ in self.identifiers:\n that_type = self.identifiers[id_].type_\n if that_type == type_:\n log.debug(\"%s used known experimenter id: %s\",\n type_, id_)\n return True\n log.warning(\"experimenter id found with wrong type,\"\n \" expected %s found %s\", type_, that_type)\n else:\n maybes = [k for k, v in viewitems(self.identifiers)\n if v.type_ == type_]\n maybes = difflib.get_close_matches(id_, maybes)\n maybes = \" or \".join(maybes)\n if maybes:\n 
log.warning(\"Experimental %s id %s not found\"\n \" - did you mean: %s?\", type_, id_, maybes)\n else:\n log.warning(\"Experimental %s id %s not found\", type_, id_)\n return False", "title": "" }, { "docid": "4ecc12bc6a15efcb6b820a1f1ed4251c", "score": "0.518605", "text": "def is_interstitial(self):\n from re import match\n return match(\"[A-Z][a-z]?_interstitial_\\S+\", self.name) is not None", "title": "" }, { "docid": "7e92608a8c4ba925658a7fffcd224cf5", "score": "0.51841617", "text": "def is_god_name(word: str):\n return word.startswith(\"{d}\") and word.count('-') <= 1", "title": "" }, { "docid": "8a4fef17fe6cf00491945435bba2e30a", "score": "0.51743084", "text": "def test_has_valid_id_with_not_alphanumeric_chars(self):\n raw_id_data = 'N#%6'\n is_valid = self.validator.has_valid_id(raw_id_data)\n self.assertFalse(is_valid)", "title": "" }, { "docid": "29c2e40adf5aee3f00c44c4bc45e4265", "score": "0.5129163", "text": "def valid_account_id(account_id):\n valid = True\n\n # Valid length\n valid = valid and len(account_id) == 64\n\n # Valid characters\n valid_chars = '_ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'\n tmp_account_id = str(account_id.upper())\n for valid_char in valid_chars:\n tmp_account_id = tmp_account_id.replace(valid_char, '')\n valid = valid and len(tmp_account_id) == 0\n\n return valid", "title": "" }, { "docid": "02ed295e9a83edb7b56e00c0ff108d4b", "score": "0.5123445", "text": "def verify_string(self, string):\n regexp = re.search('^\\$(GP[A-Z]{3}.+)\\*([0-9A-F]{2})', string)\n if not regexp:\n return False\n checksum = int(regexp.group(2), 16)\n int_values = [ord(x) for x in regexp.group(1)]\n value = 0\n for x in int_values:\n value = value ^ x\n return True if value == checksum else False", "title": "" }, { "docid": "c1d445ed26fdab95980752c36222c50d", "score": "0.51167834", "text": "def is_valid_display_identifier(display_identifier):\n return bool(DISPLAY_REGEX.match(display_identifier))", "title": "" }, { "docid": "25a84c7a91316becf9beb0592cee8cdb", "score": "0.51139414", "text": "def check_id(id_: str) -> None:\n if id_ == \"\" or all_base62.search(id_) is None:\n raise ConversionError(f\"Invalid id: {id_!r}!\")", "title": "" }, { "docid": "f4b1d8f198bdbffe00faa45ba1056a33", "score": "0.51048166", "text": "def valid_identifier(s):\n if isinstance(s, _strtypes):\n if not s or s[0].isdigit():\n return\n return s.replace(' ', '_').replace('.', '_').replace('-', '_')\n return s", "title": "" }, { "docid": "555d472770c1fc677cfeb82910ab91ea", "score": "0.5104736", "text": "def valid_reference_id(ref):\n \n # ref code must match a prefix\n if not re.match(\"OMIM|ORPH|DECI|DISEASE|PMID\", ref):\n return False\n # ref must have only two components\n parts = ref.split(\":\")\n if len(parts)>2:\n return False\n # each component must be non-empty\n if min([len(_) for _ in parts])<1:\n return False\n \n return True", "title": "" }, { "docid": "7c952bcf496dbbdbe4c5e9127da2a2bd", "score": "0.51022357", "text": "def isValid(text):\n return all(word in text for word in [\"微信\", \"二维码\"])", "title": "" }, { "docid": "71ac50aaf21f940727c41513dfe3d772", "score": "0.5087304", "text": "def is_valid_cve_id(cve_id) -> bool:\n return cve_pattern.match(cve_id) is not None", "title": "" }, { "docid": "32cb6a759db3a97e87ac882552da8d12", "score": "0.5082401", "text": "def validate_oid(oid):\n find_illegal = re.search('[^0-9a-fA-F]+', oid)\n if find_illegal:\n return False\n else:\n if len(oid) >= 40 and len(oid) <= 128:\n return True\n else:\n return False", "title": "" }, { "docid": 
"e622581f5ac6e0841d2c4ce769446660", "score": "0.5080688", "text": "def storage_name_valid(name: str) -> bool:\n if re.match('[a-z0-9]{3,24}$', name):\n return True\n return False", "title": "" }, { "docid": "2baf66a5ac2caf58d5a43bd4dac58735", "score": "0.5074452", "text": "def is_emoji_name_like(self, string):\n return re.search(r\":[A-Za-z\\-_]+:\", string) is not None", "title": "" }, { "docid": "b88402183e09da2528d2c7e5a50ca57e", "score": "0.5070839", "text": "def header_quopri_check(c):\r\n return bool(hqre.match(c))", "title": "" }, { "docid": "3905d69dd77a0a6d15e9d5252ea3c533", "score": "0.5065215", "text": "def isIdentChar(c: 'char const') -> \"SbBool\":\n return _coin.SbName_isIdentChar(c)", "title": "" }, { "docid": "d69e1627af922e0513ee4dd1ecb0f421", "score": "0.5058171", "text": "def safeId(id, nospaces=0):\r\n lowercase = 'abcdefghijklmnopqrstuvwxyz'\r\n digits = '0123456789'\r\n specials = '_-.'\r\n allowed = lowercase + lowercase.upper() + digits + specials\r\n if not nospaces:\r\n allowed = ' ' + allowed\r\n n_id=[]\r\n allowed_list = list(allowed)\r\n for letter in list(id):\r\n if letter in allowed_list:\r\n n_id.append(letter)\r\n return ''.join(n_id)", "title": "" }, { "docid": "c6d8c072bc1b143af7c91c7713fbf2bc", "score": "0.50554454", "text": "def is_unique2(self, string: str) -> bool:\n total_chars = 256 # 128 ASCII, 256 for UNICODE\n buffer = [0 for i in range(total_chars)]\n for char in string:\n if buffer[ord(char)] == 0:\n buffer[ord(char)] = 1\n elif buffer[ord(char)] == 1:\n return False\n return True", "title": "" }, { "docid": "08976b5712e1fde777a6346622a0aa19", "score": "0.5037239", "text": "def test_has_valid_id_too_short(self):\n raw_id_data = '605'\n is_valid = self.validator.has_valid_id(raw_id_data)\n self.assertFalse(is_valid)", "title": "" }, { "docid": "3f86cd7c879432b723d36d9e7cc277a9", "score": "0.5025728", "text": "def is_non_empty_string(input_id_key):\n if input_id_key and isinstance(input_id_key, string_types):\n return True\n\n return False", "title": "" }, { "docid": "9de2bdc0484fb47db8a5025cc24cf66c", "score": "0.5016616", "text": "def is_palin(string):\n start, end = 0, len(string) - 1\n while end > start:\n if string[start] != string[end]:\n return False\n start += 1\n end -= 1\n return True", "title": "" }, { "docid": "0eafbc73cf966d02917f980922494730", "score": "0.5010141", "text": "def is_valid_sequence(s):\n nucs = \"ATCG\"\n counterL = []\n total = 0\n for i in s:\n if i in nucs and not i in counterL:\n total += 1\n counterL += i\n elif i not in nucs: return False\n return (total >= 4) and (nucs == \"ATCG\")", "title": "" }, { "docid": "d6fdab36ac895d6249de87ffea2ce4b8", "score": "0.5009293", "text": "def is_identifier(self, char: str):\n\n if char.isalpha() or char in (\"_\", ): # More coming soon\n return True", "title": "" }, { "docid": "9f19b211264d3979f0a2012b43f35bf4", "score": "0.50031674", "text": "def stringcheck_str(self, string):\n string = str(string) # because of 2.7 input problem\n if string != \"\":\n return True\n else:\n return False", "title": "" }, { "docid": "a168d8db3ceb6f2cf59c48f89797c08a", "score": "0.5002282", "text": "def is_spotify(self, name):\n if match_url(name) and 'open.spotify' in name:\n return True\n if name.startswith('spotify:'):\n return True\n return False", "title": "" }, { "docid": "be0e2a86e2a20cf542b4e4b411d60149", "score": "0.5002182", "text": "def test_has_valid_id_with_missing_numeric_character(self):\n raw_id_data = \"AC67\"\n is_valid = self.validator.has_valid_id(raw_id_data)\n 
self.assertFalse(is_valid)", "title": "" }, { "docid": "3523269098e0c935eefa695bb29fd888", "score": "0.50014615", "text": "def _heuristic_is_identifier(value):\n first = str(value)[0]\n return first != '-' and not first.isdigit()", "title": "" }, { "docid": "0225713f55f56dfe5ad9725f71e9a5a4", "score": "0.50010496", "text": "def is_variable(string: str) -> bool:\n return string[0] >= 'u' and string[0] <= 'z' and string.isalnum()", "title": "" }, { "docid": "6f9a8e26c9eb8dfa1d88acf31c46e671", "score": "0.49978906", "text": "def is_valid(self, input_string: str) -> bool:\n ...", "title": "" }, { "docid": "38cf34ad1d8283ab1d084f8092075d7e", "score": "0.49868488", "text": "def IsPubidChar(c):\n return PubidCharClass.test(c)", "title": "" }, { "docid": "a434a8030005db9f9d5590051523d17e", "score": "0.49824592", "text": "def is_valid_genotype_fields_string(genotype_fields_string):\n result = False\n # this regex means \"one or more characters that is not a comma, period, colon, zero, or forward slash\"\n content_char_match = re.search(r\"[^,.:0\\/]+\", genotype_fields_string)\n # NB: necessary to ALSO check first character of string, even if no match to above regex is found, because\n # \"0/0\" should be a valid genotype (even though all the characters it contains could signal null content in\n # other configurations), and a genotype fields string with nothing but a genotype in it should be legal.\n if content_char_match is not None or not genotype_fields_string.startswith(\".\"):\n result = True\n return result", "title": "" }, { "docid": "ee7ec8d1307d5fd4b93551d618c91a79", "score": "0.49793106", "text": "def valid_id(id_string, reserved_ok=False):\n reserved = (\n [ layout.INITIAL_VALUES_ID\n , layout.COLL_ROOT_CONF_OLD_DIR\n , layout.SITEDATA_ID\n ])\n #@@DEBUG - write traceback to local file\n # DEBUG = (id_string in reserved) and (not reserved_ok)\n # if DEBUG:\n # with open(\"debug-traceback.log\", \"a\") as f:\n # f.write(\"@@@@ Unexpected identifier %s\"%(id_string))\n # f.write(\"\".join(traceback.format_stack()))\n #@@\n # cf. urls.py:\n if id_string and re.match(r\"\\w{1,128}$\", id_string):\n return reserved_ok or (id_string not in reserved)\n # log.warning(\"util.valid_id: id %s\"%(id_string))\n return False", "title": "" }, { "docid": "e036153fdf2f96340354c0918a307993", "score": "0.49781483", "text": "def SbName_isIdentChar(c: 'char const') -> \"SbBool\":\n return _coin.SbName_isIdentChar(c)", "title": "" }, { "docid": "e988ed6f6e29d47a7611cbfdd28181a2", "score": "0.49756396", "text": "def test_has_valid_id_with_missing_alpha_character(self):\n raw_id_data = \"6758\"\n is_valid = self.validator.has_valid_id(raw_id_data)\n self.assertFalse(is_valid)", "title": "" }, { "docid": "d159707177a5342c303d50ae80d490f8", "score": "0.49729326", "text": "def is_glob(s):\n return len(glob_letters.intersection(s)) > 0", "title": "" }, { "docid": "e34f4ce48a42198d59f23277750c88b0", "score": "0.49656326", "text": "def contains_data_task_id(value: str) -> bool:\n return isinstance(value, str) and \\\n value.startswith('$') and \\\n value.endswith('$')", "title": "" }, { "docid": "e3d16508673a25729da9356eedb6289d", "score": "0.4955603", "text": "def checkIdentifier(id):\n if not Identifier.isIdentifier(id):\n raise IdentifierException(\"String '%s' is not a valid identifier! 
Ensure its binary representation only contains characters >= 32\")", "title": "" }, { "docid": "59439fd7343c34866a7acd1f76f38e35", "score": "0.49352401", "text": "def validate_user_id(user_id):\n user_id = str(user_id)\n if not valid_re.match(user_id):\n msg = \"Invalid user id %s, please only use letters, numbers and _\"\n raise ValueError(msg % user_id)\n return user_id", "title": "" }, { "docid": "dc8b105347e3cd6c402112ab5f1d3a8e", "score": "0.4917988", "text": "def _sane(lang_id):\n return (isinstance(lang_id, basestring)\n and len(lang_id) == 2\n and lang_id.isalpha()\n and lang_id.islower())", "title": "" }, { "docid": "0c2c0500c33bc3936cd9bbfaef15e2ba", "score": "0.49065542", "text": "def isIdentStartChar(c: 'char const') -> \"SbBool\":\n return _coin.SbName_isIdentStartChar(c)", "title": "" }, { "docid": "b6e005100c6d2cee189433d96628845d", "score": "0.49060687", "text": "def is_unique(self, string: str) -> bool:\n buffer = set()\n for char in string:\n if char not in buffer:\n buffer.add(char)\n elif char in buffer:\n return False\n return True", "title": "" }, { "docid": "507c0b44096cf27af448d19ed63adc22", "score": "0.4901622", "text": "def SbName_isIdentStartChar(c: 'char const') -> \"SbBool\":\n return _coin.SbName_isIdentStartChar(c)", "title": "" }, { "docid": "ea678ccc5a283efaa414c2aa1fb96976", "score": "0.48974344", "text": "def check_libname(name):\n # name = <str>\n # ch = <str>\n # return <int>|<bool>\n name = str(name)\n if not name:\n return 0\n return (name[0] in _FIRST_LETTERS and\n all(ch in _OTHER_LETTERS for ch in name[1:]))", "title": "" }, { "docid": "64456ac30e17f76e254d640a1cc532b9", "score": "0.48816082", "text": "def valid_lang(x: str) -> bool:\n return x in LANGS", "title": "" }, { "docid": "3941ee44a96b1a78345b944a450889b1", "score": "0.48806885", "text": "def validate_synapse_id(cls, value: str) -> str:\n if not re.search(\"^syn[0-9]+\", value):\n raise ValueError(f\"{value} is not a valid Synapse id\")\n return value", "title": "" }, { "docid": "3941ee44a96b1a78345b944a450889b1", "score": "0.48806885", "text": "def validate_synapse_id(cls, value: str) -> str:\n if not re.search(\"^syn[0-9]+\", value):\n raise ValueError(f\"{value} is not a valid Synapse id\")\n return value", "title": "" }, { "docid": "a37c025168de87e2afe32cc264f51a2b", "score": "0.48746097", "text": "def test_has_valid_id_too_long(self):\n raw_id_data = 'C7224'\n is_valid = self.validator.has_valid_id(raw_id_data)\n self.assertFalse(is_valid)", "title": "" }, { "docid": "262b087842f63afb951f9328e41fca14", "score": "0.4872656", "text": "def valid(some_id):\n return some_id // 10000 == 2019", "title": "" }, { "docid": "16505f3cebf57da4ac0f1d8b68669191", "score": "0.4867083", "text": "def is_identifier( word ):\r\n if not word:\r\n return False\r\n if '_' in word:\r\n return True\r\n rest_of_word = word[1:]\r\n for ch in rest_of_word:\r\n if ch == ch.upper():\r\n return True\r\n return False", "title": "" }, { "docid": "17390190301b0da1194453a866c3478b", "score": "0.48605952", "text": "def is_unique3(self, string: str) -> bool:\n string = \"\".join(sorted(string))\n for i in range(len(string)):\n if string[i] == string[i - 1]:\n return False\n return True", "title": "" }, { "docid": "04e7b5bfdc10ab09bff769e2dfa49146", "score": "0.4858536", "text": "def validate_id(value):\n # http://plone.org/documentation/manual/developer-manual/forms/\n # using-zope.formlib/adding-validation\n if not re.match(\"^[A-Za-z][A-Za-z0-9_]*$\", value):\n raise InvalidId(value)\n return True", "title": "" }, { 
"docid": "6382492415248763af0848d878fd3e37", "score": "0.48543355", "text": "def is_constant(string: str) -> bool:\n return (((string[0] >= '0' and string[0] <= '9') or \\\n (string[0] >= 'a' and string[0] <= 'd')) and \\\n string.isalnum()) or string == '_'", "title": "" }, { "docid": "2951074c6b0c497e0de4cfec81ec8614", "score": "0.48508438", "text": "def isIdNS(self, namespaceURI, localName):\r\n return False", "title": "" }, { "docid": "1d2f6c238b99d2465c226c4de9edba35", "score": "0.48501512", "text": "def is_unique_2(string):\n bits = 0\n for char in string:\n position = 1 << ord(char)\n bits ^= position\n if bits & position == 0:\n return False\n return True", "title": "" }, { "docid": "cb13fd55e8d70ac64d3e46a308dac1f6", "score": "0.484625", "text": "def test_get_trackId_from_url(self):\n self.assertEqual(\n Spotify.get_trackId_from_url(\n \"https://open.spotify.com/track/3h3pOvw6hjOvZxRUseB7h9?si=Ci-fm4N2TYq7kKlJANDnhA\" # noqa: E501\n ),\n \"3h3pOvw6hjOvZxRUseB7h9\",\n )", "title": "" }, { "docid": "677d39cd52529fc6eb60e5f523a037b8", "score": "0.48414698", "text": "def stringcheck_int(self, string):\n string = str(string) # because of 2.7 input problem\n if re.match(\"^-?\\d+$\", string) != None:\n return True\n else:\n return False", "title": "" }, { "docid": "cdf53bc3a1f009f3aa6c91aea78aa943", "score": "0.48413512", "text": "def validate_id(self, arg):\n args = arg.split(' ')\n if len(args) < 2:\n print(HBNBCommand.ERROR_ID)\n return False\n id_number = args[1]\n return id_number", "title": "" }, { "docid": "378e471161237262cd5a5ba8426947a9", "score": "0.48304522", "text": "def is_quantifier(s):\n return s == \"A\" or s == \"E\"", "title": "" }, { "docid": "16c5a32d72b19dc35b09930980a7ed71", "score": "0.48200774", "text": "def is_relation(string: str) -> bool:\n return string[0] >= 'F' and string[0] <= 'T' and string.isalnum()", "title": "" }, { "docid": "f57887f54ce4cd9cc32c415d49848636", "score": "0.48196086", "text": "def test_validate_id_too_short():\n assetpack = get_test_assetpack()\n with raises(PyInquirer.ValidationError):\n assert validate_component_id('z', assetpack) is False", "title": "" } ]
f60393a5315a0be0bd462b1ad5888fd5
Converts a connectivity tensor to a dgl graph with edge and node features.
[ { "docid": "5609fad7909b17ecf78df3cf43f4ba09", "score": "0.6688228", "text": "def _connectivity_to_dgl_edge(connectivity,sparsify=False):\n assert len(connectivity.shape)==3, \"Should have a shape of N,N,2\"\n N,_,_ = connectivity.shape\n distances = connectivity[:,:,1]\n mask = torch.ones_like(connectivity)\n if sparsify:\n mask = torch.zeros_like(connectivity)\n assert isinstance(sparsify,int), f\"Sparsify not recognized. Should be int (number of closest neighbors), got {sparsify}\"\n knns = npargpartition(distances, kth=sparsify, axis=-1)[:, sparsify ::-1].copy()\n range_tensor = torch.tensor(range(N)).unsqueeze(-1)\n mask[range_tensor,knns,1] = 1\n mask[:,:,1] = mask[:,:,1]*(1-torch.eye(N)) #Remove the self value\n mask[:,:,0] = sparsify*torch.eye(N)\n connectivity = connectivity*mask\n adjacency = (connectivity!=0).to(torch.float)\n gdgl = _connectivity_to_dgl_adj(adjacency)\n src,rst = gdgl.edges() #For now only contains node features\n efeats = distances[src,rst]\n gdgl.edata[\"feat\"] = efeats.reshape((efeats.shape[0],1))\n return gdgl", "title": "" } ]
[ { "docid": "428149d33b5dcdfa8fa35ea1acbb7cb4", "score": "0.7025889", "text": "def adj_to_dgl_graph(adj):\n nx_graph = nx.from_scipy_sparse_matrix(adj)\n dgl_graph = dgl.DGLGraph(nx_graph)\n return dgl_graph", "title": "" }, { "docid": "760c8511bc57de853e2a221c132ee3bd", "score": "0.6932594", "text": "def fast_networkx_to_dgl(\n graph, node_attrs=[\"text_idx\", \"type\"], edge_attrs=[\"flow\", \"position\"]\n):\n\n edges = [edge for edge in graph.edges()]\n dgl_graph = dgl.graph(edges, num_nodes=graph.number_of_nodes())\n\n for feat in edge_attrs:\n edge_assigns = torch.tensor(\n [val[-1] for val in graph.edges(data=feat)], dtype=torch.int64\n )\n dgl_graph.edata[feat] = edge_assigns\n\n for feat in node_attrs:\n node_assigns = torch.tensor(\n [val[-1] for val in graph.nodes(data=feat)], dtype=torch.int64\n )\n dgl_graph.ndata[feat] = node_assigns\n\n return dgl_graph", "title": "" }, { "docid": "9ded53ade13216abe3fa1acf5aa3fb7c", "score": "0.6749192", "text": "def connectivity_to_dgl(connectivity_graph):\n if len(connectivity_graph.shape)==4:#We assume it's a siamese dataset, thus of shape (2,N,N,in_features)\n assert connectivity_graph.shape[0]==2\n assert connectivity_graph.shape[1]==connectivity_graph.shape[2]\n graph1,graph2 = connectivity_to_dgl(connectivity_graph[0]), connectivity_to_dgl(connectivity_graph[1])\n return (graph1,graph2)\n elif len(connectivity_graph.shape)==3:#We assume it's a simple dataset, thus of shape (N,N,in_features)\n assert connectivity_graph.shape[0]==connectivity_graph.shape[1]\n if is_adj(connectivity_graph[:,:,1]):\n return _connectivity_to_dgl_adj(connectivity_graph)\n return _connectivity_to_dgl_edge(connectivity_graph)", "title": "" }, { "docid": "8889e18a981420f3da983d36fcd16025", "score": "0.66971153", "text": "def ConstructGraph(edges, n_entities, args):\n if args.has_edge_importance:\n src, etype_id, dst, e_impts = edges\n else:\n src, etype_id, dst = edges\n coo = sp.sparse.coo_matrix((np.ones(len(src)), (src, dst)), shape=[n_entities, n_entities])\n g = dgl.DGLGraph(coo, readonly=True, multigraph=True, sort_csr=True)\n g.edata['tid'] = F.tensor(etype_id, F.int64)\n if args.has_edge_importance:\n g.edata['impts'] = F.tensor(e_impts, F.float32)\n return g", "title": "" }, { "docid": "576cdaa5d03d91c86bc1ab6e94b94d49", "score": "0.63676363", "text": "def load_graph(connection):\n\n query = \"\"\"\n SELECT eid, gamma, distance, startnode, endnode, geom\n FROM model2.edges;\n \"\"\"\n\n e = gpd.GeoDataFrame.from_postgis(query, con=connection)\n attrs = ['distance', 'gamma', 'geom', 'eid'] # attributes of an edge\n return nx.convert_matrix.from_pandas_edgelist(e, 'startnode', 'endnode', attrs)", "title": "" }, { "docid": "09814d72bb803828607b2df958fa0559", "score": "0.6148687", "text": "def make_dense_graph(self, graph):\n T = nx.Graph()\n for (s, e) in graph.edges():\n ps = graph[s][e]['pts']\n # Add first node\n start = graph.nodes()[s]['o']\n stop = ps[0]\n # Add the edge nodes\n T = self.add_node_edge(T, start, stop)\n for i in range(len(ps) - 1):\n T = self.add_node_edge(T, ps[i], ps[i + 1])\n # Add the last node\n start = ps[-1]\n stop = graph.nodes()[e]['o']\n T = self.add_node_edge(T, start, stop)\n return T", "title": "" }, { "docid": "0afc469b48e8e1965b0b7b7fff0807b3", "score": "0.6098195", "text": "def process_networkx_graph(\n graph,\n vocab,\n node_feature_list=[\"text\", \"type\"],\n edge_feature_list=[\"flow\", \"position\"],\n):\n update_graph_with_vocab(graph.nodes, node_feature_list, vocab)\n update_graph_with_vocab(graph.edges, 
edge_feature_list, vocab)\n\n dgl_graph = fast_networkx_to_dgl(graph)\n return dgl_graph", "title": "" }, { "docid": "fc1bc286d017fee9d42fbcc01a22e652", "score": "0.59795743", "text": "def corpus_to_graph(corpus_doc):\n words = corpus_doc.split(' ')\n num_words = len(words)\n\n # get distinct sorted words\n words_ds = list(set(words))\n words_ds.sort()\n n = len(words_ds)\n\n # A_hat - n*n matrix to describe connections\n # we skip creating A and I and jump straight to A_hat\n # A_hat is adding A node connection matrix to I identity matrix\n A_hat = sparse.eye(n).todok()\n\n for pos in range(num_words):\n word = words[pos]\n word_idx = index(words_ds, word)\n\n if pos + 1 < num_words:\n n_word = words[pos + 1]\n n_word_idx = index(words_ds, n_word)\n A_hat[word_idx, n_word_idx] = 1\n\n # D - degree of nodes matrix\n D = A_hat.sum(0)[0] # sum will make this a numpy matrix\n D = np.squeeze(np.asarray(D)) # make it a 1d array\n D = np.matrix(np.diag(D))\n\n # X - create a feature matrix, should just be vectors for each node\n X = []\n\n for pos in range(n):\n word = words_ds[pos]\n word_vector = get_word_vec(word)\n X.append(word_vector)\n\n X = np.array(X)\n\n layer_1_size = 10\n layer_2_size = 3\n layer_3_size = 1\n\n W_1 = np.random.normal(loc=0, scale=1, size=(X.shape[1], layer_1_size))\n W_2 = np.random.normal(loc=0, size=(W_1.shape[1], layer_2_size))\n W_3 = np.random.normal(loc=0, size=(W_2.shape[1], layer_3_size))\n\n # print(\"mat_D.shape\", mat_D.shape)\n # print(\"mat_A.shape\", mat_A.shape)\n # print(\"mat_X.shape\", mat_X.shape)\n # print(\"shape(np.linalg.inv(mat_D) * mat_A * mat_X)\", (np.linalg.inv(mat_D) * mat_A * mat_X).shape)\n # print(\"W_1.shape\", W_1.shape)\n\n # scipy.linalg.fractional_matrix_power(D, -0.5)\n\n # Mean Rule\n # D_hat = linalg.fractional_matrix_power(D, -1.)\n # l1_out = relu(D_hat * A_hat * X * W_1)\n # l2_out = relu(D_hat * A_hat * l1_out * W_2)\n # l3_out = relu(D_hat * A_hat * l2_out * W_3)\n\n # Spectral Rule\n D_hat = linalg.fractional_matrix_power(D, -0.5)\n l1_out = relu(D_hat * A_hat * D_hat * X * W_1)\n l2_out = relu(D_hat * A_hat * D_hat * l1_out * W_2)\n l3_out = relu(D_hat * A_hat * D_hat * l2_out * W_3)\n\n return (A_hat, X, n, D, W_1, W_2, l1_out, l2_out, l3_out)", "title": "" }, { "docid": "273eaa87881d47d6bd189e24cbd97e42", "score": "0.58924884", "text": "def network(\n conn, nodes_data=None, nodes_name=None, nodes_x=None, nodes_y=None,\n nodes_z=None, nodes_color=None, nodes_size=None, nodes_size_min=1,\n nodes_size_max=30, nodes_cmap='plasma', edges_min=None, edges_max=None,\n edges_width_min=.5, edges_width_max=8, edges_opacity_min=0.1,\n edges_opacity_max=1., edges_cmap='plasma', cbar=True, cbar_title='Edges',\n directed=False, fig=None, kw_trace={}, kw_cbar={}):\n import plotly.graph_objects as go\n\n # -------------------------------------------------------------------------\n # I/O\n # -------------------------------------------------------------------------\n # connectivity matrix conversion\n conn = io_to_df(conn, xr_pivot=True)\n plt_in = '3D' if nodes_z is not None else '2D'\n\n # get node names and coordinates in case of dataframe\n kw_nodes = dict(nodes_name=nodes_name, nodes_size=nodes_size,\n nodes_x=nodes_x, nodes_y=nodes_y, nodes_z=nodes_z,\n nodes_color=nodes_color)\n if isinstance(nodes_data, pd.DataFrame):\n kw_nodes = extract_df_cols(nodes_data, **kw_nodes)\n\n # get useful variables for plotting\n df_nodes, df_edges = prepare_to_plot(\n conn, nodes_size_min=nodes_size_min, nodes_size_max=nodes_size_max,\n 
edges_min=edges_min, edges_max=edges_max,\n edges_width_min=edges_width_min, edges_width_max=edges_width_max,\n edges_opacity_min=edges_opacity_min,\n edges_opacity_max=edges_opacity_max, directed=directed,\n edges_cmap=edges_cmap, edges_sorted=True, edges_rm_missing=True,\n **kw_nodes\n )\n\n # -------------------------------------------------------------------------\n # PLOT VARIABLES\n # -------------------------------------------------------------------------\n # build edges lines\n edges_x = np.c_[df_edges['x_s'], df_edges['x_t']]\n edges_y = np.c_[df_edges['y_s'], df_edges['y_t']]\n edges_z = np.c_[df_edges['z_s'], df_edges['z_t']]\n\n # automatic nodes_size ratio\n sizeref = np.max(df_nodes['size_plt']) / nodes_size_max ** 2\n\n # prepare hover data\n hovertemplate = (\n \"<b>Node :</b> %{text} <br><b>Size :</b> %{customdata[0]:.3f} <br>\"\n \"<b>Color :</b> %{customdata[1]:.3f}<br>\"\n \"<b>Degree :</b> %{customdata[2]}\")\n\n # hover custom data\n customdata = np.stack(\n (df_nodes['size'], df_nodes['color'], df_nodes['degree']), axis=-1)\n\n if fig is None:\n fig = go.Figure()\n\n # switch between 2D and 3D representations\n Scatter = go.Scatter3d if plt_in == '3D' else go.Scatter\n\n # -------------------------------------------------------------------------\n # NODES PLOT\n # -------------------------------------------------------------------------\n # node plot\n kw_nodes = dict(x=list(df_nodes['x']), y=list(df_nodes['y']))\n if plt_in == '3D':\n kw_nodes['z'] = list(df_nodes['z'])\n node_trace = Scatter(\n mode='markers+text', text=list(df_nodes['name']), name='Nodes',\n textposition=\"top center\", hovertemplate=hovertemplate,\n customdata=customdata, marker=dict(\n showscale=False, colorscale=nodes_cmap, sizemode='area',\n sizeref=sizeref, opacity=1., size=list(df_nodes['size_plt']),\n color=list(df_nodes['color_plt']),\n ), **kw_nodes\n )\n\n # -------------------------------------------------------------------------\n # EDGES PLOT\n # -------------------------------------------------------------------------\n # get dataframe variables\n opacity, width = list(df_edges['opacity']), list(df_edges['width'])\n color = list(df_edges['color'])\n # edges plot\n for k in range(edges_x.shape[0]):\n # switch between 2D / 3D plot\n kw_edges = dict(x=edges_x[k, :], y=edges_y[k, :])\n if plt_in == '3D':\n kw_edges['z'] = edges_z[k, :]\n # single line trace\n _line = Scatter(\n mode='lines', showlegend=False, hoverinfo='none', name='edges',\n opacity=opacity[k], line=dict(width=width[k], color=color[k]),\n **kw_edges\n )\n fig.add_trace(_line, **kw_trace)\n fig.add_trace(node_trace, **kw_trace)\n\n # -------------------------------------------------------------------------\n # COLORBAR\n # -------------------------------------------------------------------------\n # edges colorbar (dirty but working solution...)\n if cbar:\n cbar_trace = go.Scatter(\n x=[0.], y=[0.], mode='markers', hoverinfo='none', showlegend=False,\n marker=dict(size=[0.], color=list(df_edges['values']),\n colorscale=edges_cmap, showscale=True,\n colorbar=dict(title=cbar_title, lenmode='fraction', len=0.75,\n **kw_cbar))\n )\n fig.add_trace(cbar_trace)\n \n fig.update_xaxes(showgrid=False, visible=False, **kw_trace)\n fig.update_yaxes(showgrid=False, visible=False, **kw_trace)\n if not len(kw_trace):\n fig.update_layout(width=900, height=800)\n\n return fig", "title": "" }, { "docid": "cd907679ceb619dad8573a9322362b01", "score": "0.5880552", "text": "def d3_graph(adjacency_matrix, row_names, col_names=None, 
str_to_group=len_str_to_group, str_to_name=strip_path_ext_characters, str_to_value=float, num_groups=7., directional=True):\r\n if col_names is None:\r\n col_names = row_names\r\n\r\n nodes, links = [], []\r\n\r\n print '-' * 10\r\n # get the nodes list first, from the row and column labels, even if not square\r\n for names in (row_names, col_names):\r\n for i, name_group in enumerate(names):\r\n if isinstance(name_group, basestring):\r\n name_group = (str_to_name(name_group), str_to_group(name_group))\r\n node = {\"name\": name_group[0], \"group\": name_group[1] or 1}\r\n print node\r\n if node not in nodes:\r\n nodes += [node] \r\n\r\n for i, row in enumerate(adjacency_matrix):\r\n for j, value in enumerate(row):\r\n links += [{\"source\": i, \"target\": j, \"value\": str_to_value(value)}]\r\n if directional:\r\n links += [{\"source\": j, \"target\": i, \"value\": str_to_value(value)}]\r\n\r\n return {'nodes': nodes, 'links': links}", "title": "" }, { "docid": "fafd62c206e324f2eae641caf729d9d2", "score": "0.58324057", "text": "def get_graph_data(x, num_nodes, num_relations, num_features):\n adj = x[:, :num_nodes*num_nodes*num_relations].reshape([-1, num_relations, num_nodes, num_nodes])\n feat_mat = x[:, num_nodes*num_nodes*num_relations:].reshape([-1, num_nodes, num_features])\n return adj, feat_mat", "title": "" }, { "docid": "a53520f07fc9d74dc2be2d340ba96b68", "score": "0.5741208", "text": "def construct_graph(self, x):\n if x.dim() != 3:\n if x.dim() == 4:\n x = x.squeeze()\n elif x.dim() == 2:\n x = x.unsqueeze(0)\n assert x.dim() == 3\n\n B, n, d = x.shape\n # logger.info('x.shape: {}'.format(x.shape))\n\n # Compute edge connections\n edge_index = torch.tensor(list(permutations(range(n), 2)), dtype=torch.long)\n edge_index = edge_index.t().contiguous() # Shape: [2 x E], E = n * (n - 1)\n\n # Compute edge features for all graphs\n src, dest = edge_index\n edge_attrs = x[:, dest, :2] - x[:, src, :2] # Shape: [B x E x T x 2]\n e = edge_attrs.shape[1]\n\n # U vector. 
|U|-dimensional 0-vector\n u = torch.zeros((1, self.u_dim), dtype=torch.float, device=x.device)\n\n # Create list of Data objects, then call Batch.from_data_list()\n data_objs = [Data(x=x[b].view(n, -1),\n edge_index=edge_index,\n edge_attr=edge_attrs[b].view(e, -1),\n u=u.clone())\n for b in range(B)]\n batch = Batch.from_data_list(data_objs).to(x.device)\n\n return batch", "title": "" }, { "docid": "260c9a9ef4f42a01bf2bdbe29ae79ffd", "score": "0.5718244", "text": "def build_graph() -> nx.DiGraph:\n graph = nx.DiGraph()\n conn = get_sqlite_conn()\n\n # Query all nodes first\n nodes = conn.execute(\"SELECT file, id, title FROM nodes;\")\n # A double JOIN to get all nodes that are connected by a link\n links = conn.execute(\"SELECT n1.id, nodes.id FROM ((nodes AS n1) \"\n \"JOIN links ON n1.id = links.source) \"\n \"JOIN (nodes AS n2) ON links.dest = nodes.id \"\n \"WHERE links.type = '\\\"id\\\"';\")\n # Populate the graph\n graph.add_nodes_from((n[1], {\n \"label\": n[2].strip(\"\\\"\"),\n \"tooltip\": n[2].strip(\"\\\"\"),\n \"lnk\": to_rellink(n[0]).lower(),\n \"id\": n[1].strip(\"\\\"\")\n }) for n in nodes)\n graph.add_edges_from(n for n in links if n[0] in graph.nodes and n[1] in graph.nodes)\n conn.close()\n return graph", "title": "" }, { "docid": "3d4598fddbdd12c9bc9c84f7c8999fbc", "score": "0.57002777", "text": "def __as_graph(self):\n g = gt.Graph()\n\n # Add some graph properties\n g.vp[\"name\"] = g.new_vertex_property(\"string\")\n g.vp[\"pos\"] = g.new_vertex_property(\"vector<float>\")\n g.vp[\"stat_obj\"] = g.new_vertex_property(\"bool\")\n g.vp[\"dyn_obj\"] = g.new_vertex_property(\"bool\")\n g.ep[\"dist\"] = g.new_edge_property(\"double\")\n g.ep[\"weight\"] = g.new_edge_property(\"double\")\n g.ep[\"active\"] = g.new_edge_property(\"bool\")\n\n # Add the vertices\n for i, row in enumerate(self.c_free):\n for j, col in enumerate(row):\n node_loc = (self.arena.x_mesh[i, j], self.arena.y_mesh[i, j])\n node_name = self.get_node_name(node_loc)\n\n if node_name in self.node_map:\n continue\n\n new_node = g.add_vertex(1)\n self.node_map[node_name] = new_node\n g.vp[\"name\"][new_node] = node_name\n g.vp[\"pos\"][new_node] = node_loc\n\n # Now add the edges\n for i, row in enumerate(self.c_free):\n for j, col in enumerate(row):\n\n # reachable points have a value of 0. 
This is if it\n # reachable, it will have a value.\n\n if self.c_free[i, j]:\n continue\n\n src_loc = (self.arena.x_mesh[i, j], self.arena.y_mesh[i, j])\n src = self.get_node_by_coord(src_loc)\n\n for di in [-1, 0, 1]:\n for dj in [-1, 0, 1]:\n if (not di) and (not dj):\n continue\n\n # Again, if the adjacent point has a value, it is in invalid traversal.\n if self.c_free[i + di, j + dj]:\n continue\n\n tgt_loc = (\n self.arena.x_mesh[i + di, j + dj],\n self.arena.y_mesh[i + di, j + dj],\n )\n tgt = self.get_node_by_coord(tgt_loc)\n\n e = g.add_edge(src, tgt)\n\n q_src = np.array(g.vp[\"pos\"][src])\n q_tgt = np.array(g.vp[\"pos\"][tgt])\n q_dst = q_src - q_tgt\n q_dst = np.linalg.norm(q_dst)\n g.ep[\"dist\"][e] = q_dst\n g.ep[\"weight\"][e] = q_dst\n g.ep[\"active\"][e] = True\n\n self.graph = g", "title": "" }, { "docid": "8b1fa183b2757eed5053034172aea8c0", "score": "0.5641135", "text": "def _graph_to_networkx_graph(self, graph, include_edge_attrs, \n include_node_attrs):\n\n # Initializing networkx graph\n networkx_graph = networkx.Graph(\n model_name = graph.model_name, category = graph.category,\n sub_category = graph.sub_category, source = graph.source\n )\n\n input_edges = dict()\n\n # Adding edges to the graph\n for src_node_index in graph.adj_list.keys():\n for [edge_index, dest_node_index] in graph.adj_list[src_node_index]:\n networkx_graph.add_edge(\n src_node_index, dest_node_index,\n tensor_shape = graph.edges[edge_index].tensor_shape,\n tensor_type = graph.edges[edge_index].tensor_type)\n \n if dest_node_index not in input_edges:\n input_edges.update({dest_node_index : []})\n \n input_edges[dest_node_index].append(graph.edges[edge_index])\n \n # Adding all nodes to the graph, and building features\n for index in range(len(graph.nodes)):\n features = list()\n\n if include_node_attrs.lower() == \"true\":\n for node_attr in self._NODE_ATTRS:\n features.append(str(getattr(graph.nodes[index], node_attr)))\n\n if include_edge_attrs.lower() == \"true\" and index in input_edges:\n for input_edge in input_edges[index]:\n for edge_attr in self._EDGE_ATTRS:\n features.append(str(getattr(input_edge, edge_attr)))\n\n concat_feature = \" \".join(features)\n\n networkx_graph.add_node(\n index, feature = concat_feature)\n\n # If graph is not connected, introduce a dummy node and connect all \n # inputs to it to make it connected.\n if not networkx.algorithms.is_connected(networkx_graph):\n max_node_num = max(networkx_graph.nodes())\n\n networkx_graph.add_node(max_node_num + 1, feature = \"Dummy\")\n\n for index in graph.start_node_indices:\n networkx_graph.add_edge(max_node_num + 1, index)\n\n return networkx_graph", "title": "" }, { "docid": "4e29e44d05823ac681b6c06ee7f6c1c5", "score": "0.56389105", "text": "def forward(self, node_word_idx, node_size, num_nodes, node_mask=None):\n node_emb = self.embedding(node_word_idx, node_size, num_nodes)\n\n dgl_graph = self.topology(node_emb, node_mask)\n dgl_graph.ndata['node_feat'] = node_emb\n\n return dgl_graph", "title": "" }, { "docid": "85915cc754e1ebf2555a4741bd138eb3", "score": "0.5622964", "text": "def to_nx_graph(self):\n nodes = range(len(self.G.graph))\n self.labels = {i: self.G.nodes[i].get_name() for i in nodes}\n self.nx_graph.add_nodes_from(nodes)\n undirected = self.find_undirected()\n directed = self.find_fully_directed()\n bidirected = self.find_bi_directed()\n for (i, j) in undirected:\n self.nx_graph.add_edge(i, j, color='g') # Green edge: undirected edge\n for (i, j) in directed:\n self.nx_graph.add_edge(i, j, color='b') # Blue 
edge: directed edge\n for (i, j) in bidirected:\n self.nx_graph.add_edge(i, j, color='r') # Red edge: bidirected edge", "title": "" }, { "docid": "bbba2666ac8f8b006f94a71c44e4d5e2", "score": "0.5561343", "text": "def get_graph(cuv_channel_id, nodes, edges):\n cuv_channel = Channel.objects.get(id=cuv_channel_id)\n graph = {\n \"title\": \"Import graph data for channel \" + cuv_channel.name,\n \"description\": \"The channel description of CUV is: \" + cuv_channel.description,\n \"nodes\": {}, # dict {channel_id --> channel_info}, where channe_info is a dict with keys: name, channel_id, counts\n \"edges\": [], # edges of the form (source, target, kind, count) where source and target are channel_ids\n }\n # for each channel, there are three graph nodes:\n # - channel_id\n # - channel_id+'-added' = to represent nodes added to studio by uploading new content\n # - channel_id+'-unused' = nodes in a channel that are not imported into derivative channels\n for channel_id, channel_data in nodes.items():\n print('processing channel channel_id='+channel_id)\n channel = Channel.objects.get(id=channel_id) \n # INPUTS: LISTS OF INDIVIDUAL CONTENT NODES\n added = channel_data[\"added\"].values()\n imported = channel_data[\"imported\"].values()\n all_nodes = added + imported\n # A. ADD GRAPH NODES\n # add three (3x) nodes that correspond to this channel_id\n ########################################################################\n # self\n channel_node = get_channel_as_graph_node(channel)\n channel_node['counts'] = get_resource_counts_by_kind(channel.main_tree)\n graph['nodes'][channel_id] = channel_node\n # added\n added_node_id = channel_id+'-added'\n added_node = get_channel_as_graph_node(\n channel,\n name='Added',\n description='Count of nodes uploaded to channel_id ' + channel_id\n )\n graph['nodes'][added_node_id] = added_node\n # unused\n unused_node_id = channel_id+'-unused'\n unused_node = get_channel_as_graph_node(\n channel,\n name='Unused',\n description='Connts of nodes in channel_id ' + channel_id + ' that are not imported in any downstream channels.'\n )\n graph['nodes'][unused_node_id] = unused_node\n \n \n # B. ADD GRAPH EDGES\n ########################################################################\n # 1. add unused edges\n # counts for the {{channel_id}} --> {{channel_id}}-unused edges\n unused_aggregates = defaultdict(int)\n for node in all_nodes:\n if not is_source(edges, node.id):\n unused_aggregates[node.kind_id] += 1\n unused_node['counts'] = unused_aggregates\n for kind, count in unused_aggregates.items():\n graph['edges'].append( (channel_id, unused_node_id, kind, count) )\n # \n # 2. add added edges\n # counts for the {{channel_id}}-added --> {{channel_id}} edges\n added_aggregates = defaultdict(int)\n for node in all_nodes:\n if not is_target(edges, node.id):\n added_aggregates[node.kind_id] += 1\n added_node['counts'] = added_aggregates\n for kind, count in added_aggregates.items():\n graph['edges'].append( (added_node_id, channel_id, kind, count) )\n #\n # 3. 
add imports edges\n # we're computing (snode-->node) imports for current channel only---not recusively\n for source_channel_id, imported_nodes in group_node_list_by_source_channel_id(imported).items():\n print(' processing source_channel_id '+source_channel_id)\n imported_aggregates = defaultdict(int)\n for imported_node in imported_nodes:\n snode = get_source_node(imported_node)\n assert is_source(edges, snode.id), 'failed assumption snode is not in edges'\n imported_aggregates[imported_node.kind_id] += 1\n for kind, count in imported_aggregates.items():\n graph['edges'].append((source_channel_id, channel_id, kind, count))\n #\n # thank you; come again...\n return graph", "title": "" }, { "docid": "5641fcefb553324dae9306a47d6ca12d", "score": "0.5561018", "text": "def channel_to_networkx_graph(self, channel):\n return nx.from_scipy_sparse_matrix(self.ch_to_adj[channel], parallel_edges=True)", "title": "" }, { "docid": "72077df3310a4874b94459ce797ad3c7", "score": "0.5522097", "text": "def gt_files_to_dgraph(gtfilesfolder,\n submodelname,\n minlayerid=0,\n edgeextension='.edge0'):\n \n lmb = {}\n mu = {}\n consumption_factor = {}\n production_factor = {}\n node_type = {}\n \n cash = {}\n goods = {}\n price = {}\n \n \n # Treat each vertex file with each node attribute\n for layer in [minlayerid, minlayerid+1, minlayerid+2]:\n vertexpath = gtfilesfolder + submodelname + '.vertex' + str(layer)\n \n with open(vertexpath, 'r') as vertexfile: \n linenumber = 0\n for line in vertexfile:\n if not linenumber==0:\n line_array = re.split('\\s+', line)\n \n nid_value = int(line_array[0])\n if layer==minlayerid:\n lmb[nid_value] = float(line_array[1])\n cash[nid_value] = float(line_array[2])\n if layer==minlayerid+1:\n mu[nid_value] = float(line_array[1])\n goods[nid_value] = float(line_array[2])\n consumption_factor[nid_value] = float(line_array[3])\n production_factor[nid_value] = float(line_array[4])\n if layer==minlayerid+2:\n price[nid_value] = float(line_array[1])\n node_type[nid_value] = int(line_array[2])\n\n linenumber += 1\n vertexfile.close()\n \n \n # Treat the edge file with edge_type attribute\n edge_type = {}\n edgepath = gtfilesfolder + submodelname + edgeextension\n with open(edgepath, 'r') as edgefile:\n linenumber = 0\n for line in edgefile:\n if not linenumber==0:\n line_array = re.split('\\s+', line)\n start = int(line_array[1])\n end = int(line_array[2])\n edge_type[(start,end)] = 1 # Goods link is always with type 1\n linenumber += 1\n edgefile.close()\n \n dgraph = nx.DiGraph()\n \n dgraph.add_nodes_from(list(lmb))\n nx.set_node_attributes(dgraph, 'lmb', lmb)\n nx.set_node_attributes(dgraph, 'cash', cash)\n nx.set_node_attributes(dgraph, 'mu', mu)\n nx.set_node_attributes(dgraph, 'goods', goods)\n nx.set_node_attributes(dgraph, 'consumption_factor', consumption_factor)\n nx.set_node_attributes(dgraph, 'production_factor', production_factor)\n nx.set_node_attributes(dgraph, 'price', price)\n nx.set_node_attributes(dgraph, 'node_type', node_type)\n \n dgraph.add_edges_from(list(edge_type))\n nx.set_edge_attributes(dgraph, 'edge_type', edge_type)\n \n return dgraph", "title": "" }, { "docid": "7dad86a32005e8a5a0c3bf519a0de607", "score": "0.5508512", "text": "def initialise_graph(edges='connectivity.csv', nodes='nodecords.csv', endnode=None):\n\n edgelist = pd.read_csv(edges, names=['node1', 'node2'])\n nodelist = pd.read_csv(nodes, names=['id', 'x', 'y', 'z'])\n G = nx.Graph()\n # Add edges\n for i, elrow in edgelist.iterrows():\n G.add_edge(elrow[0], elrow[1])\n # Add node 
attributes\n for i, nlrow in nodelist.iterrows():\n G.node[nlrow['id']].update(nlrow[1:].to_dict())\n\n for edge in G.edges():\n n1 = edge[0]\n n2 = edge[1]\n dx = abs(G.node[n1]['x'] - G.node[n2]['x'])\n dy = abs(G.node[n1]['y'] - G.node[n2]['y'])\n dz = abs(G.node[n1]['z'] - G.node[n2]['z'])\n G[n1][n2]['dx'] = dx\n G[n1][n2]['dy'] = dy\n G[n1][n2]['dz'] = dz\n G[n1][n2]['distance'] = max([dx, dy, dz])\n\n if endnode:\n G.remove_node(endnode) # Remove endnode and adjacent edges from graph\n else:\n print(\"Warning: No endnode removed from graph\")\n return G", "title": "" }, { "docid": "2ae5c63c0041e6b18ada696b52d66e2d", "score": "0.5494185", "text": "def graph(g):\r\n return g.adjacencyList() #+ '\\n' + '\\n' + str(g.adjacencyMatrix())\r", "title": "" }, { "docid": "e0a469d7147853bffac195a30d4c8c9e", "score": "0.5470044", "text": "def _convert_to_nx(self):\n nx_graph = nx.DiGraph()\n for edge in self.edges():\n edge = next(iter(edge.items()))\n nx_graph.add_edge(*edge)\n return nx_graph", "title": "" }, { "docid": "e4542787b61261560485505a9cdd7c53", "score": "0.5469895", "text": "def to_graph(self):\r\n pass", "title": "" }, { "docid": "6db86a33f060519be686e028b4f349c1", "score": "0.5468705", "text": "def to_graph(weighted_edges):\n G = nx.Graph()\n G.add_weighted_edges_from(weighted_edges)\n return G", "title": "" }, { "docid": "484f9dfbed2004d397ced784353a9748", "score": "0.5425727", "text": "def mol2graph_data(mol)->tuple:\n atoms = mol.GetAtoms()\n bonds = mol.GetBonds()\n\n node_feats = [atom_features(atom) for atom in atoms]\n\n edge_ixs, adj = get_bond_pair(mol)\n\n edge_feats = [bond_features(bond) for bond in bonds]\n\n return np.stack(node_feats), np.stack(edge_ixs), np.stack(edge_feats)#, adj", "title": "" }, { "docid": "1c59a6014bca97b97b991f9c60b0d0d6", "score": "0.5416383", "text": "def networkx_to_data(G, node_feature_dim=0):\n n = G.number_of_nodes()\n x_shape = (n, node_feature_dim)\n\n edge_index = list()\n for edge in G.edges:\n edge_index.append(list(edge))\n\n edge_attr = list()\n for edge in edge_index:\n edge_feature = list()\n for key in G[edge[0]][edge[1]].keys():\n edge_feature.append(G[edge[0]][edge[1]][key])\n edge_attr.append(edge_feature)\n\n x = torch.ones(x_shape, dtype=torch.float)\n edge_index = torch.tensor(edge_index, dtype=torch.long)\n edge_index = edge_index.t().contiguous()\n edge_attr = torch.tensor(edge_attr, dtype=torch.float)\n return Data(x=x, edge_index=edge_index, edge_attr=edge_attr)", "title": "" }, { "docid": "1ee20a2d87fd1451e819858d954f8743", "score": "0.54147404", "text": "def create_all_adj_matrix((data_file, graph_file), output_file):\n\n d = pickle.load(open(data_file, 'r'))\n tfdf = d['tfdf']\n wiredf = d['wiredf']\n \n gf = pickle.load(open(graph_file, 'r'))\n g = gf['graph']\n\n canonical_node_ordering = tfdf.index\n N = len(canonical_node_ordering)\n adj_mat = np.zeros((N, N), dtype = [('link', np.uint8), \n ('distance', np.float32)])\n \n print \"now walk\"\n # create graph\n for n1_i, (n1, n1_data) in enumerate(tfdf.iterrows()):\n x1 = n1_data['x']\n y1 = n1_data['y']\n print n1_i\n for n2_i, (n2, row_data) in enumerate(tfdf.iterrows()):\n if g.has_edge(n1, n2):\n adj_mat[n1_i, n2_i]['link'] =True\n x2 = row_data['x']\n y2 = row_data['y']\n d = np.sqrt((x2-x1)**2 + (y2-y1)**2)\n adj_mat[n1_i, n2_i]['distance'] = d\n pickle.dump({'adj_mat' : adj_mat}, \n open(output_file, 'w'))", "title": "" }, { "docid": "3a5b0e2424746c4579dede1dafc56762", "score": "0.5414644", "text": "def _scipy_to_igraph(matrix, coords, 
directed=False):\n\n matrix = csr_matrix(matrix)\n sources, targets = matrix.nonzero()\n weights = matrix[sources, targets].tolist()[0]\n\n x = coords[:, 0]\n y = coords[:, 1]\n z = coords[:, 2]\n if igraph_available:\n g = igraph.Graph(list(zip(sources, targets)),\n n=matrix.shape[0], directed=directed,\n edge_attrs={'weight': weights},\n vertex_attrs={'x': x, 'y': y, 'z': z})\n return g\n else:\n raise HalotoolsError(no_igraph_msg)", "title": "" }, { "docid": "c008c9aa7230a227a34599b3960ae714", "score": "0.54108155", "text": "def gen_connected_graph(self):\n\n while True:\n self.adj_mat = np.eye(self.n)\n edges = np.random.choice(self.max_edges, self.m)\n for i, j in self.map[edges]:\n self.adj_mat[i, j] = 1\n if self.is_connected():\n break\n\n return np.reshape(self.adj_mat, -1)", "title": "" }, { "docid": "b722f491b92cb7eb4dbb860c27827e78", "score": "0.5410762", "text": "def nx_to_adj(g):\n node_order = np.arange(len(g.nodes))\n adj = nx.to_numpy_array(g, dtype=np.int32,\n weight='color', nodelist = node_order)\n color = np.array([g.nodes[n]['color'] for n in node_order], dtype=np.int32)\n return adj, color", "title": "" }, { "docid": "39b6c9f28805fc1c0c7fd9966e60f4a3", "score": "0.538456", "text": "def to_networkx_graph(net, size=None, labels='indices', **kwargs):\n if net.__class__.__name__ == 'ECA':\n if size is None:\n msg = \"`size` required to convert an ECA to a networkx network\"\n raise AttributeError(msg)\n else:\n return net.to_networkx_graph(size)\n\n elif net.__class__.__name__ in ['WTNetwork', 'LogicNetwork']:\n return net.to_networkx_graph(labels=labels)", "title": "" }, { "docid": "4ac9ca62848aa5cb8f5350ec5a4fb971", "score": "0.5383456", "text": "def getGraphNodes(self):\n self._compute_shortest_paths()\n curr_node = [self.node_str(n,state.location.viewpointId) for n,state in enumerate(self.getState())]\n num_nodes = nx.number_of_nodes(self.G)\n features = torch.empty(num_nodes, 7, device=self.args.device)\n i = 0\n shortest_paths = torch.zeros(num_nodes, self.args.max_steps*2, 3, device=self.args.device)\n for node in sorted(self.G.nodes(data=True)):\n n = int(node[0].split('_')[0])\n start_pos = self.G.node[self.start_node[n]]['position']\n features[i,0] = n\n features[i,1:4] = node[1]['position'] - start_pos # relative xyz (relative to start location)\n features[i,4] = self.distances[self.start_node[n]][node[0]] # dist from start\n\n path = self.paths[self.start_node[n]][node[0]] # path from start to node\n shortest_paths[i,:len(path),:] = torch.cat([self.G.node[p]['position'].unsqueeze(0) for p in path], dim=0)\n\n features[i,5] = self.distances[curr_node[n]][node[0]] # dist from curr\n features[i,6] = 1 if 'visited' in node[1] else 0\n i = i+1\n # Calculate adjacency matrix\n adj = nx.adjacency_matrix(self.G,nodelist=sorted(self.G.nodes()))\n adj[adj>0]=1\n adj = sp.eye(adj.shape[0])\n adj = self._sparse_mx_to_torch_sparse_tensor(adj).to(self.args.device)\n return features,adj,shortest_paths", "title": "" }, { "docid": "0b19d2b43397cb1744bc6f7e49095ea6", "score": "0.5382405", "text": "def _get_graph_adj_lists(device, world, global_entity_id, global_node=False):\n entity_mapping = {}\n for i, entity in enumerate(world.db_context.knowledge_graph.entities):\n entity_mapping[entity] = i\n entity_mapping['_global_'] = global_entity_id\n adj_list_own = [] # column--table (bi-direction)\n adj_list_link = [] # table->table / foreign->primary\n adj_list_linked = [] # table<-table / foreign<-primary\n adj_list_global = [] # node->global\n\n # TODO: Prepare in advance?\n for 
key, neighbors in world.db_context.knowledge_graph.neighbors.items():\n idx_source = entity_mapping[key]\n for n_key in neighbors:\n idx_target = entity_mapping[n_key]\n if n_key.startswith(\"table\") or key.startswith(\"table\"):\n adj_list_own.append((idx_source, idx_target))\n elif n_key.startswith(\"string\") or key.startswith(\"string\"):\n adj_list_own.append((idx_source, idx_target))\n elif key.startswith(\"column:foreign\"):\n adj_list_link.append((idx_source, idx_target))\n src_table_key = f\"table:{key.split(':')[2]}\"\n tgt_table_key = f\"table:{n_key.split(':')[2]}\"\n idx_source_table = entity_mapping[src_table_key]\n idx_target_table = entity_mapping[tgt_table_key]\n adj_list_link.append((idx_source_table, idx_target_table))\n elif n_key.startswith(\"column:foreign\"):\n adj_list_linked.append((idx_source, idx_target))\n src_table_key = f\"table:{key.split(':')[2]}\"\n tgt_table_key = f\"table:{n_key.split(':')[2]}\"\n idx_source_table = entity_mapping[src_table_key]\n idx_target_table = entity_mapping[tgt_table_key]\n adj_list_linked.append((idx_source_table, idx_target_table))\n else:\n assert False\n\n adj_list_global.append((idx_source, entity_mapping['_global_']))\n\n all_adj_types = [adj_list_own, adj_list_link, adj_list_linked]\n\n if global_node:\n all_adj_types.append(adj_list_global)\n\n return [torch.tensor(l, device=device, dtype=torch.long).transpose(0, 1) if l\n else torch.tensor(l, device=device, dtype=torch.long)\n for l in all_adj_types]", "title": "" }, { "docid": "cf13b8c9748548e87f32e223f0267288", "score": "0.5380778", "text": "def forward(self, graph):\n # graph = graph.to_dgl()\n # node_feats = graph.node_features['node_feat']\n\n node_feats = graph.ndata['node_feat']\n\n if self.direction_option == 'uni':\n node_embs = self.models(graph, node_feats)\n else:\n assert node_feats.shape[1] == self.input_size\n\n zero_pad = node_feats.new_zeros((node_feats.shape[0], self.output_size - node_feats.shape[1]))\n node_feats = torch.cat([node_feats, zero_pad], -1)\n\n feat_in = node_feats\n feat_out = node_feats\n\n for i in range(self.num_layers):\n h = self.models(graph, (feat_in, feat_out))\n feat_in = h[0]\n feat_out = h[1]\n\n if self.direction_option == 'bi_sep':\n node_embs = torch.cat([feat_in, feat_out], dim=-1)\n elif self.direction_option == 'bi_fuse':\n node_embs = feat_in\n else:\n raise RuntimeError('Unknown `bidirection` value: {}'.format(self.direction_option))\n\n # graph.node_features['node_emb'] = node_embs\n graph.ndata['node_emb'] = node_embs\n\n return graph", "title": "" }, { "docid": "af5b7dfedad4ebffaaf3dde850e1e91d", "score": "0.5360096", "text": "def show_graph_with_labels(adjacency_matrix):\n gr = nx.from_numpy_matrix(np.matrix(adjacency_matrix))\n nx.draw(gr)\n plt.show()", "title": "" }, { "docid": "7369492e8e00977007a6b7e728e88caf", "score": "0.5357308", "text": "def nx_graph():\n adj_list = TextCorpus().global_adj_list_for_nx\n print adj_list\n G = nx.Graph()\n G.add_edges_from(adj_list)\n nx.draw(G)\n img_fn = \"graph-{0}\".format(strftime('%Y-%m-%d %H:%M:%S.png', gmtime()))\n plt.savefig(img_fn)\n plt.show()", "title": "" }, { "docid": "f37bdd0d63f3e005afed55ec6a8c6306", "score": "0.53330815", "text": "def BuildGraphData(self, nodes, edges):\n\n graph = nx.Graph()\n for key in nodes:\n graph.add_node(key, weight=nodes[key])\n\n for key in edges:\n edge_values = tuple(key.split(\"----\"))\n graph.add_edge(edge_values[0], edge_values[1], weight=edges[key])\n\n return graph", "title": "" }, { "docid": 
"11d209dcc5d134ea6103a6c371d3ba2e", "score": "0.5330367", "text": "def make_graph_abcde(node):\n d = node('D')\n e = node('E')\n c = node('C', cd=d)\n b = node('B', bc=c)\n a = node('A', ab=b, ac=c, ae=e)\n return a, b, c, d, e", "title": "" }, { "docid": "8afd5a96ba9a5bc119c13ff9def8d430", "score": "0.5308444", "text": "def _map_to_graph(self):\n\t\tid_node_counter = 0\n\t\tconnections = {}\n\t\t# targets = [agent.target for agent in self.env.agents]\n\n\t\t# Identify cells hat are nodes (switches or diamond crossings)\n\t\tfor i in range(self.env.height):\n\t\t\tfor j in range(self.env.width):\n\n\t\t\t\tis_switch = False\n\t\t\t\tis_crossing = False\n\t\t\t\t# is_target = False\n\t\t\t\tconnections_matrix = np.zeros((4, 4)) # Matrix NESW x NESW\n\n\t\t\t\t# Check if diamond crossing\n\t\t\t\ttransitions_bit = bin(self.env.rail.get_full_transitions(i, j))\n\t\t\t\tif int(transitions_bit, 2) == int('1000010000100001', 2):\n\t\t\t\t\tis_crossing = True\n\t\t\t\t\tconnections_matrix[0, 2] = connections_matrix[2, 0] = 1\n\t\t\t\t\tconnections_matrix[1, 3] = connections_matrix[3, 1] = 1\n\n\t\t\t\telse:\n\t\t\t\t\t# Check if target\n\t\t\t\t\t# if (i, j) in targets:\n\t\t\t\t\t#\tis_target = True\n\t\t\t\t\t# Check if switch\n\t\t\t\t\tfor direction in (0, 1, 2, 3): # 0:N, 1:E, 2:S, 3:W\n\t\t\t\t\t\tpossible_transitions = self.env.rail.get_transitions(i, j, direction)\n\t\t\t\t\t\tfor t in range(4): # Check groups of bits\n\t\t\t\t\t\t\tif possible_transitions[t]:\n\t\t\t\t\t\t\t\tinv_direction = (direction + 2) % 4\n\t\t\t\t\t\t\t\tconnections_matrix[inv_direction, t] = connections_matrix[t, inv_direction] = 1\n\t\t\t\t\t\tnum_transitions = np.count_nonzero(possible_transitions)\n\t\t\t\t\t\tif num_transitions > 1:\n\t\t\t\t\t\t\tis_switch = True\n\n\t\t\t\tif is_switch or is_crossing: #or is_target:\n\t\t\t\t\t# Add node - keep info on cell position\n\t\t\t\t\t# Update only for nodes that are switches\n\t\t\t\t\tconnections.update({id_node_counter: connections_matrix})\n\t\t\t\t\tself.id_node_to_cell.update({id_node_counter: (i, j)})\n\t\t\t\t\tself.cell_to_id_node.update({(i, j): id_node_counter})\n\t\t\t\t\tid_node_counter += 1\n\n\t\t# Enumerate edges from these nodes\n\t\tid_edge_counter = 0\n\t\t# Start from connections of one node and follow path until next switch is found\n\t\tnodes = connections.keys() # ids\n\t\tvisited = set() # Keeps set of CardinalNodes that were already visited\n\t\tfor n in nodes:\n\t\t\tfor cp in range(4): # Check edges from the 4 cardinal points\n\t\t\t\tif np.count_nonzero(connections[n][cp, :]) > 0:\n\t\t\t\t\tvisited.add(CardinalNode(n, cp)) # Add to visited\n\t\t\t\t\tcells_sequence = []\n\t\t\t\t\tnode_found = False\n\t\t\t\t\tedge_length = 0\n\t\t\t\t\t# Keep going until another node is found\n\t\t\t\t\tdirection = cp\n\t\t\t\t\tpos = self.id_node_to_cell[n]\n\t\t\t\t\twhile not node_found:\n\t\t\t\t\t\tneighbour_pos = get_new_position(pos, direction)\n\t\t\t\t\t\tcells_sequence.append((neighbour_pos, direction))\n\t\t\t\t\t\tif neighbour_pos in self.cell_to_id_node: # If neighbour is a node\n\t\t\t\t\t\t\t# node_found = True\n\t\t\t\t\t\t\t# Build edge, mark visited\n\t\t\t\t\t\t\tid_node1 = n\n\t\t\t\t\t\t\tcp1 = cp\n\t\t\t\t\t\t\tid_node2 = self.cell_to_id_node[neighbour_pos]\n\t\t\t\t\t\t\tcp2 = self._reverse_dir(direction)\n\t\t\t\t\t\t\tif CardinalNode(id_node2, cp2) not in visited:\n\t\t\t\t\t\t\t\tself.info.update({id_edge_counter:\n\t\t\t\t\t\t\t\t\t (CardinalNode(id_node1, cp1),\n\t\t\t\t\t\t\t\t\t CardinalNode(id_node2, 
cp2),\n\t\t\t\t\t\t\t\t\t edge_length)})\n\t\t\t\t\t\t\t\tcells_sequence.pop() # Don't include this node in the edge\n\t\t\t\t\t\t\t\tself.id_edge_to_cells.update({id_edge_counter: cells_sequence})\n\t\t\t\t\t\t\t\tid_edge_counter += 1\n\t\t\t\t\t\t\t\tvisited.add(CardinalNode(id_node2, cp2))\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tedge_length += 1 # Not considering switches in the count\n\t\t\t\t\t\t# Update pos and dir\n\t\t\t\t\t\tpos = neighbour_pos\n\t\t\t\t\t\texit_dir = self._reverse_dir(direction)\n\t\t\t\t\t\tpossible_transitions = np.array(self.env.rail.get_transitions(pos[0], pos[1], direction))\n\t\t\t\t\t\tpossible_transitions[exit_dir] = 0 # Don't consider direction from which I entered\n\t\t\t\t\t\t# t = 2\n\t\t\t\t\t\tt = np.argmax(possible_transitions) # There's only one possible transition except the one that I took to get in\n\t\t\t\t\t\ttemp_pos = get_new_position(pos, t)\n\t\t\t\t\t\tif 0 <= temp_pos[0] < self.env.height and 0 <= temp_pos[1] < self.env.width: # Patch - check if this cell is a rail\n\t\t\t\t\t\t\t# Entrance dir is always opposite to exit dir\n\t\t\t\t\t\t\tdirection = t\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\n\t\tself.nodes = nodes # Set of nodes\n\t\tself.edges = self.info.keys() # Set of edges\n\t\tself.num_rails = len(self.edges)", "title": "" }, { "docid": "66311bafeea31f290dba814cc89893ce", "score": "0.5304772", "text": "def transformToGraph(C,shortcuts):\n G = networkx.DiGraph()\n G.add_edges_from([(i,i+1) for i in range(len(C)-1)])\n G.add_edges_from([(C.index(s[0]), C.index(s[1])) for s in shortcuts])\n\n return G", "title": "" }, { "docid": "7c80500e5f1b5849b218e415442e8c91", "score": "0.52968556", "text": "def df_graph():\n g = Graph()\n g.graph = {\n 'A': {'B': 1, 'D': 2},\n 'B': {'C': 2, 'D': 4},\n 'C': {'G': 3},\n 'D': {'E': 2, 'F': 4, 'H': 6},\n 'E': {'D': 2},\n 'F': {'H': 3, 'F': 6},\n 'G': {'C': 2},\n 'H': {'D': 5, 'F': 10},\n }\n return g", "title": "" }, { "docid": "7bd981cb0bbd4082299b43312d5a2818", "score": "0.5288867", "text": "def load_graph(dataset_str):\n names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']\n objects = []\n for i in range(len(names)):\n with open(\"datasets/{}/ind.{}.{}\".format(dataset_str, dataset_str, names[i]), 'rb') as f:\n if sys.version_info > (3, 0):\n objects.append(pk.load(f, encoding='latin1'))\n else:\n objects.append(pk.load(f))\n\n x, y, tx, ty, allx, ally, graph = tuple(objects)\n test_idx_reorder = parse_index_file(\"datasets/{}/ind.{}.test.index\".format(dataset_str, dataset_str))\n test_idx_range = np.sort(test_idx_reorder)\n\n if dataset_str == 'citeseer':\n # Fix citeseer dataset (there are some isolated nodes in the graph)\n # Find isolated nodes, add them as zero-vecs into the right position\n test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)\n tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))\n tx_extended[test_idx_range-min(test_idx_range), :] = tx\n tx = tx_extended\n ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))\n ty_extended[test_idx_range-min(test_idx_range), :] = ty\n ty = ty_extended\n\n features = sp.vstack((allx, tx)).tolil()\n features[test_idx_reorder, :] = features[test_idx_range, :]\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))\n adj = normalize(adj + sp.eye(adj.shape[0]))\n features = normalize(features)\n\n labels = np.vstack((ally, ty))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n labels = np.where(labels)[1]\n\n idx_test = test_idx_range.tolist()\n idx_train = range(len(y))\n idx_val = 
range(len(y), len(y)+500)\n \n graph = Graph(edge_mat=adj, node_features=features.todense(), node_tags=labels)\n\n return graph, idx_train, idx_val, idx_test", "title": "" }, { "docid": "1ddca1f99f9a25702fd6d3fd03993bd1", "score": "0.52802914", "text": "def graph_to_gdfs(G, nodes=True, edges=True, node_geometry=True, fill_edge_geometry=True):\n\n if not (nodes or edges):\n raise ValueError('You must request nodes or edges, or both.')\n\n to_return = []\n\n if nodes:\n\n start_time = time.time()\n\n nodes = {node:data for node, data in G.nodes(data=True)}\n gdf_nodes = gpd.GeoDataFrame(nodes).T\n if node_geometry:\n gdf_nodes['geometry'] = gdf_nodes.apply(lambda row: Point(row['x'], row['y']), axis=1)\n gdf_nodes.crs = G.graph['crs']\n gdf_nodes.gdf_name = '{}_nodes'.format(G.graph['name'])\n\n to_return.append(gdf_nodes)\n \n if edges:\n\n start_time = time.time()\n\n # create a list to hold our edges, then loop through each edge in the\n # graph\n edges = []\n for u, v, data in G.edges(data=True):\n # for each edge, add key and all attributes in data dict to the\n # edge_details\n edge_details = {'u':u, 'v':v}\n for attr_key in data:\n edge_details[attr_key] = data[attr_key]\n\n # if edge doesn't already have a geometry attribute, create one now\n # if fill_edge_geometry==True\n if 'geometry' not in data:\n if fill_edge_geometry:\n point_u = Point((G.nodes[u]['x'], G.nodes[u]['y']))\n point_v = Point((G.nodes[v]['x'], G.nodes[v]['y']))\n edge_details['geometry'] = LineString([point_u, point_v])\n else:\n edge_details['geometry'] = np.nan\n\n edges.append(edge_details)\n\n # create a GeoDataFrame from the list of edges and set the CRS\n gdf_edges = gpd.GeoDataFrame(edges)\n gdf_edges.crs = G.graph['crs']\n gdf_edges.gdf_name = '{}_edges'.format(G.graph['name'])\n\n to_return.append(gdf_edges)\n \n if len(to_return) > 1:\n return tuple(to_return)\n else:\n return to_return[0]", "title": "" }, { "docid": "a57a2580f6a4e616a7de62a028fadbab", "score": "0.5275201", "text": "def graph_etl_model(self, model, data):\r\n self.save_model_to_map(model)\r\n etl_source = model['Name']\r\n node_index = {}\r\n graph = {\"nodes\": [], \"lines\": [], \"n_index\": []}\r\n # Ensure the data received is changed into a DataFrame if it is not already\r\n if str(type(data)) != \"<class 'pandas.core.frame.DataFrame'>\":\r\n file = self.file_to_frame(data)\r\n if str(type(file[\"data\"])) != \"<class 'pandas.core.frame.DataFrame'>\":\r\n return file\r\n else:\r\n data = file[\"data\"]\r\n\r\n def get_key(**kwargs):\r\n \"\"\"\r\n Handles node creation based on the local node_index and the local create_node function.\r\n The node expects an icon and class_name (EntityType)\r\n expects an Icon with a key but if there is none it will create it\r\n :param kwargs:\r\n :return:\r\n \"\"\"\r\n # Check if it has been created based on attributes and return the corresponding key or create a new node\r\n h_key = self.hash_node(kwargs)\r\n if h_key in node_index.keys():\r\n return node_index[h_key]\r\n else:\r\n try:\r\n new_node = self.create_node(**kwargs)[\"data\"]\r\n node_index[h_key] = new_node[\"key\"]\r\n graph[\"nodes\"].append(new_node)\r\n return new_node[\"key\"]\r\n except Exception as e:\r\n print(str(e))\r\n return None\r\n\r\n for index, row in data.iterrows():\r\n\r\n if index != 0:\r\n # Based on the entities in the model, get IDs that can be used to create relationships\r\n rowConfig = {}\r\n badRow = False\r\n for entity in model[\"Entities\"]:\r\n # The extracted entity is based on the model and mapped row 
value to entity attributes\r\n # If the class_name is not in the models then it should be created as a Category of an Object class\r\n if \"className\" in model[\"Entities\"][entity].keys():\r\n extractedEntity = {\"class_name\": model[\"Entities\"][entity][\"className\"], \"source\": etl_source}\r\n elif entity in self.models.keys():\r\n extractedEntity = {\"class_name\": entity, \"source\": etl_source}\r\n else:\r\n extractedEntity = {\"class_name\": \"Object\", \"entity\": entity, \"source\": etl_source}\r\n # Check if there is a description, otherwise set it up to auto create a description\r\n if \"description\" not in model[\"Entities\"][entity]:\r\n extractedEntity['description'] = \"\"\r\n autoDescribe = True\r\n else:\r\n autoDescribe = False\r\n for att in model[\"Entities\"][entity]:\r\n # If the attribute is in the row headers then it is to be mapped otherwise it is a custom value\r\n\r\n if model[\"Entities\"][entity][att] in row.keys():\r\n val = row[model[\"Entities\"][entity][att]]\r\n try:\r\n clean_val = val.to_pydatetime()\r\n except:\r\n clean_val = val\r\n\r\n extractedEntity[att] = clean_val\r\n else:\r\n extractedEntity[att] = model[\"Entities\"][entity][att]\r\n clean_val = model[\"Entities\"][entity][att]\r\n if autoDescribe:\r\n clean_val = date_to_standard_string(clean_val)\r\n extractedEntity['description'] = extractedEntity['description'] + str(clean_val) + \" \"\r\n # Check if this Entity has already been extracted and get the key.\r\n # The function also adds the entity to the graph which will be exported\r\n exEntityKey = get_key(**extractedEntity)\r\n if not exEntityKey:\r\n badRow = True\r\n if exEntityKey in graph[\"n_index\"]:\r\n graph[\"n_index\"].append(exEntityKey)\r\n # Add the entity key to its spot within the mapping configuration so the lines can be built\r\n rowConfig[entity] = exEntityKey\r\n if not badRow:\r\n # Use the entity names that are saved into the relation to and from to assign the row config entity key\r\n for line in model[\"Relations\"]:\r\n if({\"to\": rowConfig[model[\"Relations\"][line][\"to\"]], \"from\": rowConfig[model[\"Relations\"][line][\"from\"]], \"description\": line }) not in graph[\"lines\"]:\r\n graph[\"lines\"].append({\r\n \"to\": rowConfig[model[\"Relations\"][line][\"to\"]],\r\n \"from\": rowConfig[model[\"Relations\"][line][\"from\"]],\r\n \"description\": line,\r\n })\r\n self.create_edge_new(\r\n fromNode=rowConfig[model[\"Relations\"][line][\"from\"]],\r\n toNode=rowConfig[model[\"Relations\"][line][\"to\"]],\r\n edgeType=line\r\n )\r\n\r\n return graph", "title": "" }, { "docid": "02e4552f90e8e6d84c7546c33d03aa8d", "score": "0.52716106", "text": "def build_graph(self, fn_node, fn_edge):\n node = load_data(fn_node)\n edge = load_data(fn_edge)\n assert len(node) > 1, '#node of {}: {}'.format(fn_node, len(node))\n # take majority as label of the graph\n lb2cnt = {}\n for idx in node:\n if idx not in self.dataset.idx2lb:\n continue\n lb = self.dataset.idx2lb[idx]\n if lb not in lb2cnt:\n lb2cnt[lb] = 0\n lb2cnt[lb] += 1\n gt_lb, _ = get_majority(lb2cnt)\n gt_node = self.dataset.lb2idxs[gt_lb]\n iou = compute_iou(node, gt_node)\n # compute adj\n node = list(node)\n abs2rel = {}\n for i, n in enumerate(node):\n abs2rel[n] = i\n size = len(node)\n adj = np.eye(size)\n for e in edge:\n if len(e) == 2:\n e1, e2 = e\n w = 1\n elif len(e) == 3:\n e1, e2, dist = e\n if self.dataset.wo_weight:\n w = 1\n else:\n w = 1 - dist\n else:\n raise ValueError('Unknown length of e: {}'.format(e))\n v1 = abs2rel[e1]\n v2 = 
abs2rel[e2]\n adj[v1][v2] = w\n adj[v2][v1] = w\n if self.dataset.featureless:\n vertices = adj.sum(axis=1, keepdims=True)\n vertices /= vertices.sum(axis=1, keepdims=True)\n else:\n vertices = self.dataset.features[node, :]\n if self.dataset.is_norm_adj:\n adj /= adj.sum(axis=1, keepdims=True)\n return vertices, adj, iou", "title": "" }, { "docid": "931979d21c8bdbdd861af9da7ce4232a", "score": "0.5269351", "text": "def create_dir_adj_matrix((data_file, graph_file), output_file):\n\n d = pickle.load(open(data_file, 'r'))\n tfdf = d['tfdf']\n wiredf = d['wiredf']\n \n gf = pickle.load(open(graph_file, 'r'))\n g = gf['graph']\n\n canonical_node_ordering = tfdf.index\n N = len(canonical_node_ordering)\n adj_mat = np.zeros((N, N), dtype = [('link', np.uint8), \n ('distance', np.float32)])\n \n print \"now walk\"\n # create graph\n for n1_i, (n1, n1_data) in enumerate(tfdf.iterrows()):\n x1 = n1_data['x']\n y1 = n1_data['y']\n print n1_i\n for n2_i, (n2, row_data) in enumerate(tfdf.iterrows()):\n if g.has_edge(n1, n2):\n adj_mat[n1_i, n2_i]['link'] =True\n x2 = row_data['x']\n y2 = row_data['y']\n d = np.sqrt((x2-x1)**2 + (y2-y1)**2)\n adj_mat[n1_i, n2_i]['distance'] = d\n pickle.dump({'adj_mat' : adj_mat}, \n open(output_file, 'w'))", "title": "" }, { "docid": "2679ceacb32b0cad8392eddc9b32df22", "score": "0.52526706", "text": "def compute_graph_nn(xyz, k_nn):\n num_ver = xyz.shape[0]\n graph = dict([(\"is_nn\", True)])\n nn = NearestNeighbors(n_neighbors=k_nn+1, algorithm='kd_tree').fit(xyz)\n distances, neighbors = nn.kneighbors(xyz)\n neighbors = neighbors[:, 1:]\n distances = distances[:, 1:]\n source = np.matlib.repmat(range(0, num_ver), k_nn, 1).flatten(order='F')\n #save the graph\n graph[\"source\"] = source.flatten().astype('uint32')\n graph[\"target\"] = neighbors.flatten().astype('uint32')\n graph[\"distances\"] = distances.flatten().astype('float32')\n return graph", "title": "" }, { "docid": "4a526ce9434a88542b9b4241af3bede8", "score": "0.5243471", "text": "def build_from(model):\n\n assert isinstance(model, dnn.Model)\n\n dg = NetDiagram(model.name)\n\n # variable -> latest upstream\n upstream_map = dict()\n\n # add layer nodes\n for layer in model.layer_entries():\n # add a new layer\n lid = layer.id\n dg.add_layer(lid, layer.typename)\n\n # add link from its dependent layers\n dep_layer_ids = []\n for vid in layer.bottom_ids():\n dep_id = upstream_map.get(vid)\n if dep_id:\n dep_layer_ids.append(dep_id)\n\n for dep_id in dep_layer_ids:\n dg.add_link(dep_id, lid)\n\n # make this layer the latest upstream of its outputs\n for out_vid in layer.top_ids():\n upstream_map[out_vid] = lid\n\n return dg", "title": "" }, { "docid": "bf683aa19b0d3d634cbc740b097d1f2a", "score": "0.52337164", "text": "def construct_graph(matrix):\n \n print(\"Constructing graph from coordinate file format...\")\n G = nx.Graph(matrix)\n print(\"Construction done.\")\n return G", "title": "" }, { "docid": "2053dfe2e39d3a01b43f976660c03523", "score": "0.5226109", "text": "def default_graph_init(connectivity, model = 'BetaBernoulli', extra_conn = None):\n T1_N = connectivity.shape[0]\n assert connectivity.shape[0] == connectivity.shape[1]\n\n latent = {'domains' : {'d1' : {'hps' : {'alpha' : 1.0},\n 'assignment' : np.arange(T1_N) % 50} \n },\n 'relations' : { 'R1' : {'hps' : {'alpha' : 1.0, \n 'beta' : 1.0}}}}\n\n data = {'domains' : {'d1' : { 'N' : T1_N}, }, \n 'relations' : { 'R1' : {'relation' : ('d1', 'd1'), \n 'model' : model, \n 'data' : connectivity}}}\n if extra_conn != None:\n for c_i, connectivity in 
enumerate(extra_conn):\n assert T1_N == connectivity.shape[0]\n assert connectivity.shape[0] == connectivity.shape[1]\n r_name = 'R%d' % (c_i +2)\n latent['relations'][r_name] = {'hps' : {'alpha' : 1.0, \n 'beta' : 1.0}}\n data['relations'][r_name] = {'relation' : ('d1', 'd1'), \n 'model' : model, \n 'data' : connectivity}\n \n\n return latent, data", "title": "" }, { "docid": "cc71b1d9e234e4530a306aad28c1badc", "score": "0.5225895", "text": "def graph2matrix(self) -> Matrix:\n matrix = [[None] + [v_id for v_id in self.vertices]]\n for v_id, outgoing in self.vertices.items():\n matrix.append([v_id] + [outgoing.adj.get(v) for v in self.vertices])\n return matrix if self.size else None", "title": "" }, { "docid": "1f6a861a6ca197ee4976bb9e9b327e24", "score": "0.522445", "text": "def gen_graph(self):\n\n self.adj_mat = np.eye(self.n)\n edges = np.random.choice(self.max_edges, self.m)\n for i, j in self.map[edges]:\n self.adj_mat[i, j] = 1\n return np.reshape(self.adj_mat, -1)", "title": "" }, { "docid": "f19808f454d5e729dee7a50c78adc863", "score": "0.5222895", "text": "def get_network_graph(self):\r\n\r\n # Initialise map between all nodes directly connected to each node\r\n network_graph = {n: set() for n in self.df_n.index}\r\n\r\n # Loop through AC edges, update map\r\n for index, row in self.df_e.iterrows():\r\n network_graph[row['FROM_NODE']].add(row['TO_NODE'])\r\n network_graph[row['TO_NODE']].add(row['FROM_NODE'])\r\n\r\n return network_graph", "title": "" }, { "docid": "d99612ab03f2823c320a9296f4550941", "score": "0.52217776", "text": "def create_network(edge_list, node_list):\n G = nx.MultiDiGraph()\n\n edge_attr = ['lenght_m', 'diameter_mm', 'heat_transfer_coefficient_W/mK', 'roughness_mm']\n G = nx.from_pandas_edgelist(edge_list, 'from_node', 'to_node', edge_attr=edge_attr, create_using=G)\n\n for node in G.nodes:\n G.add_node(node, lon=node_list.loc[int(node)]['lon'],\n lat=node_list.loc[int(node)]['lat'],\n node_type=node_list.loc[int(node)]['node_type'])\n\n return G", "title": "" }, { "docid": "3f1248ab0099e6208b8ea4425d80fc47", "score": "0.520385", "text": "def build_graph(node_count, edge_count, edges):\n adjacencyMatrix = np.zeros((node_count, node_count))\n for edge in edges:\n adjacencyMatrix[edge[0], edge[1]] = 1\n adjacencyMatrix[edge[1], edge[0]] = 1\n return adjacencyMatrix", "title": "" }, { "docid": "3aa307b2d72c173aebf4d2dbfcbdcbb2", "score": "0.51980364", "text": "def get_graph(json_data: str) -> ig.Graph:\n vertices = json_data['nodes']\n edges = json_data['edges']\n ncol = []\n for edge in edges:\n ncol.append((edge[0],edge[1],float(edge[2])))\n\n g = ig.Graph.TupleList(ncol,edge_attrs=\"weights\")\n return g", "title": "" }, { "docid": "ac0bfceaeddc66a67e9ec8c6444ab072", "score": "0.5196855", "text": "def pgframe_to_neo4j(pgframe=None, uri=None, username=None, password=None,\n driver=None, node_label=None, edge_label=None, directed=True,\n node_types_as_labels=False,\n edge_types_as_labels=False,\n batch_size=10000):\n if node_label is None:\n if node_types_as_labels is False or not pgframe.has_node_types():\n raise BlueGraphException(\n \"Cannot create a Neo4j graph without node labels: \"\n \"node label is not provided \"\n \"and 'node_types_as_labels' is either set to False \"\n \"or the nodes do not have types\")\n\n if edge_label is None:\n if edge_types_as_labels is False or not pgframe.has_edge_types():\n raise BlueGraphException(\n \"Cannot create a Neo4j graph without edge labels: \"\n \"edge label is not provided \"\n \"and 'edge_types_as_labels' is 
either set to False \"\n \"or the edges do not have types\")\n else:\n if edge_types_as_labels is True and pgframe.has_edge_types():\n warnings.warn(\n \"Edge types are used as Neo4j relationship types, \"\n \"provided edge label will be ignored\",\n BlueGraphWarning)\n\n driver = generate_neo4j_driver(uri, username, password, driver)\n\n if pgframe is None:\n return Neo4jGraphView(\n driver, node_label, edge_label, directed=directed)\n\n # Create nodes\n\n # Split nodes into batches\n batches = np.array_split(\n pgframe._nodes.index,\n math.ceil(pgframe.number_of_nodes() / batch_size))\n # Run node creation queries for different batches\n for batch in batches:\n node_batch = pgframe._nodes.loc[batch]\n node_repr = []\n for index, properties in node_batch.to_dict(\"index\").items():\n node_id = safe_node_id(index)\n node_dict = [\n \"id: '{}'\".format(node_id)\n ]\n node_dict += _generate_property_repr(\n properties, pgframe._node_prop_types)\n node_repr.append(\"{\" + \", \".join(node_dict) + \"}\")\n\n node_label_repr = f\":{node_label}\" if node_label else \"\"\n\n query = (\n f\"\"\"\n WITH [{\", \".join(node_repr)}] AS batch\n UNWIND batch as individual\n CREATE (n{node_label_repr})\n SET n += individual\n \"\"\")\n execute(driver, query)\n\n # Add node types to the Neo4j node labels\n if node_types_as_labels:\n with driver.session() as session:\n for index, properties in pgframe._nodes.to_dict(\"index\").items():\n labels = labels_from_types(properties)\n if len(labels) > 0:\n result = session.run(\n \"MATCH (n {{id: '{}'}})\\n\".format(safe_node_id(index)) +\n \"SET n:{}\".format(\":\".join(labels))\n )\n\n # Create edges\n custom_rel_types = edge_types_as_labels and pgframe.has_edge_types()\n if custom_rel_types:\n edge_labels = pgframe.edge_types(flatten=True)\n else:\n edge_labels = [edge_label]\n\n for edge_label in edge_labels:\n # Select edges of a given type, if applicable\n edges = pgframe.edges(\n raw_frame=True,\n typed_by=edge_label if custom_rel_types else None)\n\n # Split edges into batches\n batches = np.array_split(\n edges.index, math.ceil(edges.index.shape[0] / batch_size))\n for batch in batches:\n edge_batch = edges.loc[batch]\n edge_repr = []\n for (s, t), properties in edge_batch.to_dict(\"index\").items():\n edge_dict = [\n \"source: '{}'\".format(safe_node_id(s)),\n \"target: '{}'\".format(safe_node_id(t))\n ]\n edge_props = []\n for k, v in properties.items():\n if k != \"@type\":\n quote = \"'\"\n if pgframe._edge_prop_types[k] == \"numeric\":\n quote = \"\"\n edge_props.append(\n f\"{k}: {quote}{preprocess_value(v)}{quote}\")\n edge_dict.append(\"props: {{{}}}\".format(\n ', '.join(_generate_property_repr(\n properties, pgframe._edge_prop_types))))\n edge_repr.append(\"{\" + \", \".join(edge_dict) + \"}\")\n\n query = (\n f\"\"\"\n WITH [{\", \".join(edge_repr)}] AS batch\n UNWIND batch as individual\n MATCH (n {{id: individual[\"source\"]}})\n WITH individual, n\n OPTIONAL MATCH (m {{id: individual[\"target\"]}})\n FOREACH (dummy in CASE WHEN m IS NULL THEN [] ELSE [1] END |\n CREATE (n)-[r:{edge_label}]->(m)\n SET r += individual[\"props\"]\n )\n \"\"\")\n execute(driver, query)\n\n return Neo4jGraphView(driver, node_label, edge_label, directed=directed)", "title": "" }, { "docid": "2114d062b51071403d87b41c77ebce92", "score": "0.51967597", "text": "def encode(self, ego_graph_tensor):\n\n pass", "title": "" }, { "docid": "179222ebbc014350b0720bd244029dbe", "score": "0.5195671", "text": "def import_graph(hl_graph, tf_graph, output=None, verbose=False):\n # 
Get clean(er) list of nodes\n graph_def = tf_graph.as_graph_def(add_shapes=True)\n graph_def = tf.graph_util.remove_training_nodes(graph_def)\n\n # Dump list of TF nodes (DEBUG only)\n if verbose:\n dump_tf_graph(tf_graph, graph_def)\n\n # Loop through nodes and build the matching directed graph\n for tf_node in graph_def.node:\n # Read node details\n try:\n op, uid, name, shape, params = import_node(tf_node, tf_graph, verbose)\n except:\n if verbose:\n logging.exception(\"Failed to read node {}\".format(tf_node))\n continue\n\n # Add node\n hl_node = Node(uid=uid, name=name, op=op, output_shape=shape, params=params)\n hl_graph.add_node(hl_node)\n\n # Add edges\n for target_node in graph_def.node:\n target_inputs = target_node.input\n if uid in target_node.input:\n hl_graph.add_edge_by_id(uid, target_node.name, shape)\n return hl_graph", "title": "" }, { "docid": "0884c2eae4f308fc4944bb6530a3d853", "score": "0.51946765", "text": "def make_double_graph(AllLinks):\n MyGraph = nx.Graph(name=\"word-graph\")\n for Link in AllLinks:\n Node1 = Link[0]\n Node2 = Link[1]\n Weight = Link[2]\n #print(Node1, Node2, Weight)\n MyGraph.add_node(Node1, label=Node1)\n MyGraph.add_edge(Node1, Node2, weight=Weight)\n nx.write_gexf(MyGraph,\"mygraph_contemps.gexf\")\n return MyGraph", "title": "" }, { "docid": "1e4744159f1e1f611a69565c9dd280cf", "score": "0.51856625", "text": "def convert_fully_connected(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n initializer = kwargs[\"initializer\"]\n\n no_bias = get_boolean_attribute_value(attrs, \"no_bias\")\n\n fcnode = []\n\n op_name = \"flatten_\" + str(kwargs[\"idx\"])\n flatten_node = onnx.helper.make_node(\n 'Flatten',\n inputs=[input_nodes[0]],\n outputs=[op_name],\n name=op_name\n )\n\n input_nodes[0] = op_name\n fcnode.append(flatten_node)\n\n if no_bias:\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))]\n bias_name = \"bias\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(bias_name, data_type, (1,))\n initializer.append(\n onnx.helper.make_tensor(\n name=bias_name,\n data_type=data_type,\n dims=(1,),\n vals=[0],\n raw=False,\n )\n )\n input_nodes.append(bias_name)\n fcnode.append(tensor_node)\n\n node = onnx.helper.make_node(\n \"Gemm\",\n input_nodes, # input (A, B, C) - C can be in place\n [name], # output\n alpha=1.0,\n beta=1.0,\n transA=False,\n transB=True,\n name=name\n )\n\n fcnode.append(node)\n\n return fcnode", "title": "" }, { "docid": "f78134e86fd5dbfa6d05c382841a40a8", "score": "0.5184624", "text": "def to_torch_graph(graphs, task):\n if task == 'graph':\n return [torch.tensor(g) for g in graphs]\n else:\n return torch.tensor(graphs)", "title": "" }, { "docid": "bf6c1ab86830928ad97ead4741433c3e", "score": "0.5184423", "text": "def _lg_directed(G, create_using=None):\n L = nx.empty_graph(0, create_using, default=G.__class__)\n\n # Create a graph specific edge function.\n get_edges = _edge_func(G)\n\n for from_node in get_edges():\n # from_node is: (u,v) or (u,v,key)\n L.add_node(from_node)\n for to_node in get_edges(from_node[1]):\n L.add_edge(from_node, to_node)\n\n return L", "title": "" }, { "docid": "12456953791f7aa28b2d54e748aefe5d", "score": "0.5180125", "text": "def convert_graph_def(\n graph_def, pass_pipeline='tf-standard-pipeline', show_debug_info=False\n):\n return pywrap_mlir.import_graphdef(graph_def, pass_pipeline, show_debug_info)", "title": "" }, { "docid": "b6bdc317911028966c4ee3caeb473d0f", "score": "0.5156328", "text": "def 
triu_to_3d_dense(triu_values, num_nodes, depth=len(SUPPORTED_EDGES)):\n # Create placeholder for 3d matrix\n adj_matrix_3d = torch.empty((num_nodes, num_nodes, depth), dtype=torch.float, device=device)\n for edge_type in range(len(SUPPORTED_EDGES)):\n adj_mat_edge_type = triu_to_dense(triu_values[:, edge_type].float(), num_nodes)\n adj_matrix_3d[:, :, edge_type] = adj_mat_edge_type\n return adj_matrix_3d", "title": "" }, { "docid": "e99871c745a38dd484988596ebef87a5", "score": "0.5155884", "text": "def __edges_to_rdf__(e, graph, NODES):\n for edge in e.edges:\n graph.add((NODES[edge[0]], EDS[edge[1].lower()], NODES[edge[2]]))", "title": "" }, { "docid": "6ca70fddc7da6dc64d0f0d581d2d9067", "score": "0.5152426", "text": "def read_graph():\n return nx.read_edgelist('edges.txt.gz', delimiter='\\t')", "title": "" }, { "docid": "4c1a25f17f7473dfd1f4186d00fc5aa9", "score": "0.51506066", "text": "def learn_graph(self, labels=None):\n graph = self._method.predict(self._data)\n\n # Get adjacency matrix\n self._adjacency_matrix = nx.to_numpy_matrix(graph)\n self._adjacency_matrix = np.asarray(self._adjacency_matrix)\n\n # If labels not provided\n if labels is not None:\n self._labels = labels\n\n self._graph_dot = adjacency_matrix_to_graph(self._adjacency_matrix, self._labels)\n\n # Obtain valid DOT format\n self._graph_dot = str_to_dot(self._graph_dot.source)\n return self._graph_dot", "title": "" }, { "docid": "50c6c80306d92828f17300d197be569a", "score": "0.5145867", "text": "def graph(hlda):\n dict_list = hlda.dict_for_tree(5)\n dot = Digraph(comment='Topics')\n t = Digraph (\"HLda\")\n for l in dict_list:\n dot.node(str(l[\"topic_id\"]),(\"Topic \" + str(l[\"topic_id\"])+\": \\n\" + l[\"topic\"].replace(\",\",\"\\n\")), shape = \"box\")\n if l[\"parent\"] != -1:\n dot.edge(str(l[\"parent\"]),str(l[\"topic_id\"]),constraint='true')\n return dot", "title": "" }, { "docid": "c9850adcc49bf49c54ed3412eefc2f5e", "score": "0.5143274", "text": "def visualize_model(model, inp_size=[1, 3, 64, 64], device=\"cuda:0\"): #%t\n model = model.to(device)\n model.eval()\n graph = hl.build_graph(model, torch.zeros(inp_size).to(device))\n return graph", "title": "" }, { "docid": "6d463b33fc2f9bda2430c1054ee221b3", "score": "0.5142066", "text": "def convert_to_nx_graph(self, castlist=None):\n\n G = nx.DiGraph()\n ### add nodes to the graph\n for node in self.orig_graph_json['nodes']:\n # TODO: check node is not empty or invalid?\n if not node['name'].strip():\n continue\n\n if node['id'] in G.nodes():\n print(\"BROKEN: Found duplicate node-id {} in graph for {}\".format(node['id'], self.video['fname']))\n continue\n # raise RuntimeError('Node with same id already exists!')\n\n # add entity nodes\n if node['type'] == 'entity':\n G.add_node(node['id'], name=node['name'], node_id=node['node_id'])\n\n # add attribute nodes\n elif node['type'] == 'attribute':\n subtype = '' # default ''\n text = node['name'] # default \"name\"\n if ':' in node['name']:\n subtype, text = node['name'].split(':')\n G.add_node(node['id'], name=text, subtype=subtype)\n\n elif node['type'] == 'time':\n if 't_start' in node and 't_end' in node:\n G.add_node(node['id'], name=node['name'], start=node['t_start'], end=node['t_end'])\n else:\n warnings.warn(\"Time node without proper assignment!\", RuntimeWarning)\n G.add_node(node['id'], name=node['name'])\n\n # add all other nodes\n else:\n G.add_node(node['id'], name=node['name'])\n\n # add position and type\n # G.add_node(node['id'], origtext=node['name'], type=node['type'], pos=(node['x'], 
node['y']))\n if 'x' in node and 'y' in node:\n G.add_node(node['id'], origtext=node['name'], type=node['type'], pos=(node['x'], node['y']))\n else:\n G.add_node(node['id'], origtext=node['name'], type=node['type'], pos=(0, 0))\n\n if self.situation:\n G.add_node(-1, origtext=self.situation, name=self.situation, type='situation', pos=(0, 0))\n if self.scene_label:\n G.add_node(-2, origtext=self.scene_label, name=self.scene_label, type='scene', pos=(0, 10))\n\n ### add edges to the graph\n for edge in self.orig_graph_json['edges']:\n # check nodes exist\n if edge['source'] in G.nodes() and edge['target'] in G.nodes():\n G.add_edge(edge['source'], edge['target'])\n else:\n warnings.warn('Edge source/target node not in graph. %d --> %d' \\\n %(edge['source'], edge['target']), RuntimeWarning)\n\n # save\n self.G = G", "title": "" }, { "docid": "5f45ee42156bca7c4ca02f7e10e5443a", "score": "0.5141248", "text": "def obs_to_graph(self, obs):\n nodes = tf.compat.v1.gather(tf.compat.v1.cast(obs, dtype=tf.float32), self._node_indices) # Ordering the obs\n nodes = tf.compat.v1.reshape(nodes, (self.num_nodes, 1))\n\n data_dict = {\n \"globals\": self.globals,\n \"nodes\": nodes,\n \"edges\": self.edges,\n \"receivers\": self.receivers,\n \"senders\": self.senders\n }\n return data_dicts_to_graphs_tuple_eager([data_dict])", "title": "" }, { "docid": "d385f64631f14992bf61f816aad48dbb", "score": "0.5130671", "text": "def mol2dgl_dec(cand_batch, atom_featurizer, bond_featurizer):\n # Note that during graph decoding they don't predict stereochemistry-related\n # characteristics (i.e. Chiral Atoms, E-Z, Cis-Trans). Instead, they decode\n # the 2-D graph first, then enumerate all possible 3-D forms and find the\n # one with highest score.\n cand_graphs = []\n tree_mess_source_edges = [] # map these edges from trees to...\n tree_mess_target_edges = [] # these edges on candidate graphs\n tree_mess_target_nodes = []\n n_nodes = 0\n\n for mol, mol_tree, ctr_node_id in cand_batch:\n n_atoms = mol.GetNumAtoms()\n\n g = mol_to_bigraph(mol,\n node_featurizer=atom_featurizer,\n edge_featurizer=bond_featurizer,\n canonical_atom_order=False)\n cand_graphs.append(g)\n\n if isinstance(mol_tree, DGLMolTree):\n tree_graph = mol_tree.g\n else:\n tree_graph = mol_tree\n\n for i, bond in enumerate(mol.GetBonds()):\n a1, a2 = bond.GetBeginAtom(), bond.GetEndAtom()\n begin_idx, end_idx = a1.GetIdx(), a2.GetIdx()\n x_nid, y_nid = a1.GetAtomMapNum(), a2.GetAtomMapNum()\n # Tree node ID in the batch\n x_bid = mol_tree.nodes_dict[x_nid - 1]['idx'] if x_nid > 0 else -1\n y_bid = mol_tree.nodes_dict[y_nid - 1]['idx'] if y_nid > 0 else -1\n\n if x_bid >= 0 and y_bid >= 0 and x_bid != y_bid:\n if tree_graph.has_edges_between(x_bid, y_bid):\n tree_mess_target_edges.append(\n (begin_idx + n_nodes, end_idx + n_nodes))\n tree_mess_source_edges.append((x_bid, y_bid))\n tree_mess_target_nodes.append(end_idx + n_nodes)\n if tree_graph.has_edges_between(y_bid, x_bid):\n tree_mess_target_edges.append(\n (end_idx + n_nodes, begin_idx + n_nodes))\n tree_mess_source_edges.append((y_bid, x_bid))\n tree_mess_target_nodes.append(begin_idx + n_nodes)\n\n # Update offset\n n_nodes += n_atoms\n\n return cand_graphs, \\\n torch.IntTensor(tree_mess_source_edges), \\\n torch.IntTensor(tree_mess_target_edges), \\\n torch.IntTensor(tree_mess_target_nodes)", "title": "" }, { "docid": "957a1825c511f0b02dca7d84b2e8750f", "score": "0.5127597", "text": "def connect_representation_graph(self, tf_features, n_components, n_features, node_name_ending):\n 
tf_tanh_weights = tf.Variable(tf.random.normal([n_features, n_components], stddev=.5),\n name='tanh_weights_%s' % node_name_ending)\n\n tf_repr = tf.nn.tanh(tf.sparse.sparse_dense_matmul(tf_features, tf_tanh_weights))\n\n # Return repr layer and variables\n return tf_repr, [tf_tanh_weights]", "title": "" }, { "docid": "48c63a21e7f4d5df1c2da52f13c80eb0", "score": "0.5107274", "text": "def gtfilestodigraph(modelname, nodefilename, edgefilename):\n\n node_fullpath = gtfolder+modelname+'/'+nodefilename\n edge_fullpath = gtfolder+modelname+'/'+edgefilename\n \n # Build the list of nodes\n \n nodelist = []\n with open(node_fullpath, 'r') as nodefile:\n i=0\n for line in nodefile:\n if not i==0:\n line_array = re.split('\\s+', line) # '\\s' is a regex for white space (tab, whitespace, newline)\n nid = int(line_array[0])\n nodelist.append(nid)\n i = i+1\n nodefile.close()\n \n # Build the list of edges\n edgelist = []\n with open(edge_fullpath, 'r') as edgefile:\n i=0\n for line in edgefile:\n if not i==0:\n line_array = re.split('\\s+', line)\n start = int(line_array[1])\n end = int(line_array[2])\n edgelist.append((start,end))\n i = i+1\n edgefile.close()\n \n # Build the digraph\n dgraph = nx.DiGraph()\n dgraph.add_nodes_from(nodelist)\n dgraph.add_edges_from(edgelist)\n \n return dgraph", "title": "" }, { "docid": "410699dc062c9773a836882f1bdbfb6a", "score": "0.5104183", "text": "def to_networkx_graph(graph: MolGraph) -> nx.Graph:\n G = nx.Graph(graph.adj_list)\n node_attrs = {\n num: {\"element\": element, \"xyz\": xyz}\n for num, (element, xyz) in enumerate(graph)\n }\n nx.set_node_attributes(G, node_attrs)\n edge_attrs = {\n edge: {\"length\": length} for edge, length in graph.bond_lengths.items()\n }\n nx.set_edge_attributes(G, edge_attrs)\n return G", "title": "" }, { "docid": "d7a9ce953c6a22b397895cb5d1b62508", "score": "0.51030403", "text": "def get_graph(table_of_categorie, SGdb):\n MainGraph = nx.DiGraph() # The networkx graph\n graph_from_db = SGdb.execute(\"select * from \"+table_of_categorie) # Get the saved data from the db\n # cast puvi to float\n edges_and_weights = [(user1, user2, float(puvi)) for (user1, user2, puvi) in list(graph_from_db.fetchall())]\n # Create the networkx graph with data from the db\n MainGraph.add_weighted_edges_from(edges_and_weights)\n\n print(\"-------------------------------------------------------------------------------\")\n print(\"Creating graph...Done\")\n print(\"Number of nodes in graph: \", MainGraph.number_of_nodes())\n print(\"Number of edges in graph: \", MainGraph.number_of_edges())\n print(\"-------------------------------------------------------------------------------\")\n\n return MainGraph", "title": "" }, { "docid": "924b3338bd8058c4c4821033c8729967", "score": "0.5097614", "text": "def construct_adj(self, kg_graph):\n # self.logger.info('constructing knowledge graph ...')\n # treat the KG as an undirected graph\n kg_dict = dict()\n for triple in zip(kg_graph.row, kg_graph.data, kg_graph.col):\n head = triple[0]\n relation = triple[1]\n tail = triple[2]\n if head not in kg_dict:\n kg_dict[head] = []\n kg_dict[head].append((tail, relation))\n if tail not in kg_dict:\n kg_dict[tail] = []\n kg_dict[tail].append((head, relation))\n\n # self.logger.info('constructing adjacency matrix ...')\n # each line of adj_entity stores the sampled neighbor entities for a given entity\n # each line of adj_relation stores the corresponding sampled neighbor relations\n entity_num = kg_graph.shape[0]\n adj_entity = np.zeros([entity_num, 
self.neighbor_sample_size], dtype=np.int64)\n adj_relation = np.zeros([entity_num, self.neighbor_sample_size], dtype=np.int64)\n for entity in range(entity_num):\n if entity not in kg_dict.keys():\n adj_entity[entity] = np.array([entity] * self.neighbor_sample_size)\n adj_relation[entity] = np.array([0] * self.neighbor_sample_size)\n continue\n\n neighbors = kg_dict[entity]\n n_neighbors = len(neighbors)\n if n_neighbors >= self.neighbor_sample_size:\n sampled_indices = np.random.choice(\n list(range(n_neighbors)),\n size=self.neighbor_sample_size,\n replace=False,\n )\n else:\n sampled_indices = np.random.choice(\n list(range(n_neighbors)),\n size=self.neighbor_sample_size,\n replace=True,\n )\n adj_entity[entity] = np.array([neighbors[i][0] for i in sampled_indices])\n adj_relation[entity] = np.array([neighbors[i][1] for i in sampled_indices])\n\n return torch.from_numpy(adj_entity), torch.from_numpy(adj_relation)", "title": "" }, { "docid": "5766b6ad87766babd7c59d5bf7042117", "score": "0.50975144", "text": "def graph_from_dict(d):\n\n g = networkx.DiGraph()\n for key, children in d.items():\n for child in children:\n g.add_edge(key, child)\n return g", "title": "" }, { "docid": "bf70d96e6ee55994ed60b7b265a1b154", "score": "0.50940245", "text": "def adj2edgelist(adj):\n nonzeros = adj.nonzero()\n max_fan_idx = max(nonzeros[0])\n # need to change the indices as the graph is bipartite and otherwise vertices will be interpreted differently\n star_idx = nonzeros[1] + max_fan_idx + 1\n df = pd.DataFrame({'fan_idx': nonzeros[0], 'star_idx': star_idx})\n return df", "title": "" }, { "docid": "09cfe8b716ca719aeddcc7221c11806b", "score": "0.50860983", "text": "def call(\n self, inputs):\n features, adjacency = inputs\n\n assert isinstance(features, tf.Tensor)\n assert isinstance(adjacency, tf.SparseTensor)\n assert len(features.shape) == 2\n assert len(adjacency.shape) == 2\n assert features.shape[0] == adjacency.shape[0]\n\n assignments = tf.nn.softmax(self.transform(features), axis=1)\n cluster_sizes = tf.math.reduce_sum(assignments, axis=0) # Size [k].\n assignments_pooling = assignments / cluster_sizes # Size [n, k].\n\n degrees = tf.sparse.reduce_sum(adjacency, axis=0) # Size [n].\n degrees = tf.reshape(degrees, (-1, 1))\n\n number_of_nodes = adjacency.shape[1]\n number_of_edges = tf.math.reduce_sum(degrees)\n\n # Computes the size [k, k] pooled graph as S^T*A*S in two multiplications.\n graph_pooled = tf.transpose(\n tf.sparse.sparse_dense_matmul(adjacency, assignments))\n graph_pooled = tf.matmul(graph_pooled, assignments)\n\n # We compute the rank-1 normaizer matrix S^T*d*d^T*S efficiently\n # in three matrix multiplications by first processing the left part S^T*d\n # and then multyplying it by the right part d^T*S.\n # Left part is [k, 1] tensor.\n normalizer_left = tf.matmul(assignments, degrees, transpose_a=True)\n # Right part is [1, k] tensor.\n normalizer_right = tf.matmul(degrees, assignments, transpose_a=True)\n\n # Normalizer is rank-1 correction for degree distribution for degrees of the\n # nodes in the original graph, casted to the pooled graph.\n normalizer = tf.matmul(normalizer_left,\n normalizer_right) / 2 / number_of_edges\n spectral_loss = -tf.linalg.trace(graph_pooled -\n normalizer) / 2 / number_of_edges\n self.add_loss(spectral_loss)\n\n collapse_loss = tf.norm(cluster_sizes) / number_of_nodes * tf.sqrt(\n float(self.n_clusters)) - 1\n self.add_loss(self.collapse_regularization * collapse_loss)\n\n features_pooled = tf.matmul(assignments_pooling, features, 
transpose_a=True)\n features_pooled = tf.nn.selu(features_pooled)\n if self.do_unpooling:\n features_pooled = tf.matmul(assignments_pooling, features_pooled)\n return features_pooled, assignments", "title": "" }, { "docid": "3e578cbd685435f80df14301afca1061", "score": "0.5083476", "text": "def graph_structure(graph, content_graph, path):\n A = scipy.sparse.csr_matrix(nx.google_matrix(graph, alpha = 1, weight = 'weight'))\n W = nx.to_scipy_sparse_matrix(content_graph)\n W2 = (W.dot(A) + A.transpose().dot(W)) * 0.5\n # rescaling in range 0-1\n max_val = W2.max()\n W2 = W2.multiply(1.0 / max_val)\n\n save_matrix_to_edgelist(W2, path)", "title": "" }, { "docid": "03def76b09a66ae9c3157cabddff14d6", "score": "0.5064086", "text": "def genGraphFromContactFile(commutes: pd.DataFrame) -> nx.DiGraph:\n G = nx.convert_matrix.from_pandas_edgelist(commutes, edge_attr=True, create_using=nx.DiGraph)\n for edge in G.edges.data():\n if \"weight\" not in edge[2] or edge[2][\"weight\"] < 0.0:\n raise ValueError(\"missing weight or weight less than zero\")\n if \"delta_adjustment\" not in edge[2] or edge[2][\"delta_adjustment\"] < 0.0:\n raise ValueError(\"missing delta_adjustment or delta_adjustment less than zero\")\n\n return G", "title": "" }, { "docid": "1f693c4ea1c665a060405994ae6b006c", "score": "0.50611293", "text": "def get_dummy_connected_graph():\n conn_graph = ConnectedGraph()\n op1 = Op('op1', 'op1_dotted_name', None, False, 'op1_type')\n op2 = Op('op2', 'op2_dotted_name', None, False, 'op2_type')\n op2.model_module = ModelModule('module')\n op3 = Op('op3', 'op3_dotted_name', None, False, 'op3_type')\n op4 = Op('op4', 'op4_dotted_name', None, False, 'op4_type')\n op5 = Op('op5', 'op5_dotted_name', None, False, 'op5_type')\n\n prod_inp_1 = Product('input1_to_op1', None)\n prod_inp_1.is_model_input = True\n prod_inp_1.add_consumer(op1)\n op1.add_input(prod_inp_1)\n\n prod_inp_2 = Product('input2_to_op2', None)\n prod_inp_2.is_model_input = True\n prod_inp_2.add_consumer(op2)\n op2.add_input(prod_inp_2)\n\n prod_1_3 = Product('op1_to_op3', None)\n prod_1_3.producer = op1\n prod_1_3.add_consumer(op3)\n op1.output = prod_1_3\n op3.add_input(prod_1_3)\n\n prod_2_3 = Product('op2_to_op3', None)\n prod_2_3.producer = op2\n prod_2_3.add_consumer(op3)\n op2.output = prod_2_3\n op3.add_input(prod_2_3)\n\n prod_3_out = Product('op3_to_multiple_ops', None)\n prod_3_out.producer = op3\n prod_3_out.add_consumer(op4)\n prod_3_out.add_consumer(op5)\n op3.output = prod_3_out\n op4.add_input(prod_3_out)\n op5.add_input(prod_3_out)\n\n prod_4_param = Product('op4.param', None)\n prod_4_param.is_parm = True\n prod_4_param.add_consumer(op4)\n op4.add_input(prod_4_param)\n\n prod_5_param_1 = Product('op5.param1', None)\n prod_5_param_1.is_parm = True\n prod_5_param_1.add_consumer(op5)\n op5.add_input(prod_5_param_1)\n\n prod_5_param_2 = Product('op5.param2', None)\n prod_5_param_2.is_parm = True\n prod_5_param_2.add_consumer(op5)\n op5.add_input(prod_5_param_2)\n\n conn_graph._ops[op1.name] = op1\n conn_graph._ops[op2.name] = op2\n conn_graph._ops[op3.name] = op3\n conn_graph._ops[op4.name] = op4\n conn_graph._ops[op5.name] = op5\n\n conn_graph._products[prod_inp_1.name] = prod_inp_1\n conn_graph._products[prod_inp_2.name] = prod_inp_2\n conn_graph._products[prod_1_3.name] = prod_1_3\n conn_graph._products[prod_2_3.name] = prod_2_3\n conn_graph._products[prod_3_out.name] = prod_3_out\n conn_graph._products[prod_4_param.name] = prod_4_param\n conn_graph._products[prod_5_param_1.name] = prod_5_param_1\n 
conn_graph._products[prod_5_param_2.name] = prod_5_param_2\n\n return conn_graph", "title": "" }, { "docid": "b4967a70d9d42c1be5c1888c23c19410", "score": "0.5060961", "text": "def create_graph(graph_data):\n print(\"Initalize Pool on client\")\n rmm.reinitialize(pool_allocator=True)\n # Assume strings are names of datasets in the datasets package\n if isinstance(graph_data, str):\n ds = getattr(datasets, graph_data)\n edgelist_df = ds.get_edgelist()\n # FIXME: edgelist_df should have column names that match the defaults\n # for G.from_cudf_edgelist()\n\n # Assume dictionary contains RMAT params\n elif isinstance(graph_data, dict):\n scale = graph_data[\"scale\"]\n num_edges = (2**scale) * graph_data[\"edgefactor\"]\n seed = _seed\n edgelist_df = rmat(\n scale,\n num_edges,\n 0.57, # from Graph500\n 0.19, # from Graph500\n 0.19, # from Graph500\n seed,\n clip_and_flip=False,\n scramble_vertex_ids=False, # FIXME: need to understand relevance of this\n create_using=None, # None == return edgelist\n mg=False,\n )\n edgelist_df[\"weight\"] = cp.float32(1)\n\n else:\n raise TypeError(f\"graph_data can only be str or dict, got {type(graph_data)}\")\n\n num_nodes = max(edgelist_df['src'].max(),\n edgelist_df['dst'].max())+1\n\n num_nodes_dict = {'_N':num_nodes}\n\n gs = CuGraphStorage(num_nodes_dict=num_nodes_dict, single_gpu=True)\n gs.add_edge_data(edgelist_df,\n # reverse to make same graph as cugraph\n node_col_names=['dst', 'src'],\n canonical_etype=['_N', 'connects', '_N'])\n\n return gs", "title": "" }, { "docid": "f30f29948fb8427af76e7afd00aa1ec5", "score": "0.50579417", "text": "def load(self):\n print('Loading {} dataset...'.format(self.dataset_name))\n\n idx_features_labels = np.genfromtxt(\"{}/node\".format(self.dataset_source_folder_path), dtype=np.dtype(str))\n features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\n\n one_hot_labels = self.encode_onehot(idx_features_labels[:, -1])\n\n # build graph\n idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n idx_map = {j: i for i, j in enumerate(idx)}\n index_id_map = {i: j for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(\"{}/link\".format(self.dataset_source_folder_path),\n dtype=np.int32)\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),\n dtype=np.int32).reshape(edges_unordered.shape)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(one_hot_labels.shape[0], one_hot_labels.shape[0]),\n dtype=np.float32)\n\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n eigen_adj = None\n if self.compute_s:\n eigen_adj = self.c * inv((sp.eye(adj.shape[0]) - (1 - self.c) * self.adj_normalize(adj)).toarray())\n\n norm_adj = self.adj_normalize(adj + sp.eye(adj.shape[0]))\n\n if self.dataset_name == 'cora':\n idx_train = range(140)\n idx_test = range(200, 1200)\n idx_val = range(1200, 1500)\n elif self.dataset_name == 'citeseer':\n idx_train = range(120)\n idx_test = range(200, 1200)\n idx_val = range(1200, 1500)\n #features = self.normalize(features)\n elif self.dataset_name == 'pubmed':\n idx_train = range(60)\n idx_test = range(6300, 7300)\n idx_val = range(6000, 6300)\n elif self.dataset_name == 'cora-small':\n idx_train = range(5)\n idx_val = range(5, 10)\n idx_test = range(5, 10)\n\n features = torch.FloatTensor(np.array(features.todense()))\n labels = torch.LongTensor(np.where(one_hot_labels)[1])\n adj = self.sparse_mx_to_torch_sparse_tensor(norm_adj)\n\n idx_train = torch.LongTensor(idx_train)\n idx_val = torch.LongTensor(idx_val)\n 
idx_test = torch.LongTensor(idx_test)\n\n if self.load_all_tag:\n hop_dict, wl_dict, batch_dict = self.load_hop_wl_batch()\n raw_feature_list = []\n role_ids_list = []\n position_ids_list = []\n hop_ids_list = []\n for node in idx:\n node_index = idx_map[node]\n neighbors_list = batch_dict[node]\n\n raw_feature = [features[node_index].tolist()]\n role_ids = [wl_dict[node]]\n position_ids = range(len(neighbors_list) + 1)\n hop_ids = [0]\n for neighbor, intimacy_score in neighbors_list:\n neighbor_index = idx_map[neighbor]\n raw_feature.append(features[neighbor_index].tolist())\n role_ids.append(wl_dict[neighbor])\n if neighbor in hop_dict[node]:\n hop_ids.append(hop_dict[node][neighbor])\n else:\n hop_ids.append(99)\n raw_feature_list.append(raw_feature)\n role_ids_list.append(role_ids)\n position_ids_list.append(position_ids)\n hop_ids_list.append(hop_ids)\n raw_embeddings = torch.FloatTensor(raw_feature_list)\n wl_embedding = torch.LongTensor(role_ids_list)\n hop_embeddings = torch.LongTensor(hop_ids_list)\n int_embeddings = torch.LongTensor(position_ids_list)\n else:\n raw_embeddings, wl_embedding, hop_embeddings, int_embeddings = None, None, None, None\n\n return {'X': features, 'A': adj, 'S': eigen_adj, 'index_id_map': index_id_map, 'edges': edges_unordered, 'raw_embeddings': raw_embeddings, 'wl_embedding': wl_embedding, 'hop_embeddings': hop_embeddings, 'int_embeddings': int_embeddings, 'y': labels, 'idx': idx, 'idx_train': idx_train, 'idx_test': idx_test, 'idx_val': idx_val}", "title": "" }, { "docid": "16d4cc969c9d52eeef7ca9f9853d5ce3", "score": "0.50549674", "text": "def create_nncf_graph(onnx_model: onnx.ModelProto) -> NNCFGraph:\n onnx_model = GraphConverter._replace_empty_node_name(onnx_model)\n nncf_graph = NNCFGraph()\n onnx_graph = ONNXGraph(onnx_model)\n for node in onnx_graph.get_all_nodes():\n metatype = ONNX_OPERATION_METATYPES.get_operator_metatype_by_op_name(node.op_type)\n if metatype.get_subtypes():\n subtype = metatype.determine_subtype(onnx_model, node)\n if subtype is not None:\n metatype = subtype\n\n if metatype in WEIGHT_LAYER_METATYPES:\n is_shared = onnx_graph.is_node_shared(node)\n weight_edge_name = onnx_graph.get_weight_tensor_edge(node)\n edge = onnx_graph.get_edge(weight_edge_name)\n weight_shape = ONNXGraph.get_edge_shape(edge)\n layer_attributes = ONNXExtendedLayerAttributes(node.input, node.output, weight_shape)\n else:\n is_shared, weight_edge_name, layer_attributes = None, None, None\n nncf_graph.add_nncf_node(\n node_name=node.name,\n node_type=node.op_type,\n node_metatype=metatype,\n layer_attributes=layer_attributes,\n layer_name=weight_edge_name,\n is_shared=is_shared,\n )\n for output_node in onnx_graph.get_all_nodes():\n output_edges = onnx_graph.get_node_edge_names(output_node.name)[\"output\"]\n for output_edge in output_edges:\n edge = onnx_graph.get_edge(output_edge)\n if edge is None:\n # If the edge is None it means that the edge was not added during shape inference of ONNX model.\n # BatchNorm exported in Training mode has unused outputs edges: mean, var, saved_mean, saved_var.\n # NNCFGraph should not contain such edges.\n continue\n tensor_shape = ONNXGraph.get_edge_shape(edge)\n onnx_dtype = ONNXGraph.get_edge_dtype(edge)\n nncf_dtype = GraphConverter.convert_onnx_dtype_to_nncf_dtype(onnx_dtype)\n output_node_id = nncf_graph.get_node_by_name(output_node.name).node_id\n input_nodes = onnx_graph.get_nodes_by_input(output_edge)\n for input_node in input_nodes:\n port_ids = ONNXGraph.get_port_ids_between_nodes(output_node, input_node)\n 
input_port_id = port_ids[\"input_port_id\"]\n output_port_id = port_ids[\"output_port_id\"]\n in_node_id = nncf_graph.get_node_by_name(input_node.name).node_id\n nncf_graph.add_edge_between_nncf_nodes(\n from_node_id=output_node_id,\n to_node_id=in_node_id,\n tensor_shape=tensor_shape,\n input_port_id=input_port_id,\n output_port_id=output_port_id,\n dtype=Dtype(nncf_dtype),\n )\n GraphConverter._add_nncf_input_nodes(onnx_graph, nncf_graph)\n GraphConverter._add_nncf_output_nodes(onnx_graph, nncf_graph)\n return nncf_graph", "title": "" }, { "docid": "72e70d9bf9adf2446f7af3c377ccd2b5", "score": "0.5044183", "text": "def compute_sp_graph(xyz, d_max, in_component, components, labels, n_labels):\n n_com = max(in_component)+1\n in_component = np.array(in_component)\n has_labels = len(labels) > 0\n label_hist = has_labels and len(labels.shape) > 1 and labels.shape[1] > 1\n #---compute delaunay triangulation---\n tri = Delaunay(xyz)\n #interface select the edges between different components\n #edgx and edgxr converts from tetrahedrons to edges\n\t#done separatly for each edge of the tetrahedrons to limit memory impact\n interface = in_component[tri.vertices[:, 0]] != in_component[tri.vertices[:, 1]]\n edg1 = np.vstack((tri.vertices[interface, 0], tri.vertices[interface, 1]))\n edg1r = np.vstack((tri.vertices[interface, 1], tri.vertices[interface, 0]))\n interface = in_component[tri.vertices[:, 0]] != in_component[tri.vertices[:, 2]]\n edg2 = np.vstack((tri.vertices[interface, 0], tri.vertices[interface, 2]))\n edg2r = np.vstack((tri.vertices[interface, 2], tri.vertices[interface, 0]))\n interface = in_component[tri.vertices[:, 0]] != in_component[tri.vertices[:, 3]]\n edg3 = np.vstack((tri.vertices[interface, 0], tri.vertices[interface, 3]))\n edg3r = np.vstack((tri.vertices[interface, 3], tri.vertices[interface, 0]))\n interface = in_component[tri.vertices[:, 1]] != in_component[tri.vertices[:, 2]]\n edg4 = np.vstack((tri.vertices[interface, 1], tri.vertices[interface, 2]))\n edg4r = np.vstack((tri.vertices[interface, 2], tri.vertices[interface, 1]))\n interface = in_component[tri.vertices[:, 1]] != in_component[tri.vertices[:, 3]]\n edg5 = np.vstack((tri.vertices[interface, 1], tri.vertices[interface, 3]))\n edg5r = np.vstack((tri.vertices[interface, 3], tri.vertices[interface, 1]))\n interface = in_component[tri.vertices[:, 2]] != in_component[tri.vertices[:, 3]]\n edg6 = np.vstack((tri.vertices[interface, 2], tri.vertices[interface, 3]))\n edg6r = np.vstack((tri.vertices[interface, 3], tri.vertices[interface, 2]))\n del tri, interface\n edges = np.hstack((edg1, edg2, edg3, edg4 ,edg5, edg6, edg1r, edg2r,\n edg3r, edg4r ,edg5r, edg6r))\n del edg1, edg2, edg3, edg4 ,edg5, edg6, edg1r, edg2r, edg3r, edg4r, edg5r, edg6r\n edges = np.unique(edges, axis=1)\n #---sort edges by alpha numeric order wrt to the components of their source/target---\n n_edg = len(edges[0])\n edge_comp = in_component[edges]\n edge_comp_index = n_com * edge_comp[0,:] + edge_comp[1,:]\n order = np.argsort(edge_comp_index)\n edges = edges[:, order]\n edge_comp = edge_comp[:, order]\n edge_comp_index = edge_comp_index[order]\n #marks where the edges change components iot compting them by blocks\n jump_edg = np.vstack((0, np.argwhere(np.diff(edge_comp_index)) + 1, n_edg)).flatten()\n n_sedg = len(jump_edg) - 1\n #---set up the edges descriptors---\n graph = dict([(\"is_nn\", False)])\n graph[\"sp_centroids\"] = np.zeros((n_com, 3), dtype='float32')\n graph[\"sp_length\"] = np.zeros((n_com, 1), dtype='float32')\n 
graph[\"sp_surface\"] = np.zeros((n_com, 1), dtype='float32')\n graph[\"sp_volume\"] = np.zeros((n_com, 1), dtype='float32')\n graph[\"sp_point_count\"] = np.zeros((n_com, 1), dtype='uint64')\n graph[\"source\"] = np.zeros((n_sedg, 1), dtype='uint32')\n graph[\"target\"] = np.zeros((n_sedg, 1), dtype='uint32')\n graph[\"se_delta_mean\"] = np.zeros((n_sedg, 3), dtype='float32')\n graph[\"se_delta_std\"] = np.zeros((n_sedg, 3), dtype='float32')\n graph[\"se_delta_norm\"] = np.zeros((n_sedg, 1), dtype='float32')\n graph[\"se_delta_centroid\"] = np.zeros((n_sedg, 3), dtype='float32')\n graph[\"se_length_ratio\"] = np.zeros((n_sedg, 1), dtype='float32')\n graph[\"se_surface_ratio\"] = np.zeros((n_sedg, 1), dtype='float32')\n graph[\"se_volume_ratio\"] = np.zeros((n_sedg, 1), dtype='float32')\n graph[\"se_point_count_ratio\"] = np.zeros((n_sedg, 1), dtype='float32')\n if has_labels:\n graph[\"sp_labels\"] = np.zeros((n_com, n_labels + 1), dtype='uint32')\n else:\n graph[\"sp_labels\"] = []\n #---compute the superpoint features---\n for i_com in range(0, n_com):\n comp = components[i_com]\n if has_labels and not label_hist:\n graph[\"sp_labels\"][i_com, :] = np.histogram(labels[comp]\n , bins=[float(i)-0.5 for i in range(0, n_labels + 2)])[0]\n if has_labels and label_hist:\n graph[\"sp_labels\"][i_com, :] = sum(labels[comp,:])\n graph[\"sp_point_count\"][i_com] = len(comp)\n xyz_sp = np.unique(xyz[comp, :], axis=0)\n if len(xyz_sp) == 1:\n graph[\"sp_centroids\"][i_com] = xyz_sp\n graph[\"sp_length\"][i_com] = 0\n graph[\"sp_surface\"][i_com] = 0\n graph[\"sp_volume\"][i_com] = 0\n elif len(xyz_sp) == 2:\n graph[\"sp_centroids\"][i_com] = np.mean(xyz_sp, axis=0)\n graph[\"sp_length\"][i_com] = np.sqrt(np.sum(np.var(xyz_sp, axis=0)))\n graph[\"sp_surface\"][i_com] = 0\n graph[\"sp_volume\"][i_com] = 0\n else:\n ev = LA.eig(np.cov(np.transpose(xyz_sp), rowvar=True))\n ev = -np.sort(-ev[0]) #descending order\n graph[\"sp_centroids\"][i_com] = np.mean(xyz_sp, axis=0)\n try:\n graph[\"sp_length\"][i_com] = ev[0]\n except TypeError:\n graph[\"sp_length\"][i_com] = 0\n try:\n graph[\"sp_surface\"][i_com] = np.sqrt(ev[0] * ev[1] + 1e-10)\n except TypeError:\n graph[\"sp_surface\"][i_com] = 0\n try:\n graph[\"sp_volume\"][i_com] = np.sqrt(ev[0] * ev[1] * ev[2] + 1e-10)\n except TypeError:\n graph[\"sp_volume\"][i_com] = 0\n #---compute the superedges features---\n for i_sedg in range(0, n_sedg):\n i_edg_begin = jump_edg[i_sedg]\n i_edg_end = jump_edg[i_sedg + 1]\n ver_source = edges[0, range(i_edg_begin, i_edg_end)]\n ver_target = edges[1, range(i_edg_begin, i_edg_end)]\n com_source = edge_comp[0, i_edg_begin]\n com_target = edge_comp[1, i_edg_begin]\n xyz_source = xyz[ver_source, :]\n xyz_target = xyz[ver_target, :]\n graph[\"source\"][i_sedg] = com_source\n graph[\"target\"][i_sedg] = com_target\n #---compute the ratio features---\n graph[\"se_delta_centroid\"][i_sedg,:] = graph[\"sp_centroids\"][com_source,:] - graph[\"sp_centroids\"][com_target, :]\n graph[\"se_length_ratio\"][i_sedg] = graph[\"sp_length\"][com_source] / (graph[\"sp_length\"][com_target] + 1e-6)\n graph[\"se_surface_ratio\"][i_sedg] = graph[\"sp_surface\"][com_source] / (graph[\"sp_surface\"][com_target] + 1e-6)\n graph[\"se_volume_ratio\"][i_sedg] = graph[\"sp_volume\"][com_source] / (graph[\"sp_volume\"][com_target] + 1e-6)\n graph[\"se_point_count_ratio\"][i_sedg] = graph[\"sp_point_count\"][com_source] / (graph[\"sp_point_count\"][com_target] + 1e-6)\n #---compute the offset set---\n delta = xyz_source - xyz_target\n if len(delta 
> 1):\n graph[\"se_delta_mean\"][i_sedg] = np.mean(delta, axis=0)\n graph[\"se_delta_std\"][i_sedg] = np.std(delta, axis=0)\n graph[\"se_delta_norm\"][i_sedg] = np.mean(np.sqrt(np.sum(delta ** 2, axis=1)))\n else:\n graph[\"se_delta_mean\"][i_sedg, :] = delta\n graph[\"se_delta_std\"][i_sedg, :] = [0, 0, 0]\n graph[\"se_delta_norm\"][i_sedg] = np.sqrt(np.sum(delta ** 2))\n return graph", "title": "" }, { "docid": "0b2203aa361054e364b387c07309db37", "score": "0.5042868", "text": "def _convert_to_graph(self):\n graph = nx.Graph()\n graph.add_edges_from(self.interactions)\n graph.remove_edges_from(graph.selfloop_edges())\n return graph", "title": "" }, { "docid": "d4842f90447dbe6f5530cda06990456d", "score": "0.5041988", "text": "def _create_dot_graph(graph, show_connectinfo=False):\n logger.debug('creating pickleable graph')\n pklgraph = nx.DiGraph()\n for edge in graph.edges():\n data = graph.get_edge_data(*edge)\n if hasattr(edge[0], '_interface'):\n srcclass = edge[0]._interface.__class__.__module__.split('.')[2]\n else:\n srcclass = ''\n srcname = '.'.join(str(edge[0]).split('.')[1:])\n srcname = '.'.join((srcname, srcclass))\n if hasattr(edge[1], '_interface'):\n destclass = edge[1]._interface.__class__.__module__.split('.')[2]\n else:\n destclass = ''\n destname = '.'.join(str(edge[1]).split('.')[1:])\n destname = '.'.join((destname, destclass))\n if show_connectinfo:\n pklgraph.add_edge(srcname, destname, l=str(data['connect']))\n else:\n pklgraph.add_edge(srcname, destname)\n return pklgraph", "title": "" }, { "docid": "0f1b15ca9713a2c0d2f6c411d3e5385d", "score": "0.50389767", "text": "def main(args):\n\n # Load edgelist\n oneIndx = False\n E = np.loadtxt(args.inputgraph, delimiter=args.delimiter, dtype=int)\n if np.min(E) == 1:\n oneIndx = True\n E -= 1\n\n # Create an unweighted graph\n G = nx.Graph()\n G.add_edges_from(E[:, :2])\n\n # Get adj matrix of the graph\n tr_A = nx.adjacency_matrix(G, weight=None)\n num_nodes = tr_A.shape[0]\n\n # Set main diag to 1s and normalize (algorithm requirement)\n adj_norm = preprocess_graph(tr_A)\n\n # Define placeholders\n placeholders = {\n 'features': tf.sparse_placeholder(tf.float32),\n 'adj': tf.sparse_placeholder(tf.float32),\n 'adj_orig': tf.sparse_placeholder(tf.float32),\n 'dropout': tf.placeholder_with_default(0., shape=())\n }\n\n # Create empty feature matrix\n features = sp.identity(num_nodes) # featureless\n features = sparse_to_tuple(features.tocoo())\n num_features = features[2][1]\n features_nonzero = features[1].shape[0]\n\n # Create model\n model = None\n if args.model == 'gcn_ae':\n model = GCNModelAE(placeholders, num_features, features_nonzero)\n elif args.model == 'gcn_vae':\n model = GCNModelVAE(placeholders, num_features, num_nodes, features_nonzero)\n\n pos_weight = float(tr_A.shape[0] * tr_A.shape[0] - tr_A.sum()) / tr_A.sum()\n norm = tr_A.shape[0] * tr_A.shape[0] / float((tr_A.shape[0] * tr_A.shape[0] - tr_A.sum()) * 2)\n\n # Optimizer\n with tf.name_scope('optimizer'):\n if args.model == 'gcn_ae':\n opt = OptimizerAE(preds=model.reconstructions,\n labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],\n validate_indices=False), [-1]),\n pos_weight=pos_weight,\n norm=norm)\n elif args.model == 'gcn_vae':\n opt = OptimizerVAE(preds=model.reconstructions,\n labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],\n validate_indices=False), [-1]),\n model=model, num_nodes=num_nodes,\n pos_weight=pos_weight,\n norm=norm)\n\n # Initialize session\n sess = tf.Session()\n 
sess.run(tf.global_variables_initializer())\n\n adj_label = tr_A + sp.eye(tr_A.shape[0])\n adj_label = sparse_to_tuple(adj_label)\n\n # Train model\n for epoch in range(FLAGS.epochs):\n # Construct feed dictionary\n feed_dict = construct_feed_dict(adj_norm, adj_label, features, placeholders)\n feed_dict.update({placeholders['dropout']: FLAGS.dropout})\n # Run single weight update\n outs = sess.run([opt.opt_op, opt.cost, opt.accuracy], feed_dict=feed_dict)\n print(\"Epoch:\", '%04d' % (epoch + 1), \"train_loss=\", \"{:.5f}\".format(outs[1]),\n \"train_acc=\", \"{:.5f}\".format(outs[2]))\n\n # Compute predictions\n feed_dict.update({placeholders['dropout']: 0})\n emb = sess.run(model.z_mean, feed_dict=feed_dict)\n\n def sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n # Node similarities\n adj_rec = np.dot(emb, emb.T)\n\n start = time.time()\n # Read the train edges and compute similarity\n if args.tr_e is not None:\n train_edges = np.loadtxt(args.tr_e, delimiter=args.delimiter, dtype=int)\n if oneIndx:\n train_edges -= 1\n scores = list()\n for src, dst in train_edges:\n scores.append(sigmoid(adj_rec[src, dst]))\n np.savetxt(args.tr_pred, scores, delimiter=args.delimiter)\n\n # Read the test edges and run predictions\n if args.te_e is not None:\n test_edges = np.loadtxt(args.te_e, delimiter=args.delimiter, dtype=int)\n if oneIndx:\n test_edges -= 1\n scores = list()\n for src, dst in test_edges:\n scores.append(sigmoid(adj_rec[src, dst]))\n np.savetxt(args.te_pred, scores, delimiter=args.delimiter)\n\n # If no edge lists provided to predict links, then just store the embeddings\n else:\n np.savetxt(args.output, emb, delimiter=args.delimiter)\n\n print('Prediction time: {}'.format(time.time()-start))", "title": "" }, { "docid": "3fccf22e013a1bfcc53010920e88bc5c", "score": "0.50353944", "text": "def get_graph(self):\n return graphviz.create_graph(\n feature_names=self.feature_names,\n shared_parameter_names=self.shared_parameter_names,\n bound_parameter_names=self.bound_parameter_names,\n variable_names=self.variable_names,\n instruction_list=self.to_dict()['instructions']\n )", "title": "" }, { "docid": "041107aee50ea5a672afe15bce0e07a7", "score": "0.5034987", "text": "def guido_to_nx(graph):\n G = nx.DiGraph()\n for node in graph:\n for tgt, length in graph[node].items():\n cap = int(length / CAR_LENGTH)\n G.add_edge(node, tgt,\n capacity=cap,\n length=length,\n num=0,\n queue=collections.deque())\n return G", "title": "" }, { "docid": "da975a81fa482678b08464e5f0b36a45", "score": "0.50338423", "text": "def create_pg_graph(datapoint, n_edge_types):\n edges, annotations, target = datapoint\n x = torch.FloatTensor(annotations)\n directed_edge_index = torch.LongTensor(\n [[edge[0] - 1, edge[2] - 1] for edge in edges])\n reverse_edge_index = torch.index_select(\n directed_edge_index, 1, torch.LongTensor([1, 0]))\n edge_index = torch.cat([directed_edge_index, reverse_edge_index], dim=0).T\n # print(\"Edge index\", edge_index)\n\n edge_type_indices = torch.LongTensor(\n [[i, edge[1] - 1] for i, edge in enumerate(edges)])\n # print(\"Edge type\", edge_type_indices)\n reverse_edge_type_indices = torch.LongTensor(\n [[i + len(edges), edge[1] - 1 + n_edge_types] for i, edge in\n enumerate(edges)])\n full_edge_type_indices = torch.cat(\n [edge_type_indices, reverse_edge_type_indices], dim=0)\n # print(\"Full edge\", full_edge_type_indices)\n\n edge_attr = torch.zeros(edge_index.size(1), n_edge_types * 2)\n for edge_type in full_edge_type_indices.numpy().tolist():\n edge_attr[tuple(edge_type)] = 1\n\n # if 
len(datapoint[2]) == 1:\n # target = datapoint[2][0] - 1\n # else:\n target = [element - 1 for element in datapoint[2]]\n y = torch.LongTensor(target).view(1, -1)\n # y = torch.unsqueeze(torch.LongTensor(target).view(1), -1)\n return Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y)", "title": "" }, { "docid": "114f5ae1204e10462cd7dbd1b32fc70b", "score": "0.50338334", "text": "def create_count_adj_matrix((data_file, graph_file), output_file):\n\n d = pickle.load(open(data_file, 'r'))\n tfdf = d['tfdf']\n wiredf = d['wiredf']\n \n gf = pickle.load(open(graph_file, 'r'))\n g = gf['graph']\n\n canonical_node_ordering = tfdf.index\n N = len(canonical_node_ordering)\n adj_mat = np.zeros((N, N), dtype = [('link', np.int32), \n ('distance', np.float32)])\n \n print \"now walk\"\n # create graph\n for n1_i, (n1, n1_data) in enumerate(tfdf.iterrows()):\n x1 = n1_data['x']\n y1 = n1_data['y']\n print n1_i\n for n2_i, (n2, row_data) in enumerate(tfdf.iterrows()):\n if g.has_edge(n1, n2):\n adj_mat[n1_i, n2_i]['link'] += 1\n x2 = row_data['x']\n y2 = row_data['y']\n d = np.sqrt((x2-x1)**2 + (y2-y1)**2)\n adj_mat[n1_i, n2_i]['distance'] = d\n pickle.dump({'adj_mat' : adj_mat}, \n open(output_file, 'w'))", "title": "" }, { "docid": "dfbe09e91fdc3aacde1af00143cec8ec", "score": "0.50337666", "text": "def fully_connect(x_tensor, num_outputs, name):\n # shape_list = x_tensor.get_shape().as_list()\n with tf.name_scope(name):\n result = tf.layers.dense(inputs = x_tensor,\n units = num_outputs,\n activation = tf.nn.relu,\n # activation = tf.nn.elu,\n kernel_initializer = tf.truncated_normal_initializer(),\n name=name)\n tf.summary.histogram(\"fully_connect_layer\", result)\n print(\"==================================================\")\n print(\"fully_connect:\")\n print(\"input x_tensor = {}\".format(x_tensor))\n print(\"result = {}\".format(result))\n print(\"==================================================\")\n return result", "title": "" }, { "docid": "9ee1f2e6db90f59a2a7f4c83cca7dc34", "score": "0.5030448", "text": "def example_graph():\n g = nx.Graph()\n g.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('B', 'D'), ('D', 'E'), ('D', 'F'), ('D', 'G'), ('E', 'F'), ('G', 'F')])\n return g", "title": "" }, { "docid": "9ee1f2e6db90f59a2a7f4c83cca7dc34", "score": "0.5030448", "text": "def example_graph():\n g = nx.Graph()\n g.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('B', 'D'), ('D', 'E'), ('D', 'F'), ('D', 'G'), ('E', 'F'), ('G', 'F')])\n return g", "title": "" }, { "docid": "7b7515b454b1557b5bc1bc951941bcf7", "score": "0.50281143", "text": "def get_graph_data():\n thread_id = request.args.get(\"thread_id\", random_thread_id(post_count_min=200), type=int)\n min_edges = request.args.get(\"min_edges\", 1, type=int)\n pqdict, userdict = graphs.get_post_quote_dict(thread_id)\n G = graphs.create_graph(pqdict)\n s = graphs.graph_to_node_link(G, userdict, min_degree=min_edges)\n return json.dumps(s)", "title": "" } ]
ec6ec90b5353cfd5cfc372183415c6a7
check missing percentage of every feature
[ { "docid": "925655d1faa0bbaf4b620a03b04e768d", "score": "0.64688915", "text": "def missing_value_pct(self, data):\n tmp_data = data.copy()\n if tmp_data.empty:\n return False\n missing_rate_df = pd.DataFrame(tmp_data.isnull().mean(), columns=['missing_rate'])\n return missing_rate_df.sort_values(by='missing_rate', ascending=False)", "title": "" } ]
[ { "docid": "6fc001d90fc77d9d7be6395036917507", "score": "0.6547822", "text": "def _is_ok_feature(self, feature) -> bool:\n if feature.isnull().mean() >= self.max_nan_rate:\n return False\n if (feature.value_counts().values[0] / feature.shape[0]) >= self.max_constant_rate:\n return False\n return True", "title": "" }, { "docid": "602725a4f27e4e10995ca5d8386df624", "score": "0.6491639", "text": "def get_percentage_missing(series):\n num = series.isnull().sum()\n den = len(series)\n return round(num/den, 2)", "title": "" }, { "docid": "a1e6f4de10a0ee15d0018f5817eb2f95", "score": "0.64608294", "text": "def missing_detect(data):\n # Tests whether input data is of pd.DataFrame type\n if not isinstance(data, pd.DataFrame):\n raise TypeError(\"Please pass in a Pandas DataFrame for `data`\")\n\n missing_count = pd.DataFrame(data.isnull().sum(), columns=[\"n_missing\"])\n missing_count[\"percent\"] = missing_count[\"n_missing\"] / data.shape[0]\n\n return missing_count", "title": "" }, { "docid": "14d9a63e71e1d817e8a80a583f457b21", "score": "0.64536774", "text": "def deal_missing_data(df_train, df_test):\n total = df_train.isnull().sum().sort_values(ascending=False)\n percent = (df_train.isnull().sum()/df_train.isnull().count()).sort_values(ascending=False)\n missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])\n missing_data = missing_data[missing_data['Total'] > 0]\n # Fill rows that have missing data with the median value of the feature\n numerical_features = df_train.select_dtypes(exclude=object).columns\n for c in numerical_features:\n df_train[c].fillna(np.nanmedian(df_train[c]), inplace=True)\n if c != 'SalePrice':\n df_test[c].fillna(np.nanmedian(df_test[c]), inplace=True)\n # Fill rows that have missing data with specific values\n df_train['MasVnrType'].fillna('None', inplace=True)\n df_train['Electrical'].fillna('SBrkr', inplace=True)\n # Fill rows that have missing data with None or the median (numerical features)\n categorical_features = df_train.select_dtypes(include=object).columns\n for c in categorical_features:\n df_train[c].fillna('None', inplace=True)\n df_test[c].fillna('None', inplace=True)\n # Drop PoolQC because almost 100% has no information regarding it.\n # The feature PoolArea represents most of the feature PoolQC.\n features_to_drop = ['PoolQC', 'Street', 'Utilities']\n df_train.drop(features_to_drop, axis=1, inplace=True)\n df_test.drop(features_to_drop, axis=1, inplace=True)\n assert(df_train.isna().any().sum() == 0)\n assert (df_test.isna().any().sum() == 0)", "title": "" }, { "docid": "6bcd68e7ec37717acbaaa1dedda92008", "score": "0.6383096", "text": "def sanity_checks(self, x):\n if th.isnan(th.sum(x)):\n print(\"nan inputs\")\n ipdb.set_trace()\n if th.isnan(self.clusters[0][0]):\n print(\"nan clusters\")\n ipdb.set_trace()", "title": "" }, { "docid": "90e4147579a677208aba2251e8b9277d", "score": "0.63819003", "text": "def print_missing_percentages(df):\n percent_missing = df.isnull().sum() * 100 / len(df)\n max_missing = percent_missing.max()\n min_missing = percent_missing.min()\n mean_missing = percent_missing.mean()\n print(\"Max, min and mean number of missing values for the columns\")\n print(\"Max:\", max_missing,'%')\n print(\"Min:\", min_missing,'%')\n print(\"Mean:\", mean_missing,'%')\n return min_missing, max_missing", "title": "" }, { "docid": "48adac8be1738b7e5559fce34e74b51c", "score": "0.63806444", "text": "def pe20(preds, labels):\n diff = np.abs(preds - labels)\n error = diff / preds\n return np.count_nonzero(error < .20) / 
np.count_nonzero(error)", "title": "" }, { "docid": "adc2c272c4eb82cd345d10f36ae1f345", "score": "0.63747245", "text": "def _warn_for_empty_labels():\n warnings.warn(\"Empty ground truth set! Check input data\")\n return 0.", "title": "" }, { "docid": "06551c4acb682d68e7036348c3b18f77", "score": "0.63424575", "text": "def find_features(dataset, features, percent):\n count_nan, count, my_features, NAN, AVBLE, Per_nan, Per = 0, 0, [], [], \\\n [], [], []\n # Go through each feature in order to count NaN and available values\n for j in range(0, len(features)):\n for i in dataset.values():\n if i.values()[j] == \"NaN\":\n count_nan = count_nan + 1\n else:\n count = count + 1\n\n if round(count*100/146.0, 0) >= percent:\n my_features.append(dataset.values()[0].keys()[j])\n NAN.append(count_nan)\n Per_nan.append(round(count_nan*100.0/len(dataset), 0))\n AVBLE.append(count)\n Per.append(round(count*100.0/len(dataset), 0))\n count_nan, count = 0, 0\n\n df_con = pd.DataFrame(columns=[\"Number of NaN\", \"Percent NaN\",\n \"Number of Available\",\n \"Percent Available\"],\n index=features)\n\n df_con[\"Number of NaN\"] = NAN\n df_con[\"Percent NaN\"] = Per_nan\n df_con[\"Number of Available\"] = AVBLE\n df_con[\"Percent Available\"] = Per\n\n print df_con\n return my_features", "title": "" }, { "docid": "26408b1c5583758900d5443f5ccd48a2", "score": "0.63092726", "text": "def fraction_of_missing_rows(df):\n return np.sum(pd.DataFrame.any(df.isnull(),axis=1)) / len(df)", "title": "" }, { "docid": "b726c9f66c6a22288578dae3ba88f1a6", "score": "0.6304949", "text": "def check_grads(self):\n for param in self.parameters():\n if torch.sum(param.data != param.data) > 0:\n print(\"NaNs in Grad!\")", "title": "" }, { "docid": "9c13990d2d6259322ece8ee1a1517590", "score": "0.62769705", "text": "def test_n_features_in_no_validation():\n est = MyEstimator()\n est._check_n_features(\"invalid X\", reset=True)\n\n assert not hasattr(est, \"n_features_in_\")\n\n # does not raise\n est._check_n_features(\"invalid X\", reset=False)", "title": "" }, { "docid": "d4a9537e936c14fd603b38a410012c14", "score": "0.6268468", "text": "def get_missing(self):\n\n self.data.vmiss = []\n\n for s in self.data.samps:\n missing = []\n\n if len(s) < self.nof_feats + 1:\n r = i = 0\n while i < len(s) - 1:\n if r in self.ffmap.dir[self.data.nm2id[self.data.fvmap.opp[abs(s[i])][0]]]:\n i += 1\n else:\n missing.append(r)\n\n r += 1\n\n # adding the rest of the features\n missing.extend(range(r, self.nof_feats))\n\n # set is needed for testing inclusion\n self.data.vmiss.append(set(missing))", "title": "" }, { "docid": "741a2cc4fd73c92be3c419e24116e549", "score": "0.6259721", "text": "def class_empty(prediction, y, significance):\n\tprediction = prediction > significance\n\tn_empty = np.sum(1 for _ in filter(lambda x: np.sum(x) == 0,\n\t prediction))\n\treturn n_empty / y.size", "title": "" }, { "docid": "341886fd5de79faaf7a7b917d017fe82", "score": "0.6231333", "text": "def missing_statistics(df):\n statitics = pd.DataFrame(df.isnull().sum()).reset_index()\n statitics.columns = ['COLUMN NAME', \"MISSING VALUES\"]\n statitics['TOTAL ROWS'] = df.shape[0]\n statitics['% MISSING'] = round((statitics['MISSING VALUES'] / statitics['TOTAL ROWS']) * 100, 2)\n return statitics", "title": "" }, { "docid": "d638085cf8cb1e40214d09b55fa2b46d", "score": "0.62091655", "text": "def fillNa(dataframe=None, X_train=None, y_train=None,\n by_mean=False, by_mode=False, inplace=False):\n# for cols in dataframe:\n# if impute:\n# from sklearn.impute import 
SimpleImputer\n# from sklearn.preprocessing import LabelEncoder\n# \n# encoder = LabelEncoder()\n# X_train = encoder.fit_transform(X_train)\n# y_train = encoder.transform(y_train)\n# \n# imputer = SimpleImputer(strategy='most_frequent',fill_value='mode', copy=True)\n# imputed_X_train = pd.DataFrame(imputer.fit_transform(X_train))\n# imputed_y_train = pd.DataFrame(imputer.transform(y_train))\n \n # replacing column\n# imputed_X_train.columns = X_train.columns\n# imputed_y_train.columns = y_train.columns\n\n# a = missing_by_percount(dataframe)\n# b = pd.DataFrame([dataframe.drop([colname], axis=1, inplace=True) \n# for colname in a['Column Name'] \n# if a['Percentage Missing'].values.any() >= 50])\n# if droplarge:\n# _getterFunction = pd.DataFrame(missing_by_percount(dataframe))\n# _getValue = [rowname for rowname in _getterFunction.loc[:,'Column Name'].values.any() \n# if _getterFunction.loc[:, 'Percentage Missing'].values.any().item() \n# >= 50] \n# dataframe.drop(_getValue, axis=1, inplace=True)\n #_getterFunction.loc[:, 'Percentage Missing'] > 50\n# dataframe.drop([_getValue == True],axis=1, inplace=True)\n \n# for i in _getValue:\n# pass\n# if True:\n# pass\n# dataframe.drop([i.index], inplace=True)\n# else:\n# pass\n# dataframe.drop([])\n# ahoy = dataframe.drop([])\n if by_mode:\n shoot = [colname \n for colname in dataframe.columns \n for colname in dataframe[f'{colname}'].mode()]\n for boom in shoot:\n for colname in dataframe.columns:\n dataframe[f'{colname}'].fillna(value=boom, inplace=inplace)\n if by_mean:\n shooter = [colname\n for colname in dataframe.columns\n \n if dataframe[colname].dtype in ['int64', 'float64']\n# for colname in dataframe[f'{colname}'].mean()\n ]\n shoot = [colname \n for colname in shooter \n for colname in dataframe[f'{colname}'].mean()] \n for boom in shoot:\n for colname in dataframe.columns:\n dataframe[f'{colname}'].fillna(value=boom, inplace=inplace)\n \n\n return dataframe", "title": "" }, { "docid": "335c0eeb7c75d32f0cef79acbb44b211", "score": "0.6175372", "text": "def test_n_features_parameter_not_one_percent():\n fg = FeatureGenerator(strategy=\"GFG\", n_features=23, population=200)\n with pytest.raises(ValueError, match=r\".*should be <1%.*\"):\n fg.fit(X_bin, y_bin)", "title": "" }, { "docid": "99c9f708fc8169e93aa1f4142a4e8b82", "score": "0.617391", "text": "def cross_validate(_features_true, _features_false, percentage):\n\n res0 = []\n score = 0.0\n\n for p in range((int)(1.0/percentage)):\n _from = _features_true.shape[0] * p*percentage\n _to = _from + ( percentage * _features_true.shape[0] )\n\n trainFeature0 = np.vstack((_features_true[0:_from], _features_true[_to:]))\n testFeature0 = _features_true[_from:_to]\n\n _from = _features_false.shape[0] * p*percentage\n _to = _from + percentage * _features_false.shape[0]\n\n trainFeature1 = np.vstack((_features_false[0:_from], _features_false[_to:]))\n #testFeature1 = _features_false[_from:_to]\n\n # extend train feature true in order to balance against a bigger number of false features\n trainFeature0 = np.repeat(trainFeature0, (trainFeature1.shape[0]/trainFeature0.shape[0]), axis=0)\n\n X_train = np.vstack((trainFeature0, trainFeature1))\n X_test = testFeature0\n Y_train = np.append(np.ones(trainFeature0.shape[0]), np.zeros(trainFeature1.shape[0]))\n Y_test = np.ones(testFeature0.shape[0])\n\n #clf = RandomForestClassifier(n_estimators=80)\n #clf = LogisticRegression(C=0.001)\n from sklearn.ensemble import GradientBoostingClassifier\n clf = GradientBoostingClassifier()\n clf.fit(X_train, 
Y_train)\n\n #score = score + clf.score(X_test, Y_test)\n\n clf_probs = clf.predict_proba(X_test)\n\n score += sum(1 for i in clf_probs if i[1] > THRESHOLD) * 1.0 / len(clf_probs)\n\n res0 = np.append(res0, [elem[1] for elem in clf_probs])\n\n score = score * percentage\n\n return res0, score", "title": "" }, { "docid": "70e5d7859e66036616a8ce730efd9e72", "score": "0.6146839", "text": "def _get_missing_values(self,data):\r\n #Getting sum of missing values for each feature\r\n missing_values = data.isnull().sum()\r\n #Feature missing values are sorted from few to many\r\n missing_values.sort_values(ascending=False, inplace=True)\r\n \r\n #Returning missing values\r\n return missing_values", "title": "" }, { "docid": "1a41d84cc61eebb83edcb0fb4133b3ea", "score": "0.6129801", "text": "def missing_samples(self):\n return len(self.samples) == 0", "title": "" }, { "docid": "21b554e576a85586af99c003cfed2686", "score": "0.6061157", "text": "def check(self, feature):\n mapper = feature.as_feature_engineering_pipeline()\n X = mapper.fit_transform(self.X, y=self.y)\n assert not np.any(np.isnan(X))", "title": "" }, { "docid": "2a1c44833bd3d9bc2e3f6b9c8efc51d0", "score": "0.60489154", "text": "def nan_values_detection(dataframe):\n # check if any items in a row are not-zero, not-empty or not-False\n print(f\"\\nVariables with missing data: \\n{dataframe.isna().any()}\\n\")\n\n # table of % of missing values using the \"sidetable\" library instead\n # of performing the task manually\n nan_table = dataframe.stb.missing(\n clip_0=True,\n # style=True # not working?\n )\n print(f\"\\nSummary of missing data: \\n{nan_table}\\n\")\n\n # repeat below AFTER imputation steps\n # calculate the percentage of NaNs in the dependent variables\n total_nan_values = dep_vars.isna().sum().sum()\n total_dep_vars_values = dep_vars.count().sum()\n pct_nan_dep_vars = total_nan_values / total_dep_vars_values\n print(\n f\"\\nTotal number of NaN values in the dependent variables: \"\n f\"{total_nan_values}\\n\"\n f\"Total percentage of NaN values: {pct_nan_dep_vars:.1%}\\n\"\n )\n # # below NOT working\n # # gives the list of indexes corresponding to missing vallues (NaNs)\n # nan_values_index = (\n # dataframe[[\"ch4_flux\", \"co2_flux\", \"n2o_flux\"]]\n # .isna()\n # .to_numpy()\n # .nonzero()\n # )\n # # nan_list = nan_values_index.index.to_list()\n # print(f\"\\nList of NaN values: \\n{nan_values_index}\\n\")\n return nan_table", "title": "" }, { "docid": "8120781559065fa6477019bdf571399a", "score": "0.60398895", "text": "def _set_missingness(self) -> \"TSForecastingExperiment\":\n self.num_missing_target = self.y.isna().sum()\n self.target_has_missing = self.num_missing_target != 0\n if isinstance(self.X, pd.DataFrame):\n self.num_missing_exogenous = self.X.isna().sum().sum()\n self.exogenous_has_missing = self.num_missing_exogenous != 0\n elif self.X is None:\n self.num_missing_exogenous = 0\n self.exogenous_has_missing = False\n\n return self", "title": "" }, { "docid": "99b83eef7c834956e0a6445d837491cd", "score": "0.60175174", "text": "def is_missing(self, null, count, n: int = 20):\n nvalid = null.count(dim=\"time\").fillna(0) - null.sum(dim=\"time\").fillna(0)\n return nvalid < n", "title": "" }, { "docid": "b028c3185d861084186314f0ba48356d", "score": "0.59647125", "text": "def check_dataset(self, dataset):\n in_support = np.sum(\n (dataset.x >= self.min_x) * (dataset.x <= self.max_x),\n axis=1) == self.num_p\n print(\"percent in support\", np.sum(in_support) / dataset.num_obs)\n return np.sum(in_support) == 
dataset.num_obs", "title": "" }, { "docid": "a2ee5f032af9f824133690e218316110", "score": "0.59486026", "text": "def check_train_error(self):\n # check error:\n label = self.predict(self.Xy.iloc[:, :-1]) # use train set without labels\n error = 0\n for i in range(len(label)):\n if int(label[i]) != int(self.true_y[i]):\n error += 1\n print('training error rate is: ', error / len(label))\n return error / len(label)", "title": "" }, { "docid": "03e12b8eff5b6c07eff4973e4ba2b984", "score": "0.5945362", "text": "def count_valid_values(data_dict, features):\n for feature in features:\n feature_name = feature[0]\n feature.append(0)\n feature.append(0)\n for record in data_dict:\n person = data_dict[record]\n feature[3] += 1\n if person[feature_name] != 'NaN':\n feature[2] += 1\n \n return features", "title": "" }, { "docid": "179262e8ad2b946dcc353895965062b5", "score": "0.5944875", "text": "def not_valid_feature(dataset, features):\n print \"Features with invalid value: \"\n for j in features:\n for keys, values in dataset.items():\n if j == 'email_address':\n if validate_email(values[j]) is False and values[j] != 'NaN':\n print keys, \"has invalid %s\" % j, values[j], j\n elif j in ['deferred_income', 'restricted_stock_deferred']:\n if values[j] > 0 and values[j] != 'NaN':\n print keys, \"has incorrect %s\" % j, values[j], j\n else:\n if values[j] < 0 and values[j] != 'NaN':\n print keys, \"has incorrect %s\" % j, values[j]\n return", "title": "" }, { "docid": "3f1d9496fa1a47bebc3971643245d4a6", "score": "0.5934855", "text": "def check_dataset(self, dataset):\n new_x = self.pca.transform(dataset.x.reshape((dataset.x.shape[0], -1)))\n in_support = np.sum(\n (new_x >= self.min_x) * (new_x <= self.max_x),\n axis=1) == self.num_p\n print(\"percent in support\", np.sum(in_support) / dataset.num_obs)\n return np.sum(in_support) == dataset.num_obs", "title": "" }, { "docid": "da73d8795721ffba7208fae6e3207df7", "score": "0.591258", "text": "def measure_null_accuracy(y):\n # print(y.value_counts())\n most_frequent_class_count = y.value_counts().max()\n all_count = y.count()\n null_accuracy = most_frequent_class_count / all_count\n print(f\"Null Accuracy: {null_accuracy: .1%}\")", "title": "" }, { "docid": "480f56d6ac4489f2f806735c7e66d12f", "score": "0.5900153", "text": "def percentage_error_function(dataset, labels, classifier):\n\n # Cantidad de puntos mal etiquetados\n misclassified_count = 0\n\n # Iteramos los puntos junto a sus etiquetas\n for point, label in zip(dataset, labels):\n if classifier(point) != label:\n misclassified_count += 1\n\n # Devolvemos el porcentaje (en tantos por uno)\n return misclassified_count / len(dataset)", "title": "" }, { "docid": "15f7d475a3331d2ad5bf2b1802cca261", "score": "0.58902043", "text": "def test_expected_value_pp_theta(self):\n\n for i in self.analyser.theta_samples.columns:\n self.assertAlmostEqual(self.analyser.theta_samples.mean()[i],\n self.inf_n_pp.mean()[i],\n delta=1e-4)", "title": "" }, { "docid": "b7a916f3c2e71b6446e2cbca1bcc008d", "score": "0.5876287", "text": "def test_mean_impute_missing_values(self):\n imputed = impy.mean(self.data_m)\n self.assertFalse(np.isnan(imputed).any())", "title": "" }, { "docid": "487d5411f4cf0c9a016eaaabfb2e2c07", "score": "0.5868152", "text": "def perm4missing(flights, col, N):\n pt = (\n flights.assign(is_null=flights.DEPARTURE_DELAY.isnull())\n .pivot_table(index='is_null', columns=col, aggfunc='size')\n )\n distr = (pt.T / pt.sum(axis=1)).T\n obs = distr.diff().iloc[-1].abs().sum() / 2\n\n tvds = []\n for _ in 
range(1000):\n shuffled_col = (\n flights[col]\n .sample(replace=False, frac=1)\n .reset_index(drop=True)\n )\n shuffled = (\n flights\n .assign(**{\n col: shuffled_col,\n 'is_null': flights['DEPARTURE_DELAY'].isnull()\n })\n )\n shuffled = (\n shuffled\n .pivot_table(index='is_null', columns=col, aggfunc='size')\n .apply(lambda x: x / x.sum(), axis=1)\n )\n tvd = shuffled.diff().iloc[-1].abs().sum() / 2\n tvds.append(tvd)\n\n return np.mean(tvds > obs)", "title": "" }, { "docid": "5c0f35666e1a2ab24d864c13f1ead10a", "score": "0.5866258", "text": "def test_add_sample_presence_count_zeros():\n\n table, metadata, ranks = get_test_data()\n\n # Test 1: zero out all counts for feature F3\n table.loc[\"F3\"] = 0\n output_feature_data = add_sample_presence_count(ranks, table)\n assert_series_equal(\n output_feature_data[\"qurro_spc\"],\n Series([3, 2, 0, 3, 2, 2, 2, 2], index=ranks.index, name=\"qurro_spc\"),\n )\n verify_spc_data_integrity(output_feature_data, ranks)\n\n # Test 2: zero out all counts\n table.loc[:] = 0\n ofd_2 = add_sample_presence_count(ranks, table)\n assert_series_equal(\n ofd_2[\"qurro_spc\"],\n Series([0] * 8, index=ranks.index, name=\"qurro_spc\"),\n )\n verify_spc_data_integrity(ofd_2, ranks)\n\n # Test 3: just one count for one feature\n table[\"Sample4\"][\"F2\"] = 1\n ofd_3 = add_sample_presence_count(ranks, table)\n assert_series_equal(\n ofd_3[\"qurro_spc\"],\n Series([0, 1, 0, 0, 0, 0, 0, 0], index=ranks.index, name=\"qurro_spc\"),\n )\n verify_spc_data_integrity(ofd_3, ranks)", "title": "" }, { "docid": "8a5fe3b3e968f443f5819be56c4a5987", "score": "0.58631384", "text": "def count_missing(dataframe):\n return (dataframe.shape[0] * dataframe.shape[1]) - dataframe.count().sum()", "title": "" }, { "docid": "8a5fe3b3e968f443f5819be56c4a5987", "score": "0.58631384", "text": "def count_missing(dataframe):\n return (dataframe.shape[0] * dataframe.shape[1]) - dataframe.count().sum()", "title": "" }, { "docid": "d5674b67122eb247c0cad5d03d8615bb", "score": "0.58525544", "text": "def get_nan_cnt(feature_df):\n nan_cnt = []\n nan_cnt = (feature_df!=feature_df).sum(axis=0)\n return nan_cnt", "title": "" }, { "docid": "b41029781587a2a0a4c793024372d24a", "score": "0.58005005", "text": "def test_impute_missing_values(self):\n imputed = impy.random(self.data_m)\n self.assertFalse(np.isnan(imputed).any())", "title": "" }, { "docid": "19574771c31210745a79e58e327497f5", "score": "0.5795593", "text": "def test_for_nans(ds, var):\n assert ds[var].isnull().sum() == 0, \"there are nans!\"", "title": "" }, { "docid": "845b3bce6ea306f2ac1762ef893dcca6", "score": "0.57906467", "text": "def check(self, feature):\n mapper = feature.as_feature_engineering_pipeline()\n X = mapper.fit_transform(self.X, y=self.y)\n assert not np.any(np.isinf(X))", "title": "" }, { "docid": "2bac68990c93924eece6a98a7716ad4c", "score": "0.5787226", "text": "def fcheck_missing(self, df):\n data_i = pd.DatetimeIndex(df.index)\n true_i = pd.date_range(start=df.index[0], end=df.index[-1])\n if not data_i.equals(true_i):\n print(\"MISSING DATA: {} missing entries\".format(len(true_i) - len(data_i)))\n else:\n print(\"No missing data!\")", "title": "" }, { "docid": "acb88077366826945763b0448b3055b2", "score": "0.5783125", "text": "def check_hold_out_error(self):\n # check error:\n\n label = self.predict()\n if len(label) == 0:\n raise Exception('No hold out Data!!!')\n\n error = 0\n for i in range(len(label)):\n if int(label[i]) != int(self.true_y[i]):\n error += 1\n return error/len(label)", "title": "" }, { "docid": 
"9d5dadf313d8393d3b3fc15c85496a67", "score": "0.5776349", "text": "def test_fit_multivariate_normal_too_many_nans(self):\n\n with self.assertRaises(ValueError):\n prob_dist.fit_multivariate_normal(FEATURE_TABLE_TOO_MANY_NAN)", "title": "" }, { "docid": "94f8779ebb0b83b466863f2cca86a746", "score": "0.576862", "text": "def prune_by_missing_percent(df, percentage=0.4):\n mask = (df.isnull().sum() / df.shape[0]).map(lambda x: True if x < percentage else False)\n pruned_df = df[df.columns[mask.values]]\n return pruned_df", "title": "" }, { "docid": "73d2721e2f246a40d99d2e9a228d1863", "score": "0.57653403", "text": "def missing_values_row(df):\n df = df.copy()\n null_count = df.isnull().sum(axis=1)\n null_percentage = (null_count / df.shape[1]) * 100\n return pd.DataFrame({'num_missing': null_count, 'percentage': null_percentage})", "title": "" }, { "docid": "13915c29e3a4f25270d8eaa9a577bd3c", "score": "0.57543004", "text": "def class_error(true_labels, pred_labels):\n unique_true = np.unique(true_labels)\n unique_pred = np.unique(pred_labels)\n min_wrong = np.inf\n for permutation in itertools.permutations(unique_pred):\n f = {a:b for a, b in zip(unique_true, permutation)}\n wrong = 0\n for i in range(len(true_labels)):\n if f[true_labels[i]] != pred_labels[i]:\n wrong += 1\n if wrong < min_wrong:\n min_wrong = wrong\n return min_wrong/len(true_labels)", "title": "" }, { "docid": "33e6df655cbd26b25a43ac8e52890a07", "score": "0.5752662", "text": "def _imputer(self):\n\n logging.info(f'#{self._index()} - Imputing appropriate values in empty cells...')\n # deal with missing values\n missing_values = self.X.isnull().sum() + self.X_test.isnull().sum()\n logging.info(f'Number of missing values before imputing: {missing_values.sum()}')\n\n plt.figure(figsize=(10, 5))\n sns.heatmap(self.X.isnull(), yticklabels=0, cbar=False, cmap='viridis')\n plt.show()\n\n missing_data = pd.DataFrame({'Missing Values': missing_values})\n print(missing_data.head(20))\n\n fill_with_none = ['Alley', 'Fence', 'MiscFeature', 'MasVnrType', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2',\n 'GarageType', 'GarageFinish']\n\n fill_with_na = ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'HeatingQC', 'KitchenQual', 'FireplaceQu',\n 'GarageQual', 'GarageCond', 'PoolQC']\n\n fill_with_zero = ['LotFrontage', 'LotArea', 'MasVnrArea', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF',\n 'TotalBsmtSF', '1stFlrSF', '2ndFlrSF', 'LowQualFinSF', 'GrLivArea', 'BsmtFullBath',\n 'BsmtHalfBath', 'Fireplaces', 'GarageCars', 'GarageArea', 'GarageYrBlt', 'WoodDeckSF',\n 'OpenPorchSF', 'EnclosedPorch', '3SsnPorch', 'ScreenPorch', 'PoolArea', 'MiscVal']\n\n fill_with_most_frequent = ['Electrical', 'Exterior1st', 'Exterior2nd', 'MSZoning', 'SaleType']\n\n for col in fill_with_none:\n self.X[col] = self.X[col].fillna('None')\n self.X_test[col] = self.X_test[col].fillna('None')\n\n for col in fill_with_na:\n self.X[col] = self.X[col].fillna('NA')\n self.X_test[col] = self.X_test[col].fillna('NA')\n\n for col in fill_with_zero:\n self.X[col] = self.X[col].fillna(0)\n self.X_test[col] = self.X_test[col].fillna(0)\n\n for col in fill_with_most_frequent:\n self.X[col] = self.X[col].fillna(self.X[col].mode()[0])\n self.X_test[col] = self.X_test[col].fillna(self.X[col].mode()[0])\n\n self.X['Functional'] = self.X['Functional'].fillna('Typ')\n self.X_test['Functional'] = self.X_test['Functional'].fillna('Typ')\n\n missing_values = self.X.isnull().sum().sum() + self.X_test.isnull().sum().sum()\n\n logging.info(f'Number of missing values after imputing: 
{missing_values}')\n logging.info(f'#{self._step_index} - DONE!')", "title": "" }, { "docid": "c125fba068225abe6e9325c9410d7d5d", "score": "0.5748004", "text": "def test_fit_mvn_for_each_class_missing_class(self):\n\n with self.assertRaises(ValueError):\n prob_dist.fit_mvn_for_each_class(\n NEW_FEATURE_TABLE, class_labels=BINARY_CLASS_LABELS,\n num_classes=3)", "title": "" }, { "docid": "b6d7a830347d315c958d15ddafc183c7", "score": "0.57469916", "text": "def validate(self, datapoints):\r\n\r\n incorrect = 0\r\n for x, y in datapoints:\r\n if(self.predict(x) != y):\r\n incorrect += 1\r\n return incorrect/len(datapoints)", "title": "" }, { "docid": "7fda52437ae6a85c4134d1741fa0a5f0", "score": "0.5746667", "text": "def test_unsupervised_density():\n # !TODO: Implement a suitable scenario.\n pass", "title": "" }, { "docid": "78b49b41828338e68847ffe90d5ba57d", "score": "0.57386965", "text": "def generate_missing_observations(X, method = 'uniform', low_i=0.1, high_i=0.5, low_j=0.1, high_j=0.5, loc_i=0, sigma_i=1, loc_j=0,sigma_j=1):\n assert method in METHODS , \"Missing mechanism not supported\"\n n,d = X.shape\n if method == 'uniform':\n p_i = np.random.uniform(low_i,high_i,n).reshape(-1,1)\n p_j = np.random.uniform(low_j,high_j,d).reshape(-1,1)\n\n \n p = p_i.dot(p_j.T) # missing probabilities\n\n mask = np.random.binomial(n=1,p=p,size=X.shape).astype(bool) #True for missing values, False for others\n\n X_miss = np.copy(X)\n\n X_miss[mask] = np.nan\n\n print('Fraction of missing data: %2.3f' % mask.mean())\n \n return X_miss", "title": "" }, { "docid": "f07dc39e46ca6031353c38dfbdab942b", "score": "0.5737364", "text": "def notnancount(data): \n return(np.count_nonzero(~np.isnan(data)))", "title": "" }, { "docid": "c706fce848bbae4e833518ebd00a99f3", "score": "0.57264996", "text": "def test(alg, dset):\r\n\r\n errors = 0\r\n x_val, y_val = dset[:, :-1], dset[:, -1]\r\n\r\n for x, y in zip(x_val, y_val):\r\n\r\n if not alg.test(x, y):\r\n errors = errors + 1\r\n\r\n loss = float(errors) / dset.shape[0]\r\n print(str((1 - loss) * 100) + \" % of accuracy\")\r\n\r\n return (1 - loss) * 100", "title": "" }, { "docid": "b6830aeed531298f43694fafff1a6919", "score": "0.5725042", "text": "def fraction_valid(gen, n_jobs=1):\n gen = mapper(n_jobs)(get_mol, gen)\n return 1 - gen.count(None) / len(gen)", "title": "" }, { "docid": "5432a2437be7488e7666ec3287e77033", "score": "0.5724736", "text": "def check_missing(self, data, index, missing_pattern):\n self.log.info(\"check the missing values.\")\n missing = [d for d in data if d[index] == missing_pattern]\n num_missing = len(missing)\n return missing, num_missing", "title": "" }, { "docid": "bb597b4c9faa770e710502cd6e43dd8b", "score": "0.5697898", "text": "def _check_missing_values(series, warn_vals, err_vals=None):\n missing_number = np.sum(np.isnan(series.values))\n total_number = np.prod(series.shape)\n missing_fraction = missing_number / total_number\n missing_msg = 'Too many missing values: {} out of {}.'.format(\n missing_number, total_number)\n warning_msg = 'Large number of missing values: {}'.format(missing_msg)\n\n # Raise an Error (default) or warning if too many missing values.\n if err_vals:\n if (missing_fraction >= err_vals.overall_fraction or\n (missing_fraction >= err_vals.fraction_low_missing_number and\n missing_number >= err_vals.low_missing_number) or\n (missing_fraction >= err_vals.fraction_high_missing_number and\n missing_number >= err_vals.high_missing_number)):\n raise ValueError('Too many missing values: ' + missing_msg)\n\n # Raise 
a warning in case of a lot of missing values.\n if (missing_fraction >= warn_vals.overall_fraction or\n (missing_fraction >= warn_vals.fraction_low_missing_number and\n missing_number >= warn_vals.low_missing_number) or\n (missing_fraction >= warn_vals.fraction_high_missing_number and\n missing_number >= warn_vals.high_missing_number)):\n warnings.warn(warning_msg)", "title": "" }, { "docid": "8af2620b576d2debbf194942e6182834", "score": "0.5693044", "text": "def testMissingRecords(self):\n\n c = self._classifier([1], 1.0, 0.1, 0)\n recordNum = 0\n c.compute(recordNum=recordNum, patternNZ=[1, 3, 5],\n classification={\"bucketIdx\": 0, \"actValue\": 0},\n learn=True, infer=True)\n recordNum += 1\n\n c.compute(recordNum=recordNum, patternNZ=[2, 4, 6],\n classification={\"bucketIdx\": 1, \"actValue\": 1},\n learn=True, infer=True)\n recordNum += 1\n\n c.compute(recordNum=recordNum, patternNZ=[1, 3, 5],\n classification={\"bucketIdx\": 2, \"actValue\": 2},\n learn=True, infer=True)\n recordNum += 1\n\n c.compute(recordNum=recordNum, patternNZ=[2, 4, 6],\n classification={\"bucketIdx\": 1, \"actValue\": 1},\n learn=True, infer=True)\n recordNum += 1\n\n\n # -----------------------------------------------------------------------\n # At this point, we should have learned [1,3,5] => bucket 1\n # [2,4,6] => bucket 2\n result = c.compute(recordNum=recordNum, patternNZ=[1, 3, 5],\n classification={\"bucketIdx\": 2, \"actValue\": 2},\n learn=True, infer=True)\n recordNum += 1\n self.assertLess(result[1][0], 0.1)\n self.assertGreater(result[1][1], 0.9)\n self.assertLess(result[1][2], 0.1)\n\n result = c.compute(recordNum=recordNum, patternNZ=[2, 4, 6],\n classification={\"bucketIdx\": 1, \"actValue\": 1},\n learn=True, infer=True)\n recordNum += 1\n self.assertLess(result[1][0], 0.1)\n self.assertLess(result[1][1], 0.1)\n self.assertGreater(result[1][2], 0.9)\n\n\n\n # -----------------------------------------------------------------------\n # Feed in records that skip and make sure they don\"t mess up what we\n # learned\n # If we skip a record, the CLA should NOT learn that [2,4,6] from\n # the previous learn associates with bucket 0\n recordNum += 1\n result = c.compute(recordNum=recordNum, patternNZ=[1, 3, 5],\n classification={\"bucketIdx\": 0, \"actValue\": 0},\n learn=True, infer=True)\n recordNum += 1\n self.assertLess(result[1][0], 0.1)\n self.assertGreater(result[1][1], 0.9)\n self.assertLess(result[1][2], 0.1)\n\n # If we skip a record, the CLA should NOT learn that [1,3,5] from\n # the previous learn associates with bucket 0\n recordNum += 1\n result = c.compute(recordNum=recordNum, patternNZ=[2, 4, 6],\n classification={\"bucketIdx\": 0, \"actValue\": 0},\n learn=True, infer=True)\n recordNum += 1\n self.assertLess(result[1][0], 0.1)\n self.assertLess(result[1][1], 0.1)\n self.assertGreater(result[1][2], 0.9)\n\n # If we skip a record, the CLA should NOT learn that [2,4,6] from\n # the previous learn associates with bucket 0\n recordNum += 1\n result = c.compute(recordNum=recordNum, patternNZ=[1, 3, 5],\n classification={\"bucketIdx\": 0, \"actValue\": 0},\n learn=True, infer=True)\n recordNum += 1\n self.assertLess(result[1][0], 0.1)\n self.assertGreater(result[1][1], 0.9)\n self.assertLess(result[1][2], 0.1)", "title": "" }, { "docid": "5a48ceecc16aa0acf241c5c98a8ae7de", "score": "0.56896156", "text": "def adf_check(time_series):\n result = adfuller(time_series)\n print('Augmented Dickey-Fuller Test:')\n labels = ['ADF Test Statistic','p-value','#Lags Used','Number of Observations 
Used']\n\n for value,label in zip(result,labels):\n print(label+' : '+str(value) )\n \n if result[1] <= 0.05:\n print(\"strong evidence against the null hypothesis, reject the null hypothesis. Data has no unit root and is stationary\")\n else:\n print(\"weak evidence against null hypothesis, time series has a unit root, indicating it is non-stationary \")", "title": "" }, { "docid": "6d49bda1d81574254cc3c1c454f1cfa4", "score": "0.566481", "text": "def count_missing(data):\n res = []\n for i in range(len(data[0])):\n unique, counts = np.unique(data[:,i],return_counts=True)\n i, = np.where(unique == '?')\n if len(i) > 0:\n res.append(counts[i][0])\n else:\n res.append(0)\n return res", "title": "" }, { "docid": "08fd1ba5f4a1936ed49d649be5ab0d46", "score": "0.5661984", "text": "def missing_values_row(df):\n\tnull_count = df.isnull().sum(axis=1)\n\tnull_percentage = (null_count / df.shape[1]) * 100\n\treturn pd.DataFrame({'num_missing': null_count, 'percentage': null_percentage})", "title": "" }, { "docid": "4070eaa256aad75850477771c7222e39", "score": "0.5653676", "text": "def _find_missing(self):\n # build all_df, the full cartesian cross of all grain values\n col_uniqs = [self._df[col].unique() for col in self.columns]\n all_df = pd.DataFrame(\n data = itertools.product(*col_uniqs)\n ,columns = self.columns)\n all_n = all_df.shape[0]\n\n # left join all_df to unique value combinations, keep records that\n # are only on the left, return result\n joined_df = all_df.merge(self.row_counts, how='left', indicator=True)\n missing_df = joined_df[joined_df._merge=='left_only']\n missing_df = missing_df.drop(labels=['row_count', '_merge'],\n axis=1)\n\n missing_rate = missing_df.shape[0] / all_n\n\n return missing_df, missing_rate", "title": "" }, { "docid": "e94144dca148ec96a1568419693d5465", "score": "0.56483424", "text": "def have_full_comparisons(df):\n # TODO docstring\n if len(missing_odor_pairs(df)) == 0:\n return True\n else:\n return False", "title": "" }, { "docid": "09e0da6d26688803c2a16cb24ee9427a", "score": "0.56303924", "text": "def categorical_accuracy_missing(y_true, y_pred):\n \n select = K.cast( K.greater_equal(K.max(y_true,axis=1),0.5), 'float32' )\n return K.sum(K.cast(K.equal(K.argmax(y_true, axis=1),\n K.argmax(y_pred, axis=1)),'float32')*select) / (K.sum(select)+_EPSILON)", "title": "" }, { "docid": "1ce75f6651efb7dd38f323fcdc477fa3", "score": "0.5609718", "text": "def test_warn_not_enough_exog(df_and_regressors):\n df, df_exog = df_and_regressors\n ts = TSDataset(df=df, df_exog=df_exog, freq=\"D\")\n with pytest.warns(UserWarning, match=\"Some regressors don't have enough values\"):\n ts.make_future(ts.df_exog.shape[0] + 100)", "title": "" }, { "docid": "6c215614e47addb96629629420e67e54", "score": "0.55927265", "text": "def nb_samples_is_sufficient(dataset):\n if np.shape(dataset)[0] > 2 * np.shape(dataset)[1]:\n return True\n return False", "title": "" }, { "docid": "aa46b4d2706efd4d7a4704066c6b1b41", "score": "0.5589042", "text": "def analysis( classifier , guesses , testLabels , testData ) :\n\tcorrect = [ guesses[ i ] == testLabels[ i ] for i in range( len( testLabels ) ) ].count( True )\n\tprint \"%s correct out of %s (%.1f%%)\" % ( correct , len( testLabels ) , 100.0 * correct / len( testLabels ) )", "title": "" }, { "docid": "d655088bfe87ce1558284ed60395910f", "score": "0.55862916", "text": "def test_fnn_loss(self):\n assert (\n loss_false(tf.zeros((25, 10)), 25).numpy() < 1e-10\n ), \"False neighbor loss is working\"", "title": "" }, { "docid": 
"fc21c6804aef56340de033ee2840d9c3", "score": "0.55779904", "text": "def __feat_num_significant(self, col, df_uni_var_norm, df_uni_var_un_norm, target_col, group_col, add_null):\n df_target_col = df_uni_var_norm[[target_col, group_col, col]]\n df_bin_col = self.__bin_generator.create_percentile_bins(df_target_col.copy(), [col], \n num_bins=self.__config[Constants.num_bins_numerical],\n add_null=add_null)\n binned_feats = [feat for feat in df_bin_col.columns if col in feat]\n\n sig = False\n\n for feat in binned_feats:\n num_c, len_c, num_t, len_t = self.__feat_info(df_bin_col[[target_col, group_col, feat]],\n feat,\n target_col,\n group_col)\n sig = self.__sig_check(num_c, len_c, num_t, len_t)\n if sig:\n break\n\n # if none of the binned features are significant return False, 0 impact\n if not sig:\n return False, 0, 0, 0, 0, 0\n\n # contribution on the non-normalized data set\n df_target_col = df_uni_var_un_norm[[target_col, group_col, col]]\n df_bin_col = self.__bin_generator.create_percentile_bins(df_target_col.copy(), [col], \n num_bins=self.__config[Constants.num_bins_numerical],\n add_null=add_null)\n binned_feats = [feat for feat in df_bin_col.columns if col in feat]\n\n expected = []\n actual = []\n contribution = []\n is_sig = []\n\n for feat in binned_feats:\n num_c, len_c, num_t, len_t = self.__feat_info(df_bin_col[[target_col, group_col, feat]], feat, target_col,\n group_col)\n contribution.append(num_t - num_c * len_t / len_c)\n actual.append(num_t)\n expected.append(num_c * len_t / len_c)\n is_sig.append(self.__sig_check(num_c, len_c, num_t, len_t))\n\n return True, binned_feats, is_sig, expected, actual, contribution", "title": "" }, { "docid": "90809db87a9972a5ff17090dfeb5b4a6", "score": "0.5575794", "text": "def checkProbabilities(l: ndarray) -> bool:\n\treturn round(l.sum(),3) == 1.0 or round(l.sum(),3) == 0.0", "title": "" }, { "docid": "14a741248ec40633134b716e90cec02d", "score": "0.5574588", "text": "def missing_values_table(df):\n # Total missing values\n mis_val = df.isnull().sum()\n\n # Percentage of missing values\n mis_val_percent = 100 * df.isnull().sum() / len(df)\n\n # Make a table with the results\n mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)\n\n # Rename the columns\n mis_val_table_ren_columns = mis_val_table.rename(\n columns={0: 'Missing Values', 1: '% of Total Values'})\n\n # Sort the table by percentage of missing descending\n mis_val_table_ren_columns = mis_val_table_ren_columns[\n mis_val_table_ren_columns.iloc[:, 1] != 0].sort_values(\n '% of Total Values', ascending=False).round(3)\n\n # Print some summary information\n print(\"Your selected dataframe has \" + str(df.shape[1]) + \" columns.\\n\"\n \"There are \" + str(mis_val_table_ren_columns.shape[0]) +\n \" columns that have missing values.\")\n # Return the dataframe with missing information\n return mis_val_table_ren_columns", "title": "" }, { "docid": "02dda5011813b3b6d58f05a2b3f3f52a", "score": "0.55741584", "text": "def missing_values_col(df):\n\tnull_count = df.isnull().sum()\n\tnull_percentage = (null_count / df.shape[0]) * 100\n\tempty_count = pd.Series(((df == ' ') | (df == '')).sum())\n\tempty_percentage = (empty_count / df.shape[0]) * 100\n\tnan_count = pd.Series(((df == 'nan') | (df == 'NaN')).sum())\n\tnan_percentage = (nan_count / df.shape[0]) * 100\n\treturn pd.DataFrame({'num_missing': null_count, 'missing_percentage': null_percentage,\n\t 'num_empty': empty_count, 'empty_percentage': empty_percentage,\n\t 'nan_count': nan_count, 'nan_percentage': 
nan_percentage})", "title": "" }, { "docid": "614602cbabca956ed1b5ec24059a5593", "score": "0.55706817", "text": "def missing_values_col(df):\n df = df.copy()\n null_count = df.isnull().sum()\n null_percentage = (null_count / df.shape[0]) * 100\n empty_count = pd.Series(((df == ' ') | (df == '')).sum())\n empty_percentage = (empty_count / df.shape[0]) * 100\n nan_count = pd.Series(((df == 'nan') | (df == 'NaN')).sum())\n nan_percentage = (nan_count / df.shape[0]) * 100\n return pd.DataFrame({'num_missing': null_count, 'missing_percentage': null_percentage,\n 'num_empty': empty_count, 'empty_percentage': empty_percentage,\n 'nan_count': nan_count, 'nan_percentage': nan_percentage})", "title": "" }, { "docid": "a659a8a3af64d52aa148fdf73a0cbbc2", "score": "0.55666554", "text": "def percentage_error(dataset, labels, weights):\n # Tomamos el perceptron representado por los pesos dados\n perceptron = get_perceptron(weights)\n\n # Cantidad de puntos mal etiquetados\n misclassified_count = 0\n\n # Iteramos los puntos junto a sus etiquetas\n for point, label in zip(dataset, labels):\n if perceptron(point) != label:\n misclassified_count += 1\n\n # Devolvemos el porcentaje (en tantos por uno)\n return misclassified_count / len(dataset)", "title": "" }, { "docid": "a05d7f45666820107151622c1fa7f990", "score": "0.55657434", "text": "def is_defeated(army):\n\n for group in army:\n if group[\"units\"] > 0:\n return False\n return True", "title": "" }, { "docid": "26340499a83311506f958dcb7be8ea61", "score": "0.5561466", "text": "def identify_where_class_missing(arr1,arr2):\n missing_arr1 = not np.any(arr1)\n missing_arr2 = not np.any(arr2)\n if not missing_arr1 and not missing_arr2:\n return 0\n elif missing_arr1 and not missing_arr2:\n return 1\n elif missing_arr2 and not missing_arr1:\n return 2\n elif missing_arr1 and missing_arr2:\n return 3", "title": "" }, { "docid": "0dab7f5d14dd92f2a08157ab5ff583eb", "score": "0.5559521", "text": "def test_taxonomy_missing(self):\n schema = hxl.schema(SCHEMA_TAXONOMY_MISSING)\n result = hxl.validate(hxl.data(DATA_TAXONOMY_GOOD), schema)\n self.assertTrue(result['is_valid'])\n self.assertTrue('external_issues' in result)\n self.assertEqual(0, result['stats']['error'])\n self.assertEqual(1, result['stats']['external'])\n self.assertEqual(0, len(result['issues']))\n self.assertEqual(1, len(result['external_issues']))", "title": "" }, { "docid": "3a0f70d4fb4ac02a54f25f149635ea12", "score": "0.55441195", "text": "def has_missing_values(self):\n return self.data.isnull().sum().sum() > 0", "title": "" }, { "docid": "bb950c5441ae5cfdd1edffd21e062353", "score": "0.5543181", "text": "def test_count_invalid_param(self):\n count = utils.count_param(self._model, ('not_kernel',))\n\n self.assertEqual(count, 0)", "title": "" }, { "docid": "8e6c7ef560bc83dc4c1adf033b5ce43b", "score": "0.5542588", "text": "def is_missing(self, null, count):\n return False", "title": "" }, { "docid": "d02de922e7e364400e383004e281a416", "score": "0.5537084", "text": "def test_exhaust_data():\n \n training_data, training_targets = get_training_data()\n seen_pts = 0\n num_chunks = range(num_chunks_train)\n my_gen = generators.labeled_sequence_gen(training_data, training_targets)\n for e, (x_chunk, y_chunk) in zip(num_chunks,my_gen):\n seen_pts = seen_pts + x_chunk.shape[0]\n print(\"Saw \", str(seen_pts), \" points total\") \n ok_(seen_pts <= train_size)", "title": "" }, { "docid": "e90809be561a8e21e04b18a4ae4f98d2", "score": "0.5536789", "text": "def analyzeMakeup(nodes, data, labels):\n totals = []\n 
for i in range(len(labels)):\n labelTotal = []\n for j in range(len(nodes)):\n if data[nodes[j]][i] == 1:\n labelTotal.append(1)\n else:\n labelTotal.append(0)\n percentage = np.mean(labelTotal)\n if percentage > 0.5: \n print(labels[i], percentage)\n totals.append(percentage)\n return totals", "title": "" }, { "docid": "13471972b5340ff4e18407d7397757aa", "score": "0.5531612", "text": "def analyze_features(dataset, data):\n \n expected = dataset.config[\"feature_vector_length\"]\n print \"Expected:\", expected\n \n i = 0\n for key in data.keys():\n if not len(data[key][\"feature_vect\"]) == expected:\n #print \"[ERROR] \", key, \" - delka: \", len(data[key][\"feature_vect\"])\n i += 1\n \n print \"Celkem \", i, \" vektoru nema pozadovanou delku \", expected, \"!\"", "title": "" }, { "docid": "a10d870198ad471bb923ea4245c4f3c3", "score": "0.5530415", "text": "def calc_misses(trialdf):\r\n misses = ((trialdf['Stimulus.ACC']==0) & \r\n (trialdf['Stimulus.RESP'].notnull())).sum()\r\n return misses", "title": "" }, { "docid": "f60b3143d9ef2ff92f86350760733cd5", "score": "0.5528512", "text": "def _missing_ids(vba, pcode_ids, verbose=False):\n\n # Check each ID.\n num_missing = 0.0\n for curr_id in pcode_ids:\n if (curr_id not in vba):\n if (verbose):\n print (\"P-code ID '\" + str(curr_id) + \"' is missing.\")\n num_missing += 1\n if (len(pcode_ids) == 0):\n return 0.0\n return (num_missing / len(pcode_ids))", "title": "" }, { "docid": "1d9e2ac06f208bad63e022a22ed0f5f5", "score": "0.552719", "text": "def check_detail(df, verbose=True):\n\trows, cols = df.shape()\n\n\tcate_cols = []\n\tnumb_cols = []\n\tcate_miss_cols = []\n\tnumb_miss_cols = []\n\n\tfor col in train.columns:\n\t if train[col].dtype == object:\n\t\t if train[col].isnull().sum() != 0:\n\t\t\t\tCATE_MISSING_COLS.append(col)\n\t\t\tCATE_COLS.append(col)\n\t\telse:\n\t\t\tif train[col].isnull().sum() != 0:\n\t\t\t\tNUM_MISSING_COLS.append(col)\n\t\t\tNUM_COLS.append(col)\n\t\n\tif verbose:\n\t\tprint(f\"There are {rows} rows and {cols} columns in the dataframe.\")\n\t\tprint()\n\t\tprint(f'There are {len(CATE_COLS)} categorical features.')\n\t\tprint(f'They are {CATE_COLS}.')\n\t\tprint('='*20)\n\t\tprint(f'There are {len(NUM_COLS)} numerical features.')\n\t\tprint(f'They are {NUM_COLS}.')\n\t\tprint('='*20)\n\t\tprint(f'Among all the cate features, these features have missing values: {CATE_MISSING_COLS}')\n\t\tprint(f'Among all the num features, these features have missing values: {NUM_MISSING_COLS}')\n\n\t\tprint('#'*20)\n\t\tprint('Categorical columns with missing values.')\n\t\tprint(df[cate_miss_cols].isnull().sum())\n\t\tprint('='*20)\n\t\tprint('Numerical columns with missing values.')\n\t\tprint(df[numb_miss_cols].isnull().sum())\n\n\treturn cate_cols, numb_cols, cate_miss_cols, numb_miss_cols", "title": "" }, { "docid": "cbc8bb829b5660902ce205557395879d", "score": "0.5522681", "text": "def return_valid_averages_mask(u11, u12, u21, u22, label=nan):\r\n\r\n valid_averages = ones(u11.shape[0], dtype=bool_)\r\n\r\n if isnan(label):\r\n # If NaN provided for any scanline uncertainty match-up invalid\r\n for i, (row11, row12, row21, row22) in enumerate(zip(u11, u12, u21, u22)):\r\n if (where(isnan(row11))[0].size != 0) or (where(isnan(row12))[0].size != 0) or \\\r\n (where(isnan(row21))[0].size != 0) or (where(isnan(row22))[0].size != 0):\r\n valid_averages[i] = False\r\n\r\n return valid_averages", "title": "" }, { "docid": "3dcbc39116d99bae80bb5569a3345ef4", "score": "0.55066156", "text": "def is_good_enough(H, training_points, 
classifier_to_misclassified, mistake_tolerance=0):\n if len(get_overall_misclassifications(H, training_points, classifier_to_misclassified)) > mistake_tolerance: return False\n return True", "title": "" }, { "docid": "32a7bc47f4ba1bdd8bbd935de69b7af8", "score": "0.550569", "text": "def misclassification_error(y_true, y_pred):\n assert y_true.size == y_pred.size\n return np.sum(y_true != y_pred) / y_true.size", "title": "" }, { "docid": "1c938fb0c1bd723663ef85e4090583af", "score": "0.5505078", "text": "def _compute_gfes(self):\n return self._det_dcv is not None", "title": "" }, { "docid": "313bdf2ca8ae5526abcb4c86f5147c06", "score": "0.549915", "text": "def _get_missing_features_info(self, X):\n if not self._precomputed:\n imputer_mask = _get_mask(X, self.missing_values)\n else:\n imputer_mask = X\n\n if sp.issparse(X):\n imputer_mask.eliminate_zeros()\n\n if self.features == 'missing-only':\n n_missing = imputer_mask.getnnz(axis=0)\n\n if self.sparse is False:\n imputer_mask = imputer_mask.toarray()\n elif imputer_mask.format == 'csr':\n imputer_mask = imputer_mask.tocsc()\n else:\n if not self._precomputed:\n imputer_mask = _get_mask(X, self.missing_values)\n else:\n imputer_mask = X\n\n if self.features == 'missing-only':\n n_missing = imputer_mask.sum(axis=0)\n\n if self.sparse is True:\n imputer_mask = sp.csc_matrix(imputer_mask)\n\n if self.features == 'all':\n features_indices = np.arange(X.shape[1])\n else:\n features_indices = np.flatnonzero(n_missing)\n\n return imputer_mask, features_indices", "title": "" }, { "docid": "8ed2549422d30356750b96257a1b0253", "score": "0.5499099", "text": "def get_missing_values(df):\n missing = 100 * (1 - df.count() / len(df))\n missing = missing.apply(lambda x: '%.3f%%' % x)\n return pd.DataFrame(missing, columns=['Missing %'])", "title": "" }, { "docid": "165517d1e5b3bca351eb6a7d91cb2dce", "score": "0.5497856", "text": "def checkMissingVal(data,missingval,tol=1e-3):\r\n if data >=(missingval-tol) and data <=(missingval+tol):\r\n return np.nan\r\n else:\r\n return data", "title": "" }, { "docid": "39836f0a7811c58ca04712a5eccf4a1d", "score": "0.54911816", "text": "def check_for_features(cmph5_file, feature_list):\n\n aln_group_path = cmph5_file['AlnGroup/Path'][0]\n missing_features = []\n for feature in feature_list:\n if feature not in cmph5_file[aln_group_path].keys():\n missing_features.append(feature)\n\n return missing_features", "title": "" }, { "docid": "422da8482f9fd7d1ad6b4e1a7cb4c1e1", "score": "0.54724926", "text": "def verify_percentage_intruded(percentage_intruded):\n\n\tif percentage_intruded > 0.5:\n\t\traise ValueError(\"Given data has too few (< 50 %) normal samples.\")\n\telif percentage_intruded > 0.2:\n\t\twarnings.warn(\"Given data has few (< 80 %) normal samples.\")\n\telif percentage_intruded < 0.01:\n\t\twarnings.warn(\"Given data has very few (< 1 %) intruded samples.\")", "title": "" }, { "docid": "a073e6ca91acf6fa8053d34c5edb93b8", "score": "0.5470708", "text": "def check_uniformity(data_uniform):\n\n if data_uniform.isnull().values.any():\n pass\n\n return data_uniform", "title": "" }, { "docid": "47713be3fa92f8fafa7d0b5cddff816e", "score": "0.54686886", "text": "def test_fit(self):\n assert_array_equal((self.clf.score(X,y)) >= 0, True)", "title": "" }, { "docid": "ba69dd2ad16b531a8a0aa6e7128033da", "score": "0.54677606", "text": "def allow_nan_stats(self):\n return self._allow_nan_stats", "title": "" }, { "docid": "4154c060176cf3151ce6f27b60399d61", "score": "0.54669327", "text": "def test_cp_raise_no_error(self):\n 
cp.count_processing(self.correct_df, self.pop_df)", "title": "" }, { "docid": "ecf7fb81c20dcc6bd599faa97bc8feb0", "score": "0.54660326", "text": "def test_sample_F_valid(self):\n model = fcdiff.UnsharedRegionModel()\n f = model.sample_F(100)\n gamma_est = np.mean(f, axis = 0)\n nptest.assert_allclose(gamma_est, model.gamma, atol = 0.05)", "title": "" } ]
213a4ea178810f219a88f61ad163e5e2
Returns LFS for current node
[ { "docid": "0b8195ae715b1ebb5d4948e0e91d1633", "score": "0.510134", "text": "def get_lfs(conf, ring, datadir, default_port, logger):\n fs = conf.get('fs', 'xfs')\n try:\n module_name = 'swift_lfs.fs.%s' % fs\n cls_name = 'LFS%s' % fs.upper()\n module = __import__(module_name, fromlist=[cls_name])\n cls = getattr(module, cls_name)\n if '__file__' in conf and fs in conf:\n fs_conf = readconf(conf['__file__'], fs)\n conf = dict(conf, **fs_conf)\n return cls(conf, ring, datadir, default_port, logger)\n except ImportError, e:\n raise SwiftConfigurationError(\n _('Cannot load LFS. Invalid FS: %s. %s') % (fs, e))", "title": "" } ]
[ { "docid": "be7ee2215b02a613745df021d80db130", "score": "0.65237695", "text": "def lft(self):\n return self._lft", "title": "" }, { "docid": "d4e35465f4e9efa5863436ea473cfab4", "score": "0.6152094", "text": "def bfs(self):\n\n queue = Queue()\n queue.put(self)\n\n while not queue.empty():\n\n current_node = queue.get()\n print(current_node.data, end=' ')\n\n if current_node.left:\n queue.put(current_node.left)\n\n if current_node.right:\n queue.put(current_node.right)", "title": "" }, { "docid": "0dc173b4edc9c3381884530f7b3e3085", "score": "0.61099946", "text": "def get_current_node(self):\n openSet = list(self.openSet)\n temp = openSet[0]\n for i in openSet:\n self.calculate_fscore(i)\n if self.fScore[i] < self.fScore[temp]:\n temp = i\n return temp", "title": "" }, { "docid": "4309686b9b74b2a48f4e47bd3e776502", "score": "0.60792744", "text": "def bfs(self, start):\n if start in self.nodes:\n path = []\n\n # using a list for ease; not as efficient as dll\n q = []\n seen = set([])\n\n q.append(self.nodes[start])\n seen.add(start)\n\n while q:\n current = q.pop(0)\n path.append(current.key)\n for neighbor in current.neighbors:\n if neighbor.key not in seen:\n q.append(neighbor)\n seen.add(neighbor.key)\n return path\n else:\n print(\"This starting node is not in the graph.\")", "title": "" }, { "docid": "005928c2f688e8c2ddcf2e4cc2321a2a", "score": "0.5900905", "text": "def bfs_search(self):\n\n queue = [(0,0)] # level and the position of the sink\n aug_path = {(0,0):[0]} # level,last_index store to path storage\n min_path = [] # one storing the min path\n visited = set()\n while len(queue) > 0:\n prev_level, queue_node = queue[0]\n queue = queue[1:]\n visited.add(queue_node)\n neigh = self.get_neighbors(queue_node)\n neigh = [i for i in neigh if i not in visited]\n level = prev_level + 1 # current level of the bfs\n path = aug_path[(prev_level,queue_node)]\n for node in neigh:\n edge = self.edge_flow[queue_node][node]\n if edge.capacity > edge.flow:\n aug_path[(level,node)] = path + [node]\n \n if node+1 == self.total_nodes:\n queue = []\n min_path = path +[node]\n else:\n \n queue.append((level, node))\n \n return self.path_edges(min_path)", "title": "" }, { "docid": "16f242fa9d7041749e142380ff7166f3", "score": "0.58604187", "text": "def _get_bfs_tree(self) -> nx.DiGraph:\n tree = nx.minimum_spanning_tree(self.graph)\n return nx.bfs_tree(tree, self.home)", "title": "" }, { "docid": "040c65d17ecd9e5aec108e27d516b6f4", "score": "0.5856313", "text": "def llf(self):\n return self._llf", "title": "" }, { "docid": "13cf7617c9dac8f7eef91769cf6a5611", "score": "0.582439", "text": "def GetFS(self):", "title": "" }, { "docid": "e861e40f91446bfba8c066aff5c37c98", "score": "0.5658262", "text": "def getLFO(self):\n\t\treturn self.lfo", "title": "" }, { "docid": "043c3a3d6ee7ab65bee046c20f026234", "score": "0.5586315", "text": "def bfs(initial, goal_test, successors):\n # frontier representa os lugares que ainda nao visitamos\n frontier = Queue()\n frontier.push(Node(initial, None))\n # explored representa os lugares que ja foram visitados\n explored = {initial}\n\n # continua enquanto houver lugares para explorar\n while not frontier.empty:\n current_node = frontier.pop()\n current_state = current_node.state\n\n # se encontrar o objetivo retorna o no atual\n if goal_test(current_state):\n return current_node\n\n # verifica para onde podemos ir em seguida\n for child in successors(current_state):\n # ignora os nos filhos que ja foram visitados\n if child in explored:\n continue\n explored.add(child)\n 
frontier.push(Node(child, current_node))\n # passamos por todos os lugares e nao atingimos o objetivo\n return None", "title": "" }, { "docid": "0469bf8e8fbf23ecd4c76f3e3ec2fe50", "score": "0.55849457", "text": "def bfs(root):\n\n to_visit = []\n if root:\n to_visit.append(root)\n while to_visit:\n current = to_visit.pop()\n if current.left:\n to_visit.append(current.left)\n if current.right:\n to_visit.append(current.right)\n if not to_visit or not current.right:\n return current", "title": "" }, { "docid": "333e214c999d868735a7192b8da121bc", "score": "0.55775005", "text": "def bfs(self):\n queue = []\n queue.append(self.root)\n while queue:\n node = queue[0]\n print node.value\n if node.left_child:\n queue.append(node.left_child)\n if node.right_child:\n queue.append(node.right_child)\n del queue[0]", "title": "" }, { "docid": "ed0a01edd130ec74113696b0fd09cc40", "score": "0.5571185", "text": "def Bfs(self,start):\n self.visited = dict()\n q = Queue()\n q.append(start)\n self.visited[start]=1\n if self.repr==1: \n while(len(q)):\n cur = q.pop()\n print(cur)\n if self.G.get(cur):\n for c in self.G[cur]:\n if self.visited.get(c):\n continue\n self.visited[c]=1\n q.append(c)\n\n if self.repr==2:\n while(len(q)):\n cur = q.pop()\n print(cur)\n u = cur-1\n for i in range(self.n):\n if self.visited.get(i+1):\n continue\n if self.G[u][i]:\n self.visited[i+1]=1\n q.append(i+1)", "title": "" }, { "docid": "c93f1151ee0af17a1f54c5d7a90d3e21", "score": "0.5523278", "text": "def bfs(node):\n queue = []\n while node:\n print(node.value)\n if node.left:\n queue.insert(0, node.left)\n if node.right:\n queue.insert(0, node.right)\n if queue:\n node = queue.pop()\n else:\n break", "title": "" }, { "docid": "e9dc98eef43f91c0b24b0dbfec7cbf22", "score": "0.5491846", "text": "def dfs(self, start):\n if start in self.nodes:\n path = []\n\n # using a list as a stack for ease\n s = []\n seen = set([])\n\n s.append(self.nodes[start])\n seen.add(start)\n\n while s:\n current = s.pop()\n path.append(current.key)\n for neighbor in current.neighbors:\n if neighbor.key not in seen:\n s.append(neighbor)\n seen.add(neighbor.key)\n return path\n else:\n print(\"This starting node is not in the graph.\")", "title": "" }, { "docid": "f907e0a54fef1646edacc6897c511a73", "score": "0.54915065", "text": "def dfs(self, node):\n if not node:\n return\n\n self.rtn_list.append(node.data)\n print(node.data)\n\n self.dfs(node.left)\n self.dfs(node.right)", "title": "" }, { "docid": "24088e22557e69f4ee9373fcf9cd2848", "score": "0.54864484", "text": "def next_node(self):\n path = self.run_bfs()\n return path[1]", "title": "" }, { "docid": "33eeb7a807cef0fa00245725c908786f", "score": "0.54852945", "text": "def bfs(self, start_node_num):\n node = self.find_node(start_node_num)\n self._clear_visited()\n ret_list = []\n # Your code here\n queue = [node]\n node.visited = True\n def enqueue(n, q=queue):\n n.visited = True\n q.append(n)\n def unvisited_outgoing_edge(n, e):\n return ((e.node_from.value == n.value)\n and (not e.node_to.visited))\n while queue:\n node = queue.pop(0)\n ret_list.append(node.value)\n for e in node.edges:\n if unvisited_outgoing_edge(node, e):\n enqueue(e.node_to)\n return ret_list", "title": "" }, { "docid": "8a7a69f58f85b2b8959c32318011eb13", "score": "0.54811156", "text": "def bfs(root):\n q = queue.Queue()\n q.put(root)\n\n while not q.empty():\n current_vertex = q.get()\n print(current_vertex)\n\n for vertex in current_vertex.neighbour_list:\n if not vertex.visited:\n vertex.visited = True\n q.put(vertex)", 
"title": "" }, { "docid": "c40ff0b3453cb6812de6fe7f353f7144", "score": "0.5419131", "text": "def bfs(graph, start_node):\n node_queue = Queue()\n dist = {}\n parent = {}\n for node in graph.nodes():\n dist[node] = float(\"inf\")\n parent[node] = None\n dist[start_node] = 0\n node_queue.push(start_node)\n while len(node_queue) != 0:\n node = node_queue.pop()\n for neighbor in graph.get_neighbors(node):\n if dist[neighbor] == float(\"inf\"):\n dist[neighbor] = dist[node] + 1\n parent[neighbor] = node\n node_queue.push(neighbor)\n return dist, parent", "title": "" }, { "docid": "94263564ee39b917a14039f58e25c88c", "score": "0.5404512", "text": "def dfs(graph, start_node):\n nonlocal current_label\n start_vertex = graph.get_vertex(start_node)\n start_vertex.mark_explored()\n for edge in start_vertex.get_edges():\n linked_vertex = graph.get_vertex(edge)\n if not linked_vertex.get_nodes()[0][1]:\n dfs(graph, edge)\n start_vertex.add_label(current_label)\n current_label -= 1", "title": "" }, { "docid": "cd03e2ed62fb05ff80482718908701b6", "score": "0.53835267", "text": "def dfs(node):\n if node is None:\n return [-1,-1,-1]\n left,right = dfs(node.left), dfs(node.right)\n return [left[1]+1, right[0]+1, max(left[1]+1, right[0]+1, left[2], right[2])]\n #left[1]+1 for node.left.right\n #left[1]+1 for node.right.left", "title": "" }, { "docid": "6477e9197ea527c783c4174da800932a", "score": "0.5376386", "text": "def dfs(self, start_node):\n\t\tself.visited = [False]*self.nodes\n\t\tprint \"dfs_recursive\"\n\t\tself.dfs_recursive(start_node)\n\t\tprint \"====\"\n\t\tprint \"dfs_iterative\"\n\t\tself.visited = [False]*self.nodes\n\t\tself.dfs_iterative(start_node)", "title": "" }, { "docid": "62432411e86a69882541641151e1926e", "score": "0.53730875", "text": "def dfs(initial, goal_test, successors):\n # frontier representa os lugares que ainda nao visitamos\n frontier = Stack()\n frontier.push(Node(initial, None))\n # explored representa os lugares que ja foram visitados\n explored = {initial}\n\n # continua enquanto houver lugares para explorar\n while not frontier.empty:\n current_node = frontier.pop()\n current_state = current_node.state\n\n # se encontrar o objetivo retorna o no atual\n if goal_test(current_state):\n return current_node\n\n # verifica para onde podemos ir em seguida\n for child in successors(current_state):\n # ignora os nos filhos que ja foram visitados\n if child in explored:\n continue\n explored.add(child)\n frontier.push(Node(child, current_node))\n # passamos por todos os lugares e nao atingimos o objetivo\n return None", "title": "" }, { "docid": "04e13520b02f722dfbf49f115c59e3df", "score": "0.5349521", "text": "def depthFirstSearch(problem):\n \n # Initialize Stack() instances for LIFO datastructure (Source: College 2, Slide 38)\n fringe = util.Stack()\n visited = util.Stack()\n \n \"\"\"\n Initialize the fringe with following indeces:\n [0] = state (or node) to be expanded\n [1] = list of actions up to this node\n \"\"\"\n \n fringe.push([problem.getStartState(), []])\n\n while not fringe.isEmpty():\n \n # Extract deepest node from the fringe\n state, actions = fringe.pop()\n \n # Goal test\n if problem.isGoalState(state):\n return actions\n \n # Get successors of current state\n successors = problem.getSuccessors(state)\n\n # Loop over successors\n for index in reversed(range(0, len(successors))):\n\n # Successors can't be visited, so check\n if successors[index][0] not in visited.list:\n \n # Push successors' state and actions to the fringe if not visited\n 
fringe.push([successors[index][0], actions + [successors[index][1]]])\n \n # Current node is visited\n visited.push(state)\n\n # Return empty list of actions in case no goal is present\n return []", "title": "" }, { "docid": "c3192fe553f5bceec13920978c6fb623", "score": "0.53470683", "text": "def bfs(graph, initial_node, dest_node):\n # Queue to store the list of node which will be going to be visited\n Q = []\n # list to store the list of nodes visited\n visited_nodes = []\n # Dictionar key variable to store the list of parents of node visited\n parent_list = {}\n # dictionary key variable to store the distance between the two nodes\n distance_node = {}\n # Adding the intial node into the list\n distance_node[initial_node] = 0\n parent_list[initial_node]= None\n Q.append(initial_node)\n while(bool(Q)):\n cur_node = Q.pop(0)\n for neighbor_nodes in graph.neighbors(cur_node):\n if neighbor_nodes not in visited_nodes:\n visited_nodes.append(neighbor_nodes)\n parent_list[neighbor_nodes] = cur_node\n distance_node[neighbor_nodes] = distance_node[cur_node] + graph.distance_nodes(cur_node,neighbor_nodes)\n Q.append(neighbor_nodes)\n\n if dest_node in visited_nodes:\n break\n\n list = []\n start_node = dest_node\n #print(\"parent list =\",parent_list)\n\n #for cur_node,par_node in parent_list.items():\n # if par_node is not None:\n # edge = graph.get_edge(par_node,cur_node)\n # print(\"edge\",edge)\n # list.append(edge)\n while(parent_list[start_node] is not None):\n par_node = parent_list[start_node]\n #print(\"parent node\",par_node)\n edge = graph.get_edge(par_node,start_node)\n #print(\"edge\",edge)\n list.append(edge)\n start_node = par_node\n\n\n list.reverse()\n return list", "title": "" }, { "docid": "ce60ddc0005c5442de501c331763cda2", "score": "0.5345536", "text": "def bfs(self, verbose: bool = True) -> list:\r\n all_states = []\r\n if verbose:\r\n print(\"**************Solving(BFS)*****************\")\r\n depth_count = 0\r\n states = 1\r\n queue = [self.state]\r\n while len(queue) != 0:\r\n if verbose:\r\n print(f\"\\rDepth: {depth_count} | States: {states}\", end='')\r\n new_open = []\r\n for state in queue:\r\n if self.quit:\r\n quit()\r\n if self.goal_test(state):\r\n if verbose:\r\n print()\r\n return self.tree.get_path(state)\r\n new_open += self.non_visited_states(state)\r\n queue = new_open\r\n depth_count += 1\r\n states += len(queue)\r\n all_states.extend(queue)\r\n raise StopIteration(\"Can't find Solution.\")", "title": "" }, { "docid": "f26423ab2e76bb111ff831722366270a", "score": "0.53446543", "text": "def dfs_helper(self, start_node):\n ret_list = [start_node.value]\n start_node.visited = True\n edges_out = [e for e in start_node.edges\n if e.node_to.value != start_node.value]\n for edge in edges_out:\n if not edge.node_to.visited: ret_list.extend(self.dfs_helper(edge.node_to))\n return ret_list", "title": "" }, { "docid": "8065d090a66ccbfa019e6896011e2915", "score": "0.5343275", "text": "def BFS(self, nd):\n #TODO: design your data structure here for your algorithm\n u = 0\n roads = dict()# key: length, value: path[]\n roadslength = []# all length of each path are in here\n prevNode = 0 \n start = 0\n \n for deadend in range(len(self.nodes)):\n if deadend != nd-1 and self.nd_dict[deadend].isEnd() and deadend+1 not in self.explored:\n path = self.BFS_2(nd,deadend+1)# a tople,([path],length)\n roads[path[1]] = path[0]\n roadslength.append(path[1])\n #TODO: update the information of your data structure\n if len(roadslength):\n shortestlength = min(roadslength)\n return 
roads[shortestlength]\n else:\n return []", "title": "" }, { "docid": "e3f612d39ecc9154b00a9e27895c1528", "score": "0.5342101", "text": "def dfs_helper(self, start_node):\n ret_list = [start_node.value]\n start_node.visited = True\n edges_out = [e for e in start_node.edges\n if e.node_to.value != start_node.value]\n for edge in edges_out:\n if not edge.node_to.visited:\n ret_list.extend(self.dfs_helper(edge.node_to))\n return ret_list", "title": "" }, { "docid": "3adb2cf1772c115dce4eb556f8021302", "score": "0.5330284", "text": "def get_lchild(self, node):\n return self.heap[2 * node + 1] if self.has_lchild(node) else None", "title": "" }, { "docid": "a751f553fe569db4ef3afe8adec97cfb", "score": "0.53252494", "text": "def dfs(self, node):\r\n if not node:\r\n return None\r\n \r\n if (node.left):\r\n self.dfs(node.left)\r\n\r\n print(node.val, end=' -> ')\r\n\r\n if (node.right):\r\n self.dfs(node.right)", "title": "" }, { "docid": "3c6021209e951d92f0c645706c985624", "score": "0.53199184", "text": "def dft(self, starting_vertex):\n q = Stack()\n visited = set() # (1a)\n q.push(starting_vertex) # (1)\n while q.size() > 0: # (2)\n v = q.pop()\n if v not in visited: # (2a)\n print(v)\n visited.add(v)\n for neighbor in self.get_neighbors(v): # (2b)\n q.push(neighbor)\n\n # ---------> [dft_NOTES] <---------\n # Depth First Traversal\n # (1) Init the stack [q] with the [starting_vertex] passed in\n # --> push [starting_vertex] onto our stack [q]\n # --> we are using a pre-built Stack()\n # --> SEE: projects>>graph>>[util.py]\n # --> WHY?\n # --> allows us to back up and try other paths\n # --> more details ⬇️\n # --> guided>>2201-graphs>>README>>recorded lecture\n # (1a) we will need a [visited] set\n # --> keep track of where we have been\n\t\t# (2) While our queue isn't empty ...\n # --> look at front of the stack : [v = q.pop()]\n # (2a) if [v] has not been visited ...\n # --> visit the node : we will just [print(v)]\n # --> add v to the visited set : [visited.add(v)]\n # (2b) add all neighbors of this node to the stack\n # --> get all the neighbors : [self.get_neighbors(v)]\n # --> loop thru each and push onto stack", "title": "" }, { "docid": "2ac70e2b11dc129026d884f5bebdb9ca", "score": "0.53139883", "text": "def get_leftmost(self):\n if self.tree is None:\n return None\n else:\n ltree = self.left\n if ltree.is_empty:\n return self\n else:\n return ltree.get_leftmost()", "title": "" }, { "docid": "af71abcc53bafc82496bdeb991022c7e", "score": "0.5312154", "text": "def get_current_node():\n return get_current_node.current[-1]", "title": "" }, { "docid": "df28794914ebff0ccc6d2813e11ff1cf", "score": "0.5301736", "text": "def best_fs(self, heuristic=None, verbose: bool = True) -> list:\r\n heuristic = self.heuristic if heuristic is None else heuristic\r\n if heuristic is None:\r\n raise Exception(\"No heuristic function is provided.\")\r\n if verbose:\r\n print(\"**************Solving(BEST_FS)*****************\")\r\n depth_count = 0\r\n states = 1\r\n queue = [self.state]\r\n while len(queue) != 0:\r\n current_state = queue.pop()\r\n if verbose:\r\n print(f\"\\rDepth: {depth_count} | States: {states}\", end='')\r\n if self.quit:\r\n quit()\r\n if self.goal_test(current_state):\r\n if verbose:\r\n print()\r\n return self.tree.get_path(current_state)\r\n next_states = self.non_visited_states(current_state)\r\n heuristics = [heuristic(state) for state in next_states]\r\n queue.extend([state for _, state in sorted(zip(heuristics, next_states))])\r\n depth_count += 1\r\n states += len(next_states)\r\n 
raise Exception(\"Can't find Solution.\")", "title": "" }, { "docid": "bccf72b67826d29976b8a59c0a6f2f19", "score": "0.53009474", "text": "def DFS(self, visitor, incoming):\n visitor.VisitLeaf(self, incoming)", "title": "" }, { "docid": "0df8624dbbe4b905d3bb170756fba383", "score": "0.5273659", "text": "def dft(self, starting_vertex):\n # TODO\n visited = set()\n stack = deque()\n stack.append(starting_vertex)\n while len(stack) > 0:\n currNode = stack.pop()\n if currNode not in visited:\n visited.add(currNode)\n print(currNode)\n for neighbor in self.get_neighbors(currNode):\n stack.append(neighbor)", "title": "" }, { "docid": "7fef8c43fbea55cb406f5db53fbe622d", "score": "0.52731425", "text": "def bfs(self, starting_vertex, destination_vertex):\n q = Queue()\n visited = set()\n q.enqueue([starting_vertex])\n \n \n while q.size() > 0:\n path = q.dequeue() \n last = path[-1]\n if last not in visited:\n \n visited.add(last)\n if last == destination_vertex:\n return path\n else:\n for each in self.get_neighbors(last):\n new_path = path + [each]\n q.enqueue(new_path)", "title": "" }, { "docid": "a4f520a34f719f1be44ed196c1d47f60", "score": "0.52715015", "text": "def depthFirstSearch(problem):\n\n #Nodos a expandir\n OPEN = util.Stack()\n\n #Nodos expandidos\n CLOSE = []\n\n #initial position\n #(coordenada,direccion,coste,padre)\n startNode = (problem.getStartState(), \"\", 0,None)\n\n #Nodo iniicial --> pila\n OPEN.push(startNode )\n\n #Indices\n COORD = 0 #State\n DIR = 1 #Action\n COST = 2 #Cost\n FATHER = 3 #Father node\n\n while not OPEN.isEmpty():\n\n #Obtenemos elemento de la lista\n vertex = OPEN.pop()\n\n #Comprobamos si es un goal\n if problem.isGoalState(vertex[COORD]):\n path_to_Goal = []\n #Construimos camino al path\n while vertex[FATHER]!= None:\n path_to_Goal.append(vertex[DIR])\n vertex = vertex[FATHER]\n\n #Necesario para path= Raiz ----> Goal\n path_to_Goal.reverse()\n return path_to_Goal\n\n #Comprobar si no se expandio antes\n if not vertex[COORD] in CLOSE:\n\n #Expandemos el nodo\n sucesors = problem.getSuccessors(vertex[COORD])\n #recorremos los hijos del nodo\n for tempNode in sucesors:\n if not tempNode[COORD] in CLOSE:\n\n\n state=tempNode[COORD]\n action = tempNode[DIR]\n cost = tempNode[COST]\n fath = vertex\n #creamos un nuevo nodo\n newNode= (state,action,cost,fath)\n #Agregamos a nodos a expandir\n OPEN.push(newNode)\n\n #Cerramos el nodo expandido\n CLOSE.append(vertex[COORD])\n return []", "title": "" }, { "docid": "ce9cc891c32b1cc04d22d53fb504d036", "score": "0.5268616", "text": "def bfs(self, start_node = None, visitor = lambda x: x):\n distances = {} # track visited nodes\n while len(distances) < len(self._nodes):\n break_first = bool(start_node)\n if not break_first:\n start_node = list(set(self._nodes).difference(set(distances.keys())))[0]\n distances[start_node] = 0\n que = deque([start_node])\n while len(que) > 0:\n current_id = que.popleft()\n for child_id in self.children(current_id):\n if child_id not in distances:\n que.append(child_id)\n distances[child_id] = distances[current_id] + 1\n visitor(child_id)\n if break_first: break\n return distances", "title": "" }, { "docid": "20c26b22698ff62d282997c294f410e5", "score": "0.52572304", "text": "def dfs_recursive(self, start_node):\n\t\tif not self.visited[start_node]:\n\t\t\tself.visited[start_node] = True\n\t\t\tprint \"visited : \", start_node\n\t\t\tfor each_node in self.graph[start_node]:\n\t\t\t\tif not self.visited[each_node]:\n\t\t\t\t\tself.dfs_recursive(each_node)", "title": "" }, { "docid": 
"92395f1ef938c6e8f364a09c65165ba2", "score": "0.5253412", "text": "def bfs_tree(adj_list, start):\n n = len(adj_list)\n parent_array = [None] * n\n state_array = [\"Undiscovered\"] * n\n queue = []\n state_array[start] = \"Discovered\"\n queue.append(start) #Enqueue\n return bfs_loop(adj_list, queue, state_array, parent_array)", "title": "" }, { "docid": "30c41b019acc6bdf3848a33677480e75", "score": "0.5247685", "text": "def dfs(self, start_node_num):\n self._clear_visited()\n start_node = self.find_node(start_node_num)\n return self.dfs_helper(start_node)", "title": "" }, { "docid": "5ad9a6d8f92f71a8d7da611079bdf221", "score": "0.52263486", "text": "def __best_first_graph_search(self, problem, f):\n f = self.__memoize(f, 'f')\n node = Node(problem.initial)\n\n assert node != None and node.state != None\n\n if problem.goal_test(node.state):\n return node\n frontier = PriorityQueue(min, f)\n\n\n frontier.append(node)\n explored = set()\n step = 0\n while frontier:\n step+=1\n \n node = frontier.pop()\n assert node != None and node.state != None, \"Estratto un nodo None\"\n \n #print '---- CURRENT NODE ----'\n #print node.state\n \n if problem.goal_test(node.state):\n return node, len(explored)+1\n explored.add(node.state)\n \n for child in node.expand(problem):\n assert child != None and child.state != None\n if child.state not in explored and child not in frontier:\n frontier.append(child)\n elif child in frontier:\n incumbent = frontier[child]\n if f(child) < f(incumbent):\n del frontier[incumbent]\n frontier.append(child)\n return None", "title": "" }, { "docid": "e1b29e928c69f8d15e5c2ca87a4b8061", "score": "0.52216387", "text": "def Node_fspath(self: Node) -> LEGACY_PATH:\n return legacy_path(self.path)", "title": "" }, { "docid": "ab81469afb9be272757e50d6f69c6064", "score": "0.5218042", "text": "def _get_logical_node(self):\n return self.__logical_node", "title": "" }, { "docid": "09abf87935b1f62e282ba88171239374", "score": "0.5217506", "text": "def bfs(self, node=None):\n yield self\n last = self\n for node in self.bfs(last):\n for child in node.children:\n yield child\n last = child\n if last == node:\n return", "title": "" }, { "docid": "62bc6914d35f5779d5d76b83abddf876", "score": "0.52159667", "text": "def bfs(self, starting_vertex, destination_vertex):\n # make a queue\n q = Queue()\n\n # enqueue our starting a node\n path = [starting_vertex]\n q.enqueue(path)\n\n # make a set to track if we've been at that node before \n visited = set() \n\n # while q is not empty\n while q.size() > 0:\n\n ## dequeue path at the front of our line\n current_path = q.dequeue()\n current_node = current_path[-1] # last item in list of nodes in our path.\n\n ### it this is our destination_vertex (or search node), return our current path\n if current_node == destination_vertex:\n return current_path\n\n ### if we haven't visited this node yet.\n if current_node not in visited:\n ### mark node as visited\n visited.add(current_node)\n ### get the node's neighbors\n neighbors = self.friendships[current_node]\n ### for each of the neighbors\n for neighbor in neighbors: \n #### add it to queue\n q.enqueue(current_path + [neighbor])", "title": "" }, { "docid": "34e1a8060fd393f0059b799e77824dbb", "score": "0.5210689", "text": "def solve(self, l0, l2, m, gap_tol=1e-2, warm_start=None, mu=0.95,\n branching='maxfrac', l1solver='l1cd', number_of_dfs_levels=0,\n verbose=False, time_limit=3600, cd_max_itr=1000,\n kkt_max_itr=100):\n st = time.time()\n upper_bound, upper_beta, support = self. 
\\\n _warm_start(warm_start, verbose, l0, l2, m)\n if verbose:\n print(f\"initializing took {time.time() - st} seconds\")\n\n # root node\n self.root = Node(None, [], [], x=self.x, y=self.y,\n xi_norm=self.xi_norm)\n self.bfs_queue = queue.Queue()\n self.dfs_queue = queue.LifoQueue()\n self.bfs_queue.put(self.root)\n\n # lower and upper bounds initialization\n lower_bound, dual_bound = {}, {}\n self.levels = {0: 1}\n min_open_level = 0\n\n max_lower_bound_value = -sys.maxsize\n best_gap = gap_tol + 1\n\n if verbose:\n print(f'{number_of_dfs_levels} levels of depth used')\n\n while (self.bfs_queue.qsize() > 0 or self.dfs_queue.qsize() > 0) and \\\n (time.time() - st < time_limit):\n\n # get current node\n if self.dfs_queue.qsize() > 0:\n curr_node = self.dfs_queue.get()\n else:\n curr_node = self.bfs_queue.get()\n\n # prune?\n if curr_node.parent_dual and upper_bound <= curr_node.parent_dual:\n self.levels[curr_node.level] -= 1\n # self.leaves.append(current_node)\n continue\n \n rel_gap_tol = -1\n if best_gap <= 20 * gap_tol or \\\n time.time() - st > time_limit / 4:\n rel_gap_tol = 0\n if best_gap <= 10 * gap_tol or \\\n time.time() - st > 3 * time_limit / 4:\n rel_gap_tol = 1\n # calculate primal and dual values\n curr_primal, curr_dual = self. \\\n _solve_node(curr_node, l0, l2, m, l1solver, lower_bound,\n dual_bound, upper_bound, rel_gap_tol, cd_max_itr,\n kkt_max_itr)\n\n curr_upper_bound = curr_node.upper_solve(l0, l2, m)\n if curr_upper_bound < upper_bound:\n upper_bound = curr_upper_bound\n upper_beta = curr_node.upper_beta\n support = curr_node.support\n best_gap = \\\n (upper_bound - max_lower_bound_value) / abs(upper_bound)\n\n # update gap?\n if self.levels[min_open_level] == 0:\n del self.levels[min_open_level]\n max_lower_bound_value = max([j for i, j in dual_bound.items()\n if i <= min_open_level])\n best_gap = \\\n (upper_bound - max_lower_bound_value) / abs(upper_bound)\n if verbose:\n print(f'l: {min_open_level}, (d: {max_lower_bound_value}, '\n f'p: {lower_bound[min_open_level]}), '\n f'u: {upper_bound}, g: {best_gap}, '\n f't: {time.time() - st} s')\n min_open_level += 1\n\n # arrived at a solution?\n if best_gap <= gap_tol:\n return self._package_solution(upper_beta, upper_bound,\n lower_bound, best_gap, support,\n self.p, time.time() - st)\n\n # integral solution?\n if is_integral(curr_node.z, self.int_tol):\n curr_upper_bound = curr_primal\n if curr_upper_bound < upper_bound:\n upper_bound = curr_upper_bound\n upper_beta = curr_node.upper_beta\n support = curr_node.support\n if verbose:\n print('integral:', curr_node)\n best_gap = \\\n (upper_bound - max_lower_bound_value) / abs(upper_bound)\n # branch?\n elif curr_dual < upper_bound:\n left_node, right_node = branch(curr_node, self.x, l0, l2, m,\n self.xi_norm, self.int_tol,\n branching, mu)\n self.levels[curr_node.level + 1] = \\\n self.levels.get(curr_node.level + 1, 0) + 2\n if curr_node.level < min_open_level + number_of_dfs_levels:\n self.dfs_queue.put(right_node)\n self.dfs_queue.put(left_node)\n else:\n self.bfs_queue.put(right_node)\n self.bfs_queue.put(left_node)\n else:\n pass\n\n return self._package_solution(upper_beta, upper_bound, lower_bound,\n best_gap, support, self.p,\n time.time() - st)", "title": "" }, { "docid": "36aeb4b0f3997370d2d545349a7af83c", "score": "0.5209212", "text": "def bfs_tree(self, src_vertex = 0):\n parents = self.breadth_first_search(src_vertex)[1]\n return parents", "title": "" }, { "docid": "f69c0ddacf92fad4b9d102b75cd2357e", "score": "0.52060425", "text": "def dfs(node):\n if 
not node:\n return float('inf'), None\n if not node.left and not node.right:\n return 1, node.val\n ld, lv = dfs(node.left)\n rd, rv = dfs(node.right)\n if ld < rd:\n return ld+1, lv\n else:\n return rd+1, rv", "title": "" }, { "docid": "61754d354f26bda446220f6749fe3183", "score": "0.5196221", "text": "def getNodeFringeFieldIN(self):\n\t\treturn self.__fringeFieldIN", "title": "" }, { "docid": "be5a892615b9ce89cda82c38b60c18a5", "score": "0.5184284", "text": "def bfs(self, starting_vertex, destination_vertex):\n # TODO\n q = deque()\n # Each element in the queue\n q.append([starting_vertex])\n visited = set()\n while len(q) > 0:\n currPath = q.popleft() \n currNode = currPath[-1] \n if currNode == destination_vertex:\n return currPath\n if currNode not in visited:\n visited.add(currNode)\n for neighbor in self.get_neighbors(currNode):\n newPath = list(currPath)\n newPath.append(neighbor)\n q.append(newPath)", "title": "" }, { "docid": "c65cc536f708838a3961342b68f895d8", "score": "0.5182056", "text": "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n #util.raiseNotDefined()\n return __generalSearch(\"dfs\",problem,nullHeuristic)", "title": "" }, { "docid": "2dcd0b038c1aef7ef45e80368d234aae", "score": "0.51798636", "text": "def dfs(node,direction):\n if node is None:\n return 0\n elif direction == -1:\n return 1+dfs(node.right,1)\n elif direction == 1:\n return 1+dfs(node.left,-1)", "title": "" }, { "docid": "b630216a44a40a74bdec4aaec80021aa", "score": "0.517526", "text": "def lft(self, lft):\n\n self._lft = lft", "title": "" }, { "docid": "7e44587174432732d1e3ba1e2bdb62e0", "score": "0.51747566", "text": "def bfs_visited(ugraph, start_node):\n queue = deque()\n visited = set()\n if start_node not in ugraph.keys():\n return None\n visited.add(start_node)\n queue.append(start_node)\n while len(queue) > 0:\n node = queue.popleft()\n for neighbor in list(ugraph[node]):\n if not neighbor in visited:\n visited.add(neighbor)\n queue.append(neighbor)\n return visited", "title": "" }, { "docid": "b323297ddfcf1a6da098589a2560b41e", "score": "0.51747423", "text": "def dfs(self, start_node = None, visitor = lambda x: x):\n history = set()\n def visit(node):\n if node not in history:\n history.add(node)\n for child in self.children(node):\n visit(child)\n visitor(node) # callback comes here\n if not start_node:\n while len(history) < len(self._nodes):\n visit(list(set(self._nodes).difference(history))[0])\n else:\n visit(start_node)", "title": "" }, { "docid": "ef6f168e80bbd395c67723c72153496c", "score": "0.51731706", "text": "def bfs(maze):\n # TODO: Write your code here\n (stRow, stCol) = maze.getStart()\n # print(maze.getObjectives())\n checked = set()\n # checked.add((stRow, stCol))\n queue = []\n queue.append((stRow, stCol))\n prev = dict()\n while len(queue) > 0:\n (row, col) = queue.pop(0)\n if (row, col) in checked:\n continue\n checked.add((row, col))\n # print(path)\n if maze.isObjective(row, col):\n path = []\n cur = (row, col)\n while cur in prev:\n path.append(cur)\n cur = prev[cur]\n path.append(cur)\n path.reverse()\n return path\n neighbors = maze.getNeighbors(row, col)\n for n in neighbors:\n if n not in checked:\n prev[n] = (row,col)\n # newPath = list(path)\n# newPath.append(n)\n queue.append(n)\n #else: \n # neighbors.remove(n)\n \n \n \n return []", "title": "" }, { "docid": "3f48a4b1c6f2a288aaf29bbd332a3353", "score": "0.5172477", "text": "def LM(f):\n return smp_ground_LM(f.rep, f.lev, f.ord, f.dom)", "title": "" }, { "docid": "64f67a48663ad3e1591293e54a6b3e98", 
"score": "0.51596355", "text": "def bfs(graph, start):\n\n # list to keep track of all visited nodes\n explored = []\n\n # the FIFO queue\n queue = []\n\n # add the start node to the queue\n queue.append(start)\n\n # keep looping until there are no nodes still to be checked\n while len(queue) > 0:\n\n # pop first item from queue (FIFO)\n node = queue.pop(0)\n\n # check if the node has already been explored\n if node not in explored:\n\n # add node to list of checked nodes\n explored.append(node)\n\n # get neighbours if node is present, otherwise default to empty list\n neighbours = graph.get(node, [])\n\n # add neighbours of node to queue\n for neighbour in neighbours:\n queue.append(neighbour)\n\n # return the explored nodes\n return explored", "title": "" }, { "docid": "eae62765e043dbdc0c96bea52007d369", "score": "0.51557684", "text": "def fs(self):\n return self._fs", "title": "" }, { "docid": "c9bbb0ad0794a4a753742ca2f9ac47df", "score": "0.51550764", "text": "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n myXsb= pyxf.xsb(\"/Users/muthukumarsuresh/Downloads/XSB/bin/xsb\")\n myXsb.load('mazeeee.P')\n myXsb.load('dfs2.P')\n time.sleep(5)\n result = myXsb.query('planPath(X)')\n\n while(result == False):\n result = myXsb.query('planPath(X)')\n returnstr = result[0]['X']\n returnstr = returnstr[1:len(returnstr)-1]\n returnList = returnstr.split(',')\n s =Directions.SOUTH\n w = Directions.WEST\n e = Directions.EAST\n n = Directions.NORTH\n retList = []\n for element in returnList:\n if element == 's':\n retList.append(s)\n if element == 'e':\n retList.append(e)\n if element == 'w':\n retList.append(w)\n if element == 'n':\n retList.append(n)\n # returnList = returnList.reverse()\n return retList\n # util.raiseNotDefined()", "title": "" }, { "docid": "61e3ddf19556c58f52aa956a33e9e136", "score": "0.5154698", "text": "def DLT(attributesList, matrix, heuristic, currentNode):\n #print(\"am i here\", currentNode)\n classColumn = matrix[:, (matrix.shape[1] - 1)]\n # If attributes list is empty\n if not attributesList:\n # Change node value most common value of class column\n currentNode.value = round(classColumn.sum(axis = 0) / matrix.shape[0])\n # If nodes are pure\n elif classColumn.sum(axis = 0) / matrix.shape[0] == 1:\n currentNode.value = 1\n elif classColumn.sum(axis = 0) == 0:\n currentNode.value = 0\n else:\n # Find max gain\n gainList=[]\n for a in attributesList:\n gainList.append(h.gain(a, matrix, heuristic))\n nextAtt = attributesList[gainList.index(max(gainList))]\n \n if currentNode is None:\n #print(\"where am i\")\n currentNode = Node(nextAtt, -1)\n currentNode.label = nextAtt\n for i in [0,1]:\n # Break into submatrices where attribute is 0 or 1\n subAtt = numpy.where(matrix[:, nextAtt] == i)\n submatrix = matrix[subAtt] \n \n if i == 0:\n attributesList.remove(nextAtt)\n currentNode.left = Node(-10)\n nextNode = currentNode.left\n DLT(attributesList, submatrix, heuristic, nextNode)\n attributesList.append(nextAtt)\n else:\n attributesList.remove(nextAtt)\n currentNode.right = Node(-10)\n nextNode = currentNode.right\n DLT(attributesList, submatrix, heuristic, nextNode)\n attributesList.append(nextAtt)\n return currentNode", "title": "" }, { "docid": "724fd8ce4383eee102487edeffc64263", "score": "0.5151105", "text": "def search(self, problem):\n\n # return RECURSIVE-DLS(MAKE-NODE(INITIAL-STATE[problem]), problem, limit)\n return self._recursive_dls(Node(problem.get_initial_state()), problem, self._limit)", "title": "" }, { "docid": "13a18a50084db93318f5317d05c1639a", 
"score": "0.5150288", "text": "def depthFirstSearch(problem):\n \n expanded = set() # used to not expand previously expanded nodes\n moves = [] # keeps track of actions for return\n if (problem.isGoalState(problem.getStartState()) == False):\n expanded.add(problem.getStartState())\n path = _dfs(problem, problem.getStartState(), expanded, moves)\n final = [] # used to return actions to take\n for move in path: # goes through every state in path\n for direc in moves:\n if direc[0] == move: # gets action associated to that state\n final += [direc[1]] # adds action to return list\n return final", "title": "" }, { "docid": "2a2a46dbae1ddadbccbdafbdb66221cd", "score": "0.51487803", "text": "def set_l(self, node):\n self.left = node", "title": "" }, { "docid": "59b77c50ed9f9b435e46244825054299", "score": "0.5138312", "text": "def dfs(self, l):\n if not self.real:\n l.append((self.p, self.word, self.wid))\n else:\n for k,v in self.child.items():\n v.dfs(l)", "title": "" }, { "docid": "f7f84a6d570dbdb47a4398648cfa197c", "score": "0.51378864", "text": "def bfs(self, start, finish, graph):\n # create empty queue holding paths this time\n q = Queue()\n # empty set for visited vertexes\n visited = set()\n\n # enqueue a list representing path to starting_vertex which is just itself\n q.enqueue([start])\n\n # while the queue isn't empty\n while q.size() > 0:\n # dequeue path\n v = q.dequeue()\n # node is the last vertex in path\n node = v[-1]\n\n if node == finish:\n return [node]\n\n # if we haven't visited node yet\n if node not in visited:\n # loop through neighbors\n for neighbor in graph[node]:\n # add neighbor to path\n path = list(v)\n path.append(neighbor)\n # enqueue the path for more searching\n q.enqueue(path)\n # if neighbor is goal, return the path\n if neighbor == finish:\n return path\n \n # add vertex to visited after we've searched it\n visited.add(node)\n\n return None", "title": "" }, { "docid": "ae4e6b6718667b15a2dc3caf3e30450c", "score": "0.5130743", "text": "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # 初始化搜索状态\n fringe = util.Stack()\n node = {\"state\":problem.getStartState(), \"path\":[], \"cost\":0}\n fringe.push(node)\n explored = set()\n # 构造循环展开搜索树\n while (not fringe.isEmpty()):\n # 获得待判定的叶子节点\n node = fringe.pop()\n # 判断节点是否满足目标要求,如果是一个可行解,就返回行动方案\n if problem.isGoalState(node[\"state\"]):\n return node[\"path\"]\n # 否则,就继续从这个叶子节点往下展开\n else:\n # 先判断一下这个节点是不是已经展开过了,避免重复展开\n if node[\"state\"] not in explored:\n for nextnode in problem.getSuccessors(node[\"state\"]):\n # 为了适应可能的数据结构为图,必须判定叶子节点是否已经访问过\n if nextnode[0] not in explored:\n nextnode = {\"state\":nextnode[0],\n \"path\":node[\"path\"]+[nextnode[1]],\n \"cost\":node[\"cost\"]+nextnode[2]}\n # 如果没有访问过,就将叶子节点添加到待搜索的节点集合中\n fringe.push(nextnode)\n # 最后不要忘记把搜索过的节点添加到访问过的节点集合中\n explored.add(node[\"state\"])", "title": "" }, { "docid": "7fd8c6d5cab0e719327139a229fb2d73", "score": "0.51249397", "text": "def left(self):\n return self.edges[0]", "title": "" }, { "docid": "c4f2553bff75c0df35c8cdea22fa002a", "score": "0.51225144", "text": "def bfs_dfs(graph, rac_class, start_node, end_node):\n rac_object = rac_class()\n dist = {}\n parent = {}\n # Initialise all the information for nodes\n for node in graph.nodes():\n dist[node] = float(\"inf\")\n parent[node] = None\n dist[start_node] = 0\n rac_object.push(start_node)\n # Below is the core algorithm for Bread First Search/Depth First Search\n while len(rac_object) != 0:\n node1 = rac_object.pop()\n # Check if we already reached the end node. 
If so, break out of the loop\n if node1 == end_node:\n break\n for neighbor in graph.get_neighbors(node1):\n if dist[neighbor] == float(\"inf\"):\n dist[neighbor] = dist[node1] + 1\n parent[neighbor] = node1\n rac_object.push(neighbor)\n return parent", "title": "" }, { "docid": "ec3beca85225ca185da4c4d40d853860", "score": "0.5109261", "text": "def bfs(graph, start):\n\n\tvertex_q = Queue()\n\tvertex_q.put(start)\n\n\twhile vertex_q.qsize() > 0:\n\t\tcurrent = vertex_q.get()\n\t\tfor", "title": "" }, { "docid": "7c85b591f3ff7e2863a52e6d1821849b", "score": "0.5106741", "text": "def get_rightmost_path(G):\n v_root = G.get_min_vertex()\n v_target = G.get_max_vertex()\n T_G = DFS(G=G, v=v_root)\n v_target = G.get_max_dfs_id_vertex()\n R = rightmost_path_BFS(T_G, v_root, v_target)\n #for v in R.vertices:\n # v.id = v.dfs_id\n R.reverse_graph()\n return R", "title": "" }, { "docid": "fd1ed09a5dc81f81811d55cce48600b0", "score": "0.51034385", "text": "def _lg_undirected(G, selfloops=False, create_using=None):\n L = nx.empty_graph(0, create_using, default=G.__class__)\n\n # Graph specific functions for edges and sorted nodes.\n get_edges = _edge_func(G)\n sorted_node = _node_func(G)\n\n # Determine if we include self-loops or not.\n shift = 0 if selfloops else 1\n\n edges = set([])\n for u in G:\n # Label nodes as a sorted tuple of nodes in original graph.\n nodes = [sorted_node(*x) for x in get_edges(u)]\n\n if len(nodes) == 1:\n # Then the edge will be an isolated node in L.\n L.add_node(nodes[0])\n\n # Add a clique of `nodes` to graph. To prevent double adding edges,\n # especially important for multigraphs, we store the edges in\n # canonical form in a set.\n for i, a in enumerate(nodes):\n edges.update([_sorted_edge(a, b) for b in nodes[i + shift:]])\n\n L.add_edges_from(edges)\n return L", "title": "" }, { "docid": "49859bc25eb54851e0ae3aebb78fc0f9", "score": "0.5096115", "text": "def dft_recursive(self, starting_vertex):\n def traverse(vertex, visited):\n neighbors = self.get_neighbors(vertex).values()\n if vertex not in visited:\n print(vertex)\n visited.append(vertex)\n \n for next_vert in neighbors:\n traverse(next_vert, visited)\n\n return traverse(starting_vertex, [])", "title": "" }, { "docid": "990e8ad1eca5390eeeff863dba6d4753", "score": "0.509476", "text": "def DFS(self, start):\n pass", "title": "" }, { "docid": "b9fb15f29ce64398a5be8d95adffe2f0", "score": "0.50871783", "text": "def RSSI_FSPL(self, node):\r\n map_name = \"RSSI_\" + str(node.get_Id())\r\n map_out = self.copy(map_name)\r\n Loc = node.get_Loc()\r\n x_point = float(Loc[0])\r\n y_point = float(Loc[1])\r\n z_point = float(Loc[2])\r\n map_out.dist(x_point, y_point, z_point)\r\n dist = map_out.get_Values()\r\n dist[dist < 0.001] = 0.001\r\n # d[km], freq[GHz] +92.45; d[m], freq[kHz] -87.55;\r\n # d[m], freq[MHz] -27.55; d[km], freq[MHz] +32.45;\r\n FSPL = 20.0 * np.log10(dist) + 20.0 * np.log10(float(node.get_FcMHz())) - 27.55\r\n FSPL = float(node.get_PowdBm()) - FSPL\r\n map_out.set_Values(FSPL)\r\n return map_out", "title": "" }, { "docid": "65534407a48b072bc2d247e8e4e3f111", "score": "0.50847125", "text": "def _bfs(self,\n start: Vertex,\n destination: Vertex) -> Optional[list[Vertex]]:\n # queue to enforce a visiting order based on hop distance\n q = [start]\n\n # dictionary to keep track of visited nodes\n # along with their predecessors\n visited = {start: None}\n\n while len(q) > 0:\n current_vertex = q.pop(0)\n\n # destination vertex found\n if current_vertex == destination:\n break\n\n # add neighboring vertices 
to the queue\n for nbr in current_vertex.getConnections():\n if nbr not in visited:\n visited[nbr] = current_vertex\n q.append(nbr)\n\n if destination in visited:\n shortest_path = []\n temp = destination\n while temp != start:\n shortest_path.insert(0, temp)\n temp = visited[temp]\n shortest_path.insert(0, start)\n return shortest_path\n else:\n return None", "title": "" }, { "docid": "b8343a1aa2cc7ded8c66dd206164ba62", "score": "0.507099", "text": "def dfs(self):\n def inner_dfs(node):\n # preorder visit\n if node.left_child:\n inner_dfs(node.left_child)\n # inorder visit\n print node.value\n if node.right_child:\n inner_dfs(node.right_child)\n # postorder visit\n return inner_dfs(self.root)", "title": "" }, { "docid": "b8d3c79dba70f47d6f3025caf9b6b0d9", "score": "0.5068718", "text": "def depthFirstSearch(problem):\n # frontier will contain those node which needs to be explored.\n # Order will be Last In First Out\n frontier = util.Stack() \n \n explored = list() # explored List will contain list of nodes which are already explored.\n startState = problem.getStartState()\n node_with_direction_list = [startState]\n frontier.push(node_with_direction_list) # frontier is list of list (Direction to reach a Node and a Node)\n # Example frontier = list(['North','South','B'],['East','West','C'])\n # Here frontier contains the list of list of (Direction to reach B along with node B) and\n # (Direction to reach C along with node C).\n \n while frontier.isEmpty() != True:\n \n node_with_direction_list = frontier.pop()\n currentNode = node_with_direction_list.pop()\n # After pop operation, nodeWithDirectionList will contain only path to reach currentNode\n explored.append(currentNode)\n\n if problem.isGoalState(currentNode) == True:\n return node_with_direction_list\n \n for successor in problem.getSuccessors(currentNode):\n if explored.count(successor[0]) == 0:\n # Creating a list which consist of a direction from currentNode to Successor and a Successor\n child_node_and_direction = []\n child_node_and_direction.append(successor[1])\n child_node_and_direction.append(successor[0])\n # Will adding successor to frontier, the full path from starting node to successor is require.\n # By merging path of currentNode and path from currentNode to Successor will give full path of\n # Successor from starting Node\n frontier.push(node_with_direction_list + child_node_and_direction)\n\n return []", "title": "" }, { "docid": "c2fd1701f5971e440c48fe8248d043c4", "score": "0.5058268", "text": "def bfs(self, starting_vertex, destination_vertex):\n # similar to bft, but must use paths\n #Create a queue\n q = Queue()\n visited = set()\n q.enqueue([starting_vertex])\n \n while q.size() > 0:\n #Dequeue the first path, and then grab the vertext from the end of the path in order to check for it in the set\n path = q.dequeue()\n last_vertex = path[-1]\n #check if it's been visited\n if last_vertex not in visited:\n #add to it\n visited.add(last_vertex)\n\n if last_vertex == destination_vertex:\n return path\n\n # add a path to vertex neighbors\n for neighbor in self.get_neighbors(last_vertex):\n #make a copy of the path\n path_copy = path.copy()\n #append neighbors to new path + enqueue \n path_copy.append(neighbor)\n q.enqueue(path_copy)", "title": "" }, { "docid": "c6f6a3e908e2c6c0d01dbd7593df4465", "score": "0.50527245", "text": "def bfs(start,goal):\n\n path = []\n depth = []\n\n # initialize variables\n max_depth = 0 \n\n # initialize a Queue() object and add the start location to it:\n queue = Queue()\n 
queue.put(start)\n # initialize a set() object for visited list and add the start location to it\n visited = set()\n visited.add(start)\n\n # initialize Queue() object for depth calculation\n depth = Queue()\n depth.put(0)\n\n\n # define an empty dictionary, where you'll record how you moved through the grid and a goal location,\n branch = {}\n found = False\n\n max_fringe_size = 0 \n \n \n while not queue.empty():\n # deque and store the explored node\n current_node = queue.get()\n visited.add(current_node)\n dep = depth.get()\n \n \n # goal check\n if current_node == goal:\n print('Found the Solution')\n found = True\n break\n else:\n for action in valid_actions(current_node):\n # get movement indicator from actions list\n da = action.delta\n \n # tuple -> grid transformation\n grid = np.array(current_node).reshape(3,-1)\n \n # find grid index of 0\n index = np.where(grid == 0)\n x,y = int(index[0]),int(index[1])\n \n #grid manipulation to exchange 0 and neighbor elements. \n grid[x+da[0],y+da[1]],grid[x,y] = grid[x,y],grid[x+da[0],y+da[1]]\n \n # grid -> tuple transformation\n next_node = tuple(grid.flatten().tolist())\n \n\n # Check if the new node has been visited before.\n # If the node has not been visited:\n # 1. Mark it as visited\n # 2. Add it to the queue\n # 3. Add how I got there to branch\n if next_node not in visited:\n visited.add(next_node)\n queue.put(next_node)\n depth.put(dep+1)\n branch[next_node] = (current_node, action)\n\n fringe_size = queue.qsize()\n if fringe_size > max_fringe_size:\n max_fringe_size = fringe_size\n\n if dep + 1 > max_depth:\n max_depth = dep + 1\n\n nodes = 0\n\n if found:\n\n nodes = len(branch)\n \n # traceback to find the depth by using of the branch dictionary.\n n = goal\n #print(branch[n][0])\n while branch[n][0] != start:\n \n path.append(branch[n][1])\n n = branch[n][0]\n \n path.append(branch[n][1])\n\n return path[::-1],nodes,max_depth,max_fringe_size", "title": "" }, { "docid": "3816c34840ae40724cfe00d115799544", "score": "0.50462496", "text": "def bfs_visited(ugraph, start_node):\n queue = deque([])\n visited = [start_node]\n queue.append(start_node)\n while (len(queue) != 0):\n next_node = queue.popleft()\n for item in ugraph[next_node]:\n if item not in visited:\n visited.append(item)\n queue.append(item)\n visited = set(visited)\n return visited", "title": "" }, { "docid": "94d6c1f3bb41dabe450e8cc9e3b9f60e", "score": "0.5038765", "text": "def choose_next_node(self):\n\n for node in self.open_list:\n heuristic = Heuristic(current_node=node, final_node=self.final_node,\n heuristic=self.heuristic)\n h_score = heuristic.calculate()\n node.h = h_score\n node.f = self.get_f_score(h_score, node)\n\n # sort list of node by f-score, from higher to lower.\n self.open_list.sort(key=lambda x: x.f)\n\n if len(self.open_list) > 1 and self.open_list[0].f == self.open_list[1].f:\n return [node for node in self.open_list if node.f == self.open_list[0].f]\n else:\n self.solution_history.append(deepcopy(self.open_list[0]))\n return self.open_list[0]", "title": "" }, { "docid": "39dd0e5cee85e2a048084d5bf9e2d457", "score": "0.5038192", "text": "def leaf_filesystem_layer(self):\n return self.filesystem_layers[-1]", "title": "" }, { "docid": "48bbd29bfd91acb215ca7ee0c427aa68", "score": "0.5036113", "text": "def lnk(self):\n try:\n return self.__lnk\n except:\n self.__lnk = self.lnkh + np.log(self.cosmo_params['H0'] / 100.0)\n return self.__lnk", "title": "" }, { "docid": "1345ac8fc14bd5f63799d69a105d70dc", "score": "0.50332123", "text": "def 
forw_bfs_subgraph(self, start_id):\r\n return self._bfs_subgraph(start_id, forward=True)", "title": "" }, { "docid": "9025c08d3121725c5a40d2f6afa70543", "score": "0.5008504", "text": "def bfs_tree(self, graph, reverse=False):\n return nx.bfs_tree(self, graph, reverse=reverse)", "title": "" }, { "docid": "ea9bdd94ebd8300763608978390837bb", "score": "0.5000737", "text": "def LT(f):\n return smp_ground_LT(f.rep, f.lev, f.ord, f.dom)", "title": "" }, { "docid": "1a2d7f4e09113236ca9c2892bf09d435", "score": "0.49923807", "text": "def bfs(self):\n queue = []\n values = []\n dequeued = None\n\n queue.append(self.root)\n\n while len(queue):\n dequeued = queue.pop(0)\n values.append(dequeued.value)\n dequeued.left and queue.append(dequeued.left)\n dequeued.right and queue.append(dequeued.right)\n\n return values", "title": "" }, { "docid": "7bba57d74feee14f3d9ce267afb9d7a1", "score": "0.49863958", "text": "def dofs_node(self):\n local_dof = np.arange( self.degree_freedom, dtype=int )\n global_dof = self.degree_freedom * self.index + local_dof\n\n return global_dof", "title": "" }, { "docid": "5ce7df98d7bf12dd9caa10aaf0f78e57", "score": "0.49855685", "text": "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n startState = problem.getStartState()\n expandedStatesInfo = {} # state:(fromState,fromDir)\n stack = util.Stack()\n \"stack entry : (state,fromState,fromDir)\" \n stack.push((startState, \"\", \"\"))\n \n while True:\n try: expState,fromState,fromDir = stack.pop()\n except IndexError:\n print \"No goal state found!!\"\n raise\n print \"expState : \", expState, fromState, fromDir\n\n if expState in expandedStatesInfo:\n print \"already expanded : \", expState\n continue\n\n print \"adding to expandedStatesInfo : \", expState\n expandedStatesInfo[expState] = (fromState,fromDir)\n\n if problem.isGoalState(expState):\n print \"reached goal state : \", expState\n break\n\n # handle the fringes\n fringesInfo = problem.getSuccessors(expState)\n print \"fringesInfo : \", fringesInfo\n for fringeState, fringeDir, fringeCostLocal in fringesInfo:\n print \"fringeState : \", fringeState, fringeDir, fringeCostLocal\n stack.push((fringeState,expState,fringeDir))\n # print \"stack : \", stack.list\n\n assert problem.isGoalState(expState)\n\n # backtrack the path\n path = []\n state = expState\n while state != startState:\n fromState, fromDir = expandedStatesInfo[state]\n print \"adding to path : \", state, fromState, fromDir\n path.append(fromDir)\n state = fromState\n\n path.reverse()\n print \"path : \", path\n return path", "title": "" }, { "docid": "b60a5ddbededaa309bf99467bf7f29cb", "score": "0.49790117", "text": "def bfs(graph: Graph, root: str) -> List[str]:\n\n queue = deque([root])\n visited = []\n\n while queue:\n current = queue.pop()\n if current not in visited:\n neighbors = graph[current]\n queue.extendleft(neighbors)\n visited.append(current)\n\n return visited", "title": "" }, { "docid": "3fd281944310936ccfa63a596dcc7808", "score": "0.49755606", "text": "def dft(self, starting_vertex):\n s = Stack()\n visited = set()\n s.push(starting_vertex)\n while s.size() > 0:\n poped = s.pop()\n if poped not in visited:\n visited.add(poped)\n for each in self.get_neighbors(poped):\n s.push(each)\n print(poped)", "title": "" }, { "docid": "bb9798b86e40ee006ba1408a87dada8b", "score": "0.4971379", "text": "def bfs(maze):\n # TODO: Write your code here\n visited = []\n #path dictionary to store prev nodes\n path = {}\n path2 = []\n start = maze.getStart()\n queue = [start]\n path[start] = None\n 
#print(\"test1, \", type(path2))\n #queue_size check\n while(queue):\n curr = queue.pop(0)\n if (curr not in visited):\n visited.append(curr)\n r = curr[0]\n c = curr[1]\n #found goal\n if (maze.isObjective(r, c)):\n path2 = create_path(path, curr, start).copy()\n break\n\n neighbors = maze.getNeighbors(r, c)\n for i in neighbors:\n if (i not in visited):\n path[i] = curr\n queue.append(i)\n return path2", "title": "" }, { "docid": "b28084e433244aaa3c48d69e265ce582", "score": "0.49709606", "text": "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # visited is a dictionary with location:([path], cost) pairs\n # i realize now that cost is kinda unnecessary lmao. whoops\n visited = {}\n stack = util.Stack()\n\n startState = problem.getStartState()\n if problem.isGoalState(startState):\n return []\n stack.push((startState, '', 0))\n # response are dictionaries for finding prev\n response = {\"North\": (0, -1), \"South\": (0, 1), \"East\": (-1, 0), \"West\": (1, 0)}\n\n # Main loop\n while not stack.isEmpty():\n # Pop from stack - node is (location, direction, cost)\n node = stack.pop()\n visited[node[0]] = (node[1], node[2])\n\n # Managing successors\n for s in problem.getSuccessors(node[0]):\n # Check goal state on creation\n if problem.isGoalState(s[0]):\n get = response[s[1]]\n back = (s[0][0] + get[0], s[0][1] + get[1])\n path = [s[1]]\n cost = s[2]\n while back != startState:\n path.append(visited[back][0])\n cost += visited[back][1]\n get = response[visited[back][0]]\n back = (back[0] + get[0], back[1] + get[1])\n path.reverse()\n return path\n\n # Check if s in frontier (stack)\n inFrontier = False\n for f in stack.list:\n if s[0] == f[0]:\n inFrontier = True\n # Put in stack if not visited or in frontier\n if (not s[0] in visited) and (not inFrontier):\n stack.push(s)\n return None", "title": "" }, { "docid": "2cf5037d562209d5722f2798e534a53f", "score": "0.49696815", "text": "def depthFirstSearch(problem):\n DEBUG = False;\n if DEBUG == True: print \"Start:\", problem.getStartState()\n if DEBUG == True: print \"Is the start a goal?\", problem.isGoalState(problem.getStartState())\n if DEBUG == True: print \"Start's successors:\", problem.getSuccessors(problem.getStartState())\n \"*** YOUR CODE HERE ***\"\n \"\"\"Create the frontier\"\"\"\n frontier = util.Stack()\n \"\"\"Keep track of how you got somewhere\"\"\"\n frontierHash = {}\n \"\"\"Keep track of explored nodes with True/False hashes\"\"\"\n exploredHash = {}\n \"\"\"The current node being evaluated\"\"\"\n current = problem.getStartState()\n\n if problem.isGoalState(current):\n \"\"\"Start is the goal\"\"\"\n return []\n \"\"\"Otherwise, do some exploring\"\"\"\n frontier.push(current)\n exploredHash[current] = True\n frontierHash[current] = []\n \"\"\"Do the search on the frontier\"\"\"\n while frontier.isEmpty() == False:\n current = frontier.pop()\n if DEBUG == True: print \"Popping \",current,\"off of frontier.\"\n if problem.isGoalState(current):\n \"\"\"Return the path to the goal\"\"\"\n if DEBUG == True: print \"The goal is: \",current\n if DEBUG == True: print \"The path is: \",frontierHash[current]\n return frontierHash[current]\n \n successors = problem.getSuccessors(current)\n for successor in successors:\n \"\"\"If the node hasn't been explored, put it on the frontier\"\"\"\n if successor[0] not in exploredHash:\n\t\"\"\"Add node to the frontier\"\"\"\n frontier.push(successor[0])\n \"\"\"Add current to explored hashtable\"\"\"\n exploredHash[successor[0]] = True\n\t\"\"\"Add path to the node to the 
frontierHash\"\"\"\n\tpath = list(frontierHash[current])\n\tpath.append(successor[1])\n\tfrontierHash[successor[0]] = path\n\tif DEBUG == True: print \"Pushing \",successor[0],\" at \",path\n\n util.raiseNotDefined()", "title": "" }, { "docid": "fcf23a86a658e6f2570f81d2a5b21d9f", "score": "0.4969494", "text": "def get_lhood(*args):\n args = validated_params(*args)\n T, edge_to_P, root, root_prior_distn1d, node_to_data_fvec1d = args\n\n root_lhoods = _get_root_lhoods(*args)\n if root_lhoods.any():\n return root_lhoods.sum()\n else:\n return None", "title": "" } ]
a0f7b02c15b120e78b8ea99e523bd88a
Test that the report is correct.
[ { "docid": "2cee7de2673fae20e61b01ceaced3c26", "score": "0.7139832", "text": "def test_report(self):\n self.__birt.nr_manual_ltcs.return_value = 10, []\n self.__birt.nr_manual_ltcs_too_old.return_value = 5\n self.__birt.date_of_last_manual_test.return_value = datetime.datetime.now() - timedelta(days=5)\n self.assertTrue('5 van de 10 handmatige logische testgevallen zijn te lang geleden '\n '(meest recente 5 dag(en))' in self.__metric.report())", "title": "" } ]
[ { "docid": "33413fe45069fc706964ce3f54eb8e12", "score": "0.77926624", "text": "def test_report(self):\n self.assertEqual('Van 4 van de 12 handmatige logische testgevallen is de uitvoeringstijd niet ingevuld.',\n self.__metric.report())", "title": "" }, { "docid": "2a912c8a46e86e960f2b57a08bed772c", "score": "0.7606778", "text": "def test_report(self):\n self.assertEqual('De uitvoering van 8 van de 12 handmatige logische testgevallen kost 120 minuten.',\n self.__metric.report())", "title": "" }, { "docid": "a855b7d0271388063f42115c085a2905", "score": "0.75368786", "text": "def test_for_report(self):\n pass", "title": "" }, { "docid": "b09b5cb417f6b4b8428dcbd84424403d", "score": "0.7462171", "text": "def test_report(self):\n self.assertEqual('5 van de 10 unittesten falen.', self.__metric.report())", "title": "" }, { "docid": "58ffe5a29e3da6c6bf14631385a37f45", "score": "0.7457009", "text": "def test_get_report(self):\n pass", "title": "" }, { "docid": "bbc34740557a1e3882a906f17e0dc037", "score": "0.7455599", "text": "def test_report(self):\n self.__birt.nr_ltcs.return_value = 120, []\n self.__birt.reviewed_ltcs.return_value = 110, []\n self.assertEqual('Er zijn 10 niet gereviewde logische testgevallen, van in totaal 120 '\n 'logische testgevallen.', self.__metric.report())", "title": "" }, { "docid": "0781a818e0fb96c075afe49a84f832af", "score": "0.74532145", "text": "def test_create_report1(self):\n pass", "title": "" }, { "docid": "22c98af2be0c11d65e2062d79178742a", "score": "0.74511546", "text": "def test_report(self):\n self.__birt.approved_ltcs.return_value = 100, []\n self.__birt.reviewed_ltcs.return_value = 110, []\n self.assertEqual('Er zijn 10 niet goedgekeurde logische testgevallen, van in totaal 110 gereviewde '\n 'logische testgevallen.', self.__metric.report())", "title": "" }, { "docid": "ee91ec45d972bd981bc15e9060ea1d71", "score": "0.7442183", "text": "def test_get_report1(self):\n pass", "title": "" }, { "docid": "dec9eacb285a7ee1c3676624f21be4c7", "score": "0.7419393", "text": "def test_create_report(self):\n pass", "title": "" }, { "docid": "e4d3d5e42a623237fea046dbbf2b4c3a", "score": "0.73501694", "text": "def test_process_report_request_correct(self) -> None:\n pass", "title": "" }, { "docid": "9e05c857909d812e8e63ec06b04a4522", "score": "0.71790683", "text": "def test_with_no_data(self):\n actual_report = self.analysis.get_report_as_text()\n expected_report = \"\"\"\n (reported in boxes)\nmaximum weekly quay side throughput: 0\naverage weekly quay side throughput: 0.0\nstandard deviation: -1.0\nmaximum daily quay side throughput: 0.0\naverage daily quay side throughput: 0.0\nmaximum hourly quay side throughput: 0.0\naverage hourly quay side throughput: 0.0\n(daily and hourly values are simply scaled weekly values, rounding errors might exist)\n\"\"\"\n self.assertEqual(actual_report, expected_report)", "title": "" }, { "docid": "3aa7dd87fb249e11e645ceec66e0027e", "score": "0.71108896", "text": "def test_report(self):\n self.__birt.nr_manual_ltcs.return_value = 10, []\n self.__birt.nr_ltcs.return_value = 120, []\n self.assertEqual('Er zijn 10 handmatige logische testgevallen, van in totaal 120 logische testgevallen.',\n self.__metric.report())", "title": "" }, { "docid": "26e00f418c10789f6dc588bed8f592d6", "score": "0.70995986", "text": "def test_report(self):\n basket = FruitBasket(Path(__file__).parent.joinpath(\"data\", \"given.csv\"))\n result = report(basket)\n print(result)\n assert result.count(\"\\n\") == 28", "title": "" }, { "docid": 
"3b4f45074fdadd74c24630c98c1d77be", "score": "0.70927775", "text": "def test_get_report_information_correct(self) -> None:\n self.mock_report_information_processor.process.return_value = Report(\n created_at=datetime(2020,10,12,13,0,0),\n patient_id=\"1000000\",\n document_text=\"stuff\"\n )\n\n report = self.get_report_information_use_case.get(self.correct_text_file)\n self.assertEqual(report, self.correct_output)\n self.mock_report_repository.add.assert_called_once()", "title": "" }, { "docid": "a547a885813e9eb24b88dc3b9c3c5101", "score": "0.7005789", "text": "def test_test_report(self):\n self.__opener.contents = '''<result><successfulTestCount>1973</successfulTestCount>\n <failedTestCount>17</failedTestCount>\n <quarantinedTestCount>0</quarantinedTestCount>\n <skippedTestCount>2</skippedTestCount></result>'''\n self.assertEqual(17, self.__report.failed_tests('url'))\n self.assertEqual(1973, self.__report.passed_tests('url'))\n self.assertEqual(2, self.__report.skipped_tests('url'))", "title": "" }, { "docid": "9e664b0311f6627f3de1d3ec1656ec2d", "score": "0.6998808", "text": "def test_report_when_untested(self):\n self.__birt.date_of_last_manual_test.return_value = datetime.datetime.min\n self.__birt.nr_manual_ltcs.return_value = 10, []\n self.assertEqual('De 10 handmatige logische testgevallen zijn nog niet allemaal uitgevoerd.',\n self.__metric.report())", "title": "" }, { "docid": "1aa02f764a6e574f7147a32f0db39d52", "score": "0.6933463", "text": "def test_valid_report(self):\n self.assertEqual(\n first=\"\",\n second=_validate_report([None, \"Sneasel\", \"Momos\", \"Home\", '{:%H:%M}'.format(datetime.now() + timedelta(minutes=2))])\n )", "title": "" }, { "docid": "60c867e2f1e7d0347404813d02203229", "score": "0.69127405", "text": "def test_report(self):\n self.__birt.nr_ltcs_to_be_automated.return_value = 25, []\n self.__birt.nr_automated_ltcs.return_value = 20, []\n self.assertEqual('Er zijn 5 nog te automatiseren logische testgevallen, van in totaal 25 '\n 'geautomatiseerde logische testgevallen.', self.__metric.report())", "title": "" }, { "docid": "ffee03f1341246b6c89fc6daaeb1a85a", "score": "0.6866708", "text": "def test_reporting_policy(self):\n\n reporting_text1 = driver.find_element_by_xpath(\"//*[@id='footer-content']/footer/div/p[3]\").text\n reporting1 = \"Personal information provided above will not be used for any other purpose beyond this peer-to-peer program and required federal and state-level reporting.\"\n\n if assertEqual(reporting_text1, unicode(reporting1, \"utf-8\")):\n print('\\n') # adds line break\n print \"text not found\"\n else:\n print('\\n') # adds line break\n print \"'\", reporting_text1, \"'\", \" text is present\"\n\n reporting_text2 = driver.find_element_by_xpath(\"//*[@id='footer-content']/footer/div/p[4]\").text\n reporting2 = \"Consistent with federal and state reporting obligations including, but not limited to, the Federal Physician Payment Sunshine Act, CSL Behring will disclose any transfers of value given to healthcare providers attending our programs.\"\n\n if assertEqual(reporting_text2, unicode(reporting2, \"utf-8\")):\n print('\\n') # adds line break\n print \"text not found\"\n else:\n print('\\n') # adds line break\n print \"'\", reporting_text2, \"'\", \" text is present\"", "title": "" }, { "docid": "8aae783fb5f6a26624df3f00bb6c1483", "score": "0.67734313", "text": "def test_report_with_untested(self):\n self.__birt.date_of_last_manual_test.return_value = datetime.datetime.now() - datetime.timedelta(days=60)\n 
self.__birt.nr_manual_ltcs.return_value = 10, []\n self.__birt.nr_manual_ltcs_too_old.return_value = 5\n self.assertTrue(\n self.__metric.report().startswith('5 van de 10 handmatige logische testgevallen zijn '\n 'te lang geleden (meest recente 60 dag(en))'))", "title": "" }, { "docid": "e80330aa862f7dca50e431b811bc6951", "score": "0.67699206", "text": "def test_str(self) -> None:\n self.assertEqual(str(self.report), 'Beaver Creek TEST: 2019-01-09')", "title": "" }, { "docid": "dcbf3b2cb07d4ed558f81f7eafbd0287", "score": "0.66823614", "text": "def test_report():\n report_command = next(readme_blocks)\n want = next(readme_blocks)\n simulator_status = phmdoctest.simulator.run_and_pytest(\n report_command, pytest_options=None)\n got = simulator_status.runner_status.stdout\n verify.a_and_b_are_the_same(want, got)", "title": "" }, { "docid": "21106421aa10aee7fcff3292bc27547d", "score": "0.66454893", "text": "def test_all(self):\n my_report = report.ReportOptions()\n name = my_report.SECTION_NAME\n assert name == \"[REPORT]\"\n test_text = \"[REPORT]\\n\" + \\\n \"Status NO\\n\" + \\\n \"Energy NO\\n\" + \\\n \"Summary NO\\n\" + \\\n \"NODES N1 N2 N3 N17\\n\" + \\\n \"LINKS ALL\\n\" \\\n \"FLOW YES\\n\" + \\\n \"VELOCITY PRECISION 4\\n\" \\\n \"F-FACTOR PRECISION 4\\n \" \\\n \"VELOCITY ABOVE 3.0\"\n my_report = ReportOptionsReader.read(test_text)\n actual_text = ReportOptionsWriter.as_text(my_report)\n msg = '\\nSet:'+test_text+'\\nGet:'+actual_text\n self.assertTrue(match(actual_text, test_text), msg)", "title": "" }, { "docid": "e37b1527841309dded70df82604de5d5", "score": "0.66447735", "text": "def test_simple(self):\n my_report = report.ReportOptions()\n my_report.pagesize = 64\n name = my_report.SECTION_NAME\n assert name == \"[REPORT]\"\n expected_text = \"[REPORT]\\n\" + \\\n \" Status \tNO\\n\" + \\\n \" Energy \tNO\\n\" + \\\n \" Page \\t64\\n\" + \\\n \" Summary \tYES\"\n actual_text = ReportOptionsWriter.as_text(my_report)\n msg = '\\nSet:'+expected_text+'\\nGet:'+actual_text\n self.assertTrue(match(actual_text, expected_text), msg)", "title": "" }, { "docid": "12b8e007d80a7e431cb5e1d0ecd0aa70", "score": "0.66099477", "text": "def test(self):\n # -- Test --\n resp = self.request(\n self.client.get,\n '/admin/report/facility',\n {},\n self.admin_access_token\n )\n\n # (1)\n self.assertEqual(resp.status_code, 200)\n\n # (2)\n data = self.get_response_data(resp)\n self.assertIsInstance(data, list)\n\n # (3)\n self.assertEqual(len(data), 1)\n\n # (4)\n report = data[0]\n\n self.assertIn('id', report)\n id = report['id']\n self.assertIsInstance(id, str)\n self.assertEqual(len(id), 24)\n\n del report['id']\n\n self.report.update({'author': 'fake_student'})\n self.assertDictEqual(report, self.report)\n # -- Test --", "title": "" }, { "docid": "f428b4b71b9af524da3dd8cfb4fcdb36", "score": "0.65758437", "text": "def test_write_report(capsys, donors):\n donors.create_report()\n out, _ = capsys.readouterr()\n assert \"Bill Murray\" in out\n assert \"Woody Harrelson\" in out\n assert out.index(\"Bill Murray\") < out.index(\"Woody Harrelson\")", "title": "" }, { "docid": "7c56cb95ecd3129bc1453423b1cc2d4c", "score": "0.6568402", "text": "def test_report_without_jira(self):\n self.assertEqual('De uitvoeringstijd van handmatige logische testgevallen van <no name> kon niet gemeten '\n 'worden omdat de bron ManualLogicalTestCaseTracker niet is geconfigureerd.',\n metric.DurationOfManualLogicalTestCases(domain.Project(), domain.Project()).report())", "title": "" }, { "docid": 
"cfc50fe67eccff9b2a8b527e3a3c3951", "score": "0.65586543", "text": "def test_write_report(report: ViyaDeploymentReport) -> None:\n # write the report and data file\n data_file, html_file = report.write_report()\n\n # check for expected files\n assert os.path.exists(data_file)\n assert os.path.isfile(data_file)\n assert os.path.exists(html_file)\n assert os.path.exists(html_file)\n\n # clean up files\n os.remove(data_file)\n os.remove(html_file)", "title": "" }, { "docid": "9cdc84495caec38202647cc962a07cca", "score": "0.65537614", "text": "def test_report_line(self):\n line = ('218501217863',\n 1,\n '%s' % self._now,\n 'pod_name 218501217863',\n 'License',\n '1234',\n 'priority_item_nbr_001')\n msg = 'Default exporter line entry not as expected'\n received = self._e.get_report_line(line)\n expected = \"\"\"%s|%s|%s|%s|%s|%s|%s\"\"\" % ('218501217863',\n '1',\n self._now,\n 'pod_name 218501217863',\n 'License',\n '1234',\n 'priority_item_nbr_001')\n self.assertEqual(received, expected, msg)", "title": "" }, { "docid": "e8111e36bdf7011c808d344e772c22ce", "score": "0.6516754", "text": "def test_duc_report(self):\n url = reverse('recoup_report')\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "478263e2e1544607d844ec6c3eb6ebf9", "score": "0.6491565", "text": "def test_str(self):\n self.assertEqual(str(self.bmreport), 'Beaver Creek TEST: 2019-01-09')", "title": "" }, { "docid": "a021d887ee61cbb4c77bbe89a4594418", "score": "0.6476277", "text": "def test_page(self):\n test_text = \"[REPORT]\\n\" + \\\n \" Status \tNO\\n\" + \\\n \" Summary \tYES\\n\" \\\n \" Energy \tNO\\n\" + \\\n \" Nodes \t Node1 Node2 Node3\\n\" \\\n \" Links \tLink1 Link2\\n \" \\\n \" Page 64\"\n my_report = ReportOptionsReader.read(test_text)\n actual_text = ReportOptionsWriter.as_text(my_report)\n msg = '\\nSet:'+test_text+'\\nGet:'+actual_text\n self.assertTrue(match(actual_text, test_text), msg)", "title": "" }, { "docid": "c868fc521fb6a9bfd3e0485d2eefb4fb", "score": "0.6460325", "text": "def test_summary(self):\r\n assert '3 tests, 1 failure, 1 error in ' in self.output", "title": "" }, { "docid": "3ac051fc9cab71dac4e4cc79820ae0e6", "score": "0.6457043", "text": "def TestArapahoe(self):\r\n result = arapahoe_basin_snow_report()\r\n print \"Output from method shown below:\\n\\n%s\" % result\r\n self.assertTrue(True)", "title": "" }, { "docid": "a7b2c6811c76bec80d170fb78a08dcdf", "score": "0.64400166", "text": "def test_report(self):\n user = yaklient.User(CLEAR_LOC_1, USER_IDS[0])\n yak = user.post_yak(\"Test Message\", \"Hello\")\n comment = user.post_comment(\"Test Comment\", yak)\n user.report(yak, \"Report\")\n user.report(comment, \"Report\")\n delete_success = user.delete(yak)\n self.assertEqual(delete_success, True)\n self.assertEqual(yak.update(), False)", "title": "" }, { "docid": "36a2c57a672841a7371f5ace4dbfe14b", "score": "0.6433924", "text": "def test_domainreport_view(self):\n self.import_reports()\n user = core_models.User.objects.get(username=\"admin\")\n self.client.force_login(user)\n url = reverse(\"modoboa_dmarc:domain_report\", args=[self.domain.pk])\n response = self.client.get(\"{}?period=2015-26\".format(url))\n self.assertContains(response, \"'Failed', 100.0\")", "title": "" }, { "docid": "5e6b9c0d09e2fdb4838be7d546aeb731", "score": "0.6393931", "text": "def test_process_report_request_invalid_input_incorrect(self) -> None:\n pass", "title": "" }, { "docid": "4630be84cc3f59af4b68719c5e654033", "score": "0.639187", "text": "def 
test_correct_output(self):\n # Change input/output files to testing files specified above\n purchase_analytics.parse_filenames = self.parse_filenames\n purchase_analytics.run_analysis()\n \n # Compare generated output file with gold output file\n with open(self.testing_output_file_name,\"r\") as f1:\n with open(self.testing_gold_output_file_name,\"r\") as f2:\n self.assertEqual(f1.read().strip(),f2.read().strip())", "title": "" }, { "docid": "4630be84cc3f59af4b68719c5e654033", "score": "0.639187", "text": "def test_correct_output(self):\n # Change input/output files to testing files specified above\n purchase_analytics.parse_filenames = self.parse_filenames\n purchase_analytics.run_analysis()\n \n # Compare generated output file with gold output file\n with open(self.testing_output_file_name,\"r\") as f1:\n with open(self.testing_gold_output_file_name,\"r\") as f2:\n self.assertEqual(f1.read().strip(),f2.read().strip())", "title": "" }, { "docid": "fb677cff84d3b9e3e589114adf5d3ee7", "score": "0.63726556", "text": "def test_write_report_unpopulated() -> None:\n # create unpopulated report instance\n report = ViyaDeploymentReport()\n\n # write report\n data_file, html_file = report.write_report()\n\n # make sure None is returned\n assert data_file is None\n assert html_file is None", "title": "" }, { "docid": "aa088408c231a60861a78f31b0fa3125", "score": "0.6366452", "text": "def TestLoveland(self):\r\n result = loveland_snow_report()\r\n print \"Output from method shown below:\\n\\n%s\" % result\r\n self.assertTrue(True)", "title": "" }, { "docid": "ac32d7454b9335c8993bc83c791e1268", "score": "0.6337851", "text": "def test_success(database):\n det_award_1 = DetachedAwardFinancialAssistanceFactory(period_of_performance_curr=\"20120725\")\n det_award_2 = DetachedAwardFinancialAssistanceFactory(period_of_performance_curr=None)\n det_award_3 = DetachedAwardFinancialAssistanceFactory(period_of_performance_curr=\"5\")\n det_award_4 = DetachedAwardFinancialAssistanceFactory(period_of_performance_curr=\"\")\n\n errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4])\n assert errors == 0", "title": "" }, { "docid": "77d1f5dcc02faff3dbb9ea4714ba2920", "score": "0.63351446", "text": "def test_invoice_analysis(self):\n pass", "title": "" }, { "docid": "8d20fc39b50783d66d62deb8e2e8d01e", "score": "0.633226", "text": "def test_success(database):\n\n state_code = States(state_code=\"NY\")\n # Required for these values\n det_award_1 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"00*****\", record_type=1)\n det_award_2 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"00FORGN\", record_type=1)\n det_award_3 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"00FORgN\", record_type=2)\n det_award_4 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"NY*****\", record_type=2)\n det_award_5 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"Ny*****\", record_type=1)\n det_award_6 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"NY**123\", record_type=1)\n det_award_7 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"NY12345\", record_type=2)\n det_award_8 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"NY1234R\", record_type=2)\n # Ignored for record type 3\n det_award_9 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"AB12345\", record_type=3)\n errors = number_of_errors(_FILE, 
database, models=[det_award_1, det_award_2, det_award_3, det_award_4, det_award_5,\n det_award_6, det_award_7, det_award_8, det_award_9, state_code])\n assert errors == 0", "title": "" }, { "docid": "ff92d31a2bc2a4e47318e0902f301070", "score": "0.6331776", "text": "def test_set(self):\n report = self.analytics.suites[test_report_suite].report\\\n .set('anomalyDetection',True)\\\n .set({\"test\":\"abc\",\"currentData\":True})\n \n self.assertEqual(report.raw['anomalyDetection'], True)\n self.assertEqual(report.raw['test'], \"abc\")\n self.assertEqual(report.raw['currentData'], True)\n \n with self.assertRaises(ValueError):\n report.set()", "title": "" }, { "docid": "085bb33762cdcf1e423001389921f03d", "score": "0.63256544", "text": "def test_result(self):\n data = {'raw': get_content('coverage.out')}\n result = coverage_violation(data)\n result['status'].should.be.equal(STATUS_SUCCESS)\n result['plot']['cover'].should.be.equal(86)\n result['success_percent'].should.be.equal(86)", "title": "" }, { "docid": "1b374f6456fa8ac56cc60501d51206c1", "score": "0.631895", "text": "def _report(self):", "title": "" }, { "docid": "8c9eb93ad6f4fb71070faa485c6358dc", "score": "0.63149506", "text": "def test_report_view(self):\n year = factories.YearFactory.create()\n user = factories.UserFactory.create(\n _full_name=\"Fritz\", working_time_model=year.working_time_model\n )\n inactive = factories.UserFactory.create(\n working_time_model=year.working_time_model\n )\n Employment.objects.create(user=user, percentage=50, vacation_weeks=5)\n\n # New user has a different working time model...\n self.client.force_login(factories.UserFactory.create(_full_name=\"Hans\"))\n\n url = \"/report/annual-working-time/\"\n response = self.client.get(url)\n self.assertNotContains(response, str(user))\n self.assertNotContains(response, str(inactive))\n\n response = self.client.get(url + \"?user=active\")\n self.assertContains(response, str(user))\n self.assertNotContains(response, str(inactive))\n\n response = self.client.get(url + \"?user=\" + str(inactive.pk))\n self.assertContains(response, str(inactive))\n\n response = self.client.get(url + \"?year=2018\")\n self.assertEqual(\n messages(response),\n [\n \"No annual working time defined for user Hans\"\n \" with working time model Test.\"\n ],\n )\n\n other = factories.UserFactory.create(working_time_model=year.working_time_model)\n Employment.objects.create(user=other, percentage=50, vacation_weeks=5)\n factories.AbsenceFactory.create(user=other)\n\n response = self.client.get(url + \"?export=pdf&user=active\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response[\"content-type\"], \"application/zip\")\n\n response = self.client.get(url + \"?export=pdf&user={}\".format(user.pk))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response[\"content-type\"], \"application/pdf\")", "title": "" }, { "docid": "ac9c3c56b27bd0b8d6a0469ae9f77fe3", "score": "0.631163", "text": "def test_update_report_by_id1(self):\n pass", "title": "" }, { "docid": "a8b57d43bc427821965b029e2e0e6a15", "score": "0.6304681", "text": "def TestEldora(self):\r\n result = eldora_snow_report()\r\n print \"Output from method shown below:\\n\\n%s\" % result\r\n self.assertTrue(True)", "title": "" }, { "docid": "c7faf8bd8b90bd77ff872fc175b92687", "score": "0.62872577", "text": "def test_jsonReport(self):\n report = self.analytics.suites[test_report_suite].report.range(dateFrom,dateTo,granularity='day')\\\n .set(\"source\",\"standard\")\\\n .metric(\"pageviews\")\\\n 
.metric(\"visits\")\\\n .element(\"page\")\\\n .element(\"sitesection\", top=100, startingWith=1)\\\n .set(\"locale\",\"en_US\")\\\n .sortBy(\"visits\")\\\n .set(\"anomalyDetection\",True)\\\n .set(\"currentData\", True)\\\n .set(\"elementDataEncoding\",\"utf8\")\n\n testreport = self.analytics.suites[test_report_suite].jsonReport(report.json())\n self.assertEqual(report.json(),testreport.json(), \"The reportings aren't deserializing from JSON the same old:{} new:{}\".format(report.json(),testreport.json()))\n self.assertEqual(report.json(),testreport.__str__(), \"The reportings aren't deserializing to string __str__ the same old:{} new:{}\".format(report.json(),testreport.__str__()))", "title": "" }, { "docid": "ea38a43d19bc55c370d0fd7a819cafe0", "score": "0.62821436", "text": "def test_report_details_views_with_no_monsterreports(self):\r\n response = self.client.get('/report/{}/'.format(random.randrange(0,1000,1)))\r\n\r\n self.assertEqual(response.status_code, 302)", "title": "" }, { "docid": "da6db312d36db70b1e5f5cbe767ba5fd", "score": "0.62778455", "text": "def test_report_generator():\r\n donor_Db = Donor_Collection(get_sampleDB())\r\n\r\n report = donor_Db.report_generator()\r\n\r\n assert report.startswith(\"Donor Name | Total Given | Num Gifts| Average Gift |\")\r\n assert \"Christina Levermore $ 10000.00 1 $ 10000.00\" in report", "title": "" }, { "docid": "7b5e71ccf8e32b6d7a0a16638451ef9f", "score": "0.6255225", "text": "def test_cash_analysis(self):\n pass", "title": "" }, { "docid": "73fda46848a3970facda9dbb9cf7da53", "score": "0.6248184", "text": "def test_success(database):\n\n det_award = DetachedAwardFinancialAssistanceFactory(assistance_type=\"07\", face_value_loan_guarantee=0)\n det_award_2 = DetachedAwardFinancialAssistanceFactory(assistance_type=\"08\", face_value_loan_guarantee=20)\n\n errors = number_of_errors(_FILE, database, models=[det_award, det_award_2])\n assert errors == 0", "title": "" }, { "docid": "4a3ec39dc5fa6dbf8900f90b2a4e3d90", "score": "0.62437344", "text": "def test_success(database):\n det_award = DetachedAwardFinancialAssistanceFactory(legal_entity_country_code=\"USA\", legal_entity_zip_last4=\"12345\")\n det_award_2 = DetachedAwardFinancialAssistanceFactory(legal_entity_country_code=\"USA\", legal_entity_zip_last4=None)\n det_award_null = DetachedAwardFinancialAssistanceFactory(legal_entity_country_code=\"UK\",\n legal_entity_zip_last4=None)\n det_award_null_2 = DetachedAwardFinancialAssistanceFactory(legal_entity_country_code=\"UK\",\n legal_entity_zip_last4='')\n\n errors = number_of_errors(_FILE, database, models=[det_award, det_award_2, det_award_null, det_award_null_2])\n assert errors == 0", "title": "" }, { "docid": "d20f845668d118c39848965279494dec", "score": "0.62130356", "text": "def test_report_without_jira(self):\n self.assertEqual('De hoeveelheid logische testgevallen zonder ingevulde uitvoeringstijd van <no name> kon niet '\n 'gemeten worden omdat de bron ManualLogicalTestCaseTracker niet is geconfigureerd.',\n metric.ManualLogicalTestCasesWithoutDuration(domain.Project(), domain.Project()).report())", "title": "" }, { "docid": "72ffce8c42041c48483cb87ebc68493c", "score": "0.6203891", "text": "def test(self):\n self.compare_outputs()", "title": "" }, { "docid": "5925c273deea4ff2844cb37e2128c208", "score": "0.62026334", "text": "def test_get_all_reports1(self):\n pass", "title": "" }, { "docid": "f0a7e0a0c6954a0fd80b0665a894c8e4", "score": "0.6199768", "text": "def test_CrearReporte(self):\n rp = Reporte()\n rp.descripcion = 
'prueba'\n rp.porcentaje_alcanzado = '50'\n rp.horas_faltantes = '50'\n rp.fecha_reporte = '2016-01-01'\n rp.save()\n \n rp.save()\n \n self.assertTrue(Reporte.objects.filter(descripcion = 'prueba').exists(), \"El reporte no se a creado\")", "title": "" }, { "docid": "6012cba79bb403df1774d648fab3af61", "score": "0.61636615", "text": "def test_reportar(self):\n examen = Exam.objects.get(nombre_materia='Matematicas')\n pregunta = Question.objects.get(text_preg='2 + 2 ?')\n response = self.client.get(reverse('reportar', args=[examen.id, pregunta.id]))\n self.assertEqual(response.resolver_match.func, reportar)", "title": "" }, { "docid": "2c4b41caaba2e628beebfc049cb11549", "score": "0.6159754", "text": "def test_parse_report_file(self):\n test_file_dir = os.path.join('top', 'tests', 'files')\n file = 'VIC_VANA_REI_20131108145146.txt'\n\n received = self._e.parse_report_file(os.path.join(test_file_dir,\n file))\n expected = [{'identity_type_id': 9,\n 'identity_type_data': 'a1234',\n 'item_nbr': '6827668473420000130001',\n 'pod_name': 'Test Line1',\n 'pickup_ts': '2013-11-08 14:50:11',\n 'connote_nbr': '8473420000130'},\n {'identity_type_id': 9,\n 'identity_type_data': 'b1234',\n 'item_nbr': '6827668473420000131001',\n 'pod_name': 'Test Line2',\n 'pickup_ts': '2013-11-08 14:50:42',\n 'connote_nbr': '8473420000131'},\n {'identity_type_id': 9,\n 'identity_type_data': 'c1234',\n 'item_nbr': '6827668473420000131002',\n 'pod_name': 'Test Line3',\n 'pickup_ts': '2013-11-08 14:50:42',\n 'connote_nbr': '8473420000131'},\n {'identity_type_id': 9,\n 'identity_type_data': 'd1234',\n 'item_nbr': '6827668473420000131003',\n 'pod_name': 'Test Line4',\n 'pickup_ts': '2013-11-08 14:50:42',\n 'connote_nbr': '8473420000131'}]\n msg = 'Exporter report files parsed values error'\n self.assertListEqual(received, expected, msg)", "title": "" }, { "docid": "6c2a95341c7fcfcb9478fa2976596c76", "score": "0.6157855", "text": "def testBasic(self, rc):\n dirname = mktemp('mvpa', 'test_report')\n report = rc('UnitTest report',\n title=\"Sample report for testing\",\n path=dirname)\n isdummy = isinstance(report, DummyReport)\n\n ohandlers = verbose.handlers\n verbose.handlers = [report]\n verbose.level = 3\n verbose(1, \"Starting\")\n verbose(2, \"Level 2\")\n\n if not isdummy:\n self.failUnless(len(report._story) == 2,\n msg=\"We should have got some lines from verbose\")\n\n if __debug__:\n odhandlers = debug.handlers\n debug.handlers = [report]\n oactive = debug.active\n debug.active = ['TEST'] + debug.active\n debug('TEST', \"Testing report as handler for debug\")\n if not isdummy:\n self.failUnless(len(report._story) == 4,\n msg=\"We should have got some lines from debug\")\n debug.active = oactive\n debug.handlers = odhandlers\n\n os.makedirs(dirname)\n\n if externals.exists('pylab plottable'):\n if not isdummy:\n clen = len(report._story)\n import pylab as P\n P.ioff()\n P.close('all')\n P.figure()\n P.plot([1, 2], [3, 2])\n\n P.figure()\n P.plot([2, 10], [3, 2])\n P.title(\"Figure 2 must be it\")\n report.figures()\n\n if not isdummy:\n self.failUnless(\n len(report._story) == clen+2,\n msg=\"We should have got some lines from figures\")\n\n report.text(\"Dugi bugi\")\n # make sure we don't puke on xml like text with crap\n report.text(\"<kaj>$lkj&*()^$%#%</kaj>\")\n report.text(\"locals:\\n%s globals:\\n%s\" % (`locals()`, `globals()`))\n # bloody XML - just to check that there is no puke\n report.xml(\"<b>Dugi bugi</b>\")\n report.save()\n\n if externals.exists('pylab'):\n import pylab as P\n P.close('all')\n 
P.ion()\n\n # cleanup\n if os.path.exists(dirname):\n # poor man recursive remove\n for f in os.listdir(dirname):\n try:\n os.remove(os.path.join(dirname, f))\n except:\n # could be a directory... but no deeper ones expected\n for f2 in os.listdir(os.path.join(dirname, f)):\n os.remove(os.path.join(dirname, f, f2))\n os.rmdir(os.path.join(dirname, f))\n os.rmdir(dirname)\n verbose.handlers = ohandlers", "title": "" }, { "docid": "320e17e49ab3a5a92b65a4b86fb4890d", "score": "0.61563045", "text": "def test_financial_statement_analysis(self):\n pass", "title": "" }, { "docid": "2a83ad9dd9666bba342be0391883f5f0", "score": "0.61519486", "text": "def verifyEquity2(self, record):\n \tself.assertEqual('2018-04-30', record['valuation date'])\n \tself.assertEqual('00899.HK', record['ticker'])\n \tself.assertEqual('HKD', record['currency'])\n \tself.assertEqual(267580, record['total market value'])\n \tself.assertEqual(-105228546.41, record['market value gain loss'], 4)\n \tself.assertEqual(0.01, record['percentage of fund'], 6)", "title": "" }, { "docid": "b84f5f2cc4cc50faa98634a1e33a6933", "score": "0.6131317", "text": "def test_choose_unit_400():\n\n assert False", "title": "" }, { "docid": "ad9c83db0db02eb745298e4208987045", "score": "0.61167717", "text": "def test_report_run(self,m):\n path = os.path.dirname(__file__)\n\n with open(path+'/mock_objects/basic_report.json') as data_file:\n json_response = data_file.read()\n\n with open(path+'/mock_objects/Report.Queue.json') as queue_file:\n report_queue = queue_file.read()\n\n #setup mock object\n m.post('https://api.omniture.com/admin/1.4/rest/?method=Report.Get', text=json_response)\n m.post('https://api.omniture.com/admin/1.4/rest/?method=Report.Queue', text=report_queue)\n\n\n self.assertIsInstance(self.analytics.suites[test_report_suite].report.run(), omniture.Report, \"The run method doesn't work to create a report\")", "title": "" }, { "docid": "017fc1987aac77c29bed228c255b71ad", "score": "0.6113997", "text": "def test_create_report_file_entry(api_client, ofa_admin):\n user = ofa_admin\n api_client.login(username=user.username, password=\"test_password\")\n data = {\n \"original_filename\": \"report.txt\",\n \"quarter\": \"Q1\",\n \"slug\": uuid.uuid4(),\n \"user\": user.id,\n \"stt\": user.stt.id,\n \"year\": 2020,\n \"section\": \"Active Case Data\",\n }\n response = api_client.post(\"/v1/reports/\", data)\n assert response.status_code == status.HTTP_201_CREATED\n assert response.data[\"slug\"] == str(data[\"slug\"])\n\n assert ReportFile.objects.filter(\n slug=data[\"slug\"],\n year=data[\"year\"],\n section=data[\"section\"],\n version=1,\n user=user,\n ).exists()", "title": "" }, { "docid": "3cea96da25854bae2d8e26d6f4f72fd6", "score": "0.6112115", "text": "def report():\n pass", "title": "" }, { "docid": "ba777c8b4bb4d8b054a38bd4fddaedb9", "score": "0.61117953", "text": "def test_analyze(self):\n\n pass", "title": "" }, { "docid": "30b0f4f24d70fd88b870cc39f5016e5e", "score": "0.6108431", "text": "def test_report_without_manual_testcases(self):\n self.__birt.date_of_last_manual_test.return_value = datetime.datetime.min\n self.__birt.nr_manual_ltcs.return_value = 0, []\n self.assertEqual(self.__metric.no_manual_tests_template.format(name='FakeSubject', unit=self.__metric.unit),\n self.__metric.report())", "title": "" }, { "docid": "e41407d2da3e38efc5565a55a5484557", "score": "0.61025345", "text": "def AssertTest(test):\r\n\r\n assertionText = 'ControlSum test - more then 1 `RF`.'\r\n assert test['RF'].nunique() == 1, 
assertionText\r\n\r\n assertionText = 'ControlSum test - more then 1 `controlSum`.'\r\n controlSum = test.loc[test['Value'] == 'on', 'Field'].values[0]\r\n assert type(controlSum) == str, assertionText", "title": "" }, { "docid": "6e1b414370a55c8177047859834d6c57", "score": "0.6088687", "text": "def test_can_access_reports_page_and_see_the_form(self):\n self.login()\n response = self.client.get(reverse(\"reports\"))\n self.assertTrue(response.status_code, 200)\n self.assertContains(response, \"Generate a report\")", "title": "" }, { "docid": "51fa0f7465890887b84dbe1b9032ea9d", "score": "0.6084817", "text": "def testPerformanceReport(self):\n step = self.processingReport.retrieveStep('cmsRun1')\n perfInfo = self.reporter.getPerformanceInformation(step)\n self.assertEqual(len(self.trimNoneValues(perfInfo)), 21,\n 'Found less information than expected')\n self.assertEqual(perfInfo['PeakValueRss'], '891.617',\n 'Values do not match')\n self.assertEqual(perfInfo['readCachePercentageOps'], 0.995779157341,\n 'Values do not match')\n self.assertEqual(perfInfo['MaxEventTime'], '3.32538',\n 'Values do not match')\n\n step = self.processingReport.retrieveStep('logArch1')\n perfInfo = self.reporter.getPerformanceInformation(step)\n self.assertEqual(self.trimNoneValues(perfInfo), {},\n 'logArch1 performance info is not empty')\n\n step = self.processingReport.retrieveStep('stageOut1')\n perfInfo = self.reporter.getPerformanceInformation(step)\n self.assertEqual(self.trimNoneValues(perfInfo), {},\n 'stageOut1 performance info is not empty')\n\n step = self.errorReport.retrieveStep('cmsRun1')\n perfInfo = self.reporter.getPerformanceInformation(step)\n self.assertEqual(self.trimNoneValues(perfInfo), {},\n 'cmsRun1 performance info is not empty')\n\n step = self.errorReport.retrieveStep('logArch1')\n perfInfo = self.reporter.getPerformanceInformation(step)\n self.assertEqual(self.trimNoneValues(perfInfo), {},\n 'logArch1 performance info is not empty')\n\n step = self.errorReport.retrieveStep('stageOut1')\n perfInfo = self.reporter.getPerformanceInformation(step)\n self.assertEqual(self.trimNoneValues(perfInfo), {},\n 'stageOut1 performance info is not empty')", "title": "" }, { "docid": "a1cd6811569b6f73df74e14a976dcc41", "score": "0.60783243", "text": "def test_stats_compare(self):\n result_expected = \"\" \\\n \"+--------------------------------------------------------------+---------------------+---------------------+-----------------+-----------------+-------+-------+-------+-------+-------+\\n\" \\\n \"| refstr | service_bibcode | classic_bibcode | service_conf | classic_score | match | miss | new | newu | diff |\\n\" \\\n \"+==============================================================+=====================+=====================+=================+=================+=======+=======+=======+=======+=======+\\n\" \\\n \"| J.-P. Uzan, Varying constants, gravitation and cosmology, | 2011LRR....14....2U | 2010arXiv1009.5514U | 1.0 | 1 | | | | | DIFF |\\n\" \\\n \"| Living Rev. Rel. 14 (2011) 2, [1009.5514]. | | | | | | | | | |\\n\" \\\n \"+--------------------------------------------------------------+---------------------+---------------------+-----------------+-----------------+-------+-------+-------+-------+-------+\\n\" \\\n \"| C. J. A. P. Martins, The status of varying constants: A | 2017RPPh...80l6902M | 2017arXiv170902923M | 1.0 | 1 | | | | | DIFF |\\n\" \\\n \"| review of the physics, searches and implications, | | | | | | | | | |\\n\" \\\n \"| 1709.02923. 
| | | | | | | | | |\\n\" \\\n \"+--------------------------------------------------------------+---------------------+---------------------+-----------------+-----------------+-------+-------+-------+-------+-------+\"\n result_got, num_references, num_resolved = self.app.get_service_classic_compare_stats_grid(source_bibcode='0001arXiv.........Z',\n source_filename=os.path.join(self.arXiv_stubdata_dir,'00001.raw'))\n self.assertEqual(result_got, result_expected)\n self.assertEqual(num_references, 2)\n self.assertEqual(num_resolved, 2)", "title": "" }, { "docid": "fa4b257c3e385890ec8a34270f580037", "score": "0.6076862", "text": "def test_monster_report_creation(self):\r\n report = creates_monster_report(sighting=creates_sighting(researcher=creates_reseacher(),monster=creates_monster()))\r\n self.assertEqual(report.id, 1)\r\n monster_report = MonsterReport.objects.get(id=1)\r\n \r\n self.assertEqual(report.title, monster_report.title)\r\n self.assertEqual(report.filename, monster_report.filename)\r\n self.assertEqual(report.path, monster_report.path)\r\n #self.assertQuerysetEqual(report.sighting.values(), monster_report.sighting.values())\r", "title": "" }, { "docid": "5d8fcb2c0e2239c299e927d682553700", "score": "0.60713834", "text": "def test_4_3_1_13(self):\n pass", "title": "" }, { "docid": "7f6a02381368fcb7de63d92523de84e8", "score": "0.6065926", "text": "def test_update_report_by_id(self):\n pass", "title": "" }, { "docid": "77241cd948f173c426c528f67d1f8623", "score": "0.6065002", "text": "def run_report(self):", "title": "" }, { "docid": "15b6c964fef5b75137b1cd4c443c9e44", "score": "0.6048618", "text": "def test_failure(database):\n\n state_code = States(state_code=\"NY\")\n det_award_1 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"001****\", record_type=1)\n det_award_2 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"NA*****\", record_type=2)\n det_award_3 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"NA1234R\", record_type=2)\n det_award_4 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"\", record_type=1)\n det_award_5 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=None, record_type=2)\n # Invalid ppop format\n det_award_6 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"NA1234X\", record_type=2)\n errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4, det_award_5,\n det_award_6, state_code])\n assert errors == 6", "title": "" }, { "docid": "113bc3f461eca6a6cd618e8cefb00ca5", "score": "0.6047885", "text": "def test_summary(self):\r\n assert '1 test, 0 failures, 0 errors, 1 skip in ' in self.output", "title": "" }, { "docid": "0ab9ddbc7671417f147da5da881a35a0", "score": "0.604077", "text": "def test_basic(self):\n self.assertEqual(solution(\"\"\"Valve AA has flow rate=0; tunnels lead to valves DD, II, BB\nValve BB has flow rate=13; tunnels lead to valves CC, AA\nValve CC has flow rate=2; tunnels lead to valves DD, BB\nValve DD has flow rate=20; tunnels lead to valves CC, AA, EE\nValve EE has flow rate=3; tunnels lead to valves FF, DD\nValve FF has flow rate=0; tunnels lead to valves EE, GG\nValve GG has flow rate=0; tunnels lead to valves FF, HH\nValve HH has flow rate=22; tunnel leads to valve GG\nValve II has flow rate=0; tunnels lead to valves AA, JJ\nValve JJ has flow rate=21; tunnel leads to valve II\"\"\"), 1707)", "title": "" }, { "docid": "f7dbcd8ebb1212099aa048923afe817e", "score": "0.6039031", 
"text": "def scrab(self, report):\n assert False, \"You have to implement this function\"", "title": "" }, { "docid": "db3ebbf37ba4c18071e3dcc9d3ac599a", "score": "0.6032928", "text": "def test_report_with_zero_unittests(self):\n report = FakeUnitTestReport(unittests=0)\n project = domain.Project(metric_sources={metric_source.UnitTestReport: report})\n failing_unittests = metric.FailingUnittests(subject=FakeSubject(), project=project)\n self.assertEqual('Er zijn geen unittesten.', failing_unittests.report())", "title": "" }, { "docid": "fce7e4c48997c5121a3f620827776d44", "score": "0.60319555", "text": "def testReport(self):\n self.scorer.report(\n [self.url1, self.url2], [self.url3, self.url4])\n self.assertEquals(1, memcache.get('scoring:test:success:' + self.domain1))\n self.assertEquals(0, memcache.get('scoring:test:failure:' + self.domain1))\n self.assertEquals(1, memcache.get('scoring:test:success:' + self.domain2))\n self.assertEquals(1, memcache.get('scoring:test:failure:' + self.domain2))\n self.assertEquals(0, memcache.get('scoring:test:success:' + self.domain3))\n self.assertEquals(1, memcache.get('scoring:test:failure:' + self.domain3))\n\n self.scorer.report(\n [self.url1, self.url2, self.url3, self.url4], [])\n self.assertEquals(2, memcache.get('scoring:test:success:' + self.domain1))\n self.assertEquals(0, memcache.get('scoring:test:failure:' + self.domain1))\n self.assertEquals(3, memcache.get('scoring:test:success:' + self.domain2))\n self.assertEquals(1, memcache.get('scoring:test:failure:' + self.domain2))\n self.assertEquals(1, memcache.get('scoring:test:success:' + self.domain3))\n self.assertEquals(1, memcache.get('scoring:test:failure:' + self.domain3))\n\n self.scorer.report(\n [], [self.url1, self.url2, self.url3, self.url4])\n self.assertEquals(2, memcache.get('scoring:test:success:' + self.domain1))\n self.assertEquals(1, memcache.get('scoring:test:failure:' + self.domain1))\n self.assertEquals(3, memcache.get('scoring:test:success:' + self.domain2))\n self.assertEquals(3, memcache.get('scoring:test:failure:' + self.domain2))\n self.assertEquals(1, memcache.get('scoring:test:success:' + self.domain3))\n self.assertEquals(2, memcache.get('scoring:test:failure:' + self.domain3))", "title": "" }, { "docid": "5d8d84fe9ab1ba200988c6c2255ff20a", "score": "0.60315496", "text": "def test_error_report(self):\n postJson = {\"submission_id\": self.error_report_submission_id}\n response = self.app.post_json(\n \"/v1/submission_error_reports/\", postJson, headers={\"x-session-id\":self.session_id})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.headers.get(\"Content-Type\"), \"application/json\")\n self.assertEqual(len(response.json), 5)\n self.assertIn(\"cross_file_error_url\", response.json)", "title": "" }, { "docid": "833618b712d9dc8f2303c2124b9276c5", "score": "0.6012028", "text": "def test_success(database):\n award_fin = AwardFinancialFactory(availability_type_code='X')\n award_fin_lower = AwardFinancialFactory(availability_type_code='x')\n award_fin_null = AwardFinancialFactory(availability_type_code=None)\n\n errors = number_of_errors(_FILE, database, models=[award_fin, award_fin_null, award_fin_lower])\n assert errors == 0", "title": "" }, { "docid": "7d5f6f9833d9d06c892a2be2595208c7", "score": "0.60055333", "text": "def testEventInformationReport(self):\n eventInfo = self.reporter.getEventInformation('cmsRun1',\n self.processingReport)\n self.assertEqual(eventInfo['inputEvents'], 18192,\n 'Input events do not match')\n 
self.assertEqual(eventInfo['OutputEventInfo'].count('Run2012B-WElectron-PromptSkim-v1:USER:1603'), 1)\n self.assertEqual(eventInfo['OutputEventInfo'].count('Run2012B-LogErrorMonitor-PromptSkim-v1:USER:137'), 1)\n self.assertEqual(eventInfo['OutputEventInfo'].count('Run2012B-LogError-PromptSkim-v1:RAW-RECO:66'), 1)\n self.assertEqual(eventInfo['OutputEventInfo'].count('Run2012B-TOPElePlusJets-PromptSkim-v1:AOD:2320'), 1)\n self.assertEqual(eventInfo['OutputEventInfo'].count('Run2012B-HighMET-PromptSkim-v1:RAW-RECO:8'), 1)\n self.assertEqual(eventInfo['OutputEventInfo'].count('Run2012B-DiTau-PromptSkim-v1:RAW-RECO:192'), 1)\n\n eventInfo = self.reporter.getEventInformation('stageOut1',\n self.processingReport)\n self.assertEqual(eventInfo, {},\n 'stageOut1 event info is not empty')\n\n eventInfo = self.reporter.getEventInformation('logArch1',\n self.processingReport)\n self.assertEqual(eventInfo, {},\n 'logArch1 event info is not empty')\n\n eventInfo = self.reporter.getEventInformation('cmsRun1',\n self.mergeReport)\n self.assertEqual(eventInfo['inputEvents'], 0,\n 'Input events do not match')\n self.assertEqual(eventInfo['OutputEventInfo'].count('Run2012B-LogError-PromptSkim-v1:RAW-RECO:0'), 1)\n\n eventInfo = self.reporter.getEventInformation('cmsRun1',\n self.errorReport)\n self.assertEqual(eventInfo, {},\n 'Error report event info is not empty')", "title": "" }, { "docid": "bfdc5fabc80aee2ecba1afae9f857904", "score": "0.60054886", "text": "def test_success(database):\n cc_1 = CountryCode(country_code='USA', country_name='United States', territory_free_state=False)\n cc_2 = CountryCode(country_code='UKR', country_name='Ukraine', territory_free_state=False)\n fabs = FABSFactory(place_of_perform_country_c='USA', record_type=1, correction_delete_indicatr='')\n fabs_2 = FABSFactory(place_of_perform_country_c='uKr', record_type=2, correction_delete_indicatr='C')\n fabs_3 = FABSFactory(place_of_perform_country_c='abc', record_type=3, correction_delete_indicatr=None)\n # Ignore correction delete indicator of D\n fabs_4 = FABSFactory(place_of_perform_country_c='xyz', record_type=1, correction_delete_indicatr='d')\n\n errors = number_of_errors(_FILE, database, models=[cc_1, cc_2, fabs, fabs_2, fabs_3, fabs_4])\n assert errors == 0", "title": "" }, { "docid": "1d91d67fab2c431f9c15ee6872956e84", "score": "0.5998276", "text": "def test_get_all_reports(self):\n pass", "title": "" }, { "docid": "7239cfe2bb6a3b1d87748fcf787681c7", "score": "0.5986639", "text": "def report(self):\n assert False, \"You have to implement this function\"", "title": "" }, { "docid": "24d2be284efe60c818384249605548f9", "score": "0.5985607", "text": "def report(self):\n self.header()\n\n tests_results = self.checker.results()\n for tr in tests_results:\n self.summary(tr)\n\n self.footer()", "title": "" }, { "docid": "fba95fc9d2b22b7717be2ac2baca40d5", "score": "0.5983299", "text": "def test_new_report_complete_as_assistant(self):\n user_promoter = User.objects.create_user('promoter', '[email protected]', 'testpassword')\n base_user_promoter = BaseUser.objects.create(user=user_promoter, name=\"PromotoraTest\",\n last_name_paternal=\"last_name_paternal\",\n last_name_maternal=\"last_name_maternal\",\n phone_number=\"phone_number\",\n email=\"[email protected]\",\n address=\"address\")\n base_user_promoter.save()\n\n community = Community.objects.create(name = 'Name',\n municipality = 'Municipality',\n state = 'State')\n\n promoter = Promoter.objects.create(base_user=base_user_promoter,\n contact_name = \"Contacto\",\n 
contact_phone_number = \"1234512312\"\n )\n create_user_for_group('Assistant')\n self.client.login(username=\"Assistant\", password=\"testpassword\")\n\n beneficiary = Beneficiary.objects.create(name=\"Rodolfo\",\n last_name_paternal=\"Rodriguez\",\n last_name_maternal=\"Rocha\",\n community=community,\n promoter=promoter,\n num_of_family_beneficiaries=16,\n contact_name=\"Juan\",\n contact_phone=\"4424325671\",\n account_number=123456,\n bank_name=\"Banamets\")\n\n self.client.login(username=\"user\", password=\"testpassword\")\n response = self.client.get('/administrative/production_report/')\n self.assertEqual(response.status_code, 302)", "title": "" }, { "docid": "5a24b1d592455e11ac8f25a526deed70", "score": "0.5981717", "text": "def test_passing(self):\n pass", "title": "" }, { "docid": "6c4ca71f9a9c391a37ae1b98830f785c", "score": "0.5980563", "text": "def test_4_3_1_14(self):\n pass", "title": "" }, { "docid": "49087f00e36b822aed0a1ba01f52811c", "score": "0.5973367", "text": "def test_get_report(self, app, report, size, column, value):\n request = app.get(f'/ms-ophelie/customers/{report}', status=200)\n\n assert request.status_code == 200\n headers = request.headers\n assert headers['Content-Disposition'] == 'attachment; filename=customers.csv'\n\n body = request.body\n lines = body.split(b'\\n')\n assert len(lines) == size\n assert column in lines[0]\n assert value in lines[1]", "title": "" }, { "docid": "77e157ccd55944e60ff82982def2138e", "score": "0.59659743", "text": "def test_invalid_report_missing_args(self):\n self.assertEqual(\n first=\"Missing information, please provide name and gym. Type *?help raid* for help\",\n second=_validate_report([\"Sneasel\"])\n )", "title": "" }, { "docid": "7268abb184e3b5daf787c598f511ccfc", "score": "0.5951311", "text": "def test_monthly_employer_report_has_all_book_keeping_where_vat_is_required_fields(self):\n #arrange\n book_keeping_fields = [\n 'list_of_names_and_income_tax_where_vat_is_required',\n 'sum_of_income_tax_where_vat_is_required',\n 'list_of_names_and_social_security_employee_where_vat_is_required',\n 'sum_of_social_security_employer_where_vat_is_required',\n 'sum_of_social_security_where_vat_is_required'\n ]\n #act\n monthly_employer_report = self.reports_maker.monthly_employer_report(for_year=2015, for_month=1)\n #assert\n for field in book_keeping_fields:\n self.assertIn(field , monthly_employer_report['book_keeping_where_vat_is_required'])\n # print(monthly_employer_report['book_keeping_where_vat_is_required']['list_of_names_and_income_tax_where_vat_is_required'])", "title": "" }, { "docid": "99983d844d9cbaa0856d5b2943e0615e", "score": "0.5937383", "text": "def test_repeat_information(self):\n self.fail('Condition not met.')", "title": "" } ]
d04a316f8aae73b9a1bd180338544829
Forward method for NetVladOrthoReg.
[ { "docid": "cf964e07b576847f8055ac4ab4df9c93", "score": "0.0", "text": "def forward(self, inputs, **unused_params):\n\n reshaped_input = tf.reshape(inputs, [-1, self.max_frames, self.feature_size])\n\n #\n # New: Compute attention-based cluster similarity weights:\n #\n with tf.variable_scope(\"cluster_attention\"):\n encoder_block = transformer_utils.TransformerEncoderMod(feature_size=self.feature_size,\n hidden_size=self.encoder_hidden_size,\n num_heads=self.num_heads,\n attention_dropout=self.dropout_ratio,\n ff_filter_size=self.filter_size,\n ff_relu_dropout=0.1,\n is_train=self.is_training,\n scope_id=\"encode\",\n final_size=self.cluster_size)\n cluster_similarities = encoder_block.forward(reshaped_input)\n\n\n cluster_centres = tf.get_variable(\"cluster_centers\", [self.feature_size, self.cluster_size],\n initializer=tf.random_normal_initializer(\n stddev=1 / math.sqrt(self.feature_size)))\n\n # (xi - ck)\n reshaped_input = tf.expand_dims(reshaped_input, axis=3) # B x N x F x 1\n residuals = tf.subtract(reshaped_input, cluster_centres) # B x N x F x C\n\n # Sum of ak(xi) * (xi - ck)\n cluster_similarities = tf.expand_dims(cluster_similarities, axis=3) # B x N x C x 1\n weighted_residuals = tf.multiply(residuals, cluster_similarities)\n residual_sum = tf.reduce_sum(weighted_residuals, axis=1) # B x F x C\n\n # Normalization of flattened global descriptor:\n vlad = tf.nn.l2_normalize(residual_sum, 1) # Normalize per cluster\n vlad = tf.reshape(vlad, [-1, self.cluster_size * self.feature_size]) # Flatten\n vlad = tf.nn.l2_normalize(vlad, 1,\n name=self.scope_id + \"vlad_final\") # Normalize global descriptor\n\n #vlad = tf.reshape(vlad, [-1, self.cluster_size, self.feature_size]) # Optional: output shape\n\n # batch_size x (cluster_size * feature_size)\n return vlad", "title": "" } ]
[ { "docid": "71873500bedcb65572a0302e356690c9", "score": "0.6630026", "text": "def forward(self):\n self.HR_G = self.netG(self.LR)", "title": "" }, { "docid": "c5b726b3aa05d9e7850ccdbcc9745924", "score": "0.63454056", "text": "def forward(self):\n self.fodpred = self.net(self.fodlr)", "title": "" }, { "docid": "ae22194d7828f224675efbaffef5ea8c", "score": "0.60776716", "text": "def forward(self):\n\t\tself.h = self.sigmoid(self.W @ self.patterns)\n\n\t\tones = np.ones(self.Npts)\n\t\tself.h = np.vstack((self.h, ones))\n\n\t\tself.o = self.sigmoid(self.V @ self.h)", "title": "" }, { "docid": "3b865dd6f6fff494a36b7239e965a666", "score": "0.6040339", "text": "def _forward_pass(self, X):", "title": "" }, { "docid": "f4be156efa75831e447a4c74200f57e6", "score": "0.6039019", "text": "def relu_forward_hook_function(module, ten_in, ten_out):\n self.forward_relu_outputs.append(ten_out)", "title": "" }, { "docid": "f4be156efa75831e447a4c74200f57e6", "score": "0.6039019", "text": "def relu_forward_hook_function(module, ten_in, ten_out):\n self.forward_relu_outputs.append(ten_out)", "title": "" }, { "docid": "9624eb632869b8978080bc4c20ae0a1d", "score": "0.601469", "text": "def forward(self, X: Tensor) -> Tensor:\n pass # pragma: no cover", "title": "" }, { "docid": "9624eb632869b8978080bc4c20ae0a1d", "score": "0.601469", "text": "def forward(self, X: Tensor) -> Tensor:\n pass # pragma: no cover", "title": "" }, { "docid": "79224f4788dd8dc90068d30a8bbd12d7", "score": "0.6013028", "text": "def forward(self):\n\n for hl in self.hidden_layers:\n hl.forward()\n self.output_layer.forward()", "title": "" }, { "docid": "a49c4cc47f60f7764bf01b3292a079ff", "score": "0.6004738", "text": "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n out = self.model(x) \n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "title": "" }, { "docid": "29168d5432a7db1722bbc57fe62be284", "score": "0.5969051", "text": "def forward(self,z):\r\n o = z\r\n for l in self.layers:\r\n o = l.forward(o)\r\n return o", "title": "" }, { "docid": "2ff4a19f9092b2a24ffe67f6ca8c410d", "score": "0.59354204", "text": "def forward(self, x):", "title": "" }, { "docid": "afcc468744161c27617bba086e62d6c0", "score": "0.5916494", "text": "def forward(self, src):\n dim = src.shape[1]\n # dist = torch.norm(src, dim=1) #dist from center or add it before?\n\n # FIRST LAYER\n x11 = self.linear11(src)\n x12 = self.linear12(src)\n # ACTIVATIONS POST LAYER 1\n x11 = self.tanh(x11)\n x12 = self.gaussian(x12) # or sigmoid?\n # SECOND LAYER\n x21 = self.linear21(torch.cat([x11, x12], dim=1))\n x22 = self.linear22(torch.cat([x11, x12], dim=1))\n x23 = self.linear23(torch.cat([x11, x12], dim=1))\n # ACTIVATIONS POST LAYER 2\n x21 = self.sin(x21)\n x22 = self.cos(x22)\n x23 = self.gaussian(x23)\n # THIRD LAYER\n x3 = self.linear3(torch.cat([x21, x22, x23], dim=1))\n\n # ACTIVATIONS POST LAYER 3\n out = self.gaussian(x3) # or remove?\n\n # OUT\n # out=x3[-1] #Last dimension, ie size one_hot_dim\n out[:, 0] = self.sigmoid(out[:, 0])\n out[:, 1:] = self.softmax(out[:, 1:])\n return out[-1]", "title": "" }, { "docid": "5e30cea036f8424609762682b80e694e", "score": "0.59137875", "text": "def forward(self, x):\n x = self.layer1(x)\n # x = self.layer2(x)\n x = self.output(x)\n return x.view(-1)", "title": "" }, { "docid": "17caa37304daf2b1b7a7569e41676225", "score": "0.5908465", "text": "def _forward(self, features, *args):\n pass", "title": "" }, { "docid": 
"8d406d3ad198ac2412f3c3059088a6e0", "score": "0.5885031", "text": "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n x = x.reshape(-1, self.sz_input)\n\n for idx, layer in enumerate(self.layers):\n x = layer(x)\n\n out = x\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "title": "" }, { "docid": "d19f4ed6db35a61bf6cd3630fa703fc1", "score": "0.5881967", "text": "def forward(self, x: Tensor) -> Tensor:\n pass", "title": "" }, { "docid": "7f85851c460215ae8fadb25b4b7ca67b", "score": "0.58683854", "text": "def forward(self, *args):\n with torch.no_grad():\n return self.net.forward(*args)", "title": "" }, { "docid": "85cd1feb218ae1f41b27f62066e02f7e", "score": "0.58388567", "text": "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n #raise NotImplementedError\n\n for pre_layer,activation in self.layers:\n x_tilde = pre_layer.forward(x)\n x = activation.forward(x_tilde)\n\n out = x\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "title": "" }, { "docid": "f7aa248dd90ac1cb0f8fd9c10154c7a1", "score": "0.5833892", "text": "def forward(self):\n self.out = self.network(self.batch_x)", "title": "" }, { "docid": "56327e157301def0ffc8f8d3d16b3cb0", "score": "0.5831324", "text": "def forward(self, x):\n\n x = self.act(self.layers[0].forward(x))\n\n for i in range(1,self.nTh):\n x = x + self.h * self.act(self.layers[i](x))\n\n return x", "title": "" }, { "docid": "ba53e46a3f74ec16c471340b6baed3d2", "score": "0.58250225", "text": "def forward(self, x):\n pass", "title": "" }, { "docid": "37fe21fd5ed86ed152085c9b22b9ea5d", "score": "0.5798656", "text": "def forward(self, x):\n out = self.model.forward(x)\n return out", "title": "" }, { "docid": "28b8bda0e724c030d1de252d668ef1c5", "score": "0.57758516", "text": "def forward(self, x): \n return self.model(x)", "title": "" }, { "docid": "a2b3d66f8128144fee56cb9fcf306dfa", "score": "0.5766445", "text": "def forward( self, inputs ) :\n pass", "title": "" }, { "docid": "5aabada339df6cd94a24bbbc076590d0", "score": "0.57533294", "text": "def forward(self, X_conv_out):\n \n X_proj = F.relu(self.projection(X_conv_out))\n X_gate = torch.sigmoid(self.gate(X_conv_out))\n X_highway = torch.mul(X_gate, X_proj) + torch.mul((1 - X_gate),X_conv_out)\n\n return X_highway", "title": "" }, { "docid": "4e2bff5fa51961ae18374fdb7c3e7692", "score": "0.5744095", "text": "def forward(self, input):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n shape = input.size()\n \n if len(shape) == 1:\n input.view(1, -1)\n shape = input.size()\n if len(shape) > 2:\n raise ValueError('Expected 1-D or 2-D tensor (got {})'.format(str(shape)))\n elif input.shape[1] != self.n_neurons:\n raise ValueError(\n 'Expected _ x {} tensor (got {} x {})'.format(str(self.n_neurons), str(shape[0]), str(shape[1])))\n\n fct = CustomBatchNormManualFunction()\n out = fct.apply(input, self.gammas, self.betas, self.eps)\n ########################\n # END OF YOUR CODE #\n #######################\n \n return out", "title": "" }, { "docid": "b0ea9b98bf6ff2811767350aeafbac18", "score": "0.5736423", "text": "def forward(self, input: tensor) -> tensor:\n input, l0 = self.L0(input)\n input, l1 = self.L1(input)\n input, l2 = self.L2(input)\n input, l3 = self.L3(input)\n _, l4 = self.L4(input)\n\n input = self.R3(l4, l3)\n input = self.R2(input, l2)\n input = self.R1(input, l1)\n input = 
self.R0(input, l0)\n\n return self.last_layer(input)", "title": "" }, { "docid": "29d6e055178d48f2d3f3e81e01447ec2", "score": "0.57324886", "text": "def forward(self, features):\n pass", "title": "" }, { "docid": "8109fd839ede0481a335999357eda073", "score": "0.5706891", "text": "def forward(self, x):\n x =F.relu(self.l1(x))\n x =F.relu(self.l2(x))\n x =F.relu(self.l3(x))\n x =F.relu(self.l4(x))\n x =F.relu(self.l5(x))\n return x", "title": "" }, { "docid": "1d38306cc3817d686794ff7f02fd581c", "score": "0.570267", "text": "def forward(self,x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n return self.fc3(x)", "title": "" }, { "docid": "78489c06434059700a9a6fbea726fc7f", "score": "0.5702236", "text": "def forward(self, x):\n return self.model_forward(x)", "title": "" }, { "docid": "276a2a93faeefa2dce8330d968a46678", "score": "0.56946826", "text": "def forward(self):\n\n raise NotImplementedError", "title": "" }, { "docid": "87c66da211cc0abeb34286506f89c9cb", "score": "0.5691196", "text": "def forward(self, x):\n\n x = self.fc1(x)\n x = self.relu(x)\n\n# x = self.fc2(x)\n# x = self.relu(x)\n\n x = self.fc3(x)\n \n x = x.view(-1, 979, 3)\n x = self.softmax(x)\n \n return x", "title": "" }, { "docid": "6961c88745c0ac2d201e35993757de66", "score": "0.5675258", "text": "def forward(self, x) -> Tensor:\n # hack to enable backbone to work properly.\n self.model.device = self.device\n return self.model(x)", "title": "" }, { "docid": "02282b08d4747e8781784235a7682ea3", "score": "0.56741774", "text": "def forward(self, x_or_z: Iterable[Tensor], c: Iterable[Tensor] = None,\n rev: bool = False, jac: bool = True) \\\n -> Tuple[Tuple[Tensor], Tensor]:\n raise NotImplementedError(\n f\"{self.__class__.__name__} does not provide forward(...) method\")", "title": "" }, { "docid": "cd51289e6e2dd2d50c155cf8e8c5f429", "score": "0.56569755", "text": "def forward(self, X, use_parameter_copy=False):\r\n self.outputs_cache = []\r\n \r\n \"\"\"\r\n Sometimes the vector input will be a matrix [[3,2,1]]\r\n othertimes it will be just an array [3, 2, 1]\r\n So the program needs to convert 1d arrays into matrix arrays\r\n \"\"\"\r\n self.outputs_cache.append(np.matrix(X))\r\n \r\n previous_layer_input = self.outputs_cache[0]\r\n for i in range(len(self.layers)):\r\n layer = self.layers[i]\r\n if layer[0] == 'input':\r\n continue \r\n \r\n weight_matrix = self.weights[i-1]\r\n bias = self.biases[i-1]\r\n \r\n if(use_parameter_copy):\r\n weight_matrix = self.weights_copy[i-1]\r\n bias = self.biases_copy[i-1]\r\n \r\n if layer[0] == 'conv':\r\n opts = layer[1]\r\n height, width = self.layers[i-1][1][\"size\"]\r\n \r\n previous_layer_input = previous_layer_input.reshape(height, width)\r\n summed_inputs = self.convolution(previous_layer_input, self.weights[i-1], opts) + bias\r\n summed_inputs = summed_inputs.reshape(height, width)\r\n activation_func = opts[\"activation_func\"]\r\n else:\r\n \"\"\"\r\n Weights are off by one, but not layers.\r\n \"\"\"\r\n previous_height, previous_width = self.layers[i-1][1][\"size\"]\r\n previous_layer_input = previous_layer_input.reshape(1, previous_height * previous_width)\r\n summed_inputs = np.dot(previous_layer_input, weight_matrix) + bias\r\n activation_func = layer[0]\r\n \r\n previous_layer_input = self.activation_func[activation_func](summed_inputs)\r\n self.outputs_cache.append(previous_layer_input)\r\n \r\n return previous_layer_input", "title": "" }, { "docid": "9e6dc8ce9c13b42548c958f9f381b0b1", "score": "0.5641033", "text": "def forward(self, x):\n\n return 
self.model.forward(x)", "title": "" }, { "docid": "7f4095bfa578a59d446b375ad0df8ee8", "score": "0.56231856", "text": "def forward(self, inputs):\n if self.requires_dr:\n inputs[Properties.R].requires_grad_()\n if self.requires_stress:\n # Generate Cartesian displacement tensor\n displacement = torch.zeros_like(inputs[Properties.cell]).to(\n inputs[Properties.R].device\n )\n displacement.requires_grad = True\n inputs[\"displacement\"] = displacement\n\n # Apply to coordinates and cell\n inputs[Properties.R] = inputs[Properties.R] + torch.matmul(\n inputs[Properties.R], displacement\n )\n inputs[Properties.cell] = inputs[Properties.cell] + torch.matmul(\n inputs[Properties.cell], displacement\n )\n\n inputs[\"representation\"] = self.representation(inputs)\n outs = {}\n for output_model in self.output_modules:\n outs.update(output_model(inputs))\n return outs", "title": "" }, { "docid": "e522388258a0322a19fee96b478139ee", "score": "0.5610246", "text": "def relu_forward(x):\n out = None\n #############################################################################\n # TODO: Implement the ReLU forward pass. #\n #############################################################################\n # 1 line of code expected\n\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = x\n return out, cache", "title": "" }, { "docid": "9c32dae2a350674e3ddde8751dd14f41", "score": "0.560829", "text": "def forward(self, input):\r\n x = input\r\n for l in self.linear_layers[:-1]:\r\n x = l(x)\r\n x = self.act(x)\r\n\r\n output_layer = self.linear_layers[-1]\r\n return output_layer(x)", "title": "" }, { "docid": "b89073d391c5a001bbc024203cdcd526", "score": "0.5607582", "text": "def forward(self,x):\n\t\tout = self.linear1(x)\n\t\tout = self.act(out)\n\n\t\tout = self.linear2(out)\n\t\t\n\t\treturn out", "title": "" }, { "docid": "c2663ed52055bdc9e2b7dbd9164e2b33", "score": "0.56043416", "text": "def forward(self, inputs):\n return gradient_reversal(inputs)", "title": "" }, { "docid": "ee90ae088d42f53c0cf6b89d6e2ffb97", "score": "0.559809", "text": "def forward(self):\n raise NotImplementedError", "title": "" }, { "docid": "ee90ae088d42f53c0cf6b89d6e2ffb97", "score": "0.559809", "text": "def forward(self):\n raise NotImplementedError", "title": "" }, { "docid": "ee90ae088d42f53c0cf6b89d6e2ffb97", "score": "0.559809", "text": "def forward(self):\n raise NotImplementedError", "title": "" }, { "docid": "cad7b342d7ea2bddf0e5bc39bbb39845", "score": "0.55956966", "text": "def forward(self, src):\n\n x1 = self.linear1(src)\n x1 = self.relu(x1) # or sigmoid\n if self.n_layers == 2:\n out = self.linear2(x1)\n # out=x3[-1] #Last dimension, ie size one_hot_dim\n out[:, 0] = self.sigmoid(out[:, 0])\n out[:, 1:] = self.softmax(out[:, 1:])\n else:\n x2 = self.linear2(x1)\n x2 = self.sigmoid(x2)\n out = self.linear3(x2)\n # out=x3[-1] #Last dimension, ie size one_hot_dim\n out[:, 0] = self.sigmoid(out[:, 0])\n out[:, 1:] = self.softmax(out[:, 1:])\n return out[-1]", "title": "" }, { "docid": "ca2255abc12e6ea77bbf7af5996fa539", "score": "0.5590943", "text": "def forward(self,data):\n # def forward(self,gt,idx, img,tracker,bbox,se):\n \n template = data['template'].cuda().unsqueeze(0)\n search =data['search'].cuda().unsqueeze(0)\n bbox=data['bbox']\n\n \n zf,zf1 = self.backbone(template)\n xf,xf1 = self.backbone(search)\n xff,ress=self.grader(xf1,zf1)\n \n anchors=self.getcenter(xff,gt) 
#gt != bbox\n size=xff.size()[3] \n data2=self.generatedata(anchors,bbox,size)\n \n label_cls = t.Tensor(data2['label_cls']).cuda().unsqueeze(0)\n label_loc = t.Tensor(data2['label_loc']).cuda().unsqueeze(0)\n label_loc_weight = t.Tensor(data2['label_loc_weight']).cuda().unsqueeze(0)\n labelcls2=data2['label_cls2']\n \n \n \n #new3\n cls1,cls2,cls3,loc=self.new(xf,zf,ress)\n shape=cls1.size()[3]\n label_cls=label_cls[:,:,size//2-shape//2:size//2+shape//2+1,size//2-shape//2:size//2+shape//2+1].contiguous()\n label_loc=label_loc[:,:,:,size//2-shape//2:size//2+shape//2+1,size//2-shape//2:size//2+shape//2+1].contiguous()\n label_loc_weight=label_loc_weight[:,:,size//2-shape//2:size//2+shape//2+1,size//2-shape//2:size//2+shape//2+1].contiguous()\n \n pre=(8*(np.linspace(0,size-1,size))+63).reshape(-1,1)-287//2\n pr=t.zeros(size**2,4).cuda()\n pr[:,0]=t.Tensor(np.maximum(0,np.tile(pre,(size)).T.reshape(-1)+287//2))\n pr[:,1]=t.Tensor(np.maximum(0,np.tile(pre,(size)).reshape(-1)+287//2))\n \n labelxff=t.zeros_like(xff).cuda()\n labelxff[0,0,:,:]=(pr[:,0]-bbox[0]).reshape(21,21)\n labelxff[0,1,:,:]=(bbox[2]-pr[:,0]).reshape(21,21)\n labelxff[0,2,:,:]=(pr[:,1]-bbox[1]).reshape(21,21)\n labelxff[0,3,:,:]=(bbox[3]-pr[:,1]).reshape(21,21)\n labelxff=labelxff/143\n\n pr[:,2]=self.con(xff[0,0,:,:]).view(-1)+self.con(xff[0,1,:,:]).view(-1)\n pr[:,3]=self.con(xff[0,2,:,:]).view(-1)+self.con(xff[0,3,:,:]).view(-1)\n pr[:,0]=pr[:,0]-self.con(xff[0,0,:,:]).view(-1)+pr[:,2]/2\n pr[:,1]=pr[:,1]-self.con(xff[0,2,:,:]).view(-1)+pr[:,3]/2\n \n \n\n \n def transform(center):\n x, y, w, h = center[:,0], center[:,1], center[:,2], center[:,3]\n x1 = x - w * 0.5\n y1 = y - h * 0.5\n x2 = x + w * 0.5\n y2 = y + h * 0.5\n return t.cat((x1.view(-1,1),y1.view(-1,1),x2.view(-1,1),y2.view(-1,1)),1)\n pr=transform(pr)\n \n \n index=np.minimum(shape-1,np.maximum(0,np.int32((bbox-63-(size-shape)*4)/8)))\n w=int(index[2]-index[0])\n h=int(index[3]-index[1])\n\n weightcls3=t.zeros(1,1,shape,shape).cuda()\n weightcls3[0,0,index[1]:index[3]+1,index[0]:index[2]+1]=1\n weightcls33=t.zeros(1,1,shape,shape).cuda()\n for ii in np.arange(index[1],index[3]+1):\n for jj in np.arange(index[0],index[2]+1):\n l1=t.min(t.Tensor([ii-index[1]]),t.Tensor([(index[3]-ii)]))/(t.max(t.Tensor([ii-index[1]]),t.Tensor([(index[3]-ii)]))+1e-4)\n l2=t.min(t.Tensor([jj-index[0]]),t.Tensor([(index[2]-jj)]))/(t.max(t.Tensor([jj-index[0]]),t.Tensor([(index[2]-jj)]))+1e-4)\n weightcls33[0,0,ii,jj]=weightcls3[0,0,ii,jj]*t.sqrt(l1*l2)\n \n\n \n cls1 = self.log_softmax(cls1) \n cls2 = self.log_softmax(cls2) \n\n \n cls_loss1 = select_cross_entropy_loss(cls1, label_cls,0.5)\n cls_loss2 = select_cross_entropy_loss(cls2, labelcls2,0.5)\n cls_loss3 = l1loss(cls3, weightcls33,weightcls3) \n\n cls_loss= cls_loss3 + cls_loss1 + cls_loss2\n loc_loss = weight_l1_loss(loc, label_loc, label_loc_weight) \n \n weightxff=t.zeros(1,1,size,size).cuda()\n index2=np.int32((bbox-63)/8) #特征图上的位置\n w=int(index2[2]-index2[0])\n h=int(index2[3]-index2[1])\n weightxff[0,0,np.maximum(0,index2[1]-h//2):np.minimum(size,index2[3]+1+h//2),np.maximum(0,index2[0]-w//2):np.minimum(size,index2[2]+1+w//2)]=1\n\n \n shapeloss=l1loss(xff,labelxff,weightxff) \n \n \n\n \n outputs=loc_loss+cls_loss+shapeloss #2 4 1 都用loss2\n\n return outputs", "title": "" }, { "docid": "0329eb71dcb93e2a2ed7e2efd1ed7153", "score": "0.5589707", "text": "def forward(self, target, source): \r\n #* Concatenate source and target\r\n x = torch.cat([target, source],1)\r\n x_1l = self.module1l(x)\r\n x_2l = 
self.module2l(x_1l)\r\n x_3l = self.module3l(x_2l)\r\n x_4l = self.module4l(x_3l)\r\n x_5l = self.module5l(x_4l)\r\n x_6l = self.module6l(x_5l)\r\n # embed = embed.view(-1, 1, 4, 4)\r\n # x = torch.cat([x_6l, embed], 1)\r\n x=x_6l\r\n\r\n r6 = self.module6r(x)\r\n x = torch.cat([r6, x_5l], 1)\r\n r5 = self.module5r(x)\r\n x = torch.cat([r5, x_4l], 1)\r\n r4 = self.module4r(x)\r\n x = torch.cat([r4, x_3l], 1)\r\n r3 = self.module3r(x)\r\n x = torch.cat([r3, x_2l], 1)\r\n r2 = self.module2r(x)\r\n x = torch.cat([r2, x_1l], 1)\r\n x = self.module1r(x)\r\n return x\r\n\r\n # x = self.leftmodule(x,embed)\r\n # x = self.rightmodule(x)\r\n # return x\r", "title": "" }, { "docid": "78382338c45908367d6211e5e69ba844", "score": "0.5586166", "text": "def forward(self, x):\n ### START YOUR CODE HERE ### (You can change anything inside this block)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.flattening(x)\n x = self.layer4(x)\n x = self.layer5(x)\n return x\n ### END YOUR CODE HERE ### ", "title": "" }, { "docid": "247124953ec614c39c027c2e71c44e72", "score": "0.558313", "text": "def forward(self, x):\n z = self.model(z)\n return z", "title": "" }, { "docid": "b4ed1a4ce75679cc6f54fc809f89e993", "score": "0.5575662", "text": "def relu_forward(x):\n out, cache = None, x\n ########################################################################\n # TODO: Calculate {out} #\n ########################################################################\n\n ########################################################################\n # End of the code #\n ########################################################################\n return out, cache", "title": "" }, { "docid": "985cbc1183bc26f98059ade0be88b7c5", "score": "0.55717224", "text": "def forward(self,):\n raise NotImplementedError", "title": "" }, { "docid": "a8438774483e860bc23e3f7d39c3603b", "score": "0.5570057", "text": "def forward(self, x):\n arm_sources = list()\n odm_sources = list()\n for i in range(23):\n x = self.vgg[i](x)\n c2 = x\n c2 = self.L2Norm_4_3(c2)\n arm_sources.append(c2)\n for k in range(23, len(self.vgg)):\n x = self.vgg[k](x)\n c3 = x\n c3 = self.L2Norm_5_3(c3)\n arm_sources.append(c3)\n x = F.relu(self.extras[0](x), inplace=True)\n x = F.relu(self.extras[1](x), inplace=True)\n c4 = x\n arm_sources.append(c4)\n x = F.relu(self.extras[2](x), inplace=True)\n x = F.relu(self.extras[3](x), inplace=True)\n c5 = x\n arm_sources.append(c5)\n if len(self.extras) > 4:\n x = F.relu(self.extras[4](x), inplace=True)\n x = F.relu(self.extras[5](x), inplace=True)\n c6 = x\n arm_sources.append(c6)\n odm_sources = self.weave(arm_sources)\n return arm_sources, odm_sources", "title": "" }, { "docid": "516daa46e26621a3a98aa41a1ed9ba00", "score": "0.5569684", "text": "def _forward(self, x):\n raise NotImplementedError(\"forward not implemented.\")", "title": "" }, { "docid": "c205a780d50e749fde6e6c9398dcc404", "score": "0.5567876", "text": "def forward(self):\n pass", "title": "" }, { "docid": "c3d10844ce77bccbe18c655ad7ed8165", "score": "0.5562897", "text": "def forward(self, x: Tensor) -> Tensor:\n direction1_conv = self.direction1_conv(x)\n direction2_conv = self.direction2_conv(x)\n direction1_feat = self.direction1_pool(direction1_conv)\n direction2_feat = self.direction2_pool(direction2_conv)\n aftpool_conv = self.aftpool_conv(direction1_feat + direction2_feat)\n conv1 = self.conv1(x)\n relu = self.relu(aftpool_conv + conv1)\n conv2 = self.conv2(relu)\n return conv2", "title": "" }, { "docid": 
"531037f4aed7228617544351e3ad708d", "score": "0.5551876", "text": "def forward(self, x_conv_out): \n x_proj = torch.relu(self.conv_out_proj(x_conv_out))\n x_gate = torch.sigmoid(self.gate(x_conv_out))\n\n x_highway = x_gate * x_proj + (1 - x_gate) * x_conv_out\n\n return x_highway", "title": "" }, { "docid": "7138b9e9d37c879ce8c047ef07ca30bd", "score": "0.5547672", "text": "def forward(self, x):\n\n sources = list()\n loc = list()\n conf = list()\n if self.modeltype == 'SSD300KL':\n loc_std = list()\n # apply vgg up to conv4_3 relu\n for k in range(23):\n # print('debug: apply vgg')\n x = self.vgg[k](x)\n\n if self.forward_vgg_base_only:\n return x\n # TODO: Why apply L2norm already? => because conv4_3 has larger scale than the rest\n s = self.L2Norm(x)\n sources.append(s)\n\n # apply vgg up to fc7 TODO: Why FC layers? => Doesn't use FC layers, UP TO FC layers..\n for k in range(23, len(self.vgg)):\n # print('debug2: apply vgg')\n x = self.vgg[k](x)\n sources.append(x)\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n # print('debug3: apply extra layers')\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1: #TODO: Why only every second layer of the extra layers? => because thats how the paper states it. It has conv blocks of 2 conv layers\n sources.append(x)\n\n if self.modeltype != 'SSD300KL':\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n # print('debug4: apply multibox head')\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n # print('debug foward 1')\n if self.phase == \"test\":\n # if self.sampling_strategy != 'p-max_localization-stability' :\n output = self.detect(loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(conf.size(0), -1,self.num_classes)), # conf preds\n self.priors.type(type(x.data)), # default boxes\n )\n # else:\n # output = self.detect()\n\n # training phase => no merging or other forwards used\n else:\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n else:\n # apply multibox head to source layers\n for (x, l, c, std) in zip(sources, self.loc, self.conf, self.loc_std):\n # print('debug4: apply multibox head')\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n loc_std.append(std(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n loc_std = torch.cat([o.view(o.size(0), -1) for o in loc_std], 1)\n\n if self.phase == \"test\":\n # during training alpha = log(sigma^2), during testing, this needs to be converted back\n loc_std = torch.exp(loc_std)\n\n output = self.detect(loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(conf.size(0), -1,self.num_classes)), # conf preds\n self.priors.type(type(x.data)), # default boxes\n torch.abs(loc_std.view(loc_std.size(0), -1, 4)) # alphas (predicted log of std deviations of loc preds)\n )\n else:\n # during training, alpha = log(sigma^2) is predicted\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors,\n torch.abs(loc_std.view(loc_std.size(0), -1, 4)) #alphas\n )\n\n return output", "title": "" }, { "docid": "56661dc1490f81ce40e174ff628c76c1", "score": 
"0.5547487", "text": "def forward(self):\n self.fake_B = self.netG_A(self.real_A, self.prior_z_B) \n self.mu_z_realA = self.netE_A(torch.cat((self.real_A, self.fake_B),1))\n self.rec_A = self.netG_B(self.fake_B, self.mu_z_realA)\n self.rec_prior_z_B = self.netE_B(torch.cat((self.real_A, self.fake_B),1))\n \n self.fake_A = self.netG_B(self.real_B, self.prior_z_A) \n self.mu_z_realB = self.netE_B(torch.cat((self.fake_A, self.real_B),1))\n self.rec_B = self.netG_B(self.fake_A, self.mu_z_realB)\n self.rec_prior_z_A = self.netE_B(torch.cat((self.fake_A, self.real_B),1))", "title": "" }, { "docid": "349080759e22f4c00e27fa2217537476", "score": "0.5536985", "text": "def forward(self, obs):\n return self.module(obs)", "title": "" }, { "docid": "da7cef8509a5b765aa0ad6d7be6962b6", "score": "0.55360246", "text": "def forward(self, x):\n out = self.layers(x)\n return out", "title": "" }, { "docid": "b17e5aa9fc23258b98d3d1fe505b770c", "score": "0.55275697", "text": "def forward(self, x:Tensor) -> Tensor:\n for layer in self.layers: \n x = layer(x)\n return x", "title": "" }, { "docid": "768406794a38089e3b30c4511ebb8601", "score": "0.5525852", "text": "def forward(self, state):\n # Pass the input tensor through each of our operations\n x, h = self.input(state)\n #x = x[:, self.step_window - 1, :] ##batch, seq, inp_size\n # Reshaping the outputs such that it can be fit into the fully connected layer\n #x = x.contiguous().view(-1, self.hidden_dim)\n x = x.contiguous().view(state.shape[0], -1)\n \n x = self.relu(x)\n x = self.hidden(x)\n x = self.relu(x)\n x = self.hidden2(x)\n x = self.relu(x)\n \n x = self.output(x)\n \n return x", "title": "" }, { "docid": "0bf2f17daa6068c949504a011b29cd85", "score": "0.55236065", "text": "def forward(self, x, debug=False):\n\n c = torch.addmv(self.bA, self.A, x) #apical input (plastic synapses)\n d = torch.addmv(self.b1, self.J1, x) #basal input (fixed weight synapses)\n r = torch.sigmoid( torch.addmv(torch.addmv(self.bW, self.Wx, x), self.Wh, self.h) ) #read gate (recurrent!)\n a1 = r*d + (1-r)*c #hidden layer activation. r=1 read from fixed syn, r=0 read from plastic \n self.h = self.f( a1 ) #hidden layer output \n \n u = torch.sigmoid( torch.addmv(torch.addmv(self.bU, self.Ux, x), self.Uh, self.h) ) #update gate \n self.A = u.unsqueeze(1)*self.A + torch.ger(1-u, x) #plastic weight update. 
u=1 retain, u=0 overwrite\n \n a2 = torch.addmv(self.b2, self.J2, self.h) #output layer activation\n y = self.fOut( a2 ) #output layer output\n \n if debug:\n execvars = locals()\n execvars = {var:execvars[var].clone().detach() for var in ('c','d','r','a1','u','a2','y')}\n execvars['h'] = self.h.clone().detach()\n execvars['A'] = self.A.clone().detach()\n execvars['WA'] = self.J1.clone().detach() + self.A.clone().detach()\n return execvars\n return y", "title": "" }, { "docid": "ecbc7431c520c391565a384ac62149b0", "score": "0.5522756", "text": "def forward(self, *args):\n\n # If torch.is_grad_enabled() and self.module.training, update u and v.\n if self.module.training:\n self._update_u_v()\n # Otherwise don't update.\n else:\n self._noupdate_u_v()\n # In both above cases, this will still calculate the spectral normalization and set the normalized chosen attribute to the inner module.\n\n # Finally do a forward pass of the inner module.\n return self.module.forward(*args)", "title": "" }, { "docid": "a9995ea64a8981285a3d826562aee53c", "score": "0.55224055", "text": "def forward(self, inputx):\n raise NotImplementedError", "title": "" }, { "docid": "f5438f8330fad1418368bdcd5f07cb64", "score": "0.55115527", "text": "def forward(self, x):\n self.cache[\"input\"] = x\n for i in range(len(self.layers)):\n if i==0:\n self.layers[i].forward(x)\n else:\n previous = self.layers[i-1].output # (x, y) * (y, 1)\n self.layers[i].forward(previous)", "title": "" }, { "docid": "6c6477566f2a99c431907bc6bc8f45a7", "score": "0.5508786", "text": "def forward(self, state: torch.Tensor) -> torch.Tensor:\n x = state\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n x = F.relu(x)\n # x = self.fc3(x)\n # x = F.relu(x)\n x = self.fc4(x)\n x = torch.tanh(x) # TODO: remove this layer\n\n return x", "title": "" }, { "docid": "82113daec5e727e60bd3e95d6c9a8eb1", "score": "0.5508522", "text": "def forward_process(self):\n x = self.prev_layer.output\n self.z = np.add(np.dot(x, self.W), self.b)\n\n # if self.optimizer.name == \"SGD\":\n # if self.optimizer.nesterov:\n # nesterov_W = np.subtract(self.W, self.optimizer.gamma * self.cache_W)\n # nesterov_b = np.subtract(self.b, self.optimizer.gamma * self.cache_b)\n # self.z_nesterov = np.add(np.dot(x, nesterov_W), nesterov_b)\n\n self.output = self.act(self.z)\n\n assert self.output.shape == self.output_size\n\n for layer in self.next_layer:\n layer.forward_process()", "title": "" }, { "docid": "91ec14ff33f80dce4673d276498a9571", "score": "0.55082315", "text": "def forward(self):\n self.fake_A_BC = self.netG_BC(self.real_A) # G_BC(A)\n self.fake_B_BC = self.netG_BC(self.real_B) # G_BC(B)\n self.fake_C_BC = self.netG_BC(self.real_C) # G_BC(C)\n\n #self.rev_A = self.netG_BC_rev(self.real_A) # G_BC(A)\n #self.rev_B = self.netG_BC_rev(self.real_B) # G_BC(B)\n #self.rev_C = self.netG_BC_rev(self.real_C) # G_BC(C)", "title": "" }, { "docid": "75dcfbab03753e89ab11874fbc06169f", "score": "0.5503262", "text": "def forward(self, inputs: Tensor) -> Tensor:\r\n raise NotImplementedError", "title": "" }, { "docid": "011d19e782a562e1586471caf680121f", "score": "0.5502175", "text": "def forward(self, input):\n\n ########################\n # PUT YOUR CODE HERE #\n\n # check correctness of shape\n if not (input.shape[1] == self.n_neurons): raise Exception(\n \"Input size is not correct. 
Input is {}, while it was initalized with {}\".format(input.shape[1],\n self.n_neurons))\n # Instantiate a CustomBatchNormManualFunction.\n custom_batch_norm_manual_function = CustomBatchNormManualFunction()\n\n # Call it via its .apply() method.\n out = custom_batch_norm_manual_function.apply(input, self.gamma, self.beta, self.eps)\n\n # END OF YOUR CODE #\n #######################\n\n return out", "title": "" }, { "docid": "be4562a9ad281c4afcde3b47da5596ba", "score": "0.5498866", "text": "def forward(self, x:Tensor) -> Tensor:\n return x.relu()", "title": "" }, { "docid": "9c6d8eec80bd43410ef547c77f6eb75b", "score": "0.5489186", "text": "def forward(self, x, x_tild):\n x_all = torch.cat([x, x_tild], -1)\n p = self.model(x_all)\n return p.view((-1,))", "title": "" }, { "docid": "c9c646211abb2ca29fc63949793a4bbc", "score": "0.5488537", "text": "def forward(self, inputs, *args, **kwargs):\n\n x = inputs\n y = self.layer_norm(x)\n\n # Get layer output\n y = self.layer(y, *args, **kwargs)\n\n # Postprocessing: apply dropout and residual connection\n if self.is_train:\n y = tf.nn.dropout(y, rate=self.postprocess_dropout)\n return x + y", "title": "" }, { "docid": "e2206fbbf75f13f47c5cb5e761969c56", "score": "0.5486745", "text": "def forward(self):\n\t\tpass", "title": "" }, { "docid": "f495986d55c644da61da5c203c5349fa", "score": "0.54825795", "text": "def forward(ctx, run_ctx: RuntimeStates, *input_tensor_list: Tuple[torch.Tensor, ...]) -> Tuple[torch.Tensor, ...]:\n ctx.current_step = run_ctx.global_states.execution_step\n ctx.run_ctx = run_ctx\n\n if ctx.current_step >= 0:\n print(f\"{'='*6} Completed forward pass for STEP {ctx.current_step} {'='*6}\")\n\n if ORT_NO_INCREASE_GLOBAL_STEP[0] is False:\n ctx.run_ctx.global_states.execution_step += 1\n\n return tuple(t.detach().requires_grad_(t.requires_grad) for t in input_tensor_list)", "title": "" }, { "docid": "207cecc02e7d760adfb3d8a0dff39672", "score": "0.5481281", "text": "def forward(self, x):\n return self.model(x)", "title": "" }, { "docid": "207cecc02e7d760adfb3d8a0dff39672", "score": "0.5481281", "text": "def forward(self, x):\n return self.model(x)", "title": "" }, { "docid": "207cecc02e7d760adfb3d8a0dff39672", "score": "0.5481281", "text": "def forward(self, x):\n return self.model(x)", "title": "" }, { "docid": "149569d940b9a2dccfa17447de445736", "score": "0.5479809", "text": "def forward(self, state):\n x = state.unsqueeze(1) # add dimension for the convolutions\n x = F.relu(self.layers[0](x)) # first layer\n for layer in self.layers[1:-1]:\n x = F.relu(layer(x)) # middle layers\n return torch.tanh(self.layers[-1](x)) # last layer", "title": "" }, { "docid": "549f23325347e218cadf325467e59ad5", "score": "0.5479493", "text": "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n\n # get all the features\n out = self.feats(x)\n\n # map all features to n_classes dimensional space\n out = self.linearLayer(out.view(x.shape[0], -1))\n\n # END OF YOUR CODE #\n #######################\n\n return out", "title": "" }, { "docid": "30cde4e2ed4b830b912557c509ff2fd1", "score": "0.5476395", "text": "def forward(self, x):\n out = self.model(x)\n return self.last_layer(squeeze(out))", "title": "" }, { "docid": "8ffa528e9b950535f3985a3684cd641a", "score": "0.54756325", "text": "def forward(self, obs):\n # Don't concat last two layers\n # Remeber each layer has an activation function\n # So count = 4\n\n in_put = obs.clone()\n\n for fc_layer, activation_f in group_layers(self.pi[:-4]):\n x = fc_layer(obs)\n x = 
activation_f(x)\n x = torch.cat([x, in_put], dim=-1)\n\n obs = x\n\n for layer in self.pi[-4:]:\n x = layer(x)\n\n act = x\n\n return act * self.act_limit", "title": "" }, { "docid": "caf090e16074efbdeb2df731ec50e992", "score": "0.5472815", "text": "def rnn_forward(x, h0, Wx, Wh, b):\n h, cache = None, []\n ##############################################################################\n # TODO: Implement forward pass for a vanilla RNN running on a sequence of #\n # input data. You should use the rnn_step_forward function that you defined #\n # above. You can use a for loop to help compute the forward pass. #\n ##############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n N, T, _ = x.shape\n _, H = h0.shape\n \n next_h = h0\n h = np.zeros((N, T, H))\n \n for t in np.arange(T):\n next_h, step_cache = rnn_step_forward(x[:, t, :], next_h, Wx, Wh, b)\n h[:, t, :] = next_h\n cache.append(step_cache)\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return h, cache", "title": "" }, { "docid": "ba5980f99929ee99f566dc2286371c7e", "score": "0.5471552", "text": "def forward(self, x: Union[Tensor, AbsEle]) -> Union[Tensor, AbsEle]:\n for lin in self.all_linears[:-1]:\n x = lin(x)\n x = self.acti(x)\n\n x = self.all_linears[-1](x)\n return x", "title": "" }, { "docid": "1a5b2e78a87193ddba439f9fc58d1a22", "score": "0.54670876", "text": "def forward_pass(self, x, w, b, gamma, beta, mode='train'):\n affine_out = self.affine_layer.forward_pass(x, w, b)\n batch_norm_out = self.batch_norm_layer.forward_pass(affine_out, gamma, beta, mode)\n relu_out = self.relu_layer.forward_pass(batch_norm_out)\n\n return relu_out", "title": "" }, { "docid": "1b2c053ecee0086f088378a215ceacc8", "score": "0.5458805", "text": "def forward(self, x):\n\n \"\"\" YOUR CODE HERE!\n Complete this function, based on the network architecture\n that you have chosen in Net.__init__\n \"\"\"\n # Hidden layer\n x = F.relu(self.fc1(x))\n\n # Output layer\n actions_value = self.out(x)\n return actions_value", "title": "" }, { "docid": "70e42bc301299e488f13f0fb2c3b744a", "score": "0.54522395", "text": "def forward(self, x):\n out = self.net(x)\n return out", "title": "" }, { "docid": "7f0dbff1fc023699cc6c78c9a692e296", "score": "0.54430234", "text": "def forward(self, graph_holders, init_feature, degree_norm=None):\n # pad a zeros to prevent empty graph happen\n zeros_tensor1 = paddle.zeros([1, init_feature.shape[-1]])\n zeros_tensor2 = paddle.zeros([1, 1], dtype=\"int64\")\n init_feature = paddle.concat([zeros_tensor1, init_feature])\n feature = init_feature\n self.hcl_buffer.append(feature)\n\n if degree_norm is not None:\n degree_norm = degree_norm.reshape([self.etype_len, -1]).T\n degree_norm = paddle.sum(degree_norm, -1)\n degree_norm = model_util.get_degree_norm(degree_norm)\n degree_norm = paddle.concat(\n [paddle.ones(\n [1, 1], dtype=\"float32\"), degree_norm], axis=0)\n\n for i in range(self.num_layers):\n graph_holder = graph_holders[self.num_layers - i - 1]\n num_nodes = graph_holder[0] + 1\n next_num_nodes = graph_holder[1] + 1\n edges_src = graph_holder[2] + 1\n edges_dst = graph_holder[3] + 1\n split_edges = graph_holder[4]\n\n # if self.return_weight:\n # edges_weight = graph_holder[5]\n\n nxt_fs = []\n feature = 
self.shared_sub_pre_layer[i](feature, degree_norm)\n self.hcl_buffer.append(feature)\n\n for j in range(self.etype_len):\n start = paddle.zeros(\n [1], dtype=\"int64\") if j == 0 else split_edges[j - 1]\n new_edges_src = paddle.concat(\n [zeros_tensor2, edges_src[start:split_edges[j]]])\n new_edges_dst = paddle.concat(\n [zeros_tensor2, edges_dst[start:split_edges[j]]])\n graph = pgl.Graph(\n num_nodes=num_nodes,\n edges=paddle.concat(\n [new_edges_src, new_edges_dst], axis=1))\n\n # generate feature of single relation\n nxt_f = self.rgnn_dict[(i, j)](graph, feature, next_num_nodes,\n degree_norm)\n nxt_fs.append(nxt_f)\n # feature intergation\n feature = self.rgnn_dict[(i, self.etype_len)](nxt_fs)\n\n # heter graph residual\n feature = init_feature[:\n next_num_nodes] * self.alpha_residual + feature * (\n 1 - self.alpha_residual)\n if degree_norm is not None:\n degree_norm = degree_norm[:next_num_nodes]\n # remove first zeros to prevent empty graph happen\n return feature[1:]", "title": "" }, { "docid": "a2c0e63b2a040167a7702987e316c24e", "score": "0.54401815", "text": "def fc_relu_forward(x, w, b):\n out, cache = None, None\n \n ###########################################################################\n # TODO: Implement fc-relu forward pass. #\n ###########################################################################\n pass\n outs, fc = fc_forward(x, w, b)\n out, rl = relu_forward(outs)\n cache = (fc, rl) ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n return out, cache", "title": "" }, { "docid": "2b0a98e6fbd29c889234f684738e9c0a", "score": "0.5437155", "text": "def forward(self, inp):\n x = inp[0]\n seq_length, batch_size, _ = x.shape\n x_pe = self.pe[:seq_length, :]\n x_pe = torch.repeat_interleave(x_pe.unsqueeze(1), batch_size, dim=1)\n x = torch.cat([x, x_pe], dim=-1)\n x = self.dropout(torch.relu(self.normalize(self.linear(x))))\n inp[0] = x\n return inp", "title": "" }, { "docid": "e856015093cfa58862e79f6b8a894129", "score": "0.54359686", "text": "def forward(self, x):\n\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x", "title": "" }, { "docid": "e856015093cfa58862e79f6b8a894129", "score": "0.54359686", "text": "def forward(self, x):\n\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x", "title": "" }, { "docid": "e856015093cfa58862e79f6b8a894129", "score": "0.54359686", "text": "def forward(self, x):\n\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x", "title": "" }, { "docid": "76884cf7d2ff5f0a20bcc19d4809fdd2", "score": "0.54320407", "text": "def hybrid_forward(self, F, x, *args):\n x = self.feature(x)\n x = self.yolo_block(x)\n all_box_centers, all_box_scales, all_objectness, all_class_pred, all_anchors, \\\n all_offsets, all_feat_maps, all_detections = [], [], [], [], [], [], []\n if autograd.is_training():\n dets, box_centers, box_scales, objness, class_pred, anchors, offsets = self.yolo_output(x)\n all_box_centers.append(box_centers.reshape((0, -3, -1)))\n all_box_scales.append(box_scales.reshape((0, -3, -1)))\n all_objectness.append(objness.reshape((0, -3, -1)))\n all_class_pred.append(class_pred.reshape((0, -3, -1)))\n all_anchors.append(anchors)\n all_offsets.append(offsets)\n # here we use fake featmap to reduce memory consuption, only shape[2, 3] is used\n fake_featmap = F.zeros_like(x.slice_axis(axis=0, begin=0, end=1).slice_axis(axis=1, begin=0, end=1))\n all_feat_maps.append(fake_featmap)\n else:\n dets = 
self.yolo_output(x)\n all_detections.append(dets)\n\n if autograd.is_training():\n # during training, the network behaves differently since we don't need detection results\n if autograd.is_recording():\n # generate losses and return them directly\n box_preds = F.concat(*all_detections, dim=1)\n all_preds = [F.concat(*p, dim=1) for p in [\n all_objectness, all_box_centers, all_box_scales, all_class_pred]]\n all_targets = self._target_generator(box_preds, *args)\n return self._loss(*(all_preds + all_targets))\n\n # return raw predictions, this is only used in DataLoader transform function.\n return (F.concat(*all_detections, dim=1), all_anchors, all_offsets, all_feat_maps,\n F.concat(*all_box_centers, dim=1), F.concat(*all_box_scales, dim=1),\n F.concat(*all_objectness, dim=1), F.concat(*all_class_pred, dim=1))\n\n # concat all detection results from different stages\n result = F.concat(*all_detections, dim=1)\n # apply nms per class\n if self.nms_thresh > 0 and self.nms_thresh < 1:\n result = F.contrib.box_nms(\n result, overlap_thresh=self.nms_thresh, valid_thresh=0.01,\n topk=self.nms_topk, id_index=0, score_index=1, coord_start=2, force_suppress=False)\n if self.post_nms > 0:\n result = result.slice_axis(axis=1, begin=0, end=self.post_nms)\n ids = result.slice_axis(axis=-1, begin=0, end=1)\n scores = result.slice_axis(axis=-1, begin=1, end=2)\n bboxes = result.slice_axis(axis=-1, begin=2, end=None)\n return ids, scores, bboxes", "title": "" }, { "docid": "825b62c9b8e727e3626cf1340fefaaa4", "score": "0.54302377", "text": "def forward(self):\n self.views[self.figure].forward()\n self.positions[self.figure].forward()", "title": "" }, { "docid": "a0c69c942b7d1567c859b5f355fbc023", "score": "0.54290086", "text": "def forward(self, input, target):\n return self.updateOutput(input, target)", "title": "" }, { "docid": "4fa42c073082abff8e6558e11a37d857", "score": "0.5423739", "text": "def forward(self, *args, **kwargs):\n x = super(PETRTransformerEncoder, self).forward(*args, **kwargs)\n if self.post_norm is not None:\n x = self.post_norm(x)\n return x", "title": "" }, { "docid": "ac26ac9d7bf3014015bf66628671df6d", "score": "0.542298", "text": "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = self.fc4(x)\n# x = self.fc3(x)\n \n return x", "title": "" } ]
beaf0668b7f4e27436a093896c5aac5c
Description contains select property options.
[ { "docid": "4bad8f2c849cc57db064cf607e5f9106", "score": "0.6118933", "text": "def test_description(self):\n description = SampleClass.get_description()\n descr_json = json.dumps(description)\n self.assertIsInstance(json.loads(descr_json), dict)\n self.assertIn('select_property', description)\n prop = description.get('select_property')\n self.assertIsNotNone(prop.get('options'))\n self.assertIn('options', prop)\n prop_opt = prop.get('options')\n self.assertDictEqual(prop_opt, {\n 'option1': 0,\n 'option2': 1,\n 'option3': 2\n })", "title": "" } ]
[ { "docid": "e2512257a3a8f5dfb5d85dfad4e052fd", "score": "0.7056538", "text": "def getOptionDescriptions(self) -> List[unicode]:\n ...", "title": "" }, { "docid": "f6d2d26e25b447083259be74f68c47d8", "score": "0.66717213", "text": "def _select (self, name, description=None):\n str = u'<select name=\"%s\" size=\"1\">' % (name,)\n if not description:\n description = self.valid_options[name]\n keys = description.keys()\n keys.sort()\n for value in keys:\n if value == self.values[name]:\n selected = u'selected'\n else:\n selected = u''\n str += u'<option value=\"%s\" %s>%s</option>' % (value, selected, description[value],)\n str += u'</select>'\n return str", "title": "" }, { "docid": "a3cdd949d7a3c442bb3411db4650bb6b", "score": "0.63967276", "text": "def props(self):\n return {\n \"name\": self.dropdowns[\"names\"].label,\n \"number\": self.dropdowns[\"numbers\"].label,\n \"reverse\": len(self.checkbox.active) == 1,\n }", "title": "" }, { "docid": "4d21a9f3b0e079441869a6f100fc2ec5", "score": "0.6324956", "text": "def getOptions(self, text, value, description) :\n\n return { \n \"text\": text, \n \"value\": value,\n \"description\" : description\n }", "title": "" }, { "docid": "106ad2c999dcfe379fb77e32a3aa4e45", "score": "0.6166941", "text": "def get_all_options(self, field):", "title": "" }, { "docid": "957060947f6bc79961ea6d955a5003fb", "score": "0.61292577", "text": "def options(self) -> List[str]:\n return self._options", "title": "" }, { "docid": "ed494dad0097d89929e3ddd55c988812", "score": "0.61121804", "text": "def options(self) -> 'outputs.OptionsResponse':\n return pulumi.get(self, \"options\")", "title": "" }, { "docid": "008acf6781d5b59532080738aa7dd188", "score": "0.6039252", "text": "def options(self) -> List:\n return self._options", "title": "" }, { "docid": "436502566a0dd0039851c666ad3581e7", "score": "0.60144883", "text": "def options(self):\n\n return self.__dict__", "title": "" }, { "docid": "80dd158e49427e2253498545915e903e", "score": "0.5962604", "text": "def options(self):\n return self._options", "title": "" }, { "docid": "80dd158e49427e2253498545915e903e", "score": "0.5962604", "text": "def options(self):\n return self._options", "title": "" }, { "docid": "80dd158e49427e2253498545915e903e", "score": "0.5962604", "text": "def options(self):\n return self._options", "title": "" }, { "docid": "dce9167ad0e41f54840e455dfdded1cf", "score": "0.59361976", "text": "def options_info(self) -> List[str]:\n return [\"with-{0.name}-extractor\".format(self)]", "title": "" }, { "docid": "42bd6ac852cc7e66214311bfafebbf14", "score": "0.5898295", "text": "def options(self) -> List[WebElement]:\n return self._el.find_elements(By.TAG_NAME, \"option\")", "title": "" }, { "docid": "e342020f454acf213114292e19c681c2", "score": "0.58699906", "text": "def getOptionsNames(self) -> List[unicode]:\n ...", "title": "" }, { "docid": "df94bcd2dfa7201e0e0ecbc20899d9e4", "score": "0.5851834", "text": "def get_options(self) -> Dict[str, str]:\n pass # pragma: no cover", "title": "" }, { "docid": "3777e20fce5b93218bb54a60a9df0d11", "score": "0.5834703", "text": "def option_widget(self, ):\n pass", "title": "" }, { "docid": "820305b3d9533214f15a1274ffdf475b", "score": "0.5824721", "text": "def getOptionNames(self) -> List[unicode]:\n ...", "title": "" }, { "docid": "8aee55549a218681b3c86193a975a734", "score": "0.58180726", "text": "def options(self):\n return self.cf.options(self.main_section)", "title": "" }, { "docid": "639c2b03fd46b2ad3a76d75b7aa874d1", "score": "0.58031416", "text": "def 
description(self):\n return self.properties.get('description', IdentitySet())", "title": "" }, { "docid": "cfbf3707e42f254eede4d2f289b60c6e", "score": "0.57529825", "text": "def options(self, section: str) -> List[str]:", "title": "" }, { "docid": "cd81efa3275872d408150fc4404aac31", "score": "0.5748922", "text": "def options(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Option]:\n pass", "title": "" }, { "docid": "cd81efa3275872d408150fc4404aac31", "score": "0.5748922", "text": "def options(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Option]:\n pass", "title": "" }, { "docid": "2db2505a62825b06772997617a954817", "score": "0.5735146", "text": "def showOptions(self):\n allItems = vars(self.options)\n for item in allItems:\n print(item + ': ' + str(allItems[item]))", "title": "" }, { "docid": "0ad5f2f6d2edcd2b5671525d85396f4a", "score": "0.57324857", "text": "def describe(self):\n return self.name, self.value, self.__str__", "title": "" }, { "docid": "d03630ad14bb603b8d34902d8067736c", "score": "0.571595", "text": "def get_options():\n return OPTIONS", "title": "" }, { "docid": "205594ffc98dfd7a4ec0543e858c69f8", "score": "0.57154465", "text": "def get_description(self):\n return {}", "title": "" }, { "docid": "3df4d740ea6b933cc67ffa6b89a93128", "score": "0.5714965", "text": "def _makeOptions(self):\n return (\n (\"diff\",\n {\"type\": \"string\",\n \"metavar\": \"<result-file>\",\n \"help\": \"Set comparing result file to automatically \"\n \"generate a diff.\"}\n ),\n ('pep8',\n {'type': 'yn', 'metavar': '<y_or_n>',\n 'default': False,\n 'help': 'Show pep8 warnings.'}\n ),\n ('strict-epydoc',\n {'type': 'yn', 'metavar': '<y_or_n>',\n 'default': False,\n 'help': \"Check '@type' and '@rtype' in epydoc.\"}\n ),\n )", "title": "" }, { "docid": "ccec128bbfb3eaaf2bebbb5d9f27cf3e", "score": "0.570452", "text": "def option_descrip(self, key, field=1):\n return self.imexam_option_funcs[key][field]", "title": "" }, { "docid": "c257219a0e3af59447149a4789a1be9b", "score": "0.5694852", "text": "def description(self):\n return", "title": "" }, { "docid": "f6cdc1ca45e2bc00d9fda8ec807868c7", "score": "0.56932014", "text": "def get_options(self):\n return None", "title": "" }, { "docid": "a18cc13cd0a7114e5ca970742147f9cf", "score": "0.5684077", "text": "def option(self, option):\n \n pass", "title": "" }, { "docid": "c263243d9c10704feb2d29fef9775e94", "score": "0.56828207", "text": "def description(self):\n pass", "title": "" }, { "docid": "84fddb38a2d76b6b5274c367614bf683", "score": "0.5679177", "text": "def description(self):\n return self.properties.get('Description', None)", "title": "" }, { "docid": "ab9e1f0a50fbb6e2fbeb6baffa5683f8", "score": "0.5674953", "text": "def describe(self) -> str:\n return f\"{self.field_name}:{self.value} ({self.description})\"", "title": "" }, { "docid": "be47ee1bd33f9f6c9295f14bbd687622", "score": "0.56694996", "text": "def dropdown(self):\n return db.select(self.table).fields('cid', 'title').execute(as_dict=True)", "title": "" }, { "docid": "6c424ae378399045f23f0eea6aed1344", "score": "0.56471103", "text": "def choice_list(self, options, selected_option=None):\r\n select_tag = '<select>'\r\n\r\n for value, label in options.iteritems():\r\n checked = ' selected=\"selected\"' if value == selected_option else ''\r\n select_tag += '<option value=\"%s\"%s>%s</option>' \\\r\n % (value, checked, label)\r\n select_tag += '</select>'\r\n return select_tag", "title": "" }, { "docid": 
"50c7a626dd2d65104abb9711a99fd7a8", "score": "0.5636724", "text": "def select(self,desc,name,dict,selected=''):\n\t\tself.output += '<tr><td align=right><b>' + str(desc) + '</b></td><td><select name=' + str(name) + '>'\n\t\tfor key in dict:\n\t\t\tif str(dict[key]) == str(selected):\n\t\t\t\tself.output += '<option value=' + str(dict[key]) + ' selected>' + str(key) + '</option>'\n\t\t\telse:\n\t\t\t\tself.output += '<option value=' + str(dict[key]) + '>' + str(key) + '</option>'\n\t\tself.output += '</select></td></tr>'", "title": "" }, { "docid": "84fc3af0b4df93099e6e36ee106f44fb", "score": "0.562898", "text": "def get_selection_options(self) -> list:\n\n\n options = []\n for division_nr, division in self.divisions.items():\n option = {'label': division['title'], 'value': int(division_nr)}\n options.append(option)\n \n return options", "title": "" }, { "docid": "db9210f66e7c558bc01f0e74bfb3f4e8", "score": "0.5628344", "text": "def PROPERTIES(self):\n return \"properties\"", "title": "" }, { "docid": "db9210f66e7c558bc01f0e74bfb3f4e8", "score": "0.5628344", "text": "def PROPERTIES(self):\n return \"properties\"", "title": "" }, { "docid": "c9c17b4343d96b098b48c9c4ae31dbdf", "score": "0.56102115", "text": "def help_select_request():\n get_select_request_parser().print_help()", "title": "" }, { "docid": "e205f54f1b24e5381c0051d3b228cac0", "score": "0.5593993", "text": "def select(self):\n name = 'id-name'\n html = '<select name=\"{0}\" id=\"{0}\">'.format(name)\n for i in self.ids():\n if i['id'] == self.selected_id():\n html += '<option value=\"{0}\" selected=\"selected\">{0}</option>'.format(i['id'])\n else:\n html += '<option value=\"{0}\">{0}</option>'.format(i['id'])\n html += '</select>'\n return html", "title": "" }, { "docid": "98902c969d2ec2b3b0d6204b2f92d768", "score": "0.5591246", "text": "def print_options(self):\n keys = self.get_options()\n for key in keys:\n print(\"%s\\t%s\" % (key, self.option_descrip(key)))\n print()", "title": "" }, { "docid": "51a048ccda4971aed94c18d40fe30e93", "score": "0.55757827", "text": "def options(self) -> pulumi.Output[Optional[Mapping[str, str]]]:\n return pulumi.get(self, \"options\")", "title": "" }, { "docid": "439d44da923055ad2516bda1c45fa251", "score": "0.5562587", "text": "def describe_properties(self):\n (names, descriptions, types, flags, defaults) = self._call(\"describeProperties\")\n types = [DataType(a) for a in types]\n return (names, descriptions, types, flags, defaults)", "title": "" }, { "docid": "9c11f11c567d300f5b8e0f7351c18c1a", "score": "0.5552684", "text": "def choices(self):\n if self.data_type == YES_NO:\n cs = [(YES_CODE, '%s-Yes' % YES_CODE), (NO_CODE, '%s-No' % NO_CODE)]\n else:\n cs = [(fo.code, \"%s-%s\" % (fo.code.lstrip('0'), fo.description))\n for fo in self.factoption_set.all().order_by('code')]\n cs.insert(0, ('', 'Make a selection'))\n return cs", "title": "" }, { "docid": "fa7cf169525b00499cdd42df2db86f55", "score": "0.55510545", "text": "def show_options(self, options):\n formatted_options = []\n if len(options) > 0:\n self.print_meta()\n for option_name, option in options.items():\n if option[\"Default\"] != \"\" and option[\"Value\"] == \"\":\n formatted_options.append(\n {\n 'Name': option_name,\n 'Value': option[\"Default\"],\n 'Required': option[\"Required\"],\n 'Description': option[\"Description\"]\n }\n )\n else:\n formatted_options.append(\n {\n 'Name': option_name,\n 'Value': option[\"Value\"],\n 'Required': option[\"Required\"],\n 'Description': option[\"Description\"]\n }\n )\n 
self.logger.print_tabulate(formatted_options, headers={\"Name\": \"Name\", \"Value\": \"Value\",\n \"Required\": \"Required\",\n \"Description\": \"Description\"})", "title": "" }, { "docid": "512e6783b5efba06b52a419581222389", "score": "0.5544582", "text": "def list(self):\n return self._options", "title": "" }, { "docid": "486e026063217af1cd679961c45abf01", "score": "0.55333984", "text": "def get_description():\n desc = {\"description\": __doc__, \"data\": True}\n ets = datetime.datetime.now().replace(minute=0)\n sts = ets - datetime.timedelta(days=1)\n desc[\"arguments\"] = [\n dict(\n type=\"select\",\n name=\"opt\",\n default=\"1\",\n options=PLOTTYPES,\n label=\"Select Plot Type:\",\n ),\n dict(\n type=\"date\",\n name=\"date\",\n default=sts.strftime(\"%Y/%m/%d\"),\n label=\"Select Date\",\n min=\"2012/01/01\",\n ),\n ]\n return desc", "title": "" }, { "docid": "9231ad911a116f7b7d7960abbc86bcb6", "score": "0.552388", "text": "def _get_options(self):\n options = dict()\n\n _top = self.options.get('$top')\n if _top is not None:\n options['$top'] = _top\n\n _offset = self.options.get('$skip')\n if _offset is not None:\n options['$skip'] = _offset\n\n _select = self.options.get('$select')\n if _select:\n options['$select'] = ','.join(_select)\n\n _filters = self.options.get('$filter')\n if _filters:\n options['$filter'] = ' and '.join(_filters)\n\n _expand = self.options.get('$expand')\n if _expand:\n options['$expand'] = ','.join(_expand)\n\n _order_by = self.options.get('$orderby')\n if _order_by:\n options['$orderby'] = ','.join(_order_by)\n return options", "title": "" }, { "docid": "4b9d10899951c79ba3e084c2997a1885", "score": "0.5504767", "text": "def props(self):\n _props = {}\n for key, select in self.selects.items():\n if select.value is not None:\n _props[key] = select.value\n _props[\"reverse\"] = len(self.checkboxes[\"reverse\"].active) == 1\n return _props", "title": "" }, { "docid": "ad184a8b84bdce9a0e0cfb7d53b90322", "score": "0.5501245", "text": "def options(self):\n pclass_options = self.get_product_class().options.all()\n return set(pclass_options) or set(self.product_options.all())", "title": "" }, { "docid": "9f977ed64ede02edaf4829532bd6926d", "score": "0.5500868", "text": "def describe(self):\n if self.value == 1:\n desc = \"America\"\n elif self.value == 2:\n desc = \"Europe\"\n elif self.value == 3:\n desc = \"Korea\"\n\n return desc", "title": "" }, { "docid": "c8c4a6dfab6f1561c1bcc0b32f6913e6", "score": "0.5498223", "text": "def options(self):\n return _abstract()", "title": "" }, { "docid": "cda8eb1c73279b400d54b764d95ec495", "score": "0.5494206", "text": "def getOptions(self):\n if self.table is None:\n return [(v, v) for v in self.options.split(\",\")]\n else:\n options = []\n for f in self.table.getFeatures():\n options.append(\n (\n f.attribute(self.value_field),\n f.attribute(self.label_field),\n )\n )\n return options # sorted(options, key=lambda o, o[1])", "title": "" }, { "docid": "bc10c898f65e0603b2b2c93f15b12087", "score": "0.54757726", "text": "def options(self) -> Options:\n return self._options", "title": "" }, { "docid": "6a4caa2bcbd6421f88e2e1863dd77947", "score": "0.54730445", "text": "def description(self):\n\t\treturn self.__description", "title": "" }, { "docid": "75420c11a072d9ac790084172d5b142f", "score": "0.54670405", "text": "def get_description(self):\n pass", "title": "" }, { "docid": "75420c11a072d9ac790084172d5b142f", "score": "0.54670405", "text": "def get_description(self):\n pass", "title": "" }, { "docid": 
"0ee29b55b6557a09491c91dbd48b5598", "score": "0.5466294", "text": "def description(self):\n return self.__description", "title": "" }, { "docid": "2426da60614cb75ba9fc7509f27da587", "score": "0.5458335", "text": "def _fill_menu(self) -> List[discord.SelectOption]:\n options = []\n\n for cog in self.bot.cogs:\n option = discord.SelectOption(\n label=self.bot.cogs[cog].__cog_name__,\n description=self.bot.cogs[cog].description,\n emoji=self.bot.cogs[cog].emoji,\n )\n options.append(option)\n\n return options", "title": "" }, { "docid": "4a55532e9fed2fb083e8bee6c63df129", "score": "0.54538804", "text": "def options_help(cls) -> str:\n return cls._parser.format_help()", "title": "" }, { "docid": "e57327aa95f13778e594c26c0d372bd0", "score": "0.5441144", "text": "def description(self) -> str:\n pass", "title": "" }, { "docid": "ce531d90a821012a5288bc17d972c7bc", "score": "0.54369366", "text": "def options(self) -> typing.List['SlashOption']:\n return self._json.get(\"options\")", "title": "" }, { "docid": "7b1bad903fcc32e569ef55423ab3f56b", "score": "0.54208523", "text": "def options(self):\n excluded = ['others']\n return [name.replace('_', '-') for name in vars(self)\n if name not in excluded]", "title": "" }, { "docid": "583316e52de52495bd2f0ce3613ca97d", "score": "0.54205734", "text": "def options(self):\n return self._options_list.copy()", "title": "" }, { "docid": "5c299f653e342335106df1f926c490c8", "score": "0.5415215", "text": "def make_properties_control(form_name,\n field_name,\n properties,\n select_name=None):\n\n # Generate a name for the select control if none was specified.\n if select_name is None:\n select_name = \"_propsel_\" + field_name\n name_control_name = \"_propname_\" + field_name\n value_control_name = \"_propval_\" + field_name\n add_change_button_name = \"_propaddedit_\" + field_name\n\n # Construct the select control.\n select = '''\n <select name=\"%s\"\n size=\"6\"\n width=\"240\"\n onchange=\"property_update_selection(document.%s.%s,\n document.%s.%s,\n document.%s.%s);\n document.%s.%s.value = ' Change ';\"\n >\\n''' \\\n % (select_name, form_name, select_name,\n form_name, name_control_name, form_name, value_control_name,\n form_name, add_change_button_name)\n # Add an option for each initial property.\n keys = properties.keys()\n keys.sort()\n for k in keys:\n select = select + \\\n '<option value=\"%s=%s\">%s = %s</option>\\n' \\\n % (k, properties[k], k, properties[k])\n select = select + '</select>\\n'\n \n # Construct the hidden control contianing the set's elements. Its\n # initial value is the encoding of the initial elements.\n initial_value = encode_properties(properties)\n contents = '<input type=\"hidden\" name=\"%s\" value=\"%s\"/>' \\\n % (field_name, initial_value)\n\n # Construct a control for the property name.\n name_control = \\\n '''<input type=\"text\"\n name=\"%s\"\n size=\"32\"\n onkeydown=\"document.%s.%s.value = ' Add ';\"\n />''' % (name_control_name, form_name, add_change_button_name)\n # Construct a control for the property value.\n value_control = '<input type=\"text\" name=\"%s\" size=\"32\"/>' \\\n % value_control_name\n\n vars = { 'form' : form_name,\n 'button' : add_change_button_name,\n 'select' : select_name,\n 'field' : field_name,\n 'name' : name_control_name,\n 'value' : value_control_name }\n \n # Construct the \"Change\" button. 
When it's clicked, call\n # 'property_update', passing the select control and the hidden\n # control whose value should be updated with the new encoded\n # property list.\n add_change_button = \\\n '''<input type=\"button\"\n name=\"%(button)s\"\n size=\"12\"\n value=\" Add \"\n onclick=\"property_add_or_change\n (document.%(form)s.%(select)s,\n document.%(form)s.%(field)s,\n document.%(form)s.%(name)s,\n document.%(form)s.%(value)s);\"\n />''' % vars\n\n # Construct the \"Remove\" button.\n remove_button = \\\n '''<input type=\"button\"\n size=\"12\"\n value=\" Remove \"\n onclick=\"property_remove(document.%(form)s.%(select)s,\n document.%(form)s.%(field)s,\n document.%(form)s.%(name)s,\n document.%(form)s.%(value)s,\n document.%(form)s.%(button)s);\"\n />''' % vars\n\n # Arrange everything in a table to control the layout.\n return contents + '''\n <table border=\"0\" cellpadding=\"0\" cellspacing=\"0\"><tbody>\n <tr valign=\"top\">\n <td colspan=\"2\" width=\"240\">%s</td>\n <td>&nbsp;</td>\n <td>%s</td>\n </tr>\n <tr>\n <td>Name:&nbsp;</td>\n <td align=\"right\">%s </td>\n <td>&nbsp;</td>\n <td>%s</td>\n </tr>\n <tr>\n <td>Value:&nbsp;</td>\n <td align=\"right\">%s </td>\n <td>&nbsp;</td>\n <td>&nbsp;</td>\n </tr>\n </tbody></table>\n ''' % (select, remove_button, name_control, add_change_button,\n value_control)", "title": "" }, { "docid": "042ec0c0ade4697b30c038febd677a10", "score": "0.5398497", "text": "def description(self):\n return [x.description for x in self.features]", "title": "" }, { "docid": "805c4ab0027cc1254520404385b7ed46", "score": "0.5396357", "text": "def description(self):", "title": "" }, { "docid": "c77deb196638e451a68f074af485d3ee", "score": "0.5395566", "text": "def _sel_prep(self):\n sel_blob = []\n for sel in self.arg['options']:\n if self.arg['defaultValue'] == sel['name']:\n sel_blob.append({'value': sel['name'], 'selected': 'true'})\n else:\n sel_blob.append({'value': sel['name']})\n return sel_blob", "title": "" }, { "docid": "6f1186084c2efb91d5a358867c9dca4b", "score": "0.53930086", "text": "def experimental_options(self):\n ...", "title": "" }, { "docid": "3b0cb9a4c3d8b47f90cb738caf2e3995", "score": "0.538827", "text": "def description(self):\n if \"description\" in self._prop_dict:\n return self._prop_dict[\"description\"]\n else:\n return None", "title": "" }, { "docid": "173f55932d9acaa9b5ee45bfc9b9352a", "score": "0.5380856", "text": "def select(self, child, name):\n\t\tvalueList = []\n\t\toptions = child.findall(\"option\")\n\n\t\tdef mapOptions(e):\n\t\t\tif \"value\" in e.attrib and e.attrib[\"value\"]:\n\t\t\t\tvalueList.append(self.Option(e.text, e.attrib[\"value\"]))\n\n\t\tmap(mapOptions, options)\n\t\tself.options.update({name : valueList})", "title": "" }, { "docid": "92e1e491a12bebc982e7cd97a3686aba", "score": "0.53753823", "text": "def description(self):\n raise NotImplementedError('Must implement description')", "title": "" }, { "docid": "3ea879b737f995a19c617750d33f7b94", "score": "0.53735715", "text": "def options():\n return list(map(lambda c: c.value, MortgagePoints))", "title": "" }, { "docid": "708f68a5c25dd0280621f58cd8fe0ceb", "score": "0.53661954", "text": "def description(self):\n return self._get_field(\"description\")", "title": "" }, { "docid": "78b7e09b5442e6772ef51b8ec86a8352", "score": "0.5351083", "text": "def options(self):\n if self.__option_provider is None:\n raise SALMAException(\"No option provider defined.\")\n return self.__option_provider()", "title": "" }, { "docid": "8b064a3675bee2b9947c33ed0f540c17", "score": 
"0.534159", "text": "def description(self):\n\n return self.__description", "title": "" }, { "docid": "9a2d0bab9223dd272398a76da9243bda", "score": "0.53386384", "text": "def get_plan_options(self):\n choices = []\n for plan in Plan.query.all():\n choices.append((str(plan.id), plan.name))\n return choices", "title": "" }, { "docid": "9b69d886b667ebf4294365e0a680828c", "score": "0.5334411", "text": "def option_info(self, name):\n try:\n return self._get_property('option-info/' + name)\n except AttributeError:\n return None", "title": "" }, { "docid": "aedfc41f0091af49f003bc0177323c9b", "score": "0.5330287", "text": "def description(self):\n # type: () -> string_types\n return self._description", "title": "" }, { "docid": "fcc153252e75e98a2db2374597ffc5b7", "score": "0.5329173", "text": "def getDescription(self):\n return self.description", "title": "" }, { "docid": "3ab6d405c4a16ddc07ed6241f5da0b07", "score": "0.53287405", "text": "def shortDescription(self, ):\n\t\tpass", "title": "" }, { "docid": "702d84e9a22ccff17e90c6fe422987a0", "score": "0.53286654", "text": "def choiceWidget(field):\n label = field.verbose_name\n\n choices = []\n choices.append(('', label))\n for choice in field.choices:\n choices.append((str(choice), unicode(choice)))\n return widgets.Select(choices=choices)", "title": "" }, { "docid": "3b8e9a4491e25ed6e3d96b7d6a3d7bf2", "score": "0.5323539", "text": "def create_select(self, name, field, val, choices):\n if 'id' in self.attrs:\n id_ = self.attrs['id']\n else:\n id_ = 'id_%s' % name\n if not (self.required and val):\n choices.insert(0, self.none_value)\n local_attrs = self.build_attrs(id=field % id_)\n s = Select(choices=choices)\n select_html = s.render(field % name, val, local_attrs)\n return select_html", "title": "" }, { "docid": "3371037289115c8bc5abbcdc6fbae5fa", "score": "0.5323379", "text": "def metadata_options(self) -> 'outputs.MetadataOptionsResponse':\n return pulumi.get(self, \"metadata_options\")", "title": "" }, { "docid": "ebd3abba17d7706b6b5e15105a90b225", "score": "0.5321099", "text": "def get_country_options(self):\n choices = []\n countries = Country.query.all()\n for country in countries:\n choices.append((country.iso, country.printable_name))\n return choices", "title": "" }, { "docid": "887fccc50c39ce5721e24c1e6b66ada3", "score": "0.53174794", "text": "def options():\n return {}", "title": "" }, { "docid": "8ea02498160b9aec2d4b62841984dc99", "score": "0.5309157", "text": "def description(self):\n ret = self._get_attr(\"description\")\n return ret", "title": "" }, { "docid": "8ea02498160b9aec2d4b62841984dc99", "score": "0.5309157", "text": "def description(self):\n ret = self._get_attr(\"description\")\n return ret", "title": "" }, { "docid": "8ea02498160b9aec2d4b62841984dc99", "score": "0.5309157", "text": "def description(self):\n ret = self._get_attr(\"description\")\n return ret", "title": "" }, { "docid": "8ea02498160b9aec2d4b62841984dc99", "score": "0.5309157", "text": "def description(self):\n ret = self._get_attr(\"description\")\n return ret", "title": "" }, { "docid": "8ea02498160b9aec2d4b62841984dc99", "score": "0.5309157", "text": "def description(self):\n ret = self._get_attr(\"description\")\n return ret", "title": "" }, { "docid": "8ea02498160b9aec2d4b62841984dc99", "score": "0.5309157", "text": "def description(self):\n ret = self._get_attr(\"description\")\n return ret", "title": "" }, { "docid": "8ea02498160b9aec2d4b62841984dc99", "score": "0.5309157", "text": "def description(self):\n ret = self._get_attr(\"description\")\n 
return ret", "title": "" }, { "docid": "8ea02498160b9aec2d4b62841984dc99", "score": "0.5309157", "text": "def description(self):\n ret = self._get_attr(\"description\")\n return ret", "title": "" }, { "docid": "ae04d0ca00172660196cd974bf4f7a32", "score": "0.53056276", "text": "def getOptions(self,productCode):\r\n\r\n\t\turl = MozuUrl(\"/api/commerce/catalog/admin/products/{productCode}/Options\", \"GET\", UrlLocation.TenantPod, False);\r\n\t\turl.formatUrl(\"productCode\", productCode);\r\n\t\tself.client.withResourceUrl(url).execute();\r\n\t\treturn self.client.result();", "title": "" }, { "docid": "98aaee81e33529f42a9a32747fb83978", "score": "0.5304145", "text": "def description(self):\n response = {\n 'type': self.type,\n 'retrievable': True,\n }\n parameters = self.parameters()\n if parameters is not None:\n response['parameters'] = parameters\n\n return response", "title": "" }, { "docid": "6a31479a234750fe82d2c049845fa13d", "score": "0.52951527", "text": "def description(self) -> str:\n return self.__description", "title": "" } ]
2c45436778ad7ce358304fdbf918c506
Check if an element exists on page for the given html xpath
[ { "docid": "1e4d1a418f66980ddb3dd3fbbc47c247", "score": "0.7942066", "text": "def __check_exists_by_xpath(self, xpath):\n\n try:\n self.browser.find_element_by_xpath(xpath)\n except NoSuchElementException:\n return False\n return True", "title": "" } ]
[ { "docid": "0413ab94fca2024c2b3c704acc52310e", "score": "0.8264476", "text": "def elem_exists(self, xpath):\r\n try:\r\n self.driver.find_element_by_xpath(xpath)\r\n return True\r\n except:\r\n return False", "title": "" }, { "docid": "57c27d4116e980599636e42a1ad3c8cb", "score": "0.719222", "text": "def verify_element(self, xpath):\n return True if self.find_element(xpath) else False", "title": "" }, { "docid": "ae2c06dc6892874e1f0d365ce3016158", "score": "0.7128561", "text": "def checkObjExists(self, xpath):\n try:\n self.browser.find_element_by_xpath(xpath)\n return True\n except (StaleElementReferenceException, NoSuchElementException):\n return False", "title": "" }, { "docid": "741207fdaf522decc49f858fd8527fd6", "score": "0.7006103", "text": "def _element_exists(driver, element_locator):\n exists = True\n try:\n driver.find_element(*element_locator)\n except NoSuchElementException:\n exists = False\n return exists", "title": "" }, { "docid": "b2593b5203837c7f47926861d8aa83d6", "score": "0.6947715", "text": "def _IsElementPresent(self, locator):\n try:\n self.driver.find_element(*locator)\n except NoSuchElementException:\n return False\n return True", "title": "" }, { "docid": "f8eb73b98bb1c31f794efcf1923162d0", "score": "0.6857992", "text": "def exists(self, **kwargs):\n self.__stop_execution_on_timeout()\n self.run_phantom_driver_click('Search')\n if kwargs.has_key('element'):\n try:\n return kwargs['element']\n except:\n return False\n else:\n try:\n return self.get_element(**kwargs)\n except NoSuchElementException:\n return False\n # finally:\n # self.driver.implicitly_wait(self.default_implicit_wait)", "title": "" }, { "docid": "bad59cc19734e687b8e0d93fa43455cd", "score": "0.6844576", "text": "def element_exists(self, selector: Selector) -> bool:\n\n return self.execute_file(Path(\"element_exists.js\"), selector.css)", "title": "" }, { "docid": "f36eb7223a1d0fcb5ba6f9612034f081", "score": "0.6677813", "text": "def check_element_on_page(self, list_of_element_locator):\n \"\"\"Browse the list elements\"\"\"\n for widget in list_of_element_locator:\n assert self.is_element_present(widget) is False, \\\n '''Check present widget on hom page'''\n print('\\n----------------------------\\n' + str(widget))", "title": "" }, { "docid": "4438f4027b937ee4735b234b8115b04c", "score": "0.6660369", "text": "def is_element_present(locator: By, time_to_wait=2):\n try:\n s(locator).should(exist, time_to_wait)\n return True\n except Exception as e:\n log.info(\">>>> \", e)\n return False", "title": "" }, { "docid": "78f19f95638b8d84e947188f67e8f16a", "score": "0.66583884", "text": "def __check_exists_by_id(self, element_id):\n\n try:\n self.browser.find_element_by_id(element_id)\n except NoSuchElementException:\n return False\n return True", "title": "" }, { "docid": "a5c7ed7c8235a39bea548159580020bc", "score": "0.66272414", "text": "def is_element_present(self, locator):\n selenium2lib = BuiltIn().get_library_instance('Selenium2Library')\n try:\n return selenium2lib.page_should_contain_element(locator)\n except AssertionError:\n return False", "title": "" }, { "docid": "ea68a099c36645a5b5c52b5f8cb51ad0", "score": "0.6549581", "text": "def assertElementExists(self, tag, content):\n soup = BeautifulSoup(content)\n hrefs = soup.find_all(tag)\n self.assertEqual(\n len(hrefs), 1,\n \"Couldn't find element {} in {}\".format(tag, content)\n )", "title": "" }, { "docid": "7931ede371eca45067b02feeac2e1c2c", "score": "0.64777756", "text": "def find_element(self, xpath):\n try:\n return 
WebDriverWait(self.driver, self.wait_time).until(EC.visibility_of_element_located((By.XPATH, xpath)))\n except TimeoutException as err:\n self.logger.error(\"Web element {elem} was not loaded in time.\".format(elem=xpath))\n Common.finish_work(self.driver)", "title": "" }, { "docid": "36040e787a541d80e0303f3f1ace96fc", "score": "0.64552027", "text": "def find_by_xpath(driver, xpath):\n Log.info(f\"Finding element with Xpath {xpath}\")\n return driver.find_element_by_xpath(xpath)", "title": "" }, { "docid": "df851bdcd366b5a43aa9723c45750b17", "score": "0.6393488", "text": "def is_element_present(self, *locator, timeout=0):\n self.selenium.implicitly_wait(timeout)\n try:\n self.selenium.find_element(*locator)\n return True\n except NoSuchElementException:\n return False\n finally:\n # set back to where you once belonged\n self.selenium.implicitly_wait(self.timeout)", "title": "" }, { "docid": "e28ef2c015ed52c651def7f88b810049", "score": "0.63905245", "text": "def element_is_present(self, id):\n try:\n self.driver.find_element_by_id(id)\n return True\n except NoSuchElementException:\n return False", "title": "" }, { "docid": "ec5e7b83d2a2127164d6c64ebe33505e", "score": "0.6385624", "text": "def _check_exists(self, soup):\n # Simple test: is this div present?\n if soup.find(attrs={\"id\": \"product-details\"}) is None:\n return False\n return True", "title": "" }, { "docid": "0f16f42515392b78c52607d115503bac", "score": "0.6379915", "text": "def test_find_by_xpath_with_context(self):\n xpath_elements = self.browser.within('body').find_by_xpath('//h1')\n css_elements = self.browser.within('body').find_by_css('h1')\n assert css_elements.value == xpath_elements.value", "title": "" }, { "docid": "0f16f42515392b78c52607d115503bac", "score": "0.6379915", "text": "def test_find_by_xpath_with_context(self):\n xpath_elements = self.browser.within('body').find_by_xpath('//h1')\n css_elements = self.browser.within('body').find_by_css('h1')\n assert css_elements.value == xpath_elements.value", "title": "" }, { "docid": "d51132e18f1814684adadb83c461695e", "score": "0.63742864", "text": "def has_element(self, by = \"id\", value = None):\n try:\n el = self.automation_script.driver.find_element(by = by, value = value)\n return el\n except NoSuchElementException:\n return None", "title": "" }, { "docid": "9365806d6b14e064b9ef728784622725", "score": "0.6333851", "text": "def is_element_present(self, how, what):\n\n try:\n self.driver.find_element(by=how, value=what)\n except NoSuchElementException as nsee:\n print(\"The element does not exist :\", nsee)\n return False\n return True", "title": "" }, { "docid": "e9fd83a24b6984b2de87724c2c7eb354", "score": "0.6283403", "text": "def find_element(browser, xpath):\n try:\n return browser.find_element_by_xpath(xpath).text\n except NoSuchElementException:\n return None", "title": "" }, { "docid": "cc97258f4ed0506c6735cf4bef559132", "score": "0.6264245", "text": "def is_element_present(self, field_locator, error='There is no element on page'):\n field = self.wait.until(EC.presence_of_element_located(field_locator), error)\n if field is True:\n return field\n else:\n return False", "title": "" }, { "docid": "410bd39be87860ed51f4165c8fb63fec", "score": "0.6254509", "text": "def get_element_by_xpath(driver, xpath):\n Log.info(f\"Getting element at xpath {xpath}\")\n return driver.find_element_by_xpath(xpath)", "title": "" }, { "docid": "67856a377d2063a1333936d4be1e679b", "score": "0.6248889", "text": "def find_element(xpath, driver):\n # xpath = xpath_soup(html)\n # print 
\"xpath:\", xpath\n try:\n e = driver.find_element_by_xpath(xpath)\n except:\n return -1, -1\n location = e.location\n size = e.size\n return location, size", "title": "" }, { "docid": "76bd98409419b76d80d698ac7a68efc4", "score": "0.6230654", "text": "def is_element_present(self, how, what):\n try: driver.find_element(by=how, value=what)\n except NoSuchElementException: return False\n return True", "title": "" }, { "docid": "eae90de294f2c38f87bea70223dc092b", "score": "0.6230255", "text": "def is_element_present(self, how, what):\n try: self.driver.find_element(by=how, value=what)\n except NoSuchElementException: return False\n return True", "title": "" }, { "docid": "17913bae331b0ee28af7b90a32a7b673", "score": "0.62215805", "text": "def is_element_present(self, how, what):\n try:\n self.driver.find_element(by=how, value=what)\n except NoSuchElementException:\n return False\n return True", "title": "" }, { "docid": "ab12c357c7335a8cc462aa743d4ee019", "score": "0.621138", "text": "def find_visible_element(self, xpath):\n try:\n return WebDriverWait(self.webdriver, WTF_TIMEOUT_MANAGER.SHORT).until(\n EC.visibility_of_element_located((By.XPATH, xpath)))\n except TimeoutException:\n msg = \"No element located by XPATH - '{0}'\".format(xpath)\n raise TimeoutException(msg)", "title": "" }, { "docid": "9c05185e334500b8d2654a684ffa1085", "score": "0.6199574", "text": "def isElementPresent(self, locator=\"\", locatorType=\"id\", element=None):\n try:\n if locator: # This means if locator is not empty\n element = self.getElement(locator, locatorType)\n if element is not None:\n self.log.info(\"Element present with locator: \" + locator +\n \" locatorType: \" + locatorType)\n return True\n else:\n self.log.info(\"Element not present with locator: \" + locator +\n \" locatorType: \" + locatorType)\n return False\n except:\n print(\"Element not found\")\n return False", "title": "" }, { "docid": "27c76ce808f07640cf7eb46025eb83fb", "score": "0.6186323", "text": "def __click_exists_by_xpath(self, xpath, error=None, success=None, wait=False):\n\n try:\n self.browser.find_element_by_xpath(xpath).click()\n if wait:\n sleep(self.sleep_time)\n except selenium_exception.NoSuchElementException:\n if error:\n self.error += \"ERROR: \" + error + \" [Element Not Found]\\r\\n\"\n return False\n except selenium_exception.ElementNotInteractableException:\n if error:\n self.error += \"ERROR: \" + error + \" [Element Not Intractable]\\r\\n\"\n return False\n except (selenium_exception.WebDriverException, selenium_exception.ElementClickInterceptedException):\n try:\n btn = self.browser.find_element_by_xpath(xpath)\n self.browser.execute_script(\"arguments[0].click();\", btn)\n if wait:\n sleep(self.sleep_time)\n except selenium_exception:\n if error:\n self.error += \"ERROR: \" + error + \"\\r\\n\"\n return False\n if success:\n self.success += \"SUCCESS: \" + success + \"\\r\\n\"\n return True", "title": "" }, { "docid": "4ca23acaba9c60c0bacbfab98c4b6f51", "score": "0.61676073", "text": "def is_element_present(self, locator_type, locator):\n try:\n element = self.get_element(locator_type, locator)\n if element is not None:\n return True\n else:\n return False\n except:\n return False", "title": "" }, { "docid": "34e46b1f42346789570d5ba2c328a178", "score": "0.6134949", "text": "def find_elements(browser, xpath):\n try:\n return wait.until(\n EC.presence_of_element_located((By.XPATH, xpath))\n ).find_elements_by_xpath(xpath)\n except NoSuchElementException:\n return None", "title": "" }, { "docid": 
"65f62a964e912db9ea1fd203ebb645c4", "score": "0.6103649", "text": "def visibility_of_element_wait(self, driver, xpath, timeout=10) -> WebElement:\n locator = (By.XPATH, xpath)\n element_located = EC.presence_of_element_located(locator)\n\n if hasattr(driver, 'wrapped_driver'):\n unwrapped_driver = driver.wrapped_driver\n wait = WebDriverWait(unwrapped_driver, timeout)\n else:\n wait = WebDriverWait(driver, timeout)\n return wait.until(element_located, f\"Element for xpath: '{xpath}'and url: {driver.current_url} not found\")", "title": "" }, { "docid": "0c4f87a6135ced23868fa47d4c2e247b", "score": "0.60806686", "text": "def wait_for_element_presence_and_get(driver=driver, timeout=settings['secondary_timeout'], xpath='//div'):\n\treturn WebDriverWait(driver, timeout).until(\n\t\tEC.presence_of_element_located((By.XPATH, xpath))\n\t)", "title": "" }, { "docid": "4088cad3d960275bc0f0928710d6e7c7", "score": "0.6055563", "text": "def find_element_by_xpath(self, xpath):\n return self.find_element(by=By.XPATH, value=xpath)", "title": "" }, { "docid": "7da527c9c04b30d6dc06dd1bef368c6d", "score": "0.60330236", "text": "def wait_until_ele_visible(self, xpath):\n wait = WebDriverWait(self.driver, DEFAULT_WAIT)\n ele = wait.until(EC.visibility_of_element_located((By.XPATH, xpath)))", "title": "" }, { "docid": "2fa1c9cbb5c03d51676df15f89110358", "score": "0.600521", "text": "def wait_element(self, element_xpath):\n try:\n self.logger.info(f\"Waiting element: {element_xpath}\")\n\n # Searching element\n element = self.wait.until(\n EC.presence_of_element_located((By.XPATH, element_xpath))\n )\n\n # Returning element\n return element\n\n except Exception: # pragma: no cover\n self.logger.info(\"Failed to find element\")\n raise ElementNotFoundError(f\"Element not found: {element_xpath}\")", "title": "" }, { "docid": "7c5245703877f9d4f1122fc74aad32b1", "score": "0.6004208", "text": "def session_is_available(step, service, search_string, element):\n if not world.launch_request:\n raise Exception('The %s service page is not found') % service\n tree = html.fromstring(world.launch_request.text)\n if search_string in tree.xpath('//%s' % element)[0].text:\n LOG.info(\"The %s service is launched.\" % service)\n else:\n raise AssertionError(\"The %s service is not launched.\" % service)", "title": "" }, { "docid": "15f9590dfef25e46d33ac7e0ba1d3e8f", "score": "0.60035795", "text": "def find_element(self, _type, location, step_desc=NOW()):\n try:\n if _type == 'id':\n loc = (By.ID, location)\n elif _type == 'xpath':\n loc = (By.XPATH, location)\n elif _type == 'name':\n loc = (By.NAME, location)\n elif _type == 'class_name':\n loc = (By.CLASS_NAME, location)\n elif _type == 'css_selector':\n loc = (By.CSS_SELECTOR, location)\n else:\n loc = (By.ID, location)\n wait_time = int(self.cfg.get('WAIT', 'time'))\n el = WebDriverWait(self.driver, wait_time).until(EC.presence_of_element_located(loc))\n logger.info(\"element: '%s' locate success, method: '%s'\" % (location, _type))\n # the function of hightlight in appium is unavailable\n # self.hightlight(el)\n return True, el\n except NoSuchElementException as e:\n logger.info(\"element: '%s' locate failure, method: '%s', error: %s\" % (location, _type, e))\n self.get_screenshot('Fail_' + step_desc)\n return False,\n except TimeoutException as e:\n logger.info(\"element: '%s' locate failure, method: '%s', error: %s\" % (location, _type, e))\n self.get_screenshot('Fail_' + step_desc)\n return False,\n except Exception as e:\n logger.info(\"element: '%s' locate failure, 
method: '%s', error: %s\" % (location, _type, e))\n self.get_screenshot('Fail_' + step_desc)\n return False,", "title": "" }, { "docid": "eca8aad2aa268b41b2824c7c605d7da6", "score": "0.5990495", "text": "def elementPresenceCheck(self, locator, byType):\n try:\n elementList = self.driver.find_elements(byType, locator)\n if len(elementList) > 0:\n self.log.info(\"Element present with locator: \" + locator +\n \" locatorType: \" + str(byType))\n return True\n else:\n self.log.info(\"Element not present with locator: \" + locator +\n \" locatorType: \" + str(byType))\n return False\n except:\n self.log.info(\"Element not found\")\n return False", "title": "" }, { "docid": "f8638006a55b3b6b76161d02b23fb57b", "score": "0.59882814", "text": "def __click_exists_by_xpath_elements(self, xpath, wait=False):\n\n elms = self.browser.find_elements_by_xpath(xpath)\n for elm in elms:\n try:\n elm.click()\n except selenium_exception.NoSuchElementException:\n continue\n except selenium_exception.ElementNotInteractableException:\n continue\n except (selenium_exception.WebDriverException, selenium_exception.ElementClickInterceptedException):\n try:\n self.browser.execute_script(\"arguments[0].click();\", elm)\n except selenium_exception:\n continue\n if wait:\n sleep(self.sleep_time)", "title": "" }, { "docid": "0fe6b1ba6db86f6eaf0d0399739e6d03", "score": "0.5957042", "text": "def wait_for_by_xpath(driver, element_xpath, wait_for_seconds=40, return_element=False):\n Log.info(f\"Waiting {wait_for_seconds} seconds for element with xpath {element_xpath} to appear!\")\n sleep(wait_for_seconds)\n element = driver.find_elements_by_xpath(element_xpath)\n # WebDriverWait(driver, wait_for_seconds).until(\n # expected_conditions.elem((By.XPATH, element_xpath)))\n Log.info(\"Element found\")\n if return_element:\n return element", "title": "" }, { "docid": "fd0ba1ced434785ae503534fecc51c80", "score": "0.58812094", "text": "def _has(self, path):\r\n \r\n try:\r\n # traverse to the actual child element \r\n curelem = self\r\n for pathelem in utils.dottedref.split_ref(path):\r\n curelem = curelem._children[pathelem]\r\n return True\r\n except KeyError: \r\n return False", "title": "" }, { "docid": "f7e628a369757b185a98fbc9cda93f98", "score": "0.5864475", "text": "def objectExists(self, objObject, sObjName):\n try:\n if self.objDriverWait.until(EC.presence_of_element_located(objObject)) is not None:\n # print sObjName + \" exists on page\"\n # self.logger.info(sObjName + \" exists on page\")\n return True\n else:\n print sObjName + \" does not exist on page\"\n self.logger.error(sObjName + \" does not exist on page\")\n print self.get_base64_encoded_screen_shot('objectExists')\n return False\n except Exception as e:\n print self.get_base64_encoded_screen_shot('objectExists')\n print \"ERROR: while checking existence of the element \" + sObjName, \"Error: {}\".format(e)\n self.logger.error(\"ERROR: while checking existence of the element \" + sObjName, \"Error: {}\".format(e))\n raise Exception( \"ERROR: while checking existence of the element \" + sObjName, \"Error: {}\".format(e))", "title": "" }, { "docid": "66649d124164373035da847c001082f7", "score": "0.5858754", "text": "def is_existing(self, log=True):\n if log:\n self.logger.info('determining whether page object {} is existing'.format(self._log_id_short))\n self.logger.debug('determining whether page object is existing; {}'.format(self._log_id_long))\n try:\n self.webelement\n except WebDriverException:\n if log:\n self.logger.info('page object {} is not 
existing'.format(self._log_id_short))\n self.logger.debug('page object is not existing; {}'.format(self._log_id_long))\n return False\n if log:\n self.logger.info('page object {} is existing'.format(self._log_id_short))\n self.logger.debug('page object is existing; {}'.format(self._log_id_long))\n return True", "title": "" }, { "docid": "74f1f68ebf0f31ffafdc0a57a1028e6a", "score": "0.5817192", "text": "def is_element_present(self, locator, timeout=None):\n\n is_ele_present = False\n element = Locator(self.context).get_element(locator, ElementWaitState.PRESENT, False, timeout)\n if isinstance(element, WebElement):\n self.context.logger.info(f'The given element {locator} does present on DOM.')\n is_ele_present = True\n else:\n self.context.logger.error(\n f'The given element `{locator}` does not present on DOM.')\n return is_ele_present", "title": "" }, { "docid": "6b5f48afed13d9dd25e1d330e56ec757", "score": "0.5778819", "text": "def is_not_element_present(self, *locator, timeout=0):\n self.selenium.implicitly_wait(timeout)\n try:\n self.selenium.find_element(*locator)\n return False\n except NoSuchElementException:\n return True\n finally:\n # set back to where you once belonged\n self.selenium.implicitly_wait(self.timeout)", "title": "" }, { "docid": "d8f83b5f1f524ed21db07fcb1601cc9f", "score": "0.57736385", "text": "def assertXpathsExist(self, node, xpaths):\n expressions = [etree.XPath(xpath) for xpath in xpaths]\n for expression in expressions:\n if not expression.evaluate(node):\n self.fail('No result found for XPath on element %s:\\n'\n 'XPath: %s\\n'\n 'Element:\\n'\n '%s' % (node.tag,\n expression.path,\n etree.tostring(node, pretty_print=True)))", "title": "" }, { "docid": "f1d07d0addca1bdd0ff19549be4184b2", "score": "0.5773113", "text": "def __contains__(self, x):\r\n if self.root:\r\n return self.root.search(x)\r\n else:\r\n #print(\"You looked for \" + str(x) + \", no such element exist... actually there are no elements dumbass\")\r\n return False", "title": "" }, { "docid": "7e01cf79b1a62bf127e22c91976f2e31", "score": "0.5746686", "text": "def __wait_for_element__(self, element_tag, locator, timeout=30):\n result = False\n self.driver.implicitly_wait(0)\n locator = locator.upper()\n for i in range(timeout):\n initTime = time()\n try:\n if locator == 'ID' and self.is_element_present(By.ID, element_tag):\n result = True\n break\n elif locator == 'NAME' and self.is_element_present(By.NAME, element_tag):\n result = True\n break\n elif locator == 'XPATH' and self.is_element_present(By.XPATH, element_tag):\n result = True\n break\n elif locator == 'CSS' and self.is_element_present(By.CSS_SELECTORS, element_tag):\n result = True\n break\n else:\n logging.info(f\"Error: Incorrect locator = {locator}\")\n except Exception as e:\n logging.error(e)\n print(f\"Exception when __wait_for_element__ : {e}\")\n\n sleep(1 - (time() - initTime))\n else:\n print(f\"Timed out. 
Element not found with {locator} : {element_tag}\")\n self.driver.implicitly_wait(DEFAULT_IMPLICIT_WAIT)\n return result", "title": "" }, { "docid": "4c793d3926be9cce92afdf46a71be49d", "score": "0.5736478", "text": "def verify_object_exists(self, locator, timeout=DEFAULT_TIMEOUT):\n find_flag = False\n\n try:\n self.driver.implicitly_wait(2)\n except Exception:\n exception = str(Exception).format()\n raise Exception(\n \"Get exception error when change selenium default wait time, error is \" + exception)\n\n try:\n for i in range(0, timeout):\n list_object = self.driver.find_elements(*locator)\n if len(list_object) > 0:\n find_flag = True\n break\n time.sleep(1)\n\n except Exception:\n exception = str(Exception).format()\n raise Exception(\n \"Can not find elements with provided locator \" + str(locator) + \", Exception error:\" + exception)\n\n\n try:\n self.driver.implicitly_wait(0)\n except Exception:\n exception = str(Exception).format()\n raise Exception(\n \"Can not find elements with provided locator \" + str(locator) + \", Exception error:\" + exception)\n\n return find_flag", "title": "" }, { "docid": "fea1665b9b995038fe14bad5e1ee1910", "score": "0.57334715", "text": "def find_clickable_element(self, xpath):\n try:\n return WebDriverWait(self.webdriver, WTF_TIMEOUT_MANAGER.SHORT).until(\n EC.element_to_be_clickable((By.XPATH, xpath)))\n except TimeoutException:\n msg = \"No element located by XPATH - '{0}'\".format(xpath)\n raise TimeoutException(msg)", "title": "" }, { "docid": "aa8dbe7cdb39d5a47776e7a791c135e9", "score": "0.5729332", "text": "def test_login_form_exists(self):\n login_form = self.browser.find_elements_by_id(\"login-form\")\n self.assertTrue(login_form != None)", "title": "" }, { "docid": "b853b820470040dcdf7e0fb0a3338aa8", "score": "0.5722861", "text": "def is_element_visible(self, *locator):\n try:\n self.selenium.find_element(*locator).is_displayed()\n return True\n except (NoSuchElementException, ElementNotVisibleException):\n return False", "title": "" }, { "docid": "c9185bfe0be871616f5fc004e2b88487", "score": "0.5715335", "text": "def find_elements(self, _type, location, step_desc=NOW()):\n try:\n if _type == 'ids':\n loc = (By.ID, location)\n elif _type == 'xpaths':\n loc = (By.XPATH, location)\n elif _type == 'names':\n loc = (By.NAME, location)\n elif _type == 'class_names':\n loc = (By.CLASS_NAME, location)\n elif _type == 'css_selectors':\n loc = (By.CSS_SELECTOR, location)\n else:\n loc = (By.ID, location)\n wait_time = int(self.cfg.get('WAIT', 'time'))\n el = WebDriverWait(self.driver, wait_time).until(EC.presence_of_all_elements_located(loc))\n logger.info(\"element: '%s' locate success, method: '%s'\" % (location, _type))\n # the function of hightlight in appium is unavailable\n # self.hightlight(el)\n return True, el\n except NoSuchElementException as e:\n logger.info(\"element: '%s' locate failure, method: '%s', error: %s\" % (location, _type, e))\n self.get_screenshot('Fail_' + step_desc)\n return False,\n except TimeoutException as e:\n logger.info(\"element: '%s' locate failure, method: '%s', error: %s\" % (location, _type, e))\n self.get_screenshot('Fail_' + step_desc)\n return False,\n except Exception as e:\n logger.info(\"element: '%s' locate failure, method: '%s', error: %s\" % (location, _type, e))\n self.get_screenshot('Fail_' + step_desc)\n return False,", "title": "" }, { "docid": "93339d1f2aa96670b326da87660ac0d4", "score": "0.5690771", "text": "def wait_for_element(driver, xpath, condition='present', attempt=5, timeout=1):\n\n result 
= False\n\n #\n if condition == 'present':\n condition_function = EC.presence_of_element_located\n elif condition == 'visible':\n condition_function = EC.visibility_of_element_located\n else:\n return result, None\n\n for iattempt in range(attempt):\n try:\n element = WebDriverWait(driver, timeout).until(\n condition_function((By.XPATH, xpath))\n )\n result = True\n break\n except TimeoutException:\n continue\n\n if result:\n return result, element\n else:\n return result, None", "title": "" }, { "docid": "75d9013bac67c3a955c094d7d13b64a1", "score": "0.56686205", "text": "def assertElement(self, html, element, *args, **kwargs):\r\n count = kwargs.pop(\"count\", 1)\r\n if isinstance(html, basestring):\r\n html = BeautifulSoup(html)\r\n actual = len(html.findAll(element, *args, **kwargs))\r\n self.assertEqual(\r\n actual,\r\n count,\r\n \"Element {0}({1}, {2}) is present {3} times, not {4}. \"\r\n \"Full HTML: {5}\".format(\r\n element, args, kwargs, actual, count, html)\r\n )", "title": "" }, { "docid": "89ac698bb2c4de9fe2eb4113f30329bb", "score": "0.5661348", "text": "def xpath(self, xpath: str, at_least_one=True, timeout: int = 0) -> Union[Element, Elements]:\n by = By.XPATH\n if at_least_one:\n self.log.step(f'py.xpath() - Find at least one element with xpath: ``{xpath}``')\n elements = self.wait(timeout).until(\n lambda x: x.find_elements(by, xpath),\n f'Could not find any elements with the CSS ``{xpath}``'\n )\n else:\n self.log.step(f'py.xpath() - Find elements with xpath (no wait): ``{xpath}``')\n elements = self.webdriver.find_elements(by, xpath)\n\n if len(elements) == 1:\n self.log.info('Only 1 element matched your xpath')\n return Element(self, elements[0], locator=(by, xpath))\n\n self.log.info(f'{len(elements)} elements matched your xpath')\n return Elements(self, elements, locator=(by, xpath))", "title": "" }, { "docid": "6a3bf8b28eab4c796b1f7e844ff39556", "score": "0.5658978", "text": "def hasLabel(html):\n label_list = html.xpath('//div[contains(@class,\"discussion-sidebar-item\")]//span[@class=\"css-truncate css-truncate-target width-fit\"]/text()')\n for label in label_list:\n if label == 'bug':\n return True\n return False", "title": "" }, { "docid": "f07962254f5cae166c64dc28743d8550", "score": "0.56467986", "text": "def wait_element_appear(MAXTIME, XPATH, browser):\n LOADING_ELEMENT_XPATH = XPATH\n try:\n WebDriverWait(browser, MAXTIME\n ).until(EC.presence_of_element_located((By.XPATH, LOADING_ELEMENT_XPATH)))\n logging.debug(\"Waited element appear\")\n return True\n except TimeoutException:\n logging.debug(\"Doesn't Wait element appear\")\n return False", "title": "" }, { "docid": "99e3a213a11235394400566840229f46", "score": "0.56364965", "text": "def wait_for_xpath(self, xpath_expr, visible=False, timeout=30):\n slept = 0.0\n interval = 0.1\n element_list = []\n while slept < timeout:\n element_list = self.xpath(xpath_expr)\n if element_list:\n if visible:\n visible_elements = [e for e in element_list if e.is_visible]\n if len(visible_elements):\n return visible_elements\n else:\n return element_list\n time.sleep(interval)\n slept += interval\n raise PageError('Timed out waiting for element at `%s`' % xpath_expr)", "title": "" }, { "docid": "4bed65d756c829825e6fecf01c70e805", "score": "0.5635294", "text": "def assertElement(self, html, element, *args, **kwargs):\n count = kwargs.pop(\"count\", 1)\n if isinstance(html, basestring):\n html = BeautifulSoup(html)\n actual = len(html.findAll(element, *args, **kwargs))\n self.assertEqual(\n actual,\n count,\n \"Element 
{0}({1}, {2}) is present {3} times, not {4}. \"\n \"Full HTML: {5}\".format(\n element, args, kwargs, actual, count, html)\n )", "title": "" }, { "docid": "55b2a7d3e61134dc0de6361b672c7b06", "score": "0.56263244", "text": "def wait_for_elements_presence_and_get(driver=driver, timeout=settings['secondary_timeout'], xpath='//div'):\n\treturn WebDriverWait(driver, timeout).until(\n\t\tEC.presence_of_all_elements_located((By.XPATH, xpath))\n\t)", "title": "" }, { "docid": "4c48bf67953f1d88282204ff6cd6b8f0", "score": "0.5619677", "text": "def at_non_exist_page(resp):\n # If there is this td, we are at the last page, specifically\n # this is the greyed-out next-page button.\n NOTICE = 'No unfiltered results in this page range. You either requested an invalid page or used too aggressive filters.'\n text = resp.html.text\n return re.search(NOTICE, text) is not None", "title": "" }, { "docid": "0a14c4cae79cca2796762764b90a0fbf", "score": "0.56101245", "text": "def test_find(self):\n page_html = str(self.app.get('/find').data)\n elem = 'Find Internships'\n self.assertIn(elem, page_html)", "title": "" }, { "docid": "872a6480130327e1cec77dd8023b85d3", "score": "0.5606078", "text": "def is_elem_visible(page_element: PageElement) -> bool:\n visible_elems = ['style', 'script', 'head', 'title', 'meta', '[document]']\n if page_element.parent.name in visible_elems:\n return False\n if isinstance(page_element, Comment):\n return False\n return True", "title": "" }, { "docid": "1101bdc1d8f70c43f07fafc1342f79cb", "score": "0.56006175", "text": "def wait_to_load_webelement(driver, search_element, search_attribute):\r\n for i in range(0, 30):\r\n try:\r\n if search_element == \"class_name\":\r\n loading_element = driver.find_element_by_class_name(search_attribute)\r\n elif search_element == \"id\":\r\n loading_element = driver.find_element_by_id(search_attribute)\r\n elif search_element == \"tag_name\":\r\n loading_element = driver.find_elements_by_tag_name(search_attribute)\r\n if loading_element.is_displayed():\r\n if loading_element.is_enabled():\r\n return True\r\n except Exception:\r\n time.sleep(1)\r\n continue", "title": "" }, { "docid": "254441c92691419046b3f9294ad3ac63", "score": "0.5598314", "text": "def find_element_and_submit(self, by=By.XPATH, value=None):\n #Catching the exception if element is not found\n try:\n self.browser.find_element(by=by, value=value).submit()\n return self.is_url_broken()\n except NoSuchElementException:\n logger.error(\"Not able to find the web element by %s having value : %s\" % (by, value))\n self.capture_screenshot()\n raise AssertionError((\"Not able to find the web element by %s having value : %s\" % (by, value)))", "title": "" }, { "docid": "3c3a1858fd78b0b3ae658d974161299d", "score": "0.5594874", "text": "def get_elem_wait_by_xpath(wait: WebDriverWait, xpath_cond: str) -> WebElement:\n return wait.until(\n EC.presence_of_element_located(\n (By.XPATH, xpath_cond)\n )\n )", "title": "" }, { "docid": "f3fb1ff64b69c18c7d495cbf7a9e2a45", "score": "0.5587785", "text": "def test_does_page_exist_results(self):\n conf_reg_helper = ConfluenceRequestHelper(confluence_request=0, pageid='123', attachment='ABCD')\n test_json = {\"type\": \"page\"}\n self.assertTrue(conf_reg_helper.does_page_exist(test_json))\n test_json = {\"type\": \"url\"}\n self.assertTrue(conf_reg_helper.does_page_exist(test_json))", "title": "" }, { "docid": "ec153d996af73c8bdf98224fa30d0b51", "score": "0.5587382", "text": "def element_present(self, locator_method, locator, wait_time=10):\n 
WebDriverWait(self.driver.instance, wait_time).until(EC.presence_of_element_located((\n locator_method, locator)))\n print(f\"✅ Element '{locator}' was present.\")", "title": "" }, { "docid": "006c3965a1416318a87483d7a3efcba5", "score": "0.5578114", "text": "def page_contains(self, value):\n log.debug(\"Check if page contains {}\".format(value))\n does_contain_text = False\n text_regex = re.compile(re.escape(value))\n contains_text = text_regex.search(self.appium_driver.page_source())\n if contains_text:\n does_contain_text = True\n return does_contain_text", "title": "" }, { "docid": "339a893c0320e76bb7b6db1f5ef72ec8", "score": "0.557379", "text": "def page_not_found(self):\n print(\"def page_not_found \"+ TimeStamp.timestamp()) #Elina 08-12-2020\n couldnt_get_page = \"Unfortunately, we couldn't find the page you were looking for. Please try again.\"\n page_not_found = self.current_page.find('div', text=reg_ex.compile(rf\"{couldnt_get_page}\"))\n return True if page_not_found else False", "title": "" }, { "docid": "e6d46236a85da47a3b79c626c15b3128", "score": "0.556649", "text": "def isElementDisplayed(self, locator=\"\", locatorType=\"id\", element=None):\n isDisplayed = False\n try:\n if locator: # This means if locator is not empty\n element = self.getElement(locator, locatorType)\n if element is not None:\n isDisplayed = element.is_displayed()\n self.log.info(\"Element is displayed\" )\n else:\n self.log.info(\"Element not displayed\")\n return isDisplayed\n except:\n self.log.info(\"Element not found\")\n return False", "title": "" }, { "docid": "89f4619996063ac579b4983be85e4eba", "score": "0.55480355", "text": "def wait_for(self, selector, timeout=2):\n end_time = time.time() + timeout\n while time.time() < end_time:\n if self.has_element(selector):\n element = self.find_by_css(selector)\n try:\n if element.visible:\n return element\n except StaleElementReferenceException:\n # May need to wait across page reload.\n pass\n raise NeverShowedUp(selector)", "title": "" }, { "docid": "db786f589db0193e721732b3c3cfe5e8", "score": "0.5500879", "text": "def test_A1_not_found(self):\r\n try:\r\n Browser.page.not_found()\r\n\r\n except selenium.common.exceptions.TimeoutException:\r\n \"\"\" 'Not found' response not found \"\"\"", "title": "" }, { "docid": "ec38108486467dfcdd69c13e043f5b4b", "score": "0.5496124", "text": "def unit_has_page(unitCode):\n return os.path.isfile('units/'+unitCode+'.html')", "title": "" }, { "docid": "4a8926d3c28dde9f58605e8357d6e436", "score": "0.5494836", "text": "def the_users_add_page_should_open(driver):\n assert wait_on_element(driver, 1, 30, '//h4[contains(.,\"Identification\")]')", "title": "" }, { "docid": "5fabddf28269c0ad661293bea028c4d2", "score": "0.54838306", "text": "def __elem_exists(self, agent, resource_type, elem, aggregated):\n context = ExecutionContext()\n params = {}\n if aggregated:\n name = elem[1]\n param_name, param_value = elem[0].split('/', 1)\n if param_name[-1] == 's':\n param_name = param_name[:-1]\n\n # Just because the aggregation returned a parameter\n # does not mean the get API takes it. 
Confirm before adding.\n if (agent.resource_type_to_discovery_info(resource_type)\n .get('methods', {}).get('get', {}).get('parameters', {})\n .get(param_name)):\n params[param_name] = param_value\n\n name = elem[1] if aggregated else elem\n try:\n agent.get_resource(context, resource_type, resource_id=name, **params)\n return True\n except HttpError as http_error:\n if http_error.resp.status == httplib.NOT_FOUND:\n return False\n if http_error.resp.status in _RETRYABLE_DELETE_HTTP_CODES:\n return True\n print 'Unexpected error while waiting for delete: {0} {1}={2}'.format(\n resource_type, name, http_error)\n return False", "title": "" }, { "docid": "9e412ad66c5cc74812bb851cbe4568fc", "score": "0.54822147", "text": "def _check_presence_of_css_selector(self, css_selector):\n\n try:\n self.source.browser.find_element_by_css_selector(css_selector)\n except NoSuchElementException:\n raise NoSuchElementException(\"Unable to find CSS selector {}.\".format(css_selector))", "title": "" }, { "docid": "7f8534778d57403747818a19cb41dd22", "score": "0.5475012", "text": "def test_node_exists():\n assert Node", "title": "" }, { "docid": "a36872ee022c6df27c175f270495a39d", "score": "0.5468958", "text": "def __contains__(self, element: Element):\n return element in self.get_elements()", "title": "" }, { "docid": "9e19fd1919e3498cfc0853ff48a7252a", "score": "0.5464233", "text": "def _reference_has_css_selector(self, css_selector):\n\n try:\n self._reference.find_element_by_css_selector(css_selector)\n except NoSuchElementException:\n return False\n else:\n return True", "title": "" }, { "docid": "9e857a372c1007545154b8f74359ae7b", "score": "0.5459874", "text": "def assertExists(self, element_locator, msg=None, wait_timeout=None):\n wait_timeout = wait_timeout or self.DEFAULT_ASSERTION_TIMEOUT\n if not test.existence_change_test(self.driver, element_locator, test_exists=True, wait_timeout=wait_timeout):\n failure_message = 'No elements located using ' + self._locator_string(element_locator)\n msg = self._formatMessage(msg, failure_message)\n raise self.failureException(msg)", "title": "" }, { "docid": "9e07a6aaddb6cc8aea869d1e20855726", "score": "0.54469746", "text": "def __send_text_exists_by_xpath(self, xpath, text, error=None, success=None, wait=False):\n\n try:\n self.browser.find_element_by_xpath(xpath).send_keys(text)\n if wait:\n sleep(self.sleep_time)\n except selenium_exception.NoSuchElementException:\n if error:\n self.error += \"ERROR: \" + error + \" [Element Not Found]\\r\\n\"\n return False\n except selenium_exception.ElementNotInteractableException:\n if error:\n self.error += \"ERROR: \" + error + \" [Element Not Interactable]\\r\\n\"\n return False\n if success:\n self.success += \"SUCCESS: \" + success + \"\\r\\n\"\n return True", "title": "" }, { "docid": "d5424f16f1e4188e00ff0de4cfb94801", "score": "0.5441264", "text": "def table_contains(element, wishlist_name, customer_id):\n rows = element.find_elements(By.TAG_NAME, \"tr\")\n row_number = -1\n for row in rows:\n row_number += 1\n # Skip the header row\n if row_number == 0:\n continue\n\n cols = row.find_elements(By.TAG_NAME, \"td\")\n logging.error(cols)\n if cols[1].text == wishlist_name and cols[2].text == customer_id:\n return True\n return False", "title": "" }, { "docid": "5c12cca545becc89d36ff2f84291f252", "score": "0.5434821", "text": "def assert_element_text(self, by_locator, element_text):\n web_element=WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(by_locator))\n return bool 
(web_element.get_attribute('textContent') == element_text)", "title": "" }, { "docid": "962d029cacd884687b566c69601ecf5e", "score": "0.5431348", "text": "def _find_webelement(root, selector_type, selector_value,\n element_name, remaining_time):\n webelement = None\n start_time = time.time()\n try:\n if selector_type == 'id':\n webelement = root.find_element_by_id(selector_value)\n elif selector_type == 'css':\n webelement = root.find_element_by_css_selector(selector_value)\n elif selector_type == 'text':\n webelement = root.find_element_by_css_selector(\"text[{}]\".format(selector_value))\n elif selector_type == 'link_text':\n webelement = root.find_element_by_link_text(selector_value)\n elif selector_type == 'partial_link_text':\n webelement = root.find_element_by_partial_link_text(selector_value)\n elif selector_type == 'name':\n webelement = root.find_element_by_name(selector_value)\n elif selector_type == 'xpath':\n webelement = root.find_element_by_xpath(selector_value)\n elif selector_type == 'tag_name':\n webelement = root.find_element_by_tag_name(selector_value)\n else:\n msg = 'Selector {0} is not a valid option'.format(selector_type)\n raise IncorrectSelectorType(msg)\n execution.logger.debug('Element found')\n except:\n time.sleep(0.5)\n end_time = time.time()\n remaining_time -= end_time - start_time\n if remaining_time > 0:\n execution.logger.debug('Element not found yet, remaining time: {}'.format(remaining_time))\n webelement = _find_webelement(root, selector_type, selector_value,\n element_name, remaining_time)\n else:\n raise ElementNotFound('Element {0} not found using selector {1}:\\'{2}\\''\n .format(element_name, selector_type, selector_value))\n \n # Use remaining time to wait until element is visible (is_displayed)\n # TODO add this as a setting\n remaining_time = remaining_time - (time.time() - start_time)\n while not webelement.is_displayed() and remaining_time > 0:\n # Element is not visible yet\n execution.logger.debug('Element still not visible, waiting')\n time.sleep(0.5)\n remaining_time = remaining_time - (time.time() - start_time)\n\n if not webelement.is_displayed():\n execution.logger.debug('Element not visible, continuing..')\n \n return webelement", "title": "" }, { "docid": "0dd9a1ff42ab4aea23575395b55a0c92", "score": "0.5424125", "text": "def _selenium_element_load_waiting(\n self, by_selector_type, selector,\n success_msg='', timeout_exception_msg=''):\n try:\n element_present = EC.visibility_of_element_located(\n (by_selector_type, selector))\n WebDriverWait(\n self.browser, settings.TIMEOUT_PAGE_LAODING).until(\n element_present)\n logger.info(success_msg)\n except TimeoutException:\n logger.error(timeout_exception_msg)\n return False\n except SoftTimeLimitExceeded as e:\n raise e\n except Exception as e:\n logger.error(e)\n return False\n\n return True", "title": "" }, { "docid": "8554daa61fcae887d8d9e9f3d411b9c6", "score": "0.54139864", "text": "def is_loaded(self):\n # Note: this just checks that the title is displayed;\n # it doesn't guaranteed that everything we expect is rendered on the\n # page, because angular fetches the data asynchronously\n\n try:\n self.find_element(*AccountAdminDashboardPageLocators.PAGE_TITLE)\n except NoSuchElementException:\n return False\n return True", "title": "" }, { "docid": "716c58935bed1bde834c90d444c41513", "score": "0.5412483", "text": "def _find_element(self, **kwargs):\n\n if kwargs.has_key('timeout'):\n self.set_implicit_wait(int(kwargs['timeout']))\n\n if kwargs.has_key('name'):\n return 
self._find_element_by_xpath(\n '//*[@text=\"{0}\" or @content-desc=\"{1}\"]'.format(kwargs['name'], kwargs['name']))\n elif kwargs.has_key('class_name'):\n return self._find_element_by_class_name(kwargs['class_name'])\n elif kwargs.has_key('id'):\n return self._find_element_by_id(kwargs['id'])\n elif kwargs.has_key('accessibility_id'):\n return self._find_element_by_accessibility_id(kwargs['accessibility_id'])\n elif kwargs.has_key('xpath'):\n return self._find_element_by_xpath(kwargs['xpath'])\n elif kwargs.has_key('element'):\n return kwargs['element']\n else:\n self.assertTrueWithScreenShot(False,\n msg=\"The element with provided args '{0}' was not found\".format(str(kwargs)),\n screenshot=True)", "title": "" }, { "docid": "ee1be970aeee75b9c9bd4713d6c8f5d7", "score": "0.5412282", "text": "def test_bmg_log_exists(self):\n logo = self.browser.find_elements_by_class_name(\"fa.fa-trophy\")\n self.assertTrue(logo != None)", "title": "" }, { "docid": "a7ce210fad178ade2589d24c03a629b5", "score": "0.54095894", "text": "def is_visible(self,by_locator):\n element=WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(by_locator))\n return bool(element)", "title": "" }, { "docid": "54e6ad2e6ca58929eaeda7f7555736b0", "score": "0.54019624", "text": "def contains(self, element): \n return element in self.elements", "title": "" }, { "docid": "06e425aea2476acf5699aba34f762082", "score": "0.5376935", "text": "def get_elem(self, xpath, single=True):\r\n if single:\r\n d = self.driver.find_element_by_xpath\r\n else:\r\n d = self.driver.find_elements_by_xpath\r\n for i in range(0, 3):\r\n try:\r\n return d(xpath)\r\n except:\r\n self.announce_exception(i + 1)\r\n return None", "title": "" }, { "docid": "cc3e87194e24b35f2a460b237a5dbe47", "score": "0.5370379", "text": "def verify_input_element(locator, anchor='1', timeout=0, index=1, **kwargs):\n input_element = input_.get_input_elements_from_all_documents(\n locator, anchor, timeout=timeout, index=index, **kwargs)\n if input_element:\n return", "title": "" }, { "docid": "f281a1c58a1eaf2536ffba932d874cff", "score": "0.5362437", "text": "def test_does_page_exist_type(self):\n conf_reg_helper = ConfluenceRequestHelper(confluence_request=0, pageid='123', attachment='ABCD')\n test_json = {\"results\": [{\"type\": \"ABCDEFG\"}]}\n self.assertTrue(conf_reg_helper.does_page_exist(test_json))", "title": "" }, { "docid": "6ed6d08fe71e0e94ff842ce124ff22f8", "score": "0.53591317", "text": "def is_report_page_loaded_properly(self, report_title):\n page_title_locator = (By.XPATH, \"//span[contains(@id, 'ReportName_') and contains(text(), '%s')]\" % report_title)\n return self.is_element_present(page_title_locator)", "title": "" }, { "docid": "158a59ad8c380dde3d4f4a07169fa467", "score": "0.53485036", "text": "def find_element(self, locator):\n try:\n # TODO: test the idea of using the self.driver this deep\n # to refresh DOM elements in the self.element\n if self.element:\n return self.element.find_element(*locator)\n except NoSuchElementException:\n return None", "title": "" } ]
fac0c73ff53d4aabbc7b5486460264a9
Edits all constants dependent on TEST_DIR.
[ { "docid": "f837724e3253e8bbc4d88dd36f79bc34", "score": "0.0", "text": "def set_save_dir(save_name):\n\n global SAVE_DIR, SAVE_NAME, MODEL_SAVE_DIR, SUMMARY_SAVE_DIR, RES_SAVE_DIR\n\n SAVE_NAME = save_name\n MODEL_SAVE_DIR = get_dir(os.path.join(SAVE_DIR, 'Models/', SAVE_NAME))\n SUMMARY_SAVE_DIR = get_dir(os.path.join(SAVE_DIR, 'Summaries/', SAVE_NAME))\n RES_SAVE_DIR = get_dir(os.path.join(SAVE_DIR, 'Results/', SAVE_NAME))", "title": "" } ]
[ { "docid": "f8af1f1f9c72bf95108b654dee7cefb3", "score": "0.6374559", "text": "def setUp(self):\n super(TestConstants, self).setUp()", "title": "" }, { "docid": "fbc5782d868b60b19dec637d5154487a", "score": "0.5857609", "text": "def _patch_main_src(project_dir, placeholder, value):\n path = os.path.join(project_dir, \"src\", \"integration_test.cc\")\n _patch_file(path, placeholder, value)", "title": "" }, { "docid": "926edde0d15ffc946f6bdbbd561fe550", "score": "0.5544705", "text": "def setUp(self):\n self.test_path = \\\n '/mnt/S/Some/Path/to/a/file/with/numbers/file.0010.exr'", "title": "" }, { "docid": "006d0cf4858745bd8dcd1408857a40ff", "score": "0.55214983", "text": "def setUp(self):\n self.test_path =\\\n os.path.abspath(os.path.join(os.sep,'mnt', 'S', 'Some','Path','to','a','file','with','numbers','file.0010.exr'))", "title": "" }, { "docid": "117fc984b24cffd1560d4ec3ea0ff4be", "score": "0.5487263", "text": "def test_environ():\n testDir = os.path.dirname(__file__)\n mainDir = os.path.dirname(os.path.dirname(os.path.dirname(testDir)))\n rootDir = os.path.join(mainDir, \"src\", \"python\")\n assert rootDir is not None\n assert os.path.isdir(rootDir)\n sys.path.insert(0,rootDir)", "title": "" }, { "docid": "198b979dbd3cb1c72f3bb83619469940", "score": "0.54659396", "text": "def test_update_custom_theme_settings(self):\n pass", "title": "" }, { "docid": "676b561646794aa552f2802193cc58d1", "score": "0.5430694", "text": "def test_config(self):\n self.do_test_config()", "title": "" }, { "docid": "870d15e487640d93b866baa658af9f5e", "score": "0.5370025", "text": "def rebase_paths(cls, config, base_path):\r\n for name in ('gettext_dir', 'resource_dir'):\r\n value = getattr(config, name, None)\r\n if value is not None:\r\n setattr(config, name, path.normpath(path.join(base_path, value)))", "title": "" }, { "docid": "45f5b11b2b16a7c2ed3baf0f09a8ecde", "score": "0.53533", "text": "def check_constants():\n global BASE_URL, BASE_PATH\n if BASE_URL.endswith('/'):\n BASE_URL = BASE_URL[:-1]\n if BASE_PATH.endswith('/'):\n BASE_PATH = BASE_PATH[:-1]", "title": "" }, { "docid": "54b0babb2a940ed2e33650aa2801f36e", "score": "0.53252125", "text": "def config_test(): # type: ignore", "title": "" }, { "docid": "0d4700ab1c91cfab4301c14aed2eb379", "score": "0.5315792", "text": "def setUp(self):\n main.app.config['DATA_CSV'] = TEST_DATA_CSV\n main.app.config['USERS_XML'] = TEST_USERS_XML", "title": "" }, { "docid": "996087468f01d8a2b8098d3d61fc2151", "score": "0.531258", "text": "def edit_test_name_conf(self):\n\n for line in fileinput.input('conf.py', inplace=True):\n # inside this loop the STDOUT will be redirected to the file\n print(line.replace('{Test}', '{' + self.test_name + '}'), end='')", "title": "" }, { "docid": "adb400472c428fe5321870eb67c5eddf", "score": "0.53124243", "text": "def source_test_environ():\n log.debug('source_test_environ()')\n root_dir = path.dirname(path.realpath(oc.__file__))\n config_path = path.abspath(path.join(root_dir, oc.ENVIRONMENT_PATH))\n parser = configparser.ConfigParser()\n parser.read(config_path)\n for k, v in parser['dev'].items():\n os.environ[k] = v", "title": "" }, { "docid": "211df47405048b742c839d2175170e5d", "score": "0.5311669", "text": "def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()\n self.base_required_keys = [\n 'workspace_dir',\n 'lulc_lookup_uri',\n 'lulc_transition_matrix_uri',\n 'carbon_pool_initial_uri',\n 'carbon_pool_transient_uri',\n 'lulc_baseline_map_uri',\n 'lulc_baseline_year',\n ]", "title": "" }, { "docid": 
"5e3bc62e9880c2e1f3113489d5285e69", "score": "0.5298334", "text": "def setUp(self):\n\n # Save the existing STYLEPATH (if there is one)\n self.stylepath = os.environ.get(\"STYLEPATH\", None)", "title": "" }, { "docid": "f09064ea350bdfb82957fd405e0f2856", "score": "0.52904224", "text": "def constants_use_custom(v):\n global _constants_use_custom\n _constants_use_custom = v", "title": "" }, { "docid": "7771e7fc2f090163db6e1b7692079760", "score": "0.5284119", "text": "def setup_testenv():\n get_tshark_status()\n # If testing from ./tests, change to root directory (useful in PyCharm)\n if os.getcwd().endswith('tests'):\n os.chdir('..')", "title": "" }, { "docid": "57bc7c53b45bfbec78e06c06230c12cf", "score": "0.5269316", "text": "def setUp(self):\n testutil.HandlerTestBase.setUp(self)\n self.old_environ = os.environ.copy()\n os.environ['PATH_INFO'] = '/foobar_path'", "title": "" }, { "docid": "e488823a8d84ca0a5f7c433e1806b388", "score": "0.5269227", "text": "def tearDown(self):\n super(TestConstants, self).tearDown()", "title": "" }, { "docid": "97c990f4405373523fe2d3ac1b3e5306", "score": "0.5260858", "text": "def test_path_rebase():\r\n file = StringIO('''--gettext ../gettext\\n--android ../res''')\r\n file.name = '/opt/proj/android/shared/.config'\r\n c = read_config(file)\r\n print c.gettext_dir\r\n assert c.gettext_dir == '/opt/proj/android/gettext'\r\n assert c.resource_dir == '/opt/proj/android/res'", "title": "" }, { "docid": "a10714653a7ceb06a271353099be8dd1", "score": "0.52514654", "text": "def reconfigure_environment(robot_file: Path) -> None:\n\n # Remove ourselves from the path and insert the competitor code\n sys.path.pop()\n sys.path.insert(0, str(ROOT / \"modules\"))\n sys.path.insert(0, str(robot_file.parent))\n\n os.chdir(str(robot_file.parent))", "title": "" }, { "docid": "b7fd3aef4493d245d8d09dd1a84ab716", "score": "0.52435184", "text": "def test_expand_paths_vars(test_path, expected, monkeypatch):\n monkeypatch.setenv('TEST_PATH', '/test/path')\n assert utils.expand_paths_vars([test_path]) == [expected]", "title": "" }, { "docid": "06e4cb0ac0d39a8afa4f82fcac11f91d", "score": "0.51998633", "text": "def setUp(self):\n super(BaseTestCase, self).setUp()\n self._original_flag_settings = dict((key, getattr(flag_settings, key))\n for key in flag_settings.__all__)\n for key in flag_settings.__all__:\n setattr(flag_settings, key, flag_settings._DEFAULTS[key])", "title": "" }, { "docid": "8bef31afffe60cf6b3f8fab90eaa8b00", "score": "0.5198444", "text": "def test_only_set_on_env(self):\n # DEV: We have to set the environment variable first, since they get loaded into the class on definition\n os.environ['NEW_SETTING'] = 'set_by_env'\n\n TestConfiguration = self._get_test_configuration()\n self.assertEqual(TestConfiguration.NEW_SETTING, 'set_by_env')", "title": "" }, { "docid": "f4934260b876a20a25a7bd9f3f9e4b7d", "score": "0.518901", "text": "def setUp(self):\n self.config_file = os.path.join(os.path.dirname(__file__),\n \"sample_cfg.txt\")\n super(TestOVFInjectConfig, self).setUp()", "title": "" }, { "docid": "167eceff07b7277b0b902f92a14a27e5", "score": "0.51639694", "text": "def test_set_constant(self):\n self._test_constant_common(set)", "title": "" }, { "docid": "3b0b47fa24160e187d4feab7721dca50", "score": "0.5156388", "text": "def temp_testing_env(test_client, temp_cache_env, monkeypatch):\n for k, v in test_client.items():\n monkeypatch.setenv(k, v)", "title": "" }, { "docid": "d5d035a0627d2083c4ff7c0b57d0d0b8", "score": "0.5148852", "text": "def 
test_otoroshi_controllers_adminapi_global_config_controller_update_global_config(self):\n pass", "title": "" }, { "docid": "456e0387636d1018f16001a5fc11d728", "score": "0.51474917", "text": "def test_dir(cls):\n return join(cls.root_dir, MATURITY_TEST)", "title": "" }, { "docid": "77d490628e1cd921097b5cd41982989c", "score": "0.51220614", "text": "def pytest_modify_app_settings(settings):\n pass", "title": "" }, { "docid": "bed48f8deacaacd35b7c30d390c3a878", "score": "0.51192766", "text": "def config_files(test_folder):\n base_dir = test_folder.parent\n return [base_dir.joinpath(\"config.toml\"), base_dir.joinpath(\"env\", \"config.toml\")]", "title": "" }, { "docid": "f278fb5b5e52923e6edf138120055c9e", "score": "0.5115623", "text": "def test_env_var_2():", "title": "" }, { "docid": "05f80efbd5861a0be1377220fef4c15d", "score": "0.51103663", "text": "def test_update_folder(self):\n pass", "title": "" }, { "docid": "94286d3ab8d7e0eedac08b4576127c08", "score": "0.5080099", "text": "def test_env_var_1():", "title": "" }, { "docid": "42a394f1588cca8647bb8789f3f52db8", "score": "0.50796044", "text": "def test_update_dedupe_settings(self):\n pass", "title": "" }, { "docid": "4576de8de93bf9cf1daee3b86d636aba", "score": "0.5079286", "text": "def test_put_settings(self):\n pass", "title": "" }, { "docid": "beaa5e6100938d01863af2b52646f06e", "score": "0.5076165", "text": "def test_env_prefix(self):\n # DEV: We have to set the environment variable first, since they get loaded into the class on definition\n os.environ['TEST_DEFAULT_SETTING'] = 'set_by_env'\n\n TestConfiguration = self._get_test_configuration(env_prefix='TEST_')\n self.assertEqual(TestConfiguration.DEFAULT_SETTING, 'set_by_env')", "title": "" }, { "docid": "ef568ad1d110b907957ac840d8212acb", "score": "0.5074793", "text": "def edit(ctx):\n edit_files(ctx, None, \"config\")", "title": "" }, { "docid": "e42b1a71e97c1f49e1af9c2b27d899a3", "score": "0.50611037", "text": "def tearDown(self):\r\n shutil.rmtree(self.tmp_dir, ignore_errors=True)\r\n pynag.Model.ObjectDefinition.objects.get_all()\r\n pynag.Model.config._edit_static_file(attribute='cfg_dir',old_value=self.tmp_dir,new_value=None)", "title": "" }, { "docid": "61e631a37cad0920699c9d5bc7292fcb", "score": "0.5060482", "text": "def test_write_all_relative_flags_with_content(self):\n hfile = StringIO()\n flag = 'flag'\n path = 'path_to_rel'\n tag = '_CMAKE_C_FLAGS'\n clion_project_file_gen._write_all_relative_file_path_flags(\n hfile, {flag: path}, tag)\n hfile.seek(0)\n content = hfile.read()\n expected = clion_project_file_gen._SET_RELATIVE_PATH.format(\n tag, clion_project_file_gen._add_dollar_sign(tag), flag,\n clion_project_file_gen._build_cmake_path(path))\n self.assertEqual(content, expected)", "title": "" }, { "docid": "2c398f6029f650f403f01fc5314a7eb7", "score": "0.5056233", "text": "def update_pathvals(config):\n if config[\"pathvals\"] is None:\n config[\"pathvals\"] = build_pathvals_fname(config)\n config[\"pathvals_zenodo\"] = True\n\n return config", "title": "" }, { "docid": "c97d4e21b3d3a4a66c473a0d4624d6c3", "score": "0.5056064", "text": "def setUp(self):\r\n self.tmp_dir = tempfile.mkdtemp() # Will be deleted after test runs\r\n\r\n os.chdir(tests_dir)\r\n os.chdir('dataset01')\r\n pynag.Model.cfg_file = \"./nagios/nagios.cfg\"\r\n pynag.Model.config = None\r\n pynag.Model.pynag_directory = self.tmp_dir\r\n pynag.Model.ObjectDefinition.objects.get_all()\r\n pynag.Model.config._edit_static_file(attribute='cfg_dir', new_value=self.tmp_dir)", "title": "" }, { "docid": 
"13a362b414fcb2cbb51b092374b042fb", "score": "0.505048", "text": "def update_resources(self, test):\n pass", "title": "" }, { "docid": "01a0c5626fb8f407fdcaaedb9a129a75", "score": "0.50455314", "text": "def setUp(self):\n self.val_a = 1\n self.val_b = 1", "title": "" }, { "docid": "5758b978af6ead72bec3677a531f946f", "score": "0.5040186", "text": "def test_prefix(self):\n return join(self.build_folder, '_t_env')", "title": "" }, { "docid": "c427b7fd1aad3d37202c36e07f1b2172", "score": "0.504013", "text": "def setUp(self):\n\n for var in ENV:\n setattr(ENV, var, None)", "title": "" }, { "docid": "f30bc678c4da6b5424574c664e2eab9c", "score": "0.5037183", "text": "def setUp(self):\r\n self.workspace_dir = tempfile.mkdtemp()", "title": "" }, { "docid": "f30bc678c4da6b5424574c664e2eab9c", "score": "0.5037183", "text": "def setUp(self):\r\n self.workspace_dir = tempfile.mkdtemp()", "title": "" }, { "docid": "f30bc678c4da6b5424574c664e2eab9c", "score": "0.5037183", "text": "def setUp(self):\r\n self.workspace_dir = tempfile.mkdtemp()", "title": "" }, { "docid": "e4c9353d22d2e696cf76371716e5fcaa", "score": "0.50354147", "text": "def test_setting_directory_using_application_env_variable(\n self,\n monkeypatch,\n ):\n monkeypatch.setattr(\n os,\n 'environ',\n {\n 'ASTRALITY_CONFIG_HOME': '/test/dir',\n 'XDG_CONFIG_HOME': '/xdg/dir',\n },\n )\n assert resolve_config_directory() == Path('/test/dir')", "title": "" }, { "docid": "8a42ef79ba7d402a5a0a98fe174b1756", "score": "0.5034331", "text": "def test_environment_variables(self):\n directory = tempfile.mkdtemp(\n prefix=\"rez_pip_boy_\", suffix=\"_test_environment_variables\"\n )\n\n if platform.system() == \"Windows\":\n os.environ[\"STUFF\"] = \"C:\"\n directory = (os.sep).join(directory.split(os.sep)[1:])\n directory = os.path.join(\"%STUFF%\", directory)\n else:\n os.environ[\"STUFF\"] = \"~\"\n directory = \"$STUFF\" + directory\n\n _run_command(\n \"rez_pip_boy --install six==1.14.0 --python-version=2.7 -- {directory}\".format(\n directory=directory\n )\n )\n\n atexit.register(\n functools.partial(\n shutil.rmtree, os.path.expanduser(os.path.expandvars(directory))\n )\n )\n\n source_directory = os.path.join(directory, \"six\", \"1.14.0\")\n self._verify_source_package(source_directory, [[\"python-2.7\"]])", "title": "" }, { "docid": "7d414ba869600f2314dc0ca293a7aafb", "score": "0.50328016", "text": "def setUp(self):\n self.test_points = {}\n for test_file in glob(TEST_FILES_PATH + \"tests/*.py\"):\n env = {}\n with open(test_file) as f:\n exec(f.read(), env)\n self.test_points[env['test']['name']] = env['test']['points']\n return super().setUp()", "title": "" }, { "docid": "52a0239c2e8041248269e6264ac3dfe7", "score": "0.5015121", "text": "def setUp(self):\n # add var/function/class/.. 
to globals:\n # self.scope[\"add\"] = lambda x: x+1\n # exec setup test script:\n # self.exec_other_script(\"setup.air\")\n # set custom parameter in Settings:\n # ST.THRESHOLD = 0.75\n super(AndroidCase, self).setUp()", "title": "" }, { "docid": "480a8fed499240fad74a9ce85ecbe0fb", "score": "0.5014918", "text": "def setUp(self):\n # Setup of outdated tests:", "title": "" }, { "docid": "60d48bc929f62dbc90231b8581dbc970", "score": "0.5004683", "text": "def setUp(self):\n assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None)\n self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp)\n self.lib_path = '/'", "title": "" }, { "docid": "e6798b02a652b3dcf9b4e79f0ac88bfb", "score": "0.50026256", "text": "def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()", "title": "" }, { "docid": "172a1f96b69f76b179fa10f6325b6fee", "score": "0.5001571", "text": "def _update_env_vars(base_env: Dict, test_env: Dict) -> Dict:\n conflict = base_env.keys() & test_env.keys()\n if conflict:\n raise Exception(\n f\"The following environment variables cannot be overwritten for this test: {', '.join(conflict)}\"\n )\n return {**base_env, **test_env}", "title": "" }, { "docid": "85bfc234e44dcdeb9e43aa90d360263f", "score": "0.49924907", "text": "def fix_paths(self, maindir):\n # We override any modification that could have been made in\n # .advenerc. Rationale: if the .advenerc was really correct, it\n # would have set the correct paths in the first place.\n print \"Overriding 'resources', 'locale', 'advene' and 'web' config paths\"\n self.path['resources']=os.path.sep.join((maindir, 'share'))\n self.path['locale']=os.path.sep.join( (maindir, 'locale') )\n self.path['web']=os.path.sep.join((maindir, 'share', 'web'))\n self.path['advene']=maindir\n\n if not os.path.exists(self.path['shotdetect']):\n if self.os == 'win32':\n sdname='shotdetect.exe'\n else:\n sdname='shotdetect'\n sd=find_in_path(sdname)\n if sd is not None:\n self.path['shotdetect']=sd\n else:\n sd=self.advenefile(sdname, 'resources')\n if os.path.exists(sd):\n self.path['shotdetect']=sd\n\n #config.data.path['plugins']=os.path.sep.join( (maindir, 'vlcplugins') )", "title": "" }, { "docid": "468237c4b77cfcc249d818e9025af019", "score": "0.4989056", "text": "def test_build_tests (self):\n pass", "title": "" }, { "docid": "bc7371e960d34e2e4d800deb28fb4775", "score": "0.49861464", "text": "def test_directory_custom(self):\n self.env.directory = '/tmp'\n assert Bundle('foo').get_files(self.env) == ['/tmp/foo']\n # We do not recognize references to modules.\n assert Bundle('module/bar').get_files(self.env) == ['/tmp/module/bar']", "title": "" }, { "docid": "27f0aa582a68b5a17fceaac1bc60b3d9", "score": "0.4974166", "text": "def test_settings(self):\n folder = join(SCAFFOLDINGS_DIR, \"settings\")\n commands = (\n # Odoo should install\n (\"--stop-after-init\",),\n # Odoo settings work\n (\"./custom/scripts/test_settings.py\",),\n )\n if \"11.0\" in ODOO_VERSIONS:\n commands += (\n # Check Odoo settings using python-odoo-shell, which is available\n # only for Odoo 9-11 (for 8 too, but it had no built-in shell)\n (\"./custom/scripts/test_settings_python_odoo_shell.py\",),\n )\n commands += (\n # DB was created with the correct language\n (\n \"bash\",\n \"-xc\",\n \"\"\"test \"$(psql -Atqc \"SELECT code FROM res_lang\n WHERE active = TRUE\")\" == es_ES\"\"\",\n ),\n # If `preparedb` is executed, we should have `report.url` set\n (\"preparedb\",),\n (\"./custom/scripts/test_ir_config_parameters.py\",),\n )\n for sub_env in 
matrix():\n self.compose_test(folder, sub_env, *commands)", "title": "" }, { "docid": "d00aafbd0e5b5c3c1ba7ad3a31c753bc", "score": "0.49588907", "text": "def update_resources(self, test):", "title": "" }, { "docid": "ad1fd1a007a4bc1ceb1e3101ba3726de", "score": "0.49565864", "text": "def test_override_from_env(self):\n # DEV: We have to set the environment variable first, since they get loaded into the class on definition\n os.environ['DEFAULT_SETTING'] = 'set_by_env'\n\n TestConfiguration = self._get_test_configuration()\n self.assertEqual(TestConfiguration.DEFAULT_SETTING, 'set_by_env')", "title": "" }, { "docid": "611e6e9a581329d28df5c1c384eaa74d", "score": "0.49512306", "text": "def testincludemenull(config):\n pass", "title": "" }, { "docid": "dae2c5fee481cbac009d31f9bad83baa", "score": "0.4944041", "text": "def runTest(self):\r\n\r\n # Confirm default value.\r\n self._tester.go_to_admin(\"Basic Settings\")\r\n tc.find(r'<option value=\"WikiModule\" selected=\"selected\">'\r\n r'WikiModule</option>')\r\n tc.go(self._tester.url)\r\n tc.find(\"Welcome to Trac\")\r\n\r\n # Set to another valid default handler.\r\n self._tester.go_to_admin(\"Basic Settings\")\r\n tc.formvalue('modbasic', 'default_handler', 'TimelineModule')\r\n tc.submit()\r\n tc.find(\"Your changes have been saved.\")\r\n tc.find(r'<option value=\"TimelineModule\" selected=\"selected\">'\r\n r'TimelineModule</option>')\r\n tc.go(self._tester.url)\r\n tc.find(r'<h1>Timeline</h1>')\r\n\r\n # Set to valid disabled default handler.\r\n try:\r\n self._testenv.set_config('components',\r\n 'trac.timeline.web_ui.TimelineModule',\r\n 'disabled')\r\n self._tester.go_to_admin(\"Basic Settings\")\r\n tc.find(r'<option value=\"TimelineModule\">TimelineModule</option>')\r\n tc.find(r'<span class=\"hint\">TimelineModule is not a valid '\r\n r'IRequestHandler or is not enabled.</span>')\r\n tc.go(self._tester.url)\r\n tc.find(r'<h1>Configuration Error</h1>')\r\n tc.find(r'Cannot find an implementation of the '\r\n r'<code>IRequestHandler</code> interface named '\r\n r'<code>TimelineModule</code>')\r\n finally:\r\n self._testenv.remove_config('components',\r\n 'trac.timeline.web_ui.timelinemodule')\r\n\r\n # Set to invalid default handler.\r\n try:\r\n self._testenv.set_config('trac', 'default_handler',\r\n 'BatchModifyModule')\r\n self._tester.go_to_admin(\"Basic Settings\")\r\n tc.find(r'<option value=\"BatchModifyModule\">BatchModifyModule'\r\n r'</option>')\r\n tc.find(r'<span class=\"hint\">BatchModifyModule is not a valid '\r\n r'IRequestHandler or is not enabled.</span>')\r\n tc.go(self._tester.url)\r\n tc.find(r'<h1>Configuration Error</h1>')\r\n tc.find(r'<code>BatchModifyModule</code> is not a valid default '\r\n r'handler.')\r\n finally:\r\n self._testenv.set_config('trac', 'default_handler', 'WikiModule')", "title": "" }, { "docid": "b19a1777b089dc5b1f187b28bb8a9b77", "score": "0.49398404", "text": "def test_otoroshi_controllers_adminapi_global_config_controller_patch_global_config(self):\n pass", "title": "" }, { "docid": "67cc02fb2979674457d76455e9e77125", "score": "0.49250656", "text": "def test_expand_path_vars(monkeypatch):\n test_path = '/test/path'\n monkeypatch.setenv('TEST_PATH', test_path)\n assert utils.expand_path_vars('~') == os.path.expanduser('~')\n assert utils.expand_path_vars('$TEST_PATH') == test_path", "title": "" }, { "docid": "1aee4e13a296e868457c0e3dab5ddd2e", "score": "0.4922943", "text": "def setUp(self):\n\n self.path = 'media/style.css'\n self.html_files = set(['html.py'])\n for template in 
glob.glob('templates/*.html'):\n self.html_files.add(template)", "title": "" }, { "docid": "5d0a3043cd61ec1d1747f3e37dcc5e23", "score": "0.49214402", "text": "def setUp(self):\n self._out_dir = os.getcwd()", "title": "" }, { "docid": "497bd6c2aaf5bce99f133cd37f66adc8", "score": "0.4908257", "text": "def test_work_directory_config(self):\n\n os.chdir(get_helper_directory())\n cli.add_config('config.py')\n self.assertEqual(general_config.hosts, config.general_config.hosts)\n os.chdir(work_dir)", "title": "" }, { "docid": "5d0fcc034f80d98726e5c188b066fcc7", "score": "0.49069148", "text": "def pre_test_commands():\n # Set an indicator that a test is running, so we can set paths differently.\n env.HOUDINI_TOOLBOX_TESTING = True\n\n if test.name == \"unit\":\n env.HOUDINI_DSO_PATH.prepend(\"{root}/houdini/dso\")\n env.HOUDINI_OTLSCAN_PATH.prepend(\"{root}/houdini/otls\")\n\n # When doing unit tests we need to set the TOOLBAR_PATH variable to point to the folder\n # containing shelf files so that we can access and run tests against them.\n env.TOOLBAR_PATH = f\"{this.root}/houdini/toolbar\"", "title": "" }, { "docid": "d5886bcdb47f794bb9201d1bb96c69fd", "score": "0.4903894", "text": "def setUp(self):\n # pylint: disable=no-member\n self.setUpPyfakefs()\n\n # pyhcl automatically writes \"parsetab.dat\" in its site-package path.\n for path in sys.path:\n if path.endswith('site-packages'):\n self.fs.makedirs(os.path.join(path, 'hcl'))\n\n # Create variables.tf file (and terraform/ directory).\n self.fs.create_file(\n VARIABLES_FILE,\n contents='\\n'.join([\n 'variable \"aws_account_id\" {}',\n 'variable \"aws_region\" {}',\n 'variable \"name_prefix\" {}',\n 'variable \"enable_carbon_black_downloader\" {}',\n 'variable \"carbon_black_url\" {}',\n 'variable \"encrypted_carbon_black_api_token\" {}'\n ])\n )\n\n # Create terraform.tfvars file.\n self._write_config()", "title": "" }, { "docid": "6539f28f657dc3ee42e896de00e59862", "score": "0.4900867", "text": "def test_write_my_settings(monkeypatch, tmp_path, my_settings):\n monkeypatch.setattr(my_code, \"MY_SETTINGS_PATH\", tmp_path / \".my_fake_settings\")\n my_code.write_my_settings(my_settings)\n retrieved_settings = eval(my_code.MY_SETTINGS_PATH.read_text())\n assert retrieved_settings == my_settings", "title": "" }, { "docid": "2b21ac15bbec94ba8b68943be02c75a9", "score": "0.49005377", "text": "def test_add_personal_alias_file_name_to_settings(self):\n print('--------------->', my_import('BB_HOME') )", "title": "" }, { "docid": "248f7492f849e06551a5a65227c33493", "score": "0.48945534", "text": "def setUpClass(cls):\n # Change working directory to test location\n os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n # Use internal Aquaveo data directory to test protected models.\n config['pre_existing_data_dir'] = WINDOWS_CI_TEST_DATA_DIR", "title": "" }, { "docid": "5f1f904c32297f44ee6b3782b88eceb4", "score": "0.48921716", "text": "def test_ci():\n test_type = os.environ.get('TEST_TYPE', '')\n if test_type == \"os-independent\":\n call_task('lint')\n else:\n call_task('test')", "title": "" }, { "docid": "d47a831fa36a95827f9a859fbca98fea", "score": "0.4891144", "text": "def _patch_xcschemes(project_dir, placeholder, value):\n schemes = [\"integration_test.xcscheme\", \"integration_test_tvos.xcscheme\"]\n for scheme in schemes:\n path = os.path.join(\n project_dir,\n \"integration_test.xcodeproj\",\n \"xcshareddata\",\n \"xcschemes\",\n scheme)\n _patch_file(path, placeholder, value)", "title": "" }, { "docid": 
"5752224e8328c6671c346a2540c7859c", "score": "0.4886923", "text": "def setUp(self):\n test_env_setup()", "title": "" }, { "docid": "175a9ded8defbddcaa9f760fc97cda01", "score": "0.48753557", "text": "def tearDown(self):\n for key, value in self._original_flag_settings.items():\n setattr(flag_settings, key, value)\n super(BaseTestCase, self).tearDown()", "title": "" }, { "docid": "bc8a499f0c6d9946d87b622715f2c2b1", "score": "0.48705027", "text": "def constant_test(self, *args, **kwargs):\n import constant\n print(constant.PI)\n print(constant.GRAVITY)", "title": "" }, { "docid": "0974f3676777bbf703e043a5111871e6", "score": "0.48668554", "text": "def setUp(self):\n import os\n self.d7dname = os.path.join(os.path.dirname(__file__),\n \"Example_Drive01.d7d\")", "title": "" }, { "docid": "1b93b169676be1a60a06abf5af95f78d", "score": "0.48617452", "text": "def test_config_file():\n return os.path.join(os.path.dirname(__file__), \"test.conf\")", "title": "" }, { "docid": "51b64791198c7d63d26c7e10e947217e", "score": "0.4859707", "text": "def reformat_test_classes(config: Dict[str, Any], session_directory: Path) -> None:\n test_directory: Path = session_directory / \"solutions/correct/test\"\n test_classes = config.get(\"junit\").get(\"assessableTestClasses\")\n java_paths = []\n\n for test_class in test_classes:\n matches = list(test_directory.glob(f\"**/{test_class}\"))\n if not matches:\n raise FileNotFoundError\n match = matches[0]\n text = f\"{'.'.join(match.parts[5:])}\".replace(\".java\", \"\")\n java_paths.append(text)\n\n config[\"junit\"][\"assessableTestClasses\"] = java_paths", "title": "" }, { "docid": "fce663f17527b09533b6fa4bdb516b4d", "score": "0.48594043", "text": "def before_all(context):\n context.test_dir = tempfile.mkdtemp()", "title": "" }, { "docid": "1df57eb7f6c6f2712debcc484e3347d8", "score": "0.48582196", "text": "def test_update_project(self):\n pass", "title": "" }, { "docid": "730d75cb618d6488ff4fc8973b98024c", "score": "0.48519835", "text": "def setUpModule():\n setUpAll()", "title": "" }, { "docid": "f71ce2a8333144df29b6896092990239", "score": "0.4848907", "text": "def set_test_dir(directory):\n global TEST_FOLDER, TEST_IMAGES_FOLDERS, TEST_LABELS, MULTI_SCALES\n\n # check directory\n assert os.path.isdir(directory), '{} is not a directory!'.format(directory)\n\n # check whether there is a Label.csv file in this directory or not\n new_test_labels = os.path.join(directory, 'Label.csv')\n assert os.path.exists(new_test_labels), 'There is not label file {}!'.format(new_test_labels)\n\n # check whether there is scale folders in this directory or not\n new_scale_folders = []\n for scale in MULTI_SCALES:\n scale_folder = 'Image_' + str(scale) + 'x' + str(scale)\n new_scale_folder = os.path.join(directory, scale_folder)\n assert os.path.exists(new_scale_folder), '{} is not a directory!'.format(new_scale_folder)\n new_scale_folders.append(new_scale_folder)\n\n TEST_FOLDER = directory\n TEST_IMAGES_FOLDERS = new_scale_folders\n TEST_LABELS = new_test_labels", "title": "" }, { "docid": "009866694bb5e27345d47355d040de14", "score": "0.48472142", "text": "def test_change_attribute(self):\n pass", "title": "" }, { "docid": "5dc38e69888fd2d44881897b7d7a5d29", "score": "0.4841835", "text": "def test_multiple_conf_py(\n self, checkout_path, get_conf_py_path, _, get_config_params, docs_dir\n ):\n\n tmp_docs_dir = py.path.local(tempfile.mkdtemp())\n tmp_docs_dir.join('conf.py').write('')\n tmp_docs_dir.join('test').mkdir().join('conf.py').write('')\n docs_dir.return_value = 
str(tmp_docs_dir)\n checkout_path.return_value = str(tmp_docs_dir)\n get_config_params.return_value = {}\n get_conf_py_path.side_effect = ProjectConfigurationError\n python_env = Virtualenv(\n version=self.version,\n build_env=self.build_env,\n config=None,\n )\n base_sphinx = BaseSphinx(\n build_env=self.build_env,\n python_env=python_env,\n )\n with pytest.raises(ProjectConfigurationError):\n with override_settings(DOCROOT=tmp_docs_dir):\n base_sphinx.append_conf()", "title": "" }, { "docid": "1df45c533ca18b311aee5c77f8254519", "score": "0.48413974", "text": "def _patch_reztoolsconfig():\n for member in dir(reztoolsconfig):\n if member.startswith(\"__\"):\n continue\n\n setattr(reztoolsconfig, \"_%s\" % member,\n getattr(reztoolsconfig, member))", "title": "" }, { "docid": "904716a6ac2d72f262ee570ad98ba618", "score": "0.48351654", "text": "def test_update_project_brief(self):\n pass", "title": "" }, { "docid": "df4f3875eb9c5b8b94fce4c1cbfb3ddd", "score": "0.4833237", "text": "def local(monkeypatch: MonkeyPatch):\n monkeypatch.setattr('fastgenomics.common.DEFAULT_APP_DIR', str(APP_DIR))\n monkeypatch.setattr('fastgenomics.common.DEFAULT_DATA_ROOT', str(DATA_ROOT))\n monkeypatch.setattr('fastgenomics.common._PATHS', local_paths)\n monkeypatch.setattr('fastgenomics.common._PARAMETERS', None)", "title": "" }, { "docid": "268209a3fc5d0028b889ac5c9f234d8c", "score": "0.4823046", "text": "def constants(self):\n pass", "title": "" }, { "docid": "ac0c996266f51e955e70903b3a67921d", "score": "0.4822324", "text": "def test_otoroshi_controllers_adminapi_global_config_controller_global_config(self):\n pass", "title": "" }, { "docid": "ed09ad849e82343d1032cfbd139da764", "score": "0.482176", "text": "def set_up(self):\n self.dut.to_base_dir()\n self.tester.to_base_dir()", "title": "" }, { "docid": "4722b1a96677c8701c200f932d7c13c8", "score": "0.48212403", "text": "def test_base_modules_regex(pyi_builder):\n pyi_builder.test_source(\n \"\"\"\n import resources_testmod\n print('OK')\n \"\"\")", "title": "" }, { "docid": "6355504487d46b842c41e8a49849bbbd", "score": "0.48186448", "text": "def revert_edit(self):\n for line in fileinput.input('conf.py', inplace=True):\n # inside this loop the STDOUT will be redirected to the file\n print(line.replace('{' + self.test_name + '}', '{Test}'), end='')", "title": "" }, { "docid": "53486a1085b45ccc9196a25dddb239c1", "score": "0.4812623", "text": "def configure_env_for_dev_build(session: nox.session) -> None:\n session.env[\"CFLAGS\"] = \"-Werror -Wno-deprecated-declarations -g --coverage\"\n session.env[\"COCOTB_LIBRARY_COVERAGE\"] = \"1\"\n session.env[\"CXXFLAGS\"] = \"-Werror\"\n session.env[\"LDFLAGS\"] = \"--coverage\"", "title": "" }, { "docid": "1c72eb40bc801ac340ddd9ecd62007a9", "score": "0.48070794", "text": "def setUp(self):\n super(BackendTestCase, self).setUp()\n self.project_root = tempfile.mkdtemp(prefix=\"elpy-test\")\n self.addCleanup(shutil.rmtree, self.project_root, True)", "title": "" }, { "docid": "b99fed4310acbbe7cd2f4be30d62b5f8", "score": "0.4794812", "text": "def set_vars(suite, procday, dummycase, testcase, svn_version, toacase):\n # suite.add_variable('TURTLES', 'I like turtles')\n suite.add_variable(\"ECF_MICRO\", \"%\")\n\n # Specify the python interpreter to be used:\n suite.add_variable(\"PYTHON\",\n \"PYTHONPATH=$PYTHONPATH:{0} {1}\".format(perm,\n python_path))\n\n # Directory on the remote machine, where all generated \n # files from \"ECF_HOME\" will be copied before execution\n # suite.add_variable(\"REMOTE_HOME\", 
remote_home_dir)\n\n # Directory on the remote machine, \n # where all jobs write their output\n suite.add_variable(\"REMOTE_LOGDIR\", remote_log_dir)\n\n # Remote user and host names\n suite.add_variable(\"REMOTE_USER\", remote_user_name)\n suite.add_variable(\"REMOTE_HOST\", remote_host_name)\n\n # Standard ecflow variables:\n suite.add_variable(\"ECF_HOME\", ecf_home_dir)\n suite.add_variable(\"ECF_FILES\", ecf_files_dir)\n suite.add_variable(\"ECF_INCLUDE\", ecf_include_dir)\n suite.add_variable(\"ECF_OUT\", ecf_out_dir)\n # default value\n suite.add_variable(\"EC_FILTER_TASKFILE\", \"n/a\")\n suite.add_variable(\"EC_TOTAL_SLAVES\", 1)\n suite.add_variable(\"EC_GET_ERA_SLAVES\", 1)\n suite.add_variable(\"NDAYS_SATDATA\", 1)\n\n # Miscellaneous:\n suite.add_variable(\"SVN_VERSION\", svn_version)\n suite.add_variable(\"ECF_TRIES\", '1')\n suite.add_variable(\"ECF_SUBMIT\", ecflow_submit)\n suite.add_variable(\"GACDB_CLIENT\", gacdb_client)\n suite.add_variable(\"MAKE_CFG_FILE\", make_cfg_files)\n suite.add_variable(\"COUNT_ORBIT_FILES\", count_orbit_files)\n suite.add_variable(\"CLEANUP_SCRATCH\", cleanup_scratch)\n suite.add_variable(\"ARCHIVE_DATA\", archive_data)\n suite.add_variable(\"ECFS_L3_DIR\", ecfs_l3_dir)\n suite.add_variable(\"ECFS_L2_DIR\", ecfs_l2_dir)\n suite.add_variable(\"LD_LIB_PATH\", ld_lib_path)\n suite.add_variable(\"PROCDAY\", procday)\n suite.add_variable(\"TESTRUN\", testcase)\n suite.add_variable(\"PROC_TOA\", toacase)\n suite.add_variable(\"DUMMYRUN\", dummycase)\n suite.add_variable(\"WRITE_MPMD_TASKFILE\", write_mpmd_taskfile)\n suite.add_variable(\"WRITE_MPMD_CFGFILES\", write_mpmd_cfgfiles)\n suite.add_variable(\"WRITE_MPMD_REMAP_TASKFILE\", write_mpmd_remap_taskfile)\n suite.add_variable(\"COUNT_AVHRR_ORBITS\", count_avhrr_orbits)\n suite.add_variable(\"MPMD_SUBMITTER\", mpmd_submitter)\n\n # some processing directories\n suite.add_variable(\"ESA_ROUTINE\", esa_routine)\n suite.add_variable(\"ESA_OUTPUTDIR\", esa_outputdir)\n suite.add_variable(\"ESA_LEVEL3DIR\", esa_level3dir)\n suite.add_variable(\"ESA_INPUTDIR\", esa_inputdir)\n suite.add_variable(\"ESA_LOGDIR\", esa_logdir)\n suite.add_variable(\"ESA_CONFIGDIR\", esa_configdir)\n suite.add_variable(\"ESA_LIST_L2FILES\", esa_listl2files)\n suite.add_variable(\"ESA_ECF_LOG_DIR\", esa_ecflogdir)\n\n # Config files\n suite.add_variable(\"CFG_PATHS_FILE\", cfg_paths_file)\n suite.add_variable(\"CFG_ATTRI_FILE\", cfg_attri_file)\n suite.add_variable(\"CFG_PREFIX\", cfg_prefix)\n suite.add_variable(\"CFG_SUFFIX\", cfg_suffix)\n suite.add_variable(\"SQL_AVHRR_GAC\", sql_avhrr_gac)\n\n # ksh scripts\n suite.add_variable(\"GET_AVHRR_KSH\", get_avhrr_ksh)\n suite.add_variable(\"GET_MODIS_KSH\", get_modis_ksh)\n suite.add_variable(\"GET_MARS_KSH\", get_mars_ksh)\n #suite.add_variable(\"GET_MARS_SEQUENTIAL_KSH\", get_mars_sequential_ksh)\n suite.add_variable(\"GET_AUX_KSH\", get_aux_ksh)\n suite.add_variable(\"PROC2_ORAC_KSH\", proc2_orac_ksh)\n suite.add_variable(\"SINGLE_DAY_KSH\", single_day_ksh)\n suite.add_variable(\"RUN_L2TOL3_KSH\", run_l2tol3_ksh)\n suite.add_variable(\"REMAP_ERA_PARALLEL_KSH\", remap_era_parallel_ksh)\n suite.add_variable(\"REMAP_ERA_SEQUENTIAL_KSH\", remap_era_sequential_ksh)\n suite.add_variable(\"WRAPPER_EXE\", wrapper_exe)\n suite.add_variable(\"BIG_FAM\", big_fam)\n suite.add_variable(\"MAINPROC_FAM\", mainproc_fam)", "title": "" }, { "docid": "7d1cc4080a076e8937b73fcfca5da8a9", "score": "0.4791834", "text": "def setUp(self):\n self.config = open_config(CONF_FILE)\n self.train_file = 
os.path.join(TEMP_DIR, 'train_data.tsv')\n self.ann_file = os.path.join(TEMP_DIR, 'Cypripedium_section.ann')\n self.test_result = os.path.join(TEMP_DIR, 'test_result.tsv')\n self.train_dir = os.path.join(TEMP_DIR, 'train_data')\n self.ann_dir = os.path.join(TEMP_DIR, 'ann_dir')\n self.test_result_batch = os.path.join(TEMP_DIR, 'test_result_batch.tsv')\n\n for path in (self.train_dir, self.ann_dir):\n if not os.path.isdir(path):\n os.mkdir(path)", "title": "" } ]
6de1468ded4e201bad09caa6bd19724c
update parameters theta to make the move probabilities and value (p, v) = f_theta(S) more closely match the improved search probabilities and the self-play winner (pi, z). The updated parameters f_theta^(t+1) will theoretically be stronger.
[ { "docid": "0d4e7e628cf81276cea6a848a60c8d36", "score": "0.0", "text": "def PSEUDO_backward(self):\n pass", "title": "" } ]
[ { "docid": "9839b71af2270eef26b36476b6975aff", "score": "0.6501566", "text": "def updateW(self, trj_Sp_theta, W_0):\n def fun(x):\n global trj_Sp_theta_z\n W_0 = [[x[0], x[1]],[x[2], x[3]]]\n r_0 = self.reward_trj(trj_Sp_theta, W_0)\n return -1*r_0 \n import numpy as np\n from scipy.optimize import minimize\n \n global trj_Sp_theta_z \n trj_Sp_theta_z = trj_Sp_theta\n alpha = 0.1\n\n cons = ({'type': 'eq',\n 'fun' : lambda x: np.array([x[0]+x[1]+x[2]+x[3]-1])},\n {'type': 'ineq',\n 'fun' : lambda x: np.array([x[1]])},\n {'type': 'ineq',\n 'fun' : lambda x: np.array([x[0]])},\n {'type': 'ineq',\n 'fun' : lambda x: np.array([x[2]])},\n {'type': 'ineq',\n 'fun' : lambda x: np.array([x[3]])})\n\n x0 = [W_0[0][0], W_0[0][1], W_0[1][0], W_0[1][1]] \n \n res = minimize(fun, x0, constraints=cons)\n x = res.x\n W = [[x[0], x[1]],[x[2], x[3]]]\n return W", "title": "" }, { "docid": "0baacfc89589f24cb584e899f31a48cf", "score": "0.6144343", "text": "def demo_svf(trajs, n_states, r_weight, p_weight, proposed):\n\n decay = 0.99\n\n p = torch.zeros(n_states)\n count = torch.zeros(n_states)\n\n for traj in trajs:\n step_count = torch.zeros(n_states)\n prev_reward = 0\n seq = 0\n mean = 0\n\n for step in traj:\n seq += 1\n if proposed == 1:\n '''\n This part is where propsed method placed.\n '''\n count[step.cur_state] += 1\n\n #p[step.cur_state] = p[step.cur_state] + (2.0 * step.reward) # set_1_Original proposed\n #p[step.cur_state] = p[step.cur_state] + (1.0 * step.reward * math.pow(decay, (count[step.cur_state]))) # set 2\n #p[step.cur_state] = p[step.cur_state] + (3.0 * step.reward) # set_3\n p[step.cur_state] = p[step.cur_state] + (r_weight * step.reward) # set_3\n\n elif proposed == 2:\n #p[step.cur_state] = p[step.cur_state] + (r_weight * step.reward) + (r_weight / p_weight * prev_reward) # set_3\n #prev_reward = step.reward\n\n if seq == len(traj)-1:\n p[step.cur_state] = p[step.cur_state] + (r_weight * step.reward)\n\n else:\n post = seq + 1\n p[step.cur_state] = p[step.cur_state] + ((r_weight - p_weight)*step.reward) + (p_weight * traj[post].reward)\n\n\n\n elif proposed == 3:\n if step_count[step.cur_state] == 0:\n p[step.cur_state] += 1 * np.sqrt(seq)\n if seq == 0:\n p[step.cur_state] += 1\n mean += p[step.cur_state]\n # else:\n # p[step.cur_state] += 1\n step_count[step.cur_state] += 1\n\n\n else:\n p[step.cur_state] += 1\n\n mean = mean/len(trajs)\n\n for step in traj:\n if p[step.cur_state] < 0.5*mean and proposed == 3:\n p[step.cur_state] = 0\n\n p = p / len(trajs)\n return p", "title": "" }, { "docid": "dbe761cae8a8b3d21c600a2fa7fe9951", "score": "0.61006147", "text": "def update_theta(self):\n eta = 0.1\n T = len(self.state_history) - 1\n\n [m, n] = self.theta.shape\n delta_theta = self.theta.copy()\n\n for i in range(0, m):\n for j in range(0, n):\n if not(np.isnan(self.theta[i, j])):\n sa_i = [SA for SA in self.state_history if SA[0] == i]\n sa_ij = [SA for SA in self.state_history if SA == [i, j]]\n sa_i_tot = len(sa_i)\n sa_ij_tot = len(sa_ij)\n delta_theta[i, j] = (\n sa_ij_tot - self.pi[i, j] * sa_i_tot) / T\n\n self.theta = self.theta + eta * delta_theta", "title": "" }, { "docid": "5d9a21fe0c9dcdb8f453fbf786802a1d", "score": "0.6084923", "text": "def step(theta0, s, alpha):\n # Compute new parameter vector as sum of old vector and steepest descent step\n theta1 = theta0 - alpha * s\n \n return theta1", "title": "" }, { "docid": "f47c35767bf24e493fef4bceb8565c49", "score": "0.6004303", "text": "def update_params(self, dtheta):\n assert len(dtheta) == 2, len(dtheta)\n dW, db = 
dtheta\n assert dW.shape == self.W.shape, dW.shape\n assert db.shape == self.b.shape, db.shape\n self.W += dW\n self.b += db", "title": "" }, { "docid": "2aa56c0d83a9872b39e3763a397d3df1", "score": "0.59005797", "text": "def update(self, theta, epsilon):\n\t\tgrad = self.grad_theta_H(theta, self.p_half_leap)\n\t\tp_epsilon = self.p_half_leap - epsilon/2.0*grad\n\t\tself.p = p_epsilon\n\t\treturn p_epsilon", "title": "" }, { "docid": "c55c54cfbe6bac1fdd9e11f91761ab33", "score": "0.5866934", "text": "def update_theta(self, w):\n self.theta = self.rho * self.theta + self.sig_theta * w", "title": "" }, { "docid": "7a925739a3951940c8f4963e18c34f41", "score": "0.58577096", "text": "def update(self,\n learning_rate,\n num_best_deltas,\n sigma_rewards,\n rollouts):\n step = np.zeros(self.theta.shape)\n for r_pos, r_neg, delta in rollouts:\n step += (r_pos - r_neg) * delta\n self.theta += learning_rate/(num_best_deltas*sigma_rewards)*step", "title": "" }, { "docid": "27729771d7db5b2c5cccc296c41a1240", "score": "0.5849624", "text": "def _update_model_params(self, iter):\n theta = self.model_params.copy()\n\n if iter > 20:\n s = iter - 20\n gamma = (s + 1) ** -0.6\n\n theta_prop = scipy.stats.multivariate_normal.rvs(\n mean=theta, cov=math.exp(self.log_a) * self.model_prop_cov)\n theta_prop = np.reshape(theta_prop, theta.shape)\n\n l_old = self.likelihood()\n p_old = self.model_prior(theta)\n\n self.model_params = theta_prop.copy()\n l_prop = self.likelihood()\n p_prop = self.model_prior(theta_prop)\n\n if math.log(random.random()) < p_prop + l_prop - p_old - l_old:\n accepted = 1\n\n else:\n self.model_params = theta.copy()\n accepted = 0\n\n if iter > 20:\n theta = self.model_params.copy()\n self.model_prop_cov = (1.0-gamma) * self.model_prop_cov + gamma \\\n * (theta - self.mu)[:, np.newaxis] \\\n @ (theta - self.mu)[:, np.newaxis].T\n self.mu = (1.0-gamma) * self.mu + gamma * theta\n self.log_a += gamma * (accepted - 0.25)", "title": "" }, { "docid": "b39603a81d55503a1804000e0c5de335", "score": "0.58464223", "text": "def move2goal(self,x,y,angleOfApproach,steerOnly=0):\n goal_pose = Pose()\n # Get the input from the user.\n goal_pose.x = x\n goal_pose.y = y\n goal_pose.theta = angleOfApproach\n goal_pose.theta = math.radians(goal_pose.theta)\n self.goal_pose = goal_pose\n # Please, insert a number slightly greater than 0 (e.g. 
0.01).\n distance_tolerance = 0.05 #input(\"Set your tolerance: \")\n angle_tolerance = math.radians(1)\n vel_msg = Twist()\n max_linear_velocity = 0.1\n max_angular_velocity = 5\n if steerOnly == 0:\n while abs(self.pose.theta - self.steering_angle(goal_pose))>=angle_tolerance:\n vel_msg.linear.x = 0\n vel_msg.linear.y = 0\n vel_msg.linear.z = 0\n\n # Angular velocity in the z-axis.\n vel_msg.angular.x = 0\n vel_msg.angular.y = 0\n vel_msg.angular.z = self.angular_vel(goal_pose)\n if vel_msg.angular.z > max_angular_velocity:\n vel_msg.angular.z = max_angular_velocity\n\n # Publishing our vel_msg\n self.velocity_publisher.publish(vel_msg)\n print math.degrees(self.steering_angle(goal_pose))\n\n # Publish at the desired rate.\n self.rate.sleep()\n\n vel_msg.linear.x = 0\n vel_msg.angular.z = 0\n self.velocity_publisher.publish(vel_msg)\n rospy.sleep(3.)\n print \"Initial steerign complete\"\n\n\n while self.euclidean_distance(goal_pose) >= distance_tolerance:\n\n # Porportional controller.\n # https://en.wikipedia.org/wiki/Proportional_control\n\n # Linear velocity in the x-axis.\n vel_msg.linear.x = self.linear_vel(goal_pose)\n if vel_msg.linear.x > max_linear_velocity:\n vel_msg.linear.x = max_linear_velocity\n vel_msg.linear.y = 0\n vel_msg.linear.z = 0\n\n # Angular velocity in the z-axis.\n vel_msg.angular.x = 0\n vel_msg.angular.y = 0\n vel_msg.angular.z = self.angular_vel(goal_pose)\n if vel_msg.angular.z > max_angular_velocity:\n vel_msg.angular.z = max_angular_velocity\n\n # Publishing our vel_msg\n self.velocity_publisher.publish(vel_msg)\n\n # Publish at the desired rate.\n self.rate.sleep()\n\n # Stopping our robot after the movement is over.\n vel_msg.linear.x = 0\n vel_msg.angular.z = 0\n self.velocity_publisher.publish(vel_msg)\n print \"translation complete\"\n\n #steer to angleOfApproach\n\n while abs(self.steering_angle(goal_pose,1)-self.pose.theta) >= angle_tolerance:\n vel_msg.linear.x = 0\n vel_msg.linear.y = 0\n vel_msg.linear.z = 0\n\n # Angular velocity in the z-axis.\n vel_msg.angular.x = 0\n vel_msg.angular.y = 0\n vel_msg.angular.z = self.angular_vel(goal_pose,1)\n if vel_msg.angular.z > max_angular_velocity:\n vel_msg.angular.z = max_angular_velocity\n\n # Publishing our vel_msg\n self.velocity_publisher.publish(vel_msg)\n print math.degrees(self.steering_angle(goal_pose))\n\n # Publish at the desired rate.\n self.rate.sleep()\n\n # Stopping our robot after the movement is over.\n vel_msg.linear.x = 0\n vel_msg.angular.z = 0\n self.velocity_publisher.publish(vel_msg)\n rospy.sleep(3.)\n print \"Reached angleOfApproach\"\n msg=rospy.wait_for_message('/mybot/camera1/image_raw', Image)\n\n # xyz =rospy.wait_for_message('/odom', Odometry)\n # orlist = [xyz.pose.pose.orientation.x, xyz.pose.pose.orientation.y, xyz.pose.pose.orientation.z, xyz.pose.pose.orientation.w]\n # (r,p,y) = euler_from_quaternion(orlist)\n # print xyz\n # print 'OREINTATION'\n # print math.degrees(y)\n res = self.navigateImageCheck(msg)\n return res\n # If we press control + C, the node will stop.\n # rospy.spin()", "title": "" }, { "docid": "e027f9e12bf6510f0351532ac6dbee4d", "score": "0.58196485", "text": "def update(self, transition):\n\n # extract transition information\n state, action, next_state, reward, done, info = transition\n phi = self.config.phi_func(transition)\n\n if phi[0]:\n self.obj_visits_per_reward_func_counter[self.active_reward_func_idx][0] += 1\n \n if phi[1]:\n self.obj_visits_per_reward_func_counter[self.active_reward_func_idx][1] += 1\n\n if self.learn_w:\n # update the 
reward weights\n w = self.w_per_reward_func[self.active_reward_func_idx]\n w = w + self.alpha_w * (reward - np.matmul(phi, w)) * phi\n self.enforce_weight_maximum(w)\n self.w_per_reward_func[self.active_reward_func_idx] = w\n\n # set gamma to 0 if goal state is reached\n gamma = self.config.gamma if not done else 0\n\n # select which policies are updated\n # the active policy is always updated\n update_policies_idxs = [self.active_reward_func_idx]\n\n if self.config.dogpi and len(self.z_per_reward_func) > 1:\n\n if self.config.policy_update_mode == 'active_and_gpi_optimal':\n # get task from which the current action was taken\n _, c = self.calc_max_action(state)\n if c != self.active_reward_func_idx:\n update_policies_idxs.append(c)\n\n elif self.config.policy_update_mode == 'active_and_optimal':\n # identify the policies, for which the current action is also optimal\n for p_idx in range(len(self.z_per_reward_func)):\n if p_idx != self.active_reward_func_idx:\n a, _ = self.calc_max_action(state, policy_idx=p_idx, target_reward_func_idx=p_idx)\n if a == action:\n update_policies_idxs.append(p_idx)\n\n elif self.config.policy_update_mode == 'all':\n # update all policies (besides the current)\n additional_policies_idxs = np.arange(len(self.z_per_reward_func)).tolist()\n del additional_policies_idxs[self.active_reward_func_idx]\n\n update_policies_idxs.extend(additional_policies_idxs)\n\n\n for p_idx in update_policies_idxs:\n # which action should be used as optimal next action ?\n # if active policy: use GPI procedure\n # if other policy: depends on additional_policy_update_action_mode config\n if p_idx == self.active_reward_func_idx or self.config.additional_policy_update_action_mode == 'gpi':\n if self.config.dogpi:\n next_action, _ = self.calc_max_action(next_state, target_reward_func_idx=p_idx)\n else:\n next_action = self.calc_own_max_action(next_state)\n else:\n # self.config.additional_policy_update_action_mode == 'target_policy':\n next_action, _ = self.calc_max_action(next_state, policy_idx=p_idx, target_reward_func_idx=p_idx)\n\n z_weight = self.z_per_reward_func[p_idx]\n\n current_psi = np.matmul(state, z_weight[action, :])\n next_psi = np.matmul(next_state, z_weight[next_action, :])\n\n for k in range(self.config.phi_size):\n z_weight[action, :, k] += self.alpha * (phi[k] + gamma * next_psi[k] - current_psi[k]) * state\n\n self.enforce_weight_maximum(z_weight)", "title": "" }, { "docid": "31d216c5dce46c5b2dcb324002a11208", "score": "0.58085537", "text": "def update(self, x, y, theta):\n theta = wrap_theta(theta - self.old_theta) + self.old_theta\n self.old_theta = theta\n self.theta = theta\n self.x = x\n self.y = y\n self._state_f = np.array([self.x, self.y, self.theta], dtype=np.float32).T\n self.update_called = True", "title": "" }, { "docid": "cc87459c102a01eb0828790e9e6be5ca", "score": "0.57942295", "text": "def fit_theta(self):\n thetas = []\n r0 = self.rates[0, 0]\n\n for i in self.zcb[1:]:\n p0 = i\n func = (lambda t: self.forward_tree(\n r0, self.sigma, self.dt, thetas+[t])[1][0, 0]-p0)\n new_theta = fsolve(func, .001)\n thetas.append(new_theta[0])\n\n self.thetas = thetas\n self.rates = self.forward_tree(r0, self.sigma, self.dt, thetas)[0]", "title": "" }, { "docid": "a424ea76475a0045ea9bb30640044dec", "score": "0.57771176", "text": "def action_to_stateparams(self,state,action): \n (x,y,theta,v) = state.get_stateparams()\n if(theta == 0): #heading 0\n control_set = self.theta_0\n elif(theta == math.pi/2): #heading 90\n control_set = self.theta_90 \n elif(theta == 
math.pi): # heading 180\n control_set = self.theta_180\n elif(theta == 3*math.pi/2): # heading 270\n control_set = self.theta_270 \n\n elif(theta == math.pi/4): # heading 45\n control_set = self.theta_45\n elif(theta == 3*math.pi/4): # heading 135\n control_set = self.theta_135\n elif(theta == 5*math.pi/4): # heading 225\n control_set = self.theta_225\n elif(theta == 7*math.pi/4): # heading 315\n control_set = self.theta_315\n\n elif(theta == 0.464):\n control_set = self.theta_26_6\n elif(theta == 2.035):\n control_set = self.theta_116_6\n elif(theta == 3.606):\n control_set = self.theta_206_6\n elif(theta == 5.177):\n control_set = self.theta_296_6 \n\n elif(theta == 1.107):\n control_set = self.theta_63_4\n elif(theta == 2.677):\n control_set = self.theta_153_4\n elif(theta == 4.248):\n control_set = self.theta_243_4\n elif(theta == 5.819):\n control_set = self.theta_333_4\n \n # get precomputed new state on taking action from origin\n newstate_origin = control_set[action]\n return newstate_origin", "title": "" }, { "docid": "3adcbccc3972f7e8e15f7c82866e08c2", "score": "0.5711314", "text": "def _update(self, s, r, q_p, f_vals, alpha):\n assert len(f_vals) == len(self.weights)\n actions = s.possible_actions()\n # Compute Q values for new state s based on current approximation\n q_vs = np.array([self._qfunc(s, a) for a in actions])\n # Assume optimal future behavior (see: Bellman-Ford equation)\n q_max = np.max(q_vs) if len(q_vs) > 0 else 0\n # Calculate delta term, i.e. partial derivative of Q function with respect to each weight\n delta = alpha * (r + self.gamma * q_max - q_p) * f_vals\n self.weights += delta\n return actions", "title": "" }, { "docid": "6ce6d5ddf7bc22f6b57d9df8b612f657", "score": "0.5668202", "text": "def params_update(self, replacement):\r\n #assert len(self.actor_target_theta_params) == len(self.actor_eval_theta_params), \"Local and target model parameters must have the same size\"\r\n\r\n if replacement == 'hard':\r\n\r\n [tf.assign(target_c, eval_c) for target_c, eval_c in zip(self.actor_target_theta_params, self.actor_eval_theta_params)]\r\n [tf.assign(target_a, eval_a) for target_a, eval_a in zip(self.critic_target_theta_params, self.critic_eval_theta_params)]\r\n\r\n else:\r\n [tf.assign(target_c, (1 - self.tau) * target_c + self.tau * eval_c) for target_c, eval_c in zip(self.critic_target_theta_params, self.critic_eval_theta_params)]\r\n [tf.assign(target_a, (1 - self.tau) * target_a + self.tau * eval_a) for target_a, eval_a in zip(self.actor_target_theta_params, self.actor_eval_theta_params)]", "title": "" }, { "docid": "4e212eea1c1681c5e516c849a71232fd", "score": "0.5665278", "text": "def calc_reward(self, prev_v, prev_angle_v, rotor_speeds):\n distance_reward = 0\n# Basic working function which generates increasing rewards\n# distance_x = (self.target_pos[0] - abs(self.sim.pose[0])) ** 2\n# distance_y = (self.target_pos[0] - abs(self.sim.pose[0])) ** 2\n# distance_z = (self.target_pos[2] - abs(self.sim.pose[2])) ** 2\n# distance = math.sqrt(distance_x + distance_y + distance_z)\n# distance = (distance_x + distance_y + distance_z) # Squared Euclidean difference\n# distance = max(abs(self.target_pos[0] - self.sim.pose[0]), abs(self.target_pos[1] - self.sim.pose[1]) , abs(self.target_pos[2] - self.sim.pose[2])) #Chebyshev Distance\n# distance = abs(self.target_pos[0] - self.sim.pose[0]) + abs(self.target_pos[1] - self.sim.pose[1]) + 1.2 * abs(self.target_pos[2] - self.sim.pose[2]) #Mahattan Distance\n distance = self.get_distance()\n if distance == 0 or ( 
self.sim.pose[2] >=9 and self.sim.pose[2] <= 11):\n# print(\"\\ndistance == 0\")\n distance_reward += 100\n else:\n if self.sim.pose[0] == self.target_pos[0] and self.target_pos[1] == self.sim.pose[1]:\n distance_reward += 0.2\n else:\n distance_reward += -0.025\n if self.sim.pose[2] > 0:\n distance_reward += 0.5\n else:\n distance_reward += -0.75\n if abs(self.sim.v[0]) + abs(self.sim.v[1]) > 0:\n distance_reward += -2\n else:\n distance_reward += 0.5\n \n if self.sim.v[2] > 0:\n distance_reward += 0.5\n else:\n distance_reward += -0.75\n if sum(abs(self.sim.angular_v)) > 0:\n distance_reward += -2\n else:\n distance_reward += 1\n \n# elif self.sim.pose[2] > 0:\n# distance_reward += 2\n# if self.sim.v[2] > 0:\n# distance_reward += 2\n# elif self.sim.v[2] == 0:\n# distance_reward += -1\n# # elif self.sim.pose[0] != self.target_pos[0] and self.sim.pose[1] != self.target.pos[1]:\n# else: \n# distance_reward += -5\n# distance_reward = np.tanh(1-distance)\n# Calculate velocity rewards\n# velocity_diff = [ min(prev_v[x],self.sim.v[x]) for x in range(len(self.sim.v)) ] \n# velocity_reward = (sum(velocity_diff) * 0.0075)\n# velocity_reward = sum(velocity_diff)\n\n return np.clip(distance_reward,-1,1)", "title": "" }, { "docid": "ce44fba203ff5aeeec689eafd201c89c", "score": "0.566031", "text": "def upwind(theta):\r\n return 0.0", "title": "" }, { "docid": "ffdc66f98e60052f4da76b2a4bc13e96", "score": "0.5659589", "text": "def update_parameters_adam(parameters, grads, v, s, t, learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-6):\n L = len(parameters) // 2\n v_corrected = {}\n s_corrected = {}\n\n for l in range(L):\n # moving average of gradients\n v[\"dW\"+str(l+1)] = beta1 * v[\"dW\"+str(l+1)] + (1-beta1) * grads[\"dW\"+str(l+1)]\n v[\"db\"+str(l+1)] = beta1 * v[\"db\"+str(l+1)] + (1-beta1) * grads[\"db\"+str(l+1)]\n # bias corrected first moment estimate\n v_corrected[\"dW\"+str(l+1)] = v[\"dW\"+str(l+1)] / (1 - beta1**t)\n v_corrected[\"db\"+str(l+1)] = v[\"db\"+str(l+1)] / (1 - beta1**t)\n # moving average of squared gradients\n s[\"dW\"+str(l+1)] = beta2 * s[\"dW\"+str(l+1)] + (1 - beta2) * np.power(grads[\"dW\"+str(l+1)], 2)\n s[\"db\"+str(l+1)] = beta2 * s[\"db\"+str(l+1)] + (1 - beta2) * np.power(grads[\"db\"+str(l+1)], 2)\n # bias corrected second raw movement estimate\n s_corrected[\"dW\"+str(l+1)] = s[\"dW\"+str(l+1)] / (1 - beta2**t)\n s_corrected[\"db\"+str(l+1)] = s[\"db\"+str(l+1)] / (1 - beta2**t)\n # upate parameters\n parameters[\"W\" + str(l+1)] -= learning_rate * v_corrected[\"dW\" + str(l+1)] / (np.sqrt(s_corrected[\"dW\" + str(l+1)]) + epsilon)\n parameters[\"b\" + str(l+1)] -= learning_rate * v_corrected[\"db\" + str(l+1)] / (np.sqrt(s_corrected[\"db\" + str(l+1)]) + epsilon)\n\n return parameters, v, s", "title": "" }, { "docid": "d8a43702714d9be62a98cfa5fe3d33cb", "score": "0.5652458", "text": "def update(self, action, dt): \n g = self.gravity\n mp = self.mpole\n mc = self.mcart\n l = self.lpole/2 \n f = action\n\n temp = (f+mp*l*self.dot_theta**2*torch.sin(self.theta))/(mp+mc)\n denom = l*(4/3-(mp*torch.cos(self.theta)**2)/(mp+mc))\n ddot_theta = (g*torch.sin(self.theta)-torch.cos(self.theta)*temp)/denom\n ddot_x = temp - (mp*l*ddot_theta*torch.cos(self.theta))/(mp+mc)\n\n dot_x = self.dot_x + dt * ddot_x\n dot_theta = self.dot_theta + dt * ddot_theta\n x = self.x + dt * dot_x\n theta = self.theta + dt * dot_theta\n\n self.x = torch.clamp(x, -self._max_x, self._max_x).reshape(1)\n self.theta = torch.clamp(theta, -self._max_theta, self._max_theta).reshape(1)\n self.dot_x = 
torch.clamp(dot_x, -self._max_dot_x, self._max_dot_x).reshape(1)\n self.dot_theta = torch.clamp(dot_theta, -self._max_dot_theta, self._max_dot_theta).reshape(1)\n\n if DEBUG:\n print(f\"x={self.x.item():.4f} theta={self.theta.item():.4f} f={f.item()}\")", "title": "" }, { "docid": "d836f8187411d9281194348201520e1d", "score": "0.5635243", "text": "def runModel(theta, T, pi, M, P, beta_dist, v,c,q):\n \n #tracking data; this could be way more efficient :face-palm:\n old_u = []\n time_data_diff = []\n num_players_in_model = [M]\n tot_shown_A = 0\n tot_in_model = 0\n t = 1\n pi_a = pi[1]\n \n #dictionaries to keep track of who is shown which articles and who clicks on which articles\n #indexed (user group, article shown)\n shown_dict = {(1,1): 0,\n (-1,1): 0, \n (1,-1): 0,\n (-1,-1): 0}\n \n click_dict = {(1,1): 0,\n (-1,1): 0, \n (1,-1): 0,\n (-1,-1): 0}\n \n share_dict = {(1,1): 0,\n (-1,1): 0, \n (1,-1): 0,\n (-1,-1): 0}\n \n\n\n while (t <= T) and (t == 1 or len(old_u) > 0):\n num_shown_A = 0 #number of players at this timestep that are shown article A\n new_u = [] # list of new players that arrive at the timestep\n\n if t == 1: # initial mass of users arrives\n for i in range(M): # iterating over the size of the unit mass\n tot_in_model = tot_in_model + 1\n g = coin_toss(pi[1]) # determine players group according to the true group distribution\n s = coin_toss(theta[g]) # show article A according to the platform's policy. (right now, this is just a placeholder)\n player = Player(group=g, article=s)\n shown_dict[(g,s)] = shown_dict[(g,s)] + 1\n if s == 1:\n num_shown_A = num_shown_A + 1\n player.article = 1\n else:\n player.article = -1\n\n P_personal = P\n P_personal[(g,s)] = np.random.beta(*beta_dist[(g,s)])\n P_personal[(g,-s)] = np.random.beta(*beta_dist[(g,-s)])\n\n player.clicked = calcclickdict(player, 1, \n P_personal, \n q, \n theta,\n c,\n v)\n if player.clicked: \n click_dict[(g,s)] = click_dict[(g,s)] + 1 \n if random.uniform(0, 1) <= P[(player.group, player.article)]:\n player.shared = True\n share_dict[(g,s)] = share_dict[(g,s)] + 1\n old_u.append(player)\n \n\n else:\n for user in old_u:\n\n if user.shared == 1: # new user only added to the system if the previous user shared the article\n tot_in_model = tot_in_model + 1\n if random.uniform(0, 1) <= q[user.group]: # if next person is drawn by homophily\n new_user = Player(group=user.group)\n else:\n new_user = Player(group=-user.group)\n \n # show the previous person's article, regardless of the new user's group \n new_user.article = user.article\n shown_dict[(new_user.group, new_user.article)] = shown_dict[(new_user.group, new_user.article)] + 1\n if new_user.article == 1:\n num_shown_A = num_shown_A + 1\n\n\n g = new_user.group\n s = new_user.article\n P_personal = P\n P_personal[(g,s)] = np.random.beta(*beta_dist[(g,s)])\n P_personal[(g,-s)] = np.random.beta(*beta_dist[(g,-s)])\n new_user.clicked = calcclickdict(new_user, 1, \n P_personal, \n q, \n theta,\n c,\n v)\n # decide if user shares article, according to P.\n if new_user.clicked == 1: \n click_dict[(new_user.group, new_user.article)] = click_dict[(new_user.group, new_user.article)] + 1\n if random.uniform(0, 1) <= P[(new_user.group, new_user.article)]:\n new_user.shared = True\n share_dict[(new_user.group, new_user.article)] = share_dict[(new_user.group, new_user.article)] + 1\n else:\n new_user.shared = False\n\n #add user to list\n new_u.append(new_user)\n else: #only add a user to the next round if the previous user shared the article \n pass\n\n 
num_players_in_model.append(len(new_u)) #tracks how many players are being shown articles at all timesteps\n old_u = new_u\n\n\n\n t = t + 1\n tot_shown_A = tot_shown_A + num_shown_A\n\n \n return num_players_in_model, shown_dict, click_dict, share_dict", "title": "" }, { "docid": "96496ab5ee53faf8b2405ab6a1c41054", "score": "0.5633333", "text": "def train_motion(t,y,designParams,constParams):\r\n P0Eq = lambda P0gage: P0gage + Patm #Pa initial tank absolute pressure\r\n mTotalEq = lambda Lt, rO, rhoT, Lr, rp: mw + (rhoT*np.pi*Lt*(rO**2 - riEq(rO)**2)) + MpEq(rp, Lr) #kg total mass of train = mass of wheels + mass of tank + mass of piston\r\n AtEq = lambda rO: np.pi * rO**2 #m^2 train frontal area\r\n V0Eq = lambda rO, Lt: np.pi * riEq(rO)**2 * Lt #m^3 tank volume\r\n LpEq = lambda Lr: 1.5 * Lr #m total length of pistion\r\n MpEq = lambda rp, Lr: 1250.0 * (np.pi * rp**2 *LpEq(Lr)) #kg mass of piston\r\n ApEq = lambda rp: np.pi * rp**2 #m^2 area of the piston\r\n riEq = lambda rO: rO/1.15 #m inside radius of tank\r\n \r\n #Assign params to variables\r\n Lt, rO, rhoT, P0gage, rg, Lr, rp = designParams[:7]\r\n rhoA, Patm, Cd, Cr, muS, rw, mw, g = constParams[:8]\r\n \r\n #extract position and velocity from y\r\n position = y[0] \r\n velocity = y[1]\r\n \r\n Ap = ApEq(rp)\r\n P0 = P0Eq(P0gage)\r\n V0 = V0Eq(rO, Lt)\r\n At = AtEq(rO)\r\n mTotal = mTotalEq(Lt, rO, rhoT, Lr, rp)\r\n\r\n Fd = 0.5 * Cd * rhoA * At * velocity**2 #part of drag term\r\n Fr = Cr * mTotal * g #part of rolling resistance term\r\n \r\n #Determine derivative values of dydt and dvdt\r\n if(position <= (Lr * (rw/rg))): #accelerating\r\n Ft = Ap * (rg/rw) * (((P0*V0) / (V0 + Ap*(rg/rw)*position)) - Patm) #part of thrust term \r\n dydt = velocity\r\n dvdt = (1.0 / (mTotal + mw)) * (Ft - Fd - Fr)\r\n else: #decelerating\r\n dydt = velocity\r\n dvdt = (1.0 / mTotal) * (-Fd - Fr)\r\n return dydt, dvdt", "title": "" }, { "docid": "cf07f88bb12a271d8e0fae446952ab5d", "score": "0.5632704", "text": "def updateTheta(self, iterations = 100, logger = logging.getLogger('SGD'), debug = False):\n\n self.theta = self.model.getParameters(trackgrad = True)\n\n #set optimzier \n self.optimizer = torch.optim.SGD(self.theta, lr = self.lr, momentum = self.momentum)\n\n #keep track of trajectories\n trace = {'OBJ': [], 'time': []}\n t_start = time.time()\n\n DL = DataLoader(self.anomalyReducedDataset, batch_size = self.batch_size)\n\n #iterable dataloader\n iterableData = iter( DL)\n\n for k in range(iterations):\n #reset gradients\n self.optimizer.zero_grad()\n \n #load new batch\n try:\n data_batch = next( iterableData )\n except StopIteration:\n iterableData = iter( DL)\n\n #transfer to device \n data_batch = data_batch.to( self.device )\n \n #forward pass\n if self.p == -2:\n loss = torch.sum( torch.norm( self.model(data_batch), dim = 1, p = 2) ** 2 ) / self.batch_size + self.regularizerCoeff / 2 * self.theta.frobeniusNormSq() \n else:\n loss = torch.sum( torch.norm( self.model(data_batch), dim = 1, p = self.p) ) / self.batch_size + self.regularizerCoeff / 2* self.theta.frobeniusNormSq()\n\n \n\n loss.backward()\n\n self.optimizer.step() \n\n \n OBJ = self.getObjective( self.anomalyReducedDataset )\n\n if k % 20 == 0:\n\n logger.info(\"{}-th iteration of SGD, the objective is {:.4f}.\".format(k, OBJ ) )\n\n if k == 0 or OBJ < BEST_Obj:\n BEST_Obj = OBJ\n BEST_var = self.theta * 1\n\n\n\n \n #reset parameters\n self.model.setParameters( BEST_var )", "title": "" }, { "docid": "d968b25c78e0cddd39d3b35856cb6e4e", "score": "0.56321716", "text": "def 
thetaW(n, s):\n\n if s == 0:\n tw = 0.0\n else: \n a1 = sum(1.0 / i for i in range(1, n))\n tw = s / a1\n \n return tw", "title": "" }, { "docid": "a195b78b261a3bb9a853cf734c13da64", "score": "0.5626314", "text": "def _Q_at_theta(self, thetavals):\n\n optimizer = pyo.SolverFactory('ipopt')\n dummy_tree = lambda: None # empty object (we don't need a tree)\n dummy_tree.CallbackModule = None\n dummy_tree.CallbackFunction = self._instance_creation_callback\n dummy_tree.ThetaVals = thetavals\n dummy_tree.cb_data = self.callback_data\n \n if self.diagnostic_mode:\n print(' Compute objective at theta = ',str(thetavals))\n\n # start block of code to deal with models with no constraints\n # (ipopt will crash or complain on such problems without special care)\n instance = _pysp_instance_creation_callback(dummy_tree, \"FOO1\", None) \n try: # deal with special problems so Ipopt will not crash\n first = next(instance.component_objects(pyo.Constraint, active=True))\n except:\n sillylittle = True \n else:\n sillylittle = False\n # end block of code to deal with models with no constraints\n\n WorstStatus = pyo.TerminationCondition.optimal\n totobj = 0\n for snum in self._numbers_list:\n sname = \"scenario_NODE\"+str(snum)\n instance = _pysp_instance_creation_callback(dummy_tree,\n sname, None)\n if not sillylittle:\n if self.diagnostic_mode:\n print(' Experiment = ',snum)\n print(' First solve with with special diagnostics wrapper')\n status_obj, solved, iters, time, regu \\\n = ipopt_solver_wrapper.ipopt_solve_with_stats(instance, optimizer, max_iter=500, max_cpu_time=120)\n print(\" status_obj, solved, iters, time, regularization_stat = \",\n str(status_obj), str(solved), str(iters), str(time), str(regu))\n\n results = optimizer.solve(instance)\n if self.diagnostic_mode:\n print('standard solve solver termination condition=',\n str(results.solver.termination_condition))\n\n if results.solver.termination_condition \\\n != pyo.TerminationCondition.optimal :\n # DLW: Aug2018: not distinguishing \"middlish\" conditions\n if WorstStatus != pyo.TerminationCondition.infeasible:\n WorstStatus = results.solver.termination_condition\n \n objobject = getattr(instance, self._second_stage_cost_exp)\n objval = pyo.value(objobject)\n totobj += objval\n retval = totobj / len(self._numbers_list) # -1??\n\n return retval, thetavals, WorstStatus", "title": "" }, { "docid": "6a9c3122077aeca2f123c065fbbb9779", "score": "0.5622754", "text": "def train_episode(self):\n\n # Update each state...\n theta = 1e-5 #Initializing a very small of theta\n delta = 0 #Initializing the value of delta to be zero\n for s in range(self.env.nS): #looping through all the states\n # Do a one-step lookahead to find the best action\n # Update the value function. Ref: Sutton book eq. 
4.10.\n\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n # Do a one-step lookahead to calculate state-action values\n action_values = np.zeros(self.env.nA) #Initaizing all the action values of states\n for a in range(self.env.nA):\n for prob, next_state, reward, done in self.env.P[s][a]:\n action_values[a] += prob * (reward + self.options.gamma * self.V[next_state]) #updating the action values\n # Select best action to perform based on the highest state-action value\n best_action_value = np.max(action_values)\n # Calculate change in value\n delta = max(delta, np.abs(self.V[s] - best_action_value)) #updating the delta value\n # Update the value function for current state\n self.V[s] = best_action_value\n\n # In DP methods we don't interact with the environment so we will set the reward to be the sum of state values\n # and the number of steps to -1 representing an invalid value\n self.statistics[Statistics.Rewards.value] = np.sum(self.V)\n self.statistics[Statistics.Steps.value] = -1", "title": "" }, { "docid": "f515f8f8eeb02412c1238c3260e2df3f", "score": "0.56206197", "text": "def parameters():\n # key parameters for the model, as described in Spearman 2018\n params = {}\n # model parameters\n params['amax'] = 7. # maximum player acceleration m/s/s\n params['vmax'] = 5. # maximum player speed m/s\n params['ttrl_sigma'] = 0.54 # Standard deviation of sigmoid function in Spearman 2018 ('s') that determines uncertainty in player arrival time\n params['kappa_def'] = 1. # kappa parameter in Spearman 2018 that gives the advantage defending players to control ball\n params['lambda_att'] = 3.99 # ball control parameter for attacking team\n params['lambda_def'] = 3.99 * params['kappa_def'] # ball control parameter for defending team\n params['average_ball_speed'] = 15. # average ball travel speed in m/s\n # numerical parameters for model evaluation\n params['int_dt'] = 0.04 # integration timestep (dt)\n params['max_int_time'] = 10 # upper limit on integral time\n params['model_converge_tol'] = 0.01 # assume convergence when PPCF>0.99 at a given location.\n # The following are 'short-cut' parameters. We do not need to calculated PPCF explicitly when a player has a sufficient head start. 
\n # A sufficient head start is when the a player arrives at the target location at least 'time_to_control' seconds before the next player\n params['time_to_control_att'] = 3*np.log(10) * (np.sqrt(3)*params['ttrl_sigma']/np.pi + 1/params['lambda_att'])\n params['time_to_control_def'] = 3*np.log(10) * (np.sqrt(3)*params['ttrl_sigma']/np.pi + 1/params['lambda_def'])\n \n # sigma normal distribution for relevant pitch control\n params['sigma_normal'] = 23.9\n # alpha : dependence of the decision conditional probability by the PPCF\n params['alpha'] = 1.04\n \n return params", "title": "" }, { "docid": "5c700cb2fcd628ef6a22448a5ac19fbf", "score": "0.5618156", "text": "def iterate(self):\n self.current_theta = self.get_parameters()\n self.next_theta = self.current_theta - inv(\n self.cost_function.cost_function_second_derivative()).dot(\n self.cost_function.cost_function_derivative())\n self.cost_function.update_parameters(self.next_theta)", "title": "" }, { "docid": "6d56e73fccd8b91d40dea1e010d4798f", "score": "0.56179667", "text": "def update_weights_continuous(self, s, a, r, ns, eta, gamma=1):\n # (neuronindex/popsize)/num_actions = stateindex\n # (neuronindex/popsize)%num_actions = actionindex\n # stateindex/17 = positionindex\n # stateindex%17 = velocityindex\n p, v = s\n pp, vv = ns\n Swphi = np.dot((self.W + self.competition).T[:, :-1],\n np.array([0 if (a != i / self.pop_size % 3) else\n cf.phi((p - i / self.pop_size / (3 * self.num_v) + 8) % 16 - 8) *\n cf.phi(v - (i / self.pop_size / 3) % self.num_v)\n for i in xrange(self.K - 1)]))\n Intphia = [cf.phi((pp - i / self.pop_size / (3 * self.num_v) + 8) % 16 - 8) *\n cf.phi(vv - (i / self.pop_size / 3) % self.num_v) * gamma\n for i in xrange(self.K - 1)]\n Intphia += [r]\n factor = eta * (np.array(Intphia) - Swphi)\n for i in xrange(self.num_states):\n # skip update for neurons 'far away'\n if abs(i / self.num_v - s[0]) > 2 or abs(i % self.num_v - s[1]) > 2:\n continue\n for k in xrange(self.pop_size):\n self.W[self.sa[i, a, k]] += factor *\\\n cf.phi((p - i / self.num_v + 8) % 16 - 8) *\\\n cf.phi(v - i % self.num_v)", "title": "" }, { "docid": "b8ea239ea13639d421b927b81ee2b0c8", "score": "0.56070286", "text": "def _Q_at_theta(self, thetavals, initialize_parmest_model=False):\n\n optimizer = pyo.SolverFactory('ipopt')\n\n if len(thetavals) > 0:\n dummy_cb = {\n \"callback\": self._instance_creation_callback,\n \"ThetaVals\": thetavals,\n \"theta_names\": self._return_theta_names(),\n \"cb_data\": self.callback_data,\n }\n else:\n dummy_cb = {\n \"callback\": self._instance_creation_callback,\n \"theta_names\": self._return_theta_names(),\n \"cb_data\": self.callback_data,\n }\n\n if self.diagnostic_mode:\n if len(thetavals) > 0:\n print(' Compute objective at theta = ', str(thetavals))\n else:\n print(' Compute objective at initial theta')\n\n # start block of code to deal with models with no constraints\n # (ipopt will crash or complain on such problems without special care)\n instance = _experiment_instance_creation_callback(\"FOO0\", None, dummy_cb)\n try: # deal with special problems so Ipopt will not crash\n first = next(instance.component_objects(pyo.Constraint, active=True))\n active_constraints = True\n except:\n active_constraints = False\n # end block of code to deal with models with no constraints\n\n WorstStatus = pyo.TerminationCondition.optimal\n totobj = 0\n scenario_numbers = list(range(len(self.callback_data)))\n if initialize_parmest_model:\n # create dictionary to store pyomo model instances (scenarios)\n scen_dict = 
dict()\n\n for snum in scenario_numbers:\n sname = \"scenario_NODE\" + str(snum)\n instance = _experiment_instance_creation_callback(sname, None, dummy_cb)\n\n if initialize_parmest_model:\n # list to store fitted parameter names that will be unfixed\n # after initialization\n theta_init_vals = []\n # use appropriate theta_names member\n theta_ref = self._return_theta_names()\n\n for i, theta in enumerate(theta_ref):\n # Use parser in ComponentUID to locate the component\n var_cuid = ComponentUID(theta)\n var_validate = var_cuid.find_component_on(instance)\n if var_validate is None:\n logger.warning(\n \"theta_name %s was not found on the model\", (theta)\n )\n else:\n try:\n if len(thetavals) == 0:\n var_validate.fix()\n else:\n var_validate.fix(thetavals[theta])\n theta_init_vals.append(var_validate)\n except:\n logger.warning(\n 'Unable to fix model parameter value for %s (not a Pyomo model Var)',\n (theta),\n )\n\n if active_constraints:\n if self.diagnostic_mode:\n print(' Experiment = ', snum)\n print(' First solve with special diagnostics wrapper')\n (\n status_obj,\n solved,\n iters,\n time,\n regu,\n ) = utils.ipopt_solve_with_stats(\n instance, optimizer, max_iter=500, max_cpu_time=120\n )\n print(\n \" status_obj, solved, iters, time, regularization_stat = \",\n str(status_obj),\n str(solved),\n str(iters),\n str(time),\n str(regu),\n )\n\n results = optimizer.solve(instance)\n if self.diagnostic_mode:\n print(\n 'standard solve solver termination condition=',\n str(results.solver.termination_condition),\n )\n\n if (\n results.solver.termination_condition\n != pyo.TerminationCondition.optimal\n ):\n # DLW: Aug2018: not distinguishing \"middlish\" conditions\n if WorstStatus != pyo.TerminationCondition.infeasible:\n WorstStatus = results.solver.termination_condition\n if initialize_parmest_model:\n if self.diagnostic_mode:\n print(\n \"Scenario {:d} infeasible with initialized parameter values\".format(\n snum\n )\n )\n else:\n if initialize_parmest_model:\n if self.diagnostic_mode:\n print(\n \"Scenario {:d} initialization successful with initial parameter values\".format(\n snum\n )\n )\n if initialize_parmest_model:\n # unfix parameters after initialization\n for theta in theta_init_vals:\n theta.unfix()\n scen_dict[sname] = instance\n else:\n if initialize_parmest_model:\n # unfix parameters after initialization\n for theta in theta_init_vals:\n theta.unfix()\n scen_dict[sname] = instance\n\n objobject = getattr(instance, self._second_stage_cost_exp)\n objval = pyo.value(objobject)\n totobj += objval\n\n retval = totobj / len(scenario_numbers) # -1??\n if initialize_parmest_model and not hasattr(self, 'ef_instance'):\n # create extensive form of the model using scenario dictionary\n if len(scen_dict) > 0:\n for scen in scen_dict.values():\n scen._mpisppy_probability = 1 / len(scen_dict)\n\n if use_mpisppy:\n EF_instance = sputils._create_EF_from_scen_dict(\n scen_dict,\n EF_name=\"_Q_at_theta\",\n # suppress_warnings=True\n )\n else:\n EF_instance = local_ef._create_EF_from_scen_dict(\n scen_dict, EF_name=\"_Q_at_theta\", nonant_for_fixed_vars=True\n )\n\n self.ef_instance = EF_instance\n # set self.model_initialized flag to True to skip extensive form model\n # creation using theta_est()\n self.model_initialized = True\n\n # return initialized theta values\n if len(thetavals) == 0:\n # use appropriate theta_names member\n theta_ref = self._return_theta_names()\n for i, theta in enumerate(theta_ref):\n thetavals[theta] = theta_init_vals[i]()\n\n return retval, thetavals, 
WorstStatus", "title": "" }, { "docid": "db7eeda569cf9266984e6b5a572e6cfe", "score": "0.5591962", "text": "def T(self, s, a, s_, target_loc):\n \n p_int = self.p_int # intended direction\n p_unint = self.p_unint # unintended, perpendicular direction\n prob_matrix = np.zeros([self.city_size, self.city_size])\n \n # clip function handles agent hitting the wall and staying put\n clip = lambda val: np.clip(val, 0, self.city_size-1)\n \n if s == target_loc:\n prob_matrix[s[0], s[1]] += 1\n\n elif a == \"north\":\n \n prob_matrix[clip(s[0]-1), s[1]] += p_int\n prob_matrix[s[0], clip(s[1]-1)] += p_unint\n prob_matrix[s[0], clip(s[1]+1)] += p_unint\n \n elif a == \"south\":\n \n prob_matrix[clip(s[0]+1), s[1]] += p_int\n prob_matrix[s[0], clip(s[1]-1)] += p_unint\n prob_matrix[s[0], clip(s[1]+1)] += p_unint\n \n elif a == \"west\":\n\n prob_matrix[s[0], clip(s[1]-1)] += p_int\n prob_matrix[clip(s[0]-1), s[1]] += p_unint\n prob_matrix[clip(s[0]+1), s[1]] += p_unint\n \n elif a == \"east\":\n\n prob_matrix[s[0], clip(s[1]+1)] += p_int\n prob_matrix[clip(s[0]-1), s[1]] += p_unint\n prob_matrix[clip(s[0]+1), s[1]] += p_unint\n \n # fails when probabilities don't sum to 1\n assert np.sum(prob_matrix) == 1\n \n return prob_matrix[s_[0], s_[1]]", "title": "" }, { "docid": "e6da49b42f52c099c1229050cab35846", "score": "0.5588335", "text": "def updateObjective(self, series):\n self.prob.setObjective(pulp.lpSum([series[k]*self.player_vars[k] for k in self.players]) - self.pen*self.z)", "title": "" }, { "docid": "e6da49b42f52c099c1229050cab35846", "score": "0.5588335", "text": "def updateObjective(self, series):\n self.prob.setObjective(pulp.lpSum([series[k]*self.player_vars[k] for k in self.players]) - self.pen*self.z)", "title": "" }, { "docid": "2e6d51ebbe25c67ac5b4668d7fa9d48f", "score": "0.5581631", "text": "def passive_aggressive_single_step_update(feature_vector, label, L, current_theta, current_theta_0):\n \n #Prepend theta_0 to theta vector\n # i.e. classification params = [theta_0; theta]\n current_theta_rolled = np.insert(current_theta, 0, current_theta_0) \n\n #Prepend bias unit to our feature vector\n # i.e. 
[x0; x]\n feature_vector = np.insert(feature_vector, 0, 1) \n\n\n\n #return current_theta, current_theta_0\n raise NotImplementedError", "title": "" }, { "docid": "a4626245cf1f2d5223af52161aaee1a1", "score": "0.55787396", "text": "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n \n #update q value weights (omega)\n diff = reward + self.discount * self.computeValueFromQValues(nextState) - self.getQValue(state, action)\n for feature_name, feature_value in self.featExtractor.getFeatures(state, action).iteritems():\n self.q_value_weights[feature_name] += self.alpha * diff * feature_value\n \n #update policy weights (theta)\n \n expectedFeatureValues = util.Counter()\n for action in self.legalActions:\n for feature_name, value in self.featExtractor.getFeatures(state, action).iteritems():\n expectedFeatureValues[feature_name] += value\n for feature_name, value in expectedFeatureValues.iteritems():\n expectedFeatureValues[feature_name] /= len(self.legalActions)\n \n for feature_name, value in self.featExtractor.getFeatures(state, action).iteritems():\n scoreFunc = value - expectedFeatureValues[feature_name]\n self.policy_weights[feature_name] += self.beta * scoreFunc * self.getQValue(state, action)", "title": "" }, { "docid": "08858c0f0b47a90bc3f1e2c05a25f9ef", "score": "0.55701524", "text": "def update_theta_recursive(self, x, y, A):\n x_reg = self.poly.fit_transform(x).T\n for m in range(self.M_):\n if A[m, :] > self.activity_threshold:\n gamma = self.P_[m] @ x_reg / (x_reg.T @ self.P_[m] @ x_reg + self.forgetting_factor * np.reciprocal(A[m, :]))\n self.Theta_[m, :] = self.Theta_[m, :] + gamma @ (y - (x_reg.T @ self.Theta_[m, :]))\n self.P_[m] = 1/self.forgetting_factor * (self.P_[m] - gamma @ x_reg.T @ self.P_[m])", "title": "" }, { "docid": "78c8ce2247510049e1de597c389bc0ef", "score": "0.55666375", "text": "def train(self):\n p1_images = []\n p2_images = []\n p1_pi = []\n p2_pi = []\n\n g = game.Game()\n mcts = MonteCarloTreeSearch(\n simulations = self.mcts_simulations,\n model = self.model,\n )\n player = 1\n while not g.game_over():\n pi = list(mcts.mcts(g))\n best_prob = 0\n best_moves = []\n for i, prob in enumerate(pi):\n if prob > best_prob:\n best_prob = prob\n best_moves = [i]\n elif prob == best_prob:\n best_moves.append(i)\n else:\n continue\n\n images, pi = mcts.get_training_data()\n assert len(images) == len(pi)\n\n if player == 1:\n p1_images += images\n p1_pi += pi\n else:\n p2_images += images\n p2_pi += pi\n\n g.make_move_index(random.choice(best_moves))\n player *= -1\n\n \n if g.result == 0:\n labels = [0 for _ in range(len(p1_pi) + len(p2_pi))]\n else:\n if player == 1:\n labels = [-1. for _ in range(len(p1_images))]\n labels += [1. for _ in range(len(p2_images))]\n else:\n labels = [1. for _ in range(len(p1_images))]\n labels += [-1. for _ in range(len(p2_images))]\n\n p1_images = np.array(p1_images)\n p2_images = np.array(p2_images)\n data = np.vstack((p1_images, p2_images))\n p1_pi = np.array(p1_pi)\n p2_pi = np.array(p2_pi)\n policy = np.vstack((p1_pi, p2_pi))\n labels = np.array(labels)\n \n self.model.train(data, policy, labels)", "title": "" }, { "docid": "00f47086b39f7db55e810f024649ffe3", "score": "0.55533063", "text": "def updateParams(theta, dtheta, eta, regularizer=None, my_lambda=0.):\n\n if regularizer == None:\n return theta - eta * dtheta\n elif regularizer == 'L1':\n return theta - eta * my_lambda * np.sign(theta) - eta * dtheta\n elif regularizer == 'L2':\n return (1. 
- eta * my_lambda) * theta - eta * dtheta\n else:\n raise NotImplementedError", "title": "" }, { "docid": "4d88de957624a46e4623ae2337cf46c0", "score": "0.55334306", "text": "def penalty(qs, ms, theta, r):\n out = 0\n for i in range(r):\n out += ms[i] * (theta(qs[i+1]) - theta(qs[i]))\n return 0.5 * out", "title": "" }, { "docid": "80de34c9b75ed456e178bd52193bec0c", "score": "0.55274934", "text": "def update_robot_env(workspace, robot_team_initial_target, robot_move, robot_waypoint, robot_time, robot_path,\n robot_progress, next_time, neg_clause, partial_or_full):\n # find robots that need to move\n robot_future_time_min_length_path_target = dict()\n for robot in robot_progress.keys():\n if robot not in robot_move:\n # time difference between the next progress (edge) and the key time point for the considered robot\n if robot_progress[robot] + 1 < len(robot_time[robot]):\n future_time = robot_time[robot][robot_progress[robot]+1] - next_time\n else:\n continue\n # calculate the shortest path from the current location to assigned region\n min_length = np.inf\n path = []\n min_target = None\n\n for target in workspace.regions[robot_waypoint[robot][robot_progress[robot] + 1]]:\n length, p = nx.algorithms.single_source_dijkstra(workspace.graph_workspace,\n source=robot_path[robot][-1],\n target=target)\n if min_length > length:\n min_length = length\n path = p\n min_target = target\n # robots need to move since the remaining time is not enough\n if future_time < min_length:\n robot_future_time_min_length_path_target[robot] = [future_time, min_length, path, min_target]\n\n # those robots that are be involved in the negative literals\n # robot_team_initial_target['constraint'] = {(neg_lit[1], robot): (robot_path[(neg_lit[1], robot)][-1], None)\n # for clause in neg_clause.values() for neg_lit in clause\n # for robot in range(workspace.type_num[neg_lit[1]])}\n # robot_move.update(set(robot_team_initial_target['constraint'].keys()))\n\n # treat robots that do not move as obstacles\n if partial_or_full == 'p':\n new_obstacles = [path[-1] for robot, path in robot_path.items() if robot not in robot_move\n and robot not in robot_future_time_min_length_path_target.keys()]\n elif partial_or_full == 'f':\n new_obstacles = []\n\n # treat all robots that do not execute the current subtask as obstacles\n remove_edge = []\n for obs in new_obstacles:\n remove_edge += list(workspace.graph_workspace.edges(obs))\n workspace.graph_workspace.remove_node(obs)\n\n # determine the target point at the next time\n other_edge = dict()\n for robot in robot_future_time_min_length_path_target.keys():\n future_time, min_length, path, min_target = robot_future_time_min_length_path_target[robot]\n target = path[min_length-future_time]\n # find the intermediate target for mapp\n if target in new_obstacles:\n l, p = nx.algorithms.single_source_dijkstra(workspace.graph_workspace,\n source=robot_path[robot][-1],\n target=min_target)\n # if target is already selected, select previous one\n index = l - 1 - future_time\n while target in new_obstacles:\n target = p[index]\n try:\n index -= 1\n except KeyError:\n break\n\n if target != robot_path[robot][-1]:\n # the free cell is occupied by some robot\n new_obstacles.append(target)\n other_edge[robot] = (robot_path[robot][-1], target)\n robot_move.add(robot)\n # update the set of robots that need to move\n robot_team_initial_target['other_edges'] = other_edge\n\n return remove_edge", "title": "" }, { "docid": "69a765afc4bb2b071f97c34ab1cf3942", "score": "0.55236924", "text": "def 
PSO(self):\n w = 0.9\n c1 = 2\n c2 = 2\n X_pos = []\n p_pos = []\n vel = []\n X_pos.append(np.random.rand(self.numPositions,2)*2.0-1)\n vel.append(np.random.rand(self.numPositions,2))\n k = 0\n fval = []\n for i,val in enumerate(X_pos[-1]):\n fval.append(self.f(X_pos[-1][i][0],X_pos[-1][i][1]))\n gBest = []\n p_pos = X_pos\n gBest.append(X_pos[-1][fval.index(min(fval))])\n r=np.random.rand(self.numPositions,2)\n s=np.random.rand(self.numPositions,2)\n while k<self.epochs:\n vel.append(np.multiply(w,vel[-1])+np.multiply(np.multiply(c1,r), (p_pos[-1]-X_pos[-1])) + \\\n np.multiply(np.multiply(c2,s), (gBest[-1]-X_pos[-1])))\n p_pos.append(p_pos[-1])\n X_pos.append(X_pos[-1] + vel[-1])\n fval=[]\n gBestTemp = gBest[-1]\n for i,val in enumerate(X_pos[-1]):\n fval.append(self.f(X_pos[-1][i][0],X_pos[-1][i][1]))\n if(fval[-1]<self.f(p_pos[-1][i][0],p_pos[-1][i][1])):\n p_pos[-1][i] = X_pos[-1][i]\n else:\n pass\n if(fval[-1]<self.f(gBest[-1][0],gBest[-1][1])):\n gBestTemp = X_pos[-1][i]\n else:\n pass\n gBest.append(gBestTemp)\n self.gbvals.append(self.f(gBest[-1][0], gBest[-1][1]))\n self.meanVals.append(np.mean(fval))\n self.wrsVals.append(self.f(X_pos[-1][fval.index(max(fval))][0],X_pos[-1][fval.index(max(fval))][1]))\n# print(fval,end='\\n')\n k+=1\n return gBest[-1]", "title": "" }, { "docid": "5c7a0db74ba09fd4fc204c5108aa56af", "score": "0.5520737", "text": "def theta(flag, S, K, t, r, sigma): \n \n b = r\n\n return numerical_theta(flag, S, K, t, r, sigma, b, f)", "title": "" }, { "docid": "36c321e988114c4e65d0d3e9158ddfaa", "score": "0.55127543", "text": "def fit(self, theta):\n\n theta = np.array(theta)\n\n assert theta.shape == (self.n_params,), \"bad shape for hyperparameters\"\n\n self._theta = theta\n\n switch = self.mean.get_n_params(self.inputs)\n\n m = self.mean.mean_f(self.inputs, self.theta[:switch])\n Q = self.kernel.kernel_f(self.inputs, self.inputs, self.theta[switch:-1])\n\n if self.nugget_type == \"adaptive\":\n self.L, self._nugget = jit_cholesky(Q)\n self.P = np.arange(0, self.n)\n elif self.nugget_type == \"pivot\":\n self.L, self.P = pivot_cholesky(Q)\n self._nugget = 0.\n else:\n if self.nugget_type == \"fit\":\n self._nugget = np.exp(self.theta[-1])\n Q += self._nugget*np.eye(self.n)\n self.L = linalg.cholesky(Q, lower=True)\n self.P = np.arange(0, self.n)\n\n self.invQt = pivot_cho_solve(self.L, self.P, self.targets - m)\n\n self.current_logpost = 0.5*(2.0*np.sum(np.log(np.diag(self.L))) +\n np.dot(self.targets - m, self.invQt) +\n self.n*np.log(2. 
* np.pi))\n\n for i in range(self.n_params):\n if not self._priors[i] is None:\n self.current_logpost -= self._priors[i].logp(self.theta[i])", "title": "" }, { "docid": "458933697e2672712bfa0c38913f46bf", "score": "0.55088395", "text": "def update(self, reward, winner, state, states, actions):\n\n # Finding estimated future value by finding max(Q(s', a'))\n # If terminal condition is reached, future reward is 0\n future_val = 0\n state_index = self.get_state_index(state, states)\n if winner == None:\n future_states = states\n i = self.optimal_next(future_states)\n future_state = future_states[i-1]\n future_st_index = self.get_state_index(future_state, future_states)\n future_val = self.qvalue(future_st_index)\n # Q-value update\n if self.algorithm is \"1\":\n self.q_table[state_index] = ((1 - self.learning_rate) * self.qvalue(state_index)) + (self.learning_rate * (reward + self.discount * future_val))\n\n if self.algorithm is \"2\":\n future_state = future_states[0]\n self.q_table[state_index] = ((1 - self.learning_rate) * self.qvalue(state_index)) + (self.learning_rate * (reward + self.discount * future_val))", "title": "" }, { "docid": "2df21f4f2ea169c281e50d9341bf159e", "score": "0.55009824", "text": "def estimate_next_pos(measurement, heading, turning, OTHER = None):\n N = 500\n if not OTHER:\n OTHER = {'particles' : create_particles(N), 'last_m': measurement}\n else:\n distance = distance_between(OTHER['last_m'], measurement)\n for p in OTHER['particles']:\n #p.measurement_noise = 0.05 * distance\n p.move(turning, distance)\n #print len(OTHER['particles'])\n w = []\n for p in OTHER['particles']:\n w.append(compute_weight(measurement, (p.x, p.y)))\n xy_estimate = mean_estimation(OTHER['particles'], w)\n\n#print xy_estimate\n new_particles = []\n index = int(random.random() * N)\n beta = 0.0\n mw = max(w)\n for i in range(N):\n beta += random.random() * 2.0 * mw\n while beta > w[index]:\n beta -= w[index]\n index = (index + 1) % N\n p = OTHER['particles'][index]\n r = robot(p.x, p.y, heading) #prob we need to calculate de heading\n new_particles.append(r)\n#p = p3\n\n#nu = sum(w)\n #print nu\n# for i in range(len(OTHER['particles'])):\n# if w[i] != 0:\n# w[i] /= nu\n#distribution = calculate_distribution(w)\n#print len(w)\n#new_particles = []\n# for _ in range(N):\n #p = pick_particle(OTHER['particles'], distribution)\n #if p:\n # r = robot(p.x, p.y, p.heading) #prob we need to calculate de heading\n # new_particles.append(r)\n #else:\n# new_particles.append(create_particles(1)[0])\n \n OTHER['particles'] = new_particles\n # You must return xy_estimate (x, y), and OTHER (even if it is None)\n # in this order for grading purposes.\n #OTHER['measurements'].append(measurement)\n#xy_estimate = (0,0)\n\n return xy_estimate, OTHER", "title": "" }, { "docid": "af205f5c9187ad5eea4033d0f5433a4b", "score": "0.54997575", "text": "def update(self, u, dt):\n\n # calculate the force that drives people to goal\n x_rel_goal = self.x_goal - np.array([self.x, self.y])\n v_goal = self.vd * x_rel_goal / np.linalg.norm(x_rel_goal)\n\n f_goal_x = self.k * (v_goal[0] - self.vx)\n f_goal_y = self.k * (v_goal[1] - self.vy)\n\n # calculate accelerations\n ax = f_goal_x + u[0] - self.c * self.vx\n ay = f_goal_y + u[1] - self.c * self.vy\n\n # update velocity and positions\n vx_next = self.vx + ax * dt\n vy_next = self.vy + ay * dt\n\n v_next = np.linalg.norm(np.array([vx_next, vy_next]))\n if v_next > self.max_v:\n vx_next *= self.max_v / v_next\n vy_next *= self.max_v / v_next\n\n self.x += 0.5 * (self.vx + 
vx_next) * dt\n self.y += 0.5 * (self.vy + vy_next) * dt\n\n self.vx = vx_next\n self.vy = vy_next", "title": "" }, { "docid": "e261b61b665865f2cc8842c0b7af7610", "score": "0.5499105", "text": "def pTransverse(p,theta):\n\treturn protonMomentum*theta*zeta(p,theta)", "title": "" }, { "docid": "97c3ec6baa1e033d486c04e8423d1a91", "score": "0.54917073", "text": "def learn_policy(self, env, theta):\n\n # Initialize V(s) arbitrarily except V(terminal) = 0\n self.values = np.zeros(len(env))\n\n # On each sweep, greedify w/r/t current action using bootstrapped V(s) estimates\n # until convergence is achieved and optimal policies identified\n optimal_policy = np.zeros(len(self.values), int)\n delta = 1\n iter = 0\n\n while delta >= theta:\n iter += 1\n delta = 0\n for s in range(len(self.values)):\n v_s = self.values[s]\n values = []\n for a in range(self.nactions):\n reward, next_s, terminal = env.env_step(s, a)\n update = reward + self.discount*self.values[next_s]\n values.append(update)\n\n max_a = self.argmax(values)\n optimal_policy[s] = max_a\n self.values[s] = values[max_a]\n delta = max(delta, abs(v_s - self.values[s]))\n\n print(\"Iteration = {} | Delta = {}\".format(iter, delta))\n\n # Overwrite with optimal policies\n self.policy = self.policy * 0\n for s in range(len(self.values)):\n self.policy[s,int(optimal_policy[s])] = 1\n\n return self.policy", "title": "" }, { "docid": "feec0f2245804faf63157409c2a33eb4", "score": "0.5484921", "text": "def adaptive_pso(p_count, lr):\n particles = [Particle() for i in range(p_count)] \n highest_reward = 0\n bsp = random.choice(particles) # best swarm particle\n wv = 0.9\n c1 = 2\n c2 = 2\n state = 'S1'\n for i in particles:\n reward = i.episode(i.bw)\n if reward > highest_reward:\n highest_reward = reward\n bsp.w = i.bw\n\n for t in range(40): # no. 
of iterations\n rewards = deque(maxlen=p_count)\n for particle in particles:\n rp = np.random.rand(particle.w.shape[0], particle.w.shape[1])\n rg = np.random.rand(particle.w.shape[0], particle.w.shape[1])\n particle.v = wv*particle.v + c1*rp*(particle.bw-particle.w) + c2*rg*(bsp.w-particle.w)\n particle.w += lr*particle.v\n\n reward = particle.episode(particle.w)\n rewards.append(reward)\n print(reward)\n\n if reward > particle.episode(particle.bw):\n particle.bw = particle.w\n if particle.episode(particle.bw) > bsp.episode(bsp.w):\n bsp.w = particle.bw\n \n # calculate distance matrix and phi\n d_list = [] # distances\n for i in particles:\n d_list.append(np.sum([np.sqrt((j.w - i.w)**2) for j in particles if i not in [j]]))\n d_g = np.sum([np.sqrt((bsp.w - q.w)**2) for q in particles if q not in [bsp]])\n d_g = d_g * 1/(p_count-1) # average distance from global\n d_max = max(d_list) * 1/(p_count-1)\n d_min = min(d_list) * 1/(p_count-1)\n phi = (d_g - d_min) / (d_max - d_min)\n # update wv\n wv = 1 / (1+1.5*math.exp(-2.6*phi))\n # calculate state change and update c parameters\n state = update_state(phi,state)\n c1, c2 = update_c((c1,c2),state)\n \n print(\"wv: \", wv)\n print('state: ',state)\n print(\"c1,c2: \",c1,c2)\n\n print(\"---------------Iteration through all particles complete---------------\")\n if np.mean(rewards)==500:\n print(\"*All particles converged unto the maximum*\")\n return particles[0]\n if t == 39:\n end_rewards = [particle.episode(particle.bw) for particle in particles]\n maximum_reward = max(end_rewards)\n for i in range(len(end_rewards)):\n if end_rewards[i] == maximum_reward:\n return particles[i]", "title": "" }, { "docid": "775666ee0bde8528afce2f2869773d34", "score": "0.5483392", "text": "def move(self, state):\n \n global player\n player = state.player\n \n global depthLimit\n depthLimit = 7\n\n utility = self.deepeningAlphaBetaSearch(state)\n #print(utility)\n return self.bestAction", "title": "" }, { "docid": "55bfb4da0ef2b202a26debd4865838f8", "score": "0.54830796", "text": "def reward_function(params):\n\n # load input variables\n speed = params['speed']\n steering_angle = params['steering_angle']\n waypoints = params['waypoints']\n closest_waypoints = params['closest_waypoints']\n heading = params['heading']\n\n # Initialize reward\n reward = 1.0\n\n # penalize against large steering inputs\n # (based on aws examples)\n ABS_STEERING_THRESHOLD = 20.0\n if steering_angle > ABS_STEERING_THRESHOLD:\n reward -= 0.25\n\n # penalize against going too slow\n # (based on aws examples)\n SPEED_THRESHOLD = 1.0\n if speed < SPEED_THRESHOLD:\n reward -= 0.20\n\n # calculate the track direction based on nearest waypoints\n # (based on aws examples)\n next_point = waypoints[closest_waypoints[1]] # [x, y]\n prev_point = waypoints[closest_waypoints[0]] # [x, y]\n track_dir = math.atan2(next_point[1] - prev_point[1], next_point[0] - prev_point[0])\n track_dir = math.degrees(track_dir)\n\n # diff between track direction and current heading\n dir_diff = abs(track_dir - heading)\n if dir_diff > 180:\n dir_diff = 360 - dir_diff\n\n # Penalize if the difference is too large\n DIRECTION_THRESHOLD = 15.0\n if dir_diff > DIRECTION_THRESHOLD:\n reward -= 0.50\n\n # return the result\n return float(reward)", "title": "" }, { "docid": "5117c594be8296f72898260c305cd7da", "score": "0.5481977", "text": "def find_best_parameters(self, steps=500):\n def simulate(T, steps):\n \"\"\"\n Anneals a system at constant temperature and returns the \n rate of acceptance and the rate of 
improvement.\n \"\"\"\n E = self.energy()\n prev_energy = E\n accepts, improves = 0, 0\n for _ in range(steps):\n self.state = self.neighbour()\n E = self.energy()\n dE = prev_energy - E\n if self.probability(dE, T) >= np.random.random():\n accepts += 1\n if dE > .0:\n improves += 1\n prev_energy = E\n else:\n E = prev_energy\n return float(accepts) / steps, float(improves) / steps\n\n print(\"-------------------------------------------------------------------\")\n print()\n print(\"Calculating optimal parameters...\")\n\n T = .0\n E = self.energy()\n\n while T <= 1e-4:\n self.state = self.neighbour()\n T = abs(self.energy() - E)\n\n # Search for t_max - a temperature that gives 99% acceptance\n acceptance, improvement = simulate(T, steps)\n # t_max cannot be bigger than 1e+10\n while acceptance < .99 and T < 1e+10:\n T *= 1.5\n acceptance, improvement = simulate(T, steps)\n self.t_max = T\n\n # Search for t_min - a temperature that gives 0% improvement\n # acceptance, improvement = simulate(T, steps)\n # t_min cannot be smaller than 1e-4\n # while improvement > .0 and T > 1e-4:\n # T /= 2\n # acceptance, improvement = simulate(T, steps)\n # self.t_min = T\n self.t_min = 1e-4\n\n return self.t_max, self.t_min, self.state", "title": "" }, { "docid": "40e47317d145b6fe736cedc31e80e356", "score": "0.54800797", "text": "async def move_helioprojective(self, theta_x: float, theta_y: float, **kwargs: Any) -> None:\n ...", "title": "" }, { "docid": "6b35b4414facb2166d26bbd288f2b155", "score": "0.5478801", "text": "def solve(self):\n\n self.remove_impossible_targets()\n random.shuffle(self.targets)\n best_move = list(self.targets)\n mean, battery = self.compute_performance()\n best_perf = 10000 * mean + self.penalizer * battery\n for i in range(settings.MAX_RANDOM_PLANNER_ITERATION):\n random.shuffle(self.state)\n mean, battery = self.compute_performance()\n perf = 10000 * mean + self.penalizer * battery\n if perf < best_perf:\n best_move = list(self.state)\n\n self.state = best_move", "title": "" }, { "docid": "67a5d60f8743f9e468c610ae08b5defc", "score": "0.5473263", "text": "def update_parameters(self):\n for k in range(self.K):\n for n in range(self.V):\n self.phi[k][n] = (self.nkt[k][n] + self.beta)/(self.nkt_sum[k] + self.V*self.beta)\n for m in range(self.M):\n for k in range(self.K):\n self.theta[m][k] = (self.nmk[m][k] + self.alpha)/(self.nmk_sum[m] + self.M*self.alpha)", "title": "" }, { "docid": "85db3f8e404f95c88f03054fce5379c1", "score": "0.5470738", "text": "def next_move(self, game):\n states, actions = game.get_open_moves()\n # print ([c[0].units for c in actions])\n # Exploit\n i = self.optimal_next(states, game)\n randolorian = np.random.random_sample()\n #if randolorian < self.epsilon:\n if True:\n if game.player == self.player: \n self.clear_move_weights_of_tiles(actions)\n frontline_actions = [action for action in actions if action[0].team == self.player and action[0].team != action[1].team]\n self.add_move_weights_to_tiles(frontline_actions, 0)\n player_actions = [a for a in actions if a[0].team == self.player]\n sorted_actions_by_weight = sorted(player_actions, key=lambda a: a[0].weight, reverse=True)\n filtered_actions_by_direction = [a for a in sorted_actions_by_weight if a[0].weight - a[1].weight >= 0]\n actions_with_viable_moves = [a[0].weight for a in filtered_actions_by_direction if (a[0].units > 0 and a[1].units == 0 and a[1].team != self.player) or (a[0].units > 0 and a[1].units != 0 and a[1].team == self.player) or (a[0].units > 0 and a[0].weight == 1 and a[1].team != 
self.player)]\n if len(actions_with_viable_moves) > 0:\n max_weight_with_units = max(actions_with_viable_moves)\n filtered_actions_by_highest_weight = [a for a in filtered_actions_by_direction if a[0].weight == max_weight_with_units]\n filtered_attack_actions = [action for action in filtered_actions_by_highest_weight if action[1].team != self.player]\n actions_to_use = filtered_attack_actions if len(filtered_attack_actions) > 0 else filtered_actions_by_highest_weight\n best_action = actions_to_use[np.random.randint(0, len(actions_to_use))]\n i = actions.index(best_action)\n else:\n i = np.random.randint(0, len(states))\n else: \n i = np.random.randint(0, len(states))\n return states[i], actions[i]", "title": "" }, { "docid": "2b3360ffbaa6617f86887c100b2693eb", "score": "0.54668087", "text": "def update(self):\n # Pass 1: compute average payoffs\n for x in range(30):\n for y in range(30):\n self._average_payoffs(x, y)\n\n # Pass 2: Determine which strategy will become next round\n for x in range(30):\n for y in range(30):\n self._best_neighbor(x, y)\n\n # Pass 3: Switch to new strategy\n for x in range(30):\n for y in range(30):\n self._update_strategy(x, y)", "title": "" }, { "docid": "e397f7131687e57f7f7a92e5a662b394", "score": "0.5465065", "text": "def next_move(self, current_state):\r\n\r\n\r\n # update my paddle pos\r\n # I need to do this because GameCore moves my paddle randomly\r\n self.my_paddle_pos = current_state['paddle1_pos'] if self.my_goal == 'left' \\\r\n else current_state['paddle2_pos']\r\n\r\n # estimate puck path\r\n path = estimate_path(current_state, self.future_size)\r\n\r\n # computing both goal centers\r\n self.my_goal_center = {'x': 0 if self.my_goal == 'left' else current_state['board_shape'][1],\r\n 'y': current_state['board_shape'][0]/2}\r\n self.opponent_goal_center = {'x': 0 if self.my_goal == 'right' else current_state['board_shape'][1],\r\n 'y': current_state['board_shape'][0]/2}\r\n\r\n # find if puck path is inside my interest area\r\n roi_radius = current_state['board_shape'][0] * current_state['goal_size'] * 2 -100 ######## 300 campo de vision #########################################################3\r\n pt_in_roi = None\r\n for p in path:\r\n #print(utils.distance_between_points(p[0], self.my_goal_center), '<', roi_radius)\r\n if utils.distance_between_points(p[0], self.my_goal_center) < roi_radius:\r\n pt_in_roi = p\r\n break\r\n \r\n \r\n #Si el puck esta dentro de la cancha...\r\n if pt_in_roi:\r\n\r\n #Anti Auto-Gol\r\n goalR = current_state['board_shape'][0]*0.45/2\r\n #Checar si el puck esta detras de mi\r\n puckPos = current_state['puck_pos']\r\n if self.my_goal is 'left': #Si estoy en la izquierda\r\n if puckPos['x'] < self.my_paddle_pos['x']: #Si el puck esta detras de mi...\r\n #print(\"Puck esta detras de mi!\")\r\n #Clalcular direccion del puck\r\n path = estimate_path(current_state, self.future_size)\r\n target_pos = {'x': current_state['board_shape'][0]*0.45/2, 'y': current_state['board_shape'][0]/2}\r\n moveToTarget = True\r\n for x in path:\r\n if x[1]['x'] > 0: #Puck va hacia la porteria contraria\r\n #print(\"Puck va hacia el enemigo \", x[1]['x'])\r\n #Me tengo que mover en diagonal para que no me pegue·············································································\r\n #1. 
Calculo mi vector de direccion hacia mi posicion de defensa de mi paddle\r\n target_pos = {'x': current_state['board_shape'][0]*0.45/2, 'y': current_state['board_shape'][0]/2}\r\n direction_vector = {'x': target_pos['x'] - self.my_paddle_pos['x'],\r\n 'y': target_pos['y'] - self.my_paddle_pos['y']}\r\n direction_vector = {k: v / utils.vector_l2norm(direction_vector)\r\n for k, v in direction_vector.items()}\r\n\r\n movement_dist = min(current_state['paddle_max_speed'] * current_state['delta_t'],\r\n utils.distance_between_points(x[0], self.my_paddle_pos))\r\n direction_vector = {k: v * movement_dist\r\n for k, v in direction_vector.items()}\r\n \r\n #Obtener path del paddle, basandome en la direccion del puck\r\n paddlepath = estimate_path_paddle(current_state, self.future_size, self.my_paddle_pos, direction_vector)\r\n for y in paddlepath: #Si el path de mi paddle intersecta el path de mi puck\r\n if y[0]['x'] > x[0]['x'] - current_state['puck_radius'] and y[0]['x'] < x[0]['x'] + current_state['puck_radius'] and y[0]['y'] > x[0]['y'] - current_state['puck_radius'] and y[0]['y'] < x[0]['y'] + current_state['puck_radius']:\r\n #print(\"Intersecta!\")\r\n if self.my_paddle_pos['y'] > current_state['board_shape'][0]/2:\r\n target_pos = {'x': current_state['board_shape'][0]*0.45/2, 'y': current_state['board_shape'][0] - current_state['puck_radius']}\r\n #print(\"Me muevo pa arriba\")\r\n else:\r\n target_pos = {'x': current_state['board_shape'][0]*0.45/2, 'y': current_state['puck_radius']}\r\n #print(\"Me muevo pa abajo\")\r\n\r\n else:\r\n target_pos = {'x': current_state['board_shape'][0]*0.45/2, 'y': current_state['board_shape'][0]/2}\r\n #print(\"Me muevo normal\")\r\n\r\n direction_vector = {'x': target_pos['x'] - self.my_paddle_pos['x'],\r\n 'y': target_pos['y'] - self.my_paddle_pos['y']}\r\n direction_vector = {k: v / utils.vector_l2norm(direction_vector)\r\n for k, v in direction_vector.items()}\r\n\r\n movement_dist = min(current_state['paddle_max_speed'] * current_state['delta_t'],\r\n utils.distance_between_points(x[0], self.my_paddle_pos))\r\n direction_vector = {k: v * movement_dist\r\n for k, v in direction_vector.items()} \r\n new_paddle_pos = {'x': self.my_paddle_pos['x'] + direction_vector['x'], 'y': self.my_paddle_pos['y'] + direction_vector['y']}\r\n \r\n if utils.is_inside_goal_area_paddle(new_paddle_pos, current_state) is False and \\\r\n utils.is_out_of_boundaries_paddle(new_paddle_pos, current_state) is None:\r\n self.my_paddle_pos = new_paddle_pos\r\n\r\n\r\n \r\n\r\n else:\r\n #Rebote #############################################################################################\r\n pos_enemigo = current_state['paddle2_pos']\r\n if pos_enemigo['y'] > self.my_goal_center['y']: #Si esta arriba...\r\n lineaRebote = current_state['puck_radius']\r\n A = current_state['puck_pos']\r\n D = {'x': A['x'], 'y': A['y']-lineaRebote}\r\n B = self.opponent_goal_center\r\n else: #Esta abajo...\r\n lineaRebote = current_state['board_shape'][0] - current_state['puck_radius']\r\n A = current_state['puck_pos']\r\n D = {'x': A['x'], 'y': -(A['y']) + current_state['puck_radius']}\r\n B = self.opponent_goal_center\r\n\r\n #Recta de rebote, donde corta en el eje y del radio del puck\r\n m1 = 0 \r\n n1 = lineaRebote\r\n\r\n #Recta entre B y D\r\n m2 = (D['y'] - B['y']) / (D['x'] - B['x'])\r\n n2 = (B['x']*D['y'] - D['x']*B['y']) / (B['x'] - D['x'])\r\n\r\n Cx = (n2 - n1) / (m1 - m2)\r\n Cy = m1 * Cx + n1\r\n C = {'x': Cx, 'y': Cy}\r\n #print(current_state['paddle1_pos'])\r\n \r\n # estimate an 
aiming position\r\n target_pos = utils.aim(pt_in_roi[0], pt_in_roi[1],\r\n C, current_state['puck_radius'],\r\n current_state['paddle_radius'])\r\n\r\n # move to target position, taking into account the max. paddle speed\r\n if target_pos != self.my_paddle_pos:\r\n direction_vector = {'x': target_pos['x'] - self.my_paddle_pos['x'], #donde quiero ir, donde estoy\r\n 'y': target_pos['y'] - self.my_paddle_pos['y']}\r\n direction_vector = {k: v / utils.vector_l2norm(direction_vector)\r\n for k, v in direction_vector.items()}\r\n\r\n movement_dist = min(current_state['paddle_max_speed'] * current_state['delta_t'],\r\n utils.distance_between_points(target_pos, self.my_paddle_pos))\r\n direction_vector = {k: v * movement_dist\r\n for k, v in direction_vector.items()}\r\n \r\n new_paddle_pos = {'x': self.my_paddle_pos['x'] + direction_vector['x'],\r\n 'y': self.my_paddle_pos['y'] + direction_vector['y']}\r\n\r\n\r\n \r\n #Rectas del Triangulo ######################################################################################################\r\n if self.my_goal is 'left': #Si estoy en la izquierda\r\n R1 = (100,0,(current_state['board_shape'][1]/2)-64,(current_state['board_shape'][0]/2)-64)\r\n R2 = (100,(current_state['board_shape'][0]),(current_state['board_shape'][1]/2)-64,(current_state['board_shape'][0]/2)-64)\r\n else: #Si estoy en la derecha\r\n R1 = (current_state['board_shape'][1]-100,0, (current_state['board_shape'][1]/2)+64, (current_state['board_shape'][0])/2)\r\n R2 = ((current_state['board_shape'][1]/2)+64, (current_state['board_shape'][0])/2, current_state['board_shape'][1]+100, current_state['board_shape'][0])\r\n \r\n\r\n m1 = (R1[1]-R1[3])/(R1[0]-R1[2])\r\n m2 = (R2[1]-R2[3])/(R2[0]-R2[2])\r\n\r\n n1 = (R1[0]*R1[3] - R1[2]*R1[1]) / (R1[0] - R1[2])\r\n n2 = (R2[0]*R2[3] - R2[2]*R2[1]) / (R2[0] - R2[2])\r\n\r\n # Izquierda\r\n # R1 | Y = 0.4429065743944637x + -0.0\r\n # R2 | Y = -0.5905420991926182x + 448.0\r\n # Derecha\r\n # R1 | Y = -0.5905420991926182 x + 587.5893886966551\r\n # R2 | Y = 0.5905420991926182 x + -75.58938869665513\r\n \r\n #print(\"Y = \",m1,\"x + \", n1)\r\n #print(\"Y = \",m2,\"x + \", n2)\r\n\r\n #Calcular n de ambas rectas paralelas\r\n n1p = new_paddle_pos['y'] - new_paddle_pos['x'] * m1\r\n n2p = new_paddle_pos['y'] - new_paddle_pos['x'] * m2\r\n\r\n #Si me movimiento se va a pasar de la recta, no me muevo.\r\n if n1p < n1 or n2p > n2:\r\n new_paddle_pos = {'x': self.my_paddle_pos['x'], 'y': self.my_paddle_pos['y']}\r\n\r\n #print(\"Paralelo contra R1 = Y = \",m1,\" x + \",n1)\r\n #print(\"Paralelo contra R2 = Y = \",m2,\" x + \",n2)\r\n\r\n # check if computed new position in not inside goal area\r\n # check if computed new position in inside board limits\r\n # Check if computed new position is inside triangle\r\n if utils.is_inside_goal_area_paddle(new_paddle_pos, current_state) is False and \\\r\n utils.is_out_of_boundaries_paddle(new_paddle_pos, current_state) is None:\r\n self.my_paddle_pos = new_paddle_pos\r\n \r\n else: #Si estoy en la derecha ***************************************************************************************************\r\n if puckPos['x'] > self.my_paddle_pos['x']: #Si el puck esta detras de mi...\r\n #print(\"Puck esta detras de mi!\")\r\n #Clalcular direccion del puck\r\n goalR = current_state['board_shape'][0]*0.45/2\r\n \r\n path = estimate_path(current_state, self.future_size)\r\n target_pos = {'x': current_state['board_shape'][1] - goalR, 'y': current_state['board_shape'][0]/2}\r\n for x in path:\r\n if x[1]['x'] < 0: 
#Puck va hacia la porteria contraria\r\n #print(\"Puck va hacia el enemigo \", x[1]['x'])\r\n #Me tengo que mover en diagonal para que no me pegue·············································································\r\n #1. Calculo mi vector de direccion hacia mi posicion de defensa de mi paddle\r\n target_pos = {'x': current_state['board_shape'][1] - goalR, 'y': current_state['board_shape'][0]/2}\r\n direction_vector = {'x': target_pos['x'] - self.my_paddle_pos['x'],\r\n 'y': target_pos['y'] - self.my_paddle_pos['y']}\r\n direction_vector = {k: v / utils.vector_l2norm(direction_vector)\r\n for k, v in direction_vector.items()}\r\n\r\n movement_dist = min(current_state['paddle_max_speed'] * current_state['delta_t'],\r\n utils.distance_between_points(x[0], self.my_paddle_pos))\r\n direction_vector = {k: v * movement_dist\r\n for k, v in direction_vector.items()}\r\n \r\n #Obtener path del paddle, basandome en la direccion del puck\r\n paddlepath = estimate_path_paddle(current_state, self.future_size, self.my_paddle_pos, direction_vector)\r\n for y in paddlepath: #Si el path de mi paddle intersecta el path de mi puck\r\n if y[0]['x'] > x[0]['x'] - current_state['puck_radius'] and y[0]['x'] < x[0]['x'] + current_state['puck_radius'] and y[0]['y'] > x[0]['y'] - current_state['puck_radius'] and y[0]['y'] < x[0]['y'] + current_state['puck_radius']:\r\n #print(\"Intersecta!\")\r\n if self.my_paddle_pos['y'] > current_state['board_shape'][0]/2:\r\n target_pos = {'x': current_state['board_shape'][1] - goalR, 'y': current_state['board_shape'][0] - current_state['puck_radius']}\r\n #print(\"Me muevo pa arriba\")\r\n else:\r\n target_pos = {'x': current_state['board_shape'][1] - goalR, 'y': current_state['puck_radius']}\r\n #print(\"Me muevo pa abajo\")\r\n\r\n else:\r\n target_pos = {'x': current_state['board_shape'][1] - goalR, 'y': current_state['board_shape'][0]/2}\r\n #print(\"Me muevo normal\")\r\n\r\n direction_vector = {'x': target_pos['x'] - self.my_paddle_pos['x'],\r\n 'y': target_pos['y'] - self.my_paddle_pos['y']}\r\n direction_vector = {k: v / utils.vector_l2norm(direction_vector)\r\n for k, v in direction_vector.items()}\r\n\r\n movement_dist = min(current_state['paddle_max_speed'] * current_state['delta_t'],\r\n utils.distance_between_points(x[0], self.my_paddle_pos))\r\n direction_vector = {k: v * movement_dist\r\n for k, v in direction_vector.items()} \r\n new_paddle_pos = {'x': self.my_paddle_pos['x'] + direction_vector['x'], 'y': self.my_paddle_pos['y'] + direction_vector['y']}\r\n \r\n if utils.is_inside_goal_area_paddle(new_paddle_pos, current_state) is False and \\\r\n utils.is_out_of_boundaries_paddle(new_paddle_pos, current_state) is None:\r\n self.my_paddle_pos = new_paddle_pos\r\n \r\n else:\r\n #Rebote #############################################################################################\r\n pos_enemigo = current_state['paddle2_pos']\r\n if pos_enemigo['y'] > self.my_goal_center['y']: #Si esta arriba...\r\n lineaRebote = current_state['puck_radius']\r\n A = current_state['puck_pos']\r\n D = {'x': A['x'], 'y': A['y']-lineaRebote}\r\n B = self.opponent_goal_center\r\n else: #Esta abajo...\r\n lineaRebote = current_state['board_shape'][0] - current_state['puck_radius']\r\n A = current_state['puck_pos']\r\n D = {'x': A['x'], 'y': -(A['y']) + current_state['puck_radius']}\r\n B = self.opponent_goal_center\r\n\r\n #Recta de rebote, donde corta en el eje y del radio del puck\r\n m1 = 0 \r\n n1 = lineaRebote\r\n\r\n #Recta entre B y D\r\n m2 = (D['y'] - B['y']) / 
(D['x'] - B['x'])\r\n n2 = (B['x']*D['y'] - D['x']*B['y']) / (B['x'] - D['x'])\r\n\r\n Cx = (n2 - n1) / (m1 - m2)\r\n Cy = m1 * Cx + n1\r\n C = {'x': Cx, 'y': Cy}\r\n #print(current_state['paddle1_pos'])\r\n \r\n # estimate an aiming position\r\n target_pos = utils.aim(pt_in_roi[0], pt_in_roi[1],\r\n C, current_state['puck_radius'],\r\n current_state['paddle_radius'])\r\n\r\n # move to target position, taking into account the max. paddle speed\r\n if target_pos != self.my_paddle_pos:\r\n direction_vector = {'x': target_pos['x'] - self.my_paddle_pos['x'], #donde quiero ir, donde estoy\r\n 'y': target_pos['y'] - self.my_paddle_pos['y']}\r\n direction_vector = {k: v / utils.vector_l2norm(direction_vector)\r\n for k, v in direction_vector.items()}\r\n\r\n movement_dist = min(current_state['paddle_max_speed'] * current_state['delta_t'],\r\n utils.distance_between_points(target_pos, self.my_paddle_pos))\r\n direction_vector = {k: v * movement_dist\r\n for k, v in direction_vector.items()}\r\n \r\n new_paddle_pos = {'x': self.my_paddle_pos['x'] + direction_vector['x'],\r\n 'y': self.my_paddle_pos['y'] + direction_vector['y']}\r\n\r\n\r\n \r\n #Rectas del Triangulo ######################################################################################################\r\n if self.my_goal is 'left': #Si estoy en la izquierda\r\n R1 = (100,0,(current_state['board_shape'][1]/2)-64,(current_state['board_shape'][0]/2)-64)\r\n R2 = (100,(current_state['board_shape'][0]),(current_state['board_shape'][1]/2)-64,(current_state['board_shape'][0]/2)-64)\r\n else: #Si estoy en la derecha\r\n R1 = (current_state['board_shape'][1]-100,0, (current_state['board_shape'][1]/2)+64, (current_state['board_shape'][0])/2)\r\n R2 = ((current_state['board_shape'][1]/2)+64, (current_state['board_shape'][0])/2, current_state['board_shape'][1]+100, current_state['board_shape'][0])\r\n \r\n\r\n m1 = (R1[1]-R1[3])/(R1[0]-R1[2])\r\n m2 = (R2[1]-R2[3])/(R2[0]-R2[2])\r\n\r\n n1 = (R1[0]*R1[3] - R1[2]*R1[1]) / (R1[0] - R1[2])\r\n n2 = (R2[0]*R2[3] - R2[2]*R2[1]) / (R2[0] - R2[2])\r\n\r\n # Izquierda\r\n # R1 | Y = 0.4429065743944637x + -0.0\r\n # R2 | Y = -0.5905420991926182x + 448.0\r\n # Derecha\r\n # R1 | Y = -0.5905420991926182 x + 587.5893886966551\r\n # R2 | Y = 0.5905420991926182 x + -75.58938869665513\r\n \r\n #print(\"Y = \",m1,\"x + \", n1)\r\n #print(\"Y = \",m2,\"x + \", n2)\r\n\r\n #Calcular n de ambas rectas paralelas\r\n n1p = new_paddle_pos['y'] - new_paddle_pos['x'] * m1\r\n n2p = new_paddle_pos['y'] - new_paddle_pos['x'] * m2\r\n\r\n #Si me movimiento se va a pasar de la recta, no me muevo.\r\n if n1p < n1 or n2p > n2:\r\n new_paddle_pos = {'x': self.my_paddle_pos['x'], 'y': self.my_paddle_pos['y']}\r\n\r\n #print(\"Paralelo contra R1 = Y = \",m1,\" x + \",n1)\r\n #print(\"Paralelo contra R2 = Y = \",m2,\" x + \",n2)\r\n\r\n # check if computed new position in not inside goal area\r\n # check if computed new position in inside board limits\r\n # Check if computed new position is inside triangle\r\n if utils.is_inside_goal_area_paddle(new_paddle_pos, current_state) is False and \\\r\n utils.is_out_of_boundaries_paddle(new_paddle_pos, current_state) is None:\r\n self.my_paddle_pos = new_paddle_pos\r\n \r\n # Jugador se regresa a su porteria si el puck no esta en su lado de la cancha. 
################################################3\r\n else:\r\n goalR = current_state['board_shape'][0]*0.45/2\r\n if self.my_goal is 'left': #Si estoy en la izquierda\r\n target_pos = {'x': current_state['board_shape'][0]*0.45/2, 'y': current_state['board_shape'][0]/2} #current_state['board_shape'][0]/2\r\n else: #Si estoy en la derecha\r\n target_pos = {'x': current_state['board_shape'][1] - goalR, 'y': current_state['board_shape'][0]/2} #current_state['board_shape'][0]/2\r\n\r\n if target_pos != self.my_paddle_pos:\r\n direction_vector = {'x': target_pos['x'] - self.my_paddle_pos['x'],\r\n 'y': target_pos['y'] - self.my_paddle_pos['y']}\r\n direction_vector = {k: v / utils.vector_l2norm(direction_vector)\r\n for k, v in direction_vector.items()}\r\n\r\n movement_dist = min(current_state['paddle_max_speed'] * current_state['delta_t'],\r\n utils.distance_between_points(target_pos, self.my_paddle_pos))\r\n direction_vector = {k: v * movement_dist\r\n for k, v in direction_vector.items()}\r\n new_paddle_pos = {'x': self.my_paddle_pos['x'] + direction_vector['x'],\r\n 'y': self.my_paddle_pos['y'] + direction_vector['y']}\r\n\r\n # check if computed new position in not inside goal area\r\n # check if computed new position in inside board limits\r\n if utils.is_inside_goal_area_paddle(new_paddle_pos, current_state) is False and \\\r\n utils.is_out_of_boundaries_paddle(new_paddle_pos, current_state) is None:\r\n self.my_paddle_pos = new_paddle_pos\r\n\r\n # time.sleep(2)\r\n # return {'x': -12, 'y': -6543}\r\n return self.my_paddle_pos", "title": "" }, { "docid": "f6ea59e1a524431bd93a81494e8a2c9b", "score": "0.5461726", "text": "def test_controller(t, x, theta, f, x_dot_dot, theta_dot_dot, K, state_0=None, t_f=10):\n if state_0 is None:\n state_0 = [0, 0, np.radians(70), 0] # recovery from being tilted 70 degrees\n # state_0 = [-15, 0, 0, 0] # recovery from being offset horizontally by 15m\n # state_0 = [0, 5, 0, 0] # recovery from being imparted a velocity of 5 m/s\n # state_0 = [0, 0, 0, 30] # recovery from being imparted an angular velocity of 30 rad/s\n\n params = [[x, sp.diff(x, t), theta, sp.diff(theta, t)], f]\n x_dot_dot_func = sp.lambdify(params, x_dot_dot)\n theta_dot_dot_func = sp.lambdify(params, theta_dot_dot)\n\n def state_derivative(_, state):\n # compute control output based on LQR gains\n u = -np.dot(K, state)\n\n # calculate derivative based on unsimplified equations of motion\n x_dot_dot_val = x_dot_dot_func(state, u)[0]\n theta_dot_dot_val = theta_dot_dot_func(state, u)[0]\n return np.array([state[1], x_dot_dot_val, state[3], theta_dot_dot_val])\n\n result = solve_ivp(state_derivative, (0, t_f), state_0, method='RK45', rtol=1e-6)\n # noinspection PyUnresolvedReferences\n return result.t, result.y", "title": "" }, { "docid": "c1ae304b4d24a21d12618b91d50fb81c", "score": "0.54584044", "text": "def main_iterative():\n gamma = 0.999\n iteration = 0\n T = np.load(\"T.npy\")\n\n #Generate the first policy randomly\n # Nan=Nothing, -1=Terminal, 0=Up, 1=Left, 2=Down, 3=Right\n p = np.random.randint(0, 4, size=(12)).astype(np.float32)\n p[5] = np.NaN\n p[3] = p[7] = -1\n\n #Utility vectors\n u = np.array([0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0])\n\n #Reward vector\n r = np.array([-0.04, -0.04, -0.04, +1.0,\n -0.04, 0.0, -0.04, -1.0,\n -0.04, -0.04, -0.04, -0.04])\n\n while True:\n iteration += 1\n epsilon = 0.0001\n #1- Policy evaluation\n u1 = u.copy()\n u = return_policy_evaluation(p, u, r, T, gamma)\n #Stopping criteria\n delta = np.absolute(u - u1).max()\n 
if delta < epsilon * (1 - gamma) / gamma: break\n for s in range(12):\n if not np.isnan(p[s]) and not p[s]==-1:\n v = np.zeros((1,12))\n v[0,s] = 1.0\n #2- Policy improvement\n a = return_expected_action(u, T, v) \n if a != p[s]: p[s] = a\n print_policy(p, shape=(3,4))\n\n print(\"=================== FINAL RESULT ==================\")\n print(\"Iterations: \" + str(iteration))\n print(\"Delta: \" + str(delta))\n print(\"Gamma: \" + str(gamma))\n print(\"Epsilon: \" + str(epsilon))\n print(\"===================================================\")\n print(u[0:4])\n print(u[4:8])\n print(u[8:12])\n print(\"===================================================\")\n print_policy(p, shape=(3,4))\n print(\"===================================================\")", "title": "" }, { "docid": "56039660cc01b461e14ad9b4b184f78e", "score": "0.54574776", "text": "def update_vals(self, traj):\n G = 0 # Returns\n W = 1 # Importance scaling factor\n\n # Update policy and state-action values for each step in (reversed) trajectory\n n_steps = len(traj)\n for step in range(n_steps-1 , -1, -1):\n (y, x, vy, vx, act, greedy_act), reward = traj[step] # Unpack trajectory step\n G = self.gamma*G + reward # Return from action\n self.C[y, x, vy, vx, act] += W # Update cumulative weight sum\n self.Q[y, x, vy, vx, act] += (W*(G - self.Q[y, x, vy, vx, act]))/(self.C[y, x, vy, vx, act])\n\n # Update target policy\n self.target_policy[y, x, vy, vx] = np.random.choice(np.flatnonzero(self.Q[y, x, vy, vx] \\\n == self.Q[y, x, vy, vx].max()))\n\n if act != self.target_policy[y, x, vy, vx]:\n break # If action is off-policy then importance ratio is 0 so end episode\n else: # Update importance ratio\n W /= (1 - self.e + self.e/9)", "title": "" }, { "docid": "ce89bc0b820aeef94a97997ec05ff893", "score": "0.5447505", "text": "def spam_objective_function(theta, params, m, params_twop, m_twop):\n\n u1, u2 = theta\n params_twop.u = [u1, u2]\n\n return np.sum((m.light_curve(params) - m_twop.light_curve(params_twop))**2)", "title": "" }, { "docid": "8475cc751154997148e73d98ba28b78c", "score": "0.5441211", "text": "def __init__(self, mdp, discount = 0.9, iterations = 100, theta = 1e-5):\n self.mdp = mdp\n self.discount = discount\n self.iterations = iterations\n self.values = collections.defaultdict(float)\n states = self.mdp.getStates()\n for state in states:\n self.values[state] = 0\n\n \"*** YOUR CODE HERE ***\"\n # Compute predecessors of all states\n predecessors = {state: [] for state in states}\n for state in states:\n actions = self.mdp.getPossibleActions(state)\n for action in actions:\n transitions = self.mdp.getTransitionStatesAndProbs(state, action)\n for transition in transitions:\n transitionState = transition[0]\n transitionProb = transition[1]\n if transitionProb > 0:\n predecessors[transitionState].append(state)\n\n q = util.PriorityQueue()\n for state in states:\n if not self.mdp.isTerminal(state):\n actions = self.mdp.getPossibleActions(state)\n curr = self.values[state]\n qValues = [self.computeQValueFromValues(state, action) for action in actions]\n highQ = max(qValues)\n diff = abs(curr - highQ)\n q.update(state, -diff)\n\n times = []\n for i in range(iterations):\n #print(sum(abs(value - 100) for state, value in self.values.items() if not self.mdp.isTerminal(state)))\n start = time.time()\n if q.isEmpty():\n return\n s = q.pop()\n if not self.mdp.isTerminal(state):\n # update state\n actions = mdp.getPossibleActions(s)\n qValues = [self.computeQValueFromValues(s, action) for action in actions]\n highQ = max(qValues)\n 
self.values[s] = highQ\n \n for p in predecessors[s]:\n actions = self.mdp.getPossibleActions(p)\n curr = self.values[p]\n qValues = [self.computeQValueFromValues(p, action) for action in actions]\n highQ = max(qValues)\n diff = abs(curr - highQ)\n if diff > theta:\n q.update(p, -diff)\n print('Iteration: ' + str(i))\n print(sum(abs(value - 100) for state, value in self.values.items() if not self.mdp.isTerminal(state)))\n elapsed = time.time()-start\n print('Time elapsed: ' + str(elapsed))", "title": "" }, { "docid": "3067b696dba5234830702b50613995e1", "score": "0.5431126", "text": "def get_reward(self, done):\n \n ### Implementation 1 ###\n# # Rewards agent for moving as close to max speed as possible\n# cur_velocity = np.sum(i**2 for i in self.sim.v) ** 0.5\n# vel = 0.2 * abs(cur_velocity - self.max_v)\n# vel = np.tanh(vel)\n \n# # Rewards the agent for being stable in regards to its angular velocity\n# cur_angular_velocity = np.sum(i**2 for i in self.sim.angular_v) ** 0.5\n# angle = 0.002 * abs(cur_angular_velocity)\n# angle = np.tanh(angle)\n \n# # Rewards agent for being closer and closer to the objective\n# pos = 0.02 * abs(self.sim.pose[:3] - self.target_pos).sum()\n# pos = 2 * np.tanh(pos)\n\n ### Implementaion 2 ###\n# angle = 0.05 * abs(np.sum(self.sim.angular_v))\n# pos = 0.25 * abs(np.sum(self.sim.pose[:3] - self.target_pos))\n# vel = abs(np.sum(np.subtract(self.target_pos - self.sim.pose[:3] , self.sim.v)))\n\n\n # Current Velocity\n cur_velocity = np.sum(i**2 for i in self.sim.v) ** 0.5\n\n pos = 0.25 * abs(np.sum(self.sim.pose[:3] - self.target_pos)) # rewards agent for being close to target\n vel = 0.25 * abs(np.sum(self.sim.v)) # rewards agent for having a low velocity\n angle = 0.05 * abs(np.sum(self.sim.angular_v)) # rewards agent for being stable\n elevation = 0.125 * abs(self.sim.pose[3]) # rewards agent for staying away from the ground\n \n print(\"{:7.3f} || {:7.3f} || {:7.3f} || {:7.3f} || {:7.3f} || {}\".format\\\n (cur_velocity, pos, vel, angle, elevation, self.sim.pose[:3]))\n \n # the 1 rewards the agent for staying alive\n return 1. 
- pos - vel - angle - elevation", "title": "" }, { "docid": "3c2220e8c5fce9264bcafc24ac7ad627", "score": "0.5425407", "text": "def parameters_7v_random_motion():\n # load graph\n graph_file = 'G7V_test.p'\n g = ext.get_graph(graph_file)\n # input for target initial vertices (belief)\n v_target = [7]\n # initial searcher vertices\n v_searchers = [1, 2]\n deadline = 3\n # type of motion\n target_motion = 'random'\n belief_distribution = 'uniform'\n # initialize parameters\n b_0 = cp.set_initial_belief(g, v_target, belief_distribution)\n M = cp.set_motion_matrix(g, target_motion)\n searchers = cp.create_dict_searchers(g, v_searchers)\n\n n = 7\n return n, b_0, M, searchers", "title": "" }, { "docid": "65150f0d5337ddf59dd72224693bc3e3", "score": "0.5425303", "text": "def steer_phi(self, start_state, goal_state, t0 = 0, dt = 0.01, delta_t = 2):\n pass", "title": "" }, { "docid": "cbf83b7e8d5d95d5e901c78445e07c89", "score": "0.54228806", "text": "def move(self, state):\n \n global player\n player = state.player\n\n utility = self.alphaBetaSearch(state)\n #print(utility)\n return self.bestAction", "title": "" }, { "docid": "58a1a1158c8dbc74f966a5ce2315f93d", "score": "0.5422273", "text": "def moveThetaPhi(self, cobras, thetaMoves, phiMoves, thetaFroms=None, phiFroms=None,\n thetaFast=True, phiFast=True, doRun=True, ccwLimit=True):\n\n nCobras = len(cobras)\n if np.ndim(thetaMoves) == 0:\n thetaMoves = np.zeros(nCobras) + thetaMoves\n if np.ndim(phiMoves) == 0:\n phiMoves = np.zeros(nCobras) + phiMoves\n\n if nCobras != len(thetaMoves):\n raise RuntimeError(\"number of theta moves must match number of cobras\")\n if nCobras != len(phiMoves):\n raise RuntimeError(\"number of phi moves must match number of cobras\")\n if thetaFroms is not None and nCobras != len(thetaFroms):\n raise RuntimeError(\"number of theta froms must match number of cobras\")\n if phiFroms is not None and nCobras != len(phiFroms):\n raise RuntimeError(\"number of phi froms must match number of cobras\")\n\n nCobras = self.calibModel.nCobras\n _phiMoves = np.zeros(nCobras)\n _thetaMoves = np.zeros(nCobras)\n _phiFroms = np.zeros(nCobras)\n _thetaFroms = np.zeros(nCobras)\n\n cIdx = [self._mapCobraIndex(c) for c in cobras]\n _phiMoves[cIdx] = phiMoves\n _thetaMoves[cIdx] = thetaMoves\n if phiFroms is not None:\n _phiFroms[cIdx] = phiFroms\n if thetaFroms is not None:\n _thetaFroms[cIdx] = thetaFroms\n elif not ccwLimit:\n _thetaFroms = (self.calibModel.tht1 - self.calibModel.tht0 + np.pi) % (2*np.pi) + np.pi\n\n if isinstance(thetaFast, bool):\n _thetaFast = thetaFast\n elif len(thetaFast) == len(cobras):\n _thetaFast = np.full(nCobras, True)\n _thetaFast[cIdx] = thetaFast\n else:\n raise RuntimeError(\"number of thetaFast must match number of cobras\")\n\n if isinstance(phiFast, bool):\n _phiFast = phiFast\n elif len(phiFast) == len(cobras):\n _phiFast = np.full(nCobras, True)\n _phiFast[cIdx] = phiFast\n else:\n raise RuntimeError(\"number of phiFast must match number of cobras\")\n\n thetaSteps, phiSteps = self.calculateSteps(\n _thetaFroms, _thetaMoves, _phiFroms, _phiMoves, _thetaFast, _phiFast)\n cThetaSteps = thetaSteps[cIdx]\n cPhiSteps = phiSteps[cIdx]\n\n \"\"\"\n Looking for NaN values and put them as 0\n \"\"\"\n thetaIndex = np.isnan(cThetaSteps)\n phiIndex = np.isnan(cPhiSteps)\n cThetaSteps[thetaIndex] = 0\n cPhiSteps[phiIndex] = 0\n\n self.logger.debug(f'steps (run={doRun}): {list(zip(cThetaSteps, cPhiSteps))}')\n if doRun:\n self.moveSteps(cobras, cThetaSteps, cPhiSteps, thetaFast=thetaFast, 
phiFast=phiFast)\n\n return cThetaSteps, cPhiSteps", "title": "" }, { "docid": "8d6c50773c992e86f20077b76d7ac2fc", "score": "0.54179865", "text": "def correction(self, depth, beta, x_s, y_s, theta_s, xt):\r\n \r\n psg, psg_inv = self.transform_matrix(x_s, y_s, theta_s)\r\n \r\n \r\n # From true state x to depth, beta\r\n z = np.array([depth, beta])\r\n \r\n for i in range(xt.count()[0]):\r\n xt_s = np.dot(psg_inv, np.array([xt['x'].loc[i], xt['y'].loc[i], 1]))\r\n \r\n depth_hat = np.sqrt(xt_s[0]**2 + xt_s[1]**2)\r\n beta_hat = np.arctan(xt_s[0]/xt_s[1])\r\n \r\n mean = np.array([depth_hat, beta_hat]) + self.z_mu\r\n \r\n # Likelihood \r\n p_zx = multivariate_normal.pdf(z, mean, self.z_cov)\r\n \r\n # Update weight\r\n xt['weight'].loc[i] = p_zx\r\n \r\n return xt", "title": "" }, { "docid": "a1f1eda4096b3221b4993f04d68ae1ec", "score": "0.54165655", "text": "def update_policy(self):\n x_k = self.state_basis(self.state)\n x_k1 = self.state_basis(self.next_state)\n delta = np.clip(self.q_predicted - self.q_observed, -1, 1)\n self.average_reward += self.ALPHA_r * delta\n self.cumulative_reward += self.r\n # self.average_reward = self.cummulative_reward / (self.timestep + 1)\n # self.z_w = self.lam_w * self.z_w + (x_k1 - x_k)\n self.mu = self.theta_mu @ x_k\n self.mu = np.clip(self.mu, self.actionspace[0], self.actionspace[1])\n grad_pi_mu = (self.SIGMA**2) * (self.action - self.mu) * x_k\n self.z_theta_mu = self.lam_theta * self.z_theta_mu + (grad_pi_mu)\n # self.w += self.ALPHA_w * delta * self.z_w # update reward function\n self.theta_mu += self.ALPHA_mu * delta * self.z_theta_mu", "title": "" }, { "docid": "b9a3f150108ddb2471232956ffe28e39", "score": "0.5414264", "text": "def update_psi(self, psi):\r\n\r\n X1, X2 = np.meshgrid(self.xvecs[0], self.xvecs[1])\r\n\r\n p = np.zeros((len(self.xvecs[0]), len(self.xvecs[1])), dtype=complex)\r\n N = psi.dims[0][0]\r\n\r\n for n1 in range(N):\r\n kn1 = exp(-1j * self.theta1 * n1) / \\\r\n sqrt(sqrt(pi) * 2 ** n1 * factorial(n1)) * \\\r\n exp(-X1 ** 2 / 2.0) * np.polyval(hermite(n1), X1)\r\n\r\n for n2 in range(N):\r\n kn2 = exp(-1j * self.theta2 * n2) / \\\r\n sqrt(sqrt(pi) * 2 ** n2 * factorial(n2)) * \\\r\n exp(-X2 ** 2 / 2.0) * np.polyval(hermite(n2), X2)\r\n i = state_number_index([N, N], [n1, n2])\r\n p += kn1 * kn2 * psi.data[i, 0]\r\n\r\n self.data = abs(p) ** 2", "title": "" }, { "docid": "965db6d855926175487656d3b70ee717", "score": "0.5411206", "text": "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n # actualizamos el qValor\n \"\"\"\n para ello el qValor pasa a ser la suma del qValor actual + alpha * ( recompensa + (gamma * el qValor mas\n alto para el estado s' )) - el qValor actual\n \"\"\"\n if state:\n #: gamma dinamico\n #self.n[(state, action)] += 1\n #learning_rate = self.alpha / self.n[(state, action)]\n learning_rate = self.alpha\n #: update\n self.q[state, action] += learning_rate * (\n reward + ((self.gamma * self.getValue(nextState)) - self.getQValue(state, action)))", "title": "" }, { "docid": "5465e6193f6223f2fadf38f45ceac8d3", "score": "0.54104245", "text": "def reweight( self, r ):\n\n self.res = []\n bounds = [(0,1)] * len(self.w0) # weights have to be bound between 0 and 1\n opt = { 'maxiter': 10000, 'maxfun': 10000000 }\n flags = [] # stores error messages of minimizations that didn't converge\n\n w0 = self.w0\n for t in self.ths: # run minimization for all values of theta\n\n print(f\"# THETA: {t} \\r\", end = \"\")\n\n rs = optimize.minimize( self._penalty, w0, args = (r,t), bounds = 
bounds, jac = False, options = opt, method = 'L-BFGS-B' )\n self.res.append( rs )\n w0 = rs.x / np.sum(rs.x)\n\n if not rs.success: # some minimizations had problems!\n flags.append( [t, rs.message] )\n\n if flags == []:\n print(\"\\n# Done! All minimizations terminated successfully\")\n else:\n print(\"\\n# Done! Some minimizations terminated unsuccessfully: \")\n print(flags)\n\n self._save()\n print( f\"# Saved {self.out}.pkl\" )", "title": "" }, { "docid": "42270b6135a9c0caf68f62e0c0cb7f81", "score": "0.54091257", "text": "def update_parameters(self):\n if self.num_episodes_to_train_left > 0.7 * self.num_episodes_to_train:\n self.epsilon -= self.small_decrement\n elif self.num_episodes_to_train_left > 0.3 * self.num_episodes_to_train:\n self.epsilon -= self.big_decrement\n elif self.num_episodes_to_train_left > 0:\n self.epsilon -= self.small_decrement\n else:\n self.epsilon = 0.0\n self.alpha = 0.0\n\n self.num_episodes_to_train_left -= 1", "title": "" }, { "docid": "646363c73103f408f2ea03b9de55e970", "score": "0.5404007", "text": "def policy_evaluation(env, v, pi, gamma, theta):\n\n delta = theta + 1\n iter = 0\n\n while delta >= theta:\n old_v = v.copy()\n delta = 0\n\n # Traverse all states\n for x in range(env.n):\n for y in range(env.n):\n # Run one iteration of the Bellman update rule for the value function\n bellman_update(env, v, old_v, x, y, pi, gamma)\n # Compute difference\n delta = max(delta, abs(old_v[x, y] - v[x, y]))\n\n iter += 1\n\n # Plot new value function\n plot_v_values(v, env.n)\n print(\"\\nThe Policy Evaluation algorithm converged after {} iterations\".format(iter))", "title": "" }, { "docid": "c228b9626fe87d47d080ae9dceb051b3", "score": "0.53989005", "text": "def QLearning(NumAgents,NUM_ITERATIONS,CostFunction,noV):\n import numpy as np\n import random\n import math\n \n # start condition for the decision variables\n xVar = np.random.randint(2, size=[noV,NumAgents])\n\n # learning rate settings\n alpha = 0.8 #0.8; \n gamma = 0.2 #0.5;\n\n # build a state action matrix by finding all valid states from maze\n # we have two actions (flip state or stay, 0 or 1);\n Q = np.zeros([NumAgents,NUM_ITERATIONS])\n Best = np.ones(NUM_ITERATIONS)*math.nan\n \n NumbConv = 200\n IterNumb=0\n stepEqual = 0\n\n while IterNumb < (NUM_ITERATIONS-1): \n IterNumb += 1\n # calculate the fitness for each xVar\n fitness = np.ones(NumAgents)*math.nan\n for i in range(NumAgents):\n fitness[i] = 1/CostFunction(xVar[:,i])\n\n Best[IterNumb] = max(fitness) #find the agent with the better fitness\n indexB = (-fitness).argsort()\n \n # Getting the rewarding\n for i in range(NumAgents):\n if i == indexB[0]:\n rewardVal = 2\n Q[i,IterNumb] = Q[i,IterNumb] + alpha*(rewardVal+gamma*max(Q[:,IterNumb]) - Q[i,IterNumb])\n elif i == indexB[1] or i==indexB[2]:\n rewardVal = 1\n Q[i,IterNumb] = Q[i,IterNumb] + alpha*(rewardVal+gamma*max(Q[:,IterNumb]) -Q[i,IterNumb])\n else:\n rewardVal = 0;\n Q[i,IterNumb] = Q[i,IterNumb] + alpha*(rewardVal+gamma*max(Q[:,IterNumb]) -Q[i,IterNumb])\n\n indexQ = (-Q[:,IterNumb]).argsort()\n for i in range(NumAgents):\n if i == indexQ[0]:\n xVar[:,i] = xVar[:,i]\n elif i == indexQ[1] or i == indexQ[2]:\n indFlip = np.random.randint(noV)\n xVar[indFlip,i] = 1-xVar[indFlip,i]\n else:\n indFlip = np.random.randint(noV)\n xVar[:,i] = xVar[:,indexQ[0]]\n xVar[indFlip,i] = 1-xVar[indFlip,i]\n \n if Best[IterNumb-1] == Best[IterNumb]:\n stepEqual += 1\n else:\n stepEqual = 0\n if stepEqual == NumbConv:\n break\n\n BestxVar = xVar[:,indexQ[0]]\n BestCost = 
CostFunction(BestxVar)\n \n return(BestxVar,BestCost,1/Best,IterNumb)", "title": "" }, { "docid": "0c59785f059630ff479632c56afc801f", "score": "0.5393824", "text": "def eq_of_motion(w, t, p):\n x, y, b, x_dot, y_dot, beta_dot = w\n m, k, g, H, c, D, t, Izz, k_beta, c_beta, L_1_init, L_2_init = p\n\n # Create sysODE = (x', theta', x_dot', theta_dot'):\n sysODE = [x_dot,\n y_dot,\n beta_dot,\n (D*beta_dot**2*m*sin(b)/2 - D*m*(-D*g*m*sin(b)/2 - D*(D*beta_dot**2*m*sin(b)/2 - c*x*(x_dot + y_dot)/sqrt(x**2 + y**2) + c*(H - x)*(-x_dot + y_dot)/sqrt(y**2 + (H - x)**2) - 1.0*k*x*(-L_1_init + sqrt(x**2 + y**2))/sqrt(x**2 + y**2) - 1.0*k*(-H + x)*(-L_2_init + sqrt(y**2 + (H - x)**2))/sqrt(y**2 + (H - x)**2))*cos(b)/2 - D*(-D*beta_dot**2*m*cos(b)/2 - c*y*(-x_dot + y_dot)/sqrt(y**2 + (H - x)**2) - c*y*(x_dot + y_dot)/sqrt(x**2 + y**2) + g*m - 1.0*k*y*(-L_1_init + sqrt(x**2 + y**2))/sqrt(x**2 + y**2) - 1.0*k*y*(-L_2_init + sqrt(y**2 + (H - x)**2))/sqrt(y**2 + (H - x)**2))*sin(b)/2 - 1.0*b*k_beta - beta_dot*c_beta - m*(-D*beta_dot*x_dot*sin(b)/2 + D*beta_dot*y_dot*cos(b)/2 + D*(-beta_dot*x_dot*sin(b) + beta_dot*y_dot*cos(b))/2)/2 + m*(-D*beta_dot*x_dot*sin(b)/2 + D*beta_dot*y_dot*cos(b)/2 + D*beta_dot*(-x_dot*sin(b) + y_dot*cos(b))/2)/2)*cos(b)/(2*(-D**2*m*sin(b)**2/4 - D**2*m*cos(b)**2/4 + D**2*m/4 + Izz)) - c*x*(x_dot + y_dot)/sqrt(x**2 + y**2) + c*(H - x)*(-x_dot + y_dot)/sqrt(y**2 + (H - x)**2) - 1.0*k*x*(-L_1_init + sqrt(x**2 + y**2))/sqrt(x**2 + y**2) - 1.0*k*(-H + x)*(-L_2_init + sqrt(y**2 + (H - x)**2))/sqrt(y**2 + (H - x)**2))/m\n (-D*beta_dot**2*m*cos(b)/2 - D*m*(-D*g*m*sin(b)/2 - D*(D*beta_dot**2*m*sin(b)/2 - c*x*(x_dot + y_dot)/sqrt(x**2 + y**2) + c*(H - x)*(-x_dot + y_dot)/sqrt(y**2 + (H - x)**2) - 1.0*k*x*(-L_1_init + sqrt(x**2 + y**2))/sqrt(x**2 + y**2) - 1.0*k*(-H + x)*(-L_2_init + sqrt(y**2 + (H - x)**2))/sqrt(y**2 + (H - x)**2))*cos(b)/2 - D*(-D*beta_dot**2*m*cos(b)/2 - c*y*(-x_dot + y_dot)/sqrt(y**2 + (H - x)**2) - c*y*(x_dot + y_dot)/sqrt(x**2 + y**2) + g*m - 1.0*k*y*(-L_1_init + sqrt(x**2 + y**2))/sqrt(x**2 + y**2) - 1.0*k*y*(-L_2_init + sqrt(y**2 + (H - x)**2))/sqrt(y**2 + (H - x)**2))*sin(b)/2 - 1.0*b*k_beta - beta_dot*c_beta - m*(-D*beta_dot*x_dot*sin(b)/2 + D*beta_dot*y_dot*cos(b)/2 + D*(-beta_dot*x_dot*sin(b) + beta_dot*y_dot*cos(b))/2)/2 + m*(-D*beta_dot*x_dot*sin(b)/2 + D*beta_dot*y_dot*cos(b)/2 + D*beta_dot*(-x_dot*sin(b) + y_dot*cos(b))/2)/2)*sin(b)/(2*(-D**2*m*sin(b)**2/4 - D**2*m*cos(b)**2/4 + D**2*m/4 + Izz)) - c*y*(-x_dot + y_dot)/sqrt(y**2 + (H - x)**2) - c*y*(x_dot + y_dot)/sqrt(x**2 + y**2) + g*m - 1.0*k*y*(-L_1_init + sqrt(x**2 + y**2))/sqrt(x**2 + y**2) - 1.0*k*y*(-L_2_init + sqrt(y**2 + (H - x)**2))/sqrt(y**2 + (H - x)**2))/m\n (-D*g*m*sin(b)/2 - D*(D*beta_dot**2*m*sin(b)/2 - c*x*(x_dot + y_dot)/sqrt(x**2 + y**2) + c*(H - x)*(-x_dot + y_dot)/sqrt(y**2 + (H - x)**2) - 1.0*k*x*(-L_1_init + sqrt(x**2 + y**2))/sqrt(x**2 + y**2) - 1.0*k*(-H + x)*(-L_2_init + sqrt(y**2 + (H - x)**2))/sqrt(y**2 + (H - x)**2))*cos(b)/2 - D*(-D*beta_dot**2*m*cos(b)/2 - c*y*(-x_dot + y_dot)/sqrt(y**2 + (H - x)**2) - c*y*(x_dot + y_dot)/sqrt(x**2 + y**2) + g*m - 1.0*k*y*(-L_1_init + sqrt(x**2 + y**2))/sqrt(x**2 + y**2) - 1.0*k*y*(-L_2_init + sqrt(y**2 + (H - x)**2))/sqrt(y**2 + (H - x)**2))*sin(b)/2 - 1.0*b*k_beta - beta_dot*c_beta - m*(-D*beta_dot*x_dot*sin(b)/2 + D*beta_dot*y_dot*cos(b)/2 + D*(-beta_dot*x_dot*sin(b) + beta_dot*y_dot*cos(b))/2)/2 + m*(-D*beta_dot*x_dot*sin(b)/2 + D*beta_dot*y_dot*cos(b)/2 + D*beta_dot*(-x_dot*sin(b) + y_dot*cos(b))/2)/2)/(-D**2*m*sin(b)**2/4 - 
D**2*m*cos(b)**2/4 + D**2*m/4 + Izz)\n ]\n return sysODE", "title": "" }, { "docid": "d3157b019491b178ea458f6e106f0deb", "score": "0.53889453", "text": "def calculate_params(self, conf: float = 0.95):\n \n # Mean Position\n self.parameters['mean_position'] = [self.x.mean(), self.y.mean()]\n \n # Displacement and length of the COP\n displacement_x = [ np.abs(self.x[i+1] - self.x[i]) for i in range(len(self.x) -1) ]\n displacement_y = [np.abs(self.y[i+1] - self.y[i]) for i in range(len(self.y) -1)]\n displacement_total = [np.sqrt(displacement_x[i]**2 + displacement_y[i]**2) for i in range(len(displacement_x))]\n self.parameters['lenght_cop'] = np.sum(displacement_total)\n\n # Range of movement\n self.parameters['range'] = [self.x.max() - self.x.min(), self.y.max() - self.y.min()]\n \n # Standard Deviation of the movement\n self.parameters['standard_deviation'] = [self.x.std(), self.y.std()]\n \n # Speed of the movement \n movement_speed_x = [displacement_x[i] / self.period for i in range(len(displacement_x))]\n movement_speed_y = [displacement_y[i] / self.period for i in range(len(displacement_y))]\n movement_speed = [displacement_total[i] / self.period for i in range(len(displacement_total))]\n\n self.parameters['speed_by_axis'] = [np.mean(movement_speed_x), np.mean(movement_speed_y)]\n self.parameters['speed_total'] = np.mean(movement_speed)\n\n # Freq analysis\n ''' TODO Frequency analysis'''", "title": "" }, { "docid": "02cb9ddda14462df2d0573581182b69b", "score": "0.5387585", "text": "def _update_white_win_probability(self) -> None: \n if self._subtrees == []: \n pass \n elif self.is_white_move: \n maxi = self.find_max() \n \n self.white_win_probability = maxi \n else: \n average = self.avg_helper() \n self.white_win_probability = average[0] / average[1]", "title": "" }, { "docid": "22647870b939658422a65d3772c508c0", "score": "0.5384894", "text": "def _update_parameters_with_rollback(self) -> None:\n\n if len(self._to_rollback) == 0:\n # nothing to rollback, fall back to simple parameter update\n self.model.update_param_epoch()\n return\n\n m = self.model\n lpj_fn = m.log_pseudo_joint if isinstance(m, Optimized) else m.log_joint\n\n assert self.train_data is not None and self.train_states is not None # to make mypy happy\n all_data = self.train_data.dataset.tensors[1]\n states = self.train_states\n\n old_params = {p: m.theta[p].clone() for p in self._to_rollback}\n old_F = m.free_energy(idx=to.arange(all_data.shape[0]), batch=all_data, states=states)\n all_reduce(old_F)\n old_lpj = states.lpj.clone()\n m.update_param_epoch()\n states.lpj[:] = lpj_fn(all_data, states.K)\n new_F = m.free_energy(idx=to.arange(all_data.shape[0]), batch=all_data, states=states)\n all_reduce(new_F)\n if new_F < old_F:\n for p in self._to_rollback:\n m.theta[p][:] = old_params[p]\n states.lpj[:] = old_lpj", "title": "" }, { "docid": "4a68f008b775473de79acfdbfc819eb4", "score": "0.5380981", "text": "def train(self):\n \n for i in range(self.nb_steps):\n deltas = self.gen_random_deltas()\n r_pos = np.zeros(self.num_deltas) # Initialising r[+]\n r_neg = np.zeros(self.num_deltas) # Initialising r[-]\n\n # Calculating positive and negative rewards\n for k in range(self.num_deltas):\n r_pos[k] = self.execute(direction=\"+\", delta=deltas[k])\n r_neg[k] = self.execute(direction=\"-\", delta=deltas[k])\n\n # Calculate Standard Deviation of the rewards\n self.sigma_rewards = np.array(r_pos + r_neg ).std()\n\n rollouts = []\n\n for k,(r1,r2,delta) in enumerate(zip(r_pos,r_neg,deltas)):\n 
rollouts.append([r1,r2,delta])\n\n rollouts = self.roll_best(rollouts)\n\n # Update the parameter theta\n self.update(self.learning_rate,self.num_best_deltas,self.sigma_rewards,rollouts)\n\n reward_evaluation = self.execute()\n print('Episode Number:', i, 'Reward:', reward_evaluation)", "title": "" }, { "docid": "6de93728ead9d8d4efdae29975568635", "score": "0.53796923", "text": "def update_parameters_adam(parameters, grads, learning_rate, v, s, t, beta1=0.9, beta2=0.999, epsilon=1e-8):\n\n L = len(parameters) // 2 # number of layers in the neural network\n v_corrected = {}\n s_corrected = {}\n\n # Adam update for each parameter\n for l in range(1, L + 1):\n # compute velocities\n v['dW' + str(l)] = beta1 * v['dW' + str(l)] + \\\n (1 - beta1) * grads['dW' + str(l)]\n v['db' + str(l)] = beta1 * v['db' + str(l)] + \\\n (1 - beta1) * grads['db' + str(l)]\n\n s['dW' + str(l)] = beta2 * s['dW' + str(l)] + \\\n (1 - beta2) * grads['dW' + str(l)]**2\n s['db' + str(l)] = beta2 * s['db' + str(l)] + \\\n (1 - beta2) * grads['db' + str(l)]**2\n\n # bias correction\n v_corrected[\"dW\" + str(l)] = v[\"dW\" + str(l)] / (1 - beta1**t)\n v_corrected[\"db\" + str(l)] = v[\"db\" + str(l)] / (1 - beta1**t)\n\n s_corrected[\"dW\" + str(l)] = s[\"dW\" + str(l)] / (1 - beta2**t)\n s_corrected[\"db\" + str(l)] = s[\"db\" + str(l)] / (1 - beta2**t)\n\n # update parameters\n parameters['W' + str(l)] -= learning_rate * v_corrected[\"dW\" +\n str(l)] / (np.sqrt(s_corrected[\"dW\" + str(l)]) + epsilon)\n parameters['b' + str(l)] -= learning_rate * v_corrected[\"db\" +\n str(l)] / (np.sqrt(s_corrected[\"db\" + str(l)]) + epsilon)\n\n return parameters, v, s", "title": "" }, { "docid": "b3caf88e2807dd40f25584f050fcd21b", "score": "0.53782845", "text": "def act(self, s, a):\n\n self.name = 'DynaMaze'\n # update velocity with probability 1-beta\n global V, V_MIN, V_MAX, REWARD, POSITION\n if np.random.random() < 1-self.config['beta']:\n if a in [_RIGHT, _UP, _LEFT] and V > V_MIN:\n V -= 1\n elif a in [RIGHT_, UP_, LEFT_] and V < V_MAX:\n V += 1\n\n r_border = range(WIDTH-1, WIDTH**2, WIDTH) # states on the right border\n l_border = range(0, WIDTH**2, WIDTH) # states on the left border\n t_border = range(WIDTH) # states on the top border\n\n units = range(V)\n check = False # flag to indicate if we visited the checkpoint\n\n # ----------------------------- #\n # --- move RIGHT of V units --- #\n # ----------------------------- #\n if a < len(ACTIONS) / 3:\n for i in units:\n self.world[STATE2WORLD[s+i]] = '>' # draw my path gradualy in the world\n # crash: reset world and velocities, return to start state\n if s+i in r_border or s+i+1 in POSITION['WALLS']:\n self.reset()\n return POSITION['START'], REWARD['CRASH'], False\n # went through the checkpoint: increase V_MAX and return bonus (only the first time!)\n elif s+i+1 == POSITION['CHECKPNT']:\n check = V_MAX != 5\n V_MAX = 5\n # goal: draw where I end up & return\n elif s+i+1 == POSITION['GOAL']:\n self.world[STATE2WORLD[s+i+1]] = 'O'\n return s+i+1, REWARD['WIN'], True\n # draw where I end up & return\n self.world[STATE2WORLD[s+V]] = 'O'\n return (s+V, REWARD['CHECKPNT'], False) if check else (s+V, REWARD['STEP'], False)\n\n # ----------------------------- #\n # ---- move UP of V units ----- #\n # ----------------------------- #\n elif a < 2*len(ACTIONS) / 3:\n for i in units:\n self.world[STATE2WORLD[s-i*WIDTH]] = '|' # draw my path gradualy in the world\n # crash: reset world and velocities, return to start state\n if s-i*WIDTH in t_border or s-(i+1)*WIDTH in 
POSITION['WALLS']:\n self.reset()\n return POSITION['START'], REWARD['CRASH'], False\n # went through the checkpoint: increase V_MAX and return bonus (only the first time!)\n elif s-(i+1)*WIDTH == POSITION['CHECKPNT']:\n check = V_MAX != 5\n V_MAX = 5\n # goal: draw where I end up & return\n elif s-(i+1)*WIDTH == POSITION['GOAL']:\n self.world[STATE2WORLD[s-(i+1)*WIDTH]] = 'O'\n return s-(i+1)*WIDTH, REWARD['WIN'], True\n # nothing special: draw where I end up & return\n self.world[STATE2WORLD[s-V*WIDTH]] = 'O'\n return (s-V*WIDTH, REWARD['CHECKPNT'], False) if check else (s-V*WIDTH, REWARD['STEP'], False)\n\n # ----------------------------- #\n # --- move LEFT of V units ---- #\n # ----------------------------- #\n elif a < len(ACTIONS):\n for i in units:\n self.world[STATE2WORLD[s-i]] = '<' # draw my path gradualy in the world\n # crash: reset world and velocities, return to start state\n if s-i in l_border or s-i-1 in POSITION['WALLS']:\n self.reset()\n return POSITION['START'], REWARD['CRASH'], False\n # went through the checkpoint: increase V_MAX and return bonus (only the first time!)\n elif s-i-1 == POSITION['CHECKPNT']:\n check = V_MAX != 5\n V_MAX = 5\n # goal: draw where I end up & return\n elif s-i-1 == POSITION['GOAL']:\n self.world[STATE2WORLD[s-i-1]] = 'O'\n return s-i-1, REWARD['WIN'], True\n # draw where I end up & return\n self.world[STATE2WORLD[s-V]] = 'O'\n return (s-V, REWARD['CHECKPNT'], False) if check else (s-V, REWARD['STEP'], False)\n\n return s, REWARD['STEP'], False # WARNING: SHOULD NEVER HAPPEN", "title": "" }, { "docid": "186d7e4e797fb8cb4356d3726f59b67b", "score": "0.5377268", "text": "def wandering(self):\n # TODO need test and improve\n print(\"Now wandering\")\n print(\"--------------------\")\n turnProbability = 0.28\n stopTurnProbability = 0.72\n leftTurnProbability = 0.72\n\n times = 30 # This Wandering has (times) operations\n i = 0\n find = False\n while i < times \\\n and self.curPoseData[0] == None \\\n and self.ObstacleData >= 250 : # If have detect change end loop\n \n r = random.random()\n\n time.sleep(1)\n self.move.goOneStep()\n \n i += 1\n if r <= turnProbability: # Turn switch\n stop = random.random()\n\n time.sleep(1)\n while stop >= stopTurnProbability \\\n and self.curPoseData[0] == None \\\n and self.ObstacleData >= 250:\n \n stop = random.random()\n s = random.random()\n if s < leftTurnProbability: # left or right\n self.move.turnLeftOneStep()\n i += 1\n time.sleep(1)\n else:\n self.move.turnRightOneStep()\n i += 1\n time.sleep(1)\n \n time.sleep(0.4)\n \n if self.ObstacleData < 250:\n self.move.backOneStep()\n time.sleep(1)\n self.move.backOneStep()\n time.sleep(1)\n self.move.backOneStep()\n time.sleep(1)\n \n self.move.turnLeftOneStep()\n time.sleep(1)\n self.move.turnLeftOneStep()\n time.sleep(1)\n self.move.turnLeftOneStep()\n time.sleep(1)\n self.move.turnLeftOneStep()\n time.sleep(1)\n self.move.turnLeftOneStep()\n time.sleep(1)\n \n self.move.goOneStep()\n time.sleep(1)\n self.move.goOneStep()\n time.sleep(1)\n self.move.goOneStep()\n time.sleep(1)", "title": "" }, { "docid": "b5344fbbfd25123cfcf9e13f897c6a51", "score": "0.53764534", "text": "def _update(S):\n S_tilde = S.copy()\n\n cont = _H(S)\n\n i, j = np.random.randint(low=0, high=m, size=(2,))\n S_tilde[i,j] *= -1 # Flip atom\n new_cont = _H(S_tilde) # Compute new contribution \n\n acceptance = np.exp(-beta*(new_cont - cont))\n\n if new_cont < cont: # Keep spin if new_cont is smaller.\n return S_tilde\n elif np.random.uniform() < acceptance: #Keep with prob 
exp(-\\beta(H_v-H_\\mu))\n return S_tilde \n else: # Stay in same state.\n return S", "title": "" }, { "docid": "6bf17cb2ea1b46672ca10a40f2f57f87", "score": "0.5374331", "text": "def _iterate(v, mdp, policy):\n v = evaluation(mdp, policy)\n x_policy, y_policy = policy\n M, N = mdp.get_state().shape\n \n # obstacle padding\n bigger_state =[]\n \n #create a List that has Obstacles on all 4 sides\n for i in range(M+2):\n sublist = []\n for j in range(N+2):\n sublist.append('O')\n bigger_state.append(sublist)\n \n bigger_state = np.asarray(bigger_state)\n \n #insert grid world so it is surrounded by Obstacles\n bigger_state[1:M+1,1:N+1] = mdp.get_state()\n \n #Also pad the valuefunction so it does not want to go on the boarder\n bigger = np.ones((M+2,N+2)) * -9999999999\n bigger[1:M+1,1:N+1] = v\n v = bigger\n \n #update policy in a greedy manner\n #go over every field 'F'\n for i,j in np.argwhere(bigger_state == 'F'):\n \n elem_list = []\n cords_list = []\n \n #get values of 4-Neighbourhood\n elem_list.append(old_value(bigger_state,(i-1,j),v,(i,j)))\n cords_list.append((i-1,j))\n\n elem_list.append(old_value(bigger_state,(i,j-1),v,(i,j)))\n cords_list.append((i,j-1))\n\n elem_list.append(old_value(bigger_state,(i+1,j),v,(i,j)))\n cords_list.append((i+1,j))\n\n elem_list.append(old_value(bigger_state,(i,j+1),v,(i,j)))\n cords_list.append((i,j+1))\n \n #take the greedy action\n max_point = cords_list[elem_list.index(max(elem_list))]\n \n #get action by substracting point with highest reward and state's posittion\n x,y = tuple(np.subtract(max_point,(i,j)))\n \n #put coordinates in non-padded Policy\n x_policy[i-1,j-1] = x\n y_policy[i-1,j-1] = y\n \n #rezip policy\n policy = (x_policy,y_policy)\n \n #return value function in non padded area and policy\n return v[1:M+1,1:N+1],policy", "title": "" }, { "docid": "e99a929003fc222dd5b9227ac20ea4ab", "score": "0.5370354", "text": "def train(self, s, a, r, sp, done=False):\n astar = self.Q.get_optimal_action(sp)\n maxQ = self.Q[sp,astar]\n self.Q[s, a] = self.Q[s, a] + self.alpha * (r + self.gamma * maxQ - self.Q[s, a])\n # raise NotImplementedError(\"Implement function body\")", "title": "" }, { "docid": "d2ba4d1212ed2ad9ceea0c7b4084010c", "score": "0.53594965", "text": "def __init__(self, num_steps, v_limit=2, a_limit=.01, phi_limit=.2, p_slack=1):\n\t\tself.initial_v = tf.placeholder(tf.float32, name=\"initial_speed\")\n\t\tself.initial_theta = tf.placeholder(tf.float32, name=\"initial_theta\")\n\t\tself.targets = tf.placeholder(tf.float32, [2,1,None], name=\"targets\")\n\n\t\tself.turn = tf.Variable(\n\t\t\t\tname=\"turn\",\n\t\t\t\tinitial_value=np.zeros(num_steps, dtype=np.float32).reshape(num_steps,1))\n\t\tself.throttle = tf.Variable(\n\t\t\t\tname=\"throttle\",\n\t\t\t\tinitial_value=np.zeros(num_steps, dtype=np.float32).reshape(num_steps,1))\n\n\t\twith tf.name_scope(\"input_translation\"):\n\t\t\tself.phi = self.turn\n\t\t\tself.t = self.throttle\n\t\t\tself.v = tf.add(tf.cumsum(self.t), self.initial_v)\n\n\t\twith tf.name_scope(\"XY_tranlation\"):\n\t\t\tself.theta = tf.add(tf.cumsum(self.phi), self.initial_theta)\n\t\t\tself.x_v = tf.multiply(self.v, tf.cos(self.theta))\n\t\t\tself.y_v = tf.multiply(self.v, tf.sin(self.theta))\n\n\t\t\tself.x = tf.cumsum(self.x_v)\n\t\t\tself.y = tf.cumsum(self.y_v)\n\t\t\tself.xy = tf.pack([self.x,self.y])\n\n\t\twith tf.name_scope(\"constraints\"):\n\t\t\tself.target_distances = tf.reduce_min(tf.reduce_max(tf.abs(tf.subtract(self.xy, self.targets)), axis=0), axis=0, 
name=\"postion_constraint\")\n\t\t\tself.max_v = tf.reduce_max(tf.abs(self.v))\n\t\t\tself.a = tf.add(tf.multiply(tf.square(tf.abs(self.t)), tf.abs(self.phi)),tf.abs(self.t))\n\t\t\tself.max_a = tf.reduce_max(self.a)\n\t\t\tself.max_phi = tf.reduce_max(tf.abs(self.phi))\n\t\t\tself.avg_phi = tf.reduce_mean(tf.abs(self.phi))\n\t\t\tself.avg_v = tf.reduce_mean(self.v)\n\n\t\twith tf.name_scope(\"loss\"):\n\t\t\tself.acc_loss = tf.nn.relu(tf.subtract(self.max_a, a_limit))\n\t\t\tself.velocity_loss = tf.abs(tf.subtract(self.max_v, v_limit))\n\t\t\tself.phi_loss = tf.nn.relu(tf.subtract(self.max_phi, phi_limit))\n\t\t\tself.postion_loss = tf.reduce_mean(tf.nn.relu(tf.subtract(self.target_distances, p_slack)))\n\n\t\t\tself.loss = 2*self.postion_loss + 2*self.velocity_loss + 2*self.phi_loss + 2*self.acc_loss - self.avg_v", "title": "" }, { "docid": "f5d8b9e11cdbf172f6f4f555bbc3b473", "score": "0.5357459", "text": "def update(s, a, s1, p):\n states.add(s)\n actions.add(a)\n dist = transitions.get((s, a), {})\n if s1 in dist:\n print 'Summing', s, a, s1\n dist[s1] = dist[s1] + float(p)\n else:\n dist[s1] = float(p)\n transitions[(s,a)] = dist", "title": "" }, { "docid": "ebf99f4c7bc88dd8ebcbc317215b1b4b", "score": "0.5355332", "text": "def optimize(self, trans, gamma=0.99, epsilon=0.01):\n # get the length of the transitions\n trans_len = len(trans)\n # init the matrices\n R = np.zeros((trans_len,1))\n P_phi = np.zeros((trans_len,self.features))\n Phi = np.zeros((trans_len,self.features))\n # fill the matrices\n for i, transition in enumerate(trans):\n # decompose the transition\n state, action, reward, next_state, terminal = transition\n R[i,0] = reward\n P_phi[i,:] = self.phi(next_state, max(range(self.nact), key = lambda a: self.get_value(next_state,a)) ) #??\n Phi[i,:] = self.phi(state,action)\n A = (1/trans_len) * np.dot(Phi.T, ( Phi - gamma * P_phi ))\n b = (1/trans_len) * np.dot(Phi.T, R)\n from numpy.linalg import inv\n self.weights = np.dot(inv(A), b)\n self.weights = np.squeeze(self.weights)", "title": "" }, { "docid": "8d0ebd0591f60cf4b842d50a3f571c05", "score": "0.5346729", "text": "def update_alpha(self, s, a, t):\n s_ = t.state\n s_dist = self.Q[a, s.y, s.x]\n\n a_ = np.argmax(expected_value(self.Q[:, s_.y, s_.x]))\n\n s__dist = self.Q[a_, s_.y, s_.x]\n\n var_ix = s_dist.var_index(self.alpha)\n\n var__ix = clip(var_ix - t.reward)\n\n # prob before var\n p_pre = np.sum(s_dist.p[:var_ix])\n # prob at var\n p_var = s_dist.p[var_ix]\n # prob before next var\n p__pre = np.sum(s__dist.p[:var__ix])\n # prob at next var\n p__var = s__dist.p[var__ix]\n\n # how much does t add to the full var\n # p_portion = (t.prob * p__var) / self.p_portion_sum(s, a, var_ix)\n p_portion = 1\n\n # we care about this portion of var\n p_active = (self.alpha - p_pre) / p_var\n\n self.alpha = p__pre + p_active * p__var * p_portion", "title": "" }, { "docid": "bb248824ed837d5020245b1768040279", "score": "0.5344299", "text": "def objective_at_theta(self, theta_values=None, initialize_parmest_model=False):\n if len(self.theta_names) == 1 and self.theta_names[0] == 'parmest_dummy_var':\n pass # skip assertion if model has no fitted parameters\n else:\n # create a local instance of the pyomo model to access model variables and parameters\n model_temp = self._create_parmest_model(self.callback_data[0])\n model_theta_list = [] # list to store indexed and non-indexed parameters\n # iterate over original theta_names\n for theta_i in self.theta_names:\n var_cuid = ComponentUID(theta_i)\n var_validate = 
var_cuid.find_component_on(model_temp)\n # check if theta in theta_names are indexed\n try:\n # get component UID of Set over which theta is defined\n set_cuid = ComponentUID(var_validate.index_set())\n # access and iterate over the Set to generate theta names as they appear\n # in the pyomo model\n set_validate = set_cuid.find_component_on(model_temp)\n for s in set_validate:\n self_theta_temp = repr(var_cuid) + \"[\" + repr(s) + \"]\"\n # generate list of theta names\n model_theta_list.append(self_theta_temp)\n # if theta is not indexed, copy theta name to list as-is\n except AttributeError:\n self_theta_temp = repr(var_cuid)\n model_theta_list.append(self_theta_temp)\n except:\n raise\n # if self.theta_names is not the same as temp model_theta_list,\n # create self.theta_names_updated\n if set(self.theta_names) == set(model_theta_list) and len(\n self.theta_names\n ) == set(model_theta_list):\n pass\n else:\n self.theta_names_updated = model_theta_list\n\n if theta_values is None:\n all_thetas = {} # dictionary to store fitted variables\n # use appropriate theta names member\n theta_names = self._return_theta_names()\n else:\n assert isinstance(theta_values, pd.DataFrame)\n # for parallel code we need to use lists and dicts in the loop\n theta_names = theta_values.columns\n # # check if theta_names are in model\n for theta in list(theta_names):\n theta_temp = theta.replace(\"'\", \"\") # cleaning quotes from theta_names\n\n assert theta_temp in [\n t.replace(\"'\", \"\") for t in model_theta_list\n ], \"Theta name {} in 'theta_values' not in 'theta_names' {}\".format(\n theta_temp, model_theta_list\n )\n assert len(list(theta_names)) == len(model_theta_list)\n\n all_thetas = theta_values.to_dict('records')\n\n if all_thetas:\n task_mgr = utils.ParallelTaskManager(len(all_thetas))\n local_thetas = task_mgr.global_to_local_data(all_thetas)\n else:\n if initialize_parmest_model:\n task_mgr = utils.ParallelTaskManager(\n 1\n ) # initialization performed using just 1 set of theta values\n # walk over the mesh, return objective function\n all_obj = list()\n if len(all_thetas) > 0:\n for Theta in local_thetas:\n obj, thetvals, worststatus = self._Q_at_theta(\n Theta, initialize_parmest_model=initialize_parmest_model\n )\n if worststatus != pyo.TerminationCondition.infeasible:\n all_obj.append(list(Theta.values()) + [obj])\n # DLW, Aug2018: should we also store the worst solver status?\n else:\n obj, thetvals, worststatus = self._Q_at_theta(\n thetavals={}, initialize_parmest_model=initialize_parmest_model\n )\n if worststatus != pyo.TerminationCondition.infeasible:\n all_obj.append(list(thetvals.values()) + [obj])\n\n global_all_obj = task_mgr.allgather_global_data(all_obj)\n dfcols = list(theta_names) + ['obj']\n obj_at_theta = pd.DataFrame(data=global_all_obj, columns=dfcols)\n return obj_at_theta", "title": "" }, { "docid": "21c2eb6ec2c96dc4a71f203b80662e56", "score": "0.5339837", "text": "def updateObjective(self, series):\n self.prob.setObjective(pulp.lpSum([series[k]*self.player_vars[k] for k in self.players]))", "title": "" }, { "docid": "21c2eb6ec2c96dc4a71f203b80662e56", "score": "0.5339837", "text": "def updateObjective(self, series):\n self.prob.setObjective(pulp.lpSum([series[k]*self.player_vars[k] for k in self.players]))", "title": "" }, { "docid": "e47eb3f9d6191108ebb5bf815f65778b", "score": "0.53389174", "text": "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n\n #: definimos el valor de correccion\n correction = (reward + (self.gamma * 
self.getValue(nextState))) - self.getQValue(state, action)\n # actualizamos el peso de cada feature, para ello, recuperamos las features\n self.features = self.featExtractor.getFeatures(state, action)\n # y actulizamos las que tenemos, o las ponemos nuevas\n for feature in self.features:\n self.weigths[feature] += self.alpha * correction * self.features[feature]", "title": "" }, { "docid": "9e76b4077dcc85cf9d56672869eabc87", "score": "0.533809", "text": "def optimize_parameters(self, input=None):\n if self.is_train:\n self.iter_count += 1\n self.output, self.phi, self.afimg_or_afparam, loss = self.forward()\n\n self.backward_net(loss / self.criticUpdates)\n self.loss = loss.item()\n if self.iter_count % self.criticUpdates == 0:\n self.optimizer.step()\n self.optimizer.zero_grad()\n update_lr, lr = self.network.check_if_update_lr()\n if update_lr:\n self.update_learning_rate(lr)", "title": "" }, { "docid": "16d2f1091a705d2337ddfc69f86dfef4", "score": "0.5334167", "text": "def update(self,z_t):\n # YOUR CODE HERE\n \n dh = np.eye(3) # Jacobian of measurement model wrt states\n # print \"P_t (pre-update) = \", self.P_t\n # print \"num_meas = \", z_t.shape[0]\n for i in range(z_t.shape[0]):\n # print \"meas_idx = \", i\n # print \"P_t (pre-update) = \", self.P_t\n K = self.P_t.dot(dh.T).dot(np.linalg.inv(dh.dot(self.P_t).dot(dh.T) + self.R_t))\n # print \"K = \", K\n\n # Using the pose of the observed tag in the robot frame, calculate the\n # pose of the robot in the world frame\n tag_id = int(z_t[i][3])\n # print \"tag_id = \", tag_id\n pose_tag_in_robot = z_t[i][0:3] # get pose of tag in robot frame\n H_RT = util.get_transform_matrix(pose_tag_in_robot) # get transform matrix of robot to tag frame\n pose_tag_in_world = self.markers[tag_id, 0:3] # get pose of tag in world frame\n H_WT = util.get_transform_matrix(pose_tag_in_world) # get transform matrix of world to tag frame\n H_TR = np.linalg.inv(H_RT) # get transform matrix of tag to robot frame\n H_WR = np.dot(H_WT, H_TR) # get transform matrix of world to robot frame\n pose_robot_in_world = util.get_pose_from_transform(H_WR) # get pose of robot in world frame\n \n # print \"x_t = \", self.x_t\n # print \"pose_robot_in_world = \", pose_robot_in_world\n self.x_t = self.x_t + K.dot(np.array(pose_robot_in_world)-self.x_t)\n\n ########### KEY STEP ############\n # Limit the angle to range [-pi, pi]\n self.x_t[2] = util.limit_angle(self.x_t[2])\n\n # print \"x_t.shape = \", self.x_t.shape\n self.P_t = (np.eye(3) - K.dot(dh)).dot(self.P_t)\n # print \"P_t (post-update) = \", self.P_t\n \n # self.P_t = (np.eye(3) - K.dot(dh)).dot(self.P_t)\n # print \"P_t (post-update) = \", self.P_t\n return self.x_t, self.P_t", "title": "" }, { "docid": "f337e8394828645373c6b357b313a28c", "score": "0.5330655", "text": "def CoplanarParametersFitness(self,params):\n\t\tmasses_and_evecs = params[:3*self.nplanets].reshape(-1,3)\n\t\tmass = masses_and_evecs[:,0]\n\t\tex,ey = masses_and_evecs[:,1],masses_and_evecs[:,2]\n\t\tpmgs = np.arctan2(ey,ex)\n\t\t\n\t\tother_params = params[3*self.nplanets:].reshape(-1,2)\n\t\tperiodRatios = np.append(np.array([1.0]), other_params[:,0])\n\t\tmeanLongs = np.append(np.array([0.0]), other_params[:,1])\t\t\n\n\t\tnbody_params = (np.vstack([ mass,ex,ey,periodRatios,meanLongs-pmgs ]).T).reshape(-1)\n\n\t\tnbody_transits,success = self.CoplanarParametersTransits(nbody_params,t0=0.0)\n\t\tif not success:\n\t\t\treturn 
-np.inf\n\t\t\n\t\t####################################################################################################################\n\t\tnbodyTransitOrder = np.argsort( np.array([ ntransits[0] for ntransits in nbody_transits] ))\n\t\twhile nbodyTransitOrder[0] != self.transitOrder[0]:\n\t\t\tfirstToTransit = self.transitOrder[0]\n\t\t\tfor i in np.arange(nbodyTransitOrder.tolist().index(firstToTransit)):\n\t\t\t\tplanetNumber = nbodyTransitOrder[i]\n\t\t\t\tnbody_transits[planetNumber] = nbody_transits[planetNumber][1:]\n\t\t\t\n\t\t\tnbodyTransitOrder = np.argsort( np.array([ntransits[0] for ntransits in nbody_transits]) )\t\n\t\t####################################################################################################################\n\t\t\t\n\t\tobserved_times,observed_numbers,uncertainties,nbody_times = np.array([]),np.array([]),np.array([]),np.array([])\n\t\t\n\t\tfor i in range(self.nplanets):\n\t\t\tif max(self.transit_numbers[i]) >= len(nbody_transits[i]):\n\t\t\t\treturn -np.inf\n\n\t\t\tobserved_times=np.append(observed_times, self.transit_times[i])\n\t\t\tobserved_numbers=np.append(observed_numbers, self.transit_numbers[i])\n\t\t\tuncertainties=np.append(uncertainties,self.transit_uncertainties[i])\n\t\t\tnbody_times=np.append(nbody_times , (nbody_transits[i])[self.transit_numbers[i]])\n\t\t\n\t\t\n\t\tdef func(x,tau,t0):\n\t\t\treturn tau * x + t0\n\n\t\t# Solve for the transform of N-body time that gives the best fit to observed transits\n\t\tx0 = np.array((self.period_estimates[0],self.tInit_estimates[0]))\n\t\ttau,t0 = curve_fit(func, nbody_times, observed_times,x0, uncertainties)[0]\n\t\t\n\t\ttransform = np.vectorize(lambda x: func(x,tau,t0))\n\t\t\n\t\tchi2 = 0.0\n\t\t\n\t\tfor i in range(self.nplanets):\n\t\t\tuncertainties = self.transit_uncertainties[i]\n\t\t\tdiff = transform((nbody_transits[i])[self.transit_numbers[i]]) - self.transit_times[i]\n\t\t\tchi2+= -0.5 * np.sum( np.power(diff,2.) / np.power(uncertainties,2.) 
)\n\n\t\treturn chi2", "title": "" }, { "docid": "de67c3673fbc787687a8e7e1461d205b", "score": "0.5327907", "text": "def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,\n beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):\n\n L = len(parameters) // 2\n v_corrected = {}\n s_corrected = {}\n\n for i in range(L):\n\n v[\"dW\" + str(i + 1)] = beta1 * v[\"dW\" + str(i + 1)] + (1 - beta1) * grads[\"dW\" + str(i + 1)]\n v[\"db\" + str(i + 1)] = beta1 * v[\"db\" + str(i + 1)] + (1 - beta1) * grads[\"db\" + str(i + 1)]\n\n v_corrected[\"dW\" + str(i + 1)] = v[\"dW\" + str(i + 1)] / (1 - beta1 ** t)\n v_corrected[\"db\" + str(i + 1)] = v[\"db\" + str(i + 1)] / (1 - beta1 ** t)\n\n s[\"dW\" + str(i + 1)] = beta2 * s[\"dW\" + str(i + 1)] + (1 - beta2) * (grads[\"dW\" + str(i + 1)] ** 2)\n s[\"db\" + str(i + 1)] = beta2 * s[\"db\" + str(i + 1)] + (1 - beta2) * (grads[\"db\" + str(i + 1)] ** 2)\n\n s_corrected[\"dW\" + str(i + 1)] = s[\"dW\" + str(i + 1)] / (1 - beta2 ** t)\n s_corrected[\"db\" + str(i + 1)] = s[\"db\" + str(i + 1)] / (1 - beta2 ** t)\n\n parameters[\"W\" + str(i + 1)] = parameters[\"W\" + str(i + 1)] - learning_rate * v_corrected[\"dW\" + str(i + 1)] / (np.sqrt(s_corrected[\"dW\" + str(i + 1)]) + epsilon)\n parameters[\"b\" + str(i + 1)] = parameters[\"b\" + str(i + 1)] - learning_rate * v_corrected[\"db\" + str(i + 1)] / (np.sqrt(s_corrected[\"db\" + str(i + 1)]) + epsilon)\n\n return parameters, v, s", "title": "" }, { "docid": "f8b15111763137d40684702c0b4ebd7c", "score": "0.53273284", "text": "def move2goal(self):\n\t\tdistance_tolerance = 0.1 \n\t\t# fuer die Funktion lokale Objekte instanzieren => kein self notwendig\t\t\n\t\tvel_msg = Twist()\n\t\t\n\t\t# Debug Info\n\t\trospy.loginfo(\"Start Pose is %s %s\", self.pose.x, self.pose.y)\n\t\trospy.loginfo(\"Goal is %s %s\", self.goal.x, self.goal.y)\n\t\trospy.loginfo(\"Distannce to Goal is %f \", self.euclidean_distance(self.goal))\n\t\trospy.loginfo(\"SteeringAngle to Goal is %f \", self.steering_angle(self.goal))\n\t\t#raw_input(\"Hit any Key to start\")\n\n\t\twhile self.euclidean_distance(self.goal) >= distance_tolerance:\n\t\t\t# Porportional controller.\n\t\t\t# https://en.wikipedia.org/wiki/Proportional_control\n\n\t\t\t# Linear velocity in the x-axis.\n\t\t\tvel_msg.linear.x = self.linear_vel(self.goal)\n\t\t\tvel_msg.linear.y = 0\n\t\t\tvel_msg.linear.z = 0\n\n\t\t\t# Angular velocity in the z-axis.\n\t\t\tvel_msg.angular.x = 0\n\t\t\tvel_msg.angular.y = 0\n\t\t\tvel_msg.angular.z = self.angular_vel(self.goal)\n\n\t\t\t# Publishing our vel_msg\n\t\t\tself.velocity_publisher.publish(vel_msg)\n\n\t\t\t# Publish at the desired rate.\n\t\t\tself.rate.sleep()\n\t\t\trospy.loginfo(\"Pose is %s %s\", self.pose.x, self.pose.y)\n\t\t\trospy.loginfo(\"Speed is x: %s theta: %s\", vel_msg.linear.x, vel_msg.angular.z)\n\n\t\t# Stopping our robot after the movement is over.\n\t\trospy.loginfo(\" ###### Goal reached #######\" )\n\t\tvel_msg.linear.x = 0\n\t\tvel_msg.angular.z = 0\n\t\tself.velocity_publisher.publish(vel_msg)\n\t\t#exit()", "title": "" }, { "docid": "763ca13e49a616b25a0d1fa7a74615f1", "score": "0.53210944", "text": "def set_theta(self, theta):\n while theta > 2*PI:\n theta = theta - 2*PI\n while theta < 0:\n theta = theta + 2*PI\n if theta > PI and theta < 2*PI:\n theta = 2*PI - theta\n if theta < 0.001: # is this cheating?\n theta = 0\n self._theta = np.around(theta, 8)", "title": "" } ]
0c4c6dd2c51c2603b9664594c8f91331
add_new_action(action) > None Call this function to add a new action to the vistrail being controlled by the vistrailcontroller.
[ { "docid": "f1ccc36bc99322d00b2852fc0a974e5c", "score": "0.68400925", "text": "def add_new_action(self, action, description=None):\n if action is not None:\n BaseController.add_new_action(self, action, description)\n self.emit(QtCore.SIGNAL(\"new_action\"), action)\n self.recompute_terse_graph()", "title": "" } ]
[ { "docid": "dbabcc32c1471581471bccd01caecead", "score": "0.733523", "text": "def add_action(self, action):\n self.action.append(action)", "title": "" }, { "docid": "1ce8936babc98ab865f03fc98c217138", "score": "0.7085356", "text": "def register_action(self, action):\n self.actions.append(action)", "title": "" }, { "docid": "43599a81d7293373409f0176c744c2ec", "score": "0.69343936", "text": "def append_action(self, action):\n self.actions.append(action)", "title": "" }, { "docid": "e4380025286cfbd968126fbf38c03dde", "score": "0.69314337", "text": "def add_menu_action(self, action):\n self.action_list.append(action)", "title": "" }, { "docid": "1b4a1a9b8b1ff49e07172ea5cee59869", "score": "0.67136365", "text": "def addallowedaction(self,allowed_action):\n self.allowed_actions.append(allowed_action)", "title": "" }, { "docid": "1b4a1a9b8b1ff49e07172ea5cee59869", "score": "0.67136365", "text": "def addallowedaction(self,allowed_action):\n self.allowed_actions.append(allowed_action)", "title": "" }, { "docid": "3ead1c38be1f14efbaf4765db093302f", "score": "0.6647426", "text": "def add_to_menu(self, action):\n pass", "title": "" }, { "docid": "3c1fefbfbb4ac59c2664c478d107f19a", "score": "0.65769815", "text": "def add_action(self, action):\n self._action_queue.append(action)", "title": "" }, { "docid": "18d9af50b3dc7e92e20b31058b0e1d43", "score": "0.6560309", "text": "def add_action(self, action, name=None):\r\n name = name or action.__name__\r\n self._actions[name] = action\r\n self._global_actions[name] = action", "title": "" }, { "docid": "485da20efb9f8bcbb54c68f59ecfcd43", "score": "0.65518814", "text": "def addAction(self,action,name=None):\n if name is None:\n name = \"Action\" + str(len(self._actions)+1)\n self._actions[name] = action\n self._par_name_dict[\"Action\"].append(name)\n self.widgets.update()", "title": "" }, { "docid": "d8fd7485e9d261ba5cb994f5c41d08ff", "score": "0.64513177", "text": "def add_action(self, action):\n if action != 'c' and action != 'd':\n raise ValueError(\"action must be either 'c' or 'd', {} given\".format(action))\n self.history.append(action)", "title": "" }, { "docid": "934c16a1ffe6fe4dfacb29ae8eb304ab", "score": "0.64440006", "text": "def __addAction(self, action):\n\t\tactionId = self.__getUnreservedActionId()\n\t\tself.usedActionIds[actionId] = action\n\t\taction['action'] = actionId", "title": "" }, { "docid": "a6f0f3b51a6bde9f9bd2c28b09012909", "score": "0.6382038", "text": "def add(self, action):\n self.pending.append(action)", "title": "" }, { "docid": "ceea5be1f85a8d8fe31c0387944b5f45", "score": "0.6360827", "text": "def register_action(self, registered_action):\n self.actions[registered_action.name]=registered_action", "title": "" }, { "docid": "1d48f54b792032ca315ad62bddee8860", "score": "0.63584614", "text": "def register_action(self, action):\n return self.event_cb(self.name, \"register_action\", action)", "title": "" }, { "docid": "4435cf5f8e43d153b34394ef238d53aa", "score": "0.6270335", "text": "def custom_action(self, agent, action):\n pass", "title": "" }, { "docid": "0ae386e466aeec49792f5768e89fc301", "score": "0.6167991", "text": "def addAction(self, name, func):\n\n self.actions[self.actionid] = [name, func]\n self._update('addAction', self.actionid, name)\n self.actionid += 1", "title": "" }, { "docid": "bec7579f6ce65ae653dd078a8bc7f289", "score": "0.6140846", "text": "def _append_action(self, *args, **kwargs):\n self._action_list[-1].update({'args': args, 'kwargs': kwargs})\n return self", "title": "" }, { "docid": 
"15475a2b1c2071df60e902c1ff9b00c1", "score": "0.61173046", "text": "def addActionCollection(self, collection):\r\n if collection.name not in self._actioncollections:\r\n self._actioncollections[collection.name] = collection", "title": "" }, { "docid": "712b36334dcd5f9080c4e4442d0f149a", "score": "0.61108315", "text": "def add_route(self, route, action):\n self.routes.append((route, action))", "title": "" }, { "docid": "0546f94bf97f59741cddf4f755aa24ae", "score": "0.6081501", "text": "def addAction(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n return QAction", "title": "" }, { "docid": "dead81791bcbd7c9c96b3f8388f84006", "score": "0.60801834", "text": "def _set_action(self, action):\n\t\t\t\tpublish_action(action)", "title": "" }, { "docid": "e80e7cd93c82870966f9464c9f50a28f", "score": "0.599889", "text": "def apply_action(self, action):\n pass", "title": "" }, { "docid": "1f03c39badb0384f94f4202b3b7908ae", "score": "0.59924424", "text": "def action_add(post_id):\n post = Posts.query.get_or_404(post_id)\n if request.method == 'POST':\n\n new_action = request.form.to_dict()\n new_action['created_by'] = current_user.id\n new_action['posts_id'] = post.id\n if new_action['stage'] == 1:\n new_action['case_per_layer'] = 0\n new_action['total_layers'] = 0\n new_action['total_cases'] = 0\n new_action['ex_case_per_layer'] = 0\n new_action['ex_total_layers'] = 0\n new_action['ex_total_cases'] = 0\n\n new_action = Actions(**new_action)\n db.session.add(new_action)\n db.session.commit()\n\n flash('New action has been added for {}.'.format(post.title),\n 'success')\n return redirect(url_for('main.index'))\n\n return render_template('action_add.html', post=post)", "title": "" }, { "docid": "020bff0b32499810633a44c00898390b", "score": "0.5936558", "text": "def _set_action(self, action):\n\t\t\t\t#TODO \n\t\t\t\t# Need to implement \n\t\t\t\t# Take action in each agent\n\t\t\t\t\n\t\t\t\tpass", "title": "" }, { "docid": "0c48c3160af70cce8880fe9e6509365b", "score": "0.59207684", "text": "def _add_new_action(self, state, action, terminal=False):\n if terminal:\n self._model[state][action] = {\"__count\": 1,\n \"__reward\": 0,\n \"__status\": \"known\"}\n\n else:\n self._model[state][action] = {\"__count\": 0,\n \"__reward\": 0,\n \"__status\": \"unknown\"}", "title": "" }, { "docid": "c187422cf994185d53858eba79fd00a8", "score": "0.5878581", "text": "def add_to_toolbar(self, action):\n pass", "title": "" }, { "docid": "369e10753352f100bc1a20dc1f9222b7", "score": "0.58763176", "text": "def action(self, action):\n self._action = action", "title": "" }, { "docid": "369e10753352f100bc1a20dc1f9222b7", "score": "0.58763176", "text": "def action(self, action):\n self._action = action", "title": "" }, { "docid": "6c59cdf7980c5b258214d97bd0665470", "score": "0.58093846", "text": "def addNewFileAction(self, label, slot=None):\n self._createNewFileActions.append(self._application.createAction(label, slot,image='filenew'))", "title": "" }, { "docid": "83101a39f3e292576833c6198497a33c", "score": "0.57991993", "text": "def register_action(func):\n ACTIONS[func.__name__] = func\n return func", "title": "" }, { "docid": "1fc091f8e5719fa81ebe4f7dc5e63e6d", "score": "0.5777367", "text": "def action(self, action):\n\n self._action = action", "title": "" }, { "docid": "1fc091f8e5719fa81ebe4f7dc5e63e6d", "score": "0.5777367", "text": "def action(self, action):\n\n self._action = action", "title": "" }, { "docid": "1fc091f8e5719fa81ebe4f7dc5e63e6d", "score": "0.5777367", "text": "def 
action(self, action):\n\n self._action = action", "title": "" }, { "docid": "1fc091f8e5719fa81ebe4f7dc5e63e6d", "score": "0.5777367", "text": "def action(self, action):\n\n self._action = action", "title": "" }, { "docid": "1078f2c786b6595a827e9ec29373ea92", "score": "0.57711446", "text": "def add_action(action_name):\n actions.click('#testSteps button.add-step')\n action_inputs = elements(\"#testSteps .step-first-input\")\n last_input = action_inputs[-1]\n actions.send_keys(last_input, action_name)\n actions.press_key(last_input, 'DOWN')\n actions.press_key(last_input, 'ENTER')", "title": "" }, { "docid": "44541c0437d7adc17098a85baea15287", "score": "0.5713389", "text": "def _set_action(self, action):\n raise NotImplementedError()", "title": "" }, { "docid": "44541c0437d7adc17098a85baea15287", "score": "0.5713389", "text": "def _set_action(self, action):\n raise NotImplementedError()", "title": "" }, { "docid": "44541c0437d7adc17098a85baea15287", "score": "0.5713389", "text": "def _set_action(self, action):\n raise NotImplementedError()", "title": "" }, { "docid": "44541c0437d7adc17098a85baea15287", "score": "0.5713389", "text": "def _set_action(self, action):\n raise NotImplementedError()", "title": "" }, { "docid": "f4428fbaccf8c1db7b21d24f17fb32fa", "score": "0.57132435", "text": "def act_normal_action_2(self, action):", "title": "" }, { "docid": "da6315859b275bc94cef7b216141de53", "score": "0.5695793", "text": "def act(self, action=None):\n if action:\n self.action = action", "title": "" }, { "docid": "291e971f91c0506ff71bb0032a784a95", "score": "0.5688573", "text": "async def add_action(client: ClientAsync, action_name: str, action: Action):\n response = await client.add_action(action_name=action_name, action=action)\n assert response.status_code == 200, f\"failed to put action ({action_name})\"\n retrieved_action = await client.get_action(action_name=action_name)\n assert isinstance(retrieved_action, Action), str(type(retrieved_action))", "title": "" }, { "docid": "2e1ead2b4907e9e6ac909f509fd98220", "score": "0.5688056", "text": "def recordAction(ed, action):\n\ted.actionsRecorded.append(action)", "title": "" }, { "docid": "e7c497f5aa72f7612eeee39214f2d0ea", "score": "0.564512", "text": "def _take_action(self, action):\n pass", "title": "" }, { "docid": "a09641cfd564eb1235d14cf14ec1d889", "score": "0.56450343", "text": "def _action(self, action, vsm, info=None, **kwargs):\n body = {action: info}\n self.run_hooks('modify_body_for_action', body, **kwargs)\n url = '/servers/%s/action' % base.getid(vsm)\n return self.api.client.post(url, body=body)", "title": "" }, { "docid": "e245f596b7aa89e5fcdbf53618b28cc2", "score": "0.5644852", "text": "def create_action(self, name='', action='', app='', device='', arguments=None, risk=0):\n arguments = arguments if arguments is not None else []\n action = Action(name=name, action_name=action, app_name=app, device_id=device, arguments=arguments, risk=risk)\n self.actions[action.uid] = action\n self.branches[action.uid] = []\n self._total_risk += risk\n logger.info('Action added to workflow {0}. 
Action: {1}'.format(self.name, self.actions[action.uid].read()))", "title": "" }, { "docid": "ab919e8fa399490211f4132eb07341f2", "score": "0.5629433", "text": "def set_action(self, action):\n self._action = action", "title": "" }, { "docid": "e6a1f8b11245e35410a338aa89fba7f5", "score": "0.5616287", "text": "def createActions(self, parent=None):\r\n pass", "title": "" }, { "docid": "20b4de764f614485a70704aee4979dc9", "score": "0.56064236", "text": "def handle_action(self, action: Action) -> None:\n ...", "title": "" }, { "docid": "8a5e2c72aaf0cf8a921152bbee14b16d", "score": "0.56043345", "text": "def action(self, value=1):\n self.actions = self.actions + value", "title": "" }, { "docid": "a845eabbe47e0dbc952aa337d9039352", "score": "0.559845", "text": "def add(self, action):\r\n tag = \"hyper-%d\" % len(self.links)\r\n self.links[tag] = action\r\n return (\"hyper\", tag)", "title": "" }, { "docid": "a0d40be6fe170fd9b4473ecd47079e73", "score": "0.5579642", "text": "def add_hook(self, action, func, **kwargs):\n try:\n self._hooks[action].append(functools.partial(func, **kwargs))\n except KeyError:\n raise ValueError('`%s` is not a valid entry action' % action)", "title": "" }, { "docid": "8136714ed349b47e5225c5a4af53188c", "score": "0.5568049", "text": "def set_action(self, action):\n self._action = action\n return self", "title": "" }, { "docid": "42bbf1dd8cda7924e74ecebe0ac58e71", "score": "0.55176723", "text": "def act (self, action):\n raise NotImplementedError()", "title": "" }, { "docid": "b258de966588982a71223f08a1b9b187", "score": "0.548759", "text": "def addE5Actions(self, actions):\n self.actions.extend(actions)", "title": "" }, { "docid": "5d1f663ed52174349c98ee6582347b96", "score": "0.54694647", "text": "def perform_action(self, action):\r\n # TODO: implement\r\n pass", "title": "" }, { "docid": "bc37c31b54bd91972e4e7875cfe1640c", "score": "0.5468841", "text": "def action(self):\n pass", "title": "" }, { "docid": "3163912de842eba18cb8a6efa9e30dc0", "score": "0.5436439", "text": "def _take_action(self, action):\n action_type = ACTION_LOOKUP[action]\n self.env.act(action_type)", "title": "" }, { "docid": "17e12c9a1abccc332b0e03768ca116d5", "score": "0.54251695", "text": "def SetActionFlags(self):\n self.action = 'AuditLog'", "title": "" }, { "docid": "62dcca65b25ffbf915bcc1c7f854d4c6", "score": "0.5424677", "text": "def addClicked(self):\n self.param.addNew()", "title": "" }, { "docid": "651af8750d01fba26fa94fe3b6d6a973", "score": "0.54038227", "text": "def performAction(action):\n # not terribly different from \"idv.handleAction('action:edit.paramdefaults')\"\n # key diffs:\n # *only* handles actions\n # does not require you to prepend everything with 'action:' (but you can if you must)\n available = allActions()\n if not action.startswith('action:'):\n prefixedId = 'action:' + action\n else:\n prefixedId = action\n action = action.replace('action:', '')\n \n if action in available:\n getStaticMcv().handleAction(prefixedId)\n else:\n raise ValueError(\"Couldn't find the action ID \", action, \"; try calling 'allActions()' to get the available action IDs.\")", "title": "" }, { "docid": "5da47bf0c18de24400f629b4d7a7f442", "score": "0.5401063", "text": "def realAction(self, name):\r\n pass", "title": "" }, { "docid": "2c81fb18dd8122141e959db9d46b5750", "score": "0.5385923", "text": "def can_add_to_menu(self, action):\n return True", "title": "" }, { "docid": "1ad9382b905bab78a29283939078a008", "score": "0.5383812", "text": "async def insert_action(ctx, adventure_id, action) -> bool:\n 
action_url = f\"user:{ctx.user_uid}\" \\\n f\"#service:{ctx.backend.get_identity()}\" \\\n f\"#adventure:{adventure_id}\" \\\n f\"#action:{action['id']}\"\n if await ctx.app.storage.load(action_url) is None:\n await ctx.app.storage.save(action_url, action['text'])\n return True\n return False", "title": "" }, { "docid": "a2f77184c909d8438e84c02b2968d09f", "score": "0.53772116", "text": "def receiveAction(self, action):\r\n\r\n self.action = action", "title": "" }, { "docid": "a4faf0550925229c4e40eb70c148471c", "score": "0.53761333", "text": "def action(self):", "title": "" }, { "docid": "bece39aba56e10fc7b598f54da7ba9be", "score": "0.5375457", "text": "def addAction(hook : Callable, short_text : str, key : str=None, description : str=None) -> None:\n _init()\n nativeWindow().addAction(hook,short_text,key,description)", "title": "" }, { "docid": "9ce661b644285e3bf287db1ce4ade1e5", "score": "0.53728735", "text": "def addAction(self, message):\n f = open(LOG_FILE, \"a\")\n timestamp = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n f.write(\"%s %s%s\" % (timestamp, message, os.linesep))\n f.close()", "title": "" }, { "docid": "f185de89a3137811efbd0d5b07e23b7f", "score": "0.5368428", "text": "def set_action(self, action, priority):\r\n if priority > self.action_priority:\r\n self.action = action\r\n self.action_priority = priority", "title": "" }, { "docid": "f556dd24833b73d5e9211906f229a200", "score": "0.53470683", "text": "def SetActionFlags(self):\n self.action = 'SetMaintenance'", "title": "" }, { "docid": "c4c31dfb7853660970f860ce6257e5c1", "score": "0.5339087", "text": "def on_action(self, handler: 'Handler', action: Action):\n pass", "title": "" }, { "docid": "2d333ca1f018424daa55fb9cee46971e", "score": "0.53314966", "text": "def add_child(self, child, action, rollout=False):\n if rollout:\n self.rollout_children[action] = child\n return\n self.children[action] = child", "title": "" }, { "docid": "428cef0a004774e07c7ec0449d0c6774", "score": "0.5325913", "text": "def applyAction(self, action: Action):\r\n self.stack.append(action)\r\n action.apply(self.game)", "title": "" }, { "docid": "da8c17e6c98077e58c840abd085944bc", "score": "0.53225106", "text": "def _add_event(self, action, status, reason):\n ev = event.Event(self.context, self.stack, action, status, reason,\n self.resource_id, self.properties,\n self.name, self.type())\n\n ev.store()\n self.stack.dispatch_event(ev)", "title": "" }, { "docid": "14def8af0c21ee27a8c6a7df793db62a", "score": "0.5312919", "text": "def performAction(self, action):\n\n # The environment can't affect the action\n print action\n return action", "title": "" }, { "docid": "4bf9ad35956ee66486454713739261f7", "score": "0.53106976", "text": "def add(self, obs, action, reward, done):\n raise NotImplementedError()", "title": "" }, { "docid": "d9b964347f11fcb76cabfc675e331b9f", "score": "0.53089887", "text": "def action(self, action):\n allowed_values = [\"None\", \"Create\", \"Start\", \"Pause\", \"Resume\", \"Retry\", \"RetryFailed\", \"Cancel\"]\n if action not in allowed_values:\n raise ValueError(\n \"Invalid value for `action` ({0}), must be one of {1}\"\n .format(action, allowed_values)\n )\n\n self._action = action", "title": "" }, { "docid": "34c9b81215f547b8d6a22371495edecf", "score": "0.53056175", "text": "def _action(self): # pragma: no cover", "title": "" }, { "docid": "6cf7e7651e0cad40cf9c5f5eebf183d3", "score": "0.5303351", "text": "def perform_action(self, action_id: int) -> None:\r\n ...", "title": "" }, { "docid": 
"186e1dbdbc4d31d99d7ee31f7f7de6e1", "score": "0.52983385", "text": "def load_action(self):\n pass", "title": "" }, { "docid": "f8076ac8791ec1e7c35ee91184885a1e", "score": "0.5290628", "text": "def processAction(self, action):\n self.actionHandler.doAction(action)", "title": "" }, { "docid": "989db6f7d96ffd99c1a507b9f9317686", "score": "0.5289302", "text": "def action(self, action):\n if action is None:\n raise ValueError(\"Invalid value for `action`, must not be `None`\")\n\n self._action = action", "title": "" }, { "docid": "8481472651a032ff8ac08965f15cd097", "score": "0.5277443", "text": "def add_action_listener(self, listener):\n self._listeners.append(listener)", "title": "" }, { "docid": "c3fcb76495658c8da97f9f04f2989473", "score": "0.52691627", "text": "def __init__(self, name, action=None):\n super().__init__(name)\n self.action = action", "title": "" }, { "docid": "ea166f6cf4a7630957bd52128c0751c0", "score": "0.5266529", "text": "def action(self, action):\n if self.local_vars_configuration.client_side_validation and action is None: # noqa: E501\n raise ValueError(\"Invalid value for `action`, must not be `None`\") # noqa: E501\n allowed_values = [\"BLOCK\", \"ALLOW\", \"FILTER_REMOVE\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and action not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `action` ({0}), must be one of {1}\" # noqa: E501\n .format(action, allowed_values)\n )\n\n self._action = action", "title": "" }, { "docid": "51a7574bf9a52541f377d9369395b52b", "score": "0.52653795", "text": "def add_action(self, title: str):\n\n self.__pyAlert__.addAction(title)", "title": "" }, { "docid": "74f0df0c6fdf3e3dfe595dcf1b91791d", "score": "0.5263064", "text": "def set_action(self, action):\n action_mappings = {\n Actions.QUERY: self.__set_query_action,\n Actions.SERP: self.__set_serp_action,\n Actions.SNIPPET: self.__set_snippet_action,\n Actions.DOC: self.__set_assess_document_action,\n Actions.MARK: self.__set_mark_action\n }\n \n if action_mappings[action]:\n self.__actions.append(action)\n action_mappings[action]()", "title": "" }, { "docid": "6d8bcd8f29e8f79e91125ad5b0939b30", "score": "0.5260046", "text": "def _add_action_rule(self,\n action_rule_stable: list,\n action_rule_flexible: list,\n action_rule_decision: list,\n action_rule_supp: list,\n action_rule_conf: list):\n action_rule = [action_rule_stable, action_rule_flexible, action_rule_decision]\n self.action_rules.append([action_rule, action_rule_supp, action_rule_conf])", "title": "" }, { "docid": "cd8a640ad862865928b0206940d4a5c7", "score": "0.5241337", "text": "def add_action(\n self,\n icon_path,\n text,\n callback,\n enabled_flag=True,\n add_to_menu=True,\n add_to_toolbar=True,\n status_tip=None,\n whats_this=None,\n parent=None):\n\n## # Create the dialog (after translation) and keep reference\n## self.dlg = SPREADSHEETDialog()\n\n icon = QIcon(icon_path)\n action = QAction(icon, text, parent)\n action.triggered.connect(callback)\n action.setEnabled(enabled_flag)\n\n if status_tip is not None:\n action.setStatusTip(status_tip)\n\n if whats_this is not None:\n action.setWhatsThis(whats_this)\n\n if add_to_toolbar:\n self.toolbar.addAction(action)\n\n if add_to_menu:\n self.iface.addPluginToMenu(\n self.menu,\n action)\n\n self.actions.append(action)\n\n return action", "title": "" }, { "docid": "584a8e11d7689835f0d7636daf6d2ccf", "score": "0.52352804", "text": "def addAction(self, itemRow):\n self.__items.append(itemRow)", "title": "" }, { "docid": 
"ce3d8467b51a5a532e56c459398a041b", "score": "0.523144", "text": "def add_action(\n self,\n icon_path,\n text,\n callback,\n enabled_flag=True,\n add_to_menu=True,\n add_to_toolbar=True,\n status_tip=None,\n whats_this=None,\n parent=None):\n self.dlg = indicesDialog()\n\n\n icon = QIcon(icon_path)\n action = QAction(icon, text, parent)\n action.triggered.connect(callback)\n action.setEnabled(enabled_flag)\n\n if status_tip is not None:\n action.setStatusTip(status_tip)\n\n if whats_this is not None:\n action.setWhatsThis(whats_this)\n\n if add_to_toolbar:\n # Adds plugin icon to Plugins toolbar\n self.iface.addToolBarIcon(action)\n\n if add_to_menu:\n self.iface.addPluginToRasterMenu(\n self.menu,\n action)\n\n self.actions.append(action)\n\n return action", "title": "" }, { "docid": "3213b94eb2fad05a6a693d069102d81a", "score": "0.523022", "text": "def insertBeforeAction(self):\r\n return None", "title": "" }, { "docid": "c32e4dc177967d82d07ee20dbb19485d", "score": "0.52240974", "text": "def add_action(self, icon_path, text, callback, enabled_flag = True, add_to_menu = True, add_to_toolbar = True, status_tip = None, whats_this = None, parent = None):\n\n icon = QIcon(icon_path)\n action = QAction(icon, text, parent)\n action.triggered.connect(callback)\n action.setEnabled(enabled_flag)\n\n if status_tip is not None:\n action.setStatusTip(status_tip)\n\n if whats_this is not None:\n action.setWhatsThis(whats_this)\n\n if add_to_toolbar:\n # Adds plugin icon to Plugins toolbar\n self.iface.addToolBarIcon(action)\n\n if add_to_menu:\n self.iface.addPluginToMenu(self.menu, action)\n\n self.actions.append(action)\n\n return action", "title": "" }, { "docid": "147312187267c5f267e7b78cfed7c79a", "score": "0.5208372", "text": "def execute_action(self, action):\n pass", "title": "" }, { "docid": "a80767a3ea2425b8e3201ef1084c6db6", "score": "0.52043796", "text": "def post(self, action):\n \n return getattr(self, '_%s' % action)()", "title": "" }, { "docid": "e068c293c4eb7812a5cc17c82a069002", "score": "0.5202544", "text": "def __init__(self):\r\n # CONFIGURE ME PLEASE, LOVE YOU BIG TIME !\r\n self.actions = {\r\n }", "title": "" }, { "docid": "2b2a3221e8e765df8e86a0fc1b43a84d", "score": "0.5200028", "text": "def add_action(\n self,\n icon_path,\n text,\n callback,\n enabled_flag=True,\n add_to_menu=True,\n add_to_toolbar=True,\n status_tip=None,\n whats_this=None,\n parent=None):\n\n icon = QIcon(icon_path)\n action = QAction(icon, text, parent)\n action.triggered.connect(callback)\n action.setEnabled(enabled_flag)\n\n if status_tip is not None:\n action.setStatusTip(status_tip)\n\n if whats_this is not None:\n action.setWhatsThis(whats_this)\n\n if add_to_toolbar:\n parent.findChildren(QToolBar, 'mPluginToolBar')[0].addAction(action)\n # self.toolbar.addAction(action)\n\n if add_to_menu:\n self.iface.addPluginToMenu(\n self.menu,\n action)\n\n self.actions.append(action)\n\n return action", "title": "" }, { "docid": "03138c367f7094d89d3f221f8c06ef5a", "score": "0.51967865", "text": "def create_action(action_body):\n\n base_url = config.RESOURCE\n action = MSGRAPH.post('%s%s/security/securityActions' %\n (base_url, config.SECURITYACTION_VERSION),\n json=action_body, headers=request_headers()).json()\n\n # error handling\n if b'' in action:\n print(\"Please Sign-in using a on.microsoft.com account for demo data\")\n action = None\n elif 'error' in action:\n if action['error']['code'] == 'InvalidAuthenticationToken':\n return redirect(url_for('login'))\n else:\n #success\n message = 
'<strong>Success</strong> action created. Id: %s' % \\\n action.get('id')\n flash(message, category='success')\n\n return action", "title": "" }, { "docid": "9b666ad97c77972a0f1222c781dc6815", "score": "0.5177768", "text": "def manage_add_openxchange_helper(dispatcher, id, title=None, REQUEST=None):\n\n sp = plugin.OpenXChangeHelper(id, title)\n\n dispatcher._setObject( sp.getId(), sp)\n\n if REQUEST is not None:\n REQUEST['RESPONSE'].redirect( '%s/manage_workspace'\n '?manage_tabs_message='\n 'OpenXChangeHelper+added.'\n % dispatcher.absolute_url())", "title": "" } ]
c0d786c1e94cc697f1b50ea9592d76c8
Predict the class for a new given data.
[ { "docid": "7a06abf630e048043c35be2fb7f84503", "score": "0.0", "text": "def predict(self, data):\n feat_df = data[['value']].copy()\n feat_df['length'] = feat_df['value'].apply(lambda val: len(val))\n feat_df['digit_frac'] = feat_df['value'].apply(\n lambda val: 0 if len(val) == 0 else\n sum(char.isdigit() for char in val) / len(val))\n feat_df['digit_num'] = feat_df['value'].apply(\n lambda val: sum(char.isdigit() for char in val))\n feat_df['alpha_frac'] = feat_df['value'].apply(\n lambda val: 0 if len(val) == 0 else\n sum(char.isalpha() for char in val) / len(val))\n feat_df['alpha_num'] = feat_df['value'].apply(\n lambda val: sum(char.isalpha() for char in val))\n feat_df['space_frac'] = feat_df['value'].apply(\n lambda val: 0 if len(val) == 0 else\n sum(char.isspace() for char in val) / len(val))\n feat_df['space_num'] = feat_df['value'].apply(\n lambda val: sum(char.isspace() for char in val))\n features = feat_df.ix[:, 1:].as_matrix()\n return self.clf.predict_proba(features)", "title": "" } ]
[ { "docid": "5448685d9015ae40b0c3e981383ba774", "score": "0.78026956", "text": "def predict_class(self, X_new):\n result = [self.predict_class_single(x) for x in X_new]\n return result", "title": "" }, { "docid": "af34c97d9232b7d9a55031dcf07ddbc8", "score": "0.7721786", "text": "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_classifier.predict(data)", "title": "" }, { "docid": "bb2126760d1f5fbf7b1d45ec0c0a8f50", "score": "0.76865447", "text": "def predict(self, data):\n\t\traise NotImplementedError", "title": "" }, { "docid": "8c90688c16349cc1f2f21ec40bc2decf", "score": "0.7506565", "text": "def predict(self, data):\n try:\n getattr(self, \"tree\")\n except AttributeError:\n raise RuntimeError(\"You must train classifer before predicting data!\")\n\n predicts_proba = self.predict_proba(data)\n predicts = _classify_from_probs(predicts_proba)\n return predicts", "title": "" }, { "docid": "fe3846a11556c983528eb2cc678b9484", "score": "0.742696", "text": "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_svm.predict(data)", "title": "" }, { "docid": "e03b67b3e4d60e398b36255a880d5f4b", "score": "0.740293", "text": "def predict(self, data):\n return self.result.predict(data)", "title": "" }, { "docid": "bf21a1962a845954d88af595da11c5c9", "score": "0.73955154", "text": "def _predict(self, data):\n # make sure we're talking about arrays\n data = N.asarray(data)\n\n # checks only in debug mode\n if __debug__:\n if not data.ndim == 2:\n raise ValueError, \"Data array must be two-dimensional.\"\n\n if not data.shape[1] == self.__data.nfeatures:\n raise ValueError, \"Length of data samples (features) does \" \\\n \"not match the classifier.\"\n\n # compute the distance matrix between training and test data with\n # distances stored row-wise, ie. distances between test sample [0]\n # and all training samples will end up in row 0\n dists = self.__dfx(self.__data.samples, data).T\n\n # determine the k nearest neighbors per test sample\n knns = dists.argsort(axis=1)[:, :self.__k]\n\n # predicted class labels will go here\n predicted = []\n\n if self.__voting == 'majority':\n vfx = self.getMajorityVote\n elif self.__voting == 'weighted':\n vfx = self.getWeightedVote\n else:\n raise ValueError, \"kNN told to perform unknown voting '%s'.\" \\\n % self.__voting\n\n # perform voting\n results = [vfx(knn) for knn in knns]\n\n # extract predictions\n predicted = [r[0] for r in results]\n\n # store the predictions in the state. 
Relies on State._setitem to do\n # nothing if the relevant state member is not enabled\n self.predictions = predicted\n self.values = [r[1] for r in results]\n\n return predicted", "title": "" }, { "docid": "8fed17ecec824eb4353b986f2322c351", "score": "0.7265701", "text": "def predict_classification(self, data, current_timestamp):\n\n latest_train_file = self._train_file_manager.latest_train_filename(current_timestamp)\n\n predict_x, symbols, prediction_timestamp, target_timestamp = self._data_transformation.create_predict_data(\n data)\n predict_x = self._preprocess_inputs(predict_x)\n\n if self._topology is None:\n n_timesteps = predict_x.shape[2]\n self.initialise_topology(n_timesteps)\n\n # Verify data is the correct shape\n network_input_shape = self._topology.get_network_input_shape()\n data_input_shape = predict_x.shape[-3:]\n\n if data_input_shape != network_input_shape:\n err_msg = 'Data shape' + str(data_input_shape) + \" doesnt match network input \" + str(\n network_input_shape)\n raise ValueError(err_msg)\n\n predict_y = cromulon_eval.eval_neural_net(\n predict_x, self._topology,\n self._tensorflow_flags,\n latest_train_file\n )\n\n if self._tensorflow_flags.predict_single_shares: # Return batch axis to series position\n predict_y = np.swapaxes(predict_y, axis1=1, axis2=2)\n predict_y = np.squeeze(predict_y, axis=1)\n\n target_timestamps = []\n for i in range(self._topology.n_forecasts):\n temp_timestamp = deepcopy(target_timestamp)\n target_timestamps.append(temp_timestamp)\n target_timestamp += timedelta(days=self._data_transformation.target_delta_ndays)\n\n return predict_y, symbols, target_timestamps", "title": "" }, { "docid": "3b80ec6f26b9580d940accac7f0fc4e9", "score": "0.72496915", "text": "def predict(self, data_in):\n pass", "title": "" }, { "docid": "4b312033de0a541c20ab409b67aa405e", "score": "0.724147", "text": "def predict(self, data: np.array) -> np.array:\n raise NotImplementedError", "title": "" }, { "docid": "cb875f5751635a745fb12c24f4a51e47", "score": "0.72259176", "text": "def predict(self, testData=[]):\n result = []\n for classValue in self._classAttrs:\n #print(f'Computing Label: {classValue}, {self._classLabelMap[classValue]}')\n result.append(self._computeCondProb(testData, classValue))\n return self._classLabelMap[result.index(max(result))]", "title": "" }, { "docid": "626fe6f0a57e68c00153c3580c25da02", "score": "0.7214135", "text": "def predict_class(self, original_image_numpy: np.ndarray) -> None:\n from app.dl_model.image import ClassifierInput\n # scale up coordinates\n self.scale_up_coordinates()\n x1, y1, x2, y2 = [int(coord) for coord in self.scale_coordinates.round()]\n # crop original numpy image\n numpy_image = original_image_numpy[y1:y2, x1:x2, :].copy()\n # create classifier input object\n classifier_input = ClassifierInput(numpy_image, new_shape=(224, 224))\n # classify input\n prediction = classifier_input.predict_class()\n # set attributes\n self.class_name = prediction.class_name # update class_name\n self.conf = prediction.conf # update probability\n self.product_id = prediction.product_id # set product external id\n self.detection_index = prediction.detection_index # set detection index\n self.top_k_names = prediction.top_k_names # set top k names list\n self.top_k_indices = prediction.top_k_indices # set top k detection index\n self.top_k_confidences = prediction.top_k_confidences # set top k confidieces values\n self.top_k_product_ids = prediction.top_k_product_ids # set top k product external ids", "title": "" }, { "docid": 
"db46c54eef48ac7435e03dbbe95c6193", "score": "0.7139843", "text": "def predict(self):\n raise NotImplementedError", "title": "" }, { "docid": "56993e302971bc1558fe9fc91a5d282c", "score": "0.71199054", "text": "def predict(self):\n probabilities = self.probability_array()\n # THIS ASSUMES the classifiers are in order: 0th column of the\n # probabilities corresponds to label = 0, ..., 9th col is for 9.\n classes = np.argmax(probabilities, axis=1)\n return classes", "title": "" }, { "docid": "261008f0eec43d39ae3aa089dde830bf", "score": "0.705342", "text": "def predict(self, data):\n data['predicted'] = self.sentiment_classifier.predict_estimator(data)\n return data", "title": "" }, { "docid": "f5aee682ba128214b5c0e303d74c73db", "score": "0.7046517", "text": "def predict(self, x, **kwargs):\n kwargs = self.filter_sk_params(Sequential.predict_classes, kwargs)\n classes = self.model.predict_classes(x, **kwargs)\n return self.classes_[classes]", "title": "" }, { "docid": "8e255c7fdfac58c10ac436e39cd5b42b", "score": "0.7039909", "text": "def predict(self, instances):\r\n raise NotImplementedError", "title": "" }, { "docid": "d97b47f7e7ca1adefee482353d531bfe", "score": "0.7032927", "text": "def predict(self, X):\n ...", "title": "" }, { "docid": "d97b47f7e7ca1adefee482353d531bfe", "score": "0.7032927", "text": "def predict(self, X):\n ...", "title": "" }, { "docid": "d97b47f7e7ca1adefee482353d531bfe", "score": "0.7032927", "text": "def predict(self, X):\n ...", "title": "" }, { "docid": "ed1c0c89527d385858ff8cfc44648d61", "score": "0.7029193", "text": "def predict(self, input_data):\n if not self.predict_as_probability_:\n return self.ensemble_model_.predict(input_data)\n else:\n return self.ensemble_model_.predict_proba(input_data)", "title": "" }, { "docid": "6ccdf11d32a3c7f601fad3ab25bf529d", "score": "0.70203143", "text": "def predict(self, inp_data: T_co) -> T_co:\n raise NotImplementedError", "title": "" }, { "docid": "121c8483efc473bd70b5b886e58d1773", "score": "0.7010002", "text": "def predict(self, data):\n\t\tres = self.neuralNetworks.inference(self.dataCenter.process_inference_data(data))\n\t\tprint(res)", "title": "" }, { "docid": "dcfe3d23c4ffc9fa82400b66d3106e6a", "score": "0.70093834", "text": "def predict(self, datum):\r\n probs = {}\r\n for class_ in set(self.train_classes):\r\n probs[class_] = self.distribution.class_prob[class_] * reduce(lambda x,y:x*y, [self.distribution.prob(feat_ind_feat[0],feat_ind_feat[1],class_) for feat_ind_feat in enumerate(datum)])\r\n return max(probs, key=lambda x:probs[x])", "title": "" }, { "docid": "9cc043d9d9478e885b1a30ac99afa1a3", "score": "0.700338", "text": "def predict(self, X: np.ndarray):\n return np.apply_along_axis(self.estimate_class, 1, X)", "title": "" }, { "docid": "84bf4d2544ab5f8485485f35b420dbfb", "score": "0.6986614", "text": "def predict(self, X):", "title": "" }, { "docid": "84bf4d2544ab5f8485485f35b420dbfb", "score": "0.6986614", "text": "def predict(self, X):", "title": "" }, { "docid": "7963a7c9c6b7aff4a6001d4d6d9ccf03", "score": "0.698226", "text": "def predict(self, data):\n\n prediction = None\n if self.model is not None:\n prediction = self.model.predict(data)\n return prediction", "title": "" }, { "docid": "2dd77e2840bf20e8475bd1becf9ecb70", "score": "0.6978952", "text": "def predict(self, x):\n # *** START CODE HERE ***\n return self.clf.predict_classes(x.reshape(x.shape[0], 28, 28, 1))\n # *** END CODE HERE ***", "title": "" }, { "docid": "0dbe749244a9371cb2af13d397a014cc", "score": "0.69650275", "text": "def 
predict(self, X):\n pass", "title": "" }, { "docid": "0dbe749244a9371cb2af13d397a014cc", "score": "0.69650275", "text": "def predict(self, X):\n pass", "title": "" }, { "docid": "0dbe749244a9371cb2af13d397a014cc", "score": "0.69650275", "text": "def predict(self, X):\n pass", "title": "" }, { "docid": "ad041756f38f3e9cd22342ecb9f0215a", "score": "0.6946739", "text": "def predict(self, data):\n length = len(data)\n\n \"\"\"\n We check if the size of the data is equal to the number of input neurons\n \"\"\"\n assert length == self.structure[0], 'ERROR: the length of the input list is not equal to the number of input neurons'\n\n data = np.reshape(data, (length, 1)).astype(float)\n\n # print(type(data))\n\n \"\"\"\n We loop over all the transitions between the layers of our brain\n \"\"\"\n for i in range(self.number_of_transitions):\n if self.activation_function == 'sigmoid':\n data = self.sigmoid(np.dot(self.weights[i], data) + self.biases[i])\n elif self.activation_function == 'ReLU':\n data = self.ReLU(np.dot(self.weights[i], data) + self.biases[i])\n elif self.activation_function == 'tanh':\n data = self.tanh(np.dot(self.weights[i], data) + self.biases[i])\n\n \"\"\"\n We allow our brain to store the last prediction. This might be helpful for printing it out on the screen for the user to investigate\n \"\"\"\n self.output = data\n\n return data", "title": "" }, { "docid": "dce47bad0d25fdc55cca32178c9ba248", "score": "0.6935033", "text": "def fit_predict(self):\n self.classifier = self.model\n self.classifier.fit(self.X_sample, self.y_sample)\n self.y_pred = self.classifier.predict(self.X_test)", "title": "" }, { "docid": "008198d7784fb2e7a731aeb408713053", "score": "0.6930602", "text": "def predict(self, X):\n\n\t\tn_samples = X.shape[0]\n\t\tpredicted = np.zeros(n_samples)\n\n\t\tfor i in xrange(n_samples):\n\t\t\tpredicted[i] = self.classify_example(X[i])\n\n\t\treturn predicted", "title": "" }, { "docid": "e1d5775ec7c714a694639add4f756ed5", "score": "0.69304484", "text": "def predict(self, dataset, output_type='class',\n missing_value_action='auto'):\n\n return super(_Classifier, self).predict(dataset,\n output_type=output_type,\n missing_value_action=missing_value_action)", "title": "" }, { "docid": "ea4de14c20cdd28242f5a94aaffc9160", "score": "0.69281465", "text": "def _predict(self, X):\n raise NotImplementedError", "title": "" }, { "docid": "607aad6a5afb0aeb89f1deaecbb307af", "score": "0.690092", "text": "def predict(self, X):\n raise NotImplementedError", "title": "" }, { "docid": "9e58d43645fdbcee4007acc57a5cc668", "score": "0.68968695", "text": "def predict(self, X):\n score = self.decision_function(X)\n decisions = self.loss_._score_to_decision(score)\n return self.classes_.take(decisions, axis=0)", "title": "" }, { "docid": "c57c06bc8419e2933493e380ed00c981", "score": "0.6892712", "text": "def predict(self, X):\n\t\tR = self.predict_soft(X)\t\t\t\t\t\t\t\t\t\t\t# compute soft output values\n\t\tY = R.argmax(1)\t\t\t\t\t\t\t\t\t\t\t\t\t\t# get index of maximum response\n\t\treturn self.classes[Y]\t\t\t\t\t\t\t\t\t\t\t\t# convert to saved class values", "title": "" }, { "docid": "63fa4cd743ea400c1b8497378f899c52", "score": "0.68859375", "text": "def predict(self, X):\n res = self.predict_proba(X)\n positive_mask = res >= 0.5\n negative_mask = res < 0.5\n res[positive_mask] = self.POSITIVE_CLASS\n res[negative_mask] = self.NEGATIVE_CLASS\n return res", "title": "" }, { "docid": "2c2c0e3daa084d766248bec126332fda", "score": "0.68834656", "text": "def predict(self, X):\r\n \r\n # 
To speed up, we apply the scoring function to all the instances\r\n # at the same time.\r\n scores = X.dot(self.w)\r\n \r\n # Create the output array.\r\n # At the positions where the score is positive, this will contain\r\n # self.positive class, otherwise self.negative_class.\r\n out = numpy.select([scores>=0.0, scores<0.0], [self.positive_class, \r\n self.negative_class])\r\n return out", "title": "" }, { "docid": "1d57e018a133bff8f40036f2e162e0cd", "score": "0.688265", "text": "def predict(self, data):\n return self.forward_propagate(data)", "title": "" }, { "docid": "97e292de42861a41e122e4956179e61d", "score": "0.6881437", "text": "def predict(self, data: List):", "title": "" }, { "docid": "f360c95885406f5d31f95a86f42c7d0b", "score": "0.6865131", "text": "def oldPredict(self, data):\n\n predictions = []\n\n if len(self.observations) < self.k_neighbors:\n print(f\"Data length ({len(data)}) was too small.\")\n\n for row in data:\n neighbors_info = {}\n\n for row_index in range(len(self.observations)):\n distance = self.calcualteEuclideanDistance(self.observations[row_index], row)\n if len(neighbors_info) > self.k_neighbors - 1:\n largest_distance = max(neighbors_info.keys())\n if distance < largest_distance:\n neighbors_info[distance] = self.labels[row_index]\n del neighbors_info[largest_distance]\n else:\n neighbors_info[distance] = self.labels[row_index]\n\n unique_values = set(neighbors_info.values())\n if len(unique_values) == 1:\n value = unique_values.pop()\n predictions.append(value)\n else:\n best_value = 0\n best_value_weight = 0\n for label in unique_values:\n weight = 0\n for distance in neighbors_info.keys():\n if label == neighbors_info[distance]:\n if 'inverse_distance' == self.weight_type:\n weight += self.calulateWeightedVote(distance)\n elif 'no_weight' == self.weight_type:\n weight += 1\n else:\n print(\"Not a valid_weight_type.\")\n\n if weight > best_value_weight:\n best_value_weight = weight\n best_value = label\n\n predictions.append(best_value)\n # print(f\"Neighbors Info: {neighbors_info}\")\n\n return predictions", "title": "" }, { "docid": "f395fc4155a42398448fddfb8600a70f", "score": "0.68589246", "text": "def doPredict(self, data: StockData) -> float:\r\n pass", "title": "" }, { "docid": "f4156213f8e559b921e3d20ac4f7f8ed", "score": "0.68580794", "text": "def predict(self, data):\n xdata, _ = self.array_from_cases(data)\n preds = self.model.predict(xdata)\n label_preds = [dict(zip(self.binarizer.classes_, pred)) for pred in preds]\n return label_preds", "title": "" }, { "docid": "8d4ffe73b4e8a71f8e9aa17cb94dcfe7", "score": "0.6854315", "text": "def predict(self, input_data: dict)-> str:\n if self.probabilities is None or self.target_probabilities is None:\n raise ValueError('You need to fit the data first!!')\n\n # This will store target:probability for given dataset.\n all_probs = {} # a dict.\n\n # iterating all the target classes to find probab.. of it's occurence.\n\n for uniq_target_name in set(self.dataset[self.target_name]):\n probability = 1\n for feat_name in input_data:\n probability *= self.probabilities[feat_name][(input_data[feat_name], uniq_target_name)]\n probability *= self.target_probabilities[uniq_target_name]\n\n all_probs[probability] = uniq_target_name\n return all_probs[max(all_probs)]", "title": "" }, { "docid": "70c642e23f600a42129711c5aa4fd1c0", "score": "0.6849167", "text": "def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. 
Run build method or load model before fitting')\n return self.model.predict(data, 1, verbose) # ,steps)", "title": "" }, { "docid": "70c642e23f600a42129711c5aa4fd1c0", "score": "0.6849167", "text": "def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n return self.model.predict(data, 1, verbose) # ,steps)", "title": "" }, { "docid": "9e3bc21ab3b3d4680d27141f31860e52", "score": "0.682975", "text": "def predict(self, test_data):\n return self.leader.predict(test_data)", "title": "" }, { "docid": "ad26a2e61342fccd6dcf3bd7ec1a6e88", "score": "0.68279463", "text": "def predict(self,X):\n return self.classifier.predict(X)", "title": "" }, { "docid": "ad26a2e61342fccd6dcf3bd7ec1a6e88", "score": "0.68279463", "text": "def predict(self,X):\n return self.classifier.predict(X)", "title": "" }, { "docid": "ad26a2e61342fccd6dcf3bd7ec1a6e88", "score": "0.68279463", "text": "def predict(self,X):\n return self.classifier.predict(X)", "title": "" }, { "docid": "ad26a2e61342fccd6dcf3bd7ec1a6e88", "score": "0.68279463", "text": "def predict(self,X):\n return self.classifier.predict(X)", "title": "" }, { "docid": "7b05aeda478273f2fe3a5d87bd13320c", "score": "0.6827877", "text": "def predict_class(self, feature):\n return self._clf.predict(feature)", "title": "" }, { "docid": "7b05aeda478273f2fe3a5d87bd13320c", "score": "0.6827877", "text": "def predict_class(self, feature):\n return self._clf.predict(feature)", "title": "" }, { "docid": "547d9ffd080b7c963929ea2c71edda12", "score": "0.6826277", "text": "def predict(self, data):\n return self.model.predict(data, batch_size=data.shape[1])", "title": "" }, { "docid": "f51c28041330a2a7dc57ef5e9b203647", "score": "0.68258864", "text": "def predict(self, X):\n (t0, t1, t2) = self.theta\n g = lambda x: t0 + t1 * x[0] + t2 * x[1]\n return np.array([\n self.classes[1] if g(x) > 0 else self.classes[0]\n for x in X\n ])", "title": "" }, { "docid": "202ef32309f91949151ff98804186a5d", "score": "0.68162555", "text": "def predict(self, X):\n\t\tproba = numpy.array(self.predict_proba(X))\n\t\treturn self.classes_.take(numpy.argmax(proba, axis=0))", "title": "" }, { "docid": "1ad04fa5e2266bfb20180f0000b9efb8", "score": "0.6813913", "text": "def predict(self, X):\n prob = self.predict_proba(X)\n if self.rule == 'fda':\n prob_1 = prob[:, :self.n_class_]\n prob_2 = prob[:, self.n_class_:]\n return np.vstack((self.labels_[prob_1.argmax(1)], self.labels_[prob_2.argmax(1)]))\n else:\n return self.labels_[prob.argmax(1)]", "title": "" }, { "docid": "6c47192d153d1f02fdfd0043ad9d9797", "score": "0.6808118", "text": "def predict(self, test_data):\r\n return self.gs.predict(test_data)", "title": "" }, { "docid": "9f00cbe64f7bcb9fa7db9c3e649bc758", "score": "0.68043154", "text": "def class_predict(trained_model, X_test, y_test, image_name):\n if MODEL == 1:\n return class_predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 3:\n return class_predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 2:\n return class_predict_2(trained_model, X_test, y_test)\n else:\n # For models 4, 5 and 6\n return class_predict_3(trained_model, X_test, y_test, image_name)", "title": "" }, { "docid": "d682af87785fe6074508576f467a35a1", "score": "0.68000424", "text": "def predict(self, X):\n scores = self.decision_function(X)\n if self.classes.shape[0] == 2:\n indices = np.array(scores > 0, dtype=np.int)\n else:\n indices = scores.argmax(axis=1)\n return 
self.classes[np.ravel(indices)]", "title": "" }, { "docid": "f96084d7939704b9d6624fbd61212240", "score": "0.6799546", "text": "def predict(self, review):\n raise NotImplementedError", "title": "" }, { "docid": "4c3b8e0347e9320bde865b98ecc41fc3", "score": "0.6795649", "text": "def proba_redefined_predict(model,X,weigh,classes=string.ascii_lowercase):\n\n y_proba=model.predict_proba(X)\n tuned=renorm(y_proba,weigh)\n y_max_arg=tuned.argmax(axis=1)\n predict=to_class(y_max_arg,classes)\n \n return predict", "title": "" }, { "docid": "98bc300eb7ad6272df5e0d064ca21173", "score": "0.6790358", "text": "def predict(self, model, context, data):\n pass", "title": "" }, { "docid": "c7a4874f8c103bfb771eafcd60b142a9", "score": "0.678795", "text": "def predict(self, data):\r\n return self.sess.run([self.predict_op, self.Mu], feed_dict={self.X: data})", "title": "" }, { "docid": "fdee209b5eb7170cfaf475b168d5fd58", "score": "0.6785362", "text": "def make_predictions(self):\n if is_classification(self.model):\n if self.ct == None:\n prediction = self.model.predict(self.input_data.to_numpy())\n probabilities = self.model.predict_proba(self.input_data.to_numpy())\n return prediction, probabilities\n elif self.ct != None: \n prediction = self.model.predict(self.data_into_model())\n probabilities = self.model.predict_proba(self.data_into_model())\n return prediction, probabilities\n else:\n raise Exception((\"{} not supported. Please create an issue on Github\").format(self.model))\n \n else:\n if self.ct == None:\n prediction = self.model.predict(self.input_data)\n return prediction\n elif self.ct != None: \n prediction = self.model.predict(self.data_into_model())\n return prediction\n else:\n raise Exception((\"{} not supported. Please create an issue on Github\").format(self.self.model))", "title": "" }, { "docid": "9d411cbae359e543a5bd5d20b5b622fd", "score": "0.67694813", "text": "def predict(data, samples, classifier='SVM',\r\n classification='combined', selectFeatures=('CUK', 10)):\r\n if (classification == \"trained\"):\r\n classifyTrained = True\r\n classifySurface = False\r\n elif (classification == 'surface'):\r\n classifyTrained = False\r\n classifySurface = True\r\n else:\r\n classifyTrained = True\r\n classifySurface = True\r\n if (classifier == \"SVM\"):\r\n clf = cl.classifyDataSVM(data, classifyTrained,\r\n classifySurface, selectFeatures,scaling=False)\r\n elif (classifier == \"DT\"):\r\n clf = cl.classifyDataDT(data, classifyTrained,\r\n classifySurface, selectFeatures,scaling=False)\r\n elif (classifier == \"KNN\"):\r\n clf = cl.classifyDataKNN(data, classifyTrained,\r\n classifySurface, selectFeatures,scaling=False)\r\n elif (classifier == \"LogReg\"):\r\n clf = cl.classifyDataLR(data, classifyTrained,\r\n classifySurface, selectFeatures,scaling=False)\r\n else:\r\n print (str(classifier) + \" is not a valid option\")\r\n \r\n [samples, _,_,_] = clf.extractData(samples,scaling=False)\r\n \r\n predictions = [clf.predict(s) for s in samples]\r\n return predictions", "title": "" }, { "docid": "98a71204ce337506e87c96c13cf2e8bf", "score": "0.67673403", "text": "def predict(self, X):\n return self.classifier.predict(X)", "title": "" }, { "docid": "164c739fc5e5f85a928caeb1c18495ea", "score": "0.6764149", "text": "def _predict(self, inputs):\n node = self.tree_\n while node.left:\n if inputs[node.feature_index] < node.split:\n node = node.left\n else:\n node = node.right\n return node.predicted_class", "title": "" }, { "docid": "24f938238804b5b01301dde7fe1abe71", "score": "0.67549306", "text": "def 
predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n # print len(data[0])\n # print type(data[0])\n # print data.shape\n return self.model.predict(data, 1, verbose) # ,steps)", "title": "" }, { "docid": "241f6a7ff1bd687b329920d0091928d7", "score": "0.67508453", "text": "def predict(cls, input):\n clf = cls.get_model()\n return clf.predict(input)", "title": "" }, { "docid": "151c52e44642b6c09f31d0a1dff2d364", "score": "0.6750476", "text": "def predict(self, X):\n\n # Get a matrix with the probabilities of a sample belonging to each class.\n probs = self.predict_proba(X)\n\n # Get the predicted classes by choosing the class which has biggest probability.\n y_ = np.argmax(probs, axis=1)\n\n # Get the original class ints before one hot encoding\n y = self.oneHot_.retransform(y_)\n\n return y", "title": "" }, { "docid": "18e51bdebd5b0434ab6b9734461c3a5f", "score": "0.6746477", "text": "def _predict(self, classify: np.array, n_preds=1):\r\n tmp = classify.argsort()[:, :n_preds] # Return the index of the best label classification\r\n preds = copy(tmp) # allow to copy tmp\r\n for index, target in enumerate(self.targets):\r\n preds = np.where(tmp == index, target, preds) # Return the target label corresponding to the index\r\n self.preds = preds", "title": "" }, { "docid": "62a0e1b07df36d2179a56ee64503f32a", "score": "0.67386496", "text": "def predict(self, data, version='default'):\n if isinstance(data, list):\n inputs = [self._indarray(x) for x in data]\n else:\n inputs = [self._indarray(data)]\n\n classification_response = self.skil.api.multipredict(\n deployment_name=self.deployment.name,\n model_name=self.model_name,\n version_name=version,\n body=skil_client.MultiPredictRequest(\n id=str(uuid.uuid1()),\n needs_pre_processing=False,\n inputs=inputs\n )\n )\n outputs = classification_response.outputs\n outputs = [np.asarray(o.data).reshape(o.shape) for o in outputs]\n if len(outputs) == 1:\n return outputs[0]\n return outputs", "title": "" }, { "docid": "acfb6f5f21b9591d03a0d9fb4d0549db", "score": "0.67359585", "text": "def predict(self, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "b767930d934e9cec13edfb17a3796e55", "score": "0.6733655", "text": "def _predict(self, testX):\n pass", "title": "" }, { "docid": "98721a5068510eb60720b6a1a0b30001", "score": "0.6731271", "text": "def predict(self, X, check_input=True):\n if check_input:\n X = check_array(X)\n proba = self.predict_proba(X)\n return self.classes_.take(np.argmax(proba, axis=1), axis=0)", "title": "" }, { "docid": "63a5e6e51372ee008d907a7f7b0a1703", "score": "0.6725918", "text": "def process(self, data):\n return self.estimator.predict(data)", "title": "" }, { "docid": "194a3fc44938896c50e92bf8ed3adfc0", "score": "0.6710733", "text": "def apply_classifier(self):\n for detected_object in self.detected_objects:\n detected_object.predict_class(self.original_image)", "title": "" }, { "docid": "cb71f43e44a81dfecb0a3d36c11e0a4f", "score": "0.67100644", "text": "def predict(self, X):\n raise NotImplementedError('Abstract method \"predict\" must be '\n 'specialised!')", "title": "" }, { "docid": "215bf65ee00cb051142c2b1bf4247c5b", "score": "0.6706792", "text": "def clf1_predict(self):\n self._pred_clf_1 = self._clf1.predict(self._vectorized_input)[0]", "title": "" }, { "docid": "25f88650082830a56954e9443f6bb71d", "score": "0.6698542", "text": "def predict(self, data):\r\n\r\n distances = 
[np.linalg.norm(data-self.centroids[centroid]) for centroid in self.centroids]\r\n classification = distances.index(min(distances))\r\n return classification", "title": "" }, { "docid": "5ecbc054bb54d6b53a91b040b1608546", "score": "0.66977125", "text": "def predict(self, dataset):\n # TODO: self.model(training=False)\n # logging.info('Predicting')\n # if self.verbosity > 1:\n # print('Predicting')\n dataset = rdata.data2dataset(dataset) # Convert to dataset\n assert dataset.get_dim_input() == self.n_inputs, \\\n 'Number of covariates does not match the model %d -> %d' % (dataset.get_dim_input(), self.n_inputs)\n n_data = dataset.get_n_data()\n\n pred = self._predict(dataset=dataset) # Predict\n\n if self.isprobabilistic():\n assert pred[0].shape == (n_data, self.n_outputs)\n assert pred[1].shape == (n_data, self.n_outputs)\n else:\n assert pred.shape == (n_data, self.n_outputs)\n return pred", "title": "" }, { "docid": "a6c06a9d0d4f7c56296e033cbd48fff4", "score": "0.66906804", "text": "def predict(self, X):\n if isinstance(self.model, ClassifierMixin):\n scores = self._decision_function(X)\n if len(scores.shape) == 1:\n indices = (scores > 0).astype(np.int)\n else:\n indices = scores.argmax(axis=1)\n return self.classes_[indices]\n else:\n return self._decision_function(X)", "title": "" }, { "docid": "ae422e85522680cf98cc5f477a554ba4", "score": "0.6690219", "text": "def predict(self, Xnew, compute_var=None):\n raise NotImplementedError('')", "title": "" }, { "docid": "3f7c6b7e9f66cb01aed654dbdb929a56", "score": "0.66843516", "text": "def predict(cls, input):\n clf = cls.get_model()\n print('input=')\n print(input)\n return clf.predict(input)", "title": "" }, { "docid": "9c0ee096479defb84d0a13dc9026ecae", "score": "0.6673259", "text": "def _predict(self, x):\n pass", "title": "" }, { "docid": "8c6ebfba9d4ddf5c27aba3a32fd51bb4", "score": "0.66670716", "text": "def predict_class(self, inputs):\n if not self.trained:\n if self.verbose:\n print(\"KMeans Model Class - Predict Class Function: No trained model\")\n return -1\n\n\n return self.cluster_classes[self.model.predict(inputs)]", "title": "" }, { "docid": "7c0440ea5f414c719fac6d0f83c360ea", "score": "0.6666943", "text": "def predict(self, X):\n\n # this will be an np.array of integers representing classes\n lp_prediction = self.classifier.predict(self.ensure_input_format(X))\n\n return self.inverse_transform(lp_prediction)", "title": "" }, { "docid": "6ef54b2d4137fc246acf5fbc8d66cbf3", "score": "0.6661385", "text": "def predict(self, X):\n predictions = []\n \n for i in range(len(self.estimators)):\n predictions.append(self.estimators[i].predict(X))\n \n final_output = []\n for i in range(len(X)):\n output = {}\n for j in range(len(predictions)):\n if predictions[j][i] in output.keys():\n output[predictions[j][i]] += 1\n else:\n output[predictions[j][i]] = 1\n \n Class = max(output, key=output.get)\n # print(Class)\n final_output.append(Class)\n final_output = pd.Series(final_output)\n\n return final_output", "title": "" }, { "docid": "6ef54b2d4137fc246acf5fbc8d66cbf3", "score": "0.6661385", "text": "def predict(self, X):\n predictions = []\n \n for i in range(len(self.estimators)):\n predictions.append(self.estimators[i].predict(X))\n \n final_output = []\n for i in range(len(X)):\n output = {}\n for j in range(len(predictions)):\n if predictions[j][i] in output.keys():\n output[predictions[j][i]] += 1\n else:\n output[predictions[j][i]] = 1\n \n Class = max(output, key=output.get)\n # print(Class)\n final_output.append(Class)\n 
final_output = pd.Series(final_output)\n\n return final_output", "title": "" }, { "docid": "7ade2230ca13f40ba219a71a80b9c874", "score": "0.66568995", "text": "def predict(self, X):\n\n stuff = self._vectorizer.transform(X)\n result = self._classifier.predict(stuff)\n return result\n pass", "title": "" }, { "docid": "0e6b826721f83c094d3267c1379bde9d", "score": "0.6655942", "text": "def predict(self, data: np.array) -> np.array:\n return self.model.predict(squeeze_keep_batch(data))", "title": "" }, { "docid": "c45f8cdcd9e76a9b99f008a58a5e7e4e", "score": "0.6650768", "text": "def predict(self, data):\n params = self.get_params(self.opt_state)\n return self.predict_jax(params, data)", "title": "" }, { "docid": "845116bc58bd4b6cc232a77e0debd1b2", "score": "0.6648379", "text": "def predict_proba(self):\n ...", "title": "" }, { "docid": "86a04caed95c9e02c3d3a479d67ad4b8", "score": "0.6642965", "text": "def predict_only(self):", "title": "" }, { "docid": "05c0a6b3b1ffdc95848239807b3413c8", "score": "0.66365135", "text": "def predict(self, data, version='default'):\n if self.transform_service:\n data = self.transform_service.predict(data, version)\n return self.model_service.predict(data, version)", "title": "" }, { "docid": "5f9b58ca7eb069bf3c074ac2524b6490", "score": "0.66355693", "text": "def predict_classes(self, X, boundary=0.5):\n # Add an intercept if desired.\n X = self._add_intercept(X)\n # Predict the probabilities of belonging to class 1.\n predicted_probabilities = self.predict_probabilities(X)\n # Set predictions to 1 or 0 based on the decision boundary.\n predicted_classes = np.where(predicted_probabilities >= boundary, 1, 0)\n \n return predicted_classes", "title": "" } ]
70ef012d45d6735cabcc3559dbf60dea
Reorganise correlation product dim of vis so that correlations are grouped as given in cp_argsort.
[ { "docid": "68dbac0613c9b8f44e24e710655b1b80", "score": "0.6483097", "text": "def _reorganise_product(vis, cp_argsort, out_vis):\n n_time = vis.shape[0]\n n_chan = vis.shape[1]\n n_bl = cp_argsort.shape[0]\n n_stok = cp_argsort.shape[1]\n for tm in range(n_time):\n bstep = 128\n bblocks = (n_bl + bstep - 1) // bstep\n for bblock in numba.prange(bblocks):\n bstart = bblock * bstep\n bstop = min(n_bl, bstart + bstep)\n for prod in range(bstart, bstop):\n in_cp = cp_argsort[prod]\n for stok in range(n_stok):\n in_stok = in_cp[stok]\n for chan in range(n_chan):\n out_vis[tm, prod, chan, stok] = vis[tm, chan, in_stok]\n return out_vis", "title": "" } ]
[ { "docid": "5bd7cb3cf863508164617877e800fafb", "score": "0.5846539", "text": "def argsort(self, axis=-1, kind=None, order=None):\n ...", "title": "" }, { "docid": "383269bebe3899d49aa4ec6c45b2b337", "score": "0.58052355", "text": "def _sort(self):\n\n indices = self.xarr.argsort()\n self.xarr = self.xarr[indices]\n self.cube = self.cube[indices,:,:]\n if self.errorcube is not None:\n self.errorcube = self.errorcube[indices,:,:]", "title": "" }, { "docid": "a51d8989b5bb80f579f1f2d2ef481899", "score": "0.5365585", "text": "def computeVisibilityCorrelations(self, data, samples):\n for (ch1,bl1,ch2,bl2) in self.blChanPairs.keys():\n w = np.logical_and(samples[bl1][:,ch1] != 0, samples[bl2][:,ch2] != 0)\n if np.all(np.logical_not(w)):\n self.blChanPairs[(ch1,bl1,ch2,bl2)]['visCorr'] = 0.0+0.0j\n self.blChanPairs[(ch1,bl1,ch2,bl2)]['samples'] = 0\n else: \n self.blChanPairs[(ch1,bl1,ch2,bl2)]['visCorr'] = np.average((data[bl1][:,ch1]*np.conj(data[bl2][:,ch2]))[w])\n self.blChanPairs[(ch1,bl1,ch2,bl2)]['samples'] = np.sum((samples[bl1][:,ch1] * samples[bl2][:,ch2])[w])\n self.visibilitiesAreCorrelated = True", "title": "" }, { "docid": "1fb971b22b7849422a26b1b33d80d1a2", "score": "0.5276112", "text": "def _reorder_connectivity_vtk(cells):\n con, offsets, types = cells\n _reorder_data(con, offsets, types, sesam2vtk_connectivity)", "title": "" }, { "docid": "801d3e19eefc48c700d56955aca1fc3e", "score": "0.5252306", "text": "def _reorder_ds(_ds, dims_c, dims_g):\n\n _DS = _copy.deepcopy(_ds)\n for _dim in [dims_c.Y, dims_g.Y]:\n _DS[\"n\" + _dim] = -(_DS[_dim] - (int(_DS[_dim][0].data)))\n _DS = (\n _DS.swap_dims({_dim: \"n\" + _dim})\n .drop_vars([_dim])\n .rename({\"n\" + _dim: _dim})\n )\n\n for var in _ds.data_vars:\n DIMS = [dim for dim in _ds[var].dims]\n dims = Dims(DIMS[::-1])\n if len(dims) > 1 and \"nv\" not in DIMS:\n dtr = list(dims)[::-1]\n dtr[-1], dtr[-2] = dtr[-2], dtr[-1]\n _da = _ds[var].transpose(*dtr)[::-1, :]\n _DS[var] = _da\n return _DS", "title": "" }, { "docid": "67fdde9a2fb034a1f0d971420d8dfc00", "score": "0.52446276", "text": "def clipcorr():", "title": "" }, { "docid": "1e1e037e5c20bd00d65d32c7222ee9c4", "score": "0.513695", "text": "def _collapse_composite_index(dims):\n return [np.prod(dims)]", "title": "" }, { "docid": "d9ff5d707f7c1726f087a774ddc8d1d0", "score": "0.5061077", "text": "def _consolidate(self, diagrams):\n return np.vstack(diagrams)", "title": "" }, { "docid": "d82ebc480455d0b00427bb55d78f53ca", "score": "0.5035242", "text": "def plot_com_order2(com_data,rownames,colnames):\n # Get the basic info of the community array.\n com_data = com_data.astype(int) # Make sure that the communities are denoted as integers.\n n_row = np.shape(com_data)[0] # Number of rows.\n n_col = np.shape(com_data)[1] # Number of columns.\n data = com_data.reshape(com_data.size) # Convert to 1-dimension array.\n all_count = np.bincount(data) # Count frequency of each community.\n all_order = np.argsort(all_count)[::-1] # Generate the ranking list of communities by frequency.\n n_com = len(all_order) # n_com is equal to the largest integer in com_data plus 1.\n \n # First apply the majority rule to the rows.\n majority_row = {}\n for i in range(n_com):\n majority_row[i] = {}\n \n # For each row, the most frequent communities are stored.\n for i in range(n_row):\n myrow = com_data[i,:]\n rowcount = np.bincount(myrow)\n rowmax = max(rowcount)\n for (j,k) in enumerate(rowcount):\n if k==rowmax:\n majority_row[j][i] = k\n \n unreach = set(range(n_row))\n row_order = [] # The list of new 
order of the rows.\n \n for i in all_order:\n if len(majority_row[i])>0:\n temp1 = majority_row[i]\n temp2 = sorted(temp1.items(), key=operator.itemgetter(1), reverse=True)\n for (m,n) in temp2:\n if (m in unreach):\n row_order.append(m)\n unreach.remove(m)\n \n new_data = com_data[row_order,:] # The row-rearranged data. \n \n # Then apply the majority rule to the columns.\n majority_col = {}\n for i in range(n_com):\n majority_col[i] = {}\n\n # For each column, the most frequent communities are stored.\n for i in range(n_col):\n mycol = com_data[:,i]\n colcount = np.bincount(mycol)\n colmax = max(colcount)\n for (j,k) in enumerate(colcount):\n if k==colmax:\n majority_col[j][i] = k\n \n unreach = set(range(n_col))\n col_order = [] # The list of new order of the columns.\n \n for i in all_order:\n if len(majority_col[i])>0:\n temp1 = majority_col[i]\n temp2 = sorted(temp1.items(), key=operator.itemgetter(1), reverse=True)\n for (m,n) in temp2:\n if (m in unreach):\n col_order.append(m)\n unreach.remove(m)\n \n new_data2 = new_data[:,col_order] # The row-and-column-rearranged data. \n \n # Generate n_com \"distinct\" enough colors.\n HLS_color = []\n i = 0\n step = 0.9/n_com\n init = step\n while i < n_com:\n temp_hue = init\n temp_lig = rd.random()\n temp_sat = rd.random()\n HLS_color.append((temp_hue,temp_lig,temp_sat))\n i += 1\n init += step\n RGB_color = [cs.hls_to_rgb(a,b,c) for (a,b,c) in HLS_color]\n \n # Prepare the discrete colormap for each integer/community.\n cmap = colors.ListedColormap(RGB_color)\n \n # Reorder the row names and the column names.\n rownames = [rownames[i] for i in row_order]\n colnames = [colnames[i] for i in col_order]\n \n # Prepare the plot.\n fig, ax = plt.subplots(figsize=(16,16))\n xticks = np.arange(0,n_col,1)+0.5\n yticks = np.arange(0,n_row,1)+0.5\n ax.set_xticks(xticks, minor=False)\n ax.set_yticks(yticks, minor=False)\n ax.pcolor(new_data2, cmap=cmap, alpha=0.8, edgecolors='white', linewidths=1)\n ax.invert_yaxis() # This will make the rows start from the top. 
\n ax.xaxis.tick_top() # This will make x labels on top.\n ax.set_xticklabels(colnames, minor=False)\n ax.set_yticklabels(rownames, minor=False)\n \n plt.savefig('order2.png')", "title": "" }, { "docid": "4648b222fde729ade78e458f8a66856e", "score": "0.4994326", "text": "def plot_com_order1(com_data,rownames,colnames,row=True):\n # Get the basic info of the community array.\n com_data = com_data.astype(int) # Make sure that the communities are denoted as integers.\n n_row = np.shape(com_data)[0] # Number of rows.\n n_col = np.shape(com_data)[1] # Number of columns.\n data = com_data.reshape(com_data.size) # Convert to 1-dimension array.\n all_count = np.bincount(data) # Count frequency of each community.\n all_order = np.argsort(all_count)[::-1] # Generate the ranking list of communities by frequency.\n n_com = len(all_order) # n_com is equal to the largest integer in com_data plus 1.\n \n # Apply the majority rule depending on the logical value of row.\n majority = {}\n for i in range(n_com):\n majority[i] = {}\n \n if (row==True): # When rows are to be rearranged.\n # For each row, the most frequent communities are stored.\n for i in range(n_row):\n myrow = com_data[i,:]\n rowcount = np.bincount(myrow)\n rowmax = max(rowcount)\n for (j,k) in enumerate(rowcount):\n if k==rowmax:\n majority[j][i] = k\n \n unreach = set(range(n_row))\n row_order = [] # The list of new order of the rows.\n \n for i in all_order:\n if len(majority[i])>0:\n temp1 = majority[i]\n temp2 = sorted(temp1.items(), key=operator.itemgetter(1), reverse=True)\n for (m,n) in temp2:\n if (m in unreach):\n row_order.append(m)\n unreach.remove(m)\n \n new_data = com_data[row_order,:] # The rearranged data. \n \n else: # When columns are to be rearranged.\n # For each column, the most frequent communities are stored.\n for i in range(n_col):\n mycol = com_data[:,i]\n colcount = np.bincount(mycol)\n colmax = max(colcount)\n for (j,k) in enumerate(colcount):\n if k==colmax:\n majority[j][i] = k\n \n unreach = set(range(n_col))\n col_order = [] # The list of new order of the columns.\n \n for i in all_order:\n if len(majority[i])>0:\n temp1 = majority[i]\n temp2 = sorted(temp1.items(), key=operator.itemgetter(1), reverse=True)\n for (m,n) in temp2:\n if (m in unreach):\n col_order.append(m)\n unreach.remove(m)\n \n new_data = com_data[:,col_order] # The rearranged data. \n \n # Generate n_com \"distinct\" enough colors.\n HLS_color = []\n i = 0\n step = 0.9/n_com\n init = step\n while i < n_com:\n temp_hue = init\n temp_lig = rd.random()\n temp_sat = rd.random()\n HLS_color.append((temp_hue,temp_lig,temp_sat))\n i += 1\n init += step\n RGB_color = [cs.hls_to_rgb(a,b,c) for (a,b,c) in HLS_color]\n\n # Prepare the discrete colormap for each integer/community.\n cmap = colors.ListedColormap(RGB_color)\n\n # Reorder the row names or the column names depending on the logical value of row.\n if (row==True):\n rownames = [rownames[i] for i in row_order]\n else:\n colnames = [colnames[i] for i in col_order]\n \n # Prepare the plot.\n fig, ax = plt.subplots(figsize=(16,16))\n xticks = np.arange(0,n_col,1)+0.5\n yticks = np.arange(0,n_row,1)+0.5\n ax.set_xticks(xticks, minor=False)\n ax.set_yticks(yticks, minor=False)\n ax.pcolor(new_data, cmap=cmap, alpha=0.8, edgecolors='white', linewidths=1)\n ax.invert_yaxis() # This will make the rows start from the top. 
\n ax.xaxis.tick_top() # This will make x labels on top.\n ax.set_xticklabels(colnames, minor=False)\n ax.set_yticklabels(rownames, minor=False)\n \n if (row==True):\n plt.savefig('order1'+'_row.png')\n else:\n plt.savefig('order1'+'_col.png')", "title": "" }, { "docid": "d0ab639836b6cb7f962cbe25298fb7cc", "score": "0.48830307", "text": "def output_reshape(ct):\n return np.moveaxis(ct, 1, -1)", "title": "" }, { "docid": "04f698291c78d36cfa9d34e1d3b74957", "score": "0.48545033", "text": "def _order_axes(named_axes: Dict[str, np.ndarray], copy: bool = True) -> List[np.ndarray]:\n order = np.arange(len(named_axes.keys()) - (1 if 'data' in named_axes else 0))\n return [(named_axes[chr(x) * y].copy() if copy else named_axes[chr(x) * y])\n for x, y in zip(order % 3 + 120, order // 3 + 1)]", "title": "" }, { "docid": "50b027de83aacb8db60870be68f0b353", "score": "0.48531938", "text": "def _acompcor_mask(confounds_json, anat_mask, compcor_cols_filt, n_compcor):\n collector = []\n for mask in anat_mask:\n cols = _json_mask(compcor_cols_filt, confounds_json, mask)\n cols = _select_compcor(cols, n_compcor)\n collector += cols\n return collector", "title": "" }, { "docid": "2c4c3726ab15065c6ca091bb1675dcc1", "score": "0.48245475", "text": "def Reshape():", "title": "" }, { "docid": "230e09f7fe3ac4e2a82dc19f3c0ff43a", "score": "0.48232546", "text": "def sort (self):\n self._shapes.sort(cmp=lambda a, b: cmp(b.shape.size(), a.shape.size()))", "title": "" }, { "docid": "faad95389439fe7cd1092ff12bcf7a25", "score": "0.48226258", "text": "def argsort(array, *, axis=None):\n ...", "title": "" }, { "docid": "7e944a1208ffb98599cf58fa476e6b46", "score": "0.48223144", "text": "def sortVariants(self, variants):\n bDim = range(self.getBoardDim())\n self.sortedVariants = {}\n # initialize empty lists\n for lenght in bDim:\n self.sortedVariants[lenght] = []\n\n min_lenght = \"inf\"\n for row in bDim:\n for col in bDim:\n vars = variants[row][col]\n if type(vars) != str:\n lenght = len(vars)\n if lenght < min_lenght:\n min_lenght = lenght\n self.sortedVariants[lenght].append((row, col))\n self.setMinVarLen(min_lenght)", "title": "" }, { "docid": "a0581ec0992370ebc91819bc4213b2e3", "score": "0.48201624", "text": "def __sort(self):\n sorter = np.argsort(self.x)\n nondatavars = self.__nondata\n ownvarnames = self.__dict__.keys()\n ownvarnames = [\n i for i in\n filter(lambda a: a not in nondatavars, ownvarnames)\n ]\n for cVarname in ownvarnames:\n self.__dict__[cVarname] = self.__dict__[cVarname][sorter]", "title": "" }, { "docid": "3db481f3ebd37c0ab8507851389c48de", "score": "0.4796329", "text": "def get_c2f_order(img: SpatialImage) -> np.ndarray:\n mask = img.get_data()\n reorder = np.arange(int(np.prod(mask.shape)))\n reorder = reorder.reshape(mask.shape, order='F')\n reorder = reorder[mask.astype(bool)]\n reorder = np.argsort(reorder)\n return reorder", "title": "" }, { "docid": "a35ca1e82f9510a797329e0e60d8d8f0", "score": "0.4790781", "text": "def sort(data, ordering):\n if ordering == 'rows_freq':\n uniques, counts = np.unique(data, return_counts=True, axis=0)\n counter = 0\n for j in counts.argsort()[::-1]:\n #argsort() used to return the indices that would sort an array.\n #[::-1] from end to first\n for z in range(counts[j]):\n data[counter,:,:] = uniques[j,:,:]\n counter += 1\n return data\n elif ordering == 'cols_freq':\n uniques, counts = np.unique(data, return_counts=True, axis=1)\n counter = 0 #\n for j in counts.argsort()[::-1]:\n for z in range(counts[j]):\n data[:,counter,:] = uniques[:,j,:]\n counter += 
1\n return data\n elif ordering == 'rows_dist':\n uniques, counts = np.unique(data, return_counts=True, axis=0)\n # most frequent row in float\n top = uniques[counts.argsort()[::-1][0]].transpose().astype('float32')\n # distances from most frequent row\n distances = np.mean(np.abs(uniques[:,:,0] - top), axis=1)\n # fill in from top to bottom\n counter = 0\n for j in distances.argsort():\n for z in range(counts[j]):\n data[counter,:,:] = uniques[j,:,:]\n counter += 1\n return data\n elif ordering == 'cols_dist':\n uniques, counts = np.unique(data, return_counts=True, axis=1)\n # most frequent column\n top = uniques[:,counts.argsort()[::-1][0]].astype('float32')\n # distances from most frequent column\n distances = np.mean(np.abs(uniques[:,:,0] - top), axis=0)\n # fill in from left to right\n counter = 0\n for j in distances.argsort():\n for z in range(counts[j]):\n data[:,counter,:] = uniques[:,j,:]\n counter += 1\n return data\n elif ordering == 'rows_similarity':\n #global snpsnew\n data = np.squeeze(data, axis=2)\n neigh = NearestNeighbors(len(data), metric='manhattan')\n neigh.fit(data)\n inx = neigh.kneighbors(data)\n middle = np.argmin(inx[0].sum(axis=1))\n data = data[inx[1][middle]]\n data = data[:, :, newaxis]\n return data\n elif ordering == 'cols_similarity':\n data = np.squeeze(data, axis=2)\n data = data.transpose()\n neigh = NearestNeighbors(len(data), metric='manhattan')\n neigh.fit(data)\n inx = neigh.kneighbors(data)\n middle = np.argmin(inx[0].sum(axis=1))\n data = data[inx[1][middle]]\n data = data.transpose()\n data = data[:, :, newaxis]\n return data", "title": "" }, { "docid": "393b75d60b4a14233dc7c48479389b67", "score": "0.4786278", "text": "def sort(self):\n\t\t# Index of sorted elements\n\t\ti=self.lognu.argsort()\n\t\tself.nu,self.nlnu = self.nu[i],self.nlnu[i]\n\t\tself.lognu,self.ll = self.lognu[i],self.ll[i]\n\t\t\n\t\t# Precaution in case the user used the interp method\n\t\tif hasattr(self, 'nlnui'): \n\t\t\tself.nui,self.nlnui = self.nui[i],self.nlnui[i]\n\t\t\tself.lognui,self.lli = self.lognui[i],self.lli[i]", "title": "" }, { "docid": "f6dd6cc149a18092ec841a92c723c4ae", "score": "0.47862294", "text": "def _order_axes_old(named_axes: Dict[str, np.ndarray], copy: bool = True) -> List[np.ndarray]:\n result = []\n for key in ['row', 'column']:\n if key in named_axes:\n result.append(named_axes[key])\n axes_no = len(named_axes.keys()) - (1 if 'data' in named_axes else 0)\n if axes_no > 2:\n order = np.arange(2, axes_no)\n result.extend([(named_axes[chr(x) * y].copy() if copy else named_axes[chr(x) * y])\n for x, y in zip(order % 3 + 120, order // 3 + 1)])\n return result", "title": "" }, { "docid": "595d2a1a72789c3bbdd52a23853fffdc", "score": "0.47720328", "text": "def test_group_design():\r\n for i in np.arange(2):\r\n if i == 0:\r\n categ = (0,1,1,0)\r\n elif i == 1:\r\n categ = (0,1,1,2,2)\r\n\r\n X = pr.group_design(categ)\r\n\r\n assert X.shape[0] == len(categ)\r\n assert X.shape[1] == len(np.unique(categ))\r\n assert np.sum(np.ravel(X)) == len(categ)", "title": "" }, { "docid": "2b93a4a476bffad4b2589c60c50d2641", "score": "0.47665155", "text": "def get_f2c_order(img: SpatialImage) -> np.ndarray:\n mask = img.get_data()\n reorder = np.arange(int(np.prod(img.shape)))\n reorder = reorder.reshape(img.shape, order='C')\n reorder = reorder.flatten(order='F')\n reorder = reorder[mask.flatten(order='F').astype(bool)]\n reorder = np.argsort(reorder)\n return reorder", "title": "" }, { "docid": "622587cb814eb50445953f1b2e77ed43", "score": "0.47541466", "text": "def 
remove_correlations(design_matrix, stim_labels):\n # np.apply_along_axis( function1d, axis, array), will apply function1d to\n # every column independently\n # np.random.permutation() shuffles contents of columns BUT all columns are\n # permuted the same...so correlations between units are NOT REMOVED\n # This is equivalent to permuted trials\n\n Xcorr = np.zeros(design_matrix.shape)\n\n for stim_id in np.unique(stim_labels):\n stim_inds = np.where(stim_labels == stim_id)[0]\n current_vals = design_matrix[stim_inds, :]\n shuffled_vals = np.apply_along_axis(np.random.permutation, 0, current_vals)\n Xcorr[stim_inds, :] = shuffled_vals\n\n return Xcorr", "title": "" }, { "docid": "1f18241862afafd2eee172c64623c969", "score": "0.47392532", "text": "def reshape(self, shape: Tuple[int, ...], order: Text = \"C\") -> ndarray:\n ...", "title": "" }, { "docid": "d5477db9eecbdb73218609646044740f", "score": "0.4736185", "text": "def order_image(self, frame, ordind, sm=False):\n clbr = self.clbr\n img = self.images[frame][clbr.cutting_masks[ordind]]\n try:\n ordr = img.reshape(clbr.dv * 2 + 1, clbr.DIMW)\n except ValueError:\n ordr = img.reshape(clbr.dv * 2 + 1, int(img.shape[0] / (clbr.dv * 2 + 1)))\n if sm:\n return ordr.sum(axis=0)\n else:\n return ordr", "title": "" }, { "docid": "3b72aba89654cde52f4fa8b9e8e418e7", "score": "0.47201246", "text": "def reorder(self, labels):\n if not self.dimension == len(labels):\n raise ValueError(\"dimension mismatch\")\n newindices = list(self.axes.index(label) for label in labels)\n new = self.__class__(\n tuple(self.axes[index] for index in newindices), self.config, self.metadata\n )\n new.photons = numpy.transpose(self.photons, axes=newindices)\n new.contributions = numpy.transpose(self.contributions, axes=newindices)\n return new", "title": "" }, { "docid": "3ee9a0c4f214e1921b3ee2555e981f55", "score": "0.4712366", "text": "def _sort_details(id, scale, mass, mdot, rho, cs):\n inds = np.lexsort((scale, id))\n id = id[inds]\n scale = scale[inds]\n mass = mass[inds]\n mdot = mdot[inds]\n rho = rho[inds]\n cs = cs[inds]\n return id, scale, mass, mdot, rho, cs", "title": "" }, { "docid": "64ad0ff76849cf82961a98d6c117aed8", "score": "0.4709704", "text": "def reorder_analysis(matrix):\n return [reorder.rcm(matrix),\n reorder.rcm_min_degree(matrix),\n reorder.cm(matrix)]", "title": "" }, { "docid": "13f37784b394f04d50396d1c3c1e8d34", "score": "0.47044215", "text": "def __sort_filters_spikes(self):\n for lay_info in self.conv_layinfos:\n for filtr in lay_info['filters']:\n max_indx = np.argsort(filtr[\"max_spikes\"])[::-1]\n filtr[\"max_spikes\"] = np.array(filtr[\"max_spikes\"])[max_indx].tolist()\n filtr[\"max_imgs\"] = np.array(filtr[\"max_imgs\"])[max_indx].tolist()\n filtr[\"max_slices\"] = np.array(filtr[\"max_slices\"])[max_indx].tolist()\n\n avg_indx = np.argsort(filtr[\"avg_spikes\"])[::-1]\n filtr[\"avg_spikes\"] = np.array(filtr[\"avg_spikes\"])[avg_indx].tolist()\n filtr[\"avg_imgs\"] = np.array(filtr[\"avg_imgs\"])[avg_indx].tolist()", "title": "" }, { "docid": "5591354a5fa1066531ae3835091a4fc8", "score": "0.47030693", "text": "def _update_reactions_layout(self,):\n for reaction_index in range(sbnw.nw_getNumRxns(self.h_network)):\n h_reaction = sbnw.nw_getReactionp(self.h_network, reaction_index)\n reaction_id = sbnw.reaction_getID(h_reaction)\n reaction = self.reactions[reaction_id]\n\n reaction.centroid = sbnw.reaction_getCentroid(h_reaction)\n\n reaction.curves = []\n for curve_index in range(sbnw.reaction_getNumCurves(h_reaction)):\n h_curve = 
sbnw.reaction_getCurvep(h_reaction, curve_index)\n reaction.curves.append(Curve(h_curve))", "title": "" }, { "docid": "09d5ce8cb428336908c07afce87c8536", "score": "0.4703043", "text": "def _translate_glob_loc(self, indexes):\n all_comp = [[] for i in range(len(self.vis_dict.keys()))]\n for i_g in indexes:\n i_c, i_a = self._translate_i_glob_loc[i_g]\n all_comp[i_c].append(i_a)\n\n return all_comp", "title": "" }, { "docid": "69de82881f0d10c66f272cd7e1734198", "score": "0.47027138", "text": "def psthcorrtype(trackrecs, pool=False, alpha=0.0005, vmin=0, vmax=1, separatetypeplots=True):\n ntracks = len(trackrecs)\n tracknames = [ trackrec[0].tr.absname for trackrec in trackrecs ]\n rhotype = listarr(np.empty((8, 8))) # init rho cell type 2D array of lists\n npairs = 0 # init npairs\n for tracki, recs in enumerate(trackrecs):\n track = recs[0].tr\n natexps = False\n trackname = recs[0].tr.absname\n if trackname == 'ptc15.tr7c':\n natexps = True\n ssnids, recsecnids = get_nids(recs)\n ssrhos = []\n for rec in recs:\n ssrho = psthcorr(rec, nids=None, ssnids=ssnids, natexps=natexps, plot=False)\n ssrhos.append(ssrho)\n ssrhos = np.asarray(ssrhos) # convert to 3D array\n if pool == False:\n listarr(rhotype) # reset between tracks\n npairs = 0 # reset between tracks\n nn = len(ssnids)\n nanis = np.isnan(ssrhos) # indices of non-nan values\n ssrhos[nanis] = 0 # replace nans with 0s\n maxabsssrhos = core.maxabs(ssrhos, axis=0) # keep only the max rho of each cell pair\n alln = track.alln\n for i in range(nn):\n ni = alln[ssnids[i]] # neuron i\n si = celltype2int[ni.spiketype]\n ri = celltype2int[ni.rftype]\n for j in range(i+1, nn): # use only upper triangle, don't double count cell pairs\n nj = alln[ssnids[j]] # neuron j\n sj = celltype2int[nj.spiketype]\n rj = celltype2int[nj.rftype]\n rho = maxabsssrhos[i, j]\n if rho == 0:\n # ignore this cell pair's rho (they were never simultaneously active) so it\n # doesn't mess up the celltype stats\n continue\n # fill in the upper triangle of rhotype matrix:\n rhotype[si, sj].append(rho)\n rhotype[ri, rj].append(rho)\n # these cross terms should best be left disabled, because they conflate the\n # correlations between spiketype and rftype:\n #rhotype[ri, sj].append(rho)\n #rhotype[si, rj].append(rho)\n npairs += 1\n rhotypemeans = np.zeros(rhotype.shape); rhotypemeans.fill(nan)\n rhotypestds = np.zeros(rhotype.shape); rhotypestds.fill(nan)\n rhotypeps = np.zeros(rhotype.shape); rhotypeps.fill(nan)\n rhotypesigmeans = np.zeros(rhotype.shape); rhotypesigmeans.fill(nan)\n # calculate rho stats for each combination of cell type:\n for i in range(8):\n for j in range(i, 8): # use only upper triangle, don't double count celltype stats\n if len(rhotype[i, j]) > 0:\n rhotypemeans[i, j] = np.mean(rhotype[i, j])\n rhotypestds[i, j] = np.std(rhotype[i, j])\n # 2-sided sample mean ttest relative to 0:\n t, p = ttest_1samp(rhotype[i, j], 0)\n rhotypeps[i, j] = p\n sigis = rhotypeps < alpha # indices of significant deviations of mean from 0\n rhotypesigmeans[sigis] = rhotypemeans[sigis]\n #arrs = [rhotypemeans, rhotypestds, rhotypeps, rhotypesigmeans]\n #plottypes = ['mean', 'stdev', 'pval', 'sigmean']\n arrs = [rhotypesigmeans]\n plottypes = ['sigmean']\n if pool:\n if tracki < ntracks-1:\n continue # only plot once all tracks have been iterated over\n trackname = ', '.join(tracknames)\n for arr, plottype in zip(arrs, plottypes):\n # get symmetric arr by copying upper triangle, transposing to get lower triangle,\n # and adding to arr:\n symarr = nansum([arr, 
np.triu(arr, k=1).T], axis=0)\n thisvmin, thisvmax = nanmin(symarr), nanmax(symarr)\n vmin = min(vmin, thisvmin) # set to manual vmin at most\n vmax = max(vmax, thisvmax) # set to manual vmax at least\n if separatetypeplots:\n figure(figsize=(8, 3))\n # plot spiketypes:\n subplot(121)\n imshow(symarr[:4, :4], vmin=vmin, vmax=vmax, origin='upper', cmap='jet')\n xticks(np.arange(4), spiketypelabels, rotation=90)\n yticks(np.arange(4), spiketypelabels)\n colorbar(ticks=(vmin, vmax), format='%.2f')\n # plot rftypes:\n subplot(122)\n imshow(symarr[4:, 4:], vmin=vmin, vmax=vmax, origin='upper', cmap='jet')\n xticks(np.arange(4), rftypelabels, rotation=90)\n yticks(np.arange(4), rftypelabels)\n colorbar(ticks=(vmin, vmax), format='%.2f')\n plottype += ' separate'\n else: # plot spike and rf types in the same matrix\n figure(figsize=(4, 4))\n imshow(symarr, vmin=vmin, vmax=vmax, origin='upper', cmap='jet')\n xticks(np.arange(8), typelabels, rotation=90)\n yticks(np.arange(8), typelabels)\n colorbar(ticks=(vmin, vmax), format='%.2f')\n plottype += ' combined'\n titlestr = (trackname + ' psthcorrtype ' + plottype +\n ' alpha=%.4f, npairs=%d' % (alpha, npairs))\n gcfm().window.setWindowTitle(titlestr)\n tight_layout(pad=0.4)\n if pool: # only return rhotype if pooling across all tracks\n insigis = np.logical_not(sigis)\n rhotype[insigis] = listarr(rhotype[insigis]) # set insig entries to empty lists\n return rhotype # only significant entires aren't empty", "title": "" }, { "docid": "9dc4cc068b2ce09b984905cee0a06d44", "score": "0.46904805", "text": "def reorder(self, dst_order, arr, src_order=None):\n if dst_order is None:\n dst_order = self.viewer.rgb_order\n if src_order is None:\n src_order = self.rgb_order\n if src_order != dst_order:\n arr = trcalc.reorder_image(dst_order, arr, src_order)\n\n return arr", "title": "" }, { "docid": "f3115bbc4c032f98e992dfc1d9987c5d", "score": "0.46691003", "text": "def argsort(\n a: ArrayLike,\n axis: int = -1,\n kind: Optional[Text] = None,\n order: Optional[Union[Text, Tuple[Text]]] = None,\n) -> Union[ndarray, int]:\n ...", "title": "" }, { "docid": "f0ceabff359ed590cdfdc6bcc1028e95", "score": "0.46617693", "text": "def concatvis():", "title": "" }, { "docid": "106bda774eb9714707046fefa998e398", "score": "0.46616408", "text": "def sort(a, axis=-1, kind=None, order=None):\n ...", "title": "" }, { "docid": "7d47ab247247db8e519e04d1ffadbd4b", "score": "0.46540332", "text": "def __resort(self):\n self.patchesList.sortItems(\n self.patchesList.sortColumn(),\n self.patchesList.header().sortIndicatorOrder())", "title": "" }, { "docid": "6c7891c6e6d6febbd6331fb6def918d7", "score": "0.46433562", "text": "def Sort_Arrays(self):\n\t\tfor i in 
range(len(self.k)):\n\t\t\tself.Theta0.append(self.variables[i][0][0])\n\t\t\tself.Theta1.append(self.variables[i][0][1])\n\t\t\tself.Theta2.append(self.variables[i][0][2])\n\t\t\tself.Theta3.append(self.variables[i][0][3])\n\t\t\tself.Theta4.append(self.variables[i][0][4])\n\t\t\tself.Theta5.append(self.variables[i][0][5])\n\t\t\tself.Theta6.append(self.variables[i][0][6])\n\t\t\tself.delta.append(self.variables[i][0][7])\n\t\t\tself.deltab.append(self.variables[i][0][8])\n\t\t\tself.v.append(self.variables[i][0][9])\n\t\t\tself.vb.append(self.variables[i][0][10])\n\t\t\tself.Phi.append(self.variables[i][0][11])\n\n\t\t\tself.Theta1Deriv.append(self.variables[i][1][0])\n\t\t\tself.Theta2Deriv.append(self.variables[i][1][1])\n\t\t\tself.Theta3Deriv.append(self.variables[i][1][2])\n\t\t\tself.PhiDeriv.append(self.variables[i][1][3])\n\t\t\tself.vbDeriv.append(self.variables[i][1][4])", "title": "" }, { "docid": "f820b7ede6771d5e4843fba1d99a0fa8", "score": "0.46360913", "text": "def unsort(input, indices, dim=1):\n \n \"\"\" dim is the dimension of batch size \"\"\"\n output = input.new(*input.size())\n \n output.scatter_(dim, indices.unsqueeze(0).unsqueeze(2), input)\n \n return output", "title": "" }, { "docid": "ceba9219110f52c141677ba258760067", "score": "0.4634767", "text": "def _reshape_for_cs(dims, data):\n\n # the data should already come shaped correctly for cs\n # but the dims are not yet correct\n\n # the only dimensions that get expanded into faces\n expand_dims = ['j', 'j_g']\n for dim in expand_dims:\n if dim in dims:\n # add face dimension to dims\n jdim = dims.index(dim)\n dims.insert(jdim+1, FACE_DIMNAME)\n assert data.ndim == len(dims), '%r %r' % (data.shape, dims)\n return dims, data", "title": "" }, { "docid": "a378239868d6d1e5ef89bd06e55679de", "score": "0.46341452", "text": "def get_axes_order(self):\n temp = [\"\" for _ in self._permutation[1]]\n # Permutate\n for i, pr in enumerate(self._permutation[1]):\n temp[pr] = self.variable.dimensions[i]\n return tuple(temp)", "title": "" }, { "docid": "7c2d25e6686d5584beedea617793725d", "score": "0.46319005", "text": "def sort(self):\n if not self:\n return\n\n # Sort all three attributes\n for attr in (\"flag_values\", \"_flag_meanings\", \"_flag_masks\"):\n if hasattr(self, attr):\n indices = np.argsort(getattr(self, attr))\n break\n\n for attr in (\"_flag_values\", \"_flag_meanings\", \"_flag_masks\"):\n if hasattr(self, attr):\n array = getattr(self, attr).view()\n array[...] 
= array[indices]", "title": "" }, { "docid": "b38e568d2bb91cb0de8e123822e914f0", "score": "0.46215335", "text": "def ccw_sort(p):\n d = p-np.mean(p,axis=0)\n s = np.arctan2(d[:,0], d[:,1])\n return p[np.argsort(s),:]", "title": "" }, { "docid": "499dc55892310ad88717214cc659801f", "score": "0.46023214", "text": "def order_images(channels):\n bar = Bar('Verifying Z-Stack',max=len(channels.keys()))\n for keys in channels.keys():\n channels[keys].sort()\n bar.next()\n bar.finish()", "title": "" }, { "docid": "5c09adab5de153d70816c66ac01f558e", "score": "0.45822406", "text": "def autocorr_timescale(self, trace):\n acors = []\n for i in range(trace.shape[1]):\n tau, mean, sigma = acor.acor(trace[:, i].real) # Warning, does not work with numpy.complex\n acors.append(tau)\n return np.array(acors)", "title": "" }, { "docid": "50e1aa32ff227988aff918565db0f412", "score": "0.4575556", "text": "def sort(self, Ncol, order):\n self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\n self.arraydata = sorted(self.arraydata, key=operator.itemgetter(Ncol))\n if order == Qt.DescendingOrder:\n self.arraydata.reverse()\n self.emit(SIGNAL(\"layoutChanged()\"))", "title": "" }, { "docid": "d151f4f4190e62d740348231416a29ab", "score": "0.45670125", "text": "def principal_axes(I):\n Ip, C = np.linalg.eig(I)\n indices = np.argsort(-Ip)\n Ip = Ip[indices]\n C = C.T[indices]\n return Ip, C", "title": "" }, { "docid": "688c211fa3b4c08174b7f566988aacbf", "score": "0.45618933", "text": "def sort_to(self, other):\n new_axes = []\n for idx in other.indices:\n if idx not in self.indices:\n raise EinsteinSummationAlignmentError(\"mismatched lhs/rhs\"\n \" indices: ({}) != ({})\".format(\n \", \".join(self.indices),\n \", \".join(other.indices)\n ))\n new_axes.append(self.indices.index(idx))\n other.sliced_tensor._impl.sort_into(self.sliced_tensor._impl, new_axes)", "title": "" }, { "docid": "b1d5f41a9b7a450895875e415d9793a4", "score": "0.45550755", "text": "def reorder_channel(self, data, graph):\r\n if graph == 'TS':\r\n graph_idx = self.TS\r\n elif graph == 'O':\r\n graph_idx = self.original_order\r\n\r\n idx = []\r\n\r\n for chan in graph_idx:\r\n idx.append(self.original_order.index(chan))\r\n\r\n return data[:, idx, :]", "title": "" }, { "docid": "ccbf942ae746e1ee896cce87b3b62f33", "score": "0.45539203", "text": "def dimreduce_means_covs(Means, Covs, redtype='diagonal'):\n n1, d1 = Means[0].shape\n n2, d2 = Means[1].shape\n k = d1\n\n print(n1, d1, n2, d2)\n if redtype == 'diagonal':\n ## Leave Means As Is, Keep Only Diag of Covariance Matrices, Independent DR for Each Task\n Covs[0] = torch.stack([torch.diag(C) for C in Covs[0]])\n Covs[1] = torch.stack([torch.diag(C) for C in Covs[1]])\n elif redtype == 'mds':\n ## Leave Means As Is, Use MDS to DimRed Covariance Matrices, Independent DR for Each Task\n Covs[0] = mds(Covs[0].view(Covs[0].shape[0], -1), output_dim=k)\n Covs[1] = mds(Covs[1].view(Covs[1].shape[0], -1), output_dim=k)\n elif redtype == 'distance_embedding':\n ## Leaves Means As Is, Use Bipartitie MSE Embedding, Which Embeds the Pairwise Distance Matrix, Rather than the Cov Matrices Directly\n print('Will reduce dimension of Σs by embedding pairwise distance matrix...')\n D = torch.zeros(n1, n2)\n print('... computing pairwise bures distances ...')\n for (i, j) in tqdm(itertools.product(range(n1), range(n2))):\n D[i, j] = bures_distance(Covs[0][i], Covs[1][j])\n print('... embedding distance matrix ...')\n U, V = bipartite_mse_embedding(D, k=k)\n Covs = [U, V]\n print(\"Done! 
Σ's Dimensions: {} (Task 1) and {} (Task 2)\".format(\n list(U.shape), list(V.shape)))\n else:\n raise ValueError('Reduction type not recognized')\n return Means, Covs", "title": "" }, { "docid": "d38838b4492c6708216f468289285484", "score": "0.45491737", "text": "def reorder(nmrs, dim_tag_list):\n\n # Check existing tags are in the list of desired tags\n for idx, tag in enumerate(nmrs.dim_tags):\n if tag not in dim_tag_list\\\n and tag is not None:\n raise utils.NIfTI_MRSIncompatible(\n f'The existing tag ({tag}) does not appear '\n f'in the requested tag order ({dim_tag_list}).')\n\n # Create singleton dimensions if required\n original_dims = nmrs.ndim\n new_dim = sum(x is not None for x in nmrs.dim_tags) + 4\n dims_to_add = tuple(range(original_dims, new_dim + 1))\n data_with_singleton = np.expand_dims(nmrs[:], dims_to_add)\n\n # Create list of source indicies\n # Create list of destination indicies\n # Keep track of singleton tags\n source_indicies = []\n dest_indicies = []\n singleton_tags = {}\n counter = 0\n for idx, tag in enumerate(dim_tag_list):\n if tag is not None:\n if tag in nmrs.dim_tags:\n source_indicies.append(nmrs.dim_tags.index(tag) + 4)\n else:\n source_indicies.append(nmrs.ndim + counter)\n counter += 1\n singleton_tags.update({(idx + 5): tag})\n\n dest_indicies.append(idx + 4)\n\n # Sort header extension dim_tags\n dim_n = re.compile(r'^dim_[567]$')\n new_hdr_ext = nmrs.hdr_ext.copy()\n for key in nmrs.hdr_ext:\n if dim_n.match(key):\n # Look for matching _info/_header tags\n if (key + '_info') in nmrs.hdr_ext:\n tmp_info = nmrs.hdr_ext[key + '_info']\n else:\n tmp_info = None\n if (key + '_header') in nmrs.hdr_ext:\n tmp_header = nmrs.hdr_ext[key + '_header']\n else:\n tmp_header = None\n\n new_index = dest_indicies[source_indicies.index(int(key[4]) - 1)] + 1\n new_ind_str = f'{new_index}th'\n new_hdr_ext.set_dim_info(\n new_ind_str,\n nmrs.hdr_ext[key],\n info=tmp_info,\n hdr=tmp_header)\n\n # For any singleton dimensions we've added\n for dim in singleton_tags:\n new_hdr_ext.set_dim_info(f'{dim}th', singleton_tags[dim])\n\n new_header = utils.modify_hdr_ext(\n new_hdr_ext,\n nmrs.header)\n\n new_nmrs = NIFTI_MRS(\n np.moveaxis(data_with_singleton, source_indicies, dest_indicies),\n header=new_header)\n\n return new_nmrs", "title": "" }, { "docid": "8bae1a704416c7874f7adc2ecbcdb8b4", "score": "0.45344096", "text": "def _sort_equiv_features(features, equivlists):\n \n for idx, row in features.iterrows():\n rowdict = row.to_dict()\n for group in equivlists:\n vals = []\n labels = []\n for item in group:\n vals.append(rowdict[item])\n labels.append(item)\n vals.sort()\n vals.reverse()\n for idx2, label in enumerate(labels):\n rowdict[label] = vals[idx2]\n\n for idx2, cname in enumerate(row.index.tolist()):\n row[idx2] = rowdict[cname] \n\n #for group in equivlists:\n \n #res = list(row[group])\n #res.sort()\n #res.reverse()\n #row[group] = res\n pass\n return features", "title": "" }, { "docid": "41664a04d7e2e73f86b70e2c179a52f1", "score": "0.45262906", "text": "def sort(array, *, axis=0):\n ...", "title": "" }, { "docid": "f00bd918b87692d2f888d9066b5e5442", "score": "0.45133105", "text": "def _dynconjtranspose(self):\n return np.conj(self.dyn_spec).transpose()", "title": "" }, { "docid": "9a70fd44453c0699cc4ed527c5da2ac6", "score": "0.44957477", "text": "def lens_cov_pol(shape,wcs,iucov,alpha_pix,lens_order=5,kbeam=None,npixout=None,comm=None):\n from pixell import lensing as enlensing\n\n assert iucov.ndim==4\n ncomp = iucov.shape[0]\n assert ncomp==iucov.shape[1]\n 
assert 1 <= ncomp <= 3\n if len(shape)==2: shape = (1,)+shape\n n = shape[-2]\n assert n==shape[-1]\n\n ucov = iucov.copy()\n ucov = np.transpose(ucov,(0,2,1,3))\n ucov = ucov.reshape((ncomp*n**2,ncomp*n**2))\n\n npix = ncomp*n**2\n\n if comm is None:\n from orphics import mpi\n comm = mpi.MPI.COMM_WORLD\n\n def efunc(vec):\n unlensed = enmap.enmap(vec.reshape(shape),wcs)\n lensed = enlensing.displace_map(unlensed, alpha_pix, order=lens_order)\n if kbeam is not None: lensed = maps.filter_map(lensed,kbeam) # TODO: replace with convolution\n # because for ~(60x60) arrays, it is probably much faster. >1 threads means worse performance\n # with FFTs for these array sizes.\n return np.asarray(lensed).reshape(-1)\n\n \n Scov = np.zeros(ucov.shape,dtype=ucov.dtype)\n for i in range(comm.rank, npix, comm.size):\n Scov[i,:] = efunc(ucov[i,:])\n Scov2 = utils.allreduce(Scov, comm)\n\n Scov = np.zeros(ucov.shape,dtype=ucov.dtype)\n for i in range(comm.rank, npix, comm.size):\n Scov[:,i] = efunc(Scov2[:,i])\n Scov = utils.allreduce(Scov, comm)\n\n \n Scov = Scov.reshape((ncomp,n*n,ncomp,n*n))\n if (npixout is not None) and (npixout!=n):\n Scov = Scov.reshape((ncomp,n,n,ncomp,n,n))\n s = n//2-npixout//2\n e = s + npixout\n Scov = Scov[:,s:e,s:e,:,s:e,s:e].reshape((ncomp,npixout**2,ncomp,npixout**2)) \n Scov = np.transpose(Scov,(0,2,1,3))\n \n \n return Scov", "title": "" }, { "docid": "1a24c082e76cfaee4964ea05602b815a", "score": "0.4494325", "text": "def dimension_sort(odict, kdims, vdims, key_index):\n sortkws = {}\n ndims = len(kdims)\n dimensions = kdims+vdims\n indexes = [(dimensions[i], int(i not in range(ndims)),\n i if i in range(ndims) else i-ndims)\n for i in key_index]\n cached_values = {d.name: [None]+list(d.values) for d in dimensions}\n\n if len(set(key_index)) != len(key_index):\n raise ValueError(\"Cannot sort on duplicated dimensions\")\n else:\n sortkws['key'] = lambda x: tuple(cached_values[dim.name].index(x[t][d])\n if dim.values else x[t][d]\n for i, (dim, t, d) in enumerate(indexes))\n return python2sort(odict.items(), **sortkws)", "title": "" }, { "docid": "aecbe575a0a80c2753fbc7f58da9b8a6", "score": "0.44939438", "text": "def _sort_order_2d(points):\n fire_probe(probe_sort_order_2d)\n scale = points.max() + 1\n return np.argsort(points[0, :]*scale + points[1, :])", "title": "" }, { "docid": "c4e191054273e70d6421c48960d108b4", "score": "0.44926253", "text": "def _reorder_clusters(self):\n # get all clusters from the extractor queue\n clusters = []\n try:\n while True:\n clusters.append(self.extractor_queue.get_nowait())\n except Empty:\n pass\n # sort by S/N\n # parameters in each cluster are dm, snr, toa, downsamp, sb\n snrs = [cluster[1] for cluster in clusters]\n order = np.argsort(snrs)[::-1]\n # put each cluster back on the queue, highest S/N first\n for ind in order:\n cluster = clusters[ind]\n self.extractor_queue.put(cluster)", "title": "" }, { "docid": "30067b751f2f3518bfdb941760437637", "score": "0.44806778", "text": "def remesh(boxes):", "title": "" }, { "docid": "3cd1e781490aa9c6a7fe38d8763dc172", "score": "0.44405037", "text": "def custom_reorder_2q(mat):\n mask = [0, 4, 1, 5, 8, 12, 9, 13, 2, 6, 3, 7, 10, 14, 11, 15]\n return np.reshape(np.reshape(mat, np.size(mat))[mask], np.shape(mat))", "title": "" }, { "docid": "d67ca5694d07cf655accb59087e59b3e", "score": "0.44398516", "text": "def sort_color_grid(palette):\n palette = palette.sort_values(by=['ratio_width'], ascending=False)\n return palette", "title": "" }, { "docid": "9ad3a89c5d54b60b744593318f5fbf06", "score": 
"0.44378185", "text": "def _reshape_grid(self, grid, cols):\n if cols is None: return grid\n flattened = [view for row in grid for view in row if (view is not None)]\n row_num = int(math.ceil(len(flattened) / float(cols)))\n\n reshaped_grid = []\n for rind in range(row_num):\n new_row = flattened[rind*cols:cols*(rind+1)]\n reshaped_grid.append(new_row)\n\n return reshaped_grid", "title": "" }, { "docid": "6786b4a6c6ee54431e05e5fc141d4c41", "score": "0.44370818", "text": "def argsort(self, *args, **kwargs):\n ...", "title": "" }, { "docid": "6786b4a6c6ee54431e05e5fc141d4c41", "score": "0.44370818", "text": "def argsort(self, *args, **kwargs):\n ...", "title": "" }, { "docid": "629402221f6a0100c8af91996bc8964f", "score": "0.44274905", "text": "def sort_contours(cnts):\n # construct the list of bounding boxes and sort them from top to bottom\n bounding_boxes = [cv2.boundingRect(c) for c in cnts]\n (cnts, bounding_boxes) = zip(*sorted(zip(cnts, bounding_boxes), key=lambda b: b[1][0]))\n return (cnts, bounding_boxes)", "title": "" }, { "docid": "d938c6054ea346bb3465c3004106416b", "score": "0.44256645", "text": "def rearrange(rearrange_order, dim=0, *inputs):\n rearranged_inputs = []\n for input_tensor in inputs:\n assert (\n input_tensor.shape[dim] == rearrange_order.shape[0]\n ), \"Rearrange \" \"along dim {0} is incompatible!\".format(dim)\n rearranged_inputs.append(input_tensor.index_select(dim, rearrange_order))\n return tuple(rearranged_inputs)", "title": "" }, { "docid": "c24d61a3e9b39106c3382ded29bd4eae", "score": "0.44247687", "text": "def visnorm():", "title": "" }, { "docid": "17963095323f2118fec0afbecefd204f", "score": "0.44198647", "text": "def sort_rot(dat_set_o, dat_set_c, rot):\n N_o = len(dat_set_o[0])\n N_c = len(dat_set_c[0])\n\n fNum_o = np.zeros(N_o)\n fNum_c = np.zeros(N_c)\n \n for ii in range(N_o):\n fNum_o[ii] = int(dat_set_o[0]['Image'][ii].split('obj')[1][:3]) \n for ii in range(N_c):\n fNum_c[ii] = int(dat_set_c[0]['Image'][ii].split('obj')[1][:3]) \n\n if rot == 1:\n ind_rot_o = np.where((fNum_o>=16)&(fNum_o<=48))\n ind_rot_c = np.where((fNum_c>=16)&(fNum_c<=48))\n if rot == 2:\n ind_rot_o = np.where((fNum_o>=49)&(fNum_o<=74))\n ind_rot_c = np.where((fNum_c>=49)&(fNum_c<=74))\n if rot == 3:\n ind_rot_o = np.where((fNum_o>=75)&(fNum_o<=80))\n ind_rot_c = np.where((fNum_c>=75)&(fNum_c<=80))\n if rot == 4:\n ind_rot_o = np.where((fNum_o>=81)&(fNum_o<=98)) \n ind_rot_c = np.where((fNum_c>=81)&(fNum_c<=98))\n\n dat_o_B_rot = dat_set_o[0][ind_rot_o]\n dat_o_V_rot = dat_set_o[1][ind_rot_o]\n dat_o_R_rot = dat_set_o[2][ind_rot_o]\n dat_o_I_rot = dat_set_o[3][ind_rot_o]\n\n dat_c_B_rot = dat_set_c[0][ind_rot_c]\n dat_c_V_rot = dat_set_c[1][ind_rot_c]\n dat_c_R_rot = dat_set_c[2][ind_rot_c]\n dat_c_I_rot = dat_set_c[3][ind_rot_c]\n\n dat_set_o_rot = [dat_o_B_rot, dat_o_V_rot, dat_o_R_rot, dat_o_I_rot]\n dat_set_c_rot = [dat_c_B_rot, dat_c_V_rot, dat_c_R_rot, dat_c_I_rot]\n\n return dat_set_o_rot, dat_set_c_rot", "title": "" }, { "docid": "9659c1ba9ab212034f039a38554cfafc", "score": "0.44197693", "text": "def reshape(self, *args, **kwargs):\n ...", "title": "" }, { "docid": "676d3914a49f92f1cef568a7fcbcbad5", "score": "0.44163305", "text": "def _reorder_cellpointdata_vtk(resultarr, offsets, types):\n _reorder_data(resultarr, offsets, types, sesam2vtk_elemresults)", "title": "" }, { "docid": "c6dfd5394c90a490c4ce3b56736207f8", "score": "0.44133598", "text": "def order(self):\n if isinstance(self._order, np.ndarray):\n return self._order\n else:\n self._order = 
np.fromiter(itertools.chain(*self.clusters), int)\n return self._order", "title": "" }, { "docid": "1072a4880aac74ef52c5b6775bb3d0be", "score": "0.4403015", "text": "def order_protein_domains(data, domain_composition):\n\n n_dims = len(data.shape)\n n_domains = len(domain_composition)\n n_resids = data.shape[0]\n print('# Residues per domain: '+str(n_resids/n_domains))\n n_resids_per_domain = int(n_resids/n_domains)\n\n swap_inds = np.zeros(n_domains)\n shuffled_data = np.zeros(data.shape)\n\n # Find the whereabouts of the domains so\n # that we can put the domain data at correct position in the matrix.\n for i_domain in range(n_domains):\n swap_inds[i_domain] = np.where(domain_composition == i_domain)[0]\n\n # Flip the matrix rows\n for i_domain in range(n_domains):\n tmp_domain_data = data[i_domain*n_resids_per_domain:(i_domain+1)*n_resids_per_domain]\n domain_ind = int(swap_inds[i_domain])\n\n shuffled_data[domain_ind*n_resids_per_domain:(domain_ind+1)*n_resids_per_domain] = np.copy(tmp_domain_data)\n\n if n_dims > 1:\n data = np.copy(shuffled_data)\n # Flip the matrix columns\n for i_domain in range(n_domains):\n tmp_domain_data = data[:, i_domain*n_resids_per_domain:(i_domain+1)*n_resids_per_domain]\n domain_ind = int(swap_inds[i_domain])\n\n shuffled_data[:, domain_ind*n_resids_per_domain:(domain_ind+1)*n_resids_per_domain] = np.copy(tmp_domain_data)\n\n return shuffled_data", "title": "" }, { "docid": "8b7d8557e628ec87f62303aca2016b0a", "score": "0.4398417", "text": "def pd_num_correl_associations(\n df, colcat=None, mark_columns=False, theil_u=False, plot=True, return_results=False, **kwargs\n):\n # df = convert(df, \"dataframe\")\n col = df.columns\n if colcat is None:\n colcat = list()\n elif colcat == \"all\":\n colcat = col\n corr = pd.DataFrame(index=col, columns=col)\n for i in range(0, len(col)):\n for j in range(i, len(col)):\n if i == j:\n corr[col[i]][col[j]] = 1.0\n else:\n if col[i] in colcat:\n if col[j] in colcat:\n if theil_u:\n corr[col[j]][col[i]] = np_correl_cat_cat_theils_u(\n df[col[i]], df[col[j]]\n )\n corr[col[i]][col[j]] = np_correl_cat_cat_theils_u(\n df[col[j]], df[col[i]]\n )\n else:\n cell = np_correl_cat_cat_cramers_v(df[col[i]], df[col[j]])\n corr[col[i]][col[j]] = cell\n corr[col[j]][col[i]] = cell\n else:\n cell = np_correl_cat_num_ratio(df[col[i]], df[col[j]])\n corr[col[i]][col[j]] = cell\n corr[col[j]][col[i]] = cell\n else:\n if col[j] in colcat:\n cell = np_correl_cat_num_ratio(df[col[j]], df[col[i]])\n corr[col[i]][col[j]] = cell\n corr[col[j]][col[i]] = cell\n else:\n cell, _ = sci.stats.pearsonr(df[col[i]], df[col[j]])\n corr[col[i]][col[j]] = cell\n corr[col[j]][col[i]] = cell\n corr.fillna(value=np.nan, inplace=True)\n if mark_columns:\n marked_columns = [\n \"{} (nom)\".format(col) if col in colcat else \"{} (con)\".format(col) for col in col\n ]\n corr.columns = marked_columns\n corr.index = marked_columns\n if plot:\n pass\n \"\"\"\n plt.figure(figsize=kwargs.get('figsize',None))\n sns.heatmap(corr, annot=kwargs.get('annot',True), fmt=kwargs.get('fmt','.2f'))\n plt.show()\n \"\"\"\n if return_results:\n return corr", "title": "" }, { "docid": "68e6c40034d64edee19b7ed5157991a3", "score": "0.43971658", "text": "def distance_scipy_spatial_corr(z, k=4, metric='correlation'):\n d = scipy.spatial.distance.pdist(z, metric)\n d = scipy.spatial.distance.squareform(d)\n idx = np.argsort(-d)[:, :k]\n d = -np.sort(-d)\n d = d[:, :k]\n\n return d, idx", "title": "" }, { "docid": "43b32c08ef4a2d79d9fdea95ea4139e3", "score": "0.43957835", "text": "def 
test_col_cor_(self):\n for su in [True, False]:\n pca_col_cor = \"pca_col_cor_scale_unit_true.txt\" if su \\\n else \"pca_col_cor_scale_unit_false.txt\"\n self._X_Y_comparison(\"col_cor_\", pca_col_cor, std_unit = su,\n n_components = None)\n for i in np.arange(-10, 10, 0.5):\n self._X_Y_comparison(\"col_cor_\", pca_col_cor, std_unit = su,\n n_components = i)", "title": "" }, { "docid": "5101fd8b23d8597bec615b4eb48a6df3", "score": "0.43953228", "text": "def __call__(self, cols=None):\n # Plots are sorted first by precedence, then grouped by row_precedence\n values = sorted(self.values(),\n key=lambda x: x.metadata.get('precedence', 0.5))\n precedences = sorted(\n set(v.metadata.get('row_precedence', 0.5) for v in values))\n\n coords=[]\n # Can use collections.Counter in Python >= 2.7\n column_counter = dict((i, 0) for i, _ in enumerate(precedences))\n for view in values:\n # Find the row number based on the row_precedences\n row = precedences.index(view.metadata.get('row_precedence', 0.5))\n # Look up the current column position of the row\n col = column_counter[row]\n # The next view on this row will have to be in the next column\n column_counter[row] += 1\n coords.append((row, col, view))\n\n grid = self._reshape_grid(self._grid(coords), cols)\n self._data = map_type(self._grid_to_items(grid))\n return self", "title": "" }, { "docid": "a871f1e72bc35f049b415eb3e51165a6", "score": "0.43880677", "text": "def group_data(cv_result):\n num_methods = 3\n groups = [[], [], []]\n for _, res in cv_result.items():\n tr = np.array(res).T.tolist()\n for ind in range(num_methods):\n groups[ind].append(tr[ind])\n return groups", "title": "" }, { "docid": "fbbcbe80c751912d1bc38db0d6384ddc", "score": "0.43877548", "text": "def test_correlations_2(self):\n smart_explainer = self.smart_explainer\n\n df = pd.DataFrame({\n \"A\": [8, 90, 10, 110],\n \"B\": [4.3, 7.4, 10.2, 15.7],\n \"C\": [\"C8\", \"C8\", \"C9\", \"C9\"],\n \"D\": [1, -3, -5, -10]\n }, index=[8, 9, 10, 11])\n\n output = smart_explainer.plot.correlations(df, max_features=3, facet_col='C')\n\n assert len(output.data) == 2\n assert len(output.data[0].x) == 3\n assert len(output.data[0].y) == 3\n assert output.data[0].z.shape == (3, 3)", "title": "" }, { "docid": "7d369e56fcab924baeb5836e1b3bab60", "score": "0.43687785", "text": "def parallel_sort(proz, divided_annotations_array, image_ids_condensed_sorted):\n # needed vars\n len_divided_annotations_array = len(divided_annotations_array)\n\n # needed main arrays\n divided_annotations_sorted = []\n divided_annotations_chunk_sorted = []\n chunk_sorted = []\n divided_annotations_array_chunks_sorted = []\n border_table = []\n divided_annotations_array_chunks = [None] * (int(len_divided_annotations_array / PICTURE_CAP) + 1)\n if proz == 31: print((int(len_divided_annotations_array / PICTURE_CAP) + 1))\n border_table_chunk = []\n border_table_chunks_array = []\n\n # needed vars\n len_divided_annotations_array_chunks = len(divided_annotations_array_chunks)\n\n # dividing \"divided_annotations_array\" further into chunks of size PICTURE_CAP\n if proz == 31: print(\"dividing divided_annotations_array further into chunks of size PICTURE_CAP\")\n count = 0\n for i in range(len_divided_annotations_array_chunks):\n start = i * PICTURE_CAP\n stop = start + PICTURE_CAP\n if stop > len_divided_annotations_array:\n stop = len_divided_annotations_array\n divided_annotations_array_chunks[i] = divided_annotations_array[start:stop]\n\n # Sorting every chunk\n if proz == 31: print(\"Sorting every chunk\")\n count = 
0\n for chunk in divided_annotations_array_chunks:\n if proz == 31:\n count += 1\n print(count)\n end_image_id_pos = 0\n for image_id in image_ids_condensed_sorted:\n start_image_id_pos = end_image_id_pos\n annotations_for_picture = [a for a in chunk if a[\"image_id\"] == image_id]\n len_annotations_for_picture = len(annotations_for_picture)\n end_image_id_pos += len_annotations_for_picture\n chunk_sorted += annotations_for_picture\n border_table_chunk_entry = {\"image_id\": image_id, \"start_image_id_pos\": start_image_id_pos,\n \"end_image_id_pos\": end_image_id_pos}\n # border_table_chunk_entry = {\"image_id\": image_id, \"start_image_id_pos\": -1, \"end_image_id_pos\": -1}\n # if proz == 31: print(border_table_chunk_entry)\n border_table_chunk.append(border_table_chunk_entry)\n divided_annotations_array_chunks_sorted.append(chunk_sorted)\n chunk_sorted = []\n border_table_chunks_array.append(border_table_chunk)\n border_table_chunk = []\n\n\n # Zipping every chunk to one sorted array\n if proz == 31: print(\"Zipping every chunk to one sorted array\")\n end_of_image_id = 0\n for j in range(len(image_ids_condensed_sorted)):\n image_id_piece_of_all_chunks = []\n start_of_image_id = end_of_image_id\n count = 0\n for b_t in border_table_chunks_array:\n start_image_id_pos_in_chunk = b_t[j][\"start_image_id_pos\"]\n end_image_id_pos_in_chunk = b_t[j][\"end_image_id_pos\"]\n assert b_t[j][\"image_id\"] == image_ids_condensed_sorted[j]\n # if proz == 31:\n # print(start_image_id_pos_in_chunk, end_image_id_pos_in_chunk)\n # if end_image_id_pos_in_chunk > -1:\n length = (end_image_id_pos_in_chunk - start_image_id_pos_in_chunk)\n end_of_image_id += length\n image_id_piece_array_in_chunk = divided_annotations_array_chunks_sorted[count][start_image_id_pos_in_chunk:\n end_image_id_pos_in_chunk]\n\n image_id_piece_of_all_chunks += image_id_piece_array_in_chunk\n image_id_piece_array_in_chunk = []\n count += 1\n border_table_entry = {\"image_id\": image_ids_condensed_sorted[j], \"start_image_id_pos\": start_of_image_id,\n \"end_image_id_pos\": end_of_image_id}\n border_table.append(border_table_entry)\n\n divided_annotations_sorted += image_id_piece_of_all_chunks\n\n return_element = {\"divided_annotations_sorted\": divided_annotations_sorted, \"border_table\": border_table}\n\n return return_element", "title": "" }, { "docid": "c9e797d73a92e526cbd47f6bc682ff02", "score": "0.4366225", "text": "def _sort_clusters(\n clusters: Object1DArray, mat: FloatMatrix,\n) -> Object1DArray:\n # sort the order of the cluster\n clusters_permuted = clusters[\n _sort_coarse_clustermatrix(\n clusters, _coarse_clustermatrix(clusters, mat),\n )\n ]\n\n # sort inside each cluster\n for cluster_idx, cluster in enumerate(clusters_permuted):\n clusters_permuted[cluster_idx] = np.array(cluster)[\n np.argsort(\n np.nanmean(mat[np.ix_(cluster, cluster)], axis=1),\n )[::-1]\n ].tolist()\n return clusters_permuted", "title": "" }, { "docid": "cd8cb83c94036e49012fffeb3e23a2e2", "score": "0.43616974", "text": "def reorder_pixels_gncrsfn(in_data, n_adc, n_col_in_row_blk):\n\n (n_grp, n_pads, _, n_gncrsfn) = in_data.shape\n adc_cols = get_adc_col_array()\n col_grp = get_col_grp()\n\n output_shape = (n_grp, n_pads, n_adc, n_col_in_row_blk, n_gncrsfn)\n out_data = np.zeros(output_shape).astype('uint8')\n pix_sorted = np.zeros(output_shape).astype('uint8')\n\n # pixel reorder inside each block\n for i in range(n_adc * n_col_in_row_blk):\n (ord_adc, ord_col) = adc_cols[i]\n pix_sorted[:, :, ord_adc, ord_col, :] = in_data[:, :, i, :]\n\n # 
ColGrp order from chipscope to P2M\n for i in range(n_pads):\n out_data[:, i, :, :, :] = pix_sorted[:, col_grp[i], :, :, :]\n\n return out_data", "title": "" }, { "docid": "37192c359e93f70833b7c7d144086866", "score": "0.43612826", "text": "def _plot_fourth_row_classification(self, best_results_dict):\n for g_i, g in enumerate(self.genera):\n for m_i, m in enumerate(['unifrac', 'braycurtis']):\n for i_i, i in enumerate(['original_three']):\n if m == 'unifrac':\n norm_abund = 1000\n else:\n norm_abund = 10000\n # The kmeans calculations takes automatically takes up multiple cores and \n # can't be limited. The following num_procs seem to work quite well.\n if i == 'original_three':\n num_proc = 50\n else:\n num_proc = 50\n # If unifrac, we need to incorporate the island_list string \n # diretly when looking for the distance matrix.\n # If braycurtis or jaccard then we will work with a single base \n # distance matrix and remove samples according to which\n # island list we will be working with.\n # If PCA, then we will not be looking for a dist matrix and rather will be looking direclty for a pcoa\n # We will plot the genera per row, and then the dist method and islands on the same row\n col_index = (m_i*2) + i_i\n contour = self._plot_countour_classification(ax=self.contour_ax[g_i, col_index], genus=g, normalisation_abundance=norm_abund, normalisation_method='rai', distance_method=m, snp_only=False, island_list=i, num_proc=num_proc, best_results_dict=best_results_dict)\n if contour:\n self.contour_ax[g_i,col_index].set_title(f'{g}_{m}\\n{i}', fontsize='x-small')\n cbar = self.contour_fig.colorbar(contour, ax=self.contour_ax[g_i,col_index])\n cbar.ax.set_ylabel(\"Agreement\")\n else:\n self.contour_ax[g_i,col_index].set_title(f'no data', fontsize='x-small')", "title": "" }, { "docid": "045fd5435c93aa014b3a62ea576baf09", "score": "0.43604773", "text": "def sort(self, Ncol, order):\n try:\n self.layoutAboutToBeChanged.emit()\n self._data = self._data.sort_values(self._data.columns[Ncol], ascending=not order)\n self.layoutChanged.emit()\n except Exception as e:\n print(e)", "title": "" }, { "docid": "6e1d766727d872443530e3643f43c2c8", "score": "0.43528852", "text": "def reduceDim(data, dim, func='pca'):\r\n try:\r\n pcaFunc = globals()[func]\r\n except KeyError:\r\n raise ValueError('Unknown function to calc principal components')\r\n pc = pcaFunc(data, dim)\r\n return (pc * asmatrix(makeCentered(data)).T).T", "title": "" }, { "docid": "631a5e5c855183c56a7fe386b2ee8c54", "score": "0.43524566", "text": "def argsort(self, axis=0, kind='quicksort', order=None):\n values = self.values\n mask = isnull(values)\n\n if mask.any():\n result = values.copy()\n notmask = -mask\n result[notmask] = np.argsort(values[notmask])\n return Series(result, index=self.index)\n else:\n return Series(np.argsort(values), index=self.index)", "title": "" }, { "docid": "654288becf822dad65171699b1f9edaf", "score": "0.43507448", "text": "def reorder_step_channels(self):\r\n step_channels = step_channels=self.getStepChannels()\r\n\r\n order = []\r\n rest = []\r\n for ii, step_channel in enumerate(step_channels):\r\n nbr_points = 0\r\n # Approximate nbr_points\r\n for range_item in step_channel['step_items']:\r\n if range_item['range_type'] == 'Single':\r\n nbr_points += 1\r\n if range_item['range_type'] == 'Start - Stop':\r\n nbr_points += 2\r\n if range_item['range_type'] == 'Center - Span':\r\n nbr_points += 2\r\n if nbr_points > 1:\r\n order.append(ii)\r\n else:\r\n rest.append(ii)\r\n\r\n order.extend(rest)\r\n\r\n 
reordered_step_channels = [step_channels[ii] for ii in order]\r\n self.scenario['step_channels'] = reordered_step_channels", "title": "" }, { "docid": "fc7e6b93eaa8ca36e9f8837df9cd20d9", "score": "0.4349101", "text": "def layout_tranform(self):\n \n \n custom2xir =GLOBAL_MAP.get_ele(NNDCT_KEYS.CUSTOM_TO_XIR_LIST) \n if custom2xir is None:\n custom2xir = []\n \n def _find_swim_order(ndim):\n return {\n 2: [0, 1],\n 3: [0, 2, 1],\n 4: [0, 2, 3, 1],\n 5: [0, 3, 4, 2, 1]\n }[ndim]\n \n def _find_sink_order(ndim):\n return {\n 2: [0, 1],\n 3: [0, 2, 1],\n 4: [0, 3, 1, 2],\n 5: [0, 4, 3, 1, 2]\n }[ndim]\n \n def _is_dim_transparent(node):\n return node.in_tensors[0].ndim and node.out_tensors[0].ndim and node.in_tensors[0].ndim == node.out_tensors[0].ndim\n \n def _is_shape_transparent(node):\n return node.in_tensors[0].shape and node.out_tensors[0].shape and node.in_tensors[0].shape == node.out_tensors[0].shape\n \n def _have_special_layout(node):\n return node.out_tensors[0].ndim and node.out_tensors[0].ndim >=3\n \n def _is_custom_op(node):\n return isinstance(node.op, base_op.CustomOp) and node.op.type not in custom2xir\n \n def _is_permute_op(node):\n return isinstance(node.op, base_op.Permute)\n \n implicit_ops = [NNDCT_OP.CONV2D, \n NNDCT_OP.DEPTHWISE_CONV2D, \n NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D,\n NNDCT_OP.CONVTRANSPOSE2D,\n NNDCT_OP.MAX_POOL,\n NNDCT_OP.AVG_POOL,\n NNDCT_OP.ADAPTIVEAVGPOOL2D,\n NNDCT_OP.INTERPOLATE,\n NNDCT_OP.UP_SAMPLING,\n NNDCT_OP.RESIZE,\n NNDCT_OP.BATCH_NORM,\n NNDCT_OP.MAX_POOL1D,\n NNDCT_OP.CONV1D,\n NNDCT_OP.BATCH_NORM1D,\n NNDCT_OP.CONV3D,\n NNDCT_OP.DEPTHWISE_CONV3D,\n NNDCT_OP.DEPTHWISE_CONVTRANSPOSE3D,\n NNDCT_OP.CONVTRANSPOSE3D,\n NNDCT_OP.BATCH_NORM3D,\n NNDCT_OP.PIXEL_SHUFFLE,\n NNDCT_OP.PIXEL_UNSHUFFLE,\n NNDCT_OP.RESIZE_3D,\n NNDCT_OP.RESIZE_NEAREST_3D,\n NNDCT_OP.REORG]\n \n special_ops_fn = {\n NNDCT_OP.RESHAPE: shape_attr_transform_fn,\n NNDCT_OP.CONCAT: axis_attr_transform_fn,\n NNDCT_OP.STRIDED_SLICE: slice_attr_transform_fn,\n NNDCT_OP.SUM: reduce_op_attr_transform_fn,\n NNDCT_OP.MAX: reduce_op_attr_transform_fn,\n NNDCT_OP.MEAN: reduce_op_attr_transform_fn,\n NNDCT_OP.SHAPE: axis_attr_transform_fn,\n NNDCT_OP.SOFTMAX: axis_attr_transform_fn,\n NNDCT_OP.ZEROS: shape_attr_transform_fn,\n } \n \n \n # collect insert point for transpose\n insert_pos = []\n for node in self._dev_graph.nodes:\n if node.op.type in implicit_ops:\n insert_pos.append(node)\n\n swim_transpose = defaultdict(list)\n sink_transpose = defaultdict(list)\n \n for node in insert_pos:\n tranpose_order = tuple(_find_swim_order(node.out_tensors[0].ndim))\n swim_transpose[tranpose_order].append(node)\n tranpose_order = tuple(_find_sink_order(node.out_tensors[0].ndim))\n sink_transpose[tranpose_order].append(node)\n \n \n nodes_need_to_remove = []\n transpose_insert_between_swim = defaultdict(list)\n visited = []\n # swim_transpose_order, nodes = next(iter(swim_transpose.items()))\n for swim_transpose_order, nodes in swim_transpose.items():\n for insert_node in nodes:\n q = deque()\n q.append(insert_node)\n visited.append(insert_node)\n insert_node.transpose_order = swim_transpose_order\n while len(q) > 0:\n node = q.popleft()\n for pn in self._dev_graph.parents(node):\n if pn not in visited:\n \n if not _have_special_layout(pn) or pn.op.type in implicit_ops:\n continue\n \n elif pn.op.type in [NNDCT_OP.INPUT, NNDCT_OP.QUANT_STUB, NNDCT_OP.CONST, NNDCT_OP.ZEROS] or _is_dim_transparent(pn) and (not _is_permute_op(pn)) and (not _is_custom_op(pn)):\n pn.transpose_order = 
node.transpose_order\n if pn.op.type in special_ops_fn:\n special_ops_fn[pn.op.type](pn, pn.transpose_order)\n q.append(pn) \n visited.append(pn)\n \n else:\n # pn.transpose_order = [0, 2, 3, 1]\n transpose_insert_between_swim[swim_transpose_order].append((pn, node))\n \n \n index = 0\n for transpose_order, node_pairs in transpose_insert_between_swim.items():\n for pn, cn in node_pairs:\n node_name = \"::\".join([self._dev_graph.name, \"_\".join([pn.name, \"swim_tranpose\", f\"{index}\"])])\n op = base_op.Permute(NNDCT_OP.PERMUTE)\n new_node = Node(node_name, op=op, dtype=pn.dtype, in_quant_part=pn.in_quant_part)\n tensor = Tensor(name=node_name, node=new_node)\n new_node.out_tensors.append(tensor) \n new_node.set_node_attr(new_node.op.AttrName.ORDER, list(transpose_order))\n \n new_node.in_tensors.append(pn.out_tensors[0])\n for i, in_tensor in enumerate(cn.in_tensors):\n if in_tensor is pn.out_tensors[0]:\n cn.in_tensors[i] = new_node.out_tensors[0]\n \n self._dev_graph.add_node(new_node)\n nodes_need_to_remove.append(new_node)\n index += 1\n \n \n if transpose_insert_between_swim:\n self._dev_graph.reconnect_nodes()\n \n # debug\n # print(\"#####swim######\")\n # for node in self._dev_graph.nodes:\n # print(node.op.type, node.name, node.transpose_order)\n \n transpose_insert_between_sink = defaultdict(list)\n visited = []\n for node in self._dev_graph.nodes:\n if node.transpose_order:\n nodes = sink_transpose[tuple(_find_sink_order(len(node.transpose_order)))]\n if node not in nodes:\n nodes.append(node)\n \n for sink_transpose_order, nodes in sink_transpose.items():\n for insert_node in nodes:\n q = deque()\n q.append(insert_node)\n visited.append(insert_node)\n while len(q) > 0:\n node = q.popleft()\n for cn in self._dev_graph.children(node):\n if cn not in visited:\n if cn.op.type in implicit_ops:\n continue\n elif cn.op.type == NNDCT_OP.SHAPE:\n visited.append(cn)\n if node.transpose_order:\n special_ops_fn[cn.op.type](cn, node.transpose_order)\n continue\n elif cn.transpose_order:\n q.append(cn)\n visited.append(cn)\n elif _is_dim_transparent(cn) and (not _is_permute_op(cn)) and (not _is_custom_op(cn)):\n cn.transpose_order = node.transpose_order\n q.append(cn)\n visited.append(cn)\n if cn.op.type in special_ops_fn:\n special_ops_fn[cn.op.type](cn, cn.transpose_order)\n else:\n transpose_insert_between_sink[sink_transpose_order].append((node, cn))\n \n \n \n index = 0\n for transpose_order, node_pairs in transpose_insert_between_sink.items():\n for pn, cn in node_pairs:\n node_name = \"::\".join([self._dev_graph.name, \"_\".join([pn.name, \"sink_tranpose\", f\"{index}\"])])\n op = base_op.Permute(NNDCT_OP.PERMUTE)\n new_node = Node(node_name, op=op, dtype=pn.dtype, in_quant_part=cn.in_quant_part)\n tensor = Tensor(name=node_name, node=new_node, shape=pn.out_tensors[0].shape)\n new_node.out_tensors.append(tensor) \n new_node.set_node_attr(new_node.op.AttrName.ORDER, list(transpose_order))\n \n new_node.in_tensors.append(pn.out_tensors[0])\n for i, in_tensor in enumerate(cn.in_tensors):\n if in_tensor is pn.out_tensors[0]:\n cn.in_tensors[i] = new_node.out_tensors[0]\n \n self._dev_graph.add_node(new_node)\n nodes_need_to_remove.append(new_node)\n index += 1\n \n \n if transpose_insert_between_sink: \n self._dev_graph.reconnect_nodes()\n \n # debug\n # print(\"#####sink######\")\n # for node in self._dev_graph.nodes:\n # print(node.op.type, node.name, node.transpose_order)\n neighbor_broadcast = {}\n for node in self._dev_graph.nodes:\n if len(node.in_nodes) <= 1 or node in 
implicit_ops:\n continue\n if all([node.transpose_order is None for node in self._dev_graph.parents(node)]) or all([node.transpose_order is not None for node in self._dev_graph.parents(node)]):\n continue\n #if node.out_tensors[0].dtype != \"float32\":\n # continue\n transpose_order = None\n for pn in self._dev_graph.parents(node):\n transpose_order = pn.transpose_order\n if transpose_order is not None:\n break\n \n neighbor_broadcast[node] = transpose_order\n\n have_neighbors = False\n for node, transpose_order in neighbor_broadcast.items():\n index = 0\n for pn in self._dev_graph.parents(node):\n if pn.transpose_order is None and pn.out_tensors[0].ndim and node.out_tensors[0].ndim and pn.out_tensors[0].ndim == node.out_tensors[0].ndim:\n # pn.transpose_order = node.transpose_order\n node_name = \"::\".join([self._dev_graph.name, \"_\".join([node.name, \"neighbor_transpose\", f\"{index}\"])])\n op = base_op.Permute(NNDCT_OP.PERMUTE)\n new_node = Node(node_name, op=op, dtype=node.dtype, in_quant_part=pn.in_quant_part)\n tensor = Tensor(name=node_name, node=new_node)\n new_node.out_tensors.append(tensor) \n new_node.set_node_attr(new_node.op.AttrName.ORDER, list(transpose_order))\n new_node.in_tensors.append(pn.out_tensors[0])\n \n for i, in_tensor in enumerate(node.in_tensors):\n if in_tensor is pn.out_tensors[0]:\n node.in_tensors[i] = new_node.out_tensors[0]\n \n index += 1\n self._dev_graph.add_node(new_node)\n nodes_need_to_remove.append(new_node)\n have_neighbors = True\n \n if have_neighbors:\n self._dev_graph.reconnect_nodes()\n \n # Debug\n # print(\"####neightbor######\")\n # for node in self._dev_graph.nodes:\n # print(node.op.type, node.name, node.transpose_order) \n # remove consecutive transpose\n \n def merge_father_and_child(node, visited, transpose_group, reserverd_nodes):\n visited.append(node)\n if _is_permute_op(node):\n if node.out_nodes and all([_is_permute_op(cn) for cn in self._dev_graph.children(node)]):\n transpose_group.append(node)\n else:\n transpose_group.append(node)\n \n order = []\n reserved_trans = None\n for trans in transpose_group:\n if trans not in nodes_need_to_remove:\n reserved_trans = trans\n \n if not order:\n order = trans.node_attr(trans.op.AttrName.ORDER)\n else:\n new_order = len(order) * [None]\n tmp_order = trans.node_attr(trans.op.AttrName.ORDER)\n for i in range(len(order)):\n t_i = order.index(i)\n new_order[i] = tmp_order.index(t_i)\n order = new_order \n \n if reserved_trans is None:\n reserved_trans = transpose_group[-1]\n \n reserved_trans.set_node_attr(reserved_trans.op.AttrName.ORDER, order)\n reserverd_nodes.append(reserved_trans)\n \n transpose_group.clear()\n\n for cn in self._dev_graph.children(node):\n if cn not in visited:\n merge_father_and_child(cn, visited, transpose_group, reserverd_nodes)\n \n def merge_brothers(reserverd_nodes):\n remove_nodes = []\n for node in self._dev_graph.nodes:\n if len(node.out_nodes) > 1 and all([_is_permute_op(cn) for cn in self._dev_graph.children(node)]):\n need_merge = True\n order = None\n for trans_node in self._dev_graph.children(node):\n if order is not None:\n if order != trans_node.node_attr(trans_node.op.AttrName.ORDER):\n need_merge = False\n break\n else:\n order = trans_node.node_attr(trans_node.op.AttrName.ORDER)\n \n if need_merge:\n reserverd_node = None\n for trans_node in self._dev_graph.children(node):\n if trans_node not in nodes_need_to_remove:\n reserverd_node = trans_node\n \n if reserverd_node is None:\n reserverd_node = self._dev_graph.children(node)[0]\n \n for 
trans_node in self._dev_graph.children(node):\n if trans_node is not reserverd_node and trans_node in reserverd_nodes:\n remove_nodes.append(trans_node)\n \n for cn in self._dev_graph.children(trans_node):\n index = cn.in_tensors.index(trans_node.out_tensors[0])\n cn.in_tensors[index] = reserverd_node.out_tensors[0] \n \n for node in remove_nodes:\n self._dev_graph.remove_node_forcely(node)\n \n if remove_nodes:\n self._dev_graph.reconnect_nodes() \n \n source_nodes = []\n for node in self._dev_graph.nodes:\n if not node.in_tensors:\n source_nodes.append(node)\n \n transpose_group = []\n reserverd_nodes = []\n visited = []\n for source in source_nodes:\n merge_father_and_child(source, visited, transpose_group, reserverd_nodes)\n \n nodes_need_to_remove = [node for node in nodes_need_to_remove if node not in reserverd_nodes]\n \n for node in reserverd_nodes:\n order = node.node_attr(node.op.AttrName.ORDER)\n keep_order = True\n if any([index != dim for index, dim in enumerate(order)]):\n keep_order = False\n if keep_order:\n nodes_need_to_remove.append(node)\n \n for node in nodes_need_to_remove:\n self._dev_graph.remove_node(node)\n \n merge_brothers(reserverd_nodes)\n # debug\n # print(\"#####finalize######\") \n # for node in self._dev_graph.nodes:\n # print(node.op.type, node.name, node.transpose_order)", "title": "" }, { "docid": "e93c6175dcbd4e290f8c6ba317ceb423", "score": "0.434843", "text": "def general_axis(data, order=0):\r\n direction_tensor = np.cov(data.T[:3, :])\r\n # print direction_tensor\r\n eigen_values, eigen_vectors = np.linalg.eigh(direction_tensor, UPLO='U')\r\n eigen_values_order = eigen_values.argsort()[::-1]\r\n cone_axis = eigen_vectors[:,eigen_values_order[order]]\r\n return cone_axis/np.linalg.norm(cone_axis)", "title": "" }, { "docid": "55b9b79892377577832a4efe0a2b7bee", "score": "0.43476322", "text": "def cum_dist_rev(residuals, errors): ###Function to plot a mirror image of the cumulative distribution to ensure symmetry - I don't really use this function\n\t\n\tall_res, all_err = res_cat(residuals, errors) #Concatenate errors and residuals\n\tnum_res = len(all_res) \n\ty_ax = np.linspace(0, 1, num_res)\n\tnormalized_res = all_res/all_err\n\trev_norm_res = normalized_res[::-1] #Reverse normalized residuals\n\tcdist = np.sort(rev_norm_res)\n\treturn y_ax, cdist", "title": "" }, { "docid": "3ca2028af55d8910f82e1831805e385b", "score": "0.43466708", "text": "def nsga_sort(objVals, returnFronts=False):\n fronts = getFronts(objVals)\n\n # Rank each individual in each front by crowding distance\n for f in range(len(fronts)):\n x1 = objVals[fronts[f],0]\n x2 = objVals[fronts[f],1] \n crowdDist = getCrowdingDist(x1) + getCrowdingDist(x2)\n frontRank = np.argsort(-crowdDist)\n fronts[f] = [fronts[f][i] for i in frontRank]\n \n # Convert to ranking\n tmp = [ind for front in fronts for ind in front] \n rank = np.empty_like(tmp)\n rank[tmp] = np.arange(len(tmp))\n\n if returnFronts is True:\n return rank, fronts\n else:\n return rank", "title": "" }, { "docid": "908293e9deb4b9a19516d89635ea88ae", "score": "0.4345683", "text": "def _reshape(tmp):\n tmp = numpy.c_[tmp[:, 0:1], tmp, tmp[:, -1:]]\n tmp = numpy.r_[tmp[0:1, :], tmp, tmp[-1:, :]]\n return tmp", "title": "" }, { "docid": "7877b7ea5cb2272565c938c459f02a05", "score": "0.4345631", "text": "def sortAxes(self, rows, itemGetter, x_index, y_index):\n\n rows = sorted(rows, key=itemGetter)\n xaxis = []\n yaxis = []\n for r in rows:\n xaxis.append(r[x_index])\n yaxis.append(r[y_index])\n\n return {'xaxis': xaxis, 'yaxis': 
yaxis}", "title": "" }, { "docid": "ef87531b20295c9d0da4b061892230e0", "score": "0.4345455", "text": "def make_cs_prekspace(shape_cs, preshape_cs, swapindex):\n\n p = self.p\n\n if p['apptype'] in ['im2D']:\n\n pass\n\n elif p['apptype'] in ['im3D']:\n\n kspace = np.vectorize(complex)(self.data[:,0::2], self.data[:,1::2])\n kspace = np.reshape(kspace, preshape_cs, order='F')\n kspace = np.reshape(kspace, shape_cs, order='C')\n print('kspace shape: {}'.format(kspace.shape))\n kspace = np.moveaxis(kspace, swapindex, [0,1,2,3,4])\n #kspace = np.moveaxis(kspace, [0,4,1,2,3], [0,1,2,3,4])\n\n return kspace\n\n elif p['apptype'] in ['im2Depi']:\n\n pass\n\n elif p['apptype'] in ['im2Dfse']:\n\n pass\n \n else:\n print('apptype not implemented')", "title": "" }, { "docid": "25829b5f1518226c2f40e85b42df1d4a", "score": "0.43452215", "text": "def ext2intdc(pdc):\n\n ##----- Check grid numbering -----\n griddc = unique(pdc['busdc'][:,GRIDDC])\n\n if griddc.shape[0] > 1 and any(gradient(sort(griddc))>1.):\n sys.stderr.write('Non-successive dc grid numbering detected\\n')\n\n ##----- Permutation of dc bus matrix -----\n ## Part 1: Group all dc busses without ac grid connection\n noacbusi = where(pdc['busdc'][:,BUSAC_I] == 0)[0]\n acbusi = where(pdc['busdc'][:,BUSAC_I] )[0]\n i2edcpmt = r_[acbusi,noacbusi]\n pdc['busdc'] = pdc['busdc'][i2edcpmt,:]\n\n ## Part 2: Sort dc busses based on dc grid number\n busdcext = c_[pdc['busdc'],i2edcpmt]\n busdcext = busdcext[busdcext[:,GRIDDC].argsort()]\n pdc['busdc'] = busdcext[:,:-1]\n i2edcpmt = busdcext[:,-1].astype(int)\n\n ##----- Rename dc nodes -----\n i2edc = pdc['busdc'][:, BUSDC_I].astype(int)\n e2idc = zeros(max(i2edc) + 1)\n e2idc[i2edc] = arange(1,pdc['busdc'].shape[0]+1)\n i2edc = r_[[0],i2edc]\n\n pdc['busdc'][:, BUSDC_I] = e2idc[ pdc['busdc'][:, BUSDC_I].astype(int) ]\n pdc['convdc'][:, CONV_BUS] = e2idc[ pdc['convdc'][:, CONV_BUS].astype(int) ]\n pdc['branchdc'][:, F_BUSDC] = e2idc[ pdc['branchdc'][:, F_BUSDC].astype(int) ]\n pdc['branchdc'][:, T_BUSDC] = e2idc[ pdc['branchdc'][:, T_BUSDC].astype(int) ]\n\n return i2edcpmt, i2edc, pdc", "title": "" }, { "docid": "8f353498f363bc5eee620b2ba9d5f41e", "score": "0.43433073", "text": "def make_group_corr_mat(df):\n\n # for each subject do the following\n \n for i, (sub, f_id) in enumerate(df[['SUB_ID', 'FILE_ID']].values):\n \n #read each subjects aal roi time series files\n ts_df = pd.read_table('DATA/{}_rois_aal.1D'.format(f_id))\n\n #create a correlation matrix from the roi all time series files\n corr_mat_r = ts_df.corr()\n #the correlations need to be transformed to Fisher z, which is\n #equivalent to the arctanh function.\n corr_mat_z = np.arctanh(corr_mat_r)\n \n #for the first subject, add a correlation matrix of zeros that is the same dimensions as the aal roi-roi matrix\n if i == 0:\n all_corr_mat = np.zeros([corr_mat_z.shape[0], corr_mat_z.shape[1], len(df)])\n\n #now add the correlation matrix you just created for each subject to the all_corr_mat matrix (3D)\n all_corr_mat[:, :, i] = corr_mat_z\n \n #create the mean correlation matrix (ignore nas - sometime there are some...)\n av_corr_mat = np.nanmean(all_corr_mat, axis=2)\n #create the group covariance matrix (ignore nas - sometime there are some...)\n var_corr_mat = np.nanvar(all_corr_mat, axis=2)\n \n return all_corr_mat, av_corr_mat, var_corr_mat", "title": "" }, { "docid": "e03ef24b9eb7c7dbcfedcf44cd921039", "score": "0.43417847", "text": "def _sortLayers(self):\n self.layers.sort(lambda l1, l2 : cmp(l1.order, l2.order))\n 
self._sort_needed = False", "title": "" }, { "docid": "69a81430276c71d0c8fdce05725e36d0", "score": "0.43416956", "text": "def sort_contours(cnts, method=\"left-to-right\"):\n reverse = False\n i = 0 # handle if we need to sort in reverse\n if method == \"right-to-left\" or method == \"bottom-to-top\":\n reverse = True \n # handle if we are sorting against the y-coordinate rather than\n # the x-coordinate of the bounding box\n if method == \"top-to-bottom\" or method == \"bottom-to-top\":\n i = 1\n # construct the list of bounding boxes and sort them from top to\n # bottom\n boundingBoxes = [cv2.boundingRect(c) for c in cnts]\n if method == \"biggest\":\n (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),\n key=lambda b:b[1][2]+b[1][3], reverse=True))\n else:\n (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),\n key=lambda b:b[1][i], reverse=reverse))\n # return the list of sorted contours and bounding boxes\n return (cnts, boundingBoxes)", "title": "" }, { "docid": "567041835288034decb9f3406fa7c5f3", "score": "0.4338919", "text": "def build_shared_sorted_neuronIDs(ratemap, included_unit_neuron_IDs, sort_ind):\n if not isinstance(sort_ind, np.ndarray):\n sort_ind = np.array(sort_ind)\n assert np.size(included_unit_neuron_IDs) == np.size(sort_ind), f\"`sort_ind` should be the indicies to sort `included_unit_neuron_IDs`.\"\n\n #TODO 2023-06-29 06:50: - [ ] SOOO CLOSE. This is the right way to do it... the way that's done in `neuropy.plotting.ratemaps.plot_ratemap_1D`, but because i'm trying to plot the ratemaps as a heatmap I need to fill the missing entries with appropriately sized np.nans or something.\n active_maps, title_substring, included_unit_indicies = _help_plot_ratemap_neuronIDs(ratemap, included_unit_neuron_IDs=included_unit_neuron_IDs, debug_print=True)\n n_neurons = len(included_unit_indicies) # n_neurons includes Non-active neurons without a placefield if they're provided in included_unit_indicies.\n if not isinstance(included_unit_indicies, np.ndarray):\n included_unit_indicies = np.array(included_unit_indicies)\n included_unit_indicies\n\n needed_empty_map_shape = np.shape(active_maps)[1:]\n\n sorted_included_unit_indicies = included_unit_indicies[sort_ind]\n rediculous_final_sorted_all_included_pfmap = []\n rediculous_final_sorted_all_included_neuron_ID = []\n\n for i, curr_included_unit_index in enumerate(sorted_included_unit_indicies):\n # `curr_included_unit_index` is either an index into the `included_unit_neuron_IDs` array or None\n ### Three things must be considered for each \"row\" of the plot: 1. the pfmap curve values, 2. the cell id label displayed to the left of the row, 3. 
the color which is used for the row.\n if curr_included_unit_index is not None:\n # valid neuron ID, access like normal\n pfmap = active_maps[curr_included_unit_index]\n # normal (non-shared mode)\n curr_ratemap_relative_neuron_IDX = curr_included_unit_index\n curr_neuron_ID = ratemap.neuron_ids[curr_ratemap_relative_neuron_IDX]\n \n else:\n # invalid neuron ID, generate blank entry\n curr_ratemap_relative_neuron_IDX = None # This neuron_ID doesn't correspond to a neuron_IDX in the current ratemap, so we'll mark this value as None\n assert included_unit_neuron_IDs is not None\n curr_neuron_ID = included_unit_neuron_IDs[sort_ind[i]]\n\n # pfmap = np.zeros((np.shape(active_maps)[1],)) # fully allocated new array of zeros\n pfmap = np.zeros(needed_empty_map_shape) # fully allocated new array of zeros\n \n rediculous_final_sorted_all_included_pfmap.append(pfmap)\n rediculous_final_sorted_all_included_neuron_ID.append(curr_neuron_ID)\n\n rediculous_final_sorted_all_included_neuron_ID = np.array(rediculous_final_sorted_all_included_neuron_ID)\n \n rediculous_final_sorted_all_included_pfmap = np.vstack(rediculous_final_sorted_all_included_pfmap)\n # rediculous_final_sorted_all_included_pfmap.shape # (68, 117)\n return rediculous_final_sorted_all_included_neuron_ID, rediculous_final_sorted_all_included_pfmap", "title": "" } ]
34065d080ef4038154acee194ae6693c
Get a random note sequence from training set.
[ { "docid": "afe2ca4defa15ef4b4b0f50983be91fd", "score": "0.5828556", "text": "def get_random_sequence_i(i,direction,sequences,durseqs,chordseqs,lows,highs,spseq):\n i += direction*1\n if i >= len(sequences)-1 or i <= 0:\n direction *= -1\n notes = sequences[i]\n durs = durseqs[i]\n chordseq = chordseqs[i]\n chordkeys_onehot = [x[0] for x in chordseq]\n chordkeys = [x[0] for x in chordseq]\n chordnotes = [x[1] for x in chordseq]\n #print(chordnotes)\n low = lows[i]\n high = highs[i]\n sequence_length = len(notes)\n n0 = copy.deepcopy(spseq[i][0])\n n1 = copy.deepcopy(spseq[i][1])\n n2 = copy.deepcopy(spseq[i][2])\n n3 = copy.deepcopy(spseq[i][3])\n \n # start_pitch = spseq[i][0]\n # start_duration = spseq[i][1]\n # start_beat = spseq[i][2]\n # start_chordkey = spseq[i][3]\n # start_dura = spseq[i][4]\n return i,direction,notes,durs,chordkeys,chordkeys_onehot,chordnotes,low,high,sequence_length,n0,n1,n2,n3", "title": "" } ]
[ { "docid": "9efef894a4effa2c2a1f8c61a64c5a52", "score": "0.65925586", "text": "def playRandom(self):\r\n\t\tindex = random.randint(0, len(self.notes)-1)\r\n\t\tnote = list(self.notes.values())[index]\r\n\t\t#return note\r\n\t\tnote.play()", "title": "" }, { "docid": "27c293fdfdbad760c9135fca1c0dec19", "score": "0.62792486", "text": "def generate_notes_gan(pitchnames, notes, model, sequence_length, temperature=1.0):\n # pick a random sequence from the input as a starting point for the prediction\n start = np.random.randint(0, len(notes)-sequence_length-1)\n\n note_to_int = dict((note, number) for number, note in enumerate(pitchnames)) \n int_to_note = dict((number, note) for number, note in enumerate(pitchnames))\n \n pattern = notes[start: (start + sequence_length)] \n prediction_output = []\n patterns = []\n\n # generate 500 notes, roughly two minutes of music\n\n prediction_input = np.zeros((1, sequence_length, len(pitchnames)))\n for j, note in enumerate(pattern):\n prediction_input[0, j, note_to_int[note]] = 1.0\n preds = model.predict(prediction_input, verbose=0)[0] \n\n for elem in list(preds):\n next_index = sample(elem, temperature=temperature)\n next_note = int_to_note[next_index]\n \n prediction_output.append(next_note)\n\n patterns.append(next_index)\n\n return prediction_output, patterns", "title": "" }, { "docid": "8f579437a87365bef39fc64b3b121b29", "score": "0.6234844", "text": "def generate_notes(model, network_input, pitchnames, n_vocab,seqLen):\r\n # pick a random sequence from the input as a starting point for the prediction\r\n start = np.random.randint(0, len(network_input)-1)\r\n\r\n int_to_note = dict((number, note) for number, note in enumerate(pitchnames))\r\n \r\n pattern = network_input[start]\r\n prediction_output = []\r\n\r\n # generate 500 notes\r\n for note_index in range(seqLen):\r\n prediction_input = np.reshape(pattern, (1, len(pattern), 1))\r\n prediction_input = prediction_input / float(n_vocab)\r\n\r\n prediction = model.predict(prediction_input, verbose=0)\r\n\r\n index = np.argmax(prediction)\r\n result = int_to_note[index]\r\n prediction_output.append(result)\r\n\r\n pattern.append(index)\r\n pattern = pattern[1:len(pattern)]\r\n\r\n return prediction_output", "title": "" }, { "docid": "f9d666f35a6000d8920cb14fbb906930", "score": "0.6217913", "text": "def generate_notes(pitchnames, notes, model, sequence_length, temperature=1.0):\n # pick a random sequence from the input as a starting point for the prediction\n start = np.random.randint(0, len(notes)-sequence_length-1)\n\n note_to_int = dict((note, number) for number, note in enumerate(pitchnames)) \n int_to_note = dict((number, note) for number, note in enumerate(pitchnames))\n \n pattern = notes[start: (start + sequence_length)] \n prediction_output = []\n patterns = []\n\n # generate 500 notes, roughly two minutes of music\n for note_index in range(100):\n prediction_input = np.zeros((1, sequence_length, len(pitchnames)))\n for j, note in enumerate(pattern):\n prediction_input[0, j, note_to_int[note]] = 1.0\n preds = model.predict(prediction_input, verbose=0)[0] \n \n next_index = sample(preds, temperature=temperature)\n next_note = int_to_note[next_index]\n\n pattern = pattern[1:]\n pattern.append(next_note)\n\n prediction_output.append(next_note)\n\n patterns.append(next_index)\n \n return prediction_output, patterns", "title": "" }, { "docid": "81b76a4ead32cbcb5f21373c8ccb83f1", "score": "0.6202551", "text": "def generate_notes(model, network_input, pitchnames, n_vocab):\n # pick a random 
sequence from the input as a starting point for the prediction\n start = numpy.random.randint(0, len(network_input)-1)\n\n int_to_note = dict((number, note) for number, note in enumerate(pitchnames))\n\n pattern = network_input[1]\n prediction_output = []\n\n # generate 500 notes\n for note_index in range(500):\n prediction_input = numpy.reshape(pattern, (1, len(pattern), 1))\n prediction_input = prediction_input / float(n_vocab)\n\n prediction = model.predict(prediction_input, verbose=0)\n\n index = numpy.argmax(prediction)\n result = int_to_note[index]\n prediction_output.append(result)\n\n pattern.append(index)\n pattern = pattern[1:len(pattern)]\n\n return prediction_output", "title": "" }, { "docid": "8d43c0cded04ba66e33d3a103057194d", "score": "0.61905444", "text": "def getRandomTrainExample(self):\n\n\t\tindex = np.random.randint(self.nTrain)\n\t\tdata = self.trainSet[index,1:]\n\t\tlabel = self.trainSet[index,0]\n\t\tlabelVector = (label == self.labels).astype(int)\n\n\t\treturn [{'data': data,\n\t\t\t\t 'label': labelVector}]", "title": "" }, { "docid": "6fe7d164a7402d0acaa171e5fb275936", "score": "0.6147312", "text": "def generate_notes(model, network_input, int_to_note, n_vocab, num_notes):\n # pick a random sequence from the input as a starting point for the prediction\n start = np.random.randint(0, len(network_input) - 1)\n\n pattern = network_input[start]\n prediction_output = []\n\n # generate 100 notes\n for note_index in range(num_notes):\n prediction_input = np.reshape(pattern, (1, len(pattern), 1))\n\n prediction = model.predict(prediction_input, verbose=0)\n\n index = np.argmax(prediction)\n result = int_to_note[index]\n prediction_output.append(result)\n\n # pattern.append(index)\n pattern = np.append(pattern, [index])\n pattern = pattern[1:]\n\n return prediction_output", "title": "" }, { "docid": "7a6e1c4052d2ff3dadf319ae980b4cf5", "score": "0.5993876", "text": "def get_random_sequence(sequences,durseqs,chordseqs,lows,highs,spseq):\n i = np.random.randint(len(sequences))\n notes = sequences[i]\n durs = durseqs[i]\n chordseq = chordseqs[i]\n chordkeys = np.array([x[0] for x in chordseq])\n chordkeys_onehot = [x[0] for x in chordseq]\n chordkeys = [x[0] for x in chordseq]\n chordnotes = [x[1] for x in chordseq]\n low = lows[i]\n high = highs[i]\n sequence_length = len(notes)\n n0 = spseq[i][0]\n n1 = spseq[i][1]\n n2 = spseq[i][2]\n n3 = spseq[i][3]\n \n # start_pitch = spseq[i][0]\n # start_duration = spseq[i][1]\n # start_beat = spseq[i][2]\n # start_chordkey = spseq[i][3]\n # start_dura = spseq[i][4]\n return notes,durs,chordkeys,chordkeys_onehot,chordnotes,low,high,sequence_length,n0,n1,n2,n3", "title": "" }, { "docid": "a2eaec6637b6267a5035e1d9d0c14a04", "score": "0.5958698", "text": "def generate(**kwargs):\n #load the notes used to train the model\n \n song = kwargs.get('song', None)\n\n with open('data/tnotes', 'rb') as filepath:\n notes = pickle.load(filepath)\n\n # Get all pitch names\n pitchnames = sorted(set(item for item in notes))\n\n print(len(notes))\n freqs={}\n for i in notes:\n if i in freqs.keys():# if key is present in the list, just append the value\n freqs[i] = freqs[i]+1\n else:\n freqs[i] = 1 # else create a empty list as value for the key\n print('most common element:')\n print(freqs[max(freqs, key=freqs.get)])\n print(max(freqs, key=freqs.get))\n print('least common element')\n print(freqs[min(freqs, key=freqs.get)])\n print(min(freqs, key=freqs.get))\n print(len(freqs))\n print(freqs)\n #print_distribution(freqs)\n #exit(1)\n # Get all pitch 
names\n n_vocab = len(set(notes))\n\n print(n_vocab)\n if song == None:\n network_input, normalized_input = prepare_sequences(notes, pitchnames, n_vocab)\n model = create_network(normalized_input, n_vocab)\n prediction_output = generate_notes(model, network_input, pitchnames, n_vocab)\n else:\n network_input, normalized_input = prepare_sequences(song, pitchnames, n_vocab)\n print(network_input)\n model = create_network(normalized_input, n_vocab)\n prediction_output = generate_notes(model, network_input, pitchnames, n_vocab)\n\n create_midi(prediction_output)", "title": "" }, { "docid": "bde1a877559efe954a1eb758592f9b84", "score": "0.58838683", "text": "def random_sent(self, index):\n t1, t2 = self.get_corpus_line(index)\n if random.random() > 0.5:\n label = 0\n else:\n t2 = self.get_random_line()\n label = 1\n\n assert len(t1) > 0\n assert len(t2) > 0\n return t1, t2, label", "title": "" }, { "docid": "da3c960ba730caffa25f91d656bccefa", "score": "0.58777654", "text": "def gen_data(sequence,N):\n\n fake = Faker()\n train_data =[]\n for _ in range(N):\n entities = [] \n for (offset, s) in enumerate(sequence): \n if s['p'] is None:\n try:\n value = eval(s['value'])\n except:\n value = s['value'] \n else:\n value = random.choice(s['value'], 1, p=s['p']).item(0) \n if offset == 0:\n start = 0\n end = len(value)\n else:\n start = entities[offset-1]['end']\n end = start + len(value)\n label = s['label']\n entities.append({'value': value, 'start': start, 'end': end, 'label': label})\n data = (''.join([str(e['value']) for e in entities]),{'entities': [(e['start'],e['end'],e['label']) for e in entities if e['label'] is not None]}) \n train_data.append(data)\n \n return train_data", "title": "" }, { "docid": "e29d3f6a8342158c1b482fbb15b6d6ce", "score": "0.5779398", "text": "def get_start(self) -> str:\n return np.random.choice(self.starts)", "title": "" }, { "docid": "5157b5c8bb1f969ef04c9db19a4766b9", "score": "0.5766413", "text": "def generate():\n notes = Util.get_notes()\n\n # get amount of pitch names\n n_vocab = [len(set(instrument)) for instrument in notes]\n\n # Get all pitch names\n pitch_names = [sorted(set(item for item in instrument_in_mid)) for instrument_in_mid in notes]\n\n network_input = prepare_sequences(notes, pitch_names, n_vocab)\n model = create_network()\n prediction_output = generate_notes(model, network_input, pitch_names, n_vocab)\n create_midi(prediction_output)", "title": "" }, { "docid": "2a3a7f1d3614fc5635b917f9f90c719c", "score": "0.57538587", "text": "def _get_noun():\n return _random_line('nouns.txt')", "title": "" }, { "docid": "19eb000ed46bd97dbc8423f63f86e097", "score": "0.566456", "text": "def playRandom(self):\n index = random.randint(0, len(self.notes)-1)\n try: \n note = list(self.notes.values())[index]\n # Debug statement below\n # print(type(note))\n note.play()\n except KeyboardInterrupt:\n exit()", "title": "" }, { "docid": "429731d2ac5514185c38a74fc5388fde", "score": "0.56436324", "text": "def choose_from(seq, random_state):\n return seq[random_state.choice(len(seq))]", "title": "" }, { "docid": "5a76349e1ba66647efe7c1b5f926f990", "score": "0.56019807", "text": "def sample(self, seed=None):\n return Observation.sample()", "title": "" }, { "docid": "66e14c1c6f79d123792aae886c9e3499", "score": "0.55971736", "text": "def random(self):\n\n return choice(self.list_of_words)", "title": "" }, { "docid": "c5ec786f11535c20fe2493949cc176ad", "score": "0.5596083", "text": "def generate_string_1(self,note_seed,length,note_database,rhythm_database,rhythm_seed=[2.0]):\n 
rhythm_output=[]\n note_output=[]\n note_output.append(note_seed[0])\n rhythm_output.append(rhythm_seed[0])\n\n for i in range(length-1):\n i=i+1\n key=note_output[i-1]\n options=note_database.get(key,[\"C\",\"D\",\"E\",\"F\",\"G\",\"A\",\"B\"])\n next_state=random.choice(options)\n note_output.append(next_state)\n rhythm_key=rhythm_output[i-1]\n rhythm_options=rhythm_database.get(rhythm_key)\n print rhythm_options\n next_rhythm_state=random.choice(rhythm_options,[0.25,0.5,0.5,1.0,1.0,1.0,1.0,2.0,2.0,2.0])\n rhythm_output.append(next_rhythm_state)\n\n return (note_output,rhythm_output)", "title": "" }, { "docid": "154d71bf897edc8f1ab628fd8a51ff28", "score": "0.5590438", "text": "def get_training_sample(self, seed: int) -> TextClasDataBunch:\n pass", "title": "" }, { "docid": "9f1ea729ef2d853d9228a08102808180", "score": "0.556841", "text": "def random_sample(self, count):\n\n return islice(self.random_article_generator, count)", "title": "" }, { "docid": "68d0f2c5fb0338da3844889e66c1a825", "score": "0.55410194", "text": "def random(self):\n return choice(self.word_list)", "title": "" }, { "docid": "5db76a117f730d2de15f5c887c12a8e5", "score": "0.5451048", "text": "def get_next_instance_random(self):\n if not len(self.unlabeled_corpus):\n return None\n index = randint(0, len(self.unlabeled_corpus) - 1)\n return index", "title": "" }, { "docid": "3249f869b6597f37b821fe1bd8139254", "score": "0.54506046", "text": "def randomSentence() -> \"Sentences\":\r\n\t\treturn Session().query(Sentences).order_by(func.random()).first()", "title": "" }, { "docid": "f7ffe8107d98fd4a3d1b1caaf9f77fcc", "score": "0.5428545", "text": "def set_first_model_positives(self, config, random_seed) -> List[TextElement]:\n general_dataset_name = config.train_dataset_name.split('_train')[0]\n queries = self.queries_per_dataset[general_dataset_name][config.category_name]\n sampled_unlabeled_text_elements = []\n for query in queries:\n sampled_unlabeled_text_elements.extend(\n self.data_access.sample_unlabeled_text_elements(workspace_id=config.workspace_id,\n dataset_name=config.train_dataset_name,\n category_name=config.category_name,\n sample_size=self.first_model_positives_num,\n query=query, remove_duplicates=True)['results']\n )\n logging.info(\n f\"Positive sampling, after query {query} size is {len(sampled_unlabeled_text_elements)} \")\n\n if len(sampled_unlabeled_text_elements) > self.first_model_positives_num:\n random.seed(random_seed)\n sampled_unlabeled_text_elements = random.sample(sampled_unlabeled_text_elements,\n self.first_model_positives_num)\n\n sampled_uris = [t.uri for t in sampled_unlabeled_text_elements]\n sampled_uris_and_gold_labels = dict(\n oracle_data_access_api.get_gold_labels(config.train_dataset_name, sampled_uris))\n sampled_uris_and_label = \\\n [(x.uri, {config.category_name: sampled_uris_and_gold_labels[x.uri][config.category_name]})\n for x in sampled_unlabeled_text_elements]\n orchestrator_api.set_labels(config.workspace_id, sampled_uris_and_label)\n\n logging.info(f'Set the label of {len(sampled_uris_and_label)} instances sampled by queries {queries} '\n f'using the oracle for category {config.category_name}')\n logging.info(f\"Positive sampling, returned {len(sampled_uris)} elements\")\n\n return sampled_uris", "title": "" }, { "docid": "2b5300645044911500be2bd2b3bb89ec", "score": "0.5428368", "text": "def generate_random_sequence(n, p):\n sequence = []\n nums = bernoulli.rvs(p, size=n)\n for num in nums:\n if num == 1:\n sequence.append(\"Head\")\n else:\n sequence.append(\"Tail\")\n 
return sequence", "title": "" }, { "docid": "8e8bcc57095052ad90b7f1035d688259", "score": "0.54217315", "text": "def select_exercise(bot):\n return bot.exercises[random.randrange(0, len(bot.exercises))]", "title": "" }, { "docid": "2dde91f722c128af026b15a2151de249", "score": "0.5419068", "text": "def choose_note(self,rn,last_two):\n # find the appropriate counts matrix\n mat = self.counts[rn.scaleDegree-1]\n\n # find the row in our matrix, and sum it.\n row = mat[tuple(last_two)]\n total = np.sum(row)\n\n # choose a random number between [0,total)\n rand = (random.randint(0,total-1) if total > 0 else 0)\n\n i = 0\n rand -= row[i]\n\n # and select the corresponding row in the matrix\n while rand > 0:\n i += 1\n rand -= row[i]\n\n return music21.pitch.Pitch(midi2str(i))", "title": "" }, { "docid": "777824c5fb60a471b3d0fd1d17b52d9f", "score": "0.54099435", "text": "def ran_seq(seq_len):\n sequence = \"\"\n for i in range(seq_len):\n sequence += random.choice('ACTG')\n return sequence", "title": "" }, { "docid": "36c0bb0210a0ddcc77b5edd7c3865bc7", "score": "0.53826326", "text": "def pick(self):\n if self.words is None:\n self.read()\n return random.choice(self.words)", "title": "" }, { "docid": "147bd33b0d46c95ee92370e28e4009b6", "score": "0.5374686", "text": "def random_sample(self) -> str:\n raise NotImplementedError", "title": "" }, { "docid": "5242cad5f624e678a1e2afb4da9c4bed", "score": "0.5371694", "text": "def random_discussion():\n\tcount = Discussion.objects.count()\n\treturn Discussion.objects.limit(-1).skip(randint(0,count-1)).next()", "title": "" }, { "docid": "119ccad64c5b735ad7dbab9feecd5f4d", "score": "0.53713626", "text": "def __get_random_adjective(self):\n basic_words = ['beautiful', 'amazing', 'wonderful']\n shuffle(basic_words)\n random_base_word = basic_words[0]\n synonyms = self.dictionary.synonym(random_base_word)\n shuffle(synonyms)\n return synonyms[0]", "title": "" }, { "docid": "ba48d34c447d9054deda12c240e65d05", "score": "0.5369419", "text": "def __oldgetitem__(self, index):\n paragraph, language = self.lines[index], self.line_languages[index]\n paragraph_length = len(paragraph)\n\n offset = np.random.randint(0, paragraph_length-self.sequence_length)\n inputs = np.array([ch for ch in paragraph[offset:offset+self.sequence_length]])\n target = self.lang_to_idx[language]\n\n return inputs, target", "title": "" }, { "docid": "59e848fe9cddb2b0829a01030f50d53f", "score": "0.53605676", "text": "def generate_string_3(self,note_seed,length,note_database,rhythm_database,rhythm_seed=[2.0,1.0,1.0]):\n rhythm_output=[]\n note_output=[]\n note_output.append(note_seed[0])\n note_output.append(note_seed[0])\n note_output.append(note_seed[0])\n rhythm_output.append(rhythm_seed[0])\n rhythm_output.append(rhythm_seed[0])\n rhythm_output.append(rhythm_seed[0])\n\n for i in range(length-3):\n i=i+3\n key=(note_output[i-3],note_output[i-2],note_output[i-1])\n options=note_database.get(key,[\"C\",\"D\",\"E\",\"F\",\"G\",\"A\",\"B\"])\n next_state=random.choice(options)\n note_output.append(next_state)\n rhythm_key=(rhythm_output[i-3],rhythm_output[i-2],rhythm_output[i-1])\n rhythm_options=rhythm_database.get(rhythm_key,[0.25,0.5,0.5,1.0,1.0,1.0,1.0,2.0,2.0,2.0])\n next_rhythm_state=random.choice(rhythm_options)\n rhythm_output.append(next_rhythm_state)\n\n return (note_output,rhythm_output)", "title": "" }, { "docid": "e5ab78059b05d947668c2a57a7b14c1b", "score": "0.5354795", "text": "def get_word():\n s=0\n list1=[]\n with open(LEXICON_FILE) as f:\n for line in f:\n line=line.strip()\n 
list1.append(line)\n s=s+1\n f.close()\n p=random.randint(0,s)\n return list1[p]", "title": "" }, { "docid": "bb7b4993d04777b4c7ddae1daf745b84", "score": "0.53517085", "text": "def getRandomTestExample(self):\n\n\t\tindex = np.random.randint(self.nTest)\n\t\tdata = self.testSet[index,1:]\n\t\tlabel = self.testSet[index,0]\n\t\tlabelVector = (label == self.labels).astype(int)\n\n\t\treturn [{'data': data,\n\t\t\t\t 'label': labelVector}]", "title": "" }, { "docid": "322bcfd8ec32306b7fa360385c555cb6", "score": "0.5349645", "text": "def get_random_prompt(train_data: pd.DataFrame, num_examples: int = 10) -> str:\n prefix_exs_rows = sample_train_data(train_data, num_examples)\n serialized_prefixes = [\n (txt + label).strip()\n for txt, label in zip(prefix_exs_rows[\"text\"], prefix_exs_rows[\"label_str\"])\n ]\n prefix_exs = \"\\n\\n\".join(serialized_prefixes) + \"\\n\"\n return prefix_exs", "title": "" }, { "docid": "72e667ebd0f505ac3f2f74cb9f871f8e", "score": "0.5348437", "text": "def fixture_reagent_sequence(reagent: MockReagentType) -> str:\n return reagent.sequence", "title": "" }, { "docid": "957fa528a8983b140482eb21ca7aae68", "score": "0.5337165", "text": "def generate_midi_sample(model, data_path, outputFile_name, num_notes):\n # get notes from dataset\n notes = read_midi(data_path)\n\n network_input, _ = prepare_sequences(SEQUENCE_LENGTH, notes)\n\n int_to_note = map_int_to_notes(notes)\n\n n_vocab = get_num_unique_notes(notes)\n\n # generate a new sequence\n prediction_output = generate_notes(model, network_input, int_to_note, n_vocab, num_notes)\n\n # make midi file\n create_midi(prediction_output, outputFile_name)", "title": "" }, { "docid": "15248468933ab6c56c7a0de69b85352a", "score": "0.5331739", "text": "def getRandom(self):\n selected = random.randint(0,self.count)\n repre = self.head\n while selected > 0:\n repre = repre.next\n selected -= 1\n return repre.val", "title": "" }, { "docid": "6a589fda26c6abe4d68390ea00c6d5a4", "score": "0.53314304", "text": "def sample(self):\n points = self._choose_starting_points()\n return points + list(np.random.choice(self.train_indices, self.train_size - len(points), replace=False))", "title": "" }, { "docid": "1b89caee496f4da9d6c9574df9143d38", "score": "0.53293943", "text": "def loc_annotome_rand_samp():\n client = MongoClient(mongo_db_url)\n return client.BioFlow_database[pymongo_prefix + \"UP_r_samples\" + pymongo_suffix]", "title": "" }, { "docid": "2297471fa6b694564d0a9e178f4249d8", "score": "0.532717", "text": "def getRandom(self):\n if self.length <= 0:\n return -1\n index = random.randint(0,self.length-1)\n return self.data_list[index]", "title": "" }, { "docid": "0ad672b9be6ee7f6155817dbf3462efe", "score": "0.53237545", "text": "def generate_random_song(epoch):\n model.eval()\n with torch.no_grad(): \n sample_song = model.decoder(torch.rand(1, 120))\n \n song_np = ((sample_song[0].numpy())*127)\n song_int = song_np.astype(int)\n\n if not os.path.exists(f'{CD_PATH}/generated_songs/'):\n os.mkdir(f'{CD_PATH}/generated_songs/')\n\n np.save(\n f'{CD_PATH}/generated_songs/epoch{epoch}.npy', song_int)\n \n model.train()", "title": "" }, { "docid": "edd615cb4f81d9f463e66fa7f5413c3e", "score": "0.52951187", "text": "def getRandom(self):\n return random.choice(self.l)", "title": "" }, { "docid": "cfc550f5382e2a29195828e65cfc3adc", "score": "0.5291704", "text": "def randori_notes(self, randori_notes):\n\n self._randori_notes = randori_notes", "title": "" }, { "docid": "8de4213af5aee784640bb2908e3612eb", "score": "0.5288199", "text": "def 
get_random_line(self):\n # Similar to original tf repo: This outer loop should rarely go for more than one iteration for large\n # corpora. However, just to be careful, we try to make sure that\n # the random document is not the same as the document we're processing.\n for _ in range(10):\n if self.on_memory:\n rand_doc_idx = random.randint(0, len(self.all_docs)-1)\n rand_doc = self.all_docs[rand_doc_idx]\n line = rand_doc[random.randrange(len(rand_doc))]\n else:\n rand_index = random.randint(1, self.corpus_lines if self.corpus_lines < 1000 else 1000)\n #pick random line\n for _ in range(rand_index):\n line = self.get_next_line()\n #check if our picked random line is really from another doc like we want it to be\n if self.current_random_doc != self.current_doc:\n break\n return line", "title": "" }, { "docid": "d3dc796ea528a554f7e3988fa1ad3520", "score": "0.5274762", "text": "def getRandom(self):\n import random\n return self.nums[random.randint(0, len(self.nums)-1)]", "title": "" }, { "docid": "49360bca596b57ae37c581c29a0e5233", "score": "0.52730757", "text": "def sampleSequence(emissions, m, ins, d):\r\n\r\n #Define the states and beginning of the sequence\r\n states = [\"MATCH\", \"INSERTION\", \"DELETE\"]\r\n begin = np.random.choice(states, p = m[0])\r\n\r\n seq = []\r\n i = 0\r\n \r\n #Define the previous list based on one of three conditions\r\n if begin == \"MATCH\":\r\n prev_list = m\r\n elif begin == \"INSERTION\":\r\n prev_list = ins\r\n elif begin == \"DELETE\":\r\n prev_list = d\r\n\r\n #while i is smaller than length of the match list (randomly chosen, equal length)\r\n while i < len(m)-1:\r\n #Get a random choice at the ith position in states\r\n begin = np.random.choice(states, p = prev_list[i])\r\n #Choose from the emissions an amino acid based on the probabilities\r\n if begin == 'MATCH':\r\n #seq.append(sample(pa))\r\n seq.append(sample(emissions[i][0]))\r\n prev_list = m\r\n elif begin == 'INSERTION':\r\n seq.append(sample(pa))\r\n prev_list = ins\r\n continue\r\n elif begin == \"DELETE\":\r\n prev_list = d\r\n i += 1\r\n\r\n return \"\".join(seq)", "title": "" }, { "docid": "ac15ffbe49716ae1d2ab4422a3b0edc5", "score": "0.52689195", "text": "def get_random():\n\n cursor = [recipe for recipe in db.recipes.aggregate([{\"$sample\": {\"size\": 14}}])]\n\n # Return values are 10 for the slideshow and 4 for the you may also like section\n return [cursor[10:], cursor[:10]]", "title": "" }, { "docid": "e2ab94078e925bd7b300e4f48ec713b3", "score": "0.5268822", "text": "def generate_sentence(word1, length, vocab, model, sample_n=10):\n\n reverse_vocab = {idx: word for word, idx in vocab.items()}\n previous_state = None\n\n first_string = word1\n first_word_index = vocab[word1]\n next_input = [[first_word_index]]\n text = [first_string]\n\n for i in range(length):\n logits, previous_state = model.call(next_input, previous_state)\n logits = np.array(logits[0, 0, :])\n top_n = np.argsort(logits)[-sample_n:]\n n_logits = np.exp(logits[top_n])/np.exp(logits[top_n]).sum()\n out_index = np.random.choice(top_n, p=n_logits)\n\n text.append(reverse_vocab[out_index])\n next_input = [[out_index]]\n\n print(\" \".join(text))", "title": "" }, { "docid": "f6825b6117702f622355aecef78a74a9", "score": "0.52669907", "text": "def getRandomTrainingSet(self, seq_len, batch_size):\n import torch\n import random\n Jointsdata = self.getJointsData()\n X_train = []\n y_train = []\n for batch_index in range(batch_size):\n start_index = random.randint(0, self.data_len - seq_len)\n end_index = start_index + 
seq_len + 1\n chunk = Jointsdata[start_index:end_index].values\n X_train.append(chunk[:-1])\n y_train.append(chunk[1:])\n X_train = Variable(torch.Tensor(X_train))\n y_train = Variable(torch.Tensor(y_train))\n cuda = True\n if cuda:\n X_train = X_train.cuda()\n y_train = y_train.cuda()\n return X_train, y_train", "title": "" }, { "docid": "75bb2189d08978323fb4f8cf77e8cb60", "score": "0.5265433", "text": "def randomreadstart(entry, length):\n l = len(entry.sequence)\n if length > l:\n raise ValueError(\"Reads cannot be longer than the original reference.\")\n return random.randint(0, l-length-1)", "title": "" }, { "docid": "c9e02acda4ab59e9b57356243a13aa3e", "score": "0.52639395", "text": "def n_random_seqs(alignment,n):\n seq_names = alignment.Names\n shuffle(seq_names)\n return alignment.takeSeqs(seq_names[:n])", "title": "" }, { "docid": "fed3d892fa8e7d4a5752e6f0a7160c2e", "score": "0.5233923", "text": "def rand_augmenter(self) -> iaa.Sequential:\n aug_ind = np.random.choice(np.arange(len(self.augmenters)), 3, replace=False)\n aug = iaa.Sequential([self.augmenters[aug_ind[0]], self.augmenters[aug_ind[1]], self.augmenters[aug_ind[2]]])\n return aug", "title": "" }, { "docid": "84d87940b9e2a58b661f250e1b58d0b8", "score": "0.5232895", "text": "def rand_choice(seq):\n seq_len = len(seq)\n rand_index = random.randrange(seq_len) # get a random index\n return seq[rand_index] # return the value at that index", "title": "" }, { "docid": "f54d59eef01fb5c02ffbff773e25e8c9", "score": "0.5226268", "text": "def sample_random(sample_sequences, total):\n sample_random=[]\n n=1\n while n<(total+1):\n i=random.randint(0, total-1)\n sample_random.append(sample_sequences[i])\n n+=1\n if n==(total+1):\n break\n return(sample_random)", "title": "" }, { "docid": "3775fe7bd3b6f4f8099cee3bab4d531b", "score": "0.52140856", "text": "def randomspeak():\n\n file = open(f\"{loc}\\\\Assets\\\\random_sentences.txt\", \"r\")\n line = r.choice(file.readlines())\n file.close()\n\n return shaniTTS(line)", "title": "" }, { "docid": "1287db22f354eec357c4acee0a44e162", "score": "0.52132857", "text": "def get_random(self):\n return self.data['list']", "title": "" }, { "docid": "dc8634b55483a8c5b2a27af2ae2af1d8", "score": "0.52088004", "text": "def test_get_seq(self):\n\n\t\tfail = False\n\t\ttry:\n\t\t\tseq = wP.get_patch_sequence(self.source)\n\t\texcept Exception as e:\n\t\t\tprint e\n\t\t\tfail = True\n\t\tself.assertTrue (not fail)", "title": "" }, { "docid": "0af2a215a862f54e5aea53e69797ec5e", "score": "0.5207874", "text": "def sample(self, seq_len):\n voc_freq = self.dataset.voc_freq\n with torch.no_grad():\n # The starting hidden state of LSTM is None\n h_prev = None\n # Accumulate tokens into texts\n texts = []\n # Randomly draw the starting token and convert it to a torch.tensor\n x = np.random.choice(voc_freq.shape[0], 1, p=voc_freq)[None, :]\n x = torch.from_numpy(x).type(torch.int64).to(self.device)\n ##### Complete the code here #####\n # Append each generated token to texts\n # hint: you can use self.forward\n ##################################\n texts.append(int(x))\n for i in range(seq_len):\n logits, h_prev = self.forward(x, h_prev)\n dist = Categorical(logits=logits)\n x = dist.sample(torch.Size([1]))\n texts.append(int(x))\n\n return texts", "title": "" }, { "docid": "725c61a64600d766c468ae6356760624", "score": "0.5203492", "text": "def new_sequence(self):\n self.need_reset_rnn_state = True", "title": "" }, { "docid": "00ba78e7ef62270bf5cd9d56147d58a2", "score": "0.5202185", "text": "def __getitem__(self, 
index: int):\n rng = np.random.default_rng(self._seed + index)\n return self._generate_example(rng)", "title": "" }, { "docid": "d6cbbd7b2d3e21387f4bb7a2a51c05b9", "score": "0.5199776", "text": "def generate_sequence(self, length: int,\n seed_sequence: list,\n history: int = 100,\n temperature: float = 1.0):\n generated_sequence = list()\n generated_sequence += seed_sequence\n for _ in range(length):\n sequence_history = np.array([generated_sequence[-history:]])\n next_sample_logits = self(sequence_history)\n next_sample_logits = next_sample_logits / temperature\n next_sample = tf.random.categorical(next_sample_logits, 1)\n generated_sequence.append(int(next_sample))\n return generated_sequence[len(seed_sequence):]", "title": "" }, { "docid": "6a2b48a1110eaee8554425c93a145dd2", "score": "0.51954925", "text": "def generate_sequence(n_samples, params):\n\tobs_seq = np.zeros(n_samples, dtype=int)\n\tstate_seq = np.zeros(n_samples, dtype=int)\n\t\n\tstate_seq[0] = np.random.choice([0, 1], p=params['initial_prob'])\n\tfor i in range(1, n_samples):\n\t\tstate_seq[i] = np.random.choice([0, 1], p=params['trans_mat'][state_seq[i - 1]])\n\n\tfor i in range(n_samples):\n\t\tobs_seq[i] = np.random.choice(np.arange(6), p=params['emission_prob'][state_seq[i]])\n\t\n\treturn obs_seq, state_seq", "title": "" }, { "docid": "bb91c1b3c6c5024b13c1113f134fe41d", "score": "0.51909345", "text": "def get_random_sample(model_logic, params, length):\n # initiate series storage\n series = list()\n\n # calculate noised predictions\n max_noise_rate = 0.1\n for x_t in range(1, length + 1):\n # get clean prediction\n prediction = model_logic.predict(params, x_t)\n\n # # add random noise\n # max_noise_size = noise_rate * prediction\n # noise = 2 * np.random.random() * max_noise_size - max_noise_size\n # prediction += noise\n noise_rate = np.random.random() * (2 * max_noise_rate)\n prediction *= (1 - max_noise_rate + noise_rate)\n\n # store prediction\n series.append(prediction)\n\n # return series\n return series", "title": "" }, { "docid": "57124db0b7074aeb3ebc66fa95229155", "score": "0.5176014", "text": "def rnd_snp(locus, seq):\n ref = seq[locus-1].upper()\n if ref not in util.BASES:\n raise RuntimeError(\"invalid base character\")\n\n return ref, random.choice([b for b in util.BASES if b != ref])", "title": "" }, { "docid": "0ea15e0162b87b6824371695bb4d31cc", "score": "0.5168235", "text": "def getRandom(self) -> int:\n\n n = len(self.datalist)\n randIndex = random.randint(0, n - 1)\n return self.datalist[randIndex]", "title": "" }, { "docid": "38e737063244250b3a40c8fb1656dd40", "score": "0.515942", "text": "def getRandom(self):\n return random.choice(self.array)", "title": "" }, { "docid": "5aa4f5a23ca31670f3bd5375b42d238b", "score": "0.51534677", "text": "def get_next_features_random(self, class_index):\n return sample(range(self.n_feat), self.number_of_features)", "title": "" }, { "docid": "17e4dba252da4fd7a19132041e75347e", "score": "0.5146014", "text": "def getRandom(self) -> int:\n return random.sample(self.data, 1)[0]", "title": "" }, { "docid": "a8be05f76373e11e8e55d2f62de38dc8", "score": "0.5143785", "text": "def getRandom(self):\n return random.choice(self.arr)", "title": "" }, { "docid": "c9d5fbd40f9b2c043144aedf67e63d14", "score": "0.51424795", "text": "def random_sentences(filepath):\n stuff = list(sentences(filepath))\n\n np.random.shuffle(stuff)\n\n yield from stuff", "title": "" }, { "docid": "79deb4fdba15b90bc2a089064aed6de3", "score": "0.51399565", "text": "def getRandom(self):\n if len(self.l) == 
0:\n return 0\n import random\n length = len(self.l)\n random_index = random.randint(0, length-1)\n return self.l[random_index]", "title": "" }, { "docid": "c06a5be783a94de6a66ada58fea2fd12", "score": "0.5134265", "text": "async def get_random(self):\n resp = await self._get_resp(random=True)\n return Word(resp['list'][0])", "title": "" }, { "docid": "27fa1ea24b228e08606f1f229e11534e", "score": "0.5134064", "text": "def generate_random_sentence(self):\n sentence = []\n mgram = ['BOS'] * (self.n - 1)\n while True:\n token = self.generate_random_token(tuple(mgram))\n if token == 'EOS':\n break\n sentence.append(token)\n mgram = mgram[1:] + [token]\n return sentence", "title": "" }, { "docid": "8f4508cfdf82f73004c9fb60ab3e0032", "score": "0.51199746", "text": "def getRandom(self):\n if len(self.l) == 0: return -1\n return self.l[random.randint(0,len(self.l)-1)]", "title": "" }, { "docid": "f1ccb3703a236fb82ccaf669d1848962", "score": "0.5116013", "text": "def generate_sequence(chord: str = None, start: bool = False, data: list = bigrams, length: int = 4):\n # create list to store future chords\n chords = []\n if start:\n chords.append(chord)\n bigrams = [\" \".join(ngram) for ngram in ngrams]\n for n in range(length):\n # append next chord for the list\n chords.append(predict_next_state(chord, bigrams))\n # use last chord in sequence to predict next chord\n chord = chords[-1]\n return chords", "title": "" }, { "docid": "12557a6097252db31af791bb80dbbeae", "score": "0.51140356", "text": "def test_get_random_unlabelled_document(self):\n class TestModel(LearningModel):\n name = 'testmodel'\n queryset = Document.objects.all()\n\n document1 = Document.objects.create()\n document2 = Document.objects.create()\n document3 = Document.objects.create()\n\n LabelledDocumentFactory.create(\n document=document1, model_name=TestModel.get_name(), value='foo')\n LabelledDocumentFactory.create(\n document=document2, model_name='othermodel', value='foo')\n\n self.assertIn(\n TestModel().get_random_unlabelled_document().pk,\n [document2.pk, document3.pk])", "title": "" }, { "docid": "a33e0ae9eb9ed16fb50b0a0fd82f136e", "score": "0.5110634", "text": "def next(self):\n speaking, nspeaking = self.resample()\n merged = speaking + nspeaking\n random.shuffle(merged)\n return merged", "title": "" }, { "docid": "737e2df0d7c8bbef9e3ad8b3d393a060", "score": "0.5109187", "text": "def training_ids(self):\n if self._set_ids is None:\n self._set_ids = np.random.choice(self._n_instances, replace=True, size=self._n_instances)\n return self._set_ids", "title": "" }, { "docid": "affa7e15998d673a27ba356bbeafceb7", "score": "0.5094711", "text": "def testNotes():\n # Get preprocessed training data\n P = Preprocess()\n P.loadData('preprocessed/notes_test_19.txt') #Load preprocessed data from file, since net has been trained\n X, Y = P.getXY()\n input_size = P.getInputLength()\n output_size = P.getOutputLength()\n\n # Load weights for neural net\n net = NeuralNetwork([input_size,100,output_size],activeFn='sigmoid')\n net.loadWeights('weights/notes_train_19.txt') # Load weights from file, since net has been trained\n\n # Test testing data\n print('Testing Pitch Recognizition')\n net.testBatch(X,Y)", "title": "" }, { "docid": "4fa89eb60aab2b31cb4aca3891884d12", "score": "0.50923103", "text": "def random(self):\n n = floatX(normal(size=self.L.shape[0]))\n return np.dot(self.L, n)", "title": "" }, { "docid": "1c26a8ba0a2ccde9137df838242baa66", "score": "0.50876397", "text": "def UIS_WR(seq, n):\n return [random.choice(seq) for i in 
xrange(n)]", "title": "" }, { "docid": "b00966e0874c37a2670337d2fa83a80e", "score": "0.5082077", "text": "def getRandom(self) -> int:\n idx = random.randint(0, self.len)\n return self.data[idx]", "title": "" }, { "docid": "06a5f7ca470671cdb04ead45dae917c2", "score": "0.507932", "text": "def gen_ABCseq(self, length):\n cA = [2, 3, 5, 7, 11, 13]\n cB = [0, 1, 4, 8, 9]\n cC = [6, 10, 12, 14, 15]\n self.vocab[\"cA\"] = cA\n self.vocab[\"cB\"] = cB\n self.vocab[\"cC\"] = cC\n\n nA = len(cA)\n nB = len(cB)\n nC = len(cC)\n res = []\n for ii in range(length):\n if ii%3==0:\n # pick from class A\n id=int(np.floor(np.random.rand()*nA))\n pknum=cA[id]\n res.append(pknum)\n elif ii%3==1:\n # pick from class B\n id = int(np.floor(np.random.rand() * nB))\n pknum = cB[id]\n res.append(pknum)\n else:\n # pick from class C\n id = int(np.floor(np.random.rand() * nC))\n pknum = cC[id]\n res.append(pknum)\n return res", "title": "" }, { "docid": "2ae67f4cc3b8cb2cf4966167edce08f7", "score": "0.5078872", "text": "def paragraph():\n return ' '.join(sentence() for i in range(random.randint(1, 4)))", "title": "" }, { "docid": "53d4422cb66f5199c1cf61ca9f811219", "score": "0.507621", "text": "def _get_adj():\n return _random_line('adjectives.txt')", "title": "" }, { "docid": "6bf7a7af3671a42a011ed2ffab1f9c99", "score": "0.5067014", "text": "def generate_string_2(self,note_seed,length,note_database,rhythm_database,rhythm_seed=[2.0,2.0]):\n rhythm_output=[]\n note_output=[]\n note_output.append(note_seed[0])\n note_output.append(note_seed[0])\n rhythm_output.append(rhythm_seed[0])\n rhythm_output.append(rhythm_seed[0])\n\n for i in range(length-2):\n i=i+2\n key=(note_output[i-2],note_output[i-1])\n options=note_database.get(key,[\"C\",\"D\",\"E\",\"F\",\"G\",\"A\",\"B\"])\n next_state=random.choice(options)\n note_output.append(next_state)\n rhythm_key=(rhythm_output[i-2],rhythm_output[i-1])\n rhythm_options=rhythm_database.get(rhythm_key,[0.25,0.5,0.5,1.0,1.0,1.0,1.0,2.0,2.0,2.0])\n next_rhythm_state=random.choice(rhythm_options)\n rhythm_output.append(next_rhythm_state)\n\n return (note_output,rhythm_output)", "title": "" }, { "docid": "07b37733476d46cecdc121b72df2f856", "score": "0.5051347", "text": "def generate_sequence(inputchord, model, n):\n\n sequence = [inputchord]\n \n i = 0\n while i < n:\n \n inputchord = generate_chord(inputchord, model)\n sequence.append(inputchord)\n i += 1\n\n return sequence", "title": "" }, { "docid": "2cbb2b39b8f2ed84af8da9d2dc121534", "score": "0.5048223", "text": "def randomNick( self ):\n if len( self.nicks ) > 0:\n k = self.byNick.keys()\n return k[ random.randrange( len( k ) ) ]", "title": "" }, { "docid": "ae231740b01c2583102ef4f1c4450e48", "score": "0.5047859", "text": "def sample_rand_sent(file_paths_set: Set[str], n: int, bad_files: Set[str],\n npy_cache: Dict[str, np.ndarray]) -> np.ndarray:\n\n count = 0\n # Go over each book and sample\n while count < n:\n if len(bad_files) == len(file_paths_set):\n raise RuntimeError(\"All the files are bad\")\n # Open random book and sample random sentence indices\n target_file = random.choice(list(file_paths_set - bad_files))\n cur_book_mat = maybe_load_and_cache(target_file, bad_files, npy_cache,\n len(file_paths_set))\n if cur_book_mat is None:\n continue\n count += 1\n\n yield random.choice(cur_book_mat)", "title": "" }, { "docid": "4baadab0a721333bd658ecfd32cffa95", "score": "0.50459373", "text": "def _sample_sequences(self):\n rs = check_random_state(self.random_state) # type: np.random.RandomState\n\n sequences = []\n 
datasets = []\n for i, ds in enumerate(rs.choice(self._datasets, self.N)):\n seq_idc = rs.choice(self._dataset2idx[ds], np.minimum(self._L[i], self._n_spec_per_dataset[ds]),\n replace=False)\n seq_spectra = [self.spectra[sig] for sig in seq_idc]\n seq_labels = [self.labels[sig] for sig in seq_idc]\n\n # FIXME: Here we can have multiple times the same molecule in the sample, e.g. due to different adducts.\n # assert pd.Series(seq_labels).is_unique, \"Each molecule should appear only ones in the set of molecules.\"\n\n # Sort the sequence elements by their retention time\n if self.sort_sequence_by_rt:\n seq_spectra, seq_labels = zip(*sorted(zip(seq_spectra, seq_labels),\n key=lambda s: s[0].get(\"retention_time\")))\n\n if self.use_sequence_specific_candidates:\n if not isinstance(self.candidates, ABCRandomSubsetCandSQLiteDB):\n raise ValueError(\"Sequence specific candidate sets are only supported for random candidate subset \"\n \"candidate databases.\")\n\n # Copy the candidate class\n seq_candidates = deepcopy(self.candidates)\n\n # Replace the random seed to be sequence specific\n seq_rs = seq_candidates.__getattribute__(\"random_state\")\n assert isinstance(seq_rs, int)\n seq_candidates.__setattr__(\"random_state\", seq_rs + i)\n else:\n seq_candidates = self.candidates\n\n sequences.append(\n LabeledSequence(seq_spectra, candidates=seq_candidates, ms_scorer=self.ms_scorer, labels=seq_labels)\n )\n datasets.append(ds)\n\n return sequences, datasets", "title": "" }, { "docid": "7aafc024e3cccaf688536b60076047b2", "score": "0.5044466", "text": "def randomSequences (n, stringLength = 10):\n seqs = []\n for s in range(n):\n seqs.append(randomString(stringLength))\n return seqs", "title": "" }, { "docid": "fb33e2cda28d345b25f71cee11c697bd", "score": "0.50439733", "text": "def sample_seq(s, mu=8.9, sigma=0.5):\n direction = random.choice((-1, 1)) # NOTE: this is not strand, but for generating terminal-ending reads\n start = random.randint(0, len(s) - 1)\n length = int(random.lognormvariate(mu, sigma))\n return s[start:min(start + length, len(s))] if direction == 1 else s[max(0, start - length + 1):start + 1]", "title": "" }, { "docid": "62b3dcfd6e48325f3c1b41746efae7a6", "score": "0.5041706", "text": "def generate_random_tour(self):\n print('Generating initial random tour')\n tour = self.data_list.copy()\n\n random.shuffle(tour)\n\n # append the starting point to the end of the list to complete the loop\n tour.append(tour[0])\n\n return tour", "title": "" }, { "docid": "0173852db24bfdbfc9db58f9e0a8f14d", "score": "0.50369203", "text": "def random_text(n=100):\n # choose a random prefix (not weighted by frequency)\n start = random.choice(suffix_map.keys())\n\n for i in range(n):\n suffixes = suffix_map.get(start, None)\n if suffixes == None:\n # if the start isn't in map, we got to the end of the\n # original text, so we have to start again.\n random_text(n-i)\n return\n\n # choose a random suffix\n word = random.choice(suffixes)\n print word,\n start = shift(start, word)", "title": "" }, { "docid": "244771ba54f44b20c9f817a6f6857a33", "score": "0.5028329", "text": "def random_tetromino():\n\treturn copy.copy(Tetromino.ALL[random.randint(0, len(Tetromino.ALL)-1)])", "title": "" }, { "docid": "8ca675b0d46f24903e7e22681c8bf776", "score": "0.5027583", "text": "def getNextTrainExample(self):\n\n\t\tdata = self.trainSet[self.counterTrain,1:]\n\t\tlabel = self.trainSet[self.counterTrain,0]\n\t\tlabelVector = (label == self.labels).astype(int)\n\n\t\t# Iterate the counter\n\t\tself.counterTrain = 
(self.counterTrain + 1)%self.nTrain\n\n\t\treturn [{'data': data,\n\t\t\t\t 'label': labelVector}]", "title": "" } ]
44188704f713f90f0cc6cef7bc8e768d
SetMaskValue(itkMaskedImageToHistogramFilterICF3ISS3 self, itkSimpleDataObjectDecoratorSS _arg) SetMaskValue(itkMaskedImageToHistogramFilterICF3ISS3 self, short const & _arg)
[ { "docid": "613ab156bf6cd5556dffcf7d31191dfd", "score": "0.8657261", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICF3ISS3_SetMaskValue(self, *args)", "title": "" } ]
[ { "docid": "30a00a57275b8763d29c667bf2f392c2", "score": "0.8788932", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICF3ISS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "afccb617bcb8b56e3b5de9de9c2fdcad", "score": "0.8766513", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVIUS3ISS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "868199305fe11433492909868bbd69df", "score": "0.8764808", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVISS3ISS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "067602420373ee75d9d2bbdbce5824ca", "score": "0.8756903", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF23ISS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "41c36a47f3650f9bced2e93a30aff459", "score": "0.87558955", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterISS3ISS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "2c40a4adcd62a2ce04dc1e72bd21cef9", "score": "0.8744762", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIF3ISS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "932670b4a082cfcc7c8933467f19b4c8", "score": "0.87441236", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIRGBUC3ISS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "b12fc953e7208e36e0e37e82053d4854", "score": "0.87386036", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF33ISS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "7001cb7de863f22b093f399303e8bf42", "score": "0.87377465", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorF') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVISS3IF3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "9a9d7ee3efa19cc462ddac6276e63008", "score": "0.87366796", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF23ISS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "b54a8d5a375b159ad000d342dd5d14c1", "score": "0.8724266", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF43ISS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "31b7ae1a4af335af1afccdf7dbf84dbf", "score": "0.8721981", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF33ISS3_SetMaskValueInput(self, _arg)", "title": "" }, { 
"docid": "2611e2cd57ca7e06a1176b91a228141b", "score": "0.8719281", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF43ISS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "2792bdfb5572aaf8cf7d734d463c463d", "score": "0.8716034", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVISS3ISS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "3217027b85c0b11ccdff05f8c3c579df", "score": "0.87115157", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICF3IUS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "2c886050210ace6e8146fb3ed9aff601", "score": "0.8701434", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVISS3IUS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "b7ce6cbdb386ff1a447879d683e1ff41", "score": "0.86901724", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF23ISS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "045d05de87e4cc4722513470173447f0", "score": "0.86875695", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVIUC3ISS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "af0c4b5ad189814cc857a4d92b767ac8", "score": "0.86853147", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIUS3ISS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "941a7694a182ce0c2bc074d869ba1c43", "score": "0.86811024", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVISS3IF3_SetMaskValue(self, *args)", "title": "" }, { "docid": "557aac7ee4a557e21cdc461d175455ba", "score": "0.86808956", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF23IUS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "aca8a883978b8e5028a9724b7da94020", "score": "0.86808777", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF33ISS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "6c5109519a00c24674f3d15a46e61ed3", "score": "0.86761224", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterISS3IUS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "c3cc91d3e46a011267f934ecc524d3e8", "score": "0.8672957", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorF') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVIUS3IF3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "5a875fee3a95d181e5479c80e7d45d3a", "score": "0.86717546", "text": "def SetMaskValue(self, *args) -> \"void\":\n return 
_itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF43ISS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "c42460cb1b318eeeba7db049f57860fd", "score": "0.866745", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVIUS3IUS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "41bf20640809a2f29695ab51507bf585", "score": "0.8666833", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF43IUS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "71ebe681f47644097b737cb5713008b1", "score": "0.86653876", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIUC3ISS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "a5643028b8f18d3d26acbe3717116362", "score": "0.8664861", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIF3IUS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "eb1c09d5008c9fbd7b7bd95f88e4d316", "score": "0.8663704", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIRGBUC3IUS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "740c62fb89cbed12a1d3641a930d6c8a", "score": "0.86627316", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF23IUS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "e5429d86f0f104ce02355fe3bacaab20", "score": "0.8661066", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF23ISS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "514da32ebf5a59c735f21457558eb18b", "score": "0.8660029", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF33ISS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "60764ef7613fe081339b986b117e3205", "score": "0.8658923", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF43ISS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "eea43ad7920ed4447bcf0a7fa32edb36", "score": "0.86568457", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIRGBAUC3ISS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "ee6a77e9694c5a83a21ea10177feb8fa", "score": "0.8648137", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF33IUS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "11571e70605b7576fe66bd496627f3bb", "score": "0.86475974", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIRGBUC3ISS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "a6d67ac1fa4ebc83b9a8e968ea959ee3", 
"score": "0.8647407", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF43IUS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "3aa486445564fec3246265cdf5e8bbbc", "score": "0.8646873", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVIUS3ISS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "2f31a583dc15b0c50745aa832082d82b", "score": "0.86419785", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUC') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVISS3IUC3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "c2d824fb0c47016e352c28fc0ed5bd3d", "score": "0.8635762", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIF3ISS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "d7fe6839844b5d1a2fd1b00c193e54b3", "score": "0.86341566", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVIF3ISS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "512906abe67644154719a9f1a07a0165", "score": "0.862531", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterISS3ISS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "ef1ff4757bf66bee4f8f0cbcc4447638", "score": "0.8621516", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF33IUS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "3aefd79a9a403d2f2c248df88ec5bbde", "score": "0.86130476", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUC') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICF3IUC3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "70f84ed6fa838cd4721269fb7b238bb2", "score": "0.8593202", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF43IUS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "553e1606ae5eb37300a0e7b91c2cc9f9", "score": "0.8592036", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVISS3IUS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "8b9064ca077dd0dce29ae29e5f7e1060", "score": "0.8590871", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIUS3IUS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "c054ffc70f06868fd4aa7c1b881aa5cd", "score": "0.85892576", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUC') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF23IUC3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "bef856d3f2bc1e19afde704aefa506a9", "score": "0.85856134", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUC') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF23IUC3_SetMaskValueInput(self, 
_arg)", "title": "" }, { "docid": "f605937b17eab6bbf6915d48b8ba5d02", "score": "0.8583937", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF23IUS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "c9081b4781f34f7fe96dfd7c2c9f68dd", "score": "0.85825586", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF42ISS2_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "2867b21c40072944166252e0b2be43ba", "score": "0.8580751", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorF') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVIUC3IF3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "00ef08a39e45549f3c579ffa3715e38b", "score": "0.85785156", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICF3IUS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "0931e4b71cd052b55d877339f609b31d", "score": "0.8577086", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUC') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF43IUC3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "fff4ca828fe43dc1c3a5c5bce0a7ed53", "score": "0.8576928", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF22ISS2_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "f1be5a92a30e83222fad42fd0b8a22cb", "score": "0.857682", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUC') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIRGBUC3IUC3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "fe44019ff55909425780622c21409c64", "score": "0.85764533", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUC') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterISS3IUC3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "019a4514c0131c35f4ad50e173caa0aa", "score": "0.85746706", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUC') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIF3IUC3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "90e7240135f46ba85559b9211b809609", "score": "0.8574234", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF42ISS2_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "1dee4de09e1b9a6c40bc81ed251ba546", "score": "0.8574154", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUC') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF33IUC3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "a28c99df6c5f097fb26153e6008c7a01", "score": "0.8570837", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF32ISS2_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "c08bb01bb8b1743550ccb71f056ac915", "score": "0.85695386", 
"text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF22ISS2_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "ceb8be012ac03911c111bf3449f35ddb", "score": "0.8568625", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVIF3IUS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "bb76464d76c8464b08f0028bf8331f97", "score": "0.8563855", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVIUS2ISS2_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "fe43cf4181398efd99cb24eca7cab14d", "score": "0.8562273", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIF3IUS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "a86fe673ecda741358b15ec6a31f67a3", "score": "0.8562239", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterISS3IUS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "e045d91c8562dc4062377de159bfa87c", "score": "0.855996", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUC') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIUC3IUC3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "8f8075c7e97b7b10d87bb8af7f867d58", "score": "0.85567856", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUC') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF33IUC3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "5960230a35f44fe89f75b3cc20f3d2d8", "score": "0.8556585", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIRGBAUC3IUS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "1f2a16280d9e26136c544ff1486654a8", "score": "0.8550808", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF32ISS2_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "d3c965f7a64f96c1379ba01c350144b3", "score": "0.8548735", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF43IUC3_SetMaskValue(self, *args)", "title": "" }, { "docid": "3f00bde4edc644368b7ba677f886f990", "score": "0.8547878", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUC') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF43IUC3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "d121e5514454f36c1740febf6c63e379", "score": "0.85458666", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF43IUS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "b8467391b5fde2eae9b96680b40c37e7", "score": "0.8545734", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return 
_itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVIUC3IUS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "bc8737336c15a36a7478e83a2df47384", "score": "0.854407", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVIUS3IF3_SetMaskValue(self, *args)", "title": "" }, { "docid": "84ec66e225ed4b2b03c6626f9afa60bc", "score": "0.854141", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF33IUS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "a21ceb2a6e80561cf925995bba47eefd", "score": "0.8539171", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF23IUS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "891232cf998a1b3c2ca6a88be10aa3a8", "score": "0.8535268", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUC') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVIUS3IUC3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "54ef99093b9a0fb96678d64e711368fe", "score": "0.85306543", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVISS3IUC3_SetMaskValue(self, *args)", "title": "" }, { "docid": "20cc3fb6d0685f9f62029af5dfd3fa7e", "score": "0.85281384", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVIUS3IUS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "606f8de0c55fa244c6db106272d65720", "score": "0.85274154", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF22IUS2_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "f6f05490279274009f5b3911682d6080", "score": "0.8527075", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF23IUC3_SetMaskValue(self, *args)", "title": "" }, { "docid": "7b3f8cc194692063d6f21f52dc4bcadf", "score": "0.85260475", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIRGBUC2ISS2_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "19a4028d79cff192e97fd8da5f967530", "score": "0.8524239", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICF2ISS2_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "9c0a58c59335fafda0942b2770b8240c", "score": "0.8519773", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorSS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVISS2ISS2_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "d368bc85db80e0667071fe18529984e7", "score": "0.8518238", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorF') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVISS2IF2_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "d57efdc93b4362f1cec7b3f3535907d2", "score": "0.85175157", "text": "def SetMaskValueInput(self, _arg: 
'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF42IUS2_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "7c03aafb48e84b4b984e052bc989e303", "score": "0.85120666", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUC') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVIUC3IUC3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "19638556ac7a94f9cb5a964190ea0399", "score": "0.85108864", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF42IUS2_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "a7037923205992f243e9ce254643d182", "score": "0.8510629", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIUS3ISS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "648adb55f941f2510968406bf42abcee", "score": "0.85089153", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorF') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterVIUS2IF2_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "9435eb30bcf109c0ed6e0fb472ff7fce", "score": "0.85065687", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF32IUS2_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "c39a1a004f663ac0f70662b4a612d921", "score": "0.8502256", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF23IUC3_SetMaskValue(self, *args)", "title": "" }, { "docid": "f72e3dda877fecde473c0aa821ad4285", "score": "0.85001796", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIRGBUC3IUS3_SetMaskValue(self, *args)", "title": "" }, { "docid": "4605b0de2eb681fa04f1d569895943f1", "score": "0.84998137", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIUC3IUS3_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "5ab67a9c4bd602d42eb0ef5f5fdc1b1a", "score": "0.84977686", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUS') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF22IUS2_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "0bc39de77e644602b4740acec5a18308", "score": "0.8491145", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF43IUC3_SetMaskValue(self, *args)", "title": "" }, { "docid": "6e46fffe15358494a8ca02e5e3c0e84c", "score": "0.84849036", "text": "def SetMaskValueInput(self, _arg: 'itkSimpleDataObjectDecoratorUC') -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterICVF22IUC2_SetMaskValueInput(self, _arg)", "title": "" }, { "docid": "62478cf195ffada002bf5011e8f5240b", "score": "0.84841436", "text": "def SetMaskValue(self, *args) -> \"void\":\n return _itkMaskedImageToHistogramFilterPython.itkMaskedImageToHistogramFilterIVF33IUS3_SetMaskValue(self, *args)", "title": "" } ]
166a4a04e4927ffada15efd6da0a1719
Returns a tuple containing various accuracy metrics.
[ { "docid": "2b887430f4857c841603d0818710045c", "score": "0.6452261", "text": "def get_accuracy(model: Model,\n result: Result,\n example: Example) -> Tuple[float, float, float, float, float]:\n del model # Unused.\n\n # TODO(kshi): Refactor to reuse the simpler code in f_beta_loss.\n\n predictions = tf.cast(tf.greater(tf.sigmoid(result.operation_logits), 0.5),\n tf.int32)\n operations = tf.clip_by_value(tf.cast(example.operations, tf.int32), 0, 1)\n\n correct = tf.equal(predictions, operations)\n num_correct = tf.reduce_sum(tf.cast(correct, tf.int32))\n accuracy = num_correct / tf.size(correct)\n\n # Precision.\n # Among ops that the model predicted were used, how often was it correct?\n predicted_positive_indices = tf.where(tf.equal(predictions, 1))\n predicted_positive_truth = tf.gather_nd(operations,\n predicted_positive_indices)\n num_correct = tf.reduce_sum(predicted_positive_truth)\n precision = num_correct / tf.size(predicted_positive_truth)\n\n # Recall, i.e., true positive rate.\n # Among ops that were actually used, how often was the model correct?\n actually_positive_indices = tf.where(tf.equal(operations, 1))\n actually_positive_predictions = tf.gather_nd(predictions,\n actually_positive_indices)\n num_correct = tf.reduce_sum(actually_positive_predictions)\n recall = num_correct / tf.size(actually_positive_predictions)\n\n # True negative rate.\n # Among ops that were actually not used, how often was the model correct?\n actually_negative_indices = tf.where(tf.equal(operations, 0))\n actually_negative_predictions = tf.gather_nd(predictions,\n actually_negative_indices)\n num_correct = tf.reduce_sum(1 - actually_negative_predictions)\n true_negative_rate = num_correct / tf.size(actually_negative_predictions)\n\n # F1 score.\n f1_score = 2 * precision * recall / (precision + recall)\n\n return accuracy, precision, recall, true_negative_rate, f1_score", "title": "" } ]
[ { "docid": "5e1ecb6edb312cf47bb0601254ba1805", "score": "0.7734463", "text": "def accuracy(self):\n\n\t\treturn (self.truePositives + self.trueNegatives) / self.items", "title": "" }, { "docid": "e9c2bf54ead0f330e10b9669a8749124", "score": "0.77266836", "text": "def _get_accuracy(self):\n return self.__accuracy", "title": "" }, { "docid": "d5dc91b08407da6607d60e64f9c64cbf", "score": "0.75807375", "text": "def get_accuracy(self):\n return self.accuracy", "title": "" }, { "docid": "8c4ddb03cd52bc42e9520ad9d88c5a8c", "score": "0.7542849", "text": "def accuracy(self, y_pred: PO, y: PO) -> Tuple[int, int]:\n raise Exception(\"Not implemented\")", "title": "" }, { "docid": "9a852719a21f454e40e048ec73ab6bf0", "score": "0.7484951", "text": "def get_accuracy(self):\n _verif(self._accuracy, \"accuracy\")\n return self._accuracy", "title": "" }, { "docid": "b73a4cc8e6fea5fbd29849b157b6633d", "score": "0.7396155", "text": "def calc_accuracy(self):\n self.__build_data_dicts()\n print(\"Data dictionaries built...\")\n classifier = ml_algs.Classifier(self.training_tweets)\n\n # TESTING DATA: loop through each user and their tweets\n total = naive_correct = log_correct = 0\n for user, tweets in self.testing_tweets.items():\n for tweet in tweets:\n # for each tweet, check if the predicted user is the same as the actual user\n most_sim_naive = classifier.predict_user(\"naive\", tweet)\n most_sim_log = classifier.predict_user(\"log\", tweet)\n if most_sim_log == user:\n log_correct += 1\n if most_sim_naive == user:\n naive_correct += 1\n total += 1\n print(\"Finished predicting \" + \"@\" + user)\n return naive_correct / total, log_correct / total", "title": "" }, { "docid": "d1d2616e9a06e9769ad2e5a857d0d8bd", "score": "0.73574716", "text": "def computed_accuracy(self):\n return self._computed_accuracy", "title": "" }, { "docid": "a98d52aba057ae619fdbdfb6c17734e8", "score": "0.734711", "text": "def accuracy_score(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "07ac136898830477cb9285e5ea82c6c3", "score": "0.73407435", "text": "def getMetrics(self):\n acc = self.accuracy_metrics.result()\n return [acc]", "title": "" }, { "docid": "bb71871f4413cf6d8af8411b9cb8ffb5", "score": "0.72258323", "text": "def getAccuracy(self):\n return float(self.accuracy)", "title": "" }, { "docid": "a98ab8c679873adaf5c33d06f68460ec", "score": "0.71761113", "text": "def accuracy(reference, test):\n ...", "title": "" }, { "docid": "0d65df80045a80c574ab112c428347b2", "score": "0.70866895", "text": "def get_training_accuracy(self):\n \n total_outs = self.queery(self.training_input_set)\n accuracy_matrix = total_outs\n self.row_accuracy = []\n self.col_accuracy = []\n\n # build accuracy matrix\n for i,out_set in enumerate(total_outs):\n for j,out in enumerate(out_set):\n if out == self.target_set[i][j]:\n accuracy_matrix[i][j] = 1\n else:\n accuracy_matrix[i][j] = 0\n\n # get row-wise accuracy \n for i,row in enumerate(accuracy_matrix):\n self.row_accuracy.append(float(sum(row))/len(row))\n\n # transpose the matrix to get columnwise accuracy\n accuracy_matrix = zip(*accuracy_matrix)\n for i,col in enumerate(accuracy_matrix):\n self.col_accuracy.append(float(sum(col))/len(col))\n\n # get total accuracy and cortex learning age\n self.accuracy = sum(self.col_accuracy)/len(self.col_accuracy)\n self.learn_age= self.neurons[0].r_age\n \n return self.accuracy", "title": "" }, { "docid": "503d974bec99eff4809b2ec8aaaea492", "score": "0.70798814", "text": "def number_found_accuracy(self):\n return 
self._number_found_accuracy", "title": "" }, { "docid": "6bac0ca9de2d061ddeed7518f2b27376", "score": "0.7067593", "text": "def accuracy(self) -> int:\n\n return self._accuracy", "title": "" }, { "docid": "5a21b300f6a481dac143a6405c4caf47", "score": "0.7063283", "text": "def acc_score(self):\n if 0 == self.total_labels:\n return 0.0\n accuracy = float(self.correct_labels) / self.total_labels\n return accuracy", "title": "" }, { "docid": "f237d766781119951cf8b4b8dc31e8c8", "score": "0.7061018", "text": "def accuracy(self):\n num_correct = self.prediction_matrix.diag().sum()\n num_total = self.recorded.sum()\n\n return num_correct.float() / num_total.float()", "title": "" }, { "docid": "4e9301ec4c5bc6b8707bbed354a4b2ba", "score": "0.70434004", "text": "def label_accuracy_score(self):\n hist = self.hist\n\n acc = np.diag(hist).sum() / hist.sum()\n with np.errstate(divide='ignore', invalid='ignore'):\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n mean_acc_cls = np.nanmean(acc_cls)\n\n with np.errstate(divide='ignore', invalid='ignore'):\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n mean_iu = np.nanmean(iu)\n self.mIoU = mean_iu\n\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n return acc, acc_cls, mean_acc_cls, mean_iu, fwavacc, iu", "title": "" }, { "docid": "c970990905425ce6e83cb4f95c762f66", "score": "0.69957066", "text": "def get_accuracy(predictions, labels):\n\n return float(sum(predictions == labels).data[0]) / labels.size()[0]", "title": "" }, { "docid": "a394705497a3161db9f90da55919f0a0", "score": "0.6989651", "text": "def get(self):\n return self.losses, self.accuracy", "title": "" }, { "docid": "bc1bddff86b4eb79db3094afc6278591", "score": "0.6974721", "text": "def measure_accuracy(predictions, test):\r\n match = [1 if predictions[y] == test.iloc[y] else 0 for y in range(len(test))]\r\n accuracy = sum(match)/len(match)\r\n misclassification_rate = 1-accuracy\r\n\r\n return accuracy, misclassification_rate", "title": "" }, { "docid": "f01d331a9a79b12af9403301062a6cc2", "score": "0.69661015", "text": "def _calculate_accuracy(infer_image, mask_image):\n mask_image = (mask_image / 255.0).astype(int)\n mask_image = (np.arange(2) == mask_image[..., None]).astype(int)\n\n inter = np.dot(infer_image.flatten(), mask_image.flatten())\n union = np.dot(infer_image.flatten(), infer_image.flatten()) + \\\n np.dot(mask_image.flatten(), mask_image.flatten())\n\n single_dice = 2 * float(inter) / float(union + 1e-6)\n single_iou = single_dice / (2 - single_dice)\n return single_dice, single_iou", "title": "" }, { "docid": "257980cabbc6f0d3a61946a555ff5fe7", "score": "0.69342077", "text": "def accuracy(self, data, convert=False):\n if convert:\n results = [(np.argmax(self.feedforward(x)), np.argmax(y))\n for (x, y) in data]\n else:\n results = [(np.argmax(self.feedforward(x)), y)\n for (x, y) in data]\n\n result_accuracy = sum(int(x == y) for (x, y) in results)\n return result_accuracy", "title": "" }, { "docid": "83eb5d040e9e197a03bc1cecd8f7ed81", "score": "0.6926932", "text": "def accuracy_helper(self, data):\n accuracy = {\"g\": 0, \"ug\": 0}\n\n for item in data:\n if item.stimulus < 13: # grammatical items\n if self.grammatical(item.response):\n accuracy[\"g\"] += 1\n else: # ungrammatical items\n if not self.grammatical(item.response):\n accuracy[\"ug\"] += 1\n\n accuracy[\"overall\"] = accuracy[\"g\"] + accuracy[\"ug\"]\n accuracy[\"percent\"] = float(accuracy[\"overall\"])/len(data)\n accuracy[\"g_percent\"] = 
float(accuracy[\"g\"]) * 2/len(data)\n accuracy[\"ug_percent\"] = float(accuracy[\"ug\"]) * 2 / len(data)\n print accuracy[\"g_percent\"]\n\n return accuracy", "title": "" }, { "docid": "e1f2412ea3ead7a00a859054c04a27e5", "score": "0.6888889", "text": "def get_accuracy(\n predictions: torch.FloatTensor, labels: torch.LongTensor\n) -> torch.FloatTensor:\n return torch.mean(predictions.eq(labels).float())", "title": "" }, { "docid": "aef95c06e2431b18e88ff8e62c006ee8", "score": "0.6885254", "text": "def accuracy(self, test_data_letter, test_data_color):\n letter_accuracy = self.accuracy_helper(test_data_letter)\n color_accuracy = self.accuracy_helper(test_data_color)\n overall_accuracy = self.accuracy_helper(test_data_letter + test_data_color)\n\n return letter_accuracy, color_accuracy, overall_accuracy", "title": "" }, { "docid": "41ea7a9c6dedd5836a7ec6d57305f28b", "score": "0.68852526", "text": "def accuracy(output, labels):\r\n preds = output.max(1)[1].type_as(labels)\r\n correct = preds.eq(labels).double()\r\n correct = correct.sum()\r\n return correct / len(labels)", "title": "" }, { "docid": "09195711087679b924da5a6c067a9e49", "score": "0.68712676", "text": "def accuracy(self, logits, labels):\n\t\traise NotImplementedError\n\t\treturn accuracy", "title": "" }, { "docid": "58549a5fdbeeea74d8a8a9a96873dcf1", "score": "0.68658495", "text": "def get_test_accuracy(self, nImages=-1):\n return self.env.get_test_accuracy(nImages)", "title": "" }, { "docid": "fbf93677637d2197f123ab9affc652e4", "score": "0.68634707", "text": "def accuracy_score(data):\n return 100 * sum([1 if p == t else 0 for p, t in data]) / len(data)", "title": "" }, { "docid": "3eec69e9cf3a4abf800967af37436c31", "score": "0.68372947", "text": "def calculate_accuracy_score(self) -> float:\n return accuracy_score(self.labels, self.y_pred)", "title": "" }, { "docid": "86a2d5e7820bbeab4132c185c1624001", "score": "0.68173736", "text": "def calculate_accuracy(cluster_assignments, true_classes):\n\n ca = best_map(true_classes, cluster_assignments)\n #print 'best map'\n #print ca\n return accuracy_score(ca, true_classes)", "title": "" }, { "docid": "e6212e24828b3aa9a68fdbf83dbfdf8b", "score": "0.681322", "text": "def accuracy(self, x, y):\n y_pred = self.predict(x)\n y_true = y.flatten()\n return (y_pred == y_true).mean()", "title": "" }, { "docid": "a531630070a66ca2ad99a52e99cb26fb", "score": "0.6804446", "text": "def accuracy_estimate(self):\n return AccuracyEstimate(self.__impl.accuracy_estimate())", "title": "" }, { "docid": "ff2a0c9a835f45ae0ec687f9f46f364d", "score": "0.68037313", "text": "def calc_classification_accuracy(examples):\n total_correct = sum(1 for example in examples.values()\n if example.gold_class_index == example.pred_class_index)\n return total_correct / len(examples)", "title": "" }, { "docid": "c063ecd97f828626725f1b5792bbaa08", "score": "0.6803292", "text": "def get_accuracy(ds, a):\n n = 0.0\n acc = 0.0\n sil = 0.0\n for y, p in zip(ds, a):\n n += 1.0\n\n acc += 1.0 if (y == 0 and (p[0] > 0.5)) or (y == 1 and (p[0] <= 0.5)) else 0.0\n sil += 1.0 if y == 0 else 0.0\n\n # print '---'\n # print n\n # print y, p\n # print acc, sil\n\n return acc/n*100, sil/n*100", "title": "" }, { "docid": "0b112d7bdca94c90815106ec16419467", "score": "0.67976165", "text": "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n accuracy = np.mean(np.argmax(predictions, axis=1) == np.argmax(targets, axis=1))\n # raise NotImplementedError\n ########################\n 
# END OF YOUR CODE #\n #######################\n\n return accuracy", "title": "" }, { "docid": "5d098801cb0623af83a6fc6364aac723", "score": "0.67959476", "text": "def compute_accuracy(total_cm):\n denominator = total_cm.sum().astype(float)\n cm_diag_sum = np.diagonal(total_cm).sum().astype(float)\n\n # If the number of valid entries is 0 (no classes) we return 0.\n accuracy = np.where(\n denominator > 0,\n cm_diag_sum / denominator,\n 0)\n accuracy = float(accuracy)\n print('Pixel Accuracy: {:.4f}'.format(float(accuracy)))", "title": "" }, { "docid": "059e0cd15a587f894cea7286b9bb393f", "score": "0.67920136", "text": "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n o = np.argmax(predictions, axis=1)\n t = np.argmax(targets, axis=1)\n compared = np.equal(o, t)\n correct = np.sum(compared)\n accuracy = correct / len(compared)\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "title": "" }, { "docid": "c43dc038d929bf2385e5ed4b5c845eb2", "score": "0.67842835", "text": "def compute_accuracy(n_correct, n_total):\n\treturn n_correct/n_total", "title": "" }, { "docid": "5219ce3fabd7d1fe3b387d846a7d3dff", "score": "0.6759189", "text": "def calculate_accuracy(labels, results):\n total = len(labels)\n \n pos_correct = 0\n pos_total = 0\n \n neg_correct = 0\n neg_total = 0\n \n neut_correct = 0\n neut_total = 0\n\n #\n # 0 = negative, 1 = positive, 2 = neutral\n #\n\n # Count results\n for i in xrange(total):\n #print i\n this_label = labels[i]\n\n if this_label == 0:\n # it's a negative...\n if results[i] == this_label:\n neg_correct += 1\n neg_total += 1\n elif this_label == 1:\n # it's a positive\n if results[i] == this_label:\n pos_correct += 1\n pos_total += 1\n else:\n # it's a neutral\n if results[i] == this_label:\n neut_correct += 1\n neut_total += 1\n \n print pos_total, neg_total, neut_total\n\n print \"Accuracy:\", ((pos_correct + neg_correct + neut_correct) / float(total))*100\n print \"Pos accuracy:\", (pos_correct / float(pos_total))*100\n print \"Neg accuracy:\", (neg_correct / float(neg_total))*100\n print \"Neut accuracy:\", (neut_correct / float(neut_total))*100\n print \"\"", "title": "" }, { "docid": "b58f0b06cfcb0bee4a20eb244c6dc923", "score": "0.6753528", "text": "def accuracy(self,data):\n results = [(np.argmax(self.feed_forward(x)),np.argmax(y)) for(x, y) in data]\n return sum( int(x == y) for(x,y) in results)", "title": "" }, { "docid": "dd47545f1bea6d12d56beb42e742d1f8", "score": "0.67478657", "text": "def get_accuracy(img_list):\n correct = 0\n for image in img_list:\n if image.orientation == image.pred_orientation:\n correct += 1\n return (0.0 + correct) / len(img_list)", "title": "" }, { "docid": "6f3b39d7c6dba26fa40b967dd173de13", "score": "0.673991", "text": "def accuracy(predictions, labels):\n\treturn (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0])", "title": "" }, { "docid": "dcc4ca4aa9ae06dd780cc71e0bbbe319", "score": "0.67337894", "text": "def compute_accuracy(predictions, labels):\n predicted_labels = torch.argmax(predictions, dim=1)\n n_correct = torch.sum(predicted_labels == labels).item()\n batch_size = torch.numel(labels)\n acc = float(n_correct) / float(batch_size)\n return acc * 100", "title": "" }, { "docid": "fa322c6d6684901617f42e510fbc0133", "score": "0.67304766", "text": "def accuracy(pred, label):\n pred = torch.argmax(pred, dim=1).long()\n acc = torch.mean((pred == label).float())\n pred = 
to_numpy(pred)\n label = to_numpy(label)\n p = precision_score(label, pred)\n r = recall_score(label, pred)\n return p, r, acc", "title": "" }, { "docid": "7ab3800f4403d386c873f367a9c74fda", "score": "0.67175525", "text": "def accuracy(label1,label2):\n\n return np.mean(label1 == label2)", "title": "" }, { "docid": "26e4cb6a09d393289ca3c4974155a25a", "score": "0.6700975", "text": "def accuracy(preds, targets):\r\n return (preds == targets).mean()", "title": "" }, { "docid": "a64602ba77507b4b32343d70336a4e4c", "score": "0.6698138", "text": "def get_accuracy(y_true, y_pred):\n scores = []\n for true, pred in zip(y_true, y_pred):\n scores.append(true == pred)\n avg_score = np.mean(scores)\n return avg_score", "title": "" }, { "docid": "afe7c15cbe14e393f5c49c33d372ea20", "score": "0.66981107", "text": "def accuracy(self):\n self.correct = zeros((N_BLOCKS, N_TEST_TRIALS_PER_BLOCK), int)\n for b in range(N_BLOCKS):\n for trial in range(N_TEST_TRIALS_PER_BLOCK):\n\n y = self.Y[b][trial][:2]\n response = self.Y[b][trial][2]\n label = self.classify( self.Y[b][trial][:2] )\n if response==label:\n self.correct[b,trial] = 1\n\n # average accuracy within each training block\n self.accuracy = map(mean, self.correct)", "title": "" }, { "docid": "57679b86597c0c9b4e0f4d54442c80ae", "score": "0.6690591", "text": "def test_accuracy(self, X_test, y_test):\n # make predictions for X_test\n yhat = self.predict(X_test)\n # calculate number of correct predictions\n correct_preds = 0\n for i in range(len(yhat)):\n # compare each prediction to actual classification value\n if yhat[i] == y_test[i]:\n correct_preds += 1\n # return accuracy\n return correct_preds/len(yhat)", "title": "" }, { "docid": "2e42c34bcc746fa8209563346b18ff73", "score": "0.66715795", "text": "def accuracy(self, y):\n return T.mean(T.eq(y, self.y_out))", "title": "" }, { "docid": "91605eaf550f99f7365098da7a58880f", "score": "0.6667447", "text": "def accuracy(self, outputs, labels):\n predicted = outputs.argmax(dim=1)\n correct = (predicted == labels).sum().item()\n return correct / labels.size(0)", "title": "" }, { "docid": "aa00c1ffefb685d3d396eef8da57ed1d", "score": "0.66671145", "text": "def get_accuracy(self, preds, y):\n\n _, rounded_preds = torch.max(torch.sigmoid(preds), 1)\n correct = (rounded_preds == y).float() # convert into float for division\n acc = correct.sum() / len(correct)\n return acc", "title": "" }, { "docid": "7e5dbfc3c034a5c53f59797d5f959556", "score": "0.6664019", "text": "def get_accuracy(truth, prediction):\n # Ensure that both lists have the same length\n assert len(truth) == len(prediction)\n correct = 0\n for i in range(len(truth)):\n\n # Check if elements are identical, increase correct count if they are\n if truth[i] == prediction[i]:\n correct += 1\n return correct/len(truth)", "title": "" }, { "docid": "1c81cadca19fe327a3ad13a5026321ff", "score": "0.66594195", "text": "def accuracy(self, x, y):\r\n predictions = self.predict(x)\r\n return np.mean(predictions == y)", "title": "" }, { "docid": "d3681c359df348d5d786835f69128fd2", "score": "0.6656654", "text": "def calculate_accuracy(predictions, targets):\n accurate_predictions = predictions.argmax(dim=1) == targets\n acc = float(accurate_predictions.cpu().float().mean(dim=0).numpy())\n return acc", "title": "" }, { "docid": "5bd2f21f3a3ba3f9aabae2f9b20a0fad", "score": "0.66513515", "text": "def report_accuracy(test_set, model, thresh = 0.5, output1 = 0.0, output2 = 1.0, label = 1):\n # Extracting X\n X = test_set[:,:-1]\n\n # Extracting labels\n Y = 
test_set[:,-1]\n\n # Predicted labels\n pred = model.predict(X)\n pred[pred <= thresh] = output1\n pred[pred > thresh] = output2\n\n accuracy = measure_accuracy(pred, Y)\n precision = measure_precision(pred, Y, label)\n recall = measure_recall(pred, Y, label)\n f_score = measure_f_score(precision, recall)\n\n return accuracy, precision, recall, f_score", "title": "" }, { "docid": "fb2259002b1d1929988ccaaf326a5801", "score": "0.664832", "text": "def compute_accuracy(predictions, data):\n total = len(data)\n correct = [predictions[i] == data[i].label\n for i in range(total)].count(True)\n return correct / total * 100", "title": "" }, { "docid": "4cee9fdf3c7cd63c8ad67060d4704e0c", "score": "0.6624935", "text": "def accuracy(predictions, labels):\n # argmax of prediction == which label it thinks\n # argmax of label = which label\n # equate, sum = number of accurate predictions\n num_correct = np.sum(np.argmax(predictions, axis=1) == np.argmax(labels, axis=1))\n return 100.0 * num_correct / predictions.shape[0]", "title": "" }, { "docid": "f2ce41fceeb8c99701f561343cb378a8", "score": "0.6612167", "text": "def evaluate_accuracy(model, X, y):\n\ty_pred = model.predict(X, verbose=0)\n\tacc = 100.0 * np.sum(np.argmax(y_pred, axis=1) == np.argmax(y, axis=1))/y.shape[0]\n\n\tpositive_examples = np.where(y[:, 1]==1)[0]\n\tpositive_acc = 100.0 * np.sum(np.argmax(y_pred[positive_examples], axis=1) == np.argmax(y[positive_examples], axis=1))/y[positive_examples].shape[0]\n\n\tnegative_examples = np.where(y[:, 0]==1)[0]\n\tnegative_acc = 100.0 * np.sum(np.argmax(y_pred[negative_examples], axis=1) == np.argmax(y[negative_examples], axis=1))/y[negative_examples].shape[0]\n\tprint(\"%s: %.2f%%\" % (\"Accuracy\", acc))\n\tprint(\"%s: %.2f%%\" % (\"Cat Accuracy\", positive_acc))\n\tprint(\"%s: %.2f%%\" % (\"Non Cat Accuracy\", negative_acc))", "title": "" }, { "docid": "565ebb572cc70c8d5a2f9cf044a10e4e", "score": "0.66119915", "text": "def accuracy(self, x_test, y_test):\n layer_output = self.classify(x_test)\n count = 0\n for i in range(len(layer_output)):\n if layer_output[i][0] < 0.5:\n layer_output[i][0] = 0\n else:\n layer_output[i][0] = 1\n if(layer_output[i][0] == y_test[i][0]):\n count += 1\n return count * 100.0 / len(layer_output)", "title": "" }, { "docid": "167b23f18e7bd0ccefb269a6a3831d55", "score": "0.66043854", "text": "def get_evaluation_metrics(model, x_test, y_test, show_summary=False):\n # Evaluation of the model\n scores = model.evaluate(x_test, y_test, verbose=0)\n if show_summary:\n print (model.summary())\n print (\"Accuracy: %.2f%%\" % (scores[1] * 100))", "title": "" }, { "docid": "445471ecfeeb358b8281596a9ce678ac", "score": "0.66040254", "text": "def test_accuracy2():\n import paddle.fluid as fluid\n batch1_size = 128\n accuracy_manager = fluid.metrics.Accuracy()\n # 假设第一个batch的准确率为0.9\n batch1_acc = 0.9\n accuracy_manager.update(value=batch1_acc, weight=batch1_size)\n print(\"expect accuracy: %.2f, get accuracy: %.2f\" %\n (batch1_acc, accuracy_manager.eval()))\n acc = batch1_acc\n tools.compare(acc, accuracy_manager.eval())\n # 假设第二个batch的准确率为0.8\n batch2_size = 64\n batch2_acc = 0.8\n accuracy_manager.update(value=batch2_acc, weight=batch2_size)\n print(\"expect accuracy: %.2f, get accuracy: %.2f\" %\n ((batch1_acc * batch1_size + batch2_acc * batch2_size) /\n (batch1_size + batch2_size), accuracy_manager.eval()))\n acc = (batch1_acc * batch1_size + batch2_acc * batch2_size) / (\n batch1_size + batch2_size)\n tools.compare(acc, accuracy_manager.eval())\n # 假设第三个batch的准确率为0.4\n 
batch3_size = 32\n batch3_acc = 0.4\n accuracy_manager.update(value=batch3_acc, weight=batch3_size)\n print(\"expect accuracy: %.2f, get accuracy: %.2f\" %\n ((batch1_acc * batch1_size + batch2_acc * batch2_size + batch3_acc *\n batch3_size) / (batch1_size + batch2_size + batch3_size),\n accuracy_manager.eval()))\n acc = (batch1_acc * batch1_size + batch2_acc * batch2_size + batch3_acc * batch3_size) / \\\n (batch1_size + batch2_size + batch3_size)\n tools.compare(acc, accuracy_manager.eval())", "title": "" }, { "docid": "f8c0ef777f1b7167ed6e1c9b9729d96d", "score": "0.6602881", "text": "def print_accuracy(self):\n print(\"Starting accuracy test...\")\n results = self.calc_accuracy()\n print(\"Naive Bayes:\", str(round(results[0] * 100, 4)) + \"%\")\n print(\"Logistic Regression:\", str(round(results[1] * 100, 4)) + \"%\")", "title": "" }, { "docid": "3791b272292162c01d6446739f64c248", "score": "0.65818816", "text": "def get_metrics(actual_classes, pred_classes):\r\n \r\n conf_mat = metrics.confusion_matrix(actual_classes, pred_classes)\r\n acc = metrics.balanced_accuracy_score(actual_classes, pred_classes)\r\n \r\n \"\"\"\r\n the next portion of code is copied from:\r\n https://towardsdatascience.com/multi-class-classification-extracting-performance-metrics-from-the-confusion-matrix-b379b427a872\r\n \"\"\"\r\n FP = conf_mat.sum(axis=0) - np.diag(conf_mat) \r\n FN = conf_mat.sum(axis=1) - np.diag(conf_mat)\r\n TP = np.diag(conf_mat)\r\n TN = conf_mat.sum() - (FP + FN + TP) \r\n \r\n FP = np.sum(FP)\r\n FN = np.sum(FN)\r\n TP = np.sum(TP)\r\n TN = np.sum(TN)\r\n \"\"\"\r\n end of copied code\r\n \"\"\"\r\n MK = (TP/(TP+FP)) + (TN/(TN+FN)) - 1\r\n return conf_mat, acc, MK", "title": "" }, { "docid": "97e8d7f2027f65f8f446ec0c2dcdbcbe", "score": "0.6580785", "text": "def accuracy(setting, model, outputs, labels, data=None):\n outputs = np.argmax(outputs, axis=1)\n return np.sum(outputs==labels)/float(labels.size)", "title": "" }, { "docid": "e33649ee5bd15de9a40870db638fe612", "score": "0.6576593", "text": "def calcul_metric_classification(y_true, y_pred, average=\"weighted\", print_score=True):\n acc = np.round(accuracy_score(y_true, y_pred), 4)\n f1 = np.round(f1_score(y_true, y_pred, average=average), 4)\n recall = np.round(recall_score(y_true, y_pred, average=average), 4)\n precision = np.round(precision_score(y_true, y_pred, average=average), 4)\n\n if print_score:\n logger.info('\\nScores :')\n logger.info('precision {} = {}'.format(average, precision))\n logger.info('recall {} = {}'.format(average, recall))\n logger.info('f1 score {} = {}'.format(average, f1))\n\n return acc, f1, recall, precision", "title": "" }, { "docid": "d68af29e5e067c4cf1c1ef438cb34078", "score": "0.6575327", "text": "def accuracy(preds: torch.Tensor, targets: torch.Tensor, cls_dim: int = 1,\n ) -> Tuple[float, float, float, float]:\n # Calc true positives, true negatives, and tn+tp+fn+fp for batch\n # from one-hot or binary encoding to class indices:\n class_pred = torch.argmax(preds, dim=cls_dim) if preds.size()[cls_dim] > 1 else \\\n (torch.sigmoid(preds) > 0.5).squeeze(cls_dim).float()\n\n class_gt = torch.argmax(targets, dim=cls_dim).float()\n # accuracy calculation\n batch_tp = float(torch.sum(class_pred * class_gt))\n batch_tn = float(torch.sum((1 - class_pred) * (1 - class_gt)))\n batch_all = float(class_gt.size()[0])\n batch_acc = (batch_tp + batch_tn) / batch_all\n return batch_acc, batch_tp, batch_tn, batch_all", "title": "" }, { "docid": "42c4a3b8172000d7c95e00f99b4c562a", "score": "0.65592915", "text": 
"def compute_metrics(preds, targets, eval=False):\n if eval is True:\n return classification_report(preds, targets, zero_division=1)\n\n report = classification_report(preds, targets, output_dict=True, zero_division=1)\n return report['accuracy'], report['macro avg']['f1-score']", "title": "" }, { "docid": "5b62ddcc44767bd29bf5c67d51de2a9e", "score": "0.6556655", "text": "def accuracy(predictions, labels):\n return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))\n / predictions.shape[0])", "title": "" }, { "docid": "21936be10f9e5b9f717c35038d482591", "score": "0.6554846", "text": "def accuracy_score(self):\n raise TypeError(\"Accuracy score is not valid for regression\")", "title": "" }, { "docid": "6e16b641aa905b3de4e8f2f079f0c427", "score": "0.6554267", "text": "def accuracy(confusion_matrix):\n\n total = 0\n correct = 0\n for ii in confusion_matrix:\n total += sum(confusion_matrix[ii].values())\n correct += confusion_matrix[ii].get(ii, 0)\n\n if total:\n return float(correct) / float(total)\n else:\n return 0.0", "title": "" }, { "docid": "d6af82edc1c5e172da9e22ecec55771c", "score": "0.65506816", "text": "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n # use predictions and targets to calculate accuracy\n pred = np.argmax(predictions, axis=1)\n\n lab = np.argmax(targets, axis=1)\n\n # check how many predictions are equal to the labels\n count = 0\n for i in range(len(pred)):\n if pred[i] == lab[i]:\n count += 1\n\n accuracy = count / len(pred)\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "title": "" }, { "docid": "27e002028ffd9c4b4d82221757dcdcd5", "score": "0.65506446", "text": "def accuracy(activations, fixations, gpu):\n\n ##Accuracy for one image\n\n #drop unnecessary first dimension of activations (there is only one channel)\n activations = activations.reshape(activations.size()[-2], activations.size()[-1])\n\n #how many fixations are there?\n num_fix = 0\n for i,j in fixations:\n if (i,j) == (-1000,-1000):\n break\n num_fix += 1\n #flatten\n activations_f = activations.view(-1)\n\n #find x largest values and their indices in flattened activation-tensor\n lar_val, lar_val_idx = torch.topk(activations_f, num_fix)\n\n idx_unfl = []\n for idx_fl in lar_val_idx:\n idx_unfl.append(map_idx(activations, idx_fl.item(), gpu))\n\n #see if they match with fixations indices\n hits = 0\n #does each fixation lead to one of the x biggest activation values?\n for fix in range(num_fix):\n for idx in idx_unfl:\n current = torch.all(torch.eq(idx,fixations[fix]))\n hits += current.item()\n\n #calcualte proportion of hits\n acc = hits / num_fix\n \n return acc, hits, num_fix", "title": "" }, { "docid": "4c1f9bb3a333b8b9e5236d8594353943", "score": "0.6545999", "text": "def get_validation_accuracy(self, nImages=-1):\n return self.env.get_validation_accuracy(nImages)", "title": "" }, { "docid": "516c187db085cb659fed27254f0276b1", "score": "0.6539113", "text": "def accuracy_score(contingency):\n \n hits = contingency.where(contingency.reference_category == contingency.comparison_category) \\\n .sum(dim=('reference_category','comparison_category'), skipna=True)\n N = _sum_contingency(contingency, 'total')\n \n return (hits / N).rename('accuracy_score')", "title": "" }, { "docid": "6f4fad119067a1f588d635bd8c87053f", "score": "0.65383565", "text": "def get_accuracy(test_loader, net):\n total = 0\n correct = 0\n for ingredients, type in test_loader:\n outputs = 
net(ingredients)\n prediction = torch.argmax(outputs.data, 1)\n total += type.shape[0]\n correct += torch.sum(prediction == type)\n return float(correct) / float(total)", "title": "" }, { "docid": "276af3676b487312e92fe2864c49c254", "score": "0.65355015", "text": "def accuracy(t, prediction):\n return accuracy_score(t, prediction, normalize=True)", "title": "" }, { "docid": "76970d9d5850e2e2ab733c028cf2901b", "score": "0.653527", "text": "def accuracy(result,retrieval_labels,retrieval_number, feature_extraction_method, probability_vector):\n if(feature_extraction_method == 'cnn_training' or feature_extraction_method == 'fine_tuning_inception'):\n text = []\n cont = 0\n for probability_query in probability_vector:\n class_ = np.argmax(probability_query)\n\n format_str = ('%.2f class %d\\n (GT class: %.2f)')\n text.append(format_str % (probability_query[class_], class_,int(retrieval_labels[cont])))\n cont+=1\n return text\n else:\n percent = []\n for i in result:\n un = np.unique(i)\n r = []\n for j in un:\n r.append(sum(i==j)/len(i)*100)\n percent.append((max(r),un[np.argmax(r)]))\n text = []\n for i in range(len(result)):\n text.append(str(percent[i][0])+' class '+ str(percent[i][1])+'\\n (GT class: '+str(int(retrieval_labels[i]))+')')\n return text", "title": "" }, { "docid": "6e96a910a3cf1553b529491dd3744177", "score": "0.65332246", "text": "def accuracy_func(self, logits, labels):\n num_correct_predictions = tf.equal(tf.argmax(logits, 1), labels)\n\n return tf.reduce_mean(tf.cast(num_correct_predictions, tf.float32))", "title": "" }, { "docid": "f480eaa3622ade5f0b46de325b7c963c", "score": "0.6525663", "text": "def accuracy(self, ypred, yexact) -> float:\r\n p = np.array(ypred == yexact, dtype=int)\r\n return np.sum(p) / float(len(yexact))", "title": "" }, { "docid": "522b71c011b4787bd3e178e0b34d2388", "score": "0.6522072", "text": "def calculate_accuracy(self, output, target):\n n, _ = output.shape\n output = 1 * (output > self.threshold) ## change the true false label in to 0,1\n num_correct = np.sum(output==target,axis=0)\n num_dise_pred = np.sum(output*target,0)\n num_dise_tar = np.sum(target,0)\n num_heth_pred = np.sum((1-output)*(1-target),axis=0)\n num_heth_tar = np.sum((1-target), axis=0)\n num_sample = n\n\n return (num_sample, num_correct, num_dise_pred, num_dise_tar, num_heth_pred, num_heth_tar)", "title": "" }, { "docid": "84a37523b153311666f412df46bc56c4", "score": "0.6521625", "text": "def calculate_accuracy(batch, pipeline, predict_name):\n predict = pipeline.get_variable(predict_name)\n predict_top3 = predict[-1].argsort()[:, -3::]\n\n top1 = np.mean(np.argmax(predict[-1], axis=1) == batch.labels)\n top3 = np.mean([1 if batch.labels[i] in pred else 0 for i, pred in enumerate(predict_top3)])\n return top1, top3", "title": "" }, { "docid": "b3a24ace5e20dd8bad60735c9cd301e1", "score": "0.6520743", "text": "def compute_metrics(eval_pred):\n labels = eval_pred.label_ids\n preds = eval_pred.predictions.argmax(-1)\n\n ## TODO: Return a dictionary containing the accuracy, f1, precision, and recall scores.\n ## You may use sklearn's precision_recall_fscore_support and accuracy_score methods.\n\n metrics = precision_recall_fscore_support(labels,preds,average='binary')\n accuracy = accuracy_score(labels,preds)\n return {'accuracy': accuracy, 'precision': metrics[0], 'recall': metrics[1], 'f1': metrics[2] }", "title": "" }, { "docid": "b73e557006dd2cee06545cdfe53b59bb", "score": "0.6518828", "text": "def accuracy(y_true: np.ndarray,\n y_pred: np.ndarray) -> float:\n\n return 
accuracy_score(y_true, y_pred)", "title": "" }, { "docid": "71ec9bf883a5b5757d3c3703abefd66c", "score": "0.651382", "text": "def accuracy_per_class(output, y):\n num_class = output.size(1)\n cnt_per_class = np.zeros(num_class) ## num of classes\n acc_per_class = np.zeros(num_class) ## num of classes\n np.add.at(cnt_per_class, y.cpu().numpy(), 1)\n np.add.at(acc_per_class, y.cpu().numpy(), (output.max(1)[1] == y).cpu().numpy())\n return cnt_per_class, acc_per_class", "title": "" }, { "docid": "6568adfc6951f310e0056b5422951df4", "score": "0.64932144", "text": "def accuracy(model, X_test, y_test):\n predictions = model.predict(X_test)\n return (np.array(predictions) == np.array(y_test)).mean()", "title": "" }, { "docid": "8488dac1af75d552776bf1a3a65dce47", "score": "0.64835525", "text": "def accuracy(self, logits, labels):\n correct_predictions = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))\n return tf.reduce_mean(tf.cast(correct_predictions, tf.float32))", "title": "" }, { "docid": "5295d676c0c4752c240efc4fc32063c3", "score": "0.64805", "text": "def accuracy(labels_true, labels_predicted, tolerance, square_error=False):\n correct_results = np.zeros(len(labels_true))\n for i in range(len(labels_true)):\n if (square_error):\n diff = np.square(labels_predicted[i] - labels_true[i])\n else:\n diff = np.absolute(labels_predicted[i] - labels_true[i])\n # print labels_true[i]\n # print labels_predicted[i]\n # print 'diff=',diff\n\n if (diff <= tolerance):\n correct_results[i] = 1\n else:\n correct_results[i] = 0\n\n correct = np.sum(correct_results)\n total = float(len(labels_true))\n acc = correct/total\n\n # print 'Accuracy'\n # print 'correct =', correct\n # print 'total =', total\n # print 'acc =', acc\n # print type(acc)\n return acc", "title": "" }, { "docid": "318b9cdc7f1c2adac93b94138da1bdd3", "score": "0.6479837", "text": "def accuracy(self, X, t):\n return accuracy_score(t, self.prediction(X))", "title": "" }, { "docid": "a76d9d3ec8e0f16486c31391cd26290d", "score": "0.6475529", "text": "def accuracy_(output, target, topk=(1,)) -> Tuple[\n List[float], torch.autograd.Variable]:\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n topk_acc = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n topk_acc.append(\n (correct_k.mul_(100.0 / batch_size)).data.cpu().numpy().item())\n return topk_acc, pred[0]", "title": "" }, { "docid": "fce62069bcf95f3772cd22142c8d0755", "score": "0.647167", "text": "def accuracy(pred, gt, eps=1e-5):\n\n N = gt.size(0)\n pred_flat = pred.view(N, -1)\n gt_flat = gt.view(N, -1)\n tp = torch.sum((pred_flat != 0) * (gt_flat != 0))\n fp = torch.sum((pred_flat != 0) * (gt_flat == 0))\n tn = torch.sum((pred_flat == 0) * (gt_flat == 0))\n fn = torch.sum((pred_flat == 0) * (gt_flat != 0))\n\n score = ((tp + tn).float() + eps) / ((tp + fp + tn + fn).float() + eps)\n\n return score.sum() / N", "title": "" }, { "docid": "e8282480bf01e65d3344d9f3ac680b3a", "score": "0.6468633", "text": "def accuracy(y_true, y_pred):\n y_pred_decision = y_pred > 0.5\n return (y_pred_decision.float() == y_true.float()).float().mean()", "title": "" }, { "docid": "5689f736d1b9436ffe14c18cb90274f6", "score": "0.646839", "text": "def accuracy(output, y):\n return y.shape[0], (output.max(1)[1] == y ).sum().item()", "title": "" }, { "docid": "e65c91c845b31873112ed87adc1ad5d4", "score": "0.6467158", "text": "def 
classification_accuracy(classification_scores, true_scores):\r\n return np.mean(np.argmax(classification_scores, axis=1) == np.argmax(true_scores, axis=1))", "title": "" }, { "docid": "1d78c3ac7201ec1da4e3d663e3634790", "score": "0.64667356", "text": "def accuracy(predictions, targets):\r\n\r\n ########################\r\n # PUT YOUR CODE HERE #\r\n #######################\r\n\r\n # Turn into array of 1's for each row if argmax is the same - then take mean\r\n mask = np.where(predictions.argmax(axis=1) == targets.argmax(axis=1), 1, 0)\r\n accuracy = np.mean(mask)\r\n\r\n ########################\r\n # END OF YOUR CODE #\r\n #######################\r\n\r\n return accuracy", "title": "" }, { "docid": "f6c4fabc57b66c0b2ea412bc882c8819", "score": "0.6464271", "text": "def printAccuracy(pred, labels_test):\n\tprint(TAG, \"Accuracy Score:\", accuracy_score(pred,labels_test))", "title": "" }, { "docid": "ad42a99e2ed3450fda78e0358d33656f", "score": "0.64604384", "text": "def accuracy(a, b):\n return (np.array(a) == np.array(b)).mean()", "title": "" }, { "docid": "6bf1190a76d86737b7cdaf0a860707cf", "score": "0.6460172", "text": "def accuracy(y_test, predictions):\n accuracy = 0.0\n\n for i in range(len(y_test)):\n intersection = 0.0\n union = 0.0\n for j in range(len(y_test[1])):\n # a = int(y_test[i][ j])\n # b = int(predictions[i][j])\n if int(y_test[i][ j]) == 1 or int(predictions[i][ j]) == 1:\n union += 1\n if int(y_test[i][ j]) == 1 and int(predictions[i][j]) == 1:\n intersection += 1\n\n if union != 0:\n accuracy = accuracy + float(intersection / union)\n\n accuracy = float(accuracy / len(y_test))\n\n return accuracy", "title": "" }, { "docid": "d1cd718ba15764c5166d1a6e2a0332a4", "score": "0.6458382", "text": "def getAccuracy(self, data=None, labels=None, atype = None):\n\n if atype == 'test':\n data,labels = self.__readPhotos(ptest_ims,ptest_labels)\n return super().getAccuracy(data=data, labels=labels)", "title": "" } ]
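The negative passages above all compute classification accuracy in slightly different ways; for orientation, a minimal numpy-only sketch of the common idea (compare argmax predictions against integer ground-truth labels and take the mean) is given here. This is an illustrative example only, not one of the dataset passages:

    import numpy as np

    def accuracy(logits: np.ndarray, labels: np.ndarray) -> float:
        """Fraction of samples whose argmax prediction matches the integer label."""
        predictions = np.argmax(logits, axis=1)   # predicted class index per row
        return float(np.mean(predictions == labels))

    # Example (illustrative values): accuracy(np.array([[0.1, 0.9], [0.8, 0.2]]),
    #                                         np.array([1, 0])) == 1.0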
4623600639342d32adeb153e3e2c9649
Loads TIFF images from paths as numpy arrays.
[ { "docid": "bd8456f3f24ed1c045f7ed3f6bda792e", "score": "0.65337104", "text": "def load_images(self) -> Generator[np.ndarray, None, None]:\n for path_ in self._paths:\n if not os.path.exists(path=path_):\n raise ImageNotFoundError(path_)\n\n try:\n tiff = TIFF.open(filename=path_, mode='r')\n except IOError as e:\n raise TiffLoadError() from e\n\n for im in tiff.iter_images():\n arr = np.array(im, np.uint8)\n if len(arr.shape) > 2:\n logging.warning(\n 'Image {} has not two dimensional greyscale data, '\n 'actually has shape of {} - filtering first dimension.'.format(\n path_,\n arr.shape\n ))\n yield arr[:, :, 0]\n\n yield arr", "title": "" } ]
[ { "docid": "37bc7bb047c51d9c59c99605658da6b9", "score": "0.7312007", "text": "def read_tiff_sequence (path):\n\ttomo_files = sorted(glob(path))\n\tnum_files = len(tomo_files)\n\n\t# Read first to understand sizes:\n\ttomo = imread(tomo_files[0])\n\n\t# Prepare dataset:\n\tdata = zeros((tomo.shape[0], tomo.shape[1], num_files), tomo.dtype)\n\n\t# Read all files:\n\tfor i in range(0, num_files): \n\t\t\n\t\tim = imread(tomo_files[i])\n\t\tdata[:,:,i] = im\n\n\treturn data", "title": "" }, { "docid": "d8fa87586287cda2b8584f72aafd71fc", "score": "0.7176566", "text": "def numpy_from_tiff(path):\n\n return imageio.volread(path)", "title": "" }, { "docid": "d00d838e1cf9bcdc38b99cf29a44fd66", "score": "0.7149819", "text": "def load_tiff(tiffpath):\r\n data = xr.open_rasterio(tiffpath)\r\n return data.values", "title": "" }, { "docid": "b2018cf9e2be50c10a49cd500bbbc520", "score": "0.70712024", "text": "def _load_multipage_tiff(self, path):\n return np.array([np.array(p) for p in ImageSequence.Iterator(Image.open(path))])", "title": "" }, { "docid": "923db230a355ead48188f99be93d84e1", "score": "0.6913694", "text": "def load_tiff_data(filePath, dtype='float32'):\n X = [];\n for dataFile in filePath:\n if not os.path.isfile(dataFile):\n raise RuntimeError('could not find file \"%s\"' % dataFile)\n \n # load the data from TIF files\n dataImg = Image.open(dataFile)\n \n Xi = np.array(dataImg, dtype=dtype)\n if len(Xi.shape)==3:\n Xi = Xi[...,0]\n Xi = np.reshape(Xi, (1, Xi.shape[0], Xi.shape[1])) # add a slice dimension\n X.append(Xi)\n if len(X)==0:\n raise RuntimeError('No file was found in {}!'.format(filePath))\n X = np.concatenate(X, axis=0) # list -> tensor\n \n return X", "title": "" }, { "docid": "4bd4d63ee7aad4bb246d359787465661", "score": "0.6861453", "text": "def load_images(path):\n with gzip.open(path) as f:\n # First 16 bytes are magic_number, n_imgs, n_rows, n_cols\n pixels = np.frombuffer(f.read(), 'B', offset=16)\n return pixels.reshape(-1, 784)", "title": "" }, { "docid": "158ba7107ed8eda522667fa101644fab", "score": "0.67458874", "text": "def get_images(self, subpath=\"\"):\n directory = self.path + \"Radiographs/\" + subpath\n filenames = fnmatch.filter(os.listdir(directory), '*.tif')\n images = []\n for fname in filenames:\n img = cv2.imread(directory + \"/\" + fname, 0)\n images.append(img)\n\n return np.array(images)", "title": "" }, { "docid": "e955841079f75cc9bf1f1cf2698fda1a", "score": "0.6691674", "text": "def load_images(path, size=(64, 64)):\n data_list = list()\n for filename in listdir(path):\n try:\n pixels = load_img(path + filename, target_size=size) # load and resize the image\n pixels = img_to_array(pixels) # convert to numpy array\n data_list.append(pixels)\n except:\n pass\n return asarray(data_list)", "title": "" }, { "docid": "8a1529ce2dad00e0cbb0e4c3ecb07518", "score": "0.6569826", "text": "def load_nii(paths: List[bytes]) -> np.ndarray:\n\n # creating the array to store the data\n data: Union[np.ndarray, None] = None\n\n # for every path in the list of given paths\n for i in range(len(paths)):\n\n # read the file\n d = sitk.GetArrayFromImage(sitk.ReadImage(paths[i].decode('utf-8')))\n\n # allocating the variable to store the data if it has not been allocated\n # already\n if data is None:\n data = np.zeros((len(paths), *d.shape), dtype=np.float32)\n\n # assigning the file contents to the data array\n data[i] = d\n\n # returning the data\n return data", "title": "" }, { "docid": "19be8d371925e1fdbaf342863fce4aea", "score": "0.64484924", "text": "def 
load_images(directory, bits, mask=None):\r\n elements = []\r\n pixels = []\r\n shape = None\r\n for path in directory.glob(f\"*_{bits}bt_*.tif\"):\r\n #import pdb; pdb.set_trace()\r\n #print(path.basename)\r\n elements.append(path.stem.split(\"_\")[-1])\r\n if shape is None:\r\n shape = imread(path).shape\r\n pixels.append(imread(path).flatten())\r\n\r\n df = pd.DataFrame(np.dstack(pixels)[0], columns=elements)\r\n df = df.reset_index().rename(columns={\"index\": \"order\"})\r\n\r\n if mask:\r\n df['mask'] = (imread(mask).flatten() > 0).astype(int)\r\n else:\r\n df['mask'] = 1\r\n\r\n return df, shape", "title": "" }, { "docid": "0c0af77398f7fa4b4db2daed672f9d98", "score": "0.6446353", "text": "def load_images(images_path, as_array=True):\n file_list = os.listdir('./HBTN')\n images = []\n file_names = []\n for file in sorted(file_list):\n path = images_path + '/' + file\n image = cv2.imread(path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n images.append(image)\n file_names.append(file)\n if as_array:\n images = np.asarray(images)\n return images, file_names", "title": "" }, { "docid": "fc3a787ecb75eb398cd2bae4dc1c3e6b", "score": "0.64295894", "text": "def image_file_to_image_array(path):\n \n return io.imread(path)", "title": "" }, { "docid": "52e48f890b4456bbd8bf53306754e41a", "score": "0.6428036", "text": "def imloader(fname):\n return np.asarray(imageio.imread(fname))", "title": "" }, { "docid": "d47b487c567bb2c18cbe121c37796047", "score": "0.6417844", "text": "def numpy_parse_image(image_path):\n image_path = Path(bytes.decode(image_path))\n with TiffFile(image_path) as tifi:\n image = tifi.asarray()\n return image", "title": "" }, { "docid": "99e6315f5f73a2700f28beb79ccfc8b1", "score": "0.6310351", "text": "def _images(path):\n with gzip.open(path) as f:\n # First 16 bytes are magic_number, n_imgs, n_rows, n_cols\n pixels = np.frombuffer(f.read(), 'B', offset=16)\n return pixels.reshape(-1, 784).astype('float32') / 255", "title": "" }, { "docid": "80881c93347b15392b1061d77d95e846", "score": "0.6308395", "text": "def _prep(self, paths):\n assert isinstance(paths, str)\n ext = os.path.splitext(paths)[1]\n\n tif_path = None\n if ext == '.zip': # Need to unpack\n\n tif_path = self._unpack(paths)\n\n if ext == '.vrt': # Already unpacked\n\n unpack_folder = os.path.dirname(paths)\n tif_path = get_merged_path(unpack_folder)\n\n assert tif_path is not None, f'Error: Unsupported extension {ext}'\n\n return [tif_path]", "title": "" }, { "docid": "a5dd56300408ecb6a582b257b9f7e95e", "score": "0.6273469", "text": "def load_images():\n url = \"https://clockdrawingimages1.s3.us-west-1.amazonaws.com/10000003.tif\"\n\n response = requests.get(url) # , stream = True)\n f = io.BytesIO(response.content)\n im = Image.open(f)\n imarray = np.logical_not(np.array(im)).astype(int)\n return imarray", "title": "" }, { "docid": "9fdecf4e3741d38384dc4dbbf75c41ac", "score": "0.6185078", "text": "def read_images(path):\n id_num = 0\n images, id_nums = [], []\n\n # Process individual files from subdirectories in given path\n for dirname, dirnames, filenames in os.walk(path):\n\n for subdirname in dirnames:\n subject_path = os.path.join(dirname, subdirname) # path/subject_path/\n\n for imagename in os.listdir(subject_path): # path/subject_path/imagename\n img = read_single_image(os.path.join(subject_path, imagename))\n \n if img is not None:\n images.append(np.asarray(img, dtype='uint8'))\n id_nums.append(id_num)\n\n id_num += 1\n\n assert len(images) == len(id_nums)\n return [images, id_nums]", "title": "" }, 
{ "docid": "bed98cff4d02acf85007bfca69838506", "score": "0.617069", "text": "def path2arr(path):\n try:\n img = Image.open(path)\n img = img.resize((47, 55))\n # return np.array(img, dtype=\"int\")\n return img\n except:\n return None", "title": "" }, { "docid": "9b1bd30407baaf5af38cdac2ded282f8", "score": "0.6160901", "text": "def load_dataset(path):\n data = load_files(path)\n dog_files = np.array(data['filenames'])\n dog_targets = np_utils.to_categorical(np.array(data['target']), 133)\n return dog_files, dog_targets", "title": "" }, { "docid": "ce86a37fce1c56197358883e008be6b5", "score": "0.6132814", "text": "def extract_to_numpy(root_path):\n\n def process(filepath, N, type='images'):\n with gzip.open(filepath) as bs:\n if type == 'images':\n bs.read(16)\n temp_buf = bs.read(28 * 28 * N)\n temp_data = np.frombuffer(temp_buf, dtype=np.uint8).astype(np.float32)\n temp_data = temp_data.reshape(N, 28*28)\n return temp_data\n\n elif type == 'labels':\n bs.read(8)\n temp_buf = bs.read(1 * N)\n temp_labels = np.frombuffer(temp_buf, dtype=np.uint8).astype(np.int64)\n return temp_labels\n\n else:\n pass\n\n\n train_images = process(os.path.join(root_path, 'train-images-idx3-ubyte.gz'), N=60000)\n train_labels = process(os.path.join(root_path, 'train-labels-idx1-ubyte.gz'), N=60000, type='labels')\n test_images = process(os.path.join(root_path, 't10k-images-idx3-ubyte.gz'), N=10000)\n test_labels = process(os.path.join(root_path, 't10k-labels-idx1-ubyte.gz'), N=10000, type='labels')\n\n return train_images, train_labels, test_images, test_labels", "title": "" }, { "docid": "d07790848fdbe7a3681f562e99512beb", "score": "0.61297756", "text": "def load_images_to_array(folder_path):\n\n # get all images\n images_paths = get_images_in_folder(folder_path)\n\n # convert images to numpy arrays\n return [np.array(Image.open(path)) for path in images_paths]", "title": "" }, { "docid": "2b0b130a6635192f35ff1b5b06bf8722", "score": "0.60836786", "text": "def load_image_into_numpy_array(path):\n return np.array(Image.open(path))", "title": "" }, { "docid": "cda15bbb1e60d5bbcb6d7ba21d551753", "score": "0.6083134", "text": "def load_image_into_numpy_array(path):\n return np.array(Image.open(path))", "title": "" }, { "docid": "63465c0063074f278984225b322577b4", "score": "0.60663617", "text": "def tifffile_arr(fname, verbose=False):\r\n with TiffFile(fname) as tif:\r\n if verbose:\r\n print(\"\\nTiff file: {}\\nflags: {}\".format(fname, tif.flags))\r\n if tif.is_shaped and verbose:\r\n print(\"Shape info: {}\\n\".format(tif.shaped_metadata))\r\n if tif.is_geotiff and verbose:\r\n print(\"Geotiff info:\")\r\n d = tif.geotiff_metadata\r\n for key in d.keys():\r\n print(\"- {}: {}\".format(key, d[key]))\r\n #\r\n a = tif.asarray()\r\n #\r\n if tif.is_tiled:\r\n a = np.rollaxis(a, axis=2, start=0)\r\n return a, tif # uncomment and return tif for testing\r", "title": "" }, { "docid": "7983e897cefc7efe269a8969cbaef4c1", "score": "0.60507065", "text": "def imread(files, **kwargs):\n kwargs_file = {}\n if 'multifile' in kwargs:\n kwargs_file['multifile'] = kwargs['multifile']\n del kwargs['multifile']\n else:\n kwargs_file['multifile'] = True\n kwargs_seq = {}\n if 'pattern' in kwargs:\n kwargs_seq['pattern'] = kwargs['pattern']\n del kwargs['pattern']\n\n if isinstance(files, basestring) and any(i in files for i in '?*'):\n files = glob.glob(files)\n if not files:\n raise ValueError('no files found')\n if len(files) == 1:\n files = files[0]\n\n if isinstance(files, basestring):\n with TiffFile(files, **kwargs_file) as 
tif:\n return tif.asarray(**kwargs)\n else:\n with TiffSequence(files, **kwargs_seq) as imseq:\n return imseq.asarray(**kwargs)", "title": "" }, { "docid": "c6439a851ca5f4ffc773bac5a261b8cf", "score": "0.6050581", "text": "def open_tiffs(self):\n\n # Open tiffs\n filenames = self.filenames\n section_sets = dict()\n section_meta = dict()\n for fn in filenames:\n # Break up filename into components\n comp_ = path.basename(fn)[:-5].split(\"_\")\n if len(comp_) >= 6:\n section = comp_[2]\n # Add new section\n if section_sets.setdefault(section, dict()) == {}:\n im = imageio.imread(fn)\n section_meta[section] = {'shape':im.shape,'dtype':im.dtype,'filenames':[]}\n\n for i, comp in enumerate(comp_):\n # Add components\n section_sets[section].setdefault(i, set())\n section_sets[section][i].add(comp)\n section_meta[section]['filenames'].append(fn)\n\n im_names = []\n for s in section_sets.keys():\n # Lazy open images\n filenames = section_meta[s]['filenames']\n lazy_arrays = [dask.delayed(imageio.imread)(fn) for fn in filenames]\n shape = section_meta[s]['shape']\n dtype = section_meta[s]['dtype']\n lazy_arrays = [da.from_delayed(x, shape=shape, dtype=dtype) for x in lazy_arrays]\n\n # Organize images\n fn_comp_sets = list(section_sets[s].values())\n if len(comp_) == 6:\n comp_order = {'ch':0, 'AorB':1, 's':2, 'r':3, 'x':4, 'o':5}\n elif len(comp_) == 7:\n comp_order = {'ch':0, 'AorB':1, 's':2, 'r':3, 'i':4, 'x':5, 'o':6}\n int_comps = ['ch', 'r', 'x', 'o']\n for i in [comp_order[c] for c in comp_order.keys() if c in int_comps]:\n fn_comp_sets[i] = [int(x[1:]) for x in fn_comp_sets[i]]\n fn_comp_sets[i] = sorted(fn_comp_sets[i])\n if 'i' in comp_order.keys():\n i = comp_order['i']\n fn_comp_sets[i] = [int(x) for x in fn_comp_sets[i]]\n fn_comp_sets[i] = sorted(fn_comp_sets[i])\n remap_comps = [fn_comp_sets[0], fn_comp_sets[3], fn_comp_sets[4], fn_comp_sets[6], [1], fn_comp_sets[5]]\n # List of sorted x steps for calculating overlap\n #x_steps = sorted(list(fn_comp_sets[5]), reverse=True)\n x_steps = fn_comp_sets[5]\n else:\n remap_comps = [fn_comp_sets[0], fn_comp_sets[3], fn_comp_sets[5], [1], fn_comp_sets[4]]\n # List of sorted x steps for calculating overlap\n #x_steps = sorted(list(fn_comp_sets[4]), reverse=True)\n x_steps = fn_comp_sets[4]\n\n a = np.empty(tuple(map(len, remap_comps)), dtype=object)\n for fn, x in zip(filenames, lazy_arrays):\n comp_ = path.basename(fn)[:-5].split(\"_\")\n channel = fn_comp_sets[0].index(int(comp_[0][1:]))\n cycle = fn_comp_sets[3].index(int(comp_[3][1:]))\n co = comp_order['o']\n obj_step = fn_comp_sets[co].index(int(comp_[co][1:]))\n co = comp_order['x']\n x_step = fn_comp_sets[co].index(int(comp_[co][1:]))\n if 'i' in comp_order.keys():\n co = comp_order['i']\n image_i = fn_comp_sets[co].index(int(comp_[co]))\n a[channel, cycle, image_i, obj_step, 0, x_step] = x\n else:\n a[channel, cycle, obj_step, 0, x_step] = x\n\n # Label array\n if 'i' in comp_order.keys():\n dim_names = ['channel', 'cycle', 'image', 'obj_step', 'row', 'col']\n coord_values = {'channel':fn_comp_sets[0], 'cycle':fn_comp_sets[3], 'image':fn_comp_sets[4], 'obj_step':fn_comp_sets[6]}\n else:\n dim_names = ['channel', 'cycle', 'obj_step', 'row', 'col']\n coord_values = {'channel':fn_comp_sets[0], 'cycle':fn_comp_sets[3], 'obj_step':fn_comp_sets[5]}\n try:\n im = xr.DataArray(da.block(a.tolist()),\n dims = dim_names,\n coords = coord_values,\n name = s[1:])\n\n\n im = self.register_channels(im.squeeze())\n im = im.assign_attrs(first_group = 0, machine = '', scale=1,\n overlap=0, fixed_bg = 
0)\n im_names.append(s[1:])\n except:\n im = None\n self.im.append(im)\n\n return im_names", "title": "" }, { "docid": "e95d1be505e36b95f652b9c4ee0c3ce9", "score": "0.60438997", "text": "def load_data():\n \n # Creates a list of file names in the data directory\n filelist = glob.glob(\"../data/Images/*.tif\")\n \n # Loads all data images in a list\n data = [Image.open(fname) for fname in filelist]\n \n # Creates a list of file names in the labels directory\n filelist = glob.glob(\"../data/Labels/*.tif\")\n \n # Loads all labels images in a list\n labels = [Image.open(fname) for fname in filelist]\n\n return data, labels", "title": "" }, { "docid": "0b8187cb38dce913b2d4d6bc00c62512", "score": "0.60433304", "text": "def load_np_files(data, target):\n\n # Get data\n x_data = np.load(data)\n y_data = np.load(target)\n\n # Need to add that extra dimension for grayscale depth of 1 channel\n x_data = np.expand_dims(x_data, 1)\n print(x_data.shape)\n\n # Zip image data and labels together\n data = [(x, y) for x, y in zip(x_data, y_data)]\n\n # turn targets into tensors\n y_tensor = torch.from_numpy(y_data)\n\n return data, y_tensor", "title": "" }, { "docid": "1846dc9b9db69eef3b41372fad13f3a2", "score": "0.60404104", "text": "def load_data(mdir, n_h, n_w):\n f_images = mdir + 'images/'\n f_targets = mdir + 'targets/'\n f_i = os.listdir(f_images)\n f_i.sort()\n f_t = os.listdir(f_targets)\n f_t.sort() \n images, images_o = img_transform(f_images, f_i, n_h, n_w)\n targets = np.zeros((len(f_t), 1))\n for j in range(len(f_t)):\n data = json.load(open(f_targets + f_t[j]))\n targets[j] = data['relative_reading']\n return images, targets, images_o", "title": "" }, { "docid": "26ec558835401c5bb0694e584adbc0d2", "score": "0.6033038", "text": "def load_and_resize_images(paths, resize):\n\n images = np.zeros((len(paths), *resize, 3), dtype=np.uint8)\n for i, path in enumerate(paths):\n img = imread(path)\n img = cv2.resize(img, resize[::-1])\n images[i] = img\n\n return images", "title": "" }, { "docid": "9f60df7f5c97a7465860fd675f4839b2", "score": "0.60119665", "text": "def get_images():\n filename = 'images.npy'\n\n if os.path.exists(filename):\n return np.load(filename)\n else:\n X = []\n\n for path in glob.glob(vehicle_path):\n X.append(io.imread(path))\n for path in glob.glob(non_vehicle_path):\n X.append(io.imread(path))\n\n X = np.array(X)\n np.save(filename, X)\n\n return X", "title": "" }, { "docid": "63efe72095efa8f3dde3d18ff9afede1", "score": "0.5965849", "text": "def load_image(path):\n return misc.imread(path)", "title": "" }, { "docid": "46749ba41acda629e66efcf380e1271f", "score": "0.59632874", "text": "def import_imagesText(folder):\n images = []\n a =os.listdir(folder)\n for image in a: \n img=numpy.loadtxt(os.path.join(folder,image))\n images.append(img)\n return images", "title": "" }, { "docid": "016a7e411db71bec8b2a8c64304a236d", "score": "0.5959633", "text": "def load_mnist(path, kind='train'):\r\n labels_path = os.path.join(path,\r\n '%s-labels-idx1-ubyte.gz'\r\n % kind)\r\n images_path = os.path.join(path,\r\n '%s-images-idx3-ubyte.gz'\r\n % kind)\r\n\r\n with gzip.open(labels_path, 'rb') as lbpath:\r\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8, offset=8)\r\n\r\n with gzip.open(images_path, 'rb') as imgpath:\r\n images = np.frombuffer(imgpath.read(), dtype=np.uint8, offset=16).reshape(len(labels), 784)\r\n\r\n return images, labels", "title": "" }, { "docid": "4440e23857a5bbf0bc4af50150450a66", "score": "0.5948486", "text": "def images_from_disk(im_path, names):\n images, nf 
= [], []\n for n in names:\n p = path.join(im_path, n)\n im = cv2.imread(p)\n if im is None:\n nf.append(p)\n else:\n images.append(im)\n return images, nf", "title": "" }, { "docid": "6ba1119aad8bba40ba0dc83f47c0787b", "score": "0.593786", "text": "def read_tiff(img_path, uint8=True, cm=None):\n # Read the image\n read, imgs = cv2.imreadmulti(img_path, flags=cv2.IMREAD_ANYDEPTH) \n assert read, \"The image could not be read. Make sure the file is accessible and try again.\"\n \n if uint8:\n # Scale down to uint8\n imgs = [cv2.convertScaleAbs(img, alpha=(255/img.max())) for img in imgs] \n if cm is not None: \n # Apply colormap (for visualization only)\n imgs_cm = [cv2.applyColorMap(img, cm) for img in imgs] \n return imgs, imgs_cm\n\n return imgs", "title": "" }, { "docid": "04f659de7f48596658429f8721d31d18", "score": "0.5913557", "text": "def read(self, path, gray=True):\n path_list = self._readAllFilesPath(path)\n images = defaultdict(lambda: defaultdict(list))\n for p in path_list:\n name_list = str.split(p.path, '\\\\')\n idnum, side = name_list[-3], name_list[-2]\n img = self._readImageFile(p.path, gray)\n if not img is None:\n images[idnum][side].append(img)\n return images", "title": "" }, { "docid": "7b9adc5333ae1492b967abc04a7f765b", "score": "0.5907543", "text": "def read_images(rootpath, data_part):\n h, w = 32, 32\n n_folders = 43 if data_part == 'train' else 1\n images = [] # images\n labels = [] # corresponding labels\n # loop over all 42 classes\n for cls in range(0, n_folders):\n if data_part == 'train':\n prefix = rootpath + format(cls, '05d') + '/' # subdirectory for class\n f_annotation = prefix + 'GT-' + format(cls, '05d') + '.csv'\n else:\n prefix = rootpath\n f_annotation = prefix + 'GT-final_test.csv'\n gtFile = open(f_annotation) # annotations file\n gtReader = csv.reader(gtFile, delimiter=';') # csv parser for annotations\n # loop over all images in current annotations file\n for row in list(gtReader)[1:]:\n img = Image.open(prefix + row[0])\n img = img.resize((h, w), Image.ANTIALIAS)\n images.append(np.array(img)) # the 1th column is the filename\n labels.append(int(row[7])) # the 8th column is the label\n gtFile.close()\n return np.array(images, dtype=np.uint8), np.array(labels)", "title": "" }, { "docid": "0d4a91f67e0b185cafb9c60a7a3d6ba0", "score": "0.58999825", "text": "def test_issue_pathlib():\n data = random_data(numpy.uint16, (219, 301))\n with TempFileName('pathlib') as fname:\n fname = pathlib.Path(fname)\n assert isinstance(fname, os.PathLike)\n # imwrite\n imwrite(fname, data)\n # imread\n im = imread(fname)\n assert_array_equal(im, data)\n # memmap\n im = memmap(fname)\n try:\n assert_array_equal(im, data)\n finally:\n del im\n # TiffFile\n with TiffFile(fname) as tif:\n with TempFileName('pathlib_out') as outfname:\n outfname = pathlib.Path(outfname)\n # out=file\n im = tif.asarray(out=outfname)\n try:\n assert isinstance(im, numpy.core.memmap)\n assert_array_equal(im, data)\n assert os.path.samefile(im.filename, str(outfname))\n finally:\n del im\n # TiffSequence\n with TiffSequence(fname) as tifs:\n im = tifs.asarray()\n assert_array_equal(im[0], data)\n with TiffSequence([fname]) as tifs:\n im = tifs.asarray()\n assert_array_equal(im[0], data)\n\n # TiffSequence container\n if SKIP_PRIVATE or SKIP_CODECS:\n pytest.skip(REASON)\n fname = pathlib.Path(private_file('TiffSequence.zip'))\n with TiffSequence('*.tif', container=fname, pattern=None) as tifs:\n im = tifs.asarray()\n assert im[9, 256, 256] == 135", "title": "" }, { "docid": 
"8945b38c5c8a8588b33a18d9b113074b", "score": "0.588466", "text": "def read_tif_image(fname):\n img = Image.open(fname)\n # positive values correspond to counterclockwise rotation\n img_r = img.rotate(0)\n width = img_r.size[0]\n height = img_r.size[1]\n return np.array(img_r.getdata()).reshape(height,width)", "title": "" }, { "docid": "2b45cb19fe449ed4b1ffb7883ef4ad57", "score": "0.5884306", "text": "def read_multiimg_PIL(tiffile):\n\n from PIL import Image\n import numpy as np\n\n img = Image.open(tiffile)\n\n imgs = []\n read = True\n\n frame = 0\n\n while read:\n try:\n img.seek(frame) # select this as the image\n imgs.append(np.array(img)[None,:,:])\n frame += 1\n except EOFError:\n # Not enough frames in img\n break\n\n return np.concatenate(imgs, axis=0)", "title": "" }, { "docid": "8b52cc8efa3cfaba8390a84b13030035", "score": "0.58840173", "text": "def read_images(path, sz=None):\n c = 0\n X,y = [], []\n for dirname, dirnames, filenames in os.walk(path):\n for subdirname in dirnames:\n subject_path = os.path.join(dirname, subdirname)\n for filename in os.listdir(subject_path):\n try:\n im = Image.open(os.path.join(subject_path, filename))\n im = im.convert(\"L\")\n # resize to given size (if given)\n if (sz is not None):\n im = im.resize(sz, Image.ANTIALIAS)\n im=np.asarray(im, dtype=np.uint8)\n X.append(extractItem(im,'face'))\n y.append(c)\n except IOError, (errno, strerror):\n print \"I/O error({0}): {1}\".format(errno, strerror)\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise\n c = c+1\n return [X,y]", "title": "" }, { "docid": "cfc0b37952806f942e19380949863e6e", "score": "0.5879211", "text": "def _load_mnist(path, kind='train'):\n labels_path = join(path, '{}-labels-idx1-ubyte.gz'.format(kind))\n with gzip.open(labels_path, 'rb') as fp:\n labels = np.frombuffer(fp.read(), dtype=np.uint8, offset=8)\n\n images_path = join(path, '{}-images-idx3-ubyte.gz'.format(kind))\n with gzip.open(images_path, 'rb') as fp:\n images = np.frombuffer(fp.read(), dtype=np.uint8, offset=16)\n\n return images.reshape(len(labels), 28, 28), labels", "title": "" }, { "docid": "d4760328d22f9f4ee1a8a44be3ac723c", "score": "0.58755875", "text": "def load_path(path):\n path_tr = np.load(path)['paths']\n y_tr = np.load(path)['y']\n y_tr = y_tr.reshape((-1))\n y_tr = torch.Tensor(y_tr)\n return path_tr, y_tr", "title": "" }, { "docid": "a7f6f897b4584f2b97beb60dc2575dd4", "score": "0.58669764", "text": "def download(self, path):\n\t\tfailed_files = []\n\t\tself.path = os.path.join(path)\n\t\tfilenames = self.__createfilenames()\n\t\tfilenames = self.__paths(filenames)\n\t\tprint filenames\n\t\ttiffiles = [] \n\t\tfor f in filenames:\n\t\t\tbinfile = self.__extract(f)\n\t\t\ttif = self.__process(binfile)\n\t\t\ttiffiles.append(tif)\n\t\t\tprint tif\n\t\treturn tiffiles", "title": "" }, { "docid": "8ee0658046bcc957bb50ccf86678068c", "score": "0.5858958", "text": "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels", "title": "" }, { "docid": "8ee0658046bcc957bb50ccf86678068c", "score": "0.5858958", "text": "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,\n 
'%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels", "title": "" }, { "docid": "7545cda3d93fa58db4e6618202cbc969", "score": "0.5847173", "text": "def imread(fname, dtype=None, img_num=None, **kwargs):\n if isinstance(fname, string_types):\n with open(fname, 'rb') as f:\n im = Image.open(f)\n return pil_to_ndarray(im, dtype=dtype, img_num=img_num)\n else:\n im = Image.open(fname)\n return pil_to_ndarray(im, dtype=dtype, img_num=img_num)", "title": "" }, { "docid": "0d3432cdccc36c6135892098db38c293", "score": "0.58448523", "text": "def load_images(self, img_path):\n \n # -- convert to absolute path and verify the path\n img_path = os.path.abspath(img_path)\n print \"Image source:\", img_path\n if not os.path.isdir(img_path):\n raise ValueError, \"%s is not a directory\" % (img_path)\n \n # -- extract the file names\n tree = os.walk(img_path)\n filelist = []\n #categories = tree.next()[1] \n for path, dirs, files in tree:\n if dirs != []:\n msgs = [\"invalid image tree structure:\"]\n for d in dirs:\n msgs += [\" \"+\"/\".join([root, d])]\n msg = \"\\n\".join(msgs)\n raise Exception, msg\n filelist += [ path+'/'+f for f in files if os.path.splitext(f)[-1] in self.extentions ]\n filelist.sort() \n \n # -- load and preprocess images\n for img_fname in filelist: #[0:1]:\n img = self.load_process_image(img_fname)\n self.images.append(img)\n #utils.visualize_array(img)\n \n #print len(categories), \"categories found:\"\n #print categories", "title": "" }, { "docid": "a0e90d56df52d790eebb21c0590a1966", "score": "0.58376414", "text": "def load_images(self):\n\n images = []\n\n for image_path in self.images_path:\n images.append(img_to_array(load_img(image_path)))\n\n images = np.asarray(images)\n print('The images were successfully loaded.')\n \n return images", "title": "" }, { "docid": "199c2b0c0f65a4c95a7a6f73e7f4ee3c", "score": "0.5829741", "text": "def load_rasters(path, subUL, band_ind): # Subset from original raster with extent and upperleft coord\n file_list = path # List image name\n assert len(file_list) == 2\n\n # Ensure the order of the list: base image first !!\n for file in file_list: # Organize file list\n img_name = str(file)\n if 'image' in img_name:\n base = file\n elif 'label' in img_name:\n label = file\n file_list = [base, label]\n \n stack = [] # Stack base and label together into a 3D array\n for file in file_list:\n if 'image' in str(file):\n data = gdal_array.LoadFile(str(file), xoff=subUL[0], yoff=subUL[1]) #.astype(np.int),ysize=extent[1],xsize=extent[0]\n data = data[tuple(band_ind),:,:] # Worldview image with 3rd dimension at first\n data = np.transpose(data,(1,2,0)) # Transpose 3rd to last \n print(data.shape)\n stack.append(data)\n else:\n data = gdal_array.LoadFile(str(file), xoff=subUL[0], yoff=subUL[1]) #.astype(np.int),xsize=extent[0],ysize=extent[1]\n if len(data.shape)==3: # For 3-band TIFF\n data = data[0,:,:]/255.0\n data = data[:,:,np.newaxis]\n print(data.shape)\n stack.append(data)\n# image = Image.fromarray(data)\n# data = nan_remover(data)\n# setattr(image, 'filename', file)\n # Ensure the size of base and label is are consistent\n assert stack[0].shape[0] == stack[-1].shape[0]\n assert stack[0].shape[1] == 
stack[-1].shape[1]\n return stack[:-1], stack[-1]", "title": "" }, { "docid": "c62782552f5df688a9e931b1c3ec209b", "score": "0.58200365", "text": "def GetImgPaths(path):\n types = '(jpg|jpeg|png|bmp|tif|tiff|ppm|pgm|pbm)'\n return GetFilePaths(path, types)", "title": "" }, { "docid": "48345630e55d293bfa95435ce1fed81e", "score": "0.58179593", "text": "def load_sample(path):\n _, extension = os.path.splitext(path)\n\n if extension == '.mat': \n output_data = scipy.io.loadmat(path)\n output_values = list(output_data.values())\n output_image = output_values[3]\n elif extension == '.npy':\n output_image = np.load(path)\n else:\n raise ValueError('Input file with extension %s is not a valid file type' %extension)\n return output_image", "title": "" }, { "docid": "46214f8660f60a7647c55470b5214d6e", "score": "0.5814822", "text": "def load_multi(self, filenames):\n self.filenames = filenames\n self.pixels = None\n if filenames:\n self.loaded = True\n\n for f in filenames:\n ext = f.split(os.path.extsep)[-1]\n if ext == 'raw':\n with open(f) as fh:\n d = fh.read()\n\n # XXX this assumes pilatus 100K...\n p = np.fromstring(d, '>f').astype('int32').reshape((195,-1))\n else:\n im = Image.open(f)\n p = np.asarray(im)\n\n if self.pixels is None:\n self.pixels = p.copy()\n else:\n self.pixels += p", "title": "" }, { "docid": "338541c47af5ae297561f5c4d91dedfa", "score": "0.5780349", "text": "def read_geotiff(image_path):\n\n ##print('Reading GeoTIFF data ...')\n #if type(image_path) is not list:\n # input = [image_path]\n\n ## read all arrays\n bs = []\n #for layer in input:\n with rasterio.open(image_path) as src:\n layer = src.read()#[0,:,:]\n try:\n crs = src.get_crs()\n except:\n crs = src.read_crs()\t \n w, h = (src.width, src.height)\n xmin, ymin, xmax, ymax = src.bounds\n\n del src\n bs.append({'bs':layer, 'w':w, 'h':h, 'xmin':xmin, 'xmax':xmax, 'ymin':ymin, 'ymax':ymax, 'crs':crs})\n\n ## resize arrays so common grid\n ##get bounds\n xmax = max([x['xmax'] for x in bs])\n xmin = min([x['xmin'] for x in bs])\n ymax = max([x['ymax'] for x in bs])\n ymin = min([x['ymin'] for x in bs])\n nz, nx, ny = np.shape(bs[0]['bs'])\n for k in range(len(bs)):\n bs[k]['h'] = nx\n bs[k]['w'] = ny\n bs[k]['xmin'] = xmin\n bs[k]['xmax'] = xmax\n bs[k]['ymin'] = ymin\n bs[k]['ymax'] = ymax\n\n img = np.dstack(bs[0]['bs']).astype('uint8').reshape((nx,ny,nz))\n\n return np.squeeze(img), bs", "title": "" }, { "docid": "dbec83e6d9f66cbfaa75cd7b60066da2", "score": "0.5774529", "text": "def load_mnist(path, kind):\r\n\tlabels_path = os.path.join(path, '%s-labels-idx1-ubyte.gz' % kind)\r\n\timages_path = os.path.join(path, '%s-images-idx3-ubyte.gz' % kind)\r\n\r\n\twith gzip.open(labels_path, 'rb') as lbpath:\r\n\t\tlbpath.read(8)\r\n\t\tbuffer = lbpath.read()\r\n\t\tlabels = np.frombuffer(buffer, dtype = np.uint8)\r\n\twith gzip.open(images_path, 'rb') as imgpath:\r\n\t\timgpath.read(16)\r\n\t\tbuffer = imgpath.read()\r\n\t\timages = np.frombuffer(buffer, dtype = np.uint8).reshape(len(labels), 28, 28).astype(np.float64)\r\n\t\r\n\treturn images, labels", "title": "" }, { "docid": "74e4e94852e262b01f35f5cb16d20b67", "score": "0.57532895", "text": "def loadResizeNormalizeImages (basepath, path_array, img_shape):\n images = np.empty ((len(path_array), img_shape[0], img_shape[1], img_shape[2]), dtype=np.float32)\n for i in range (len(path_array)):\n images[i] = normalizedArrayFromImageInPath (os.path.join(basepath,path_array[i]), img_shape)\n return images", "title": "" }, { "docid": "46da15a81b0bc324ab242e82f4f4b066", "score": 
"0.57515824", "text": "def convert_to_numpy(dir_name, img_list):\n image_arr = list()\n for im in img_list:\n im_name = os.path.join(dir_name, im)\n im = np.array(Image.open(im_name))\n image_arr.append(im)\n \n return np.array(image_arr)", "title": "" }, { "docid": "f9176aac4d7aa96a228a840d203a6ede", "score": "0.5750715", "text": "def load_array(path):\n path = str(path)\n if path.endswith('.npy'):\n try:\n import numpy as np\n mmap_mode = 'r' if op.getsize(path) > 1e8 else None\n return np.load(path, mmap_mode=mmap_mode)\n except ImportError:\n logger.warning(\"NumPy is not available.\")\n return\n except ValueError as e:\n logger.error(\"Impossible to read %s.\", path)\n raise e\n elif path.endswith('.tsv'):\n try:\n import pandas as pd\n return pd.read_csv(path, sep='\\t')\n except ImportError:\n logger.warning(\"Pandas is not available.\")\n except ValueError as e:\n logger.error(\"Impossible to read %s.\", path)\n raise e\n raise NotImplementedError(path)", "title": "" }, { "docid": "15d9f5d0e5517ef2988d7fa1b48a39d9", "score": "0.5746286", "text": "def _load_images(self, track_ids):\n images = []\n for track_id in track_ids:\n fpath = spectr_template.format(track_id[:3] + '/' + track_id + '.png')\n print('Loading spectrogram: {} ({})'.format(fpath, self.dataset_label))\n images.append(np.asarray(Image.open(fpath).getdata()).reshape(img_width, img_height))\n return np.array(images)", "title": "" }, { "docid": "a919d6e918e0e310226e40301e3d9702", "score": "0.5743716", "text": "def paths_to_tensor(img_paths: list) -> np.array:\n list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm.tqdm(img_paths)]\n return np.vstack(list_of_tensors)", "title": "" }, { "docid": "da165fe3e3b337856e4bf3083271ece7", "score": "0.57404053", "text": "def read_img(path):\n img = Image.open(path)\n img_arr = np.array(img, dtype='int32')\n# img_arr = np.array(img.getdata())\n\n img.close()\n return img_arr", "title": "" }, { "docid": "7fec3ff3e5dce1cc1678a485a676914a", "score": "0.5727729", "text": "def get_img_from_file(filepath):\n img_array = io.imread(filepath)\n return img_array", "title": "" }, { "docid": "9d32d4013b73c767b4a6f21dab26615c", "score": "0.5725686", "text": "def im_open(pth):\n\n try:\n #Double check to make sure pth is a directory.\n assert pth.is_dir()\n #Get list of tif files in pth and sort based on 'stem' which is the index of the B-scan (i.e. y position)\n #files = sorted(pth.glob('*.tif'), key=sort_key) \n #load the collection\n #raw_stack = io.imread_collection(files)\n raw_stack = io.imread_collection(str(pth/'*.tif'))\n #turn them into a stack that can be manipulated as a multdimensional array.\n stack = io.collection.concatenate_images(raw_stack)\n \n return stack\n\n except AssertionError:\n #if the assert fails the program stops. 
This is a bit heavy handed...\n sys.exit(\"A non-directory object was given to the __open__ function\")", "title": "" }, { "docid": "1b0bb6bc7272d49401acf26ec3da9301", "score": "0.5725431", "text": "def load_labeled_data():\n\n images = []\n labels = []\n\n for i in range(1, 10):\n path = (\"Data\", str(i), \"*.jpg\")\n filenames = glob.glob(\"/\".join(path))\n images_one_type = [cv2.imread(img) for img in filenames]\n labels_one_type = [i] * len(images_one_type)\n images += images_one_type\n labels += labels_one_type\n\n return images, labels", "title": "" }, { "docid": "3d9c594f17e5bc468963cc31ebe1efdc", "score": "0.57143843", "text": "def load_mnist_test(path, kind):\n\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte'\n % kind)\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack('>IIII',\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n\n return images, labels", "title": "" }, { "docid": "3145eeed5a30b6ea9db7a4bd6360c954", "score": "0.5706746", "text": "def load_images(path, size_of_image=SIZE_OF_IMAGE):\n\n X = [] # Stores images\n file_names = [] # Stores file names\n if os.path.isfile(path): # Single image\n try:\n # Read in image\n image = cv.imread(path)\n # Resize image so that all images are of the same size\n image = cv.resize(image, (size_of_image, size_of_image))\n\n X.append(image)\n file_names.append(path)\n except:\n print(\"Failed to read and resize image: \", path)\n\n else: # Folder containing images\n for path_image in os.listdir(path):\n if path_image[0] != '.': # To avoid .DS_Store files\n full_path = os.path.join(path, path_image)\n try:\n # Read in image\n image = cv.imread(full_path)\n # Resize image so that all images are of the same size\n image = cv.resize(image, (size_of_image, size_of_image))\n\n X.append(image)\n file_names.append(path_image)\n except:\n print(\"Failed to read and resize image: \", full_path)\n \n # Convert X into a numpy array\n X = np.array(X)\n\n return X, file_names", "title": "" }, { "docid": "ffeea1653875e608bdb8ef0afff1b28e", "score": "0.57016224", "text": "def get_numpy_from_path(\n path: pathlib.Path, internal_path: str = \"/data\"\n) -> Tuple[np.array, Union[Tuple[int, int], bool]]:\n if path.suffix in cfg.TIFF_SUFFIXES:\n return numpy_from_tiff(path), True\n elif path.suffix in cfg.HDF5_SUFFIXES:\n nexus = path.suffix == \".nxs\"\n return numpy_from_hdf5(path, hdf5_path=internal_path, nexus=nexus)", "title": "" }, { "docid": "b16f224d61b688da79dacd3a28e4b37f", "score": "0.5686152", "text": "def load_images_real(images_path, noise_path, reference_path, as_array=True, samples=None, scale=None):\n backgnd_paths = glob.glob(images_path + \"/*.jpeg\")\n noisy_img_paths = glob.glob(noise_path + \"/*.jpeg\")\n references = glob.glob(reference_path + \"/*.jpeg\")\n\n references = [path.split('/')[-1].split('.')[0] for path in references]\n backgnd_paths = [im for im in backgnd_paths if im.split('/')[-1].split('.')[0] not in references]\n backgnd_paths.sort(reverse=True)\n noisy_img_paths.sort(reverse=True)\n\n if samples != None:\n noisy_img_paths = noisy_img_paths[:samples]\n backgnd_paths = backgnd_paths[:samples]\n print(\"backgnd: \", backgnd_paths[0], \" - \", backgnd_paths[-1])\n print(\"loading\", end='')\n x_data = []\n x_noisy_data 
= []\n for n, img in enumerate(zip(backgnd_paths, noisy_img_paths)):\n if n % 100 == 0:\n print(\".\", end='')\n for i in range(2):\n img_data = cv2.imread(img[i])\n img_data = cv2.cvtColor(img_data, cv2.COLOR_BGR2RGB)\n if scale != None:\n img_data = rescale(img_data, scale)\n if i == 0:\n x_data.append(img_data)\n else:\n x_noisy_data.append(img_data)\n print(\"\")\n\n if as_array:\n x_data = np.stack(x_data, axis=0)\n x_noisy_data = np.stack(x_noisy_data, axis=0)\n\n return x_data, x_noisy_data", "title": "" }, { "docid": "f15c1a0a6402d200a45a47f23b9cb6b2", "score": "0.5683242", "text": "def load_input(imgpath=\"\"):\n img_list = []\n for file in os.listdir(imgpath):\n filepath = imgpath + \"/\" + file\n print(\"filepath=\", filepath)\n img = load(filepath)\n img_list.append(img)\n return img_list", "title": "" }, { "docid": "bcb67a27c4516dc7cc822724af0c2c20", "score": "0.568194", "text": "def load_images(directory):\n images = []\n jpgs = jpgs_in_dir(directory)\n for filename in jpgs:\n print(\"Loading\", filename)\n image = SimpleImage(filename)\n images.append(image)\n return images", "title": "" }, { "docid": "1837a8cf3b8ff11daa069258bab454d0", "score": "0.5679936", "text": "def load_data(self, paths, targets, Path, use_left_right=True):\n\n dataset = []\n labels = []\n\n for i, t in enumerate(targets):\n\n if use_left_right:\n i_lrc = np.random.randint(3)\n if (i_lrc == 0):\n p = paths['left'][i].strip()\n shift_ang = .18\n if (i_lrc == 1):\n p = paths['center'][i].strip()\n shift_ang = 0\n if (i_lrc == 2):\n p = paths['right'][i].strip()\n shift_ang = -.18\n else:\n p = paths['center'][i].strip()\n shift_ang = 0\n\n path = '/'.join(Path.split('/')[0:-1]) + '/IMG/' + p.split('/')[-1]\n if i == 0:\n print('looking for images at {}'.format(path))\n\n im_side = cv2.imread(path)\n im_side = cv2.cvtColor(im_side, cv2.COLOR_BGR2RGB)\n\n dataset.append(im_side)\n labels.append(t + shift_ang)\n\n return np.array(dataset), np.array(labels)", "title": "" }, { "docid": "030381cfcab71bed34cd5107fa77d509", "score": "0.56769687", "text": "def tif_to_numpy(tif_folder, band_list, n_bands, n_rows=None, n_cols=None): \n \n for i, im_band in enumerate(band_list):\n ds = gdal.Open(os.path.join(tif_folder, im_band), gdal.GA_ReadOnly)\n \n # initialize numpy array\n if i == 0:\n imageidx = 0\n if n_cols == None:\n n_cols = ds.RasterXSize\n if n_rows == None:\n n_rows = ds.RasterYSize \n n_cols = 688\n n_rows = 639\n image = np.zeros((n_rows, n_cols, n_bands)) \n \n # check if object has correct shape\n if not ds.RasterXSize == n_cols or not ds.RasterYSize == n_rows:\n print(\"resample... 
{}\".format(im_band))\n ds = gdal.Warp(\"\", ds, format='mem', width=n_cols, height=n_rows, resampleAlg=0)\n \n # read values in tiff image\n for b in range(ds.RasterCount):\n band = b+1\n srcband = ds.GetRasterBand(band)\n if srcband is None:\n continue\n \n # save values in numpy array\n image[:,:,imageidx] = srcband.ReadAsArray()\n imageidx += 1\n \n # reset\n ds = None\n \n return image", "title": "" }, { "docid": "5e1363238a2fb56051578e3c2b0d5d22", "score": "0.5676234", "text": "def _build_test_dataset(filepaths, input_shape=(256,256,3), norm=255,\n single_channel=False):\n img_arr = np.stack([_load_img(f, norm=norm, num_channels=input_shape[2],\n resize=input_shape[:2]) \n for f in filepaths])\n\n mask = _make_test_mask(*input_shape)\n mask = np.stack([mask for _ in range(img_arr.shape[0])])\n\n return img_arr, mask", "title": "" }, { "docid": "379716558b415d2c3ee085cad7b49dab", "score": "0.5667801", "text": "def load_imagesets(self):\n \n imageset_list = []\n for filename in os.listdir(loading_directory):\n if filename.endswith('.npy'):\n load_file = os.path.join(loading_directory, filename)\n imageset = np.load(load_file)\n imageset_list.append(imageset)\n print('Loading', load_file)\n \n return imageset_list", "title": "" }, { "docid": "740ae593ddd05dd1df17645ea4b06102", "score": "0.56672513", "text": "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path, '%s-labels-idx1-ubyte' % kind)\n images_path = os.path.join(path, '%s-images-idx3-ubyte' % kind)\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II', lbpath.read(8))\n labels = np.fromfile(lbpath, dtype=np.uint8)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))\n images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)\n\n return images, labels", "title": "" }, { "docid": "dc30f1371c1eafde13bfb4397c621e5e", "score": "0.56643677", "text": "def get_array(filename):\n return pyplot.imread(f'../bmp/{filename}', 'bmp')", "title": "" }, { "docid": "414586e19fa95eddbf6365c913efa376", "score": "0.56602234", "text": "def load_image_file(file_path: str) -> np.ndarray:\n image_data = Image.open(file_path)\n return np.array(image_data).astype('uint8')", "title": "" }, { "docid": "a1a9f09f948d3dc8189d80f061095c31", "score": "0.565655", "text": "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels.idx1-ubyte'\n % kind)\n images_path = os.path.join(path,\n '%s-images.idx3-ubyte'\n % kind)\n print(os.path.abspath(labels_path))\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack('>IIII',\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n\n return images, labels", "title": "" }, { "docid": "8a2ad9c855f390002a6118a900c6707c", "score": "0.5650054", "text": "def load_mnist(path, kind):\n\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte'\n % kind)\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack('>IIII',\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n\n return images, labels", 
"title": "" }, { "docid": "8d12b6190841852c361a3d85e1cdb299", "score": "0.56458443", "text": "def img_loader(data_path):\n # get format of data, using the extension\n ext = os.path.basename(data_path).split(os.path.extsep)[1]\n\n if not os.path.exists(data_path):\n raise IOError(\"No such file: %s\" % data_path)\n\n # load using numpy\n if ext == '.npy':\n img = np.load(data_path)\n\n # else default to PIL.Image supported extensions.\n # Loads most basic image formats.\n else:\n try:\n img = np.array(Image.open(data_path))\n except IOError:\n raise IOError(\"img_loader does not recognize file ext: %s\" % ext)\n return img", "title": "" }, { "docid": "5206587847c263e30364f8a09097a1ce", "score": "0.56386465", "text": "def read(dataset = \"training\", path = \".\"):\n\n if dataset is \"training\":\n fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset is \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError, \"dataset must be 'testing' or 'training'\"\n\n # Load everything in some numpy arrays\n with open(fname_lbl, 'rb') as flbl:\n magic, num = struct.unpack(\">II\", flbl.read(8))\n lbl = np.fromfile(flbl, dtype=np.int8)\n\n with open(fname_img, 'rb') as fimg:\n magic, num, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n print ('Done')\n img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)\n\n get_img = lambda idx: (lbl[idx], img[idx])\n\n # Create an iterator which returns each image in turn\n for i in xrange(len(lbl)):\n yield get_img(i)", "title": "" }, { "docid": "fa3b8c555648d8f88a62f0cebb9a46a2", "score": "0.5634524", "text": "def read_tiff(self):\n image = Image.open(self._filepath)\n image_array = np.asarray(image)\n # Reshape to reflect dimensions of the map\n raster = image_array.reshape(self._shape)\n return raster", "title": "" }, { "docid": "2aa7c554bf7f102e6ae50cb8588bc8be", "score": "0.5624485", "text": "def ReadImage(path):\n return sitk.GetArrayFromImage(sitk.ReadImage(path)).astype(np.float32)", "title": "" }, { "docid": "47dc933b74965738403c5c9b6f30c9c7", "score": "0.5623461", "text": "def read(dataset = \"training\", path = \".\"):\n \n if dataset is \"training\":\n fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset is \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError, \"dataset must be 'testing' or 'training'\"\n \n # Load everything in some numpy arrays\n with open(fname_lbl, 'rb') as flbl:\n magic, num = struct.unpack(\">II\", flbl.read(8))\n lbl = np.fromfile(flbl, dtype=np.int8)\n \n with open(fname_img, 'rb') as fimg:\n magic, num, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)\n \n get_img = lambda idx: (lbl[idx], img[idx])\n \n # Create an iterator which returns each image in turn\n for i in xrange(len(lbl)):\n yield get_img(i)", "title": "" }, { "docid": "a1bd2d67c7bc8e209d59628f26a6a5d2", "score": "0.56223583", "text": "def load_im(im_path: Path or str) -> np.array:\n out = np.asarray(Image.open(im_path))\n if out.ndim == 2:\n # Add dim to greyscales\n out = np.tile(out[:, :, None], 3)\n return out", "title": "" }, { "docid": "7843c1a9418d1036269d19e223000083", "score": "0.5619957", "text": "def 
_demo_tif():\r\n# a = np.arange(5*3*4, dtype=np.int8).reshape(5, 3, 4) # int8\r\n# a = np.arange(5*3*4, dtype=np.int16).reshape(5, 3, 4) # int16\r\n# a = np.arange(5*3*4, dtype=np.int32).reshape(5, 3, 4) # int32\r\n# a = np.arange(5*3*4, dtype=np.int64).reshape(5, 3, 4) # int64\r\n# a = np.arange(5*3*4, dtype=np.float16).reshape(5, 3, 4) # float16\r\n# a = np.arange(5*3*4, dtype=np.float32).reshape(5, 3, 4) # float32\r\n a = np.arange(5*3*4, dtype=np.float64).reshape(5, 3, 4) # float64\r\n return a", "title": "" }, { "docid": "73e35e5d8f576a9aca6b1fa1379524eb", "score": "0.5619342", "text": "def get_images_from_filename_array(coords,chips,classes,folder_names,res=(250,250)):\n\n images =[]\n boxes = []\n clses = []\n\n k = 0\n bi = 0\n\n for folder in folder_names:\n fnames = glob.glob(folder + \"*.tif\")\n fnames.sort()\n for fname in tqdm(fnames):\n #Needs to be \"X.tif\" ie (\"5.tif\")\n name = fname.split(\"\\\\\")[-1]\n arr = wv.get_image(fname)\n\n img,box,cls = wv.chip_image(arr,coords[chips==name],classes[chips==name],res)\n\n for im in img:\n images.append(im)\n for b in box:\n boxes.append(b)\n for c in cls:\n clses.append(cls)\n k = k + 1\n\n return images, boxes, clses", "title": "" }, { "docid": "a8f12cba9850f66dc812fe8ddbfff9a5", "score": "0.5618064", "text": "def test_read_tigers(fname):\n # ftp://ftp.graphicsmagick.org/pub/tiff-samples\n with TiffFile(fname) as tif:\n byteorder = {'le': '<', 'be': '>'}[os.path.split(fname)[0][-2:]]\n databits = int(fname.rsplit('.tif')[0][-2:])\n\n # assert file properties\n assert_file_flags(tif)\n assert tif.byteorder == byteorder\n assert tif.is_bigtiff == ('bigtiff' in fname)\n assert len(tif.pages) == 1\n\n # assert page properties\n page = tif.pages.first\n assert_page_flags(page)\n assert page.tags['DocumentName'].value == os.path.basename(fname)\n assert page.imagewidth == 73\n assert page.imagelength == 76\n assert page.bitspersample == databits\n assert (page.photometric == RGB) == ('rgb' in fname)\n assert (page.photometric == PALETTE) == ('palette' in fname)\n assert page.is_tiled == ('tile' in fname)\n assert (page.planarconfig == CONTIG) == ('planar' not in fname)\n if 'minisblack' in fname:\n assert page.photometric == MINISBLACK\n\n # float24 not supported\n # if 'float' in fname and databits == 24:\n # with pytest.raises(ValueError):\n # data = tif.asarray()\n # return\n\n # assert data shapes\n data = tif.asarray()\n assert isinstance(data, numpy.ndarray)\n assert data.flags['C_CONTIGUOUS']\n # if 'palette' in fname:\n # shape = (76, 73, 3)\n if 'rgb' in fname:\n if 'planar' in fname:\n shape = (3, 76, 73)\n else:\n shape = (76, 73, 3)\n elif 'separated' in fname:\n if 'planar' in fname:\n shape = (4, 76, 73)\n else:\n shape = (76, 73, 4)\n else:\n shape = (76, 73)\n assert data.shape == shape\n\n # assert data types\n if 'float' in fname:\n if databits == 24:\n dtype = numpy.float32\n else:\n dtype = f'float{databits}'\n # elif 'palette' in fname:\n # dtype = numpy.uint16\n elif databits == 1:\n dtype = numpy.bool_\n elif databits <= 8:\n dtype = numpy.uint8\n elif databits <= 16:\n dtype = numpy.uint16\n elif databits <= 32:\n dtype = numpy.uint32\n elif databits <= 64:\n dtype = numpy.uint64\n assert data.dtype == dtype\n\n assert_decode_method(page, data)\n assert_aszarr_method(page, data)\n assert__str__(tif)", "title": "" }, { "docid": "06af18d86f9fc20513d46237b57dd221", "score": "0.5612518", "text": "def _read_txt(self, path: str) -> Tuple[np.ndarray, np.ndarray]:\n image_root_path = os.path.join(self.data_path, 
\"core50_128x128\")\n\n paths, targets = [], []\n with open(path, \"r\") as f:\n for line in f:\n p, t = line.strip().split(\" \")\n paths.append(os.path.join(image_root_path, p))\n targets.append(int(t))\n\n return np.array(paths), np.array(targets)", "title": "" }, { "docid": "cccf1a89413dda0f70bd77ee77ab2f10", "score": "0.5608811", "text": "def load_from_file(path, bands):\n dataset = gdal.Open(path, gdal.GA_ReadOnly)\n array = dataset.ReadAsArray()\n\n if len(array.shape) == 3:\n # The bands column is in the first position, but we want it last\n array = np.rollaxis(array, 0, 3)\n elif len(array.shape) == 2:\n # This image seems to have one band, so we add an axis for ease\n # of use in the rest of the library\n array = array[:, :, np.newaxis]\n\n image = array.astype('float32')\n\n return dataset, image, bands", "title": "" }, { "docid": "23a5eb9e22d154b286afc07275b9baf2", "score": "0.5608254", "text": "def read(digits, dataset = \"training\", path = \".\"):\n\n if dataset is \"training\":\n # fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n # fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n\n fname_img = path+\"train-images.idx3-ubyte\"\n fname_lbl = path+\"train-labels.idx1-ubyte\"\n elif dataset is \"testing\":\n # fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n # fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n fname_img = path+\"t10k-images.idx3-ubyte\"\n fname_lbl = path+\"t10k-labels.idx1-ubyte\"\n else:\n raise ValueError, \"dataset must be 'testing' or 'training'\"\n\n flbl = open(fname_lbl, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n lbl = array(\"b\", flbl.read())\n flbl.close()\n\n fimg = open(fname_img, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = array(\"B\", fimg.read())\n fimg.close()\n\n ind = [ k for k in xrange(size) if lbl[k] in digits ]\n images = matrix(0, (len(ind), rows*cols))\n labels = matrix(0, (len(ind), 1))\n for i in xrange(len(ind)):\n images[i, :] = img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]\n labels[i] = lbl[ind[i]]\n\n return images, labels", "title": "" }, { "docid": "5c873227ff66c6bf7bd48b3cdb2b5baa", "score": "0.55986685", "text": "def get_image_arr(img):\r\n return np.asarray(Image.open(os.path.join(images_folder, img)))", "title": "" }, { "docid": "3ccf6918d97c571b7c3f50a6345bb7fe", "score": "0.5596762", "text": "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte'\n % kind)\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack('>IIII',\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n\n return images, labels", "title": "" }, { "docid": "3ccf6918d97c571b7c3f50a6345bb7fe", "score": "0.5596762", "text": "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte'\n % kind)\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack('>IIII',\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n\n return 
images, labels", "title": "" }, { "docid": "0df3a32c5cdd27e865d88dbf14993a9a", "score": "0.5592266", "text": "def images_to_library(t_path, images_path): \r\n images_list = glob.iglob(os.path.join(images_path, \"*.jpg\")) #contains the files\r\n num_of_images = len(os.listdir(images_path)) #the num of files from the images library \r\n os.chdir(t_path)\r\n # cheaks if t_path is for test or train\r\n if r\"\\test\" in t_path: \r\n for jpgfile in images_list: #copy all of the images to test\r\n shutil.copy(jpgfile, t_path)\r\n else:\r\n counter = 0 #counts the number of times the loop repeat \r\n for jpgfile in images_list: #copy 70% of the images to train\r\n if (counter < 0.7*num_of_images):\r\n shutil.copy(jpgfile, t_path)\r\n counter = counter + 1", "title": "" }, { "docid": "13efd2e6951812d6179edad5be822c6a", "score": "0.5591965", "text": "def load_data(path, size=(32, 32), sub_type=False):\n x = []\n y = []\n categories = os.listdir(path)\n\n for category in categories:\n print(\"loading category: \" + category, \"class: \", categories.index(category))\n category_path = os.path.join(path, category)\n if sub_type:\n for sub_type in os.listdir(category_path):\n sub_type_path = os.path.join(category_path, sub_type)\n for filename in os.listdir(sub_type_path):\n image_path = os.path.join(sub_type_path, filename)\n image = cv2.resize(cv2.imread(image_path), size, cv2.INTER_CUBIC)\n x.append(image.astype(np.float32) / 255.)\n y.append(categories.index(category))\n else:\n for filename in os.listdir(category_path):\n image_path = os.path.join(category_path, filename)\n image = cv2.resize(cv2.imread(image_path), size, cv2.INTER_CUBIC)\n x.append(image.astype(np.float32) / 255.)\n y.append(categories.index(category))\n\n return np.array(x), np.array(y), categories", "title": "" }, { "docid": "996112fae50ca04f9a9f9a13d083e0b9", "score": "0.5591404", "text": "def load_data(self, path='data/patches_48/data.npz'):\n path = PROJECT + path\n\n imgs, gt_imgs, labels = pc.load_from_file(path)\n\n self.X = imgs\n self.Y = labels", "title": "" }, { "docid": "4d904ff93e7ed68f16df706c95a16fd4", "score": "0.5591299", "text": "def load_image_into_numpy_array(path):\n img_data = tf.io.gfile.GFile(path, 'rb').read()\n image = Image.open(BytesIO(img_data))\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)", "title": "" } ]
f68ccc29c713b415872ba580eb64f23a
Compute the cosine similarity of all pairs in ``X``.
[ { "docid": "eb0c58c1d49eab42e27d8b5a21698968", "score": "0.74439645", "text": "def transform(self, X):\n n_pairs, two = X.shape\n Xt = np.zeros(n_pairs, dtype=float)\n i=0\n for x1, x2 in X:\n Xt[i] = cosine_similarity(x1, x2)\n i+=1\n\n return Xt.reshape(n_pairs, 1)", "title": "" } ]
[ { "docid": "7cd76d82ac6c4ff23bce6e140b37c164", "score": "0.75480765", "text": "def cosine_similarity(self, x1, x2, dim=1):\n w12 = torch.sum(x1 * x2, dim)\n w1 = torch.norm(x1, 2, dim)\n w2 = torch.norm(x2, 2, dim)\n return (w12 / (w1 * w2).clamp(min=self.eps)).squeeze()", "title": "" }, { "docid": "b979e144043a73bf69d9c4e1e3e4df3b", "score": "0.7494125", "text": "def cosine_similarity(self, x1, x2, dim=1):\n w12 = torch.sum(x1 * x2, dim)\n w1 = torch.norm(x1, 2, dim)\n w2 = torch.norm(x2, 2, dim)\n return (w12 / (w1 * w2).clamp(min=self.eps))", "title": "" }, { "docid": "58d82ac1378ad32fdbb3d835a0b871bd", "score": "0.7158918", "text": "def cosine_distances(self):\n n_train = len(self.training_data)\n n_valid = len(self.data)\n scalar_products = np.zeros((n_valid, n_train))\n for i in range(n_valid):\n scalar_products[i] = np.sum(np.outer(np.ones(n_train), self.data[i]) * self.training_data, axis=1)\n train_norm = np.outer(np.ones(n_valid), np.sqrt(np.sum(np.power(self.training_data, 2), axis=1)))\n valid_norm = np.outer(np.sqrt(np.sum(np.power(self.data, 2), axis=1)), np.ones(n_train))\n similarity = scalar_products/(train_norm*valid_norm)\n for i in range(n_valid):\n for j in range(n_train):\n if similarity[i][j] <= 0:\n similarity[i][j] = 1e-12\n elif similarity[i][j] >= 1:\n similarity[i][j] = 1 - 1e-12\n self.distance_matrix = np.arccos(similarity)/np.pi", "title": "" }, { "docid": "45d89388ff7f1707328b3ec4988b8adc", "score": "0.7142306", "text": "def cosine_similarity(x1, x2, dim=1, eps=1e-8):\n w12 = torch.sum(x1 * x2, dim)\n w1 = torch.norm(x1, 2, dim)\n w2 = torch.norm(x2, 2, dim)\n return (w12 / (w1 * w2).clamp(min=eps)).squeeze()", "title": "" }, { "docid": "45d89388ff7f1707328b3ec4988b8adc", "score": "0.7142306", "text": "def cosine_similarity(x1, x2, dim=1, eps=1e-8):\n w12 = torch.sum(x1 * x2, dim)\n w1 = torch.norm(x1, 2, dim)\n w2 = torch.norm(x2, 2, dim)\n return (w12 / (w1 * w2).clamp(min=eps)).squeeze()", "title": "" }, { "docid": "45d89388ff7f1707328b3ec4988b8adc", "score": "0.7142306", "text": "def cosine_similarity(x1, x2, dim=1, eps=1e-8):\n w12 = torch.sum(x1 * x2, dim)\n w1 = torch.norm(x1, 2, dim)\n w2 = torch.norm(x2, 2, dim)\n return (w12 / (w1 * w2).clamp(min=eps)).squeeze()", "title": "" }, { "docid": "32334ad09d35582d9f8d4237f422d810", "score": "0.7127534", "text": "def cosine_similarity(X, Y=..., dense_output=...):\n ...", "title": "" }, { "docid": "7ca4b4221c61c9b9339ba4f41f76c421", "score": "0.7094364", "text": "def cosine_similarity(x1, x2, dim=1, eps=1e-8):\n # w12 = torch.sum(x1 * x2, dim)\n w12 = torch.mm(x1, x2.t())\n w1 = torch.norm(x1, 2, dim)\n w2 = torch.norm(x2, 2, dim)\n # return (w12 / (w1 * w2).clamp(min=eps)).squeeze()\n return w12 / torch.mm(w1, w2.t()).clamp(min=eps)", "title": "" }, { "docid": "694464fd73f52a78149ce814adf26e8c", "score": "0.70592815", "text": "def cosine_similarity(self, person_x, person_y):\n assert person_x.size == person_y.size\n numerator = sum(x*y for x,y in zip(person_x, person_y))\n denominator = (sqrt(sum(x*x for x in person_x))) * (sqrt(sum(y*y for y in person_y)))\n return numerator/denominator", "title": "" }, { "docid": "729166ac8bc7e91fcc141110b2a6285e", "score": "0.7048186", "text": "def cosine_distances(X, Y=...):\n ...", "title": "" }, { "docid": "fa2245ebd75daf9212f1bd88aa5f74ee", "score": "0.70204204", "text": "def _calculate_cosine_similarities(self):\n # Drop irrelevant columns\n cols_to_drop = ['artists', 'id', 'name']\n X = self.features.drop(cols_to_drop, axis=1)\n Y = 
self.saved_tracks.drop(cols_to_drop, axis=1)\n # Calculate cosine similarity scores\n scores = cosine_similarity(X, Y)\n return scores", "title": "" }, { "docid": "7eb46bf03068746532ec634f023c2898", "score": "0.69474447", "text": "def paired_cosine_distances(X, Y):\n ...", "title": "" }, { "docid": "65f1f4b649600078f953339b773e9594", "score": "0.69176626", "text": "def get_cosine_similarity(self, feature_vec_1, feature_vec_2):\n return cosine_similarity(\n feature_vec_1.reshape(1, -1), feature_vec_2.reshape(1, -1))[0][0]", "title": "" }, { "docid": "9a94e0f43d69727b3a46bd2ebb11460b", "score": "0.6812001", "text": "def cosine_similarity(self,vecA,vecB):\n return np.dot(vecA,vecB)/(np.dot(vecA,vecA)*np.dot(vecB,vecB))", "title": "" }, { "docid": "f334697798f9f388809a11bd8c11cce5", "score": "0.677862", "text": "def cos(self, x, y):\n c = nn.CosineSimilarity()\n return c(x, y).mean()", "title": "" }, { "docid": "c34691a769f998efbf0368f3f0e9f165", "score": "0.6636954", "text": "def compute_similarity(x):\r\n n_samples = x.shape[0]\r\n similarity_matrix = np.eye(n_samples)\r\n\r\n norms = np.linalg.norm(x, axis=1) # Compute euclidean norms once for all\r\n for i in range(1, n_samples):\r\n for j in range(i):\r\n dist_i_j = np.vdot(x[i], x[j]) / (norms[i] * norms[j])\r\n similarity_matrix[i, j] = similarity_matrix[j, i] = dist_i_j\r\n\r\n return similarity_matrix", "title": "" }, { "docid": "868e5845e405cb9244f583988b712f99", "score": "0.65832746", "text": "def cos_similarity(vecx, vecy):\n # if vecx is list, no need transform to matrix\n # if type(vecx) == type([]):\n if isinstance(vecx, list):\n return float(dot(vecx, vecy))\n # if other type, we transform to matrix\n a = mat(vecx)\n b = mat(vecy)\n c = float(dot(a, b.T))\n return c", "title": "" }, { "docid": "3e2d4d494460a16b73d8e32c9f1d946c", "score": "0.65807843", "text": "def cosine_similarity(F):\n cos_dist_matrix = None\n\n F_norm = F / torch.linalg.norm(F, dim=1, keepdim=True)\n cos_dist_matrix = torch.matmul(F_norm, F_norm.T)\n return cos_dist_matrix", "title": "" }, { "docid": "63cdd63f8e6a0fd67d9fb677987ce384", "score": "0.653795", "text": "def CosineSimilarity(test_vec, source_vecs):\n cos_dist = 0\n for source_vec in source_vecs:\n cos_dist += findCosineDistance(test_vec, source_vec)\n return cos_dist / len(source_vecs)", "title": "" }, { "docid": "c932d5654829259176f6308d57724549", "score": "0.64287114", "text": "def cosine_similarity(v1: Vector, v2: Vector) -> float:\n return dot_product(v1, v2) / (vector_len(v1) * vector_len(v2))", "title": "" }, { "docid": "c5dc3c514235e4f9a137c206165fa2dd", "score": "0.64220136", "text": "def evaluate_similarity(word_pairs, word_vectors):\n cosine_similarities = []\n for index, row in word_pairs.iterrows():\n if row[0] in word_vectors.keys() and row[1] in word_vectors.keys():\n cos_sim = cosine_similarity(word_vectors[row[0]], word_vectors[row[1]])\n cosine_similarities.append(cos_sim)\n else:\n cosine_similarities.append(np.nan)\n\n return cosine_similarities", "title": "" }, { "docid": "5058a907e3c2408c0dd6349fdbe32caa", "score": "0.64186233", "text": "def cosine_similarity_matrix(samples, eps=1e-8):\n assert samples.dim() >= 2, \\\n 'Shape of input should be (*, num_samples, num_features)'\n w = samples.norm(dim=-1, keepdim=True)\n return samples.matmul(samples.transpose(-1, -2)) / (w * w.transpose(-1, -2)).clamp(min=eps)", "title": "" }, { "docid": "89d583ce37d90b63f405a941a28d9291", "score": "0.63767064", "text": "def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:\n return 
np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))", "title": "" }, { "docid": "6fba17d7a21322fd92371566167287eb", "score": "0.6367167", "text": "def cosine_distance(x: Tensor, y: Tensor) -> np.ndarray:\n x = x.cpu().numpy()\n y = y.cpu().numpy()\n x = x / np.linalg.norm(x, axis=1, keepdims=True)\n y = y / np.linalg.norm(y, axis=1, keepdims=True)\n dists = 1. - np.dot(x, y.T)\n return dists", "title": "" }, { "docid": "0e29031eed266b01bd37b559b752aeea", "score": "0.63635343", "text": "def cost_matrix_cosine(x, y, eps=1e-5):\n assert x.dim() == y.dim()\n assert x.size(0) == y.size(0)\n assert x.size(2) == y.size(2)\n x_norm = F.normalize(x, p=2, dim=-1, eps=eps)\n y_norm = F.normalize(y, p=2, dim=-1, eps=eps)\n cosine_sim = x_norm.matmul(y_norm.transpose(1, 2))\n cosine_dist = 1 - cosine_sim\n return cosine_dist", "title": "" }, { "docid": "6285ad5d50523bd74d0ba37fd70fe670", "score": "0.635209", "text": "def cosine_similarity(vec1, vec2):\n\n num = np.dot(vec1, vec2)\n denom = np.linalg.norm(vec1) * np.linalg.norm(vec2)\n cos = num / denom\n return cos", "title": "" }, { "docid": "4f9970988d514a679d938f23b090d525", "score": "0.63291526", "text": "def measure_cosine_similarity(tokens_one, tokens_two):\r\n\r\n return (np.dot(tokens_one, tokens_two)/(np.linalg.norm(tokens_one) * np.linalg.norm(tokens_two)))", "title": "" }, { "docid": "e3c77f46f2a8b42fd93ed20cf39ab068", "score": "0.62985075", "text": "def cosine_similarity(a, b):\n\n return dot(a, b) / (norm(a) * norm(b))", "title": "" }, { "docid": "595fc924e93440eb043ab6c295f1d512", "score": "0.6298484", "text": "def get_setenence_cosine_similarity(cls, x, y):\n x_list = word_tokenize(x)\n y_list = word_tokenize(y)\n ps = PorterStemmer()\n # sw contains the list of stopwords\n stoplist = list(string.punctuation)\n stoplist += ['-lrb-', '-rrb-', '-lcb-', '-rcb-', '-lsb-', '-rsb-']\n stoplist += stopwords.words('english')\n l1 = []\n l2 = []\n\n # remove stop words from the string\n x_set = {ps.stem(w) for w in x_list if not w in stoplist}\n y_set = {ps.stem(w) for w in y_list if not w in stoplist}\n\n # form a set containing keywords of both strings\n rvector = x_set.union(y_set)\n for w in rvector:\n if w in x_set:\n l1.append(1) # create a vector\n else:\n l1.append(0)\n if w in y_set:\n l2.append(1)\n else:\n l2.append(0)\n c = 0\n\n # cosine formula\n for i in range(len(rvector)):\n c += l1[i]*l2[i]\n try:\n cosine = c / float((sum(l1)*sum(l2))**0.5)\n except:\n cosine = 0\n\n return cosine", "title": "" }, { "docid": "f313ce675b00f7ef204e78e2de57ca7c", "score": "0.6298477", "text": "def cosine_similarity(A, B):\n return np.dot(A, B) / (np.linalg.norm(A) * np.linalg.norm(B))", "title": "" }, { "docid": "b742bac5c39709c9bd8a424fac35ac1f", "score": "0.6291276", "text": "def cosine_similarity_pairwise(sentence_embedding_source, sentence_embedding_target):\n try:\n sentence_embedding_source_array = np.array(list(sentence_embedding_source[0].values())).reshape(1, -1)\n sentence_embedding_target_array = np.array(list(sentence_embedding_target[0].values())).reshape(1, -1)\n try:\n return cosine_similarity(X=sentence_embedding_source_array, Y=sentence_embedding_target_array,\n dense_output=True)[0][0]\n except ValueError:\n return 0\n except TypeError:\n sentence_embedding_source_array = np.array(sentence_embedding_source).reshape(1, -1)\n sentence_embedding_target_array = np.array(sentence_embedding_target).reshape(1, -1)\n try:\n return cosine_similarity(X=sentence_embedding_source_array, Y=sentence_embedding_target_array,\n 
dense_output=True)[0][0]\n except ValueError:\n return 0", "title": "" }, { "docid": "6856e6496af5d1392ec7c4986481c313", "score": "0.62721443", "text": "def get_cosine_distance_between_features(cls,\r\n calibrate_feature,\r\n cmp_feature):\r\n cosine_similarity = {}\r\n for key, dst_feature in calibrate_feature.items():\r\n src_feature = cmp_feature.get(key)\r\n abs_eps = cls.absolute_tolerance()\r\n dst_sum = np.sum(dst_feature * dst_feature)\r\n src_sum = np.sum(src_feature * src_feature)\r\n dot_sum = np.sum(dst_feature * src_feature)\r\n\r\n if dst_sum < abs_eps and src_sum < abs_eps:\r\n value = 1.0\r\n elif dst_sum * src_sum < abs_eps:\r\n if dst_sum < abs_eps or src_sum < abs_eps:\r\n value = 1.0\r\n else:\r\n value = 0.0\r\n else:\r\n value = dot_sum / (np.sqrt(dst_sum) * np.sqrt(src_sum) + abs_eps)\r\n\r\n cosine_similarity[key] = f'{round(value * 100, 3)}%'\r\n\r\n return cosine_similarity", "title": "" }, { "docid": "f8dead98409e1d4fb20179da39980543", "score": "0.6267143", "text": "def cosine_similarity(self, docA, docB):\n weightedA = self.tfidf(docA.token_counts)\n weightedB = self.tfidf(docB.token_counts)\n dotAB = dot(weightedA, weightedB)\n normA = math.sqrt(dot(weightedA, weightedA))\n normB = math.sqrt(dot(weightedB, weightedB))\n if (normA == 0 or normB == 0): # check for equal 0, to avoid division by 0.\n return 0\n else:\n return dotAB / (normA * normB)", "title": "" }, { "docid": "615ae43555119ebcb19b10161c9a5d2e", "score": "0.62481153", "text": "def _cosine_distance(x: 'np.ndarray', y: 'np.ndarray') -> 'np.ndarray':\n return 1 - np.dot(x, y.T) / np.outer(\n np.linalg.norm(x, axis=1), np.linalg.norm(y, axis=1)\n )", "title": "" }, { "docid": "5cd1b2f884020c68958f0643c74dc86f", "score": "0.622074", "text": "def cosine_similarity_calculator(self,a, b):\n nominator = np.dot(a, b)\n \n a_norm = np.sqrt(np.sum(a**2))\n b_norm = np.sqrt(np.sum(b**2))\n \n denominator = a_norm * b_norm\n \n cosine_similarity = nominator / denominator\n \n return cosine_similarity", "title": "" }, { "docid": "014c10db1043301dc224acc2ce71d667", "score": "0.6217149", "text": "def calculate_cosine_similarity (self, tweet): # tweets are in a list.\n if not tweet:\n print 'No tweets to classify.'\n return\n query_term_vector = {}\n #tweet = json.loads(tweet)\n for token in re.findall('[a-zA-Z0-9]+', tweet['text']):\n token = self._case_fold(token)\n if not token or token in nltk.corpus.stopwords.words('english'):\n continue\n token = nltk.WordNetLemmatizer().lemmatize(token)\n if query_term_vector.get(token,False):\n query_term_vector[token] += query_term_vector.get(token, 0.0) + 1\n else:\n query_term_vector[token] = 1\n query_magnitude = math.sqrt(\n math.fsum([math.pow(count, 2) for count in query_term_vector.values()]))\n # Calculate cosine scores.\n cosine_scores = {}\n for city, postings in self.city_vectors.iteritems():\n for word in query_term_vector.keys():\n cosine_scores[city] = cosine_scores.get(city, 0.0) + (\n query_term_vector[word] * postings.get(word, 0.0))\n cosine_scores[city] = cosine_scores[city] / (self.city_vectors_magnitude[city] * query_magnitude)\n return sorted(cosine_scores.iteritems(),key=lambda x: x[1],reverse=True)[:50]", "title": "" }, { "docid": "d83d37d28999d59825ae3a14c2188b5f", "score": "0.61963344", "text": "def cosine_similarity(self, u: embedding_pb2.FeatureVector,\n v: embedding_pb2.FeatureVector) -> float:\n return self._embedder.cosine_similarity(u.to_pb2(), v.to_pb2())", "title": "" }, { "docid": "8cdaaf1b9f112ff59fe2cf0a9511278c", "score": "0.61912173", 
"text": "def cosine_similarity(vector1, vector2):\n\t\n\treturn 1 - spatial.distance.cosine(vector1, vector2)", "title": "" }, { "docid": "00cb3d9400d5263106577cc3157c4122", "score": "0.61786616", "text": "def cosine_similarity(a, b):\n dot_product = np.dot(a, b) \n norm_a = np.linalg.norm(a) \n norm_b = np.linalg.norm(b) \n return dot_product / (norm_a * norm_b)", "title": "" }, { "docid": "a71628b874ae83724bd4eff157bdfbfb", "score": "0.6177", "text": "def cosine_similarity(self, mass_a, mass_b):\n numerator = sum(a * b for a, b in zip(mass_a, mass_b))\n denominator = self.square_rooted(mass_a) * self.square_rooted(mass_b)\n return round(numerator/float(denominator), 3)", "title": "" }, { "docid": "27829cad9bbfbaaf1bc37b7b055a1566", "score": "0.6166796", "text": "def cosine_sim(self, item1, item2):\n user1 = set(map(lambda x:x[0],self.data.rating_item[item1]))\n user2 = set(map(lambda x:x[0],self.data.rating_item[item2]))\n commons = user1 & user2\n\n e_ij = []\n e_i = []\n e_j = []\n for user in commons:\n u_ij = filter(lambda x:(x[0]==item1 or x[0]==item2),\n self.data.u_rating[user])\n e_ij.append((u_ij[0][1]-self.u_mean[user])*\\\n (u_ij[1][1]-self.u_mean[user]))\n e_i.append((u_ij[0][1]-self.u_mean[user])**2)\n e_j.append((u_ij[1][1]-self.u_mean[user])**2)\n try:\n sim = sum(e_ij)/(sqrt(sum(e_i))*sqrt(sum(e_j)))\n except ZeroDivisionError:\n #msg(\"The second number can't be zero!\")\n sim = 1 # cos(0) = 1\n return sim", "title": "" }, { "docid": "90059f3ec7ea13de2a9658562031da41", "score": "0.6145148", "text": "def cosine_sim(vec1, vec2):\n num = sum([vec1[i]*vec2[i] for i in xrange(len(vec1))])\n print num\n result = num / (norm(vec1) * norm(vec2))\n print numpy.dot(vec1, vec2)\n\n result2 = numpy.dot(vec1, vec2) / (norm(vec1) * norm(vec2))\n return (result, result2)", "title": "" }, { "docid": "a969625430b62a4d76f501f4f3e313b0", "score": "0.61431926", "text": "def cosine_similarity(self, tv1, tv2):\n return self.dot_product(tv1, tv2) / (self.length(tv1) * self.length(tv2))", "title": "" }, { "docid": "c9e4e29795a1550831981b95006dc4e8", "score": "0.61163485", "text": "def cosine_similarity(x, y):\n x = tf.nn.l2_normalize(x, 1) \n y = tf.nn.l2_normalize(y, 1)\n return 1 - tf.matmul(tf.expand_dims(x, axis=1), tf.expand_dims(y, axis=2))", "title": "" }, { "docid": "9dd4c6fea88fadae24589cf557ed3293", "score": "0.60705036", "text": "def cosineSimilarity(record, idfsRDD, idfsRDD2, corpusNorms1, corpusNorms2):\n vect1Rec = record[0][0]\n vect2Rec = record[0][1]\n tokens = record[1]\n s = sum((idfsRDD[vect1Rec][i]*idfsRDD2[vect2Rec][i] for i in tokens))\n value = s/((corpusNorms1[vect1Rec])*(corpusNorms2[vect2Rec]))\n key = (vect1Rec, vect2Rec)\n return (key, value)", "title": "" }, { "docid": "3b34427d881d9ef800c98e80e3100497", "score": "0.6065262", "text": "def cosine_similarity(vect2, vect1):\n assert sum(vect1 <= 0) == 0, \"Test User's rating score has zero value\"\n assert np.size(vect1) == np.size(vect2), \"Don't have the same dimension\"\n non_zeros = vect2 > 0\n if sum(non_zeros) == 0:\n # vector2 has no rated scores\n cosine = 0.0\n elif sum(non_zeros) == 1:\n # fix the issue that if vectors have dimension of 1, their cosine similarity is always 1\n cosine = 0.8 - float(abs(vect1[non_zeros] - vect2[non_zeros])) * 0.2\n else:\n vector1 = vect1[non_zeros].astype('float')\n vector2 = vect2[non_zeros].astype('float')\n cosine = sum(vector1 * vector2) / np.sqrt(sum(vector1 * vector1) * sum(vector2 * vector2))\n return cosine", "title": "" }, { "docid": 
"55af1a1e61b0c380357c23f80bb2b206", "score": "0.60589135", "text": "def cosine_sim(a: [float], b: [float]) -> float:\n return cosine_similarity([a], [b])[0][0]", "title": "" }, { "docid": "0f2dc20cc366fd2284587bda61f09ca0", "score": "0.6039406", "text": "def cosine_similarity(d1, d2):\n #Magnitudes for each document vector\n mag_d1 = np.linalg.norm(d1.data)\n mag_d2 = np.linalg.norm(d2.data)\n \n #Calculate dot product of documents\n dotProd = d1.dot(d2.T).data\n\n #calculate cosine similarity\n cos = dotProd/(mag_d1*mag_d2) \n \n return cos", "title": "" }, { "docid": "02ca8205797e44800aef7b89d81f1e73", "score": "0.60322493", "text": "def cosine_similarity(vector_query, normalised_doc):\r\n dotProduct = 0 # Since the document is normalised, the cosine similarity is the dot product\r\n for term in vector_query:\r\n if term in normalised_doc: # if the term is found in this document\r\n dotProduct += vector_query[term] * normalised_doc[term] # Calculate the dot product\r\n return dotProduct", "title": "" }, { "docid": "b866e1ae486905c4cc63327242f965bb", "score": "0.60298187", "text": "def similarity(book_index, X, kernel='cosine'):\n return PAIRWISE_KERNEL_FUNCTIONS[kernel](X[book_index], X)", "title": "" }, { "docid": "6664e919747644e42cd8a649c60aeced", "score": "0.6022178", "text": "def cosineSimilarity(title, sentence):\n\ttitle = Counter(title)\n\tsentence = Counter(sentence)\n\tsharedWords = set(title.keys()) & set(sentence.keys())\n\tfirstTerm = sum([title[word] * sentence[word] for word in sharedWords])\n\tfirstSum = sum([title[word]**2 for word in list(title.keys())])\n\tsecondSum = sum([sentence[word]**2 for word in list(sentence.keys())])\n\tsecondTerm = math.sqrt(firstSum) * math.sqrt(secondSum)\n\tif not secondTerm:\n\t\treturn 0.0\n\telse:\n\t\treturn firstTerm/secondTerm", "title": "" }, { "docid": "1145a3216870ce1df17407968c232cb6", "score": "0.6014272", "text": "def compute_distances_one_loop(self, X):\r\n num_test = X.shape[0]\r\n num_train = self.X_train.shape[0]\r\n dists = np.zeros((num_test, num_train))\r\n for i in range(num_test):\r\n # using the broadcasting\r\n dists[i] = np.sqrt(np.sum(np.square(X[i] - self.X_train), axis=1))\r\n return dists", "title": "" }, { "docid": "c9ec73cf145984d301a0780f309014ec", "score": "0.5994774", "text": "def compute_sse(self, X, labels=None):\n if labels is None:\n labels = self.find_closest_clusters(X)\n distance = np.zeros(X.shape[0])\n for k in range(self.n_cluster):\n dist = np.linalg.norm(X[labels == k] - self.centroids[k], axis=1)\n distance[labels == k] = dist\n return np.sum(np.square(distance))", "title": "" }, { "docid": "e0d5e62f3d0003d4a79404db4dc6ea5a", "score": "0.59853154", "text": "def entropy_weighted_cosine_distance(X_saliency, X_model):\n\tdef cosine_distance(X_norm, X_model):\n\t\tnorm1 = np.sqrt(np.sum(X_norm**2, axis=0))\n\t\tnorm2 = np.sqrt(np.sum(X_model**2, axis=0))\n\n\t\tdist = np.sum(X_norm*X_model, axis=0)/norm1/norm2\n\t\treturn dist\n\n\tdef entropy(X):\n\t\tinformation = np.log2(4) - np.sum(-X*np.log2(X+1e-10),axis=0)\n\t\treturn information\n\n\tX_norm = utils.normalize_pwm(X_saliency, factor=3)\n\tcd = cosine_distance(X_norm, X_model)\n\tmodel_info = entropy(X_model)\n\ttpr = np.sum(model_info*cd)/np.sum(model_info)\n\n\tinv_model_info = -(model_info-2)\n\tinv_cd = -(cd-1)\n\tfpr = np.sum(inv_cd*inv_model_info)/np.sum(inv_model_info)\n\n\treturn tpr, fpr", "title": "" }, { "docid": "c10273e0d5dea49fe0218521120ec8b5", "score": "0.5984344", "text": "def compute_distance(self, X: np.array):\n num_test = 
X.shape[0]\n num_train = self.X_train.shape[0]\n dist = np.zeros((num_test, num_train))\n\n dist = np.sqrt((X ** 2).sum(axis=1, keepdims=1) +\n (self.X_train ** 2).sum(axis=1) - 2 * X.dot(self.X_train.T))\n return dist", "title": "" }, { "docid": "10aa2438cd0d1427f49e1f4f23db6de8", "score": "0.598314", "text": "def calculate_cosine_similarity (self, tweets_of_a_trend): # tweets are in a list.\n if not tweets_of_a_trend:\n print 'No tweets to classify.'\n return\n query_term_vector = {}\n for tweet in tweets_of_a_trend:\n #print tweet\n tweet = json.loads(tweet)\n for token in re.findall('[a-zA-Z0-9]+', tweet['text']):\n token = self._case_fold(token)\n if not token or token in nltk.corpus.stopwords.words('english'):\n continue\n '''wrd = enchant.Dict(\"en_US\")\n if not wrd.check(token):\n continue'''\n token = nltk.WordNetLemmatizer().lemmatize(token)\n if query_term_vector.get(token,False):\n query_term_vector[token] += query_term_vector.get(token, 0.0) + 1\n else:\n query_term_vector[token] = 1\n query_magnitude = math.sqrt(\n math.fsum([math.pow(count, 2) for count in query_term_vector.values()]))\n\n # Calculate cosine scores.\n cosine_scores = {}\n for city, postings in self.city_vectors.iteritems():\n for word in query_term_vector.keys():\n cosine_scores[city] = cosine_scores.get(city, 0.0) + (\n query_term_vector[word] * postings.get(word, 0.0))\n cosine_scores[city] = cosine_scores[city] / (\n self.city_vectors_magnitude[city] * query_magnitude)\n\n return sorted(cosine_scores.iteritems(),\n key=lambda x: x[1],\n reverse=True\n )[:50]", "title": "" }, { "docid": "6f58a1914c392a72b4edce83ccda0d53", "score": "0.5963523", "text": "def cosine_distance(a, b):\n pass", "title": "" }, { "docid": "1842c1c85ddbde88f181780fab1a681f", "score": "0.59447294", "text": "def cosine_similarity (a, b):\n if a is None:\n a = 0\n if b is None:\n b = 0\n\n norm_a = np.linalg.norm(a)\n norm_b = np.linalg.norm(b)\n adotb = np.dot(a,b)\n\n return adotb / (norm_a * norm_b)", "title": "" }, { "docid": "af169fead87080f7f78f35fd025a7011", "score": "0.5926683", "text": "def user_sim(data): \n similarities = cosine_similarity(data)\n return similarities", "title": "" }, { "docid": "78428b8c25cae613329b22045915a810", "score": "0.5923504", "text": "def cal_pairwise_distance(self,X):\n\n Y =X\n X = np.expand_dims(X[0], axis=0)\n rx=np.reshape(np.sum(np.power(X,2),axis=1),(-1,1))\n ry=np.reshape(np.sum(np.power(Y,2),axis=1),(-1,1))\n dist=np.clip(rx-2.0*np.matmul(X,np.transpose(Y))+np.transpose(ry),0.0,float('inf'))\n\n return np.sqrt(dist)", "title": "" }, { "docid": "7275e1321e4d39835a22354c2fba9578", "score": "0.5920771", "text": "def transform(self, X):\n X1, X2 = np.split(X, 2, axis=1)\n\n vectorized = np.vectorize(self.similarity_function)\n n_samples = X1.shape[0]\n\n val = vectorized(X1, X2)\n return val.reshape((n_samples, 1))", "title": "" }, { "docid": "c1cd1ab2c5c050ba03451623d0af9f72", "score": "0.5919124", "text": "def cosine_similarity(ratings_1, ratings_2):\n intersection_size = 0\n summation_xy = summation_power_x = summation_power_y = 0\n\n for key in ratings_1:\n x = ratings_1[key] # rating by x for a particular item\n if key in ratings_2:\n y = ratings_2[key] # rating by y for a particular item\n intersection_size += 1\n else:\n y = 0\n summation_xy += x*y # the sum of the product Xi*Yi, from i to n\n summation_power_x += pow(x, 2) # the sum of Xi^2, from i to n\n summation_power_y += pow(y, 2) # the sum of Yi^2, from i to n\n\n if (len(ratings_2) > intersection_size):\n for key in ratings_2:\n 
y = ratings_2[key]\n if key not in ratings_1:\n x = 0\n summation_xy += x*y\n summation_power_x += pow(x, 2)\n summation_power_y += pow(y, 2)\n\n denominator = math.sqrt(summation_power_x) * math.sqrt(summation_power_y)\n\n if (denominator == 0):\n return 0\n else:\n numerator = summation_xy\n return numerator / denominator", "title": "" }, { "docid": "d6d5bfe6521000953545fead3f5f050f", "score": "0.5914005", "text": "def _cosine_sim(self, vecA, vecB):\n csim = np.dot(vecA, vecB) / (np.linalg.norm(vecA) * np.linalg.norm(vecB))\n if np.isnan(np.sum(csim)):\n return 0\n return csim", "title": "" }, { "docid": "86082a30d0cb5bb887fe4def182c0722", "score": "0.59043473", "text": "def CosineSimilarity(A, B):\n numerator = (A * B).sum()\n denoma = (A * A).sum()\n denomb = (B * B).sum()\n return 1 - numerator / np.sqrt(denoma*denomb)", "title": "" }, { "docid": "f550e3efe0c748787f420a29caf3935b", "score": "0.5895549", "text": "def cosine_sim(text1, text2):\n doc1 = nlp(text1)\n doc2 = nlp(text2)\n return doc1.similarity(doc2)", "title": "" }, { "docid": "09e88710e5a9a607c60ee0e3450801c5", "score": "0.58891916", "text": "def cosineSimilarity(s1, s2):\r\n return len(s1 & s2) / math.sqrt(float(len(s1) * len(s2)))", "title": "" }, { "docid": "d579c779cc981aa3d4b7b9c939a0d9b0", "score": "0.5868174", "text": "def CosineSimilarity(inverted_index, doc_set =None):\r\n\r\n def cosine_ranking(field_name, query_terms_as_string):\r\n docScore =defaultdict(float)\r\n tokenized_text = inverted_index.field_names[field_name](query_terms_as_string)\r\n query_emb =inverted_index.get_avg_doc_embedding(tokenized_text)\r\n docs_wth_term =set()\r\n for word in tokenized_text:\r\n docs_wth_term =set.union(docs_wth_term, inverted_index.term_frequency[field_name][word].keys())\r\n #print(len(docs_wth_term))\r\n #print(query_emb)\r\n for doc_id in docs_wth_term:\r\n #print(doc_id)\r\n d_scor = 1- cosine_similarity(query_emb.reshape( 1,EMBEDDING_DIM), inverted_index.doc_avg_embeddings[field_name][doc_id].reshape( 1,EMBEDDING_DIM))[0]\r\n docScore [doc_id] = d_scor\r\n #print(doc_id, d_scor)\r\n return docScore\r\n return cosine_ranking", "title": "" }, { "docid": "e6f7d14b5bd1b5207ad38ac64a30acfe", "score": "0.5837006", "text": "def compute_distances(self, X):\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train)) \n\n #for i in range(num_test):\n # dists[i, :] = np.sqrt(np.sum(np.square(self.X_train - X[i, :]), axis=1))\n \n \n # Computes the sum of the X_train values, by summing along the column. \n # This results in a row vector.\n X_train_sum = np.sum(self.X_train**2, axis = 1)\n \n # Computes the sum of the X_test values by summing along the column, which\n # gives a row vector. The row vector is rechaped into a column vector.\n X_test_sum = np.sum(X**2, axis = 1).reshape(-1,1)\n \n # Computing the cross product with matrix multiplication, with X_train\n # transposed. 
The train values are along the rows and the test values \n # along the columns\n inner_prod = X.dot(self.X_train.T)\n \n # Computes the sqrt\n dists = np.sqrt(X_train_sum + X_test_sum - 2*inner_prod)\n \n return dists", "title": "" }, { "docid": "3929d5e1947f0e378e7c0457018d67de", "score": "0.58193153", "text": "def cosine_similarity_text_embedding(variable_1, variable_2, model):\n return pd.Series([_cosine_similarity_numeric(_text_embedding(variable_1.iloc[i], model), _text_embedding(variable_2.iloc[i], model))\n for i in range(0, variable_1.shape[0])])", "title": "" }, { "docid": "aa5401cfe53137b77c3fc59503313cbc", "score": "0.5818264", "text": "def compute_distances(Xtrain, X):\n\t#####################################################\n\t#\t\t\t\t YOUR CODE HERE\t\t\t\t\t #\n\t#####################################################\n\tdists = np.zeros(shape=(np.size(X, 0), np.size(Xtrain, 0)))\n\tfor row in np.arange(1,np.size(X, 0)):\n\t\tdists[row, :] = np.transpose(np.sqrt(np.sum(np.square(np.subtract(Xtrain, X[row,:])), axis=1)))\n\treturn dists", "title": "" }, { "docid": "fe6539359cd33ee4097987a9ff266f20", "score": "0.57948875", "text": "def cosine_similarity(text_collection, text1, text2, template):\n return 2.0", "title": "" }, { "docid": "fd5b077126e87f8eb8bd19b70466d771", "score": "0.57913035", "text": "def cosine_sim(d1, d2):\n return (np.dot(d1, d2)) / (np.linalg.norm(d1)* np.linalg.norm(d2))", "title": "" }, { "docid": "b5b8ebe0599cbd7e48241ac45d9518f0", "score": "0.57850695", "text": "def cosine_sim(v1, v2):\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))", "title": "" }, { "docid": "2d83e0000ca550e944c5c21e40e03863", "score": "0.5784923", "text": "def cosine(a, b):\n return np.dot(a, b) / (norm(a) * norm(b))", "title": "" }, { "docid": "f7eb6f9b97ae78ea29bb1422a0ee2ea8", "score": "0.5781667", "text": "def cosine_similarity(u, v):\n norm_u = np.sqrt(np.sum(np.square(u)))\n norm_v = np.sqrt(np.sum(np.square(v)))\n\n return np.dot(u, v) / (norm_u * norm_v)", "title": "" }, { "docid": "856295effa6d5069c5665068147954a7", "score": "0.57674575", "text": "def euclidean_distance(self, X):\n # input: single data point\n if X.ndim == 1:\n l2 = np.sqrt(np.sum((self.data - X)**2, axis=1))\n\n # input: matrix of data points\n if X.ndim == 2:\n n_samples, _ = X.shape\n l2 = [np.sqrt(np.sum((self.data - X[i])**2, axis=1)) for i in range(n_samples)]\n\n return np.array(l2)", "title": "" }, { "docid": "a872a060e30115c50195618530a76dea", "score": "0.5750101", "text": "def cosine(qv, av):\n qI, qJ, qV = scipy.sparse.find(qv) # these are sorted\n aI, aJ, aV = scipy.sparse.find(av)\n\n dot = 0.0\n nrmq = 0.0\n nrma = 0.0\n \n # compute norms and dot products of sparse row vectors\n i = 0; j = 0\n for i in xrange(len(qJ)):\n nrmq += qV[i]*qV[i]\n\n while j < len(aJ) and aJ[j] <= qJ[i]:\n nrma += aV[j]*aV[j]\n \n if aJ[j] == qJ[i]:\n dot += qV[i]*qV[i]\n\n j += 1\n\n nrmq = math.sqrt(nrmq)\n nrma = math.sqrt(nrma)\n \n if nrmq > 0 and nrma > 0:\n score = dot / nrmq / nrma\n else:\n score = 0.0\n\n return score", "title": "" }, { "docid": "15cbb1ce0628d241a7d6e1a2e0e34966", "score": "0.5721069", "text": "def my_mds_cosine(x, n=2):\n dists = np.array([[1 - cosine(a, b) for a in x] for b in x])\n return MDS(n, dissimilarity='precomputed').fit_transform(\n dists,\n init=PCA(n).fit_transform(x),\n )", "title": "" }, { "docid": "1a602e6521b226f75c5d9fda2c8005d6", "score": "0.57185566", "text": "def cosine_sim(a, b):\n return a.multiply(b).sum()/(math.sqrt(sum(a.data**2)) * 
math.sqrt(sum(b.data**2)))", "title": "" }, { "docid": "d6f5d5646f460af429285790675107ee", "score": "0.5704644", "text": "def compute_distance(self, X):\n distance = np.zeros((X.shape[0], self.n_cluster))\n for k in range(self.n_cluster):\n row_norm = np.linalg.norm(X - self.centroids[k, :], axis=1)\n distance[:, k] = np.square(row_norm)\n return distance", "title": "" }, { "docid": "e5d4d6a80a28c4f4334d042b52631866", "score": "0.5697984", "text": "def create_similarity_matrix(self, X):\n W = []\n for x_i in X:\n W.append(self.k_nearest_list(X, x_i))\n W = np.array(W)\n return np.where(np.logical_or(W, W.T), 1, 0)", "title": "" }, { "docid": "518bbe213a8b4ce8f5850714cf15eac6", "score": "0.5693336", "text": "def cosine_similarity(u, v):\r\n distance = 0.0\r\n\r\n dot = np.dot(u, v)\r\n\r\n norm_u = np.sqrt(np.sum(np.square(u)))\r\n norm_v = np.sqrt(np.sum(np.square(v)))\r\n\r\n cosine_similarity = dot / (norm_u * norm_v)\r\n\r\n return cosine_similarity", "title": "" }, { "docid": "d62cae04fe545d2922a964838d89708c", "score": "0.56843895", "text": "def compute_cosine_distance_matrix(source_samples: tf.Tensor,\n target_samples: tf.Tensor) -> tf.Tensor:\n distance_matrix = tf.matmul(source_samples,\n tf.transpose(target_samples, perm=(1, 0)))\n return 1 - distance_matrix", "title": "" }, { "docid": "16704c53c82c55f8095ea2ffdcd6e486", "score": "0.5677103", "text": "def testGetCosineSimilarity(self):\n list1 = [1,0,1,0]\n list2 = [0,1,0,1]\n list3 = [2,0,2,0]\n self.assertEquals(self.dataProcessorGaussAndCosine._getCosineSimilarity(list1, list2), 0)\n self.assertAlmostEquals(self.dataProcessorGaussAndCosine._getCosineSimilarity(list1, list3), 1.0, delta=0.01)", "title": "" }, { "docid": "379013d76bc41d7f069250ae78d95b1f", "score": "0.56737804", "text": "def cosine(doc1,doc2):\n weights1 = doc_topic_weights[doc1]\n weights2 = doc_topic_weights[doc2]\n dotProduct = np.dot(weights1,weights2)\n mag1 = np.sqrt(sum([np.square(weight) for weight in weights1]))\n mag2 = np.sqrt(sum([np.square(weight) for weight in weights2]))\n return dotProduct/(mag1*mag2)", "title": "" }, { "docid": "5fae32dd25bdcb75172f285dec677838", "score": "0.5656921", "text": "def euclidean_dist(self, x_1, x_2):\n return F.pairwise_distance(x_1, x_2, p=2)", "title": "" }, { "docid": "f34c1ab7a580c00acd696e211fb7a06e", "score": "0.56515634", "text": "def similarity_Cosine(query, simDocs, df_signature):\r\n try:\r\n query = df_signature[query]\r\n simDocs = df_signature[simDocs]\r\n return np.dot(simDocs,query)/(np.sum(simDocs**2) * np.sum(query**2))**0.5\r\n except:\r\n return 0", "title": "" }, { "docid": "b37e0a73dd98ec889f2cfb5f56709d7f", "score": "0.5646891", "text": "def getCosine(self, query_words):\n\t\treturn self.cosine_score(query_words, self.inv_idx, self.idf, self.doc_norms)", "title": "" }, { "docid": "6889c722964e5ca929a49619a3fcd592", "score": "0.564246", "text": "def calculate_cosine(vec1: np.ndarray, vec2: np.ndarray) -> np.ndarray:\n if np.shape(vec1) != np.shape(vec2):\n raise ValueError('{} must have the same shape as {}'.format(vec1, vec2))\n ndim = np.ndim(vec1)\n norm_product = (np.linalg.norm(vec1, axis=-1) * np.linalg.norm(vec2, axis=-1))\n zero_norms = norm_product == 0\n if np.any(zero_norms):\n if ndim>1:\n norm_product[zero_norms] = 1\n else:\n norm_product = 1\n # Return the batched dot product.\n return np.einsum('...i,...i', vec1, vec2) / norm_product", "title": "" }, { "docid": "5c30e33e06cae22501a87b8a31986e67", "score": "0.5637486", "text": "def test_cos_sim(self):\n a = onp.random.randn(50, 
100)\n b = onp.random.randn(50, 100)\n\n pytorch_cos_scores = torch_util.cos_sim(a, b).numpy()\n jax_cos_scores = onp.asarray(jax_util.cos_sim(a, b))\n\n assert pytorch_cos_scores.shape == jax_cos_scores.shape\n for i in range(len(jax_cos_scores)):\n for j in range(len(jax_cos_scores[0])):\n assert abs(pytorch_cos_scores[i][j] - jax_cos_scores[i][j]) < 0.001, \"Output : torch - {}, jax - {}\" \\\n .format(pytorch_cos_scores[i], jax_cos_scores[i])", "title": "" }, { "docid": "9cb5b74458c9ff48d51ca44719c888a3", "score": "0.5635511", "text": "def get_cosine_sim(query, distributions):\n similarities = []\n for k, dist in distributions.items():\n sim = cossim(query, dist)\n similarities.append((k, sim))\n return similarities", "title": "" }, { "docid": "2ef46aa5c51c405be19549bba26e4348", "score": "0.5625789", "text": "def cosine_similarity(u, v):\n \n distance = 0.0\n \n # Compute the dot product between u and v\n dot = np.dot(u.T,v)\n # Compute the L2 norm of u\n norm_u = np.sqrt(np.sum(np.square(u)))\n \n # Compute the L2 norm of v\n norm_v = np.sqrt(np.sum(np.square(v)))\n # Compute the cosine similarity\n cosine_similarity = dot / (norm_u*norm_v)\n #print(dot.shape,norm_u.shape,norm_v.shape)\n \n return cosine_similarity", "title": "" }, { "docid": "a440a5f30844c2be8744b2fd9da74ebe", "score": "0.5624569", "text": "def adjusted_cosine_similarity(self, wanted_movie, movie, averages):\n assert wanted_movie.size == movie.size\n wanted_movie_mm = np.zeros(shape=(200))\n movie_mm = np.zeros(shape=(200))\n #Subract average user rating from the movies for adjusted part\n i = 0\n for wm, m in zip(wanted_movie, movie):\n if m != 0:\n movie_mm[i] = (m - averages[i])\n if wm != 0:\n wanted_movie_mm[i] = (wm - averages[i])\n i+=1\n\n numerator = sum(((float(x)*float(y)) for x, y in zip(movie_mm, wanted_movie_mm)))\n denominator = (sqrt(sum((float(x))**2 for x in movie_mm))) * (sqrt(sum(float(y)**2 for y in wanted_movie_mm)))\n\n try:\n return float(numerator)/ float(denominator)\n except:\n return 0.0", "title": "" }, { "docid": "b01a18aa42bb46278cdedc6674ea69ff", "score": "0.56040674", "text": "def csr_cos(x: CSRTensor) -> CSRTensor:\n if not isinstance(x, CSRTensor):\n raise_type_error('Expects CSRTensor for csr_cos')\n return CSRTensor(x.indptr, x.indices, math_func.cos(x.values), x.shape)", "title": "" }, { "docid": "e79362c6a3fdfbbea2cde37e8e3d2a2e", "score": "0.5587757", "text": "def pairwise_distance(X: List[List[float]], Y: List[List[float]]) -> List[List[float]]:\n pass", "title": "" }, { "docid": "50af573461a19e5d08ee01ebe3ec39c5", "score": "0.557988", "text": "def adjusted_cosine_similarity(ratings_1, ratings_2):\n intersection_size = 0\n summation_xy = summation_power_x = summation_power_y = 0\n\n mean_x = numpy.mean(ratings_1.values()) # mean value of ratings vector x\n mean_y = numpy.mean(ratings_2.values()) # mean value of ratings vector y\n\n for key in ratings_1:\n x = ratings_1[key] # rating by x for a particular item\n if key in ratings_2:\n y = ratings_2[key] # rating by y for a particular item\n intersection_size += 1\n summation_xy += (x-mean_x)*(y-mean_y)\n summation_power_x += pow(x-mean_x, 2)\n summation_power_y += pow(y-mean_y, 2)\n else:\n y = 0\n summation_xy += (x-mean_x)*(y)\n summation_power_x += pow(x-mean_x, 2)\n summation_power_y += pow(y, 2)\n\n if (len(ratings_2) > intersection_size):\n for key in ratings_2:\n y = ratings_2[key] # rating by y for a particular item\n if key not in ratings_1:\n x = 0\n summation_xy += (x)*(y-mean_y)\n summation_power_x += pow(x, 2)\n 
summation_power_y += pow(y-mean_y, 2)\n\n denominator = math.sqrt(summation_power_x) * math.sqrt(summation_power_y)\n\n if (denominator == 0):\n return 0\n else:\n numerator = summation_xy\n return numerator / denominator", "title": "" }, { "docid": "06d110daa9ad10de0c7c4b98dd368f52", "score": "0.55794835", "text": "def calculate_similarity(data_items): # data_items is a DataFrame\r\n data_sparse = sparse.csr_matrix(data_items)\r\n similarities = cosine_similarity(data_sparse.transpose())\r\n sim = pd.DataFrame(data=similarities)\r\n return sim", "title": "" }, { "docid": "a226aa1df34ba5b15a06abffaa5e288b", "score": "0.5578007", "text": "def cosine_seq(seq1, seq2, epsilon=1e-12, xp=np):\r\n assert seq1.shape == seq2.shape\r\n seq1 = mat_normalize(seq1, epsilon=epsilon, xp=xp)\r\n seq2 = mat_normalize(seq2, epsilon=epsilon, xp=xp)\r\n cos = xp.clip(xp.sum(seq1 * seq2, axis=1), 0.0, 1.0)\r\n return cos", "title": "" }, { "docid": "39f1cb10401a8401a684a16e37f82936", "score": "0.55689454", "text": "def objfunction(self, x):\n if len(x) != self.dim:\n raise ValueError('Dimension mismatch')\n total = 1\n for i, y in enumerate(x):\n total *= np.cos(y / np.sqrt(i+1))\n return 1.0 / 4000.0 * sum([y**2 for y in x]) - total + 1", "title": "" }, { "docid": "1a565346a78f7d2edb36fe6c82a37642", "score": "0.5560888", "text": "def create_similarity_matrix_cosine(matrix):\n mc_matrix = matrix - matrix.mean(axis = 0)\n return pd.DataFrame(pw.cosine_similarity(mc_matrix.fillna(0)), index = matrix.index, columns = matrix.index)", "title": "" } ]
73d4f1f8bd56fd8e6e8a192a4a148516
Used for AllXY measurement and calibration for multiple qubits simultaneously.
[ { "docid": "3e9c1545d6f0eb6c0c3e576e4611f682", "score": "0.6008026", "text": "def multi_qubit_AllXY(qubits_idx: list, platf_cfg: str, double_points: bool = True) -> OqlProgram:\n\n p = OqlProgram(\"Multi_qubit_AllXY\", platf_cfg)\n\n allXY = [['i', 'i'], ['rx180', 'rx180'], ['ry180', 'ry180'],\n ['rx180', 'ry180'], ['ry180', 'rx180'],\n ['rx90', 'i'], ['ry90', 'i'], ['rx90', 'ry90'],\n ['ry90', 'rx90'], ['rx90', 'ry180'], ['ry90', 'rx180'],\n ['rx180', 'ry90'], ['ry180', 'rx90'], ['rx90', 'rx180'],\n ['rx180', 'rx90'], ['ry90', 'ry180'], ['ry180', 'ry90'],\n ['rx180', 'i'], ['ry180', 'i'], ['rx90', 'rx90'],\n ['ry90', 'ry90']]\n\n # this should be implicit\n if 0: # FIXME: p.set_sweep_points has been replaced by p.sweep_points, since that was missing here they are probably not necessary for this function\n p.set_sweep_points(np.arange(len(allXY), dtype=float))\n\n for i, xy in enumerate(allXY):\n if double_points:\n js = 2\n else:\n js = 1\n for j in range(js):\n k = p.create_kernel(\"AllXY_{}_{}\".format(i, j))\n for qubit in qubits_idx:\n k.prepz(qubit)\n k.gate(xy[0], [qubit])\n k.gate(xy[1], [qubit])\n k.measure(qubit)\n p.add_kernel(k)\n\n p.compile()\n return p", "title": "" } ]
[ { "docid": "051cce46202ff410a55de1980f5f67e2", "score": "0.5637091", "text": "def AP_Keq_examples(self):\n\n # calculate keq values using latest parameter values\n c1, c2, c3 = self.coeffs\n\n # calc keq for all\n for its in self.dg100 + self.dg400:\n its.calc_keq(c1, c2, c3)\n\n output = {}\n # How to return in a way that is easy for plotting?\n # The whole dictionary approach is getting tedious.\n # Can you use tuples as keys? yes!\n dg1xx = 'DG164'\n dg4xx = 'DG444'\n dg100Obj = [d for d in self.dg100 if d.name == dg1xx][0]\n dg400Obj = [d for d in self.dg400 if d.name == dg4xx][0]\n\n row = 0 # AP\n for col in range(4):\n key = (row, col)\n if col == 0:\n data = dg100Obj.abortiveProb\n data_std = dg100Obj.abortiveProb_std\n\n output[key] = (data, data_std, dg1xx, 'AP')\n\n if col == 1:\n data = dg400Obj.abortiveProb\n data_std = dg400Obj.abortiveProb_std\n\n output[key] = (data, data_std, dg4xx, 'AP')\n\n if col == 2:\n data = np.mean([d.abortiveProb for d in self.dg100], axis=0)\n data_std = np.std([d.abortiveProb for d in self.dg100], axis=0)\n\n output[key] = (data, data_std, 'DG100 library', 'AP')\n\n if col == 3:\n data = np.mean([d.abortiveProb for d in self.dg400], axis=0)\n data_std = np.std([d.abortiveProb for d in self.dg400], axis=0)\n\n output[key] = (data, data_std, 'DG400 library', 'AP')\n\n row = 1 # Keq\n for col in range(4):\n key = (row, col)\n if col == 0:\n data = dg100Obj.keq\n data_std = []\n\n output[key] = (data, data_std, dg1xx, 'Keq')\n\n if col == 1:\n data = dg400Obj.keq\n data_std = []\n\n output[key] = (data, data_std, dg4xx, 'Keq')\n\n if col == 2:\n data = np.mean([d.keq for d in self.dg100], axis=0)\n data_std = np.std([d.keq for d in self.dg100], axis=0)\n\n output[key] = (data, data_std, 'DG100 library', 'Keq')\n\n if col == 3:\n data = np.mean([d.keq for d in self.dg400], axis=0)\n data_std = np.std([d.keq for d in self.dg400], axis=0)\n\n output[key] = (data, data_std, 'DG400 library', 'Keq')\n\n return output", "title": "" }, { "docid": "e25558d314d2acd1914bea93fc875480", "score": "0.56229573", "text": "def run(self):\r\n \r\n #DAQ\r\n with nidaqmx.Task() as slave_Task3, nidaqmx.Task() as master_Task:\r\n #slave_Task3 = nidaqmx.Task()\r\n slave_Task3.ao_channels.add_ao_voltage_chan(\"/Dev1/ao0:1\")\r\n master_Task.ai_channels.add_ai_voltage_chan(\"/Dev1/ai0\")\r\n \r\n slave_Task3.timing.cfg_samp_clk_timing(rate = self.sampleRate, source='ai/SampleClock',\r\n sample_mode = nidaqmx.constants.AcquisitionType.CONTINUOUS)\r\n \r\n \r\n # Analoginput\r\n master_Task.timing.cfg_samp_clk_timing(rate = self.sampleRate,\r\n sample_mode = nidaqmx.constants.AcquisitionType.CONTINUOUS,\r\n samps_per_chan = self.readNumber)\r\n \r\n reader = AnalogSingleChannelReader(master_Task.in_stream)\r\n writer = AnalogMultiChannelWriter(slave_Task3.out_stream)\r\n \r\n reader.auto_start = False\r\n writer.auto_start = False\r\n \r\n writer.write_many_sample(self.wave)\r\n \r\n \"\"\"Reading data from the buffer in a loop. \r\n The idea is to let the task read more than could be loaded in the buffer for each iteration.\r\n This way the task will have to wait slightly longer for incoming samples. And leaves the buffer\r\n entirely clean. 
This way we always know the correct numpy size and are always left with an empty\r\n buffer (and the buffer will not slowly fill up).\"\"\"\r\n output = np.zeros(self.readNumber)\r\n slave_Task3.start() #Will wait for the readtask to start so it can use its clock\r\n master_Task.start()\r\n print('Contour scanning!!')\r\n while not self.isInterruptionRequested():\r\n reader.read_many_sample(data = output, \r\n number_of_samples_per_channel = self.readNumber)\r\n \r\n #Emiting the data just received as a signal\r\n\r\n Dataholder_average = np.mean(output.reshape(self.averagenumber, -1), axis=0)\r\n \r\n self.data_PMT = np.reshape(Dataholder_average, (self.ypixelnumber, self.ScanArrayXnum))\r\n \r\n self.data_PMT= self.data_PMT*-1\r\n #self.measurement.emit(self.data_PMT) \r", "title": "" }, { "docid": "1b0e4760cdbe1e069ec89904bf0ac7fb", "score": "0.5547117", "text": "def two_qubit_AllXY(\n q0: int,\n q1: int,\n platf_cfg: str,\n sequence_type='sequential',\n replace_q1_pulses_with: str = None,\n repetitions: int = 1\n) -> OqlProgram:\n p = OqlProgram('two_qubit_AllXY', platf_cfg)\n\n pulse_combinations = [['i', 'i'], ['rx180', 'rx180'], ['ry180', 'ry180'],\n ['rx180', 'ry180'], ['ry180', 'rx180'],\n ['rx90', 'i'], ['ry90', 'i'], ['rx90', 'ry90'],\n ['ry90', 'rx90'], ['rx90', 'ry180'],\n ['ry90', 'rx180'],\n ['rx180', 'ry90'], ['ry180', 'rx90'],\n ['rx90', 'rx180'],\n ['rx180', 'rx90'], ['ry90', 'ry180'],\n ['ry180', 'ry90'],\n ['rx180', 'i'], ['ry180', 'i'], ['rx90', 'rx90'],\n ['ry90', 'ry90']]\n\n pulse_combinations_q0 = np.repeat(pulse_combinations, repetitions, axis=0)\n\n if replace_q1_pulses_with is not None:\n # pulse_combinations_q1 = [[replace_q1_pulses_with]*2 for val in pulse_combinations]\n pulse_combinations_q1 = np.repeat(\n [[replace_q1_pulses_with] * 2], len(pulse_combinations_q0), axis=0)\n else:\n pulse_combinations_q1 = np.tile(pulse_combinations, [repetitions, 1])\n i = 0\n for pulse_comb_q0, pulse_comb_q1 in zip(pulse_combinations_q0,\n pulse_combinations_q1):\n i += 1\n k = p.create_kernel('AllXY_{}'.format(i))\n k.prepz(q0)\n k.prepz(q1)\n # N.B. 
The identity gates are there to ensure proper timing\n if sequence_type == 'interleaved':\n k.gate(pulse_comb_q0[0], [q0])\n k.gate('i', [q1])\n\n k.gate('i', [q0])\n k.gate(pulse_comb_q1[0], [q1])\n\n k.gate(pulse_comb_q0[1], [q0])\n k.gate('i', [q1])\n\n k.gate('i', [q0])\n k.gate(pulse_comb_q1[1], [q1])\n\n elif sequence_type == 'sandwiched':\n k.gate('i', [q0])\n k.gate(pulse_comb_q1[0], [q1])\n\n k.gate(pulse_comb_q0[0], [q0])\n k.gate('i', [q1])\n k.gate(pulse_comb_q0[1], [q0])\n k.gate('i', [q1])\n\n k.gate('i', [q0])\n k.gate(pulse_comb_q1[1], [q1])\n\n elif sequence_type == 'sequential':\n k.gate(pulse_comb_q0[0], [q0])\n k.gate('i', [q1])\n k.gate(pulse_comb_q0[1], [q0])\n k.gate('i', [q1])\n k.gate('i', [q0])\n k.gate(pulse_comb_q1[0], [q1])\n k.gate('i', [q0])\n k.gate(pulse_comb_q1[1], [q1])\n\n elif sequence_type == 'simultaneous':\n k.gate(pulse_comb_q0[0], [q0])\n k.gate(pulse_comb_q1[0], [q1])\n k.gate(pulse_comb_q0[1], [q0])\n k.gate(pulse_comb_q1[1], [q1])\n else:\n raise ValueError(\"sequence_type {} \".format(sequence_type) +\n \"['interleaved', 'simultaneous', \" +\n \"'sequential', 'sandwiched']\")\n k.measure(q0)\n k.measure(q1)\n p.add_kernel(k)\n\n p.compile()\n return p", "title": "" }, { "docid": "6004eda722e7a64ada60fedf81204449", "score": "0.5541819", "text": "def estimate_all(self):\r\n data = self.input\r\n Q = np.zeros((data.num_samples, 4))\r\n for t in range(data.num_samples):\r\n Q[t] = self.update(data.acc[t], data.mag[t])\r\n return Q", "title": "" }, { "docid": "037ce5fc3bdce315cd940a9dad87ccfb", "score": "0.55030704", "text": "def init_all_x(self):\n\n self.signs_minus = set()\n self.signs_i = set()\n\n self.col_x = [{i} for i in range(self.num_qubits)]\n self.col_z = [set() for i in range(self.num_qubits)]\n\n self.row_x = [{i} for i in range(self.num_qubits)]\n self.row_z = [set() for i in range(self.num_qubits)]", "title": "" }, { "docid": "7a455d52efb4938573e4708822a3daac", "score": "0.5471003", "text": "def run(self):\r\n \r\n #DAQ\r\n with nidaqmx.Task() as slave_Task3, nidaqmx.Task() as master_Task:\r\n #slave_Task3 = nidaqmx.Task()\r\n slave_Task3.ao_channels.add_ao_voltage_chan(\"/Dev1/ao0:1\")\r\n master_Task.ai_channels.add_ai_voltage_chan(\"/Dev1/ai0\")\r\n \r\n slave_Task3.timing.cfg_samp_clk_timing(rate = self.sampleRate, source='ai/SampleClock',\r\n sample_mode = nidaqmx.constants.AcquisitionType.CONTINUOUS)\r\n \r\n \r\n # Analoginput\r\n master_Task.timing.cfg_samp_clk_timing(rate = self.sampleRate,\r\n sample_mode = nidaqmx.constants.AcquisitionType.CONTINUOUS,\r\n samps_per_chan = self.readNumber)\r\n \r\n reader = AnalogSingleChannelReader(master_Task.in_stream)\r\n writer = AnalogMultiChannelWriter(slave_Task3.out_stream)\r\n \r\n reader.auto_start = False\r\n writer.auto_start = False\r\n \r\n writer.write_many_sample(self.wave)\r\n \r\n \"\"\"Reading data from the buffer in a loop. \r\n The idea is to let the task read more than could be loaded in the buffer for each iteration.\r\n This way the task will have to wait slightly longer for incoming samples. And leaves the buffer\r\n entirely clean. 
This way we always know the correct numpy size and are always left with an empty\r\n buffer (and the buffer will not slowly fill up).\"\"\"\r\n output = np.zeros(self.readNumber)\r\n slave_Task3.start() #Will wait for the readtask to start so it can use its clock\r\n master_Task.start()\r\n while not self.isInterruptionRequested():\r\n reader.read_many_sample(data = output, \r\n number_of_samples_per_channel = self.readNumber)\r\n \r\n #Emiting the data just received as a signal\r\n\r\n Dataholder_average = np.mean(output.reshape(self.averagenumber, -1), axis=0)\r\n \r\n self.data_PMT = np.reshape(Dataholder_average, (self.ypixelnumber, self.ScanArrayXnum))\r\n \r\n if self.ypixelnumber == 500:\r\n self.data_PMT= self.data_PMT[:, 50:550]*-1\r\n elif self.ypixelnumber == 256:\r\n self.data_PMT= self.data_PMT[:, 70:326]*-1\r\n \r\n self.measurement.emit(self.data_PMT)", "title": "" }, { "docid": "4ecf96a076338d8d3899f3ef5d62ec01", "score": "0.5470626", "text": "def __init__(self,measurements,p_space,include,pos = []):\r\n self.measurements = measurements #dictionary with the state and the data (ineterpolated and the time) \r\n self.modelnames = list(set([self.measurements[i].model for i in range(len(self.measurements))]))\r\n self.models = [self.measurements[i].model for i in range(len(self.measurements))]\r\n \r\n \"\"\"the boundaries in the system\"\"\"\r\n self.boundaries,self.observables,self.control,self.fixed = {},[],[],[]\r\n for model in self.models:\r\n \"\"\"unpack the information in the models\"\"\"\r\n for k,v in model.boundaries.items():\r\n self.boundaries[k] = v\r\n for i in model.observables:\r\n self.observables.append(i)\r\n for i in model.control:\r\n self.control.append(i)\r\n \"\"\"remove duplicates\"\"\"\r\n self.observables = list(set(self.observables))\r\n self.control = list(set(self.control))\r\n \r\n self.include = include\r\n if len(include) == 0:\r\n self.include = [i for i in self.boundaries.keys() if i not in self.control] \r\n for i in self.control:\r\n self.fixed.append(i)\r\n for parameter,value in self.boundaries.items():\r\n if i not in include and include != []:\r\n self.fixed.append(parameter)\r\n \r\n \"\"\"the needed information for the parameters and and parameter coordinates for mutation\"\"\"\r\n self.p_space = p_space\r\n self.spacing = len(list(self.p_space.values())[-1])\r\n self.forward = True\r\n \r\n \"\"\" inverted dictionary for parametres\"\"\"\r\n self.c_space = {}\r\n for p,dct in self.p_space.items():\r\n self.c_space[p] = {v:k for k, v in self.p_space[p].items()}\r\n \r\n \"\"\"these are the continuously updated parameter sets, its corresponding coordinates\r\n and the score sequence. They are only updated if the score is better\"\"\"\r\n self.position = pos\r\n self.coordinates = {k:self.c_space[k][v] for k,v in self.position.items()}\r\n self.height = None\r\n \r\n self.pareto_parameters = []\r\n self.last_forward_update = 0\r\n \"\"\"information of the EA et al. 
is still information on the kinetics\r\n of the model and range of potential behavior thus we store the data\r\n and the transition moreover we can track their status over time\"\"\"\r\n self.track = {}\r\n self.coordinate_track = {}\r\n \r\n \"\"\"the list of accepted mutations/recombinations\"\"\"\r\n self.progress = {}\r\n \"\"\"\"forward motion acceptance\"\"\"\r\n self.accepted_movements = []\r\n \"\"\"track agents which keep track of the evolution itself\"\"\"\r\n self.forward_sensitivity = {}", "title": "" }, { "docid": "d25ba1e45d43d95ce38bbb4548943325", "score": "0.54642355", "text": "def __loadXY(self):\r\n if self.xcoord=='xp':\r\n self.xplot = self.xp[self.indices]\r\n elif self.xcoord=='yp':\r\n self.xplot = self.yp[self.indices]\r\n elif self.xcoord=='dist':\r\n # Calculate the distance along the transect\r\n self.xplot=self.calcDistAxis() \r\n elif self.xcoord=='time':\r\n self.xplot = self.time[self.tstep]\r\n \r\n if self.ycoord=='z':\r\n self.yplot = self.z[self.klayer]\r\n elif self.ycoord=='time':\r\n self.yplot = self.time[self.tstep]\r\n elif self.ycoord=='dist':\r\n # Calculate the distance along the transect\r\n self.yplot=self.calcDistAxis()", "title": "" }, { "docid": "097918309b391464a146480471950f89", "score": "0.5424763", "text": "def two_qubit_VQE(q0: int, q1: int, platf_cfg: str) -> OqlProgram:\n tomo_pulses = ['i', 'rx180', 'ry90', 'rym90', 'rx90', 'rxm90']\n tomo_list_q0 = tomo_pulses\n tomo_list_q1 = tomo_pulses\n\n p = OqlProgram(\"two_qubit_VQE\", platf_cfg)\n\n # Tomography pulses\n i = 0\n for p_q1 in tomo_list_q1:\n for p_q0 in tomo_list_q0:\n i += 1\n kernel_name = '{}_{}_{}'.format(i, p_q0, p_q1)\n k = p.create_kernel(kernel_name)\n k.prepz(q0)\n k.prepz(q1)\n k.gate('ry180', [q0]) # Y180 gate without compilation\n k.gate('i', [q0]) # Y180 gate without compilation\n k.gate(\"wait\", [q1], 40)\n k.barrier([]) # alignment workaround\n k.gate('fl_cw_02', [2, 0])\n k.barrier([]) # alignment workaround\n k.gate(\"wait\", [q1], 40)\n k.gate(p_q0, [q0]) # compiled z gate+pre_rotation\n k.gate(p_q1, [q1]) # pre_rotation\n k.measure(q0)\n k.measure(q1)\n p.add_kernel(k)\n # every calibration point is repeated 7 times. This is copied from the\n # script for Tektronix driven qubits. 
I do not know if this repetition\n # is important or even necessary here.\n p.add_two_q_cal_points(q0=q1, q1=q0, reps_per_cal_pt=7)\n p.compile()\n return p", "title": "" }, { "docid": "81bcc5b7bea833502420b66fdc54c9ac", "score": "0.54167813", "text": "def initSpecialQuadMatrices(self):\n self._MatfromNtoUpsamp = cf.ResamplingMatrix(self._nptsUpsample,self._N,chebGridType,chebGridType);\n self._MatfromNto2panUp = cf.ResamplingMatrix(self._nptsUpsample,self._N,chebGridType,chebGridType,\\\n nPantarg=2);\n self._MatfromNtoUniform = cf.ResamplingMatrix(self._nptsUniform,self._N,'u',chebGridType);\n self._UpsampledCoefficientsMatrix = cf.CoeffstoValuesMatrix(self._nptsUpsample,self._nptsUpsample,chebGridType);\n self._UpsampCoeffLU = lu_factor(self._UpsampledCoefficientsMatrix);\n # Initialize LU factorization of vandermonde matrix for special quadrature\n self._specQuadNodes = cf.chebPts(self._nptsUpsample,[-1,1],chebGridType);\n self._upsampledWeights = cf.chebWts(self._nptsUpsample,[0, self._L], chebGridType);\n self._NupsampLUpiv = lu_factor(np.vander(self._specQuadNodes,increasing=True).T);", "title": "" }, { "docid": "c39d6a70d1d386c3ebe139f1c59f05c5", "score": "0.5369863", "text": "def finalize( self ):\n if self.presentation.__class__.__name__==\"GYx\" or\\\n self.presentation.__class__.__name__==\"Gfi\":\n var = self.vars[0]\n axmax = self.axmax[var.id]\n axmin = self.axmin[var.id]\n varmax = self.varmax[var.id]\n varmin = self.varmin[var.id]\n for v in self.vars[1:]:\n for ax in axmax.keys():\n axmax[ax] = max(axmax[ax],self.axmax[v.id][ax])\n axmin[ax] = min(axmin[ax],self.axmin[v.id][ax])\n varmax = max(varmax,self.varmax[v.id])\n varmin = min(varmin,self.varmin[v.id])\n if self.presentation.__class__.__name__==\"GYx\":\n # VCS Yxvsx\n ax = axmax.keys()[0]\n self.presentation.datawc_x1 = axmin[ax]\n self.presentation.datawc_x2 = axmax[ax]\n self.presentation.datawc_y1 = varmin\n self.presentation.datawc_y2 = varmax\n elif self.presentation.__class__.__name__==\"Gfi\":\n # VCS Isofill\n # First we have to identify which axes will be plotted as X and Y.\n # The following won't cover all cases, but does cover what we have:\n axaxi = {ax:id for id,ax in self.axax[var.id].items()}\n if 'X' in axaxi.keys():\n axx = axaxi['X']\n axy = axaxi['Y']\n else:\n axx = axaxi['Y']\n axy = axaxi['Z']\n # Now send the plotted min,max for the X,Y axes to the graphics:\n self.presentation.datawc_x1 = axmin[axx]\n self.presentation.datawc_x2 = axmax[axx]\n self.presentation.datawc_y1 = axmin[axy]\n self.presentation.datawc_y2 = axmax[axy]\n # The variable min and max, varmin and varmax, should be passed on to the graphics\n # for setting the contours. But apparently you can't tell VCS just the min and max;\n # you have to give it all the contour levels. So...\n nlevels=10\n nlrange = range(nlevels+1)\n nlrange.reverse()\n vminl = varmin/nlevels\n vmaxl = varmax/nlevels\n levels = [a*vminl+(nlevels-a)*vmaxl for a in nlrange]\n levels[0] = math.floor(levels[0]) # could do better but too much trouble\n levels[-1] = math.ceil(levels[-1])\n self.presentation.levels = (levels,)\n # Once you set the levels, the VCS default color choice looks bad. So you really\n # have to set contour fill colors (integers from 0 through 255) too:\n cmin = 32./nlevels\n cmax = 255./nlevels\n # A more flexible way to do what's going on here, thanks to Charles Doutriaux:\n # r=10\n # g=16\n # b=20\n # X.setcolorcell(16,r,g,b)\n # colors = [16,17,18,...] 
etc.\n # vcs.getcolors is useful, more complicated - see its doc string\n # vcs.mkscale probably does exactly what we need here - see its doc string\n colors = [int(round(a*cmin+(nlevels-a)*cmax)) for a in nlrange]\n self.presentation.fillareacolors = colors\n #self.presentation.fillareacolors=[32,48,64,80,96,112,128,144,160,176,240]", "title": "" }, { "docid": "473fe45544ea1109a113a82f11188953", "score": "0.53629273", "text": "def _compute_all(self) -> np.ndarray:\n if self.acc.shape != self.mag.shape:\n raise ValueError(\"acc and mag are not the same size\")\n num_samples = len(self.acc)\n Q = np.zeros((num_samples, 4))\n for t in range(num_samples):\n Q[t] = self.estimate(self.acc[t], self.mag[t])\n return Q", "title": "" }, { "docid": "473fe45544ea1109a113a82f11188953", "score": "0.53629273", "text": "def _compute_all(self) -> np.ndarray:\n if self.acc.shape != self.mag.shape:\n raise ValueError(\"acc and mag are not the same size\")\n num_samples = len(self.acc)\n Q = np.zeros((num_samples, 4))\n for t in range(num_samples):\n Q[t] = self.estimate(self.acc[t], self.mag[t])\n return Q", "title": "" }, { "docid": "473fe45544ea1109a113a82f11188953", "score": "0.53629273", "text": "def _compute_all(self) -> np.ndarray:\n if self.acc.shape != self.mag.shape:\n raise ValueError(\"acc and mag are not the same size\")\n num_samples = len(self.acc)\n Q = np.zeros((num_samples, 4))\n for t in range(num_samples):\n Q[t] = self.estimate(self.acc[t], self.mag[t])\n return Q", "title": "" }, { "docid": "43a5776de5c82b4e44c11beeb7c0036d", "score": "0.5358042", "text": "def configure(self):\n\n funcgen = SRS.SRSfunc(0)\n \n # first, disable function generator \n funcgen.setoutputenable(\"off\")\n\n # configure parameters:\n funcgen.setmode(self.funcstate.mode)\n\n funcgen.setamp(self.funcstate.amp)\n self.funcstate.amp = funcgen.amp # update with actual set value\n\n funcgen.setfreq(self.funcstate.freq)\n self.funcstate.freq = funcgen.freq # update with actual, set value\n\n funcgen.setoffset(self.funcstate.offset)\n self.funcstate.offset = funcgen.offset\n \n\n # now, the acqboard:\n acqout = AcqSocketOut()\n acqout.open()\n acqstat = AcqSocketStat()\n acqcmd = AcqBoardCmd()\n acqstat.open() \n \n # first, set mode\n acqout.send(acqcmd.switchmode(self.acqstate.mode, \\\n self.acqstate.rawchan))\n\n tmpcmdid = -1\n while tmpcmdid != acqcmd.cmdid:\n stat = unpack(\"BBB\", acqstat.read())\n tmpcmdid = stat[1]/2\n\n print \"mode set\"\n \n # then, set gain for each channel\n\n for i in AcqBoardCmd.channames.keys():\n \n acqout.send(acqcmd.setgain(i, self.acqstate.gains[i]))\n tmpcmdid = -1\n while tmpcmdid != acqcmd.cmdid:\n stat = unpack(\"BBB\", acqstat.read())\n tmpcmdid = stat[1]/2\n\n acqout.send(acqcmd.sethpfilter(i, self.acqstate.hpfs[i]))\n tmpcmdid = -1\n while tmpcmdid != acqcmd.cmdid:\n stat = unpack(\"BBB\", acqstat.read())\n tmpcmdid = stat[1]/2\n\n print \"filters and gains set\"\n \n \n \n # reenable function generator\n funcgen.setoutputenable(\"on\")", "title": "" }, { "docid": "2dd7facbac47aee56bc2b0322d0a2ead", "score": "0.5324179", "text": "def _init_qubits(self):\n\n # Initialize arrays to hold the error information. 
The qubits will be\n # arranged in the geometry of the surface code.\n\n self.data_qubits = np.zeros(shape=[self.dist, self.dist, 2],\n dtype=bool)\n self.anc_qubits = np.zeros(shape=[self.dist + 1, self.dist + 1,\n 2], dtype=bool)\n\n # Generate lists with qubit positions.\n\n self.data_l = [(m, n) for m in range(self.dist) for n in\n range(self.dist)]\n (self.x_anc_l, self.z_anc_l) = ([], [])\n for m in range(self.dist + 1):\n for n in range(self.dist + 1):\n if self._anc_exists(m, n):\n if np.mod(m + n, 2) == 0:\n self.x_anc_l.append((m, n))\n else:\n self.z_anc_l.append((m, n))\n\n # To produce small outputs, we can condense the matrix of ancilla qubits\n # into a one dimensional vector. Here we do this, and create some meta\n # information about which ancillas in this list correspond to x- and\n # which to z-stabilizer measurements.\n\n self.anc_l = list(sorted(self.x_anc_l)) \\\n + list(sorted(self.z_anc_l))\n (self.x_indcs, self.z_indcs) = ([], [])\n for ncond in range(len(self.anc_l)):\n if self.anc_l[ncond] in self.x_anc_l:\n self.x_indcs.append(ncond)\n elif self.anc_l[ncond] in self.z_anc_l:\n self.z_indcs.append(ncond)\n else:\n raise ValueError('ancilla is neither x- nor z')\n ncond += 1", "title": "" }, { "docid": "fbc71216964120ed66788802dc821180", "score": "0.53016627", "text": "def startCalibration( self ):\n self.logger.info(\"Starting Calibration Procedure\")\n funcReference = __name__ + '.startCalibration' # 2011-11-28 sp -- added logging\n self.svrLog.logID('xx', self.logPrefix, funcReference, 'startCalibration') # 2011-11-28 sp -- added logging; logging not tested\n self.m_jigZHeight = None\n self.m_1mlTipAngle = None\n self.m_5mlTipAngle = None\n self.m_sampleVialAngle = None\n self.m_reagentVialAngle = None\n self.m_lysisVialAngle = None\n self.m_separationVialAngle = None\n self.m_wasteVialAngle = None\n self.m_jigCarouselAngle1 = None\n self.m_jigCarouselAngle2 = None\n self.m_jigCarouselAngle1_v2 = None\n self.m_jigCarouselAngle2_v2 = None\n self.m_1mlStripTipAngle = None\n self.m_5mlStripTipAngle = None\n self.m_bccmAngle = None\n self.m_stripZHeight1ml = None\n self.m_stripZHeight5ml = None\n self.m_bccmHeight = None\n\n self.m_smallVialTipHeight = None\n self.m_sampleTubeTipHeight = None\n \n self.m_sampleVialRef = None\n self.m_sampleVial2Ref = None\n self.m_1mlTipStripRef = None\n self.m_5mlTipStripRef = None\n self.m_bccmRef = None\n\n\n #to fix mid script file write problem\n self.m_tip_1ml_position1_degrees = None\n self.m_tip_1ml_position2_degrees = None\n self.m_tip_1ml_position3_degrees = None\n self.m_tip_5ml_position4_degrees = None\n self.m_tip_5ml_position5_degrees = None\n self.m_antibodyvial_degrees = None\n self.m_cocktailvial_degrees = None\n self.m_particlevial_degrees = None\n self.m_samplevial_14ml_degrees = None\n self.m_separationvial_14ml_degrees = None\n self.m_smallvial_tip_height = None\n self.m_14ml_tube_tip_height = None\n self.m_50ml_tube_tip_height = None\n\n self.m_barcode_offset_degrees = None", "title": "" }, { "docid": "7d4933bb7f92155f1af68c22d71ece7a", "score": "0.52921087", "text": "def calibrate(self):", "title": "" }, { "docid": "091fea0ed1cd32eaf985b984b84c7457", "score": "0.5263", "text": "def init_qpos(self):\n raise NotImplementedError", "title": "" }, { "docid": "5ecb79859fad76df09979bbf76f03107", "score": "0.52583617", "text": "def eval_acq(self, Xq):\n\n\n # make the query points a 2d array if a 1d array is passed in\n if len(Xq.shape)==1:\n Xq = Xq[:,np.newaxis]\n\n self.update_y_mean()\n\n # Generate cached 
predictors for those test points\n predictors = [gp.query(r, Xq) for r in self.regressors]\n\n # Compute the posterior distributions at those points\n # Note: No covariance information implemented at this stage\n Yq_exp = np.asarray([gp.mean(p) for p in predictors]).T + \\\n self.y_mean\n Yq_var = np.asarray([gp.variance(p) for p in predictors]).T\n\n # Aquisition Functions\n acq_defs_current = acq_defs(y_mean=self.y_mean,\n explore_priority=self.explore_priority)\n # Compute the acquisition levels at those test points\n yq_acq = acq_defs_current[self.acq_name](Yq_exp, Yq_var)\n\n return yq_acq, np.argmax(yq_acq)", "title": "" }, { "docid": "390243dd0ea5f19e8d6f8e47a1c4a444", "score": "0.52552575", "text": "def setup(self):\n self.ctx.current_structure = self.inputs.structure\n self.ctx.current_number_of_bands = None\n # self.ctx.bands_kpoints = self.inputs.get('bands_kpoints', None)", "title": "" }, { "docid": "5495b7876538c072affcb7fb982450a1", "score": "0.5254005", "text": "def _prepare_to_multiplot(self):\n #\n start_time_inhuman = time.time()\n start_time = time.strftime(\"%Y_%m_%d__%H:%M:%S\",\n time.gmtime(start_time_inhuman))\n printcol(\"Script beginning at \" + str(start_time), 'blue')\n start_time = time.time()\n #\n # print(self.__dict__.keys())\n # print(self._method_properties)\n self._report_arguments()\n #\n # grofsc\n fit_minpoints = self._fit_minpoints\n # ---\n #\n # read metafile\n printcol(\"reading files\", 'blue')\n (vsteps_list, file_list) = self._read_meta()\n n_steps = len(vsteps_list)\n n_img_slice = len(self._use_imgs)\n frm_img = self._use_imgs[0]\n to_img = self._use_imgs[-1] + 1\n frm_row = self._row.start\n to_row = self._row.stop\n n_row_slice = to_row - frm_row\n frm_col = self._col.start\n to_col = self._col.stop\n n_col_slice = to_col - frm_col\n #\n aux_shape = (n_steps, n_img_slice, n_smplrst,\n n_row_slice, n_col_slice, n_gncrsfn)\n data2show_y_2d5 = np.zeros(aux_shape).astype('int16')\n #\n aux_shape = (n_steps, n_img_slice)\n data2show_x_1d = np.zeros(aux_shape).astype('float')\n #\n for i_file, this_file in enumerate(file_list):\n if os.path.isfile(this_file) is False:\n msg = \"{0} file does not exist\".format(this_file)\n printcol(msg, 'red')\n raise Exception(msg)\n (data_smpl, data_rst) = read_2xh5(this_file,\n '/data/', '/reset/')\n (aux_n_img, aux_n_row, aux_n_col) = data_smpl.shape\n if self._verbose:\n printcol(\"{0} Images in file:\"\n \"{1}\".format(aux_n_img, this_file), 'green')\n elif ((i_file + 1) % 10) == 0:\n dot()\n\n if self._use_imgs[-1] >= aux_n_img:\n msg = \"{0} Img on this file\".format(aux_n_img)\n printcol(msg, 'red')\n raise Exception(msg)\n\n data2show_y_2d5[\n i_file, :, :, :, :,\n :] = convert_dlsraw_2_gncrsfn(data_smpl[frm_img:to_img, :, :],\n data_rst[frm_img:to_img, :, :],\n False)[:, :, frm_row:to_row,\n frm_col:to_col, :]\n data2show_x_1d[i_file, :] = float(vsteps_list[i_file])\n printcol(\" \", 'blue')\n\n printcol(\"reshaping\", 'blue')\n aux_shape = (n_steps * n_img_slice, n_smplrst,\n n_row_slice * n_col_slice, n_gncrsfn)\n self._data2show_y_2d5 = data2show_y_2d5.reshape(aux_shape)\n self._data2show_x_1d = data2show_x_1d.reshape((n_steps * n_img_slice))\n\n printcol(\"preparing legend\", 'blue')\n info_list = []\n pixel_List = []\n for i_row in range(frm_row, to_row):\n for i_col in range(frm_col, to_col):\n info_list += [\"pix \"+str((i_row, i_col))]\n pixel_List += [(i_row, i_col)]\n self._info_list = info_list\n self._pixel_list = pixel_List\n # ---\n #\n # identify lost packs\n aux_mask_3d = 
np.zeros((n_steps * n_img_slice,\n n_smplrst,\n n_row_slice * n_col_slice)).astype(bool)\n aux_maskfn_3d = np.zeros_like(aux_mask_3d).astype(bool)\n\n for i_pix, this_pix in enumerate(pixel_List):\n for i_smplrst in range(n_smplrst):\n aux_mask_3d[:, i_smplrst, i_pix] = (\n self._data2show_y_2d5[:, i_smplrst, i_pix, ign] >= 0) & (\n self._data2show_y_2d5[:, i_smplrst, i_pix, ign] <= 2)\n self._validmask_3d = aux_mask_3d\n # ---\n #\n # printcol(\"preparing to fit\", 'blue')\n aux_shape = (n_smplrst, n_gncrsfn, len(self._pixel_list))\n fit_slope = np.ones(aux_shape) * (-1)\n fit_offset = np.ones(aux_shape) * (-1)\n fitquality_r2 = np.ones(aux_shape) * (-1)\n fitquality_chi2 = np.ones(aux_shape) * (-1)\n fitquality_fnhisto = np.ones(aux_shape) * (-1)\n\n printcol(\"judging linear-fit quality\", 'blue')\n for i_pix, this_pix in enumerate(self._pixel_list):\n for i_smplrst in range(n_smplrst):\n if i_smplrst == ismpl:\n smplrst_str = \"Smpl\"\n else:\n smplrst_str = \"Rst\"\n # fit Crs\n reduced_x_crs = self._data2show_x_1d[\n aux_mask_3d[:, i_smplrst, i_pix]]\n reduced_y_crs = self._data2show_y_2d5[\n :, i_smplrst, i_pix, icrs][\n aux_mask_3d[:, i_smplrst, i_pix]]\n\n if len(reduced_x_crs) < fit_minpoints:\n aux_msg = \"{0},{1},Crs: unable to fit\".format(this_pix,\n smplrst_str)\n aux_col = \"orange\"\n else:\n (aux_slope, aux_offset) = linear_fit(reduced_x_crs,\n reduced_y_crs)\n aux_r2 = linear_fit_r2(reduced_x_crs, reduced_y_crs)\n fit_slope[i_smplrst, icrs, i_pix] = aux_slope\n fit_offset[i_smplrst, icrs, i_pix] = aux_offset\n fitquality_r2[i_smplrst, icrs, i_pix] = aux_r2\n\n aux_msg = (\"{0},{4},Crs: slope={1},offset={2},\"\n \"R2={3}\".format(this_pix, aux_slope, aux_offset,\n aux_r2, smplrst_str))\n aux_col = \"green\"\n\n if self._show_fitquality_chi2:\n aux_chi2 = linear_fit_chi2(reduced_x_crs,\n reduced_y_crs)\n fitquality_chi2[i_smplrst, icrs, i_pix] = aux_chi2\n aux_msg = aux_msg + \",Chi2/degFree=\" + str(aux_chi2)\n\n if self._verbose:\n printcol(aux_msg, aux_col)\n\n #\n # fit Fn\n (mostcommon_crs, aux_err_flag) = find_mostcommon_uint(\n self._data2show_y_2d5[:, i_smplrst, i_pix, icrs],\n self._fnfit_mincrs, self._fnfit_maxcrs)\n if aux_err_flag:\n aux_msg = \"{0},{1},Fn: unable to Fn-fit\".format(\n this_pix, smplrst_str)\n if self._verbose:\n printcol(aux_msg, 'orange')\n else:\n aux_maskfn_3d[:, i_smplrst, i_pix] = (\n (self._data2show_y_2d5[:, i_smplrst,\n i_pix, ign] >= 0) &\n (self._data2show_y_2d5[:, i_smplrst,\n i_pix, ign] <= 2) &\n (self._data2show_y_2d5[:,\n i_smplrst,\n i_pix,\n icrs] == mostcommon_crs))\n reduced_x_fn = self._data2show_x_1d[aux_maskfn_3d[\n :, i_smplrst, i_pix]]\n reduced_y_fn = self._data2show_y_2d5[\n :, i_smplrst, i_pix, ifn][aux_maskfn_3d[:, i_smplrst,\n i_pix]]\n\n if len(reduced_x_fn) < fit_minpoints:\n aux_msg = \"{0},{1},Fn: unable to Fn-fit\".format(\n this_pix, smplrst_str)\n aux_col = \"orange\"\n else:\n (aux_slope, aux_offset) = linear_fit(reduced_x_fn,\n reduced_y_fn)\n aux_r2 = linear_fit_r2(reduced_x_fn, reduced_y_fn)\n\n fit_slope[i_smplrst, ifn, i_pix] = aux_slope\n fit_offset[i_smplrst, ifn, i_pix] = aux_offset\n fitquality_r2[i_smplrst, ifn, i_pix] = aux_r2\n\n aux_msg = (\"{0},{4},Fn (Crs=={5}): slope={2},\"\n \"offset={2},\"\n \"R2={3}\".format(this_pix, aux_slope,\n aux_offset,\n aux_r2, smplrst_str,\n mostcommon_crs))\n aux_col = \"green\"\n\n if self._show_fitquality_chi2:\n aux_chi2 = linear_fit_chi2(reduced_x_fn,\n reduced_y_fn)\n fitquality_chi2[i_smplrst, ifn, i_pix] = aux_chi2\n aux_msg = aux_msg + \",Chi2/degFree=\" 
+ str(\n aux_chi2)\n\n if self._show_fitquality_fnhist:\n aux_binhisto = np.arange(256)\n (aux_fnhist, ignore) = np.histogram(reduced_y_fn,\n aux_binhisto)\n fitquality_fnhisto[i_smplrst, ifn, i_pix] = max(\n aux_fnhist)\n aux_msg = aux_msg + \",same-Fn occurr=\" + str(\n max(aux_fnhist))\n\n if self._verbose:\n printcol(aux_msg, aux_col)\n\n if self._verbose is False:\n if ((i_pix+1) % 1000) == 0:\n dot()\n if ((i_pix+1) % 100000) == 0:\n printcol(str(this_pix), 'green')\n printcol(\"- - -\", 'green')\n\n self._validmask_fn_3d = aux_maskfn_3d\n\n self._fit_slope = fit_slope\n self._fit_offset = fit_offset\n self._fitquality_r2 = fitquality_r2\n self._fitquality_chi2 = fitquality_chi2\n self._fitquality_fnhisto = fitquality_fnhisto\n\n end_time_inhuman = time.time()\n aux_dur = end_time_inhuman - start_time_inhuman\n end_time = time.strftime(\"%Y_%m_%d__%H:%M:%S\",\n time.gmtime(end_time_inhuman))\n printcol(\"Data elaboration ended at \" + str(end_time) +\n \" (\" + str(aux_dur) + \"s)\", 'blue')\n # ---\n # end of _prepare_to_multiplot", "title": "" }, { "docid": "9fed187bdf1937c93d2eb89a67f095d2", "score": "0.52487415", "text": "def q2partc():\n model1 = mamodel(1, 64)\n model2 = mamodel(1, 256)\n model3 = mamodel(1, 1024)\n real1 = arprocess(1, 64)\n real2 = arprocess(1, 256)\n real3 = arprocess(1, 1024)\n\n f1 = np.linspace(1, 32, 32)\n f1 = f1 / 64\n\n f2 = np.linspace(1, 128, 128)\n f2 = f2 / 256\n\n f3 = np.linspace(1, 512, 512)\n f3 = f3 / 1024\n\n spectra1 = sdfma(f1)\n spectra2 = sdfma(f2)\n spectra3 = sdfma(f3)\n\n Sper1 = periodogram(1, model1, f1)\n Sper2 = periodogram(1, model2, f2)\n Sper3 = periodogram(1, model3, f3)\n\n Dspec1 = dirspecest(1, model1, f1)\n Dspec2 = dirspecest(1, model2, f2)\n Dspec3 = dirspecest(1, model3, f3)\n\n YWnot1 = ywnotaper(1, real1, f1)\n YWnot2 = ywnotaper(1, real2, f2)\n YWnot3 = ywnotaper(1, real3, f3)\n\n YWwith1 = ywwithtaper(1, real1, f1)\n YWwith2 = ywwithtaper(1, real2, f2)\n YWwith3 = ywwithtaper(1, real3, f3)\n\n fig11, axes = plt.subplots(nrows=4, ncols=3, figsize=(15, 10))\n axes[0, 0].plot(f1, Sper1.transpose(), 'b',\n f1, spectra1, 'r')\n axes[0, 1].plot(f2, Sper2.transpose(), 'b',\n f2, spectra2, 'r')\n axes[0, 2].plot(f3, Sper3.transpose(), 'b',\n f3, spectra3, 'r')\n\n axes[1, 0].plot(f1, Dspec1.transpose(), 'b',\n f1, spectra1, 'r')\n axes[1, 1].plot(f2, Dspec2.transpose(), 'b',\n f2, spectra2, 'r')\n axes[1, 2].plot(f3, Dspec3.transpose(), 'b',\n f3, spectra3, 'r')\n\n axes[2, 0].plot(f1, YWnot1.transpose(), 'b',\n f1, spectra1, 'r')\n axes[2, 1].plot(f2, YWnot2.transpose(), 'b',\n f2, spectra2, 'r')\n axes[2, 2].plot(f3, YWnot3.transpose(), 'b',\n f3, spectra3, 'r')\n\n axes[3, 0].plot(f1, YWwith1.transpose(), 'b',\n f1, spectra1, 'r')\n axes[3, 1].plot(f2, YWwith2.transpose(), 'b',\n f2, spectra2, 'r')\n axes[3, 2].plot(f3, YWwith3.transpose(), 'b',\n f3, spectra3, 'r')\n axes[0, 0].set_title('N = 64')\n axes[0, 1].set_title('N = 256')\n axes[0, 2].set_title('N = 1024')\n fig11.suptitle(\n \"Name: Tudor Trita Trita, CID:01199397 \\n Figure 11: Plots for Question 2 Part C. 
Red line = sdf, Blue line = current method.\")\n plt.savefig('fig11.png')\n plt.show()\n\n return None", "title": "" }, { "docid": "e14a2e14fb6eabd3d375e8daf104ba75", "score": "0.52222353", "text": "def do_the_right_thing():\n sample_param_grid_mpi()\n # dmaps_param_set()\n # test_sympy_of()\n # dmaps_param_set_grad_kernel()\n # dmaps_param_set()\n # abc_analytical_contour()\n # qssa_comparison()\n # plot_data_dmaps_results()\n # dmaps_param_set_data_kernel()", "title": "" }, { "docid": "17e5eb2bc67cf354eb166fe792f3f84f", "score": "0.5215059", "text": "def AP_vs_Keq(self):\n\n # you're modifying it so make a copy\n dset = copy.deepcopy(self.dg100 + self.dg400)\n #dset = copy.deepcopy(self.dg100)\n #dset = copy.deepcopy(self.dg400)\n\n if not self.coeffs:\n print('Coeffs must be set for this figure!')\n 1/0\n\n c1, c2, c3 = self.coeffs\n for its in dset:\n its.calc_keq(c1, c2, c3, self.msat_normalization, rna_len=20)\n\n movSize = 0 # effectively NOT a moving winding, but nt-2-nt comparison\n\n # Ignore 2, since it's just AT and very little variation occurs.\n #xmers = range(2, 21)\n xmers = range(3, 16)\n # the bar height will be the correlation with the above three values for\n # each moving average center position\n\n result = {}\n for norm in ['Non-Normalized', 'Normalized']:\n # in the second loop\n if norm == 'Normalized':\n normalize_AP(dset)\n\n bar_height = []\n pvals = []\n\n for mer_center_pos in xmers:\n\n ## testing the N25_anti\n #movAP = get_movAv_array(dset, center=mer_center_pos,\n #movSize=movSize, attr='abortiveProb', prePost='post')\n\n #movKeq = get_movAv_array(dset, center=mer_center_pos,\n #movSize=0, attr='keq', prePost='both')\n\n # This is how the figure currently is\n movAP = get_movAv_array(dset, center=mer_center_pos,\n movSize=movSize, attr='abortiveProb',\n prePost='both')\n\n movKeq = get_movAv_array(dset, center=mer_center_pos,\n movSize=movSize, attr='keq',\n prePost='both')\n\n corr, pval = spearmanr(movAP, movKeq)\n\n bar_height.append(corr)\n pvals.append(pval)\n\n result[norm] = xmers, bar_height, pvals, movSize\n\n return result", "title": "" }, { "docid": "e932359a029a079361c87fdd8eada198", "score": "0.520291", "text": "def init(self):\n self.q_mu.mean = self.Y.mean(1)", "title": "" }, { "docid": "dbd04e76fa6043a7dcd780ea16dad841", "score": "0.5202023", "text": "def calibrate(inputs):", "title": "" }, { "docid": "5efa6f49defd814f0917baf3590e79c5", "score": "0.52010137", "text": "def multi_fit(dataX, dataY, peaks, new_param, upp, low, fit_fun_name, peak_param = np.asarray([]), path = None, file = '', ALL = False, method = 'ODR', forced = False, Auto = True):\r\n global __prior, __range, trouble\r\n attempt = 0\r\n #forced = False\r\n initial_run = 0\r\n #if method == 'LS':\r\n # globals()['func'] = __import__('{}'.format(fit_fun_name.replace('.py', '')))\r\n multimodel = Model(compFun11)\r\n fitfun = [o[1] for o in inspect.getmembers(fitFunctions) if inspect.isfunction(o[1])]\r\n #global trouble\r\n print('Starting Multi-Fit: {}'.format(method))\r\n #peak_index = np.arange(peak_index[0], peak_index[0] + peaks.size + 1)######\r\n #tally = 0\r\n old_param = new_param[:]\r\n if not path:\r\n path = 'C:\\\\Presentations\\\\Figures'\r\n skip = False\r\n while not skip:\r\n try:\r\n for iteration in range(10):\r\n rem = np.where(0 == dataY[low: upp])[0]\r\n ydat = np.delete(dataY[low: upp], rem)\r\n xdat = np.delete(dataX[low: upp], rem)\r\n if method == 'ODR':\r\n trouble = [dataX, dataY, new_param, low, upp]\r\n full_data = RealData(xdat, ydat, sy = 
np.sqrt(ydat))\r\n full_odr = ODR(full_data, multimodel, beta0 = new_param)\r\n full_odr.set_job(fit_type = 0)\r\n full_odr_out = full_odr.run()\r\n nout = np.abs(full_odr_out.beta)\r\n nerr = full_odr_out.sd_beta\r\n if method == 'LS':\r\n nout, ncov = cfit(fitfun[int(len(new_param)//3) - 1], xdat,\r\n ydat, p0 = new_param,\r\n sigma = np.sqrt(ydat),\r\n absolute_sigma = True)\r\n nout = np.abs(nout)\r\n nerr = np.sqrt(np.diag(ncov))\r\n if all(np.abs(new_param - nout) <= 0.001*np.ones(nout.size)):\r\n break\r\n new_param = nout\r\n avg_peak_dis = np.asarray(new_param[3::3] - new_param[:-3:3]).mean()/2\r\n if not forced:\r\n low = np.where(dataX <= nout[0] - min(nout[-2], avg_peak_dis))[0][-1]\r\n upp = np.where(nout[-3] + min(2*nout[-2], avg_peak_dis) <= dataX)[0][0]\r\n low = max(low, 0)\r\n upp = min(upp, dataX.size)\r\n ydat, rem = removepts(dataY[low: upp], spe = 0, ret = True)\r\n xdat = removepts(dataX[low: upp], rem = rem)\r\n if any([math.isinf(el) or math.isnan(el) for el in nerr]):\r\n print('\\nImproper fit, the error is not finite.\\n')\r\n raise RuntimeError\r\n if any([el < dataX[low] or el > dataX[upp] for el in new_param[::3]]):\r\n print('A centroid is outside the bounds.')\r\n raise RuntimeError\r\n avg_sigma = new_param[1::3].mean()\r\n if any([el > 2*avg_sigma or el <avg_sigma/2 for el in new_param[1::3]]):\r\n print('A sigma varies greatly beyond the other sigmas')\r\n print('Sigma: {}'.format(new_param[1::3]))\r\n if Auto and attempt != 0 and 'y' == input('Would you like to continue anyways?\\n'):\r\n pass\r\n else:\r\n raise RuntimeError\r\n temp_values = np.asarray(reindex([nout, nerr])).flatten()\r\n temp_values = [np.asarray(temp_values[a::6]) for a in range(6)]\r\n file_column = [file]*temp_values[0].size\r\n peak_index = np.round(np.polyval(peak_param, nout[::3])).astype(int)\r\n energy_column = 3.5*peak_index if peak_index.size else np.ones(len(file_column))\r\n temp_values = [file_column, energy_column] + list(np.round(temp_values, 5))\r\n temp_values = reindex(temp_values)\r\n lplot = int(max(nout[0] - 3*min(nout[1], avg_peak_dis), 0))\r\n lplot = np.where(dataX <= lplot)[0][-1]\r\n uplot = int(min(nout[-3] + 3*min(nout[-2], avg_peak_dis), dataY.size))\r\n uplot = np.where(dataX <= uplot)[0][0]\r\n plt.clf()\r\n \"\"\"\r\n plt.bar(dataX[lplot:uplot], dataY[lplot:uplot], width = 1, bottom = 1,\r\n color = 'r', label = 'Raw', align = 'center')\r\n plt.bar(xdat, ydat, width = 1, bottom = 1, color = 'g', label = 'Used',\r\n align = 'center')\r\n plt.plot(dataX[lplot:uplot], compFun11(nout, dataX[lplot:uplot]),\r\n 'k', linewidth = 5, label = 'Multi-Peak Fits')\r\n plt.legend(loc = 'upper center', prop = {'size': 40}, ncol = 2)\r\n save_fig(dataY = dataY, llim = lplot, ulim = uplot, path = path,\r\n file = method + '_' + file, title = False, no_clr = True)\r\n \"\"\"\r\n if Auto:\r\n break\r\n else:\r\n print('Allowing for user input')\r\n raise\r\n #except(TypeError):\r\n # global trouble\r\n # trouble = [xdat, ydat, low, upp, old_param, new_param, nout, avg_peak_dis]\r\n # raise\r\n except:\r\n #raise\r\n if attempt == 0:\r\n print('Attempting prior values')\r\n try:\r\n new_param = np.asarray(__prior[:])\r\n low, upp = __range\r\n except NameError:\r\n attempt += 1\r\n print('No prior values stored...')\r\n if attempt != 0:\r\n Auto = False\r\n hold_on = False\r\n plt.close('all')\r\n plt.plot(xdat, ydat, 'o', markersize = 10)\r\n plt.plot(xdat, compFun11(new_param, xdat), 'k', linewidth = 5)\r\n print('Possible Error: {}'.format(sys.exc_info()))\r\n 
print('new_param:\\n{}'.format(list(new_param)))\r\n print('Interval:\\n{}'.format([dataX[low], dataX[upp]]))\r\n print('Method type is: {}'.format(method))\r\n plt.show()\r\n while True:\r\n action = input('The multi-gaussian did not converge ' +\r\n 'enter s to skip, i to change interval, ' +\r\n 'p to change the parameters, ' +\r\n 'h to toggle hold_on and change multiple parameters ' +\r\n 'o to see the original parameters ' +\r\n 'A to switch auto back on.\\n')\r\n if action.startswith('s'):\r\n skip = True\r\n break\r\n if action.startswith('h'):\r\n hold_on = True if not hold_on else False\r\n print('hold_on: {}'.format(hold_on))\r\n if action.startswith('o'):\r\n print('The input parameters were: \\n', list(old_param))\r\n if action == 'raise':\r\n raise\r\n try:\r\n if action.startswith('i'):\r\n if action.endswith('f'):\r\n forced = True\r\n action = action[:-1]\r\n low, upp = str2num(action[1:])\r\n low = np.where(dataX <= low)[0][-1]\r\n upp = np.where(upp <= dataX)[0][0]\r\n elif action.startswith('p'):\r\n temp_param = str2num(action[1:])\r\n if len(temp_param)%3 != 0:\r\n raise TypeError\r\n else:\r\n new_param = np.asarray(temp_param[:])\r\n elif action.startswith('A'):\r\n print('Continuing Auto')\r\n Auto = True\r\n if hold_on:\r\n hold_on = False\r\n print('hold_on: {}'.format(hold_on))\r\n else:\r\n print('Rerunning with prior settings...')\r\n if not hold_on:\r\n break\r\n except(ValueError, TypeError):\r\n print('Error in edits to Multi-Fit: {}'.format(sys.exc_info()))\r\n attempt += 1\r\n if not skip:\r\n __prior = nout\r\n __range = [low, upp]\r\n if ALL:\r\n return temp_values, low, upp\r\n else:\r\n return temp_values\r\n else:\r\n return None", "title": "" }, { "docid": "4ac8098c07f1953153adced8b5d42560", "score": "0.520007", "text": "def runAll(self):\n self.find()\n self.getContours()\n self.getSizes()\n self.getFluxes()\n self.cleanSample()\n self.getCenterOfMass()\n self.plot()\n self.generateOutput()\n self.doAperturePhotometry()\n self.writePhotometry()\n\n results = dict(xcms=self.xcms, ycms=self.ycms, cms=self.cms,\n sizes=self.sizes, fluxes=self.fluxes,\n photometry=self.photometry)\n\n return results", "title": "" }, { "docid": "2369ad3b2c82e78d761f4e6e085bc12a", "score": "0.51936615", "text": "def q2partd():\n Nr = 10000\n model1 = mamodel(Nr, 64)\n model2 = mamodel(Nr, 256)\n model3 = mamodel(Nr, 1024)\n real1 = arprocess(Nr, 64)\n real2 = arprocess(Nr, 256)\n real3 = arprocess(Nr, 1024)\n\n f1 = np.linspace(1, 32, 32)\n f1 = f1 / 64\n\n f2 = np.linspace(1, 128, 128)\n f2 = f2 / 256\n\n f3 = np.linspace(1, 512, 512)\n f3 = f3 / 1024\n\n spectra1 = sdfma(f1)\n spectra2 = sdfma(f2)\n spectra3 = sdfma(f3)\n\n Sper1 = periodogram(Nr, model1, f1)\n Sper2 = periodogram(Nr, model2, f2)\n Sper3 = periodogram(Nr, model3, f3)\n Sper1 = np.mean(Sper1, axis=0)\n Sper2 = np.mean(Sper2, axis=0)\n Sper3 = np.mean(Sper3, axis=0)\n\n Dspec1 = dirspecest(Nr, model1, f1)\n Dspec2 = dirspecest(Nr, model2, f2)\n Dspec3 = dirspecest(Nr, model3, f3)\n Dspec1 = np.mean(Dspec1, axis=0)\n Dspec2 = np.mean(Dspec2, axis=0)\n Dspec3 = np.mean(Dspec3, axis=0)\n\n YWnot1 = ywnotaper(Nr, real1, f1)\n YWnot2 = ywnotaper(Nr, real2, f2)\n YWnot3 = ywnotaper(Nr, real3, f3)\n YWnot1 = np.mean(YWnot1, axis=0)\n YWnot2 = np.mean(YWnot2, axis=0)\n YWnot3 = np.mean(YWnot3, axis=0)\n\n YWwith1 = ywwithtaper(Nr, real1, f1)\n YWwith2 = ywwithtaper(Nr, real2, f2)\n YWwith3 = ywwithtaper(Nr, real3, f3)\n YWwith1 = np.mean(YWwith1, axis=0)\n YWwith2 = np.mean(YWwith2, axis=0)\n YWwith3 = 
np.mean(YWwith3, axis=0)\n\n # Figure 12: n=64\n plt.figure(figsize=(10, 7))\n plt.plot(f1, spectra1, 'k', label='real sdf')\n plt.plot(f1, Sper1, 'b', label='periodogram')\n plt.plot(f1, Dspec1, 'g', label='direct est.')\n plt.plot(f1, YWnot1, 'y', label='YW no taper')\n plt.plot(f1, YWwith1, 'r', label='YW with taper')\n plt.legend(loc='upper right')\n plt.xlabel('f')\n plt.title(\n \"Name: Tudor Trita Trita, CID:01199397 \\n Figure 12: Plot N=64 for Question 2 Part D\")\n plt.savefig('fig12.png')\n plt.show()\n\n # Figure 13: n=256\n plt.figure(figsize=(10, 7))\n plt.plot(f2, spectra2, 'k', label='real sdf')\n plt.plot(f2, Sper2, 'b', label='periodogram')\n plt.plot(f2, Dspec2, 'g', label='direct est.')\n plt.plot(f2, YWnot2, 'y', label='YW no taper')\n plt.plot(f2, YWwith2, 'r', label='YW with taper')\n plt.legend(loc='upper right')\n plt.xlabel('f')\n plt.title(\n \"Name: Tudor Trita Trita, CID:01199397 \\n Figure 13: Plot N=64 for Question 2 Part D\")\n plt.savefig('fig13.png')\n plt.show()\n\n # Figure 14: n=1024\n plt.figure(figsize=(10, 7))\n plt.plot(f3, spectra3, 'k', label='real sdf')\n plt.plot(f3, Sper3, 'b', label='periodogram')\n plt.plot(f3, Dspec3, 'g', label='direct est.')\n plt.plot(f3, YWnot3, 'y', label='YW no taper')\n plt.plot(f3, YWwith3, 'r', label='YW with taper')\n plt.legend(loc='upper right')\n plt.xlabel('f')\n plt.title(\n \"Name: Tudor Trita Trita, CID:01199397 \\n Figure 14: Plot N=64 for Question 2 Part D\")\n plt.savefig('fig14.png')\n plt.show()\n return None", "title": "" }, { "docid": "389b8b1e500694f5f7962e5dd0bb4795", "score": "0.51880044", "text": "def apply_correction(self):\n\n \n freq, dummy,chI, chQ = self.calibration.AWG_parameters()\n if freq is None or self.calibration.frequency() is None:\n print('Init not completed\\n')\n return\n \n #self._AWG.apply_correction(self.calibration.calibration_dictionary,self._amplitude)\n par_list = self.calibration.cal_par_list()\n self._AWG.set_dc_offset_by_qe(self._mixer_ID, 'I', float(par_list[0]))\n self._AWG.set_dc_offset_by_qe(self._mixer_ID, 'Q', float(par_list[1]))\n self._AWG.set_correction(self._mixer_ID,IQ_imbalance(1-par_list[2],par_list[4]))", "title": "" }, { "docid": "a20179a47c50214a80204b1c9f752ca8", "score": "0.51808333", "text": "def calibrate():", "title": "" }, { "docid": "397f15ec2f6b9957e0280a69b974b9ca", "score": "0.51747507", "text": "def QCD_old(self,*args,**kwargs):\n \n verbosity = getVerbosity(kwargs,verbositySampleTools)\n var, nbins, xmin, xmax, xbins, cuts0 = unwrapVariableSelection(*args)\n isJetCategory = re.search(r\"(nc?btag|n[cf]?jets)\",cuts0)\n relax = 'emu' in self.channel or isJetCategory\n if verbosity > 1:\n print header(\"estimating QCD for variable %s\" % (var))\n #LOG.verbose(\"\\n>>> estimating QCD for variable %s\" % (self.var),verbosity,level=2)\n \n samples = self.samples\n name = kwargs.get('name', makeHistName(\"QCD\",var) )\n title = kwargs.get('title', \"QCD multijet\" )\n append = kwargs.get('append', \"\" )+\"_SS\"\n ratio_WJ_QCD = kwargs.get('ratio_WJ_QCD_SS', False )\n doRatio_WJ_QCD = isinstance(ratio_WJ_QCD, c_double )\n weight = kwargs.get('weight', \"\" )\n weight_data = kwargs.get('weight', \"\" )\n shift = kwargs.get('shift', 0.0 ) + self.shiftQCD # for systematics\n #vetoRelax = kwargs.get('vetoRelax', relax )\n relax = kwargs.get('relax', relax ) #and not vetoRelax\n file = kwargs.get('saveToFile', None )\n \n if relax and re.search(r\"(nc?btag|n[cf]?jets)\",var):\n LOG.warning('SampleSet::QCD: not relaxing cuts in QCD CR for \"%s\"'%(var))\n 
relax = False\n \n scaleup = 1.0 if \"q_1*q_2>0\" else 2.0 if \"emu\" in self.channel else OSSS_ratio\n scaleup = kwargs.get('scaleup', scaleup )\n LOG.verbose(\" QCD: scaleup = %s, shift = %s, self.shiftQCD = %s\"%(scaleup,shift,self.shiftQCD),verbosity,level=2)\n \n # CUTS: invert charge\n cuts = invertCharge(cuts0,to='SS')\n \n # CUTS: relax cuts for QCD_SS_SB\n # https://indico.cern.ch/event/566854/contributions/2367198/attachments/1368758/2074844/QCDStudy_20161109_HTTMeeting.pdf\n QCD_OS_SR = 0\n if relax:\n \n # GET yield QCD_OS_SR = SF * QCD_SS_SR\n if 'emu' in self.channel: # use weight instead of scaleup\n scaleup = 1.0\n weight = combineWeights(\"getQCDWeight(pt_2, pt_1, dR_ll)\",weight)\n weight_data = \"getQCDWeight(pt_2, pt_1, dR_ll)\" # SF ~ 2.4 average\n kwargs_SR = kwargs.copy()\n kwargs_SR.update({ 'scaleup':scaleup, 'weight':weight, 'weight_data':weight_data, 'relax':False })\n histQCD_OS_SR = self.QCD(*args,**kwargs_SR)\n QCD_OS_SR = histQCD_OS_SR.Integral(1,N) # yield\n scaleup = 1.0\n deleteHist(histQCD_OS_SR)\n if QCD_OS_SR < 10:\n LOG.warning('QCD: QCD_SR = %.1f < 10 for \"%s\"'%(QCD_OS_SR,cuts0))\n \n # RELAX cuts for QCD_OS_SB = SF * QCD_SS_SB\n append = \"_isorel\" + append\n iso_relaxed = \"iso_1>0.15 && iso_1<0.5 && iso_2_medium==1\" #iso_2_medium\n if 'emu' in self.channel: iso_relaxed = \"iso_1>0.20 && iso_1<0.5 && iso_2<0.5\"\n elif isJetCategory: cuts = relaxJetSelection(cuts)\n cuts = invertIsolation(cuts,to=iso_relaxed)\n \n ## CHECK for 30 GeV jets\n #if \"bpt_\" in var and \"btag\" in cuts0 and \"btag\" not in cuts:\n # btags_g = re.findall(r\"&*\\ *nc?btag\\ *>\\ *(\\d+)\\ *\",cuts0)\n # btags_ge = re.findall(r\"&*\\ *nc?btag\\ *>=\\ *(\\d+)\\ *\",cuts0)\n # btags_e = re.findall(r\"&*\\ *nc?btag\\ *==\\ *(\\d+)\\ *\",cuts0)\n # nbtags = 0\n # if btags_g: nbtags = int(btags_g[0])+1\n # elif btags_ge: nbtags = int(btags_ge[0])\n # elif btags_e: nbtags = int(btags_e[0])\n # if nbtags>0:\n # if \"bpt_1\" in var and nbtags>0:\n # cuts+=\" && bpt_1>30\"\n # LOG.warning(\"QCD: %s - added 30 GeV cut on b jets in \\\"%s\\\"\"%(var,cuts))\n # if \"bpt_2\" in var and nbtags>1:\n # cuts+=\" && bpt_2>30\"\n # LOG.warning(\"QCD: %s - added 30 GeV cut on b jets in \\\"%s\\\"\"%(var,cuts))\n \n LOG.verbose(\" QCD - cuts = %s %s\"%(cuts,\"(relaxed)\" if relax else \"\"),verbosity,level=2)\n \n # HISTOGRAMS\n gROOT.cd()\n histD = None\n histWJ = None\n histsD_SS, histsB_SS, _ = self.createHistograms(var,nbins,xmin,xmax,xbins,cuts,weight=weight,weight_data=weight_data,append=append,\n signal=False,QCD=False,task=\"calculating QCD\",split=False,blind=False,verbosity=verbosity-1)\n \n # GET WJ\n if doRatio_WJ_QCD:\n for hist in histsB_SS:\n if (\"WJ\" in hist.GetName() or re.findall(r\"w.*jets\",hist.GetName(),re.IGNORECASE)):\n if histWJ:\n LOG.warning(\"SampleSet::QCD: more than one W+jets sample in SS region, going with first instance!\", pre=\" \")\n break\n else: histWJ = hist\n if not histWJ:\n LOG.warning(\"SampleSet::QCD: Did not find W+jets sample!\", pre=\" \")\n \n # CHECK data\n if not histsD_SS:\n LOG.warning(\"SampleSet::QCD: No data to make DATA driven QCD!\")\n return None\n histD_SS = histsD_SS[0]\n \n # QCD HIST\n histMC_SS = histsB_SS[0].Clone(\"MC_SS\")\n for hist in histsB_SS[1:]: histMC_SS.Add(hist)\n histQCD = substractHistsFromData(histsD_SS[0],histMC_SS,name=name+append,title=title)\n if not histQCD: LOG.warning(\"SampleSet::QCD: Could not make QCD! 
QCD histogram is none!\", pre=\" \")\n \n # SAVE histograms\n if file:\n dir = file.mkdir('QCD_relaxed' if relax else 'QCD')\n dir.cd()\n canvas, pave = canvasWithText(cuts)\n pave.AddText(\"weight: \"+weight)\n canvas.Write(\"selections\")\n for hist in histsB_SS+histsD_SS+[histMC_SS]:\n hist.GetXaxis().SetTitle(var)\n hist.Write(hist.GetName())\n gROOT.cd()\n \n # YIELD only\n if relax:\n QCD_SS = histQCD.Integral(1,N)\n if QCD_SS:\n scaleup = QCD_OS_SR/QCD_SS # normalizing to OS_SR\n LOG.verbose(\" QCD - scaleup = QCD_OS_SR/QCD_SS_SB = %.1f/%.1f = %.3f\"%(QCD_OS_SR,QCD_SS,scaleup),verbosity,level=2)\n else:\n LOG.warning(\"SampleSet::QCD: QCD_SS_SB.Integral() == 0!\")\n scale = scaleup*(1.0+shift) # scale up QCD 6% in OS region by default\n histQCD.Scale(scale)\n histQCD.SetFillColor(getColor('QCD'))\n histQCD.SetOption(\"HIST\")\n MC_SS = histMC_SS.Integral()\n data_SS = histD_SS.Integral()\n QCD_SS = histQCD.Integral()\n \n # WJ/QCD ratio in SS\n if doRatio_WJ_QCD and histWJ:\n WJ_SS = histWJ.Integral()\n if QCD_SS: ratio_WJ_QCD.value = WJ_SS/QCD_SS\n else: LOG.warning(\"SampleSet::QCD - QCD integral is 0!\", pre=\" \")\n LOG.verbose(\" QCD - data_SS = %.1f, MC_SS = %.1f, QCD_SS = %.1f, scale=%.3f, WJ_SS = %.1f, ratio_WJ_QCD_SS = %.3f\"%(data_SS,MC_SS,QCD_SS,scale,WJ_SS,ratio_WJ_QCD.value),verbosity,level=2)\n else:\n LOG.verbose(\" QCD - data_SS = %.1f, MC_SS = %.1f, QCD_SS = %.1f, scale=%.3f\"%(data_SS,MC_SS,QCD_SS,scale),verbosity,level=2)\n \n close(histsB_SS+histsD_SS+[histMC_SS])\n return histQCD", "title": "" }, { "docid": "180a300d5b97f78ddd125fac67e27375", "score": "0.5164584", "text": "def setup_axis(self, **kwargs):\n\n self.motor_type = kwargs.get('motor_type')\n self.feedback_configuration = kwargs.get('feedback_configuration')\n self.full_step_resolution = kwargs.get('full_step_resolution')\n self.position_display_resolution = kwargs.get('position_display_'\n 'resolution')\n self.current = kwargs.get('current')\n self.voltage = kwargs.get('voltage')\n self.units = int(kwargs.get('units'))\n self.encoder_resolution = kwargs.get('encoder_resolution')\n self.max_acceleration = kwargs.get('max_acceleration')\n self.max_velocity = kwargs.get('max_velocity')\n self.max_base_velocity = kwargs.get('max_base_velocity')\n self.homing_velocity = kwargs.get('homing_velocity')\n self.jog_high_velocity = kwargs.get('jog_high_velocity')\n self.jog_low_velocity = kwargs.get('jog_low_velocity')\n self.acceleration = kwargs.get('acceleration')\n self.velocity = kwargs.get('velocity')\n self.deceleration = kwargs.get('deceleration')\n self.estop_deceleration = kwargs.get('estop_deceleration')\n self.jerk = kwargs.get('jerk')\n self.error_threshold = kwargs.get('error_threshold')\n self.proportional_gain = kwargs.get('proportional_gain')\n self.derivative_gain = kwargs.get('derivative_gain')\n self.integral_gain = kwargs.get('integral_gain')\n self.integral_saturation_gain = kwargs.get('integral_saturation_gain')\n self.home = kwargs.get('home')\n self.microstep_factor = kwargs.get('microstep_factor')\n self.acceleration_feed_forward = kwargs.get('acceleration_feed_forward')\n self.trajectory = kwargs.get('trajectory')\n self.hardware_limit_configuration = kwargs.get('hardware_limit_'\n 'configuration')\n if 'reduce_motor_torque_time' in kwargs and 'reduce_motor_torque_percentage' in kwargs:\n motor_time = kwargs['reduce_motor_torque_time']\n motor_time = int(assume_units(motor_time, pq.ms).rescale(pq.ms).magnitude)\n if motor_time < 0 or motor_time > 60000:\n raise ValueError(\"Time must 
be between 0 and 60000 ms\")\n percentage = kwargs['reduce_motor_torque_percentage']\n percentage = int(assume_units(percentage, pq.percent).rescale(\n pq.percent).magnitude)\n if percentage < 0 or percentage > 100:\n raise ValueError(\"Time must be between 0 and 60000 ms\")\n self._newport_cmd(\n \"QR\", target=self._axis_id, params=[motor_time, percentage])\n\n # update motor configuration\n self._newport_cmd(\"UF\", target=self._axis_id)\n self._newport_cmd(\"QD\", target=self._axis_id)\n # save configuration\n self._newport_cmd(\"SM\")\n return self.read_setup()", "title": "" }, { "docid": "5a1a317268037beb6490f8bef38fedf0", "score": "0.5160277", "text": "def initialize_variables(self):\n self.params.globalize_parameters(self) # make all the variables available \n self.calculate_boundaries() # calculate the boundaries\n\n # Function Space\n if self.per_func_space == False:\n self.Q = FunctionSpace(self.mesh, \"CG\", 1)\n self.Q_flat = FunctionSpace(self.flat_mesh, \"CG\", 1)\n self.Q2 = MixedFunctionSpace([self.Q]*2)\n self.Q4 = MixedFunctionSpace([self.Q]*4)\n \n # surface and bed :\n self.S = interpolate(self.S_ex, self.Q)\n self.B = interpolate(self.B_ex, self.Q)\n self.Shat = Function(self.Q_flat)\n self.dSdt = Function(self.Q_flat)\n \n else:\n # surface and bed :\n self.S = interpolate(self.S_ex, self.Q_non_periodic)\n self.B = interpolate(self.B_ex, self.Q_non_periodic)\n self.Shat = Function(self.Q_flat_non_periodic)\n self.dSdt = Function(self.Q_flat)\n \n # Coordinates of various types \n self.x = self.Q.cell().x\n self.sigma = project((self.x[2] - self.B) / (self.S - self.B))\n\n # Velocity model\n self.U = Function(self.Q2)\n self.u = Function(self.Q)\n self.v = Function(self.Q)\n self.w = Function(self.Q)\n self.beta2 = Function(self.Q)\n self.mhat = Function(self.Q)\n self.b = Function(self.Q)\n self.epsdot = Function(self.Q)\n self.E = Function(self.Q)\n self.eta = Function(self.Q)\n self.P = Function(self.Q)\n self.Tstar = Function(self.Q) # None\n self.W = Function(self.Q) # None \n self.Vd = Function(self.Q) # None \n self.Pe = Function(self.Q) # None \n self.Sl = Function(self.Q) # None \n self.Pc = Function(self.Q) # None\n self.Nc = Function(self.Q) # None\n self.Pb = Function(self.Q)\n self.Lsq = Function(self.Q)\n \n # Enthalpy model\n self.H_surface = Function(self.Q)\n self.H = Function(self.Q)\n self.T = Function(self.Q)\n self.W = Function(self.Q)\n self.Mb = Function(self.Q)\n self.q_geo = Function(self.Q)\n self.cold = Function(self.Q)\n self.Hhat = Function(self.Q) # Midpoint values, usually set to H_n\n self.uhat = Function(self.Q) # Midpoint values, usually set to H_n\n self.vhat = Function(self.Q) # Midpoint values, usually set to H_n\n self.what = Function(self.Q) # Midpoint values, usually set to H_n\n self.mhat = Function(self.Q) # ALE is required: we change the mesh \n self.H0 = Function(self.Q) # None initial enthalpy\n self.T0 = Function(self.Q) # None\n self.h_i = Function(self.Q) # None\n self.kappa = Function(self.Q) # None\n\n # free surface model :\n self.ahat = Function(self.Q_flat)\n self.uhat_f = Function(self.Q_flat)\n self.vhat_f = Function(self.Q_flat)\n self.what_f = Function(self.Q_flat)\n self.M = Function(self.Q_flat)\n \n # Age model \n self.age = Function(self.Q)\n self.a0 = Function(self.Q)\n\n # Surface climate model\n self.smb = Function(self.Q)\n self.precip = Function(self.Q)\n self.T_surface = Function(self.Q)\n\n # Adjoint model\n self.u_o = Function(self.Q)\n self.v_o = Function(self.Q)\n self.U_o = Function(self.Q)\n 
self.lam = Function(self.Q)\n self.adot = Function(self.Q)\n\n # Balance Velocity model :\n self.dSdx = Function(self.Q_flat)\n self.dSdy = Function(self.Q_flat)\n self.Ub = Function(self.Q_flat)\n self.u_balance = Function(self.Q)\n self.v_balance = Function(self.Q)", "title": "" }, { "docid": "521b1ad48b08cb76976099769d867620", "score": "0.5160018", "text": "def updateAQMatrix(self):\n # Save interpolation to DB \n sizeX= len(self.matrix)\n sizeY= len(self.matrix[0])\n YEAR = str(datetime.date.today().year)\n MONTH = str(datetime.date.today().month).zfill(2)\n DATE = str(datetime.date.today().day).zfill(2)\n HOUR = str(datetime.datetime.now().hour).zfill(2)\n timestamp = datetime.datetime.strptime( YEAR+'-'+MONTH+'-'+DATE+' '+HOUR+':00:00', '%Y-%m-%d %H:%M:%S')\n first =False\n for i in range(sizeX):\n for j in range(sizeY):\n self.matrix[i][j]['pollutants']=self.getInterpolated(self.matrix[i][j]['midpoint'])\n try:\n count=0\n try:\n cursorQ=self.mydb.cursor(buffered=True)\n query= (\"select * from interpolatedmetrics limit 1\")\n cursorQ.execute(query)\n count=cursorQ.rowcount\n if i==0 and j==0 and count==0 :\n first=True\n except Error as error:\n print(error)\n finally:\n cursorQ.close()\n if first:\n insertIP=(\"insert into interpolatedmetrics(idinterpolation_algorithm,idcell,idPollutant,interpolatedValiue,timestamp) values (%(algorithm)s,%(id)s,%(poll)s,%(val)s,%(time)s)\")\n cursor = self.mydb.cursor(buffered=True)\n getPoll= (\"select * from pollutant\")\n cursor.execute(getPoll)\n pollutants = list(cursor.fetchall())\n print('poll ',pollutants,'----------')\n cursor.close()\n for (idpoll,polName,metric) in pollutants:\n print('metrics: ',polName,'--------',self.matrix[i][j]['pollutants'][polName])\n values={'algorithm':'IDW', 'id': str(i).zfill(2) +'_'+str(j).zfill(2), 'poll':int(idpoll),'val':float(self.matrix[i][j]['pollutants'][polName]),'time':timestamp }\n cursor=self.mydb.cursor(buffered=True)\n cursor.execute(insertIP,values)\n self.mydb.commit()\n else:\n updateIP=(\"update interpolatedmetrics set interpolatedValiue=%(val)s, timestamp=%(time)s where idinterpolation_algorithm=%(algorithm)s and idcell=%(id)s and idPollutant=%(poll)s \")\n cursor = self.mydb.cursor(buffered=True)\n getPoll= (\"select * from pollutant\")\n cursor.execute(getPoll)\n pollutants = list(cursor.fetchall())\n print('poll ',pollutants,'----------u')\n cursor.close()\n for (idpoll,polName,metric) in pollutants:\n print('metrics: ',polName,'--------',self.matrix[i][j]['pollutants'][polName])\n values={'algorithm':'IDW', 'id': str(i).zfill(2) +'_'+str(j).zfill(2), 'poll':int(idpoll),'val':float(self.matrix[i][j]['pollutants'][polName]),'time':timestamp }\n cursor=self.mydb.cursor(buffered=True)\n cursor.execute(updateIP,values)\n self.mydb.commit()\n except Error as error:\n print(error)\n finally:\n cursor.close()", "title": "" }, { "docid": "9e825298741d79ae6e63b0f8967129ea", "score": "0.51428944", "text": "def __call__(self, qs):\n\n qs = np.abs(qs) # Peak is symmetric\n\n if self.nu>self.gauss_cutoff:\n # Gaussian\n val = (2/(np.pi*self.delta))*np.exp( -(4*(qs**2))/(np.pi*(self.delta**2)) )\n elif self.nu<self.lorentz_cutoff:\n # Lorentzian\n val = (self.delta/(2*np.pi))/(qs**2 + ((self.delta/2)**2) )\n else:\n # Brute-force the term\n val = (2/(np.pi*self.delta))\n\n if self.gamma_method:\n\n print( \"WARNING: The gamma method does not currently work.\" )\n\n # Use gamma functions\n y = (4*(qs**2))/( (np.pi**2) * (self.delta**2) )\n\n # Note that this equivalence comes from the paper:\n # 
Scattering Curves of Ordered Mesoscopic Materials\n # S. Förster, A. Timmann, M. Konrad, C. Schellbach, A. Meyer, S.S. Funari, P. Mulvaney, R. Knott,\n # J. Phys. Chem. B, 2005, 109 (4), pp 1347–1360 DOI: 10.1021/jp0467494\n # (See equation 27 and last section of Appendix.)\n # However there seems to be a typo in the paper, since it does not match the brute-force product.\n\n numerator = gamma( (self.nu/2) + 1.0j*self.gamma_nu*y )\n #numerator = gamma.GammaComplex( (self.nu/2) + 1.0j*self.gamma_nu*(sqrt(y)) )\n denominator = gamma( self.nu/2 )\n term = numerator/denominator\n\n val *= 0.9*term*term.conjugate()\n\n else:\n # Use a brute-force product calculation\n for n in range(0, self.product_terms):\n #print n, self.nu, self.gamma_nu\n term1 = (self.gamma_nu**2)/( (n+self.nu/2)**2 )\n #print \" \" + str(term1)\n term2 = (4*(qs**2))/( (np.pi**2) * (self.delta**2) )\n val *= 1/(1+term1*term2)\n\n return val", "title": "" }, { "docid": "5793d9b9a7a824127c87738c5f8058fc", "score": "0.5141601", "text": "def perform_minical(self, q=None, save_config=True):\n self.logger.info(\"Performing minical...\")\n return_vals = None\n if not self._simulated:\n try:\n results = self._set_WBDC(29)\n gains = results[0]\n Tlinear = results[1]\n Tquadratic = results[2]\n Tnd = results[3]\n NonLin = results[4]\n x = results[5]\n self.logger.info(\"Minical : gain calibrated: {}\".format(gains))\n self.logger.info(\"Minical : Linear Ts: {}\".format(Tlinear))\n self.logger.info(\"Minical : Corrected Ts: {}\".format(Tquadratic))\n self.logger.info(\"Minical : Noise Diode T: {}\".format(Tnd))\n self.logger.info(\"Minical : Nonlinearity: {}\".format(NonLin))\n self.logger.info(\"Minical : Calibrated reading: {}\".format(x))\n color1 = ['r', 'b', 'g', 'purple']\n self.logger.info(\"Minical : Minical performed; Corrected PM readings-{}\".format(str(x)))\n self.logger.info(\"Minical : Noise diode temperatures-{}\".format(str(Tnd)))\n read_pm1, read_pm2, read_pm3, read_pm4 = x[0][0], x[1][0], x[2][0], x[3][0]\n tsys_pm1, tsys_pm2, tsys_pm3, tsys_pm4 = Tquadratic[0][0], Tquadratic[1][0], Tquadratic[2][0], \\\n Tquadratic[3][0]\n self.logger.info(\n \"Minical : Tsys for PM1 {}, PM2 {}, PM3 {}, PM4 {}\".format(tsys_pm1, tsys_pm2, tsys_pm3, tsys_pm4))\n self.tsysfactor1 = tsys_pm1 / read_pm1\n self.tsysfactor2 = tsys_pm2 / read_pm2\n self.tsysfactor3 = tsys_pm3 / read_pm3\n self.tsysfactor4 = tsys_pm4 / read_pm4\n self.logger.info(\n \"Minical : Minical derived tsys factors ,{},{},{},{}\".format(self.tsysfactor1, self.tsysfactor2,\n self.tsysfactor3, self.tsysfactor4))\n return_vals = {\n 'tsysfactors': [self.tsysfactor1, self.tsysfactor2,\n self.tsysfactor3, self.tsysfactor4],\n 'tsys_pm': [tsys_pm1, tsys_pm2, tsys_pm4, tsys_pm4],\n 'x': x,\n 'Tlinear': Tlinear,\n 'Tquadratic': Tquadratic\n }\n\n\n # self.logger.error(\"Saving minical results not yet implemented.\")\n\n\n except Exception as err:\n self.logger.error(\"Couldn't perform minical. 
Error: {}\".format(err), exc_info=True)\n\n if save_config:\n self.logger.debug(\"Saving minical data to {}\".format(self.settings_file))\n with open(self.settings_file, 'r+') as f:\n try:\n data = json.load(f)\n data.update({'minical': return_vals})\n f.seek(0)\n json.dump(data, f)\n f.truncate()\n except ValueError as err:\n self.logger.debug(\"Error updating file: {}\".format(err))\n f.seek(0)\n json.dump({'minical': return_vals}, f)\n f.truncate()\n\n\n if not q:\n return return_vals\n else:\n q.put(return_vals)", "title": "" }, { "docid": "c591276e7113447beb1dca934b685e8f", "score": "0.5137685", "text": "def main(ddir, pdir, spin_up_time=3):\n print('### Read in data ###')\n odir = ddir+'/output'\n Tr = tracer_engine(ddir)\n ds_di = Tr.dataset_readin(['tracer_diags'])\n ds_sn = Tr.dataset_readin(['tracer_snapshots'])\n ds_fi = xr.open_dataset(odir+'/KOC_FINAL.nc')\n\n # Should I be able to read this from a file?\n cut_time = spin_up_time*30*24*60*60\n tr_num = Tr.tracernum\n\n print('### reset QC ###')\n QC_reset_plot(ds_di, ds_sn, Tr, cut_time, tr_num)\n plt.gcf().savefig(pdir+'/QC_reset.png')\n plt.close()\n\n print('### Timeseries full QC ###')\n QC_global_timeseries_plot(ds_fi)\n plt.gcf().savefig(pdir+'/QC_global_timeseries_full.png')\n plt.close()\n\n print('### Timeseries valid QC ###')\n QC_global_timeseries_plot(ds_fi.where(ds_fi.valid_index))\n plt.gcf().savefig(pdir+'/QC_timeseries_valid.png')\n plt.close()\n\n print('### Valid time map QC ###')\n QC_validtime_plot(ds_fi)\n plt.gcf().savefig(pdir+'/QC_map_valid.png')\n plt.close()\n\n print('### Criterion QC ###')\n QC_crit_plot(ds_fi)\n plt.gcf().savefig(pdir+'/QC_crit.png')\n plt.close()\n\n print('### Landmask QC ###')\n QC_mask_plot(ds_fi)\n plt.gcf().savefig(pdir+'/QC_landmask.png')\n plt.close()", "title": "" }, { "docid": "1556dd292c62d92377512fa836097dd8", "score": "0.51225", "text": "def initialize_meas_calibration(self, N_qubits, layout):\n if layout is None:\n cal_q = QuantumRegister(N_qubits)\n meas_cals, state_labels = complete_meas_cal(qr=cal_q)\n else:\n meas_cals, state_labels = complete_meas_cal(qubit_list=layout)\n\n # Run the calibration circuits with the device noise model\n backend = Aer.get_backend('qasm_simulator')\n job = execute(meas_cals, backend=backend, shots=10000, noise_model=self.noise_model)\n cal_results = job.result()\n\n return CompleteMeasFitter(cal_results, state_labels).filter", "title": "" }, { "docid": "aa31749633dbfb634abb92ae3e1fdcee", "score": "0.5121901", "text": "def _scale_prepare(self):\n\n # acknowledge all of the programs we are about to use...\n\n Citations.cite(\"pointless\")\n Citations.cite(\"aimless\")\n Citations.cite(\"ccp4\")\n\n # ---------- GATHER ----------\n\n self._sweep_handler = SweepInformationHandler(self._scalr_integraters)\n\n for epoch in self._sweep_handler.get_epochs():\n si = self._sweep_handler.get_sweep_information(epoch)\n pname, xname, dname = si.get_project_info()\n sname = si.get_sweep_name()\n\n exclude_sweep = False\n\n for sweep in PhilIndex.params.xia2.settings.sweep:\n if sweep.id == sname and sweep.exclude:\n exclude_sweep = True\n break\n\n if exclude_sweep:\n self._sweep_handler.remove_epoch(epoch)\n logger.debug(\"Excluding sweep %s\", sname)\n else:\n logger.debug(\"%-30s %s/%s/%s\", \"adding data from:\", xname, dname, sname)\n\n # gather data for all images which belonged to the parent\n # crystal - allowing for the fact that things could go wrong\n # e.g. 
epoch information not available, exposure times not in\n # headers etc...\n\n for e in self._sweep_handler.get_epochs():\n si = self._sweep_handler.get_sweep_information(e)\n assert is_mtz_file(\n si.get_reflections()\n ), f\"{si.get_reflections()!r} is not a valid MTZ file\"\n\n p, x = self._sweep_handler.get_project_info()\n self._scalr_pname = p\n self._scalr_xname = x\n\n # verify that the lattices are consistent, calling eliminate if\n # they are not N.B. there could be corner cases here\n\n need_to_return = False\n\n multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing\n\n # START OF if more than one epoch\n if len(self._sweep_handler.get_epochs()) > 1:\n\n # if we have multi-sweep-indexing going on then logic says all should\n # share common lattice & UB definition => this is not used here?\n\n # START OF if multi_sweep indexing and not input pg\n if multi_sweep_indexing and not self._scalr_input_pointgroup:\n pointless_hklins = []\n\n max_batches = 0\n for epoch in self._sweep_handler.get_epochs():\n si = self._sweep_handler.get_sweep_information(epoch)\n hklin = si.get_reflections()\n\n batches = MtzUtils.batches_from_mtz(hklin)\n if 1 + max(batches) - min(batches) > max_batches:\n max_batches = max(batches) - min(batches) + 1\n\n logger.debug(\"Biggest sweep has %d batches\", max_batches)\n max_batches = nifty_power_of_ten(max_batches)\n\n counter = 0\n\n refiners = []\n\n for epoch in self._sweep_handler.get_epochs():\n si = self._sweep_handler.get_sweep_information(epoch)\n hklin = si.get_reflections()\n integrater = si.get_integrater()\n refiner = integrater.get_integrater_refiner()\n refiners.append(refiner)\n\n hklin = self._prepare_pointless_hklin(\n hklin, si.get_integrater().get_phi_width()\n )\n\n hklout = os.path.join(\n self.get_working_directory(),\n \"%s_%s_%s_%s_prepointless.mtz\"\n % (pname, xname, dname, si.get_sweep_name()),\n )\n\n # we will want to delete this one exit\n FileHandler.record_temporary_file(hklout)\n\n first_batch = min(si.get_batches())\n si.set_batch_offset(counter * max_batches - first_batch + 1)\n\n rebatch(\n hklin,\n hklout,\n first_batch=counter * max_batches + 1,\n pname=pname,\n xname=xname,\n dname=dname,\n )\n\n pointless_hklins.append(hklout)\n\n # update the counter & recycle\n counter += 1\n\n # SUMMARY - have added all sweeps to pointless_hklins\n\n s = self._factory.Sortmtz()\n\n pointless_hklin = os.path.join(\n self.get_working_directory(),\n \"%s_%s_prepointless_sorted.mtz\"\n % (self._scalr_pname, self._scalr_xname),\n )\n\n s.set_hklout(pointless_hklin)\n\n for hklin in pointless_hklins:\n s.add_hklin(hklin)\n\n s.sort()\n\n # FIXME xia2-51 in here look at running constant scaling on the\n # pointless hklin to put the runs on the same scale. 
Ref=[A]\n\n pointless_const = os.path.join(\n self.get_working_directory(),\n \"%s_%s_prepointless_const.mtz\"\n % (self._scalr_pname, self._scalr_xname),\n )\n FileHandler.record_temporary_file(pointless_const)\n\n aimless_const = self._factory.Aimless()\n aimless_const.set_hklin(pointless_hklin)\n aimless_const.set_hklout(pointless_const)\n aimless_const.const()\n\n pointless_const = os.path.join(\n self.get_working_directory(),\n \"%s_%s_prepointless_const_unmerged.mtz\"\n % (self._scalr_pname, self._scalr_xname),\n )\n FileHandler.record_temporary_file(pointless_const)\n pointless_hklin = pointless_const\n\n # FIXME xia2-51 in here need to pass all refiners to ensure that the\n # information is passed back to all of them not just the last one...\n logger.debug(\n \"Running multisweep pointless for %d sweeps\", len(refiners)\n )\n pointgroup, reindex_op, ntr, pt = self._pointless_indexer_multisweep(\n pointless_hklin, refiners\n )\n\n logger.debug(\"X1698: %s: %s\", pointgroup, reindex_op)\n\n lattices = [get_lattice(pointgroup)]\n\n for epoch in self._sweep_handler.get_epochs():\n si = self._sweep_handler.get_sweep_information(epoch)\n intgr = si.get_integrater()\n hklin = si.get_reflections()\n refiner = intgr.get_integrater_refiner()\n\n if ntr:\n intgr.integrater_reset_reindex_operator()\n need_to_return = True\n\n # SUMMARY - added all sweeps together into an mtz, ran\n # _pointless_indexer_multisweep on this, made a list of one lattice\n # and potentially reset reindex op?\n # END OF if multi_sweep indexing and not input pg\n\n # START OF if not multi_sweep, or input pg given\n else:\n lattices = []\n\n for epoch in self._sweep_handler.get_epochs():\n\n si = self._sweep_handler.get_sweep_information(epoch)\n intgr = si.get_integrater()\n hklin = si.get_reflections()\n refiner = intgr.get_integrater_refiner()\n\n if self._scalr_input_pointgroup:\n pointgroup = self._scalr_input_pointgroup\n reindex_op = \"h,k,l\"\n ntr = False\n\n else:\n pointless_hklin = self._prepare_pointless_hklin(\n hklin, si.get_integrater().get_phi_width()\n )\n\n pointgroup, reindex_op, ntr, pt = self._pointless_indexer_jiffy(\n pointless_hklin, refiner\n )\n\n logger.debug(\"X1698: %s: %s\", pointgroup, reindex_op)\n\n lattice = get_lattice(pointgroup)\n\n if lattice not in lattices:\n lattices.append(lattice)\n\n if ntr:\n intgr.integrater_reset_reindex_operator()\n need_to_return = True\n # SUMMARY do pointless_indexer on each sweep, get lattices and make a list\n # of unique lattices, potentially reset reindex op.\n # END OF if not multi_sweep, or input pg given\n\n # SUMMARY - still within if more than one epoch, now have a list of number\n # of lattices\n\n # START OF if multiple-lattices\n if len(lattices) > 1:\n\n # why not using pointless indexer jiffy??!\n\n correct_lattice = sort_lattices(lattices)[0]\n\n logger.info(\"Correct lattice asserted to be %s\", correct_lattice)\n\n # transfer this information back to the indexers\n for epoch in self._sweep_handler.get_epochs():\n\n si = self._sweep_handler.get_sweep_information(epoch)\n refiner = si.get_integrater().get_integrater_refiner()\n sname = si.get_sweep_name()\n\n state = refiner.set_refiner_asserted_lattice(correct_lattice)\n\n if state == refiner.LATTICE_CORRECT:\n logger.info(\n \"Lattice %s ok for sweep %s\", correct_lattice, sname\n )\n elif state == refiner.LATTICE_IMPOSSIBLE:\n raise RuntimeError(\n f\"Lattice {correct_lattice} impossible for {sname}\"\n )\n elif state == refiner.LATTICE_POSSIBLE:\n logger.info(\n \"Lattice %s 
assigned for sweep %s\", correct_lattice, sname\n )\n need_to_return = True\n # END OF if multiple-lattices\n # SUMMARY - forced all lattices to be same and hope its okay.\n # END OF if more than one epoch\n\n # if one or more of them was not in the lowest lattice,\n # need to return here to allow reprocessing\n\n if need_to_return:\n self.set_scaler_done(False)\n self.set_scaler_prepare_done(False)\n return\n\n # ---------- REINDEX ALL DATA TO CORRECT POINTGROUP ----------\n\n # all should share the same pointgroup, unless twinned... in which\n # case force them to be...\n\n pointgroups = {}\n reindex_ops = {}\n probably_twinned = False\n\n need_to_return = False\n\n multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing\n\n # START OF if multi-sweep and not input pg\n if multi_sweep_indexing and not self._scalr_input_pointgroup:\n pointless_hklins = []\n\n max_batches = 0\n for epoch in self._sweep_handler.get_epochs():\n si = self._sweep_handler.get_sweep_information(epoch)\n hklin = si.get_reflections()\n\n batches = MtzUtils.batches_from_mtz(hklin)\n if 1 + max(batches) - min(batches) > max_batches:\n max_batches = max(batches) - min(batches) + 1\n\n logger.debug(\"Biggest sweep has %d batches\", max_batches)\n max_batches = nifty_power_of_ten(max_batches)\n\n counter = 0\n\n refiners = []\n\n for epoch in self._sweep_handler.get_epochs():\n si = self._sweep_handler.get_sweep_information(epoch)\n hklin = si.get_reflections()\n integrater = si.get_integrater()\n refiner = integrater.get_integrater_refiner()\n refiners.append(refiner)\n\n hklin = self._prepare_pointless_hklin(\n hklin, si.get_integrater().get_phi_width()\n )\n\n hklout = os.path.join(\n self.get_working_directory(),\n \"%s_%s_%s_%s_prepointless.mtz\"\n % (pname, xname, dname, si.get_sweep_name()),\n )\n\n # we will want to delete this one exit\n FileHandler.record_temporary_file(hklout)\n\n first_batch = min(si.get_batches())\n si.set_batch_offset(counter * max_batches - first_batch + 1)\n\n rebatch(\n hklin,\n hklout,\n first_batch=counter * max_batches + 1,\n pname=pname,\n xname=xname,\n dname=dname,\n )\n\n pointless_hklins.append(hklout)\n\n # update the counter & recycle\n counter += 1\n\n # FIXME related to xia2-51 - this looks very very similar to the logic\n # in [A] above - is this duplicated logic?\n s = self._factory.Sortmtz()\n\n pointless_hklin = os.path.join(\n self.get_working_directory(),\n \"%s_%s_prepointless_sorted.mtz\"\n % (self._scalr_pname, self._scalr_xname),\n )\n\n s.set_hklout(pointless_hklin)\n\n for hklin in pointless_hklins:\n s.add_hklin(hklin)\n\n s.sort()\n\n pointless_const = os.path.join(\n self.get_working_directory(),\n f\"{self._scalr_pname}_{self._scalr_xname}_prepointless_const.mtz\",\n )\n FileHandler.record_temporary_file(pointless_const)\n\n aimless_const = self._factory.Aimless()\n aimless_const.set_hklin(pointless_hklin)\n aimless_const.set_hklout(pointless_const)\n aimless_const.const()\n\n pointless_const = os.path.join(\n self.get_working_directory(),\n \"%s_%s_prepointless_const_unmerged.mtz\"\n % (self._scalr_pname, self._scalr_xname),\n )\n FileHandler.record_temporary_file(pointless_const)\n pointless_hklin = pointless_const\n\n pointgroup, reindex_op, ntr, pt = self._pointless_indexer_multisweep(\n pointless_hklin, refiners\n )\n\n for epoch in self._sweep_handler.get_epochs():\n pointgroups[epoch] = pointgroup\n reindex_ops[epoch] = reindex_op\n # SUMMARY ran pointless multisweep on combined mtz and made a dict\n # of pointgroups and reindex_ops 
(all same)\n # END OF if multi-sweep and not input pg\n\n # START OF if not mulit-sweep or pg given\n else:\n for epoch in self._sweep_handler.get_epochs():\n si = self._sweep_handler.get_sweep_information(epoch)\n\n hklin = si.get_reflections()\n\n integrater = si.get_integrater()\n refiner = integrater.get_integrater_refiner()\n\n if self._scalr_input_pointgroup:\n logger.debug(\n \"Using input pointgroup: %s\", self._scalr_input_pointgroup\n )\n pointgroup = self._scalr_input_pointgroup\n reindex_op = \"h,k,l\"\n pt = False\n\n else:\n\n pointless_hklin = self._prepare_pointless_hklin(\n hklin, si.get_integrater().get_phi_width()\n )\n\n pointgroup, reindex_op, ntr, pt = self._pointless_indexer_jiffy(\n pointless_hklin, refiner\n )\n\n logger.debug(\"X1698: %s: %s\", pointgroup, reindex_op)\n\n if ntr:\n\n integrater.integrater_reset_reindex_operator()\n need_to_return = True\n\n if pt and not probably_twinned:\n probably_twinned = True\n\n logger.debug(\"Pointgroup: %s (%s)\", pointgroup, reindex_op)\n\n pointgroups[epoch] = pointgroup\n reindex_ops[epoch] = reindex_op\n # SUMMARY - for each sweep, run indexer jiffy and get reindex operators\n # and pointgroups dictionaries (could be different between sweeps)\n\n # END OF if not mulit-sweep or pg given\n\n overall_pointgroup = None\n\n pointgroup_set = {pointgroups[e] for e in pointgroups}\n\n if len(pointgroup_set) > 1 and not probably_twinned:\n raise RuntimeError(\n \"non uniform pointgroups: %s\" % str(list(pointgroup_set))\n )\n\n if len(pointgroup_set) > 1:\n logger.debug(\n \"Probably twinned, pointgroups: %s\",\n \" \".join(p.replace(\" \", \"\") for p in pointgroup_set),\n )\n numbers = (spacegroup_name_to_number(ps) for ps in pointgroup_set)\n overall_pointgroup = spacegroup_number_to_name(min(numbers))\n self._scalr_input_pointgroup = overall_pointgroup\n\n logger.info(\"Twinning detected, assume pointgroup %s\", overall_pointgroup)\n\n need_to_return = True\n\n else:\n overall_pointgroup = pointgroup_set.pop()\n # SUMMARY - Have handled if different pointgroups & chosen an overall_pointgroup\n # which is the lowest symmetry\n\n # Now go through sweeps and do reindexing\n for epoch in self._sweep_handler.get_epochs():\n si = self._sweep_handler.get_sweep_information(epoch)\n\n integrater = si.get_integrater()\n\n integrater.set_integrater_spacegroup_number(\n spacegroup_name_to_number(overall_pointgroup)\n )\n integrater.set_integrater_reindex_operator(\n reindex_ops[epoch], reason=\"setting point group\"\n )\n # This will give us the reflections in the correct point group\n si.set_reflections(integrater.get_integrater_intensities())\n\n if need_to_return:\n self.set_scaler_done(False)\n self.set_scaler_prepare_done(False)\n return\n\n # in here now optionally work through the data files which should be\n # indexed with a consistent point group, and transform the orientation\n # matrices by the lattice symmetry operations (if possible) to get a\n # consistent definition of U matrix modulo fixed rotations\n\n if PhilIndex.params.xia2.settings.unify_setting:\n self.unify_setting()\n\n if self.get_scaler_reference_reflection_file():\n self._reference = self.get_scaler_reference_reflection_file()\n logger.debug(\"Using HKLREF %s\", self._reference)\n\n elif PhilIndex.params.xia2.settings.scale.reference_reflection_file:\n self._reference = (\n PhilIndex.params.xia2.settings.scale.reference_reflection_file\n )\n logger.debug(\"Using HKLREF %s\", self._reference)\n\n params = PhilIndex.params\n use_brehm_diederichs = 
params.xia2.settings.use_brehm_diederichs\n if len(self._sweep_handler.get_epochs()) > 1 and use_brehm_diederichs:\n self.brehm_diederichs_reindexing()\n # If not Brehm-deidrichs, set reference as first sweep\n elif len(self._sweep_handler.get_epochs()) > 1 and not self._reference:\n\n first = self._sweep_handler.get_epochs()[0]\n si = self._sweep_handler.get_sweep_information(first)\n self._reference = si.get_reflections()\n\n # Now reindex to be consistent with first dataset - run pointless on each\n # dataset with reference\n if self._reference:\n\n md = self._factory.Mtzdump()\n md.set_hklin(self._reference)\n md.dump()\n\n datasets = md.get_datasets()\n\n # then get the unit cell, lattice etc.\n\n reference_lattice = get_lattice(md.get_spacegroup())\n reference_cell = md.get_dataset_info(datasets[0])[\"cell\"]\n\n # then compute the pointgroup from this...\n\n # ---------- REINDEX TO CORRECT (REFERENCE) SETTING ----------\n\n for epoch in self._sweep_handler.get_epochs():\n\n # if we are working with unified UB matrix then this should not\n # be a problem here (note, *if*; *should*)\n\n # what about e.g. alternative P1 settings?\n # see JIRA MXSW-904\n if PhilIndex.params.xia2.settings.unify_setting:\n continue\n\n pl = self._factory.Pointless()\n\n si = self._sweep_handler.get_sweep_information(epoch)\n hklin = si.get_reflections()\n\n pl.set_hklin(\n self._prepare_pointless_hklin(\n hklin, si.get_integrater().get_phi_width()\n )\n )\n\n hklout = os.path.join(\n self.get_working_directory(),\n \"%s_rdx2.mtz\" % os.path.split(hklin)[-1][:-4],\n )\n\n # we will want to delete this one exit\n FileHandler.record_temporary_file(hklout)\n\n # now set the initial reflection set as a reference...\n\n pl.set_hklref(self._reference)\n\n # https://github.com/xia2/xia2/issues/115 - should ideally iteratively\n # construct a reference or a tree of correlations to ensure correct\n # reference setting - however if small molecule assume has been\n # multi-sweep-indexed so can ignore \"fatal errors\" - temporary hack\n pl.decide_pointgroup(\n ignore_errors=PhilIndex.params.xia2.settings.small_molecule\n )\n\n logger.debug(\"Reindexing analysis of %s\", pl.get_hklin())\n\n pointgroup = pl.get_pointgroup()\n reindex_op = pl.get_reindex_operator()\n\n logger.debug(\"Operator: %s\", reindex_op)\n\n # apply this...\n\n integrater = si.get_integrater()\n\n integrater.set_integrater_reindex_operator(\n reindex_op, reason=\"match reference\"\n )\n integrater.set_integrater_spacegroup_number(\n spacegroup_name_to_number(pointgroup)\n )\n si.set_reflections(integrater.get_integrater_intensities())\n\n md = self._factory.Mtzdump()\n md.set_hklin(si.get_reflections())\n md.dump()\n\n datasets = md.get_datasets()\n\n if len(datasets) > 1:\n raise RuntimeError(\n \"more than one dataset in %s\" % si.get_reflections()\n )\n\n # then get the unit cell, lattice etc.\n\n lattice = get_lattice(md.get_spacegroup())\n cell = md.get_dataset_info(datasets[0])[\"cell\"]\n\n if lattice != reference_lattice:\n raise RuntimeError(\n \"lattices differ in %s and %s\"\n % (self._reference, si.get_reflections())\n )\n\n logger.debug(\"Cell: %.2f %.2f %.2f %.2f %.2f %.2f\" % cell)\n logger.debug(\"Ref: %.2f %.2f %.2f %.2f %.2f %.2f\" % reference_cell)\n\n for j in range(6):\n if (\n math.fabs((cell[j] - reference_cell[j]) / reference_cell[j])\n > 0.1\n ):\n raise RuntimeError(\n \"unit cell parameters differ in %s and %s\"\n % (self._reference, si.get_reflections())\n )\n\n # ---------- SORT TOGETHER DATA ----------\n\n 
self._sort_together_data_ccp4()\n\n self._scalr_resolution_limits = {}\n\n # store central resolution limit estimates\n\n batch_ranges = [\n self._sweep_handler.get_sweep_information(epoch).get_batch_range()\n for epoch in self._sweep_handler.get_epochs()\n ]\n\n self._resolution_limit_estimates = ersatz_resolution(\n self._prepared_reflections, batch_ranges\n )", "title": "" }, { "docid": "9b96fa0762f7e30368af2f97d3d6df1e", "score": "0.5118416", "text": "def integrate(self):\n [rhs1,rhs2] = calcrhs(self.qc_1, self.qc_2, self.psic_1, self.psic_2, \n self.N,self.N2,self.ll,self.kk,self.Q_2c,self.U,self.beta,self.km,self.opt)\n if self.first_step:\n #Forward step\n self.qc_1[2, :] = fs(self.qc_1[1, :, :], rhs1[:], self.dt,self.kk,self.ll,self.nu)\n self.qc_2[2, :] = fs(self.qc_2[1, :, :], rhs2[:], self.dt,self.kk,self.ll,self.nu)\n self.first_step = False\n \n else:\n self.qc_1[2, :, :] = lf(self.qc_1[0, :, :], rhs1[:], self.dt,self.ll,self.kk,self.nu)\n self.qc_2[2, :, :] = lf(self.qc_2[0, :, :], rhs2[:], self.dt,self.ll,self.kk,self.nu)\n self.qc_1[1, :] = filt( self.qc_1[1, :], self.qc_1[0, :], self.qc_1[2, :],self.robert_coefficient)\n self.qc_2[1, :] = filt( self.qc_2[1, :], self.qc_2[0, :], self.qc_2[2, :],self.robert_coefficient)\n \n #Invert:\n self.qtp()\n \n #Transfer values:\n self.qc_1[0, :, :] = self.qc_1[1, :, :]\n self.qc_2[0, :, :] = self.qc_2[1, :, :]\n self.qc_1[1, :, :] = self.qc_1[2, :, :]\n self.qc_2[1, :, :] = self.qc_2[2, :, :]\n self.t += self.dt\n self.count += 1", "title": "" }, { "docid": "6040c372d8a9466cb6266afe7ae93344", "score": "0.5114311", "text": "def update_Q(self):\n self.Q = self.G.dot(self.P.transpose()).transpose()", "title": "" }, { "docid": "b4e2b89b663690fe6e7c0dacb217b8d4", "score": "0.5112209", "text": "def f_conti_all(self, xval, pp):\n f_Fe_MgII = Fe_flux_mgii(xval, pp[0:3]) # iron flux for MgII line region\n f_Fe_Balmer = Fe_flux_balmer(xval, pp[3:6]) # iron flux for balmer line region\n f_pl = pp[6]*(xval/3000.0)**pp[7] # power-law continuum\n f_conti_BC = balmer_conti(xval, pp[8:11]) # Balmer continuum\n f_poly = f_poly_conti(xval, pp[11:]) # polynormal conponent for reddened spectra\n\n\n if self.Fe_uv_op == True and self.poly == False and self.BC == False :\n yval = f_pl + f_Fe_MgII + f_Fe_Balmer \n elif self.Fe_uv_op == True and self.poly == True and self.BC == False:\n yval = f_pl + f_Fe_MgII + f_Fe_Balmer + f_poly \n elif self.Fe_uv_op == True and self.poly == False and self.BC == True :\n yval = f_pl + f_Fe_MgII + f_Fe_Balmer + f_conti_BC \n elif self.Fe_uv_op == False and self.poly == True and self.BC == False :\n yval = f_pl + f_poly\n elif self.Fe_uv_op == False and self.poly == False and self.BC == False :\n yval = f_pl \n elif self.Fe_uv_op == False and self.poly == False and self.BC == True :\n yval = f_pl + f_Fe_Balmer + f_conti_BC \n elif self.Fe_uv_op == True and self.poly == True and self.BC == True :\n yval = f_pl + f_Fe_MgII + f_Fe_Balmer + f_poly + f_conti_BC \n elif self.Fe_uv_op == False and self.poly == True and self.BC == True :\n yval = f_pl + f_Fe_Balmer + f_poly + f_conti_BC \n else:\n raise RuntimeError('No this option for Fe_uv_op, poly and BC!')\n return yval", "title": "" }, { "docid": "158a0ef9553cdd2073107694ce2b0648", "score": "0.5108937", "text": "def setUp(self):\n self.scale = 1.5\n self.m = 3.0\n x = np.arange(0.0001, 0.1, 0.0001)\n y = np.asarray([self.scale * math.pow(q ,-1.0*self.m) for q in x])\n dy = y*.1\n self.data = Data1D(x=x, y=y, dy=dy)\n self.npts = 20", "title": "" }, { "docid": 
"7a993dab87c4868478df529d8e1c43f4", "score": "0.5102174", "text": "def setup(self, *args, **kwargs):\n # magic taken from\n # https://github.com/pydata/xarray/blob/stable/asv_bench/benchmarks/rolling.py\n super().setup(**kwargs)\n spatial_dims = [\"lon\", \"lat\"]\n self.ds = self.ds.mean(spatial_dims)\n self.control = self.control.mean(spatial_dims)\n self.iterations = 500", "title": "" }, { "docid": "b2ffbfc3ebb4654747b4229fc9264567", "score": "0.50982517", "text": "def _compute_all(self) -> np.ndarray:\r\n if self.gyr.ndim < 2:\r\n raise ValueError(f\"All inputs must have at least two observations.\")\r\n if self.acc.shape != self.gyr.shape:\r\n raise ValueError(f\"Could not operate on acc array of shape {self.acc.shape} and gyr array of shape {self.gyr.shape}.\")\r\n W = np.zeros_like(self.acc)\r\n if self.mag is None:\r\n # Estimation with IMU only (Gyroscopes and Accelerometers)\r\n W2 = self.am_estimation(self.acc)\r\n W[0] = W2[0] if self.w0 is None else self.w0\r\n else:\r\n # Estimation with MARG (IMU and Magnetometer)\r\n if self.mag.shape != self.gyr.shape:\r\n raise ValueError(f\"Could not operate on mag array of shape {self.mag.shape} and gyr array of shape {self.gyr.shape}.\")\r\n W2 = self.am_estimation(self.acc, self.mag)\r\n W[0] = W2[0] if self.w0 is None else self.w0\r\n # Complemetary filter\r\n if self.mag is None:\r\n # Estimation with IMU only (Gyroscopes and Accelerometers)\r\n for i in range(1, len(W)):\r\n W[i, :2] = (W[i-1, :2] + self.gyr[i, :2]*self.Dt)*self.gain + W2[i, :2]*(1.0-self.gain)\r\n return W\r\n # Estimation with MARG (IMU and Magnetometer)\r\n for i in range(1, len(W)):\r\n W[i] = (W[i-1] + self.gyr[i]*self.Dt)*self.gain + W2[i]*(1.0-self.gain)\r\n return W", "title": "" }, { "docid": "e5d9914259df5ad7cd3497818b1a67e7", "score": "0.5095781", "text": "def checkQueue1(self, q1, q2):\n try:\n data1 = q1.get(0)\n data2 = q2.get(0)\n # print(data)\n # print(dir(self.wave))\n if self.ch1:\n # print(len(self.x))\n # print(len(data1))\n self.p.set_xdata(self.x[:len(data1)])\n self.p.set_ydata(data1)\n if self.ch2:\n self.p2.set_xdata(self.x[:len(data2)])\n self.p2.set_ydata(data2)\n except Empty:\n pass\n finally:\n if self.ch1 or self.ch2:\n self.wf.canvas.draw()\n self.root.after(self.checkqueuedelay, self.checkQueue1, q1, q2)", "title": "" }, { "docid": "4fb10f137fbe4a5be27f7f39262d1d23", "score": "0.5095399", "text": "def _run_arguement_object_fits(self):\n # run fits\n # fit catalog object\n Y_dict = self._catalog_object.get_transformed_Y()\n # fit designmatrix object\n X = self._designmatrix_object.get_matrix()\n Xcol_names = self._designmatrix_object.get_columnnames()\n row_names = self._designmatrix_object.get_rownames()\n # make sure waveforms are matched\n # loop through Xrow_names, use as key for Y_dict, populate Y_matrix\n Y = np.empty((len(row_names),len(Y_dict[list(Y_dict.keys())[0]])))\n\n # check if waveforms are complex valued. 
if so, instantiate Y as complex type\n if sum(np.iscomplex(Y_dict[list(Y_dict.keys())[0]])):\n # then it is complex\n Y = np.empty((len(row_names),len(Y_dict[list(Y_dict.keys())[0]]))).astype(np.complex)\n\n for i in np.arange(0,len(row_names)):\n Y[i,:] = Y_dict[row_names[i]]\n # fit basis object\n A = self._basis_object.fit_transform(Y)\n return Y, X, A, Xcol_names, row_names", "title": "" }, { "docid": "cfa8d2600e5327bc94e8c176851b6556", "score": "0.5087115", "text": "def _update(self):\n # this method is called by PrmDictBase.set\n P = self.physical_prm; N = self.numerical_prm # short forms\n # ensure that whatever the user has provided for I, f, etc.\n # we can call the quantity as a plain function of x:\n for funcname in 'I', 'f', 'bc_0', 'bc_L', 'c':\n P[funcname] = wrap2callable(P[funcname])\n dx = P['L']/float(N['n'])\n # update coordinates and solution arrays:\n if len(self.u) != N['n'] +1:\n self.x = seq(0, P['L'], dx)\n self.up = zeros(N['n']+1)\n self.u = self.up.copy()\n self.um = self.up.copy()\n # stability limit: dt = dx/max(c)\n # (enable non-constant c(x,t) - subclasses need this)\n max_c = max([P['c'](x, 0) for x in self.x]) # loop is safest\n dt_limit = dx/max_c\n if N['dt'] <= 0 or N['dt'] > dt_limit:\n N['dt'] = N['safety_factor']*dt_limit", "title": "" }, { "docid": "8b0719305b96a0a8f9a8441587ca780a", "score": "0.5086077", "text": "def QC(self):\n self.cell_umi=self.data.apply(lambda x:x.sum())\n self.cell_gene=self.data.apply(lambda x:(x>0).sum())\n self.cell_mt_ratio=self.data.ix[[x for x in self.data.index if x[:3]==\"MT-\"]].apply(lambda x:x.sum())/self.cell_umi\n self.gene_detected=self.data.apply(lambda x:(x>0).sum(),axis=1)\n #self.gene_detected=self.gene_detected[self.gene_detected>0]\n fig=plt.figure()\n #sns.set(style=\"white\",font_scale=1.2)\n ax1=fig.add_subplot(1,3,1)\n ax1=sns.boxplot(self.cell_umi,orient='v')\n ax1.set_xlabel(\"UMI\",fontsize=14)\n ax2=fig.add_subplot(1,3,2)\n ax2=sns.boxplot(self.cell_gene,orient='v')\n ax2.set_xlabel(\"GENE\",fontsize=14)\n ax3=fig.add_subplot(1,3,3)\n ax3=sns.boxplot(self.cell_mt_ratio,orient='v')\n ax3.set_xlabel(\"MT_Ratio\",fontsize=14)\n #ax4=fig.add_subplot(1,4,4)\n #ax4=sns.boxplot(self.gene_detected,orient='v')\n #ax4.set_xlabel(\"GENE_NUM\",fontsize=14)\n plt.subplots_adjust(wspace=0.4)\n plt.show()\n #plt.close()", "title": "" }, { "docid": "44196e32b3126c4b60c318044e7ff74a", "score": "0.5085261", "text": "def prepare_calc(self):\n \n # mass\n self.XM=self.RHO*4.*np.pi/3.*self.R0**3\n \n # polarisibility\n self.Polaris=4.*np.pi*Epsi0*(self.EPSR-1)/(self.EPSR+2)*self.R0**3 # from Tania\n \n # tweezer properties\n WK= 2*np.pi / self.lambda_tw #=2*pi/lambda=k\n OMOPT=c*WK\n W2=self.waist**2\n _epsTW=4*self.Pin1/(self.WX*self.WY*np.pi*c*Epsi0)\n self.epsTW = np.sqrt(_epsTW)\n \n # cavity properties\n VOL=self.XL*np.pi*W2/4 # add a factor of 4 here.\n KAPPin=np.pi*c/self.Finesse/self.XL\n _epsCAV=hbar*OMOPT/(2.*VOL*Epsi0)\n self.epsCAV = np.sqrt(_epsCAV)\n ZR=self.WX*self.WY*WK/2\n self.ZR = ZR\n \n # linewiddth\n coeff=WK*self.Polaris/Epsi0/OMOPT**2/np.pi\n kappnano=4*coeff**2*self.DelFSR*np.cos(WK*self.X0)*np.cos(WK*self.X0)\n self.kappa=kappnano+KAPPin\n \n # damping rate\n GAMMAM=1600*self.Press/np.pi\n GAMMAM=GAMMAM/500/self.RHO/self.R0\n self.Gamma=GAMMAM/2 # our Gamma is Gammam/2\n \n # mechanical frequencies\n Det2pi=self.detuning*2*np.pi\n kapp2=0.5*self.kappa\n Wkx0=WK*self.X0\n OmX=self.Polaris*self.epsTW**2/self.XM/self.WX**2\n OmY=self.Polaris*self.epsTW**2/self.XM/self.WY**2\n 
OmZ=0.5*self.Polaris*self.epsTW**2/self.XM/ZR**2\n \n # theta[rad]\n thet = self.theta0 * np.pi\n \n # photon field\n Edip=-0.5*self.Polaris*self.epsTW*self.epsCAV*np.sin(thet)\n Ediph=Edip/hbar\n ALPRe=Det2pi*Ediph*np.cos(Wkx0)/(kapp2**2+Det2pi**2)\n ALPim=-kapp2*Ediph*np.cos(Wkx0)/(kapp2**2+Det2pi**2)\n Nphoton=Ediph*Ediph*np.cos(Wkx0)*np.cos(Wkx0)\n self.n_photon=Nphoton/(kapp2**2+Det2pi**2)\n \n # corrections to frequencies due to cavity\n C1=-Edip/self.XM*2.*ALPRe*WK**2*np.cos(Wkx0)\n OmX=OmX+C1*np.sin(thet)*np.sin(thet)\n OmY=OmY+C1*np.cos(thet)*np.cos(thet)\n OmZ=OmZ-2.*Edip/self.XM*ALPRe*(WK-1/ZR)**2*np.cos(Wkx0)\n \n self.omega_mech = np.array([np.sqrt(OmX), np.sqrt(OmY), np.sqrt(OmZ)])\n \n # phonon number at equilibrium\n self.n_mech = k*self.T/(hbar * self.omega_mech)\n \n ### COUPLINGS\n # Optomechanical couplings\n XZPF = np.sqrt(hbar/(2*self.XM*self.omega_mech[0]))\n YZPF = np.sqrt(hbar/(2*self.XM*self.omega_mech[1]))\n ZZPF = np.sqrt(hbar/(2*self.XM*self.omega_mech[2]))\n \n # light-matter couplings\n GX = Ediph*WK*XZPF*np.sin(thet)*np.sin(Wkx0)\n GY = Ediph*WK*YZPF*np.cos(thet)*np.sin(Wkx0)\n GZ = -Ediph*(WK-1/ZR)*ZZPF*np.cos(Wkx0)\n \n # matter-matter couplings\n GXY = Ediph*WK*XZPF*WK*YZPF*ALPRe*np.sin(2*thet)*np.cos(Wkx0)\n GZX = 2*Ediph*(WK-1/ZR)*ZZPF*WK*XZPF*ALPim*np.sin(Wkx0)*np.sin(thet)\n GYZ = 2*Ediph*(WK-1/ZR)*ZZPF*WK*YZPF*ALPim*np.sin(Wkx0)*np.cos(thet)\n \n self.g = np.array([GX, GY, GZ, GXY, GYZ, GZX])", "title": "" }, { "docid": "4a38c830af65ca699d2a49aa2fd3d719", "score": "0.5082382", "text": "def estimate_all_q_values(self, model):\n\n print(\"Re-calculating all Q-values in replay memory ...\")\n\n # Process the entire replay-memory in batches.\n for begin, end, progress in self.all_batches():\n # Print progress.\n msg = \"\\tProgress: {0:.0%}\"\n msg = msg.format(progress)\n #print_progress(msg)\n\n # Get the states for the current batch.\n states = self.states[begin:end]\n\n # Calculate the Q-values using the Neural Network\n # and update the replay-memory.\n self.q_values[begin:end] = model.get_q_values(states=states)\n\n # Newline.\n print()", "title": "" }, { "docid": "7fc0cc9a4c0f6123adb6aad9d854be0a", "score": "0.5080061", "text": "def __calibration(self):\n if not self.dacWorker.calibrating:\n stating_msg = \"Are you sure you want to start calibration?\"\n reply = QtWidgets.QMessageBox.warning(\n self.MainWindow, \"Message\", stating_msg, QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No,\n )\n if reply == QtWidgets.QMessageBox.Yes:\n # self.dacWorker.calibration(self.__mfc1,step=10,waiting_time=1)\n try:\n pi = pigpio.pi()\n except:\n print(\"pigpio is not defined\")\n return\n self.calibration_thread = Calibrator(self.__app, self.dacWorker,self.adcWorker,self.__mfc1,10,1)\n self.qmsSigThread = qmsSignal.SyncSignal(pi, self.__app, 2, self.adcWorker)\n self.calibration_thread.finished.connect(self.calibration_terminated)\n self.calibration_thread.start()\n self.qmsSigThread.start()\n\n\n else:\n pass\n else:\n ending_msg = \"Are you sure you want to stop calibration?\"\n reply = QtWidgets.QMessageBox.warning(\n self.MainWindow, \"Message\", ending_msg, QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No,\n )\n if reply == QtWidgets.QMessageBox.Yes:\n self.dacWorker.calibrating = False\n self.calibration_terminated()\n else:\n pass", "title": "" }, { "docid": "547ff18e52894891a06861a03b4f4e95", "score": "0.5077407", "text": "def __vectorProcess(self,measurements):\n #Currents\n if(self.__isCalibrated()):\n measurements = np.array(measurements)\n 
sDebug = \"\"\n if(self.__channels[channels.MainCurrent]):\n #Main Coarse\n scale = self.monsoon.statusPacket.mainCoarseScale\n zeroOffset = self.monsoon.statusPacket.mainCoarseZeroOffset\n calRef = self.__mainCal.getRefCal(True)\n calZero = self.__mainCal.getZeroCal(True)\n zeroOffset += calZero\n if(calRef - zeroOffset != 0):\n slope = scale / (calRef - zeroOffset)\n else:\n slope = 0\n Raw = measurements[:,self.__mainCoarseIndex] - zeroOffset\n mainCoarseCurrents = Raw * slope\n\n #Main Fine\n scale = self.monsoon.statusPacket.mainFineScale\n zeroOffset = self.monsoon.statusPacket.mainFineZeroOffset\n calRef = self.__mainCal.getRefCal(False)\n calZero = self.__mainCal.getZeroCal(False)\n zeroOffset += calZero\n if(calRef - zeroOffset != 0):\n slope = scale / (calRef - zeroOffset)\n else:\n slope = 0\n Raw = measurements[:,self.__mainFineIndex] - zeroOffset\n mainFinecurrents = Raw * slope / 1000\n mainCurrent = np.where(measurements[:,self.__mainFineIndex] < self.__fineThreshold, mainFinecurrents, mainCoarseCurrents)\n self.__addMeasurement(channels.MainCurrent,mainCurrent)\n #self.__mainCurrent.append(mainCurrent)\n sDebug = \"Main Current: \" + repr(round(mainCurrent[0],2))\n\n if(self.__channels[channels.USBCurrent]):\n #USB Coarse\n scale = self.monsoon.statusPacket.usbCoarseScale\n zeroOffset = self.monsoon.statusPacket.usbCoarseZeroOffset\n calRef = self.__usbCal.getRefCal(True)\n calZero = self.__usbCal.getZeroCal(True)\n zeroOffset += calZero\n if(calRef - zeroOffset != 0):\n slope = scale / (calRef - zeroOffset)\n else:\n slope = 0\n Raw = measurements[:,self.__usbCoarseIndex] - zeroOffset\n usbCoarseCurrents = Raw * slope\n\n #USB Fine\n scale = self.monsoon.statusPacket.usbFineScale\n zeroOffset = self.monsoon.statusPacket.usbFineZeroOffset\n calRef = self.__usbCal.getRefCal(False)\n calZero = self.__usbCal.getZeroCal(False)\n zeroOffset += calZero\n if(calRef - zeroOffset != 0):\n slope = scale / (calRef - zeroOffset)\n else:\n slope = 0\n Raw = measurements[:,self.__usbFineIndex] - zeroOffset\n usbFineCurrents = Raw * slope/ 1000\n usbCurrent = np.where(measurements[:,self.__usbFineIndex] < self.__fineThreshold, usbFineCurrents, usbCoarseCurrents)\n self.__addMeasurement(channels.USBCurrent,usbCurrent)\n #self.__usbCurrent.append(usbCurrent)\n sDebug = sDebug + \" USB Current: \" + repr(round(usbCurrent[0], 2))\n\n if(self.__channels[channels.AuxCurrent]):\n #Aux Coarse\n scale = self.monsoon.statusPacket.auxCoarseScale\n zeroOffset = 0\n calRef = self.__auxCal.getRefCal(True)\n calZero = self.__auxCal.getZeroCal(True)\n zeroOffset += calZero\n if(calRef - zeroOffset != 0):\n slope = scale / (calRef - zeroOffset)\n else:\n slope = 0\n Raw = measurements[:,self.__auxCoarseIndex] - zeroOffset\n auxCoarseCurrents = Raw * slope\n\n #Aux Fine\n scale = self.monsoon.statusPacket.auxFineScale\n zeroOffset = 0\n calRef = self.__auxCal.getRefCal(False)\n calZero = self.__auxCal.getZeroCal(False)\n zeroOffset += calZero\n if(calRef - zeroOffset != 0):\n slope = scale / (calRef - zeroOffset)\n else:\n slope = 0\n Raw = measurements[:,self.__auxFineIndex] - zeroOffset\n auxFineCurrents = Raw * slope / 1000\n auxCurrent = np.where(measurements[:,self.__auxFineIndex] < self.__auxFineThreshold, auxFineCurrents, auxCoarseCurrents)\n self.__addMeasurement(channels.AuxCurrent,auxCurrent)\n #self.__auxCurrent.append(auxCurrent)\n sDebug = sDebug + \" Aux Current: \" + repr(round(auxCurrent[0], 2))\n\n #Voltages\n if(self.__channels[channels.MainVoltage]):\n mainVoltages = 
measurements[:,self.__mainVoltageIndex] * self.__ADCRatio * self.__mainVoltageScale\n self.__addMeasurement(channels.MainVoltage,mainVoltages)\n #self.__mainVoltage.append(mainVoltages)\n\n sDebug = sDebug + \" Main Voltage: \" + repr(round(mainVoltages[0],2))\n\n\n if(self.__channels[channels.USBVoltage]):\n usbVoltages = measurements[:,self.__usbVoltageIndex] * self.__ADCRatio * self.__usbVoltageScale\n self.__addMeasurement(channels.USBVoltage,usbVoltages)\n #self.__usbVoltage.append(usbVoltages)\n sDebug = sDebug + \" USB Voltage: \" + repr(round(usbVoltages[0],2))\n timeStamp = measurements[:,self.__timestampIndex]\n self.__addMeasurement(channels.timeStamp,timeStamp)\n\n #self.__timeStamps.append(timeStamp)\n sDebug = sDebug + \" Dropped: \" + repr(self.dropped)\n sDebug = sDebug + \" Total Sample Count: \" + repr(self.__sampleCount)\n\n if(self.__outputConsoleMeasurements):\n print(sDebug)\n\n if not self.__startTriggerSet:\n self.__ClearOutput()", "title": "" }, { "docid": "d42225246a3f1569a79127c23c8a2f49", "score": "0.50751877", "text": "def trigger_control(self, event):\n #first = rospy.get_rostime()\n if not self.run:\n return\n\n if self.are_prerquisites_incomplete():\n # missing some information to calculate the the control variable\n if self.notification_no_pos:\n self.notification_no_pos = False\n print self.name,'no positions received, if the simulation/localization running?'\n self.stop_experiment()\n\n self.Pub_twist.publish(Twist())\n return\n\n self.notification_no_pos = True # reset the notification, in case something stops working\n # get the analytical value of the state vector q at time ti for the shape\n ti = toSecs(rospy.Time.now()-self.current_plan.start_t)\n coefficients = self.current_plan.params\n #if self.ID==0:\n # print self.name,ti\n coefficients = [(coefficients[0],coefficients[1],coefficients[2]),\n (coefficients[3], coefficients[4], coefficients[5]),\n (coefficients[6], coefficients[7], coefficients[8])]\n q = np.array([aq * (ti ** 2) / 2. + bq * ti + cq for aq, bq, cq in coefficients]) # q = [x y a]\n q_dot = np.array([aq * ti + bq for aq, bq, cq in coefficients])\n\n # saturate the variables... 
bad way to fix issues, the planner module should deal with it\n for i in range(self.exp_obj.n_dim/2):\n q[i] = max(q[i],self.X[i][0])\n q[i] = min(q[i], self.X[i][1])\n\n x_shape, y_shape, a = q\n x_dot, y_dot, a_dot = q_dot\n #if self.ID==0:\n # print q,self.current_plan.start_q\n\n # Publish the current state of the shape to update the explored area (the covered area is always assumed to be\n # free of obstacles)\n state = Float32MultiArray()\n state.layout.dim.append(MultiArrayDimension())\n state.layout.dim[0].label = \"state\"\n state.layout.dim[0].size = len(self.exp_obj.q_init)\n state.data = np.append(q,q_dot)\n self.Pub_state.publish(state)\n\n\n rho, phi = Convert2Polar(self.own_pose.position.x, self.own_pose.position.y, x_shape, y_shape)\n\n # Calculate the reference distance for each agent (depends on the shape parameters a and b and the current angle phi\n A = self.exp_obj.shape_params['area']\n b = A / (np.pi * a)\n ref_rho = a * b / (np.sqrt(b ** 2 * np.cos(phi) ** 2 + a ** 2 * np.sin(phi) ** 2)) # reference rho for each\n\n self.draw_shape(x=x_shape,y=y_shape,a=a,b=b)\n\n # Calculate the reference phi, average between its neighbors and an additional rotation term\n # phi[i+1] left\n # phi[i-1] right\n\n\n #if self.ID==0:\n # print self.name,'yaw',round(self_yaw/np.pi*180)\n\n\n\n #print self_yaw\n _,left_phi = Convert2Polar(self.left_pose.position.x, self.left_pose.position.y, x_shape, y_shape)\n _,right_phi = Convert2Polar(self.right_pose.position.x, self.right_pose.position.y, x_shape, y_shape)\n angle_diff = np.arctan2(np.sin(left_phi-right_phi), np.cos(left_phi-right_phi))\n if angle_diff<0:\n angle_diff+=2*np.pi\n phi_av = angle_diff / 2. + right_phi\n if phi_av<-np.pi:\n phi_av+=2*np.pi\n if phi_av>np.pi:\n phi_av-=2*np.pi\n phi_error = np.arctan2(np.sin(phi_av - phi), np.cos(phi_av - phi))\n #if self.ID==0:\n # print self.name,'phi ref',round(phi_av/np.pi*180),'phi error',round(phi_error/np.pi*180),'phi diff',round(angle_diff/np.pi*180)\n\n dot_phi = self.omeg + self.kphi * phi_error\n if self.verbose>=2:\n print self.name,'left phi',left_phi,'phi',phi,'right phi',right_phi,'angle diff',angle_diff,'phi_av',phi_av,'phi_error',phi_error\n\n partial_derivative_a = (-(A * (2 * a * np.sin(phi) ** 2 - 2 * A ** 2 * np.cos(phi) ** 2 / (np.pi ** 2 * a ** 3))) /\n (2 * np.pi * (A ** 2 * np.cos(phi) ** 2 / (np.pi ** 2 * a ** 2) + a ** 2 * np.sin(phi) ** 2) ** (3 / 2)))\n partial_derivative_phi = (- (A * (a ** 4 - A ** 2 / np.pi ** 2) * np.sin(2 * phi)) /\n (2 * np.pi * a ** 2 * (\n (A ** 2 * np.cos(phi) ** 2) / (np.pi ** 2 * a ** 2) + a ** 2 * np.sin(phi) ** 2) ** (\n 3 / 2)))\n ref_rho_dot = partial_derivative_a * a_dot + partial_derivative_phi * dot_phi\n\n dot_rho = ref_rho_dot + self.kp * (ref_rho - rho)\n\n\n\n #print self.name,'(ref_rho - rho)',round(ref_rho - rho,3)\n vel_vec = np.zeros(2)\n vel_vec[0] = dot_rho * np.cos(phi) - rho * dot_phi * np.sin(phi) + x_dot\n vel_vec[1] = dot_rho * np.sin(phi) + rho * dot_phi * np.cos(phi) + y_dot\n\n # feedback linearization\n d = self.feedback_d\n v = np.cos(self.yaw)*vel_vec[0]+np.sin(self.yaw)*vel_vec[1]\n #if v<0.:\n # v=0.\n if np.abs(v)>self.exp_obj.robot_vel:\n v = self.exp_obj.robot_vel*np.sign(v)\n\n\n w = -np.sin(self.yaw)/d*vel_vec[0]+np.cos(self.yaw)/d*vel_vec[1]\n if np.abs(w)>self.exp_obj.robot_rot:\n w = self.exp_obj.robot_rot*np.sign(w)\n if self.cnter==0:\n self.cnter = 30\n\n print self.name, 'v', round(v, 3),'w',round(w,3), 'e_rho',round(self.kp * (ref_rho - rho),3),'e_phi',\\\n round(self.kphi * 
phi_error,3),'angle(v)',round(np.arctan2(vel_vec[1],vel_vec[0])/np.pi*180)\n self.cnter -= 1\n\n self.logfile.write(\"%f %f %f %f %f\\r\\n\" % (toSecs(rospy.Time.now()),(ref_rho - rho), phi_error,v,w))\n\n # publish\n new_cmd = Twist()\n new_cmd.linear.x = v\n new_cmd.angular.z = w\n if self.IS_SIM:\n new_cmd.linear.x *= 10\n self.Pub_twist.publish(new_cmd)\n #print self.name,'time diff',round(toSecs(rospy.get_rostime()-first),5)", "title": "" }, { "docid": "8875c86f43f1aaa6aee46312932feac6", "score": "0.5072562", "text": "def test_measure_control_qubit(self):\n bk = Backend(name='ibmq', device='qasm_simulator')\n qc = QComp(qubit_num=2, cmem_num=3, backend=bk)\n res = qc.h(0).cx(0,1).measure([0],[0]).x(0, ctrl=0).x(1, ctrl=0).measure([0,1]).run(shots=10)\n self.assertEqual(res['measured_qid'], [0,1])\n self.assertEqual(res['frequency']['00'], 10)", "title": "" }, { "docid": "21cfbb4273b762a575d3251dd48d6792", "score": "0.50629294", "text": "def qKq(self):\n raise NotImplementedError", "title": "" }, { "docid": "60c55bae19bf388cb787f414d21715cc", "score": "0.5058339", "text": "def update_data(self):\n\n # Get the current slider values\n \n func = self.text.value\n CFL = self.CFL.value\n solver = self.solver.value\n time = self.time.value\n CFL = str(float(CFL))\n Period = self.Period.value\n Period = str(float(Period))\n time = str(float(time))\n print \"CFL, time: \", CFL, time\n print solver\n \n if func =='step':\n X = np.array([])\n Y = np.array([])\n cpath = 'solutions/' + func +'/' + solver +'/' + CFL + '/'\n #with open(path + '0')\n tpath = cpath + time + '.txt'\n with open(tpath,'r') as filename:\n for line in filename:\n Xtemp = float(line.split()[0])\n Ytemp = float(line.split()[1])\n X = np.append(X, Xtemp)\n Y = np.append(Y, Ytemp)\n filename.close()\n elif func=='sine':\n X = np.array([])\n Y = np.array([])\n cpath = 'solutions/' + func +'/' + solver +'/' + Period +'/' + CFL + '/'\n #with open(path + '0')\n tpath = cpath + time + '.txt'\n with open(tpath,'r') as filename:\n for line in filename:\n Xtemp = float(line.split()[0])\n Ytemp = float(line.split()[1])\n X = np.append(X, Xtemp)\n Y = np.append(Y, Ytemp)\n filename.close()\n \n\n logging.debug(\n \"PARAMS: CFL: %s time: %s\", self.CFL.value,\n self.time.value\n )\n\n self.source.data = dict(x=X, y=Y)", "title": "" }, { "docid": "56d88eefdffc6069bbcc6a2d06900f2a", "score": "0.50579375", "text": "def __init__(self,model,units,rep='narrowband',nmels=128,waveletype='morl'):\n self.model_type=model\n self.units=units\n self.rep=rep\n self.PATH=os.path.dirname(os.path.abspath(__file__)) \n self.nmels=nmels\n self.waveletype = waveletype\n \n pt_path = self.PATH+\"/pts/\"+self.rep+\"/\"\n\n if not os.path.isdir(pt_path): \n print(\"Inputs are wrong or 'pts' directory is incorrect...\")\n \n# try:\n SCALERS = pd.read_csv(\"scales.csv\")\n if self.rep=='narrowband' or self.rep=='broadband':\n self.min_scaler= float(SCALERS['Min '+self.rep+' Scale']) #MIN value of total energy.\n self.max_scaler= float(SCALERS['Max '+self.rep+' Scale']) #MAX value of total energy.\n else:\n self.min_scaler=-10.429498640058068\n self.max_scaler=10.460396126590783\n \n# except:\n# print(\"Scalers not found..\")\n \n \n if model==\"CAE\":\n if rep=='narrowband' or rep=='broadband':\n self.AE=CAEn(units)\n if torch.cuda.is_available():\n self.AE.load_state_dict(torch.load(pt_path+\"/\"+str(units)+'_CAE.pt')['model'])\n self.AE.cuda()\n else:\n self.AE.load_state_dict(torch.load(pt_path+\"/\"+str(units)+'_CAE.pt', map_location='cpu')['model'])\n elif 
rep=='wvlt':\n self.AE=wvCAEn(units)\n if torch.cuda.is_available():\n self.AE.load_state_dict(torch.load(pt_path+\"/\"+str(units)+'_CAE.pt')['model'])\n self.AE.cuda()\n else:\n self.AE.load_state_dict(torch.load(pt_path+\"/\"+str(units)+'_CAE.pt', map_location='cpu')['model'])\n elif rep=='mc_fuse':\n self.AE=mcCAEn(units)\n if torch.cuda.is_available():\n self.AE.load_state_dict(torch.load(pt_path+\"/\"+str(units)+'_CAE.pt')['model'])\n self.AE.cuda()\n else:\n self.AE.load_state_dict(torch.load(pt_path+\"/\"+str(units)+'_CAE.pt', map_location='cpu')['model'])\n elif rep=='full_narrowband' or rep=='full_broadband':\n self.AE=fullCAEn(units)\n if torch.cuda.is_available():\n self.AE.load_state_dict(torch.load(pt_path+\"/\"+str(units)+'_CAE.pt')['model'])\n self.AE.cuda()\n else:\n self.AE.load_state_dict(torch.load(pt_path+\"/\"+str(units)+'_CAE.pt', map_location='cpu')['model'])\n elif model==\"RAE\":\n if rep=='narrowband' or rep=='broadband':\n self.AE=RAEn(units)\n if torch.cuda.is_available():\n self.AE.load_state_dict(torch.load(pt_path+\"/\"+str(units)+'_RAE.pt'))\n self.AE.cuda()\n else:\n self.AE.load_state_dict(torch.load(pt_path+\"/\"+str(units)+'_RAE.pt', map_location='cpu'))\n elif rep=='wvlt':\n self.AE=wvRAEn(units)\n if torch.cuda.is_available():\n self.AE.load_state_dict(torch.load(pt_path+\"/\"+str(units)+'_RAE.pt'))\n self.AE.cuda()\n else:\n self.AE.load_state_dict(torch.load(pt_path+\"/\"+str(units)+'_RAE.pt', map_location='cpu'))\n\n else:\n raise ValueError(\"Model \"+model+\" is not valid. Please choose only CAE or RAE\")", "title": "" }, { "docid": "3f38246e9eb8fa5c65f5fe65a7f25873", "score": "0.50578004", "text": "def update_target_q(self):\n with torch.no_grad():\n # Polyak averaging:\n critics_params = chain(\n self._critic_1.parameters(), self._critic_2.parameters()\n )\n targets_params = chain(\n self.critic_1_targ.parameters(), self.critic_2_targ.parameters()\n )\n for q_params, targ_params in zip(critics_params, targets_params):\n targ_params.data.mul_(1 - self.tau)\n targ_params.data.add_((self.tau) * q_params.data)", "title": "" }, { "docid": "3dbf2fc147a591938dc2ad60a70622b9", "score": "0.5057075", "text": "def get_position_arrays(self):\n\n\t\tif self._got_refined_param == False and self.refine_region == True:\n\t\t\tself.get_caustic_param(refine_region=self.refine_region)\n\n\t\tif 'caustic' in self.region:\n\t\t\t\"\"\"Defines the grid to be centered on the caustic,\n\t\t\tdetermined by approximations in get_size_caustics() and\n\t\t\tassign_center_caustics().\n\t\t\t\"\"\"\n\n\t\t\tregion_xmin = self.xcenter_caustic - 0.8*self.width_caustic\n\t\t\tregion_xmax = self.xcenter_caustic + 0.8*self.width_caustic\n\t\t\tregion_ymin = -0.8*self.height_caustic + self.ycenter_caustic\n\t\t\tregion_ymax = 0.8*self.height_caustic + self.ycenter_caustic\n\n\t\telif 'onax_cusp' in self.region:\n\t\t\tregion_xmin = self.xcenter_caustic + 0.55*self.width_caustic\n\t\t\tregion_xmax = self.xcenter_caustic + 0.8*self.width_caustic\n\t\t\tregion_ymin = -0.10*self.height_caustic + self.ycenter_caustic\n\t\t\tregion_ymax = 0.10*self.height_caustic + self.ycenter_caustic\n\n\t\telif 'offax_cusp' in self.region:\n\t\t\tregion_xmin = self.xcenter_caustic - 0.10*self.width_caustic\n\t\t\tregion_xmax = self.xcenter_caustic + 0.10*self.width_caustic\n\t\t\tregion_ymin = 0.55*self.height_caustic + self.ycenter_caustic\n\t\t\tregion_ymax = 0.8*self.height_caustic + self.ycenter_caustic\n\n\t\telif 'both' in self.region:\n\t\t\tregion_xmin = -0.5*self.s\n\t\t\tregion_xmax = 
0.5*self.s\n\t\t\tregion_ymin = -0.5*self.s\n\t\t\tregion_ymax = 0.5*self.s\n\n\t\telif 'custom' in self.region:\n\t\t\t(xmin, xmax, ymin, ymax) = (*self.region_lim,)\n\t\t\tgrid_xmin = self.xcenter_caustic + 0.5*xmin*self.width_caustic\n\t\t\tgrid_xmax = self.xcenter_caustic + 0.5*xmax*self.width_caustic\n\t\t\tgrid_ymin = 0.5*ymin*self.height_caustic + self.ycenter_caustic\n\t\t\tgrid_ymax = 0.5*ymax*self.height_caustic + self.ycenter_caustic\n\n\t\t\txcent_grid = (grid_xmax + grid_xmin) / 2.\n\t\t\tycent_grid = (grid_ymax + grid_ymin) / 2.\n\t\t\tself._custom_xshift = xcent_grid - self.xcenter_caustic\n\t\t\tself._custom_yshift = ycent_grid - self.ycenter_caustic\n\t\t\tself.assign_center_caustic()\n\n\t\t\tregion_xmin = self.xcenter_plot - 0.5*(grid_xmax - grid_xmin)\n\t\t\tregion_xmax = self.xcenter_plot + 0.5*(grid_xmax - grid_xmin)\n\t\t\tregion_ymin = self.ycenter_plot - 0.5*(grid_ymax - grid_ymin)\n\t\t\tregion_ymax = self.ycenter_plot + 0.5*(grid_ymax - grid_ymin)\n\n\t\telse:\n\t\t\traise ValueError('Unknown region {:}'.format(self.region))\n\n\t\tx_grid = np.linspace(region_xmin, region_xmax, self.res)\n\t\ty_grid = np.linspace(region_ymin, region_ymax, self.res)\n\t\tself.x_array = np.zeros(self.res**2)\n\t\tself.y_array = np.zeros(self.res**2)\n\n\t\tfor (i, xx) in enumerate(x_grid):\n\t\t\tfor (j, yy) in enumerate(y_grid):\n\t\t\t\tidx = self.res*i + j\n\t\t\t\tself.x_array[idx] = xx\n\t\t\t\tself.y_array[idx] = yy", "title": "" }, { "docid": "9aaab139251bd2833d75d5c975636f3f", "score": "0.5055029", "text": "def update(self, gx, gy, gz, ax, ay, az, mx, my, mz):\n\n recipNorm = 0\n q0q0 = q0q1 = q0q2 = q0q3 = q1q1 = q1q2 = q1q3 = q2q2 = q2q3 = q3q3 = 0\n hx = hy = bx = bz = 0\n halfvx = halfvy = halfvz = halfwx = halfwy = halfwz = 0\n halfex = halfey = halfez = 0\n qa = qb = qc = 0\n\n # Use IMU algorithm if magnetometer measurement invalid\n # (avoids NaN in magnetometer normalisation)\n if (mx == 0.0) and (my == 0.0) and (mz == 0.0):\n self.update_IMU(gx, gy, gz, ax, ay, az)\n return\n\n # Convert gyroscope degrees/sec to radians/sec\n gx *= 0.0174533\n gy *= 0.0174533\n gz *= 0.0174533\n\n # Compute feedback only if accelerometer measurement valid\n # (avoids NaN in accelerometer normalisation)\n if not ((ax == 0.0) and (ay == 0.0) and (az == 0.0)):\n # Normalise accelerometer measurement\n recipNorm = self._inv_sqrt(ax * ax + ay * ay + az * az)\n ax *= recipNorm\n ay *= recipNorm\n az *= recipNorm\n\n # Normalise magnetometer measurement\n recipNorm = self._inv_sqrt(mx * mx + my * my + mz * mz)\n mx *= recipNorm\n my *= recipNorm\n mz *= recipNorm\n\n # Auxiliary variables to avoid repeated arithmetic\n q0q0 = self.q0 * self.q0\n q0q1 = self.q0 * self.q1\n q0q2 = self.q0 * self.q2\n q0q3 = self.q0 * self.q3\n q1q1 = self.q1 * self.q1\n q1q2 = self.q1 * self.q2\n q1q3 = self.q1 * self.q3\n q2q2 = self.q2 * self.q2\n q2q3 = self.q2 * self.q3\n q3q3 = self.q3 * self.q3\n\n # Reference direction of Earth's magnetic field\n hx = 2.0 * (\n mx * (0.5 - q2q2 - q3q3) + my * (q1q2 - q0q3) + mz * (q1q3 + q0q2)\n )\n hy = 2.0 * (\n mx * (q1q2 + q0q3) + my * (0.5 - q1q1 - q3q3) + mz * (q2q3 - q0q1)\n )\n bx = math.sqrt(hx * hx + hy * hy)\n bz = 2.0 * (\n mx * (q1q3 - q0q2) + my * (q2q3 + q0q1) + mz * (0.5 - q1q1 - q2q2)\n )\n\n # Estimated direction of gravity and magnetic field\n halfvx = q1q3 - q0q2\n halfvy = q0q1 + q2q3\n halfvz = q0q0 - 0.5 + q3q3\n halfwx = bx * (0.5 - q2q2 - q3q3) + bz * (q1q3 - q0q2)\n halfwy = bx * (q1q2 - q0q3) + bz * (q0q1 + q2q3)\n halfwz = bx * (q0q2 + 
q1q3) + bz * (0.5 - q1q1 - q2q2)\n\n # Error is sum of cross product between estimated direction\n # and measured direction of field vectors\n halfex = (ay * halfvz - az * halfvy) + (my * halfwz - mz * halfwy)\n halfey = (az * halfvx - ax * halfvz) + (mz * halfwx - mx * halfwz)\n halfez = (ax * halfvy - ay * halfvx) + (mx * halfwy - my * halfwx)\n\n # Compute and apply integral feedback if enabled\n if self.twoKi > 0.0:\n # integral error scaled by Ki\n self.integralFBx += self.twoKi * halfex * self.invSampleFreq\n self.integralFBy += self.twoKi * halfey * self.invSampleFreq\n self.integralFBz += self.twoKi * halfez * self.invSampleFreq\n gx += self.integralFBx # apply integral feedback\n gy += self.integralFBy\n gz += self.integralFBz\n else:\n self.integralFBx = 0.0 # prevent integral windup\n self.integralFBy = 0.0\n self.integralFBz = 0.0\n\n # Apply proportional feedback\n gx += self.twoKp * halfex\n gy += self.twoKp * halfey\n gz += self.twoKp * halfez\n\n # Integrate rate of change of quaternion\n gx *= 0.5 * self.invSampleFreq # pre-multiply common factors\n gy *= 0.5 * self.invSampleFreq\n gz *= 0.5 * self.invSampleFreq\n qa = self.q0\n qb = self.q1\n qc = self.q2\n self.q0 += -qb * gx - qc * gy - self.q3 * gz\n self.q1 += qa * gx + qc * gz - self.q3 * gy\n self.q2 += qa * gy - qb * gz + self.q3 * gx\n self.q3 += qa * gz + qb * gy - qc * gx\n\n # Normalise quaternion\n recipNorm = self._inv_sqrt(\n self.q0 * self.q0\n + self.q1 * self.q1\n + self.q2 * self.q2\n + self.q3 * self.q3\n )\n self.q0 *= recipNorm\n self.q1 *= recipNorm\n self.q2 *= recipNorm\n self.q3 *= recipNorm\n self._anglesComputed = False", "title": "" }, { "docid": "b03fef41b4ddddbb9c9c7ae94d01cb4c", "score": "0.5053588", "text": "def cavi(self):\n self.pi = self.update_pi()\n assert self.pi is not None, \"Update for pi not implemented\"\n self.s2 = self.update_s2()\n assert self.s2 is not None, \"Update for s2 not implemented\"\n self.m = self.update_m()\n assert self.m is not None, \"Update for m not implemented\"", "title": "" }, { "docid": "0dd617bbfcb3d0b61b94c13d253461a3", "score": "0.5052499", "text": "def __update_c(self):\n if self.jindex is None:\n jindex = 0\n else:\n jindex = self.jindex\n\n if self.qlim is None:\n if self.axis[0] == \"R\":\n qlim = array([-pi, pi])\n else:\n qlim = array([0, 1])\n else:\n qlim = self.qlim\n\n ET_update(\n self.fknm,\n self._isstaticsym,\n self.isjoint,\n self.isflip,\n jindex,\n self.__axis_to_number(self.axis),\n self._T,\n qlim,\n )", "title": "" }, { "docid": "bf79b3ce0e7c23591af02fd1a5e496ca", "score": "0.5052254", "text": "def qtp(self):\n psi_bt = -(self.qc_1[2] + self.qc_2[2]) / (self.ll[:, np.newaxis] ** 2 + self.kk[np.newaxis, :] ** 2) / 2. # (psi_1 + psi_2)/2\n psi_bc = -(self.qc_1[2] - self.qc_2[2]) / (self.ll[:, np.newaxis] ** 2 + self.kk[np.newaxis, :] ** 2 + 1. ) / 2. # (psi_1 - psi_2)/2\n psi_bt[0, 0] = 0.\n self.psic_1[1] = psi_bt + psi_bc\n self.psic_2[1] = psi_bt - psi_bc\n \n psi_bt = -(self.qc_1[1] + self.qc_2[1]) / (self.ll[:, np.newaxis] ** 2 + self.kk[np.newaxis, :] ** 2) / 2. # (psi_1 + psi_2)/2\n psi_bc = -(self.qc_1[1] - self.qc_2[1]) / (self.ll[:, np.newaxis] ** 2 + self.kk[np.newaxis, :] ** 2 + 1. ) / 2. 
# (psi_1 - psi_2)/2\n psi_bt[0, 0] = 0.\n self.psic_1[0] = psi_bt + psi_bc\n self.psic_2[0] = psi_bt - psi_bc", "title": "" }, { "docid": "616ea9567644f87f97bee733a5db4acf", "score": "0.50446606", "text": "def set_values(self):\n value = format_number(self.qstar_container.qstar)\n self.invariant_tcl.SetValue(value)\n value = format_number(self.qstar_container.qstar_err)\n self.invariant_err_tcl.SetValue(value)\n value = format_number(self.qstar_container.qstar_low)\n self.invariant_low_tcl.SetValue(value)\n value = format_number(self.qstar_container.qstar_low_err)\n self.invariant_low_err_tcl.SetValue(value)\n value = format_number(self.qstar_container.qstar_high)\n self.invariant_high_tcl.SetValue(value)\n value = format_number(self.qstar_container.qstar_high_err)\n self.invariant_high_err_tcl.SetValue(value)", "title": "" }, { "docid": "6c8a302844aa4ff4529d69df44c5e563", "score": "0.50427663", "text": "def __init__(self, func,bounds,Noise,Noise_level, acq_name,device='cuda',verbose=1):\n self.X = None # The sampled point in original domain\n self.Y = None # original output\n self.X_S=None # scaled output (The input is scaled [0,1] in all D) \n self.Y_S=None # scaled inpout ( output is scaled as (Y - mu) / sigma )\n self.obj=None\n self.turbo=None\n self.bounds=bounds # original bounds\n self.dim = len(bounds)\n self.bounds_s=np.array([np.zeros(self.dim), np.ones(self.dim)]).T # scaled bounds\n self.func = func\n self.acq_name = acq_name\n scaler = MinMaxScaler(feature_range=(0, 1)) # Tranform from orignal to scaled vales\n scaler.fit(self.bounds.T)\n self.Xscaler=scaler\n self.verbose=verbose\n # self.gp=GaussianProcess(self.bounds_s,verbose=verbose) # GP over observed values\n self.time_opt=0\n self.count=0 # keeps a count of no of times a new value is sampled\n self.var=1\n self.ls=1\n self.Noise=Noise\n self.Noise_level=Noise_level\n self.Noise_S=Noise_level\n self.improv_counter=0\n self.max=0", "title": "" }, { "docid": "90fcdf731edb08a617d2f63d1995dfbe", "score": "0.50357944", "text": "def determine_cube_parameters(self):\n # initialize\n wave_roi = None\n weight_power = None\n\n number_bands = len(self.list_par1)\n spaxelsize = np.zeros(number_bands)\n spectralsize = np.zeros(number_bands)\n rois = np.zeros(number_bands)\n roiw = np.zeros(number_bands)\n power = np.zeros(number_bands)\n softrad = np.zeros(number_bands)\n minwave = np.zeros(number_bands)\n maxwave = np.zeros(number_bands)\n\n for i in range(number_bands):\n if self.instrument == 'MIRI':\n par1 = self.list_par1[i]\n par2 = self.list_par2[i]\n elif self.instrument == 'NIRSPEC':\n par1 = self.list_par1[i]\n par2 = self.list_par2[i]\n\n roiw[i] = self.instrument_info.GetWaveRoi(par1, par2)\n rois[i] = self.instrument_info.GetSpatialRoi(par1, par2)\n\n a_scale, b_scale, w_scale = self.instrument_info.GetScale(par1,\n par2)\n spaxelsize[i] = a_scale\n spectralsize[i] = w_scale\n\n power[i] = self.instrument_info.GetMSMPower(par1, par2)\n softrad[i] = self.instrument_info.GetSoftRad(par1, par2)\n minwave[i] = self.instrument_info.GetWaveMin(par1, par2)\n maxwave[i] = self.instrument_info.GetWaveMax(par1, par2)\n# Check the spatial size. 
If it is the same for the array set up the parameters\n all_same = np.all(spaxelsize == spaxelsize[0])\n if all_same:\n self.spatial_size = spaxelsize[0]\n spatial_roi = rois[0]\n else:\n index_min = np.argmin(spaxelsize)\n self.spatial_size = spaxelsize[index_min]\n spatial_roi = rois[index_min]\n# find min and max wavelength\n min_wave = np.amin(minwave)\n max_wave = np.amax(maxwave)\n\n if self.wavemin is None:\n self.wavemin = min_wave\n else:\n self.wavemin = np.float64(self.wavemin)\n\n if self.wavemax is None:\n self.wavemax = max_wave\n else:\n self.wavemax = np.float64(self.wavemax)\n\n# now check spectral step\n all_same_spectral = np.all(spectralsize == spectralsize[0])\n\n# check if scalew has been set - if yes then linear scale\n if self.scalew != 0:\n self.spectral_size = self.scalew\n self.linear_wavelength = True\n wave_roi = np.amin(roiw)\n weight_power = np.amin(power)\n self.soft_rad = np.amin(softrad)\n elif all_same_spectral:\n self.spectral_size = spectralsize[0]\n wave_roi = roiw[0]\n weight_power = power[0]\n self.soft_rad = softrad[0]\n else:\n self.linear_wavelength = False\n if self.instrument == 'MIRI':\n table = self.instrument_info.Get_multichannel_table()\n table_wavelength, table_sroi, table_wroi, table_power, table_softrad = table\n\n # getting NIRSPEC Table Values\n elif self.instrument == 'NIRSPEC':\n # determine if have Prism, Medium or High resolution\n med = ['g140m', 'g235m', 'g395m']\n high = ['g140h', 'g235h', 'g395h']\n prism = ['prism']\n\n for i in range(number_bands):\n par1 = self.list_par1[i]\n if par1 in prism:\n table = self.instrument_info.Get_prism_table()\n if par1 in med:\n table = self.instrument_info.Get_med_table()\n if par1 in high:\n table = self.instrument_info.Get_high_table()\n table_wavelength, table_sroi, table_wroi, table_power, table_softrad = table\n # based on Min and Max wavelength - pull out the tables values that fall in this range\n # find the closest table entries to the self.wavemin and self.wavemax limits\n imin = (np.abs(table_wavelength - self.wavemin)).argmin()\n imax = (np.abs(table_wavelength - self.wavemax)).argmin()\n if imin > 1 and table_wavelength[imin] > self.wavemin:\n imin = imin - 1\n if (imax < len(table_wavelength) and\n self.wavemax > table_wavelength[imax]): imax = imax + 1\n# print('index of wavelength values',imin,imax)\n\n self.roiw_table = table_wroi[imin:imax]\n self.rois_table = table_sroi[imin:imax]\n if self.num_files < 4:\n self.rois_table = [i*1.5 for i in self.rois_table]\n\n self.softrad_table = table_softrad[imin:imax]\n self.weight_power_table = table_power[imin:imax]\n self.wavelength_table = table_wavelength[imin:imax]\n\n # check if the user has set the cube parameters to use\n if self.rois == 0:\n self.rois = spatial_roi\n if self.output_type == 'single' or self.num_files < 4:\n self.rois = self.rois * 1.5\n log.info('Increasing spatial region of interest ' +\n 'default value set for 4 dithers %f', self.rois)\n if self.scale1 != 0:\n self.spatial_size = self.scale1\n\n # set wave_roi, weight_power, soft_rad to same values if they are in list\n if self.roiw == 0:\n self.roiw = wave_roi\n if self.weight_power == 0:\n self.weight_power = weight_power", "title": "" }, { "docid": "52e684c1d3a6c29b65ee15a60b0e64e9", "score": "0.5034553", "text": "def optimize(self):\n n = self.no_of_robots\n msg = target_positions_5quad()\n m = Model(\"qcp\") \n x = m.addVars(n, lb = -15, ub = 15, vtype=GRB.CONTINUOUS, name=\"x\")\n y = m.addVars(n, lb = -15, ub = 15, vtype=GRB.CONTINUOUS, name=\"y\")\n 
z = m.addVars(n, lb = 0, ub = 15, vtype=GRB.CONTINUOUS, name=\"z\")\n #p = m.addVars(n, vtype=GRB.CONTINUOUS, name=\"p\")\n\n Q = np.eye(n,n)\n m.update()\n w1 = 0.8; w2 = 0.1; w3 = 0.1; w4 = 0.1; w5 = 0.1; w = [w1, w2, w3, w4, w5]\n obj1 = quicksum(w[i]*(x[i]-self.xt[0]) * quicksum(Q[i][j] * (x[j]-self.xt[0]) for j in range(n)) for i in range(n))\n obj2 = quicksum(w[i]*(y[i]-self.xt[1]) * quicksum(Q[i][j] * (y[j]-self.xt[1]) for j in range(n)) for i in range(n))\n obj3 = quicksum(w[i]*(z[i]-self.xt[2]) * quicksum(Q[i][j] * (z[j]-self.xt[2]) for j in range(n)) for i in range(n))\n obj = obj1 + obj2 + obj3\n\n # since greater than and equal to constraints are not convex, we need to approximate them\n # see the unsolved problem 8.27 from Convex Optimization by S Boyd\n #choose a random initial position for the 3 robots\n if self.counter == 0: \n xr_initial = [self.xt + np.array([np.random.uniform(0.5,1), np.random.uniform(-1,1), \\\n np.random.uniform(0,1)]) for i in range(n)]\n #xr_initial.append(xr_initial[0]) # append first element to find a's easily\n else: \n xr_initial = [self.x1, self.x2, self.x3, self.x4, self.x5]\n #xr_initial.append(xr_initial[0])\n \n b = [(xr_initial[i]-self.xt)/np.linalg.norm(xr_initial[i]-self.xt) for i in range(n)]\n m.addConstr(b[0][0]*(x[0]-self.xt[0]) + b[0][1]*(y[0]-self.xt[1]) + b[0][2]*(z[0]-self.xt[2]) >= self.drt_min)\n m.addConstr(b[1][0]*(x[1]-self.xt[0]) + b[1][1]*(y[1]-self.xt[1]) + b[1][2]*(z[1]-self.xt[2]) >= self.drt_min)\n m.addConstr(b[2][0]*(x[2]-self.xt[0]) + b[2][1]*(y[2]-self.xt[1]) + b[2][2]*(z[2]-self.xt[2]) >= self.drt_min)\n m.addConstr(b[3][0]*(x[3]-self.xt[0]) + b[3][1]*(y[3]-self.xt[1]) + b[3][2]*(z[3]-self.xt[2]) >= self.drt_min) \n m.addConstr(b[4][0]*(x[4]-self.xt[0]) + b[4][1]*(y[4]-self.xt[1]) + b[4][2]*(z[4]-self.xt[2]) >= self.drt_min)\n\n\n if self.counter == 0.0: \n comb = combinations(range(n), 2)\n for k in comb:\n #print k, k[0], k[1]\n s1 = self.xt + np.array([np.random.uniform(-1,1), np.random.uniform(-1,1), np.random.uniform(0,1)])\n s2 = self.xt + np.array([np.random.uniform(-1,1), np.random.uniform(-1,1), np.random.uniform(0,1)])\n a = (s1-s2)/np.linalg.norm(s1-s2)\n\n m.addConstr(a[0]*(x[k[0]]-x[k[1]]) + a[1]*(y[k[0]]-y[k[1]]) + a[2]*(z[k[0]]-z[k[1]]) >= self.drr)\n else: \n comb = combinations(range(n), 2)\n for k in comb: \n \n s1 = self.xtp[k[0]]; s2 = self.xtp[k[1]]\n a = (s1-s2)/np.linalg.norm(s1-s2)\n m.addConstr(a[0]*(x[k[0]]-x[k[1]]) + a[1]*(y[k[0]]-y[k[1]]) + a[2]*(z[k[0]]-z[k[1]]) >= self.drr) \n \n \n \n \n \n #a = [(xr_initial[i]-xr_initial[i+1])/np.linalg.norm(xr_initial[i]-xr_initial[i+1]) for i in range(len(xr_initial)-1)]\n #b = [(xr_initial[i]-self.xt)/np.linalg.norm(xr_initial[i]-self.xt) for i in range(len(xr_initial)-1)]\n \n #m.addConstr(a[0][0]*(x[0]-x[1]) + a[0][1]*(y[0]-y[1]) + a[0][2]*(z[0]-z[1])>= self.drr)\n #m.addConstr(a[1][0]*(x[1]-x[2]) + a[1][1]*(y[1]-y[2]) + a[1][2]*(z[1]-z[2]) >= self.drr)\n #m.addConstr(a[2][0]*(x[2]-x[0]) + a[2][1]*(y[2]-y[0]) + a[2][2]*(z[2]-z[0])>= self.drr)\n \n \n #m.addConstr(a[3][0]*(x[1]-x[2]) + a[3][1]*(y[1]-y[2]) + a[3][2]*(z[1]-z[2]) >= self.drr)\n #m.addConstr(a[4][0]*(x[2]-x[0]) + a[4][1]*(y[2]-y[0]) + a[4][2]*(z[2]-z[0])>= self.drr)\n \n #m.addConstr(b[0][0]*(x[0]-self.xt[0]) + b[0][1]*(y[0]-self.xt[1]) + b[0][2]*(z[0]-self.xt[2])>= self.drt_min)\n #m.addConstr(b[1][0]*(x[1]-self.xt[0]) + b[1][1]*(y[1]-self.xt[1]) + b[1][2]*(z[1]-self.xt[2])>= self.drt_min)\n #m.addConstr(b[2][0]*(x[2]-self.xt[0]) + b[2][1]*(y[2]-self.xt[1]) + 
b[2][2]*(z[2]-self.xt[2])>= self.drt_min)\n\n m.addConstr(z[0] >= self.xt[2] + self.heigt_difference)\n m.addConstr(z[1] >= self.xt[2] + self.heigt_difference)\n m.addConstr(z[2] >= self.xt[2] + self.heigt_difference)\n m.addConstr(z[3] >= self.xt[2] + self.heigt_difference)\n m.addConstr(z[4] >= self.xt[2] + self.heigt_difference)\n \n #m.addConstr((z[0]-self.xt[2])*(z[0]-self.xt[2]) <= p[0])\n #m.addConstr((x[0]-self.xt[0])*(x[0]-self.xt[0]) + (y[0]-self.xt[1])*(y[0]-self.xt[1]) <= p[0])\n \n m.setObjective(obj, GRB.MINIMIZE)\n #m.write('model.lp')\n m.setParam('OutputFlag', 0) \n m.setParam('PSDtol', 1e-3) \n\n m.optimize()\n \n #runtime = m.Runtime\n\n #if m.status == 2 or m.status == 13: \n # self.x1 = np.array([x[0].X, y[0].X, z[0].X])\n # self.x2 = np.array([x[1].X, y[1].X, z[1].X])\n # self.x3 = np.array([x[2].X, y[2].X, z[2].X])\n # self.xtp = np.array([self.x1, self.x2, self.x3])\n #else: \n # self.x1 = self.x1 + np.array([0.01, 0.01, 0.01])\n # self.x2 = self.x2 + np.array([0.01, 0.01, 0.01])\n # self.x3 = self.x3 + np.array([0.01, 0.01, 0.01])\n # self.xtp = np.array([self.x1, self.x2, self.x3]) \n \n \n \n if m.status == 2 or m.status == 13: \n #opt_subopt += 1\n self.x1 = np.array([x[0].X, y[0].X, z[0].X])\n self.x2 = np.array([x[1].X, y[1].X, z[1].X])\n self.x3 = np.array([x[2].X, y[2].X, z[2].X])\n self.x4 = np.array([x[3].X, y[3].X, z[3].X])\n self.x5 = np.array([x[4].X, y[4].X, z[4].X])\n self.xtp = np.array([self.x1, self.x2, self.x3, self.x4, self.x5])\n else: \n #nosol += 1\n self.x1 = self.x1 + np.array([0.01, 0.01, 0.01])\n self.x2 = self.x2 + np.array([0.01, 0.01, 0.01])\n self.x3 = self.x3 + np.array([0.01, 0.01, 0.01])\n self.x4 = self.x4 + np.array([0.01, 0.01, 0.01])\n self.x5 = self.x5 + np.array([0.01, 0.01, 0.01])\n self.xtp = np.array([self.x1, self.x2, self.x3, self.x4, self.x5])\n\n row_index, col_index = self.hungarian_algorithm() \n \n msg.robot1.x = self.xtp[row_index[0]][0]; msg.robot1.y = self.xtp[row_index[0]][1]; msg.robot1.z = self.xtp[row_index[0]][2]\n msg.robot2.x = self.xtp[row_index[1]][0]; msg.robot2.y = self.xtp[row_index[1]][1]; msg.robot2.z = self.xtp[row_index[1]][2]\n msg.robot3.x = self.xtp[row_index[2]][0]; msg.robot3.y = self.xtp[row_index[2]][1]; msg.robot3.z = self.xtp[row_index[2]][2]\n msg.robot4.x = self.xtp[row_index[3]][0]; msg.robot4.y = self.xtp[row_index[3]][1]; msg.robot4.z = self.xtp[row_index[3]][2]\n msg.robot5.x = self.xtp[row_index[4]][0]; msg.robot5.y = self.xtp[row_index[4]][1]; msg.robot5.z = self.xtp[row_index[4]][2]\n self.pub.publish(msg)\n self.counter += 1", "title": "" }, { "docid": "715c59cc55521cf407bc99dccab86cd7", "score": "0.5029687", "text": "def choose_squad_setup(self):\r\n raise NotImplementedError()", "title": "" }, { "docid": "88d68ff4eaa91bbf09486e59c2986197", "score": "0.50275135", "text": "def _contact_geometry_GCS(self, q):\n # print \"_contact_geometry_GCS()@\",__name__\n # num = self._parent._parent.solver.DAE_fun.number_of_evaluations\n\n # evaluate coordinates in GCS of mid frame section and pin center\n pin_CP_GCS, slot_CP_GCS, self.slot_frame_nodes_GCS, slot_frame_normals_GCS, slot_frame_tangents_GCS = self._contact_geometry_CP_GCS(q)\n\n # vectors in GCS\n self.r_iP = slot_CP_GCS[0]\n self.r_iR = slot_CP_GCS[1]\n self.r_jP = pin_CP_GCS\n\n # center points in GCS\n # self.r_CP_GCS_list = [slot_CP_GCS[0], slot_CP_GCS[1], pin_CP_GCS]\n\n # evaluate position of pin in slot:\n # section jP\n # section jPjR\n # section jR\n # distance object of pin slot clearance joint has 4 distance values as 
array\n if self._contact_point_obj is not None:\n self._contact_point_obj.update_contact_point(self.r_iP, self.r_iR, self.r_jP)\n pin_in_section = self._contact_point_obj.pin_in_section\n self._contact_point_obj.contact_points_GCS(frame_nodes_GCS=self.slot_frame_nodes_GCS)\n\n else:\n self._distance_obj = DistancePSCJ(self.r_iP, self.r_iR, self.r_jP, parent=self)\n pin_in_section = self._distance_obj.pin_in_section\n\n\n if pin_in_section != self.pin_in_section:\n self.section_changed = True\n else:\n self.section_changed = False\n\n\n self.pin_in_section = pin_in_section\n\n\n if self.pin_in_section == \"iPiR\":\n self.contact_model = self.contact_models[1]\n\n # self.slot_frame_nodes_GCS\n\n if self.pin_in_section in [\"iP\", \"iR\"]:\n self.contact_model = self.contact_models[0]\n\n if self._distance_obj is not None and self._contact_point_obj is None:\n # print \"NO CONTACT OBJ\"\n distance, delta, n_GCS, t_GCS = self._distance_obj.contact_geometry_GCS()\n\n else:\n # print \"CONTACT OBJ\"\n distance, delta, n_GCS, t_GCS = self._contact_point_obj.contact_geometry_GCS()\n\n # print \"self.pin_in_section =\", self.pin_in_section, \"delta =\", delta,\n\n # print \"delta =\", delta\n # print \"n_GCS =\", np.rad2deg(np.arctan2(n_GCS[1], n_GCS[0]))\n return distance, delta, n_GCS, t_GCS", "title": "" }, { "docid": "861bcce4d0db9256858f15d95e05ca63", "score": "0.5021511", "text": "def update_function_by_mouse(self, event: tk.Event) -> None:\n bounds = list(self.ax[0].get_xlim())\n bounds.extend(self.ax[0].get_ylim())\n pixel_bounds = [120, 430, 494, 530, 865, 632]\n bounds_ft = list(self.ax[1].get_xlim())\n bounds_ft.extend(self.ax[1].get_ylim())\n pixel_ft_bounds = [120, 78, 493, 180, 863, 279] \n height = self.canvas.get_tk_widget().winfo_height()\n if in_bounds(event, pixel_bounds, height):\n x, y = locate_mouse(event, bounds, height, pixel_bounds)\n change_array(self.x, self.y, x, y)\n self._update_appearance()\n elif in_bounds(event, pixel_ft_bounds, height):\n x, y = locate_mouse(event, bounds_ft, height, pixel_ft_bounds)\n # y = -y if (int(x/(self.freq[1] - self.freq[0]))) % 2 else y\n change_array(self.freq, self.fourier_amps, x, y)\n change_array(self.freq, self.fourier_amps, -x, y)\n # change_array(self.freq, self.fourier_amps, x, y*np.exp(x*1.0j))\n # change_array(self.freq, self.fourier_amps, -x, y*np.exp(-x*1.0j))\n self.line2.set_ydata(np.real(self.fourier_amps))\n self.line3.set_ydata(np.imag(self.fourier_amps))\n self.line4.set_ydata(np.abs(self.fourier_amps))\n self.y = np.fft.ifft(\n np.fft.ifftshift(self.fourier_amps))*len(self.x)\n self.line.set_ydata(np.real(self.y))", "title": "" }, { "docid": "4d875b7bb0e30aab5828c3a54f6bbd93", "score": "0.50158244", "text": "def setup(self):\n # self.logger.setLevel(\"DEBUG\")\n self.train_flag = False\n self.obs_object = ObservationObject(0, ['d_closest_coin_dir',\n 'd_closest_safe_field_dir',\n 'me_has_bomb',\n 'dead_end_detect',\n 'd4_is_safe_to_move_a_l',\n 'd4_is_safe_to_move_b_r',\n 'd4_is_safe_to_move_c_u',\n 'd4_is_safe_to_move_d_d',\n 'd_best_bomb_dropping_dir',\n 'd_closest_enemy_dir'\n ], None)\n\n self.clf = None\n\n with open(\"data/qtables/\" + self.obs_object.get_file_name_string() + \"/3.dt.p\", \"rb\") as f:\n self.clf = pickle.load(f) # FIXME load regression model\n self.CLASSIFIER = True # Indicates whether we are using a classifier or a regressor\n\n self.FORCE_REGRESSION = True # FIXME\n\n self.logger.debug(\"Called setup\")\n # Used for plotting\n self.total_steps_over_episodes = 0\n 
self.total_deaths_over_episodes = 0\n self.number_of_episode = 0\n\n observation_size = self.obs_object.get_observation_size() - 1 #cut out field radius\n # Zx6 array with actions ['UP', 'DOWN', 'LEFT', 'RIGHT', 'BOMB', 'WAIT']\n filename = self.obs_object.get_file_name_string()\n\n try:\n self.logger.info(\"Loading Q tables and quantities for: \" + filename)\n self.q_table = np.load(os.path.join('data', 'qtables', filename, 'q_table-' + filename + '.npy'))\n self.quantities = np.load(os.path.join('data', 'qtables', filename, 'quantity-' + filename + '.npy'))\n self.logger.info('LOADED Q')\n if self.q_table.shape[1] != 6:\n raise Exception('q_table size does not fit')\n except Exception as e:\n self.q_table = np.empty([0, 6])\n self.logger.info(f'OVERWRITTEN: {e}')\n\n # Zx10 array with 3x3 observation around agent plus coin_flag\n try:\n self.observation_db = np.load(os.path.join('data', 'qtables', filename, 'observation-' + filename + '.npy'))\n self.logger.info('LOADED Obs')\n except:\n self.observation_db = np.empty([0,observation_size])\n if self.observation_db.shape[1] != observation_size:\n raise Exception('observation_db size does not fit')\n\n self.repeated_deadlock = 1\n self.last_visited = np.array([[-1, -1], [-1, -1], [-1, -1], [-1, -1]])", "title": "" }, { "docid": "9d1fa8c2d15c0a42cc4c8b4a28d9473e", "score": "0.50113064", "text": "def test_multi_measurements(self, max_workers):\n x, y = np.array(0.732), np.array(0.488)\n qs = qml.tape.QuantumScript(\n [qml.RX(x, wires=0), qml.CNOT(wires=[0, 1]), qml.RY(y, wires=1)],\n [qml.expval(qml.Hadamard(0)), qml.probs(wires=range(2)), qml.sample(wires=range(2))],\n shots=10000,\n )\n\n dev = DefaultQubit2(max_workers=max_workers)\n result = dev.execute(qs)\n\n assert isinstance(result, tuple)\n assert len(result) == 3\n\n assert all(isinstance(res, (float, np.ndarray)) for res in result)\n\n assert result[0].shape == ()\n assert np.allclose(result[0], np.cos(x) / np.sqrt(2), atol=0.1)\n\n assert result[1].shape == (4,)\n assert np.allclose(\n result[1],\n [\n np.cos(x / 2) ** 2 * np.cos(y / 2) ** 2,\n np.cos(x / 2) ** 2 * np.sin(y / 2) ** 2,\n np.sin(x / 2) ** 2 * np.sin(y / 2) ** 2,\n np.sin(x / 2) ** 2 * np.cos(y / 2) ** 2,\n ],\n atol=0.1,\n )\n\n assert result[2].shape == (10000, 2)", "title": "" }, { "docid": "2790c8895458d355e29ce4abc10e5e75", "score": "0.50088745", "text": "def __plot(self, pId: int, fscale: int):\n \n aio = adc(0x49, 0x3e) # instance of AIO_32_0RA_IRC from AIO.py\n # Why this addresses?\n \n totalStep = 0\n step = 0\n \n while not (self.__abort):\n time.sleep(TIMESLEEP)\n \n # READ DATA\n voltage = aio.analog_read_volt(pId, aio.DataRate.DR_860SPS, pga=fscale)\n\n deltaSeconds = (datetime.datetime.now() - self.__startTime).total_seconds()\n if self.__ttype == ThreadType.PRESSURE1:\n m = 10**self.__IGrange\n print(m)\n else:\n m = 1\n value = self.__ttype.getCalcValue(voltage,IGrange=m)\n\n # READ DATA\n # I do not know why this is needed\n # What happens if these two lines are removed?\n # Just reading from two channels, right? 
Some communication problem?\n aio.analog_read_volt(CHP1, aio.DataRate.DR_860SPS, pga=2)\n aio.analog_read_volt(CHP2, aio.DataRate.DR_860SPS, pga=2)\n\n self.__rawData[step] = [deltaSeconds, voltage, self.__presetTemp]\n\n if step%(STEP-1) == 0 and step != 0:\n # average 10 points of data\n aveValue = np.mean(self.__rawData[:, 1], dtype=float)\n # convert vlots to actual value\n aveValue = self.__ttype.getCalcValue(aveValue)\n \n self.__calcData = self.__ttype.getCalcArray(self.__rawData)\n self.sigStep.emit(self.__rawData, self.__calcData, aveValue, self.__ttype, self.__startTime)\n self.__rawData = np.zeros(shape=(STEP, 3))\n self.__calcData = np.zeros(shape=(STEP, 3))\n step = 0\n else:\n step += 1\n totalStep += 1 \n self.__app.processEvents()\n else:\n if self.__rawData[step][0] == 0.0:\n step -= 1\n if step > -1:\n self.__calcData = self.__ttype.getCalcArray(self.__rawData)\n aveValue = np.mean(self.__rawData[:step+1][1], dtype=float)\n aveValue = self.__ttype.getCalcValue(aveValue)\n self.sigStep.emit(self.__rawData[:step+1, :], self.__calcData, aveValue, self.__ttype, self.__startTime)\n self.sigMsg.emit(\n \"Worker #{} aborting work at step {}\".format(self.__id, totalStep)\n )\n\n self.sigDone.emit(self.__id, self.__ttype)\n return", "title": "" }, { "docid": "1bb4c3bd98a8662dccc6845126001c0e", "score": "0.50075895", "text": "def __calibrate_for_minimum(self, dict_pointer,band,index,step, timeout,plot,min_points, args):\n min_points = int(min_points)\n \n def measure(x):\n if dict_pointer == 'Offset chI':\n self.calibration.offI = x\n elif dict_pointer == 'Offset chQ':\n self.calibration.offQ = x\n elif dict_pointer == 'Amplitude ratio':\n self.calibration.ratio = x\n elif dict_pointer == 'Phase correction chQ':\n self.calibration.phase = x\n else:\n print('Unexpected error')\n raise ValueError\n \n self.apply_correction()\n return x,self.measure_SB(False,False, band, *args)[index]\n \n #------------------------------------------------------\n def detect_slope(y0,y1,y2):\n slope1 = y1 - y0\n slope2 = y2 - y1\n \n \n if slope1 > 0 and slope2 > 0:\n status = 'up'\n elif slope1 < 0 and slope2 < 0:\n status = 'down'\n elif slope1 > 0 and slope2 < 0:\n status = 'max'\n elif slope1 < 0 and slope2 > 0:\n status = 'min'\n else:\n raise self.SLOPEEXC('cal ratio values','slope1 and 2 are zero')\n \n \n return status\n \n def refine_scan(x,y,min_points):\n \n if min_points %2 == 0:\n min_points+=1\n \n if len(x)<min_points:\n sweep = self.np.linspace(self.np.min(x),self.np.max(x),min_points)\n else:\n x,y = self.np.array(x),self.np.array(y)\n indexes = self.np.argsort(x)\n return x[indexes],y[indexes]\n \n for s in sweep:\n try:\n x.index(s)\n except ValueError:\n x.append(s)\n y.append(measure(x[-1])[1])\n \n x,y = self.np.array(x),self.np.array(y)\n indexes = self.np.argsort(x)\n return x[indexes],y[indexes]\n \n #------------------------------------------------------\n \n start = self.time.time()\n x,y = [],[]\n \n starting_point = self.calibration.calibration_dictionary[dict_pointer] #at the begin the central point is x[1],y[1]\n \n x.append(starting_point-step)\n y.append(measure(x[0])[1])\n x.append(x[-1]+step)\n y.append(measure(x[1])[1])\n x.append(x[-1]+step)\n y.append(measure(x[2])[1])\n \n if plot:\n progressive_plot_2d(x,y,'o',plt_title=dict_pointer)\n \n slope = detect_slope(*y)\n \n try:\n while(1):\n \n if slope == 'up':\n starting_point -=step\n tmp=measure(starting_point-step) #adding 1 point on the left\n x.insert(0,tmp[0])\n y.insert(0,tmp[1])\n \n slope = 
detect_slope(*y[:3])\n if plot:\n progressive_plot_2d(x,y,'o',plt_title=dict_pointer)\n continue\n if slope == 'down':\n starting_point +=step\n tmp=measure(starting_point+step) #adding 1 point on the right\n x.append(tmp[0])\n y.append(tmp[1])\n slope = detect_slope(*y[-3:])\n if plot:\n progressive_plot_2d(x,y,'o',plt_title=dict_pointer)\n continue\n if slope =='min':\n \n x,y = refine_scan(x,y,min_points)\n if plot:\n progressive_plot_2d(x,y,'o',plt_title=dict_pointer)\n \n return [x[self.np.argmin(y)],self.np.min(y)],x,y,False\n \n elif slope == 'max':\n import random\n starting_point += random.uniform(-2*step, 2*step)\n \n if self.time.time() - start > timeout:\n print('TIMEOUT reached')\n center = x[-2]\n return center,x,y,True\n \n except KeyboardInterrupt:\n return 0,x,y,True", "title": "" }, { "docid": "aba2bc8d4a7c0339f5e56cef3445fd49", "score": "0.5004844", "text": "def _exchange_metric(self):\n dealer = self.solver.dealer\n for arrname in self.solver.solvertype._interface_init_:\n for sdw in dealer: sdw.cmd.exchangeibc(arrname,\n with_worker=True)", "title": "" }, { "docid": "852e256b33dd48c177830a9879898fdf", "score": "0.5004129", "text": "def testing_data(data_set, params, plot = 'y'):\n os.chdir(data_set.input_params['outdir'])\n\n# if data_set.input_params['n_functions'] in [0,1,2]:\n# params = np.append([data_set.input_params['q_value']], params)\n \n if plot == 'y':\n sns.set_context(\"paper\")\n #fig, axs = plt.subplots(4,1, figsize=(10, 20), facecolor='w', edgecolor='k')\n \n count_plot = 0\n if data_set.input_params['n_functions'] in [0,1,2]:\n aux = [data_set.input_params['q_value']]\n aux.extend(params.tolist()[0])\n params = np.array(aux)\n \n for i in range(2):\n if i == 0:\n data = data_set.energy_ts\n elif i == 1:\n data = data_set.force_ts\n \n for _n_water in data_set.input_params['N_water']:\n print(\"SET: {}, # of water molecules {}\".format(data.flag, _n_water))\n y_ref = data.training_set['y_test{}'.format(_n_water)]\n\n q = data.test_set['q_test{}'.format(_n_water)]\n c6 = data.test_set['c6_test{}'.format(_n_water)]\n c12 = data.test_set['c12_test{}'.format(_n_water)] \n\n \n if data_set.input_params['n_vs'] == 0:\n if data_set.input_params['n_functions'] == 3 :\n Hvs = np.matrix([q, c12, c6]).T # MxN functionts\n elif data_set.input_params['n_functions'] == 0 :\n Hvs = np.matrix([q, c12, c6]).T\n \n elif data_set.input_params['n_vs'] == 2:\n # VS Descriptor\n q_vs1 = data.test_set['q_test_vs1{}'.format(_n_water)]\n c6_vs1 = data.test_set['c6_test_vs1{}'.format(_n_water)]\n c12_vs1 = data.test_set['c12_test_vs1{}'.format(_n_water)]\n\n # VS Descriptor\n q_vs2 = data.test_set['q_test_vs2{}'.format(_n_water)]\n c6_vs2 = data.test_set['c6_test_vs2{}'.format(_n_water)]\n c12_vs2 = data.test_set['c12_test_vs2{}'.format(_n_water)]\n \n q_vs = data.test_set['q_test_vs{}'.format(_n_water)]\n c6_vs = data.test_set['c6_test_vs{}'.format(_n_water)]\n c12_vs = data.test_set['c12_test_vs{}'.format(_n_water)]\n\n if data_set.input_params['n_functions'] == 0 :\n Hvs = np.matrix([q, c12, c6, q_vs1, q_vs2]).T # MxNfunctionts\n elif data_set.input_params['n_functions'] == 1 :\n Hvs = np.matrix([q, c12, c6, q_vs1, c12_vs1, q_vs2, c12_vs2]).T\n elif data_set.input_params['n_functions'] == 2 :\n Hvs = np.matrix([q, c12, c6, q_vs1, c12_vs1, c6_vs1, q_vs2, c12_vs2, c6_vs2]).T \n elif data_set.input_params['n_functions'] == 3 :\n Hvs = np.matrix([q, c12, c6, q_vs1, c12_vs1, c6_vs1, q_vs2, c12_vs2, c6_vs2]).T \n elif data_set.input_params['n_functions'] == 4 :\n Hvs = 
np.matrix([q, c12, c6, q_vs1, q_vs2]).T \n elif data_set.input_params['n_functions'] == 5 :\n Hvs = np.matrix([q, c12, c6, q_vs1, c12_vs1, q_vs2, c12_vs2]).T \n elif data_set.input_params['n_functions'] == 6:\n Hvs = np.matrix([q, c12, c6, q_vs, c12_vs, c6_vs]).T \n elif data_set.input_params['n_functions'] == 7:\n Hvs = np.matrix([c12, c6, q_vs, c6_vs]).T \n \n \n y_test = np.dot(Hvs, params.T).reshape(np.shape(y_ref)).T\n\n # OLD PARAMS\n q_old = data_set.input_params['old_params'][0]\n sigma_old = data_set.input_params['old_params'][1]\n epsilon_old = data_set.input_params['old_params'][2]\n\n C12old = (4*(epsilon_old)*(sigma_old**12))**0.5\n C6old = (4*(epsilon_old)*(sigma_old**6))**0.5\n\n c_old = np.array([q_old, C12old, C6old])\n\n H = np.matrix([q, c12, c6]).T\n y_test_old = np.dot(H, c_old.T).T\n y_test_old = np.array(y_test_old)\n \n \n mse_old_test = np.mean(np.abs(y_test_old.T - y_test)) \n\n y_test_s = np.sort(y_test,axis=0)\n y_ref_s = np.sort(y_ref, axis=0)\n y_test_old_s = np.sort(y_test_old,axis=0)\n\n mse_test = np.mean(np.power(y_test_s - y_ref_s, 2))\n mae_test_s = np.mean(np.abs(y_ref_s - y_test_s.T))\n mae_test_old = np.mean(np.abs(y_test_old_s - y_ref_s))\n\n print('MSE (lrr-de) = {}'.format(mse_test))\n print('MAE (opls) = {}'.format(mae_test_old))\n print('MAE (lrr-de) = {}'.format(mae_test_s))\n print(\"-------\")\n if plot == 'y':\n sns.set_context(\"paper\")\n fig = plt.figure(dpi = 100)\n# plt.rc('text', usetex=True)\n# plt.rc('font', family='serif')\n plt.title(\" TEST SET: ${0},{1}$ water molecules\".format(data.flag, _n_water))\n plt.plot(y_test,label=\"lrr-de\",marker=\"*\")\n plt.plot(y_ref, label=\"ref\",marker=\"*\")\n plt.plot(y_test_old, label=\"opls\",marker=\"*\")\n plt.legend(loc='upper left')\n if data.flag == \"energy\":\n plt.ylabel(\"Energy - Test set [kj]\")\n elif data.flag == \"force\":\n plt.ylabel(\"Force - Test set [kj/mol nm]\")\n plt.xlabel(\"$N_{confs}$\") \n plt.savefig(\"set{}.pdf\".format(count_plot), bbox_inches='tight')\n count_plot = count_plot + 1", "title": "" }, { "docid": "777de4d3acb2453db9f46e3ef4aee252", "score": "0.50028086", "text": "def get_qubitpol_vs_xval(self, xvals_dict=None):\n ncircuits = len(self._qobj['circuits'])\n #Is this the best way to get the number of qubits?\n nqubits = self._qobj['circuits'][0]['compiled_circuit']['header']['number_of_qubits']\n qubitpol = numpy.zeros([ncircuits, nqubits], dtype=float)\n xvals = numpy.zeros([ncircuits], dtype=float)\n\n #build Z operators for each qubit\n z_dicts = []\n for qubit_ind in range(nqubits):\n z_dicts.append(dict())\n for qubit_state in range(2**nqubits):\n new_key = (\"{0:0\"+\"{:d}\".format(nqubits) + \"b}\").format(qubit_state)\n z_dicts[-1][new_key] = -1\n if new_key[nqubits-qubit_ind-1] == '1':\n z_dicts[-1][new_key] = 1\n\n #go through each circuit and for eqch qubit and apply the operators using \"average_data\"\n for circuit_ind in range(ncircuits):\n if not xvals_dict is None:\n xvals[circuit_ind] = xvals_dict[self._qobj['circuits'][circuit_ind]['name']]\n for qubit_ind in range(nqubits):\n qubitpol[circuit_ind, qubit_ind] = self.average_data(self._qobj['circuits'][circuit_ind]['name'], z_dicts[qubit_ind])\n\n return qubitpol, xvals", "title": "" }, { "docid": "d93295ea60b006d3ad2a525e091147b4", "score": "0.4997001", "text": "def main():\n # all B (0 -> 0), all alpha (0 -> 2pi/3)\n a_ans = (2*np.pi)/3\n # q_start = np.array([0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001]) # a_ans, a_ans, a_ans\n # q_end = np.array([0.0001, 0.0001, 0.0001, a_ans + 
0.2, a_ans + 0.2, a_ans + 0.2]) # ([1.0001, -1.0001, 0.7001, a_ans + 0.2, a_ans + 0.2, a_ans + 0.2])\n \n q_start = np.array([-0.06235794, -0.00409771, 0.02960726, 0.14837708, 0.22618857, 0.09228618])\n q_end = np.array([-0.19746493, -0.00637689, 0.00991869, 0.17226557, 1.68673423, -0.22740581])\n uz_0 = np.array([[0, 0, 0]]).transpose()\n\n (r1,r2,r3,Uz) = moving_CTR(q_start, uz_0)\n x_cur_pos = r1[-1]\n (r1e,r2e,r3e,Uze) = moving_CTR(q_end, uz_0)\n x_end_pos = r1e[-1]\n\n\n # x_cur_pos = [0.0, -0.07, 0.1]\n # x_end_pos = [0.05, 0.05, 0.1]\n # waypoints = [[0.0, 0.0, 0.0], [a_ans, a_ans, a_ans]]\n waypoints = [x_cur_pos, x_end_pos]\n a1_coeffs = []\n a2_coeffs = []\n a3_coeffs = []\n\n for x in range(len(waypoints)):\n traj = TrajectoryGenerator(waypoints[x], waypoints[(x + 1) % len(waypoints)], total_time)\n traj.solve()\n a1_coeffs.append(traj.x_c)\n a2_coeffs.append(traj.y_c)\n a3_coeffs.append(traj.z_c)\n\n print('START des x_cur_pos:', x_cur_pos)\n print('END des x_end_pos:', x_end_pos)\n CTR_sim(a1_coeffs, a2_coeffs, a3_coeffs, q_start, x_end_pos)\n print('START des q_start:', q_start)\n print('END des q_end:', q_end)", "title": "" }, { "docid": "6ab23b693c61620495edafea5b17da7a", "score": "0.49967322", "text": "def do_processing_all(chain):\n block = chain._block\n set = chain._block.set\n dataset = chain._dataset\n \n #------------------------------------------------------\n # Global inits and one time calculations\n \n zfmult = 4 # larger zfmult here improves peak shift accuracy\n raw_dim0 = dataset.raw_dims[0]\n raw_hpp = dataset.sw / raw_dim0\n fid_dim0 = raw_dim0 * zfmult\n fid_hpp = dataset.sw / fid_dim0\n\n xx = np.arange(raw_dim0) / dataset.sw\n search = np.zeros((raw_dim0 * zfmult),complex)\n \n # reset results arrays and temporary arrays\n chain.time_all = np.zeros(dataset.raw_dims[::-1],complex)\n chain.freq_all = np.zeros(dataset.raw_dims[::-1],complex)\n\n # convert algorithm values from PPM to points\n search_start = set.reference_peak_center + set.peak_search_width\n search_end = set.reference_peak_center - set.peak_search_width\n refpt = (fid_dim0 / 2) - (dataset.frequency * (set.reference_peak_center - dataset.resppm) / fid_hpp)\n search_start = int((fid_dim0 / 2) - (dataset.frequency * (search_start - dataset.resppm) / fid_hpp))\n search_end = int((fid_dim0 / 2) - (dataset.frequency * (search_end - dataset.resppm) / fid_hpp))\n \n ph0_start = set.phase0_range_start\n ph0_end = set.phase0_range_end\n ph0_start = int((raw_dim0 / 2) - (dataset.frequency * (ph0_start - dataset.resppm) / raw_hpp))\n ph0_end = int((raw_dim0 / 2) - (dataset.frequency * (ph0_end - dataset.resppm) / raw_hpp))\n\n # one time calculations \n apod = util_generic_spectral.apodize(xx, set.gaussian_apodization, 'Gaussian')\n chop = ((((np.arange(raw_dim0) + 1) % 2) * 2) - 1)\n apod *= chop\n \n global_phase0 = np.exp(1j * block.set.global_phase0 * common_constants.DEGREES_TO_RADIANS)\n\n nfids = chain.raw.shape[2]\n \n\n #------------------------------------------------------\n # Coil combination section\n #\n # - do not combine if only 1 coil, no need\n\n if chain.raw.shape[1] > 1: # number of coils\n\n if set.coil_combine_method=='None':\n raw_combined = funct_combine.coil_combine_none(chain)\n if set.coil_combine_method=='Siemens':\n raw_combined = funct_combine.coil_combine_siemens(chain)\n if set.coil_combine_method=='CMRR':\n raw_combined = funct_combine.coil_combine_cmrr(chain)\n if set.coil_combine_method=='CMRR-Sequential':\n raw_combined = 
funct_combine.coil_combine_cmrr_sequential(chain)\n else:\n # single coil, copy first channel only\n raw_combined = funct_combine.coil_combine_none(chain) \n\n\n\n #------------------------------------------------------\n # FID averaging section\n # \n # - if nfids % navgs is not 0, then extra FIDs at the end are ignored\n # - this step changes the dimensionality of the final data object, all\n # downstream processing will be affected \n #\n\n if set.fids_to_average > 1: # number of raw FIDs to average into one new FID\n navgs = set.fids_to_average\n nfids = int(nfids/navgs)\n\n new_shape = list(raw_combined.shape)\n new_shape[-2] = nfids\n time = np.zeros(new_shape, complex)\n \n for i in range(nfids):\n for j in range(navgs):\n time[0,0,i,:] += raw_combined[0,0,j+i*navgs,:]\n \n raw_combined = time\n \n #------------------------------------------------------\n # FID correction section\n #\n # - global_phase0 and global_phase1 do not affect this algorithm\n\n for i in range(nfids):\n \n time = raw_combined[0,0,i,:].copy()\n \n if set.fid_left_shift != 0:\n # shift fid to the left and set last points to zero\n time = np.roll(time, -set.fid_left_shift) \n time[-set.fid_left_shift:] = time[0]*0.0 \n \n if set.apply_peak_shift and chain.calculate_flag:\n # Calculate peaks shift if flag set, use oversized zfmult\n # Peak search is performed over user-set range on magnitude data\n search *= 0.0\n search[0:raw_dim0] = time * apod\n search = np.fft.fft(search) \n temp = np.abs(search)\n imax = temp[search_start:search_end].argmax()\n delta = (refpt-(search_start+imax))*fid_hpp\n block.frequency_shift[i] = delta\n \n # Phase 0 NOT calculated here because reference peak has to be \n # calculated from summed peak-shifted data\n\n # Apply freq shift and phase0 corrections to the time data\n time *= np.exp(1j * 2.0 * np.pi * block.frequency_shift[i] * xx)\n time *= np.exp(1j * block.phase_0[i] * common_constants.DEGREES_TO_RADIANS)\n \n # Sum up FIDs for display, and calculate current voxel if needed\n chain.time_all[0,0,i,:] = time\n\n \n #---------------------------------------------------------------------\n # Calculate Phase0 optimization if flag set ON in widget. 
We need \n # to do this in a second loop, since we need the peaks shifted before \n # we create a reference spectrum from the summed FIDs\n #\n # - global_phase0 DOES affect this algorithm, global_phase1 does not\n # - ref spectrum that all individual FIDs are adjusted to match is created\n # from an average of all FIDs with global_phase0 applied\n \n if set.apply_phase0 and chain.calculate_flag:\n # create reference spectrum and optimize range\n freq_all = chain.time_all.copy() \n freq_all = np.sum(freq_all, axis=0) * apod\n freq_all[0] *= 0.5\n freq_all = (np.fft.fft(freq_all) / len(freq_all))\n freq_all /= nfids # scale for comparison to single FID\n freq_all *= global_phase0 # if there is a global_phase0\n ph0range = [ph0_start, ph0_end]\n \n # reset global variable so as to fill in below with new ph0 values\n chain.time_all *= 0 \n \n for i in range(nfids):\n\n time = chain.raw[0,0,i,:].copy()\n\n if set.fid_left_shift != 0:\n # shift fid to the left and set last points to zero\n time = np.roll(time, -set.fid_left_shift) \n time[-set.fid_left_shift:] = time[0]*0.0 \n \n time *= np.exp(1j * 2.0 * np.pi * block.frequency_shift[i] * xx) \n \n # this is where phase 0 is optimized ...\n tmp = time.copy()\n\n # if global_phase0 apply here before we calculate FID independent phase 0\n tmp *= global_phase0\n \n tmp[0] *= 0.5\n tmp_freq = (np.fft.fft(tmp * apod) / len(tmp))\n phdeg = optimize_phase0(tmp_freq, freq_all, ph0range)\n block.phase_0[i] = phdeg\n \n time *= np.exp(1j * block.phase_0[i] * common_constants.DEGREES_TO_RADIANS)\n \n chain.time_all[0,0,i,:] = time\n\n\n if set.global_phase0 != 0.0:\n chain.time_all *= global_phase0\n\n if set.global_phase1 != 0.0:\n # move all time result into frequency domain\n time_all = chain.time_all.copy()\n\n # calc phase 1 \n piv = np.round(dataset.ppm2pts(dataset.phase_1_pivot, acq=True))\n xx = (np.arange(raw_dim0,dtype=float)-piv)/raw_dim0\n phase1 = np.exp(1j * (set.global_phase1 * DTOR * xx))\n \n for i in range(nfids):\n tmp = time_all[0,0,i,:].copy()\n tmp[0] *= 0.5 \n tmp = np.fft.fft(tmp * chop)\n # apply to spectral data and invert fourier transform\n tmp *= phase1\n tmp = np.fft.ifft(tmp)\n chain.time_all[0,0,i,:] = tmp * chop\n \n for i in range(nfids): \n tmp = apod * chain.time_all[0,0,i,:].copy() \n tmp[0] *= 0.5\n chain.freq_all[0,0,i,:] = (np.fft.fft(tmp) / len(tmp))", "title": "" }, { "docid": "9df8589bbe7c9c6aabe8e6a0d557dc01", "score": "0.4985901", "text": "def processCoordinates(self):\n\n raise NotImplementedError", "title": "" }, { "docid": "52b77f20141f9a6261f11e0a4353a375", "score": "0.49851283", "text": "def clf_data(self):\n # yield self.gyro.w\n # yield self.gyro.x\n # yield self.gyro.y\n # yield self.gyro.z\n yield self.accel.x\n yield self.accel.y\n yield self.accel.z\n yield self.flex", "title": "" }, { "docid": "90000035b46e94ee70ad9438dd50805e", "score": "0.49843118", "text": "def define_data(self):\n self.__data = [] #data qubits\n if self.__fault_tolerant_b:\n # split out the second list item \n # under scheme b repeated measurment is only needed of the second qubit\n self.__data_classical = [ [], [[] for i in range(self.__num_data_rounds)]]\n else:\n self.__data_classical = []\n if self.__fault_tolerant_c:\n self.__ftc = []\n self.__ftc_classical = [[] for i in range(self.__num_data_rounds)]\n if self.__ancilla:\n if self.__fault_tolerant_ancilla:\n self.__mx = [[] for i in range(self.__d)]\n self.__mz = [[] for i in range(self.__d)]\n self.__mx_classical = [[[] for i in range(self.__num_ancilla)] for j in 
range(self.__d) ]\n self.__mz_classical = [[[] for i in range(self.__num_ancilla)] for j in range(self.__d) ]\n else:\n self.__mx = [] #ancilla qubits to detect X operator\n self.__mz = [] #ancilla qubits to detect Z operator\n self.__mx_classical = []\n self.__mz_classical = [] \n if self.__extend_ancilla: \n self.__extra_ancilla = []\n self.__extra_ancilla_classical = []", "title": "" }, { "docid": "4f53a2b780c83fa03f56c951e0e79b68", "score": "0.49795353", "text": "def _setupQAinfo(self):\n # QA information for MOD09GA\n # https://lpdaac.usgs.gov/products/modis_products_table/mod09ga\n print \"Masking data...\"\n # Bit 0-1 cloud state: 00 - clear\n bit0, bit1 = 0, 1\n self.m_cloud = np.where(((self.m_QA / np.power(2,bit0)) % 2 == 0) &\n ((self.m_QA / np.power(2,bit1)) % 2 == 0), 1, 0)\n\n # Bit 2 cloud shadow: 0 - no cloud shadow\n bit2 = 2\n self.m_cloudshadow = np.where( (self.m_QA / np.power(2,bit2)) % 2 == 0, 1, 0)\n\n # Bits 3-5 land-water flag\n # 000 - shallow ocean\n # 110 - continental/moderate ocean\n # 111 - deep ocean\n bit3 = 3\n bit4 = 4\n bit5 = 5\n\n self.m_land_water = np.where(((self.m_QA / np.power(2,bit3)) % 2 == 0) &\n ((self.m_QA / np.power(2,bit4)) % 2 == 0) &\n ((self.m_QA / np.power(2,bit5)) % 2 == 0), 0, 1)\n\n self.m_land_water = np.where(((self.m_QA / np.power(2,bit3)) % 2 == 0) &\n ((self.m_QA / np.power(2,bit4)) % 2 == 1) &\n ((self.m_QA / np.power(2,bit5)) % 2 == 1), 0, self.m_land_water)\n\n self.m_land_water = np.where(((self.m_QA / np.power(2,bit3)) % 2 == 1) &\n ((self.m_QA / np.power(2,bit4)) % 2 == 1) &\n ((self.m_QA / np.power(2,bit5)) % 2 == 1), 0, self.m_land_water)\n\n # Bit 10 internal cloud flag: 0 - no cloud\n bit10 = 10\n self.m_internal_cloud_flag = np.where((self.m_QA / np.power(2,bit10)) % 2 == 0, 1, 0)\n\n # Bit 12 MOD35 snow/ice flag: 1 - snow\n bit12 = 12\n self.m_snow_ice_flag = np.where( (self.m_QA / np.power(2,bit12)) % 2 == 1, 1, 0)\n\n # Bit 13 Pixel is adjacent to cloud : 0 - no\n bit13 = 13\n self.m_pixel_adjacent_to_cloud = np.where( (self.m_QA / np.power(2,bit13)) % 2 == 0, 1, 0)\n\n # Set uncertainty based aerosol QA\n # Bit 6-7 Aerosol quantity:\n # 00 - climatology\n # 01 - low\n # 10 - average\n # 11 - high\n aerosols = {1: 0.01, 2: 0.02, 3: 0.03, 4: 0.04}\n bit6, bit7 = 6, 7\n\n # UncerntAOD = np.array(4, np.float32)\n self.m_aerosols_QA = np.zeros((self.m_rows, self.m_cols), np.float32)\n self.m_aerosols_QA = np.where(((self.m_QA / np.power(2, bit6)) % 2 == 0)\n & ((self.m_QA / np.power(2, bit7)) % 2 == 0),\n aerosols[1], self.m_aerosols_QA)\n self.m_aerosols_QA = np.where(((self.m_QA / np.power(2,bit6)) % 2 == 0)\n & ((self.m_QA / np.power(2, bit7)) % 2 == 1),\n aerosols[2], self.m_aerosols_QA)\n self.m_aerosols_QA = np.where(((self.m_QA / np.power(2,bit6)) % 2 == 1)\n & ((self.m_QA / np.power(2, bit7)) % 2 == 0),\n aerosols[3], self.m_aerosols_QA)\n self.m_aerosols_QA = np.where(((self.m_QA / np.power(2,bit6)) % 2 == 1)\n & ((self.m_QA / np.power(2, bit7)) % 2 == 1),\n aerosols[4], self.m_aerosols_QA)", "title": "" }, { "docid": "0ade4d340bf343c0550164c7907dd1ab", "score": "0.49767637", "text": "def solve(self, qdq=True, rotateq=True, masterq=True, currentq=True, *args, **kwargs):\n if qdq:\n self.qd.diagonalise()\n if rotateq:\n self.rotate()\n #\n if masterq:\n self.prepare_kern()\n self.generate_fct()\n if not self.funcp.mfreeq:\n self.generate_kern()\n self.solve_kern()\n else:\n self.solve_matrix_free()\n if currentq:\n self.generate_current()", "title": "" }, { "docid": "3c216a909a1b0178bd947467358b795e", "score": 
"0.49744493", "text": "def setUp(self):\n self.scale = 1.5\n self.rg = 30.0\n x = np.arange(0.0001, 0.1, 0.0001)\n y = np.asarray([\n self.scale * math.exp(-(q*self.rg)**2 / 3.0) for q in x])\n dy = y*.1\n self.data = Data1D(x=x, y=y, dy=dy)\n self.npts = len(x)-10", "title": "" }, { "docid": "a893bd8694884a21900abec26df3fe06", "score": "0.49724585", "text": "def setup_function(self):\n\n # consider 'true_coszen\" and 'true_energy' containers\n for container in self.data:\n if len(container[\"true_energy\"]) == 0:\n container[\"airs_1s_perturb\"] = np.zeros(container.size, dtype=FTYPE)\n else:\n container[\"airs_1s_perturb\"] = self.airs_spline.evaluate_simple(\n (np.log10(container[\"true_energy\"]), container[\"true_coszen\"])\n )\n container.mark_changed(\"airs_1s_perturb\")", "title": "" }, { "docid": "24aa95bdb883e338017f6049a8a62c88", "score": "0.4964958", "text": "def setUp(self):\n self.scale = 1.5\n self.m = 3.0\n x = np.arange(0.0001, 0.1, 0.0001)\n y = np.asarray([self.scale * math.pow(q ,-1.0*self.m) for q in x])\n dy = y*.1\n self.data = Data1D(x=x, y=y, dy=dy)", "title": "" }, { "docid": "cb0ae964f71726060612a05356adb001", "score": "0.49637735", "text": "def calibrate(self, calibration_time=1):\n self.reset_device()\n\n ####### get stable time source\n self.write(MPU_REG_PWR_MGMT_1, 0x01)\n self.write(MPU_REG_PWR_MGMT_2, 0x00)\n time.sleep(0.2)\n\n ####### configure device for bias calculation\n self.write(MPU_REG_INT_ENABLE, 0x00) ### disable all interrupts\n self.write(MPU_REG_FIFO_ENABLE, 0x00) ### disable FIFO\n self.write(MPU_REG_I2C_MST_CTRL, 0x00) ### disable I2C master\n self.write(MPU_REG_USER_CTRL, 0x0C) ### disable FIFO, I2C master mode, reset FIFO and DMP\n time.sleep(0.1)\n\n ####### configure gyroscope and accelerometer for bias calculation\n self.set_gyro_dlpf(1) ### set gyroscope DLPF to 184\n self.set_sample_rate(100) ### set sample rate to 100Hz (1 sample every each 0.01s)\n self.set_gyro_range(250) ### set the gyroscope range to 250, the maximum sensitivity\n self.set_acce_range(2) ### Set accelerometer full-scale to 2 g, maximum sensitivity\n\n ####### Configure FIFO to capture accelerometer and gyro data for bias calculation\n self.write(MPU_REG_USER_CTRL, 0x40) ### Enable FIFO\n self.set_fifo(acce=True, temp=False, gyro=True) ### Enable gyro and accelerometer sensors for FIFO\n\n ####### mean values over calibration_time\n acce_bias_3d = Point3D()\n gyro_bias_3d = Point3D()\n\n count = 0\n start = time.time()\n while time.time() - start < calibration_time:\n count += 1\n time.sleep(self._delay_to_next_sample)\n acce_bias_3d += self.get_raw_acceleration()\n gyro_bias_3d += self.get_raw_gyroscope()\n acce_bias_3d = acce_bias_3d / count\n gyro_bias_3d = gyro_bias_3d / count\n\n acce_gravity = Point3D(0, 0, -1 / self._acce_resolution) ### taking into account Earth gravity (doesn't work\n ### if the calibration is done on another planet!)\n acce_bias_3d = acce_bias_3d - acce_gravity ### the gravity shouldn't be counted as a bias\n\n ####### set the bias to the hardware\n self.set_acce_bias_to_hardware(acce_bias_3d)\n self.set_gyro_bias_to_hardware(gyro_bias_3d) ### set gyro bias to hardware", "title": "" }, { "docid": "c16a675073ef4df74b5dc06883bc84f6", "score": "0.49598232", "text": "def __init__(self, xPos, yPos, zPos,measurements, uncertainties,deltaChiSqToStop = 0.01,dampingFactor = 1,useDampedGaussNeutonLineSearch = False, recordHistory = False):\n super().__init__(deltaChiSqToStop = deltaChiSqToStop,dampingFactor = 
dampingFactor,useDampedGaussNeutonLineSearch = useDampedGaussNeutonLineSearch,recordHistory=recordHistory)\n self.xPos = xPos\n self.yPos = yPos\n self.zPos = zPos\n self.measurements = measurements\n self.uncert = uncertainties", "title": "" }, { "docid": "27b2bf7e7144ae1ccb0c7f93abfffd65", "score": "0.4958775", "text": "def setup_joints():\n commands = [None] * AXIS_TOTAL\n feedbacks = [None] * AXIS_TOTAL\n for i in xrange(AXIS_TOTAL):\n commands[i] = hal.newsig('machine.joint.%d.command' % i, hal.HAL_FLOAT)\n feedbacks[i] = hal.newsig('machine.joint.%d.feedback' % i, hal.HAL_FLOAT)\n\n core_xy = config.find('FDM','CORE_XY')\n if core_xy is not None and int(core_xy) > 0:\n sum_cmd_a = rtapi.newinst('sum2', 'corexy.sum2.cmd.a')\n sum_fb_x = rtapi.newinst('sum2', 'corexy.sum2.fb.x')\n sum_cmd_b = rtapi.newinst('sum2', 'corexy.sum2.cmd.b')\n sum_fb_y = rtapi.newinst('sum2', 'corexy.sum2.fb.y')\n hal.addf(sum_cmd_a.name, SERVO_THREAD)\n hal.addf(sum_cmd_b.name, SERVO_THREAD)\n hal.addf(sum_fb_x.name, SERVO_THREAD)\n hal.addf(sum_fb_y.name, SERVO_THREAD)\n\n sum_cmd_a.pin('gain0').set(1)\n sum_cmd_a.pin('gain1').set(1)\n sum_cmd_b.pin('gain0').set(1)\n sum_cmd_b.pin('gain1').set(-1)\n\n sum_fb_x.pin('gain0').set(0.5)\n sum_fb_x.pin('gain1').set(0.5)\n sum_fb_y.pin('gain0').set(0.5)\n sum_fb_y.pin('gain1').set(-0.5)\n\n corex_cmd = hal.newsig('machine.joint.corex.command', hal.HAL_FLOAT)\n corey_cmd = hal.newsig('machine.joint.corey.command', hal.HAL_FLOAT)\n\n corex_cmd.link('axis.0.motor-pos-cmd')\n corey_cmd.link('axis.1.motor-pos-cmd')\n\n sum_cmd_a.pin('in0').link(corex_cmd)\n sum_cmd_a.pin('in1').link(corey_cmd)\n sum_cmd_b.pin('in0').link(corex_cmd)\n sum_cmd_b.pin('in1').link(corey_cmd)\n sum_cmd_a.pin('out').link(commands[0])\n sum_cmd_b.pin('out').link(commands[1])\n\n sum_fb_x.pin('in0').link(feedbacks[0])\n sum_fb_x.pin('in1').link(feedbacks[1])\n sum_fb_y.pin('in0').link(feedbacks[0])\n sum_fb_y.pin('in1').link(feedbacks[1])\n sum_fb_x.pin('out').link('axis.0.motor-pos-fb')\n sum_fb_y.pin('out').link('axis.1.motor-pos-fb')\n else:\n commands[0].link('axis.0.motor-pos-cmd')\n feedbacks[0].link('axis.0.motor-pos-fb')\n commands[1].link('axis.1.motor-pos-cmd')\n feedbacks[1].link('axis.1.motor-pos-fb')\n\n for i in xrange(AXIS_TOTAL):\n if i >= 2:\n commands[i].link('axis.%d.motor-pos-cmd' % i)\n feedbacks[i].link('axis.%d.motor-pos-fb' % i)\n\n return (commands, feedbacks)", "title": "" }, { "docid": "9a5945968be11ec854e59f50df98986d", "score": "0.49525112", "text": "def __init__ (self, n, pause, V1, V2, V3, I1, I2, I3, theta, f, rangeV, rangeI, counts, mtype):\n self.repetitions = n\n self.pause = pause\n # Enforce the type for single-phase measurement types.\n if mtype == MeasurementType.SinglePhase1:\n self.V = [ V1, 0.0, 0.0 ]\n self.I = [ I1, 0.0, 0.0 ] \n elif mtype == MeasurementType.SinglePhase2:\n self.V = [ 0.0, V2, 0.0 ]\n self.I = [ 0.0, I2, 0.0 ]\n elif mtype == MeasurementType.SinglePhase3:\n self.V = [ 0.0, 0.0, V3 ]\n self.I = [ 0.0, 0.0, I3 ]\n elif mtype == MeasurementType.DeltaPhase1:\n self.V = [ V1, V2, V3 ]\n self.I = [ I1, 0.0, 0.0 ] \n elif mtype == MeasurementType.DeltaPhase2:\n self.V = [ V1, V2, V3 ]\n self.I = [ 0.0, I2, 0.0 ]\n elif mtype == MeasurementType.DeltaPhase3:\n self.V = [ V1, V2, V3 ]\n self.I = [ 0.0, 0.0, I3 ] \n elif mtype == MeasurementType.DeltaPhase12:\n self.V = [ V1, V2, V3 ]\n self.I = [ I1, I2, 0.0 ]\n elif mtype == MeasurementType.DeltaPhase23:\n self.V = [ V1, V2, V3 ]\n self.I = [ 0.0, I2, I3 ]\n elif mtype == 
MeasurementType.DeltaPhase31:\n self.V = [ V1, V2, V3 ]\n self.I = [ I1, 0.0, I3 ]\n else:\n self.V = [ V1, V2, V3 ]\n self.I = [ I1, I2, I3 ]\n self.theta = theta\n self.f = f\n self.rangeV = rangeV\n self.rangeI = rangeI\n self.mtype = mtype\n self.countsperwh = counts", "title": "" }, { "docid": "d2c47c251c5e4482dc8ecfd7fd7f6488", "score": "0.49512354", "text": "def calibrate(self):\n # TODO", "title": "" }, { "docid": "249c155dc30decb33c25e2e2ed5b4a60", "score": "0.49452075", "text": "def design(self, \n sar_lch, \n sar_pw, \n sar_nw, \n sar_sa_m, \n sar_sa_m_d, \n sar_sa_m_rst, \n sar_sa_m_rst_d, \n sar_sa_m_rgnn, \n sar_sa_m_rgnp_d, \n sar_sa_m_buf,\n doubleSA,\n vref_sf_m_mirror, vref_sf_m_bias, vref_sf_m_off, vref_sf_m_in, vref_sf_m_bias_dum, vref_sf_m_in_dum,\n vref_sf_m_byp, vref_sf_m_byp_bias, vref_sf_bias_current, vref_sf,\n sar_drv_m_list,sar_ckgen_m,sar_ckgen_fo,\n sar_ckgen_ndelay,\n sar_ckgen_fast, sar_ckgen_fastest,\n sar_logic_m,\n sar_fsm_m,\n sar_ret_m,\n sar_ret_fo,\n sar_device_intent,\n sar_c_m,\n sar_rdx_array,\n samp_lch,\n samp_wp,\n samp_wn,\n samp_fgn,\n samp_fg_inbuf_list,\n samp_fg_outbuf_list,\n samp_nduml,\n samp_ndumr,\n samp_nsep,\n samp_intent,\n num_bits,\n num_inv_bb,\n samp_use_laygo,\n sf_lch, sf_nw, sf_m_mirror, sf_m_bias, sf_m_off, sf_m_in, sf_m_bias_dum, sf_m_in_dum, sf_m_byp,\n sf_m_byp_bias, sf_intent, bias_current, use_sf,\n use_offset,\n num_slices,\n clk_lch,\n clk_pw,\n clk_nw,\n clk_m_dff,\n clk_m_inv1,\n clk_m_inv2,\n clk_m_tgate,\n clk_n_pd,\n clk_m_capsw,\n clk_unit_cell,\n clk_clock_pulse,\n clk_device_intent,\n clkcal_order,\n ret_lch,\n ret_pw,\n ret_nw,\n ret_m_ibuf,\n ret_m_obuf,\n ret_m_latch,\n ret_m_srbuf,\n ret_m_sr,\n ret_device_intent,\n rdac_lch, rdac_pw, rdac_nw, rdac_m, rdac_num_series, rdac_num_bits, rdac_num_dacs, rdac_device_intent\n ):\n\n self.parameters['sar_lch'] = sar_lch\n self.parameters['sar_pw'] = sar_pw\n self.parameters['sar_nw'] = sar_nw\n self.parameters['sar_sa_m'] = sar_sa_m\n self.parameters['sar_sa_m_d'] = sar_sa_m_d\n self.parameters['sar_sa_m_rst'] = sar_sa_m_rst\n self.parameters['sar_sa_m_rst_d'] = sar_sa_m_rst_d\n self.parameters['sar_sa_m_rgnn'] = sar_sa_m_rgnn\n self.parameters['sar_sa_m_rgnp_d'] = sar_sa_m_rgnp_d\n self.parameters['sar_sa_m_buf'] = sar_sa_m_buf\n self.parameters['doubleSA'] = doubleSA\n self.parameters['vref_sf_m_mirror'] = vref_sf_m_mirror\n self.parameters['vref_sf_m_bias'] = vref_sf_m_bias\n self.parameters['vref_sf_m_in'] = vref_sf_m_in\n self.parameters['vref_sf_m_off'] = vref_sf_m_off\n self.parameters['vref_sf_m_bias_dum'] = vref_sf_m_bias_dum\n self.parameters['vref_sf_m_in_dum'] = vref_sf_m_in_dum\n self.parameters['vref_sf_m_byp'] = vref_sf_m_byp\n self.parameters['vref_sf_m_byp_bias'] = vref_sf_m_byp_bias\n self.parameters['vref_sf_bias_current'] = vref_sf_bias_current\n self.parameters['vref_sf'] = vref_sf\n self.parameters['sar_drv_m_list'] = sar_drv_m_list\n self.parameters['sar_ckgen_m'] = sar_ckgen_m\n self.parameters['sar_ckgen_fo'] = sar_ckgen_fo\n self.parameters['sar_ckgen_ndelay'] = sar_ckgen_ndelay\n self.parameters['sar_ckgen_fast'] = sar_ckgen_fast\n self.parameters['sar_ckgen_fastest'] = sar_ckgen_fastest\n self.parameters['sar_logic_m'] = sar_logic_m\n self.parameters['sar_fsm_m'] = sar_fsm_m\n self.parameters['sar_ret_m'] = sar_ret_m\n self.parameters['sar_ret_fo'] = sar_ret_fo\n self.parameters['sar_device_intent'] = sar_device_intent\n self.parameters['sar_c_m'] = sar_c_m\n self.parameters['sar_rdx_array'] = sar_rdx_array\n self.parameters['samp_lch'] 
= samp_lch\n self.parameters['samp_wp'] = samp_wp\n self.parameters['samp_wn'] = samp_wn\n self.parameters['samp_fgn'] = samp_fgn\n self.parameters['samp_fg_inbuf_list'] = samp_fg_inbuf_list\n self.parameters['samp_fg_outbuf_list'] = samp_fg_outbuf_list\n self.parameters['samp_nduml'] = samp_nduml\n self.parameters['samp_ndumr'] = samp_ndumr\n self.parameters['samp_nsep'] = samp_nsep\n self.parameters['samp_intent'] = samp_intent\n self.parameters['num_bits'] = num_bits\n self.parameters['num_inv_bb'] = num_inv_bb\n self.parameters['samp_use_laygo'] = samp_use_laygo # if true, use laygo for sampler generation\n self.parameters['sf_lch'] = sf_lch\n self.parameters['sf_nw'] = sf_nw\n self.parameters['sf_m_mirror'] = sf_m_mirror\n self.parameters['sf_m_bias'] = sf_m_bias\n self.parameters['sf_m_in'] = sf_m_in\n self.parameters['sf_m_off'] = sf_m_off\n self.parameters['sf_m_bias_dum'] = sf_m_bias_dum\n self.parameters['sf_m_in_dum'] = sf_m_in_dum\n self.parameters['sf_m_byp'] = sf_m_byp\n self.parameters['sf_m_byp_bias'] = sf_m_byp_bias\n self.parameters['sf_intent'] = sf_intent\n self.parameters['bias_current'] = bias_current\n self.parameters['use_sf'] = use_sf # if true, source follower is used before the sampler\n self.parameters['use_offset'] = use_offset\n self.parameters['num_slices'] = num_slices\n self.parameters['clk_lch'] = clk_lch\n self.parameters['clk_pw'] = clk_pw\n self.parameters['clk_nw'] = clk_nw\n self.parameters['clk_m_dff'] = clk_m_dff\n self.parameters['clk_m_inv1'] = clk_m_inv1\n self.parameters['clk_m_inv2'] = clk_m_inv2\n self.parameters['clk_m_tgate'] = clk_m_tgate\n self.parameters['clk_n_pd'] = clk_n_pd\n self.parameters['clk_m_capsw'] = clk_m_capsw\n self.parameters['clk_unit_cell'] = clk_unit_cell\n self.parameters['clk_clock_pulse'] = clk_clock_pulse\n self.parameters['clk_device_intent'] = clk_device_intent\n self.parameters['clkcal_order'] = clkcal_order\n self.parameters['ret_lch'] = ret_lch\n self.parameters['ret_pw'] = ret_pw\n self.parameters['ret_nw'] = ret_nw\n self.parameters['ret_m_ibuf'] = ret_m_ibuf\n self.parameters['ret_m_obuf'] = ret_m_obuf\n self.parameters['ret_m_latch'] = ret_m_latch\n self.parameters['ret_m_srbuf'] = ret_m_srbuf\n self.parameters['ret_m_sr'] = ret_m_sr\n self.parameters['ret_device_intent'] = ret_device_intent\n self.parameters['rdac_lch'] = rdac_lch\n self.parameters['rdac_pw'] = rdac_pw\n self.parameters['rdac_nw'] = rdac_nw\n self.parameters['rdac_m'] = rdac_m\n self.parameters['rdac_num_series'] = rdac_num_series\n self.parameters['rdac_num_bits'] = rdac_num_bits\n self.parameters['rdac_num_dacs'] = rdac_num_dacs\n self.parameters['rdac_device_intent'] = rdac_device_intent\n\n term_list = [{\n ','.join(['ASCLKD%d<3:0>' % (i) for i in range(num_slices)]):\n ','.join(['I_ASCLKD%d<3:0>' % (i) for i in range(num_slices)]),\n ','.join(['EXTSEL_CLK%d' % (i) for i in range(num_slices)]):\n ','.join(['I_EXTSEL_CLK%d' % (i) for i in range(num_slices)]),\n ','.join(['ADCOUT_RET%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]):\n ','.join(['I_ADCOUT_RET%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]),\n ','.join(['ADCO%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]):\n ','.join(['I_ADCO%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]),\n ','.join(['CLKO%d' % (i) for i in range(num_slices)]):\n ','.join(['I_CLKO%d' % (i) for i in range(num_slices)]),\n ','.join(['samp_body%d' % (i) for i in range(num_slices+2)]):\n ','.join(['I_samp_body%d' % (i) for i in range(num_slices+2)]),\n 
','.join(['bottom_body%d' % (i) for i in range(num_slices+2)]):\n ','.join(['I_bottom_body%d' % (i) for i in range(num_slices+2)]),\n 'ADCO_CAL0<%d:0>'%(num_bits-1): 'I_ADCO_CAL0<%d:0>'%(num_bits-1),\n 'ADCO_CAL1<%d:0>'%(num_bits - 1): 'I_ADCO_CAL1<%d:0>'%(num_bits - 1),\n ','.join(['CLKCAL%d<4:0>' % i for i in range(num_slices)]):\n ','.join(['I_CLKCAL%d<4:0>' % i for i in range(num_slices)]),\n 'RDAC_SEL<%d:0>'%(rdac_num_dacs*rdac_num_bits-1): 'I_RDAC_SEL<%d:0>'%(rdac_num_dacs*rdac_num_bits-1),\n }]\n name_list = (['ADCI'])\n self.array_instance('ADCI', name_list, term_list=term_list)\n self.instances['ADCI'][0].design(\n sar_lch,\n sar_pw,\n sar_nw,\n sar_sa_m,\n sar_sa_m_d,\n sar_sa_m_rst,\n sar_sa_m_rst_d,\n sar_sa_m_rgnn,\n sar_sa_m_rgnp_d,\n sar_sa_m_buf,\n doubleSA,\n vref_sf_m_mirror, vref_sf_m_bias, vref_sf_m_off, vref_sf_m_in, vref_sf_m_bias_dum, vref_sf_m_in_dum,\n vref_sf_m_byp, vref_sf_m_byp_bias, vref_sf_bias_current, vref_sf,\n sar_drv_m_list, sar_ckgen_m, sar_ckgen_fo,\n sar_ckgen_ndelay,\n sar_ckgen_fast, sar_ckgen_fastest,\n sar_logic_m,\n sar_fsm_m,\n sar_ret_m,\n sar_ret_fo,\n sar_device_intent,\n sar_c_m,\n sar_rdx_array,\n samp_lch,\n samp_wp,\n samp_wn,\n samp_fgn,\n samp_fg_inbuf_list,\n samp_fg_outbuf_list,\n samp_nduml,\n samp_ndumr,\n samp_nsep,\n samp_intent,\n num_bits,\n num_inv_bb,\n samp_use_laygo,\n sf_lch, sf_nw, sf_m_mirror, sf_m_bias, sf_m_off, sf_m_in, sf_m_bias_dum, sf_m_in_dum, sf_m_byp,\n sf_m_byp_bias, sf_intent, bias_current, use_sf,\n use_offset,\n num_slices,\n clk_lch,\n clk_pw,\n clk_nw,\n clk_m_dff,\n clk_m_inv1,\n clk_m_inv2,\n clk_m_tgate,\n clk_n_pd,\n clk_m_capsw,\n clk_unit_cell,\n clk_clock_pulse,\n clk_device_intent,\n clkcal_order,\n ret_lch,\n ret_pw,\n ret_nw,\n ret_m_ibuf,\n ret_m_obuf,\n ret_m_latch,\n ret_m_srbuf,\n ret_m_sr,\n ret_device_intent,\n rdac_lch, rdac_pw, rdac_nw, rdac_m, rdac_num_series, rdac_num_bits, rdac_num_dacs, rdac_device_intent,\n )\n\n term_list = [{\n ','.join(['ASCLKD%d<3:0>' % (i) for i in range(num_slices)]):\n ','.join(['Q_ASCLKD%d<3:0>' % (i) for i in range(num_slices)]),\n ','.join(['EXTSEL_CLK%d' % (i) for i in range(num_slices)]):\n ','.join(['Q_EXTSEL_CLK%d' % (i) for i in range(num_slices)]),\n ','.join(['ADCOUT_RET%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]):\n ','.join(['Q_ADCOUT_RET%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]),\n ','.join(['ADCO%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]):\n ','.join(['Q_ADCO%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]),\n ','.join(['CLKO%d' % (i) for i in range(num_slices)]):\n ','.join(['Q_CLKO%d' % (i) for i in range(num_slices)]),\n ','.join(['samp_body%d' % (i) for i in range(num_slices+2)]):\n ','.join(['Q_samp_body%d' % (i) for i in range(num_slices+2)]),\n ','.join(['bottom_body%d' % (i) for i in range(num_slices+2)]):\n ','.join(['Q_bottom_body%d' % (i) for i in range(num_slices+2)]),\n 'ADCO_CAL0<%d:0>'%(num_bits - 1): 'Q_ADCO_CAL0<%d:0>'%(num_bits-1),\n 'ADCO_CAL1<%d:0>'%(num_bits - 1): 'Q_ADCO_CAL1<%d:0>'%(num_bits - 1),\n ','.join(['CLKCAL%d<4:0>' % i for i in range(num_slices)]):\n ','.join(['Q_CLKCAL%d<4:0>' % i for i in range(num_slices)]),\n 'RDAC_SEL<%d:0>' % (rdac_num_dacs * rdac_num_bits - 1): 'Q_RDAC_SEL<%d:0>' % (\n rdac_num_dacs * rdac_num_bits - 1),\n }]\n name_list = (['ADCQ'])\n self.array_instance('ADCQ', name_list, term_list=term_list)\n self.instances['ADCQ'][0].design(\n sar_lch,\n sar_pw,\n sar_nw,\n sar_sa_m,\n sar_sa_m_d,\n sar_sa_m_rst,\n sar_sa_m_rst_d,\n sar_sa_m_rgnn,\n 
sar_sa_m_rgnp_d,\n sar_sa_m_buf,\n doubleSA,\n vref_sf_m_mirror, vref_sf_m_bias, vref_sf_m_off, vref_sf_m_in, vref_sf_m_bias_dum, vref_sf_m_in_dum,\n vref_sf_m_byp, vref_sf_m_byp_bias, vref_sf_bias_current, vref_sf,\n sar_drv_m_list, sar_ckgen_m, sar_ckgen_fo,\n sar_ckgen_ndelay,\n sar_ckgen_fast, sar_ckgen_fastest,\n sar_logic_m,\n sar_fsm_m,\n sar_ret_m,\n sar_ret_fo,\n sar_device_intent,\n sar_c_m,\n sar_rdx_array,\n samp_lch,\n samp_wp,\n samp_wn,\n samp_fgn,\n samp_fg_inbuf_list,\n samp_fg_outbuf_list,\n samp_nduml,\n samp_ndumr,\n samp_nsep,\n samp_intent,\n num_bits,\n num_inv_bb,\n samp_use_laygo,\n sf_lch, sf_nw, sf_m_mirror, sf_m_bias, sf_m_off, sf_m_in, sf_m_bias_dum, sf_m_in_dum, sf_m_byp,\n sf_m_byp_bias, sf_intent, bias_current, use_sf,\n use_offset,\n num_slices,\n clk_lch,\n clk_pw,\n clk_nw,\n clk_m_dff,\n clk_m_inv1,\n clk_m_inv2,\n clk_m_tgate,\n clk_n_pd,\n clk_m_capsw,\n clk_unit_cell,\n clk_clock_pulse,\n clk_device_intent,\n clkcal_order,\n ret_lch,\n ret_pw,\n ret_nw,\n ret_m_ibuf,\n ret_m_obuf,\n ret_m_latch,\n ret_m_srbuf,\n ret_m_sr,\n ret_device_intent,\n rdac_lch, rdac_pw, rdac_nw, rdac_m, rdac_num_series, rdac_num_bits, rdac_num_dacs, rdac_device_intent,\n )\n\n self.rename_pin('I_CLKCAL', ','.join(['I_CLKCAL%d<4:0>'%i for i in range(num_slices)]))\n self.rename_pin('I_OSP', ','.join(['I_OSP%d'%(i) for i in range(num_slices)]))\n self.rename_pin('I_OSM', ','.join(['I_OSM%d'%(i) for i in range(num_slices)]))\n self.rename_pin('I_ASCLKD<3:0>', ','.join(['I_ASCLKD%d<3:0>'%(i) for i in range(num_slices)]))\n self.rename_pin('I_EXTSEL_CLK', ','.join(['I_EXTSEL_CLK%d'%(i) for i in range(num_slices)]))\n self.rename_pin('I_ADCOUT_RET', ','.join(['I_ADCOUT_RET%d<%d:0>'%(i, num_bits-1) for i in range(num_slices)]))\n self.rename_pin('I_ADCO', ','.join(['I_ADCO%d<%d:0>'%(i, num_bits-1) for i in range(num_slices)]))\n self.rename_pin('I_CLKO', ','.join(['I_CLKO%d'%(i) for i in range(num_slices)]))\n self.rename_pin('I_ADCO_CAL0', 'I_ADCO_CAL0<%d:0>'%(num_bits-1))\n self.rename_pin('I_ADCO_CAL1', 'I_ADCO_CAL1<%d:0>'%(num_bits-1))\n self.rename_pin('I_samp_body', ','.join(['I_samp_body%d'%(i) for i in range(num_slices+2)]))\n self.rename_pin('I_bottom_body', ','.join(['I_bottom_body%d'%(i) for i in range(num_slices+2)]))\n self.rename_pin('I_RDAC_SEL', 'I_RDAC_SEL<%d:0>'%(rdac_num_dacs*rdac_num_bits-1))\n\n self.rename_pin('Q_CLKCAL', ','.join(['Q_CLKCAL%d<4:0>'%i for i in range(num_slices)]))\n self.rename_pin('Q_OSP', ','.join(['Q_OSP%d'%(i) for i in range(num_slices)]))\n self.rename_pin('Q_OSM', ','.join(['Q_OSM%d'%(i) for i in range(num_slices)]))\n self.rename_pin('Q_ASCLKD<3:0>', ','.join(['Q_ASCLKD%d<3:0>'%(i) for i in range(num_slices)]))\n self.rename_pin('Q_EXTSEL_CLK', ','.join(['Q_EXTSEL_CLK%d'%(i) for i in range(num_slices)]))\n self.rename_pin('Q_ADCOUT_RET', ','.join(['Q_ADCOUT_RET%d<%d:0>'%(i, num_bits-1) for i in range(num_slices)]))\n self.rename_pin('Q_ADCO', ','.join(['Q_ADCO%d<%d:0>'%(i, num_bits-1) for i in range(num_slices)]))\n self.rename_pin('Q_CLKO', ','.join(['Q_CLKO%d'%(i) for i in range(num_slices)]))\n self.rename_pin('Q_ADCO_CAL0', 'Q_ADCO_CAL0<%d:0>'%(num_bits-1))\n self.rename_pin('Q_ADCO_CAL1', 'Q_ADCO_CAL1<%d:0>'%(num_bits-1))\n self.rename_pin('Q_samp_body', ','.join(['Q_samp_body%d'%(i) for i in range(num_slices+2)]))\n self.rename_pin('Q_bottom_body', ','.join(['Q_bottom_body%d'%(i) for i in range(num_slices+2)]))\n self.rename_pin('Q_RDAC_SEL', 'Q_RDAC_SEL<%d:0>'%(rdac_num_dacs*rdac_num_bits-1))\n\n if use_offset == False:\n 
self.remove_pin(','.join(['I_OSP%d'%(i) for i in range(num_slices)]))\n self.remove_pin(','.join(['I_OSM%d'%(i) for i in range(num_slices)]))\n self.remove_pin(','.join(['Q_OSP%d'%(i) for i in range(num_slices)]))\n self.remove_pin(','.join(['Q_OSM%d'%(i) for i in range(num_slices)]))\n if num_inv_bb == 0:\n self.remove_pin(','.join(['I_bottom_body%d'%(i) for i in range(num_slices+2)]))\n self.remove_pin(','.join(['Q_bottom_body%d'%(i) for i in range(num_slices+2)]))\n if vref_sf == False:\n self.remove_pin('I_VREF_SF_bypass')\n self.remove_pin('Q_VREF_SF_bypass')\n if use_sf == False:\n self.remove_pin('I_SF_bypass')\n self.remove_pin('Q_SF_bypass')\n self.remove_pin(','.join(['I_samp_body%d'%(i) for i in range(num_slices+2)]))\n self.remove_pin(','.join(['Q_samp_body%d'%(i) for i in range(num_slices+2)]))", "title": "" }, { "docid": "8f95e4403039959dca1a2e9b58bfa864", "score": "0.4943666", "text": "def prepare_proc():\n global Plot2,Plot3\n curves = []\n if Plot2.ds is not None:\n curves = curves + map(lambda a:a.title,Plot2.ds)\n if Plot3.ds is not None:\n curves = curves + map(lambda a:a.title,Plot3.ds)\n first_ds.options = curves\n subbed_ds.options = curves\n mult_ds.options = curves", "title": "" }, { "docid": "7f91067fcd8c38ff1a5cea4aa334783e", "score": "0.49419048", "text": "def test_qat_update_shared_qspec(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = torch.nn.Conv2d(3, 3, 3)\n self.bn = torch.nn.BatchNorm2d(3)\n self.hardtanh = torch.nn.Hardtanh()\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.hardtanh(x)\n return x\n m = M()\n example_inputs = (torch.randn(1, 3, 5, 5),)\n self._verify_symmetric_qnnpack_qat_numerics(\n M(), example_inputs, is_per_channel=False, verify_convert=True,\n )\n self._verify_symmetric_qnnpack_qat_numerics(\n M(), example_inputs, is_per_channel=True, verify_convert=True,\n )", "title": "" }, { "docid": "609e32f33d5f62ecb16b39af06d10ffd", "score": "0.4938077", "text": "def _init_internal_params(self):\n super(AfmCalculation, self)._init_internal_params()", "title": "" }, { "docid": "7bf744ad5c182fd1ad82dc68bcb79a92", "score": "0.49378535", "text": "def Correction(circuit,qubit0,qubit1):\n if qubit1 == 1:\n circuit.x(0) #open control\n \n circuit.cx(0,1)\n \n if qubit1 == 1:\n circuit.x(0) #open control\n \n if qubit0 == 1:\n circuit.x(0)\n circuit.x(1)\n \n \n return circuit", "title": "" } ]
a177402ca95837ce905adefe4e4b8d25
Load user in login manager after successful authentication
[ { "docid": "be28d3224ee95759744be9229953ab08", "score": "0.0", "text": "def load_user(id):\n return User.query.get(int(id))", "title": "" } ]
[ { "docid": "2e9c56a0d4e0780642877e9c4cf20843", "score": "0.7383222", "text": "def load_user():\n g.user = User.objects(email=session.get('logged_in')).first()", "title": "" }, { "docid": "9921d39e5804f4838eab20edbbce2cad", "score": "0.7299501", "text": "def authentication(app, user_model):\n login_manager.login_view = 'user.login'\n\n @login_manager.user_loader\n def load_user(uid):\n return user_model.query.get(uid)\n\n # @login_manager.token_loader\n def load_token(token):\n duration = app.config['REMEMBER_COOKIE_DURATION'].total_seconds()\n max = 999999999999\n serializer = URLSafeTimedSerializer(app.secret_key)\n\n data = serializer.loads(token, max_age=max)\n user_uid = data[0]\n\n return user_model.query.get(user_uid)", "title": "" }, { "docid": "705936db05148316c7e76365f23784bd", "score": "0.7239008", "text": "def authentication(app, user_model):\n login_manager.login_view = 'user.login'\n\n @login_manager.user_loader\n def load_user(uid):\n return user_model.query.get(uid)\n\n @login_manager.token_loader\n def load_token(token):\n duration = app.config['REMEMBER_COOKIE_DURATION'].total_seconds()\n serializer = URLSafeTimedSerializer(app.secret_key)\n\n data = serializer.loads(token, max_age=duration)\n user_uid = data[0]\n\n return user_model.query.get(user_uid)", "title": "" }, { "docid": "9cabbe8a75b820d558a765a8c5e3f68e", "score": "0.70479035", "text": "def load_logged_in_user():\n # import pdb; pdb.set_trace()\n # user_id = session.get('user_id')\n account_id = session.get('account_id')\n\n if account_id is None:\n g.user = None\n else:\n g.user = Account.query.get(account_id)", "title": "" }, { "docid": "dd4d5ee56102cb61910c8ac00893eced", "score": "0.67827636", "text": "def try_login(self, request, userdict):\n\n # try to log in the user\n (user, errordict) = self.sitecomp_usermanager().login_user(userdict)\n if (user != None):\n # ok it's a success, user was found and loaded.\n # tell the session about the user's identity (i.e. 
the client browser BECOMES this new user and is logged in immediately)\n request.set_user(user)\n # and now a message to show the user on the next page they load\n request.add_sessionmessage_simple(\"You have successfully logged in.\",'success')\n # return success or error\n return errordict", "title": "" }, { "docid": "9efaa0c2761deae0302b77c9b1cd642c", "score": "0.6777426", "text": "def load_logged_in_user():\n user_id = session.get(\"user_id\")\n\n if user_id is None:\n g.user = None\n else:\n g.user = (\n get_db().execute(\"SELECT * FROM user WHERE id = ?\", (user_id,)).fetchone()\n )", "title": "" }, { "docid": "c4b9b87d193e420ad6d4ee5c47786a87", "score": "0.6771475", "text": "def init_login_manager(db):\n login_manager = flask_login.LoginManager()\n principals = flask_principal.Principal()\n login_manager.anonymous_user = Anonymous\n\n @login_manager.unauthorized_handler\n def unauthorized():\n flask.abort(403)\n\n @login_manager.user_loader\n def load_user(user_id):\n return db.session.query(UserAccount).get(int(user_id))\n\n @principals.identity_loader\n def identity_loader():\n return flask_principal.AnonymousIdentity()\n\n return login_manager, principals", "title": "" }, { "docid": "e808da98ae19de3431e2ba51fc20a0b0", "score": "0.66220355", "text": "def authenticate(self):\n user = self.load_user()\n self.user_adapter.authenticate(user.login, user.password)\n logger.info('Successfully authenticated!')", "title": "" }, { "docid": "5da8be12c006a5f5d25df300d15169ca", "score": "0.65349", "text": "def login(self):\r\n self.verify_user_exists()\r\n if len(self.users) == 1:\r\n self.load_user(self.users[0])\r\n return\r\n\r\n print('~ Users: ', str(self.users)[1:-1])\r\n username = ''\r\n while username not in self.users:\r\n username = input('\\n~ Username: ')\r\n if username not in self.users:\r\n print('\\n~ Please enter a valid username')\r\n continue\r\n self.load_user(username)", "title": "" }, { "docid": "de6fa07d5567b0585ef23190555f6c3b", "score": "0.6505009", "text": "def user_access(self):\n self.verify_user(new_user)\n response = self.login_user(user_login)\n self.add_credentials(response.data['token'])", "title": "" }, { "docid": "68f439963d591894a678278034ee131e", "score": "0.64659286", "text": "def do_login(user):\n\n session[CURR_USER_KEY] = user.id", "title": "" }, { "docid": "68f439963d591894a678278034ee131e", "score": "0.64659286", "text": "def do_login(user):\n\n session[CURR_USER_KEY] = user.id", "title": "" }, { "docid": "1aeb3bba33e0eb5870e2056ba497391b", "score": "0.64284664", "text": "def user_loader_callback(identity):\n\n if not identity:\n return None\n\n return User.query.filter_by(id=identity).first()", "title": "" }, { "docid": "ec401362c3f27ec4974e24a9e4a1f04f", "score": "0.64179", "text": "def setup_user():\n if 'auth_user' in flask.session:\n user = dataBaser.User.query.get(flask.session['auth_user'])\n if user is None:\n # old bad cookie, no good\n del flask.session['auth_user']\n # save the user in `flask.g`, which is a set of globals for this request\n flask.g.user = user", "title": "" }, { "docid": "53c23a6302db35fb2ba9df0892a21a6c", "score": "0.64165235", "text": "def login(self, user_id):\n pass", "title": "" }, { "docid": "43ba4cf4d547be693a777a332ff95cf7", "score": "0.6394678", "text": "def getUserByLogin(self):", "title": "" }, { "docid": "220813ba256b52fe6b60e3067c576226", "score": "0.63622826", "text": "def at_pre_login(self):\n pass", "title": "" }, { "docid": "18d525be9fb2bd155cae3baaa6e28289", "score": "0.6353752", "text": "def 
session_login():\r\n response = user_obj.login_user()\r\n return response", "title": "" }, { "docid": "d9e62b157a9a4bb7aeeb475c0809205b", "score": "0.6322814", "text": "def authenticate(self, *args, **kwargs):\r\n return User.objects.get(username='root')", "title": "" }, { "docid": "1ee6e2b6b3c730dc7b57cbb7b2b5be5e", "score": "0.63151944", "text": "def _login(self):\n user_profile = self.query_user_profile()\n user_setting = self.query_user_setting()\n transfer_bid = self.get_transfer_bid()\n user_profile.update(user_setting)\n user_profile.update(transfer_bid)\n self._update_user_info(user_profile)", "title": "" }, { "docid": "2b8f8a438ea7d6eb14cdd69e51454efe", "score": "0.62990123", "text": "def get_user_to_login(**kwargs):\n\n try:\n user = UserModel._default_manager.get(**kwargs)\n return user\n except ObjectDoesNotExist:\n raise ObjectDoesNotExist", "title": "" }, { "docid": "9e4bc32284a141dd84a998cf61cb6ef2", "score": "0.6269496", "text": "def login_user(user):\n session[CURR_USER_KEY] = user.id", "title": "" }, { "docid": "3c6d425126347f5da3ed5a1b7bc6450a", "score": "0.6253112", "text": "def login_user(request, user):\n\tfrom django.contrib.auth import load_backend, login\n\timport settings\n\tif not hasattr(user, 'backend'):\n\t\tfor backend in settings.AUTHENTICATION_BACKENDS:\n\t\t\tif user == load_backend(backend).get_user(user.pk):\n\t\t\t\tuser.backend = backend\n\t\t\t\tbreak\n\tif hasattr(user, 'backend'):\n\t\treturn login(request, user)", "title": "" }, { "docid": "a3ae34843b2adf57aee6d6de4d32b7e2", "score": "0.6237531", "text": "def form_valid(self, form):\n user = form.save()\n login(self.request, user)\n return super().form_valid(form)", "title": "" }, { "docid": "2b973c9495b348c137b30cc415aef0a9", "score": "0.6185735", "text": "async def load_user(self, request: Request) -> Any:\n if USER_KEY not in request:\n session = self.load_from_request(request)\n if \"id\" not in session:\n return None\n\n user = self._user_loader(session[\"id\"])\n if isawaitable(user):\n user = await user\n request[USER_KEY] = user\n\n return request[USER_KEY]", "title": "" }, { "docid": "acad0c7f84c8fe6187311043997dbcb1", "score": "0.617693", "text": "def login():\n\tstore = get_default_store()\n\tuser = User.authenticate(store, request.form['email'],request.form['password'])\n\tif user:\n\t\tsession['user'] = user.id\n\t\treturn user.json()\n\tabort(403)", "title": "" }, { "docid": "7c07f9c050d8e8f2b18983c8d2be6945", "score": "0.6174583", "text": "def loginAsManager(self, user='root', pwd='secret'):\n self.browser.open('http://nohost/plone/')\n self.browser.getLink('Log in').click()\n self.browser.getControl('Login Name').value = user\n self.browser.getControl('Password').value = pwd\n self.browser.getControl('Log in').click()", "title": "" }, { "docid": "f173da31964a995aff940c35f09349f5", "score": "0.61741865", "text": "def login_user(service, user):\n session['logged_in'] = True\n session['user'] = user\n session['organization'] = dict(service.getRelatingOrganization(employeeEmail=user['email']))", "title": "" }, { "docid": "96f20e1682be57c86ac098a987bcc7de", "score": "0.61732084", "text": "async def initialize(self) -> None:\n self.user = await self._load_data()\n if not self.user:\n LOGGER.debug('Session initialized without a user')\n return\n\n self.authenticated = await self.user.authenticate()\n if not self.authenticated:\n self.user = None\n return\n\n if self.user.should_refresh:\n await self.user.refresh()\n await self.save()", "title": "" }, { "docid": 
"90ed83bdd0310f08a7f611bf359b0a23", "score": "0.6167959", "text": "def login_as(request, user):\n\tfrom django.contrib.auth import load_backend, login\n\tif not hasattr(user, 'backend'):\n\t\tfor backend in settings.AUTHENTICATION_BACKENDS:\n\t\t\tif user == load_backend(backend).get_user(user.pk):\n\t\t\t\tuser.backend = backend\n\t\t\t\tbreak\n\tif hasattr(user, 'backend'):\n\t\treturn login(request, user)", "title": "" }, { "docid": "2e5ec4e5faac158d60739b9d77ed16fe", "score": "0.6140863", "text": "def configure_extensions(app):\n\n login_manager.init_app(app)\n login_manager.login_view = '.login'\n login_manager.login_message = u'Ops! Você ainda está deslogado.'\n login_manager.login_message_category = 'info'\n\n from bolao.models import User\n\n @login_manager.user_loader\n def load_user(userid):\n return User.query.get(userid)", "title": "" }, { "docid": "aa9a264eba13989fde7cf7f779e6619f", "score": "0.6134488", "text": "def user_loader(user_id):\n return User.query.get(int(user_id))", "title": "" }, { "docid": "d71bcfa30d3f91143fac59072aad5dc3", "score": "0.61334944", "text": "def user_loader(user_id):\n return User.query.get(user_id)", "title": "" }, { "docid": "d71bcfa30d3f91143fac59072aad5dc3", "score": "0.61334944", "text": "def user_loader(user_id):\n return User.query.get(user_id)", "title": "" }, { "docid": "641963c6e8e88ab25948bea0384a7f0d", "score": "0.6106284", "text": "def secondarylogin(self, request, *args, **kwargs):\n\n # Get the parameters from the request\n username = request.data['username']\n password = request.data['password']\n remember = request.data.get('remember', False)\n\n # I don't imagine I need to pass the username and password for this to work. :D\n\n # Attempt authentication\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n # set the expiration to 0 if remember wasn't requested\n if not remember:\n request.session.set_expiry(0)\n return HttpResponse(self.serializer_class(user).data)\n else:\n return HttpResponse(status=status.HTTP_403_FORBIDDEN)\n else:\n return HttpResponse(status=status.HTTP_401_UNAUTHORIZED)", "title": "" }, { "docid": "16e9cdbc3e4b07b19fb2e546361e9edc", "score": "0.61047673", "text": "def get_login_data(self, user):\r\n token, created = Token.objects.get_or_create(user=user)\r\n result = self.get_token_data(user, token)\r\n return result", "title": "" }, { "docid": "8784c96b111d362dd6a07dd6f39a4a47", "score": "0.6094695", "text": "def configure_user(self, user):\r\n return user", "title": "" }, { "docid": "cd961fd63fcbc085c765d2c8a7061fd7", "score": "0.6094294", "text": "def login():", "title": "" }, { "docid": "cd961fd63fcbc085c765d2c8a7061fd7", "score": "0.6094294", "text": "def login():", "title": "" }, { "docid": "48f2dd0d9a3a7449efccb53defe1c770", "score": "0.60679466", "text": "def set_user(self):\r\n if not self.user:\r\n response = flickr.auth.oauth.checkToken(format='parsed-json')\r\n if response['stat'] == \"ok\":\r\n self.user = response['oauth']['user']", "title": "" }, { "docid": "ef04e9cdf71f20785f70820446776f11", "score": "0.60487455", "text": "def user_loader(username):\n user = User()\n user.id = username\n return user", "title": "" }, { "docid": "329d0aa653e64a03640e8b6dc5aeeedd", "score": "0.6043262", "text": "def load_current_user():\n\n cookie_name = flask.current_app.config['COOKIE_NAME']\n flask.g.user = None\n if cookie_name in request.cookies:\n cookie_value = utils.decode_value(cookie_name, request.cookies[cookie_name])\n try:\n 
flask.g.user = _build_session_user(**json.loads(cookie_value))\n except Exception as e:\n # Ignore invalid user cookie and overwrite on the next login.\n _log.error('Can not load the user session: %s\\ncookie_value: %s' % (str(e), cookie_value))", "title": "" }, { "docid": "41f1eff9f17107f3c942fe6b7151cd18", "score": "0.60412294", "text": "def post(self):\n return Auth.login_user()", "title": "" }, { "docid": "f1fca10a405a216ec30571c103f187f1", "score": "0.60168934", "text": "def setup_auth(app, user_lookup):\n #ToDo: This has to be made more general to use it with other \"modules\" of the api.\n\n @app.before_request\n def load_user():\n ctx = _request_ctx_stack.top\n ctx.user = ApiUser()\n user_id = session.get(\"login_user_id\")\n if user_id is not None:\n user = user_lookup(user_id)\n if user is not None and not user.is_banned():\n ctx.user = user\n\n #todo: better session handling (permanent session, expiring, bla)", "title": "" }, { "docid": "b04f4b261cef8104cf28d4390f14f80d", "score": "0.60137856", "text": "def load(self, user):\n\t\traise NotImplementedError('Subclass SessionStore and override load()')", "title": "" }, { "docid": "8d44a273479f5f4412cefaf8ee7bde8e", "score": "0.6007149", "text": "def login(app):\n with allure.step('Login as a user'):\n app.signin.enter_actor(\n CREDENTIALS['User_name'],\n CREDENTIALS['User_password'])\n # loger('Login', 'info', 'Login as User')", "title": "" }, { "docid": "48d87b1247a4ba21712deb46978ca2a0", "score": "0.5978155", "text": "def userinit():\n session.userid = auth.user\n session.continent = auth.user.continent\n session.country = auth.user.country\n session.subdivision = auth.user.subdivision\n session.level = auth.user.level\n return", "title": "" }, { "docid": "f58072c2bdb82aeea34d19a6053b4ef8", "score": "0.59691894", "text": "def load(self, req_handler):\n user_id = req_handler.get_secure_cookie(self.SESSION_KEY)\n if not user_id:\n return None\n author = self.author_thing.find_by_id(int(user_id))\n req_handler.current_user = author", "title": "" }, { "docid": "3dd3b49a13f471e289d5d43115ccad02", "score": "0.5957962", "text": "def load_user(self, username):\r\n with open(os.path.join(settings.USERS_DIR, username+'.yml'), 'r') as f:\r\n self.user = yaml.load(f)\r\n log.debug('Logged in as: '+self.user['user_api']['username'])", "title": "" }, { "docid": "634e1f49a59fbb89a31f929b204da2df", "score": "0.59478927", "text": "def test_login_existing_user_success(self, mock_validate_request):\n mock_validate_request.return_value = True\n self.client.post(\n \"/lti/lti_initializer/\",\n {\n \"lis_person_contact_email_primary\": \"[email protected]\",\n \"custom_canvas_course_id\": \"327\",\n \"context_title\": \"test title\"\n }\n )\n user = User.objects.get(username=\"user1\")\n self.assertTrue(user.is_authenticated)", "title": "" }, { "docid": "1e2b5517c56379db18076f5eba97e2cf", "score": "0.59470356", "text": "def load_user(user_id):\n db_session = db.get_session()\n return UserStore.get(db_session, user_id)", "title": "" }, { "docid": "4208740198c9f35c85a9cf6377a72985", "score": "0.59369904", "text": "def test_login(self):\n models.User('THE_USER', '[email protected]', 'default')\n session.commit()\n response = self.client.get('/en/')\n self.assert_('THE_USER' not in response.data)\n self.login('THE_USER', 'default')\n response = self.client.get('/en/')\n self.assert_('THE_USER' in response.data)", "title": "" }, { "docid": "382bf51bc51cdda17e8126052c0cdd96", "score": "0.5931378", "text": "def loginUser (self, un, pw):\n uid,h,spw = 
self.getSession (un, pw)\n flmo.req_privs (flmo.Handle (h), spw)\n u = flmwu.User (uid=uid)\n u.readTags ()\n return u", "title": "" }, { "docid": "a62cabfa3f4aeb807e53374c712a1974", "score": "0.5924504", "text": "def login_user(username):\n save_user_session(username)\n show_home_screen()", "title": "" }, { "docid": "ce986f838ce11d4fe339ae7a0e49b9b7", "score": "0.59210575", "text": "def get_user():\n pass", "title": "" }, { "docid": "e0b3c2a893818e24e5d423d107c15aed", "score": "0.59201837", "text": "def on_login(self, username):", "title": "" }, { "docid": "be5d05c7733a6973fa59186dc5cbf240", "score": "0.5917878", "text": "def user_loader(user_id):\n user_entry = User.getById(user_id)\n if user_entry is not None:\n user = User(user_entry[0], user_entry[1])\n return user\n else:\n return None", "title": "" }, { "docid": "3bd7a75746e9ad0ca72d3fedbc9b2cd3", "score": "0.59140617", "text": "def user_login():\n data = request.get_json(force=True)\n email = data['email']\n password = data['password']\n\n try:\n logged_in_user = User.get_by_login_credentials(email=email, password=password)\n except ValueError as e:\n return jsonify(success=False, error=str(e)), 400\n\n auth_token = logged_in_user.generate_auth_token()\n return jsonify(success=True, auth_token=auth_token)", "title": "" }, { "docid": "ed2aa0108eaf60a7602b423c3ead2e53", "score": "0.5911971", "text": "def test_force_login_with_backend_missing_get_user(self):\n self.client.force_login(self.u1)\n self.assertEqual(self.u1.backend, \"django.contrib.auth.backends.ModelBackend\")", "title": "" }, { "docid": "c60f29b09f647949cb48ff5f8c426dd0", "score": "0.5910997", "text": "def login_user():\n\tuser_data = request.get_json()\n\n\tif not user_data:\n\t\traise AuthException(\"Missing user data\")\n\n\tuser = User(user_data['username'], password=user_data['password'])\n\n\tif (user.role != Role.admin and\n\t\t\tuser.role != Role.user and\n\t\t\tuser.role != Role.guest):\n\t\traise AuthException(\"Unsupported role\")\n\n\tuser = auth.login_user(user)\n\n\tsession_id = auth.store_session(user)\n\n\treturn(json_util.dumps({\"session_id\" : session_id, \"user\" : user.to_dict()}))", "title": "" }, { "docid": "86af10186c500af05b92fc8dc36e5886", "score": "0.59012175", "text": "def _login(self, fsm):\n\n loginname = self.param(\"loginname\")\n passwd = self.param(\"password\")\n cr_id = self.param(\"cr_id\")\n cr_key = self.param(\"cr_key\")\n cr_auth = self.param(\"cr_auth\")\n\n if (loginname == None) or (loginname == \"\"):\n self._login_error()\n return\n if passwd == None:\n passwd = \"\"\n\n pwman = PasswordMan.PasswordMan(self.get_config(\"path_to_users_db\"), self.get_config(\"temp_dir\"))\n\n if cr_auth == \"on\":\n userId = pwman.get_userId_with_cr(loginname, passwd, cr_id)\n else:\n userId = pwman.get_userId(loginname, passwd)\n\n if userId < 0:\n self._login_error()\n #print \"login:\" + loginname + \" passwd:\" + passwd\n return\n else:\n self._login_succeed()", "title": "" }, { "docid": "591e8eecb14e644d9b86a17f4e9055a0", "score": "0.58985186", "text": "def user_required(handler):\n def check_login(self, *args, **kwargs):\n auth = self.auth\n if not auth.get_user_by_session():\n self.redirect(self.uri_for('login'), abort=True)\n else:\n return handler(self, *args, **kwargs)\n\n return check_login", "title": "" }, { "docid": "6a109d217064268b545e572f3ae364c3", "score": "0.5898421", "text": "def authenticate_human_user(self, user_login, user_password, auth_token=None):\n if not user_login:\n raise ValueError(\"Please supply a username to 
authenticate.\")\n\n if not user_password:\n raise ValueError(\"Please supply a password for the user.\")\n\n # Override permissions on Config obj\n original_login = self.config.user_login\n original_password = self.config.user_password\n original_auth_token = self.config.auth_token\n\n self.config.user_login = user_login\n self.config.user_password = user_password\n self.config.auth_token = auth_token\n\n try:\n data = self.find_one(\"HumanUser\", [[\"sg_status_list\", \"is\", \"act\"],\n [\"login\", \"is\", user_login]],\n [\"id\", \"login\"], \"\", \"all\")\n # Set back to default - There finally and except cannot be used together in python2.4\n self.config.user_login = original_login\n self.config.user_password = original_password\n self.config.auth_token = original_auth_token\n return data\n except Fault:\n # Set back to default - There finally and except cannot be used together in python2.4\n self.config.user_login = original_login\n self.config.user_password = original_password\n self.config.auth_token = original_auth_token\n except Exception:\n # Set back to default - There finally and except cannot be used together in python2.4\n self.config.user_login = original_login\n self.config.user_password = original_password\n self.config.auth_token = original_auth_token\n raise", "title": "" }, { "docid": "39aa08ccde12c897798e24d5b38449cc", "score": "0.58976084", "text": "def before_request():\n g.user = None\n if 'openid' in session:\n g.user = User.query.filter_by(openid=session['openid']).first()", "title": "" }, { "docid": "7eb2d9eee9b8088e9cd6fb0c37721c9b", "score": "0.58855635", "text": "def before_request():\n\n # Check whether or not the server-side cookie (session created on log in) exists and is valid\n\n g.user = okta_client.get_user(oidc.user_getfield(\n \"sub\")) if oidc.user_loggedin else None", "title": "" }, { "docid": "367bbc31719e45440e4e78edbef8bf16", "score": "0.58822334", "text": "def as_user(self, user, restore='admin'):\n @contextlib.contextmanager\n def user_ctx():\n try:\n login_ok = False\n try:\n self.logout()\n except:\n pass\n if user:\n self.login(user)\n login_ok = True\n yield self\n finally:\n if login_ok:\n try:\n self.logout()\n except:\n pass\n if restore:\n self.login(restore)\n\n return user_ctx()", "title": "" }, { "docid": "527f904931dec21e988b097378928711", "score": "0.5875898", "text": "def set_login(self, user, passwd):\n self.session.auth = (user, passwd)\n\n return", "title": "" }, { "docid": "78df3fc52f2ea4e79def8a94cafd97ae", "score": "0.5870215", "text": "def login(self):\n r = self.get(self.home_page)\n self.user = self.extract_user_data(r.content)\n if self.user:\n self.is_logged_in = True\n else:\n time.sleep(2)\n login_page = self.home_page + 'login/?referrer=home_page'\n self.get(login_page)\n time.sleep(3)\n data = url_encode({\n 'source_url': '/login/?referrer=home_page',\n 'data': json.dumps({\n 'options': {'username_or_email': self.username_or_email,\n 'password': self.password},\n \"context\": {}\n }).replace(' ', '')\n })\n url = self.home_page + 'resource/UserSessionResource/create/'\n result = self.post(url=url, data=data, ajax=True).json()\n # By using get avoid KeyError if \"error\" key is not present\n error = result['resource_response'].get('error')\n if error is None:\n self.user = self.extract_user_data(self.get(self.home_page).content)\n self.is_logged_in = True\n else:\n raise PinterestLoginFailedException('[%s Login failed] %s' %\n (error['http_status'], error['message']))\n return self.is_logged_in", "title": "" }, { 
"docid": "1696860695ab7dd50478f09eea7ffdfe", "score": "0.5868915", "text": "def login_user(self, token):\n if \"Project Service\" in self.rmt._config.sections():\n host = self.rmt._config.get(\"Project Service\", \"host\")\n protocol = self.rmt._config.get(\"Project Service\", \"protocol\")\n else:\n host = self.rmt._config.get(\"Default\", \"host\")\n protocol = self.rmt._config.get(\"Default\", \"protocol\")\n\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Authorization': 'Bearer ' + token\n }\n\n # Hit one of the API endpoints to effectively, login.\n url = protocol + '://' + host + '/' + API_VER + '/collection/'\n session = Session()\n request = Request('GET', url, headers=headers)\n prep = session.prepare_request(request)\n response = session.send(prep, verify=False)", "title": "" }, { "docid": "d7181bb6645fd73e350500470a67316b", "score": "0.5867491", "text": "def login(self):\n return True", "title": "" }, { "docid": "382087a9b4c7721eecb4cedcfedc8731", "score": "0.5863386", "text": "def on_start(self):\n self.login()", "title": "" }, { "docid": "705800e8263d711048aa9e0817b41871", "score": "0.58533853", "text": "def _auth(self, uid=None):\n if uid is not None:\n self.uid = uid\n else:\n self.uid = self.load_user().uid\n\n self.user_adapter.uid = self.uid", "title": "" }, { "docid": "ea70d08b398a187d1a0254622a1eafe9", "score": "0.5852568", "text": "def login_user():\n print('Login')\n auth = request.authorization\n # print(request.authorization)\n if not auth or not auth.username or not auth.password:\n return make_response('could not verify', 401, {'WWW.Authentication': 'Basic realm: \"login required\"'})\n # print(auth.username)\n session = Session()\n repository = UserRepository(session)\n users_db = repository.get_all()\n user = next((s for s in users_db if s.email == auth.username), None)\n if user is not None and check_password_hash(user.password, auth.password):\n token = jwt.encode(\n {'public_id': user.id, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60)},\n app.config['SECRET_KEY'])\n return jsonify({'token': token, 'user': user.to_json()})\n return make_response('could not verify', 401, {'WWW.Authentication': 'Basic realm: \"login required\"'})", "title": "" }, { "docid": "d085635d0fd13170bffa09bbfe631f95", "score": "0.5851649", "text": "def test_user_in_login(self):\n response = self.client.post('/accounts/login/', self.usercreate, follow=True)\n self.assertTrue(response.context['user'].is_active)", "title": "" }, { "docid": "80b36adac91b91a782b532e7a7b0d70e", "score": "0.5849562", "text": "def before_request():\n g.user = None\n if 'user_id' in session:\n g.user = User.query.get(session['user_id'])", "title": "" }, { "docid": "e2a037bfc1b2853634ea8cc975eb8d94", "score": "0.584342", "text": "def login():\n\tglobal logged_in\n\tusername = input(\"Username: \")\n\tpassword = getpass.getpass(\"Password: \")\n\n\tuser = session.query(User).filter(User.username == username, User.password == password).first()\n\t# if user == None:\n\t# \tprint(\"User is not registered, please register.\")\n\n\tlogged_in = user\n\n\treturn user", "title": "" }, { "docid": "7024cd8d28b0b6bb8f06dbc6cd2b1d79", "score": "0.58419806", "text": "def load_user(user_id):\n return User.get_by_id(int(user_id))", "title": "" }, { "docid": "7024cd8d28b0b6bb8f06dbc6cd2b1d79", "score": "0.58419806", "text": "def load_user(user_id):\n return User.get_by_id(int(user_id))", "title": "" }, { "docid": "7024cd8d28b0b6bb8f06dbc6cd2b1d79", "score": "0.58419806", "text": "def 
load_user(user_id):\n return User.get_by_id(int(user_id))", "title": "" }, { "docid": "1af727859f07fea83e4c3f102ad99a6f", "score": "0.58376396", "text": "def user_loader(user_id):\n return Member.query.get(user_id)", "title": "" }, { "docid": "1265127fea8b69168664e6f6b4731f35", "score": "0.58294654", "text": "def load_user(user_id):\n user = session.query(User).filter_by(id=user_id).first()\n if not user:\n flash('invalid username or password')\n abort(400)\n return user", "title": "" }, { "docid": "07926a0481cdbee46f7260bf1b066d5a", "score": "0.582542", "text": "def handle_login():\n data = get_request_data()\n email = str(data['email']).lower()\n password = str(data['password'])\n user = User.query.filter_by(_email=email).first()\n\n if not user:\n raise BadRequest('UserNotFound', 'user not found')\n\n if not user.check_password(password):\n raise BadRequest('WrongPassword', 'incorrect password')\n\n login_user(user, remember=False)\n\n return successful_response()", "title": "" }, { "docid": "a69e9a408574ed7345653f03655cb837", "score": "0.5823755", "text": "def _handle_successful_authentication_and_login(user, request):\n if LoginFailures.is_feature_enabled():\n LoginFailures.clear_lockout_counter(user)\n\n _track_user_login(user, request)\n\n try:\n django_login(request, user)\n request.session.set_expiry(604800 * 4)\n log.debug(\"Setting user session expiry to 4 weeks\")\n\n # Announce user's login\n SESSION_LOGIN_COMPLETED.send_event(\n user=UserData(\n pii=UserPersonalData(\n username=user.username,\n email=user.email,\n name=user.profile.name,\n ),\n id=user.id,\n is_active=user.is_active,\n ),\n )\n except Exception as exc:\n AUDIT_LOG.critical(\"Login failed - Could not create session. Is memcached running?\")\n log.critical(\"Login failed - Could not create session. 
Is memcached running?\")\n log.exception(exc)\n raise", "title": "" }, { "docid": "a9471cb7330959a88fe293958689fb73", "score": "0.58202374", "text": "def login(self, user):\n self.set_secure_cookie('user_id', str(user.key.id()))", "title": "" }, { "docid": "f18e083930f73bfeb7bba8fd3ad15e3f", "score": "0.58194053", "text": "def test_load_user(self):\n\n load_user = services.load_user\n\n user = makeOne()\n user_id = user.id\n self.assertEqual(user, load_user(unicode(user_id)))\n\n # A non existing user should return None.\n self.assertEqual(None, load_user(unicode(50)))", "title": "" }, { "docid": "ba41f070295a78085fc6a2f330a0d236", "score": "0.5814357", "text": "def user_required(handler):\n\n def check_login(self, *args, **kwargs):\n # Make sure there is a provider and credentials stored in session\n provider = session.get('provider')\n credentials = session.get('credentials')\n if not credentials or not provider:\n return redirect(\"/login\")\n\n # check if there is a user\n if not self.user:\n return redirect(\"/login\")\n\n return handler(self, *args, **kwargs)\n\n return check_login", "title": "" }, { "docid": "273888bdf1fc6e49b4bad20149b1e991", "score": "0.5813547", "text": "async def _load_data(self) -> typing.Optional['user.User']:\n LOGGER.debug('Loading session %s', self.id)\n result = await self._redis.get(self._redis_key)\n if not result:\n LOGGER.info('Session %r not found', self.id)\n return\n data = json.loads(result.decode('utf-8'))\n self.last_save = data['last_save']\n self.start = data['start']\n if not data.get('user'):\n return\n\n password = data['user'].pop('password', None)\n if password is not None:\n password = self._application.decrypt_value(password)\n\n user_obj = user.User(self._handler.application, password=password)\n for key, value in data['user'].items():\n setattr(user_obj, key, value)\n return user_obj", "title": "" }, { "docid": "653b2df4c9c85634b89f14a02eca56ac", "score": "0.58096874", "text": "def on_login(self, request, user):\n # logger.info(\"20130923 on_login(%s)\" % user)\n\n request.user = user\n\n user_language = user.language or settings.SITE.get_default_language()\n\n if request.method == 'GET':\n rqdata = request.GET\n elif request.method in ('PUT', 'DELETE'):\n # raw_post_data before Django 1.4\n rqdata = http.QueryDict(request.body)\n elif request.method == 'POST':\n rqdata = request.POST\n else:\n # e.g. OPTIONS, HEAD\n if len(settings.SITE.languages) > 1:\n translation.activate(user_language)\n request.LANGUAGE_CODE = translation.get_language()\n #~ logger.info(\"20121205 on_login %r\",translation.get_language())\n request.requesting_panel = None\n request.subst_user = None\n return\n # ~ else: # DELETE\n #~ request.subst_user = None\n #~ request.requesting_panel = None\n #~ return\n\n if len(settings.SITE.languages) > 1:\n\n user_language = rqdata.get(\n constants.URL_PARAM_USER_LANGUAGE, user_language)\n translation.activate(user_language)\n request.LANGUAGE_CODE = translation.get_language()\n\n su = rqdata.get(constants.URL_PARAM_SUBST_USER, None)\n if su is not None:\n if su:\n try:\n su = settings.SITE.user_model.objects.get(id=int(su))\n #~ logger.info(\"20120714 su is %s\",su.username)\n except settings.SITE.user_model.DoesNotExist:\n su = None\n else:\n su = None # e.g. 
when it was an empty string \"su=\"\n request.subst_user = su\n request.requesting_panel = rqdata.get(\n constants.URL_PARAM_REQUESTING_PANEL, None)\n #~ logger.info(\"20121228 subst_user is %r\",request.subst_user)\n #~ if request.subst_user is not None and not isinstance(request.subst_user,settings.SITE.user_model):\n #~ raise Exception(\"20121228\")", "title": "" }, { "docid": "de6adbd2448ba725f2c9813d91a463b0", "score": "0.5800675", "text": "def process_login():\n\n # Get user's login credentials from login form\n email = request.form.get('email')\n password = request.form.get('password')\n \n # Look up if user exists in system\n user = customers.get_by_email(email)\n if user != None:\n\n # Verify if user provided correct password\n if user.is_correct_password(password):\n flash('Successfully logged in.')\n session['email'] = email\n return redirect('/melons')\n\n # Handle incorrect password\n flash('Incorrect username and/or password.')\n return redirect('/login')\n \n # Handle users not in system\n flash('Username does not exist.')\n return redirect('/login')", "title": "" }, { "docid": "62560ab3365be4509e55da6f6dadf01e", "score": "0.57992184", "text": "def user_login():\n global curr_user, user_database\n curr_user = \"\"\n username_input = input(\"\\nenter your username: \")\n password_input = getpass(prompt=\"enter your password: \", stream = None)\n if(not user_database.user_in_DB(username_input)): # checking if the user in the databse\n user_database.register_user(username_input, password_input)\n curr_user = User(username_input, password_input, user_database.getID(username_input)) # creating a User object with its credentials\n return user_database.confirm_User(username_input, password_input)", "title": "" }, { "docid": "625b5d89730d3e2d2f949a7cb5daa9ef", "score": "0.57826453", "text": "def lfs_login(request, user):\n if user is None:\n user = request.user\n # TODO: It would be nice to support different login methods, like signed cookies.\n user.last_login = datetime.datetime.now()\n user.save()\n\n if SESSION_KEY in request.session:\n if request.session[SESSION_KEY] != user.id:\n # To avoid reusing another user's session, create a new, empty\n # session if the existing session corresponds to a different\n # authenticated user.\n request.session.flush()\n else:\n pass\n # request.session.cycle_key()\n request.session[SESSION_KEY] = user.id\n request.session[BACKEND_SESSION_KEY] = user.backend\n if hasattr(request, 'user'):\n request.user = user\n\n ### LFS stuff\n cart_utils.update_cart_after_login(request)\n customer_utils.update_customer_after_login(request)", "title": "" }, { "docid": "638800daf502303db6da0dee63ed5da4", "score": "0.57755417", "text": "def get_user_from_login(login):\n try:\n # Make sure the user exist\n if not FacadeUser.is_exist_by_login(login):\n return None\n return FacadeUser.get_by_login(login)\n except Exception as e:\n raise ServiceException('USER_DATABASE_QUERY_FAIL', \"Unable to load the user information.\", str(e))", "title": "" }, { "docid": "446280c20ac78904baac3adfc0a92ec4", "score": "0.576996", "text": "def inject_user():\n if session.get('logged_in', None):\n return dict(user=User.objects.get(email=session['logged_in']))\n else:\n return dict(user=None)", "title": "" }, { "docid": "24f5887be17070a895a63a0c47d1433e", "score": "0.57607996", "text": "def test_authenticated_user_can_load_profile(self):\n create_user('ron', 'Ron')\n self.client.login(username='ron', password='123123')\n response = self.client.get(reverse('user_api'))\n 
self.assertEqual(response.status_code, 200)\n self.assertTrue(response.content)\n self.assertEqual(json.loads(response.content),\n {'first_name': 'Ron',\n 'last_name': 'Bobski'})", "title": "" }, { "docid": "bc7471d9cc707c17c44d1c3887f8a57e", "score": "0.57559454", "text": "def login_admin(cls):\n utils.check_or_try_logout_user(cls) # in case user changed during another test\n cls.headers, cls.cookies = utils.check_or_try_login_user(cls, username=cls.usr, password=cls.pwd)\n assert cls.headers and cls.cookies, cls.require # nosec\n cls.headers.update(cls.json_headers)", "title": "" }, { "docid": "77336a0827d71f23aebefc54afd8e153", "score": "0.5749649", "text": "def auto_login(self, user):\n self.set_login_field_text(user.login)\n self.set_password_field_text(user.password)\n return self.click_submit_button()", "title": "" }, { "docid": "cf1636cc0dd2a81fccf05c7079fff940", "score": "0.574934", "text": "def load_user_from_token(token):\n #The Token was encrypted using itsdangerous.URLSafeTimedSerializer which\n #allows us to have a max_age on the token itself. When the cookie is stored\n #on the users computer it also has a expiry date, but could be changed by\n #the user, so this feature allows us to enforce the expiry date of the token\n #server side and not rely on the users cookie to expire.\n max_age = app.config[\"REMEMBER_COOKIE_DURATION\"].total_seconds()\n\n try:\n #Decrypt the Security Token, data = [username, hashpass]\n data = login_serializer.loads(token, max_age=max_age)\n user_data = user_service_proxy.get_user(\n user_id=data[0],\n auth_token_username=data[0]\n )\n except:\n return None\n if user_data:\n return AuthUser(user_data)\n else:\n return None", "title": "" }, { "docid": "d1dcfe372233520c693ab38e9c100a93", "score": "0.5746156", "text": "def load_user(id):\n if id in USERS:\n return User(id)\n return None", "title": "" }, { "docid": "b5c3e10e3a51752b38073e4b8d29902f", "score": "0.5744994", "text": "def get_user(self):\n return TsUser.objects.get(user_name=self.validated_data[\"user_name\"])", "title": "" }, { "docid": "283405a0ab88725ac25ffac789008782", "score": "0.5741192", "text": "def test_user_authentication(self):\n response = self.client.get(self.home_url)\n user = response.context.get('user')\n self.assertTrue(user.is_authenticated)", "title": "" }, { "docid": "ba7427846f6be12bda38d7fb7757142c", "score": "0.573319", "text": "def authenticate(self, request):\n session = request.session\n if not session:\n return None\n user_id = session.get('auth_user_id', None)\n user = OSFUser.load(user_id)\n if not user:\n return None\n if waffle.switch_is_active(features.ENFORCE_CSRF):\n self.enforce_csrf(request)\n # CSRF passed with authenticated user\n check_user(user)\n return user, None", "title": "" } ]
f262780237a318b08ee6533c0c2b718c
The initial requested URI cannot be modified during the user interactive authentication session.
[ { "docid": "89a8b1d7679d46e64d3283cc34e9200c", "score": "0.5436516", "text": "def test_cannot_change_uri(self) -> None:\n # Create a second login.\n self.login(\"test\", self.user_pass, \"dev2\")\n\n # Attempt to delete the first device.\n # Returns a 401 as per the spec\n channel = self.delete_device(\n self.user_tok, self.device_id, HTTPStatus.UNAUTHORIZED\n )\n\n # Grab the session\n session = channel.json_body[\"session\"]\n # Ensure that flows are what is expected.\n self.assertIn({\"stages\": [\"m.login.password\"]}, channel.json_body[\"flows\"])\n\n # Make another request providing the UI auth flow, but try to delete the\n # second device. This results in an error.\n #\n # This makes use of the fact that the device ID is embedded into the URL.\n self.delete_device(\n self.user_tok,\n \"dev2\",\n HTTPStatus.FORBIDDEN,\n {\n \"auth\": {\n \"type\": \"m.login.password\",\n \"identifier\": {\"type\": \"m.id.user\", \"user\": self.user},\n \"password\": self.user_pass,\n \"session\": session,\n },\n },\n )", "title": "" } ]
[ { "docid": "e27c96184d3d5eb28e76d38f0383954d", "score": "0.5817669", "text": "def test_authenticated_uri(self):\n request = self.factory.get(self.uri)\n force_authenticate(request, self.user)\n response = self.view(request)\n response.render()\n self.assertEqual(response.status_code, 200,\n 'Expected Response Code 200, received {0} instead.'\n .format(response.status_code))", "title": "" }, { "docid": "acf78a1c561910dc77d533a3c271b786", "score": "0.5647407", "text": "def test_not_authenticated_uri(self):\n request = self.factory.get(self.uri)\n response = self.view(request)\n response.render()\n self.assertEqual(response.status_code, 401,\n 'Expected Response Code 401, received {0} instead.'\n .format(response.status_code))", "title": "" }, { "docid": "0a788b2a511d9cb1ab78c878e9a0823b", "score": "0.55987644", "text": "def request_unauthorised(self):\n pass", "title": "" }, { "docid": "f7701270a58a66f693254399ae08a150", "score": "0.556975", "text": "def test_login_redirect(self):\n edit_userRes = self.client.get(url_for('edit_user'))\n endpoint = urlparse(edit_userRes.location).path\n\n assert endpoint == url_for('login')", "title": "" }, { "docid": "b80a8b303f5c6d741036e519601e98ce", "score": "0.5523225", "text": "def unauthorized():\n return redirect(url_for('core.login'))", "title": "" }, { "docid": "75ddbba20b1b055ddd137ac3d4c9fbe1", "score": "0.5462326", "text": "def test_login_redirect(self):\n edit_res = self.client.get(url_for('edit_activity'))\n endpoint = urlparse(edit_res.location).path\n\n assert endpoint == url_for('login')", "title": "" }, { "docid": "d7e275a216e7a5ac746cbce41ac03ce4", "score": "0.5461187", "text": "def open_default_url(self):\n self.se.open(\"/\")", "title": "" }, { "docid": "9a1ab4de2efd2fb0d72aeaa2ad3aca54", "score": "0.5383744", "text": "def missing_auth(handler: ZoeRequestHandler):\n handler.redirect(handler.get_argument('next', u'/login'))", "title": "" }, { "docid": "73cc25ac282b73de60c7df728b7af11f", "score": "0.5370964", "text": "def default_to_private():\n if current_user.is_authenticated():\n return\n \n if request.is_xhr:\n return json_error('Please Login to access {0}'.format(request.path))\n \n if (\n request.endpoint and\n not request.path.startswith('/static/') and\n not request.path == 'favicon.ico' and\n not getattr(app.view_functions[request.endpoint], 'is_public', False)\n ):\n flash('Please Login before visiting {0}'.format(request.path), 'info')\n return redirect(url_for('login', next=request.path))", "title": "" }, { "docid": "94289ca5ded7dfbf69feb050372b1f50", "score": "0.53354377", "text": "def test_no_current_user_redirect(self):\n\n rv = self.app.get('/users/home')\n assert rv.status_code == 302", "title": "" }, { "docid": "798e7bd14573f9f91d8ba76422cdb685", "score": "0.5318694", "text": "def _uriChangedSlot(self, newUri):\r\n \r\n newUri = unicode(newUri)\r\n configuration = self._preferences.getConnection(newUri)\r\n if not configuration is None:\r\n self.presetUsername(configuration.username)\r\n self.presetPassword(configuration.password)", "title": "" }, { "docid": "83a3507347db27ec8b722a1d8bf4da2a", "score": "0.530178", "text": "def test_view_url_accesible_by_name(self):\n self.client.login(username=\"john.doe\", password=\"XVAP11!0$\")\n response = self.client.get(\n reverse(\"menu-update\", kwargs={\"uuid\": self.menu.pk})\n )\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "d27bafbcc31ffdbd98de1e9a415374d7", "score": "0.5265992", "text": "def test_anonymous(self):\n response_for_anonymous = 
self.client.get(self.url)\n self.assertRedirects(\n response_for_anonymous,\n reverse('login') + '?next=%s' % self.url\n )", "title": "" }, { "docid": "9e0aa921650c72b3723ba95925a52f1b", "score": "0.52501434", "text": "def _is_correct_lti_request(self):\n lti_endpoint = self.server.config.get('lti_endpoint', self.DEFAULT_LTI_ENDPOINT)\n return lti_endpoint in self.path", "title": "" }, { "docid": "e8bf98137f99185efaa5fc9a779c3ddf", "score": "0.5241938", "text": "def _add_login(self, uri):\n uri = \"users;use_login=1/{0}\".format(uri)\n\n return uri", "title": "" }, { "docid": "a637112790f571159ce98b126af7c9b5", "score": "0.52381897", "text": "def before_app_request():\n if current_user.is_authenticated:\n current_user.ping()\n if not current_user.confirmed and request.blueprint != 'auth' and request.endpoint != 'static':\n return redirect(url_for('auth.unconfirmed'))", "title": "" }, { "docid": "f7df01ba0b3d70413e66bec90416c159", "score": "0.5211868", "text": "def test_editorial_un_authenticated(self): \n self.client.force_authenticate(user=None)\n response = self.client.get('/editorials/')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "title": "" }, { "docid": "f3c976acf601053b7a8d02708780c7ed", "score": "0.5211297", "text": "def perform_authentication(self, request):\n return None", "title": "" }, { "docid": "a017c69e85a36b53c0da0bfec98c8d7d", "score": "0.51997983", "text": "def test_view_url_accesible_by_name(self):\n self.client.login(username=\"john.doe\", password=\"XVAP11!0$\")\n response = self.client.get(reverse(\"menu-list\"))\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "05e71135aa0ab4147d74a165b746099f", "score": "0.51926464", "text": "def test_views_unauthenticated(self, client):\n assert client.get(url_for(\"index\")).status_code == 200\n assert client.get(url_for(\"resources\")).status_code == 200\n assert client.get(url_for(\"ask\")).status_code == 302\n assert client.get(url_for(\"logout\")).status_code == 302\n assert client.get(url_for(\"authorize\")).status_code == 404", "title": "" }, { "docid": "9765a9f3a54e5f7272432c732ddad3fa", "score": "0.5187907", "text": "def test_login_handler_request_is_made(self):\n self.assertURIused(self.LOGIN_HANDLER_URI)", "title": "" }, { "docid": "bc2ba92a73eca72aacdcf3a655ae2596", "score": "0.518555", "text": "def on_authenticated(self):\n pass", "title": "" }, { "docid": "e5bc20885d569086d3a2d682275675e9", "score": "0.51671493", "text": "def test_view_url_accesible_by_name(self):\n self.client.login(username=\"john.doe\", password=\"XVAP11!0$\")\n response = self.client.get(reverse(\"menu-add\"))\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "995c50c473558143e5391fcdf94086ab", "score": "0.5156552", "text": "def old_url_login_view(request):\n if request.method == \"GET\":\n return redirect('login', permanent=True)", "title": "" }, { "docid": "71dd3c1494dbdba41b91a30580a5acef", "score": "0.51539516", "text": "def process_request(self, request):\n if not request.user.is_authenticated:\n return\n if not can_disguise(request):\n return\n request.original_user = get_original_user(request)", "title": "" }, { "docid": "19bfad21f88e5c49761494ca72d069f3", "score": "0.5151358", "text": "def test_options_anonymous(self):\n client = Client()\n response = client.get(reverse('planet:options'))\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(\n response,\n '/accounts/login/?next=%s' % (reverse('planet:options'))\n )", "title": "" }, { "docid": 
"ae1afd523721b8a28eb692aed910d92c", "score": "0.51329106", "text": "def before_request():\r\n if \"db\" not in session or global_var.contact_book is None:\r\n if request.endpoint not in [\"init.db_select\", \"db_select\"]:\r\n return redirect(url_for(\"init.db_select\"))\r\n return None\r\n if (\"user\" not in session or session[\"user\"] is None) and request.endpoint not in [\r\n \"login.login\",\r\n \"login.register\",\r\n ]:\r\n return redirect(url_for(\"login.login\"))\r\n return None", "title": "" }, { "docid": "c45ec9989af854b0f46a6902183b468d", "score": "0.51160914", "text": "def _IsInteractiveRequest(self):\n return self.request.method in ['GET', 'HEAD']", "title": "" }, { "docid": "076d097d001431b600cac132ebeb9242", "score": "0.51104724", "text": "def test_read_o_auth_window_uri(self):\n pass", "title": "" }, { "docid": "6fd79463f5945dae3d0b0e876b325c81", "score": "0.5106464", "text": "def test_unauthenticated(self):\n \n from weblayer import RequestHandler\n \n class A(RequestHandler):\n def get(self):\n return '%s %s' % (\n self.auth.is_authenticated,\n self.auth.current_user\n )\n \n \n \n mapping = [(r'/', A)]\n \n app = self.make_app(mapping)\n res = app.get('/')\n \n self.assertTrue(res.body == 'False None')", "title": "" }, { "docid": "0d403ff24e6072deba961a0bf8dc828b", "score": "0.5090401", "text": "def homepage():\n return redirect(url_for('auth.login'))", "title": "" }, { "docid": "307a2711c2d1ce0a1476568a73630d86", "score": "0.50897837", "text": "def redirect_null_user_home():\n return redirect('/')", "title": "" }, { "docid": "d5671ee2b54a1c3fd1a61117e553f778", "score": "0.5089234", "text": "def test_view_url_accesible_by_name(self):\n self.client.login(username=\"john.doe\", password=\"XVAP11!0$\")\n response = self.client.get(reverse(\"meal-add\"))\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "9ae03d19920f8bc6de0d6bdfb854e6fe", "score": "0.50790393", "text": "def http_auth(self):\n return None", "title": "" }, { "docid": "7292373cec7cd5714d721e2a8c8ef723", "score": "0.5078344", "text": "def test_view_url_accesible_by_name(self):\n self.client.login(username=\"john.doe\", password=\"XVAP11!0$\")\n response = self.client.get(reverse(\"meal-list\"))\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "51f7df7b41f91ceea9123c94a8c3ff6f", "score": "0.5070941", "text": "def test_view_url_accesible_by_name(self):\n self.client.login(username=\"john.doe\", password=\"XVAP11!0$\")\n response = self.client.get(reverse(\"meal-update\", kwargs={\"pk\": self.meal.pk}))\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "405927981c882a42ba7687bbd9d9d1cd", "score": "0.506296", "text": "def test_unauthorized_access(self):\n self.client = Client()\n response = self.client.get(reverse(\"login_redirect\"))\n self.assertRedirects(response, reverse(\"login\") + \"?next=\" + reverse(\"login_redirect\"))", "title": "" }, { "docid": "326000b77d187e097d9a5af9105eb22a", "score": "0.50607765", "text": "def _handle_initial_call(self, request):\n\n log.debug(\"Instagram handling get-request: identified as initial\")\n\n self.fetch_next_uri(request)\n\n # lookup if user is already connected to instagram\n try:\n InstagramAccount.objects.get(user=request.user)\n return self.success(request)\n except InstagramAccount.DoesNotExist:\n pass\n\n # create verify token and save it to the session\n verify_token = hmac_new(Config.get(\"CLIENT_SECRET\").encode(\"utf-8\"),\n msg=request.user.email.encode(\"utf-8\"),\n 
digestmod=sha1).hexdigest()\n request.session[SESSKEY_OAUTH_VERIFY_TOKEN] = verify_token\n\n redirect_uri = self.build_redirect_uri(request, verify_token)\n\n instagram_oauth_uri = ''.join((Config.get(\"API_OAUTH_BASE_URI\"),\n \"/authorize/?client_id=\",\n Config.get(\"CLIENT_ID\"),\n \"&redirect_uri=\",\n redirect_uri,\n \"&response_type=code\"))\n\n return redirect(instagram_oauth_uri)", "title": "" }, { "docid": "9af537e63955e8bcd82fae442194ee6d", "score": "0.5056657", "text": "async def auth_UNKNOWN(self):\n await self.push(501, \"5.5.4 Syntax: AUTH mechanism\")", "title": "" }, { "docid": "9e6a34facfb55daaf22bd617b2aa3e59", "score": "0.5053442", "text": "def not_authed():\n print SPACER\n print \"Not Authenticated!\\n Please authenticate using option #1 (use local credentials file) or use option #2 (enter credentials manually)\"\n clear_screen()", "title": "" }, { "docid": "e12431b75808223aa5e8acaf8a3a0686", "score": "0.50502247", "text": "def test_login_link_present_for_anonymous_user(self):\n page = self.app.get('/')\n self.assertIn(_('login'), page)\n self.assertIn(reverse('doppler_auth_login'), page)", "title": "" }, { "docid": "ffd6a869c6b323579bb6b88209ffb364", "score": "0.50340617", "text": "def is_refreshable_url(self, request):\n return (\n request.method == 'GET' and\n is_authenticated(request.user) and\n request.path not in self.exempt_urls\n )", "title": "" }, { "docid": "442e9ff84517371839822f77d41d2a57", "score": "0.5032053", "text": "def main(request):\n\n if request.user.is_authenticated:\n return redirect(\"question\")\n else:\n return redirect(\"login\")", "title": "" }, { "docid": "2356cdd26c70273945e4919a7eac1b27", "score": "0.5018828", "text": "def test_user_not_authorized(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_302_FOUND)\n self.assertRedirects(response, reverse('home'))", "title": "" }, { "docid": "7dcae1ebe6dbd2000da98ee1c5a6a64e", "score": "0.50168514", "text": "def test_login_page_already_authenticated(self):\n response = self.app.get('/login', follow_redirects=True)\n # The user is redirected elsewhere\n assert 'Log In - dogpound' not in response.data", "title": "" }, { "docid": "b66945486b802a1f41f49dac1c6885f5", "score": "0.5016547", "text": "def test_view_url_accesible_by_name(self):\n self.client.login(username=\"john.doe\", password=\"XVAP11!0$\")\n response = self.client.get(\n reverse(\"menu-detail\", kwargs={\"uuid\": self.menu.pk})\n )\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "a0cb2ac0087ddb9ec85ed36ce0286362", "score": "0.50053287", "text": "def check_allowed_uri(self, uri):\n\n if not uri.startswith(self.base_uri):\n raise rpki.exceptions.ForbiddenURI", "title": "" }, { "docid": "b4af122b39d70421a6d3fdbbc47aa43f", "score": "0.5000593", "text": "def inaccessible_callback(self, name, **kwargs):\n return redirect(url_for('public.home', next=request.url))", "title": "" }, { "docid": "7e8689d8095fe262a617588fd34f9211", "score": "0.5000299", "text": "def userInitialLogin(obj, event):\n registry = getUtility(IRegistry)\n\n # check if we need to redirect at all\n do_redirect = registry.get('collective.onlogin.interfaces.' 
\\\n 'IOnloginSettings.first_login_redirect_enabled')\n if not do_redirect:\n return\n\n # get portal object\n portal = getSite()\n\n # check if we have an access to request object\n request = getattr(portal, 'REQUEST', None)\n if not request:\n return\n\n # check if we need to ignore came_from variable\n ignore_came_from = registry.get(\n 'collective.onlogin.interfaces.IOnloginSettings.' \\\n 'first_login_redirect_ignore_came_from')\n # when we try to log from logged_out page the came_from doesn't bin canceled\n if not ignore_came_from and request.get('came_from'):\n return\n\n # check if we got redirect expression\n redirect_expr = registry.get('collective.onlogin.interfaces.' \\\n 'IOnloginSettings.first_login_redirect_expr')\n\n if not redirect_expr:\n return\n\n # now complile and render our expression to url\n expr = Expression(redirect_expr)\n econtext = getExprContext(portal, portal)\n try:\n url = expr(econtext)\n except Exception, e:\n logException(u'Error during user initial login redirect')\n return\n else:\n # check if came_from is not empty, then clear it up, otherwise further\n # Plone scripts will override our redirect\n if request.get('came_from'):\n request['came_from'] = ''\n request.form['came_from'] = ''\n request.RESPONSE.redirect(url)", "title": "" }, { "docid": "e2590c6529995b2024566b61d57ffc2d", "score": "0.49973208", "text": "def get(self):\n self.clear_cookie(\"user\")\n self.redirect(\"/\")", "title": "" }, { "docid": "0107a0158caa2c4e8522cee1588e7f4b", "score": "0.4989841", "text": "def test_redirect_http(self):\n response = self.client.get(\"/http_redirect_view/\", follow=True)\n self.assertFalse(response.test_was_secure_request)", "title": "" }, { "docid": "7468bf595a15753d11d6419066a6d71e", "score": "0.49838006", "text": "def preflight(self):\r\n return True", "title": "" }, { "docid": "b9e9ba716cace72b7ba78c5a310c9671", "score": "0.49834", "text": "def test_response_code_unauthenticated_user_vote(self):\n response = self.client.get(self.question1_url)\n self.assertEqual(response.status_code, 302)", "title": "" }, { "docid": "79aede55857201dec28883fd6f255b71", "score": "0.4981123", "text": "def before_request():\n if request.path not in public_routes:\n g.user = None\n if 'openid' in session:\n flask.g.user = User.query.\\\n filter_by(openid=session['openid']).first()", "title": "" }, { "docid": "99d3186ab9868135abc44b507ed95c25", "score": "0.49804088", "text": "def test_loginredirect(self): #R7.1\n self.open(base_url + '/logout')\n self.open(base_url + '/')\n self.open(base_url)\n self.assert_element(\"#login-prompt\")", "title": "" }, { "docid": "5579ebe20458e78b103955434e693cd9", "score": "0.49797714", "text": "def isNotCompetentToAuthenticate(request):", "title": "" }, { "docid": "de369baeb4ce108868ecc78792458d50", "score": "0.49782902", "text": "def initial(self, request, *args, **kwargs):\n\n # It's checks the permissions for the third party endpoint or not. 
It give access if key present.\n bool_value, message = self.check_api_keys(request)\n if bool_value:\n super(IBRestrictedGenericViewSet, self).initial(request, *args, **kwargs)\n # Check action permissions\n self.check_action_permissions(request)\n else:\n self.app_permission_denied(request, message)", "title": "" }, { "docid": "4c8a7e4d0bfc051627a8d78eeaf5abeb", "score": "0.49773416", "text": "def authorizer(self, session, uri, action): #intended ot be overwridden pylint: disable=unused-argument,no-self-use\n try:\n authid = session['authid']\n print \"authorize called\", authid, action, uri\n raise NotImplementedError\n #return True\n except Exception: #False prevents logins pylint: disable=broad-except\n return False", "title": "" }, { "docid": "71bf092bc3e26366c4b7b47c480c8e3b", "score": "0.49720645", "text": "def unauthorized():\n\tflash('You must be logged in to view that page.')\n\treturn redirect('/api/login')", "title": "" }, { "docid": "cfffc1cefe066a82ea726ad9a6f72b78", "score": "0.49663478", "text": "def authredir(approval_prompt='auto'):\n authurl, state = auth.redir(TOOLNAME, approval_prompt)\n session['oauth_state'] = state\n LOG.error(\"Redirecting for authorization\")\n return redirect(authurl)", "title": "" }, { "docid": "be12f53212debf0f6db287e23f18392c", "score": "0.49653333", "text": "def cannotbeacurrentuser():\n if not current_user.is_authenticated:\n return app.send_static_file('./login/index.html')\n else:\n return redirect('/events/index.html')", "title": "" }, { "docid": "6167bd75522036962d67ec9d9f7f4c8b", "score": "0.49571556", "text": "def _authenticated_path(self):\n basepath = self.proxy.path.decode().split('?')[0]\n params = urlencode({'session-id': self.session_id,\n 'session-key': self.session_key,\n 'callnum': self.callnum})\n result = '%s?%s' % (basepath, params)\n return result.encode('utf-8')", "title": "" }, { "docid": "176204fd95c1ec5774ae19f2f30bedce", "score": "0.49494448", "text": "def test_login_link_does_not_present_for_authorized_user(self):\n page = self.app.get('/', user=UserFactory())\n self.assertIn(_('login'), page)\n self.assertIn(reverse('doppler_auth_login'), page)", "title": "" }, { "docid": "d0f8e73b111392814919a73dab58ef9f", "score": "0.49469364", "text": "def is_authenticated(self):\n return True", "title": "" }, { "docid": "61d060c64f4cc81456f02b3fdb0b5c6c", "score": "0.49452665", "text": "def test_unauthenticated_users_are_redirected_to_login_page(self):\n request = RequestFactory().get(\"\")\n request.user = AnonymousUser()\n response = PollCreate.as_view()(request)\n self.assertEqual(response.status_code, 302)\n self.assertIn(reverse(\"users:login\"), response.url)", "title": "" }, { "docid": "220813ba256b52fe6b60e3067c576226", "score": "0.4931002", "text": "def at_pre_login(self):\n pass", "title": "" }, { "docid": "7e1fcc798bfdf6d5fdae61a79f381d1e", "score": "0.4930363", "text": "def auth_before_request():\r\n pass", "title": "" }, { "docid": "880718e21f908d9876299ca3c2633bfa", "score": "0.49274886", "text": "def before_request():\n if current_user.is_authenticated \\\n and not current_user.confirmed \\\n and request.endpoint[:8] != 'account.' 
\\\n and request.endpoint != 'static':\n return redirect(url_for('account.unconfirmed'))", "title": "" }, { "docid": "631f7709d5f327fee8da3b25b8355f87", "score": "0.4925386", "text": "def test_editorial_authenticated(self):\n response = self.client.get('/editorials/')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "9820172815cd57ec356cf7b538801f0e", "score": "0.49239048", "text": "def welcome():\n return redirect(\"http://\" + request.host + \"/auth?state=\" + get_token_key())", "title": "" }, { "docid": "6a51b41ac0c72385429152d020827e40", "score": "0.49236798", "text": "def test_with_anonymous_with_public_access(self):\n response = self.client.get(local_site_reverse('root'))\n\n self.assertRedirects(response, '/r/')", "title": "" }, { "docid": "046100d80c7667415461f3991afd3ddd", "score": "0.49234572", "text": "def auth_url(self):\n raise NotImplementedError('Implement in subclass')", "title": "" }, { "docid": "618bf959d86d5d0c16ff3caf0bc164cd", "score": "0.49222872", "text": "def globus_authorize(request):\n flow = globus_initFlow()\n auth_uri = flow.step1_get_authorize_url()\n auth_uri += '&authentication_hint=36007761-2cf2-4e74-a068-7473afc1d054'\n auth_uri = auth_uri.replace('access_type=offline','access_type=online')\n logger.warn(auth_uri)\n return HttpResponseRedirect(auth_uri)", "title": "" }, { "docid": "164c72625e47f15c3c0f8b4ee62095b0", "score": "0.49180964", "text": "def is_redirector(self):\n return False", "title": "" }, { "docid": "c1fc5877f7f7fe4a06d9455b96c8ee21", "score": "0.49175695", "text": "def request_access(self, request, url, session):", "title": "" }, { "docid": "2bc4ad937e699f060710c11d7cf589cc", "score": "0.49156022", "text": "def unauthorized_error_handle(error=None):\n return redirect(url_for('auth.login'))", "title": "" }, { "docid": "18f2936297d668fa8b1fed54ffe27425", "score": "0.491328", "text": "def content_redirect_url(self, path):\r\n return None", "title": "" }, { "docid": "b0e661deed7be38a2bb162e0aad2d082", "score": "0.49100345", "text": "def uses_redirect(self):\n return True", "title": "" }, { "docid": "4e56c05e81898126db2a76dc1821e233", "score": "0.49080443", "text": "def test_bad_request(self):\n response = self.client.get(reverse('main:logout'))\n self.assertTrue(response.status_code, 302)\n self.assertTrue(response.url, reverse('main:login'))", "title": "" }, { "docid": "2102cc1dba57e410c9a1b222a28bd89b", "score": "0.48966533", "text": "def get_authorization_url(self):\n return super().authorization_url(MINUT_AUTH_URL)", "title": "" }, { "docid": "c8b53a8ef091c9d58cd19fe003d67096", "score": "0.4893925", "text": "def unauthorized():\n flash('You must be logged in to view that page.')\n return redirect(url_for('auth.login', next=request.path))", "title": "" }, { "docid": "3b63ca9005595b951c6cc12c29900575", "score": "0.4890008", "text": "def test_unauthenticated_users_are_redirected_to_login_page(self):\n question = QuestionFactory(question_text=\"question\")\n choice1 = ChoiceFactory(question=question, choice_text=\"Choice 1\")\n request = RequestFactory().post(\"\", {\"choice\": choice1.pk})\n request.user = AnonymousUser()\n response = vote(request, pk=question.pk)\n self.assertEqual(response.status_code, 302)\n self.assertIn(reverse(\"users:login\"), response.url)", "title": "" }, { "docid": "d38c7f4ca44fb7c09372d2d12638a4a7", "score": "0.48842797", "text": "def authy_redirect(self):\n return _url_to_appropriate_authy_page(self.request, self.authy_required_session_token)", "title": "" }, { "docid": 
"51c3784f4e481ce93a7ce5e9b80d5021", "score": "0.4883318", "text": "def is_authenticated(self):\r\n return True", "title": "" }, { "docid": "69fbace1f6310d5026e049eefd4d0f37", "score": "0.4865873", "text": "def initial(self, request, resource_type, *args, **kwargs):\n\n logger.debug(\"resource_type: %s\" % resource_type)\n logger.debug(\"Interaction: read\")\n logger.debug(\"Request.path: %s\" % request.path)\n\n super().initial(request, *args, **kwargs)\n\n if resource_type not in ALLOWED_RESOURCE_TYPES:\n logger.info('User requested read access to the %s resource type' % resource_type)\n raise exceptions.NotFound('The requested resource type, %s, is not supported' % resource_type)\n\n self.crosswalk = self.check_resource_permission(request, resource_type, *args, **kwargs)\n if self.crosswalk is None:\n raise exceptions.PermissionDenied(\n 'No access information was found for the authenticated user')\n\n self.resource_type = resource_type", "title": "" }, { "docid": "b91abeb3d107a7b8aa268a6c9c37b709", "score": "0.48649818", "text": "def request(self, method, url, *pargs, **kwargs):\n resp = requests.sessions.Session.request(self, method, url, *pargs, **kwargs)\n if resp.history:\n h = resp.history[0]\n if h.status_code == 302 and h.url == url:\n raise VXAuthRequired('Authentication Error - url: {0} was redirected to homepage.'.format(url))\n return resp", "title": "" }, { "docid": "1e505f9e2a9ccaee7916c9b0a7f3bdd0", "score": "0.48648226", "text": "def test_with_anonymous_with_private_access(self):\n with self.siteconfig_settings({'auth_require_sitewide_login': True},\n reload_settings=False):\n response = self.client.get(local_site_reverse('root'))\n\n self.assertRedirects(response, '/account/login/?next=/')", "title": "" }, { "docid": "da7f51af41d4766dd3e06cd37aa039bb", "score": "0.48647642", "text": "def resetUrl(self):\r\n self.url = defaultUrl", "title": "" }, { "docid": "d4533cd16e9c1aa48d43014eade2afd9", "score": "0.4863076", "text": "def test_unauth_user_cannot_view_specific_applications(self):\r\n\r\n # confirm that unauth user gets 302\r\n response = self.client.get(reverse('app:list_specific_applications', args=(1,)))\r\n self.assertEqual(response.status_code, 302)", "title": "" }, { "docid": "a177bac91905a424aa7ea07fe5bb0dea", "score": "0.4861174", "text": "def check_uri(credentials, environ):\n # Doing this by stripping known parts from the passed uri field\n # until something trivial remains, as the uri cannot be\n # reconstructed from the environment exactly.\n try:\n uri = credentials[\"uri\"]\n except KeyError:\n raise ProtocolViolation(\"uri missing in client credentials\")\n if environ.get(\"QUERY_STRING\"):\n if not uri.endswith(environ[\"QUERY_STRING\"]):\n raise AuthenticationRequired(\"url mismatch\")\n uri = uri[:-len(environ[\"QUERY_STRING\"])]\n if environ.get(\"SCRIPT_NAME\"):\n if not uri.startswith(environ[\"SCRIPT_NAME\"]):\n raise AuthenticationRequired(\"url mismatch\")\n uri = uri[len(environ[\"SCRIPT_NAME\"]):]\n if environ.get(\"PATH_INFO\"):\n if not uri.startswith(environ[\"PATH_INFO\"]):\n raise AuthenticationRequired(\"url mismatch\")\n uri = uri[len(environ[\"PATH_INFO\"]):]\n if uri not in ('', '?'):\n raise AuthenticationRequired(\"url mismatch\")", "title": "" }, { "docid": "9eb980a4b1d1bcdb3aee0f1296aa40f0", "score": "0.48583117", "text": "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', 
next=request.url))", "title": "" }, { "docid": "9eb980a4b1d1bcdb3aee0f1296aa40f0", "score": "0.48583117", "text": "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "title": "" }, { "docid": "9eb980a4b1d1bcdb3aee0f1296aa40f0", "score": "0.48583117", "text": "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "title": "" }, { "docid": "536270d5665fa61ea472ae04df1e7a01", "score": "0.4858089", "text": "def test_auth_user_cannot_view_list_of_animals(self):\r\n\r\n self.client.login(email='[email protected]', password='secret2')\r\n\r\n # confirm that unauth user gets 302\r\n response = self.client.get(reverse('app:list_applications'))\r\n self.assertEqual(response.status_code, 302)", "title": "" }, { "docid": "57eed4da7dcc2382fbaa9c3d4c71bda8", "score": "0.48563746", "text": "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('user.login', next=request.url))", "title": "" }, { "docid": "d58824c1ed2a846d72d4e9d25b7f877f", "score": "0.48549291", "text": "def processNoAuthRequired(self):\n\n self.state = self.ST_READ_REQUEST\n self.processRequest()", "title": "" }, { "docid": "39c8e338c8d31ee92ddbad717f76511e", "score": "0.48503116", "text": "def test_with_anonymous_with_local_site_private(self):\n response = self.client.get(\n local_site_reverse('root', local_site_name=self.local_site_name))\n\n self.assertRedirects(response,\n '/account/login/?next=/s/%s/'\n % self.local_site_name)", "title": "" }, { "docid": "e22372e87f15020e6e217d3c016db605", "score": "0.48496184", "text": "def test_redirect_anonymous(self):\n for url in StaticURLTests.url_private_names.values():\n with self.subTest(url=url):\n response = StaticURLTests.guest_client.get(url)\n self.assertEqual(response.status_code, 302)", "title": "" }, { "docid": "8750c6826255eb1d4acac790a2530510", "score": "0.4846512", "text": "def test_unauth_user_cannot_view_list_of_animals(self):\r\n\r\n # confirm that unauth user gets 302\r\n response = self.client.get(reverse('app:list_applications'))\r\n self.assertEqual(response.status_code, 302)", "title": "" }, { "docid": "aa5736ea12cc28269a9b8ffebf555ec0", "score": "0.48405406", "text": "def missing_auth():\n return Response('Could not verify your access level for that URL.\\nYou have to login with proper credentials',\n 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" } ]
f87df1f868d8a0937a9ece2303097018
Returns the UID, the UID where the Brick is connected to, the position, the hardware and firmware version as well as the device identifier. The position can be '0'-'8' (stack position).
[ { "docid": "ff1efcecb313519d85108b3f0b5cec55", "score": "0.0", "text": "def get_identity(self):\n return GetIdentity(*self.ipcon.send_request(self, BrickDC.FUNCTION_GET_IDENTITY, (), '', '8s 8s c 3B 3B H'))", "title": "" } ]
[ { "docid": "81f8d864177e6dabd9725d2750ae5105", "score": "0.59945315", "text": "def read_uid(self):\n return self.ipcon.send_request(self, BrickHAT.FUNCTION_READ_UID, (), '', 'I')", "title": "" }, { "docid": "e4d920a2598f5f2aad915fd74784a61c", "score": "0.59237516", "text": "def uid(self):\n return self._serial_number[0:6]", "title": "" }, { "docid": "a4cfb453d4838ba04607bef3bf934903", "score": "0.5797364", "text": "def read_device_uid(self):\n family = self.configuration[\"family\"]\n if not family:\n self.debug(0, \"Supply -f [family] to see flash size and device UID, e.g: -f F1\")\n return\n\n try:\n if family != \"F4\":\n flash_size = self.stm32.get_flash_size(family)\n device_uid = self.stm32.get_uid(family)\n else:\n # special fix for F4 devices\n flash_size, device_uid = self.stm32.get_flash_size_and_uid_f4()\n except bootloader.CommandError as e:\n self.debug(0, \"Something was wrong with reading chip family data: \" + e.message)\n return\n\n device_uid_string = self.stm32.format_uid(device_uid)\n self.debug(0, \"Device UID: %s\" % device_uid_string)\n self.debug(0, \"Flash size: %d KiB\" % flash_size)", "title": "" }, { "docid": "93e42982e08d09b98ae8ce5426745e45", "score": "0.5756101", "text": "def uid(self) -> str:\n return self._dev", "title": "" }, { "docid": "141f6d082f7b7c94266f67cbafb5fd62", "score": "0.57353586", "text": "def read_device_id(self):\n boot_version = self.stm32.get()\n self.debug(0, \"Bootloader version: 0x%X\" % boot_version)\n device_id = self.stm32.get_id()\n self.debug(\n 0, \"Chip id: 0x%X (%s)\" % (device_id, bootloader.CHIP_IDS.get(device_id, \"Unknown\"))\n )", "title": "" }, { "docid": "bafdf35ea38d985a0f2828de07c98302", "score": "0.5710896", "text": "def uid(self):\n return str(self._serial)", "title": "" }, { "docid": "6ee9ca592e371558f369e863196bf08e", "score": "0.5596329", "text": "def bugid(self):\n return self._bugid", "title": "" }, { "docid": "37cd011dd0e6bfd10feaebbe43ecb1c4", "score": "0.5591085", "text": "def print_platform_info(self):\n bid_output = self.send_cmd_get_output('bid')\n bid = re.search(r'Board ID: (\\S+?)[:,]', bid_output).group(1)\n if bid == ERASED_BID:\n logging.warning(DEBUG_ERASED_BOARD_ID)\n raise ValueError('Cannot run RMA Open when board id is erased')\n bid = int(bid, 16)\n chrs = [chr((bid >> (8 * i)) & 0xff) for i in range(4)]\n logging.info('RLZ: %s', ''.join(chrs[::-1]))", "title": "" }, { "docid": "37fa091816be786d7e38d6f787e0a1b1", "score": "0.5590901", "text": "def vendor(self):\n if not self.current_machine:\n return None\n\n reg = Cpuid()\n return Cpuid.registers_to_str(reg.ebx, reg.edx, reg.ecx)", "title": "" }, { "docid": "49f2729836a34dbaf581ebbfc96d14b6", "score": "0.558591", "text": "def unique_id(self):\n return f\"{self._mac_address}:{self._device_id}:battery\"", "title": "" }, { "docid": "35f0193e7e47405b025ece0cfc2f3604", "score": "0.5582155", "text": "def uid(self):\n return self._serial_number", "title": "" }, { "docid": "f8e9051e9b7a847c36af11b0224dd438", "score": "0.5567897", "text": "def getIdent(self):\r\n rx = self.readVariable(b'DeviceIdent')\r\n if self.protocol == self.COLA_A:\r\n offset = 0\r\n deviceName, offset = unpack_flexstring_from_cola_a(rx, offset)\r\n deviceVersion, offset = unpack_flexstring_from_cola_a(rx, offset)\r\n else:\r\n # expect to receive \"[16bit length]Name [16bit length]Version\"\r\n offset = 0\r\n deviceName, offset = unpack_flexstring_from(rx, offset)\r\n deviceVersion, offset = unpack_flexstring_from(rx, offset)\r\n return (deviceName, deviceVersion)", "title": 
"" }, { "docid": "8943bb7d413a75de927934e3ca215c98", "score": "0.5563971", "text": "def getJoystickPosition(self):\n\t\tdata = self.read()\n\t\treturn data[0],data[1]", "title": "" }, { "docid": "eacf98e32a940842deacd96b78eec9d6", "score": "0.5508794", "text": "def output_device_identification(self):\n info = self.query(b\"\\x1BA0000\\r\")\n return info", "title": "" }, { "docid": "13d5c04b035de0be6ebb349d591754db", "score": "0.55014855", "text": "def identify_boards(self):\n self.th.get_serial()\n self.dut.get_serial()", "title": "" }, { "docid": "ab6c8b82c593f8d3e194dcf63a5dc5f6", "score": "0.5485916", "text": "def brand(self):\n if not self.current_machine:\n return None\n\n if self.cpuid_highest_extended_function < 0x80000004:\n return None\n\n brand_list = []\n for eax in (0x80000002, 0x80000003, 0x80000004):\n reg = Cpuid(eax)\n brand_list += [reg.eax, reg.ebx, reg.ecx, reg.edx]\n return Cpuid.registers_to_str(*brand_list)", "title": "" }, { "docid": "27f881ce0dce3a1d216aeda203a575e7", "score": "0.5455919", "text": "def uid(self):\n\t\tversion = requests.get(self._freebox_url + API_VERSION)\n\t\treturn version.json()['uid']", "title": "" }, { "docid": "8888f0aef84fa059a39cbe2b44ac2fe3", "score": "0.54281545", "text": "def usb_info(self):\n return 'USB VID:PID={:04X}:{:04X}{}{}'.format(\n self.vid or 0,\n self.pid or 0,\n ' SER={}'.format(self.serial_number) if self.serial_number is not None else '',\n ' LOCATION={}'.format(self.location) if self.location is not None else '')", "title": "" }, { "docid": "524c3914188b55bb0005ee8af7aadcec", "score": "0.54185313", "text": "def read_chipid(self):\n return self.talk( 0x32, 0x33)[1:]", "title": "" }, { "docid": "0dd3cf424739b0b0c93d5b4b953daa4f", "score": "0.5391904", "text": "def _get_location(self):\n self.hiddev = open(self.dev, \"rb\")\n barcode = ''\n continue_looping = True\n k = 0\n\n while continue_looping:\n report = self.hiddev.read(8)\n k += 1\n for i in report:\n j = ord(i)\n if j == 0:\n continue\n if j == 0x1E:\n barcode += '1'\n continue\n elif j == 0x1F:\n barcode += '2'\n continue\n elif j == 0x20:\n barcode += '3'\n continue\n elif j == 0x21:\n barcode += '4'\n continue\n elif j == 0x22:\n barcode += '5'\n continue\n elif j == 0x23:\n barcode += '6'\n continue\n elif j == 0x24:\n barcode += '7'\n continue\n elif j == 0x25:\n barcode += '8'\n continue\n elif j == 0x26:\n barcode += '9'\n continue\n elif j == 0x27:\n barcode += '0'\n continue\n elif j == 0x28:\n self.hiddev.close()\n continue_looping = False\n break\n else:\n pass\n return int(barcode)", "title": "" }, { "docid": "d4d6bcc22826d277bfb79610b97a8816", "score": "0.5386642", "text": "def unique_id(self):\n return self._device.serial", "title": "" }, { "docid": "d4d6bcc22826d277bfb79610b97a8816", "score": "0.5386642", "text": "def unique_id(self):\n return self._device.serial", "title": "" }, { "docid": "9af0e426d54c16037f22f69074508aff", "score": "0.5385407", "text": "def uid(self):\n return ctypes.string_at(nfc.freefare_get_tag_uid(self.target))", "title": "" }, { "docid": "9438ac58fe9f0ec33ef089dc45f5c1c5", "score": "0.53801084", "text": "def get_uid(self):\n return self._camera.uid", "title": "" }, { "docid": "1840cf0ff09f45e6e9a46adc26df1ff0", "score": "0.5370706", "text": "def get_camera_serial(self):\n function_id = 0x00050002\n\n res = self._send_packet(function_id, receive_size=4)\n res = self._decode_packet(res, receive_size=4)\n\n return struct.unpack(\">I\", res)[0]", "title": "" }, { "docid": "537eac71dc6738a6595bddc30d5f3ec1", "score": "0.5355614", 
"text": "def Identify(self):\n\t\tself.writeLine('*IDN?')\n\t\tdeviceID = self.device.readline().decode('ascii')[:-2] # Discard carraige return\n\t\treturn deviceID", "title": "" }, { "docid": "17e8754a5988a1cedc88c728d4738cfa", "score": "0.5343578", "text": "def unique_id(self):\n return f\"{self._mac_address}:{self._device_id}:temp\"", "title": "" }, { "docid": "6594c752d7a196ba5e00e1fc5866e69a", "score": "0.5318576", "text": "def bbid(self) -> str:\n return self.__bbid", "title": "" }, { "docid": "cb45ba27cae8e3144d482303940b9448", "score": "0.53009725", "text": "def device_info(self):\n return {\n \"name\": self._camera.name,\n \"identifiers\": {(LOGI_CIRCLE_DOMAIN, self._camera.id)},\n \"model\": self._camera.model_name,\n \"sw_version\": self._camera.firmware,\n \"manufacturer\": DEVICE_BRAND,\n }", "title": "" }, { "docid": "24aa6000127401353e358186c8cb4db2", "score": "0.5290462", "text": "def read_device_id(self):\n try:\n resp = self.avr.activate_physical()\n except Jtagice3ResponseError:\n self.logger.error(\"Unable to activate debugWIRE. Maybe ISP is active?\")\n return \"Error\"\n self.logger.info(\"ID read: %02X%02X\", resp[1], resp[0])\n self.avr.deactivate_physical()\n return bytearray(resp[0:2])", "title": "" }, { "docid": "caec19b98db8582871640059f958cdbe", "score": "0.5276739", "text": "def unique_id(self):\n return f\"{self._izzibridge.unique_id}_binary_sensor_{self._sensor_type}\"", "title": "" }, { "docid": "2d876136d79ea0ed9b2dc82e6b8ae3fd", "score": "0.52685046", "text": "def unique_id(self):\n return f\"battery_{self._blid}\"", "title": "" }, { "docid": "8136463ef836e771caa1ac6dee7e797a", "score": "0.52654046", "text": "def unique_id(self) -> str:\n return f\"{self.charger.id}_{self._sensor_name}\"", "title": "" }, { "docid": "7bc8e7310d4329937991a3f3fff95ba3", "score": "0.5260044", "text": "def getUid():", "title": "" }, { "docid": "f82ac6bfb0992cce1db8b7950c91f045", "score": "0.5256357", "text": "def unique_id(self):\n return f\"occupancygroup_{self._bridge_unique_id}_{self.device_id}\"", "title": "" }, { "docid": "8b2af42a1ce2bdd87948df3f8943e3e4", "score": "0.5256298", "text": "def getUID(self):\n return self.world.getDataManager().getUUID()", "title": "" }, { "docid": "0dd5695217b97e6f4e70e897db087b0b", "score": "0.5241363", "text": "def identstr(self):\n self.writecmd(self.AVRAPP,0x83,0, None);\n vendor=self.AVRVendors.get(ord(self.data[0]));\n deviceid=(ord(self.data[1])<<8)+ord(self.data[2]);\n device=self.AVRDevices.get(deviceid);\n \n #Return hex if device is unknown.\n #They are similar enough that it needn't be known.\n if device==None:\n device=(\"0x%04x\" % deviceid);\n \n return \"%s %s\" % (vendor,device);", "title": "" }, { "docid": "42bb1bfc09e06a8d40cfe9962cc3ae1b", "score": "0.5230703", "text": "def unique_id(self) -> str:\n serial = self.accessory_info.value(CharacteristicsTypes.SERIAL_NUMBER)\n return f\"homekit-{serial}-{self._iid}-{self.device_class}\"", "title": "" }, { "docid": "ad3706e59c2bf6bf5b6661720751a6d5", "score": "0.5230008", "text": "def uid(self):\n # Dev Note: This also returned nothing in my harry potter dataset, not sure if it was supposed to contain anything\n return self._uid", "title": "" }, { "docid": "caf8436b2e4ab667e67aec4ef0dc0e58", "score": "0.5217923", "text": "def __read_id_info(self):\n info = self.__read_i2c_data(cmd=self.__CMD_READ_ID, length = 9)\n self.__id = self.__struct.unpack('H', bytes(info[0:2]))[0]\n self.__max_arg_num = info[2]\n self.__num_funcs = info[3]\n #18/Nov/2016 Ardpy의 버전을 받아오도록 했다.\n 
#20/Nov/2016 firmware의 버젼도 받아오도록 했다.\n self.__ver_apy = (self.__struct.unpack('H', bytes(info[4:6])))[0]\n self.__ver_firmw = (self.__struct.unpack('H', bytes(info[6:8])))[0]\n self.__str_ver_Ardpy = \"%d.%d.%d\"%self.__decode_ver(self.__ver_apy)\n self.__str_ver_firmw = \"%d.%d.%d\"%self.__decode_ver(self.__ver_firmw)", "title": "" }, { "docid": "320c486aba11caa3c56f170b2fe3af48", "score": "0.5203072", "text": "def unique_id(self):\n return f\"{self._mac_address}:{self._device_type}:{self._name}\"", "title": "" }, { "docid": "c702e57f5821c885342833af04004cb0", "score": "0.5197346", "text": "def get_id(self):\n chip_id = self.bus.read_byte_data(self.addr, self._REG_ID)\n return chip_id", "title": "" }, { "docid": "327ced1301af8e472b4e1570e661ef15", "score": "0.5190064", "text": "def unique_id(self):\n return _RSSI_swig.ctc_decode_bb_sptr_unique_id(self)", "title": "" }, { "docid": "53afdd54b5bc622cf5110d0065519a8f", "score": "0.5179092", "text": "def _current_bank_details(self):\n bank_name, bank = super(_DeviceComponent, self)._current_bank_details()\n if bank and len(bank) > 4 and self._param_offset:\n bank = bank[4:]\n return (bank_name, bank)", "title": "" }, { "docid": "bdfef113865f0f7bb4db11661a8a2d18", "score": "0.5174438", "text": "def get_barcode(self):\n return self.__id", "title": "" }, { "docid": "0979b58d34a4983ba6df10148d47ba26", "score": "0.5169018", "text": "def device_id(self):\n return self.unique_id", "title": "" }, { "docid": "b780d09d3f3c17b4beed8bff8b017e5a", "score": "0.5160714", "text": "def poll_nfc_reader() -> str:\n\n command = \"sudo nfc-poll | grep UID | cut -d : -f 2\"\n output = subprocess.getoutput(command)\n if output:\n tag_id = output.split(\"\\n\")[1].strip()\n return tag_id\n else:\n return \"\"", "title": "" }, { "docid": "37120e14c4889de4cd8c8c1141eae27b", "score": "0.5156137", "text": "def get_branch_id(self):\n return # osid.id.Id", "title": "" }, { "docid": "589ebd52f8b20b612eeb08be54d7a9c6", "score": "0.5152278", "text": "def get_sensor_serial(self):\n function_id = 0x00050006\n\n res = self._send_packet(function_id, receive_size=4)\n res = self._decode_packet(res, receive_size=4)\n\n return struct.unpack(\">I\", res)[0]", "title": "" }, { "docid": "3611f52a706e8c760258285ead1f9616", "score": "0.51513153", "text": "def unique_id(self):\n return f\"{self._ccb.unique_id}-{self._sensor_type}\"", "title": "" }, { "docid": "165ed966f1f30d9e3c2452ad02a6ddfd", "score": "0.5148492", "text": "def drive_id(sbx):\n \n str_out = systxt(['udevadm', 'info', '--query=all', '--name='+sbx])\n str_out = str_out.split('ID_SERIAL=')\n if len(str_out)==1:\n return 'none'\n else:\n return str_out[1].split('\\n')[0].strip()", "title": "" }, { "docid": "2f46d07844c65f63eb5e7fc02626d008", "score": "0.51335716", "text": "def get_serial_number(self):\n return self.device_serialno_", "title": "" }, { "docid": "b00e647213c646b1626351a0fc32bc15", "score": "0.5132947", "text": "def unique_id(self):\n devID = self._device.getId()\n return f\"{devID}\"", "title": "" }, { "docid": "a8bd3b61585deb6202dd250a26b80e32", "score": "0.5131884", "text": "def product_id(self):\n return self._read_register(_OV7670_REG_PID)", "title": "" }, { "docid": "ca3299b20a20f4b3cf8a14666e6e6dff", "score": "0.5124243", "text": "def serial_number(self):\n return self.device.serialNumber()", "title": "" }, { "docid": "c2ee828c0fb22cc78e07694d618b5b68", "score": "0.5108483", "text": "def unique_id(self):\n return self.device.id", "title": "" }, { "docid": "768b8953be784c3c841c43b9797e1aab", "score": 
"0.5102161", "text": "def get_serial(self):\n if self.is_psu_fan:\n serial = self.psu.get_serial()\n else:\n serial = \"Unknown\"\n\n return serial", "title": "" }, { "docid": "d6eb1ed794f98e69c0f62cda4be64a7f", "score": "0.50972813", "text": "def identify(self):\n\t\t\n\t\t# send message and wait for a response\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tself._start_bootloader_command()\n\t\t\t\tresponse = self._send(subject = MessageSubject.IDENTIFY, timeout = 0.1, attempts = 10)\n\t\t\texcept BootloaderException:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tbreak\n\t\t\n\t\t# split up the message and fill in the board-representation\n\t\tself.board.bootloader_type = response.data[0] >> 4\n\t\tself.board.version = response.data[0] & 0x0F\n\t\t\n\t\tself.board.pagesize = {0: 32, 1:64, 2:128, 3:256}[response.data[1]]\n\t\tself.board.pages = (response.data[2] << 8) + response.data[3]\n\t\t\n\t\t#print response.data\n\t\t\n\t\tself.board.connected = True", "title": "" }, { "docid": "22d915ed26d724f3871bf524fc46e848", "score": "0.5092659", "text": "def uid(self):\n return self._platform_impl.get_process_uid()", "title": "" }, { "docid": "749a915fed6d92dc4bcf3644e395c13d", "score": "0.5090617", "text": "def unique_id(self):\n return self._innogy_device.id", "title": "" }, { "docid": "121fa51a37f2bd8fb58857b6fb9769b1", "score": "0.5084747", "text": "def unique_id(self):\n return self._device.guid + self._sensor_type[\"name\"]", "title": "" }, { "docid": "7a9d4a70baa65cf2c19786a5ba78d665", "score": "0.5084697", "text": "def unique_id(self) -> str:\n return self.fibaro_device.unique_id_str", "title": "" }, { "docid": "a9c78ecb067e048582fa93ffc3fbda30", "score": "0.5081286", "text": "def uid(self) -> str:\n return f'{self.group}.{self.name}'", "title": "" }, { "docid": "2e8f09961d3c1c02f78b191e24652714", "score": "0.5074994", "text": "def unique_id(self):\n return self._device_info.unique_id", "title": "" }, { "docid": "e73a15cd284e7e6895849177ae48e347", "score": "0.5074046", "text": "def get_device_ID(self):\n\n return \"Libera Electron BPM with the Epics ID \"+ \"\\\"\"+self.epicsID+\"\\\" and the MAC Address \\\"\"+self.macaddress+\"\\\"\"", "title": "" }, { "docid": "ad704892e4d72fe78ad68fb54894c008", "score": "0.50660396", "text": "def ioreg():\n cmd = ['/usr/sbin/ioreg', '-rd1', '-c', 'IOPlatformExpertDevice']\n full_reg = subprocess.check_output(cmd)\n reg_list = full_reg.split('\\n')\n for reg in reg_list:\n if reg.startswith(' \"IOPlatformUUID\"'):\n uuid = reg[26:-1]\n return uuid", "title": "" }, { "docid": "75cb903e95fd5c07aa6921d9f01ac073", "score": "0.5064587", "text": "def getIDN(self):\n\n result = self.__get_command('*IDN?').split(',')\n\n try:\n self.id = result[1][1:]\n self.serial = result[2]\n self.version = result[3]\n self.logger.debug(f'Device ID {self.id}, Serial {self.serial}, Version {self.version}')\n except IndexError:\n self.logger.error('Could not read IDN')", "title": "" }, { "docid": "db8f87e7a5db2a6bc9e47fe73bba38b2", "score": "0.5057146", "text": "def device_info(self):\n return self._name", "title": "" }, { "docid": "e8b6bfa056003ad5b8cfeb2f0939468a", "score": "0.5055792", "text": "def uid(self):\n # type: () -> str\n return self.__uid", "title": "" }, { "docid": "e8b6bfa056003ad5b8cfeb2f0939468a", "score": "0.5055792", "text": "def uid(self):\n # type: () -> str\n return self.__uid", "title": "" }, { "docid": "dd6f4268dfb765dfa5383b401e608fd6", "score": "0.5049177", "text": "def unique_id(self):\n return self._device.device_uuid", "title": "" }, { "docid": 
"581e488f423af4e942bbd7b68aff95ed", "score": "0.50490427", "text": "def device_info(self):\n\n model = (\n (\"BRC1H\" + self.dev_info[\"Model Number String\"])\n if \"Model Number String\" in self.dev_info\n else \"\"\n )\n sw_version = (\n self.dev_info[\"Software Revision String\"]\n if \"Software Revision String\" in self.dev_info\n else \"\"\n )\n return {\n \"identifiers\": {\n # Serial numbers are unique identifiers within a specific domain\n (DOMAIN, self.unique_id)\n },\n \"name\": self.name,\n \"manufacturer\": \"DAIKIN\",\n \"model\": model,\n \"sw_version\": sw_version,\n \"via_device\": (DOMAIN, self.unique_id),\n }", "title": "" }, { "docid": "83c7ef2356fa5a67521034bd4f06d3e3", "score": "0.5029478", "text": "def getUid(self):\n return self._uid.hex", "title": "" }, { "docid": "d1af858e3298d11d5350b73bbea3d602", "score": "0.50288445", "text": "def unique_id(self):\n return f\"{self._device.uuid}-SENSOR\"", "title": "" }, { "docid": "faaabbb146cd7bbd3dc4aaef8247f6bf", "score": "0.50270987", "text": "def get_uid(self):\n\t\treturn self.__uid", "title": "" }, { "docid": "7c948c1699f321f866ec8ec4a5b3fe69", "score": "0.5026972", "text": "def get_position(self):\n self.query_position()\n c = \"\"\n buf = \"\"\n while c != \"R\":\n c = self.read_char()\n buf += c\n return tuple([int(val) for val in buf[len(self.csi):-1].split(\";\")])", "title": "" }, { "docid": "5db95cee40e03a4a344b0bbfac7ebf77", "score": "0.5025362", "text": "def device_info(self):\n bbq_info = self._bbq_info\n\n if bbq_info.model_name == \"HeaterMeter\":\n return None\n\n return {\n \"name\": bbq_info.friendly_name,\n \"identifiers\": {(BBQ_DOMAIN, bbq_info.uuid.replace(\"-\", \"\"))},\n \"model\": bbq_info.model_name,\n \"manufacturer\": bbq_info.manufacturer,\n }", "title": "" }, { "docid": "336084aeed93822c4865dbff503f2d09", "score": "0.5024149", "text": "def framework(self):\n # type: () -> str\n return self.__fw_uid", "title": "" }, { "docid": "efbb493ea2bc32950a226a581c2a14e8", "score": "0.5018398", "text": "def DisplaySensorID():\n print (\"Not yet Implemented\")\n return", "title": "" }, { "docid": "d7aa0cfee17fd4587408e4757f56fdc6", "score": "0.5015297", "text": "def return_uuid(self):\n output = self.shell_cmd('system_profiler SPHardwareDataType')\n if output:\n # hard coding the 12th item in the index to memory. 
Don't go changing!\n uuid = output[12].strip()\n if uuid.lower().startswith('hardware'):\n return uuid\n else:\n return \"Error returning Hardware UUID\"", "title": "" }, { "docid": "0d2de678dd016aca72d7adeaaa79f0d2", "score": "0.50138474", "text": "def uid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "0d2de678dd016aca72d7adeaaa79f0d2", "score": "0.50138474", "text": "def uid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "0d2de678dd016aca72d7adeaaa79f0d2", "score": "0.50138474", "text": "def uid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "0d2de678dd016aca72d7adeaaa79f0d2", "score": "0.50138474", "text": "def uid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "0d2de678dd016aca72d7adeaaa79f0d2", "score": "0.50138474", "text": "def uid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "0d2de678dd016aca72d7adeaaa79f0d2", "score": "0.50138474", "text": "def uid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "0d2de678dd016aca72d7adeaaa79f0d2", "score": "0.50138474", "text": "def uid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "83cb2ce78b75ce9400e65b7e8e4b7586", "score": "0.5008975", "text": "def deviceID(self):\n self.write(\"*IDN?\")\n print \"DEVICE: \" + self.read()", "title": "" }, { "docid": "111a784b71b409534e02169eb8d5e987", "score": "0.50069386", "text": "def uid(self):\n return self._uid", "title": "" }, { "docid": "04369d7dbdd8677eaceaba30de90a34a", "score": "0.50034964", "text": "def uid(self):\n return self[\"uid\"]", "title": "" }, { "docid": "025b2ba52f91b5dcae8e819f3575c31b", "score": "0.5000718", "text": "def serial_id(self):\n\t\t\n\t\treturn self.get_serial_id()", "title": "" }, { "docid": "1618002b525351985e77633b579c3c3d", "score": "0.5000214", "text": "def sensor_frame_id(self):\n return self.ikid", "title": "" }, { "docid": "7cc5ca386a7a78d7b80b9809a7b2bdae", "score": "0.499963", "text": "def sysinfo():\n return (RPI_REVISION_HEX, 'B+', RPI_REVISION, '512', 'RaspberryPi')", "title": "" }, { "docid": "7dd7d32bc3fb956e036bcf41f98f96ed", "score": "0.499695", "text": "def unique_id(self):\r\n return f\"{self.vin}/starter_battery_condition\"", "title": "" }, { "docid": "d2aa3604f53c2353ba89fbc15dc14d20", "score": "0.49866962", "text": "def get_bk_position(self):\r\n return self._bK_position", "title": "" }, { "docid": "2782b09549d168113860a71a26ef9295", "score": "0.49846953", "text": "def unique_id(self):\n return f\"{self._mac_address}:{self._device_id}:state\"", "title": "" }, { "docid": "64dce32188359a1de57f90bfc998fa1c", "score": "0.49803117", "text": "def boot2docker_id() -> Tuple[Optional[int], Optional[int]]:\n uid = cmd_output_to_int([\"boot2docker\", \"ssh\", \"id\", \"-\"])\n gid = cmd_output_to_int([\"boot2docker\", \"ssh\", \"id\", \"-g\"])\n return (uid, gid)", "title": "" }, { "docid": "17e09e9ddb4acafc266f70d5ea59dabf", "score": "0.49754748", "text": "def get_device_id() -> str:\n pass", "title": "" }, { "docid": "04f2b4888f7478884ecd2c88087931d1", "score": "0.49742174", "text": "def uid(self) -> str:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "04f2b4888f7478884ecd2c88087931d1", "score": "0.49742174", "text": "def uid(self) -> str:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "04f2b4888f7478884ecd2c88087931d1", 
"score": "0.49742174", "text": "def uid(self) -> str:\n return pulumi.get(self, \"uid\")", "title": "" } ]
9929accb08cf07af7a71119ec5e2502c
Parse packages from filename.
[ { "docid": "f66162ef1cde617aa750947290fd4e76", "score": "0.0", "text": "def get_packages_async_2(filename):\n pool = Pool(processes=cpu_count() + 2)\n num = 0\n for packages in pool.map(_get_packages, _iter(filename)):\n if packages:\n for package in packages:\n package.set_num(num)\n num += 1\n yield package", "title": "" } ]
[ { "docid": "69f75f8ea60b3debaabd6fc32d0dabc1", "score": "0.66556424", "text": "def parse(self, filename):\n pass # implemented in Ada", "title": "" }, { "docid": "e39c948dfe9585ded737137cfc2c7717", "score": "0.6559397", "text": "def get_packages(filename):\n num = 0\n pcap_num = 0\n with open(filename) as f:\n pcap = dpkt.pcap.Reader(f)\n if pcap.datalink() == dpkt.pcap.DLT_LINUX_SLL:\n for ts, buf in pcap:\n pcap_num += 1\n sll = dpkt.sll.SLL(buf)\n ip = sll.data\n if type(ip.data) != TCP and type(ip.data) != UDP:\n continue\n if ip.data.data:\n package_type = determine_package_type(ip.data.data)\n if package_type:\n try:\n for package in package_type.parse(ip.data.data):\n num += 1\n package.set_ts(ts)\n package.set_pcap_package(sll)\n package.set_num(num)\n package.set_pcap_num(pcap_num)\n package.set_ip_dst(socket.inet_ntoa(ip.dst))\n package.set_ip_src(socket.inet_ntoa(ip.src))\n yield package\n except ParseException:\n continue\n else:\n raise WrongFileFormatException(filename)", "title": "" }, { "docid": "3bd19dcf3eecda355695906edf67e8bd", "score": "0.65228814", "text": "def parse(self, filename):\n if filename.endswith('.pbf'):\n return self.parse_pbf_file(filename)\n elif filename.endswith(('.osm', '.osm.bz2')):\n return self.parse_xml_file(filename)\n else:\n raise NotImplementedError('unknown file extension')", "title": "" }, { "docid": "0c4d6c11ae0c61459bf128cdf43a1e2f", "score": "0.63581765", "text": "def parse_package(package_path, package_format='tango'):\n\n if package_format == 'tango':\n return parse_tango_package(package_path)\n else:\n raise Exception('Specified unsupported package_format {}. Supported formats: tango.'.format(package_format))", "title": "" }, { "docid": "9d9890f088b21202243d7f231978c9a9", "score": "0.6340313", "text": "def parse_filename(filename, project_hint=None):\n for pkg_type, rgx in GOOD_PACKAGE_RGXN:\n m = rgx.match(filename)\n if m:\n return (m.group('project'), m.group('version'), pkg_type)\n if project_hint is not None:\n proj_rgx = re.sub(r'[^A-Za-z0-9]+', '[-_.]+', project_hint)\n proj_rgx = re.sub(\n r'([A-Za-z])',\n lambda m: '[' + m.group(1).upper() + m.group(1).lower() + ']',\n proj_rgx,\n )\n m = re.match(proj_rgx + r'(?=-)', filename)\n if m:\n project = m.group(0)\n rest_of_name = filename[m.end(0):]\n for pkg_type, rgx in BAD_PACKAGE_BASES:\n m = rgx.match(rest_of_name)\n if m:\n return (project, m.group('version'), pkg_type)\n for pkg_type, rgx in BAD_PACKAGE_RGXN:\n m = rgx.match(filename)\n if m:\n return (m.group('project'), m.group('version'), pkg_type)\n return (None, None, None)", "title": "" }, { "docid": "87dc6a577bdc5db195da5c49955591a9", "score": "0.61758703", "text": "def parse(self, file_path, package_name='pdfminer'):\n implementation = self.packageMapper.get(package_name)\n return implementation(file_path)", "title": "" }, { "docid": "3b908b4d4f644cb58ab73634d423c155", "score": "0.615252", "text": "def parse(cls, filename):\n if filename.endswith('.rpm'):\n filename = filename[:-4]\n m = cls._re.match(filename)\n if not m:\n return None\n n, v, r, a = m.groups()\n if ':' not in v:\n return n, None, v, r, a\n e, v = v.split(':', 1)\n e = int(e)\n return cls(n, e, v, r, a)", "title": "" }, { "docid": "036d6fe16caa565455888ed3873b604b", "score": "0.6119223", "text": "def _parse_package(cls, content):\n content = content.split(\" \")\n protocol, filter_strings = None, None\n name = None # darkplaces\n\n # one of the first two is the protocol\n if content[0].isdigit():\n protocol = content[0]\n filter_strings = 
content[1:]\n\n elif content[1].isdigit():\n # darkplaces\n name = content[0]\n protocol = content[1]\n filter_strings = content[2:]\n\n return protocol, filter_strings, name", "title": "" }, { "docid": "a6f2c27c8606935cea753786f19f682d", "score": "0.6114801", "text": "def load_package_list(packages_file):\n with open(packages_file) as pkgs:\n packages = pkgs.read().splitlines()\n return packages", "title": "" }, { "docid": "f1c65c907751261301502490c4a0bd16", "score": "0.6110994", "text": "def parse(self, fname):", "title": "" }, { "docid": "55d9d88dbb2271564654b0c993967593", "score": "0.60790217", "text": "def parse_requirements(filename):\n\n # Get absolute filepath\n filepath = os.path.join(os.getcwd(), filename)\n\n # Check if file exists\n if not os.path.exists(filepath):\n print(\"[!] File {} not found\".format(filename))\n return []\n\n # Parse install requirements\n with open(filepath, encoding=\"utf-8\") as f:\n return [requires.strip() for requires in f.readlines()]", "title": "" }, { "docid": "c7a30c9c0e019bd90bf340c972230a6d", "score": "0.59785664", "text": "def dmfile(filename):\n result = []\n with open(filename, 'r') as fp:\n for line in fp:\n line = line.strip()\n comment_pos = comment_find(line)\n\n if comment_pos >= 0:\n line = line[:comment_pos].strip()\n\n if not line:\n continue\n\n match = DMFILE_RE.match(line)\n if match is None:\n raise InvalidPackageSpec(f\"'{line}'\")\n\n pkg = match.groupdict()\n if pkg['version']:\n invalid = DMFILE_INVALID_VERSION_RE.match(pkg['version'])\n if invalid:\n raise InvalidPackageSpec(f\"'{line}'\")\n\n pkg['fullspec'] = line\n result.append(pkg)\n\n if not result:\n raise EmptyPackageSpec(\"Nothing to do\")\n\n return result", "title": "" }, { "docid": "e73b8ecf46b9d21258deb3a2952677a2", "score": "0.5970222", "text": "def parse(path):\n fmt = path.split(\".\")[-1]\n with open(path, 'r') as parsefile:\n return parsers[fmt](parsefile.read())", "title": "" }, { "docid": "65c65e4b4170e94089f0c794228aca2d", "score": "0.5903436", "text": "def parse_requirements(file):\n required_packages = []\n with open(os.path.join(os.path.dirname(__file__), file)) as req_file:\n for line in req_file:\n if '/' not in line:\n required_packages.append(line.strip())\n return required_packages", "title": "" }, { "docid": "3ff005220688b99976c899996db5d76b", "score": "0.5872778", "text": "def _parse_from_file(self, filepath, fname,\n dependencies, recursive, greedy):\n #Now that we have the file contents, we can parse them using the parsers\n string = self.tramp.read(filepath)\n pmodules = self.modulep.parse(string, self, filepath=filepath)\n file_mtime = self._get_mod_mtime(filepath)\n\n for module in pmodules:\n module.change_time = file_mtime\n self.modules[module.name.lower()] = module\n self._modulefiles[fname].append(module.name.lower())\n\n pprograms = self.modulep.parse(string, self, False)\n for program in pprograms:\n program.change_time = file_mtime\n self.programs[program.name.lower()] = program\n self._programfiles[fname].append(program.name.lower())\n\n #There may be xml files for the docstrings that also need to be parsed.\n self._parse_docstrings(filepath)\n \n return (pmodules, pprograms)", "title": "" }, { "docid": "8c0427d89e517552eb101aaab15362ff", "score": "0.5868963", "text": "def parse_filenames(self, filenames):\n for inputfile in filenames:\n if inputfile.endswith('.lib') or inputfile.endswith('.off'):\n self.process_library_file(inputfile)\n elif inputfile.endswith('.dat'):\n self.process_dat_file(inputfile)\n elif 
inputfile.endswith(\"mol2\"):\n self.process_mol2_file(inputfile)\n else:\n self.process_frc_file(inputfile)\n\n self.reduce_atomtypes()", "title": "" }, { "docid": "d76124eee9f4567c1247e7b91f7c68d5", "score": "0.58236897", "text": "def parse_requirements(fname='requirements.txt'):\n require_fpath = join(dirname(__file__), fname)\n\n def parse_line(line):\n \"\"\"\n Parse information from a line in a requirements text file\n \"\"\"\n info = {}\n if line.startswith('-e '):\n info['package'] = line.split('#egg=')[1]\n else:\n # Remove versioning from the package\n pat = '(' + '|'.join(['>=', '==', '>']) + ')'\n parts = re.split(pat, line, maxsplit=1)\n parts = [p.strip() for p in parts]\n\n info['package'] = parts[0]\n if len(parts) > 1:\n # FIXME: This breaks if the package doesnt have a version num\n op, rest = parts[1:]\n if ';' in rest:\n # Handle platform specific dependencies\n # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies\n version, platform_deps = map(str.strip, rest.split(';'))\n info['platform_deps'] = platform_deps\n else:\n version = rest # NOQA\n info['version'] = (op, version)\n return info\n\n # This breaks on pip install, so check that it exists.\n if exists(require_fpath):\n with open(require_fpath, 'r') as f:\n packages = []\n for line in f.readlines():\n line = line.strip()\n if line and not line.startswith('#'):\n info = parse_line(line)\n package = info['package']\n if not sys.version.startswith('3.4'):\n # apparently package_deps are broken in 3.4\n platform_deps = info.get('platform_deps')\n if platform_deps is not None:\n package += ';' + platform_deps\n packages.append(package)\n return packages\n return []", "title": "" }, { "docid": "0fee4ea6b6a7e8d44887031aa903be35", "score": "0.58157295", "text": "def _parse_requirements(filename: str) -> List[str]:\n # Ref: https://stackoverflow.com/a/42033122/\n return distutils.text_file.TextFile(filename=str(Path(__file__).with_name(filename))).readlines()", "title": "" }, { "docid": "f8166eea4d675efb49ea29ce7ca30c98", "score": "0.57794666", "text": "def parse_file(self, filename):\n gfile = open(filename, 'r')\n gstr = gfile.readlines()\n gfile.close()\n self.parse_lines(gstr)", "title": "" }, { "docid": "0da83c587815d8399f13d94f936c70d7", "score": "0.573872", "text": "def parse(self, filename):\r\n\r\n new_components = []\r\n\r\n with open(filename) as f:\r\n for line in f:\r\n if line.startswith('DEF '):\r\n new_components.append(ComponentParser(line).parse(f))\r\n\r\n for new_component in new_components:\r\n new_component.name = new_component.name.upper()\r\n if new_component.name not in self.name2cpt:\r\n self.name2cpt[new_component.name] = new_component\r\n self.components.append(new_component)\r\n if 'kicad_alias' in new_component.attributes:\r\n for name in new_component.attributes['kicad_alias'].split():\r\n self.name2cpt[name.upper()] = new_component", "title": "" }, { "docid": "1e2c70386c70c4adb2fc706b0f0f87ac", "score": "0.57234955", "text": "def parse_from_file(filename, parser, splitter=split_on_vnums, validate=None):\n with open(filename) as f:\n file_text = f.read()\n\n if validate:\n validate(file_text)\n\n file_text = file_text.rstrip('$\\n') # world files\n file_text = file_text.rstrip('$~\\n') # shop files\n\n return parse_from_string(file_text, parser, splitter=splitter)", "title": "" }, { "docid": "99aaddcdeb7e0178c37df9982ce0f010", "score": "0.57223636", "text": "def parse_requirements(filename: str) -> List[str]:\n # Ref: 
https://stackoverflow.com/a/42033122/\n requirements = (Path(__file__).parent / filename).read_text().strip().split('\\n')\n requirements = [r.strip() for r in requirements]\n requirements = [r for r in sorted(requirements) if r and not r.startswith('#')]\n return requirements", "title": "" }, { "docid": "017563706adb6a703d7507171bc8f7e4", "score": "0.5690299", "text": "def parse_file(self, filename):\n efile = open(filename, 'r')\n estr = efile.readlines()\n efile.close()\n self.parse_lines(estr)", "title": "" }, { "docid": "02ae437673d7e8e0db8b5553bbda069f", "score": "0.56521785", "text": "def parse_input_file(filename):\n modname = os.path.splitext(os.path.basename(filename))[0]\n spec = importlib.util.spec_from_file_location(modname, filename)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n assert 'A' not in mod.all_mss\n assert type(mod.all_mss) == set\n return mod.struct, mod.all_mss", "title": "" }, { "docid": "71afb66bcc02b57db7f04298febf78c8", "score": "0.5607634", "text": "def main( packageName, filenames=(), force=0):\n packageModule = __import__(\n packageName, {}, {},\n string.split(packageName, '.')\n )\n if not hasattr( packageModule, 'package' ):\n # build the default package object...\n from resourcepackage import package\n packageObject = package.Package(\n packageModule.__name__,\n directory = os.path.dirname( os.path.abspath(packageModule.__file__) ),\n )\n else:\n packageObject = packageModule.package\n if filenames:\n for filename in filenames:\n packageObject.scanFile( source = filename, force=force )\n else:\n packageObject.scan( force=force )", "title": "" }, { "docid": "0a03fee77d200f66d46accafe6613f47", "score": "0.558873", "text": "def parse(filename):\n stream = open(filename, 'r')\n attributes = parse_stream(stream)\n stream.close()\n return attributes", "title": "" }, { "docid": "861b15a5256adad33d29c0841c11ed50", "score": "0.55766684", "text": "def test_package_parse(self):\r\n\r\n# TODO embedded name\r\n\r\n# external name\r\n _valid_chunk = b''.join((b\"\\x1e\\x00\\x0d\\x00\\x7c\\xfe\\xb5\\xff\",\r\n b\"\\x84\\x01\\x97\\x00\\x00\\x7f\\x34\\xe3\",\r\n b\"\\x2a\\x09\\x7f\\x2b\\xe3\\x2a\\x09\\x00\"))\r\n _package = Eagle.Package.parse(_valid_chunk)\r\n\r\n self.assertEqual(_package.name, 'name_a')\r\n self.assertEqual(_package.desc, 'name_b')\r\n self.assertEqual(_package.numofshapes, 13)\r\n\r\n return", "title": "" }, { "docid": "7478d291f5ead83c65d1aa0065c6c654", "score": "0.5562234", "text": "def _ParsePkgmapLine(self, line):\n if line.startswith(\"#\"):\n return None\n parts = re.split(c.WS_RE, line.strip())\n # logging.debug(\"_ParsePkgmapLine(): %s\", parts)\n if len(parts) < 4:\n raise ParsingError(\"Line does not have enough fields: %s\"\n % repr(parts))\n file_type = parts[1]\n f_path = None\n f_target = None\n f_type = None\n f_class = None\n f_major = None\n f_minor = None\n f_mode = None\n f_owner = None\n f_group = None\n f_size = None\n f_cksum = None\n f_modtime = None\n f_pkgname = None\n pkgnames = []\n if file_type == 's':\n # ftype s: path=rpath s class package\n #\n # Spotted in real life:\n # ['/opt/csw/man=share/man', 's', 'none', 'CSWschilybase',\n # 'CSWschilyutils', 'CSWstar', 'CSWcommon']\n (f_path_rpath, f_type, f_class) = parts[:3]\n pkgnames.extend(parts[3:])\n f_path, f_target = f_path_rpath.split(\"=\")\n elif file_type == 'l':\n # ftype l: path l class package\n f_path, f_type, f_class, f_pkgname = parts\n elif file_type in ('d', 'x', 'p'):\n # ftype d: path d class mode owner group package(s)\n 
# ftype x: path x class mode owner group package\n f_path, f_type, f_class, f_mode, f_owner, f_group = parts[:6]\n pkgnames.extend(parts[6:])\n elif file_type == '?':\n # Does not follow the specfication. A specimen:\n # /opt/csw/gcc3/lib/gcc/sparc-sun-solaris2.8/3.4.6/include \n # ? none CSWgcc3g77 CSWgcc3core\n logging.warning(\"File type of %s is '?', assuming it's a directory.\",\n parts[0])\n f_type = 'd'\n f_path, unused_type, f_class = parts[:3]\n pkgnames.extend(parts[3:])\n elif file_type in ('b', 'c'):\n # ftype b: path b class major minor mode owner group package\n # ftype c: path c class major minor mode owner group package\n (f_path, f_type, f_class, f_major, f_minor, f_mode, f_owner,\n f_group, f_pkgname) = parts\n elif file_type in ('f', 'v', 'e'):\n # ftype f: path f class mode owner group size cksum modtime package\n # ftype v: path v class mode owner group size cksum modtime package\n # ftype e: path e class mode owner group size cksum modtime package\n #\n # Spotted in real life:\n # ['/etc/.java/.systemPrefs/.system.lock', 'e', 'preserve',\n # '0644', 'root', 'bin', '0', '0', '1265116929', 'SUNWj6cfg',\n # 'SUNWj5cfg']\n (f_path, f_type, f_class, f_mode, f_owner, f_group, f_size,\n f_cksum, f_modtime) = parts[:9]\n pkgnames.extend(parts[9:])\n else:\n raise ParsingError(\"Wrong file type: %s in %s\"\n % (repr(file_type), repr(line)))\n if f_pkgname:\n pkgnames.append(f_pkgname)\n\n static_parts = parts[:9]\n dynamic_parts = parts[9:]\n # ['/usr/lib/sparcv9/libpthread.so.1', 'f', 'none', '0755', 'root',\n # 'bin', '41296', '28258', '1018129099', 'SUNWcslx']\n d = {\n \"path\": f_path,\n \"target\": f_target,\n \"type\": f_type,\n \"class\": f_class,\n \"major\": f_major,\n \"minor\": f_minor,\n \"mode\": f_mode,\n \"owner\": f_owner,\n \"group\": f_group,\n \"size\": f_size,\n \"cksum\": f_cksum,\n \"modtime\": f_modtime,\n \"pkgnames\": pkgnames,\n \"line\": line,\n }\n return d", "title": "" }, { "docid": "e2863f7678fd12749a269fb0e3f84131", "score": "0.55536395", "text": "def parse_file_name(self, path):\n pass", "title": "" }, { "docid": "c7517fa6812b71bf3332db81e124d028", "score": "0.55464613", "text": "def parseFilename(filename, verbose=False):\n\n fname_patt = r\"(?P<product>[qomens]\\w\\w\\w)-(?P<itype>[aV])\"\n fname_patt = fname_patt + r\"-(?P<region>\\w\\w\\w)(?P<year>\\d\\d)\"\n fname_patt = fname_patt + r\"-(?P<doy_start>\\d\\d\\d)\"\n fname_patt = fname_patt + r\"-(?P<doy_end>\\d\\d\\d)\\.\"\n\n m = re.match(fname_patt, filename)\n if m is not None:\n year2 = int(m.group(\"year\"))\n if year2 > 80:\n year4 = year2 + 1900\n else:\n year4 = 2000 + year2\n fparts = {\n \"product\": m.group(\"product\"),\n \"itype\": m.group(\"itype\"),\n \"region\": m.group(\"region\"),\n \"year\": year4,\n \"doy_start\": int(m.group(\"doy_start\")),\n \"doy_end\": int(m.group(\"doy_end\")),\n }\n return fparts\n else:\n return None", "title": "" }, { "docid": "181d267a83e86e776a0d23cf59fa8773", "score": "0.5531865", "text": "def _parse_from_file_path(self, path):\n return self._parse_from_file_name(os.path.basename(path))", "title": "" }, { "docid": "65fab89e846a43aef0239d59f9185953", "score": "0.5527418", "text": "def parse(self, filename: Path) -> Metadata:\n raise NotImplementedError()", "title": "" }, { "docid": "9c93af09ade5a806578fc9fbf02ea8b6", "score": "0.5521588", "text": "def parse_info_from_filename(self, files):\n filename = os.path.basename(self.files[\".out\"])\n rogue_string = \".zib.de\"\n file_path_clean = filename.replace(rogue_string, \"\")\n fnparts = 
file_path_clean.split(\".\")\n if len(fnparts) != 8:\n return {}\n\n info = {\n \"test_set\": fnparts[1], # short, bug, etc,\n \"settings_short_name\": fnparts[-2],\n \"run_environment\": fnparts[-3],\n \"opt_flag\": fnparts[-5],\n \"architecture\": fnparts[-7],\n \"os\": fnparts[-8]}\n return info", "title": "" }, { "docid": "0eb51c8a5ef68c086bbb99ec26058eb8", "score": "0.5517156", "text": "def fetch(self, filename):\n return self.packages.get(filename)", "title": "" }, { "docid": "5b54f2ae75c211d113b86f87e9b7c63d", "score": "0.5515779", "text": "def parse_file(filename, comments=';', skiprows=0, opener='(', closer=')'):\n text = Parser._readFileContents(filename, comments=comments, skiprows=skiprows)\n # Run the parsing and get a nest list of the results\n return Parser.parse_string(text, opener=opener, closer=closer)", "title": "" }, { "docid": "f304ebaf975bef7fc03b66fad5f6a482", "score": "0.5514139", "text": "def parse(cls, filename):\n return cls(et.parse(filename))", "title": "" }, { "docid": "a4f7f31cacadfe281bf974348f97c635", "score": "0.54876864", "text": "def parse_requirements(args):\n # pip version needs to be greater than 19.3.1 to run this script\n # see https://github.com/pypa/pip/issues/6070\n pip_version = str(subprocess.check_output([\"pip\", \"--version\"]))\n pip_version_major = int(str(pip_version).split(\".\")[0].split(\"pip\")[1].strip())\n if pip_version_major < 20:\n sys.exit(\n \"pip version is lower or equal to 19.3.1. Please upgrade the pip version to run this script.\"\n )\n\n # Start a requests session to reuse HTTP connections\n session = requests.Session()\n\n with open(args.file) as f:\n cache_path = os.path.realpath(args.cache_path)\n cache_path = check_cache_path_writable(cache_path)\n for line in f:\n char_list = line.split(\"==\")\n if len(char_list) == 2:\n # Parse PyPi and Piwheels pages to install package according to\n # its name and version\n parse_pypi_and_piwheels(\n char_list[0].strip(), char_list[1].strip(), cache_path, session\n )\n # Ignore comments\n elif not line.startswith(\"#\"):\n sys.exit(\n \"\\nName format in cext.txt is incorrect. 
Should be 'packageName==packageVersion'.\\n\"\n )", "title": "" }, { "docid": "af7be10db8e2dfdb2519a5bbbcb06b27", "score": "0.54868144", "text": "def _parse(filename):\n try:\n with open(\"{0}.json\".format(filename)) as file:\n return json.load(file)\n except FileNotFoundError:\n try:\n with open(\"{0}.yml\".format(filename)) as file:\n return yaml.load(file, Loader=yaml.FullLoader)\n except yaml.scanner.ScannerError:\n print(f'Invalid Yaml for {filename}.yml')\n except FileNotFoundError:\n print(f'Could not open {filename}.yml')", "title": "" }, { "docid": "ee408e490dce6aacd4128c36be4fa6f6", "score": "0.5485742", "text": "def parse_requirements(file_name):\n requirements = []\n for line in open(file_name, \"r\").read().split(\"\\n\"):\n if re.match(r\"(\\s*#)|(\\s*$)\", line) or line.startswith(\"-\"):\n continue\n elif \"://\" in line or line.startswith(\"-e\"):\n # TODO support version numbers\n if \"egg\" in line:\n requirements.append(re.sub(r\".*#egg=(.*)$\", r\"\\1\", line))\n elif \"file\" in line:\n requirements.append(line.strip().rsplit(\"/\", 1)[1])\n else:\n pass\n elif re.match(r\"\\s*-f\\s+\", line):\n pass\n else:\n requirements.append(line)\n return requirements", "title": "" }, { "docid": "087173793729735f65d177da4d34d01d", "score": "0.54575944", "text": "def _parse_file(file_name):\n with open(file_name, 'r') as f:\n return json.load(f)", "title": "" }, { "docid": "94914eaa069ee41b962988b54b9a6f03", "score": "0.5445027", "text": "def parser_from_file(filename):\n if not os.path.isfile(filename):\n raise ValueError(\"The specified file does not exist\")\n parsed = parse_file(filename)\n parsed.map_leaves(tokenize_leaf)\n return parsed", "title": "" }, { "docid": "fe2697c6b5fb6d33c08bdfc38712f3e6", "score": "0.5420339", "text": "def parse_requirements(fname='requirements.txt', with_version=True):\n import sys\n from os.path import exists\n import re\n require_fpath = fname\n\n def parse_line(line):\n \"\"\"\n Parse information from a line in a requirements text file\n \"\"\"\n if line.startswith('-r '):\n # Allow specifying requirements in other files\n target = line.split(' ')[1]\n for info in parse_require_file(target):\n yield info\n else:\n info = {'line': line}\n if line.startswith('-e '):\n info['package'] = line.split('#egg=')[1]\n else:\n # Remove versioning from the package\n pat = '(' + '|'.join(['>=', '==', '>']) + ')'\n parts = re.split(pat, line, maxsplit=1)\n parts = [p.strip() for p in parts]\n\n info['package'] = parts[0]\n if len(parts) > 1:\n op, rest = parts[1:]\n if ';' in rest:\n # Handle platform specific dependencies\n # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies\n version, platform_deps = map(str.strip,\n rest.split(';'))\n info['platform_deps'] = platform_deps\n else:\n version = rest # NOQA\n info['version'] = (op, version)\n yield info\n\n def parse_require_file(fpath):\n with open(fpath, 'r') as f:\n for line in f.readlines():\n line = line.strip()\n if line and not line.startswith('#'):\n for info in parse_line(line):\n yield info\n\n def gen_packages_items():\n if exists(require_fpath):\n for info in parse_require_file(require_fpath):\n parts = [info['package']]\n if with_version and 'version' in info:\n parts.extend(info['version'])\n if not sys.version.startswith('3.4'):\n # apparently package_deps are broken in 3.4\n platform_deps = info.get('platform_deps')\n if platform_deps is not None:\n parts.append(';' + platform_deps)\n item = ''.join(parts)\n yield item\n\n packages = 
list(gen_packages_items())\n return packages", "title": "" }, { "docid": "c98fac07d682f8a879f15082bae5270d", "score": "0.54199517", "text": "def read_pipfile(file: str) -> dict:\n flag = ''\n pattern = re.compile(r\"([\\w\\-_]+)\\s=\\s\\\"(.*?)\\\"\")\n packages = {}\n dev_packages = {}\n with open(file, 'r') as pipfile:\n for line in pipfile:\n line = line.strip()\n if line == '[packages]':\n flag = 'packages'\n continue\n if line == '[dev-packages]':\n flag = 'dev-packages'\n continue\n if line == '':\n flag = ''\n if flag == 'packages':\n package = pattern.match(line).group(1)\n version = pattern.match(line).group(2)\n packages[package] = version\n elif flag == 'dev-packages':\n package = pattern.match(line).group(1)\n version = pattern.match(line).group(2)\n dev_packages[package] = version\n\n return {'packages': packages,\n 'dev-packages': dev_packages}", "title": "" }, { "docid": "2175108e0e78c6ff4cc46e456744c27c", "score": "0.54165584", "text": "def parse_requirements(filename):\n # type: (str) -> List[str]\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]", "title": "" }, { "docid": "96736157960f0cd4689b0a4f16db166a", "score": "0.54133344", "text": "def specs_from_text_file(filename, concretize=False):\n with open(filename, \"r\") as f:\n specs_in_file = f.readlines()\n specs_in_file = [s.strip() for s in specs_in_file]\n return spack.cmd.parse_specs(\" \".join(specs_in_file), concretize=concretize)", "title": "" }, { "docid": "9ac2fe71b701c2b15f174da2491d3064", "score": "0.54005617", "text": "def tokenize_package_name(package_name):\n return PACKAGE_NAME_TOKENIZATION_PATTERN.split(package_name)", "title": "" }, { "docid": "fe16d7fadb57df970be695ab433027a9", "score": "0.53911173", "text": "def loadManifestFile(filename):\n manifest = xmlparser.parseFile(filename, xmlparser.NS_FORMATS_MANIFEST)\n if manifest.name != 'bakefile-manifest':\n raise errors.ReaderError(manifest, 'invalid manifest file')\n for fmt in manifest.children:\n if fmt.name != 'format':\n raise errors.ReaderError(fmt, 'invalid manifest file')\n info = FormatInfo()\n info.name = fmt.props['id'] \n for node in fmt.children:\n if node.name == 'description':\n info.desc = node.value\n elif node.name == 'default-filename':\n info.defaultFile = node.value\n else:\n raise errors.ReaderError(node, 'invalid format description')\n if info.name == None or info.desc == None or info.defaultFile == None:\n raise errors.ReaderError(fmt, 'format not fully described')\n formats[info.name] = info", "title": "" }, { "docid": "453be84be753d56e0f62565c888b2379", "score": "0.53778034", "text": "def parse(filename):\n\tglobal _catalogAddress, _catalogPort\n\n\ttry:\n\t\twith open(filename, 'r') as startupFile:\n\t\t\tstartupContent = startupFile.read()\n\t\t\tstartupContent = startupContent.split(ENDL)\n\n\t\t\t# First two lines : server address and port\n\t\t\t_catalogAddress = startupContent[0].split(': ')[1]\n\t\t\t_catalogPort = int(startupContent[1].split(': ')[1])\n\n\t\t\t# All other lines : path to media descriptor files\n\t\t\tmediaDescriptors = startupContent[2:]\n\t\t\tfor mediaDescriptor in mediaDescriptors:\n\t\t\t\ttry:\n\t\t\t\t\tmedia = _parseMediaDescriptor(mediaDescriptor)\n\t\t\t\t\t# Add this media to the catalog, but only if at least one of his images were readable\n\t\t\t\t\tif len(media['files']) > 0:\n\t\t\t\t\t\t_catalog.append(media)\n\t\t\t\t\telse:\n\t\t\t\t\t\traise EnvironmentError()\n\t\t\t\texcept 
EnvironmentError:\n\t\t\t\t\tprint('The media descriptor', mediaDescriptor, 'could not be read, ignoring this media.')\n\n\t\t\treturn (_catalogAddress, _catalogPort)\n\texcept EnvironmentError:\n\t\tprint('The catalog descriptor', filename, 'could not be read, server cannot run.')\n\t\texit()", "title": "" }, { "docid": "823c4650183f15a8843633b98e164199", "score": "0.5370685", "text": "def _parse_package_list(self, data):\n try:\n data_string = data.decode('utf-8')\n parsed_data = yaml.safe_load(data_string)\n\n if isinstance(parsed_data, dict):\n self.package_list = self._extract_package_list(parsed_data)\n else:\n raise errors.InvalidPackageListFormat(\n \"Package data should have a top-level mapping/object.\")\n except yaml.YAMLError as ex:\n raise errors.InvalidPackageListFormat(\n \"Invalid YAML in package list: %s\" % str(ex))", "title": "" }, { "docid": "e9073187ca7e0d7117d40e88f26fe1f8", "score": "0.5342954", "text": "def readsrgs(filename):\n\n packages=[]; classes=[]; methods=[]; fields=[]\n srgsdata = open(filename,'r').read().splitlines()\n\n for row in srgsdata:\n row = row.strip()\n #HINT: We check for comments and whitelines\n if not row or row[0] == '#':continue\n\n #HINT: We extract the flag for the row (first element of a split on ':')\n if not len(row.split(':')) == 2: raise Exception(\"The row is not conforming to TAG:ENTRY syntax. [%s]\"%row)\n flag = row.split(':')[0]\n row = row.split(':')[1].strip()\n if not flag in ['PK', 'CL', 'FD', 'MD']:raise Exception(\"Flag not recognized : %s\"%flag)\n\n #HINT: We check the value of the flag, and append the corresponding list\n #The way we are splitting, the file does support comments after the useful text\n if flag == 'PK': packages.append([row.split()[0], row.split()[1]])\n if flag == 'CL': classes.append([row.split()[0], row.split()[1]])\n if flag == 'FD': fields.append([row.split()[0], row.split()[1]])\n if flag == 'MD': methods.append([' '.join(row.split()[0:2]), ' '.join(row.split()[2:4])])\n\n return {'PK':packages, 'CL':classes, 'FD':fields, 'MD':methods}", "title": "" }, { "docid": "930a37b3ed3bcbe1a48aaf4b2f2d86fa", "score": "0.5342714", "text": "def parse_file(filename):\n # Prep the result with none values in case of error.\n result = {}\n for key in KEYS:\n result[key] = None\n\n result['major_release'] = None\n\n try:\n with open(filename, 'r') as f:\n lines = f.readlines()\n\n # Iterate lines in file.\n for line in lines:\n if line.startswith('DISTRIB'):\n key, value = line.split('=', 1)\n # Remove white space\n key = key.strip()\n value = value.strip()\n # Remove quotes\n value = value.strip('\"')\n if key in KEYS:\n result[key] = value\n\n # Get major release from DISTRIB_RELEASE\n if result.get('DISTRIB_RELEASE') is not None:\n result['major_release'] = major_release(result['DISTRIB_RELEASE'])\n except IOError:\n # Host probably does not have osa.\n pass\n\n return result", "title": "" }, { "docid": "45618b928f0f950e9c5785fe2e8094bf", "score": "0.53414226", "text": "def parse_filename(filename):\n\n beta_value = filename.split('_beta_')[1].split('_')[0]\n ionfrac_value = filename.split('_ionfrac_')[-1].split('.txt')[0]\n\n return float(beta_value), float(ionfrac_value)", "title": "" }, { "docid": "09568c00edf9f522bb757237afdb56c4", "score": "0.5341071", "text": "def parsedata(filename):\n return report.parse(os.path.join(os.path.dirname(__file__), \"data\", filename))", "title": "" }, { "docid": "5fda65c549e8a63cf621acddea1a6c07", "score": "0.5338784", "text": "def parse_requirements(filename):\n lineiter = 
(line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]", "title": "" }, { "docid": "5fda65c549e8a63cf621acddea1a6c07", "score": "0.5338784", "text": "def parse_requirements(filename):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]", "title": "" }, { "docid": "5fda65c549e8a63cf621acddea1a6c07", "score": "0.5338784", "text": "def parse_requirements(filename):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]", "title": "" }, { "docid": "5fda65c549e8a63cf621acddea1a6c07", "score": "0.5338784", "text": "def parse_requirements(filename):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]", "title": "" }, { "docid": "5fda65c549e8a63cf621acddea1a6c07", "score": "0.5338784", "text": "def parse_requirements(filename):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]", "title": "" }, { "docid": "5fda65c549e8a63cf621acddea1a6c07", "score": "0.5338784", "text": "def parse_requirements(filename):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]", "title": "" }, { "docid": "5fda65c549e8a63cf621acddea1a6c07", "score": "0.5338784", "text": "def parse_requirements(filename):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]", "title": "" }, { "docid": "6599054120d62e811f0480125a09993a", "score": "0.5331465", "text": "def parse_input(filename, url):\n print(\"Processing\", filename, \"\\n\")\n try:\n if os.path.isdir(filename): # if it's a directory\n files = os.listdir(filename)\n for file in files:\n new_path = os.path.join(filename,file)\n parse_input(new_path, url)\n return\n\n # check for known file extensions\n file, ext = os.path.splitext(filename)\n if ext == \".json\": \n parse_json(filename, url)\n\n elif ext == \".csv\":\n parse_csv(filename, url)\n\n else: # otherwise, log and move on\n print(\"Not a supported file: \" +filename+ \"\\n\")\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])", "title": "" }, { "docid": "af32075ddb1a582c17a7aa298e306fb0", "score": "0.5315196", "text": "def parse_package_page(files, pk_version, index_url, cache_path):\n\n result = []\n for file in files.find_all(\"a\"):\n # Skip if not a whl file\n if not file.string.endswith(\"whl\"):\n continue\n\n file_name_chunks = file.string.split(\"-\")\n\n package_version = file_name_chunks[1]\n package_name = file_name_chunks[0]\n python_version = file_name_chunks[2][2:]\n platform = file_name_chunks[4].split(\".\")[0]\n implementation = file_name_chunks[2][:2]\n abi = file_name_chunks[3]\n\n if package_version != pk_version:\n continue\n if python_version == \"26\" or python_version == \"34\" or python_version == \"35\":\n continue\n if \"macosx\" in platform:\n continue\n if \"win_amd64\" in platform and python_version != \"39\":\n continue\n\n # Cryptography builds for Linux target Python 3.4+ but the only existing\n # build is labeled 3.4 (the lowest version supported).\n # Expand the abi3 tag here. e.g. 
cp34 abi3 is expanded to cp34m, cp35m, cp36m, cp37m\n # https://cryptography.io/en/latest/faq/#why-are-there-no-wheels-for-my-python3-x-version\n if abi == \"abi3\":\n for actual_version in range(int(python_version), 38):\n actual_version = str(actual_version)\n actual_abi = \"\".join([implementation, actual_version, \"m\"])\n info = {\n \"platform\": platform,\n \"implementation\": implementation,\n \"version\": actual_version,\n \"abi\": actual_abi,\n }\n result.append(info)\n else:\n info = {\n \"platform\": platform,\n \"implementation\": implementation,\n \"version\": python_version,\n \"abi\": abi,\n }\n result.append(info)\n\n install_package(package_name, pk_version, index_url, result, cache_path)", "title": "" }, { "docid": "592a0f3201ad5b54da583c80d50ab47b", "score": "0.5304188", "text": "def parseFile(filename):\n\tfilepath = root + filename\n\ttry:\n\t\tif filename.endswith('.txt'):\n\t\t\tfile = open(filepath)\n\t\t\treturn file.read().splitlines()\n\t\telif filename.endswith('.csv'):\n\t\t\treader = csv.reader(open(filepath))\n\t\t\tresult = {}\n\t\t\tfor row in reader:\n\t\t\t key = row[0]\n\t\t\t result[key] = row[1]\n\t\t\treturn result\n\texcept FileNotFoundError:\n\t\topen(filepath,\"w+\").close()", "title": "" }, { "docid": "e6bb291d0c1050811b9dcab12f2315f5", "score": "0.53000575", "text": "def parsefile(self, filename):\n g = dict()\n return g", "title": "" }, { "docid": "2acf56483cd6625ca12f778a799bfa1f", "score": "0.5293795", "text": "def parse(self, filename):\r\n log.debug('Starting parse of %s', filename)\r\n with open(filename) as f:\r\n read = json.loads(f.read())\r\n\r\n self.parse_components(read.get('components'))\r\n self.parse_component_instances(read.get('component_instances'))\r\n if read.get('shapes') is not None:\r\n self.parse_sch_shapes(read.get('shapes'))\r\n self.parse_design_attributes(read.get('design_attributes'))\r\n self.parse_nets(read.get('nets'))\r\n self.parse_version(read.get('version'))\r\n\r\n # layout aspects\r\n self.parse_layer_options(read.get('layer_options'))\r\n self.parse_trace_segments(read.get('trace_segments'))\r\n self.parse_layout_objects(read.get('gen_objs'))\r\n self.parse_paths(read.get('paths'))\r\n self.parse_pours(read.get('pours'))\r\n self.parse_pcb_text(read.get('text'))\r\n\r\n return self.design", "title": "" }, { "docid": "452fb45a344976c57c4fa857b9a8b248", "score": "0.52927643", "text": "def get_packages():\n line = split_words(run('rospack','list'))\n packages = [{'name': name, 'path':path} for name, path in line]\n return packages", "title": "" }, { "docid": "11d106b34a06a5a58a2c185e4b9890b1", "score": "0.529203", "text": "def parse(self, file):\n raise NotImplementedError", "title": "" }, { "docid": "d0f8e0568cc908d38a47afd2e3f55aff", "score": "0.52908236", "text": "def testable_packages(filename, prefix):\n pkgdir = os.path.join(prefix, 'pkgs')\n paths = []\n yaml = YAML(typ='safe')\n\n for record in dmfile(filename):\n # Reconstruct ${package}-${version} format (when possible)\n pattern = f\"{record['name']}-\"\n if record['version']:\n pattern += record['version']\n pattern += '*'\n\n # Record path to extracted package\n path = ''.join([x for x in glob(os.path.join(pkgdir, pattern))\n if os.path.isdir(x)])\n paths.append(path)\n\n for root in paths:\n info_d = os.path.join(root, 'info')\n recipe_d = os.path.join(info_d, 'recipe')\n git_log = os.path.join(info_d, 'git')\n\n if not os.path.exists(git_log):\n continue\n\n git_log_data = open(git_log).readlines()\n\n if not git_log_data:\n continue\n\n with 
open(os.path.join(recipe_d, 'meta.yaml')) as yaml_data:\n source = yaml.load(yaml_data)['source']\n\n if not isinstance(source, dict):\n continue\n\n repository = source['git_url']\n head = git_log_data[1].split()[1]\n yield dict(repo=repository, commit=head)", "title": "" }, { "docid": "3b31af5ad6d7270ec4b68d9d43e5a07f", "score": "0.5283741", "text": "def _parse_build_names(file_path: Path) -> List[ReleaseInfo]:\n file_name = file_path.name\n is_nightly = NIGHTLY_NAME_SUFFIX.casefold() in file_name.casefold()\n\n if is_nightly:\n version = \"nightly\"\n else:\n version = file_path.with_suffix(\"\").name\n\n formatted_version = None if is_nightly else format_release_str(version)\n\n with open(file_path) as handle:\n # Strip and get non-empty lines\n parsed_file_names = [line.strip() for line in handle if line.strip()]\n\n # All non-nightly files have the date as their first line and they will all be on the same date\n if not is_nightly:\n release_date = datetime.date.fromisoformat(parsed_file_names.pop(0))\n # For an official release we have a single ReleaseInfo and multiple packages\n package_details: List[PackageDetails] = []\n for executable_name in parsed_file_names:\n package_details.append(\n PackageDetails(os_details=get_os(executable_name, version),\n download_url=get_download_url(executable_name, version, is_nightly)))\n releases = [\n ReleaseInfo(date=release_date,\n version=version,\n formatted_version=formatted_version,\n package_details=package_details)\n ]\n else:\n # For nightlies we have separate releases per file as sometimes not all builds complete\n releases: List[ReleaseInfo] = []\n for filename in parsed_file_names:\n releases.append(\n ReleaseInfo(date=get_nightly_date(filename),\n version=version,\n package_details=[\n PackageDetails(os_details=get_os(filename, version),\n download_url=get_download_url(\n filename, version, is_nightly))\n ]))\n\n return releases", "title": "" }, { "docid": "0601a121b5808d8d35cd6ff7e2ab624d", "score": "0.52721345", "text": "def parse_requirements_file(*args, **kwargs):\n return (\n Parser.parser_map.get(Config.parser, PipParser)\n .parse_requirements_file(*args, **kwargs)\n )", "title": "" }, { "docid": "07613738cba0ba6d09b6b5215efd450b", "score": "0.526506", "text": "def LoadPackageFile(self, package_file, skip_missing=False):\n archive_names = None\n self._archive_list = []\n\n # TODO(dyen): Support old format temporarily when it was a list of archives.\n if isinstance(package_file, list) or isinstance(package_file, dict):\n if isinstance(package_file, list):\n self._package_version = 0\n archive_list = package_file\n else:\n self._package_version = package_file[PACKAGE_KEY_VERSION]\n archive_list = package_file[PACKAGE_KEY_ARCHIVES]\n\n if archive_list:\n if isinstance(archive_list[0], archive_info.ArchiveInfo):\n # Setting a list of ArchiveInfo objects, no need to interpret JSON.\n self._archive_list = archive_list\n else:\n # Assume to be JSON.\n for archive_json in archive_list:\n archive = archive_info.ArchiveInfo(archive_info_file=archive_json)\n self._archive_list.append(archive)\n\n elif isinstance(package_file, str) or isinstance(package_file, unicode):\n package_data = ReadPackageFile(package_file)\n self._package_version = package_data[PACKAGE_KEY_VERSION]\n archive_names = package_data[PACKAGE_KEY_ARCHIVES]\n\n package_name = GetLocalPackageName(package_file)\n archive_dir = os.path.join(os.path.dirname(package_file), package_name)\n for archive in archive_names:\n arch_file = archive + '.json'\n arch_path = 
os.path.join(archive_dir, arch_file)\n if not os.path.isfile(arch_path):\n if not skip_missing:\n raise error.Error(\n 'Package (%s) points to invalid archive file (%s).' %\n (package_file, arch_path))\n archive_desc = archive_info.ArchiveInfo(name=archive)\n else:\n archive_desc = archive_info.ArchiveInfo(archive_info_file=arch_path)\n self._archive_list.append(archive_desc)\n else:\n raise error.Error('Invalid load package file type (%s): %s.' %\n (type(package_file), package_file))", "title": "" }, { "docid": "a93ee9340ab0b2d51f99810977c842a6", "score": "0.5262331", "text": "def parse_file(self, path):\n raise NotImplementedError()", "title": "" }, { "docid": "8551e97b63b6b09cf974cd7f0cf372ec", "score": "0.5239013", "text": "def parse_file(filename):\n try:\n fname = open(filename, 'r')\n except:\n logger.error('File do no exists %s' % filename)\n exit()\n\n with fname as f:\n for line in f:\n if line.strip()[0] == \"#\":\n continue\n\n command = line.strip().upper().split(' ')\n\n yield (command[0], command[1:])", "title": "" }, { "docid": "3d8696f015bc04d6db571cce9115d881", "score": "0.5235695", "text": "def parse_requirements(filename):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith('#')]", "title": "" }, { "docid": "a934ef8abb8d532f6c013632bec09d28", "score": "0.5232518", "text": "def parse_filename(self, fname):\n\n m = re.match(self.pattern, fname, re.IGNORECASE)\n if m is not None:\n m = m.groupdict()\n if m[\"name\"] is None:\n m[\"name\"] = \"Unknown\"\n if m[\"date\"] is None:\n m[\"date\"] = \"000101\"\n else:\n m[\"date\"] = m[\"date\"].replace(\" \", \"0\")\n if m[\"time\"] is None:\n m[\"time\"] = \"000000\"\n else:\n m[\"time\"] = m[\"time\"].replace(\" \", \"0\")\n if m[\"file\"] is None:\n m[\"file\"] = \"unknown\"\n else:\n m[\"file\"] = m[\"file\"].lower()\n\n return m", "title": "" }, { "docid": "b046b2a6fe1d59282e975821ca3e1f33", "score": "0.52197796", "text": "def parse_file(self):\n # Read in the entire file\n contents = Path(self.file).read_text()\n\n # Find the name of the network\n self.parse_network(contents)\n\n # Find all the variables\n self.parse_variables(contents)\n\n # Find all the probabilities\n self.parse_probabilities(contents)", "title": "" }, { "docid": "d5e3e308024390b095850d07f176ee59", "score": "0.5206896", "text": "def handle_package(self, response):\n\n package_path = response.url.replace('file://', '')\n self.log('Handling package: %s' % package_path, logging.INFO)\n\n # extract the name of the package without extension\n filename = os.path.basename(response.url).rstrip(\"A.tar\").rstrip('.zip')\n\n # create temporary directory to extract zip packages:\n target_folder = mkdtemp(prefix=filename + \"_\", dir=ELSEVIER_UNPACK_FOLDER)\n\n # uncompress files to temp directory\n files = uncompress(package_path, target_folder)\n\n self.log('Files uncompressed to: %s' % target_folder, logging.INFO)\n\n for f in files:\n if 'dataset.xml' in f:\n return self.parse_dataset(target_folder, filename, package_path, f)", "title": "" }, { "docid": "765fa3762c8c744ce80a71c37d7bc430", "score": "0.5203204", "text": "def parse_list(self, this_list, ignore_packages=None):\n packages = defaultdict(list)\n # TO-DO pass ignored and error to result\n # packages_ignored = ()\n packages_error = ()\n for item in this_list:\n # sanitize\n ignored = False\n if \"metadata expiration\" in item:\n continue\n try:\n nvr = utils.splitFilename(item)\n # <lon> labelcompare functions expect (epoch, 
version, release)\n epoch = nvr[3]\n version = nvr[1]\n release = nvr[2]\n if ignore_packages:\n search_term = \"{}\".format(nvr[0])\n for ignore in ignore_packages:\n if \".*\" in ignore:\n r = re.compile(ignore)\n match = r.match(search_term)\n if match:\n logging.info(\n \"Ignoring Package: {}\".format(search_term))\n ignored = True\n else:\n if ignore in search_term:\n logging.info(\n \"Ignoring Package: {}\".format(search_term))\n ignored = True\n if not ignored:\n packages[nvr[0]].append((epoch, version, release))\n except Exception as e:\n logging.error(e)\n logging.warning(\n \"error found getting package name/version for {}\"\n \" OR the ignore package regex {}\".format(str(item), ignore))\n is_alpha = re.match('[a-zA-Z]', nvr[0])\n if not is_alpha:\n packages_error.add(item)\n logging.warning(\"package name does not meet criteria \"\n \"for item: {}\".format(item))\n continue\n continue\n return packages", "title": "" }, { "docid": "1e5beacfa145757fb877c255cac880c6", "score": "0.51924807", "text": "def parse_config(file_path):\n pass", "title": "" }, { "docid": "708380378263e95599952e7aafb3910e", "score": "0.51904434", "text": "def ReadPackageFile(package_file):\n with open(package_file, 'rt') as f:\n json_value = json.load(f)\n\n # TODO(dyen): Support old format temporarily when it was a list of archives.\n if isinstance(json_value, list):\n return { PACKAGE_KEY_ARCHIVES: json_value, PACKAGE_KEY_VERSION: 0 }\n else:\n return json_value", "title": "" }, { "docid": "f0cfd78c74d8e44194b320f40ee0fbb3", "score": "0.518023", "text": "def unmarshal(self, filename):\n rule_a = re.compile(\n r\"^(.+) -> (.+) if the (.+) of words i([+-]\\d+)...i([+-]\\d+) is '(.+)'$\",\n re.UNICODE,\n )\n rule_b = re.compile(\n r\"^(.+) -> (.+) if the (.+) of the (.+) word is '(.+)'$\", re.UNICODE\n )\n\n # erase any previous rules\n self._rules = []\n\n # load from file\n handler = file(filename, \"r\")\n lines = handler.readlines()\n handler.close()\n\n # remove '\\n's, even though $ would catch them\n lines = [line[:-1] for line in lines]\n # remove empty lines\n lines = [line for line in lines if len(line) > 0]\n\n # parse rules\n for rule in lines:\n match = re.match(rule_b, rule)\n if match:\n groups = list(match.groups())\n if groups[3] == \"preceding\":\n groups.pop(3)\n groups.insert(3, \"-1\")\n groups.insert(4, \"-1\")\n else:\n groups.pop(3)\n groups.insert(3, \"1\")\n groups.insert(4, \"1\")\n else:\n match = re.match(rule_a, rule)\n groups = list(match.groups())\n\n conditions = (int(groups[3]), int(groups[4]), groups[5])\n if groups[2] == \"tag\":\n r = ProximateTagsRule(groups[0], groups[1], conditions)\n else:\n r = ProximateWordsRule(groups[0], groups[1], conditions)\n\n self._rules.append(r)", "title": "" }, { "docid": "61396350116d480ea062fbe6372ef107", "score": "0.51705", "text": "def LoadPackageData(path):\n data_list = []\n with open(path, 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n data_list.append(Package(row[0], row[1], row[2], row[3], row[4],\n row[5], row[6], row[7]))\n\n return data_list", "title": "" }, { "docid": "f6056ce5c78261ea82ac23bc039eb74e", "score": "0.5151144", "text": "def _parse_file(self, filename, file_url):\n file_set = set()\n with open(filename, 'r') as plug_in:\n lines = plug_in.readlines()\n for plug in self.plugins:\n res = plug.run(lines)\n if len(res) > 0:\n for i in res:\n i = file_url + i\n file_set.add(i)\n return file_set", "title": "" }, { "docid": "1112d3093286a1222fe0a95669d8110e", "score": "0.51428115", "text": "def 
parse_requirements(filename):\n with open(filename, 'r') as f:\n lineiter = (line.strip() for line in f.readlines())\n return [line for line in lineiter if line and not line.startswith(\"#\")]", "title": "" }, { "docid": "b68e3ee59d6e6f1dbedb6da6d9a3ad3f", "score": "0.5141832", "text": "def parse_xml_file(self, filename):\n from imposm.parser.xml.multiproc import XMLMultiProcParser\n\n with fileinput(filename) as input:\n return self._parse(input, XMLMultiProcParser)", "title": "" }, { "docid": "137017c66d8fec8e664e8e4cc3e66ca8", "score": "0.51341844", "text": "def parse_file(self, filename: str) -> str:\n self.add_path(os.path.dirname(os.path.abspath(filename)))\n self.includes = [filename]\n\n with open(filename) as f:\n data = f.read()\n\n # TODO: No stream wrapper here? I don't really need it \n # but it was in the example implementations.\n output = io.StringIO()\n self.parse(data)\n self.write(output)\n result = output.getvalue()\n output.close()\n return result", "title": "" }, { "docid": "1713330e52f4d2ab91655ed48b2821ef", "score": "0.5130838", "text": "def full_parse(self):\n match = re.search(self.FILE_REGEX, self.filename)\n if not match:\n raise ValueError('Invalid {} filename: {}'.format(self.__class__.__name__,\n self.filename))\n else:\n return match.groups()", "title": "" }, { "docid": "09f903f9b47b3f4c8572db9104660101", "score": "0.5129876", "text": "def parseFile(filename):\n people = []\n f = file(filename)\n for line in f:\n pieces = line.split(':')\n name = pieces[0].strip()\n if name:\n priorities = pieces[1].strip().split(',')\n for i in range(len(priorities)):\n priorities[i] = priorities[i].strip()\n people.append((name,priorities))\n f.close()\n return people", "title": "" }, { "docid": "cd4c206b91d57d4fb31dd94c80201159", "score": "0.5118317", "text": "def get_parser(file):\n\tif file[-4:] == \".png\":\n\t\tparser = pngParse(file)\n\telif file[-4:] == \".bmp\":\n\t\tparser = bmpParse(file)\n\telif file[-4:] == \".gif\":\n\t\tparser = gifParse(file)\n\telif file[-4:] == \".tif\" or file[-4:] == \"tiff\":\n\t\tparser = tiffParse(file)\n\telif file[-4:] in [\".pgm\", \".ppm\"]:\n\t\tparser = pgmParse(file)\n\telif \"genericParse\" in globals()\n\t\tparser = genericParse(file)\n\t\twarn(\"Using generic parser for \"+file[-4:])\n\telse: # Catch when PIL or numpy not installed\n\t\twarn(\"Couldn't parse this file at all\", 2)\n\treturn parser", "title": "" }, { "docid": "65e31c99308f84c95cd4098ed29ba5be", "score": "0.511791", "text": "def load(klass, filename):\n p = PipfileParser(filename=filename)\n pipfile = klass(filename=filename)\n pipfile.data = p.parse()\n return pipfile", "title": "" }, { "docid": "c3aade5623e5edb18716aa9690b64556", "score": "0.5114332", "text": "def parse_config_file(filename):\n from rbtools.config.loader import parse_config_file as _parse_config_file\n\n return _parse_config_file(filename)", "title": "" }, { "docid": "4305140f1a438ad32ff05478eb3de62d", "score": "0.5113209", "text": "def __add_package(self, directory, filename, packages):\n file_ = utils.absolute_file_path(filename, directory)\n package = self.system.packager.package(file_)\n\n #for now skip source packages let's deal with only with binary packages\n if package.is_source():\n return\n\n if not self.package_for_arch(package):\n return\n\n if package.name in packages and \\\n package <= packages[package.name]:\n return\n\n packages[package.name] = package", "title": "" }, { "docid": "f8d89027e3aef2364fb69ebca9d8ee9c", "score": "0.5111192", "text": "def GetPackageList(path):\n\n 
pattern = re.compile(\"^[A-Za-z0-9\\-\\_]*.m?pkg$\", re.IGNORECASE)\n packages = []\n for package in os.listdir(path):\n if pattern.match(package):\n packages.append(package)\n packages.sort()\n return packages", "title": "" }, { "docid": "f29e8ad13c84c383ef42b291b0f38a84", "score": "0.5109514", "text": "def parse_requirements(\n filename, # type: str\n finder=None, # type: Optional[PackageFinder]\n comes_from=None, # type: Optional[str]\n options=None, # type: Optional[optparse.Values]\n session=None, # type: Optional[PipSession]\n constraint=False, # type: bool\n wheel_cache=None, # type: Optional[WheelCache]\n use_pep517=None # type: Optional[bool]\n):\n # type: (...) -> Iterator[InstallRequirement]\n if session is None:\n raise TypeError(\n \"parse_requirements() missing 1 required keyword argument: \"\n \"'session'\"\n )\n\n _, content = get_file_content(\n filename, comes_from=comes_from, session=session\n )\n\n lines_enum = preprocess(content, options)\n\n for line_number, line in lines_enum:\n req_iter = process_line(line, filename, line_number, finder,\n comes_from, options, session, wheel_cache,\n use_pep517=use_pep517, constraint=constraint)\n for req in req_iter:\n yield req", "title": "" }, { "docid": "6bcae7febfbd790b301c393db5b7d2d2", "score": "0.5105408", "text": "def ParsePkginfo(lines):\n d = {}\n for line in lines:\n try:\n # Can't use split, because there might be multiple '=' characters.\n line = line.strip()\n # Skip empty and commented lines\n if not line: continue\n if line.startswith(\"#\"): continue\n var_name, var_value = line.split(\"=\", 1)\n d[var_name] = var_value\n except ValueError, e:\n raise PackageError(\"Can't parse %s: %s\" % (repr(line), e))\n return d", "title": "" }, { "docid": "6bcae7febfbd790b301c393db5b7d2d2", "score": "0.5105408", "text": "def ParsePkginfo(lines):\n d = {}\n for line in lines:\n try:\n # Can't use split, because there might be multiple '=' characters.\n line = line.strip()\n # Skip empty and commented lines\n if not line: continue\n if line.startswith(\"#\"): continue\n var_name, var_value = line.split(\"=\", 1)\n d[var_name] = var_value\n except ValueError, e:\n raise PackageError(\"Can't parse %s: %s\" % (repr(line), e))\n return d", "title": "" }, { "docid": "0daff9aaa962acdb62c10392050fc700", "score": "0.5098932", "text": "def _load(self, filename):\n if filename[-4:].lower() == '.npz':\n data = np.load(filename)\n self.paths = [data[k] for k in sorted(data.keys())]\n data.close()\n elif filename[-4:].lower() == '.svg':\n self.paths = []\n self._add_from_svg(filename)\n else:\n raise RuntimeError('Bad file suffix, npz or svg expected.')", "title": "" }, { "docid": "d339532c4d6a008d31c18838691e8b18", "score": "0.50980526", "text": "def parse(file_path: str) -> List[Dict[str, Union[str, Sequence[str]]]]:\n try:\n with open(file_path) as rfile:\n deps = [line.rstrip(\"\\n\") for line in rfile]\n return [\n {\"version\": RequirementsFile.clean_dependency(dep), \"hashes\": []}\n for dep in deps\n if not any(\n [dep.strip().startswith(s) for s in INVALID_REQUIREMENTS_LINES]\n )\n ]\n except OSError as ex:\n raise OchronaFileException(f\"OS error when parsing {file_path}\") from ex", "title": "" }, { "docid": "4a9e12b566380b6ea43faeeaac692631", "score": "0.50866497", "text": "def parse_file(filename, start):\n with open(filename) as fd:\n events = parse_lines(fd, start)\n return events", "title": "" }, { "docid": "d2a245ebc1b93c92ae15151414302a40", "score": "0.5079447", "text": "def read_cfg_file(filename):\n rval = {'root': 
None,\n 'packages': {}}\n with open(filename, 'r') as rbl:\n cpkg = None\n for line in rbl:\n # throw away any comment at the end of the line\n # the '#' must be followed by whitespace to be a valid comment\n line = re.sub(r\"\\s*#\\s.*$\", \"\", line)\n # ignore blank lines\n if re.match(r\"^\\s*$\", line):\n continue\n (key, val) = line.split()\n if ':' in key:\n msg = \"Syntax error in config file: colons not allowed\"\n raise pyppi_error(msg)\n key = key.strip()\n val = val.strip()\n if key == 'root':\n if rval['root'] is None:\n rval['root'] = tbx.expand(val)\n else:\n raise pyppi_error(\"root was already set\")\n elif key == 'package':\n rval['packages'][val] = []\n cpkg = rval['packages'][val]\n elif key == 'version':\n cpkg.append({'version': val})\n elif key == 'url':\n cpkg[-1]['url'] = val\n elif key == 'minpy':\n cpkg[-1]['minpy'] = val\n return rval", "title": "" }, { "docid": "6b34d11c2df925c09521cb44e74a52c7", "score": "0.5075152", "text": "def file_parser(file):\n\twith open(file, \"r\") as vmf:\n\n\t\tindent = 0\n\t\tprevious_line = \"versioninfo\\n\" # We only know it's a category the next line (the curly brackets open)\n\t\textracted = []\n\n\t\tfor line in vmf.readlines()[1:]:\n\t\t\tif \"}\" in line:\n\t\t\t\tindent -= 1\n\t\t\t\tif indent == 0: # If indent is not 0 we know it's a child category and not a main category\n\t\t\t\t\textracted.append(t)\n\t\t\t\tcontinue\n\n\t\t\tif \"{\" in line:\n\t\t\t\tif indent > 0: # If indent is not 0 we know it's a child category and not a main category\n\t\t\t\t\t# Open curly brackets ALWAYS follow a category, so we know the previous line is the category name\n\t\t\t\t\tt.add_child(previous_line, indent)\n\t\t\t\telse:\n\t\t\t\t\tt = TempCategory(previous_line, indent) # This is a main category (not a child category)\n\t\t\t\tindent += 1\n\t\t\t\tcontinue\n\n\t\t\tif \"\\\"\" in line: t.add_line(line, indent) # ALL lines with data have double quotes in them\n\n\t\t\tprevious_line = line\n\n\tfor c in extracted:\n\t\t# clean_up is a recursive function we only need to call it on the main categories\n\t\tc.clean_up()\n\n\treturn extracted # This is used when creating a VMT class", "title": "" } ]
f5cf5fc9fafde2916e90572ab278c0bf
Creates a MongoDB filter for new documents. By "new", we mean documents in this Store that were last updated later than any document in targets.
[ { "docid": "f6226e52defd180c7c9cbc3640c0ea6b", "score": "0.59591556", "text": "def lu_filter(self, targets):\n if isinstance(targets, Store):\n targets = [targets]\n\n lu_list = [t.last_updated for t in targets]\n return {self.lu_field: {\"$gt\": self.lu_key[1](max(lu_list))}}", "title": "" } ]
[ { "docid": "202637605fe0d6c4cd9b4f70374b6863", "score": "0.54216784", "text": "def changesWithFilter(self, filter, limit=1000, since=-1):\n if since < 0:\n since = self.last_seq\n data = self.get('/%s/_changes?limit=%s&since=%s&filter=%s' % (self.name, limit, since, filter))\n self.last_seq = data['last_seq']\n return data", "title": "" }, { "docid": "0a079424f872943400b8498eb6dbef9f", "score": "0.5367098", "text": "def get_new_by_field(self, old, new, deps, fields):\n\t\tfound_objs = self.get_new_objects(old, new, deps)\n\t\tnew_objs = []\n\t\t\n\t\tfor o in found_objs:\n\t\t\texist = False\n\t\t\tfor o2 in old[deps[0]]['data']:\n\t\t\t\tsimilar = True\n\t\t\t\tfor f in fields:\n\t\t\t\t\tif not o[f] == o2[f]:\n\t\t\t\t\t\tsimilar = False\n\t\t\t\t\t\tbreak\n\t\t\t\tif similar:\n\t\t\t\t\texist = True\n\t\t\t\t\tbreak\n\t\t\tif not exist:\n\t\t\t\tnew_objs.append(o)\n\n\t\treturn new_objs", "title": "" }, { "docid": "12173978af170f5e00e2250e65104765", "score": "0.5290616", "text": "def _fixer_apply_fix_to_all(self,old,new):\n col = self.collection\n count = col.find({\"sender\":old}).count()\n col.update({\"sender\":old},{\"$set\":{\"sender\":new}},safe=True,multi=True)\n debug(\"Fixed %d documents\\n(%s -> %s)\\n\" % (count, old, new))", "title": "" }, { "docid": "6009b4d6bc1202aad70aed76cf0afc4e", "score": "0.5076374", "text": "def only_new_news(self):\n last_date = self._database_client.get_last_posted_date()\n last_news = self.news\n if last_date:\n # filter only new news by posted date\n last_news = [news for news in self.news if news.posted > last_date]\n return last_news", "title": "" }, { "docid": "39b347e690703728f063d45dea3ff7c2", "score": "0.49514052", "text": "def index_all_documents(self, new_index):\n collection = self.db_client[self.config['collection']]\n results = collection.find(\n self.config['collectionFilter'],\n list(self.config['fields'].keys())\n ).sort('createdAt', pymongo.DESCENDING).limit(10)\n ct = 0\n for result in results:\n try:\n self.index_document(result, new_index)\n ct += 1\n # TODO: what are the possible specific exceptions\n except Exception as exc:\n logging.warning(\n 'Failed to create document for %s. 
Error: %s',\n result['_id'], exc\n )\n logging.info('Index all complete: {} docs'.format(ct))", "title": "" }, { "docid": "87e69021f8648dd1bc041043937c66f4", "score": "0.4876476", "text": "def trackit(self, *args, **kwargs):\n\n t = Tracking.objects.filter(**kwargs)\n # update existing...refresher..\n if len(t) > 0:\n t = t[0]\n if (datetime.now() - t.created).seconds <= 300:\n setattr(t, 'counter', (getattr(t, 'counter', 0) + 1))\n t.save()\n else:\n # or add new record\n kwargs['created'] = datetime.now()\n t = Tracking(**kwargs)\n t.save()\n else:\n # or add new record\n kwargs['created'] = datetime.now()\n t = Tracking(**kwargs)\n t.save()", "title": "" }, { "docid": "deefbcea0dfdc71205b85ec705ed2f10", "score": "0.48670688", "text": "def get_new_objects(self, old, new, deps):\n\t\thashable_united_old = set([])\n\t\thashable_united_new = set([])\n\t\tfor dep in deps:\n\t\t\thashable_united_old = hashable_united_old | set([hashabledict(d) for d in old[dep]['data']])\n\t\t\thashable_united_new = hashable_united_new | set([hashabledict(d) for d in new[dep]['data']])\n\t\treturn list(set(hashable_united_new) - set(hashable_united_old))", "title": "" }, { "docid": "533de09a0520ad27ff81cbb895a3a677", "score": "0.47890252", "text": "def is_new(self):\n if self.first_extraction_date:\n job_age = datetime.datetime.utcnow() - self.first_extraction_date\n job_age_days = job_age.days\n return job_age_days < settings.NUM_DAYS_NEW_JOBS\n else:\n return False", "title": "" }, { "docid": "7787ac955e45985d957ac30742d14c1f", "score": "0.4786153", "text": "def pull_changes(self, fields=None, add_query=None):\n\n query = self._get_query_dict()\n query.update(add_query or {})\n\n db_document = self.collection().find_one(query, projection=fields)\n if not db_document:\n raise self.NotFound(self.id)\n\n if not fields:\n fields = set(list(db_document) + list(self))\n\n db_document = MongoObject(db_document)\n\n for field in fields:\n try:\n db_document[field]\n except KeyError:\n try:\n self[field]\n except KeyError:\n pass\n else:\n del self[field]\n else:\n self[field] = db_document[field]", "title": "" }, { "docid": "175e7bd301f5977ead3c8ed6296e976c", "score": "0.47576076", "text": "def filter_documents(self, filters: Optional[Dict[str, Any]] = None) -> List[Document]:", "title": "" }, { "docid": "313f7f5ef5782ce3695b2e6778ebc055", "score": "0.47341478", "text": "def groups_with_new_post(self):\n\t\treturn GroupSubscriptions.objects.filter(person=self , isNewPublication=True)", "title": "" }, { "docid": "9e5bd37f0b7ca63b4e57da6ad41eb32b", "score": "0.46846232", "text": "def get_new_timers(self, old, new, deps):\n\t\treturn self.get_new_by_field(old, new, deps, ['offset'])", "title": "" }, { "docid": "f81002333fb2cde822ac2e5429b7ca9b", "score": "0.46810097", "text": "def new_ids(self):\n return self._ids(self.new_records)", "title": "" }, { "docid": "f8ddb706130147e198aee3f2674c3399", "score": "0.4670848", "text": "def merge_none_updated_fields(self,\n new_document,\n old_document):\n pass", "title": "" }, { "docid": "db4e64c349177f254e1de132785e1af2", "score": "0.46506092", "text": "def get_recent(self, model_cls, destination_hostname=None):\n source_instances = model_cls.objects.none()\n if not destination_hostname:\n destination_hostname = socket.gethostname()\n options = self.get_last_modified_options(model_cls)\n if options:\n qset = Q()\n for dct in options:\n qset.add(Q(**dct), Q.OR)\n source_instances = model_cls.objects.using(self.get_using_source()).filter(qset).order_by('id')\n else:\n 
source_instances = model_cls.objects.using(self.get_using_source()).all().order_by('id')\n return source_instances", "title": "" }, { "docid": "573e97a11c363632a3dced1701909054", "score": "0.46124834", "text": "def new_or_changed(self):\n # return set(k for k, v in self.current_dict.items()\n # if k not in self.past_keys or v != self.past_dict[k])\n return self.added().union(self.changed())", "title": "" }, { "docid": "b566f0895ec60e1b3318413c370224cf", "score": "0.46086276", "text": "def filter_modified_since(request, objects):\n since = isoparse(request.GET.get(\"changes-since\"))\n if since:\n modified_objs = objects.filter(updated__gte=since)\n if not modified_objs:\n raise faults.NotModified()\n return modified_objs\n else:\n return objects.filter(deleted=False)", "title": "" }, { "docid": "700950607e364a6ddd4494c9e9a693e9", "score": "0.45968056", "text": "def add(self, name, query, **kwargs):\n obj = models.Filter({\"name\": name, \"query\": query}, self.api)\n obj.temp_id = obj[\"id\"] = self.api.generate_uuid()\n obj.data.update(kwargs)\n self.state[self.state_name].append(obj)\n cmd = {\n \"type\": \"filter_add\",\n \"temp_id\": obj.temp_id,\n \"uuid\": self.api.generate_uuid(),\n \"args\": {key: obj.data[key] for key in obj.data if key != \"id\"},\n }\n self.queue.append(cmd)\n return obj", "title": "" }, { "docid": "36828dc75c451b7b9dc15db2faa51522", "score": "0.45955482", "text": "def new_ids(self):\n return self._ids(\"new\")", "title": "" }, { "docid": "45c4d7d7b73870cc33323fc001dd3f22", "score": "0.4589334", "text": "def check_last_modified(self, queryset):\n modified = self.request.META.get('HTTP_IF_MODIFIED_SINCE')\n last_count = self.request.META.get('HTTP_X_COUNT')\n if modified and last_count and last_count.isdigit():\n\n # Check to see if any records have been updated since the last query\n kw = {}\n kw['%s__gt' % self.audit_field] = date_parser.parse(modified)\n if queryset.filter(**kw).exists():\n return\n\n # Check to make sure the total is the same (no deletions)\n last_count = int(last_count)\n if queryset.count() != last_count:\n return\n raise NotModified", "title": "" }, { "docid": "5e6318c702e79cd23260f82b3b476026", "score": "0.45117402", "text": "def test_Collection_init_collection_filter(coll_id, start_date, end_date):\n # The target collection ID should be removed from the collections lists\n args = default_coll_args()\n args['collections'] = [coll_id]\n args['start_date'] = start_date\n args['end_date'] = end_date\n assert model.Collection(**args).collections == []", "title": "" }, { "docid": "27d50817b24f94405d1b01ceccb48c5e", "score": "0.44911405", "text": "def gen_news(self, target_period, old_data, new_data):\n return self.modelling.gen_news(self, target_period, old_data, new_data)", "title": "" }, { "docid": "01cd57251ac1cf94e797910f49d83bbc", "score": "0.4477826", "text": "def process_record(self, new, old=None):\n new = super(Record, self).process_record(new, old)\n\n schema = self._collection.get('schema')\n settings = self.request.registry.settings\n schema_validation = 'experimental_collection_schema_validation'\n if not schema or not asbool(settings.get(schema_validation)):\n return new\n\n collection_timestamp = self._collection[self.model.modified_field]\n\n try:\n stripped = copy.deepcopy(new)\n stripped.pop(self.model.id_field, None)\n stripped.pop(self.model.modified_field, None)\n stripped.pop(self.model.permissions_field, None)\n stripped.pop(self.schema_field, None)\n jsonschema.validate(stripped, schema)\n except 
jsonschema_exceptions.ValidationError as e:\n try:\n field = e.path.pop() if e.path else e.validator_value.pop()\n except AttributeError:\n field = None\n raise_invalid(self.request, name=field, description=e.message)\n\n new[self.schema_field] = collection_timestamp\n return new", "title": "" }, { "docid": "24f231586e50f5f4fd51541eb006e974", "score": "0.44752893", "text": "def get_events(source_index: str, dest_index: str):\n\n res = es.search(index=source_index, body={\"query\": {\"match_all\": {}}})\n source_hits = [hit[\"_source\"] for hit in res[\"hits\"][\"hits\"]]\n source_set = set([hit[\"collection_id\"] for hit in source_hits])\n source_drs_set = set([tuple(hit['drsId']) for hit in source_hits if hit.get('drsId')])\n\n res = es.search(index=dest_index, body={\"query\": {\"match_all\": {}}})\n dest_hits = [hit[\"_source\"] for hit in res[\"hits\"][\"hits\"]]\n dest_set = set([hit[\"collection_id\"] for hit in dest_hits])\n dest_drs_set = set([tuple(hit['drsId']) for hit in source_hits if hit.get('drsId')])\n\n added = dest_set - source_set\n removed = source_set - dest_set\n updated = dest_drs_set - source_drs_set\n\n # creation of record of events.\n events = []\n\n for additions in added:\n res = es.search(index=dest_index, body={\"query\": {\"term\": {'_id': additions}}})\n hit = res['hits']['hits'][0]['_source']\n events.append(\n {\n \"collection_id\": hit[\"collection_id\"],\n \"collection_title\": hit[\"title\"],\n \"action\": \"added\",\n \"datetime\": datetime.now().isoformat(),\n\n }\n )\n\n for removals in removed:\n res = es.search(index=source_index, body={\"query\": {\"term\": {'_id': removals}}})\n hit = res['hits']['hits'][0]['_source']\n events.append(\n {\n \"collection_id\": hit[\"collection_id\"],\n \"collection_title\": hit[\"title\"],\n \"action\": \"removed\",\n \"datetime\": datetime.now().isoformat(),\n\n }\n )\n\n for update in updated:\n res = es.search(index=dest_index, body={\"query\": {\"term\": {'drsId': update}}})\n collection_id = res['hits']['hits'][0]['_id']\n res = es.search(index=source_index, body={\"query\": {\"term\": {'_id': collection_id}}})\n if res['hits']['hits']:\n hit = res['hits']['hits'][0]['_source']\n events.append(\n {\n \"collection_id\": hit[\"collection_id\"],\n \"collection_title\": hit[\"title\"],\n \"action\": \"updated\",\n \"datetime\": datetime.now().isoformat(),\n\n }\n )\n\n events_json = json.dumps(events, indent=4)\n # print(events_json)\n r = requests.post(\n url,\n data=events_json,\n headers=headers,\n )\n print(f'HTTP Response {r.status_code}') # HTTP response", "title": "" }, { "docid": "ac6c1e24bbc0f2c64c8b490968dcf572", "score": "0.44725478", "text": "def aging_filter(days):\n now: datetime = pendulum.now(tz='UTC')\n\n def mr_aging_filter(project: Project) -> Optional[Project]:\n if not project:\n return None\n\n project.merge_requests = list(filter(lambda mr: (now - mr.created_at).days > days,\n project.merge_requests))\n\n return project if project.merge_requests else None\n\n return mr_aging_filter", "title": "" }, { "docid": "ace3bc9b0c7daf109a30118c8265a9c6", "score": "0.44568688", "text": "def ref_overwrite(oldref, newref, days):\n if len(oldref) != len(newref):\n return True\n oldref_minus_retrieved = [x for x in oldref if x.get_prop_nr() != retrieved_pid]\n newref_minus_retrieved = [x for x in newref if x.get_prop_nr() != retrieved_pid]\n if not all(x in oldref_minus_retrieved for x in newref_minus_retrieved):\n return True\n oldref_retrieved = [x for x in oldref if x.get_prop_nr() == retrieved_pid]\n 
newref_retrieved = [x for x in newref if x.get_prop_nr() == retrieved_pid]\n if (len(newref_retrieved) != len(oldref_retrieved)) or not (\n len(newref_retrieved) == len(oldref_retrieved) == 1):\n return True\n datefmt = '+%Y-%m-%dT%H:%M:%SZ'\n retold = list([datetime.strptime(r.get_value()[0], datefmt) for r in oldref if r.get_prop_nr() == retrieved_pid])[0]\n retnew = list([datetime.strptime(r.get_value()[0], datefmt) for r in newref if r.get_prop_nr() == retrieved_pid])[0]\n return (retnew - retold).days >= days", "title": "" }, { "docid": "9129a007a71f8ff4350363b2e4c0a561", "score": "0.4452713", "text": "def _prepare_new_events_for_datastore(df_new_log_events: pd.DataFrame) -> pd.DataFrame:\n # create a copy of the DataFrame\n df_new_log_events = df_new_log_events.copy(deep=True)\n\n # extract the EventId and EventTemplate columns\n df_new_log_events = df_new_log_events[[\"EventId\", \"EventTemplate\"]]\n\n # drop duplicates\n df_new_log_events = df_new_log_events.drop_duplicates()\n\n return df_new_log_events", "title": "" }, { "docid": "94f7d701fff3bc9da707d0b19b4ef7b9", "score": "0.44456246", "text": "def test_new_filter_existing_default(self):\r\n self.build(\r\n 'ir.filters',\r\n dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),\r\n dict(name='b', is_default=True, user_id=self.USER_ID, model_id='ir.filters'),\r\n )\r\n\r\n Filters = self.registry('ir.filters')\r\n Filters.create_or_replace(self.cr, self.USER_ID, {\r\n 'name': 'c',\r\n 'model_id': 'ir.filters',\r\n 'user_id': self.USER_ID,\r\n 'is_default': True,\r\n })\r\n filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')\r\n\r\n self.assertItemsEqual(map(noid, filters), [\r\n dict(name='a', user_id=self.USER, is_default=False, domain='[]', context='{}'),\r\n dict(name='b', user_id=self.USER, is_default=False, domain='[]', context='{}'),\r\n dict(name='c', user_id=self.USER, is_default=True, domain='[]', context='{}'),\r\n ])", "title": "" }, { "docid": "d60bc9782037ddc413b44f2178010022", "score": "0.44298473", "text": "def retain(self, **terms):\n self.l = self.filter(**terms)\n return self", "title": "" }, { "docid": "615da49516b3c5b788b504992ce5b9ba", "score": "0.44183674", "text": "def filter(self):\n if self.is_search:\n self.query[\"name\"] = {\"$regex\": self.dt_search[\"value\"], \"$options\": \"i\" }", "title": "" }, { "docid": "148981904164813b891e0e80043e7741", "score": "0.44177032", "text": "def test_latest_added_with_future_product_and_past_product(self):\n time = timezone.now() - datetime.timedelta(days=30)\n Product.objects.create(name='Now', description='now', created_at=timezone.now(), price=30)\n Product.objects.create(name='Past', description='past', created_at=time, price=30)\n response = self.client.get(reverse('last_added'))\n self.assertQuerysetEqual(\n response.context['last_added_pr'], ['<Product: Now>'])", "title": "" }, { "docid": "a628e9f4e3c72a787430dc11392782ad", "score": "0.44123897", "text": "def test_new_no_filter(self):\r\n Filters = self.registry('ir.filters')\r\n Filters.create_or_replace(self.cr, self.USER_ID, {\r\n 'name': 'a',\r\n 'model_id': 'ir.filters',\r\n 'user_id': self.USER_ID,\r\n 'is_default': True,\r\n })\r\n filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')\r\n\r\n self.assertItemsEqual(map(noid, filters), [\r\n dict(name='a', user_id=self.USER, is_default=True,\r\n domain='[]', context='{}')\r\n ])", "title": "" }, { "docid": "e0450525714e0c7090b4a8ae8cc6ad2a", "score": "0.44090873", "text": "def search_new(self, n=10):\n\n return 
quickview(self._bibdb.sort_values(by='import_date')[-n:])", "title": "" }, { "docid": "959e290dd06d726020d9bf60b58d06e2", "score": "0.4406169", "text": "def new_objects(self):\n return [self]", "title": "" }, { "docid": "2f1c89d2e646ad6a89e74cd310cbda2f", "score": "0.44010144", "text": "def test_find_one_latest():\n collection = mongomock.MongoClient().db.collection\n obj1 = {'_id': 1, 'name': 'first'}\n obj2 = {'_id': 2, 'name': 'seond'}\n obj3 = {'_id': 3, 'name': 'third'}\n\n collection.insert_many([obj1, obj2, obj3])\n res = find_one_latest(collection)\n assert res == {'name': 'third'}", "title": "" }, { "docid": "77e4d886ea925e79075f06906acdbf50", "score": "0.43964005", "text": "def query_add_filters(self, query):\n return query", "title": "" }, { "docid": "43416a64b9a1b2d98e1d83c05674e9d8", "score": "0.439603", "text": "def mergeFilter(self, target):\r\n pass", "title": "" }, { "docid": "43416a64b9a1b2d98e1d83c05674e9d8", "score": "0.439603", "text": "def mergeFilter(self, target):\r\n pass", "title": "" }, { "docid": "ad6677e42ba8f98224175530fa255cb3", "score": "0.43942013", "text": "def populate_collection(collection: Collection, documents: List[Dict[str, Any]], filter_field: str) -> None:\n logger.debug(f\"Populating/updating '{collection.full_name}' using '{filter_field}' as the filter\")\n\n for document in documents:\n _ = collection.find_one_and_update({filter_field: document[filter_field]}, {\"$set\": document}, upsert=True)", "title": "" }, { "docid": "e7acf8ed05e07ba5e1aa67a9b9280c30", "score": "0.43924293", "text": "def new_hits(self): \n return self._new_hits", "title": "" }, { "docid": "1e385a246616def42200172fc2c938d3", "score": "0.4385931", "text": "def get_new_devices(self, old, new, deps):\n\t\told_drivers = old[deps[0]]['data']\n\t\tnew_drivers = new[deps[0]]['data']\n\t\tfound_devs = []\n\t\tfor new_driver in new_drivers:\n\t\t\tnew_devices = new_driver[\"devices\"]\n\t\t\tfor new_device in new_devices:\n\t\t\t\tis_new = True\n\t\t\t\tfor old_driver in old_drivers:\n\t\t\t\t\told_devices = old_driver[\"devices\"]\n\t\t\t\t\tfor old_device in old_devices:\n\t\t\t\t\t\tif old_device[\"device_name\"] == new_device[\"device_name\"] and \\\n\t\t\t\t\t\t old_device[\"device_type\"] == new_device[\"device_type\"]:\n\t\t\t\t\t\t\tis_new = False\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tif not is_new:\n\t\t\t\t\t\tbreak\n\t\t\t\tif is_new:\n\t\t\t\t\tfound_devs.append(new_device)\n\t\treturn found_devs", "title": "" }, { "docid": "b78dcc3c5a8329645f9ea5971e5188fe", "score": "0.43826064", "text": "def get_new_services(self, old, new, deps):\n\t\treturn self.get_new_by_field(old, new, deps, ['service_name'])", "title": "" }, { "docid": "8a91edae33a6c4dcd71bf58f47b7fd0e", "score": "0.43811777", "text": "def startCreation(self):\r\n nids = []\r\n newNids = mw.col.findNotes(\"is:new\")\r\n\r\n for nid in newNids:\r\n\t if mw.col.getNote(nid).model()['name'] == self.vocabNote['VModel']:\r\n\t nids.append(nid)\r\n \r\n if nids != []:\r\n\t self.addCardsBulk(nids)", "title": "" }, { "docid": "8c8458de93516d9881be35cfe3afe987", "score": "0.4379371", "text": "def test_latest_added_with_a_past_product(self):\n self.client.force_login(User.objects.get_or_create(username='test')[0])\n time = timezone.now() - datetime.timedelta(days=30)\n Product.objects.create(name='Past', description='past', created_at=time, price=30)\n response = self.client.get(reverse('last_added'))\n self.assertContains(response, \"No items are available.\", status_code=200)\n 
self.assertQuerysetEqual(response.context['last_added_pr'], [])", "title": "" }, { "docid": "33229abbc088493431c10821fd3685e3", "score": "0.43776506", "text": "def after_new(self, form, instance):\n\n # Set the 'afternew' URL\n self.afternewurl = reverse('search_gold')\n\n # Create a new equality set to which we add this Gold sermon\n if instance.equal == None:\n geq = EqualGold.create_empty()\n instance.equal = geq\n instance.save()\n\n # Return positively\n return True, \"\"", "title": "" }, { "docid": "a6336f3f5c0b25376fc9acd9a754227d", "score": "0.43725342", "text": "def mergeFilter(self, target):\r\n self.entries = [entry for entry in self.entries if entry.listId.ValidateFormID(target)]", "title": "" }, { "docid": "a6336f3f5c0b25376fc9acd9a754227d", "score": "0.43725342", "text": "def mergeFilter(self, target):\r\n self.entries = [entry for entry in self.entries if entry.listId.ValidateFormID(target)]", "title": "" }, { "docid": "a6336f3f5c0b25376fc9acd9a754227d", "score": "0.43725342", "text": "def mergeFilter(self, target):\r\n self.entries = [entry for entry in self.entries if entry.listId.ValidateFormID(target)]", "title": "" }, { "docid": "a6336f3f5c0b25376fc9acd9a754227d", "score": "0.43725342", "text": "def mergeFilter(self, target):\r\n self.entries = [entry for entry in self.entries if entry.listId.ValidateFormID(target)]", "title": "" }, { "docid": "a6336f3f5c0b25376fc9acd9a754227d", "score": "0.43725342", "text": "def mergeFilter(self, target):\r\n self.entries = [entry for entry in self.entries if entry.listId.ValidateFormID(target)]", "title": "" }, { "docid": "a6336f3f5c0b25376fc9acd9a754227d", "score": "0.43725342", "text": "def mergeFilter(self, target):\r\n self.entries = [entry for entry in self.entries if entry.listId.ValidateFormID(target)]", "title": "" }, { "docid": "90aec0c1378eadcf98ed80d7ac1923a4", "score": "0.43724656", "text": "def create_watcher_and_commit_if_needed(self, new_responsible):\n watcher = Watcher.query.get_by_actorid(new_responsible)\n if not watcher:\n\n if txn_is_dirty():\n # Creating and committing the watcher should always be the\n # first thing that's being done in the txn when reassigning.\n # Otherwise we would be committing unrelated, unexpected\n # changes.\n #\n # Detect if that happens, but still proceed and log to sentry.\n msg = 'Dirty transaction when creating and committing watcher'\n logger.warn(msg)\n logger.warn('Registered objects: %r' % registered_objects())\n log_msg_to_sentry(msg, level='warning', extra={\n 'registered_objects': repr(registered_objects())}\n )\n\n session = create_session()\n watcher = Watcher(actorid=new_responsible)\n session.add(watcher)\n transaction.commit()\n transaction.begin()", "title": "" }, { "docid": "7134cf56ac2621464b9b48be993c1bf3", "score": "0.43698126", "text": "def potentially_expired_records(repo,\n days_to_expire=DEFAULT_EXPIRATION_DAYS):\n import utils\n cutoff_date = utils.get_utcnow() - timedelta(days_to_expire)\n return Person.all(filter_expired=False).filter(\n 'source_date <=',cutoff_date).filter(\n 'repo =', repo)", "title": "" }, { "docid": "183d6b4d161855c36c202097a9f6b087", "score": "0.43619275", "text": "def view_update(self, context):\n\n for collection in self._watch_list:\n collection_name = get_collection_name(collection)\n collection_set = set(collection)\n tracking_set = self._tracking_sets[collection_name]\n\n # Check for new items\n add_set = collection_set - tracking_set\n self.add_delta[collection_name] = add_set\n tracking_set |= add_set\n\n # Check for removed 
items\n remove_set = tracking_set - collection_set\n self.remove_delta[collection_name] = remove_set\n tracking_set -= remove_set\n\n # Check for updates\n update_set = {item for item in collection if item.is_updated}\n self.update_delta[collection_name] = update_set", "title": "" }, { "docid": "b1626abbeb20ccfeb37b58d5fa63e5ab", "score": "0.43597463", "text": "def create_watcher_and_commit_if_needed(self, new_responsible):\n watcher = Watcher.query.get_by_actorid(new_responsible)\n if not watcher:\n if txn_is_dirty():\n # Creating and committing the watcher should always be the\n # first thing that's being done in the txn when reassigning.\n # Otherwise we would be committing unrelated, unexpected\n # changes.\n #\n # Detect if that happens, but still proceed and log to sentry.\n msg = 'Dirty transaction when creating and committing watcher'\n logger.warn(msg)\n logger.warn('Registered objects: %r' % registered_objects())\n log_msg_to_sentry(msg, level='warning', extra={\n 'registered_objects': repr(registered_objects())}\n )\n\n session = create_session()\n watcher = Watcher(actorid=new_responsible)\n session.add(watcher)\n transaction.commit()\n transaction.begin()", "title": "" }, { "docid": "99933fbe235a4a40a2756ab9ae967c7e", "score": "0.43514693", "text": "def last_remote_addr_actions(self, request, seconds):\r\n queryset = self.get_same_remote_addr(request)\r\n\r\n timedelta = datetime.timedelta(seconds=seconds)\r\n point_in_time = datetime.datetime.now() - timedelta\r\n queryset = queryset.filter(createtime__gte=point_in_time)\r\n\r\n return queryset", "title": "" }, { "docid": "111afa825ee50c080f30f40b61deba39", "score": "0.43379867", "text": "def test_filter_latest(self):\n fs = self.bound(MultiValueDict())\n\n qs = Mock()\n fs.filter(qs)\n\n qs.filter.assert_called_with(latest=True)", "title": "" }, { "docid": "16e70fbc0cc1645b0b4f9a825da0e893", "score": "0.43332693", "text": "def test_new_filter_not_default(self):\r\n self.build(\r\n 'ir.filters',\r\n dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),\r\n dict(name='b', user_id=self.USER_ID, model_id='ir.filters'),\r\n )\r\n\r\n Filters = self.registry('ir.filters')\r\n Filters.create_or_replace(self.cr, self.USER_ID, {\r\n 'name': 'c',\r\n 'model_id': 'ir.filters',\r\n 'user_id': self.USER_ID,\r\n 'is_default': True,\r\n })\r\n filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')\r\n\r\n self.assertItemsEqual(map(noid, filters), [\r\n dict(name='a', user_id=self.USER, is_default=False, domain='[]', context='{}'),\r\n dict(name='b', user_id=self.USER, is_default=False, domain='[]', context='{}'),\r\n dict(name='c', user_id=self.USER, is_default=True, domain='[]', context='{}'),\r\n ])", "title": "" }, { "docid": "b1e08fedc40da914f1a7de309596bf09", "score": "0.43327948", "text": "def new_users(self):\n if 'new_users' not in self.cache:\n self.cache['new_users'] = self._user_event_counter(EventTypes.NEW_USER)\n return self.cache['new_users']", "title": "" }, { "docid": "ea22be4d67d7720869d940115c4bba21", "score": "0.4332354", "text": "def new_query(self):\n self._ext_queries += 1", "title": "" }, { "docid": "cba20063393162c3c6e85feb247f5c7d", "score": "0.43227044", "text": "def keep(self, **kwargs):\n matches = [all(value == getattr(event, key) if key in Event.exact_matches else value in getattr(event, key) for key, value in kwargs.items()) if len(kwargs) > 0 else True for event in self.schedule]\n\n sched = Schedule([event for index, event in enumerate(self.schedule) if not self.__inner_filter[index] or 
matches[index]], name=self.name)\n \n self.__inner_filter = [True for _ in range(len(sched))]\n\n if VERBOSE:\n print(f'Filtered {self} to {len(sched)} events')\n\n return sched", "title": "" }, { "docid": "e20ee497b71259461c15f742537efbae", "score": "0.4322676", "text": "def test_new_filter_not_default(self):\r\n self.build(\r\n 'ir.filters',\r\n dict(name='a', user_id=False, model_id='ir.filters'),\r\n dict(name='b', user_id=False, model_id='ir.filters'),\r\n )\r\n\r\n Filters = self.registry('ir.filters')\r\n Filters.create_or_replace(self.cr, self.USER_ID, {\r\n 'name': 'c',\r\n 'model_id': 'ir.filters',\r\n 'user_id': False,\r\n 'is_default': True,\r\n })\r\n filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')\r\n\r\n self.assertItemsEqual(map(noid, filters), [\r\n dict(name='a', user_id=False, is_default=False, domain='[]', context='{}'),\r\n dict(name='b', user_id=False, is_default=False, domain='[]', context='{}'),\r\n dict(name='c', user_id=False, is_default=True, domain='[]', context='{}'),\r\n ])", "title": "" }, { "docid": "692f5e6a148421eee5d2449545e79e4b", "score": "0.43191355", "text": "def add(query, params, filters):\n for fltr in filters:\n fltr.add(query, params)", "title": "" }, { "docid": "351f441945bf9908a35d63825b9d2d73", "score": "0.43181813", "text": "def get_new_connections(self, old, new, deps):\n\t\tfound_connections = self.get_new_objects(old, new, deps)\n\t\tnew_connections = []\n\t\tcuckoo_conf = Config(os.path.join(CUCKOO_ROOT, \"conf\", \"cuckoo.conf\"))\n\t\tremote_port = cuckoo_conf.resultserver.port\n\t\tremote_address = cuckoo_conf.resultserver.ip\n\t\tfor c in found_connections:\n\t\t\tif not (c[\"local_port\"] == CUCKOO_GUEST_PORT or \\\n\t\t\t c['remote_port'] == remote_port or \\\n\t\t\t c[\"remote_address\"]\t== remort_address):\n\t\t\t\tnew_connections.append(c)\n\t\treturn new_connections", "title": "" }, { "docid": "3ad82fb8cb46c8ef697102e0edd357ef", "score": "0.43161115", "text": "def rerun_filters(dpi=None):\n\n # get the database links.\n match_db = database.get_collection('match')\n filter_db = database.get_collection('filter')\n\n # create the object.\n cbio = CBioEngine(settings.MONGO_URI,\n settings.MONGO_DBNAME,\n data_model.match_schema,\n muser=settings.MONGO_USERNAME,\n mpass=settings.MONGO_PASSWORD,\n collection_clinical=settings.COLLECTION_CLINICAL,\n collection_genomic=settings.COLLECTION_GENOMIC)\n\n query = {'status': 1, 'temporary': False, 'trial_watch': {'$exists': False}}\n filters = list(filter_db.find(query))\n for filter_ in filters:\n\n # lots of logging.\n logging.info(\"rerun_filters: filter: %s\" % filter_['_id'])\n\n # prepare the filters.\n c, g, txt = prepare_criteria(filter_)\n\n # execute the match.\n cbio.match(c=c, g=g)\n\n if cbio.match_df is not None and cbio.genomic_df is not None and cbio.clinical_df is not None:\n logging.info(\"rerun_filters: new matches: match=%d, genomic=%d, clinical=%d\" % (\n len(cbio.match_df), len(cbio.genomic_df), len(cbio.clinical_df)))\n\n # get existing matches for this filter.\n matches = list(match_db.find({'FILTER_ID': ObjectId(filter_['_id'])}))\n\n rec_cnt = 0\n for m in matches:\n rec_cnt += len(m['VARIANTS'])\n\n logging.info(\"rerun_filters: exisiting: %d %d\" % (len(matches), rec_cnt))\n\n # parse the old matches.\n clinical_old_id = set()\n old_lu = {}\n match_lu = {}\n for match in matches:\n\n # get the clincal id.\n clinical_id = match['CLINICAL_ID']\n\n # now build tuples of variants.\n for genomic_id in match['VARIANTS']:\n\n # make pair\n pair = 
(clinical_id, genomic_id)\n clinical_old_id.add(pair)\n\n # build id lookup.\n old_lu[pair] = match['_id']\n\n # cache matches.\n match_lu[pair] = match\n\n # parse the new matches.\n clinical_new_id = set()\n new_lu = {}\n i = 0\n for match in cbio.match_iter():\n\n # simplify.\n clinical_id = match['CLINICAL_ID']\n genomic_id = match['GENOMIC_ID']\n\n # build set.\n pair = (clinical_id, genomic_id)\n clinical_new_id.add(pair)\n\n # cache matches.\n match_lu[pair] = match\n\n # build lookup.\n new_lu[pair] = i\n i += 1\n\n # find the ones which need to be deleted and delete them.\n to_delete = clinical_old_id - clinical_new_id\n logging.info(\"rerun_filters: removing: %d\" % len(to_delete))\n updated = list()\n for pair in to_delete:\n\n # extract ids\n match_id = old_lu[pair]\n match = match_lu[pair]\n\n # find the variant.\n good = list()\n hit = False\n for v in match['VARIANTS']:\n if v != pair[1]:\n good.append(v)\n else:\n hit = True\n\n # update it if necessary.\n if hit:\n\n # check if will empty this.\n if len(good) == 0:\n\n # delete it.\n match_db.delete_one({'_id': match_id})\n else:\n\n # just update it.\n match_db.update({\"_id\": match_id}, {\"$set\": {\"VARIANTS\": good}})\n\n # update the local one to make sure we delete all variants\n match['VARIANTS'] = good\n\n # find the intersection and remove them from data frame.\n remove_frame = clinical_new_id.intersection(clinical_old_id)\n bad_list = []\n for pair in remove_frame:\n\n # lookup index.\n idx = new_lu[pair]\n bad_list.append(idx)\n\n logging.info(\"rerun_filters: skipping: %d\" % len(bad_list))\n\n # remove them.\n if cbio.match_df is not None and len(cbio.match_df) > 0:\n cbio.match_df.drop(cbio.match_df.index[bad_list], inplace=True)\n\n # insert the counts.\n count_matches(cbio, filter_)\n\n # insert the matches if not temporary.\n insert_matches(cbio, filter_, from_filter=False, dpi=dpi)", "title": "" }, { "docid": "cc6c9583961accd484391452b742401b", "score": "0.43107936", "text": "def update_retrieved_if_new(olditem, newitem, days=180, retrieved_pid='P813'):\n def ref_overwrite(oldref, newref, days):\n \"\"\"\n If the newref is the same as the oldref except the retrieved date is `days` newer, return True\n the retrieved date is NOT `days` newer, return False\n the refs are different, return True\n \"\"\"\n if len(oldref) != len(newref):\n return True\n oldref_minus_retrieved = [x for x in oldref if x.get_prop_nr() != retrieved_pid]\n newref_minus_retrieved = [x for x in newref if x.get_prop_nr() != retrieved_pid]\n if not all(x in oldref_minus_retrieved for x in newref_minus_retrieved):\n return True\n oldref_retrieved = [x for x in oldref if x.get_prop_nr() == retrieved_pid]\n newref_retrieved = [x for x in newref if x.get_prop_nr() == retrieved_pid]\n if (len(newref_retrieved) != len(oldref_retrieved)) or not (\n len(newref_retrieved) == len(oldref_retrieved) == 1):\n return True\n datefmt = '+%Y-%m-%dT%H:%M:%SZ'\n retold = list([datetime.strptime(r.get_value()[0], datefmt) for r in oldref if r.get_prop_nr() == retrieved_pid])[0]\n retnew = list([datetime.strptime(r.get_value()[0], datefmt) for r in newref if r.get_prop_nr() == retrieved_pid])[0]\n return (retnew - retold).days >= days\n\n newrefs = newitem.references\n oldrefs = olditem.references\n if not (len(newrefs) == len(oldrefs) == 1):\n #print(\"overwriting refs, not 1\")\n olditem.references = copy.deepcopy(newitem.references)\n return None\n overwrite = ref_overwrite(oldrefs[0], newrefs[0], days)\n if overwrite:\n print(\"updating ref\")\n 
olditem.references = newrefs\n else:\n print(\"don't change\")\n pass", "title": "" }, { "docid": "53fd50eb7ad9d9da17f61c0410f769d4", "score": "0.43082923", "text": "def filter_for_new_sensor_updates(aurora_creds: dict, new_sensor_list: List) -> List:\n with UseDatabase(aurora_creds) as cursor:\n sql_check = \"\"\"SELECT EXISTS (SELECT 1 FROM pg_tables\n WHERE tablename = 'all_sensor_info');\"\"\"\n cursor.execute(sql_check)\n response = cursor.fetchone()\n if response[0] == True:\n sql_collect = \"\"\"SELECT row_to_json(all_sensor_info)\n FROM all_sensor_info;\"\"\"\n cursor.execute(sql_collect)\n all_stored_sensors = cursor.fetchall()\n else:\n # If 'all_sensor_info' table does not exist\n all_stored_sensors = []\n stored_sensor_ids = []\n for i in all_stored_sensors:\n stored_sensor_ids.append(i[0]['sensor_id'])\n new_sensor_info = []\n for i in new_sensor_list:\n # If sensor is not already stored\n if i[0] not in stored_sensor_ids:\n new_sensor_info.append(i)\n else:\n for stored_sensor in all_stored_sensors:\n # If sensor has new data not yet stored\n if stored_sensor[0]['sensor_id'] == i[0] and \\\n stored_sensor[0]['end_date'].replace('-','').replace(':','') != i[3]:\n new_sensor_info.append(i)\n return new_sensor_info, stored_sensor_ids", "title": "" }, { "docid": "4ddc395193ce066fcc35eb3969c54651", "score": "0.43050253", "text": "def _test_stream_events_before_target(self, target_timestamp):\n records = []\n for tag, r in self._readers.items():\n # The target_timestamp is the maximum timestamp that was read from the\n # stream. Some readers may have elements that are less than this. Thus,\n # we skip all readers that already have elements that are at this\n # timestamp so that we don't read everything into memory.\n if self._stream_times[tag] >= target_timestamp:\n continue\n try:\n record = next(r).recorded_event\n if record.HasField('processing_time_event'):\n self._stream_times[tag] += timestamp.Duration(\n micros=record.processing_time_event.advance_duration)\n records.append((tag, record, self._stream_times[tag]))\n except StopIteration:\n pass\n return records", "title": "" }, { "docid": "998012bd237e2bd6e6f1f5a85f2cee3c", "score": "0.43020946", "text": "def getMongoFilter(self):\n raise NotImplementedError", "title": "" }, { "docid": "3a57735da602b605765fb47fa5346855", "score": "0.4300831", "text": "def create_comparison(comparison_update: schemas.FilterComparisonCreate,\n db: Session = Depends(deps.get_db),\n current_user: schemas.UserVerify = Depends(\n deps.get_current_user)) -> JSONResponse:\n \n comparison = schemas.FilterComparison(\n text_sample_id_1 = comparison_update.text_sample_id_1, \n text_sample_id_2 = comparison_update.text_sample_id_2, \n item_1_is_better = comparison_update.item_1_is_better,\n user_id = current_user.id,\n id = str(uuid.uuid4().hex)\n )\n\n\n data = crud_filter_comparisons.create_comparison(comparison=comparison, db=db)\n if data is None:\n return JSONResponse(status_code=500,\n content={\"message\": \"Internal Server Error\"})\n return JSONResponse(status_code=200,\n content={\"message\": \"success\"})", "title": "" }, { "docid": "2d2b94ae69ba5e405654e6e1be2c7236", "score": "0.4300264", "text": "def remove_all_by_source_and_date(self, source, month_delta=-2):\n d_minus_2m = self.monthdelta(datetime.now(), month_delta)\n results = self.all_dns_collection.find({'sources.source': source,\n 'sources.updated': {\"$lt\": d_minus_2m}}\n ).batch_size(30)\n\n for result in results:\n if len(result['sources']) > 1:\n 
self.all_dns_collection.update({'_id': ObjectId(result['_id'])},\n {\"$pull\": {\"sources\": {\"source\": source}}})\n else:\n self.all_dns_collection.remove({'_id': ObjectId(result['_id'])})\n\n return True", "title": "" }, { "docid": "a287ab109944ccdb2739ad858430a8c2", "score": "0.42931166", "text": "def get_new_processes(self, old, new, deps):\n\t\treturn self.get_new_by_field(old, new, deps, ['process_id'])", "title": "" }, { "docid": "43b0c368ed3ca8a013f5a500a9e786e6", "score": "0.42913648", "text": "def get_new(self):\n\n # Refresh file series\n self.refresh()\n\n # Load current and previous set of files\n new_file_series = self._load(update_path=False)\n old_file_series = self._load(prev_version=True, update_path=False)\n\n # Select files that are in the new series and not the old series\n new_files = new_file_series[-new_file_series.isin(old_file_series)]\n\n return new_files", "title": "" }, { "docid": "cc00b7369e38815d1e12193d7f61f767", "score": "0.42837262", "text": "def __FilterAndSampleSearchTerms(self, existingSearchTerms, newSearchAttributes, sampleSize, pullDate):\n # Normalize the existing search terms:\n existingSearchTerms = { re.sub('[^\\w\\d]', '', term.lower()) : True for term in existingSearchTerms }\n # Filter out already sampled search terms, simple words etc:\n filtered = [re.sub('[^\\w\\d]', '', row[0].lower()) for row in newSearchAttributes.values if row[0] not in existingSearchTerms and row[1] <= pullDate.date()]\n # Randomly sample:\n sampleSize = min(len(filtered), sampleSize)\n indices = choose(range(0, len(filtered)), sampleSize, replace = False)\n return [filtered[i] for i in indices]", "title": "" }, { "docid": "66ca8e3e47f19f1efeb414bc7e78aaf1", "score": "0.42815423", "text": "def update_or_create(\n self, filters: Optional[Dict[str, Any]] = None, **kwargs\n ) -> Tuple[bool, Model]:\n created = False\n instance = None\n if filters:\n instance = self.find(**filters).first()\n\n if instance is None:\n created = True\n\n return created, self.__create(instance, partial=True, **kwargs)", "title": "" }, { "docid": "e9564f73fd19de0d3be77b7ade465d1f", "score": "0.42791924", "text": "def test_from_logs(self):\n params = deepcopy(self.DEFAULT_PARAMS)\n params['filters']['type'].append('cloudify_log')\n Events._build_select_query(**params)\n self.assertGreater(\n self.db.session.query().filter().union.call_count,\n 1,\n )", "title": "" }, { "docid": "a3b2cf0a5e78dce85dbe171096849252", "score": "0.42666724", "text": "def _merge_report(self, target, new):\n query_millis = int(new['parsed']['millis'])\n current_sig = self._get_existing_query(target, new['queryAnalysis'])\n\n if current_sig is not None:\n current_sig['totalTimeMillis'] += query_millis\n current_sig['queryCount'] += 1\n current_sig['avgTimeMillis'] = current_sig['totalTimeMillis'] / current_sig['queryCount']\n\n else:\n initial_query_detail = self._get_initial_query_detail(new)\n target['queryDetails'].append(initial_query_detail)\n target['queries'].append(initial_query_detail['queryMask'])\n\n target['totalTimeMillis'] += query_millis\n target['queryCount'] += 1\n target['avgTimeMillis'] = target['totalTimeMillis'] / \\\n target['queryCount']", "title": "" }, { "docid": "81a2b44c253985c84c7c94889f220207", "score": "0.4264802", "text": "def refresh_history(old, new):\n keys_list = ['val_loss', 'val_acc', 'loss', 'acc']\n for key in keys_list:\n new_entry = new[key]\n if len(new_entry) > 0:\n for elem in new_entry:\n old[key].append(elem)\n else:\n old[key].append(elem)\n return(old)", "title": "" }, { 
"docid": "49a2076165a9003aa6d137115e2d9ea4", "score": "0.42604247", "text": "def make_filter(dbstate, uistate, objclass, gramps_ids, title=None):\n FilterClass = GenericFilterFactory(objclass)\n rule = getattr(getattr(rules, objclass),'RegExpIdOf')\n filter = FilterClass()\n if title is None:\n title = _(\"Filter %s from Clipboard\") % objclass\n if isinstance(title, collections.Callable):\n title = title()\n filter.set_name(title)\n struct_time = time.localtime()\n filter.set_comment( _(\"Created on %(year)4d/%(month)02d/%(day)02d\") % { \n 'year': struct_time.tm_year,\n 'month': struct_time.tm_mon,\n 'day': struct_time.tm_mday})\n re = \"|\".join([\"^%s$\" % gid for gid in sorted(gramps_ids)])\n filter.add_rule(rule([re]))\n filterdb = FilterList(CUSTOM_FILTERS)\n filterdb.load()\n EditFilter(objclass, dbstate, uistate, [],\n filter, filterdb,\n lambda : edit_filter_save(uistate, filterdb, objclass))", "title": "" }, { "docid": "733f1d77640c103fc96759879a6773f4", "score": "0.4258774", "text": "def index_queryset(self, using=None):\n return self.get_model().objects.filter(created__lte=datetime.datetime.now())", "title": "" }, { "docid": "733f1d77640c103fc96759879a6773f4", "score": "0.4258774", "text": "def index_queryset(self, using=None):\n return self.get_model().objects.filter(created__lte=datetime.datetime.now())", "title": "" }, { "docid": "6411cb608331f15c16fd65774e03dd3b", "score": "0.4257283", "text": "def test_get_filtered_changes_list(self):\n self.login()\n self.set_session_user_tokens()\n qs = \"?validated=True&{}=1234&action={}\".format(\n settings.IDENTITY_FIELD, settings.ACTIONS[0][0])\n self.add_changes_callback(num=10, qs=qs)\n\n qs = \"?mother_id=1234&action={}&validated=True\".format(\n settings.ACTIONS[0][0])\n response = self.client.get('{}{}'.format(reverse('changes'), qs))\n context = response.context\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(context['changes']), 5)", "title": "" }, { "docid": "b7d6a2bc08dfbbbb680863253331ccc4", "score": "0.4253213", "text": "def is_new(self):\r\n return self._new", "title": "" }, { "docid": "47439e4e441dc5f200bc6f7567699361", "score": "0.42502186", "text": "def history_bullshit_filter(self, old_jobs):\n\n self.job_index = 0\n refreshed_jobs = []\n print('RECHECKING HISTORY')\n\n for this_job in old_jobs:\n\n self.job_index += 1\n print(self.name + \" processing job # %d\" % self.job_index)\n\n if this_job.rejection_identifier in ('a', 'r'): # already marked as removed, don't filter\n refreshed_jobs.append(this_job)\n continue\n\n this_job.approve()\n this_job.good_hits = []\n\n self.filter_title(this_job)\n\n if this_job.is_relevant:\n self.filter_body(this_job)\n\n if this_job.is_relevant:\n print('job # %d approved O' % self.job_index)\n\n refreshed_jobs.append(this_job)\n\n return refreshed_jobs", "title": "" }, { "docid": "d0e112468a4ba2633a0dbe1de61263ac", "score": "0.42451245", "text": "def listNewFiles(self):\n return self.newfiles", "title": "" }, { "docid": "4bf5ff9af7afa77835836560ed6e0f5d", "score": "0.4244852", "text": "def filter(cls, args, latest=True, offset=None, count=None):\n all_versions = super().filter(args, offset, count)\n if not latest:\n return all_versions\n return cls._filter_latest_versions(all_versions)", "title": "" }, { "docid": "34d8a574e68eabdcb4a303992767ff78", "score": "0.42433754", "text": "def filter(self, *args, **kwargs):\n\n for arg in args:\n if isinstance(arg, Q) and arg.action == \"end\":\n self.qes.append(arg)\n elif isinstance(arg, Q) and arg.action == 
\"start\":\n self.qs.append(arg)\n elif isinstance(arg, In):\n self.relation.append(arg)\n else:\n raise Exception(\"unsupported type:{},value:{}\".format(type(arg), arg))\n\n filter_origin, filter_destination = process_filter_args(self.manager.source_class, kwargs)\n if filter_origin:\n self.filters.append(filter_origin)\n if filter_destination:\n self.filters_destination.append(filter_destination)\n return self", "title": "" }, { "docid": "9c2a1a6833f9ae0fc64204b115b0b75c", "score": "0.4231966", "text": "def is_stale(self, version=None):\n if self.updated is None:\n return True\n else:\n return self.updated < datetime.datetime.now() - datetime.timedelta(seconds=settings.AGGREGATE_STALE)", "title": "" }, { "docid": "2cbd2fe032830261d2fecc28d1a0d81d", "score": "0.42293826", "text": "def __init__(self,\n append_documents=None,\n ddl_only_recovery=None,\n documents_filter_type=None,\n filter_expression=None,\n id_regex=None,\n overwrite_users=None,\n suffix=None,\n ):\n\n # Initialize members of the class\n self.append_documents = append_documents\n self.ddl_only_recovery = ddl_only_recovery\n self.documents_filter_type = documents_filter_type\n self.filter_expression = filter_expression\n self.id_regex = id_regex\n self.overwrite_users = overwrite_users\n self.suffix = suffix", "title": "" }, { "docid": "dfaceeb411184ded7c08c47faae5b78e", "score": "0.4227273", "text": "def testCreateWithFilter(self):\n self._MakeEventType()\n self._MakeSource(self.source_crd)\n self._MakeTrigger(self.source, self.event_type.type)\n\n self.operations.CreateTrigger.return_value = self.trigger\n self.Run('events triggers create my-trigger --platform=gke '\n '--cluster=cluster-1 --cluster-location=us-central1-a '\n '--target-service=my-service --type=google.source.my.type '\n '--trigger-filters key=value1,key2=value2')\n trigger_ref = self._TriggerRef('my-trigger', 'default')\n self.validate_params.assert_called_once_with(mock.ANY, self.event_type)\n self.source.name = 'source-for-my-trigger'\n self.operations.CreateTrigger.assert_called_once_with(\n trigger_ref, self.source, self.event_type.type,\n collections.OrderedDict([('key', 'value1'), ('key2', 'value2')]),\n 'my-service', 'default')", "title": "" }, { "docid": "f74b5d22e9afaed32b97f60877dbfe44", "score": "0.42246795", "text": "def find_one_and_delete(self, filter, projection=None, sort=None, _deadline=None):\n return self._new_find_and_modify(filter, projection, sort, remove=True,\n _deadline=_deadline)", "title": "" }, { "docid": "62b67fc985d847f220e8fa8acc58e3d1", "score": "0.42183864", "text": "def _existing(self, attr, value):\n return Document.uncached.filter(locale=self.locale,\n **{attr: value})", "title": "" }, { "docid": "a4cfffbe710266e12ac68aa5e46bdae9", "score": "0.42128938", "text": "def test_new_readings(self):\n query = db.session.query(MeterReading).filter_by(meter=self.meter_id)\n self.assertEqual(7, query.count(), \"7 readings from setup\")\n dt = (datetime.today() - timedelta(days=3)).date()\n start_dt = dt\n readings: Dict[str, List[float]] = {}\n for idx in range(3):\n readings[dt.strftime(\"%Y-%m-%d\")] = [2.0] * 96\n dt += timedelta(days=1)\n MeterReading.merge_readings(MeterReading.from_json(self.meter_id, readings))\n db.session.flush()\n self.assertEqual(10, query.count(), \"10 readings after save\")\n for row in query.filter(MeterReading.occurred < start_dt):\n self.assertEqual(\n 96.0,\n sum(row.readings),\n \"existing readings for %s unchanged\" % row.occurred,\n )\n self.assertEqual(row.occurred, row.modified.date())\n for 
row in query.filter(MeterReading.occurred >= start_dt):\n self.assertEqual(\n 96.0 * 2, sum(row.readings), \"new readings added for %s\" % row.occurred\n )\n self.assertEqual(date.today(), row.modified.date())", "title": "" }, { "docid": "eca9286b757a9aa8048856399fa434c4", "score": "0.42085913", "text": "def set_for_events(self, delete=True, **event_filter):\n\n # Delete existing EventResults (using denormalized Event fields)\n if delete:\n er_filter = {\n 'event_' + key: val for key, val in event_filter.items()\n }\n self.filter(**er_filter).delete()\n\n # Filter results (using JOIN through Report to Event)\n Result = swapper.load_model('vera', 'Result')\n result_filter = {\n 'report__event__' + key: val for key, val in event_filter.items()\n }\n ers = []\n results = Result.objects.valid_results(\n **result_filter\n ).select_related('report__event')\n for result in results:\n er = self.model(\n event=result.report.event,\n result=result\n )\n er.denormalize()\n ers.append(er)\n self.bulk_create(ers)", "title": "" }, { "docid": "dff57a0cfa2c31a0d9576ce2b14d3e7b", "score": "0.4207803", "text": "def find_docs_to_update(coll,\n condition=None,\n progress_path=None,\n logger=None):\n if not condition or condition == [] or condition == {}:\n return coll.find()\n\n logger = logger or LOGGER\n progress_path = progress_path or PROGRESS_FILE\n method = condition['method']\n name = coll.name\n\n create_file_if_not_exists(progress_path, yaml.dump({}))\n\n if method == 'object_id':\n # Find all documents having IDs greater than the saved Object ID\n with open(progress_path, 'r') as input:\n start_id = yaml.load(input).get(name, '')\n\n if start_id == '':\n return coll.find().sort('_id')\n else:\n logger.info('starting from ObjectId: %s', start_id)\n return coll.find({ \"_id\": { \"$gt\": ObjectId(start_id) }}).sort('_id')\n\n elif method == 'date_delta':\n # Find all documents having 'date' field ≥ now() - delta\n delta = timedelta(**{ condition['unit']: condition['value']})\n start_date = (dt.now().date() - delta).strftime('%Y-%m-%d')\n\n logger.info('starting from date: %s', start_date)\n return coll.find({ 'date': { \"$gte\": start_date } })", "title": "" }, { "docid": "ab82d8705fbb2d284798c5eb8e6f3087", "score": "0.42074516", "text": "def changes(self, since=-1):\n if since < 0:\n since = self.last_seq\n data = self.get('/%s/_changes/?since=%s' % (self.name, since))\n self.last_seq = data['last_seq']\n return data", "title": "" }, { "docid": "924376f9e3cd792acbb6f9ff7ba2b93c", "score": "0.41954598", "text": "def insert_or_replace(self, collection, filter_data, data, by_user=None):\n if filter_data is None:\n \"\"\"\n for safety, just insert a new document when filter not provide\n \"\"\"\n result = None\n else:\n if isinstance(filter_data, str):\n filter_data = {self.PYRENEES_GUID_KEY: filter_data}\n result = self.find_one(collection, filter_data)\n if result is not None:\n self.delete(collection, {self.PYRENEES_GUID_KEY: result[self.PYRENEES_GUID_KEY]})\n data.update({\n '_db_created_time': result['_db_created_time'],\n '_db_created_by': result['_db_created_by'],\n '_db_updated_time': get_25(),\n '_db_updated_by': by_user.username if by_user else None,\n '_db_deleted': result['_db_deleted'],\n '_db_owner': result['_db_owner'],\n self.PYRENEES_GUID_KEY: result[self.PYRENEES_GUID_KEY],\n })\n self.insert(collection, data, by_user)\n result = data\n else:\n result = self.insert(collection, data, by_user)\n return result", "title": "" }, { "docid": "3925065665166b820d854dae74bd45c3", "score": 
"0.41932076", "text": "def update_locations_filter(self):\n locations = []\n for doc in self.list_events():\n locations.append(doc[\"location\"])\n locations = list(set(locations))\n return self._update_filter(\"locations\", locations)", "title": "" }, { "docid": "9295980659972baba3c6baac660131d0", "score": "0.41906235", "text": "def test_remote_local_new(self):\n results = get_diff_results(\n [self._run_names['test_project_2']], [self._report_dir_orig],\n '--new', 'json',\n ['--url', self._url])[0]\n\n self.assertEqual(\n len(results), 7)\n self.assertEqual(\n len(self.filter_by_checker(results, 'core.CallAndMessage')), 5)\n self.assertEqual(\n len(self.filter_by_checker(results, 'core.DivideZero')), 1)\n self.assertEqual(\n len(self.filter_by_checker(results, 'deadcode.DeadStores')), 1)", "title": "" }, { "docid": "27fd64f24c3880e487147ea28d2da966", "score": "0.41902626", "text": "def test_post_confirm_relevance_with_old_date(self):\n house, *_, flat = self.init_house_structure()\n post, *_ = self.init_post(house, flat)\n\n post = Post.objects.first()\n post.created = datetime.datetime(2021, 6, 10, tzinfo=pytz.UTC)\n post.save()\n self.assertEqual(Post.objects.first().created.month, 6)\n\n url = reverse('main:posts-detail', args=[post.pk])\n response = self.client.patch(url, data={'created': True})\n self.assertEqual(response.status_code, 200)\n post = Post.objects.first()\n today = datetime.datetime.now(tz=pytz.UTC)\n self.assertEqual(post.created.month, today.month)\n self.assertEqual(post.created.day, today.day)", "title": "" } ]
4cfcf17fb7ed12a4a17e4ecac43019fe
Get version number from the pkmodel module. The easiest way would be to just ``import pkmodel ``, but note that this may fail if the dependencies have not been installed yet. Instead, we've put the version number in a simple version_info module, that we'll import here by temporarily adding the oxrse directory to the pythonpath using sys.path.
[ { "docid": "1f2fc9112122d6cd672fa39a911acc3b", "score": "0.8605629", "text": "def get_version():\n import os\n import sys\n\n sys.path.append(os.path.abspath('pkmodel'))\n from version_info import VERSION as version\n sys.path.pop()\n\n return version", "title": "" } ]
[ { "docid": "d98e82f0b1ecff5519def17ba5a4e53f", "score": "0.73453414", "text": "def get_model_version():\n from .. import __model_version__\n\n return __model_version__", "title": "" }, { "docid": "e3003c2756e7ba9382ad7419a59451ea", "score": "0.6573163", "text": "def get_version():\n path = Path(__file__).parent / \"toolkit\" / \"version.py\"\n code = path.read_text()\n env = {}\n exec(code, env, env)\n return env['__version__']", "title": "" }, { "docid": "d27a8c0dfcf44ca35b239b3dd9b49139", "score": "0.643786", "text": "def get_version():\n\n import os\n module_path = os.path.join(os.path.dirname('__file__'), 'stuffpages',\n '__meta__.py')\n\n import importlib.util\n spec = importlib.util.spec_from_file_location('__meta__', module_path)\n meta = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(meta)\n return meta.__version__", "title": "" }, { "docid": "63d96b9742ff7a2e7bb2ed3907447ef0", "score": "0.6361907", "text": "def get_version_string():\n with open(\"mssdk/__init__.py\", \"rb\") as file:\n version_line = re.search(\n r\"__version__\\s+=\\s+(.*)\", file.read().decode(\"utf-8\")\n ).group(1)\n return str(ast.literal_eval(version_line))", "title": "" }, { "docid": "a44daee00853d228aa848509be8a4d27", "score": "0.6348057", "text": "def _get_current_version(session):\n fn = os.path.abspath(os.path.join(os.path.dirname(__file__), 'segno/__init__.py'))\n with open(fn, 'r', encoding='utf-8') as f:\n content = f.read()\n m = re.search(r'^__version__ = [\"\\']([^\"\\']+)[\"\\']$', content, flags=re.MULTILINE)\n if m:\n return m.group(1)\n session.error('Cannot find any version information')", "title": "" }, { "docid": "91e8ea1c462b5e3923a73f3dc603666a", "score": "0.62948644", "text": "def get_version():\n VERSION_FILE = 'entity_emailer/version.py'\n mo = re.search(r'^__version__ = [\\'\"]([^\\'\"]*)[\\'\"]', open(VERSION_FILE, 'rt').read(), re.M)\n if mo:\n return mo.group(1)\n else:\n raise RuntimeError('Unable to find version string in {0}.'.format(VERSION_FILE))", "title": "" }, { "docid": "336fe72f9d303058aa42b4c777397929", "score": "0.6275719", "text": "def get_version():\n return VERSION", "title": "" }, { "docid": "4d0152895bba0050cd2ba2cfc5750a32", "score": "0.62730026", "text": "def get_pkg_info_revision(): # -> int:\n ...", "title": "" }, { "docid": "914ca0885176df2cd308515ece8a1ecd", "score": "0.62698287", "text": "def version_number(self):\r\n return self.read_number(\"SV\")", "title": "" }, { "docid": "82137b305161c253dd0ce76d3cbc52cc", "score": "0.6236268", "text": "def _getVersion(self):\n ## Skip Error check since handling of errors is version specific\n idn = self._instQuery('*IDN?', checkErrors=False).split(',')\n ver = idn[3].split('.')\n # put major and minor version into floating point format so can numerically compare\n self._version = float(ver[0]+'.'+ver[1])", "title": "" }, { "docid": "99dccc7c63c1c77d0fded97e75b54df6", "score": "0.6207717", "text": "def get_version():\n reg = re.compile(r'__version__ = [\\'\"]([^\\'\"]*)[\\'\"]')\n with open('requests_kerberos/__init__.py') as fd:\n matches = list(filter(lambda x: x, map(reg.match, fd)))\n\n if not matches:\n raise RuntimeError(\n 'Could not find the version information for requests_kerberos'\n )\n\n return matches[0].group(1)", "title": "" }, { "docid": "523e44e2f51fa9a73f095e59fbcf5fd1", "score": "0.6182873", "text": "def get_version(self):\n return self._run_and_parse(\n '--version',\n regexp='(?P<version>\\d+(\\.\\d+)+)',\n group='version')", "title": "" }, { "docid": 
"3842ad3b9469f53bcf75e54d32eb5432", "score": "0.61712533", "text": "def get_setup_version(reponame):\n import json\n basepath = os.path.split(__file__)[0]\n version_file_path = os.path.join(basepath, reponame, '.version')\n try:\n from param import version\n except:\n version = embed_version(basepath)\n if version is not None:\n return version.Version.setup_version(basepath, reponame, archive_commit=\"$Format:%h$\")\n else:\n print(\"WARNING: param>=1.6.0 unavailable. If you are installing a package, this warning can safely be ignored. If you are creating a package or otherwise operating in a git repository, you should install param>=1.6.0.\")\n return json.load(open(version_file_path, 'r'))['version_string']", "title": "" }, { "docid": "9ff2cf70b87d66a591e1173d3ee40763", "score": "0.61458707", "text": "def get_package_version(module_obj, value):\n for key in ['version', '__version__']:\n if hasattr(module_obj, key):\n return getattr(module_obj, key)\n parts = value.split('.')\n for index, part in enumerate(parts):\n try:\n return pkg_resources.get_distribution(\n '.'.join(parts[0:index + 1])).version\n except (pkg_resources.DistributionNotFound,\n pkg_resources.RequirementParseError):\n continue", "title": "" }, { "docid": "53797a0453718ce0f09a21c5d8b659aa", "score": "0.61398184", "text": "def get_version(self):\r\n return self.version", "title": "" }, { "docid": "dbf7b3b36a95d27eff16053f1660f389", "score": "0.6139258", "text": "def _getSVNVersion(self):\n mod = sys.modules.get(self.package)\n if mod:\n ent = os.path.join(os.path.dirname(mod.__file__),\n '.svn',\n 'entries')\n if os.path.exists(ent):\n return self._parseSVNEntries(open(ent))", "title": "" }, { "docid": "7b0470e2129f316384b3c8c0c3624722", "score": "0.6136939", "text": "def get_version():\n return __version__", "title": "" }, { "docid": "b43f2161ede25b9b2a9428ff1a348c7c", "score": "0.6133572", "text": "def get_version_info():\n return parse_version(get_version())", "title": "" }, { "docid": "1964580059fd75f2c3e053e1aba32752", "score": "0.6132279", "text": "def version(self):\n s = self._name + \" \"\n modinfo = Command(['modinfo', self._name]).runOutput()\n for line in modinfo.splitlines():\n if line.find('version') == 0:\n s += line\n return s", "title": "" }, { "docid": "08d04756bef49b55a5bf19c6f5bc513f", "score": "0.6121759", "text": "def get_version():\n f = open('./%s/project.mml' % env.name,'r')\n return json.load(f)['version']", "title": "" }, { "docid": "0482cb2c256cce456523859815e8dbc3", "score": "0.6109442", "text": "def get_package_version(bioc_pack):\n data = bad_yaml_parser(bioc_pack)\n if 'Version' in data:\n return data['Version']\n else:\n log.warn(\"Could not obtain a version number for %s\" % (bioc_pack))\n return None", "title": "" }, { "docid": "c69758ed5d33179a3cdb930527a7f895", "score": "0.61091554", "text": "def get_version():\n return pkg_resources.require(\"crumple\")[0].version", "title": "" }, { "docid": "c3248ffbfdd84b2a6c919189d9d412bf", "score": "0.6085035", "text": "def get__version__(module):\n\n version = module.__version__\n # Extract '2.1.1' from '2.1.1-r1785' / '3.2' from '3.2.dev'\n regexp = \"\\d+(\\.\\d+)+\"\n match = re.match(regexp, version)\n if match is None:\n msg = \"cannot extract version from '%s' (%s)\" % (version, module)\n raise Exception(msg)\n else:\n version = match.group(0)\n return str_to_version(version)", "title": "" }, { "docid": "5ab06dc228579a3c7d15482519a6d600", "score": "0.6076484", "text": "def _get_version(self):\n return self.__version", "title": "" }, { 
"docid": "5ab06dc228579a3c7d15482519a6d600", "score": "0.6076484", "text": "def _get_version(self):\n return self.__version", "title": "" }, { "docid": "5ab06dc228579a3c7d15482519a6d600", "score": "0.6076484", "text": "def _get_version(self):\n return self.__version", "title": "" }, { "docid": "5ab06dc228579a3c7d15482519a6d600", "score": "0.6076484", "text": "def _get_version(self):\n return self.__version", "title": "" }, { "docid": "5ab06dc228579a3c7d15482519a6d600", "score": "0.6076484", "text": "def _get_version(self):\n return self.__version", "title": "" }, { "docid": "5ab06dc228579a3c7d15482519a6d600", "score": "0.6076484", "text": "def _get_version(self):\n return self.__version", "title": "" }, { "docid": "5ab06dc228579a3c7d15482519a6d600", "score": "0.6076484", "text": "def _get_version(self):\n return self.__version", "title": "" }, { "docid": "5ab06dc228579a3c7d15482519a6d600", "score": "0.6076484", "text": "def _get_version(self):\n return self.__version", "title": "" }, { "docid": "5ab06dc228579a3c7d15482519a6d600", "score": "0.6076484", "text": "def _get_version(self):\n return self.__version", "title": "" }, { "docid": "5ab06dc228579a3c7d15482519a6d600", "score": "0.6076484", "text": "def _get_version(self):\n return self.__version", "title": "" }, { "docid": "5ab06dc228579a3c7d15482519a6d600", "score": "0.6076484", "text": "def _get_version(self):\n return self.__version", "title": "" }, { "docid": "c26deaa782d58a154402f90368d3ce60", "score": "0.60709417", "text": "def get_version():\n init_path = os.path.join('forecast_api', '__init__.py')\n content = read_file(init_path)\n match = re.search(r\"__version__ = '([^']+)'\", content, re.M)\n version = match.group(1)\n return version", "title": "" }, { "docid": "288a4ffa90418373983b4b98e2f60fc9", "score": "0.60685843", "text": "def _get_current_version(self):\n try:\n from cryptoadvance.specter._version import version\n except ModuleNotFoundError:\n return \"unknown\"\n return \"v\" + version", "title": "" }, { "docid": "68ad2dfef04bb649ea75845e223453ea", "score": "0.60665447", "text": "def get_version():\r\n return get_config()['version']", "title": "" }, { "docid": "7081851d0f03dd8d610e7b53e10d31a9", "score": "0.60542274", "text": "def version_number(self):\n return self._get_attribute('VersionNumber')", "title": "" }, { "docid": "b96be4c815ec29945f0c2ee054d15ebe", "score": "0.6030953", "text": "def get_package_version():\n version_file = read('metocean_pygeoapi/__init__.py')\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")", "title": "" }, { "docid": "3888ac67cb78dfb848129971f39da320", "score": "0.60265505", "text": "def version(self):\n return self._controller.query('++ver').rstrip()", "title": "" }, { "docid": "8acd86929e83c50b8188480e2481478d", "score": "0.6023656", "text": "def get_version():\n return \"{}.{}.{}\".format(FtiVersion.MAJOR, FtiVersion.MINOR, FtiVersion.PATCH)", "title": "" }, { "docid": "df4c0ff31e0c183fb2667ec58d86d448", "score": "0.6017856", "text": "def get_version():\n with io.open('flask_snow/__init__.py') as input_file:\n for line in input_file:\n if line.startswith('__version__'):\n return ast.parse(line).body[0].value.s", "title": "" }, { "docid": "1de638ba2d777bf80e4c3c14fa2e66ff", "score": "0.60163826", "text": "def get_version():\n return utils.read_file(utils.get_data_path(), \"VERSION\").strip()", "title": "" }, { "docid": 
"8e8e917c61d23db684f8719c5067296f", "score": "0.60037786", "text": "def get_version(self, version):\n return version", "title": "" }, { "docid": "e5634a5c9c9b55bd40ca8372f20e43ba", "score": "0.5994476", "text": "def getDeclaredVersion(self, module, version = \"head\"):\n requirements = self._getRequirements(module, version)\n for l in requirements.splitlines():\n l = l.strip()\n if l and l.startswith(\"version\"):\n return l.split()[1]\n return None", "title": "" }, { "docid": "2034c2898b5e77b58a3e4513e765a68a", "score": "0.5984139", "text": "def package_version(package_info):\n return semantic_version.Version.coerce(package_info.get('version'))", "title": "" }, { "docid": "a64874430a45b3e0d2e3a8d38e71966b", "score": "0.59841233", "text": "def get_version(self, calc):\n\n for line in calc.output.file_lines:\n if 'Program Version' in line and len(line.split()) >= 3:\n return line.split()[2]\n\n logger.warning('Could not find the ORCA version number')\n return '???'", "title": "" }, { "docid": "fef096eeefbb92077b6282f0493a71d8", "score": "0.5980591", "text": "def version(self):\n return self.get('version')", "title": "" }, { "docid": "3e642f5af1a57671535048eaeba98674", "score": "0.59747994", "text": "def get_versao_erp():\n\n\tprint frappe.get_attr(\"erpnext\"+\".__version__\")\n\n\treturn frappe.get_attr(\"erpnext\"+\".__version__\")", "title": "" }, { "docid": "5477d5e61d9e12bf6481ba43a37745e5", "score": "0.5974082", "text": "def get_version():\n\tscript_path = abspath(__file__)\n\tpipe = Popen('git --git-dir=%s/.git describe' % dirname(dirname(script_path)), stdout=PIPE, shell=True)\n\tversion = pipe.stdout.read().strip()\n\n\tif version:\n\t\treturn version\n\telse:\n\t\treturn 'unknown version'", "title": "" }, { "docid": "59cf3779f4cb02b4e817ad5273f00f37", "score": "0.5966683", "text": "def osgGetSOVersion():\n return _osg.osgGetSOVersion()", "title": "" }, { "docid": "9ab5a0d8ba5cf20d18a28dac6c7f0122", "score": "0.5963181", "text": "def get_version(cls) -> str:\n if not cls.is_available():\n return 'None'\n else:\n import pkg_resources\n try:\n return pkg_resources.get_distribution('pyppl').version\n except Exception:\n return 'None'", "title": "" }, { "docid": "885d504e9ba1f8eb5dbfa3d22aec7064", "score": "0.5962192", "text": "def version_code(self):\n if \"versionCode\" in self._prop_dict:\n return self._prop_dict[\"versionCode\"]\n else:\n return None", "title": "" }, { "docid": "2d71972846a0e5abc46fb0b3c6e6ce9f", "score": "0.5959791", "text": "def _version() -> str:\n with io.open(\"graphtik/__init__.py\", \"rt\", encoding=\"utf8\") as f:\n return re.search(r'__version__ = \"(.*?)\"', f.read()).group(1).strip()", "title": "" }, { "docid": "8b3c7048eda52e72ad9d1b7f03cc64e7", "score": "0.59576255", "text": "def get_version(self) -> str:\n return self.version_", "title": "" }, { "docid": "6fbaf96609974d0b8124823697d4f685", "score": "0.59567475", "text": "def version():\n return meta.version", "title": "" }, { "docid": "5ed5a91955480752700c72c000190673", "score": "0.59528214", "text": "def _get_version():\n search_major_version = search.VERSION[0]\n version = \"v{}\" if SEARCH_DISTRIBUTION == ES else \"os-v{}\"\n return version.format(search_major_version)", "title": "" }, { "docid": "46ac054f3cd05bdd6ee3a4c4dccea769", "score": "0.5949391", "text": "def cmd_version(self, environ, payload):\n jid = payload.get('jid', environ['xmpp.jid'])\n if isinstance(jid, JID):\n jid = jid.full\n\n try:\n resp = self.client.plugin['xep_0092'].get_version(jid)\n if resp:\n return resp\n except 
Exception:\n self.app.logger.exception('Failed to retrieve version info for %s',\n jid)\n return None", "title": "" }, { "docid": "39d7aa77ae5cb73d84a8dfc1c8a8bad8", "score": "0.5939144", "text": "def version():\n return __version__", "title": "" }, { "docid": "8e76d368677737e382fcdf727f903a32", "score": "0.59261984", "text": "def get_version(self):\n return gig.get_version()", "title": "" }, { "docid": "ca50bacce979c5972b580434d84b9e8c", "score": "0.59198177", "text": "def plone_version():\n return get_distribution('Products.CMFPlone').version", "title": "" }, { "docid": "a0bda0dd6dac1ed42bedeadfc50e1b02", "score": "0.59191257", "text": "def get_version(self):\n return self._version", "title": "" }, { "docid": "d38d3b851bd786dd2aa1dd6949b8827b", "score": "0.59091115", "text": "def get_version():\n vers = \"UNKNOWN\"\n build = \"\"\n # find the munkilib directory, and the version file\n munkilibdir = os.path.dirname(os.path.abspath(__file__))\n versionfile = os.path.join(munkilibdir, \"version.plist\")\n if os.path.exists(versionfile):\n try:\n vers_plist = FoundationPlist.readPlist(versionfile)\n except FoundationPlist.NSPropertyListSerializationException:\n pass\n else:\n try:\n vers = vers_plist['CFBundleShortVersionString']\n build = vers_plist['BuildNumber']\n except KeyError:\n pass\n if build:\n vers = vers + \".\" + build\n return vers", "title": "" }, { "docid": "459fba069e5d323c64705a2185fe2ad9", "score": "0.5908982", "text": "def get_version() -> str:\n base_dir = Path(__file__).parent\n version_file = base_dir / 'blurwal/_version.py'\n\n version = {}\n exec(version_file.read_text(), version)\n return version['__version__']", "title": "" }, { "docid": "5a6248e7229f3b6ebd0dc38e828bb0ef", "score": "0.59079635", "text": "def get_version(self):\n\n return self.version", "title": "" }, { "docid": "6ed975a8361fc31fc702b38d05f153db", "score": "0.59058887", "text": "def get_version_of(self, module_name: str) -> str:\n return self.__requirements_dict.get(module_name)", "title": "" }, { "docid": "ad014303a6b8f939a8bff605bd5f08a0", "score": "0.59057707", "text": "def GetVersion(self):", "title": "" }, { "docid": "adbde565ec3b94385671d038bcd6efe3", "score": "0.5905454", "text": "def control_version_number(self) -> str:\n return pulumi.get(self, \"control_version_number\")", "title": "" }, { "docid": "d16fdca1c6abeb76ed05513e5d7f6e7f", "score": "0.5904017", "text": "def get_version(self) -> str:\n return self.version", "title": "" }, { "docid": "c4dd8fce97c53d6e29204bf142fc2653", "score": "0.588681", "text": "def get_version(self) -> str:\n pass", "title": "" }, { "docid": "a5aedd285bd685648e4b0ed36a4b6445", "score": "0.58864534", "text": "def version(cls):\n if not cls._version:\n with open(cls._dir + os.sep + '..' 
+ os.sep + 'setup.py') as fh:\n cls._version = re.search('version=\\'(?P<version>(\\d+.?)+)\\'', fh.read()).group('version')\n return cls._version", "title": "" }, { "docid": "2aed9fe6d8db0b38769b47e5084f16f2", "score": "0.58695257", "text": "def get_current_version(leader,pk):\n pk = str(pk)\n with PersistentDefaultDict(STORE_FILENAME, format=FORMAT, flag='r') as store:\n version = store[leader][pk]\n return version", "title": "" }, { "docid": "0a2d0fe418cc6a880501a396e1275973", "score": "0.5865516", "text": "def get_version(self):\n return self.VERSION", "title": "" }, { "docid": "e2606f1d85f3fe9c199753a74eced8f9", "score": "0.5864724", "text": "def get_version(request):\n if 'jsonrpc' in request:\n return 2.0\n elif 'id' in request:\n return 1.0\n\n return None", "title": "" }, { "docid": "b542c2c2bdd0314fb7f7fcc9d26d4511", "score": "0.5851857", "text": "def _get_version_number(self):\n pattern = re.compile(self.config['version_regex'])\n match = pattern.search(self.pull_request_title)\n\n if match:\n return match.group()\n\n return", "title": "" }, { "docid": "169b7b3de8a0b0f7797cd793305e812b", "score": "0.5845273", "text": "def getVersionInfo(cls):\n \n return __version__", "title": "" }, { "docid": "532f8cc04985a540da0328e45a5680ba", "score": "0.58301324", "text": "def module_version(self):\n return self.module.__version__", "title": "" }, { "docid": "fa1644d3552a5042581deddc63dea3c2", "score": "0.5826343", "text": "def version(self) -> Optional[int]:\n return pulumi.get(self, \"version\")", "title": "" }, { "docid": "c4d334c677768f691e4ccc3dd73605a7", "score": "0.5804097", "text": "def get_version_pair(self):", "title": "" }, { "docid": "20af45de0d0a1f4bd3ad64754735651c", "score": "0.57987696", "text": "def version(self):\n return self.xmltree.get('version')", "title": "" }, { "docid": "b1a95be8cb8bf2d9a5e9f893454287be", "score": "0.5794001", "text": "def GetVndkVersion(self):\n return self._GetProp(\"ro.vndk.version\")", "title": "" }, { "docid": "44eff0daa9b1cfc11bdba2b9d9f18da5", "score": "0.5791213", "text": "def load_version():\n import re\n version_file = \"wtftz/_version.py\"\n version_line = open(version_file).read().rstrip()\n vre = re.compile(r'__version__ = \"([^\"]+)\"')\n matches = vre.findall(version_line)\n if matches and len(matches) > 0:\n return matches[0]\n else:\n raise RuntimeError(\n \"Cannot find version string in {version_file}.\".format(\n version_file=version_file))", "title": "" }, { "docid": "b4d96f0316b2a6c4c306a009099abbc9", "score": "0.5783504", "text": "def version():\n click.echo(_get_version_from_pkg() or _get_version_from_file() or 'unknown')", "title": "" }, { "docid": "78ad92bfc23df9b2a63c4423538c30d5", "score": "0.57825404", "text": "def get_package_version() -> str:\n return pkg_version", "title": "" }, { "docid": "d288d39591e4f9031273a974966afcca", "score": "0.5773661", "text": "def version():\n version = pkg_resources.require(PROJECT_NAME)[0].version\n floyd_logger.info(version)", "title": "" }, { "docid": "303b9c4500cf56b51022c0050e0ad22e", "score": "0.5771967", "text": "def version(self):\n return int()", "title": "" }, { "docid": "d85987de6c774c4c5b63a89b488dab23", "score": "0.5767821", "text": "def get_version(base_dir=pathlib.Path(\".\")) -> str:\n pyproject_path = get_pyproject_path(base_dir)\n\n pyproject = tomlkit.parse(pyproject_path.read_text())\n return pyproject[\"tool\"][\"poetry\"][\"version\"]", "title": "" }, { "docid": "bc22681de8abce01ad7b121243b093bd", "score": "0.5763598", "text": "def version():\n return VERSION", 
"title": "" }, { "docid": "4cdd0ffccfd4e8f945a85ea0b1322402", "score": "0.57597244", "text": "def get_version():\n return {\"version\": \"1.0.0\"}", "title": "" }, { "docid": "110965a08a157bb61983eb23c9e4c92f", "score": "0.5758407", "text": "def load_version():\n import re\n version_file = \"multimerchant/_version.py\"\n version_line = open(version_file).read().rstrip()\n vre = re.compile(r'__version__ = \"([^\"]+)\"')\n matches = vre.findall(version_line)\n if matches and len(matches) > 0:\n return matches[0]\n else:\n raise RuntimeError(\n \"Cannot find version string in {version_file}.\".format(\n version_file=version_file))", "title": "" }, { "docid": "967f501b3c05e568c07b23eed89e1820", "score": "0.5756387", "text": "def get_version(self):\n return self.link.get_version()", "title": "" }, { "docid": "3d0e1fe8965c0d71a48d3041d7f260e4", "score": "0.5755824", "text": "def version_id(self) -> str:\n return pulumi.get(self, \"version_id\")", "title": "" }, { "docid": "b41bf9b31e204d94f14ea16594e6a11c", "score": "0.5752546", "text": "def ods_version(self) -> int:\n return self.get_info(DbInfoCode.ODS_VERSION)", "title": "" }, { "docid": "170f5f3fa7145c0373eeaa8a9b883fb8", "score": "0.5750279", "text": "def fetch_version(self):\n return self._fetch_json(f\"{self.server}/api/version\")['Version']", "title": "" }, { "docid": "122595062b3b73ace295d65f8126c683", "score": "0.5742279", "text": "def version(self):\n return self['version']", "title": "" }, { "docid": "3d9163e3494500b8abf7af6ffa4431b6", "score": "0.57282364", "text": "def get_version():\n with open(os.path.join(\"rioxarray\", \"_version.py\")) as vfh:\n for line in vfh:\n if line.find(\"__version__\") >= 0:\n # parse __version__ and remove surrounding \" or '\n return line.split(\"=\")[1].strip()[1:-1]\n sys.exit(\"ERROR: rioxarray version not fount.\")", "title": "" }, { "docid": "de2ac46f3eee1c3bd206364a5c143173", "score": "0.5720011", "text": "def version(self) -> int:\n return pulumi.get(self, \"version\")", "title": "" }, { "docid": "4938e14a3b50a7513e9d87b0b95db47c", "score": "0.57199633", "text": "def version_number(self, version:str) -> int:\n version = self.run_executable([self.get_setting(\"drc.magic.magic_bin\"), \"--version\"])\n return int(version.replace(\".\", \"\"))", "title": "" }, { "docid": "18d7ec59ad0c7b7aad819df02120248b", "score": "0.5718626", "text": "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "title": "" }, { "docid": "18d7ec59ad0c7b7aad819df02120248b", "score": "0.5718626", "text": "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "title": "" } ]
118cd7923b03484afb4cefa3d6f41144
Show spectrum and SB map in a given direction of MoCaLaTA output. Example >>> showobs('gal03/0068/','xm')
[ { "docid": "323b42005808b2f724446e4cce0907ac", "score": "0.66047466", "text": "def showobs(galdir, view,\n spatial_aperture = None,\n spec_ap = None,\n mother = './',\n cosmo = Planck15,\n vmin = None,\n vmax = None,\n endianness = 'big',\n seeing = 0,\n get_value = None,\n showit = True,\n comp_spec = 'spectrum.dat',\n obs2rest = True,\n fix_amplitude = 1,\n smoothspec = 1*u.AA,#0,\n ymax_spec = 1e-16,#None,\n saveit = None, #file extension for saved figure, e.g. 'png'\n window_position = '+580+580' # x-y position of plot's upper left corner; set to None to let matplotlib decide\n ):\n\n # Load binfile\n binfile = mother + '/' + galdir + '/' + view + '.bin'\n par,spec1D,IFU = readMoCaLaTA(binfile,endianness=endianness,cosmo=cosmo)\n Rvals,angvals = par['Rvals'],par['angvals']\n\n #Fix amplitude\n spec1D = fix_amplitude * spec1D\n IFU = fix_amplitude * IFU\n\n # Cut out desired wavelength range\n wavelength = (1+par['z']) * np.linspace(par['BW'][0],par['BW'][1],par['SpecRes2D'])\n if spec_ap is None:\n spec_ap = [(1+par['z'])*par['BW'][0], (1+par['z'])*par['BW'][1]]\n isp = (spec_ap[0] <= wavelength) & (wavelength <= spec_ap[1])\n IFU = IFU[isp,:,:]\n wavelength = wavelength[isp]\n\n # Collapse along spectral direction\n SBmap = np.sum(IFU, axis=0) * par['dlam2']\n SBmap = SBmap.value\n\n # Blur image by seeing\n if seeing != 0:\n assert seeing.unit != u.dimensionless_unscaled, \"Seeing must have units of angle\"\n seeing_kpc = (seeing / par['as_kpc']).to(u.kpc)\n seeing_pix = (seeing_kpc / par['dx']).value\n stddev_pix = seeing_pix / 2.355\n kernel = Gaussian2DKernel(stddev_pix)\n SBmap = convolve(SBmap,kernel)\n\n # Extract spectrum from aperture\n if spatial_aperture is not None:\n mask2d = np.array([[LA.norm([x-spatial_aperture[0],y-spatial_aperture[1]]) < spatial_aperture[2]\n for x in Rvals.value] for y in Rvals.value]) #True if inside aperture\n mask3d = np.broadcast_to(mask2d, IFU.shape)\n spec1D = np.sum(IFU*mask3d,axis=(1,2)) * par['dOmega']#Coll. along spatial directions\n else:\n spec1D = np.sum(IFU, axis=(1,2)) * par['dOmega']#Coll. 
along spatial directions\n\n # Redshift-dilute spectral density\n spec1D = spec1D / (1+par['z'])\n # Smooth spectrum\n if smoothspec != 0:\n assert (u.Quantity(smoothspec)).unit.is_equivalent(u.m), '`smoothspec` must have dimensions of length.'\n smoothres = (smoothspec / (par['dlam2'] * (1+par['z']))).decompose()\n # print('par =', par['dlam2'])\n # print('len =', len(spec1D))\n kernel = Gaussian1DKernel(smoothres)\n # print('smoothres =', smoothres)\n spec1D = convolve(spec1D.value,kernel) * spec1D.unit\n\n SBmap[SBmap==0] = SBmap[np.nonzero(SBmap)].min() # adding the min ???\n logSBmap = log10(SBmap)\n\n # Plot SB map, SB profile, and spectrum\n if showit:\n dx = 1 / 13.\n wtot = 10.\n htot = 5 * dx * wtot\n dy = dx / htot * wtot\n mleft = .8 * dx\n wfig = 3 * dx\n hfig = 3 * dy\n mbot = .8 * dy\n mtop = 1 * dy\n pad = .39\n hbar = .3 * dy\n\n SBlo = SBmap.min()\n SBhi = SBmap.max()\n logSBlo = log10(SBlo)\n logSBhi = log10(SBhi)\n if vmin is None: vmin = logSBlo\n if vmax is None: vmax = logSBhi\n # print(logSBlo, logSBhi)\n\n # Trying to color pixels according to red/blue peak ratio\n red_blue = False\n if red_blue:\n assert False, \"This doesn't really seem to work\"\n # ncol = 256\n # if spec_ap == None: spec_ap = [1213*u.AA,1219*u.AA]\n # cmap = np.zeros((ncol,4))\n # for i in range(ncol):\n ired = (spec_ap[0] <= wavelength) & (wavelength <= lam0)\n iblue = (lam0 <= wavelength) & (wavelength <= spec_ap[1])\n redmap = np.sum(IFU[ired, :,:], axis=0).value\n bluemap = np.sum(IFU[iblue,:,:], axis=0).value\n # Fred = simps(spec1D[ired], x=wavelength[ired])\n # Fblue = simps(spec1D[iblue],x=wavelength[iblue])\n # img = Fblue / Fred\n img = log10(redmap / bluemap)\n vmin = img.min()\n vmax = img.max()\n cmap = 'bwr'\n else:\n img = logSBmap\n cmap = 'hot'\n\n plt.close('all')\n fig = plt.figure(figsize=(wtot,htot))\n ax1 = fig.add_axes([mleft,mbot,wfig,hfig+.13])\n # alphamap = (logSBmap-logSBlo) / (logSBhi - logSBlo)\n im = ax1.imshow(img, cmap=cmap, origin='lower',\n vmin=vmin, vmax=vmax,\n # alpha=alphamap,\n extent=[par['Rlim'][0].value,\n par['Rlim'][1].value,\n par['Rlim'][0].value,\n par['Rlim'][1].value],\n aspect='auto')\n\n backend = mpl.get_backend() \n if backend == 'TkAgg':\n wm = plt.get_current_fig_manager()\n wm.window.wm_geometry(window_position)\n else:\n if window_position is not None:\n print(\"WARNING: window positioning not possible with backend '\"+backend+\"'. 
Try 'TkAgg', or set 'window_position' to None.\")\n ax1.set_xlabel('x / kpc')\n ax1.set_ylabel('y / arcsec')\n Lbox = par['Lbox']\n Rtix = u.Quantity([-Lbox/2, -Lbox/4, 0*u.kpc, Lbox/4, Lbox/2])\n ax1.set_yticks(Rtix.value)\n angtix = ('{:4.1f} '*len(Rtix)).format(*(Rtix*par['as_kpc']).value).split()\n ax1.set_yticklabels(angtix)\n\n # Color bar\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n divider = make_axes_locatable(ax1)\n cax = divider.new_vertical(size=\"5%\", pad=pad, pack_start=False)\n fig.add_axes(cax)\n cb = fig.colorbar(im, cax=cax, orientation=\"horizontal\")\n cb.ax.xaxis.set_ticks_position('bottom')\n cb.ax.xaxis.set_label_position('top')\n cb.ax.tick_params(labelsize=8)\n cb.set_label(label='log(SB / $\\mathrm{erg}\\,\\mathrm{s}^{-1}\\,\\mathrm{cm}^{-2}\\,\\mathrm{arcsec}^{-1}$)',\n size=8)\n\n # SB profile\n r,SBprof = SB_profile(SBmap,par,spatial_aperture)\n if spatial_aperture is not None:\n apCircle = plt.Circle((spatial_aperture[0],spatial_aperture[1]),spatial_aperture[2], color='lime', fill=False)\n ax1.add_artist(apCircle)\n ax2 = fig.add_axes([mleft+wfig+1.3*dx,mbot,wfig,hfig])\n ax2.set_xlim([0,r.max()])\n ax2.set_ylim([1e-4*SBprof.max(),2*SBprof.max()])\n ax2.plot(r,SBprof)\n ax2.set_yscale('log')\n ax2.set_ylabel('log(SB / $\\mathrm{erg}\\,\\mathrm{s}^{-1}\\,\\mathrm{cm}^{-2}\\,\\mathrm{arcsec}^{-1}$)')\n\n # Spectrum\n ax3 = fig.add_axes([mleft+2*(wfig+1.3*dx),mbot,wfig,hfig])\n if ymax_spec is None: ymax_spec = 2*spec1D.value.max()\n ax3.set_ylim([0,ymax_spec])\n if comp_spec is not None:\n wavelength_comp,flux_comp = np.loadtxt(comp_spec,unpack=True)\n comp_spec = np.interp(wavelength,wavelength_comp,flux_comp)\n ax3.plot(wavelength,comp_spec,'-k',alpha=.5)\n ax3.plot(wavelength,spec1D,'-b')\n # ax3.scatter(wavelength,spec1D,color='r',s=5)\n ax3.plot(lam0*(1+par['z'])*np.array([1,1]), [0,spec1D.max().value],'k--',alpha=.25)\n ax3.set_ylabel('Flux / $\\mathrm{erg}\\,\\mathrm{s}^{-1}\\,\\mathrm{cm}^{-2}\\,\\mathrm{\\AA}^{-1}$' )\n\n if saveit is not None:\n figname = mother + '/' + galdir + '/' + view + '.' + saveit\n plt.savefig(figname,box_inches='tight',dpi=200)\n\n # Output\n # print(spec1D[100])\n # print(wavelength[100])\n Ftot = simps(spec1D.to(u.erg/u.s/u.AA/u.cm**2), x=wavelength.to(u.AA))\n # ftest1 = (spec1D*(1+par['z']) * par['dlam2']).sum().value\n # ftest2 = (SBmap * par['dOmega']).sum().value\n Ltot = Ftot * 4*pi*(par['dL'].to(u.cm).value)**2\n # SFR = Ltot / 1.1012e42\n # np.testing.assert_approx_equal(Ftot,ftest1,significant=2) #only if full aperture\n # np.testing.assert_approx_equal(Ftot,ftest2,significant=2) #only if full aperture\n # print('dlam2 =', par['dlam2'])\n # print('dL =', par['dL'])\n # print('Ftot =', Ftot)\n # print('ftest1 =', ftest1)\n # print('ftest2 =', ftest2)\n # print('Ltot =', Ltot)\n # print('SFR =', SFR)\n\n if get_value == 'Ftot':\n return Ftot\n elif get_value == 'Ltot':\n return Ltot", "title": "" } ]
[ { "docid": "674790eb55368a06bbb1c8dcc4ff1401", "score": "0.5732015", "text": "def show_obs(obs_index):\n if obs_index == 27:\n return '*'\n elif obs_index == 26:\n return ' '\n else:\n return chr(obs_index + ord('a'))", "title": "" }, { "docid": "754eabd45fa8e1e47de1319d01eb6429", "score": "0.5248873", "text": "def display(): \n plot.plot2D(getfile().get_experimental_data(), marker = 'o', title = 'File', xtitle = 'Strain ($\\epsilon$)', ytitle= 'Stress ($\\sigma$)')", "title": "" }, { "docid": "15aac089a43f4c9aad8a8bf1b291e4db", "score": "0.52391565", "text": "def do_show(self, arg=None):\r\n print(' %s samples are defined:' % len(self.samples))\r\n for s in self.samples:\r\n print(' ..%s has %d files:' % (s, len(self.samples[s])))\r\n for i, f in enumerate(self.samples[s]):\r\n print(' ....(%2d) %s' % (i+1, f))\r\n\r\n if arg and arg.lower() == 'verbose':\r\n print('\\n LC_run/TXT_file to sample mappings:')\r\n for i, (f, s) in enumerate(self.which_sample.items()):\r\n print(' ..(%2d) %s: %s' % (i+1, f, s))\r\n return", "title": "" }, { "docid": "49cfabbd2b45d83c3ff5ba7040c1b1df", "score": "0.5193085", "text": "def do_show(self, arg):\n (e,a0,a1,a2,lv) = parse_cfg(arg,[],\n 'arwildst',{'l':'0','r':'0','w':'120','s':' ','t':'50'})\n self.lasterr = e\n if e:\n return\n if a0 == 'config':\n self.onecmd('save > screen')\n return\n if a0 == 'log':\n tail = tstint(lv['t'],0)\n if not a1:\n a1 = 'event'\n (e,txt) = self.srv.get_log(a1,tail,a2)\n self.lasterr = e\n if not e and tail:\n print 'showing last %d lines, use -t option to change tail size\\n' % tail\n print txt\n return\n\n path = self.fullpath(a0)\n (e,txt) = self.srv.print_obj(path,a1,lv)\n self.lasterr = e\n print txt", "title": "" }, { "docid": "01092a1dfb385f21247dfa3f89e396fb", "score": "0.5173866", "text": "def view_atoms(self, ind):\n # | - view_atoms\n df = self.data_frame\n\n path_i = df.iloc[ind][\"path\"]\n rev_num = df.iloc[ind][\"revision_number\"].astype(str)\n full_path = path_i + \"_\" + rev_num\n\n print(full_path)\n\n try:\n atoms = df.iloc[ind][\"atoms_object\"][-1]\n view(atoms)\n except:\n print(\"Couldn't read atoms object\")\n # __|", "title": "" }, { "docid": "d310327fba9cd162d62873d56900f8d4", "score": "0.5121284", "text": "def showo(self):\n display(self.output)", "title": "" }, { "docid": "d848d6ee1025fcfe77042c4955bc1225", "score": "0.5083212", "text": "def display(sv, filename, X, Y, closing=False, tim=0): # required by Whand\r\n return", "title": "" }, { "docid": "e4798958bd9856b74a6c3d75541b5ca1", "score": "0.5056081", "text": "def show(self, ax=None, savefile=None, show=True, cmap=None,\n show_colorbar=True, clabel=\"Wavelength [A]\",\n labelkey=None, guess_airmass=None,**kwargs):\n import matplotlib.pyplot as mpl\n from .tools import figout, insert_ax, colorbar\n if ax is None:\n fig = mpl.figure(figsize=[5.5,4])\n ax = fig.add_axes([0.14,0.13,0.76,0.75])\n ax.set_xlabel(\"spaxels x-axis\", fontsize=\"medium\")\n ax.set_ylabel(\"spaxels y-axis\", fontsize=\"medium\")\n else:\n fig = ax.figure\n \n # - Colors\n if cmap is None:\n cmap = mpl.cm.viridis\n vmin, vmax = np.nanmin(self.lbda),np.nanmax(self.lbda)\n colors = cmap( (self.lbda-vmin)/(vmax-vmin) )\n\n\n # - data\n scd = ax.scatter(self.x, self.y, facecolors=colors, edgecolors=\"None\",\n lw=1., label=\"data\", **kwargs)\n # - error\n if self.dx is not None or self.dy is not None:\n ax.errorscatter(self.x, self.y, dx=self.dx, dy=self.dy,\n ecolor=\"0.7\", zorder=0)\n # - model \n xmodel, ymodel = self.model.get_model(self.lbda)\n scm = 
ax.scatter(xmodel, ymodel, edgecolors=colors, facecolors=\"None\",\n lw=2., label=\"model\", **kwargs)\n\n \n ax.legend(loc=\"best\", frameon=True, ncol=2)\n if labelkey is None:\n textlabel = \" ; \".join([\"%s: %.2f\"%(k,self.fitvalues[k]) for k in self.model.FREEPARAMETERS]) + \"\\n\"+\" %s: %.1f\"%(\"lbdaref\",self.model.adr.lbdaref) + \" | unit: %.2f\"%self.model._unit\n else:\n textlabel = \" ; \".join([\"%s: %.2f\"%(k,self.fitvalues[k]) for k in labelkey])\n \n if guess_airmass is not None:\n textlabel += \" (input airmass: %.2f)\"%guess_airmass\n \n ax.text(0.5,1.01, textlabel, fontsize=\"small\", transform=ax.transAxes, va=\"bottom\", ha=\"center\")\n if show_colorbar:\n axc = ax.insert_ax(\"right\", shrunk=0.89)\n axc.colorbar(cmap, vmin=vmin, vmax=vmax,\n label=clabel, fontsize=\"medium\")\n \n fig.figout(savefile=savefile, show=show)\n return {\"ax\":ax, \"fig\":fig, \"plot\":[scd,scm]}", "title": "" }, { "docid": "7714d246c12dbc4b829859b3efd8f7bb", "score": "0.49717873", "text": "def plot_streamfunctions(config, trans, name='simulated', basename='', obs=None,lw=4):\n\n # Extract variables from data objects\n z = trans.variables['z'][:]\n sf_rapid = trans.variables['sf_rapid'][:].mean(axis=0)\n sf_model = trans.variables['sf_model'][:].mean(axis=0)\n sfmax_rapid = sf_rapid.max()\n zmax_rapid = z[np.argmax(sf_rapid)]\n sfmax_model = sf_model.max()\n zmax_model = z[np.argmax(sf_model)]\n\n # Create labels\n model_label = ('%s (model velocities) (max=%4.1f Sv, depth=%6i m)' %\n (name, sfmax_model, zmax_model))\n rapid_label = ('%s (RAPID approx) (max=%4.1f Sv, depth=%6i m)' %\n (name, sfmax_rapid, zmax_rapid))\n\n # Add data to axis\n fig = plt.figure(figsize=(6,8))\n plt.plot(sf_model, -z,'-', color=c1, linewidth=lw, label=model_label)\n plt.plot(sf_rapid, -z,'-', linewidth=lw, color=c2, label=rapid_label)\n\n # Plot optional observational data\n if obs is not None:\n z_obs = obs.z\n sf_obs = obs.sf.mean(axis=0)\n sfmax_obs = sf_obs.max()\n zmax_obs = z_obs[np.argmax(sf_obs)]\n obs_label = ('RAPID observations (max=%4.1f Sv, depth=%6i m)' %\n (sfmax_obs, zmax_obs))\n plt.plot(sf_obs, -z_obs, '-k', linewidth=lw, label=obs_label)\n \n # Annotate plot\n plt.title('Atlantic overturning streamfunction at 26N')\n plt.xlabel('Sverdrups')\n plt.ylabel('Depth (m)')\n plt.legend(loc='best', fontsize=8) \n\n # Save plot\n plt.tight_layout()\n savef = basename + 'overturning_streamfunctions_at_26n.png'\n print('SAVING: {}'.format(savef))\n fig.savefig(savef, dpi=300)\n plt.close()", "title": "" }, { "docid": "71539803562dccb634f53f11503cf8d5", "score": "0.48961893", "text": "def print_ave_offsets(self):\n\n\n wind = self.plsm['Wind'].index.min()\n dscvr= np.percentile(self.plsm['DSCOVR_offset'].offsets,(5,95))*1.e-9/60.\n ace = np.percentile(self.plsm['ACE_offset'].offsets,(5,95))*1.e-9 /60.\n soho = np.percentile(self.plsm['SOHO_offset'].offsets,(5,95))*1.e-9 /60.\n print('####################################################')\n print('DSCVOR-Wind (5%,95%) = {0:2.1f},{1:2.1f}m'.format(*dscvr))\n print('ACE -Wind (5%,95%) = {0:2.1f},{1:2.1f}m'.format(*ace))\n print('SOHO -Wind (5%,95%) = {0:2.1f},{1:2.1f}m'.format(*soho))\n\n print('THEMIS Coordinates and Time')\n print(self.plsm[self.earth_craft[0]].index.max())\n print(self.plsm[self.earth_craft[0]][['GSEx','GSEy','GSEz']].mean()/self.Re)\n print('####################################################')", "title": "" }, { "docid": "45eaade6417fbb41ca26644cead4b4d1", "score": "0.48830724", "text": "def detail_recording(x, fs):\n st.write('In 
order to create features for our model, we look at the cough segments recognized in your audio, as \\\n displayed below.')\n # Display audio\n inject_segmented_spectrogram(x, fs)\n st.write('An important step we take before analyzing your audio is applying a Fourier transformation which \\\n in simple terms displays the frequencies that are present in your cough in a logarithmic scale.\\\n Displayed below is what your audio looks like after this transformation.')\n inject_mel_spectrogram(x, fs)", "title": "" }, { "docid": "42df0194e79685643392f05ac489392f", "score": "0.48250514", "text": "def display(self):\n print(\"Power spectrum {}\".format(self.label))", "title": "" }, { "docid": "bb47df40dda28bf38f28b452279eedaa", "score": "0.4821721", "text": "def table_ao_observations():\n\n outfile = plot_dir + 'tab_ao_obs.tex'\n \n _out = open(outfile, 'w')\n _out.write('% Code to make figure:\\n')\n _out.write('% jlu_python/\\n')\n _out.write('% from jlu.microlens import paper_2015\\n')\n _out.write('% paper_2015.table_ao_observations()\\n')\n _out.write('%\\n')\n _out.write('\\\\begin{deluxetable*}{lrrrrrrrrr}\\n')\n _out.write('\\\\tabletypesize{\\\\footnotesize}\\n')\n _out.write('\\\\tablewidth{0pt}\\n')\n _out.write('\\\\tablecaption{AO Observations}\\n')\n _out.write('\\\\tablehead{\\n')\n _out.write('Event & RA (J2000) & Dec (J2000) & Date & $N_{\\mathrm{exp}}$ & $N_{\\star}$ & \\n')\n _out.write('Strehl & FWHM & $\\sigma_{\\mathrm{pos}}$ & $\\sigma_{\\mathrm{aln}}$ \\\\\\\\ \\n')\n _out.write('& [hr] & [deg] & [UT] & & & & [mas] & [mas] & [mas]\\n')\n _out.write('}\\n')\n _out.write('\\\\startdata\\n')\n _out.write('\\\\\\\\\\n')\n\n targ_name = {'ob110022': 'OB110022', 'ob110125': 'OB110125', 'ob120169': 'OB120169'}\n ra = {'ob110022': '17:53:17.93', 'ob110125': '18:03:32.95', 'ob120169': '17:49:51.38'}\n dec = {'ob110022': '-30:02:29.3', 'ob110125': '-29:49:43.0', 'ob120169': '-35:22:28.0'}\n date = {'ob110022': ['May 25, 2011', 'July 7, 2011', 'June 23, 2012', 'July 10, 2012', 'April 30, 2013', 'July 15, 2013'],\n 'ob110125': ['May 23, 2012', 'June 23, 2012', 'July 10, 2012', 'April 30, 2013', 'July 15, 2013'],\n 'ob120169': ['May 23, 2012', 'June 23, 2012', 'July 10, 2012', 'April 30, 2013', 'July 15, 2013']}\n Nexp = {'ob110022': [27, 16, 40, 34, 22, 30],\n 'ob110125': [21, 33, 18, 48, 39],\n 'ob120169': [ 5, 10, 22, 31, 11]}\n Nstar = {'ob110022': [285, 178, 701, 717, 485, 636],\n 'ob110125': [104, 327, 221, 332, 329],\n 'ob120169': [ 35, 122, 192, 207, 84]}\n strehl = {'ob110022': [0.14, 0.13, 0.24, 0.26, 0.24, 0.34],\n 'ob110125': [0.10, 0.36, 0.21, 0.29, 0.36],\n 'ob120169': [0.10, 0.24, 0.29, 0.29, 0.26]}\n fwhm = {'ob110022': [ 91, 69, 70, 68, 71, 60],\n 'ob110125': [ 96, 57, 70, 64, 57],\n 'ob120169': [110, 69, 64, 61, 74]}\n\n for tt in range(len(final_targets)):\n target = final_targets[tt]\n \n err_p, err_a = plot_alignment_errors(target)\n\n for ee in range(len(date[target])):\n if ee == 0:\n _out.write('{0:s} & {1:s} & {2:s} \\n'.format(targ_name[target], ra[target], dec[target]))\n _out.write(' & ')\n else:\n _out.write('& & & ')\n \n \n fmt = '{0:15s} & {1:2d} & {2:3d} & {3:4.2f} & {4:3d} & {5:4.2f} & {6:4.2f} \\\\\\\\ \\n'\n _out.write(fmt.format(date[target][ee], Nexp[target][ee], Nstar[target][ee],\n strehl[target][ee], fwhm[target][ee], err_p[ee], err_a[ee]))\n \n if tt != (len(final_targets) - 1):\n _out.write('\\\\\\\\ \\n')\n \n _out.write('\\\\enddata\\n')\n _out.write('\\\\tablenotetext{}{$N_{\\star}$: Number of stars detected. 
Strehl and\\n')\n _out.write('FWHM are the average values over all individual\\n')\n _out.write('exposures. $\\sigma_{\\mathrm{pos}}$ and $\\sigma_{\\mathrm{aln}}$ are\\n')\n _out.write('calculated after cross-epoch transformation from the median \\n')\n _out.write('of all stars with r$<$4'' and Kp$<$19 mag. \\n')\n _out.write('}\\n')\n _out.write('\\\\label{tb:AOobs}\\n')\n _out.write('\\\\end{deluxetable*}\\n')\n\n return", "title": "" }, { "docid": "5bf66ee4ac532a79929d4d5657859810", "score": "0.4795336", "text": "def mostrar(self):\n\t\tfrom os import sys\n\t\t# No conviene tener límites que cambian, queda muy feo\n\t\t#m = min(self.posicion)\n\t\t#M = max(self.posicion)\n\t\tm = -1.0\n\t\tM = 1.0\n\t\tyres = 12\n\t\tpaso = (M-m)/yres\n\t\tfor n in xrange(yres):\n\t\t\ty = M - n*paso\n\t\t\t#print y\n\t\t\tfor i in xrange(self.resolucion):\n\t\t\t\tsys.stdout.write([' ','-'][abs(self.posicion[i]-y) < paso/2.0])\n\t\t\tprint\n\t\t\t\n\t\tprint", "title": "" }, { "docid": "31b4b79d89f19f20e7c2186f3de1a628", "score": "0.47855893", "text": "def show(self, mesg='', verb=1):\n if mesg: mesg = ' (%s)' % mesg\n print(\"AFNI filename%s:\" % mesg)\n if verb > 1: print(\" curdir : %s\" % os.path.abspath(os.curdir))\n\n print(\" initial : %s\" % self.initname)\n if verb > 1: print(\" name : %s\" % self.ppve())\n if verb > 1: print(\" path : %s\" % self.path)\n\n print(\" prefix : %s\" % self.prefix) \n print(\" view : %s\" % self.view)\n print(\" exten. : %s\" % self.extension)\n print(\" type : %s\" % self.type)\n print(\" On Disk : %d\" % self.exist())\n print(\" Row Sel : %s\" % self.rowsel)\n print(\" Col Sel : %s\" % self.colsel)\n print(\" Node Sel: %s\" % self.nodesel)\n print(\" RangeSel: %s\" % self.rangesel)", "title": "" }, { "docid": "998ca6657c8afca3e99c9c331666b17d", "score": "0.47684094", "text": "def writeDisplay(self):\n for segment in self.disp:\n segment.writeDisplay()", "title": "" }, { "docid": "5db0dc5d2e11aeaba50aa4f65316cce9", "score": "0.47622776", "text": "def show():", "title": "" }, { "docid": "9148a639ec92a92578e87d4ca0f04df1", "score": "0.4745893", "text": "def obs_metadata(commands):\n return ObservationMetaData(pointingRA=commands['rightascension'],\n pointingDec=commands['declination'],\n mjd=commands['mjd'],\n rotSkyPos=commands['rotskypos'],\n bandpassName=commands['bandpass'],\n m5=LSSTdefaults().m5(commands['bandpass']),\n seeing=commands['seeing'])", "title": "" }, { "docid": "c27cc6361be92364311f4be9972d7544", "score": "0.47407678", "text": "def do_show(self, args):\n\t arg = args.split()\n\t\n\t if arg[0] == \"test\":\n\t print test\n\t\n\t if arg[0] == \"ver\":\n\t print \"Version 0.2, Touko\"\n\t\n\t else:\n\t for n in var:\n\t\t if arg[0] == n:\n\t\t print var[n][arg[0]]", "title": "" }, { "docid": "d6837123e3a8ca6a1343067331f424c8", "score": "0.47342396", "text": "def plot_magnetic_isochrone(ax, ax2, age=None, c='k', ls='-', lw=1, root='magnetic_isochrones/MagneticUpperSco-master/models/iso/mag/'):\n filename = os.path.join(root, 'dmestar_%07.1fmyr_z+0.00_a+0.00_phx_magBeq.iso'%age)\n magiso10Myr = magiso.get_isochrone(filename=filename)\n ax.plot(magiso10Myr[:,0], magiso10Myr[:,1], c=c, lw=lw, ls=ls, label='F16, magnetic, %g Myr'%age)\n ax2.plot(magiso10Myr[:,0], magiso10Myr[:,1], c=c, lw=lw, ls=ls)", "title": "" }, { "docid": "e6d23c9b148c1ec7658fe054d59f123a", "score": "0.47262955", "text": "def molecule_info(mol, show=False):\n intro_string = f'{MolToSmiles(mol)} Molecule info ' + '.' 
* 50\n print('\\n' + intro_string + '\\n')\n print(mol.Debug())\n print('\\nAdjacency Matrix: \\n', GetAdjacencyMatrix(mol))\n print('.' * len(intro_string))\n if show:\n show_mol(mol)", "title": "" }, { "docid": "41f41fde14e953a86d7f818a970ad94d", "score": "0.47244027", "text": "def display(self):\n self.maze.display(self.path)", "title": "" }, { "docid": "429c1cc6fc1927629e000b39ac675aef", "score": "0.47190535", "text": "def source_reciever_plot(st, **kwargs):\n save = kwargs.get('save',False)\n topo = kwargs.get('topo',False)\n mt = kwargs.get('moment_tensor',False)\n w = kwargs.get('width',(900000,900000))\n title = kwargs.get('title',True)\n proj = kwargs.get('proj','aeqd')\n\n m = mapplot(proj,lat_0=st[0].stats.sac['evla'],lon_0=st[0].stats.sac['evlo'])\n m.drawparallels(np.arange(-80.,81.,20.),labels=[True])\n m.drawmeridians(np.arange(-180.,181.,20.))\n source_coord = add_source(st,m)\n coord_list = stat_coord(st)\n add_station(coord_list,m)\n for ii in range(0,len(coord_list[0]),2):\n m.drawgreatcircle(source_coord[1],source_coord[0],\n coord_list[1][ii],coord_list[0][ii],c='k',lw=0.3,alpha=0.3)\n title = os.getcwd().split('/')\n ax = plt.gca()\n x,y = m(st[0].stats.sac['evlo'],st[0].stats.sac['evla'])\n\n if mt == False:\n try:\n b = beachball(st[0],xy=(x,y),plot='map')\n b.set_zorder(2)\n ax.add_collection(b)\n except KeyError:\n print('No focal mechanism found')\n else:\n b = Beach(mt,width=w,xy=(x,y))\n b.set_zorder(2)\n ax.add_collection(b)\n\n\n if title == True:\n ax.set_title('{} \\n Depth (km): {} '.format(\n title[5],round(st[0].stats.sac['evdp'],3)))\n if topo != False:\n myplot.basemap.drawtopography(m,alpha=0.5,cmap=matplotlib.cm.gray)\n\n if save != False:\n plt.savefig(save+'/map.pdf',format='pdf')\n if save == False:\n plt.show()", "title": "" }, { "docid": "11b209e9e91f5a1717011cbfc3484acd", "score": "0.47126386", "text": "def display_coordinates(self):\n self.check_read_data()\n for i in range(self.nr_datapoints):\n if self.north_pos:\n print(\"N\", self.north_position[i], end=\" \")\n if self.south_pos:\n print(\"S\", self.south_position[i], end=\" \")\n if self.east_pos:\n print(\" E\", self.east_position[i])\n if self.west_pos:\n print(\" W\", self.west_position[i])", "title": "" }, { "docid": "0f44485fc14175049c07d841e6d9c53f", "score": "0.47099888", "text": "def display():\n\n #Create a plot of current Osciolloscope Output\n g = Gnuplot.Gnuplot(debug=1)\n #Setup title, axis labels etc.\n setupScopeScreen(g)\n\n g.plot([[0,0],[1,1],[2,2],[3,3]])", "title": "" }, { "docid": "c1ac79e15c9390f9879aff4de7bca637", "score": "0.46825156", "text": "def plot_eos(eos_pk):\n import pylab as pl\n from aiida.orm import load_node\n eos_calc=load_node(eos_pk)\n eos_result=eos_calc.out.result\n raw_data = eos_result.dict.eos_data\n \n data = []\n for V, E, units in raw_data:\n data.append((V,E))\n \n data = np.array(data)\n params, covariance = fit_birch_murnaghan_params(data[:,0],data[:,1])\n \n vmin = data[:,0].min()\n vmax = data[:,0].max()\n vrange = np.linspace(vmin, vmax, 300)\n\n pl.plot(data[:,0],data[:,1],'o')\n pl.plot(vrange, birch_murnaghan(vrange, *params))\n\n pl.xlabel(\"Volume (ang^3)\")\n # I take the last value in the list of units assuming units do not change\n pl.ylabel(\"Energy ({})\".format(units))\n pl.show()", "title": "" }, { "docid": "cb91dd7b99db0648dbfa42e37dba4bc4", "score": "0.4679431", "text": "def Mobs_list_display(self):\n my_mobs_list = self.Mobs_list_make()\n print(\" ID - Type (Index)\")\n print(\"---------------------\")\n previous_Mobs = 
0\n for each_mobs in my_mobs_list:\n # print each_mobs[0] + ' - ' + each_mobs[2] + ' ('+ each_mobs[1] +')'\n Mobs_size = int(each_mobs[1]) - previous_Mobs\n \n print(('{:>5} - {} ({}), size:{}' .format(each_mobs[0], each_mobs[2], each_mobs[1], Mobs_size)))\n previous_Mobs = int(each_mobs[1])", "title": "" }, { "docid": "98cdf37a325d25c4aaa5655a37a6fa64", "score": "0.46726915", "text": "def show(self, *args):\n return self.to_measure().show(*args)", "title": "" }, { "docid": "3ce17d498ade9ef4b51809b6c384e9c6", "score": "0.46670252", "text": "def show(filename):\n track = Track.read(filename)\n track.show()", "title": "" }, { "docid": "2aa95daac0072cf0d99d2e041fcff3e2", "score": "0.46634898", "text": "def obsfig(wave, obsvec, specvecs, unc=None,\n labelprefix='Mock Observed', fax=None):\n if fax is None:\n ofig, oax = pl.subplots()\n else:\n ofig, oax = fax\n # Plot posterior samples of the observed spectrum\n for i, specs in enumerate(specvecs):\n if i==0:\n label = 'Posterior samples'\n else:\n label = None\n oax.plot(wave, specs[3],\n color='green', alpha = 0.3, label=label)\n #plot the observation itself\n if unc is not None:\n x, y, e = wave, obsvec, unc\n oax.fill_between(x, y-e, y+e, facecolor='grey', alpha=0.3)\n oax.plot(wave, obsvec, color='black', label=labelprefix +' Spectrum',\n linewidth=1.0, alpha=1.0)\n return ofig, oax", "title": "" }, { "docid": "c1aa4ce49818af26540daeacda95e22f", "score": "0.46584958", "text": "def show(*molecules, arrange=True, nx=5, distance=(-10, -10), camera='perspective', caps=True, save=None):\n atoms, coordinates, group_numbers = arrange_molecules(molecules, arrange=arrange, nx=nx, distance=distance)\n\n # nglview require atom names in all caps to color them properly\n if caps:\n atoms = [name.upper() for name in atoms]\n\n if save is None:\n temp_pdb_file = tempfile.NamedTemporaryFile(mode='w+', suffix='.pdb')\n write_pdb(temp_pdb_file, atoms, coordinates, group=group_numbers)\n view = nglview.show_structure_file(temp_pdb_file.name)\n temp_pdb_file.close()\n else:\n with open(save, 'w') as save_file:\n write_pdb(save_file, atoms, coordinates, group=group_numbers)\n view = nglview.show_structure_file(save)\n\n view.camera = camera\n return view", "title": "" }, { "docid": "47234a4bbca5dd7489ab72451b5e45b7", "score": "0.4650998", "text": "def show_2d(self, display=True):\n # Will do this someday\n pass", "title": "" }, { "docid": "18b342bac7c39982eff5677c91bc6980", "score": "0.4649894", "text": "def plot_streamfunction_hovmollers(config, trans, name='simulated', basename='', obs=None):\n\n # Extract variables from data objects\n dts = utils.get_ncdates(config, trans)\n z = trans.variables['z'][:]\n sf_rapid = trans.variables['sf_rapid'][:]\n sf_model = trans.variables['sf_model'][:]\n \n # Set up figure\n fig = plt.figure(figsize=(8,12))\n cmap=plt.cm.viridis\n levels = np.arange(15) * 2 - 4\n norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)\n \n # Add model data to axis\n fig.add_subplot(3,1,1)\n plt.pcolormesh(dts, -z, sf_model.transpose(), shading='nearest', cmap=cmap, norm=norm)\n plt.colorbar(orientation='vertical')\n plt.title('Overturning streamfunction at 26N in %s (model velocities)' % name)\n plt.xlabel('Dates')\n plt.ylabel('Depth (m)')\n \n # Add model data to axis (RAPID approx)\n fig.add_subplot(3,1,2)\n plt.pcolormesh(dts, -z, sf_rapid.transpose(), shading='nearest', cmap=cmap, norm=norm)\n plt.colorbar(orientation='vertical')\n plt.title('Overturning streamfunction at 26N in %s (RAPID approx)' % name)\n plt.xlabel('Dates')\n 
plt.ylabel('Depth (m)')\n\n # Add optional observed data to axis\n if obs is not None:\n fig.add_subplot(3,1,3)\n plt.pcolormesh(obs.dates, -obs.z, obs.sf.transpose(), shading='nearest', cmap=cmap, norm=norm)\n plt.colorbar(orientation='vertical')\n plt.title('Overturning streamfunction at 26N from RAPID array')\n plt.xlabel('Dates')\n plt.ylabel('Depth (m)')\n \n # Save plot\n plt.tight_layout()\n savef = basename + 'overturning_streamfunction_at_26n_hovmoller.png'\n print('SAVING: {}'.format(savef))\n fig.savefig(savef, dpi=300)\n plt.close()", "title": "" }, { "docid": "16aafb5851961159183c7afa46eb0211", "score": "0.46492985", "text": "def show_mode(self,n,what='amplitude'):\n if self.dim_flag=='':\n print(\"the system kernel is empty \")\n sys.exit(1)\n elif self.dim_flag=='1D':\n plt.figure()\n if what=='amplitude':\n plt.plot(self.x1,np.abs(self.v[:,n]))\n elif what=='phase':\n plt.plot(self.x1,np.angle(self.v[:,n]))\n elif what=='intensity':\n plt.plot(self.x1,np.abs((self.v[:,n])**2))\n else:\n print(\"what must be 'amplitude','intensity' or 'phase'\")\n \n elif self.dim_flag=='2D':\n npts=np.size(self.x1)\n tem=self.v[:,n].reshape(npts,npts);\n plt.figure()\n if what=='amplitude': \n plt.pcolor(self.x1,self.y1,np.abs(tem))\n plt.colorbar()\n elif what=='phase':\n plt.pcolor(self.x1,self.y1,np.angle(tem))\n plt.colorbar()\n elif what=='intensity':\n plt.pcolor(self.x1,self.y1,np.abs(tem**2))\n plt.colorbar()\n else:\n print(\"what must be 'amplitude','intensity' or 'phase'\")\n \n \n \n return", "title": "" }, { "docid": "df06de85d7e5f8719ea3e42ade6c899b", "score": "0.46423554", "text": "def showall(self, loc=0):\n self.showts(loc=loc, figure=1)\n self.showfit(loc=loc, figure=2)\n self.showdeviation(loc=loc, figure=3)", "title": "" }, { "docid": "f821b6fcda31e11eb9ba4b370f0c06fd", "score": "0.46092078", "text": "def show_mol(mol, name=False):\n # Draw the molecule\n plt.imshow(Draw.MolToImage(mol))\n # Add the name if required\n if name:\n name = smiles_to_name(MolToSmiles(mol))\n plt.suptitle(name, fontsize=20)\n # Delete the axis and improve the visualization\n plt.axis('off')\n plt.tight_layout()\n plt.show()", "title": "" }, { "docid": "1c0888b459289522ba05baaefcd99b4c", "score": "0.46066645", "text": "def show_file(args, filename, auf, swf):\n if args.list:\n print(filename)\n return\n\n flags = []\n if auf:\n flags.append('--assume-unchanged')\n if swf:\n flags.append('--skip-worktree')\n reset = 'reset ' if args.reset else ''\n print('{}: {}{}'.format(filename, reset, ' '.join(flags)))", "title": "" }, { "docid": "54bb108115614787b102dec53ec20429", "score": "0.46027616", "text": "def display(param=None, scaling=1.0):\n\n # Test if the current pipe exists.\n check_pipe()\n\n # Test if the sequence data is loaded.\n if not exists_mol_res_spin_data():\n raise RelaxNoSequenceError\n\n # Print the data.\n write_data(param=param, file=sys.stdout, scaling=scaling)", "title": "" }, { "docid": "18121a9ca81ef6d7811663da50b2cb2d", "score": "0.45930666", "text": "def plot_cmd(table):\n y = table['g_mean_psf_mag']\n x = table['g_mean_psf_mag'] - table['i_mean_psf_mag']\n\n plt.plot(x, y, 'ko', markersize=0.3, alpha=0.3)\n\n plt.xlim([0, 1.5])\n plt.ylim([14, 22])\n plt.gca().invert_yaxis()\n\n plt.ylabel('$g_0$')\n plt.xlabel('$(g-i)_0$')", "title": "" }, { "docid": "0a64e62aad52939fdcbc81b4600f983b", "score": "0.4589208", "text": "def info(self,showHeader=False):\n arcmin = 180*60./numpy.pi\n print \"Dimensions (Ny,Nx) = (%d,%d)\"%(self.Ny,self.Nx)\n print \"Pixel Scales: (%f,%f) 
arcmins. \"%(self.pixScaleY*arcmin,self.pixScaleX*arcmin)\n print \"Map Bounds: [(x0,y0), (x1,y1)]: [(%f,%f),(%f,%f)] (degrees)\"%(self.x0,self.y0,self.x1,self.y1)\n print \"Map Bounds: [(x0,y0), (x1,y1)]: [(%s,%s),(%s,%s)]\"%\\\n (astCoords.decimal2hms(self.x0,':'),\\\n astCoords.decimal2dms(self.y0,':'),\\\n astCoords.decimal2hms(self.x1,':'),\\\n astCoords.decimal2dms(self.y1,':'))\n \n print \"Map area = %f sq. degrees.\"%(self.area)\n print \"Map mean = %f\"%(self.data.mean())\n print \"Map std = %f\"%(self.data.std())\n \n if showHeader:\n print \"Map header \\n %s\"%(self.header)", "title": "" }, { "docid": "b1756c089e968913ca68000b63f08a7e", "score": "0.45854902", "text": "def show_magicians(magicians):\n\tfor magician in magicians:\n\t\tprint(magician.title())", "title": "" }, { "docid": "b1756c089e968913ca68000b63f08a7e", "score": "0.45854902", "text": "def show_magicians(magicians):\n\tfor magician in magicians:\n\t\tprint(magician.title())", "title": "" }, { "docid": "a02c413a9c5ca45dcfb13f6d87568fef", "score": "0.45754507", "text": "def do_show(self, args):\n\n if args.lower() in ['c', 'copying']:\n # Show the conditions.\n print(copying_str)\n elif args.lower() in ['w', 'warranty']:\n # Show the warranty.\n print(warranty_str)\n else:\n # Show the entire license.\n print('%s%s' % (copying_str, warranty_str))", "title": "" }, { "docid": "2a15a94956f4eecd5f69dd3190584274", "score": "0.45727122", "text": "def show(self, state = True):\r\n arg_str = p2e._util._convert_args_to_string(\"movie.show\", state)\r\n p2e._app.Exec(arg_str)", "title": "" }, { "docid": "953125e748f47055e5ba544491951099", "score": "0.45617634", "text": "def show(self, qid=None, nonzero=False, preal=0):\n if qid is None or len(qid) == 0:\n digits = self.qubit_num\n else:\n digits = len(qid)\n\n vec = self.get_amp(qid=qid)\n\n if preal >= 0:\n exp_i_phase = 1.+0.j\n if abs(vec[preal].imag) > cfg.EPS:\n exp_i_phase = vec[preal] / abs(vec[preal])\n elif vec[preal].real < 0.0:\n exp_i_phase = -exp_i_phase\n vec = vec / exp_i_phase\n\n for i, v in enumerate(vec):\n bits = \"{:0{digits}b}\".format(i, digits=digits)\n absval2 = abs(v) * abs(v)\n if absval2 < cfg.EPS:\n bar_len = 0\n else:\n bar_len = int(absval2 / 0.1 + 1.5)\n bar_str = \"|\" + \"+\" * bar_len\n if nonzero is True and absval2 < cfg.EPS:\n continue\n else:\n print(\"c[{}] = {:+.4f}{:+.4f}*i : {:.4f} {}\"\n .format(bits, v.real, v.imag, abs(v)**2, bar_str))", "title": "" }, { "docid": "06f7aaf5e70e4c010b5ea8e495f26228", "score": "0.45592082", "text": "def plot(self, outdir='', stacode='', ampfactor=40, delta=0.025, longitude='', latitude='', browseflag=False, saveflag=True,\\\n obsflag=True, diffflag=False, repflag=True, rep0flag=True, rep1flag=True, rep2flag=True):\n totalpn = obsflag+diffflag+repflag+rep0flag+rep1flag+rep2flag\n cpn = 1\n plt.close('all')\n fig = plb.figure(num=1, figsize=(12.,8.), facecolor='w', edgecolor='k')\n ylabelflag = False\n if obsflag:\n ax = plt.subplot(1, totalpn,cpn)\n cpn = cpn+1\n self.obsST.ploths(ampfactor=ampfactor, delta=delta, title='Observed Refs', ax=ax)\n plt.ylabel('Backazimuth(deg)')\n ylabelflag = True\n if diffflag:\n ax = plt.subplot(1, totalpn,cpn)\n cpn = cpn+1\n self.diffST.ploths(ampfactor=ampfactor, delta=delta, title='Residual Refs', ax=ax)\n if not ylabelflag:\n plt.ylabel('Backazimuth(deg)')\n if repflag:\n ax = plt.subplot(1, totalpn,cpn)\n cpn = cpn+1\n self.repST.ploths(ampfactor=ampfactor, delta=delta, title='Predicted Refs', ax=ax)\n if not ylabelflag:\n plt.ylabel('Backazimuth(deg)')\n if 
rep0flag:\n ax = plt.subplot(1, totalpn,cpn)\n cpn = cpn+1\n self.repST0.ploths(ampfactor=ampfactor, delta=delta, title='A0 Refs', ax=ax)\n if not ylabelflag:\n plt.ylabel('Backazimuth(deg)')\n if rep1flag:\n ax = plt.subplot(1, totalpn,cpn)\n cpn = cpn+1\n self.repST1.ploths(ampfactor=ampfactor, delta=delta, title='A1 Refs', ax=ax)\n if not ylabelflag:\n plt.ylabel('Backazimuth(deg)')\n if rep2flag:\n ax = plt.subplot(1, totalpn,cpn)\n self.repST2.ploths(ampfactor=ampfactor, delta=delta, title='A2 Refs', ax=ax)\n if not ylabelflag:\n plt.ylabel('Backazimuth(deg)')\n fig.suptitle(stacode+' Longitude:'+str(longitude)+' Latitude:'+str(latitude), fontsize=15)\n if browseflag:\n plt.draw()\n plt.pause(1) # <-------\n raw_input(\"<Hit Enter To Close>\")\n plt.close('all')\n if saveflag and outdir!='':\n fig.savefig(outdir+'/'+stacode+'_COM.ps', orientation='landscape', format='ps')\n return", "title": "" }, { "docid": "5ea659eed4f6356630b8a5658d2c0e9c", "score": "0.45528254", "text": "def display(xmax, ymax, history, seed=0):\n head = \" \".join([str(x%10) for x in range(xmax+1)])\n head = head.replace('0', '*')\n result = [\" \"+head]\n for y in range(ymax+1):\n line = [str(y%10)]\n for x in range(xmax+1):\n if (x, y) in history:\n line.append('O')\n elif is_open(x, y, seed):\n line.append('.')\n else:\n line.append('#')\n line.append(str(y%10))\n result.append(\" \".join(line))\n result.append(\" \"+head)\n return \"\\n\".join(result)", "title": "" }, { "docid": "4a67cc64ba8be69e949d184af0506a53", "score": "0.45523393", "text": "def showAniso(self, targets, color=None, scale=1.0, smoothing=1,\n\t\t\t\tshowEllipsoid=True, ellipsoidTransparency=None,\n\t\t\t\taxisColor=None, axisFactor=None, axisThickness=0.01,\n\t\t\t\tellipseColor=None, ellipseFactor=None, ellipseThickness=0.02):\n\t\tmolMap = self._makeMolMap(targets)\n\t\tself.removeAniso(molMap)\n\t\tnoneShowing = not self._surfMap\n\t\tnewlyShown = 0\n\t\tfor m, atoms in molMap.items():\n\t\t\tif not m.display:\n\t\t\t\tcontinue\n\t\t\tsurfMap = self._surfMap.setdefault(m, {})\n\t\t\tif surfMap:\n\t\t\t\tmodel = surfMap[\"model\"]\n\t\t\telse:\n\t\t\t\timport _surface\n\t\t\t\tmodel = _surface.SurfaceModel()\n\t\t\t\tsurfMap[\"model\"] = model\n\t\t\t\topenModels.add([model], sameAs=m, hidden=True)\n\t\t\tfor a in atoms:\n\t\t\t\tif not a.display or a.hide:\n\t\t\t\t\tcontinue\n\t\t\t\tif not hasattr(a, \"anisoU\"):\n\t\t\t\t\tcontinue\n\t\t\t\tnoneColor = a.color\n\t\t\t\tif noneColor is None:\n\t\t\t\t\tnoneColor = a.molecule.color\n\t\t\t\tif showEllipsoid:\n\t\t\t\t\tif color is None:\n\t\t\t\t\t\t_ellipsoidColor = noneColor\n\t\t\t\t\telse:\n\t\t\t\t\t\t_ellipsoidColor = color\n\t\t\t\telse:\n\t\t\t\t\t_ellipsoidColor = None\n\t\t\t\tif axisFactor is None:\n\t\t\t\t\t_axisColor = None\n\t\t\t\telif axisColor is None:\n\t\t\t\t\t_axisColor = noneColor\n\t\t\t\telse:\n\t\t\t\t\t_axisColor = axisColor\n\t\t\t\tif ellipseFactor is None:\n\t\t\t\t\t_ellipseColor = None\n\t\t\t\telif ellipseColor is None:\n\t\t\t\t\t_ellipseColor = noneColor\n\t\t\t\telse:\n\t\t\t\t\t_ellipseColor = ellipseColor\n\t\t\t\tsurfMap[a] = self._makePieces(model, a, (_ellipsoidColor,\n\t\t\t\t\t_axisColor, _ellipseColor), ellipsoidTransparency, scale,\n\t\t\t\t\tsmoothing, axisFactor, ellipseFactor, axisThickness,\n\t\t\t\t\tellipseThickness)\n\t\t\t\tnewlyShown += 1\n\t\t\t\t# can't look up 'molecule' in deleted atoms,\n\t\t\t\t# so remember it...\n\t\t\t\tself._atomMolLookup[a] = a.molecule\n\t\tif noneShowing and self._surfMap:\n\t\t\tself._handlerID = 
triggers.addHandler('Atom',\n\t\t\t\t\t\t\tself._atomCB, None)\n\t\treturn newlyShown", "title": "" }, { "docid": "d9722d856d29fc4566fed8daa5a6a7be", "score": "0.4552072", "text": "def show_magicians(magicians):\n for magician in magicians:\n print(magician.title())", "title": "" }, { "docid": "d9722d856d29fc4566fed8daa5a6a7be", "score": "0.4552072", "text": "def show_magicians(magicians):\n for magician in magicians:\n print(magician.title())", "title": "" }, { "docid": "fd6c531a74418cba550204b810534a57", "score": "0.45516902", "text": "def display_Maze(self):\n for y in range(self.height_max):\n for x in range(self.width_max):\n position = x, y\n char = \"\"\n if position in self.walls:\n char = \"1\"\n elif position in self.paths:\n if position == self.hero.position:\n char = \"s\"\n elif position == self.guardian_position:\n char = \"f\"\n elif position in self.items:\n char = \"k\"\n else:\n char = \"0\"\n print(char, end=\"\")\n x += 1\n print()\n y += 1", "title": "" }, { "docid": "51c83dca66f608b1aeec98307ce4899f", "score": "0.45491838", "text": "def snowheight_io(args):\n\n if len(args) == 0:\n print(HELP)\n elif args[0] in ['-h', '--help']:\n print(HELP)\n elif args[0] in ['-v', '--version']:\n print('snowheight: ' + climvis.__version__)\n print('License: public domain')\n print('snowheight is provided \"as is\", without warranty of any kind')\n\n # Snowheight location/position option\n elif args[0] in ['-l', '--loc']:\n if len(args) < 3:\n print('snowheight --loc needs lon and lat parameters!')\n return\n #date = str(args[1])\n lon, lat = float(args[1]), float(args[2])\n\n #lon = 11\n #lat = 47\n\n\n point = (lon, lat)\n\n # Check if Latitude and Longitude are in the correct range for the precipitation and temperature data\n if lat < 45 or lat > 50:\n raise Exception('Location is out of the permitted range lon = [10, 15] lat = [45, 50]! Analysis not possible')\n elif lon < 10 or lon > 15:\n raise Exception('Location out of the permitted range lon = [10, 15] lat = [45, 50]! 
Analysis not possible')\n else:\n pass\n\n\n #the_tour.calc_snow_data_for_track(user_track)\n #test = dt.datetime.strptime(date, '%d.%m.%Y')\n #msg = snowpoint.calc_tour_data(test)\n #snowpoint.plot_snow_on_tour()\n\n # creation of the html path for the track -> see core.py\n #print(climvis.modules)\n #print(dir())\n\n\n html_path = climvis.write_point_html(point)\n\n #print(sys.path)\n #print(os.listdir(\"E:\\\\Uni\\\\VU Wissenschaftliches Programmieren\\\\climvis-master\\\\climvis\"))\n\n\n\n #loc1= Skitour()\n #loc1 = climvis.snowheight.tour(loc)\n #print(loc.__class__)\n #loc1.calc_datas()\n #loc1.calc_tour()\n\n\n #loc.climvis.snowheight.tour.calc_datas()\n #loc.climvis.snowheight.tour.calc_tour()\n\n# html_path = climvis.write_html(lon, lat)\n if '--no-browser' in args:\n print('File successfully generated at: ' + html_path)\n else:\n webbrowser.get().open_new_tab('file://' + html_path)\n\n\n # Snowheight town option\n elif args[0] in ['-t', '--town']:\n if len(args) != 2 or not args[1].replace('.','',1).isdigit() and args[1].replace('.','').isdigit()\\\n or args[1].replace('.','').isdigit():\n raise Exception('snowheight -t needs a town as an Input')\n point = args[1]\n html_path = climvis.write_point_html(point)\n\n if '--no-browser' in args:\n print('File successfully generated at: ' + html_path)\n else:\n webbrowser.get().open_new_tab('file://' + html_path)\n\n print(point)\n\n print('this is location function')\n\n # Snowheight gpx file option\n elif args[0] in ['-g', '--gpx']:\n if len(args) < 3:\n print('cruvis --gpx needs the date (dd-mm-yyyy) and the name of the .gpx file ')\n return\n # sets the date for when the calculations should be done\n date = str(args[1])\n # variable with the user input name of the .gpx tour\n gpx_file_name = str(args[2])\n # assuming the .gpx file is in .\\cruvis\\data directory as given by installation package\n bdir = os.path.dirname(__file__)\n gpx_file_path = os.path.join(bdir, 'data', gpx_file_name) # os.path.abspath(gpx_file_name)#MAYBE TO CHANGE?\n # checks if gpx_file_path actually exists\n if os.path.isfile(gpx_file_path):\n pass\n #gpx_file_path = gpx_file_path\n # if it does not exist, asks for the full path on the pc from the user\n else:\n print('File not found in cwd!')\n gpx_file_path = input('Please give whole path to gpx file: ')\n if os.path.isfile(gpx_file_path):\n #gpx_file_path = gpx_file_path\n gpx_file_name = os.path.basename(gpx_file_path)\n #gpx_file_path = os.path.basename(gpx_file_path)\n # if still not correct, shuts the program down\n else:\n raise Exception('File does not exist! Program will shut down')\n\n # the track in the .gpx file is used to create a Gpx_track() instance named user_track\n user_track = Gpx_track(gpx_f_path=gpx_file_path)\n user_track.extract_gpx_points()\n\n # the lat and lon range is checked to ensure the track is contained in range for which the program has the climate data\n for i in user_track.track:\n if i.lat < 45 or i.lat > 50:\n raise Exception('Some trackpoints are out of the permitted range lon = [10, 15] lat = [45, 50]! Analysis not possible')\n elif i.lon < 10 or i.lon > 15:\n raise Exception('Some trackpoints are out of the permitted range lon = [10, 15] lat = [45, 50]! 
Analysis not possible')\n else:\n pass\n\n # the mean (lat,lon) coordinates of the track are extracted to center the map on the track, default zoom of map is 13\n pt_mean = user_track.get_mean()\n lat_mean = pt_mean[0]\n lon_mean = pt_mean[1]\n\n # the temporary directory to store the files and html path is created\n temp_directory = mkdtemp()\n\n # instance of the Data_Handler() class is created with the paths to the climate data\n # and the output folder is set to the just created temp_directory\n data = Data_Handler()\n data.output_folder = temp_directory\n # instance of Skitour() is created and the calcuations for the expected snow on the\n # user input desired date are processed and saved in temp_directory\n the_tour = Skitour(data)\n the_tour.calc_snow_data_for_track(user_track)\n test = dt.datetime.strptime(date, '%d.%m.%Y')\n msg = the_tour.calc_tour_data(test)\n the_tour.plot_snow_on_tour()\n\n # creation of the html path for the track -> see core.py\n #print(climvis.modules)\n #print(dir())\n html_path = climvis.write_track_html(gpx_file_path, lat_mean, lon_mean, gpx_name=gpx_file_name, mesg=msg,\n directory=temp_directory)\n\n if '--no-browser' in args:\n print('File successfully generated at: ' + html_path)\n else:\n webbrowser.get().open_new_tab('file://' + html_path)\n else:\n print('cruvis: command not understood. '\n 'Type \"cruvis --help\" for usage options.')", "title": "" }, { "docid": "57b543b976b6471a0e419f078ffe2109", "score": "0.45456466", "text": "def print_xyz(\n atoms, cell, filedesc=sys.stdout, title=\"\", cell_conv=1.0, atoms_conv=1.0\n):\n\n a, b, c, alpha, beta, gamma = mt.h2abc_deg(cell.h * cell_conv)\n\n natoms = atoms.natoms\n fmt_header = (\n \"%d\\n# CELL(abcABC): %10.5f %10.5f %10.5f %10.5f %10.5f %10.5f %s\\n\"\n )\n filedesc.write(fmt_header % (natoms, a, b, c, alpha, beta, gamma, title))\n # direct access to avoid unnecessary slow-down\n qs = dstrip(atoms.q) * atoms_conv\n lab = dstrip(atoms.names)\n for i in range(natoms):\n filedesc.write(\n \"%8s %12.5e %12.5e %12.5e\\n\"\n % (lab[i], qs[3 * i], qs[3 * i + 1], qs[3 * i + 2])\n )", "title": "" }, { "docid": "7cfeafd8a81eeebbb8441bb7abdcd0ca", "score": "0.45275086", "text": "def nwa_test1(na):\n fpts, mags, phases = na.read_data()\n\n plt.figure(1)\n plt.subplot(2, 1, 1)\n plt.xlabel(\"Frequency (GHz)\")\n plt.ylabel(\"Transmission, S21 (dB)\")\n plt.plot(fpts / 1e9, mags)\n plt.subplot(2, 1, 2)\n plt.xlabel(\"Frequency (GHz)\")\n plt.ylabel(\"Transmitted Phase (deg)\")\n plt.plot(fpts / 1e9, phases)\n plt.show()", "title": "" }, { "docid": "3a3b2f48de87c8fe955793d77442e5f6", "score": "0.45229042", "text": "def show_unit_path(self, unit):\r\n\t\tpath = unit.path.path\r\n\t\tif path is None: # show at least the position\r\n\t\t\tpath = [unit.position.to_tuple()]\r\n\r\n\t\t# the path always contains the full path, the unit might be somewhere in it\r\n\t\tposition_of_unit_in_path = 0\r\n\t\tunit_pos = unit.position.to_tuple()\r\n\t\tfor i, pos in enumerate(path):\r\n\t\t\tif pos == unit_pos:\r\n\t\t\t\tposition_of_unit_in_path = i\r\n\t\t\t\tbreak\r\n\r\n\t\t# display units one ahead if possible, it looks nicer if the unit is moving\r\n\t\tif len(path) > 1 and position_of_unit_in_path+1 < len(path):\r\n\t\t\tposition_of_unit_in_path += 1 #\r\n\t\tpath = path[position_of_unit_in_path:]\r\n\r\n\t\t# draw every step-th coord\r\n\t\tstep = 1\r\n\t\trelevant_coords = [path[0]]\r\n\t\tfor i in xrange(step, len(path), 
step):\r\n\t\t\trelevant_coords.append(path[i])\r\n\t\trelevant_coords.append(path[-1])\r\n\r\n\t\t# get coords, actual drawing\r\n\t\tuse_rotation = self._get_rotation_setting()\r\n\t\tself.minimap_image.set_drawing_enabled()\r\n\t\tp = fife.Point(0, 0)\r\n\t\trender_name = self._get_render_name(\"ship_route\") + str(self.__class__.__ship_route_counter.next())\r\n\t\tcolor = unit.owner.color.to_tuple()\r\n\t\tlast_coord = None\r\n\t\tdraw_point = self.minimap_image.rendertarget.addPoint\r\n\t\tfor i in relevant_coords:\r\n\t\t\tcoord = self._world_to_minimap(i, use_rotation)\r\n\t\t\tif last_coord is not None and \\\r\n\t\t\t sum(abs(last_coord[i] - coord[i]) for i in (0, 1)) < 2: # 2 is min dist in pixels\r\n\t\t\t\tcontinue\r\n\t\t\tlast_coord = coord\r\n\t\t\tp.x = coord[0]\r\n\t\t\tp.y = coord[1]\r\n\t\t\tdraw_point(render_name, p, *color)\r\n\r\n\t\tdef cleanup():\r\n\t\t\tself.minimap_image.set_drawing_enabled()\r\n\t\t\tself.minimap_image.rendertarget.removeAll(render_name)\r\n\r\n\t\tspeed = 1.0 + math.sqrt(5) / 2\r\n\t\tself.highlight(path[-1], factor=0.4, speed=speed, finish_callback=cleanup, color=color)\r\n\r\n\t\treturn True", "title": "" }, { "docid": "71f7d85515e90734961ce71494a4360a", "score": "0.45138788", "text": "def show_phase(adata, genes, mode='labeling', vkey='velocity', basis='umap', group=None):\n\n import matplotlib.pyplot as plt\n import seaborn as sns\n\n # there is no solution for combining multiple plot in the same figure in plotnine, so a pure matplotlib is used\n # see more at https://github.com/has2k1/plotnine/issues/46\n genes = genes[genes in adata.var_name]\n if len(genes) == 0:\n raise Exception('adata has no genes listed in your input gene vector: {}'.format(genes))\n if not basis in adata.obsm.keys():\n raise Exception('{} is not applied to adata.}'.format(basis))\n else:\n embedding = pd.DataFrame({basis + '_0': adata.obsm['X_' + basis].iloc[:, 0], \\\n basis + '_1': adata.obsm['X_' + basis].iloc[:, 1]})\n\n n_cells, n_genes = adata.shape[0], len(genes)\n\n if vkey in adata.layers.keys():\n velocity = adata[genes].layers[vkey]\n elif vkey in adata.obsm.keys():\n velocity = adata[genes].obsm[vkey]\n else:\n raise Exception('adata has no vkey {} in either the layers or the obsm slot'.format(vkey))\n velocity = np.sum(velocity**2, 1)\n\n if 'velocity_gamma' in adata.var.columns():\n gamma = adata.var.gamma[genes].values\n velocity_offset = [0] * n_cells if not (\"velocity_offset\" in adata.var.columns()) else \\\n adata.var.velocity_offset[genes].values\n else:\n raise Exception('adata does not seem to have velocity_gamma column. 
Velocity estimation is required before '\n 'running this function.')\n\n if not (mode in ['labelling', 'splicing', 'full']):\n raise Exception('mode can be only one of the labelling, splicing or full')\n\n if mode is 'labelling' and all([i in adata.layers.keys() for i in ['new', 'old']]):\n new_mat, old_mat = adata[genes].layers['new'], adata[genes].layers['old']\n df = pd.DataFrame({\"new\": new_mat.flatten(), \"old\": old_mat.flatten(), 'gene': genes * n_cells, 'prediction':\n np.repeat(gamma, n_cells) * new_mat.flatten() + np.repeat(velocity_offset, n_cells),\n \"velocity\": genes * n_cells}, index = range(n_cells * n_genes))\n\n elif mode is 'splicing' and all([i in adata.layers.keys() for i in ['spliced', 'ambiguous', 'unspliced']]):\n unspliced_mat, spliced_mat = adata.layers['unspliced'], adata.layers['spliced']\n df = pd.DataFrame({\"unspliced\": unspliced_mat.flatten(), \"spliced\": spliced_mat.flatten(), 'gene': genes * n_cells,\n 'prediction': np.repeat(gamma, n_cells) * unspliced_mat.flatten() + np.repeat(velocity_offset, \\\n n_cells), \"velocity\": genes * n_cells}, index = range(n_cells * n_genes))\n\n elif mode is 'full' and all([i in adata.layers.keys() for i in ['uu', 'ul', 'su', 'sl']]):\n uu, ul, su, sl = adata.layers['uu'], adata.layers['ul'], adata.layers['su'], adata.layers['sl']\n df = pd.DataFrame({\"uu\": uu.flatten(), \"ul\": ul.flatten(), \"su\": su.flatten(), \"sl\": sl.flatten(),\n 'gene': genes * n_cells, 'prediction': np.repeat(gamma, n_cells) * uu.flatten() +\n np.repeat(velocity_offset, n_cells), \"velocity\": genes * n_cells}, index = range(n_cells * n_genes))\n\n else:\n raise Exception('Your adata is corrupted. Make sure that your layer has keys new, old for the labelling mode, '\n 'spliced, ambiguous, unspliced for the splicing model and uu, ul, su, sl for the full mode')\n\n # use seaborn to draw the plot:\n\n for cur_axes in g.axes.flatten():\n x0, x1 = cur_axes.get_xlim()\n y0, y1 = cur_axes.get_ylim()\n\n points = np.linspace(min(x0, y0), max(x1, y1), 100)\n\n cur_axes.plot(points, points, color='red', marker=None, linestyle='--', linewidth=1.0)\n\n plt.figure(None, (15,15), dpi=80)\n nrow, ncol = np.sqrt(3 * n_cells), np.sqrt(3 * n_cells)\n ncol = ncol - 1 if nrow * (ncol - 1) == 3 * n_cells else ncol\n\n # the following code is inspired by https://github.com/velocyto-team/velocyto-notebooks/blob/master/python/DentateGyrus.ipynb\n gs = plt.GridSpec(nrow,ncol)\n for i, gn in enumerate(genes):\n ax = plt.subplot(gs[i*3])\n try:\n ix=np.where(vlm.ra[\"Gene\"] == gn)[0][0]\n except:\n continue\n cur_pd = df.iloc[df.gene == gn, :]\n sns.scatterplot(cur_pd.iloc[:, 0], cur_pd.iloc[:, 1], hue=group)\n plt.title(gn)\n plt.plot(cur_pd.iloc[:, 0], cur_pd.loc[:, 'prediction'], c=\"k\")\n plt.ylim(0, np.max(cur_pd.iloc[:, 0])*1.02)\n plt.xlim(0, np.max(cur_pd.iloc[:, 1])*1.02)\n\n despline()\n\n df_embedding = pd.concat([embedding, cur_pd.loc[:, 'gene']], ignore_index=False)\n sns.scatterplot(df_embedding.iloc[:, 0], df_embedding.iloc[:, 1], hue=df_embedding.loc[:, 'gene'])\n sns.scatterplot(df_embedding.iloc[:, 0], df_embedding.iloc[:, 1], hue=df_embedding.loc[:, 'velocity'])\n\n plt.tight_layout()", "title": "" }, { "docid": "5e9ff7618b9b23b7957bc734044b65aa", "score": "0.45131218", "text": "def print_xyz_path(beads, cell, filedesc=sys.stdout, cell_conv=1.0, atoms_conv=1.0):\n\n a, b, c, alpha, beta, gamma = mt.h2abc_deg(cell.h * cell_conv)\n\n fmt_header = (\n \"%d\\n# bead: %d CELL(abcABC): %10.5f %10.5f %10.5f %10.5f %10.5f %10.5f \\n\"\n )\n natoms = 
beads.natoms\n nbeads = beads.nbeads\n for j in range(nbeads):\n filedesc.write(fmt_header % (natoms, j, a, b, c, alpha, beta, gamma))\n for i in range(natoms):\n qs = dstrip(beads.q) * atoms_conv\n lab = dstrip(beads.names)\n filedesc.write(\n \"%8s %12.5e %12.5e %12.5e\\n\"\n % (lab[i], qs[j][3 * i], qs[j][3 * i + 1], qs[j][3 * i + 2])\n )", "title": "" }, { "docid": "7a6536e91d18bd7d27b0cf942ad0dc41", "score": "0.45129418", "text": "def displ_sin_ax(n_cycle=1, ampl=0.5, phase=0.0):\n x = (np.arange(N_AX, dtype=float) / N_AX + phase) * 2 * np.pi * n_cycle\n x = np.repeat(x, N_AZ).reshape(-1, N_AZ)\n out = Displ()\n out.vals = np.sin(x) * ampl / n_cycle\n out.title = 'Displ-X (ampl={:.2f} n_cycle={:.1f} phase={})'.format(ampl, n_cycle, phase)\n return out", "title": "" }, { "docid": "8db457c0e6128b3c0a1d3457d6ad1eca", "score": "0.45116678", "text": "def showLSystemCurve(filename, n, origin, heading, length, angle):\n \n # NOT IMPLEMENTED...\n pass", "title": "" }, { "docid": "28848fec275a8a3aa52d5520bc575e05", "score": "0.45012307", "text": "def show_tracks(cd):\n try:\n print('\\n\\n')\n print('{:=^66}'.format(' Current CD '))\n print('{:<10}{:^30}{:^30}'.format('CD ID', 'Title','Artist'))\n print('{:-^66}'.format('-'))\n print(cd)\n print('{:-^66}'.format('-'))\n print()\n print('{:=^66}'.format(' Track Listing '))\n print('{:<10}{:^30}{:^30}'.format('Track ID', 'Track Title','Time'))\n print('{:-^66}'.format('-'))\n print(cd.get_tracks())\n except: print('No tracks for this CD')", "title": "" }, { "docid": "832926472284ad184a3068ea08af6b3d", "score": "0.44927293", "text": "def ivan_tomo_input(periods_dict,outdir,output_periods,df):\n # Ray files\n raydir=os.path.join(outdir,'rays')\n create_path(raydir)\n p_fid=open(os.path.join(outdir,'periods.dat'),'wa')\n n=01\n for per in sorted(output_periods) :\n print \"Period %f no of measurements %i \" % (per,len(periods_dict[per]))\n if len(periods_dict[per])<1 :\n continue\n fid=open(os.path.join(raydir,\"rays\"+str(\"%02.f\" % n)+'.dat'),'wa')\n for l in periods_dict[per] :\n outline=\" \".join(np.array([l[2],l[1],l[3],l[5],l[4],l[6],l[0]],dtype=np.str))+\"\\n\"\n out=[l[2],l[1],l[3],l[5],l[4],l[6],l[0]]\n outline=\"%8.4f %8.4f %6.1f %8.4f %8.4f %6.1f %8f\\n\" % tuple(out)\n fid.write(outline)\n fid.close()\n n+=1\n p_fid.write(str(per)+\"\\n\")\n p_fid.close()\n # period/station files\n stadict={}\n for l in df[['station','lat_1','lon_1','el_1']].values :\n stadict[l[0].split(\"-\")[0]]=l[1:]\n for l in df[['station','lat_2','lon_2','el_2']].values :\n stadict[l[0].split(\"-\")[1]]=l[1:]\n sta_fid=open(os.path.join(outdir,\"stations.dat\"),'wa')\n arr=np.array(stadict.values())\n for ol in arr[:,(1,0,2)]:\n sta_fid.write(\"%8.4f %8.4f %6.1f\" % tuple(ol)+\"\\n\")\n sta_fid.close()\n return", "title": "" }, { "docid": "2fe3059b904c75600ba2b682410e32c1", "score": "0.44830543", "text": "def Mobs_show_stats(self):\n my_mobs_list = self.Mobs_list_make()\n print(\"Inside this entities.dat file, there is : \")\n \n for self.mobs_type in mobs_all_dict:\n mobs_total_number = 0\n \n for each_mobs in my_mobs_list:\n if self.mobs_type == each_mobs[2]:\n mobs_total_number += 1\n # print str(self.mobs_type) + ' = ' + str(mobs_total_number)\n print(('{:>9} = {:<3}' .format(str(self.mobs_type), str(mobs_total_number))))", "title": "" }, { "docid": "487013020d217fabeb12c607e4de4ca0", "score": "0.4479976", "text": "def print_output(self):\n if self.select_wlcg_output:\n self.print_wlcg_output()\n else:\n self.print_short_output()", "title": "" }, { "docid": 
"67780328dbe4f7c22c4b602bcdf4bd99", "score": "0.44793546", "text": "def view_structures(structures,w=200,h=200,columns=4,representation='ball_stick',labelsize=12,\n labels=False, labelinds=None, vector=None, sphere_scale=0.3,stick_scale=0.25,\n metal_scale=0.75):\n mols = type_convert(structures)\n if len(mols) == 1:\n view_ats = py3Dmol.view(width=w,height=h)\n mol = mols[0]\n if isinstance(labels,str):\n label = labels\n elif isinstance(labels,list):\n label = labels[0]\n elif isinstance(labels,bool):\n if labels:\n label = mol.ase_atoms.get_chemical_formula()\n else:\n label = False\n metal_ind = [i for i,x in enumerate(mol.ase_atoms) if (x.symbol in io_ptable.all_metals)]\n if len(metal_ind) > 0 : # Take advantage of empty list\n label_posits = mol.ase_atoms.positions[metal_ind].flatten()\n else:\n label_posits = mol.ase_atoms.get_center_of_mass().flatten() # Put it at the geometric center of the molecule.\n coords = mol.write_mol2('tmp.mol2', writestring=True)\n if representation == 'ball_stick':\n view_ats.addModel(coords.replace('un','1'),'mol2') # Add the molecule\n view_ats.addStyle({'sphere':{'colorscheme':'Jmol','scale':sphere_scale}}) \n msyms = [mol.ase_atoms.get_chemical_symbols()[x] for x in metal_ind]\n for ms in set(msyms):\n view_ats.setStyle({'elem':ms},{'sphere':{'colorscheme':'Jmol','scale':metal_scale}})\n view_ats.addStyle({'stick':{'colorscheme':'Jmol','radius':stick_scale}}) \n if label:\n view_ats.addLabel(\"{}\".format(label), {'position':{'x':'{}'.format(label_posits[0]),\n 'y':'{}'.format(label_posits[1]),'z':'{}'.format(label_posits[2])},\n 'backgroundColor':\"'black'\",'backgroundOpacity':'0.3',\n 'fontOpacity':'1', 'fontSize':'{}'.format(labelsize),\n 'fontColor':\"white\",'inFront':'true'})\n else:\n view_ats.addModel(coords.replace('un','1'),'mol2') # Add the molecule\n if representation == 'stick':\n view_ats.setStyle({representation:{'colorscheme':'Jmol','radius':stick_scale}})\n elif representation == 'sphere':\n view_ats.setStyle({representation:{'colorscheme':'Jmol','scale':sphere_scale}})\n else:\n view_ats.setStyle({representation:{'colorscheme':'Jmol'}})\n if label:\n view_ats.addLabel(\"{}\".format(label), {'position':{'x':'{}'.format(label_posits[0]),\n 'y':'{}'.format(label_posits[1]),'z':'{}'.format(label_posits[2])},\n 'backgroundColor':\"'black'\",'backgroundOpacity':'0.3',\n 'fontOpacity':'1', 'fontSize':'{}'.format(labelsize),\n 'fontColor':\"white\",'inFront':'true'})\n if labelinds is not None:\n if isinstance(labelinds,list):\n inds = labelinds\n else:\n inds = [x for x in range(len(mol.ase_atoms))]\n for p,i in enumerate(inds):\n atom_posit = mol.ase_atoms.positions[p]\n if i is not None:\n view_ats.addLabel(\"{}\".format(i), {'position':{'x':'{}'.format(atom_posit[0]),\n 'y':'{}'.format(atom_posit[1]),'z':'{}'.format(atom_posit[2])},\n 'backgroundColor':\"'black'\",'backgroundOpacity':'0.4',\n 'fontOpacity':'1', 'fontSize':'{}'.format(labelsize),\n 'fontColor':\"white\",'inFront':'true'})\n if vector:\n view_ats.addArrow(vector)\n view_ats.zoomTo()\n view_ats.show()\n elif len(mols) < 50:\n rows = int(m.ceil(float(len(mols))/columns))\n w = w*columns\n h = h*rows \n # Initialize Layout\n view_ats = py3Dmol.view(width=w,height=h,linked=False,viewergrid=(rows,columns))\n # Check for labels and populate\n if isinstance(labels,bool):\n if labels:\n label = [x.ase_atoms.get_chemical_formula() for x in mols]\n else:\n label = []\n elif isinstance(labels,list) or isinstance(labels,np.ndarray):\n if (len(labels) != len(mols)):\n print('Wrong 
amount of labels passed, defaulting to chemical formulas.')\n label = [x.ase_atoms.get_chemical_formula() for x in mols]\n else: # Force them all to be strings. \n label = [str(x) for x in labels]\n else:\n raise ValueError('What sort of labels are wanting? Not recognized.')\n x,y = 0,0 # Subframe position\n for k,mol in enumerate(mols):\n metal_inds = [i for i,x in enumerate(mol.ase_atoms) if (x.symbol in io_ptable.all_metals)]\n if len(metal_inds) > 0 : # Take advantage of empty list\n label_posits = mol.ase_atoms.positions[metal_inds[0]].flatten()\n else:\n label_posits = mol.ase_atoms.get_center_of_mass().flatten() # Put it at the geometric center of the molecule.\n coords = mol.write_mol2('tmp.mol2', writestring=True)\n if representation == 'ball_stick':\n view_ats.addModel(coords.replace('un','1'),'mol2',viewer=(x,y)) # Add the molecule\n view_ats.addStyle({'sphere':{'colorscheme':'Jmol','scale':sphere_scale}},viewer=(x,y)) \n msyms = [mol.ase_atoms.get_chemical_symbols()[x] for x in metal_inds]\n for ms in set(msyms):\n view_ats.setStyle({'elem':ms},{'sphere':{'colorscheme':'Jmol','scale':metal_scale}},viewer=(x,y))\n view_ats.addStyle({'stick':{'colorscheme':'Jmol','radius':stick_scale}},viewer=(x,y)) \n if len(label) > 0:\n view_ats.addLabel(\"{}\".format(label[k]), {'position':{'x':'{}'.format(label_posits[0]),\n 'y':'{}'.format(label_posits[1]),'z':'{}'.format(label_posits[2])},\n 'backgroundColor':\"'black'\",'backgroundOpacity':'0.5',\n 'fontOpacity':'1','fontSize':'{}'.format(labelsize),\n 'fontColor':\"white\",'inFront':'true',}, viewer=(x,y))\n if labelinds is not None:\n if isinstance(labelinds,list):\n inds = labelinds[k]\n else:\n inds = [x for x in range(len(mol.ase_atoms))]\n for p,j in enumerate(inds):\n atom_posit = mol.ase_atoms.positions[p]\n if j is not None:\n view_ats.addLabel(\"{}\".format(j), {'position':{'x':'{}'.format(atom_posit[0]),\n 'y':'{}'.format(atom_posit[1]),'z':'{}'.format(atom_posit[2])},\n 'backgroundColor':\"'black'\",'backgroundOpacity':'0.4',\n 'fontOpacity':'1', 'fontSize':'{}'.format(int(labelsize)),\n 'fontColor':\"white\", 'inFront':'true'}, viewer=(x,y))\n else:\n view_ats.addModel(coords.replace('un','1'),'mol2',viewer=(x,y))\n if representation == 'stick':\n view_ats.setStyle({representation:{'colorscheme':'Jmol','radius':stick_scale}},viewer=(x,y))\n elif representation == 'sphere':\n view_ats.setStyle({representation:{'colorscheme':'Jmol','scale':sphere_scale}},viewer=(x,y))\n else:\n view_ats.setStyle({representation:{'colorscheme':'Jmol'}},viewer=(x,y))\n if len(label) > 0:\n view_ats.addLabel(\"{}\".format(label[k]), {'position':{'x':'{}'.format(label_posits[0]),\n 'y':'{}'.format(label_posits[1]),'z':'{}'.format(label_posits[2])},\n 'backgroundColor':\"'black'\",'backgroundOpacity':'0.5',\n 'fontOpacity':'1','fontSize':'{}'.format(labelsize),\n 'fontColor':\"white\",'inFront':'true',}, viewer=(x,y))\n if labelinds is not None:\n if isinstance(labelinds,list):\n inds = labelinds[k]\n else:\n inds = [x for x in range(len(mol.ase_atoms))]\n for p,j in enumerate(inds):\n atom_posit = mol.ase_atoms.positions[p]\n if j is not None:\n view_ats.addLabel(\"{}\".format(j), {'position':{'x':'{}'.format(atom_posit[0]),\n 'y':'{}'.format(atom_posit[1]),'z':'{}'.format(atom_posit[2])},\n 'backgroundColor':\"'black'\",'backgroundOpacity':'0.4',\n 'fontOpacity':'1', 'fontSize':'{}'.format(int(labelsize)),\n 'fontColor':\"white\", 'inFront':'true'}, viewer=(x,y))\n if vector:\n view_ats.addArrow(vector)\n view_ats.zoomTo(viewer=(x,y))\n if y+1 < 
columns: # Fill in columns\n y+=1\n else:\n x+=1\n y=0\n view_ats.show()\n else: \n raise ValueError('Warning. Passing this many structures WILL cause your kernel to crash.')", "title": "" }, { "docid": "77b51ef33fa0696cd5511d4aeb76a8fc", "score": "0.44627443", "text": "def test_views():\n _set_backend('test')\n brain = Brain(*std_args)\n brain.show_view('lateral')\n brain.show_view('m')\n brain.show_view('rostral')\n brain.show_view('caudal')\n brain.show_view('ve')\n brain.show_view('frontal')\n brain.show_view('par')\n brain.show_view('dor')\n brain.show_view({'distance': 432})\n brain.show_view({'azimuth': 135, 'elevation': 79}, roll=107)\n brain.close()", "title": "" }, { "docid": "31e144fe7a01b6f26401a75c99c6a662", "score": "0.44528908", "text": "def show_molobj(molobj, align_conformers=True):\n\n if align_conformers:\n rdMolAlign.AlignMolConformers(molobj)\n\n n_conformers = molobj.GetNumConformers()\n assert n_conformers > 0\n\n view = nglview.show_rdkit(molobj)\n\n def _view_conformer(idx):\n coord = chembridge.get_coordinates(molobj, confid=idx)\n view.set_coordinates({0: coord})\n\n print(f\"Conformer {idx} / {n_conformers - 1}\")\n\n interact(\n _view_conformer,\n idx=ipywidgets.IntSlider(min=0, max=n_conformers - 1, step=1),\n layout=Layout(width=\"100%\", height=\"80px\"),\n )\n\n IPython.core.display.display(view)", "title": "" }, { "docid": "9194276e91a622eb22a3b0e03ea7ebe6", "score": "0.44504872", "text": "def allSaccLats(dm, direction, figTitle = True, trim = True, cousineau = True, nBin = 15, usePm = True, err = 'se'):\n\tfigList = []\n\tprint '\\n\\tdirection = %s \\n' % direction\n\t\n\tif direction == \"ToHandle\":\n\t\tstart_dm = dm.select(\"contrast_side == 'control'\")\n\tif direction == \"ToContrast\":\n\t\tstart_dm = dm.select(\"contrast_side != 'control'\")\n\t\n\texp = start_dm['exp'][0]\n\t\n\tlDm = []\n\t\n\tif exp == \"004A\":\n\t\t# Max sacc count = 4\n\t\tlSacc = range(1,5)\n\tif exp == \"004B\":\n\t\t# Max sacc count = 3\n\t\tlSacc = range(1,4)\n\t\t\n\tfor saccCount in lSacc:\n\t\t\n\t\t# Create dms containing all latencies (i.e., the to-be-binned variable) \n\t\t# of a given saccade count, e.g.\n\t\t# almost all trials for the first saccade, and less and less for the\n\t\t# subsequent saccades.\n\t\t# Do the same for the landing positions (dv's on the y-axis):\n\t\t\n\t\t# Select one saccade:\n\t\tdm_sacc = dm.select(\"saccLat%s != ''\" % str(saccCount))\n\t\t\n\t\t# Create new column containing sacc count:\n\t\tdm_sacc = dm_sacc.addField(\"saccNr\", dtype = str)\n\t\tdm_sacc[\"saccNr\"] = \"sacc%s\" % saccCount\n\t\t\n\t\t# Create new column containing IV and DV's, but without the\n\t\t# sacc count in the column header:\n\t\tdm_sacc = dm_sacc.addField(\"absSaccLat\", dtype = float)\n\t\tdm_sacc[\"absSaccLat\"] = dm_sacc[\"saccLat%s\" % str(saccCount)]\n\t\t\n\t\tdm_sacc = dm_sacc.addField(\"abs%s\" % direction, dtype = float)\n\t\tdm_sacc[\"abs%s\" % direction] = dm_sacc[\"endX%sNorm%s\"%(str(saccCount), direction)]\n\t\t\n\t\tdm_sacc = dm_sacc.addField(\"absCorr%s\" % direction, dtype = float)\n\t\tdm_sacc[\"absCorr%s\" % direction] = dm_sacc[\"endX%sCorrNorm%s\"%(str(saccCount), direction)]\n\t\t\n\t\t# Add the dm to the list of dm's\n\t\tlDm.append(dm_sacc)\n\n\t# Combine all dm's into one big dm:\n\tmerged_dm = lDm[0]\n\t\n\tfor dm in lDm[1:]:\n\t\tmerged_dm = merged_dm + dm\n\t\n\t\tcorrList = [\"corrected\", \"uncorrected\"]\n\t\n\tfor corr in corrList:\n\t\n\t\tfig = plt.figure(figsize = (3,7))\n\t\ttitle = \"Landing (%s) %s as a function of ALL 
binned sacc lats exp %s cousineau = %s trim = %s usePm = %s\" \\\n\t\t\t% (corr, direction, exp, cousineau, trim, usePm)\n\t\t\t\n\t\tif figTitle:\n\t\t\tplt.title(title)\n\t\t\t\n\t\tif corr == \"uncorrected\":\n\t\t\tdv = \"abs%s\" % direction\n\t\telif corr == \"corrected\":\n\t\t\tdv = \"absCorr%s\" % direction\n\t\t\t\n\t\tprint \"\\tDV = %s\\n\" % dv\n\t\t\n\t\t# There should be no ''s in the dm anymore, but off-object saccades are still\n\t\t# possible, so this filtering remains necessary:\n\t\tdv_dm = onObject(merged_dm,dv)\n\t\t\t\n\t\tsaccLat = \"absSaccLat\"\n\n\t\t# Trim the data matrix such that the most extreme latencies are excluded:\n\t\tprint \"\\n\\ttrim = %s\\n\" % trim\n\t\tif trim:\n\t\t\ttrimmed_dm = dv_dm.selectByStdDev(keys = [\"file\"], dv = saccLat)\n\t\telse:\n\t\t\ttrimmed_dm = dv_dm\n\t\t\n\t\t# Withinize sacc latencies, if wanted:\n\t\tprint \"\\n\\tcousineau = %s\\n\" % cousineau\n\t\tif cousineau:\n\t\t\t\n\t\t\t_dm = trimmed_dm.addField(\"cousineau_%s\"%saccLat, dtype = None)\n\t\t\t_dm = _dm.withinize(saccLat, \"cousineau_%s\"%saccLat, \"file\", verbose = True, whiten=False)\n\t\t\tsaccLat = \"cousineau_%s\"%saccLat\n\t\telse:\n\t\t\t_dm = dv_dm\n\t\t\t\t\n\t\t# Make bins, only for the first dv (for the second dv, the binned variable is the same and\n\t\t# therefore already exists:\n\t\tvarToBin = saccLat\n\t\tbinnedVar = \"binnend%s\" % varToBin\n\t\tbinned_dm = _dm.addField(binnedVar)\n\t\tbinned_dm = binned_dm.calcPerc(varToBin, binnedVar ,keys = [\"file\"], nBin = nBin)\n\t\t\n\t\tif not usePm:\n\t\t\t\n\t\t\tlX = []\n\t\t\tlY = []\n\t\t\t\n\t\t\tfor _bin in binned_dm.unique(binnedVar): \n\t\t\t\n\t\t\t# Filter out all but one bin\n\t\t\t\tdm_one_bin = binned_dm.select('%s == %f' % (binnedVar, _bin))\n\t\t\n\t\t\t\t# Get the mean sacc lat and the mean landing position (for x and y axis, respectively):\n\t\t\t\t# NOTE: withinising doesn't make any real difference for the overall pattern. 
\n\t\t\n\t\t\t\tyMean = dm_one_bin[dv].mean()\n\t\t\t\txMean = dm_one_bin[varToBin].mean()\n\t\t\t\t\n\t\t\t\tlX.append(xMean)\n\t\t\t\tlY.append(yMean)\n\t\t\t\n\t\t\t\n\t\t\tplt.plot(lX, lY, color = \"#3465a4\", marker = 'o')\n\t\t\tplt.xlabel(\"binned saccade latencies from stimulus onset\")\n\t\t\tplt.ylabel(\"landing position (%s) %s\" % (corr,direction))\n\t\t\tplt.ylim(-.2, .2)\n\t\t\tplt.axhline(0, color = \"#555753\", linestyle = \"--\", linewidth = 2)\n\t\t\tplt.savefig(os.path.join(dst,'%s.png' % title))\n\t\t\n\t\tif usePm:\n\t\t\t\n\t\t\tpm = PivotMatrix(binned_dm, binnedVar, \"file\", dv, colsWithin = True, err = 'se')\n\t\t\t#pm.plot(nLvl1=1, fig = fig, xLabel = \"binned saccade latencies from stimulus onset\", \\\n\t\t\t#\tyLabel = \"landing position (%s) %s\" % (corr,direction))\n\t\t\t\n\t\t\tpm.barPlot(fig = fig, xLabel = \"binned saccade latencies from stimulus onset\", \\\n\t\t\t\tyLabel = \"landing position (%s) %s\" % (corr,direction))\n\t\t\t\n\t\t\tplt.ylim(-.2, .2)\n\t\t\tplt.axhline(0, color = \"#555753\", linestyle = \"--\", linewidth = 2)\n\t\t\tplt.savefig(os.path.join(dst,'%s.png' % title))\n\t\t\t\n\t\t\tplt.savefig(os.path.join(dst,'%s.png' % title))\n\t\t\t\n\t\t\n\t\tfigList.append(fig)\n\n\treturn figList", "title": "" }, { "docid": "9945bf144c24044c3a43280327067ecd", "score": "0.44479397", "text": "def show_ind_plot(u): \n # plotting the trajectory, let's get h \n h = u[:,0]\n \n #Finding where the height becomes negative\n idx_negative = np.where(h<0.0)[0]\n if len(idx_negative)==0:\n idx_ground = N-1\n print ('Euler integration has not touched ground yet!')\n else:\n idx_ground = idx_negative[0] \n \n #visualization of the path\n pyplot.figure(figsize = (8,6))\n pyplot.grid(True)\n pyplot.xlabel(r't', fontsize = 18)\n pyplot.ylabel(r'h', fontsize = 18)\n pyplot.title('Flight time = %.2f' % T, fontsize=18)\n pyplot.plot(t[:idx_ground],h[:idx_ground],'k-', lw=2)\n print 'max height =' + str(np.amax(h))\n print u[idx_ground,1]\n pyplot.show()", "title": "" }, { "docid": "64f5240c1abcfe876e679a12ff35c44f", "score": "0.4444141", "text": "def display_monsters(monsters_conjured: dict) -> None:\n for monster in monsters_conjured:\n print(\"{} x {}\".format(monster[0],monsters_conjured[monster]))\n cr = get_monster_cr(monster)\n print(\"\\tCR {}, {}, {}\\n\".format(cr,monster[1],monster[2]))", "title": "" }, { "docid": "048b9682daa81c754774b24572693547", "score": "0.44427046", "text": "def show(self):\n\t\tprint 'Bounds:',\n\t\tfor b in self.bounds:\n\t\t\tprint '[',b[0],',',b[1],']',\n\t\tprint 'vol:',self.volume()", "title": "" }, { "docid": "e3c4bae181917bb45312b4731ba0c020", "score": "0.44408023", "text": "def plot_platesn(options,args):\n sf= segueSelect.segueSelect(sn=True,sample='G') #Unimportant\n bovy_plot.bovy_print()\n bovy_plot.bovy_plot(sf.platestr.platesn_r,sf.platestr.sn1_1,'gv',\n xrange=[0.,300.],yrange=[0.,300],\n xlabel=r'$\\mathrm{plateSN\\_r} \\equiv (\\mathrm{sn1\\_1} + \\mathrm{sn2\\_1})/2$',\n ylabel=r'$\\mathrm{snX\\_Y}$')\n bovy_plot.bovy_plot(sf.platestr.platesn_r,sf.platestr.sn2_1,'y^',\n overplot=True)\n bovy_plot.bovy_plot(sf.platestr.platesn_r,sf.platestr.sn1_0,'bs',\n overplot=True)\n bovy_plot.bovy_plot(sf.platestr.platesn_r,sf.platestr.sn2_0,'cp',\n overplot=True)\n bovy_plot.bovy_plot(sf.platestr.platesn_r,sf.platestr.sn1_2,'rh',\n overplot=True)\n bovy_plot.bovy_plot(sf.platestr.platesn_r,sf.platestr.sn2_2,'mH',\n overplot=True)\n bovy_plot.bovy_text(25.,280,r'$\\mathrm{sn1\\_1}:\\ r\\ \\mathrm{band}$',color='g',size=14.)\n 
bovy_plot.bovy_plot([15.],[285.],'gv',overplot=True)\n bovy_plot.bovy_text(25.,265,r'$\\mathrm{sn2\\_1}:\\ r\\ \\mathrm{band}$',color='y',size=14.)\n bovy_plot.bovy_plot([15.],[270.],'y^',overplot=True)\n bovy_plot.bovy_text(25.,250,r'$\\mathrm{sn1\\_0}:\\ g\\ \\mathrm{band}$',color='b',size=14.)\n bovy_plot.bovy_plot([15.],[255.],'bs',overplot=True)\n bovy_plot.bovy_text(25.,235,r'$\\mathrm{sn2\\_0}:\\ g\\ \\mathrm{band}$',color='c',size=14.)\n bovy_plot.bovy_plot([15.],[240.],'cp',overplot=True)\n bovy_plot.bovy_text(25.,220,r'$\\mathrm{sn1\\_2}:\\ i\\ \\mathrm{band}$',color='r',size=14.)\n bovy_plot.bovy_plot([15.],[225.],'rh',overplot=True)\n bovy_plot.bovy_text(25.,205,r'$\\mathrm{sn2\\_2}:\\ i\\ \\mathrm{band}$',color='m',size=14.)\n bovy_plot.bovy_plot([15.],[210.],'mH',overplot=True)\n bovy_plot.bovy_end_print(options.plotfile)", "title": "" }, { "docid": "b67ad0f7e7dcead68dc7a7342f8c04c0", "score": "0.44405717", "text": "def display_stats(position_nmea: sentences.NMEA, gain_adc: sentences.ADC, bearing_list) -> None:\n print('GPS\\n===\\nFix: {fix}, Lat: {lat}, Lon: {lon}'\n .format(fix=position_nmea.is_fixed(),\n lat=position_nmea.get_lat(),\n lon=position_nmea.get_lon()))\n print(position_nmea.unparsed)\n print(\"===\\nCurrent Gain: {gain:7.2f}\".format(gain=gain_adc.get_gain()))\n print(\"===\\nCurrent Bearing: {bear:7.2f}\".format(bear=bearing_list.get_bearing()))", "title": "" }, { "docid": "8096b9e55f0c7ee7f49d3dd49a99615f", "score": "0.4431028", "text": "def plot_moc_vs_oht(config, trans, basename='', name='simulated', obs_vol=None, obs_oht=None):\n \n # Add model data to axis (model v)\n fig = plt.figure(figsize=(15,5))\n fig.add_subplot(1,3,1)\n\n dts = utils.get_ncdates(config, trans)\n moc_model = trans.variables['moc_model'][:]\n q_sum_model = trans.variables['q_sum_model'][:]\n q_gyre_model = trans.variables['q_gyre_model'][:]\n q_ot_model = trans.variables['q_ot_model'][:]\n \n q_sum_model_lin, q_sum_model_label = linreg(moc_model, q_sum_model)\n q_gyre_model_lin, q_gyre_model_label = linreg(moc_model, q_gyre_model)\n q_ot_model_lin, q_ot_model_label = linreg(moc_model, q_ot_model)\n \n plt.plot(moc_model, q_sum_model,'x', color='k', label='total %s' % q_sum_model_label)\n plt.plot(moc_model, q_ot_model,'x', color=c1, label='overturning %s' % q_ot_model_label)\n plt.plot(moc_model, q_gyre_model,'x', color=c2, label='gyre %s' % q_gyre_model_label)\n\n if q_sum_model_lin is not None:\n plt.plot(moc_model, q_sum_model_lin,'-', color='k')\n plt.plot(moc_model, q_ot_model_lin,'-', color=c1)\n plt.plot(moc_model, q_gyre_model_lin,'-', color=c2)\n \n plt.xlabel('MOC (Sv)')\n plt.ylabel('Heat transport (PW)')\n plt.title('MOC vs OHT in %s (model velocities)' % name)\n plt.legend(loc='best', fontsize=8, )\n\n\n # Add model data to axis (RAPID approx)\n fig.add_subplot(1,3,2)\n \n moc_rapid = trans.variables['moc_rapid'][:]\n q_sum_rapid = trans.variables['q_sum_rapid'][:]\n q_gyre_rapid = trans.variables['q_gyre_rapid'][:]\n q_ot_rapid = trans.variables['q_ot_rapid'][:]\n\n q_sum_rapid_lin, q_sum_rapid_label = linreg(moc_rapid, q_sum_rapid)\n q_gyre_rapid_lin, q_gyre_rapid_label = linreg(moc_rapid, q_gyre_rapid)\n q_ot_rapid_lin, q_ot_rapid_label = linreg(moc_rapid, q_ot_rapid)\n\n plt.plot(moc_rapid, q_sum_rapid,'x', color='k', label='total %s' % q_sum_rapid_label)\n plt.plot(moc_rapid, q_ot_rapid,'x', color=c1, label='overturning %s' % q_ot_rapid_label)\n plt.plot(moc_rapid, q_gyre_rapid,'x', color=c2, label='gyre %s' % q_gyre_rapid_label)\n\n if q_sum_rapid_lin is not 
None:\n plt.plot(moc_rapid, q_sum_rapid_lin,'-', color='k')\n plt.plot(moc_rapid, q_ot_rapid_lin,'-', color=c1)\n plt.plot(moc_rapid, q_gyre_rapid_lin,'-', color=c2)\n\n plt.xlabel('MOC (Sv)')\n plt.ylabel('Heat transport (PW)')\n plt.title('MOC vs OHT in %s (RAPID approx)' % name)\n plt.legend(loc='best', fontsize=8, )\n\n\n # Add optional obs data to axis\n if (obs_oht is not None) and (obs_vol is not None):\n fig.add_subplot(1,3,3)\n\n mindt, maxdt = utils.get_daterange(obs_vol.dates, obs_oht.dates)\n vol_ind = utils.get_dateind(obs_vol.dates, mindt, maxdt)\n oht_ind = utils.get_dateind(obs_oht.dates, mindt, maxdt)\n q_sum_obs = obs_oht.q_sum[oht_ind]\n q_gyre_obs = obs_oht.q_gyre[oht_ind]\n q_ot_obs = obs_oht.q_ot[oht_ind]\n moc_obs = obs_vol.moc[vol_ind]\n \n q_sum_obs_lin, q_sum_obs_label = linreg(moc_obs, q_sum_obs)\n q_gyre_obs_lin, q_gyre_obs_label = linreg(moc_obs, q_gyre_obs)\n q_ot_obs_lin, q_ot_obs_label = linreg(moc_obs, q_ot_obs)\n \n plt.plot(moc_obs, q_sum_obs,'x', color='k', label='total %s' % q_sum_obs_label)\n plt.plot(moc_obs, q_ot_obs,'x', color=c1, label='overturning %s' % q_ot_obs_label)\n plt.plot(moc_obs, q_gyre_obs,'x', color=c2, label='gyre %s' % q_gyre_obs_label)\n \n if q_sum_obs_lin is not None:\n plt.plot(moc_obs, q_sum_obs_lin,'-', color='k') \n plt.plot(moc_obs, q_ot_obs_lin,'-', color=c1)\n plt.plot(moc_obs, q_gyre_obs_lin,'-', color=c2)\n\n plt.xlabel('MOC (Sv)')\n plt.ylabel('Heat transport (PW)')\n plt.title('MOC vs OHT in RAPID observations')\n plt.legend(loc='best', fontsize=8, )\n\n # Save plot\n plt.tight_layout()\n savef = basename + 'moc_vs_heat_transports_at_26n.png'\n print('SAVING: {}'.format(savef))\n fig.savefig(savef, dpi=300)\n plt.close()", "title": "" }, { "docid": "bde4682bb74d1ae1df34597f680da5c5", "score": "0.44248322", "text": "def displ_sin_az(n_cycle=1, ampl=0.5, phase=0.0):\n x = (np.arange(N_AZ, dtype=float) / N_AZ + phase) * 2 * np.pi * n_cycle\n x = np.repeat(x, N_AX).reshape(-1, N_AX).transpose()\n out = Displ()\n out.vals = np.sin(x) * ampl / n_cycle\n out.title = 'Displ-X (ampl={:.2f} n_cycle={:.1f} phase={})'.format(ampl, n_cycle, phase)\n return np.sin(x) * ampl / n_cycle", "title": "" }, { "docid": "0c6a6959c6c40e94603077ef1e3cf4bb", "score": "0.44213915", "text": "def displayLSystem(title, axiom, rules):\n print(title)\n print('-'*len(title))\n print('Axiom:')\n print('\\t'+axiom)\n print(' ')\n print('Rules:')\n # get rules\n for key in rules:\n print('\\t' + key, ' -> ', rules[key])", "title": "" }, { "docid": "34a8c8c7d4a5d9831480e2f91b3907da", "score": "0.44200662", "text": "def do_show(self, args):\n show_cli = Show()\n show_cli.cmdloop()", "title": "" }, { "docid": "d78bfee21e56a35f2513fbcfaef00bd3", "score": "0.44183874", "text": "def show_magicians(magicians):\r\n for magician in magicians:\r\n print(magician)", "title": "" }, { "docid": "c542cc90d66ad8fd0f583346f159a162", "score": "0.4416054", "text": "def show(self):\n for j in self.laby:\n t = []\n for i in j:\n if i.wall:\n t.append(\"■\")\n elif i.hero:\n t.append(\"●\")\n elif i.end:\n t.append(\"▼\")\n else:\n t.append(\"□\")\n print(\"\".join(t))", "title": "" }, { "docid": "44f2d1d997235c8738c4144415279034", "score": "0.44112453", "text": "def display_map(self, map, title='', info='', x=None, y=None, xoff=0, yoff=0,\n det=None, subtitles=None, xrmfile=None, with_savepos=True):\n\n if xrmfile is None:\n hotcols = False\n else:\n hotcols = xrmfile.hotcols\n\n if x is not None:\n zigzag = abs(xrmfile.zigzag)\n if zigzag != 0:\n x = x[zigzag:-zigzag]\n elif 
hotcols and map.shape[1] != x.shape[0]:\n x = x[1:-1]\n\n\n dopts = dict(title=title, x=x, y=y, xoff=xoff, yoff=yoff,\n det=det, subtitles=subtitles,\n xrmfile=xrmfile, with_savepos=with_savepos)\n displayed = False\n while not displayed:\n if len(self.im_displays) == 0:\n imd = self.add_imdisplay(title=title, det=det)\n imd.display(map, contrast_level=0.5, **dopts)\n else:\n try:\n imd = self.im_displays[-1]\n if imd.panel.conf.contrast_level in (0, None):\n dopts['contrast_level'] = 0.5\n imd.display(map, **dopts)\n displayed = True\n except PyDeadObjectError:\n self.im_displays.pop()\n except IndexError:\n pass\n imd.SetStatusText(info, 1)\n imd.Show()\n imd.Raise()", "title": "" }, { "docid": "e8236fdb7cd6a48d1af925408d819fb4", "score": "0.44096968", "text": "def show(self, i=None, c=None):", "title": "" }, { "docid": "360529e3a8d35e0dc5cbac1dd9181a50", "score": "0.44085005", "text": "def setup_display(address, x, y, rotation):\n pass", "title": "" }, { "docid": "a4461d888a0dd495257259583caa7d6b", "score": "0.44016895", "text": "def print_world(self):\n self.output.show()", "title": "" }, { "docid": "4a6782eab3bd5698394ac92fdcb4a634", "score": "0.4397909", "text": "async def show_path(self, dest, res):\n await self.print(dest.info_str)\n prev = None\n for room in res:\n if prev is None:\n d = _(\"Start\")\n else:\n d = prev.exit_to(room).dir\n prev = room\n await self.print(f\"{d}: {room.idnn_str}\")", "title": "" }, { "docid": "759b4b1d060cc87a9ef20196e68adcb0", "score": "0.43927515", "text": "def show_magicians(magicians):\n for magician in magicians:\n print(magician)", "title": "" }, { "docid": "759b4b1d060cc87a9ef20196e68adcb0", "score": "0.43927515", "text": "def show_magicians(magicians):\n for magician in magicians:\n print(magician)", "title": "" }, { "docid": "759b4b1d060cc87a9ef20196e68adcb0", "score": "0.43927515", "text": "def show_magicians(magicians):\n for magician in magicians:\n print(magician)", "title": "" }, { "docid": "cda9d1792ea2b0cbc5858e39acb41c80", "score": "0.43875", "text": "def plot_swath(data, lons, lats):\n import pyresample as pr\n\n plt.figure(figsize=(20,10))\n\n swath_def = pr.geometry.SwathDefinition(lons=lons, lats=lats)\n area_def = pr.utils.load_area('./areas.cfg', 'istjenesten_main_4k')\n result = pr.kd_tree.resample_nearest(swath_def,\n data,\n area_def,\n radius_of_influence=10000,\n fill_value=None,\n nprocs=4)\n pr.plot.save_quicklook('out.png',\n area_def,\n result,\n vmin=0,\n coast_res = 'l',\n vmax=100)", "title": "" }, { "docid": "2245e41dd5b77ac38f0f1c69785eb084", "score": "0.43868408", "text": "def do_show(self, args):\n try:\n self._show(args=args, full_output=True)\n except Exception as error:\n print \"<ERROR>%s\" % error", "title": "" }, { "docid": "1a19aae7adcd7597bd76eb6ce68555f1", "score": "0.43854195", "text": "def tilmann_tomo_input(disp_dict,periods,outdir,df):\n print(\"Writing text files to %s\" % outdir)\n create_path(outdir)\n # clean disp_dict of entrys with no interp_i_periods\n for i in disp_dict.keys() :\n if len(disp_dict[i]['interp_periods']) == 0 :\n del disp_dict[i]\n\n # Sta dictionary\n stadict={}\n for l in df[['station','lat_1','lon_1','el_1']].values :\n stadict[l[0].split(\"-\")[0]]=l[1:]\n for l in df[['station','lat_2','lon_2','el_2']].values :\n stadict[l[0].split(\"-\")[1]]=l[1:]\n stalist=sorted(stadict.keys())\n\n # Print a station file:\n ofid=open(os.path.join(outdir,\"stations.lonlat\"),'wa')\n for s in stalist :\n lat,lon,el=stadict[s]\n ofid.write(\"%s %s %s %s\\n\" % (s,lon,lat,el))\n 
ofid.close()\n\n # Convert that station file to cartesian using the\n # gmt script, and awk out each column separately to a txt file\n data_array=np.zeros([len(disp_dict.keys()),len(periods)])\n station_ind=np.zeros([len(disp_dict.keys())],)\n receiver_ind=np.zeros([len(disp_dict.keys())],)\n distances=np.zeros([len(disp_dict.keys())],)\n\n for k,d in enumerate(sorted(disp_dict.keys())):\n pair_info=df[df['name']==disp_dict[d]['name']]\n disp_pers=disp_dict[d]['interp_periods']\n disp_times=disp_dict[d]['interp_times']\n # Add vel for each period to data array\n for j,p in enumerate(periods) :\n time=disp_times[disp_pers==p]\n if len(time)>0 :\n data_array[k,j]=time\n else :\n data_array[k,j]=np.nan\n\n # Add the station and receiver index (for sta x,y,names list)\n sta1,sta2=pair_info['station'].values[0].split('-')\n station_ind[k]=stalist.index(sta1)+1 # Matlab indices start at 1\n receiver_ind[k]=stalist.index(sta2)+1 # Matlab indices start at 1\n # Add the separation distance\n distances[k]=disp_dict[d]['dist'][0]\n # Write ascii files to later be read to matlab format by prepare_data.m\n np.savetxt(os.path.join(outdir,\"periods.txt\"),periods)\n np.savetxt(os.path.join(outdir,\"data.txt\"),data_array)\n np.savetxt(os.path.join(outdir,\"ri.txt\"),receiver_ind)\n np.savetxt(os.path.join(outdir,\"si.txt\"),station_ind)\n return", "title": "" }, { "docid": "99b1c8155e2ee2793e5a295fd9175d63", "score": "0.43851328", "text": "def get_beam_response(obsid,\n sources,\n dt=296,\n centeronly=True):\n observation = metadata.get_observation(obsid=obsid)\n if observation is None:\n logger.error('Unable to retrieve metadata for observation %d' % obsid)\n return None\n\n duration = observation['starttime'] - observation['stoptime']\n starttimes = numpy.arange(0, duration, dt)\n stoptimes = starttimes + dt\n stoptimes[stoptimes > duration] = duration\n Ntimes = len(starttimes)\n midtimes = obsid + 0.5 * (starttimes + stoptimes)\n logger.info('Will output for %d times from 0 to %ds after %d\\n' % (Ntimes, duration, obsid))\n\n channels = observation['rfstreams']['0']['frequencies']\n if not centeronly:\n PowersX = numpy.zeros((len(sources),\n Ntimes,\n len(channels)))\n PowersY = numpy.zeros((len(sources),\n Ntimes,\n len(channels)))\n # in Hz\n frequencies = numpy.array(channels) * 1.28e6\n else:\n PowersX = numpy.zeros((len(sources),\n Ntimes, 1))\n PowersY = numpy.zeros((len(sources),\n Ntimes, 1))\n frequencies = numpy.array([channels[12]]) * 1.28e6 # center channel\n RAs = numpy.array([x[0] for x in sources])\n Decs = numpy.array([x[1] for x in sources])\n if len(RAs) == 0:\n logger.error('Must supply >=1 source positions\\n')\n return None\n if not len(RAs) == len(Decs):\n logger.error('Must supply equal numbers of RAs and Decs\\n')\n return None\n\n obs_source = SkyCoord(ra=RAs,\n dec=Decs,\n equinox='J2000',\n unit=(astropy.units.deg, astropy.units.deg))\n obs_source.location = config.MWAPOS\n\n for itime in range(Ntimes):\n obs_source.obstime = Time(midtimes[itime], format='gps', scale='utc')\n obs_source_prec = obs_source.transform_to('altaz')\n Azs, Alts = obs_source_prec.az.deg, obs_source_prec.alt.deg\n\n # go from altitude to zenith angle\n theta = numpy.radians(90 - Alts)\n phi = numpy.radians(Azs)\n\n for ifreq in range(len(frequencies)):\n rX, rY = MWA_Tile_analytic(theta, phi,\n freq=frequencies[ifreq], delays=observation['rfstreams']['0']['delays'],\n zenithnorm=True,\n power=True)\n PowersX[:, itime, ifreq] = rX\n PowersY[:, itime, ifreq] = rY\n\n return observation, midtimes, 
PowersX, PowersY", "title": "" }, { "docid": "f1a88573c365673c8f25d4dd0cb55174", "score": "0.43851265", "text": "def demo():\n genome = [[[0], [1, 0], [0, 0, 1], [0, 1, 0, 0], [0, 0, 1, 0, 1], [0]],\n [[0], [0, 0], [0, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0, 0], [1]],\n [[0], [0, 1], [1, 1, 0], [0, 0, 1, 1], [1, 0, 0, 1, 0], [0]]]\n\n d = make_dot_genome(genome, title=\"Demo Genome\", filename=\"test\")\n d.view()\n\n # d = make_dot_phase(genome, 1)\n # d.view()", "title": "" }, { "docid": "886f12d78f76a2ede4b24a1658890c8a", "score": "0.43815368", "text": "def show_history(self, oid):\n query = [{'staff.%s.id' % p: oid} for p in POSITIONS]\n results = self.shows.find({'$or': query})\n shows = map(lambda s: s['titles']['english'], results)\n return self.reply(shows)", "title": "" }, { "docid": "ec95cd84e95017f9c0c73709c64d697b", "score": "0.4378997", "text": "def cmd_show(args):\n\tif not args:\n\t\terror(\"need guid argument\")\n\tguid = _HOOKS.be_complete_guid(args[0])\n\tif not guid:\n\t\treturn\n\tissue = _HOOKS.be_load_issue(guid)\n\tprint issue.longString()", "title": "" }, { "docid": "d126f5edbbc88fa9b4b51284d3600ba3", "score": "0.43763855", "text": "def DisplayMatrix(M):\n DisplayMath(MatrixToLatex(M))", "title": "" }, { "docid": "dce3692e8a7be08015f1e479ba5021b3", "score": "0.43705556", "text": "def make_obslog(path):\n cal_objects = ['bias', 'flat', 'dark', 'i2', 'thar']\n regular_names = ('Bias', 'Flat', 'ThAr', 'I2')\n\n # if the obsinfo file exists, read and pack the information\n addinfo_lst = {}\n obsinfo_file = 'obsinfo.txt'\n has_obsinfo = os.path.exists(obsinfo_file)\n if has_obsinfo:\n #io_registry.register_reader('obslog', Table, read_obslog)\n #addinfo_table = Table.read(obsinfo_file, format='obslog')\n addinfo_table = read_obslog(obsinfo_file)\n addinfo_lst = {row['frameid']:row for row in addinfo_table}\n # prepare the difference list between real observation time and FITS\n # time\n real_obsdate_lst = []\n delta_t_lst = []\n\n # scan the raw files\n fname_lst = sorted(os.listdir(path))\n\n # prepare logtable\n logtable = Table(dtype=[\n ('frameid', 'i2'), ('fileid', 'S12'), ('imgtype', 'S3'),\n ('object', 'S12'), ('i2cell', 'bool'), ('exptime', 'f4'),\n ('obsdate', Time), ('nsat', 'i4'), ('q95', 'i4'),\n ])\n\n # prepare infomation to print\n pinfo = FormattedInfo(all_columns,\n ['frameid', 'fileid', 'imgtype', 'object', 'i2cell', 'exptime',\n 'obsdate', 'nsat', 'q95'])\n\n # print header of logtable\n print(pinfo.get_separator())\n print(pinfo.get_title())\n #print(pinfo.get_dtype())\n print(pinfo.get_separator())\n\n prev_frameid = -1\n # start scanning the raw files\n for fname in fname_lst:\n if fname[-5:] != '.fits':\n continue\n fileid = fname[0:-5]\n filename = os.path.join(path, fname)\n data, head = fits.getdata(filename, header=True)\n\n # determine the science and overscan regions\n naxis1 = head['NAXIS1']\n naxis2 = head['NAXIS2']\n x1 = head.get('CRVAL1', 0)\n y1 = head.get('CRVAL2', 0)\n # get science region along x axis\n cover = head.get('COVER')\n if cover is None:\n if naxis1 >= 4096:\n cover = naxis1 - 4096\n # get science region along y axis\n rover = head.get('ROVER')\n if rover is None:\n if naxis2 >= 4136:\n rover = naxis2 - 4136\n\n # get start and end indices of science region\n y2 = y1 + naxis2 - rover\n x2 = x1 + naxis1 - cover\n data = data[y1:y2,x1:x2]\n\n # find frame-id\n frameid = int(fileid[8:])\n if frameid <= prev_frameid:\n print('Warning: frameid {} > prev_frameid {}'.format(\n frameid, prev_frameid))\n\n # parse obsdate\n if 'DATE-STA' 
in head:\n obsdate = Time(head['DATE-STA'])\n else:\n obsdate = Time(head['DATE-OBS'])\n if (frameid in addinfo_lst and 'obsdate' in addinfo_table.colnames\n and addinfo_lst[frameid]['obsdate'] is not np.ma.masked):\n real_obsdate = addinfo_lst[frameid]['obsdate'].datetime\n file_obsdate = obsdate.datetime\n delta_t = real_obsdate - file_obsdate\n real_obsdate_lst.append(real_obsdate)\n delta_t_lst.append(delta_t.total_seconds())\n\n if 'EXPTIME' in head:\n exptime = head['EXPTIME']\n else:\n exptime = head['EXPOSURE']\n\n # parse object name\n if 'OBJECT' in head:\n objectname = head['OBJECT'].strip()\n else:\n objectname = ''\n if (frameid in addinfo_lst and 'object' in addinfo_table.colnames\n and addinfo_lst[frameid]['object'] is not np.ma.masked):\n objectname = addinfo_lst[frameid]['object']\n # change to regular name\n for regname in regular_names:\n if objectname.lower() == regname.lower():\n objectname = regname\n break\n\n # parse I2 cell\n i2cell = objectname.lower()=='i2'\n if (frameid in addinfo_lst and 'i2cell' in addinfo_table.colnames\n and addinfo_lst[frameid]['i2cell'] is not np.ma.masked):\n i2cell = addinfo_lst[frameid]['i2cell']\n\n imgtype = ('sci', 'cal')[objectname.lower().strip() in cal_objects]\n\n # determine the total number of saturated pixels\n saturation = (data>=65535).sum()\n\n # find the 95% quantile\n quantile95 = np.sort(data.flatten())[int(data.size*0.95)]\n\n item = [frameid, fileid, imgtype, objectname, i2cell, exptime, obsdate,\n saturation, quantile95]\n logtable.add_row(item)\n # get table Row object. (not elegant!)\n item = logtable[-1]\n\n # print log item with colors\n string = pinfo.get_format(has_esc=False).format(item)\n print(print_wrapper(string, item))\n\n prev_frameid = frameid\n\n print(pinfo.get_separator())\n \n # sort by obsdate\n #logtable.sort('obsdate')\n\n if has_obsinfo and len(real_obsdate_lst)>0:\n # determine the time offset as median value\n time_offset = np.median(np.array(delta_t_lst))\n time_offset_dt = datetime.timedelta(seconds=time_offset)\n # plot time offset\n fig = plt.figure(figsize=(9, 6), dpi=100)\n ax = fig.add_axes([0.12,0.16,0.83,0.77])\n xdates = mdates.date2num(real_obsdate_lst)\n ax.plot_date(xdates, delta_t_lst, 'o-', ms=6)\n ax.axhline(y=time_offset, color='k', ls='--', alpha=0.6)\n ax.set_xlabel('Log Time', fontsize=12)\n ax.set_ylabel('Log Time - FTIS Time (sec)', fontsize=12)\n x1, x2 = ax.get_xlim()\n y1, y2 = ax.get_ylim()\n ax.text(0.95*x1+0.05*x2, 0.1*y1+0.9*y2,\n 'Time offset = %d seconds'%time_offset, fontsize=14)\n ax.set_xlim(x1, x2)\n ax.set_ylim(y1, y2)\n ax.grid(True, ls='-', color='w')\n ax.set_facecolor('#eaeaf6')\n ax.set_axisbelow(True)\n ax.spines['bottom'].set_color('none')\n ax.spines['left'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none')\n for t in ax.xaxis.get_ticklines():\n t.set_color('none')\n for t in ax.yaxis.get_ticklines():\n t.set_color('none')\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d %H:%M'))\n #plt.setp(ax.get_xticklabels(), rotation=30)i\n fig.autofmt_xdate()\n for tick in ax.xaxis.get_major_ticks():\n tick.label1.set_fontsize(10)\n for tick in ax.yaxis.get_major_ticks():\n tick.label1.set_fontsize(10)\n fig.suptitle('Time Offsets Between Log and FITS', fontsize=15)\n fig.savefig('obsdate_offset.png')\n plt.close(fig)\n\n # correct time offset\n for row in logtable:\n row['obsdate'] = row['obsdate'] + time_offset_dt\n\n # determine filename of logtable.\n # use the obsdate of the first frame\n obsdate = 
logtable[0]['obsdate'].iso[0:10]\n outname = '{}.obslog'.format(obsdate)\n if os.path.exists(outname):\n i = 0\n while(True):\n i += 1\n outname = '{}.{}.obslog'.format(obsdate, i)\n if not os.path.exists(outname):\n outfilename = outname\n break\n else:\n outfilename = outname\n\n # save the logtable\n outfile = open(outfilename, 'w')\n outfile.write(pinfo.get_title()+os.linesep)\n outfile.write(pinfo.get_dtype()+os.linesep)\n outfile.write(pinfo.get_separator()+os.linesep)\n for row in logtable:\n outfile.write(pinfo.get_format().format(row)+os.linesep)\n outfile.close()", "title": "" }, { "docid": "e926118926217dc28fa105792efa8ee1", "score": "0.43687403", "text": "def showmonth(request, year: int, month: int):\n logger.debug(f'start, year: {year}, month: {month}')\n bme280s = BME280.objects.filter(measure_date__year=year,\n measure_date__month=month,\n )\n title = f'Show {year}/{month} pressure, humidity and temperature'\n return __response(request, bme280s, title)", "title": "" }, { "docid": "432c06e98ea07b4c29c7fb611d51e728", "score": "0.43596187", "text": "def show(self):\n if self.state == 0 :\n return\n\n if self.state == 2 and time.time() - self.current_time > 1/self.frequency:\n \n if self.current_frame == len(self.frames) - 1 :\n self.stop()\n return\n else:\n self.current_frame += 1\n self.current_time = time.time()\n \n for y in range(0, len(self.frames[self.current_frame])):\n for x in range(0, len(self.frames[self.current_frame][y])):\n Terminal.write(self.frames[self.current_frame][y][x], [x + self.position[0], y + self.position[1]], self.color)\n \n pass", "title": "" }, { "docid": "f32b97653fc1236c47e997b0892bddfc", "score": "0.43550873", "text": "def display_board(genome):\n print('+-' + '--' * len(genome) + '+')\n\n for row in range(len(genome)):\n elements = []\n for genome_item in genome:\n if genome_item == row:\n elements.append('Q')\n else:\n elements.append('.')\n print('|' + ''.join(elements) + '|')\n\n print('+-' + '--' * len(genome) + '+')", "title": "" }, { "docid": "08835850639059cbce81a6281f52787d", "score": "0.43543613", "text": "def do_show(self, line):\n Show().onecmd(line)", "title": "" }, { "docid": "7c7ed1f2e0d0603f5ed25b0086910b49", "score": "0.43532255", "text": "def _print_verbage(self):\n print \"Tx amplitude %s\" % (self._tx_amplitude)", "title": "" } ]
ab1c464dbeffedcad2295dc61d6040f9
Categories are correctly named
[ { "docid": "a5c6b7ce12746d005f0e84b77c840071", "score": "0.6557743", "text": "def test_class_name(self):\n category = Category.objects.get(name=\"MED\")\n self.assertEqual(category.name, 'MED')", "title": "" } ]
[ { "docid": "04b89e3005840b483d9d68614f7db866", "score": "0.7223058", "text": "def __str__(self):\n return \"%s\" %(self.category_name)", "title": "" }, { "docid": "17f31039bc63e05d2c29cbfddac669f2", "score": "0.7195257", "text": "def category():\n pass", "title": "" }, { "docid": "571cdaedb05995783822f5488cf8d030", "score": "0.7133246", "text": "def _indetify_category(self):\n # lets have a categories assign automatically\n for category, identifiers in categories.items():\n for identifier in identifiers:\n if identifier in self.name.lower():\n return category\n logger.warning(\n f\"Merchandise {self.name}, Category not define. Set to GENERIC.\")\n return 'GENERIC'", "title": "" }, { "docid": "0708909944d1765b981bfd898b8dc4cb", "score": "0.6985266", "text": "def category_name(self):\n return self.category.name", "title": "" }, { "docid": "1171269213d9ac1738cfb3753f6be324", "score": "0.6911554", "text": "def name_get(self):\n if self._context.get('patient_category_display') == 'short':\n return super(ThoriumcorpPatientCategory, self).name_get()\n\n res = []\n for category in self:\n names = []\n current = category\n while current:\n names.append(current.name)\n current = current.category_id\n res.append((category.id, ' / '.join(reversed(names))))\n return res", "title": "" }, { "docid": "4903bb6e2c0b3ec06a94427c6e9200a0", "score": "0.6857641", "text": "def __str__(self):\n\n return self.category", "title": "" }, { "docid": "f4ea50e0623473da804a0c75c4d8cd68", "score": "0.6791495", "text": "def category(self):\n return ''", "title": "" }, { "docid": "8bd5539522bd26d8c849a0967b9d13ff", "score": "0.67178434", "text": "def test_attribute_template_get_categories(self):\n pass", "title": "" }, { "docid": "522037aea5e59b22397d2868ead66639", "score": "0.6682366", "text": "def getCategories(self): #$NON-NLS-1$\r", "title": "" }, { "docid": "cf6cc27eced86a1024a73928f5cdd888", "score": "0.6675249", "text": "def get_categories(self, pipeline, object_name):\n if object_name == IMAGE:\n return [M_CATEGORY]\n return []", "title": "" }, { "docid": "663707172cd224e450927d65b9535776", "score": "0.66700995", "text": "def test_0005_create_category( self ):\n self.create_category( name=category_name, description=category_desc )", "title": "" }, { "docid": "0b7ff97624b2c2b1c2f7433fdffe00d5", "score": "0.6660696", "text": "def getCat(self):\n return self.category", "title": "" }, { "docid": "f99a96fe5921c336bdce08adff0a084b", "score": "0.6612987", "text": "def _handle_category(self, category_value):\n category = _Classifications.__dict__.keys()[_Classifications.__dict__.values().index(category_value)]\n self.groups.append(category)\n return str(category).lower()", "title": "" }, { "docid": "20bdddda4a6aa86aabad1cfc71ad1b7a", "score": "0.6595931", "text": "def _categories_encoding(self):", "title": "" }, { "docid": "b0933ca862b6567c160ac589e3260ce3", "score": "0.6580657", "text": "def test_event_categories(self):\r\n\t\tcategory1 = self.event.category.all()[0]\r\n\t\tcategory2 = self.event.category.all()[1]\r\n\t\tself.assertEqual(category1.name, 'sports')\r\n\t\tself.assertEqual(category2.name, 'music')", "title": "" }, { "docid": "30835c272d90f2387ea154d90db2997e", "score": "0.6575507", "text": "def test_category_detection(self):\n pass", "title": "" }, { "docid": "9f9475dc24705cdcb9d4c57157ce6a68", "score": "0.6546788", "text": "def _GetCategoryName(self):\n match = re.match(self._REGEX, self.path)\n if not match:\n return\n return match.group('category')", "title": "" }, { "docid": 
"f2756c78a178c037d0d8d7998d933c3f", "score": "0.65424377", "text": "def sample_category(name='Hardware'):\n\n return models.Category.objects.create(name=name)", "title": "" }, { "docid": "6fbfa1cd29b03bac59ef2d15304c2d49", "score": "0.6524565", "text": "def category(self) -> str:\n return pulumi.get(self, \"category\")", "title": "" }, { "docid": "6fbfa1cd29b03bac59ef2d15304c2d49", "score": "0.6524565", "text": "def category(self) -> str:\n return pulumi.get(self, \"category\")", "title": "" }, { "docid": "6fbfa1cd29b03bac59ef2d15304c2d49", "score": "0.6524565", "text": "def category(self) -> str:\n return pulumi.get(self, \"category\")", "title": "" }, { "docid": "1704933201ef6f891446d8580c04565d", "score": "0.651059", "text": "def __set_name__(self, cls, name):\n self._category = name", "title": "" }, { "docid": "e6e9f899bc83757c88ff3243ed1943d6", "score": "0.64237636", "text": "def parse_category(self):\n category = self.product.css(\n '.product[data-list-id=product]::attr(data-detail-category)'\n ).get() or 'Ювелирные украшения'\n\n categories = category.split('/')\n category = categories[1] if len(categories) > 1 else categories[0]\n self.loader.add_value('category', category)", "title": "" }, { "docid": "d147b905f91c97452451392bf0ba9c56", "score": "0.6417543", "text": "def category(self) -> str:\n return self._search_in_properties(ATTR_CATEGORY)", "title": "" }, { "docid": "6ceb7a9b8c026d17be819347b419b898", "score": "0.6405567", "text": "def __str__(self):\n return \"Category: {}, Caption: {}\".format(self.category.name, self.caption)", "title": "" }, { "docid": "b1bc30de45e99546893c31fd7a656a2f", "score": "0.6383898", "text": "def __repr__(self) -> str:\n return f\"<Category {self.name}>\"", "title": "" }, { "docid": "2365258ecbbd4e9ecc0cfca4f8fe86c9", "score": "0.6368867", "text": "def test_get_concept_categories(self):\n pass", "title": "" }, { "docid": "91406c7ba5c8958393e68a3c4e04b0ef", "score": "0.6349923", "text": "def __repr__(self):\n return f'{self.CategoryName} (ID: {self.CategoryID})'", "title": "" }, { "docid": "415d98994b8dd81a18507bb2900843f7", "score": "0.63415694", "text": "def category_names():\r\n CATEGORY_URL = BASE_URL + \"m=itemdb_rs/bestiary/slayerCatNames.json\"\r\n return requests.get(CATEGORY_URL).json()", "title": "" }, { "docid": "aaecd04e6a34ac7d25e03b4b66ad6e39", "score": "0.633803", "text": "def getcats(self):\n self.categories = list(self.df.category_name)", "title": "" }, { "docid": "bf9166c9731168e7df092f7b4d14c367", "score": "0.63263065", "text": "def __init__(self, category=None, name=None):\n\n self.category = category\n self.name = name", "title": "" }, { "docid": "bbdfb010151a623d0b761b9f8834b270", "score": "0.6321169", "text": "def get_category_names(self):\n return [x.name for x in self.get_categories()]", "title": "" }, { "docid": "f8a687f19e1bc71d90632ff9ecbd8677", "score": "0.63200146", "text": "def serialize_category(self):\n return{\n 'name': self.name,\n 'id': self.id\n }", "title": "" }, { "docid": "0dce449eeb3ee01b268c743226e6b980", "score": "0.6308644", "text": "def category(self):\n return _msys.print_category(self._ptr.category)", "title": "" }, { "docid": "7d0174242b69a5e3155a34359c1d9d68", "score": "0.6304219", "text": "def make_categories_advanced(simple=True, yolo=False):\r\n if simple:\r\n cat_list = [\r\n {\r\n \"id\": 0,\r\n \"name\": \"Benign Tumor\",\r\n },\r\n {\r\n \"id\": 1,\r\n \"name\": \"Malignant Tumor\",\r\n }\r\n ]\r\n if yolo:\r\n cat_mapping = {\r\n \"benign\": 0,\r\n \"malign\": 1,\r\n }\r\n else:\r\n 
cat_mapping = [0, 1]\r\n\r\n return cat_list, cat_mapping\r\n\r\n cat_list = [\r\n # malignant first\r\n {\r\n \"supercategory\": \"Malignant\",\r\n \"id\": 1,\r\n \"name\": \"Chondrosarcoma\",\r\n },\r\n {\r\n \"supercategory\": \"Malignant\",\r\n \"id\": 2,\r\n \"name\": \"Osteosarcoma\",\r\n },\r\n {\r\n \"supercategory\": \"Malignant\",\r\n \"id\": 3,\r\n \"name\": \"Ewing sarcoma\",\r\n },\r\n {\r\n \"supercategory\": \"Malignant\",\r\n \"id\": 4,\r\n \"name\": \"Plasma cell myeloma\",\r\n },\r\n {\r\n \"supercategory\": \"Malignant\",\r\n \"id\": 5,\r\n \"name\": \"NHL B Cell\",\r\n },\r\n # now benign\r\n {\r\n \"supercategory\": \"Benign\",\r\n \"id\": 6,\r\n \"name\": \"Osteochondroma\",\r\n },\r\n {\r\n \"supercategory\": \"Benign\",\r\n \"id\": 7,\r\n \"name\": \"Enchondroma\",\r\n },\r\n {\r\n \"supercategory\": \"Benign\",\r\n \"id\": 8,\r\n \"name\": \"Chondroblastoma\",\r\n },\r\n {\r\n \"supercategory\": \"Benign\",\r\n \"id\": 9,\r\n \"name\": \"Osteoid osteoma\",\r\n },\r\n {\r\n \"supercategory\": \"Benign\",\r\n \"id\": 10,\r\n \"name\": \"Non-ossifying fibroma\",\r\n },\r\n {\r\n \"supercategory\": \"Benign\",\r\n \"id\": 11,\r\n \"name\": \"Giant cell tumour of bone\",\r\n },\r\n {\r\n \"supercategory\": \"Benign\",\r\n \"id\": 12,\r\n \"name\": \"Chordoma\",\r\n },\r\n {\r\n \"supercategory\": \"Benign\",\r\n \"id\": 13,\r\n \"name\": \"Haemangioma\",\r\n },\r\n {\r\n \"supercategory\": \"Benign\",\r\n \"id\": 14,\r\n \"name\": \"Aneurysmal bone cyst\",\r\n },\r\n {\r\n \"supercategory\": \"Benign\",\r\n \"id\": 15,\r\n \"name\": \"Simple bone cyst\",\r\n },\r\n {\r\n \"supercategory\": \"Benign\",\r\n \"id\": 16,\r\n \"name\": \"Fibrous dysplasia\",\r\n },\r\n ]\r\n # The names from datainfo are used here!\r\n cat_mapping = {\r\n # malign\r\n \"Chondrosarkom\": 1,\r\n \"Osteosarkom\": 2,\r\n \"Ewing-Sarkom\": 3,\r\n \"Plasmozytom / Multiples Myelom\": 4,\r\n \"NHL vom B-Zell-Typ\": 5,\r\n # benign\r\n \"Osteochondrom\": 6,\r\n \"Enchondrom\": 7,\r\n \"Chondroblastom\": 8,\r\n \"Osteoidosteom\": 9,\r\n \"NOF\": 10,\r\n \"Riesenzelltumor\": 11,\r\n \"Chordom\": 12,\r\n \"Hämangiom\": 13,\r\n \"Knochenzyste, aneurysmatische\": 14,\r\n \"Knochenzyste, solitär\": 15,\r\n \"Dysplasie, fibröse\": 16,\r\n }\r\n return cat_list, cat_mapping", "title": "" }, { "docid": "34672bde8d7e85db27d99370e5326220", "score": "0.6292474", "text": "def print_category(self, cat_name):\n if cat_name.lower() == \"fashion\":\n for item in self.items:\n if isinstance(item,Fashion):\n print(item)\n print()\n elif cat_name.lower() == \"book\":\n for item in self.items:\n if isinstance(item,Book):\n print(item)\n print()\n elif cat_name.lower() == \"home_garden\":\n for item in self.items:\n if isinstance(item,HomeGarden):\n print(item)\n print()\n elif cat_name.lower() == \"electronics\":\n for item in self.items:\n if isinstance(item,Electronics):\n print(item)\n print()\n elif cat_name.lower() == \"cd\":\n for item in self.items:\n if isinstance(item,CD):\n print(item)\n print()\n elif cat_name.lower() == \"collectable\":\n for item in self.items:\n if isinstance(item,Collectable):\n print(item)\n print()\n else:\n print(\"That is not a category name.\")", "title": "" }, { "docid": "3e81d36196915cec79eea402943f0d0e", "score": "0.6280121", "text": "def addCategory(self,catName):\n if catName not in self.categories:\n newCat=dbCategory(catName)\n self.categories[catName]=newCat", "title": "" }, { "docid": "fc074620b51f391f86464e6fcce1e6e0", "score": "0.6264369", "text": "def _getCategoryOfObject( 
self, ob ):\n return ob.Category()", "title": "" }, { "docid": "288494524f5c8847282f3e32d6c5b4df", "score": "0.6261422", "text": "def division_category_name(self):\n return self.get_division_category_display()", "title": "" }, { "docid": "f1b0371bb5d782f1918258545f10eee7", "score": "0.6251371", "text": "def getCategories(self, type: unicode) -> List[unicode]:\n ...", "title": "" }, { "docid": "ddd430c7b4e7d9d9ceaee7268b7772c7", "score": "0.62511384", "text": "def test_categories(self) -> None:\n cate_names = [\"OVERALL\"]\n cate_names += [c.name for c in self.classes]\n for super_category, categories in self.super_classes.items():\n cate_names.append(super_category)\n cate_names.extend([c.name for c in categories])\n self.assertEqual(len(self.eval_results), len(cate_names))\n for key in self.eval_results:\n self.assertIn(key, cate_names)", "title": "" }, { "docid": "626792d7a3821e8a6a8d8d78e3b32276", "score": "0.6224911", "text": "def novel_categories():\n return ['UN']", "title": "" }, { "docid": "3c3f8f7b4b813ea0bddad61c6bf90c67", "score": "0.62224686", "text": "def category(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"category\")", "title": "" }, { "docid": "5eb9a590c17982775051eb9075119db6", "score": "0.6219983", "text": "def test_category(self):\n AMc = AMa.category()\n self.assertTupleEqual(AMc.metrics.dim, (1, 25))\n self.assertEqual(AMc.summary().split(\"\\n\")[1].split(\"\\t\")[0], \"PAIR\")\n AMc = AMa.category('FIRST_OF_PAIR')\n self.assertTupleEqual(AMc.metrics.dim, (1, 25))\n self.assertEqual(AMc.summary().split(\"\\n\")[1].split(\"\\t\")[0], \"FIRST_OF_PAIR\")", "title": "" }, { "docid": "ba24242606a5677527601eb6486f6220", "score": "0.62171006", "text": "def testCategory(self):\n\n uids = self.uids\n rows = self.rows\n\n h = S3Hierarchy(\"test_hierarchy\")\n for uid in uids:\n category = h.category(uids[uid])\n self.assertEqual(category, rows[uid].category)", "title": "" }, { "docid": "da38f72093540f9c93145c95de167170", "score": "0.6208539", "text": "def _parse_category_human(self, doc):\n return doc['category_human']", "title": "" }, { "docid": "896104cf784a7f5db86e05de269c313b", "score": "0.61914283", "text": "def get_category(self, category_name: str) -> str:\n return self._get_category(category_name).get(\"category\", \"\")", "title": "" }, { "docid": "060d80b998280aa937d35a4d6ed6a6cf", "score": "0.6188489", "text": "def category(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"category\")", "title": "" }, { "docid": "8d7f1e679d5c4d82296b8a7b55c38177", "score": "0.6159405", "text": "def test_get_netflow_categories(self):\n pass", "title": "" }, { "docid": "2dc403fba727142c004c18ae0c2fd897", "score": "0.61410046", "text": "def title(self, obj):\n return \"%s . category . 
palewire\" % obj.title.lower()", "title": "" }, { "docid": "784e11c23c379d212eda73f408b01c39", "score": "0.6123367", "text": "def category(self) -> str:\n return self._category", "title": "" }, { "docid": "bba190dfef26b39b26879175fded21e0", "score": "0.61150557", "text": "def get_categories(self, pipeline, object_name):\n categories = []\n if object_name == cpmeas.IMAGE:\n categories += [\"Count\"]\n if object_name == self.output_object_name:\n categories += (\"Location\", \"Number\")\n return categories", "title": "" }, { "docid": "a83c1a6a2a3925574b367035012dee37", "score": "0.61073625", "text": "def category_names(self) -> List[str]:\n return [category[\"name\"] for category in self._config[\"categories\"]]", "title": "" }, { "docid": "872b2ab721c85f5f735956c7976be129", "score": "0.60683304", "text": "def _parse_category_human(self, doc):\n return None", "title": "" }, { "docid": "b1b47c6bb6a36e0f41031c17a3bc58f9", "score": "0.6063661", "text": "def category1(self) -> str:\n return self._search_in_properties(ATTR_CATEGORY1)", "title": "" }, { "docid": "15aa01ddc37895fbb482cb82a4c7b088", "score": "0.60625994", "text": "def test_site_category(self):\n pass", "title": "" }, { "docid": "03ce9a2ed4330e11417c7015910c29f6", "score": "0.60571426", "text": "def test_category_str(self):\n category = sample_category()\n\n self.assertEqual(str(category), category.title)", "title": "" }, { "docid": "f06284095d186cf572a8d3a8c365ba85", "score": "0.60521656", "text": "def test_product_category(self):\n product_tv = Product.objects.get(name='tv')\n product_radio = Product.objects.get(name='radio')\n self.assertEqual(product_tv.get_category(), \"tv belongs to Electronics category.\")\n self.assertEqual(product_radio.get_category(), \"radio belongs to Electronics category.\")", "title": "" }, { "docid": "8cc461e21982ee30c309181c489cf776", "score": "0.60444915", "text": "def setUp(self):\n self.names = [\"QWERTY\", \"ASDF\", \"ZXCV\"]\n self.categoryStatistics = CategoryStatistics()\n self.categoryStatistics.totalForCategory = {}\n for name in self.names:\n category = Category(name=name)\n self.categoryStatistics.totalForCategory[category] = []", "title": "" }, { "docid": "05fe2842911f3acd22eee48de13f66ce", "score": "0.6039802", "text": "def test_successful_category_str(self):\n\n category = models.Category.objects.create(\n name='Accessories'\n )\n\n self.assertEqual(str(category), category.name)", "title": "" }, { "docid": "254274cf86edd2f2b46ae45daf876f3f", "score": "0.6022487", "text": "def _get_category(self, category_name: str) -> Dict[str, str]:\n for category in self._config[\"categories\"]:\n if category[\"name\"] == category_name:\n return category\n raise RuntimeError(\n \"Unexpected error: No category found in vvp-config.yaml \"\n \"with a name of \" + category_name\n )", "title": "" }, { "docid": "1e7a813713e54963bbb25e540c0c3f17", "score": "0.6020258", "text": "def get_all_categories(self):\n\t\tpass", "title": "" }, { "docid": "18bdce67e0abe55d173c1d7be3483c49", "score": "0.60187936", "text": "def getcategory(self):\n # Create the link\n link = self.url + \"categories.json\"\n\n # Make the request :\n r = requests.get(link)\n cat_file = json.loads(r.text)\n\n # Create category list :\n cat_list = []\n i = 0\n while i <= 21:\n cat_list.append(cat_file['tags'][i]['name'])\n cat_list[i] = unicodedata.normalize('NFKD', cat_list[i]).\\\n encode('ascii', 'ignore').decode()\n i += 1\n cat_list_clean = cat_list[2:]\n return cat_list_clean", "title": "" }, { "docid": 
"a0861a325a4b652f549b30533a85e0a0", "score": "0.60144323", "text": "def label_mapping(category_name):\n with open(category_name, 'r') as f:\n flower_to_name = json.load(f)\n return flower_to_name", "title": "" }, { "docid": "affc0856e646527ff5d2510ca0d1c2a0", "score": "0.6010032", "text": "def get_category(name: str) -> Category:\n return Category.objects.get(name=name)", "title": "" }, { "docid": "71d619bf34edc0c0f8ae2abff9b7fb03", "score": "0.6003272", "text": "def determine_category(name, group_name=''):\r\n\r\n category = ''\r\n\r\n if is_hashed(name):\r\n category = CAT_MISC_OTHER\r\n else:\r\n if group_name:\r\n category = check_group_category(name, group_name)\r\n\r\n if not category:\r\n for parent_category in parent_category_regex.keys():\r\n category = check_parent_category(name, parent_category)\r\n if category:\r\n break\r\n\r\n if not category:\r\n category = CAT_MISC_OTHER\r\n\r\n log.info('category: ({}) [{}]: {} ({})'.format(\r\n group_name,\r\n name,\r\n get_category_name(category),\r\n category\r\n ))\r\n return category", "title": "" }, { "docid": "6dbcf7d29918b31f1e76221de9fe436f", "score": "0.59857994", "text": "def get_category(self):\n\n category = self.soupe.find_all(\"li\")[2].text.strip()\n return category", "title": "" }, { "docid": "78645ae986863b172ce083ee07a5f12a", "score": "0.59643507", "text": "def category(self) -> Optional[str]:\n return pulumi.get(self, \"category\")", "title": "" }, { "docid": "4f1e303d6728f1d0ddb89c15711ca475", "score": "0.59599787", "text": "def category_list(self) -> Optional[Sequence[str]]:\n return None", "title": "" }, { "docid": "f32c49533b9a54ab9eb1fbcf5a54abc8", "score": "0.59539586", "text": "def category(self):\n return self._category", "title": "" }, { "docid": "f32c49533b9a54ab9eb1fbcf5a54abc8", "score": "0.59539586", "text": "def category(self):\n return self._category", "title": "" }, { "docid": "f32c49533b9a54ab9eb1fbcf5a54abc8", "score": "0.59539586", "text": "def category(self):\n return self._category", "title": "" }, { "docid": "bdd91a6fd0e3eeed9aad86ffaafe63f9", "score": "0.5939179", "text": "def short_name(self) -> str:\n return f'{self.category}/{self.name}'", "title": "" }, { "docid": "2034a61d47380565064cee4bf406ff25", "score": "0.5937423", "text": "def show_category(category: str) -> None:\n print(f'{Color.UNDERLINE}{Color.COLOR_GREEN}Localização{Color.RESET}{Color.COLOR_GREEN}: {category}{Color.RESET}')", "title": "" }, { "docid": "1c76b87e7eb83951cc1c13b0647be2d5", "score": "0.591842", "text": "def categories(self) -> Sequence[str]:\n return pulumi.get(self, \"categories\")", "title": "" }, { "docid": "7055339781a11ca0556846700ecd6a6e", "score": "0.59128225", "text": "def test_category_slug(self):\n test = Category.objects.get(name=\"test\")\n slugtest = Category.objects.get(name=\"slug test\")\n self.assertEquals(test.slug, 'test')\n self.assertEqual(slugtest.slug, 'slug_test')", "title": "" }, { "docid": "39e5fc279090b86b10a223355f17218c", "score": "0.59094495", "text": "def get_category_keywords(self):\n return self.get_namespace_with_aliases(\"Category\")", "title": "" }, { "docid": "5ffe474cb422d9998b58608f604fe7a5", "score": "0.5908571", "text": "def factory(self):\n return self.F.CategoryFactory", "title": "" }, { "docid": "439ed8ca2efc4727a1c7ced21fdc4fe3", "score": "0.5887272", "text": "def assign_cat_to_name(filepath, categories):\n #Retrieve category names\n with open(filepath, 'r') as f:\n cat_to_name = json.load(f)\n \n #Assign flower names corresponding to the category values using 
\"cat_to_name.json\" dictionary\n category_names = []\n for element in categories:\n category_names.append(cat_to_name.get(element))\n \n #Return vector of category names\n return category_names", "title": "" }, { "docid": "db2f76eca45957cf3ef7db7b999603c2", "score": "0.5887037", "text": "def set_category(self, category):\n pcategory = self.find(\"general/category\")\n pcategory.clear()\n id_ = ElementTree.SubElement(pcategory, \"id\")\n id_.text = category.id\n name = ElementTree.SubElement(pcategory, \"name\")\n name.text = category.name", "title": "" }, { "docid": "88b6c71480dfe14d0184e6b84f82d134", "score": "0.58865774", "text": "def categories(self):\n return [func.__name__ for func in self._mimesis_category_funcs]", "title": "" }, { "docid": "9573df610710ddaf254c94c17f9e8c84", "score": "0.58854353", "text": "def __init__(self):\n self._categories = ['Expense',\\\n ['Food',\n ['meal', 'drink', 'snack', 'cafe', 'groceries']], \\\n ['Transportation',\\\n ['taxi', 'bus', 'railway']],\\\n ['Shopping',\\\n ['stationery', 'health', 'beauty', 'electronics', 'accessories', 'clothing']], \\\n ['Communication',\\\n ['internet', 'phone']], \\\n ['Finance',\\\n ['taxes', 'fines', 'insurance']], \\\n 'Income',\\\n ['salary', 'bonus', 'lottery']]", "title": "" }, { "docid": "07a5c7d5fa05a81b33d59520cf391409", "score": "0.5859883", "text": "def newCategory(name, id):\n category = {'name': '', 'id': ''}\n category['name'] = name\n category['id'] = id\n return category", "title": "" }, { "docid": "8fb4fd6857af87e9a1ab8937aa6b38b5", "score": "0.5848043", "text": "def test__str(self):\n channel = create_channel('Channel Sample')\n category = create_category_in_channel('String Representation', channel)\n\n self.assertEqual(str(category), category.name)", "title": "" }, { "docid": "ea582c5155581ef873cb41d8a872abdd", "score": "0.58448505", "text": "def category(self):\n return models.Category(1, \"Ammo\")", "title": "" }, { "docid": "cd974fdf82926d1178d23182446fe3c9", "score": "0.584272", "text": "def Categories(self):\n urltool = getToolByName(self.context, 'portal_url')\n path = '/'.join(urltool.absolute_url().split('/')[:-1]) + '/'\n\n results = []\n\n try:\n cat = self.context.categoryRRHH\n except:\n cat = ()\n\n objects = cat\n\n for value in objects:\n try:\n obj = self.context.portal_catalog.searchResults(portal_type='SimpleVocabularyTerm', id=value)[0]\n\n results.append({'title': obj.Title,\n 'key': value,\n 'href': path + 'key?' 
+ value,\n })\n except:\n # When an object is migrated, can come with keywords, but perhaps, doesn't exists still in Plone\n None\n\n return results", "title": "" }, { "docid": "dd419b4ce6062f96067d8cda7543ec0f", "score": "0.5839159", "text": "def Category(self):\n return self._category", "title": "" }, { "docid": "6bc165a1258d0e9d75fd5a33c9c94791", "score": "0.583718", "text": "def test_subclass_category_id(self):\n\n ds = MyDataset(x=\"1\", y=\"2\", description=\"test\")\n assert ds.data_name == \"my_dataset\"", "title": "" }, { "docid": "acfd8a65e1e9a880fedda17bbf6f3439", "score": "0.58368623", "text": "def test_site_category_by_id(self):\n pass", "title": "" }, { "docid": "2b0e3b0112fadbab5a98c844ba915bc3", "score": "0.5830038", "text": "def get_category_mapping(self, param, key):\r\n\t\tcategory = \"\"\r\n\t\ttext = \"\"\r\n\t\tif self.options.has_key(\"param_category_tag\"):\r\n\t\t\tcategory = param[self.options[\"param_category_tag\"]]\r\n\t\tif self.options.has_key(\"param_category_text_tag\"):\r\n\t\t\ttext = param[self.options[\"param_category_text_tag\"]]\r\n\t\treturn (category, text)", "title": "" }, { "docid": "90f67a67d1d721f69924127184f37724", "score": "0.5823254", "text": "def display_categories(self):\n if self.context.useClassifiers:\n return ', '.join(self.context.getVocabularyTitlesFromCLassifiers())\n else:\n return ', '.join(self.context.getCategoryTitles())", "title": "" }, { "docid": "dc005b92a747d2e415637ba2387d2b67", "score": "0.5821867", "text": "def _category_properties(self, request):\n p = OrderedDict([\n ('id', OrderedDict([\n ('description', 'Unique Identifier of the category'),\n ('type', 'number'),\n ])),\n ('title', OrderedDict([\n ('title', 'The title of the category'),\n ('type', 'string'),\n ])),\n ('description', OrderedDict([\n ('title', 'The description of the category'),\n ('type', 'string'),\n ]))\n ])\n return p", "title": "" }, { "docid": "f8328af08d03c13b28cab27153470a3d", "score": "0.58210206", "text": "def fuzzy_categorize(name):\n mappings = {\n ProductCategory.BOOKS: ('book',),\n ProductCategory.FOODS: ('chocolate',),\n ProductCategory.MEDICAL: ('headache pill',)\n }\n for category, keywords in mappings.items():\n for keyword in keywords:\n if keyword in name:\n return category\n return ProductCategory.OTHER", "title": "" }, { "docid": "8559ebcd0f10e79f10a33f692a55a3c6", "score": "0.5815515", "text": "def category(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"category\")", "title": "" }, { "docid": "8559ebcd0f10e79f10a33f692a55a3c6", "score": "0.5815515", "text": "def category(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"category\")", "title": "" }, { "docid": "8559ebcd0f10e79f10a33f692a55a3c6", "score": "0.5815515", "text": "def category(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"category\")", "title": "" }, { "docid": "8559ebcd0f10e79f10a33f692a55a3c6", "score": "0.5815515", "text": "def category(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"category\")", "title": "" }, { "docid": "8559ebcd0f10e79f10a33f692a55a3c6", "score": "0.5815515", "text": "def category(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"category\")", "title": "" }, { "docid": "753b7786f35475764ba9351ceafe5f58", "score": "0.58058244", "text": "def category_type(self):\n return self.container['category_type']", "title": "" }, { "docid": "1d19ef9813909e83279cc85bc423785d", "score": "0.58044416", "text": "def add_category(self):\n tester = app.test_client(self)\n return 
tester.post('/category', data=dict(title=\"Breakfast\", description=\"Breakfast is awesome\"))", "title": "" } ]
6a55ef7b5337d6440def0fb227d5dd4c
Global application exception handler. For "basic" errors (I/O errors, keyboard interrupt, etc.), just the error message is printed, as there's generally no need to confuse the user with a complete stack trace when the problem is just a missing file. Other exceptions, however, are logged with the usual full stack trace.
[ { "docid": "59fb904953040acebe4029f7cf6808ee", "score": "0.0", "text": "def error_handler(exc_type, exc_value, exc_trace):\n if issubclass(exc_type, (SystemExit,)):\n # Exit with 0 (\"success\") for system exit (as it was intentional)\n return 0\n elif issubclass(exc_type, (KeyboardInterrupt,)):\n # Exit with 2 if the user deliberately terminates with Ctrl+C\n return 2\n elif issubclass(exc_type, (configargparse.ArgumentError,)):\n # For option parser errors output the error along with a message\n # indicating how the help page can be displayed\n logging.critical(str(exc_value))\n logging.critical('Try the --help option for more information.')\n return 2\n elif issubclass(exc_type, (IOError,)):\n # For simple errors like IOError just output the message which\n # should be sufficient for the end user (no need to confuse them\n # with a full stack trace)\n logging.critical(str(exc_value))\n return 1\n else:\n # Otherwise, log the stack trace and the exception into the log\n # file for debugging purposes\n for line in traceback.format_exception(exc_type, exc_value, exc_trace):\n for msg in line.rstrip().split('\\n'):\n logging.critical(msg.replace('%', '%%'))\n return 1", "title": "" } ]
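For context on the behaviour this query describes (short message for routine failures, full stack trace otherwise), the positive passage above maps exception classes to exit codes inside an error_handler callback. The sketch below is a minimal illustration of the same idea wired up through sys.excepthook instead; the function name, the logger setup, and the choice of OSError/KeyboardInterrupt as the "simple" cases are assumptions made for this example, not part of any passage in the dataset.

    import logging
    import sys
    import traceback

    log = logging.getLogger(__name__)  # assumes logging is configured by the application

    # Routine failures that only warrant a short message (illustrative choice).
    _SIMPLE_ERRORS = (OSError, KeyboardInterrupt)

    def _excepthook(exc_type, exc_value, exc_trace):
        if issubclass(exc_type, _SIMPLE_ERRORS):
            # e.g. a missing file or Ctrl+C: print the message, skip the trace.
            log.critical(str(exc_value) or exc_type.__name__)
        else:
            # Unexpected errors: keep the full stack trace for debugging.
            for line in traceback.format_exception(exc_type, exc_value, exc_trace):
                log.critical(line.rstrip())

    # Install as the process-wide handler for uncaught exceptions.
    sys.excepthook = _excepthook

Unlike the positive passage, this variant does not translate exceptions into exit codes; it only controls what gets logged when an exception escapes the main program.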
[ { "docid": "cfc6405b65231024c8845a59cdfa5f6b", "score": "0.74804026", "text": "def global_exception_handler(exception: Exception) -> None:\n log.error('occur %s %s', exception.__class__.__name__, str(exception.args))\n print(exception)", "title": "" }, { "docid": "9d3b6a24b4f6e9bea2a7dac70411c627", "score": "0.7275831", "text": "def log_exception_as_error():\n AppLogger._app.logger.error(traceback.format_exc())", "title": "" }, { "docid": "85bd91358c39b96b24777a13389269c2", "score": "0.71941674", "text": "def handle_general_exception(self):\n self._session_handler.ui().error(\"An error occured: %s\\n%s\" % (\n sys.exc_info()[0], traceback.format_exc(5)))", "title": "" }, { "docid": "d77c2dc9c8b2552efc5a9d8a6d198f60", "score": "0.68699837", "text": "def _exception_handler (self, message, e=None, fatal=False):\n\n if e:\n v, t, tb = sys.exc_info()\n stack_trace = \"\".join(traceback.format_tb(tb))\n\n self._err(\"%s\\n\\n%s\\n\\n%s\" % (message, e, stack_trace))\n else:\n self._err(\"%s\" % message)\n\n if fatal:\n # we perform a hard exit here to kill all threads.\n os._exit(1)", "title": "" }, { "docid": "c5ac4ac075f549057a2a6810bfe20a40", "score": "0.681249", "text": "def my_error_handler(exc_info):\n traceback.print_exception(*exc_info)", "title": "" }, { "docid": "b7bb7e9bed26b7f1844d7ea4a27959b4", "score": "0.68049604", "text": "def _log_exceptions_in_root_logger() -> None:\n root_logger = logging.getLogger()\n def handle_exception(exc_type, exc_value, exc_traceback):\n root_logger.error(\"Uncaught exception\", exc_info=(exc_type, exc_value, exc_traceback))\n sys.excepthook = handle_exception", "title": "" }, { "docid": "3cdecb26f681d995ac404e6de4546f0c", "score": "0.6766457", "text": "def exception_handler(app):\r\n mail_handler = SMTPHandler((app.config.get('MAIL_SERVER'),\r\n app.config.get('MAIL_PORT')),\r\n app.config.get('MAIL_USERNAME'),\r\n [app.config.get('MAIL_USERNAME')],\r\n '[Exception handler] A 5xx was thrown',\r\n (app.config.get('MAIL_USERNAME'),\r\n app.config.get('MAIL_PASSWORD')),\r\n secure=())\r\n\r\n mail_handler.setLevel(logging.ERROR)\r\n mail_handler.setFormatter(logging.Formatter(\"\"\"\r\n Time: %(asctime)s\r\n Message type: %(levelname)s\r\n\r\n\r\n Message:\r\n\r\n %(message)s\r\n \"\"\"))\r\n app.logger.addHandler(mail_handler)\r\n\r\n return None", "title": "" }, { "docid": "e7ce1e82b25862d7fd0057d2685aaaad", "score": "0.6723797", "text": "def log_error_with_exception(self, message):\n self._devide_app.log_error_with_exception(message)", "title": "" }, { "docid": "a0e988fac525d4dd1ebccc3355169ef6", "score": "0.66745836", "text": "def _log_error(self):\n pymsg = \"PYTHON ERRORS:\\nTraceback info:\\n{1}\\nError Info:\\n{0}\".format(str(sys.exc_info()[1]), \"\".join(traceback.format_tb(sys.exc_info()[2])))\n self._log_message(pymsg, True)", "title": "" }, { "docid": "1548d9067b902f6df561610109129045", "score": "0.66662747", "text": "def handle_exception(exc):\n current_app.logger.exception(exc)\n return u.err_rep(500)", "title": "" }, { "docid": "5c4473a1492ebf9ee1a484441a840031", "score": "0.6644709", "text": "def except_hook(exc_type, exc_value, traceback_obj):\r\n separator = '-' * 80\r\n log_file = \"error.log\"\r\n notice = '''An unhandled exception occurred. 
Please report the problem\r\n via email to <a href=\"mailto:[email protected]\">[email protected]</a>.<br>\r\n A log has been written to \"<i>error.log</i>\" in your application folder.<br><br>\r\n Error information:\\n'''\r\n time_string = time.strftime(\"%Y-%m-%d, %H:%M:%S\")\r\n machine_name = os.getenv('COMPUTERNAME')\r\n user_name = os.getenv('USERNAME')\r\n\r\n tb_info_file = StringIO()\r\n traceback.print_tb(traceback_obj, None, tb_info_file)\r\n tb_info_file.seek(0)\r\n tb_info = tb_info_file.read()\r\n error_message = '{0:s}: \\n{1:s}'.format(str(exc_type), str(exc_value))\r\n sections = [separator, time_string,\r\n 'Username: {0:s}'.format(user_name),\r\n 'Machine: {0:s}'.format(machine_name),\r\n 'Version: {0:s}'.format(VERSION),\r\n separator, error_message,\r\n separator, tb_info]\r\n msg = '\\n'.join(sections)\r\n try:\r\n with open(log_file, 'w') as f:\r\n f.write(msg)\r\n f.write(VERSION)\r\n except IOError:\r\n pass\r\n message_box_error(notice, str(msg))", "title": "" }, { "docid": "9254a554c094ba62f25c930118365add", "score": "0.66414803", "text": "def exception(self, msg, *args, **kwargs):\n msg, kwargs = self.process(msg, kwargs)\n kwargs[\"exc_info\"] = 1\n self.logger.error(msg, *args, **kwargs)", "title": "" }, { "docid": "edd7d972c99bde32211d920c34e0ac1f", "score": "0.6636891", "text": "def set_global_exception():\n def handle_exception(loop, context):\n import sys\n sys.print_exception(context[\"exception\"])\n sys.exit()\n loop = asyncio.get_event_loop()\n loop.set_exception_handler(handle_exception)", "title": "" }, { "docid": "61b5fb584fb1868d5ee661b9b76c0272", "score": "0.65881336", "text": "def exception_error_handler(error):\r\n log4py.error('## exception_error_handler ##')\r\n log4py.error(error)", "title": "" }, { "docid": "7e87d5dda8fa1214ab575b26d49e5a9d", "score": "0.6557283", "text": "def init_globexc(filename=None, context=None):\n\tglobal DEFAULT_STACK_TRACE_FILENAME\n\tglobal CONTEXT_LINES\n\n\tif filename is not None:\n\t\tDEFAULT_STACK_TRACE_FILENAME = filename\n\n\tif context is not None:\n\t\tCONTEXT_LINES = context\n\n\tsys.excepthook = global_exception", "title": "" }, { "docid": "56003dc28cf6754c5ba50011946321ce", "score": "0.654906", "text": "def __global_logging_exception_handler(\n exc_type,\n exc_value,\n exc_traceback,\n _logger=logging.getLogger(__name__),\n _stderr=sys.__stderr__,\n _format_exception=traceback.format_exception,\n):\n if exc_type.__name__ == \"KeyboardInterrupt\":\n # Do not log the exception or display the traceback on Keyboard Interrupt\n # Stop the logging queue listener thread\n if is_mp_logging_listener_configured():\n shutdown_multiprocessing_logging_listener()\n return\n\n # Log the exception\n msg = \"An un-handled exception was caught by salt's global exception handler:\"\n try:\n msg = \"{}\\n{}: {}\\n{}\".format(\n msg,\n exc_type.__name__,\n exc_value,\n \"\".join(_format_exception(exc_type, exc_value, exc_traceback)).strip(),\n )\n except Exception: # pylint: disable=broad-except\n msg = \"{}\\n{}: {}\\n(UNABLE TO FORMAT TRACEBACK)\".format(\n msg,\n exc_type.__name__,\n exc_value,\n )\n try:\n _logger.error(msg)\n except Exception: # pylint: disable=broad-except\n # Python is shutting down and logging has been set to None already\n try:\n _stderr.write(msg + \"\\n\")\n except Exception: # pylint: disable=broad-except\n # We have also lost reference to sys.__stderr__ ?!\n print(msg)\n\n # Call the original sys.excepthook\n try:\n sys.__excepthook__(exc_type, exc_value, exc_traceback)\n except Exception: # 
pylint: disable=broad-except\n # Python is shutting down and sys has been set to None already\n pass", "title": "" }, { "docid": "dbe370a30240ba10e40cd54ad7d14a9c", "score": "0.65303767", "text": "def error_handler(error):\n\n\t# Get the traceback\n\ttrace = str(traceback.format_exc())\n\tif app.debug:\n\t\tdebug = trace\n\telse:\n\t\tdebug = \"Ask your system administrator to consult the error log for this application.\"\n\n\tif 'username' in session:\n\t\tusername = session['username']\n\telse:\n\t\tusername = 'Not logged in'\n\n\t## Log the critical error (so that it goes to e-mail)\n\tapp.logger.error(\"\"\"Fatal Error\nHTTP Path: %s\nHTTP Method: %s\nClient IP Address: %s\nUser Agent: %s\nUser Platform: %s\nUser Browser: %s\nUser Browser Version: %s\nUsername: %s\nTraceback:\n%s\n\"\"\" % (\n\n\t\t\trequest.path,\n\t\t\trequest.method,\n\t\t\trequest.remote_addr,\n\t\t\trequest.user_agent.string,\n\t\t\trequest.user_agent.platform,\n\t\t\trequest.user_agent.browser,\n\t\t\trequest.user_agent.version,\n\t\t\tusername,\n\t\t\ttrace,\t\t\t\n\t\t))\n\n\treturn bargate.lib.errors.fatalerr(debug=debug)", "title": "" }, { "docid": "d6b39b3fdbcf6930c1003ad1138770b7", "score": "0.6508926", "text": "def logUnhandledException(exc_type, exc_value, exc_traceback):\n\n filename, line, dummy, dummy = \\\n traceback.extract_tb(exc_traceback).pop()\n filename = os.path.basename(filename)\n error = \"%s: %s\" % (str(exc_type).split(\".\")[-1], exc_value)\n msg = error + \" on line %d, file %s\" % (line, filename) \n errorbox = PyQt4.QtGui.QMessageBox()\n errorbox.setText(\"Unhandled exception:\\n\"+msg)\n errorbox.exec_()\n file(\"/tmp/procexp.log\",\"ab\").write(msg+\"\\n\")", "title": "" }, { "docid": "d86775a8139889e89b2655d118b161f0", "score": "0.6508741", "text": "def init_email_error_handler(application):\r\n if application.debug: return # Do not send error emails while developing\r\n\r\n # Retrieve email settings from application.config\r\n host = application.config['MAIL_SERVER']\r\n port = application.config['MAIL_PORT']\r\n from_addr = application.config['MAIL_DEFAULT_SENDER']\r\n username = application.config['MAIL_USERNAME']\r\n password = application.config['MAIL_PASSWORD']\r\n secure = () if application.config.get('MAIL_USE_TLS') else None\r\n\r\n # Retrieve application settings from application.config\r\n to_addr_list = application.config['ADMINS']\r\n subject = application.config.get('application_SYSTEM_ERROR_SUBJECT_LINE', 'System Error')\r\n\r\n # Setup an SMTP mail handler for error-level messages\r\n import logging\r\n from logging.handlers import SMTPHandler\r\n\r\n mail_handler = SMTPHandler(\r\n mailhost=(host, port), # Mail host and port\r\n fromaddr=from_addr, # From address\r\n toaddrs=to_addr_list, # To address\r\n subject=subject, # Subject line\r\n credentials=(username, password), # Credentials\r\n secure=secure,\r\n )\r\n mail_handler.setLevel(logging.ERROR)\r\n application.logger.addHandler(mail_handler)\r\n\r\n # Log errors using: application.logger.error('Some error message')\r", "title": "" }, { "docid": "6600e91d11f01e372eaa17535aa9fa05", "score": "0.6507649", "text": "def dump_exception() -> None:\n import traceback\n print(traceback.format_exc())\n terminate_app()", "title": "" }, { "docid": "8881d705ec3a4eec7a85beb9a474cfa8", "score": "0.6501494", "text": "def ucx_exception_handler(loop, context):\n msg = context.get(\"exception\", context[\"message\"])\n print(msg)", "title": "" }, { "docid": "5e29a3928d27494a8e6f913f9a82d730", "score": "0.6486939", "text": "def 
uncaught_error_handler(e):\n\n app.logger.exception(e)\n error_message = 'Internal Server Error: {}'.format(repr(e))\n return (jsonify(status='error', message=error_message), 500)", "title": "" }, { "docid": "9b4514b4da1d5dfe1f951977003cbd18", "score": "0.6445176", "text": "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "title": "" }, { "docid": "9b4514b4da1d5dfe1f951977003cbd18", "score": "0.6445176", "text": "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "title": "" }, { "docid": "9b4514b4da1d5dfe1f951977003cbd18", "score": "0.6445176", "text": "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "title": "" }, { "docid": "9b4514b4da1d5dfe1f951977003cbd18", "score": "0.6445176", "text": "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "title": "" }, { "docid": "b5063f33b7c2cb6393acae2e345cc9a6", "score": "0.64441085", "text": "def on_import_error(\n self, exc: Exception, module_path: Text, context: DictObject\n ):\n self.log.exception(f'encountered import error in {module_path}')", "title": "" }, { "docid": "ce9a66ca8ea6bb73656a37122399caef", "score": "0.63835645", "text": "def _log_exception():\n exc = traceback.format_exception(*sys.exc_info())\n rospy.logerr(''.join(exc))", "title": "" }, { "docid": "f7c8a7a49f13c7b442072e2660b02edb", "score": "0.63798857", "text": "def handleException(e,path):\r\n if isinstance(e,EnvironmentError):\r\n if os.name=='nt' and isinstance(e,WindowsError):\r\n if e.errno in [2,3]:\r\n _raisePathNotFoundError(e,path)\r\n \r\n if isinstance(e,OSError) or isinstance(e,IOError):\r\n if e.errno==errno.ENOENT:\r\n _raisePathNotFoundError(e,path)\r\n if e.errno==errno.EINVAL:\r\n raise ufsi.InvalidArgumentError('Invalid argument.',e)\r\n\r\n\r\n raise e", "title": "" }, { "docid": "b9990a93c98ceac71a0c16fffd707904", "score": "0.6372835", "text": "def print_uncaught_exception():\n print traceback.format_exc()", "title": "" }, { "docid": "01a7ae5833cb5b2fefe5f9ae0ffadceb", "score": "0.6362596", "text": "def handle_error(e):\n logging.error(str(e) + '\\n...Exiting program...')\n sys.exit(0)", "title": "" }, { "docid": "031dfcc793f3ed59c478cc9f03d6a961", "score": "0.6352487", "text": "def defualt_error_handler(errr):\n s = StringIO()\n traceback.print_exc(file=s)\n text = s.getvalue()\n print(text)\n us = get_user_session()\n model.Error.new(us.id, subject='Python Error', text=text)", "title": "" }, { "docid": "676dbee5f3379b8a5b3a007a8d76d6e4", "score": "0.63474107", "text": "def unhandled_error_handler(self, error):\n # TODO: decide how to pass this error to the app\n # TODO: if there's an error in the app handler, print it and exit\n pass", "title": "" }, { "docid": "2f67e96db07e75f6170e4b0fb88b8d0b", "score": "0.6336021", "text": "def _error_handler(msg, system_exit=True):\n LOG.error(msg)\n if system_exit is True:\n raise SystemExit(msg)", "title": "" }, { "docid": "189f957e0f01fdaca8e46e7b077c9c47", "score": "0.6316678", "text": "def on_exception(self, context, exception):\n try:\n raise exception\n except LibraryError, e:\n tag = 'gchecky'\n error = 'Gchecky bug'\n except SystemError, e:\n tag = 'system'\n error = 'System failure'\n except HandlerError, e:\n tag = 'handler'\n error = 'Error in user handler method'\n except DataError, e:\n tag = 'data'\n error = 'Error converting data to/from XML'\n except:\n # Should never happen...\n tag = 'unknown'\n error = 'Unknown error'\n\n description = \"%s:\\n%s\\n\\nOriginal 
Error:\\n%s\\n%s\" % (error,\n exception,\n exception.origin,\n exception.traceback)\n # TODO: exception trace\n self.__log(context=context, tag=tag, error=error, description=description)\n return \"%s\\n\\n%s\" % (error, description)", "title": "" }, { "docid": "2937bfbbe1fe284fbff2c0f508ff68f1", "score": "0.6286468", "text": "def record_exception():\r\n import traceback\r\n traceback.print_exc()", "title": "" }, { "docid": "6ee7d87290912984018b2153f5089723", "score": "0.62625086", "text": "def setup_exceptions():\n # first set up the variables needed by the _excepthook function\n # pylint: disable=global-statement\n global _print_traceback, _drill\n local_print_traceback = os.getenv(\"PYLOGCONF_PRINT_TRACEBACK\")\n if local_print_traceback is not None:\n _print_traceback = _str2bool(local_print_traceback)\n local_drill = os.getenv(\"PYLOGCONF_DRILL\")\n if local_drill is not None:\n _drill = _str2bool(local_drill)\n # now that everything is ready attach the hook\n sys.excepthook = _excepthook", "title": "" }, { "docid": "a0e2f25a546985c8d2b497e3e82637a2", "score": "0.6249556", "text": "def exception(self, msg, *args, **kwargs):\r\n\r\n kwargs[\"exc_info\"] = 1\r\n self.log(logging.ERROR, msg, *args, **kwargs)", "title": "" }, { "docid": "1b4b57fb4c77b5a179ff9a4b41660732", "score": "0.62373877", "text": "def handle_error(self, request, client_address):\n print '-' * 40\n print 'Exception happened during processing of request from',\n print client_address\n import traceback\n traceback.print_exc() # XXX But this goes to stderr!\n print '-' * 40", "title": "" }, { "docid": "51ace34acf8d4cccb703d69da914d239", "score": "0.62038326", "text": "def exception ():\n \n try:\n type, value, tb = sys.exc_info ()\n info = traceback.extract_tb (tb)\n filename, lineno, function, text = info[-1] # last line only\n print_err (\"Exception: %s:%d: %s: %s (in %s)\" %\\\n (filename, lineno, type.__name__, str (value), function))\n finally:\n type = value = tb = None # clean up", "title": "" }, { "docid": "4230da2047f3cc0094863ecbe2b9db7a", "score": "0.6197903", "text": "def ErrorHandler(app, global_conf, **errorware):\n if asbool(global_conf.get('debug')):\n from pylons.error import PylonsEvalException\n app = PylonsEvalException(app, global_conf, **errorware)\n else:\n from paste.exceptions.errormiddleware import ErrorMiddleware\n if 'error_template' in errorware:\n del errorware['error_template']\n app = ErrorMiddleware(app, global_conf, **errorware)\n return app", "title": "" }, { "docid": "6d7cdf181edef58e7b2a5a5575814e8d", "score": "0.61955386", "text": "def log_exception(self, typ, value, tb):\r\n if isinstance(value, HTTPError):\r\n if value.log_message:\r\n format = \"%d %s: \" + value.log_message\r\n args = ([value.status_code, self._request_summary()] +\r\n list(value.args))\r\n gen_log.warning(format, *args)\r\n else:\r\n app_log.error(\"Uncaught exception %s\\n%r\", self._request_summary(),\r\n self.request, exc_info=(typ, value, tb))", "title": "" }, { "docid": "22a3b37f5382519f59b1022d211735d1", "score": "0.6169691", "text": "def traceback(self):\n self.log.error(\"There are something error appear.\")\n traceback.print_exc()", "title": "" }, { "docid": "f27451eed171d256da1f74cfcec377b8", "score": "0.6168399", "text": "def exceptions(e):\n ts = strftime('[%Y-%b-%d %H:%M]')\n tb = traceback.format_exc()\n logger.info('%s %s %s %s %s 5xx INTERNAL SERVER ERROR\\n%s',\n ts,\n request.remote_addr,\n request.method,\n request.scheme,\n request.full_path,\n tb)\n return \"Internal Server Error\", 500", 
"title": "" }, { "docid": "b209c85ae054be5bc9d4b5f605c00766", "score": "0.6168318", "text": "def exception(self, msg, *args, **kwargs):\r\n self.log.exception(msg, *args, **kwargs)", "title": "" }, { "docid": "3c64b64416776b83f2d95ed282330d68", "score": "0.6156911", "text": "def error_handler(e):\n logging.error(\"error_handler for socketio. An error has occurred: \" + str(e))", "title": "" }, { "docid": "23ddbd8869f6d1cee21db2ba5be1cfb8", "score": "0.6133701", "text": "def internal_error_handler(self, exception):\n\n logging.exception(exception)\n\n if self._app.debug:\n return ErrorResponse(message=str(exception), debug_data=format_exception(exception))\n\n return ErrorResponse(message=\"Internal server error\")", "title": "" }, { "docid": "2b0687118ae33e5cb70d99021b671e35", "score": "0.6126399", "text": "def _exception_handler(error_type, error_value, error_traceback):\n logging.exception(\"Uncaught exception {} {}\".format(\n str(error_type), str(error_value)))\n tb = traceback.format_exception(\n error_type, error_value, error_traceback)\n traceback_string = ''\n for ln in tb:\n traceback_string += ln\n logging.exception(traceback_string)", "title": "" }, { "docid": "c019f8045acea1f9d318ae55b50392c0", "score": "0.6105268", "text": "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e) + \"\\n\" + request.form['text'], 500", "title": "" }, { "docid": "91d78d5b5a68583b39b7639d4c56eac3", "score": "0.6099875", "text": "def exceptions(e):\n ts = strftime('[%Y-%b-%d %H:%M]')\n tb = traceback.format_exc()\n logger.error('%s %s %s %s %s 5xx INTERNAL SERVER ERROR\\n%s',\n ts,\n request.remote_addr,\n request.method,\n request.scheme,\n request.full_path,\n tb)\n return \"Internal Server Error\", 500", "title": "" }, { "docid": "91d78d5b5a68583b39b7639d4c56eac3", "score": "0.6099875", "text": "def exceptions(e):\n ts = strftime('[%Y-%b-%d %H:%M]')\n tb = traceback.format_exc()\n logger.error('%s %s %s %s %s 5xx INTERNAL SERVER ERROR\\n%s',\n ts,\n request.remote_addr,\n request.method,\n request.scheme,\n request.full_path,\n tb)\n return \"Internal Server Error\", 500", "title": "" }, { "docid": "1a6ab76d3ef1331479191b6f643d9b36", "score": "0.6095173", "text": "def exceptions(e):\n ts = strftime('[%Y-%b-%d %H:%M]')\n tb = traceback.format_exc()\n logger.error('%s %s %s %s %s 5xx INTERNAL SERVER ERROR\\n%s',\n ts,\n request.remote_addr,\n request.method,\n request.scheme,\n request.full_path,\n tb)\n return \"Internal Server Error\", 500", "title": "" }, { "docid": "80c30afb950692f90f0b211d98df9e9e", "score": "0.6085739", "text": "def setup_error_handler(app):\n @app.errorhandler(Exception)\n def handle_error(e):\n code = 500\n if isinstance(e, HTTPException):\n code = e.code\n error_dict = dict(code=code, message=str(e))\n response = jsonify(error_dict)\n response.status_code = code\n\n # Re-raise server errors during debugging\n if code == 500 and app.debug:\n raise\n return response", "title": "" }, { "docid": "5f712192dad76132e05f4a5723cc9865", "score": "0.60839593", "text": "def custom_exception_handler(exc, context):\n response = exception_handler(exc, context)\n response = _build_error_response(response, exc, context)\n custom_errlog(exc, context, response.data)\n return response", "title": "" }, { "docid": "f9288e6f597ca7fc112cf463ad3fc96b", "score": "0.6070603", "text": "def handle_exception(self, e, *args, **kwargs):\n raise", "title": "" }, { "docid": "e67e01b925c2c9dfa5ef3b90abf12a87", "score": "0.6062612", "text": "def log_error(e):\n print(e)", "title": "" }, 
{ "docid": "2a4940c717cffbd0b19fcc92751cb99b", "score": "0.60462797", "text": "def test_python_unhandled_exception(self):\n data = self._read_test_data('python_unhandled_exception.txt')\n expected_type = 'Uncaught exception'\n expected_address = ''\n expected_state = '_read_exact\\n_read_gzip_header\\nread\\n'\n\n expected_stacktrace = data\n expected_security_flag = False\n self._validate_get_crash_data(data, expected_type, expected_address,\n expected_state, expected_stacktrace,\n expected_security_flag)", "title": "" }, { "docid": "4e512f4581bcd540ca9dfb36a9e74171", "score": "0.60423297", "text": "def log_error(e):\n\tprint(e)", "title": "" }, { "docid": "d1b4db53288bd49320fddf02e1ffffc0", "score": "0.60336524", "text": "async def on_error(self, event, *args, **kwargs):\n\n with open('err.log', 'a') as f:\n if event == 'on_message':\n f.write(f'Unhandled message: {args[0]}\\n')\n else:\n f.write(f'Error: {args[0]}\\n')\n raise", "title": "" }, { "docid": "0859d20e49d15ecaebd9f78c3ed32f27", "score": "0.60303277", "text": "def exception_handler(self, url=None, ex=None):\n print(\"An error occurred. Exception: {}\".format(ex))\n self._stop = True", "title": "" }, { "docid": "95e7d6cd5c0b6ddc199f69390191f3ba", "score": "0.60263324", "text": "def handle_exception(self):\r\n self._exceptions.append(sys.exc_info()[1])", "title": "" }, { "docid": "fc77ada59b000258512fd371adf2740b", "score": "0.6020354", "text": "def err_500():\n # return '500 page', 500\n import json, os, errno, random, traceback\n try:\n raise ValueError('value')\n except Exception as e:\n data = {\n \"serviceContext\": {\n \"service\": \"default\",\n \"version\": os.environ.get('GAE_MODULE_VERSION', 'default')\n },\n \"message\": traceback.format_exc(),\n }\n filename = '/var/log/app_engine/custom_logs/errors.log.json'\n if not os.path.exists(os.path.dirname(filename)):\n try:\n os.makedirs(os.path.dirname(filename), 0777)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n with open(filename, 'a+') as file:\n file.write('\\n'+json.dumps(data))\n raise\n # raise\n return '500 page', 500", "title": "" }, { "docid": "03cfa14ee87cb53013d2aa060fa5b8bf", "score": "0.6020314", "text": "def handle_exec_exception(self, e):\n if self.parser.debug:\n raise\n # output user error if one exists otherwise show debugging traceback\n exc = find_user_exception(e)\n if exc is not None:\n # allow exception attribute to override user verbosity level\n if getattr(exc, \"_verbosity\", None) is not None:\n verbosity = exc._verbosity\n else:\n verbosity = getattr(self.parser, \"verbosity\", 0)\n # output verbose error message if it exists\n if verbosity > 0:\n msg = exc.msg(verbosity).strip(\"\\n\")\n if msg:\n self.err.write(msg)\n raise SystemExit\n self.parser.error(exc)\n raise", "title": "" }, { "docid": "a1d459a9fbc6a0545cbfd3cca45539f7", "score": "0.6017624", "text": "def exception(self, msg, *args, exc_info=True, **kwargs):\n self.logger.exception(msg, *args, exc_info=True, **kwargs)", "title": "" }, { "docid": "ba7c31dfe0d23e87bc2d6011c3deb227", "score": "0.6011726", "text": "def fileError():\n\n\t# print error message\n\tprint('[-] %s%sAn error occured with a file operation.%s' % (fg(233), bg(9), attr(0)))\n\n\t# exit program\n\texit(0)", "title": "" }, { "docid": "1c4aff85ed0c257ec5b7290a57528b6f", "score": "0.6011141", "text": "def log_error(e):\r\n print(e)", "title": "" }, { "docid": "1c4aff85ed0c257ec5b7290a57528b6f", "score": "0.6011141", "text": "def log_error(e):\r\n print(e)", "title": "" }, { "docid": 
"26a992f44fedb06d3ad3d3786c7421ca", "score": "0.60001796", "text": "def errorHandler(self, msg):\n print \"ERROR:\", msg", "title": "" }, { "docid": "bd82197556fb943986bc7f5de01e36a3", "score": "0.59972346", "text": "def exceptions(e):\n\n # NOTE: Capture werkzeug exceptions and propagate them to sentry.\n capture_exception(e)\n\n # NOTE: Capture traceback for dumping it to the log.\n tb = traceback.format_exc()\n\n if hasattr(e, \"code\") and e.code == 404:\n service_log.error(\n \"{} {} {} {} 404 NOT FOUND\\n{}\".format(\n request.remote_addr, request.method, request.scheme, request.full_path, tb\n )\n )\n return error_response(HTTP_SERVER_ERROR - e.code, e.name)\n\n if hasattr(e, \"code\") and e.code >= 500:\n service_log.error(\n \"{} {} {} {} 5xx INTERNAL SERVER ERROR\\n{}\".format(\n request.remote_addr, request.method, request.scheme, request.full_path, tb\n )\n )\n return error_response(HTTP_SERVER_ERROR - e.code, e.name)\n\n # NOTE: Werkzeug exceptions should be covered above, following line is for unexpected HTTP server errors.\n return error_response(HTTP_SERVER_ERROR, str(e))", "title": "" }, { "docid": "54312ad70630fdb4c0c3dbf014ccd267", "score": "0.59959984", "text": "def configure_exceptions(app, api, custom):\n for code in default_exceptions:\n app.errorhandler(code)(handle_error)\n if api:\n api.handle_error = handle_error\n for exc in custom:\n app.errorhandler(exc)(handle_error)", "title": "" }, { "docid": "fdbd9928e0c95e3f7c41d6d93e3348ba", "score": "0.5995416", "text": "def handle_exception(self):\n self._exceptions.append(sys.exc_info()[1])", "title": "" }, { "docid": "f3d14d11c9b62ddba94cafae5764199d", "score": "0.5993878", "text": "def server_error(e):\n app.logger.error(f\"Server error: {e}, route: {request.url}\")\n return render_template('500.html'), 500", "title": "" }, { "docid": "663132f511b96290041f278b95d98616", "score": "0.5991694", "text": "def global_exception(exc_type, exc_value, exc_traceback):\n\n\t# Walk to the end of the stack trace to find the line that actually raised the exception\n\tacc = exc_traceback\n\twhile acc is not None:\n\t\tfilename = acc.tb_frame.f_code.co_filename\n\t\tlineno = acc.tb_lineno\n\t\tacc = acc.tb_next\n\n\t# Print a brief summary of the exception to the log.\n\t# We suppress this if the exception was broken pipe because there is a good\n\t# change all that happened is the user was piping to more/less and pressed 'q'\n\tif not (exc_type is IOError and exc_value.errno == 32):\n\t\tlogger.critical('Exception {cls} ({filename}:{lineno}) {message}. 
'\n\t\t\t\t\t\t'Trace file written to \\'{trace}\\'.'.format(\n\t\t\t\tcls=exc_type,\n\t\t\t\t# cls=str(exc_type).replace('<class \\'', '').replace('\\'>', ''),\n\t\t\t\tmessage=unicode(exc_value),\n\t\t\t\tfilename=filename,\n\t\t\t\tlineno=lineno,\n\t\t\t\ttrace=expanded_filename()))\n\t\t# be very paranoid here as we are reading undocumented attributes of logging\n\t\tif hasattr(logger, 'manager') and \\\n\t\t\t\tgetattr(logger.manager, 'emittedNoHandlerWarning') is True:\n\t\t\t# make sure the user sees something even if logging is not initialised\n\t\t\t# (as of python 2.7.5 the logging module suppresses all output if not\n\t\t\t# initialised\n\t\t\tprint('Trace file written to {trace}'.format(\n\t\t\t\t\ttrace=expanded_filename()))\n\n\t# Print an even briefer version to stderr if it is an interactive terminal\n\tif sys.stderr.isatty():\n\t\tprint('\\n{message}\\n'.format(message=str(exc_value)))\n\n\t# Now write a full dump and stack trace to the trace file\n\twith open(expanded_filename(), 'w') as h:\n\t\th.write('Exception object:\\n'\n\t\t\t\t'{indent}Class: {cls}\\n'\n\t\t\t\t'{indent}Text: {value}\\n'\n\t\t\t\t'{indent}Attributes:\\n'.format(\n\t\t\t\t# cls=str(exc_type).replace('<class \\'', '').replace('<type \\'', '').replace(\n\t\t\t\t\t# '\\'>', ''),\n\t\t\t\tcls=exc_type,\n\t\t\t\tvalue=exc_value,\n\t\t\t\tindent=INDENTATION))\n\t\t# list all the attributes of the exception object\n\t\tfor key in dir(exc_value):\n\t\t\tif not key.startswith('_'):\n\t\t\t\th.write('{indent}{indent}{key}: {value}\\n'.format(\n\t\t\t\t\t\tindent=INDENTATION, key=key, value=getattr(exc_value, key)))\n\n\t\th.write('\\nCall stack:\\n')\n\t\tdisplay_tb(exc_traceback, indent=' ', target=h)", "title": "" }, { "docid": "bb7f09aaf1f8baa906087633b67fbffe", "score": "0.5989594", "text": "def email_error(exp, message):\n # Always log a message about our fun\n cstr = StringIO.StringIO()\n traceback.print_exc(file=cstr)\n cstr.seek(0)\n tbstr = cstr.read()\n log.err( exp )\n log.msg( message )\n\n # Prevent a parser from sending tons of spam\n if int(os.environ.get('EMAILS', 10)) < 0:\n return\n os.environ['EMAILS'] = str( int(os.environ.get(\"EMAILS\",10)) - 1 )\n\n msg = MIMEText(\"\"\"\nEmails Left: %s Host: %s\n\nException:\n%s\n%s\n\nMessage:\n%s\"\"\" % (os.environ[\"EMAILS\"], socket.gethostname(), tbstr, exp, message))\n # Send the email already!\n msg['subject'] = '%s Traceback' % (sys.argv[0],)\n msg['From'] = settings.get('pywwa_errors_from', 'ldm@localhost')\n msg['To'] = settings.get('pywwa_errors_to', 'ldm@localhost')\n smtp.sendmail(\"smtp\", msg[\"From\"], msg[\"To\"], msg)", "title": "" }, { "docid": "b841c561e5dcf453f7d46ef77a9be490", "score": "0.59805", "text": "def errorHandler(self, msg=''):\n print(\"Server Error: %s\", msg)", "title": "" }, { "docid": "fe1eae43f7154afa0ab4a00d1e15c9ba", "score": "0.5980101", "text": "def together_exception_handler(self, config):", "title": "" }, { "docid": "0f76476634212ad65d3e7411053db23e", "score": "0.597248", "text": "def log_exception(*args, **kwds):\r\n cls, err = sys.exc_info()[:2]\r\n logging.exception('Exception in request: %s: %s', cls.__name__, err)", "title": "" }, { "docid": "7d99adbe00b14c9d6f9c2f14a28f06d2", "score": "0.5967447", "text": "def error_handler(self, exception):\n\n if self._app.debug:\n logging.exception(exception)\n return ErrorResponse(message=str(exception), debug_data=format_exception(exception))\n\n return ErrorResponse(message=str(exception))", "title": "" }, { "docid": "b71d37a5ac2290e68c861cc4229f05a5", "score": 
"0.59670025", "text": "def unhandled_exception(error):\n app.logger.exception('Unhandled Exception: %s', error)\n return {\"message\": str(error)}, 400", "title": "" }, { "docid": "853c22063470554b630571087e9acb00", "score": "0.5966997", "text": "def log_exception(self, context):\n exception_message = self.exception_format % context.log_vars\n self.logger.exception(exception_message)", "title": "" }, { "docid": "1e89feb4523d2db249fca324476fff00", "score": "0.5958501", "text": "def internal_error(Exception):\n # log an error for flask\n APP.logger.error(Exception)\n # return rendered template\n return(flask.render_template(\"500.html\"))", "title": "" }, { "docid": "cbedd114cd70623f6a437790cacdcc03", "score": "0.59583366", "text": "def handle_exception(self, e):\n handler = self.error_handlers.get(500)\n if self.debug:\n raise\n self.logger.exception('Exception on %s [%s]' % (\n request.path,\n request.method\n ))\n if handler is None:\n return InternalServerError()\n return handler(e)", "title": "" }, { "docid": "f12a0a4fb01968dcacb07e98da5a131b", "score": "0.59582615", "text": "def print_exc(self):\n msg = self.format_exc()\n sys.stderr.write(msg)", "title": "" }, { "docid": "162546b720485ae2e3ff09d3269146fa", "score": "0.5951869", "text": "def log_exception(*args, **kwds):\n logging.exception(\"Exception in request:\")", "title": "" }, { "docid": "7004e70dc3261d6a54f73200ab8da297", "score": "0.5941553", "text": "def exception(__self, __message, *args, **kwargs): # noqa: N805\n options = (True,) + __self._options[1:]\n __self._log(\"ERROR\", False, options, __message, args, kwargs)", "title": "" }, { "docid": "c95ac52503655dedc2f31d90fba61dd9", "score": "0.59378076", "text": "def log_error(self, e):\n print(e)", "title": "" }, { "docid": "541f0509158dcfd60308a2414c98de34", "score": "0.59342915", "text": "def handle_error(self):\n type, v, t = sys.exc_info()\n if type == SystemExit:\n raise\n else:\n asynchat.async_chat.handle_error(self)", "title": "" }, { "docid": "88c9e4dd84d0191ac13d335e705a0c53", "score": "0.5919085", "text": "def error_view(request):\n # Get the latest exception from Python system service\n (type, value, traceback1) = sys.exc_info()\n traceback1 = '.'.join(traceback.format_exception(type, value, traceback1))\n\n # Send email to admins\n subject = 'Error in ieeetags: %s, %s' % (str(type), value)\n message = traceback1\n mail_admins(subject, message, True)\n\n title = None\n message = None\n\n if type is util.EndUserException:\n title, message = value\n\n return render(request, '500.html', {\n 'title': title,\n 'message': message,\n })", "title": "" }, { "docid": "514e6b8100fa5eb28a660d2a5797ce52", "score": "0.59042305", "text": "def log_error(e):\n print(e)", "title": "" }, { "docid": "514e6b8100fa5eb28a660d2a5797ce52", "score": "0.59042305", "text": "def log_error(e):\n print(e)", "title": "" }, { "docid": "514e6b8100fa5eb28a660d2a5797ce52", "score": "0.59042305", "text": "def log_error(e):\n print(e)", "title": "" }, { "docid": "514e6b8100fa5eb28a660d2a5797ce52", "score": "0.59042305", "text": "def log_error(e):\n print(e)", "title": "" }, { "docid": "514e6b8100fa5eb28a660d2a5797ce52", "score": "0.59042305", "text": "def log_error(e):\n print(e)", "title": "" }, { "docid": "514e6b8100fa5eb28a660d2a5797ce52", "score": "0.59042305", "text": "def log_error(e):\n print(e)", "title": "" }, { "docid": "514e6b8100fa5eb28a660d2a5797ce52", "score": "0.59042305", "text": "def log_error(e):\n print(e)", "title": "" }, { "docid": "514e6b8100fa5eb28a660d2a5797ce52", "score": 
"0.59042305", "text": "def log_error(e):\n print(e)", "title": "" }, { "docid": "514e6b8100fa5eb28a660d2a5797ce52", "score": "0.59042305", "text": "def log_error(e):\n print(e)", "title": "" }, { "docid": "514e6b8100fa5eb28a660d2a5797ce52", "score": "0.59042305", "text": "def log_error(e):\n print(e)", "title": "" }, { "docid": "514e6b8100fa5eb28a660d2a5797ce52", "score": "0.59042305", "text": "def log_error(e):\n print(e)", "title": "" }, { "docid": "e16d8441ca7e4b918408c795934e3ce0", "score": "0.59039974", "text": "def catch_unhandled_exceptions(self):\n if not getattr(sys.excepthook, \"_wrapped\", False): # skip if already modified\n orig_hook = sys.excepthook\n\n def handle_exception(exc_type, exc_value, exc_traceback):\n if issubclass(exc_type, KeyboardInterrupt):\n sys.__excepthook__(exc_type, exc_value, exc_traceback)\n return\n logging.getLogger(\"Client\").exception(\"Uncaught exception\",\n exc_info=(exc_type, exc_value, exc_traceback))\n return orig_hook(exc_type, exc_value, exc_traceback)\n\n handle_exception._wrapped = True\n\n sys.excepthook = handle_exception", "title": "" }, { "docid": "2a74e29512e47287459ced32d6c0e1e5", "score": "0.59002215", "text": "def error_handler(msg):\r\n print \"Server Error: %s\" % msg", "title": "" } ]
2c51f84424129b9c3d172d5617a4d18d
This endpoint does a special one-time bootstrap of the ACL system, making the first management token if the acl_master_token is not specified in the Consul server configuration, and if the cluster has not been bootstrapped previously. This is available in Consul 0.9.1 and later, and requires all Consul servers to be upgraded in order to operate. You can detect if something has interfered with the ACL bootstrapping by the response of this method. If you get a string response with the ``ID``, the bootstrap was a success. If the method raises a
[ { "docid": "e39d22b980fcafe79ee8f139b0f1f886", "score": "0.5312343", "text": "def bootstrap(self):\n return self._put_response_body(['bootstrap'])['ID']", "title": "" } ]
[ { "docid": "29e5ddf142f447f368aa51fed9c847d9", "score": "0.5988396", "text": "def cmd_bootstrap(args):\n if getuser() != \"root\":\n error(\"Command has to run as root.\")\n\n if args.nodes is None:\n nodelist = [vm.name for vm in clusterdef.vm]\n else:\n nodelist = args.nodes.split()\n\n status(\"Check status of cluster nodes\")\n for n in nodelist:\n if do_remote(n, \"systemctl -q is-active corosync.service\", stderr_on=False):\n print(\"As Cluster is active, need to stop it and reboot !\")\n print(\"This can not been done automatically; just do:\")\n print(\"- Login on each node and disable pacemaker and corosync service\")\n print(\"- Reboot all nodes\")\n error(\"Cluster already active on {}\".format(n))\n\n cmd_sbd(args)\n status_long(\"Initialize cluster\")\n if not do_remote(nodelist[0], \"ha-cluster-init -y -s /dev/vdc\", stderr_on=True):\n error(\"Failed to initialize cluster\")\n status_done()\n if len(nodelist) > 1:\n # disable as ocsf2 is something than should be deployed in testing\n# if len(nodelist) > 2:\n# status(\"Adjust OCFS2 configuration for > 2 nodes\")\n# if not do_remote(nodelist[0], \"crm resource meta c-clusterfs show clone-max 2>/dev/null\") and \\\n# not do_remote(nodelist[0], \"crm resource meta c-clusterfs set clone-max 2\"):\n# error(\"Failed to update cluster configuration\")\n status_long(\"Join cluster from remaining nodes\")\n for n in nodelist[1:]:\n if not do_remote(n, \"ha-cluster-join -y -c {}\".format(nodelist[0])):\n error(\"Failed to join cluster ({}) from {}\".format(nodelist[0], n))\n status_progress()\n status_done()", "title": "" }, { "docid": "852dc43dd974658a575a1acb33dea3a9", "score": "0.57365394", "text": "def bootstrap_cdm_cluster(cdm_cluster):\n cluster_bootstrap_specs = __get_cluster_bootstrap_specs(cdm_cluster)\n log.info('Bootstrapping %s with cluster_bootstrap_specs %s'\n % (cdm_cluster, json.dumps(cluster_bootstrap_specs, indent=4)))\n driver_node = cdm_cluster.nodes[0]\n log.debug('Driving cluster bootstrap with %s (%s).'\n % (driver_node, driver_node.ipv4))\n\n bootstrap_endpoint = ('https://%s/api/internal/cluster/me/bootstrap'\n % driver_node.ipv4)\n\n log.debug('Sending POST request to %s.' % bootstrap_endpoint)\n response = requests.post(bootstrap_endpoint,\n verify=False,\n json=cluster_bootstrap_specs)\n response.raise_for_status()\n request_id = response.json()['id']\n start_time = datetime.now(utc)\n while True:\n params = {'request_id': request_id}\n response = requests.get(bootstrap_endpoint,\n verify=False,\n params=params)\n response_json = response.json()\n\n if not response.ok:\n response.raise_for_status()\n elif response_json['status'] == 'SUCCESS':\n log.info('Successfully completed cluster bootstrap on %s.'\n % cdm_cluster)\n break\n elif datetime.now(utc) - start_time > BOOTSTRAP_WAIT_TIME:\n bodega_error(log,\n 'Bootstrap for %s ran over time period of %s'\n % (cdm_cluster, BOOTSTRAP_WAIT_TIME))\n elif response_json['status'] == 'IN_PROGRESS':\n log.debug('Cluster bootstrap status is IN_PROGRESS so wait for '\n '30 seconds before checking again for %s.'\n % cdm_cluster)\n time.sleep(30)\n else:\n bodega_error(log,\n 'Hit unexpected status (%s) for bootstrap of %s. 
'\n 'Response dictionary: %s'\n % (response_json['status'], cdm_cluster,\n response_json))", "title": "" }, { "docid": "28fa55e2d5498174909c5a02e2235cf0", "score": "0.55898297", "text": "def _become_master(self):\n\n try:\n self._etcd_client.write(self._key,\n self.id_string,\n ttl=self._ttl,\n prevExist=False,\n timeout=self._interval)\n except Exception as e:\n # We could be smarter about what exceptions we allow, but any kind\n # of error means we should give up, and safer to have a broad\n # except here. Log and reconnect.\n self._log_exception(\"become elected master\", e)\n raise RestartElection()\n\n LOG.info(\"Successfully become master - key %s, value %s\",\n self._key, self.id_string)\n\n self._master = True\n\n try:\n while not self._stopped:\n try:\n LOG.info(\"Refreshing master role\")\n self._etcd_client.write(self._key,\n self.id_string,\n ttl=self._ttl,\n prevValue=self.id_string,\n timeout=self._interval / 3)\n LOG.info(\"Refreshed master role\")\n except Exception as e:\n # This is a pretty broad except statement, but anything\n # going wrong means this instance gives up being the\n # master.\n self._log_exception(\"renew master role\", e)\n raise RestartElection()\n eventlet.sleep(self._interval)\n finally:\n LOG.info(\"Exiting master refresh loop, no longer the master\")\n self._master = False\n raise RestartElection()", "title": "" }, { "docid": "e7e59e4ab7482eacf9a6853429086eb4", "score": "0.52714527", "text": "def label_master(api, master=MASTER_NODE):\n\n\n # body = {\n # \"metadata\": {\n # \"labels\": {\n # \"kubernetes.io/role\": \"master\"\n # }\n # }\n # }\n\n # body2 = {\n # \"metadata\": {\n # \"labels\": {\n # \"node-role.kubernetes.io/master\": \"\"\n # }\n # }\n # }\n\n taint = {\n \"spec\": {\n \"taints\": {\n \"effect\": \"NoSchedule\",\n \"key\": \"node-role.kubernetes.io/master\",\n \"value\": \"NoSchedule\"\n }\n }\n }\n \n # api_response = api.patch_node(master, body)\n\n # pprint(api_response)\n\n # api_response = api.patch_node(master, body2)\n\n # pprint(api_response)\n\n api_response = api.patch_node(master, taint)\n\n pprint(api_response)\n\n # api_response = client.models.v1_taint.V1Taint(master, effect=\"NoSchedule\")", "title": "" }, { "docid": "0df0ec187b6e3bcd1b54afcbd7085d53", "score": "0.5219821", "text": "def set_api_config(self, master_node):\n # get the master node of the cluster\n master_info = self.persistence_manager.find(\n 'nodes', {\"address\": master_node})\n # get the kubernetes bearer token from master\n try:\n token = master_info[0]['token']\n except (IndexError, KeyError):\n self.logger.error(\"Cluster is not valid. 
No valid node exist\")\n raise ClusterNotFoundException(\"Cluster is invalid\")\n self.logger.debug('Bearer token retrieved from master node')\n # kubernetes API configurations\n configuration = client.Configuration()\n configuration.host = 'https://{}:6443'.format(master_node)\n configuration.verify_ssl = False\n configuration.debug = True\n configuration.api_key = {\"authorization\": \"Bearer \" + token}\n client.Configuration.set_default(configuration)\n self.logger.debug('API configurations set.')", "title": "" }, { "docid": "dd7c11d7ab51baec9e4aa6b9468778c4", "score": "0.5187012", "text": "def bootstrap(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"bootstrap\")", "title": "" }, { "docid": "9d8fb8811eb484568dea2362f059ccb7", "score": "0.51618", "text": "def bootstrap_token(cls) -> TokenData:\n return cls(\n token=Token(),\n username=\"<bootstrap>\",\n token_type=TokenType.service,\n scopes=[\"admin:token\"],\n created=datetime.now(tz=timezone.utc),\n )", "title": "" }, { "docid": "e592de38d021a0996584f89116416046", "score": "0.51596993", "text": "def post(self, request):\n # Test if master is reachable\n validation_status, response_code, message, content = validate_host(request.DATA)\n\n # Master created successfully but login was unsuccessful\n if not validation_status and response_code == 401:\n transaction.rollback()\n return Response(dict(error=[\"invalid_master_credentials\"],\n data={}),\n status=status.HTTP_401_UNAUTHORIZED)\n\n # Unable to reach master\n elif not validation_status and response_code == 400 and message == \"duplicate_master\":\n transaction.rollback()\n return Response(dict(error=[\"duplicate_master\"],\n data={}),\n status=status.HTTP_400_BAD_REQUEST)\n\n # Unable to reach master\n elif not validation_status and response_code == 400:\n transaction.rollback()\n return Response(dict(error=[\"no_route_to_host\"],\n data={}),\n status=status.HTTP_400_BAD_REQUEST)\n\n # If yes, create an entry in master and master token table\n elif validation_status and response_code == 200:\n serializer = MasterSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.object.created_by = serializer.object.modified_by = request.user\n serializer.save()\n\n try:\n salt_user = User.objects.get(username=request.DATA['master_username'])\n except ObjectDoesNotExist:\n salt_user = User.objects.create_user(username=request.DATA['master_username'],\n password=request.DATA['master_password'])\n\n # Add Master Token Data\n token_serializer = MasterTokenSerializer(data={\n 'master': serializer.object.id,\n 'user': salt_user.id,\n 'allowed_functions': content.get('perms', []),\n 'token': content.get('token', None)\n })\n\n if not token_serializer.is_valid():\n # roll back db created values in case there are any errors\n transaction.rollback()\n return Response(dict(error=token_serializer.errors, data={}),\n status=status.HTTP_400_BAD_REQUEST)\n\n token_serializer.save()\n\n response_dict = dict(hostname=serializer.object.hostname,\n netapi_port=serializer.object.netapi_port,\n eauth=serializer.object.auth_mode,\n allowed_function=token_serializer.object.allowed_functions)\n\n wheel_call_status = self._set_master_config(serializer.object, request.DATA)\n\n if not wheel_call_status:\n # delete the entries created in the db in case there is an error\n transaction.rollback()\n return Response(dict(error=[\"wheel_call_error\"], data={}),\n status=status.HTTP_400_BAD_REQUEST)\n\n # set master config in DB\n set_config_status, master_config = 
self._set_master_details(serializer.object, request.DATA)\n\n if not set_config_status:\n # delete the entries created in the db in case there is an error\n transaction.rollback()\n return Response(dict(error=[\"master_set_config_error\"], data={}),\n status=status.HTTP_400_BAD_REQUEST)\n\n serializer.object.config = master_config\n master = serializer.save()\n\n try:\n sync_call_resp = master.sync_all_grains() # fix for 1102\n except Exception as err:\n logger.error('Error:{0} while trying to sync grains for master: {1}'.format(err, master))\n transaction.rollback()\n return Response(dict(error=['sync_grains_error'], data={}),\n status=500)\n\n # if all is success, commit the transaction\n transaction.commit()\n\n return Response(dict(error=[], data=response_dict), status=status.HTTP_200_OK)\n else:\n # delete the entries created in the db in case there is an error\n transaction.rollback()\n # Error processing\n error_list = [\n e for error in serializer.errors.values() for e in error]\n return Response(dict(error=error_list, data={}), status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(dict(error=[\"error\"], data={}), status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "c89512a86e4ff3b297d73ef5c24dd144", "score": "0.51547754", "text": "def master(cls, cluster_id_label):\n cluster_status = cls.status(cluster_id_label)\n if cluster_status.get(\"state\") == 'UP':\n return list(filter(lambda x: x[\"role\"] == \"master\", cluster_status.get(\"nodes\")))[0]\n else:\n return cluster_status", "title": "" }, { "docid": "38f5188c039f0eeb7dbf3c3affe50420", "score": "0.5092527", "text": "def bootstrap():\n run(\"/usr/sbin/locale-gen en_US.UTF-8 && /usr/sbin/update-locale LANG=en_US.UTF-8\")\n with fabric_settings(warn_only=True):\n run(\"aptitude update && aptitude -y dist-upgrade\")\n append(\"/etc/hosts\", \"{0} saltmaster-private\".format(env.master_server.private_ip))\n with fabric_settings(warn_only=True):\n reboot()\n run(\"aptitude install -y build-essential rsync\")\n # allow users in the wheel group to sudo without a password\n uncomment(\"/etc/sudoers\", \"wheel.*NOPASSWD\")", "title": "" }, { "docid": "c8553fd77ffabe49a1f1cd379801b57f", "score": "0.5053771", "text": "def bootstrap_node(self, switch_config, timeout=90):\n _cmd = \"cd '{}'; knife bootstrap {} -V --bootstrap-template {} --environment {}\".format(\n self.chef_repo_path, switch_config['ip_host'],\n self.config['distro'], self.config['environment'])\n _alter = [('assword:', switch_config['sshtun_pass'], False, True)]\n self.class_logger.info(\"Install chef client on device '{}'.\".format(switch_config['name']))\n self.ssh.open_shell()\n self.ssh.shell_command(_cmd, alternatives=_alter, timeout=timeout)\n self.ssh.close_shell()", "title": "" }, { "docid": "d2f4bffbeff9470f5c756358251d5a3f", "score": "0.4964513", "text": "def bootstrap(self):\n if not self.config['port']:\n self.config['port'] = configuration['master']['port']\n\n if not self.config['address']:\n self.config['address'] = configuration['master']['host']\n\n if not self.config['slave_nodes_num']:\n self.config['slave_nodes_num'] = \\\n configuration['slave']['number_of_slaves']\n\n if not self.config['slave_nodes_port']:\n self.config['slave_nodes_port'] = configuration['slave']['port']", "title": "" }, { "docid": "81e8eb911fd8119631a2f78d71d44954", "score": "0.49577904", "text": "def bootstrap(command, conf, vars):\n\n # <websetup.bootstrap.before.auth\n\n # <websetup.bootstrap.after.auth>", "title": "" }, { "docid": 
"81e8eb911fd8119631a2f78d71d44954", "score": "0.49577904", "text": "def bootstrap(command, conf, vars):\n\n # <websetup.bootstrap.before.auth\n\n # <websetup.bootstrap.after.auth>", "title": "" }, { "docid": "10c3db7c66fd9c978de89ce41350dae5", "score": "0.493276", "text": "def bootstrap(self, *args, **kwargs):\n return self._bootstrap.request(self._client, *args, **kwargs)", "title": "" }, { "docid": "23a36c5b4f4af5fc03c9de944026aba0", "score": "0.49293762", "text": "def cli_create_role_with_has_primary(self):\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"ready_with_3_slaves\")\n node_ids = sorted([node['id'] for node in\n self.fuel_web.client.list_nodes()])\n release_id = self.fuel_web.get_releases_list_for_os(\n release_name=OPENSTACK_RELEASE)[0]\n templates_path = os.path.join(\n '{0}/fuelweb_test/config_templates/'.format(os.environ.get(\n \"WORKSPACE\", \"./\")), 'create_primary_role.yaml')\n self.show_step(2)\n self.show_step(3)\n if os.path.exists(templates_path):\n self.ssh_manager.upload_to_remote(self.ssh_manager.admin_ip,\n templates_path, '/tmp')\n self.show_step(4)\n self.ssh_manager.execute_on_remote(\n ip=self.ssh_manager.admin_ip,\n cmd='fuel2 role create -n /tmp/create_primary_role -r {} -f yaml'\n ''.format(release_id))\n\n if NEUTRON_SEGMENT_TYPE:\n nst = '-nst {0}'.format(NEUTRON_SEGMENT_TYPE)\n else:\n nst = ''\n self.show_step(5)\n cmd = ('fuel2 env create {0} -r {1} {2} -f json'\n ''.format(self.__class__.__name__, release_id, nst))\n env_result = self.ssh_manager.execute_on_remote(\n ip=self.ssh_manager.admin_ip,\n cmd=cmd,\n jsonify=True\n )['stdout_json']\n cluster_id = env_result['id']\n\n cmd = ('fuel2 env add nodes -e {0} -n {1} -r test-primary-role'\n ''.format(cluster_id, node_ids[0]))\n result = self.ssh_manager.execute(\n ip=self.ssh_manager.admin_ip,\n cmd=cmd,\n )\n assert_equal(result['exit_code'], 0,\n \"Can't assign new role\"\n \" to node id {}\".format(node_ids[0]))\n self.env.make_snapshot(\"cli_create_role_with_has_primary\")", "title": "" }, { "docid": "ba254294053540138e4661c7005292cb", "score": "0.49173543", "text": "def _set_master_config(self, master_obj, request_obj):\n username = request_obj.get(\"master_username\", None)\n password = request_obj.get(\"master_password\", None)\n eauth = request_obj.get(\"auth_mode\", None)\n\n wheel_data = dict(username=username, password=password, eauth=eauth,\n fun=\"config.update_config\", file_name=\"gui\",\n yaml_contents=dict(id=master_obj.id),\n client=\"wheel\")\n\n try:\n resp = master_obj.api_request('POST',\n '/run',\n wheel_data)\n except Exception as err:\n msg = 'Error process request {0}'.format(str(error))\n logger.error(err)\n return False\n\n if resp.status == 200:\n logger.info(\"Request processed successfully.\")\n return True\n else:\n logger.debug('Failed with {0}'.format(str(resp.status)))\n return False", "title": "" }, { "docid": "a1cafb03929cbd4e363eae692d24b72e", "score": "0.49036634", "text": "def cluster_bootstrap_account(self) -> Optional[str]:\n return pulumi.get(self, \"cluster_bootstrap_account\")", "title": "" }, { "docid": "ec15b6ab3f2b943332926a3ff004c5b8", "score": "0.48595178", "text": "async def _on_bootstrap(self, **kwargs):\n if callable(self._context):\n self._context = self._context()\n\n signature = inspect.signature(self.bootstrap)\n parameters = [p.name for p in signature.parameters.values()]\n fn_kwargs = {\n key: value\n for key, value in kwargs.items()\n if key in parameters\n }\n\n if asyncio.iscoroutinefunction(self.bootstrap):\n await 
self.bootstrap(**fn_kwargs)\n else:\n self.bootstrap(**fn_kwargs)", "title": "" }, { "docid": "f81b1c4e6cc7a0d370e51ddbd3a902b4", "score": "0.48587027", "text": "def test_cluster_create_with_lb_disabled(self):\n\n expected_args = self._default_args\n expected_args['master_lb_enabled'] = False\n arglist = [\n '--cluster-template', self._cluster.cluster_template_id,\n '--master-lb-disabled',\n self._cluster.name\n ]\n verifylist = [\n ('cluster_template', self._cluster.cluster_template_id),\n ('master_lb_enabled', [False]),\n ('name', self._cluster.name)\n ]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n self.cmd.take_action(parsed_args)\n self.clusters_mock.create.assert_called_with(**expected_args)", "title": "" }, { "docid": "2bedc6410df8eababf0cd54a9774948e", "score": "0.48573712", "text": "def create_master():\n require('master_ec2_ami', 'master_ec2_instance_type')\n\n btw(\"Spinning up a new puppet master server...\")\n inst = ec2_launch(env.master_ec2_ami, env.master_ec2_instance_type)\n inst.add_tag(\"puppet:type\", \"puppetmaster\")\n\n env.host_string = \"ubuntu@%s\" % inst.ip_address\n wait_until_alive()\n puppet.install_master()\n\n yay(\"\\nSuccess! Puppet master is now alive.\\n\")\n\n env.working_master = inst", "title": "" }, { "docid": "67c005dcfbf5f1b826765e24642129ba", "score": "0.48440123", "text": "def initiate_safe_zkfc_failover():\n import params\n\n # Must kinit before running the HDFS command\n if params.security_enabled:\n Execute(format(\"{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}\"),\n user = params.hdfs_user)\n\n active_namenode_id = None\n standby_namenode_id = None\n active_namenodes, standby_namenodes, unknown_namenodes = get_namenode_states(params.hdfs_site, params.security_enabled, params.hdfs_user)\n if active_namenodes:\n active_namenode_id = active_namenodes[0][0]\n if standby_namenodes:\n standby_namenode_id = standby_namenodes[0][0]\n\n if active_namenode_id:\n Logger.info(format(\"Active NameNode id: {active_namenode_id}\"))\n if standby_namenode_id:\n Logger.info(format(\"Standby NameNode id: {standby_namenode_id}\"))\n if unknown_namenodes:\n for unknown_namenode in unknown_namenodes:\n Logger.info(\"NameNode HA state for {0} is unknown\".format(unknown_namenode[0]))\n\n if params.namenode_id == active_namenode_id and params.other_namenode_id == standby_namenode_id:\n # Failover if this NameNode is active and other NameNode is up and in standby (i.e. 
ready to become active on failover)\n Logger.info(format(\"NameNode {namenode_id} is active and NameNode {other_namenode_id} is in standby\"))\n\n failover_command = format(\"hdfs haadmin -ns {dfs_ha_nameservices} -failover {namenode_id} {other_namenode_id}\")\n check_standby_cmd = format(\"hdfs haadmin -ns {dfs_ha_nameservices} -getServiceState {namenode_id} | grep standby\")\n\n msg = \"Rolling Upgrade - Initiating a ZKFC failover on active NameNode host {0}.\".format(params.hostname)\n Logger.info(msg)\n code, out = shell.call(failover_command, user=params.hdfs_user, logoutput=True)\n Logger.info(format(\"Rolling Upgrade - failover command returned {code}\"))\n wait_for_standby = False\n\n if code == 0:\n wait_for_standby = True\n else:\n # Try to kill ZKFC manually\n was_zkfc_killed = kill_zkfc(params.hdfs_user)\n code, out = shell.call(check_standby_cmd, user=params.hdfs_user, logoutput=True)\n Logger.info(format(\"Rolling Upgrade - check for standby returned {code}\"))\n if code == 255 and out:\n Logger.info(\"Rolling Upgrade - NameNode is already down.\")\n else:\n if was_zkfc_killed:\n # Only mandate that this be the standby namenode if ZKFC was indeed killed to initiate a failover.\n wait_for_standby = True\n\n if wait_for_standby:\n Logger.info(\"Waiting for this NameNode to become the standby one.\")\n Execute(check_standby_cmd,\n user=params.hdfs_user,\n tries=50,\n try_sleep=6,\n logoutput=True)\n else:\n msg = \"Rolling Upgrade - Skipping ZKFC failover on NameNode host {0}.\".format(params.hostname)\n Logger.info(msg)", "title": "" }, { "docid": "b3da0527285475a171ffb9349fcf6c39", "score": "0.4822968", "text": "def compute_master_secret(self, pre_master_secret,\n client_random, server_random):\n seed = client_random + server_random\n if self.tls_version < 0x0300:\n return None\n elif self.tls_version == 0x0300:\n return self.prf(pre_master_secret, seed, 48)\n else:\n return self.prf(pre_master_secret, b\"master secret\", seed, 48)", "title": "" }, { "docid": "2c28ed14794d57635b22fade52f4d5ce", "score": "0.48172528", "text": "def _boot_config(self):\n raise NotImplementedError", "title": "" }, { "docid": "d45d49b4cff87fa391c92524ef562908", "score": "0.4815227", "text": "def _set_master_details(self, master_obj, request_obj):\n wheel_data = dict(fun=\"config.values\", client=\"wheel\")\n master_token = MasterToken.objects.get(master=master_obj).token\n\n try:\n headers = {\n 'X-Auth-Token': master_token\n }\n\n resp = master_obj.api_post(wheel_data, headers)\n except Exception as error:\n logger.critical(\"Error processing request %s. 
Please check salt-api logs.\" % error)\n return False, dict()\n\n if resp.status == 200:\n return True, json.loads(resp.read().decode('utf-8'))['return'][0]['data']['return']\n else:\n return False, {}", "title": "" }, { "docid": "62e49f2fc5b527d11a93f7d3a21d4124", "score": "0.47999585", "text": "def systemctl_master(command='restart'):\n run_command_on_master('sudo systemctl {} dcos-mesos-master'.format(command))", "title": "" }, { "docid": "cadd3d1fb7011f437557eabad68fb920", "score": "0.47824016", "text": "def test_bootstrap(self):\n\n execute(self.accept_devices, hosts=get_mender_clients())", "title": "" }, { "docid": "0c0c43e6556e4e5462be59cd5b0be284", "score": "0.4778572", "text": "def apply_master_config(overrides=None, defaults=None):\n if defaults is None:\n defaults = DEFAULT_MASTER_OPTS.copy()\n if overrides is None:\n overrides = {}\n\n opts = defaults.copy()\n opts[\"__role\"] = \"master\"\n _adjust_log_file_override(overrides, defaults[\"log_file\"])\n if overrides:\n opts.update(overrides)\n # `keep_acl_in_token` will be forced to True when using external authentication\n # for REST API (`rest` is present under `external_auth`). This is because the REST API\n # does not store the password, and can therefore not retroactively fetch the ACL, so\n # the ACL must be stored in the token.\n if \"rest\" in opts.get(\"external_auth\", {}):\n # Check current value and print out warning\n if opts[\"keep_acl_in_token\"] is False:\n log.warning(\n \"The 'rest' external_auth backend requires 'keep_acl_in_token' to be True. \"\n \"Setting 'keep_acl_in_token' to True.\"\n )\n opts[\"keep_acl_in_token\"] = True\n\n opts[\"__cli\"] = salt.utils.stringutils.to_unicode(os.path.basename(sys.argv[0]))\n\n if \"environment\" in opts:\n if opts[\"saltenv\"] is not None:\n log.warning(\n \"The 'saltenv' and 'environment' master config options \"\n \"cannot both be used. Ignoring 'environment' in favor of \"\n \"'saltenv'.\"\n )\n # Set environment to saltenv in case someone's custom runner is\n # refrencing __opts__['environment']\n opts[\"environment\"] = opts[\"saltenv\"]\n else:\n log.warning(\n \"The 'environment' master config option has been renamed \"\n \"to 'saltenv'. 
Using %s as the 'saltenv' config value.\",\n opts[\"environment\"],\n )\n opts[\"saltenv\"] = opts[\"environment\"]\n\n for idx, val in enumerate(opts[\"fileserver_backend\"]):\n if val in (\"git\", \"hg\", \"svn\", \"minion\"):\n new_val = val + \"fs\"\n log.debug(\n \"Changed %s to %s in master opts' fileserver_backend list\", val, new_val\n )\n opts[\"fileserver_backend\"][idx] = new_val\n\n if len(opts[\"sock_dir\"]) > len(opts[\"cachedir\"]) + 10:\n opts[\"sock_dir\"] = os.path.join(opts[\"cachedir\"], \".salt-unix\")\n\n opts[\"token_dir\"] = os.path.join(opts[\"cachedir\"], \"tokens\")\n opts[\"syndic_dir\"] = os.path.join(opts[\"cachedir\"], \"syndics\")\n # Make sure ext_mods gets set if it is an untrue value\n # (here to catch older bad configs)\n opts[\"extension_modules\"] = opts.get(\"extension_modules\") or os.path.join(\n opts[\"cachedir\"], \"extmods\"\n )\n # Set up the utils_dirs location from the extension_modules location\n opts[\"utils_dirs\"] = opts.get(\"utils_dirs\") or [\n os.path.join(opts[\"extension_modules\"], \"utils\")\n ]\n\n # Insert all 'utils_dirs' directories to the system path\n insert_system_path(opts, opts[\"utils_dirs\"])\n\n if overrides.get(\"ipc_write_buffer\", \"\") == \"dynamic\":\n opts[\"ipc_write_buffer\"] = _DFLT_IPC_WBUFFER\n if \"ipc_write_buffer\" not in overrides:\n opts[\"ipc_write_buffer\"] = 0\n using_ip_for_id = False\n append_master = False\n if not opts.get(\"id\"):\n opts[\"id\"], using_ip_for_id = get_id(opts, cache_minion_id=None)\n append_master = True\n\n # it does not make sense to append a domain to an IP based id\n if not using_ip_for_id and \"append_domain\" in opts:\n opts[\"id\"] = _append_domain(opts)\n if append_master:\n opts[\"id\"] += \"_master\"\n\n # Prepend root_dir to other paths\n prepend_root_dirs = [\n \"pki_dir\",\n \"cachedir\",\n \"pidfile\",\n \"sock_dir\",\n \"extension_modules\",\n \"autosign_file\",\n \"autoreject_file\",\n \"token_dir\",\n \"syndic_dir\",\n \"sqlite_queue_dir\",\n \"autosign_grains_dir\",\n ]\n\n # These can be set to syslog, so, not actual paths on the system\n for config_key in (\"log_file\", \"key_logfile\", \"ssh_log_file\"):\n log_setting = opts.get(config_key, \"\")\n if log_setting is None:\n continue\n\n if urllib.parse.urlparse(log_setting).scheme == \"\":\n prepend_root_dirs.append(config_key)\n\n prepend_root_dir(opts, prepend_root_dirs)\n\n # Enabling open mode requires that the value be set to True, and\n # nothing else!\n opts[\"open_mode\"] = opts[\"open_mode\"] is True\n opts[\"auto_accept\"] = opts[\"auto_accept\"] is True\n opts[\"file_roots\"] = _validate_file_roots(opts[\"file_roots\"])\n opts[\"pillar_roots\"] = _validate_file_roots(opts[\"pillar_roots\"])\n\n if opts[\"file_ignore_regex\"]:\n # If file_ignore_regex was given, make sure it's wrapped in a list.\n # Only keep valid regex entries for improved performance later on.\n if isinstance(opts[\"file_ignore_regex\"], str):\n ignore_regex = [opts[\"file_ignore_regex\"]]\n elif isinstance(opts[\"file_ignore_regex\"], list):\n ignore_regex = opts[\"file_ignore_regex\"]\n\n opts[\"file_ignore_regex\"] = []\n for regex in ignore_regex:\n try:\n # Can't store compiled regex itself in opts (breaks\n # serialization)\n re.compile(regex)\n opts[\"file_ignore_regex\"].append(regex)\n except Exception: # pylint: disable=broad-except\n log.warning(\"Unable to parse file_ignore_regex. 
Skipping: %s\", regex)\n\n if opts[\"file_ignore_glob\"]:\n # If file_ignore_glob was given, make sure it's wrapped in a list.\n if isinstance(opts[\"file_ignore_glob\"], str):\n opts[\"file_ignore_glob\"] = [opts[\"file_ignore_glob\"]]\n\n # Let's make sure `worker_threads` does not drop below 3 which has proven\n # to make `salt.modules.publish` not work under the test-suite.\n if opts[\"worker_threads\"] < 3 and opts.get(\"peer\", None):\n log.warning(\n \"The 'worker_threads' setting in '%s' cannot be lower than \"\n \"3. Resetting it to the default value of 3.\",\n opts[\"conf_file\"],\n )\n opts[\"worker_threads\"] = 3\n\n opts.setdefault(\"pillar_source_merging_strategy\", \"smart\")\n\n # Make sure hash_type is lowercase\n opts[\"hash_type\"] = opts[\"hash_type\"].lower()\n\n # Check and update TLS/SSL configuration\n _update_ssl_config(opts)\n _update_discovery_config(opts)\n\n return opts", "title": "" }, { "docid": "06110da38770ec7d82a4955716c0e4b3", "score": "0.47764638", "text": "def _vote(self):\n try:\n response = self._etcd_client.read(self._key,\n timeout=self._interval)\n index = response.etcd_index\n except etcd.EtcdKeyNotFound:\n LOG.debug(\"Try to become the master - key not found\")\n self._become_master()\n assert False, \"_become_master() should not return.\"\n except etcd.EtcdException as e:\n # Something bad and unexpected. Log and reconnect.\n self._log_exception(\"read current master\", e)\n return\n\n LOG.debug(\"ID of elected master is : %s\", response.value)\n if response.value:\n # If we happen to be on the same server, check if the master\n # process is still alive.\n self._check_master_process(response.value)\n\n while not self._stopped:\n # We know another instance is the master. Wait until something\n # changes, giving long enough that it really should do (i.e. we\n # expect this read always to return, never to time out).\n try:\n response = self._etcd_client.read(self._key,\n wait=True,\n waitIndex=index + 1,\n timeout=Timeout(\n connect=self._interval,\n read=self._ttl * 2))\n\n index = response.etcd_index\n except etcd.EtcdKeyNotFound:\n # It should be impossible for somebody to delete the object\n # without us getting the delete action, but safer to handle it.\n LOG.warning(\"Implausible vanished key - become master\")\n self._become_master()\n except etcd.EtcdEventIndexCleared:\n # etcd only keeps a buffer of 1000 events. If that buffer wraps\n # before the master refreshes, we get EtcdEventIndexCleared.\n # Simply return, which will retry the read and get the new\n # etcd index.\n LOG.info(\"etcd index cleared; aborting poll to re-read key.\")\n return\n except etcd.EtcdException as e:\n # Something bad and unexpected. 
Log and reconnect.\n self._log_exception(\"wait for master change\", e)\n return\n LOG.debug(\"Election key action: %s; new value %s\",\n response.action, response.value)\n if (response.action in ETCD_DELETE_ACTIONS or\n response.value is None):\n # Deleted - try and become the master.\n LOG.info(\"Leader etcd key went away, attempting to become \"\n \"the elected master\")\n self._become_master()", "title": "" }, { "docid": "e166e88870e49c01404399154d4098bf", "score": "0.47735924", "text": "def bootstrap(self):\n p(\"Bootstrap\",self.ip)\n\n scp = SCPClient(self.ssh.get_transport())\n scp.put(\"microspark-aws-credentials\",\"/home/ubuntu/.aws/credentials\")\n scp.close()\n\n scp = SCPClient(self.ssh.get_transport())\n scp.put(\"microspark-aws-credentials\",\"/home/ubuntu/.boto\")\n scp.close()\n\n\n scp = SCPClient(self.ssh.get_transport())\n scp.put(\"Bootstrap.py\",\"/home/ubuntu/microspark/spark/Bootstrap.py\")\n scp.close()\n cmd = \"cd microspark/spark; python ./Bootstrap.py \"+FILES_BUCKET\n self.exec_ssh_command(cmd)", "title": "" }, { "docid": "1f651feb8a3a39f48988150fc4f4f471", "score": "0.47592643", "text": "def bootstrap(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"bootstrap\")", "title": "" }, { "docid": "1f651feb8a3a39f48988150fc4f4f471", "score": "0.47592643", "text": "def bootstrap(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"bootstrap\")", "title": "" }, { "docid": "aa99da18517bd88b8d9a036b029e169c", "score": "0.4739614", "text": "def do_bootstrap(self, args=None):\n\n args = args or []\n\n if not self.validated:\n print(self.colorize(\n \"You must run 'initial_setup' before you can bootstrap\",\n \"red\"))\n return\n\n if 'profile' not in self.config:\n print(self.colorize(\"You must have a default profile in order to run bootstrap. \"\n \"Run 'initial_setup'\", \"red\"))\n return\n\n print(\"Bootstrapping your account\")\n\n custom_bootstrap = raw_input(\"Do you have a custom bootstrap yaml file (y/n)? \")\n\n if custom_bootstrap in ['Y', 'y']:\n custom_bootstrap_file = raw_input(\"Enter the name of the file: \")\n\n # Load the bootstrap file\n if os.path.exists(custom_bootstrap_file):\n self.BOOTSTRAP_FILE = custom_bootstrap_file\n elif os.path.exists(os.path.expanduser(custom_bootstrap_file)):\n self.BOOTSTRAP_FILE = os.path.expanduser(custom_bootstrap_file)\n else:\n print(\"Unable to find the file.\")\n use_default = raw_input(\"Would you like to use the default bootstrap file instead (y/n)? \")\n if use_default not in ['Y', 'y']:\n print(\"Aborting bootstrap\")\n return\n\n # Load the bootstrap data. 
If the BOOTSTRAP_DATA property was not set just now, it will use the default\n self.bootstrap_data = yaml.safe_load(open(self.BOOTSTRAP_FILE).read())\n\n self._bootstrap_account()\n self._bootstrap_formulas()\n self._bootstrap_blueprints()", "title": "" }, { "docid": "96da3081d4a662a0f15c4d63b51b8b7a", "score": "0.47314703", "text": "def boot_linode(api_key, linode_id, config_id=None, block=True):\n kwargs = {\n 'LinodeID': linode_id\n }\n\n if config_id:\n kwargs['ConfigID'] = config_id\n\n response = base.make_single_call(\n api_key,\n 'linode.boot',\n **kwargs\n )\n\n boot_job = job.get(api_key, linode_id, response['JobID'])\n\n if block:\n boot_job.wait()\n\n return boot_job", "title": "" }, { "docid": "4aeeeef1941ca783310b1b259d490e70", "score": "0.46841174", "text": "def handle_api_restart_master(self, http_context):\n\n self.context.worker.restart_master()", "title": "" }, { "docid": "474e08298c7ce0acf0a0328a7e424fe4", "score": "0.4664416", "text": "def setup_master_storage(mode):\n host_string = env.host_string\n if host_string == env.roledefs['storage-master'][0]:\n cmd = create_storage_setup_cmd(mode)\n with settings(host_string = storage_master, password = storage_master_password):\n with cd(INSTALLER_DIR):\n print cmd\n sudo(cmd)", "title": "" }, { "docid": "474e08298c7ce0acf0a0328a7e424fe4", "score": "0.4664416", "text": "def setup_master_storage(mode):\n host_string = env.host_string\n if host_string == env.roledefs['storage-master'][0]:\n cmd = create_storage_setup_cmd(mode)\n with settings(host_string = storage_master, password = storage_master_password):\n with cd(INSTALLER_DIR):\n print cmd\n sudo(cmd)", "title": "" }, { "docid": "661d24b4378e6fd7df3bd2b6c4252ce8", "score": "0.46332288", "text": "def confirm_remote_startup(self, kernel_cmd, **kw):\n self.start_time = RemoteProcessProxy.get_current_time()\n i = 0\n ready_to_connect = False # we're ready to connect when we have a connection file to use\n while not ready_to_connect:\n i += 1\n self.handle_timeout()\n\n if self.get_application_id(True):\n # Once we have an application ID, start monitoring state, obtain assigned host and get connection info\n app_state = self.get_application_state()\n\n if app_state in ConductorClusterProcessProxy.final_states:\n raise tornado.web.HTTPError(500, \"KernelID: '{}', ApplicationID: '{}' unexpectedly found in\"\n \"state '{}' during kernel startup!\".format(self.kernel_id,\n self.application_id,\n app_state))\n\n self.log.debug(\"{}: State: '{}', Host: '{}', KernelID: '{}', ApplicationID: '{}'\".\n format(i, app_state, self.assigned_host, self.kernel_id, self.application_id))\n\n if self.assigned_host != '':\n ready_to_connect = self.receive_connection_info()", "title": "" }, { "docid": "172011d33e06e63be49dde7869de565c", "score": "0.46320435", "text": "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n bootstrap_brokers: Optional[pulumi.Input[str]] = None,\n bootstrap_brokers_public_sasl_iam: Optional[pulumi.Input[str]] = None,\n bootstrap_brokers_public_sasl_scram: Optional[pulumi.Input[str]] = None,\n bootstrap_brokers_public_tls: Optional[pulumi.Input[str]] = None,\n bootstrap_brokers_sasl_iam: Optional[pulumi.Input[str]] = None,\n bootstrap_brokers_sasl_scram: Optional[pulumi.Input[str]] = None,\n bootstrap_brokers_tls: Optional[pulumi.Input[str]] = None,\n broker_node_group_info: Optional[pulumi.Input[pulumi.InputType['ClusterBrokerNodeGroupInfoArgs']]] = None,\n 
client_authentication: Optional[pulumi.Input[pulumi.InputType['ClusterClientAuthenticationArgs']]] = None,\n cluster_name: Optional[pulumi.Input[str]] = None,\n configuration_info: Optional[pulumi.Input[pulumi.InputType['ClusterConfigurationInfoArgs']]] = None,\n current_version: Optional[pulumi.Input[str]] = None,\n encryption_info: Optional[pulumi.Input[pulumi.InputType['ClusterEncryptionInfoArgs']]] = None,\n enhanced_monitoring: Optional[pulumi.Input[str]] = None,\n kafka_version: Optional[pulumi.Input[str]] = None,\n logging_info: Optional[pulumi.Input[pulumi.InputType['ClusterLoggingInfoArgs']]] = None,\n number_of_broker_nodes: Optional[pulumi.Input[int]] = None,\n open_monitoring: Optional[pulumi.Input[pulumi.InputType['ClusterOpenMonitoringArgs']]] = None,\n storage_mode: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n zookeeper_connect_string: Optional[pulumi.Input[str]] = None,\n zookeeper_connect_string_tls: Optional[pulumi.Input[str]] = None) -> 'Cluster':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ClusterState.__new__(_ClusterState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"bootstrap_brokers\"] = bootstrap_brokers\n __props__.__dict__[\"bootstrap_brokers_public_sasl_iam\"] = bootstrap_brokers_public_sasl_iam\n __props__.__dict__[\"bootstrap_brokers_public_sasl_scram\"] = bootstrap_brokers_public_sasl_scram\n __props__.__dict__[\"bootstrap_brokers_public_tls\"] = bootstrap_brokers_public_tls\n __props__.__dict__[\"bootstrap_brokers_sasl_iam\"] = bootstrap_brokers_sasl_iam\n __props__.__dict__[\"bootstrap_brokers_sasl_scram\"] = bootstrap_brokers_sasl_scram\n __props__.__dict__[\"bootstrap_brokers_tls\"] = bootstrap_brokers_tls\n __props__.__dict__[\"broker_node_group_info\"] = broker_node_group_info\n __props__.__dict__[\"client_authentication\"] = client_authentication\n __props__.__dict__[\"cluster_name\"] = cluster_name\n __props__.__dict__[\"configuration_info\"] = configuration_info\n __props__.__dict__[\"current_version\"] = current_version\n __props__.__dict__[\"encryption_info\"] = encryption_info\n __props__.__dict__[\"enhanced_monitoring\"] = enhanced_monitoring\n __props__.__dict__[\"kafka_version\"] = kafka_version\n __props__.__dict__[\"logging_info\"] = logging_info\n __props__.__dict__[\"number_of_broker_nodes\"] = number_of_broker_nodes\n __props__.__dict__[\"open_monitoring\"] = open_monitoring\n __props__.__dict__[\"storage_mode\"] = storage_mode\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"zookeeper_connect_string\"] = zookeeper_connect_string\n __props__.__dict__[\"zookeeper_connect_string_tls\"] = zookeeper_connect_string_tls\n return Cluster(resource_name, opts=opts, __props__=__props__)", "title": "" }, { "docid": "40aa103ac6716ecb31e76d5c5eb106c2", "score": "0.4618397", "text": "def __init__(__self__, *,\n api_server_args: Sequence['outputs.BareMetalAdminApiServerArgumentResponse'],\n control_plane_node_pool_config: 'outputs.BareMetalAdminControlPlaneNodePoolConfigResponse'):\n pulumi.set(__self__, \"api_server_args\", api_server_args)\n pulumi.set(__self__, \"control_plane_node_pool_config\", control_plane_node_pool_config)", "title": "" }, { "docid": "f960a7fc66768ca7128f476cfe7c4c4c", "score": "0.4600296", "text": "def test_claim_leadership(self, blank_state):\n ds = blank_state\n\n 
first_leader = uuid.uuid4()\n second_leader = uuid.uuid4()\n\n print(\"Claiming leadership for %s\" % str(first_leader.bytes))\n crown = ds.claim_leadership(first_leader)\n\n assert crown\n\n print(\"Claiming leadership for %s\" % str(second_leader.bytes))\n crown = ds.claim_leadership(second_leader)\n\n assert crown is False\n\n time.sleep(20)\n\n print(\"Claiming leadership for %s after 20s\" %\n str(second_leader.bytes))\n crown = ds.claim_leadership(second_leader)\n\n assert crown", "title": "" }, { "docid": "746bd488cce0e24e3dc3b535c49c14f3", "score": "0.4599366", "text": "def test_com_adobe_octopus_ncomm_bootstrap(self):\n query_string = [('post', True),\n ('apply', True),\n ('delete', True),\n ('action', 'action_example'),\n ('location', 'location_example'),\n ('propertylist', 'propertylist_example'),\n ('max_connections', 56),\n ('max_requests', 56),\n ('request_timeout', 56),\n ('request_retries', 56),\n ('launch_timeout', 56)]\n response = self.client.open(\n '//system/console/configMgr/com.adobe.octopus.ncomm.bootstrap',\n method='POST',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "title": "" }, { "docid": "d7e0b5963197c4ac21dc525f2851aa67", "score": "0.459004", "text": "def master(self):\n if not self._session:\n print('[ERROR]: Please log into NS')\n return False\n try:\n ha = hanode_stats.get(self._session)\n self._state = ha[0]._hacurmasterstate\n if self._state == 'Primary':\n return True\n else:\n return False\n except nitro_exception as e:\n print(\"[ERROR]: HA Status, ErrorCode=\" + str(e.errorcode) + \", Message=\" + e.message)\n return False\n except Exception as e:\n print(\"[ERROR]: HA Status, \" + str(e.args))\n return False", "title": "" }, { "docid": "b2f9ea0a323a081f43ee0bb5cde7e27f", "score": "0.45872954", "text": "def azs_loader(mock_creds):\n mock_creds.return_value = None\n azs = AzureSentinel()\n azs.connect()\n azs.token = \"123\"\n return azs", "title": "" }, { "docid": "d4680bf56cda666b2403194f6ec8c182", "score": "0.45818824", "text": "def run_initial_setup(ip_addr, cid, ctrl_version):\n response_json = get_initial_setup_status(ip_addr, cid)\n if response_json.get('return') is True:\n print(\"Initial setup is already done. Skipping\")\n return True\n post_data = {\"target_version\": ctrl_version,\n \"action\": \"initial_setup\",\n \"subaction\": \"run\"}\n print(\"Trying to run initial setup %s\\n\" % str(post_data))\n post_data[\"CID\"] = cid\n base_url = \"https://\" + ip_addr + \"/v1/api\"\n try:\n response = requests.post(base_url, data=post_data, verify=False)\n except requests.exceptions.ConnectionError as err:\n if \"Remote end closed connection without response\" in str(err):\n print(\"Server closed the connection while executing initial setup API.\"\n \" Ignoring response\")\n response_json = {'return': True, 'reason': 'Warning!! 
Server closed the connection'}\n else:\n raise AvxError(\"Failed to execute initial setup: \" + str(err)) from err\n else:\n response_json = response.json()\n # Controllers running 6.4 and above would be unresponsive after initial_setup\n print(response_json)\n time.sleep(INITIAL_SETUP_API_WAIT)\n if response_json.get('return') is True:\n print(\"Successfully initialized the controller\")\n else:\n raise AvxError(\"Could not bring up the new controller to the \"\n \"specific version\")\n return False", "title": "" }, { "docid": "b18eebaaf26b0c0e2ba62ec3cebea4a3", "score": "0.4557579", "text": "def __init__(self, scope: cdk.Construct, id: str, vpc: ec2.Vpc, cluster: ecs.Cluster, **kwargs) -> None:\n super().__init__(scope, id, **kwargs)\n\n # Create a load balancer so master node can be hit via a public endpoint\n lb = load_balancer.NetworkLoadBalancer(\n self, 'PyGridLoadBalancer',\n vpc=vpc,\n internet_facing=True,\n cross_zone_enabled=True\n )\n\n master_node_url = lb.load_balancer_dns_name + ':5001'\n \n # Pass in required environmental variables, define CPU/ memory resources, etc.\n # Note that by setting the containerImage to mkenney1/artificien_orchestration:latest,\n # we tell AWS to run the code contained in the contianer (published on DockerHub with this name).\n self.service = ecs_patterns.NetworkLoadBalancedFargateService(\n self,\n 'MasterNodeService',\n\n # Resources\n cluster=cluster,\n cpu=512,\n memory_limit_mib=2048,\n desired_count=1,\n\n # Load balancer config\n public_load_balancer=True,\n listener_port=5001,\n\n # Task image options\n task_image_options=ecs_patterns.NetworkLoadBalancedTaskImageOptions(\n container_name='artificien_master_node',\n container_port=5001,\n image=ecs.ContainerImage.from_registry('mkenney1/artificien_orchestration:latest'),\n environment={\n 'MASTER_NODE_URL': lb.load_balancer_dns_name + ':5001',\n 'LOCALTEST': 'False'\n },\n enable_logging=True,\n log_driver=ecs.AwsLogDriver(\n stream_prefix='MasterNode',\n log_group=logs.LogGroup(\n self, 'MasterNodeLogGroup',\n removal_policy=cdk.RemovalPolicy.DESTROY,\n retention=logs.RetentionDays.ONE_MONTH\n )\n )\n ),\n load_balancer=lb\n )\n # Add requisite IAM roles to deploy all of these resources\n add_policies(self.service.service.task_definition.task_role)\n\n # Allow ingress\n all_ports = ec2.Port(\n protocol=ec2.Protocol.TCP,\n from_port=0,\n to_port=65535,\n string_representation='All'\n )\n self.service.service.connections.allow_from_any_ipv4(all_ports)\n\n # Add a Health Check to allow us to monitor the service\n self.service.target_group.configure_health_check(\n port='traffic-port',\n protocol=load_balancer.Protocol.TCP\n )\n\n # Get domain name of load balancer and output it to the console\n cdk.CfnOutput(self, 'MasterNodeLoadBalancerDNS', value=master_node_url)", "title": "" }, { "docid": "f8fad08760f71ca4d8a297f1cd428b12", "score": "0.4549753", "text": "def status(client, logger):\n status = client.cluster.status()\n if not status.initialized:\n logger.error('This manager is not part of a Cloudify Manager cluster')\n else:\n logger.info('Cloudify Manager cluster initialized!\\n'\n 'Encryption key: {0}'.format(status.encryption_key))", "title": "" }, { "docid": "d76f74556cef824d0af84c0ce94b4755", "score": "0.45432502", "text": "def test_acl_create():\n # Given\n test_acl_configuration = acl_defaut_configuration.copy()\n test_acl_configuration.update({\n 'state': 'absent'\n })\n test_acl_configuration.update(sasl_default_configuration)\n ensure_kafka_acl(\n localhost,\n 
test_acl_configuration\n )\n time.sleep(0.5)\n # When\n test_acl_configuration.update({\n 'state': 'present'\n })\n ensure_idempotency(\n ensure_kafka_acl,\n localhost,\n test_acl_configuration\n )\n time.sleep(0.5)\n # Then\n for host, host_vars in kafka_hosts.items():\n kfk_addr = \"%s:9094\" % \\\n host_vars['ansible_eth0']['ipv4']['address']['__ansible_unsafe']\n check_configured_acl(host, test_acl_configuration, kfk_addr)", "title": "" }, { "docid": "0f581b97607f47ab65368a0af9f820b8", "score": "0.4542812", "text": "def bootstrap():\n system()\n db()\n services()\n http()\n if config.ssl:\n letsencrypt()\n # Now put the https ready Nginx conf.\n http()\n ssh_keys()", "title": "" }, { "docid": "960e3c36dc439ed80e8b2c02072d147a", "score": "0.45310438", "text": "def bootstrap(self, app_config: Dict, path: str, is_console: bool) -> None:\n\n # Silently do not bootstrap multiple times\n if self.booted: return\n\n # App name and path\n self._path = path\n self._name = app_config.name\n self._main = app_config.main\n self.add_running_config('name', self.name)\n self.add_running_config('main', self.main)\n self.add_running_config('path', self.path)\n self.add_running_config('version', '0.0.0')\n self.add_running_config('uvicore.version', self.version)\n\n # Merge running config/app.py paths dictionary with defaults and full path\n self._build_paths(app_config)\n\n # Detect if running in console (to register commands)\n # Ensure console is False even when running ./uvicore http serve\n self._is_console = is_console\n if command_is('http serve'): self._is_console = False\n self._is_http = not self.is_console\n\n # Detect debug flag from main app config\n self._debug = app_config.debug\n\n # Build recursive providers graph\n self._build_provider_graph(app_config)\n\n\n # Failsafe if no http package, force console\n # This solves a ./uvicore http serve error if you don't have the http package\n #if 'uvicore.web' not in self.providers or 'uvicore.api' not in self.providers:\n if 'uvicore.http' not in self.providers:\n self._is_console = True\n self._is_http = False\n\n # Register and merge all providers\n self._register_providers(app_config)\n self.add_running_config('providers', self.providers)\n\n #dump(self.packages)\n #dd('REGISTERED')\n\n # Boot all providers\n #self._boot_providers()\n self._boot_providers(app_config)\n\n # Add each final package to running_config, but only the basic details\n # for package in self.packages.values():\n # if package.main:\n # self.add_running_config('version', package.version)\n # self.add_running_config('packages.[' + package.name + ']', {\n # 'name': package.name,\n # 'short_name': package.short_name,\n # 'vendor': package.vendor,\n # 'version': package.version,\n # 'main': package.main,\n # 'path': package.path,\n # 'registers': [k for (k,v) in package.registers.items() if v == True],\n # })\n\n #dd(self.packages)\n\n # Return application\n return self", "title": "" }, { "docid": "c3b697a4c4e19dfd1baad695cc25f3d9", "score": "0.45195535", "text": "def bootstrap():\n require('hosts')\n _warn('''\n This is a potientially dangerous operation. 
Make sure you have\\r\\n\n all your ducks in a row, and that you have checked the configuration\\r\\n\n files both in conf/ and in the fabfile.py itself!\n ''')\n _confirmtask()\n createuser()\n deploy()\n mkvirtualenv()\n upload_virtualenv_vendors()\n upload_secrets()\n installreqs()\n createdb()\n supervisorcfg()\n nginxcfg()\n logrotatecfg()\n syncdb()\n migrate()\n collectstatic()\n gitpull()\n flushdb()\n loaddata()\n restart()\n _success()", "title": "" }, { "docid": "6d29cfffc3f293e0621cafc3ac52934c", "score": "0.4513073", "text": "def test_system_clusters(mz: MaterializeApplication) -> None:\n mz.wait_replicas()\n\n assert_pod_properties(mz, \"cluster-s1-replica-2-0\")\n assert_pod_properties(mz, \"cluster-s2-replica-3-0\")", "title": "" }, { "docid": "8f15d93adac46ec49833a96169b8d9ae", "score": "0.4508992", "text": "def _bootstrap_subresources(self):\n bootstrapped = []\n for resource in self.iter_bootstrappable:\n should_cleanup = True\n\n resource_name = type(resource).__name__\n logging.info(f\"Attempting bootstrap {resource_name}\")\n for _ in range(BOOTSTRAP_RETRIES):\n try:\n # Bootstrap and add to list of successes\n resource.bootstrap()\n logging.info(f\"Successfully bootstrapped {resource_name}\")\n bootstrapped.append(resource)\n should_cleanup = False\n break\n except BootstrapFailureException as ex:\n # Don't attempt to retry if we reached maximum retries beneath\n raise ex\n except Exception as ex:\n logging.error(f\"Exception while bootstrapping {resource_name}\")\n logging.exception(ex)\n # Clean up any dependencies the first attempt made\n logging.info(f\"Cleaning up dependencies created by {resource_name}\")\n resource.cleanup()\n logging.info(f\"Retrying bootstrapping {resource_name}\")\n continue\n else:\n logging.error(f\"🚫 Exceeded maximum retries ({BOOTSTRAP_RETRIES}) for bootstrapping {resource_name}\")\n \n if should_cleanup:\n # Attempt to clean up successfully bootstrapped elements\n self._cleanup_resources(bootstrapped)\n raise BootstrapFailureException(f\"Bootstrapping failed for resource type '{resource_name}'\")", "title": "" }, { "docid": "337a67c3580cfba3df74583adbf63514", "score": "0.449002", "text": "def boot_worker(self, hardware_config, os_config):\n assert False, \"Subclsses implement\"", "title": "" }, { "docid": "9e7c8fc90f1decbc9b964d6c5c96c458", "score": "0.44891372", "text": "def _setup_primary_node(config):\n # Check for runtime compatibility: some tools are required to be available in PATH\n if 'boulder' in config.option.acme_server:\n try:\n subprocess.check_output(['docker', '-v'], stderr=subprocess.STDOUT)\n except (subprocess.CalledProcessError, OSError):\n raise ValueError('Error: docker is required in PATH to launch the integration tests on'\n 'boulder, but is not installed or not available for current user.')\n\n try:\n subprocess.check_output(['docker-compose', '-v'], stderr=subprocess.STDOUT)\n except (subprocess.CalledProcessError, OSError):\n raise ValueError(\n 'Error: docker-compose is required in PATH to launch the integration tests, '\n 'but is not installed or not available for current user.'\n )\n\n # Parameter numprocesses is added to option by pytest-xdist\n workers = ['primary'] if not config.option.numprocesses\\\n else ['gw{0}'.format(i) for i in range(config.option.numprocesses)]\n\n # If a non-default DNS server is configured, start it and feed it to the ACME server\n dns_server = None\n acme_dns_server = None\n if config.option.dns_server == 'bind':\n dns_server = dns_lib.DNSServer(workers)\n 
config.add_cleanup(dns_server.stop)\n print('DNS xdist config:\\n{0}'.format(dns_server.dns_xdist))\n dns_server.start()\n acme_dns_server = '{}:{}'.format(\n dns_server.dns_xdist['address'],\n dns_server.dns_xdist['port']\n )\n\n # By calling setup_acme_server we ensure that all necessary acme server instances will be\n # fully started. This runtime is reflected by the acme_xdist returned.\n acme_server = acme_lib.ACMEServer(config.option.acme_server, workers,\n dns_server=acme_dns_server)\n config.add_cleanup(acme_server.stop)\n print('ACME xdist config:\\n{0}'.format(acme_server.acme_xdist))\n acme_server.start()\n\n config.acme_xdist = acme_server.acme_xdist\n config.dns_xdist = dns_server.dns_xdist if dns_server else None", "title": "" }, { "docid": "b712873e0ae0c392a9ae60801282be09", "score": "0.44883946", "text": "def init_node(name, start_master=False):\n import roslaunch\n import rospy\n\n class ROSMasterStub:\n @staticmethod\n def shutdown():\n warnings.warn(\n \"ROS master was started somewhere else and cannot be shut \"\n \"down.\"\n )\n\n try:\n rospy.get_master().getPid()\n except ConnectionRefusedError:\n if start_master:\n master = roslaunch.parent.ROSLaunchParent(\n \"master\", [], is_core=True\n )\n master.start()\n # make sure master is shut down on exit\n atexit.register(master.shutdown)\n else:\n raise RuntimeError(\"ROS master is not running.\")\n else:\n master = ROSMasterStub()\n\n rospy.init_node(name)\n\n return master", "title": "" }, { "docid": "2b101f440c636a5bf5b656fb4c569ac3", "score": "0.44853076", "text": "def _bootstraps(self, ctx):\n return [\n (\"Divmod.bootstrap\",\n [flat.flatten(self.transportRoot, ctx).decode(\"ascii\")]),\n (\"Nevow.Athena.bootstrap\",\n [self.jsClass, self.clientID.decode('ascii')])]", "title": "" }, { "docid": "f6cc6b5ce269db11a4c8a0626dc214a3", "score": "0.44793403", "text": "def boot(self) -> Optional[bool]:\n return pulumi.get(self, \"boot\")", "title": "" }, { "docid": "3581475ba65558e85e2291210ce6d5b5", "score": "0.4468921", "text": "def start_iscsi_initiators(ctx, tgt_link):\n remotes = ctx.cluster.only(teuthology.is_type('client')).remotes\n tgtd_list = []\n for role, host in tgt_link:\n rem = _get_remote(remotes, role)\n rem_name = _get_remote_name(remotes, host)\n rem.run(\n args=[\n 'sudo',\n 'iscsiadm',\n '-m',\n 'discovery',\n '-t',\n 'st',\n '-p',\n rem_name,\n ])\n proc = rem.run(\n args=[\n 'sudo',\n 'iscsiadm',\n '-m',\n 'node',\n '--login',\n ])\n if proc.exitstatus == 0:\n tgtd_list.append((rem, rem_name))\n general_io_test(ctx, rem, host)\n try:\n with contextutil.nested(\n lambda: generic_mkfs(ctx=ctx, config={host: {'fs_type': 'xfs'}},\n devname_rtn=tgt_devname_rtn),\n lambda: generic_mount(ctx=ctx, config={host: None},\n devname_rtn=tgt_devname_rtn),\n ):\n yield\n finally:\n for rem_info in tgtd_list:\n rem = rem_info[0]\n rem_name = rem_info[1]\n rem.run(\n args=[\n 'sudo',\n 'iscsiadm',\n '-m',\n 'node',\n '--logout',\n ])", "title": "" }, { "docid": "e533e99ace0ddc26049b2b7c6692bffe", "score": "0.44622546", "text": "def is_master():\n worker_config_path = '/etc/lava-server/worker.conf'\n if \"VIRTUAL_ENV\" in os.environ:\n worker_config_path = os.path.join(os.environ[\"VIRTUAL_ENV\"],\n worker_config_path[1:])\n\n return not os.path.exists(worker_config_path)", "title": "" }, { "docid": "919fb2d712ac172595fe9ec1638d7466", "score": "0.445036", "text": "def start(ctx, identifier, startupdata):\n cred = ctx.obj.credentials\n url, username, cookies = cred.url, cred.username, cred.cookies\n\n request_url = 
urljoin(url,\n \"/user/{}/api/v1/containers/\".format(username))\n\n payload_dict = dict(mapping_id=identifier)\n if startupdata is not None:\n # First make sure that the allow_startup_data policy is True\n check_url = urljoin(url,\n \"/user/{}/api/v1/applications/\".format(username))\n response = requests.get(check_url, cookies=cookies, verify=False)\n apps_data = json.loads(response.content.decode(\"utf-8\"))\n app_data = apps_data[\"items\"][identifier]\n allow_startup_data = app_data[\"image\"][\"policy\"][\"allow_startup_data\"]\n if allow_startup_data:\n payload_dict.update(\n configurables=dict(startupdata=dict(startupdata=startupdata)))\n else:\n raise click.ClickException(\n \"The 'allow_startup_data' policy is False for the current \"\n \"user.\\nExiting.\")\n\n payload = json.dumps(payload_dict)\n\n response = requests.post(request_url, payload, cookies=cookies,\n verify=False)\n if response.status_code == 201:\n location = response.headers[\"Location\"]\n parsed = urlsplit(location)\n path, port = parsed.path.split(\"_\")\n port = port.rstrip(\"/\")\n path = \"\".join(path.split(\"/api/v1\"))\n location = f\"{parsed.scheme}://{parsed.hostname}:{port}{path}\"\n print(location)", "title": "" }, { "docid": "57a0272c1707ac02b5c1a48a59e17856", "score": "0.44491786", "text": "def bootstrap(self) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "6b7b3a130b829dc3c01302e06df2bd58", "score": "0.44437012", "text": "def bootstrap(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "04643c46d3b4ffc95a1d97f31e4ae2df", "score": "0.44418922", "text": "def test_master_get(self):\n # Copy dummy certs from /certs to /code\n # Trying to load directly from /certs\n # raise a django.core.exceptions.SuspiciousFileOperation\n certname = \"puppet-grua.example.com.pem\"\n tmp_cert = f\"/code/uploads/cert_{certname}\"\n tmp_pk = f\"/code/uploads/pk_{certname}\"\n copyfile(f\"/code/dummy_certs/cert/{certname}\", tmp_cert)\n copyfile(f\"/code/dummy_certs/private_keys/{certname}\", tmp_pk)\n cert = open(tmp_cert)\n private_key = open(tmp_pk)\n # Create the master\n master_zone = models.MasterZone.objects.create(\n label=\"Splinter\",\n address=\"http://10.10.10.10\",\n signed_cert=File(cert),\n private_key=File(private_key),\n )\n # Close the files\n cert.close()\n private_key.close()\n # Remove the copied certs\n remove(tmp_cert)\n remove(tmp_pk)\n url = f\"/api/master_zones/{master_zone.pk}/\"\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n response_json = response.json()\n self.assertIsInstance(response_json[\"signed_cert\"], str)\n self.assertIsInstance(response_json[\"private_key\"], str)\n # Should be URLs\n self.assertIn(\"http://\", response_json[\"signed_cert\"])\n self.assertIn(\"http://\", response_json[\"private_key\"])\n # Remove the uploaded certs\n remove(master_zone.signed_cert.path)\n remove(master_zone.private_key.path)", "title": "" }, { "docid": "9426f980249490c3fda98426420bc805", "score": "0.44405517", "text": "def run_master():\n # Check connection to the db\n check_connection()\n\n logging.info('Init Master')\n master_config = get_master_config()\n logging.info(master_config)\n master = Master((master_config.host, master_config.port), ClientTCPHandler)\n\n # Activate the server; this will keep running until you\n # interrupt the program with Ctrl-C\n try:\n logging.info(\"Start serving\")\n master.serve_forever()\n except KeyboardInterrupt:\n master.stop()\n sys.exit(0)", "title": "" }, { "docid": 
"076634c173092b014d6537cf28cf4f61", "score": "0.4428597", "text": "def test_replicaset_ismaster(self):\n\n # start mongo process on free test port\n self.run_tool(\"init --replicaset\")\n\n # wait for primary\n assert self.tool._wait_for_primary()\n\n # insert a document and wait to replicate to 2 secondaries\n # (10 sec timeout)\n mc = MongoClient('localhost:%i' % self.port)\n mc.test.smokeWait.insert_one({}, w=2, wtimeout=10 * 60 * 1000)", "title": "" }, { "docid": "fd6c0f4a39e5bdb944c5b65cb6aa5547", "score": "0.44185448", "text": "def __init__(self, apache=None, aws_cloud=None, crio=None, docker=None, docker_enterprise=None, has_package_manager=None, k8s_api_server=None, k8s_controller_manager=None, k8s_etcd=None, k8s_federation_api_server=None, k8s_federation_controller_manager=None, k8s_kubelet=None, k8s_proxy=None, k8s_scheduler=None, kubernetes=None, openshift=None, os_distro=None, serverless=None, swarm_manager=None, swarm_node=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration.get_default_copy()\n self.local_vars_configuration = local_vars_configuration\n\n self._apache = None\n self._aws_cloud = None\n self._crio = None\n self._docker = None\n self._docker_enterprise = None\n self._has_package_manager = None\n self._k8s_api_server = None\n self._k8s_controller_manager = None\n self._k8s_etcd = None\n self._k8s_federation_api_server = None\n self._k8s_federation_controller_manager = None\n self._k8s_kubelet = None\n self._k8s_proxy = None\n self._k8s_scheduler = None\n self._kubernetes = None\n self._openshift = None\n self._os_distro = None\n self._serverless = None\n self._swarm_manager = None\n self._swarm_node = None\n self.discriminator = None\n\n if apache is not None:\n self.apache = apache\n if aws_cloud is not None:\n self.aws_cloud = aws_cloud\n if crio is not None:\n self.crio = crio\n if docker is not None:\n self.docker = docker\n if docker_enterprise is not None:\n self.docker_enterprise = docker_enterprise\n if has_package_manager is not None:\n self.has_package_manager = has_package_manager\n if k8s_api_server is not None:\n self.k8s_api_server = k8s_api_server\n if k8s_controller_manager is not None:\n self.k8s_controller_manager = k8s_controller_manager\n if k8s_etcd is not None:\n self.k8s_etcd = k8s_etcd\n if k8s_federation_api_server is not None:\n self.k8s_federation_api_server = k8s_federation_api_server\n if k8s_federation_controller_manager is not None:\n self.k8s_federation_controller_manager = k8s_federation_controller_manager\n if k8s_kubelet is not None:\n self.k8s_kubelet = k8s_kubelet\n if k8s_proxy is not None:\n self.k8s_proxy = k8s_proxy\n if k8s_scheduler is not None:\n self.k8s_scheduler = k8s_scheduler\n if kubernetes is not None:\n self.kubernetes = kubernetes\n if openshift is not None:\n self.openshift = openshift\n if os_distro is not None:\n self.os_distro = os_distro\n if serverless is not None:\n self.serverless = serverless\n if swarm_manager is not None:\n self.swarm_manager = swarm_manager\n if swarm_node is not None:\n self.swarm_node = swarm_node", "title": "" }, { "docid": "ddd812a8e7b2b007472e43b93eb0c879", "score": "0.44163176", "text": "def test_bake_node_0_again(self, sandbox):\n sandbox.client(0).bake('bootstrap1', BAKE_ARGS)", "title": "" }, { "docid": "98b7c73df8ba6d9095b1f8768e8e442b", "score": "0.4410705", "text": "def start(self):\n resp = self.client._perform_json(\n \"POST\", \"/admin/clusters/%s/actions/start\" % 
(self.cluster_id))\n if resp is None:\n raise Exception('Cluster operation returned no data')\n if resp.get('messages', {}).get('error', False):\n raise Exception('Cluster operation failed : %s' % (json.dumps(resp.get('messages', {}).get('messages', {}))))\n return resp", "title": "" }, { "docid": "f32c64fd8d911460094e4de8815ca2be", "score": "0.4409063", "text": "def boot_manager_security_version(self):\n if \"bootManagerSecurityVersion\" in self._prop_dict:\n return self._prop_dict[\"bootManagerSecurityVersion\"]\n else:\n return None", "title": "" }, { "docid": "44bbc832b92921c54d637987e1a891b1", "score": "0.44078675", "text": "def _boot(self, resource_url, response_key, name, image, flavor,\n meta=None,\n files=None,\n userdata=None,\n return_raw=False,\n key_name=None,\n availability_zone=None,\n nics=None,\n admin_pass=None,\n disk_config=None, **kwargs):\n body = {\n \"server\": {\n \"name\": name,\n # \"imageRef\": str(base.getid(image)) if image else '',\n \"flavorRef\": str(base.getid(flavor)),\n }\n }\n image = str(base.getid(image))\n if image:\n body['server'].update({'imageRef': image})\n\n if userdata:\n if os.path.exists(userdata):\n with open(userdata, \"r\") as fuserdata:\n userdata = fuserdata.read()\n\n if six.PY3:\n userdata = userdata.encode(\"utf-8\")\n else:\n userdata = encodeutils.safe_encode(userdata)\n\n userdata_b64 = base64.b64encode(userdata).decode('utf-8')\n body[\"server\"][\"user_data\"] = userdata_b64\n if meta:\n body[\"server\"][\"metadata\"] = meta\n # if reservation_id:\n # body[\"server\"][\"reservation_id\"] = reservation_id\n if key_name:\n body[\"server\"][\"key_name\"] = key_name\n # if scheduler_hints:\n # body['os:scheduler_hints'] = scheduler_hints\n # if config_drive:\n # body[\"server\"][\"config_drive\"] = config_drive\n if admin_pass:\n body[\"server\"][\"adminPass\"] = admin_pass\n # if not min_count:\n # min_count = 1\n # if not max_count:\n # max_count = min_count\n # body[\"server\"][\"min_count\"] = min_count\n # body[\"server\"][\"max_count\"] = max_count\n\n # if security_groups:\n # body[\"server\"][\"security_groups\"] = [{'name': sg}\n # for sg in security_groups]\n\n # Files are a slight bit tricky. They're passed in a \"personality\"\n # list to the POST. Each item is a dict giving a file name and the\n # base64-encoded contents of the file. 
We want to allow passing\n # either an open file *or* some contents as files here.\n if files:\n personality = body['server']['personality'] = []\n for filepath, file_or_string in sorted(files.items(),\n key=lambda x: x[0]):\n if hasattr(file_or_string, 'read'):\n data = file_or_string.read()\n else:\n data = file_or_string\n\n if six.PY3 and isinstance(data, str):\n data = data.encode('utf-8')\n cont = base64.b64encode(data).decode('utf-8')\n personality.append({\n 'path': filepath,\n 'contents': cont,\n })\n\n if availability_zone:\n body[\"server\"][\"availability_zone\"] = availability_zone\n\n # Block device mappings are passed as a list of dictionaries\n # if block_device_mapping:\n # body['server']['block_device_mapping'] = \\\n # self._parse_block_device_mapping(block_device_mapping)\n # elif block_device_mapping_v2:\n # body['server']['block_device_mapping_v2'] = block_device_mapping_v2\n\n # if nics is not None:\n # # NOTE(tr3buchet): nics can be an empty list\n # all_net_data = []\n # for nic_info in nics:\n # net_data = {}\n # # if value is empty string, do not send value in body\n # if nic_info.get('net-id'):\n # net_data['uuid'] = nic_info['net-id']\n # if (nic_info.get('v4-fixed-ip') and\n # nic_info.get('v6-fixed-ip')):\n # raise base.exceptions.CommandError(_(\n # \"Only one of 'v4-fixed-ip' and 'v6-fixed-ip' may be\"\n # \" provided.\"))\n # elif nic_info.get('v4-fixed-ip'):\n # net_data['fixed_ip'] = nic_info['v4-fixed-ip']\n # elif nic_info.get('v6-fixed-ip'):\n # net_data['fixed_ip'] = nic_info['v6-fixed-ip']\n # if nic_info.get('port-id'):\n # net_data['port'] = nic_info['port-id']\n # all_net_data.append(net_data)\n # body['server']['networks'] = all_net_data\n if nics is not None:\n body['server']['networks'] = nics\n\n if disk_config is not None:\n disk_config_dict = json.loads(disk_config)\n # body['server']['OS-DCF:diskConfig'] = disk_config\n for k, v in disk_config_dict.items():\n body['server'][k] = v\n return self._create(resource_url, body, response_key,\n return_raw=return_raw, **kwargs)", "title": "" }, { "docid": "160217be238e0eb2c37d7474124e71e2", "score": "0.44070017", "text": "def init(api, name):\n try:\n response = api.post('/containers/{}/init'.format(api.quote(name)))\n response.read()\n return response.status == HTTPStatus.NO_CONTENT\n except errors.NotFoundError as e:\n api.raise_not_found(e, e.response, errors.ContainerNotFound)", "title": "" }, { "docid": "04b0b69e6a95e5f527f23948a8bcb809", "score": "0.4397922", "text": "def __init__(__self__, *,\n api_server_args: Sequence['outputs.BareMetalApiServerArgumentResponse'],\n control_plane_node_pool_config: 'outputs.BareMetalControlPlaneNodePoolConfigResponse'):\n pulumi.set(__self__, \"api_server_args\", api_server_args)\n pulumi.set(__self__, \"control_plane_node_pool_config\", control_plane_node_pool_config)", "title": "" }, { "docid": "c82d2d32f55b6c66a8f5fa0cf1da1ab4", "score": "0.43957967", "text": "def manual_lb_config(self) -> 'outputs.BareMetalAdminManualLbConfigResponse':\n return pulumi.get(self, \"manual_lb_config\")", "title": "" }, { "docid": "0aa1aa71464008170d2f3503f2e4d610", "score": "0.43918434", "text": "def cluster_bootstrap_account_password(self) -> Optional[str]:\n return pulumi.get(self, \"cluster_bootstrap_account_password\")", "title": "" }, { "docid": "a486fd6505740c9b1af60c7cbe881e71", "score": "0.43904382", "text": "def isMaster(self, *args) -> \"bool\":\n return _pyAgrum.Instantiation_isMaster(self, *args)", "title": "" }, { "docid": "84787fc209410ce4b7c96f2e0299cafb", 
"score": "0.43891808", "text": "def _bootstrap_nodes(self, devops_node_names=[], timeout=600):\n timer = time.time()\n\n slaves = []\n for node_name in devops_node_names:\n slave = ci.environment.node[node_name]\n logging.info(\"Starting slave node %r\", node_name)\n slave.start()\n slaves.append(slave)\n\n nodes = []\n full_nodes_len = len(slaves)\n while True:\n for slave in list(slaves):\n node = self._get_slave_node_by_devops_node(slave)\n if node is not None:\n nodes.append(node)\n slaves.remove(slave)\n logging.debug(\"Node %s found\", node['mac'])\n else:\n logging.debug(\"Node %s not bootstrapped yet\", slave.name)\n\n logging.debug(\"Bootstrapped nodes: %s\",\n str([n['mac'] for n in nodes]))\n if (time.time() - timer) > timeout:\n raise Exception(\"Bootstrap nodes discovery failed by timeout.\"\n \" Nodes: %s\" %\n ', '.join([n.name for n in slaves]))\n\n if len(nodes) == full_nodes_len:\n break\n\n logging.info(\"Waiting bootstraping slave nodes: timer: %s\",\n (time.time() - timer))\n time.sleep(15)\n\n return nodes", "title": "" }, { "docid": "e3b33bfcfae2713685c49225ce1215a6", "score": "0.4378937", "text": "def test_crud_cluster(self):\n # create the object\n url = reverse('clusterman:clusters-list')\n responses.add(responses.POST, 'https://127.0.0.1:4430/v3/clusters/c-abcd1?action=generateKubeconfig',\n json={'config': load_kube_config()}, status=200)\n response = self.client.post(url, self.CLUSTER_DATA, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED,\n response.content)\n\n # check it exists\n url = reverse('clusterman:clusters-detail', args=[response.data['id']])\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertDictContainsSubset(self.CLUSTER_DATA,\n response.data)\n\n # delete the object\n url = reverse('clusterman:clusters-detail', args=[response.data['id']])\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n # check it no longer exists\n url = reverse('clusterman:clusters-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "31823ee7fe8e4fe53d5d1eb0fe7122f4", "score": "0.43767256", "text": "def join(self, node_id=None, node_address=None, bootstrap_address=None, additional_data=None):\n self.id = node_id or self.generate_key(self.node_address)\n self.node_address = node_address or self.node_address # normally already set in __init__\n self.bootstrap_address = bootstrap_address\n self.predecessor = None\n self.log.info(\"[Configuration] node_id: %d, bootstrap_node: %s\", self.id, self.bootstrap_address)\n\n self.additional_data = additional_data or {}\n\n if self.bootstrap_address:\n # Regular node joining via bootstrap node\n self.__generate_fingers(None)\n\n # Try joining later if our successor does not respond\n successor = None\n while True:\n successor, status = yield from self.run_rpc_safe(self.bootstrap_address, \"rpc_find_successor_rec\",\n self.fingertable[0][\"start\"])\n if status == 0:\n if successor[\"status\"] == 0:\n # Successors seems to be reachable: we can proceed\n break\n else:\n self.log.warn(\"Successor node not responding.\")\n else:\n self.log.warn(\"Bootstrap node not responding.\")\n\n self.log.warn(\"Will retry in 3 seconds.\")\n yield from asyncio.sleep(3)\n\n # Proceed with a working successor\n successor = filter_node_response(successor)\n self.successor.set(successor)\n\n yield from 
self.init_successor_list(successor)\n yield from self.init_finger_table()\n self.bootup_finished = True\n yield from self.update_others()\n\n else:\n # This is the bootstrap node\n successor_node = self.as_dict()\n self.__generate_fingers(successor_node)\n self.successor.set(successor_node) # bootstrap first references itself\n self.bootup_finished = True\n\n self.print_finger_table()\n\n # if self.bootstrap_address:\n # remote_peer = yield from self.container.connect(self.bootstrap_address)\n # ft = yield from remote_peer.rpc_get_fingertable()\n # print(\"Bootstrap Finger Table: \")\n # self.print_finger_table(ft)", "title": "" }, { "docid": "cd1a60a28aa6141129d78a6f113af9f4", "score": "0.43730858", "text": "def test_bake_node_2_again(self, sandbox):\n sandbox.client(2).bake('bootstrap1', BAKE_ARGS)", "title": "" }, { "docid": "0fbb3748b947abc310b735863a0f1c5c", "score": "0.4373042", "text": "def test_sup_create_master_account(self):\n cmd = SupCommand('crossbar_maintenance', 'create_account', 'test', 'localhost', 'admin', 'secret')\n exit_code, output = self.container.exec(cmd)\n self.assertEqual(exit_code, 0)\n output = output.split('\\n')\n self.assertRegex(output[0], r'^created new account')\n self.assertRegex(output[1], r'^created new account admin user')\n self.assertRegex(output[2], r'^promoting account')\n self.assertRegex(output[3], r'^updating master account id in system_config.accounts')", "title": "" }, { "docid": "dad63d572050f09ac120d47dda5fb3b0", "score": "0.43718767", "text": "def test_init(self):\n\n r = self.run_function(\"lxc.init\", [self.prefix], profile=\"sshd\", seed=False)\n self.assertTrue(r.get(\"created\", False))\n self.assertTrue(self.run_function(\"lxc.exists\", [self.prefix]))", "title": "" }, { "docid": "9e7f41d974b38bd836ef75d1d01a6e7d", "score": "0.4360967", "text": "async def bootstrap(self):\n helpers.write_state_conf_file(start_state)\n return await helpers.launch_bftlist(__name__)", "title": "" }, { "docid": "ab990a593dee9123dbbe1066f7daf08d", "score": "0.43589297", "text": "def deploy_bootstrap(host, force, inventory, user, tag, playbook):\n inventory = inventory or host\n if force or confirm_deploy_action('bootstrap', host, inventory):\n Deploy().bootstrap(host, inventory, user, tag, playbook)", "title": "" }, { "docid": "00224bb3bdddb8ce131d6ce8bcdaf602", "score": "0.43580517", "text": "def init():\n\tif utils.check_if_initialized():\n\t\tclick.echo('You have already initialized the application.')\n\t\tclick.echo('Please run \\\"chef reinit\\\" to reinitialize the application.')\n\telse:\n\t\tapi.new_oauth2_token()", "title": "" }, { "docid": "38b45c7fda9ba5a96ad7043349c44024", "score": "0.43551758", "text": "def _send_bootstrap_request(self, request):\n hostports = list(self._bootstrap_hosts)\n random.shuffle(hostports)\n for host, port in hostports:\n ep = self._endpoint_factory(self.reactor, host, port)\n try:\n protocol = yield ep.connect(_bootstrapFactory)\n except Exception as e:\n log.debug(\"%s: bootstrap connect to %s:%s -> %s\", self, host, port, e)\n continue\n\n try:\n response = yield protocol.request(request).addTimeout(self.timeout, self.reactor)\n except Exception:\n log.debug(\n \"%s: bootstrap %s to %s:%s failed\",\n self,\n _ReprRequest(request),\n host,\n port,\n exc_info=True,\n )\n else:\n returnValue(response)\n finally:\n protocol.transport.loseConnection()\n\n raise KafkaUnavailableError(\"Failed to bootstrap from hosts {}\".format(hostports))", "title": "" }, { "docid": "a62b2ad564cda7ee6d09d5e6ecf76aac", "score": 
"0.43533903", "text": "def vcenter_ceph_multiroles_cindervmdk_and_cephosd(self):\n\n self.env.revert_snapshot(\"ready_with_9_slaves\")\n\n # Configure cluster\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=DEPLOYMENT_MODE,\n settings={'volumes_ceph': True,\n 'volumes_lvm': False,\n 'ephemeral_ceph': True},\n vcenter_value={\n \"glance\": {\n \"vcenter_username\": \"\",\n \"datacenter\": \"\",\n \"vcenter_host\": \"\",\n \"vcenter_password\": \"\",\n \"datastore\": \"\", },\n \"availability_zones\": [\n {\"vcenter_username\": VCENTER_USERNAME,\n \"nova_computes\": [\n {\"datastore_regex\": \".*\",\n \"vsphere_cluster\": \"Cluster1\",\n \"service_name\": \"vmcluster1\"},\n {\"datastore_regex\": \".*\",\n \"vsphere_cluster\": \"Cluster2\",\n \"service_name\": \"vmcluster2\"}, ],\n \"vcenter_host\": VCENTER_IP,\n \"az_name\": \"vcenter\",\n \"vcenter_password\": VCENTER_PASSWORD,\n }],\n \"network\": {\"esxi_vlan_interface\": \"vmnic1\"}}, )\n\n logger.info(\"cluster is {}\".format(cluster_id))\n\n # Assign role to nodes\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-01': ['controller', 'ceph-osd'],\n 'slave-02': ['controller', 'cinder-vmware'],\n 'slave-03': ['controller', 'ceph-osd', 'cinder-vmware'],\n 'slave-04': ['compute', 'ceph-osd'],\n 'slave-05': ['compute', 'cinder-vmware'],\n 'slave-06': ['compute', 'ceph-osd', 'cinder-vmware'], })\n\n self.configure_nova_vlan(cluster_id)\n\n self.fuel_web.deploy_cluster_wait(cluster_id)\n self.fuel_web.verify_network(cluster_id)\n\n self.fuel_web.run_ostf(\n cluster_id=cluster_id, test_sets=['sanity', 'ha'])\n\n self.run_smoke(cluster_id=cluster_id)", "title": "" }, { "docid": "f123b14606536531101a7c3eb00b5c0a", "score": "0.43506977", "text": "async def pre_bootstrap(self):\n # Set provider type for post-bootstrap\n app.env['JUJU_PROVIDERTYPE'] = juju.get_cloud_types_by_name()[\n app.current_cloud]\n # Set current credential name (localhost doesn't have one)\n app.env['JUJU_CREDENTIAL'] = app.current_credential or ''\n app.env['JUJU_CONTROLLER'] = app.current_controller\n app.env['JUJU_MODEL'] = app.current_model\n app.env['CONJURE_UP_SPELLSDIR'] = app.argv.spells_dir\n\n step = StepModel({},\n filename='00_pre-bootstrap',\n name='pre-bootstrap')\n await utils.run_step(step,\n self.msg_cb)", "title": "" }, { "docid": "e2a5776e1e481bf36a9301c40327a621", "score": "0.4349061", "text": "def test_kerberos_cross_realm():\n topic = \"test\"\n\n cluster = Cluster('KafkaCluster',\n root_path=os.environ.get('TRIVUP_ROOT', 'tmp'),\n debug=True)\n\n ZookeeperApp(cluster)\n\n #\n # Create KDCs for each realm.\n # First realm will be the default / broker realm.\n #\n realm_cnt = 2\n realms = [\"REALM{}.COM\".format(x + 1) for x in range(0, realm_cnt)]\n\n # Pre-Allocate ports for the KDCs so they can reference eachother\n # in the krb5.conf configuration.\n kdc_ports = {x: TcpPortAllocator(cluster).next(\"dummy\") for x in realms}\n\n # Set up realm=kdc:port cross-realm mappings\n cross_realms = \",\".join([\"{}={}:{}\".format(x, cluster.get_node().name, kdc_ports[x]) for x in realms])\n\n kdcs = dict()\n for realm in realms:\n kdc = KerberosKdcApp(cluster, realm,\n conf={'port': kdc_ports[realm],\n 'cross_realms': cross_realms,\n 'renew_lifetime': '30',\n 'ticket_lifetime': '120'})\n kdc.start()\n kdcs[realm] = kdc\n\n broker_realm = realms[0]\n client_realm = realms[1]\n broker_kdc = kdcs[broker_realm]\n client_kdc = kdcs[client_realm]\n\n # Create broker_cnt brokers\n broker_cnt = 4\n brokerconf = 
{'replication_factor': min(3, int(broker_cnt)),\n 'num_partitions': broker_cnt * 2,\n 'version': '2.2.0',\n 'sasl_mechanisms': 'GSSAPI',\n 'realm': broker_realm,\n 'conf': ['connections.max.idle.ms=60000']}\n\n brokers = dict()\n for n in range(0, broker_cnt):\n broker = KafkaBrokerApp(cluster, brokerconf)\n brokers[broker.appid] = broker\n\n # Get bootstrap server list\n security_protocol = 'SASL_PLAINTEXT'\n all_listeners = (','.join(cluster.get_all(\n 'listeners', '', KafkaBrokerApp))).split(',')\n bootstrap_servers = ','.join([x for x in all_listeners\n if x.startswith(security_protocol)])\n\n assert len(bootstrap_servers) > 0, \"no bootstrap servers\"\n\n print(\"## Deploying cluster\")\n cluster.deploy()\n print(\"## Starting cluster\")\n cluster.start(timeout=30)\n\n # Add cross-realm TGTs\n for realm in realms:\n for crealm in [x for x in realms if x != realm]:\n kdcs[realm].execute('kadmin.local -d \"{}\" -q \"addprinc -requires_preauth -pw password krbtgt/{}@{}\"'.format(kdcs[realm].conf.get('dbpath'), crealm, realm)).wait()\n kdcs[realm].execute('kadmin.local -d \"{}\" -q \"addprinc -requires_preauth -pw password krbtgt/{}@{}\"'.format(kdcs[realm].conf.get('dbpath'), realm, crealm)).wait()\n\n # Create client base configuration\n client_config = {\n 'bootstrap.servers': bootstrap_servers,\n 'enable.sparse.connections': False,\n 'broker.address.family': 'v4',\n 'sasl.mechanisms': 'GSSAPI',\n 'security.protocol': security_protocol,\n 'debug': 'broker,security'\n }\n\n os.environ['KRB5CCNAME'] = client_kdc.mkpath('krb5cc')\n os.environ['KRB5_CONFIG'] = client_kdc.conf['krb5_conf']\n os.environ['KRB5_KDC_PROFILE'] = client_kdc.conf['kdc_conf']\n principal,keytab = client_kdc.add_principal(\"admin\")\n\n client_config['sasl.kerberos.keytab'] = keytab\n client_config['sasl.kerberos.principal'] = principal.split('@')[0]\n client_config['sasl.kerberos.min.time.before.relogin'] = 120*1000*3\n\n print(client_config)\n\n print(\"bootstraps: {}\".format(client_config['bootstrap.servers']))\n p = Producer(client_config)\n\n time.sleep(10)\n for n in range(1, 100):\n p.produce(topic, \"msg #{}\".format(n))\n\n p.poll(1.0)\n\n p.flush(1.0)\n\n print(\"####### {} messages remaining\\n\\n\\n\".format(len(p)))\n\n start = time.time()\n end = start + (90*60)\n until = start + (12*60)\n while time.time() < end:\n now = time.time()\n if until < now:\n print(\"### Producing 2 messages\")\n for n in range(1, 2):\n p.produce(topic, \"msg #{}\".format(n))\n until = now + (12*60)\n\n p.poll(1.0)\n\n del p\n\n cluster.stop()", "title": "" }, { "docid": "5f02b57298b9668987d4c37197442bed", "score": "0.43484905", "text": "def start_api():\n service = MasterNodeService()\n service.main()", "title": "" }, { "docid": "ed731071b8cb4c0fc91734a8f0546c51", "score": "0.4347841", "text": "def EnableNonExpectedMasterStatus(self):\r\n\t\treturn self._get_attribute('enableNonExpectedMasterStatus')", "title": "" }, { "docid": "f59266e3928ad0eef044ea5fa044cdec", "score": "0.43400797", "text": "def deploy_lma_toolchain(self):\n self.env.revert_snapshot(\"ready_with_5_slaves\")\n\n # TODO(scroiset): use actions fuel_actions.py\n # upload_plugin and install_plugin\n # copy plugins to the master node\n utils.upload_tarball(\n ip=self.ssh_manager.admin_ip,\n tar_path=settings.LMA_COLLECTOR_PLUGIN_PATH,\n tar_target=\"/var\")\n utils.upload_tarball(\n ip=self.ssh_manager.admin_ip,\n tar_path=settings.ELASTICSEARCH_KIBANA_PLUGIN_PATH,\n tar_target=\"/var\")\n utils.upload_tarball(\n ip=self.ssh_manager.admin_ip,\n 
tar_path=settings.INFLUXDB_GRAFANA_PLUGIN_PATH,\n tar_target=\"/var\")\n utils.upload_tarball(\n ip=self.ssh_manager.admin_ip,\n tar_path=settings.LMA_INFRA_ALERTING_PLUGIN_PATH,\n tar_target=\"/var\")\n\n # install plugins\n utils.install_plugin_check_code(\n ip=self.ssh_manager.admin_ip,\n plugin=os.path.basename(settings.LMA_COLLECTOR_PLUGIN_PATH))\n utils.install_plugin_check_code(\n ip=self.ssh_manager.admin_ip,\n plugin=os.path.basename(settings.ELASTICSEARCH_KIBANA_PLUGIN_PATH))\n utils.install_plugin_check_code(\n ip=self.ssh_manager.admin_ip,\n plugin=os.path.basename(settings.INFLUXDB_GRAFANA_PLUGIN_PATH))\n utils.install_plugin_check_code(\n ip=self.ssh_manager.admin_ip,\n plugin=os.path.basename(settings.LMA_INFRA_ALERTING_PLUGIN_PATH))\n\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=settings.DEPLOYMENT_MODE,\n )\n\n influxdb_user = \"influxdb\"\n influxdb_pass = \"influxdbpass\"\n influxdb_rootpass = \"r00tme\"\n grafana_user = \"grafana\"\n grafana_pass = \"grafanapass\"\n mysql_dbname = \"grafanalma\"\n mysql_user = \"grafanalma\"\n mysql_pass = \"mysqlpass\"\n nagios_pass = \"nagiospass\"\n plugins = [\n {\n 'name': 'lma_collector',\n 'version': '0.9.0',\n 'options': {\n 'environment_label/value': 'deploy_lma_toolchain',\n 'elasticsearch_mode/value': 'local',\n 'influxdb_mode/value': 'local',\n 'alerting_mode/value': 'local',\n }\n },\n {\n 'name': 'elasticsearch_kibana',\n 'version': '0.9.0',\n 'options': {\n }\n },\n {\n 'name': 'lma_infrastructure_alerting',\n 'version': '0.9.0',\n 'options': {\n 'send_to/value': 'root@localhost',\n 'send_from/value': 'nagios@localhost',\n 'smtp_host/value': '127.0.0.1',\n 'nagios_password/value': nagios_pass,\n }\n },\n {\n 'name': 'influxdb_grafana',\n 'version': '0.9.0',\n 'options': {\n 'influxdb_rootpass/value': influxdb_rootpass,\n 'influxdb_username/value': influxdb_user,\n 'influxdb_userpass/value': influxdb_pass,\n 'grafana_username/value': grafana_user,\n 'grafana_userpass/value': grafana_pass,\n 'mysql_mode/value': 'local',\n 'mysql_dbname/value': mysql_dbname,\n 'mysql_username/value': mysql_user,\n 'mysql_password/value': mysql_pass,\n }\n },\n ]\n for plugin in plugins:\n plugin_name = plugin['name']\n plugin_version = plugin['version']\n msg = \"Plugin '{:s}' couldn't be found. 
\" \\\n \"Test aborted\".format(plugin_name)\n assert_true(\n self.fuel_web.check_plugin_exists(cluster_id, plugin_name),\n msg)\n logger.debug('{:s} plugin is installed'.format(plugin_name))\n self.fuel_web.update_plugin_settings(\n cluster_id, plugin_name,\n plugin_version, plugin['options'])\n\n analytics_roles = [\"influxdb_grafana\",\n \"elasticsearch_kibana\",\n \"infrastructure_alerting\"]\n self.fuel_web.update_nodes(\n cluster_id,\n {\n \"slave-01\": [\"controller\"],\n \"slave-02\": [\"controller\"],\n \"slave-03\": [\"controller\"],\n \"slave-04\": [\"compute\", \"cinder\"],\n \"slave-05\": analytics_roles,\n }\n )\n self.fuel_web.deploy_cluster_wait(cluster_id, timeout=9000)\n\n analytics_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, analytics_roles\n )\n msg = \"One node with '{}' roles must be present, found {}\".format(\n ' + '.join(analytics_roles), len(analytics_nodes))\n\n assert_true(len(analytics_nodes) == 1, msg)\n\n elasticsearch_kibana_vip = self.get_vip(cluster_id, 'es_vip_mgmt')\n influxdb_grafana_vip = self.get_vip(cluster_id, 'influxdb')\n nagios_vip = self.get_vip(cluster_id, 'infrastructure_alerting')\n assert_is_not_none(\n elasticsearch_kibana_vip,\n \"Fail to retrieve the Elasticsearch/Kibana cluster VIP address\"\n )\n assert_is_not_none(\n influxdb_grafana_vip,\n \"Fail to retrieve the InfluxDB/Grafana cluster VIP address\"\n )\n assert_is_not_none(\n nagios_vip,\n \"Fail to retrieve the Infrastructure Alerting cluster VIP address\"\n )\n\n def assert_http_get_response(url, expected=200):\n r = requests.get(url)\n assert_equal(r.status_code, expected,\n \"{} responded with {}, expected {}\".format(\n url, r.status_code, expected))\n\n logger.debug(\"Check that Elasticsearch is ready\")\n assert_http_get_response(\"http://{0}:9200/\".format(\n elasticsearch_kibana_vip))\n\n logger.debug(\"Check that Kibana is ready\")\n assert_http_get_response(\"http://{0}/\".format(\n elasticsearch_kibana_vip))\n\n logger.debug(\"Check that the root user can access InfluxDB\")\n influxdb_url = \"http://{0}:8086/query?db=lma&u={1}&p={2}&\" + \\\n \"q=show+measurements\"\n assert_http_get_response(influxdb_url.format(influxdb_grafana_vip,\n 'root',\n influxdb_rootpass))\n logger.debug(\"Check that the LMA user can access InfluxDB\")\n assert_http_get_response(influxdb_url.format(influxdb_grafana_vip,\n influxdb_user,\n influxdb_pass))\n\n logger.debug(\"Check that the LMA user can access Grafana\")\n assert_http_get_response(\n \"http://{0}:{1}@{2}:8000/api/org\".format(grafana_user,\n grafana_pass,\n influxdb_grafana_vip))\n\n nagios_url = \"http://{}:{}\".format(nagios_vip, '8001')\n r = requests.get(nagios_url, auth=('nagiosadmin',\n nagios_pass))\n assert_equal(\n r.status_code, 200,\n \"Nagios HTTP response code {}, expected {}\".format(\n r.status_code, 200)\n )\n self.fuel_web.run_ostf(cluster_id=cluster_id)\n\n self.env.make_snapshot(\"deploy_lma_toolchain\")", "title": "" }, { "docid": "ebd70a6b53bf7cd169e22d5033a4bf18", "score": "0.433921", "text": "def _wait_for_leadership(self):\n _log.info(\"Waiting for this controller to be elected leader\")\n while True:\n try:\n is_leader = self._is_leader()\n except requests.exceptions.ConnectionError:\n # During startup, the leader election container\n # might not be up yet. 
Handle this case gracefully.\n _log.info(\"Waiting for leader election container\")\n else:\n # Successful response from the leader election container.\n # Check if we are the elected leader.\n if is_leader:\n _log.info(\"We have been elected leader\")\n break\n time.sleep(1)", "title": "" }, { "docid": "d1fa4e10eb6abd2db8c40eefe2b70ddc", "score": "0.43318516", "text": "def setup_master(self):\n # TODO: remove this code when fuel-devops will be ready to\n # describe all required network parameters (gateway, CIDR, IP range)\n # inside 'address_pool', so we can use 'network_pools' section\n # for L3 configuration in tests for multi racks\n if MULTIPLE_NETWORKS:\n from system_test.core.discover import load_yaml\n self._devops_config = load_yaml(MULTIPLE_NETWORKS_TEMPLATE)\n if USE_HAPROXY_TEMPLATE and SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH:\n from system_test.core.discover import load_yaml\n self._devops_config = load_yaml(EXTERNAL_HAPROXY_TEMPLATE)\n if ENABLE_DMZ:\n from system_test.core.discover import load_yaml\n self._devops_config = load_yaml(ENABLE_DMZ_TEMPLATE)\n\n self.check_run(\"empty\")\n\n with TimeStat(\"setup_environment\", is_uniq=True):\n\n if list(self.env.d_env.get_nodes(role='fuel_master')):\n self.env.setup_environment()\n self.fuel_post_install_actions()\n\n elif list(self.env.d_env.get_nodes(role='centos_master')):\n # need to use centos_master.yaml devops template\n hostname = ''.join((settings.FUEL_MASTER_HOSTNAME,\n settings.DNS_SUFFIX))\n self.centos_setup_fuel(hostname)\n\n else:\n raise SkipTest(\n \"No Fuel master nodes found!\")\n\n self.env.make_snapshot(\"empty\", is_make=True)\n self.current_log_step = 0", "title": "" }, { "docid": "cb01d3ca69a4e99455e33f787fc1fff8", "score": "0.43317893", "text": "def test_bake_node_0(self, sandbox):\n sandbox.client(0).bake('bootstrap1', BAKE_ARGS)", "title": "" }, { "docid": "578d0b7e37eb4d10b28f5cfbd278a5b8", "score": "0.43308273", "text": "def sync_root_acls(self):\n future_response = self.client._perform_json(\n \"POST\", \"/admin/connections/%s/sync\" % self.name,\n body = {'root':True})\n return DSSFuture(self.client, future_response.get('jobId', None), future_response)", "title": "" }, { "docid": "78b33e7577647cda3c51d73b11a0c343", "score": "0.43266305", "text": "def master_system(self):\n\n return None", "title": "" }, { "docid": "78b33e7577647cda3c51d73b11a0c343", "score": "0.43266305", "text": "def master_system(self):\n\n return None", "title": "" }, { "docid": "a33d644602dcb0d6e9e3e4ab6f7671ca", "score": "0.43129337", "text": "def resource_setup(cls):\n super(AmphoraAPITest, cls).resource_setup()\n\n lb_name = data_utils.rand_name(\"lb_member_lb1-amphora-api\")\n lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,\n const.NAME: lb_name}\n\n cls._setup_lb_network_kwargs(lb_kwargs)\n\n lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)\n cls.lb_id = lb[const.ID]\n cls.addClassResourceCleanup(\n cls.mem_lb_client.cleanup_loadbalancer,\n cls.lb_id)\n\n waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,\n cls.lb_id, const.PROVISIONING_STATUS,\n const.ACTIVE,\n CONF.load_balancer.lb_build_interval,\n CONF.load_balancer.lb_build_timeout)", "title": "" }, { "docid": "b022dfafedf68c5a0b0b9057c672914c", "score": "0.43128577", "text": "def __is_master(self, node):\n return node.ip == self.master_ip", "title": "" } ]
8e444fb4ab189b2b1342a799914c2ce0
Query backend to set new risk limit.
[ { "docid": "6fc80801b87db66e6f435ccc23132824", "score": "0.67008007", "text": "def set_risk_limit(self):\n name, symbol = self._get_selected()\n riskLimit = int(self.riskSpin.get())\n try:\n core.position_risk_limit([name], symbol, riskLimit)\n self.update_instruments()\n except Exception as e:\n tkinter.messagebox.showerror(\"Error\", str(e))", "title": "" } ]
[ { "docid": "4027090ba44f9d7b789a7adc225e048b", "score": "0.6715031", "text": "def risk_limit(self, risk_limit):\n\n self._risk_limit = risk_limit", "title": "" }, { "docid": "208d7e671dd414b0b0c63ed3c1b72284", "score": "0.6701142", "text": "def set_limits(self):", "title": "" }, { "docid": "b2491178fbc1dc01562831b7ebb56af4", "score": "0.6484744", "text": "def set_query_limit(self, query_limit):\n if query_limit is None or not query_limit or query_limit == 0:\n self.query_limit = 0\n elif self.vendor == \"MariaDB\":\n self.query_limit = float(query_limit)\n else:\n self.query_limit = int(query_limit * 1000.0)\n\n if self.vendor == \"MariaDB\":\n result = self.execute(\n \"SET SESSION max_statement_time = {}\".format(self.query_limit)\n )\n else:\n result = self.execute(\n \"SET SESSION max_execution_time = {}\".format(self.query_limit)\n )\n return result[\n \"success\"\n ] # many versions will not accept query time restrictions", "title": "" }, { "docid": "9fc5be034b21fab12d080590a330c941", "score": "0.647888", "text": "def AdjustLimits( *args, **kargs ):\n from helpers.limhelper import AdjustLimits_Helper\n helper = AdjustLimits_Helper()\n helper.configure( *args, **kargs )\n return helper.execute( *args, **kargs )", "title": "" }, { "docid": "da9d567daa3cadd5707a4e82af2cb63f", "score": "0.63910925", "text": "def test_get_query_with_new_limit_upper() -> None:\n query = ParsedQuery(\"SELECT * FROM birth_names LIMIT 2000\")\n assert query.set_or_update_query_limit(1000) == (\n \"SELECT * FROM birth_names LIMIT 1000\"\n )", "title": "" }, { "docid": "4255dddc88460264420cde429449e3ac", "score": "0.6352513", "text": "def test_get_query_with_new_limit_lower() -> None:\n query = ParsedQuery(\"SELECT * FROM birth_names LIMIT 555\")\n assert query.set_or_update_query_limit(1000) == (\n \"SELECT * FROM birth_names LIMIT 555\"\n )", "title": "" }, { "docid": "ba836f9b225e639779599243efdde2d3", "score": "0.6214256", "text": "def set_limit(self):\n (self.REQUEST_LIMIT, self.RESET_TIME) = UsefulFunctions.get_limit(self.GH_KEY)\n self.REQUEST_COUNTER = 0\n print(\"REQUEST_LIMIT: \" + str(self.REQUEST_LIMIT))\n print(\"RESET_TIME: \" + datetime.datetime.fromtimestamp(self.RESET_TIME).strftime('%Y-%m-%d %H:%M:%S'))\n\n time_diff = self.RESET_TIME - int(time.time())\n if time_diff > 0 and self.REQUEST_COUNTER > self.REQUEST_LIMIT - 10:\n print(\"Come to close to the request limit!!!\")\n print(\"Need to sleep: \" + str((time_diff + 60) / 60) + \" min\")\n\n # If need to sleep more than 10 min: sleep 10 min, than check limit again\n if time_diff > (10 * 60):\n time.sleep(10 * 60)\n self.set_limit()\n else:\n time.sleep(time_diff)", "title": "" }, { "docid": "9115522401fc4c0a83ede298a204b89c", "score": "0.6090126", "text": "def set_Limit(self, value):\n super(ScanInputSet, self)._set_input('Limit', value)", "title": "" }, { "docid": "8dffc9d59db485c4a6731a2f8f61c7e9", "score": "0.6083895", "text": "def current_limit(self, c, current_limit=None):", "title": "" }, { "docid": "55bbf0c4439afa53bc6c5aeff0f069f1", "score": "0.60559916", "text": "def limit(self, value):\n self._sqlize_select.limit = int(value)\n return (self)", "title": "" }, { "docid": "5e41f655d092de23d15fb886832a3191", "score": "0.6019967", "text": "def real_limit(self, value):\n if value > self.max_limit:\n raise WalletLimitExceed()\n elif value < 0:\n raise WalletLimitNotAllowed()\n else:\n self.real_limit_ = value\n self.save()", "title": "" }, { "docid": "b802eb143c4b9f3eae07a1feeffea48f", "score": "0.60066617", "text": "def 
quota_update(context, project_id, resource, limit, user_id=None):\n per_user = user_id and resource not in PER_PROJECT_QUOTAS\n model = models.ProjectUserQuota if per_user else models.Quota\n query = model_query(context, model).\\\n filter_by(project_id=project_id).\\\n filter_by(resource=resource)\n if per_user:\n query = query.filter_by(user_id=user_id)\n\n result = query.update({'hard_limit': limit})\n if not result:\n if per_user:\n raise exception.ProjectUserQuotaNotFound(project_id=project_id,\n user_id=user_id)\n else:\n raise exception.ProjectQuotaNotFound(project_id=project_id)", "title": "" }, { "docid": "64340a34b121ac449fc45c8cce3bca86", "score": "0.5940006", "text": "def quota_class_update(context, class_name, resource, limit):\n result = model_query(context, models.QuotaClass, read_deleted=\"no\").\\\n filter_by(class_name=class_name).\\\n filter_by(resource=resource).\\\n update({'hard_limit': limit})\n\n if not result:\n raise exception.QuotaClassNotFound(class_name=class_name)", "title": "" }, { "docid": "6970049827363de8e3083d47bc94fa01", "score": "0.5927246", "text": "def limit(self, limit):\n self._limit = limit", "title": "" }, { "docid": "62ea8890ff4d0fdcecf67f0dbe56d756", "score": "0.58784515", "text": "def limit(self, limit):\n\n self._limit = limit", "title": "" }, { "docid": "62ea8890ff4d0fdcecf67f0dbe56d756", "score": "0.58784515", "text": "def limit(self, limit):\n\n self._limit = limit", "title": "" }, { "docid": "904fb144f56502331bf8ddd5929c9c8a", "score": "0.5842627", "text": "async def test_rate_limit_set():\n rate_headers = {\n \"ratelimit-limit\": \"42\",\n \"ratelimit-remaining\": \"1\",\n \"ratelimit-reset\": \"0\",\n }\n gl = MockGitLabAPI(headers=rate_headers)\n await gl._make_request(\"GET\", \"/rate_limit\", {}, \"\")\n assert gl.rate_limit.limit == 42", "title": "" }, { "docid": "ee194d3f362f89067c92587f521fdd5b", "score": "0.58123314", "text": "def set_Limit(self, value):\n super(PersonInputSet, self)._set_input('Limit', value)", "title": "" }, { "docid": "0c3acab64408f6a90a316c1c244eabca", "score": "0.5810424", "text": "def SetMaxRate(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "title": "" }, { "docid": "dbc3f26e2f69d5582147d497a7d4ba4a", "score": "0.5777984", "text": "def rflimit(self):\n return self.query('*RFLV:LIMIT?')", "title": "" }, { "docid": "07effc3a6c3aa3d11965b59d67191acd", "score": "0.57462925", "text": "def set_Limit(self, value):\n super(SearchByKeywordInputSet, self)._set_input('Limit', value)", "title": "" }, { "docid": "43340e6f9da9cb1b3b02ab0c2f2a924b", "score": "0.5737109", "text": "def limits(conf, **kwargs):\n\n wotemu.cli.limits.update_limits(conf, **kwargs)", "title": "" }, { "docid": "0616d400e3b4c477b47a9883e3c1bc28", "score": "0.5714994", "text": "def setRangeLimitForSweep(self, select, limit):\n self.sendCommand(\"RANGE \"+str(select)+\" \"+str(limit))\n return 0", "title": "" }, { "docid": "c79d0bcceac1a5770df03424d7199597", "score": "0.5704887", "text": "def set_limit(counter, errors):\n counter['limit'] = errors", "title": "" }, { "docid": "03ed799a7234ed6b5deb74ed40aa2984", "score": "0.5656544", "text": "def limits(self):\n return self.get_object(\"/pscr/query/v1/limits\")", "title": "" }, { "docid": "fb3546eaa72b3e71e3a3b8c0c41e210f", "score": "0.5651792", "text": "def test_change_throttling_settings_http_with_overwrite_throttled_rate_above_account_quota():", "title": "" }, { "docid": "8d1fcbbc6c992ce7d230021f3f039e69", "score": "0.56099766", "text": "def MaxResultsSet(self, 
value):\n self._endpoints_query_info.limit = value", "title": "" }, { "docid": "8b11dc2146fbf03a9cb7c9fe09afba2e", "score": "0.56051415", "text": "def set_limit(self, limit, truncated=False):\n self.limit = {'limit': limit, 'truncated': truncated}", "title": "" }, { "docid": "82789214486ce483237f98aa76fde273", "score": "0.5601383", "text": "def limit(self, limit):\n if self._actual_result_cursor is not None:\n raise InvalidSearchOperation(\"Cannot set search options after\"\n \" executing SearchQuery\")\n self._limit = limit\n return self", "title": "" }, { "docid": "b49aac13a1b8be0dcd6e4a69db34f90e", "score": "0.5588386", "text": "async def iv_limit(self, ctx: commands.Context, date: str = None):\n session = session_maker()\n server = session.query(schema.Server).filter_by(id=ctx.guild.id).one_or_none()\n if date is None:\n await ctx.send(f'The current reinterview limit is `{server.limit.strftime(\"%Y/%m/%d\")}`.')\n return\n server.limit = datetime.strptime(date, \"%Y/%m/%d\")\n session.commit()\n await ctx.message.add_reaction(ctx.bot.greentick)", "title": "" }, { "docid": "06579b19d3fbe73b53a16f3e0e125b34", "score": "0.5577641", "text": "def limits():\n limits = api.Api()\n print(limits.get_account_limits())", "title": "" }, { "docid": "9c96b7e37a7ead785d0f7c0356a88321", "score": "0.5577393", "text": "def SetMaxRate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "08ef17e6360e963a111267f7fba57eed", "score": "0.55691427", "text": "def test_get_query_with_new_limit_comment_with_limit() -> None:\n query = ParsedQuery(\"SELECT * FROM birth_names -- SOME COMMENT WITH LIMIT 555\")\n assert query.set_or_update_query_limit(1000) == (\n \"SELECT * FROM birth_names -- SOME COMMENT WITH LIMIT 555\\nLIMIT 1000\"\n )", "title": "" }, { "docid": "9f62885b9b3895ea92e19d23d90fe068", "score": "0.55452865", "text": "def test_get_query_with_new_limit_comment() -> None:\n query = ParsedQuery(\"SELECT * FROM birth_names -- SOME COMMENT\")\n assert query.set_or_update_query_limit(1000) == (\n \"SELECT * FROM birth_names -- SOME COMMENT\\nLIMIT 1000\"\n )", "title": "" }, { "docid": "73cdbf2b5476c36b193d40c159e49818", "score": "0.553726", "text": "def push_rate_limit_to_context():\n custom_rate_limit = \"10 per second\"\n setattr(g, \"user_rate_limit\", custom_rate_limit)\n return custom_rate_limit", "title": "" }, { "docid": "f3570f4586f5889800a8a0e94384847e", "score": "0.5534609", "text": "def rate_limit(self):\n time_next = time()\n if time_next - self.time_last < 0.5:\n raise cherrypy.HTTPError(403) # raise a Forbidden Error\n self.time_last = time_next", "title": "" }, { "docid": "718b0fe6cf4c3c617f74ce8d4c0b7dea", "score": "0.5509499", "text": "def test_limit_rate(self):\n with patch.object(RedisWQ, '_get_limit_key') as mock_get_limit_key:\n mock_get_limit_key.return_value = 0\n # request lease, should return True\n self.assertTrue(self.r_wq._limit_rate(1, 'hour'))\n # same timestamp lease request over limit, should return False\n self.assertFalse(self.r_wq._limit_rate(1, 'hour'))\n self.assertTrue(len(self.mock_redis.expiremap.items()) == 1)\n self.assertEqual(self.mock_redis.expiremap\n ['mock_limit_rate:limit:0'], 60 * 60)", "title": "" }, { "docid": "e9f86b857ec6d4f20cd407a49ba0f67c", "score": "0.5509086", "text": "def rate_limit(self):\n return self.base_requester.rate_limit", "title": "" }, { "docid": "edd1a1fd2be1adfbcb26964f2e01f56b", 
"score": "0.5499592", "text": "def limit(self):\n if self._limit_present:\n return self._limit_value\n else:\n return 10", "title": "" }, { "docid": "fe688ba5e68f4d931ba5e3a1f23e1b10", "score": "0.546185", "text": "def enable_rate_limit(self):\n self.limit_rate = True", "title": "" }, { "docid": "ebc6a912d2dc93d5aca8e3e89b491a3c", "score": "0.54482883", "text": "def _get_rate_limit(self):\n return self.__rate_limit", "title": "" }, { "docid": "ebc6a912d2dc93d5aca8e3e89b491a3c", "score": "0.54482883", "text": "def _get_rate_limit(self):\n return self.__rate_limit", "title": "" }, { "docid": "ebc6a912d2dc93d5aca8e3e89b491a3c", "score": "0.54482883", "text": "def _get_rate_limit(self):\n return self.__rate_limit", "title": "" }, { "docid": "ebc6a912d2dc93d5aca8e3e89b491a3c", "score": "0.54482883", "text": "def _get_rate_limit(self):\n return self.__rate_limit", "title": "" }, { "docid": "7ef2ba17d3fddd1f5691c52f14e48506", "score": "0.54445267", "text": "def setVoltageLimit(self, limit):\n\n self.sendCommand(\"VLIM \"+str(limit))\n return 0", "title": "" }, { "docid": "4041ca21a7b303f2d6474586c2a2ed9b", "score": "0.5423636", "text": "def decode_limit(self, statement, limit):\n if not isinstance(limit, int) or limit < 0:\n raise WrongParameter(\n u\"Limit must be greater or equal to 0.\"\n )\n statement.limit = limit\n return statement", "title": "" }, { "docid": "1284a980233b9db11e94de99b9d07aa5", "score": "0.54209334", "text": "async def tracelimit(self, ctx):\n\n data = get('https://trace.moe/api/me').json()\n limit = data['limit']\n limit_ttl = data['limit_ttl']\n\n embed = discord.Embed(color=ctx.me.color)\n embed.add_field(name='Limits', value=f'{limit} requests\\n{limit_ttl} sekunder til resettelse')\n await ctx.send(embed=embed)", "title": "" }, { "docid": "be5b842887bb0ee4a98dcb5383a833ad", "score": "0.5416373", "text": "def rate_limit_set_enable(\n scope,\n max_read_ops,\n max_read_bytes,\n max_write_ops,\n max_write_bytes,\n global_scope=None,\n):\n limset = utils.exec_shell_cmd(\n f\"radosgw-admin {global_scope} ratelimit set --ratelimit-scope={scope}\"\n + f\" --max-read-ops={max_read_ops} --max-read-bytes={max_read_bytes}\"\n + f\" --max-write-bytes={max_write_bytes} --max-write-ops={max_write_ops}\"\n )\n log.info(f\"Rate limits set on {scope}\")\n limenable = utils.exec_shell_cmd(\n f\"radosgw-admin {global_scope} ratelimit enable --ratelimit-scope={scope}\"\n )", "title": "" }, { "docid": "e01376c7fbea8bea0b3e91d29853bec9", "score": "0.5413138", "text": "def updateGurobiParam(self, TIME_LIMIT):\r\n \r\n if TIME_LIMIT > 0:\r\n self.TIME_LIMIT = TIME_LIMIT\r\n self.model.setParam(GRB.Param.TimeLimit, TIME_LIMIT) \r\n logging.info(\"Update GRB.Param.TimeLimit to %f\" %(self.TIME_LIMIT) )\r\n \r\n \r\n self.model.update()", "title": "" }, { "docid": "1fc858cbe9b3f6fe7dfc419e10b75080", "score": "0.5393917", "text": "def test_change_throttling_settings_http_with_overwrite_throttled_burst_above_account_quota():", "title": "" }, { "docid": "0aa7038f721a1b99159278beb9499f32", "score": "0.5387859", "text": "def calculate(self, limit):\n pass", "title": "" }, { "docid": "785d6b0fcf855fe4a0d9a07e0adc05b9", "score": "0.5385896", "text": "def get_limit(self):\n return self._limit", "title": "" }, { "docid": "058cc0f2819edf2b5c658a83ef9f3544", "score": "0.5384189", "text": "def limit_price(self, limit_price):\n\n self._limit_price = limit_price", "title": "" }, { "docid": "c6b1120e01d676eecb2675a7f3edf339", "score": "0.5382184", "text": "def _apply_limit(\n self, limit: Optional[int], 
search_params: Dict[str, Union[str, int]],\n ) -> None:\n if limit is None:\n return\n\n search_params[\"limit\"] = int(limit)", "title": "" }, { "docid": "625e51b7ba4368d4440f81cf6ae80c51", "score": "0.5372994", "text": "def set_filters_bw_limit(self, bw_limit, burst_limit):\n # because replace of tc filters is not working properly and it's adding\n # new filters each time instead of replacing existing one first old\n # ingress qdisc should be deleted and then added new one so update will\n # be called to do that:\n return self.update_filters_bw_limit(bw_limit, burst_limit)", "title": "" }, { "docid": "112312deae69533260d0f9b150b0aa52", "score": "0.5369946", "text": "def runQuery(self):\r\n self.sphinx.SetLimits(self.params['limit'][0], self.params['limit'][1])\r\n return self.sphinx.RunQueries()", "title": "" }, { "docid": "d0c34e4fd8319a30f9435a3052594be6", "score": "0.5365396", "text": "def _switch_limit(self) -> None:\r\n limit_checkbox = self.driver.find_element_by_id(\r\n self._limit_checkbox_id\r\n )\r\n limit_checkbox.click()", "title": "" }, { "docid": "d7686e45debbacc489c6bd48e66e6c80", "score": "0.53569144", "text": "def api_request_rate_limit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"api_request_rate_limit\")", "title": "" }, { "docid": "75ca72316ff2826d3801cf6166d1e5b4", "score": "0.5347101", "text": "def limit(self, limit: _LimitOffsetType) -> Self:\n self._limit_clause = sql_util._offset_or_limit_clause(limit)\n return self", "title": "" }, { "docid": "4b984d9ca0ee888a20642c8f84d5967e", "score": "0.53377146", "text": "def test_limit_rate_reset(self):\n with patch.object(RedisWQ, '_get_limit_key') as mock_get_limit_key:\n mock_get_limit_key.side_effect = [0, 1]\n self.assertTrue(self.r_wq._limit_rate(1, 'hour'))\n # different timestamp lease request, return True\n self.assertTrue(self.r_wq._limit_rate(1, 'hour'))\n self.assertTrue(len(self.mock_redis.expiremap.items()) == 2)", "title": "" }, { "docid": "cfeb959c1a25dcb8841ce0462ee15482", "score": "0.53352904", "text": "def set_resource_limit():\n\n status_code = 200\n userid = request.headers.get('userid')\n memory = request.headers.get('memory')\n hdd = request.headers.get('hdd')\n vcpus = request.headers.get('vcpus')\n\n if not all((memory, hdd, vcpus)):\n result = {\n 'success': False,\n 'message': 'memory, hdd, vcpus shouldn\\'t be empty'\n }\n return result, status_code\n\n if not userid:\n result = {\n 'success': False,\n 'message': 'userid shouldn\\'t be empty'\n }\n return result, status_code\n\n if not all((memory.isdigit(), hdd.isdigit(), vcpus.isdigit())):\n result = {\n 'success': True,\n 'message': 'memory, hdd, vcpus should be numeric'\n }\n return result, status_code\n\n if current_identity['type'] == 'user':\n result = {\n 'success': False,\n 'message': 'Only admin can use this API'\n }\n else:\n db.set_resource_limits(userid, memory, hdd, vcpus)\n result = {\n 'success': False,\n 'message': 'Resource limit is set for the user {}'.format(userid)\n }\n\n return result, status_code", "title": "" }, { "docid": "911333966514a96580f1a6bd91c559a3", "score": "0.5326881", "text": "def calculate(self, limit):", "title": "" }, { "docid": "1e6e25ce56ea8bd9bbf215951ef69b0a", "score": "0.5309676", "text": "def test_set_domain_access_limit(c):\n access_limit = {\n \"enabled\": True,\n \"limit\": 2000\n }\n domain = 'test-sdk.sys-qa.com'\n response = c.set_domain_access_limit(domain, access_limit);\n print(response)", "title": "" }, { "docid": "fa6c74493fc29db90ea54b415ad13d21", "score": 
"0.53026354", "text": "def set_limit(self, limit: int) -> None:\n if not isinstance(limit, int):\n raise ValueError(\"Error: limit must be an integer between 1 and 100\")\n\n if 0 < limit <= self.MAX_LIMIT:\n self.limit = limit\n elif limit <= 0:\n self.limit = self.DEFAULT_LIMIT\n else:\n self.limit = self.MAX_LIMIT", "title": "" }, { "docid": "364a77a0e1a7145e81807d3c184cd050", "score": "0.5293315", "text": "def set_limit_bandwidth(self, limit):\n if limit:\n self.cam.set_limit_bandwidth_mode(\"XI_ON\")\n else:\n self.cam.set_limit_bandwidth_mode(\"XI_OFF\")", "title": "" }, { "docid": "b59ff97de0e76ee30976e3e15fe06a75", "score": "0.5291706", "text": "def set_rate_limit(domain: str, limit: float):\n if not isinstance(domain, str):\n raise ValueError(\"Domain must be a string\")\n\n if type(limit) not in (int, float):\n raise ValueError(\"Limit must be numeric\")\n\n if limit < 0:\n raise ValueError(\"Limit must be zero or positive\")\n\n src.models.ratelimit.DomainRates.set(\n domain, limit\n )", "title": "" }, { "docid": "ea2c67398c374493aba279c42ca4d9a4", "score": "0.5274358", "text": "def update_customer_credit(cust_id, new_credit_limit):\n try:\n query = Customer.get(Customer.customer_id == cust_id)\n query.credit_limit = new_credit_limit\n query.save()\n logging.info(\"Customer {} credit limit changed to {}\".format(cust_id, new_credit_limit))\n except DoesNotExist:\n raise ValueError", "title": "" }, { "docid": "176714c0879f10880821670223ac3918", "score": "0.5256479", "text": "def update(self, dt):\n super().update(dt)\n self.limit()", "title": "" }, { "docid": "0909d95c27956d8ac5f3b066bff0a40e", "score": "0.5254374", "text": "def test_change_throttling_settings_http_with_overwrite_throttled_rate_above_50():", "title": "" }, { "docid": "15a69b351543b6f0b5e1c6684b1cf216", "score": "0.5253899", "text": "def voltage_limit(self, c, voltage_limit=None):", "title": "" }, { "docid": "a9d0319d6dd6be02b5bcb7df1ecda8b7", "score": "0.5239866", "text": "def limits(self, **query):\n return self._list(_limit.Limit, **query)", "title": "" }, { "docid": "af57cc0affaddfeecf2432f2da6759e0", "score": "0.5238276", "text": "def limit(self, value: bool) -> None:\r\n if self.limit != value:\r\n self._switch_limit()\r\n self._limit = value", "title": "" }, { "docid": "613136fc09250a978714028bef691693", "score": "0.5231822", "text": "def rate_limit_reset(self):\n # TODO: to be done\n pass", "title": "" }, { "docid": "5a263b6923d0d16681ad3f2822cc2099", "score": "0.5230839", "text": "def current_limit(self, currentLim):\n\t\tself.send(\"LAS:LIM:LDI {}\".format(float(currentLim)))", "title": "" }, { "docid": "9ca30ef76e3c25a40ada5fd4dc51a4d9", "score": "0.5224226", "text": "def limit(self, count):\n # save the Limit in cursor for next fetch\n self.cursor_dict['limit'] = count\n\n if count:\n self.n_limit = count\n return self", "title": "" }, { "docid": "b5c48f5614ca94d4b98b05f4c20c26dc", "score": "0.52199495", "text": "def SetMaxRate(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "title": "" }, { "docid": "0e5ab40662e0ac4c26b13b994affe11f", "score": "0.5212841", "text": "def get_raw_limits(self):\n raise NotImplementedError", "title": "" }, { "docid": "02a90fe6539513ae00300153ab7d1944", "score": "0.5208084", "text": "def set_maxreq(self, maxreq):\n self.options['maxreq'] = maxreq", "title": "" }, { "docid": "4230adcaadcba055229d0e15109eb50b", "score": "0.5201946", "text": "def set_feedback_limit(self, band, val, **kwargs):\n 
self._caput(self._band_root(band) + self._feedback_limit, val, **kwargs)", "title": "" }, { "docid": "f28d83086c28e1ae0affb8c6b230927d", "score": "0.5196892", "text": "def set_actual_score(self):\n try:\n actual_score_query = self.query.get_actual_bin_and_score_query()\n actual_score_data = self.database.get_data_from_database(actual_score_query)\n self.database.update_actual_score(actual_score_data) \n except Exception as err:\n error('Exception Occured while running set_actual_score job :' + str(err))\n raise Exception('Exception Occured while running set_actual_score job : {}'.format(str(err)))", "title": "" }, { "docid": "2fc564a27756b37266c14ec2fbba00f8", "score": "0.5194093", "text": "def real_limit(self):\n return self.real_limit_ if self.real_limit_ else self.max_limit", "title": "" }, { "docid": "86cda936ef63af4ae3133b1bd7e56c5c", "score": "0.5188416", "text": "def set_limit(self, amount):\n return self._i2c.writeWord(self.address, TWIST_LIMIT, amount)", "title": "" }, { "docid": "447f823ebd439490220b47aee50b6ac2", "score": "0.5187899", "text": "def ratelimit(self):\n r = requests.request(self.RATE_LIMIT[0],\n self.SERVER + self.RATE_LIMIT[1],\n auth = self.auth)\n r.raise_for_status()\n obj = r.json()[\"resources\"][\"core\"]\n return obj", "title": "" }, { "docid": "e113348ed83def3bf488a904143a3bc9", "score": "0.5171992", "text": "def update_limits(self):\n self.logger.debug(__name__, 'Updating twitter limits')\n self.cache_tl_total_reqs = self.memcacheClient.get('timeline_limit')\n if self.cache_tl_total_reqs:\n self.tl_total_reqs = int(self.cache_tl_total_reqs)\n\n self.cache_tl_reqs_left = self.memcacheClient.get('timeline_remaining')\n if self.cache_tl_reqs_left:\n self.tl_reqs_left = int(self.cache_tl_reqs_left)\n\n self.cache_tl_reqs_reset_time = self.memcacheClient.get(\n 'timeline_reset'\n )\n if self.cache_tl_reqs_reset_time:\n self.tl_reqs_reset_time = int(self.cache_tl_reqs_reset_time)\n self.utc_now = datetime.datetime.utcnow()\n self.utc_secs = (self.utc_now - epoch_time).total_seconds()\n self.secs_until_reset = self.tl_reqs_reset_time - self.utc_secs\n if self.secs_until_reset <= 0:\n # Force getting rates from twitter\n self.tl_reqs_reset_time = None\n\n self.update_values_valid = (\n self.tl_total_reqs and\n self.tl_reqs_left and\n self.tl_reqs_reset_time\n )\n \n if not self.update_values_valid:\n self.update_vals = self.twitterClient.get_user_timeline_rate_limit()\n self.tl_total_reqs = self.update_vals.limit\n self.tl_reqs_left = self.update_vals.remaining\n self.tl_reqs_reset_time = self.update_vals.reset", "title": "" }, { "docid": "6569e045676dee1f49714f25a5139bda", "score": "0.5161472", "text": "def test_get_application_rate_limit_status(self):\r\n self.oauth2_api.get_application_rate_limit_status()", "title": "" }, { "docid": "bf496b25372344a12c9387e562f65532", "score": "0.51450187", "text": "def set_signal_limit(self, lower, upper):\n self.signal_obj.set_limit(lower, upper)", "title": "" }, { "docid": "c677a3c0569df15072fac768cf2fc9e3", "score": "0.51447755", "text": "def revision_history_limit(self, revision_history_limit):\n\n self._revision_history_limit = revision_history_limit", "title": "" }, { "docid": "d58cdac2a0c4d061cbfc24c816c979c4", "score": "0.51403666", "text": "def get_circuit_limits(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "811014b8d18161b143978602db480f9a", "score": "0.5138155", "text": "def api_limits(self, keyword=None):\n if keyword:\n try: \n return 
self.api.rate_limit_status()['resources'][keyword]\n except KeyError:\n print(\"Error! Wrong key. Key entered is not found.\")\n return False \n return self.api.rate_limit_status()", "title": "" }, { "docid": "977e8d80c42f4700febc35f1a62d474e", "score": "0.5133127", "text": "def set_options(self, options_list):\n self._result_limit = options_list['result_limit'].get_value()", "title": "" }, { "docid": "09bebb775cada84d566402ebe7d5c85f", "score": "0.5129853", "text": "def overdraft_limit(self, overdraft_limit):\n\n self._overdraft_limit = overdraft_limit", "title": "" }, { "docid": "3d647a370b3898bc70bbb62447b996b6", "score": "0.51263785", "text": "def set_error_limit(self, lower, upper):\n self.error_obj.set_limit(lower, upper)", "title": "" }, { "docid": "603b23d7b07fec604428b80b7f3be9ab", "score": "0.5124392", "text": "def getLimit(self):\n return self._limit", "title": "" }, { "docid": "a654fcac092766dcb49666b14fe4c2bc", "score": "0.5121163", "text": "def test_bulk_limits():\n\n limits = UEM.bulk_limits()\n\n assert limits[\"DeleteDevice\"] == 3000\n assert limits[\"EnterpriseWipe\"] == 10\n assert limits[\"GPS\"] == 500\n assert limits[\"LockDevice\"] == 3000\n assert limits[\"SendMessage\"] == 3000", "title": "" }, { "docid": "4a61dc37797b06b34f486a020674b81f", "score": "0.5119389", "text": "def on_limit(self, track):\n return", "title": "" }, { "docid": "b1e559c3ca2bab08c9371184790ac010", "score": "0.5118318", "text": "def incr_ratelimit(cls, entity_key: str, max_api_calls: int, max_api_window: int) -> None:\n list_key, set_key, _ = cls.get_keys(entity_key)\n now = time.time()\n\n # Start redis transaction\n with client.pipeline() as pipe:\n count = 0\n while True:\n try:\n # To avoid a race condition between getting the element we might trim from our list\n # and removing it from our associated set, we abort this whole transaction if\n # another agent manages to change our list out from under us\n # When watching a value, the pipeline is set to Immediate mode\n pipe.watch(list_key)\n\n # Get the last elem that we'll trim (so we can remove it from our sorted set)\n last_val = pipe.lindex(list_key, max_api_calls - 1)\n\n # Restart buffered execution\n pipe.multi()\n\n # Add this timestamp to our list\n pipe.lpush(list_key, now)\n\n # Trim our list to the oldest rule we have\n pipe.ltrim(list_key, 0, max_api_calls - 1)\n\n # Add our new value to the sorted set that we keep\n # We need to put the score and val both as timestamp,\n # as we sort by score but remove by value\n pipe.zadd(set_key, {str(now): now})\n\n # Remove the trimmed value from our sorted set, if there was one\n if last_val is not None:\n pipe.zrem(set_key, last_val)\n\n # Set the TTL for our keys as well\n api_window = max_api_window\n pipe.expire(list_key, api_window)\n pipe.expire(set_key, api_window)\n\n pipe.execute()\n\n # If no exception was raised in the execution, there were no transaction conflicts\n break\n except redis.WatchError: # nocoverage # Ideally we'd have a test for this.\n if count > 10:\n raise RateLimiterLockingException()\n count += 1\n\n continue", "title": "" }, { "docid": "da54eb698c99c36987ca979d6f802023", "score": "0.51134175", "text": "def limit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"limit\")", "title": "" }, { "docid": "fbece34c0c0ff0863398a38d7448e295", "score": "0.5104165", "text": "def testQueryLimit(self):\n gen = ObservationMetaDataGenerator()\n results = gen.getObservationMetaData(fieldRA=(numpy.degrees(1.370916), numpy.degrees(1.5348635)),\n limit=20)\n 
self.assertEqual(len(results),20)", "title": "" }, { "docid": "2b68268f80db08cdbc5079878ee11ecb", "score": "0.5098674", "text": "def limit(self):\n return self._limit", "title": "" }, { "docid": "0f6ac4f7ac11350af7be5abb7b0bced3", "score": "0.5097004", "text": "def limits(self, options):\n params = {}\n\n for (k, v) in list(options.items()):\n if k not in self.LIMITS:\n raise KeyError('invalid key: %s' % k)\n\n params[self.LIMITS[k]] = int(v)\n\n if len(list(params.items())) == 0:\n raise KeyError(\"You need to specify one of the valid Keys\")\n\n r = requests.put('%s/proxy/%s/limit' % (self.host, self.port),\n params)\n return r.status_code", "title": "" } ]
6b0bda8dd2dc34d181e9106c82a2a85b
generated source for method __init__
[ { "docid": "69a03b0b49aef10faed120ddbf5197d2", "score": "0.0", "text": "def __init__(self, finder):\n super(TablingSolver, self).__init__(finder)\n self.simpleFinder = SudokuStepFinder(True)\n i = 0\n while len(tmpOnSets):\n self.tmpOnSets[i] = SudokuSet()\n self.tmpOffSets[i] = SudokuSet()\n i += 1\n self.steps = ArrayList()\n if self.tablingComparator == None:\n self.tablingComparator = TablingComparator()\n i = 0\n while len(tmpChains):\n self.tmpChains[i] = Chain()\n self.tmpChains[i].setChain([None] * Options.getInstance().getMaxTableEntryLength())\n i += 1\n i = 0\n while len(alsEliminations):\n self.alsEliminations[i] = SudokuSet()\n i += 1", "title": "" } ]
[ { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.8635625", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.8635625", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.8635625", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.8635625", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.8635625", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.8635625", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.8635625", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.8635625", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.8635625", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.8635625", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.8635625", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.8635625", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.8635625", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.8635625", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.8635625", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.8635625", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.8635625", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.8635625", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "270c1496db15c807f5ddd23e536efa9c", "score": "0.85879993", "text": "def __init__(self,):\n raise NotImplementedError", "title": "" }, { "docid": "f7774ddaa12117815f3bc1a52d063491", "score": "0.85787165", "text": "def __init__(self):\n raise NotImplementedError", "title": "" }, { "docid": "60056122071018c9f967e2184372e1ed", "score": "0.8549695", "text": "def __init__(self, ): # TODO complete the parameter list\n\n pass # TODO", "title": "" }, { "docid": "8224f75f811d3e0ca6e8e4b68d328db1", "score": "0.85112834", "text": "def __init__(self,): # TODO complete the parameter list\n\n pass # TODO", "title": "" }, { "docid": "e1e65559d60b6ac2fc2c0fcec5d7f4fc", "score": "0.8395705", "text": "def __init__():", "title": "" }, { "docid": "e1e65559d60b6ac2fc2c0fcec5d7f4fc", "score": "0.8395705", "text": "def __init__():", "title": "" }, { "docid": "ab968ad2278f99f50e9bab0735f4faff", "score": "0.83778226", "text": "def __init__(self):\n raise NotImplementedError()", "title": "" }, { "docid": "ab968ad2278f99f50e9bab0735f4faff", "score": "0.83778226", "text": "def __init__(self):\n raise NotImplementedError()", "title": "" }, { "docid": "ad42e046fdd920a8d11a9d5d74530730", "score": "0.83630234", "text": "def __init__(self) -> None:\n raise NotImplementedError()", 
"title": "" }, { "docid": "ad42e046fdd920a8d11a9d5d74530730", "score": "0.83630234", "text": "def __init__(self) -> None:\n raise NotImplementedError()", "title": "" }, { "docid": "610ccbc20e68c6e7b3cf473b88b152d5", "score": "0.82935363", "text": "def init(self):\n raise NotImplementedError", "title": "" }, { "docid": "fe659b36b726eec4ff251e824c3d738b", "score": "0.820709", "text": "def __init__(self, *args, **kwargs): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "108726ab9484ed10778f3b608374f9b1", "score": "0.8191199", "text": "def __init__(self):\n # TODO\n pass", "title": "" }, { "docid": "108726ab9484ed10778f3b608374f9b1", "score": "0.8191199", "text": "def __init__(self):\n # TODO\n pass", "title": "" }, { "docid": "6d169f6ce8584eb0a338c4684e0061b8", "score": "0.8190942", "text": "def __init__(self):\n \n pass", "title": "" }, { "docid": "2861db21f711e429d3eee5a6aaf3655e", "score": "0.8166467", "text": "def __init__(self):\n\t\t\tpass", "title": "" }, { "docid": "f25b541a7fda9cc2df31855e7a6580a7", "score": "0.816041", "text": "def __init__(self):\n raise NotImplemented", "title": "" }, { "docid": "c14a6389537816f18bc6f6778fbea72d", "score": "0.8123705", "text": "def __init__(self) -> None:\n pass", "title": "" }, { "docid": "c14a6389537816f18bc6f6778fbea72d", "score": "0.8123705", "text": "def __init__(self) -> None:\n pass", "title": "" }, { "docid": "f35eaaf5233469ad3bbcb225c0e8394e", "score": "0.81151074", "text": "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "f35eaaf5233469ad3bbcb225c0e8394e", "score": "0.81151074", "text": "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "f35eaaf5233469ad3bbcb225c0e8394e", "score": "0.81151074", "text": "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "f35eaaf5233469ad3bbcb225c0e8394e", "score": "0.81151074", "text": "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "f35eaaf5233469ad3bbcb225c0e8394e", "score": "0.81151074", "text": "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "f35eaaf5233469ad3bbcb225c0e8394e", "score": "0.81151074", "text": "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "f35eaaf5233469ad3bbcb225c0e8394e", "score": "0.81151074", "text": "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "f35eaaf5233469ad3bbcb225c0e8394e", "score": "0.81151074", "text": "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "f35eaaf5233469ad3bbcb225c0e8394e", "score": "0.81151074", "text": "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "f35eaaf5233469ad3bbcb225c0e8394e", "score": "0.81151074", "text": "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "aa69f48eb8a6f7ee0645a5d38120ac91", "score": "0.81060505", "text": "def __post_init__(self):\n raise NotImplementedError", "title": "" }, { "docid": "6734702d07fa0625f932b8284def85be", "score": "0.8083704", "text": "def __init__(self):\n\t\tpass", "title": "" }, { "docid": "6734702d07fa0625f932b8284def85be", "score": "0.8083704", "text": "def __init__(self):\n\t\tpass", "title": "" }, { "docid": "6734702d07fa0625f932b8284def85be", "score": "0.8083704", "text": "def 
__init__(self):\n\t\tpass", "title": "" }, { "docid": "6734702d07fa0625f932b8284def85be", "score": "0.8083704", "text": "def __init__(self):\n\t\tpass", "title": "" }, { "docid": "6734702d07fa0625f932b8284def85be", "score": "0.8083704", "text": "def __init__(self):\n\t\tpass", "title": "" }, { "docid": "6734702d07fa0625f932b8284def85be", "score": "0.8083704", "text": "def __init__(self):\n\t\tpass", "title": "" }, { "docid": "6734702d07fa0625f932b8284def85be", "score": "0.8083704", "text": "def __init__(self):\n\t\tpass", "title": "" }, { "docid": "6734702d07fa0625f932b8284def85be", "score": "0.8083704", "text": "def __init__(self):\n\t\tpass", "title": "" }, { "docid": "d5762893a26d07d53db1cbd7b15a3fbc", "score": "0.8077696", "text": "def __init__(self, *args, **kwargs):\n return NotImplemented", "title": "" }, { "docid": "ed8e47b20b44d03080df0212334b75c0", "score": "0.80642724", "text": "def __init__(self, *args, **kwds) -> None:", "title": "" }, { "docid": "ac423eba6e88c1080e12f1aadac5dd7c", "score": "0.8034739", "text": "def _init(self):\n pass", "title": "" }, { "docid": "3ab8ca8547131f752e22b18c41ff2c78", "score": "0.80310845", "text": "def init_class(self):", "title": "" }, { "docid": "4c75a4915229eb71a069110643914c66", "score": "0.80184317", "text": "def init(self):\n \n pass", "title": "" }, { "docid": "c7d93bff02c62f5f9bf2b9901a4ea866", "score": "0.8012071", "text": "def __init__(self):\n \n \t\tpass", "title": "" }, { "docid": "db5b1eec8ff2b70fece9817942a6d799", "score": "0.80074984", "text": "def init(self): \n pass", "title": "" }, { "docid": "9f55ae47803879ab475e8bcb3d2aaf75", "score": "0.79991245", "text": "def _init(self):", "title": "" }, { "docid": "9b5b9db2d68ff5e24aa6c1b6714b7134", "score": "0.79984856", "text": "def __init__(self):\n \n None", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": 
"bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7985275", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "6d0332b480ff506654e80e5d2ebfb664", "score": "0.79841214", "text": "def initialize(self, *args, **kwargs) -> None:", "title": "" }, { "docid": "e91926e61fa89cc04b46c292db643017", "score": "0.7952634", "text": "def __init__(self): \n \n pass", "title": "" }, { "docid": "9a91c854034ce117248e93a1b69c6b5a", "score": "0.79523736", "text": "def __init__(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "9a91c854034ce117248e93a1b69c6b5a", "score": "0.79523736", "text": "def __init__(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "9a91c854034ce117248e93a1b69c6b5a", "score": "0.79523736", "text": "def __init__(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "9a91c854034ce117248e93a1b69c6b5a", "score": "0.79523736", "text": "def __init__(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "9a91c854034ce117248e93a1b69c6b5a", "score": "0.79523736", "text": "def __init__(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "9a91c854034ce117248e93a1b69c6b5a", "score": "0.79523736", "text": "def __init__(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "a7a46f3538ced71d5475de9aebbdc54a", "score": "0.79090714", "text": "def init(self, *args, **kwargs):", "title": "" } ]
8cee350d51966e0ec041726df5615876
returns a sequence with the first and last characters swapped
[ { "docid": "c24010c590ecec8b99a867a3ada4c4e7", "score": "0.7389425", "text": "def exchange_first_last(seq):\n\tfirst = seq[0]\n\tmid = seq[1:-1]\n\tlast = seq[-1]\n\t\n\tif type(seq) == str:\n\t\treturn last + mid + first\n\telse:\n\t\teos=len(seq)\n\t\tmid.insert(0,last)\n\t\tmid.insert(eos,first)\n\t\treturn mid", "title": "" } ]
[ { "docid": "acecd658cbf3c6f2e8fd53588ee88c6c", "score": "0.77933437", "text": "def seq_swap_first_last(seq):\n return (seq if (len(seq) < 2) else seq[-1:] + seq[1:-1] + seq[0:1])", "title": "" }, { "docid": "8085456ed54e7f29145e05cbbaaf882d", "score": "0.76585096", "text": "def exchange_first_last(seq):\n return seq[-1::] + seq[1:-1] + seq[0:1]", "title": "" }, { "docid": "a9713b905368c7fbb586d0c50fd51237", "score": "0.7462185", "text": "def exchange_first_last(seq):\n if len(seq) <= 1:\n return seq\n else:\n return seq[-1:] + seq[1:-1] + seq[:1]", "title": "" }, { "docid": "db82d92eab720b52a2f97c24cc76dc04", "score": "0.6979666", "text": "def seq_reverse(seq):\n return seq[::-1]", "title": "" }, { "docid": "e44b2898e2a7385b03725781c400087c", "score": "0.69596833", "text": "def exchange_first_last(n):\n\n return n[-1:] + n[1:-1] + n[:1]", "title": "" }, { "docid": "f7bc3ac0bc6a5df9212b01bd0bd55098", "score": "0.6916659", "text": "def reverse(seq):\n return seq[::-1]", "title": "" }, { "docid": "f7bc3ac0bc6a5df9212b01bd0bd55098", "score": "0.6916659", "text": "def reverse(seq):\n return seq[::-1]", "title": "" }, { "docid": "f7bc3ac0bc6a5df9212b01bd0bd55098", "score": "0.6916659", "text": "def reverse(seq):\n return seq[::-1]", "title": "" }, { "docid": "78320931cf2e5fcd7e5720f81eab3be2", "score": "0.6902493", "text": "def reverse(sequence: str) -> str:\r\n return sequence[::-1]", "title": "" }, { "docid": "30968fa2707b74211164c49553b5af6a", "score": "0.6880139", "text": "def mirror(s):\n mirrored = s[-1::-1]\n return s + mirrored", "title": "" }, { "docid": "c54fa7a5704cefedf6816cdfe3c2bc08", "score": "0.6778374", "text": "def reverse_my_animals(sequence):\n rev_sequence = sequence[::-1]\n return rev_sequence", "title": "" }, { "docid": "e56bcf1a041c78a0c57b0c5a5771f57c", "score": "0.672243", "text": "def reverse_pair(t):\n new = t.split()\n number = len(new)\n changed = []\n while number >=1:\n changed.append(new[number-1])\n number = number - 1\n changed = \" \".join(changed)\n return changed", "title": "" }, { "docid": "305efa42996f5a717b1709738e460098", "score": "0.6704497", "text": "def reverse(s):\r\n rev=''\r\n \r\n for letter in s:\r\n # push each letter onto the front of rev\r\n rev=letter+rev\r\n \r\n return rev", "title": "" }, { "docid": "8cffb7317b2d41cca86b9573bb43e2df", "score": "0.6684242", "text": "def reverseString(self, s):\n for i in range(len(s) // 2):\n swapping = len(s) - (i + 1)\n s[i], s[swapping] = s[swapping], s[i]", "title": "" }, { "docid": "6f393ad7bd7749409a7c9604044f0c8b", "score": "0.66659486", "text": "def reverse(characters):\n\n left_index = 0\n right_index = len(characters) - 1\n\n while left_index < right_index:\n\n characters[left_index], characters[right_index] = characters[right_index], characters[left_index]\n\n left_index += 1\n right_index -= 1\n\n return characters", "title": "" }, { "docid": "2efa807ec9922899eb9a6bf7aeddf39e", "score": "0.6593083", "text": "def reverseString(self, s: List[str]) -> None:\r\n i,j=0,len(s)-1\r\n while(i<j):\r\n s[i],s[j]=s[j],s[i] #swap elements\r\n i+=1\r\n j-=1", "title": "" }, { "docid": "1d7b6d52e7f6d8ad881a95aebdc89889", "score": "0.6565496", "text": "def base_pair(seq):\n trans_5 = str.maketrans('THVthv', 'UGUugu')\n trans_3 = str.maketrans('ACGUTHVacguthv', 'UGCAAUGugcaaug')\n\n return seq.translate(trans_5), seq.translate(trans_3)[::-1]", "title": "" }, { "docid": "c782324ca52a5bcaceb1806964afafdd", "score": "0.64779764", "text": "def mirror(s):\r\n\r\n mirrored_string = s + s[::-1]\r\n\r\n return 
mirrored_string", "title": "" }, { "docid": "7f1460fe692a7c3d1b37efba3199c54a", "score": "0.6458073", "text": "def reverse_compliment(seq):\n return_strand = ''\n for nt in seq:\n if nt.upper() == 'A':\n return_strand += 'T'\n if nt.upper() == 'G':\n return_strand += 'C'\n if nt.upper() == 'C':\n return_strand += 'G'\n if nt.upper() == 'T':\n return_strand += 'A'\n if nt.upper() == 'N':\n return_strand == 'N'\n assert 'unexpected nt!!'\n return return_strand[::-1]", "title": "" }, { "docid": "a38899762e14a11e7c99a4e69f5126d0", "score": "0.64548266", "text": "def reversed_by_slicing(seq):\n return seq[::-1]", "title": "" }, { "docid": "9781433b6d419c0d50ca936670ec0614", "score": "0.6442965", "text": "def omkeer(s):\n return s[::-1]", "title": "" }, { "docid": "d2486480d799f0541b16ca2dd0fd2b92", "score": "0.6438348", "text": "def reverse_complement(seq):\n return \"\".join(BASE_COMPLEMENTS[base] for base in reversed(seq))", "title": "" }, { "docid": "b5ee408638376830d124daaa4289a348", "score": "0.6427647", "text": "def reverseString(self, s: List[str]) -> None:\n length = len(s)\n for i in range(length // 2):\n s[i], s[length-i-1] = s[length-i-1], s[i]", "title": "" }, { "docid": "f99d7ecf6a62610491203a381ca56450", "score": "0.6421205", "text": "def reverse_complement(seq):\n return seq.translate(COMPLEMENT)[::-1]", "title": "" }, { "docid": "d8ab6d50f6aa2c0e67f7fc431ca5304e", "score": "0.64208585", "text": "def reverseString(self, s: List[str]) -> None:\n start = 0\n end = len(s) - 1\n while start <= end:\n s[start], s[end] = s[end], s[start]\n start += 1\n end -= 1", "title": "" }, { "docid": "5d8046cdaab90abb481d53860d31b45e", "score": "0.63993514", "text": "def reverseCompSequence(sequence):\n tmp = \"\"\n\n for base in sequence[::-1]:\n tmp += rclookup[base]\n\n return tmp", "title": "" }, { "docid": "fb9fc788cfd807f8d8ece06480037f86", "score": "0.63982475", "text": "def rev_comp(seq):\n comp_dict = {'A':'T', 'T':'A', 'G':'C', 'C':'G', 'N':'N'}\n return \"\".join([comp_dict.get(i.upper()) for i in seq[::-1]])", "title": "" }, { "docid": "3870fb68d938bbc83f4184395a543cfa", "score": "0.6392788", "text": "def revcomp(self):\n\n return self.translate(self.bp)[::-1]", "title": "" }, { "docid": "cfcae8f83d6a84bf2d07e98f2d14ae38", "score": "0.63794583", "text": "def reverse_complement(sequence: str) -> str:\r\n sequence = reverse(sequence)\r\n sequence = complement(sequence)\r\n return sequence", "title": "" }, { "docid": "314d598dd02416bae8ec37b925cabe56", "score": "0.6378279", "text": "def exchange(str):\n # if length of string is > 1, we switch characters, else just return str\n if len(str) > 1:\n # store the first char and last char, then switch them\n last_char = str[len(str) - 1]\n first_char = str[0]\n str = last_char + str[1:len(str) - 1] + first_char\n return str\n else:\n return str", "title": "" }, { "docid": "686cc018f7a0c1dfffae95c66c115216", "score": "0.63408107", "text": "def reverse(st):\n sl = st.split() # ['Hello', 'World']\n r = sl.reverse() # ['World', 'Hello']\n rev_st = ' '.join(sl) # \"World Hello\"\n return rev_st", "title": "" }, { "docid": "53eaace9b6ffb534673663082536d774", "score": "0.6339083", "text": "def reverseString(self, s: List[str]) -> None:\n for i in range (0, len(s)//2):\n s[i] , s[size-1-i] = s[size-1-i], s[i]", "title": "" }, { "docid": "c76cc190c090b24eadb63f10cc10fbf8", "score": "0.6312462", "text": "def firstlast(sequence):\n return sequence[:1] + sequence[-1:]", "title": "" }, { "docid": "3292433b5c1b9081b689b42d4de3924e", "score": "0.62813103", 
"text": "def reversecomplement(sequence): \n\n complement = {\"A\":\"T\", \"T\":\"A\", \"C\":\"G\", \"G\":\"C\", \"N\":\"N\"}\n \n reverse_complement_sequence = \"\"\n\n sequence_list = list(sequence)\n sequence_list.reverse()\n\n for letter in sequence_list:\n reverse_complement_sequence += complement[letter.upper()]\n \n return reverse_complement_sequence", "title": "" }, { "docid": "860e437490d693104b79ab7df637287a", "score": "0.62798786", "text": "def function4(seq):\n\tseq1 = seq[::-1]\n\treturn seq1", "title": "" }, { "docid": "7d09a20f51db2416dd30aaed39a5f9b2", "score": "0.62715834", "text": "def mirror(s): \r\n str = \"\"\r\n for i in s:\r\n str = i + str\r\n return s + str", "title": "" }, { "docid": "4bc83958836c3c46694628ff15ca1633", "score": "0.626788", "text": "def reverseString(self, s: List[str]) -> None:\n \n N=len(s)\n \n i=0\n j=N-1\n \n while i<j:\n s[i],s[j]=s[j],s[i]\n i+=1\n j-=1\n \n return s", "title": "" }, { "docid": "876efb83eba7fdaa6197921ad6f5e6c7", "score": "0.6265012", "text": "def reverseString(self, s: 'List[str]') -> None:\n ls = len(s)\n for i in range(ls // 2):\n s[i], s[ls - i - 1] = s[ls - i - 1], s[i]", "title": "" }, { "docid": "aa2940bd0991e9d0336ddcf45d34e6ac", "score": "0.6260102", "text": "def reverseString(self, s: List[str]) -> None:\r\n if len(s) < 2:\r\n return\r\n i, j = 0, len(s) - 1\r\n while i < j:\r\n s[i], s[j] = s[j], s[i]\r\n i += 1\r\n j -= 1\r\n return", "title": "" }, { "docid": "c26acd3cafe98688512caaed9343a9b1", "score": "0.622819", "text": "def reverseString(self, s: List[str]) -> None:\n left, right = 0, len(s) - 1\n while left < right:\n s[left], s[right] = s[right], s[left]\n left, right = left + 1, right - 1", "title": "" }, { "docid": "9efffa4ce210b37261ad212696d683db", "score": "0.6226025", "text": "def reverseString(self, s: List[str]) -> None:\n left = 0\n right = len(s) - 1\n\n while (left < right):\n s[left], s[right] = s[right], s[left]\n left, right = left + 1, right - 1", "title": "" }, { "docid": "4c92a0c48d7fb604d24f0a1295b2134e", "score": "0.6225713", "text": "def reverseSequence(Sequence):\n reverse = \"\"\n nucleotides = {'A' : 'T',\n 'T' : 'A',\n 'C' : 'G',\n 'G' : 'C'}\n for n in Sequence:\n if n in nucleotides:\n tmp = nucleotides[n]\n else :\n tmp = n # in some sequences there is many N or other letter\n reverse += tmp\n return reverse", "title": "" }, { "docid": "d17c4752b81508ae6a66e51850e39636", "score": "0.6215869", "text": "def reverse(s):\r\n \r\n str = \"\"\r\n for i in s:\r\n str = i + str\r\n return str", "title": "" }, { "docid": "8a9e71aefe60f29cc6618733f4f319a4", "score": "0.6201891", "text": "def reverse_complement(seq_str: str) -> str:\n return str(Seq(seq_str).reverse_complement())", "title": "" }, { "docid": "d535dcc4c4a82b343b7f9928b50a4a7b", "score": "0.61934096", "text": "def reverse_pair(asd):#This function returns the reverse pair of the input sentence.\n sabai=asd.split(' ')\n newlist=sabai[::-1]\n newstring=\" \".join(newlist)\n return newstring", "title": "" }, { "docid": "169bda040c8832372eb1fea6cdcef6a3", "score": "0.6191223", "text": "def reverseString(self, s: List[str]) -> None:\n start=0\n endl=len(s)-1\n \n while(start<endl):\n s[start] , s[endl] = s[endl] , s[start]\n start+=1\n endl-=1", "title": "" }, { "docid": "8d42c3b4c31bc1bf1539ce90dec0b418", "score": "0.61867106", "text": "def reverse(s):\n if len(s) == 0: # basic case\n return \"\"\n return reverse(s[1:]) + s[0]", "title": "" }, { "docid": "09c3444333b953819e0400712f891b86", "score": "0.6177581", "text": "def 
reverse_pair(sentence):\n list = sentence.split()\n l = []\n for s in list:\n l = [s] + l\n return ' '.join(l)", "title": "" }, { "docid": "b9fac644a4d9fd69bd56e4c2a99836a3", "score": "0.6175182", "text": "def reverseString(self, s):\n length = len(s)\n if length <= 1:\n return\n\n start, end = 0, len(s) - 1\n while start <= end:\n s[start], s[end] = s[end], s[start]\n start, end = start + 1, end - 1\n print(s)", "title": "" }, { "docid": "c23c8a2e0122f06100ffaab336a99319", "score": "0.61726683", "text": "def rev_comp(seq):\r\n rev_seq = seq[::-1]\r\n compliment_dict = {'A':'T', 'T':'A', 'C':'G', 'G':'C'}\r\n rev_comp_seq = ''\r\n for nuc in rev_seq:\r\n if nuc in ['A', 'T', 'G', 'C']:\r\n rev_comp_seq += compliment_dict[nuc]\r\n return rev_comp_seq", "title": "" }, { "docid": "4c815001b13fa8bf473cb0d345271e79", "score": "0.6168838", "text": "def flipside(s):\n z = len(s)/2\n return s[z::]+s[0:z]", "title": "" }, { "docid": "ca1295ac5848559d32377bee43cd4572", "score": "0.6166178", "text": "def reverseString(self, s: List[str]) -> None:\n # test case - [\"h\",\"e\",\"l\",\"l\",\"o\"]\n # first method - bacis python inbuild list manipulation\n # computional time - 56ms\n s.reverse()\n \n \n \"\"\"\n # second method - simple list manipulation with insert and pop\n # computional time - 40ms\n for ii in range(0, len(s)):\n s.insert(ii, s[len(s)-1])\n s.pop()\n \"\"\" \n \n # 2.1 method - shortened version of the second method with \n # simultaneously removing the last characted and then adding \n # it at the desired postion\n # computional time - 56ms\n \"\"\"\n for ii in range(0, len(s)):\n s.insert(ii, s.pop())\n \"\"\"\n \n # third method - list comprehension with lambda function\n # computional time - 86ms\n \"\"\"\n annfunc = lambda ind: s.insert(ind, s.pop()) \n [annfunc(ind) for ind in range(0, len(s))]\n \"\"\"", "title": "" }, { "docid": "fd5f23d37e7ea2a1660254c5469d0326", "score": "0.61632305", "text": "def reverseString(self, s: List[str]) -> None:\n l,r=0,len(s)-1\n while l<r:\n s[l],s[r]=s[r],s[l]\n l+=1\n r-=1", "title": "" }, { "docid": "dca16dbccdc5cc15f8c782f5a7b43493", "score": "0.6158762", "text": "def rev_complement(seq):\r\n return ''.join([COMP_DICT[x] for x in seq][::-1])", "title": "" }, { "docid": "16020ee2dcf9f1d0c44913b27ca4b7d8", "score": "0.6157503", "text": "def rc(seq):\n revdict = {\"A\":\"T\",\n \"T\":\"A\",\n \"G\":\"C\",\n \"C\":\"G\",\n \"a\":\"t\",\n \"t\":\"a\",\n \"g\":\"c\",\n \"c\":\"g\"}\n return ''.join([revdict[a] for a in seq][::-1])", "title": "" }, { "docid": "3f360f77419594038f087bee2567b621", "score": "0.61558914", "text": "def reverse(s):\n\n rev = ''\n for letter in s:\n rev = letter + rev\n return rev", "title": "" }, { "docid": "76948d3d441042b4975c0f52d6c97d56", "score": "0.61461294", "text": "def reverse(sequence, str_table=SIMPLE_COMPLEMENTS_STR):\r\n base_mapping = get_base_mapping(str_table)\r\n return _clean_sequence(sequence[::-1], base_mapping)", "title": "" }, { "docid": "54c958a638f3c5081dc053cd0ab63345", "score": "0.61435217", "text": "def reverse(sequence, str_table=SIMPLE_COMPLEMENTS_STR):\r\n return _clean_sequence(sequence, str_table)[::-1]", "title": "" }, { "docid": "60c62b78ec20d995e857fc67786e3ffa", "score": "0.61310756", "text": "def adjust(s):\n # This does not need to be terribly efficient as the code is\n # only run once, during initialization\n s = s.encode('latin-1')\n for i in range(1, len(s) - 1):\n rotate(s[i], s[i - 1])", "title": "" }, { "docid": "ddc07c74ea46b58046ee850e32a99bd4", "score": "0.61263466", "text": 
"def reverse(sequence, str_table=SIMPLE_COMPLEMENTS_STR):\n return _clean_sequence(sequence, str_table)[::-1]", "title": "" }, { "docid": "0d9e7c22596ebda948ea3d2391bc6e4c", "score": "0.61242956", "text": "def first_reverse(str_param):\r\n\r\n result = ''\r\n length = len(str_param)\r\n\r\n for i in range(length - 1, -1, -1):\r\n result += str_param[i]\r\n\r\n return result", "title": "" }, { "docid": "28d15b54fa02226a3fdffe3689ba2dae", "score": "0.61200035", "text": "def reverse_complement(sequence: str) -> str:\n return complement(sequence)[::-1]", "title": "" }, { "docid": "8f67aff40541f2750a112793314859bc", "score": "0.6118087", "text": "def flipsides(s):\n x = len(s)//2\n return s[-x:] + s[0:x+1]", "title": "" }, { "docid": "4a373f22b36f15928b3fd43d1fccf71e", "score": "0.6114611", "text": "def reverseString(self, s: List[str]) -> None:\n s.reverse()", "title": "" }, { "docid": "6eb2ef57e175aaa63cab30068b158f92", "score": "0.6102878", "text": "def reverseComp(seq):\n temp = seq.translate(transTable)\n rc = temp[::-1]\n return rc", "title": "" }, { "docid": "af65bc57639d6b749a5c086a23a27bba", "score": "0.6101014", "text": "def firstLast(seq):\n\tseq1 = seq[-1:] + seq[1:-1] + seq[:1]\n\treturn seq1", "title": "" }, { "docid": "b2f6b9e9f99e63ec2f0ca8e6eeac9dac", "score": "0.6099781", "text": "def reverseString(self, s) -> None:\n l, r = 0, len(s) - 1\n while l < r:\n s[l], s[r] = s[r], s[l]\n l += 1\n r -= 1", "title": "" }, { "docid": "ccd2a287420f6bd6c16d950433d11990", "score": "0.6090153", "text": "def reverse_complement(seq):\n seq = reverse(seq)\n seq = complement(seq)\n return seq", "title": "" }, { "docid": "c21cfc36bff38d43ec2c17951e0d7806", "score": "0.6089183", "text": "def rev_comp(seq):\n rev_seq = seq[::-1]\n compliment_dict = {'A':'T', 'T':'A', 'C':'G', 'G':'C'}\n rev_comp_seq = ''\n for nuc in rev_seq:\n if nuc in ['A', 'T', 'G', 'C']:\n rev_comp_seq += compliment_dict[nuc]\n return rev_comp_seq", "title": "" }, { "docid": "d4e8eae559bf926934f0e5ab7db19f04", "score": "0.6088372", "text": "def make_reverse_complement(seq):\n\n reverse_seq = seq[::-1]\n complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}\n rev_complement = \"\".join(complement.get(base, base) for base in reverse_seq)\n\n return rev_complement", "title": "" }, { "docid": "8348866db837910179b4452a5adec327", "score": "0.6084004", "text": "def revcomp(seq: str) -> str:\n\n rc_nuc = {\n 'A': 'T',\n 'C': 'G',\n 'T': 'A',\n 'G': 'C',\n }\n\n seq_rev = seq[::-1]\n seq_rev_comp = ''.join([rc_nuc[n] for n in list(seq_rev)])\n\n return seq_rev_comp", "title": "" }, { "docid": "1a96e67baa27203ffb49957276a9d270", "score": "0.6076847", "text": "def both_ends(s): \n l = len(s)\n new_s = '' \n \n if l < 2:\n return new_s\n else:\n new_s = s[0] + s[1] + s[l-2] + s[l-1]\n return new_s", "title": "" }, { "docid": "08b70379ec996fbb5d8d50c586616953", "score": "0.60657656", "text": "def flip(data: List[str]) -> List[str]:\n return [line[::-1] for line in data]", "title": "" }, { "docid": "4bbe1ca99fad292c3614dcd3f5f4dce6", "score": "0.6063451", "text": "def reverseString(self, s: List[str]) -> None:\n # one-line solution\n # s.reverse() \n \n # two pointer\n # time complexity: o(n)\n # space complexity: o(1) in place\n left, right = 0, len(s) - 1\n while left < right:\n s[left], s[right] = s[right], s[left]\n left, right = left + 1, right - 1", "title": "" }, { "docid": "6247818c5583298e987c0a353cd98381", "score": "0.60550517", "text": "def reverseString(self, s: List[str]) -> None:\r\n s.reverse()", "title": "" }, { "docid": 
"e5d0258f51fa673df22446456a9e1c0b", "score": "0.6041294", "text": "def reverse(self) -> None:\n\n for i in range(self.length() // 2):\n self.swap_pairs(i, (self.length() - 1 - i))", "title": "" }, { "docid": "e3a5ce824eee46475864277ff6e0637b", "score": "0.6037553", "text": "def reverse(str):\n\n new_str = \"\"\n for i in range(len(str), 0, -1):\n \tnew_str += str[i-1]\n return new_str", "title": "" }, { "docid": "d11c87bb9d7339be3bb7a4028b477d28", "score": "0.6032004", "text": "def SwapString(str):\n s = []\n for x in range(0, len(str) - 1, 2):\n s.append(str[x+1])\n s.append(str[x])\n return ''.join(s).strip()", "title": "" }, { "docid": "196dcf2dad1c02343e5fb810382c1a0d", "score": "0.6030722", "text": "def get_reverse_complement(dna):\n #empty string for eventual reverse sequence\n new_dna = ''\n #we want the reverse, so the while loop will count down\n i = len(dna) - 1\n while i >= 0:\n #if last one is A, changes to T and counts down one and then moves on\n if dna[i] == 'A':\n new_dna += ('T')\n i -= 1\n elif dna[i] == 'T':\n new_dna += ('A')\n i -= 1\n elif dna[i] == 'C':\n new_dna += ('G')\n i -= 1\n elif dna[i] == 'G':\n new_dna += ('C')\n i -= 1\n #return the resulting sequence\n return new_dna", "title": "" }, { "docid": "dc3b7ec559df6df0fb76820af99a2d61", "score": "0.60285246", "text": "def string_reverse(s):\n if len(s) == 0:\n return s\n else:\n return string_reverse(s[1:]) + s[0]", "title": "" }, { "docid": "a8b0ed80a8689341ff9dddc0f3b254a1", "score": "0.6021835", "text": "def swap_ends(bar):\n bar[0], bar[-1] = bar[-1], bar[0]", "title": "" }, { "docid": "51f916526dce2bcee4db3f0680811029", "score": "0.6021005", "text": "def reverseString(s: List[str]) -> None:\n s.reverse()", "title": "" }, { "docid": "ec1eb4183c2649ec253ae567f2c14d73", "score": "0.6013185", "text": "def backward(my_string):\n return my_string[::-1]", "title": "" }, { "docid": "ecbed43e1acaa20c14e10109456626f5", "score": "0.6011235", "text": "def reverse(s):\n\n rev = ''\n\n # For each character in s, add that char to the beginning of rev.\n\n for ch in s:\n rev = ch + rev\n\n return rev", "title": "" }, { "docid": "b4a450a32de5e4834081b8d018e534e8", "score": "0.6008765", "text": "def word_flipper(our_string):\n\n word_list = our_string.split(\" \")\n\n for idx in range(len(word_list)):\n word_list[idx] = word_list[idx][::-1] # [index1:index2:step]\n\n return \" \".join(word_list)", "title": "" }, { "docid": "bcb0b106d56c75c2e9b4b9be475b6ffd", "score": "0.60058576", "text": "def sequentize(string):\n first, last, result = None, None, []\n for char in map(ord, string):\n if last is None:\n first = last = char\n elif last + 1 == char:\n last = char\n else:\n result.append((first, last))\n first = last = char\n if last is not None:\n result.append((first, last))\n return ''.join([\n '%s%s%s' % (chr(first), last > first + 1 and '-'\n or '', last != first and chr(last) or '')\n for first, last in result\n ]) # noqa", "title": "" }, { "docid": "85619c268636fd5fe3e5af3f03d8c69f", "score": "0.60008776", "text": "def Complement(Sequence):\n Sequence=Sequence.upper()\n Trans=maketrans('ACTG','TGAC')\n return Sequence.translate(Trans)", "title": "" }, { "docid": "1b28048fef1f0a0d5bff440955886de3", "score": "0.59947234", "text": "def make_palindrome(s):\n return s[::-1]", "title": "" }, { "docid": "912bf8833b0ee09c3c0ed4c9b68550a4", "score": "0.59922177", "text": "def revcomp(seq):\n trantab = str.maketrans(\"ATCG\", \"TAGC\")\n return seq.translate(trantab)[::-1]", "title": "" }, { "docid": 
"6236792d21bcefa0546b3662a0c555d0", "score": "0.5991127", "text": "def swap(s: str, i: int, j: int):\n l1 = list(s)\n temp = l1[i]\n l1[i] = l1[j]\n l1[j] = temp\n return ''.join(l1)", "title": "" }, { "docid": "35d5284d77aff7d6e618143e847555e7", "score": "0.59785914", "text": "def reverseString(self, s) -> None:\n\t\tlength = len(s)\n\t\tleft = 0\n\t\tright = int((length-1))\n\t\twhile(left < right):\n\t\t\ttemp = s[left]\n\t\t\ts[left] = s[right]\n\t\t\ts[right] = temp\n\t\t\tleft +=1\n\t\t\tright -=1\n\t\treturn s", "title": "" }, { "docid": "5df74448cdaf9f8a54beaead0b693649", "score": "0.5978086", "text": "def strReverseR(s):\r\n if s == 1:\r\n return s\r\n elif len(s) == 0:\r\n return s\r\n else:\r\n return s[-1] + strReverseR(s[:-1])\r\n return s", "title": "" }, { "docid": "fa3c7c96dc396023a8e3633bc6bd4377", "score": "0.59762144", "text": "def reverse_complement(seq): \n\t\n\tbasecomplement = {'a':'t', 'c':'g', 't':'a', 'g':'c', 'A':'T', 'C':'G', 'T':'A', 'G':'C'}\n\treverse_seq = seq[::-1]\n\n\tdna = '' # initialize the variable dna as an empty string\n\tfor nt in reverse_seq:\n\t\tdna += basecomplement[nt] \n\treturn dna", "title": "" }, { "docid": "3b1a4f3d1c1aa46754e5840a7b983105", "score": "0.59623384", "text": "def makePalindrome(s):\n\treturn s + s[::-1]", "title": "" }, { "docid": "c94e18e60764df9a64c2c61ba061e835", "score": "0.5961592", "text": "def reverse_complement(bases):\r\n replacements = [('A', 't'), ('T', 'a'), ('C', 'g'), ('G', 'c')]\r\n for ch1, ch2 in replacements:\r\n bases = re.sub(ch1, ch2, bases)\r\n return bases[::-1].upper()", "title": "" }, { "docid": "4ac0f13e625b1190412a19279170a8da", "score": "0.59601516", "text": "def reverse_item_characters(list):\n temp = []\n for fruit in list[:]:\n temp.append( fruit[::-1] )\n return temp", "title": "" }, { "docid": "3515ad57459bf44d33a80110e2c5e024", "score": "0.5958538", "text": "def reverseString(self, s: List[str]) -> None:\r\n s = s.reverse()", "title": "" }, { "docid": "74bdfb77c338654b898bc4ff4a4da0e1", "score": "0.5954003", "text": "def reverseString(self, s: List[str]) -> None:\n s.reverse()", "title": "" }, { "docid": "a5c39a42a87ee161eda4c319978ac588", "score": "0.5950327", "text": "def reverseString(self, s) -> None:\n l, r = 0, len(s) - 1\n while l < r:\n s[l] ^= s[r]\n s[r] ^= s[l]\n s[l] ^= s[r]\n l += 1\n r -= 1", "title": "" }, { "docid": "6333c4e59d4e1f6638bd4b628fe5546f", "score": "0.5948949", "text": "def reverseString(self, s: List[str]) -> None:\n # 方法一:双指针\n left, right = 0, len(s) - 1\n while left < right:\n s[left], s[right] = s[right], s[left]\n left += 1\n right -= 1\n\n # 方法二:递归,空间复杂度不符合要求\n # def helper(left, right):\n # if left < right:\n # s[left], s[right] = s[right], s[left]\n # helper(left + 1, right - 1)\n # helper(0, len(nums) - 1)", "title": "" } ]
4a365ce584eee01290f5a160284042b9
Returns the tests in |tests| that have at least one of their compile targets in |compile_targets|.
[ { "docid": "26fe9329c821b898fee803f75a5b3580", "score": "0.80282277", "text": "def tests_in_compile_targets(api, compile_targets, tests):\n result = []\n for test in tests:\n test_compile_targets = test.compile_targets(api)\n\n # Always return tests that don't require compile. Otherwise we'd never\n # run them.\n if ((set(compile_targets).intersection(set(test_compile_targets))) or\n not test_compile_targets):\n result.append(test)\n\n return result", "title": "" } ]
[ { "docid": "7c1f73c51d4e05474cc4179520270cfc", "score": "0.64364606", "text": "def all_compile_targets(api, tests):\n return sorted(set(x\n for test in tests\n for x in test.compile_targets(api)))", "title": "" }, { "docid": "77041d9d085c52ac692244cfa9099c7a", "score": "0.59145314", "text": "def try_matches(target, tests):\n return _try_all(target, tests, try_match)", "title": "" }, { "docid": "2562b0f5c15983d4101a6c2f7f18aa42", "score": "0.58373857", "text": "def _get_test_targets(self):\n\n test_targets = list(filter(self._test_target_filter(), self._get_targets()))\n return test_targets", "title": "" }, { "docid": "26bdd241afeb03467ef7305f7f604c7f", "score": "0.5755654", "text": "def runner_tests_ok(tests: RunnerTestResults) -> bool:\n return False not in [result for test, result in tests]", "title": "" }, { "docid": "1aa05a890569a1daa54c57e98d613a85", "score": "0.54295397", "text": "def try_non_matches(target, tests):\n return not any([_try_match_quietly(target, test) for test in tests])", "title": "" }, { "docid": "990953a30df8344fae241dcdcb8fc744", "score": "0.5347448", "text": "def make(self, build_only=False, rebuild=False, local_builds_only=False):\n\n all_tests = []\n for test_batch in self.make_iter(build_only, rebuild, local_builds_only):\n all_tests.extend(test_batch)\n\n return all_tests", "title": "" }, { "docid": "f6e3ebc5cc1157565c79f79116299eaa", "score": "0.5336644", "text": "def BuildFileTargets(target_list, build_file):\n return [p for p in target_list if\n BuildFileAndTarget('', p)[0] == build_file]", "title": "" }, { "docid": "05e0df0614569042fdfc5e9569b79677", "score": "0.5330824", "text": "def filter_tests(self, tests: unittest.TestSuite) -> unittest.TestSuite:\n tests_to_be_run = unittest.TestSuite()\n for test in tests:\n added = False\n for _filter_ in self.filters:\n if getattr(test, _filter_, False):\n tests_to_be_run.addTest(test)\n added = True\n\n elif isinstance(test, unittest.TestCase):\n # noinspection PyProtectedMember\n if getattr(getattr(test.__class__, test._testMethodName), \"unittest\", False):\n tests_to_be_run.addTest(test)\n added = True\n\n if not added and isinstance(test, unittest.TestSuite):\n new_test_suite = self.filter_tests(test)\n if new_test_suite.countTestCases() > 0:\n tests_to_be_run.addTest(new_test_suite)\n\n return tests_to_be_run", "title": "" }, { "docid": "c9affc2a07a06fa1ea7541eeffd85eb3", "score": "0.5323092", "text": "def collect_tests(self):\n tester = self.project.project_tester()\n # Collect tests\n tester.collect_tests()\n # find all tests relating to this model\n # return the set of test ids\n apply_ids = []\n for test_id in tester.test_ids:\n test = tester.get_test(test_id)\n\n did = None\n if self.design is not None:\n did = self.design.id\n\n mid = os.path.basename(self.model_path)\n if test.applies_to_model(mid, design_id=did) and not len(test.children):\n # We only care about bottom level tests\n apply_ids.append(test_id)\n\n return apply_ids", "title": "" }, { "docid": "9934ca94b167ecb526184447dc1caf9a", "score": "0.53026605", "text": "def contains_any(self, test_components):\n for test_comp in test_components:\n if self.contains(test_comp):\n return True\n return False", "title": "" }, { "docid": "eb4c50740e5377427f13181c5d4ec182", "score": "0.5277089", "text": "def contains_all(self, test_components):\n for test_comp in test_components:\n if not self.contains(test_comp):\n return False\n return True;", "title": "" }, { "docid": "c0bab68fbea22be2dfcef1cbcbb5f3ac", "score": "0.5267896", "text": "def 
_find_matches(self) -> None:\n targets = set()\n for inspector in self._inspectors:\n targets.update(inspector.report())\n\n for root, _, files in os.walk(self._start_directory):\n for f in files:\n if fnmatch(f, self._pattern):\n full_path = os.path.join(root, f)\n tree = ast.parse(open(full_path).read())\n visitor = ImportFromVisitor(list(targets))\n visitor.visit(tree)\n if visitor.contains_target:\n self._test_filenames.add((root, f))", "title": "" }, { "docid": "d0723fc891ea9c73f2dab219e105025f", "score": "0.52182966", "text": "def has_test_files(self):\n return (self.archive.get_files_re('test_.*.py') +\n self.archive.get_files_re('.*_test.py')) != []", "title": "" }, { "docid": "9adb73b9d97baa483b9cb309c627e450", "score": "0.51056117", "text": "def all_successful(self):\n for target in self.targets:\n if not self.builds[target].passed():\n return False\n\n return True", "title": "" }, { "docid": "d1a4cef6f7d3ec588105855f8360b894", "score": "0.5090099", "text": "def get_test_classes_to_run(self):\n tcs_to_run = []\n for test_class in self.test_classes:\n if not test_class.is_finished():\n tcs_to_run.append(test_class)\n return tcs_to_run", "title": "" }, { "docid": "c239b82f5cd0024a533fce879c0bda1b", "score": "0.5058649", "text": "def _get_test_targets_for_spawn(self):\n return self._get_test_targets()", "title": "" }, { "docid": "657bd06ed259f66343e1786cf6b0c121", "score": "0.50443745", "text": "def eval_targets(self, targets):\n\n for target in targets:\n assert target in self.test_inst.data.keys(), \\\n \"{:s} not found in data\".format(target)\n assert not np.isnan(self.test_inst[target]).any(), \\\n \"NaN values found in {:s}\".format(target)\n assert target in self.test_inst.meta.data.index, \\\n \"{:s} not found in metadata\".format(target)\n return", "title": "" }, { "docid": "8a8bd787ce451933b836e6bd056d8e2b", "score": "0.5033227", "text": "def _CheckTests(self, expected_tests):\n for master in _MOCK_DATA[0]:\n for bot in _MOCK_DATA[1]:\n expected = ['%s/%s/%s' % (master, bot, t) for t in expected_tests]\n tests = graph_data.TestMetadata.query(\n graph_data.TestMetadata.master_name == master,\n graph_data.TestMetadata.bot_name == bot).fetch()\n actual = [t.test_path for t in tests]\n self.assertEqual(expected, actual)", "title": "" }, { "docid": "4846bcf7de991bc233ba10fdbc647a47", "score": "0.49683335", "text": "def suite(self, filenames=None):\n filenames = filenames or self.files\n\n return {test\n for filename in filenames\n for test in self[filename].all_affected_tests}", "title": "" }, { "docid": "576596d76cb6fc8f92ec2754ba41ee6e", "score": "0.49384516", "text": "def contains_all_tests(self):\n return not self.partial_result", "title": "" }, { "docid": "18d03cc081f500d6cdf1c45590314859", "score": "0.49305618", "text": "def testlist(self, test_prefix=DEFAULT_TEST_PREFIX):\n if self.build_path is None:\n return []\n\n watchedfiles = self.filelist.values()\n # Create dict of expected test binary names to the relative path of the\n # source files that they were compiled from. This is to make\n # identification of test binaries easier during build tree scanning.\n testfiles = {\n os.path.splitext(w.name)[0] + EXE_SUFFIX: w.relpath\n for w in watchedfiles\n if w.name.startswith(test_prefix)\n }\n # Scan the build tree. 
If an expected test binary is encountered, add a\n # GTest().\n return [\n GTest(testfiles[f], os.path.join(d, f), term=self.term)\n for d, f, m, t in walk(self.build_path)\n if f in testfiles and m & stat.S_IXUSR\n ]", "title": "" }, { "docid": "fbb5143578b0a41146a525a0739c9def", "score": "0.4910435", "text": "def _try_all(target, tests, try_method):\n return all([try_method(target, test) for test in tests])", "title": "" }, { "docid": "54d73745f727b794682e6664fcf118e5", "score": "0.48727047", "text": "def gatherTests(self):\n tests, content = [], {}\n execution = ConfigurationManager().getConfiguration('execution').configuration\n\n if execution.scripts.enabled == 'true':\n if not os.path.exists(execution.scripts.PCDATA):\n raise Exception('Invalid configuration. Executions scripts do not exist')\n content['tools'] = execution.scripts.PCDATA\n\n if len(self.__testCreators) == 0:\n raise Exception('No enabled test creators found')\n\n for creator in self.__testCreators:\n newTests = creator.createTests()\n if len(newTests) == 0:\n raise Exception('%s provided no tests!' % creator.__class__.__name__)\n\n tests += newTests\n content[creator.execScriptName] = creator.execScriptLocation\n content[creator.srcName] = creator.srcLocation\n\n defGroup = Group(self.DEFAULT_GROUP_NAME,\n self.DEFAULT_GROUP_DESC, tests)\n source = Source(self.DEFAULT_LOCATION, defGroup)\n\n descBuffer = StringIO()\n for testFilter in self.__testFilters:\n testFilter.filterTests(source)\n descBuffer.write(testFilter.getAppliedFilterDescription())\n desc = descBuffer.getvalue()\n descBuffer.close()\n\n if len(tests) == 0:\n raise Exception('No tests found!')\n\n testIds = set()\n for test in tests:\n if test.testId in testIds:\n raise Exception('Duplicate test ID %s' % test.testId)\n testIds.add(test.testId)\n\n # TODO: where does this come from?\n testSuiteName = \"FIXME\"\n dateTime = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%dT%H:%M:%S')\n macAddress = Network.getMacAddress()\n executionName = \"%s_%s_%s\" % (testSuiteName, dateTime, macAddress)\n\n self.__packDetails = (\n testSuiteName,\n str(desc),\n self.__getShortFilterDesc(),\n executionName,\n dateTime)\n\n self.__makeTar(**content)\n return source, desc", "title": "" }, { "docid": "de548e1b1d120b44e199814ea947dea1", "score": "0.48380068", "text": "def get_test_sets() -> []:\n tests = ConfigManager.get_test_dict()\n return tests", "title": "" }, { "docid": "d3e306aa8bf204fa37c444546edf484f", "score": "0.4831065", "text": "def tested(self):\n return self._tests", "title": "" }, { "docid": "e9bc4a81b573e99420921bc3a8a68f82", "score": "0.4807907", "text": "def has_any(self, sources):\n return any(x in list(self.keys()) for x in sources)", "title": "" }, { "docid": "d020bc84cf15b4d377d036612986267d", "score": "0.4804515", "text": "def match(*specs, only_installed=False):\n\n\tinst = set(installed())\n\tds = envs.match(*specs, installed=inst)\n\n\tif only_installed:\n\t\tds = ds.intersection(inst)\n\n\treturn sorted(ds)", "title": "" }, { "docid": "2a052d24cf1758eec3fa932fa5283770", "score": "0.47930267", "text": "def contained_in(self, test_components):\n for component in self.components:\n if component not in test_components:\n return False\n return True", "title": "" }, { "docid": "e7cab1ba42e2b4023bb02809fffb67b8", "score": "0.4792717", "text": "def all_files_present(self):\n if self._source_files is None:\n self._source_files = self._process_source_files()\n return all(x[2] for x in self._source_files)", "title": "" }, { "docid": 
"a01c68988cda3aee270c93463315476d", "score": "0.4789014", "text": "def _is_test_build_target_valid(self, t_info):\n # If the cached build target can be found in current module-info, then\n # it is a valid build targets of the test.\n for build_target in t_info.build_targets:\n if str(build_target).startswith(constants.MODULES_IN):\n continue\n if not self.module_info.is_module(build_target):\n logging.debug('%s is not a valid build target.', build_target)\n return False\n return True", "title": "" }, { "docid": "98655888d7824467efc9b0ddb12d2d6a", "score": "0.47846225", "text": "def test_xs_needs_c_compiler(testing_config):\n # This uses Sub::Identify=0.14 since it includes no .c files but a .xs file.\n api.skeletonize(\"Sub::Identify\", version=\"0.14\", repo=\"cpan\", config=testing_config)\n m = api.render(\"perl-sub-identify/0.14\", finalize=False, bypass_env_check=True)[0][\n 0\n ]\n build_requirements = m.get_value(\"requirements/build\")\n assert compiler(\"c\", testing_config) in build_requirements", "title": "" }, { "docid": "2c753be57ae236eb3c49d1676e13f3ea", "score": "0.47836363", "text": "def load_and_run_tests(test_names, failfast, report_coverage, ask_before_running_tests, tests_mode=None):\n\n modules_to_be_loaded = sorted({test.split(\".\")[0].strip() for test in test_names})\n modules_to_be_tested = [ModuleTests(module) for module in modules_to_be_loaded]\n\n modes = set()\n tested_ioc_directories = set()\n\n for module in modules_to_be_tested:\n # Add tests that are either the module or a subset of the module i.e. module.TestClass\n module.tests = [test for test in test_names if test == module.name or test.startswith(module.name + \".\")]\n modes.update(module.modes)\n\n test_results = []\n\n arch = get_build_architecture()\n print(\"Running tests for arch {}\".format(arch.name))\n\n for mode in modes:\n if tests_mode is not None and mode != tests_mode:\n continue\n\n modules_to_be_tested_in_current_mode = [module for module in modules_to_be_tested if mode in module.modes]\n\n for module in modules_to_be_tested_in_current_mode:\n # Skip tests that cannot be run with a 32-bit architecture\n if arch not in module.architectures:\n print(f\"Skipped module tests.{module.name} in {TestModes.name(mode)}: suite not available with a {BuildArchitectures.archname(arch)} build architecture\")\n continue\n\n clean_environment()\n device_launchers, device_directories = make_device_launchers_from_module(module.file, mode)\n tested_ioc_directories.update(device_directories)\n test_results.append(\n run_tests(arguments.prefix, module.name, module.tests, device_collection_launcher(device_launchers),\n failfast, ask_before_running_tests))\n\n if report_coverage:\n report_test_coverage_for_devices(tested_ioc_directories)\n\n return all(test_result is True for test_result in test_results)", "title": "" }, { "docid": "8756161d274e8730d24115fe14a82931", "score": "0.47765338", "text": "def test_build_test_execution_commands_no_test_suites(self):\n exp = ([\"starcluster -c sc_config start nightly_tests\"], [],\n [\"starcluster -c sc_config terminate -c nightly_tests\"])\n obs = _build_test_execution_commands([], 'sc_config', 'ubuntu',\n 'nightly_tests')\n self.assertEqual(obs, exp)", "title": "" }, { "docid": "46064d60db2c3ea2147a4270bf2fd063", "score": "0.47702372", "text": "def contains_blocks(project, opcodes):\n return any(any((isinstance(block, dict) and block[\"opcode\"] in opcodes) for block in target[\"blocks\"].values())\n for target in project)", "title": "" }, { "docid": 
"cd6e461a5866aa0d20fea6ef74c3f0cc", "score": "0.47683987", "text": "def find_containments(seed_polygon, target_polygons):\n contain_booleans = []\n for _, poly in enumerate(target_polygons):\n contain_booleans.append(seed_polygon.contains(poly))\n\n return contain_booleans", "title": "" }, { "docid": "b6bac4eb1d993a84b045ca397ed4fadf", "score": "0.475926", "text": "def compile_all_projects(projects, extra_args=''):\r\n failed_clients = []\r\n for project, clients in enumerate_projects().iteritems():\r\n for client in clients:\r\n project_client = '%s.%s' % (project, client)\r\n if not compile_and_install_client(project_client, extra_args):\r\n failed_clients.append(project_client)\r\n\r\n return failed_clients", "title": "" }, { "docid": "be24ba1aa0288716c11d3cdcbfe3dd1e", "score": "0.47574547", "text": "def _host_run_test_dependencies(meta_yaml: \"MetaYamlTypedDict\") -> Set[\"PackageName\"]:\n _ = meta_yaml[\"requirements\"]\n rq = (_[\"host\"] or _[\"build\"]) | _[\"run\"] | _[\"test\"]\n return typing.cast(\"Set[PackageName]\", rq)", "title": "" }, { "docid": "4939e444f2f13e83997834ff7a87fb65", "score": "0.47528726", "text": "def is_project_set_exist(self, test_project_set):\n rst = False\n # iter in all the project set\n for project_set in self.project_set:\n if set(project_set) == set(test_project_set):\n rst = True\n break\n return rst", "title": "" }, { "docid": "8d927899efd8c972542fe99c9bb18a8f", "score": "0.47489983", "text": "def generate(self) -> Set[Test]:\n\n tests = set()\n\n # find all functions which have not been tested\n constants = filter(lambda c: not c.properties.get('presence_tested', False),\n Database().get_constants())\n\n # for all applicable functions\n for constant in constants:\n test = self.generate_test(constant)\n\n tests.add(test)\n\n return tests", "title": "" }, { "docid": "98900feed1e6d37dc2a5b101426ac51d", "score": "0.47420177", "text": "def targets(self):\n return filter(lambda t: isinstance(t, self.claimed_target_types), self.context.target_roots)", "title": "" }, { "docid": "28d496cc7bc407db766fb3463efdd8d2", "score": "0.47395974", "text": "def _calculate_isortable_python_sources(self, targets):\n sources = set()\n for target in targets:\n sources.update(\n source for source in target.sources_relative_to_buildroot()\n if os.path.splitext(source)[1] == self._PYTHON_SOURCE_EXTENSION\n )\n return list(sources)", "title": "" }, { "docid": "3e27eac6cf8ccdb3a53b80dbf190b5cc", "score": "0.47389108", "text": "def test_targets():\n # Build Ref with flags\n tempdir = None\n try:\n fprime_root = os.path.join(\n os.path.dirname(__file__), \"..\", \"..\", \"..\", \"..\", \"..\"\n )\n # Create a temp directory and register its deletion at the end of the program run\n tempdir = tempfile.mkdtemp()\n get_cmake_builder().generate_build(\n os.path.join(fprime_root, \"Ref\"), tempdir, {\"CMAKE_BUILD_TYPE\": \"Testing\"}\n )\n test_data = [\n (os.path.join(fprime_root, \"Ref\"), \"\"),\n (os.path.join(fprime_root, \"Svc\", \"CmdDispatcher\"), \"\"),\n (os.path.join(fprime_root, \"Svc\", \"CmdDispatcher\"), \"ut_exe\"),\n (os.path.join(fprime_root, \"Svc\", \"CmdDispatcher\"), \"check\"),\n ]\n # Loop over all directories and target pairs ensuing things work\n for path, target in test_data:\n get_cmake_builder().execute_known_target(target, tempdir, path)\n test_data = [\n (os.path.join(fprime_root, \"Svc\", \"CmdDispatcher\"), \"nontarget1\"),\n (os.path.join(fprime_root, \"Svc\", \"CmdDispatcher3Not\"), \"\"),\n ]\n # Loop over all paths and target pairs looking for 
expected Exceptions\n for path, target in test_data:\n with pytest.raises(fprime.fbuild.cmake.CMakeException):\n get_cmake_builder().execute_known_target(target, tempdir, path)\n # Clean up when all done\n finally:\n if tempdir is not None:\n shutil.rmtree(tempdir, ignore_errors=True)", "title": "" }, { "docid": "c65b2ecc9a25efdfd4205e8d635ba287", "score": "0.4736587", "text": "def _filter_tests(classes, args):\n test_name_filters = args['--filter']\n if test_name_filters:\n test_name_filters = test_name_filters.split(',')\n test_name_res = [re.compile(p, re.I) for p in test_name_filters]\n def pred(name):\n return any(f.search(name) for f in test_name_res)\n for cls in classes:\n cls = _filter_test_methods(cls, pred)\n return classes", "title": "" }, { "docid": "9679a857142bafbba24546a2b70beb6b", "score": "0.47267923", "text": "def setup_projects(targets):\n\n actual = []\n invalid = []\n\n allowed = get_projects()\n\n # Keep those project names which are found in the path\n # elements of the targets.\n # XXX: This is not really reliable. If a project name is\n # part of the path of another project it will be falsely\n # accepted.\n actual = []\n for project in allowed :\n for target in targets:\n elements = target.split(\"/\")\n if project in elements:\n actual += [ project ]\n\n if not actual:\n actual = allowed\n\n return actual", "title": "" }, { "docid": "7c5bbdacd0a57c4059f9dad5a51e5a1a", "score": "0.4723135", "text": "def filter_suite(suite, test_names):\n new_suite = unittest.suite.TestSuite()\n for name in test_names:\n new_suite.addTests(cls for cls, class_name in list_classes(suite) if name in class_name)\n return new_suite", "title": "" }, { "docid": "ff80ca6c429f59aa29f8ea47c1844469", "score": "0.47174424", "text": "def filter_targets(self, targets_names):\n\n result = []\n\n for name in targets_names:\n result += self.filter_name(name, True)\n\n return set(result)", "title": "" }, { "docid": "622762a8834020818f35a8621163cd20", "score": "0.47155708", "text": "def has_test_suite(self):\n return (self.has_test_files or self.metadata['test_suite'] or\n self.metadata['tests_require'] != [])", "title": "" }, { "docid": "3071bc50599676883c01d9b0657bfb86", "score": "0.4712481", "text": "def find_and_run_tests(start, options):\r\n if options.module_list:\r\n modules = []\r\n for m in options.module_list:\r\n modules.append(m.split('.'))\r\n else:\r\n modules = scan_for_modules(start, options)\r\n\r\n print_header('Number of test modules found: %d' % len(modules))\r\n\r\n functions = {}\r\n for module_names in modules:\r\n # Create a function that'll test a particular module. module=module\r\n # is a hack to force python to evaluate the params now. 
We then\r\n # rename the function to make error reporting nicer.\r\n run_module = lambda module=module_names: run_test(module, options)\r\n name = '.'.join(module_names)\r\n run_module.__name__ = name\r\n functions[run_module] = set()\r\n\r\n try:\r\n dargs = {}\r\n if options.debug:\r\n dargs['max_simultaneous_procs'] = 1\r\n pe = parallel.ParallelExecute(functions, **dargs)\r\n pe.run_until_completion()\r\n except parallel.ParallelError, err:\r\n return err.errors\r\n return []", "title": "" }, { "docid": "b873db6d38f7489a73c06d2b3bb1e125", "score": "0.47031468", "text": "def test(self, selectors: Mapping[str, Any]) -> bool:", "title": "" }, { "docid": "ba1f8744e245b4a2f407f6ef1bd70cab", "score": "0.46903393", "text": "def tests(self):\n return [test for test in self._sequence]", "title": "" }, { "docid": "c42ec9ada3715f0771edb49bf1f58b11", "score": "0.46785706", "text": "def _get_fuzz_targets(project):\n fuzz_targets = []\n for name in os.listdir(project.out):\n if name.startswith('afl-'):\n continue\n if name == 'centipede':\n continue\n if name.startswith('jazzer_'):\n continue\n if name == 'llvm-symbolizer':\n continue\n\n path = os.path.join(project.out, name)\n # Python and JVM fuzz targets are only executable for the root user, so\n # we can't use os.access.\n if os.path.isfile(path) and (os.stat(path).st_mode & 0o111):\n fuzz_targets.append(name)\n\n return fuzz_targets", "title": "" }, { "docid": "a7540ec4b310f0bbf44fe6b765ae520f", "score": "0.46682423", "text": "def has_testcases(xml):\n\n # if testsuite has a child, it means a testcase is present\n if len(xml) > 0:\n return True\n\n return False", "title": "" }, { "docid": "be92f9d1e83b0f871b729d19b688e335", "score": "0.46534136", "text": "def test_multi_build(self):\n\n arg_parser = arguments.get_parser()\n args = arg_parser.parse_args([\n 'build',\n '-H', 'this',\n 'build_parallel'\n ])\n\n build_cmd = commands.get_command(args.command_name)\n build_ret = build_cmd.run(self.pav_cfg, args)\n\n build_cmd.outfile.seek(0)\n self.assertEqual(build_ret, 0, msg=build_cmd.outfile.read())\n\n for test in build_cmd.last_tests:\n test.wait(timeout=10)\n\n # Make sure we actually built separate builds\n builds = [test.builder for test in build_cmd.last_tests]\n build_names = set([b.name for b in builds])\n self.assertEqual(len(build_names), 4)\n\n for test in build_cmd.last_tests:\n if not test.skipped:\n self.assertEqual(test.status.current().state, STATES.BUILD_DONE,\n msg='Test {} status: {}'\n .format(test.id, test.status.current()))", "title": "" }, { "docid": "9870fb90df561bf876272e46c68c1e9b", "score": "0.46417305", "text": "def test_targets(self) -> None:\n # TODO: Test the targets returned from the Excel connector\n pass", "title": "" }, { "docid": "13e1d37a69731c4f68d173ebeb198d86", "score": "0.46407396", "text": "def check_files(self, files):\n\n result = []\n\n for file in files:\n pattern = self._filename_pattern(file)\n\n if pattern in self.found:\n found = self.found[pattern]\n\n else:\n found = self.found[pattern] = self.files_by_pattern(pattern)\n\n (base, dot, suffix) = file.partition('.')\n\n result.append(base in found)\n\n return result", "title": "" }, { "docid": "5d06c1a5e9618d7137df9e9698ef11e4", "score": "0.4619066", "text": "def list_tests(self):\n return ['check', 'syntax-check']", "title": "" }, { "docid": "0048ed06dd7f5865e0e5044e9c48bac6", "score": "0.46176124", "text": "def test_needed_functions():\n needed_funcs = [\n \"get_include_info\",\n \"execute_known_target\",\n \"get_include_locations\",\n 
\"get_fprime_configuration\",\n ]\n for func in needed_funcs:\n assert hasattr(get_cmake_builder(), func)", "title": "" }, { "docid": "a94780dea2424d71b64ee45c2b0b85ee", "score": "0.46039826", "text": "def retrieve_required_inputs(cgpms, topo, targets, constraints, extraneous):\n required = set(targets)\n for l in reversed(topo):\n outputs_l = cgpms[l].outputs\n inputs_l = cgpms[l].inputs\n if any(i in required or i in constraints for i in outputs_l):\n required.update(inputs_l)\n return [\n target for target in required if\n all(target not in x for x in [targets, constraints, extraneous])\n ]", "title": "" }, { "docid": "537eee76df34bc6a7ee8c4cae38940bf", "score": "0.45769072", "text": "def test_compilation(cfile, compiler, **compiler_attrs):\n\n efile, ext = os.path.splitext(cfile)\n\n cpreargs = lpreargs = []\n if sys.platform == 'darwin':\n # use appropriate arch for compiler\n if platform.architecture()[0] == '32bit':\n if platform.processor() == 'powerpc':\n cpu = 'ppc'\n else:\n cpu = 'i386'\n cpreargs = ['-arch', cpu]\n lpreargs = ['-arch', cpu, '-undefined', 'dynamic_lookup']\n else:\n # allow for missing UB arch, since it will still work:\n lpreargs = ['-undefined', 'dynamic_lookup']\n if sys.platform == 'sunos5':\n if platform.architecture()[0] == '32bit':\n lpreargs = ['-m32']\n else:\n lpreargs = ['-m64']\n extra = compiler_attrs.get('extra_compile_args', None)\n extra_link = compiler_attrs.get('extra_link_args', [])\n lpreargs.extend(extra_link)\n\n objs = compiler.compile([cfile], extra_preargs=cpreargs, extra_postargs=extra)\n compiler.link_executable(objs, efile, extra_preargs=lpreargs)\n return efile", "title": "" }, { "docid": "efa3efe559b727e62440a024fc18e385", "score": "0.45765036", "text": "def suiteFull():\r\n \r\n return unittest.TestLoader().loadTestsFromTestCase( MatchingCompoundTests )", "title": "" }, { "docid": "58afd4103accddeef52a07dc1c45f3ed", "score": "0.45699888", "text": "def Cost(outputs,targets):\r\n errors = np.zeros((len(targets),1))\r\n \r\n t=0\r\n for target in targets:\r\n output = outputs[t]\r\n errors[t] = output!= target\r\n t+=1\r\n \r\n return errors", "title": "" }, { "docid": "440fccfba9f33a5e399a42883f36cb1f", "score": "0.4568942", "text": "def contains_test_passer(t, test):\n return test(t.value) or any([test(c.value) for c in t.children])", "title": "" }, { "docid": "3a989679b397d02fa8b20727a9ad3371", "score": "0.45676422", "text": "def filter_targets(self, targets, exclude): # type: (t.List[IntegrationTarget], t.Set[str]) -> None\n if self.controller and self.args.host_settings.controller_fallback and targets:\n affected_targets = [target.name for target in targets]\n reason = self.args.host_settings.controller_fallback.reason\n\n if reason == FallbackReason.ENVIRONMENT:\n exclude.update(affected_targets)\n display.warning(f'Excluding {self.host_type} tests since a fallback controller is in use: {\", \".join(affected_targets)}')\n elif reason == FallbackReason.PYTHON:\n display.warning(f'Some {self.host_type} tests may be redundant since a fallback python is in use: {\", \".join(affected_targets)}')\n\n if not self.allow_destructive and not self.config.is_managed:\n override_destructive = set(target for target in self.include_targets if target.startswith('destructive/'))\n override = [target.name for target in targets if override_destructive & set(target.aliases)]\n\n self.skip('destructive', 'which require --allow-destructive or prefixing with \"destructive/\" to run on unmanaged hosts', targets, exclude, override)\n\n if not 
self.args.allow_disabled:\n override_disabled = set(target for target in self.args.include if target.startswith('disabled/'))\n override = [target.name for target in targets if override_disabled & set(target.aliases)]\n\n self.skip('disabled', 'which require --allow-disabled or prefixing with \"disabled/\"', targets, exclude, override)\n\n if not self.args.allow_unsupported:\n override_unsupported = set(target for target in self.args.include if target.startswith('unsupported/'))\n override = [target.name for target in targets if override_unsupported & set(target.aliases)]\n\n self.skip('unsupported', 'which require --allow-unsupported or prefixing with \"unsupported/\"', targets, exclude, override)\n\n if not self.args.allow_unstable:\n override_unstable = set(target for target in self.args.include if target.startswith('unstable/'))\n\n if self.args.allow_unstable_changed:\n override_unstable |= set(self.args.metadata.change_description.focused_targets or [])\n\n override = [target.name for target in targets if override_unstable & set(target.aliases)]\n\n self.skip('unstable', 'which require --allow-unstable or prefixing with \"unstable/\"', targets, exclude, override)", "title": "" }, { "docid": "e13879015fcedfca8bdaee8333d1e6b7", "score": "0.45642173", "text": "def make_tests(input_dir, msg_dir, filter_rgx, callbacks):\n if filter_rgx:\n is_to_run = re.compile(filter_rgx).search\n else:\n is_to_run = lambda x: 1\n tests = []\n for module_file, messages_file in (\n get_tests_info(input_dir, msg_dir, 'func_', '')\n ):\n if not is_to_run(module_file) or module_file.endswith(('.pyc', \"$py.class\")):\n continue\n base = module_file.replace('func_', '').replace('.py', '')\n\n dependencies = get_tests_info(input_dir, msg_dir, base, '.py')\n\n for callback in callbacks:\n test = callback(input_dir, msg_dir, module_file, messages_file,\n dependencies)\n if test:\n tests.append(test)\n return tests", "title": "" }, { "docid": "68157585d88e9e7139268fdce99c937e", "score": "0.4555055", "text": "def has_any_contender_results(self) -> bool:\n for run_comparison in self.run_comparisons:\n if run_comparison.contender_benchmark_result_info:\n return True\n return False", "title": "" }, { "docid": "898f150259e29797b953e8c5222e0eba", "score": "0.45541653", "text": "def run_all_tests(skip_expensive_tests, verbose, setup_deps=True):\n\n start = time.time()\n\n # Prepare tasks.\n task_to_test = {}\n tasks = []\n test_classes = {}\n test_classes.update(ALL_TEST_CLASSES)\n test_classes.update(all_third_party_tests())\n\n for test_class_name in test_classes:\n if skip_expensive_tests and test_class_name in EXPENSIVE_TESTS:\n continue\n test = FunctionalTestTask(test_class_name, verbose)\n task = TaskThread(test.run, name='testing %s' % test_class_name)\n task_to_test[task] = test\n tasks.append(task)\n\n # order tests by their size largest to smallest\n tasks = sorted(\n tasks,\n key=lambda task: test_classes.get(task_to_test[task].test_class_name),\n reverse=True)\n\n # setup dependencies\n if setup_deps:\n setup_all_dependencies()\n\n # execute all tasks\n log('Executing all %s test suites' % len(tasks))\n runtimes_sec = []\n TaskThread.execute_task_list(\n tasks, chunk_size=16, runtimes_sec=runtimes_sec)\n\n # map durations to names\n name_durations = []\n for index, duration in enumerate(runtimes_sec):\n name_durations.append((\n round(duration, 2), task_to_test[tasks[index]].test_class_name))\n\n # report all longest first\n log('Reporting execution times for 10 longest tests')\n for duration, name in 
sorted(\n name_durations, key=lambda name_duration: name_duration[0],\n reverse=True)[:10]:\n log('Took %ss for %s' % (int(duration), name))\n\n # Check we ran all tests as expected.\n total_count = 0\n for task in tasks:\n test = task_to_test[task]\n # Check that no unexpected tests were picked up via automatic discovery,\n # and that the number of tests run in a particular suite.py invocation\n # matches the expected number of tests.\n test_count = test_classes.get(test.test_class_name, None)\n expected_text = 'INFO: All %s tests PASSED!' % test_count\n if test_count is None:\n log('%s\\n\\nERROR: ran unexpected test class %s' % (\n test.output, test.test_class_name))\n if expected_text not in test.output:\n log('%s\\n\\nERROR: Expected %s tests to be run for the test class '\n '%s, but found some other number.' % (\n test.output, test_count, test.test_class_name))\n raise Exception()\n total_count += test_count\n\n log('Ran %s tests in %s test classes; took %ss' % (\n total_count, len(tasks), int(time.time() - start)))", "title": "" }, { "docid": "136a960c1efccb1cb433b39083e6246f", "score": "0.4550851", "text": "def filter_targets(self, targets, exclude): # type: (t.List[IntegrationTarget], t.Set[str]) -> None\n super().filter_targets(targets, exclude)\n\n if not self.allow_root and not self.config.have_root:\n self.skip('needs/root', 'which require --allow-root or running as root', targets, exclude)\n\n self.skip(f'skip/python{self.config.python.version}', f'which are not supported by Python {self.config.python.version}', targets, exclude)\n self.skip(f'skip/python{self.config.python.major_version}', f'which are not supported by Python {self.config.python.major_version}', targets, exclude)", "title": "" }, { "docid": "8a31aa97388111f8054134897472d5a4", "score": "0.45327947", "text": "def has_any_contender_runs(self) -> bool:\n return bool(self.run_comparisons)", "title": "" }, { "docid": "e902f7aa4b21b38720d396b0493d703f", "score": "0.45303938", "text": "def testscftargets(self):\r\n assert len(self.data.scftargets[0]) == self.num_scf_criteria", "title": "" }, { "docid": "2526845b69ad398531a73c9a2670587d", "score": "0.4527421", "text": "def select_samples_from_doit(dependencies, targets):\n\n for dep_file in dependencies:\n dep_ext = dep_file.split('.')[-1]\n\n if dep_ext == 'table':\n segregation_path = dep_file\n elif dep_ext == 'txt':\n sample_names_path = dep_file\n\n assert len(targets) == 1\n\n select_samples_from_file(\n segregation_path,\n sample_names_path,\n list(targets)[0])", "title": "" }, { "docid": "4cec99c70ad1a5f0d0cb1b47b167af0b", "score": "0.45267114", "text": "def AllTargets(target_list, target_dicts, build_file):\n bftargets = BuildFileTargets(target_list, build_file)\n deptargets = DeepDependencyTargets(target_dicts, bftargets)\n return bftargets + deptargets", "title": "" }, { "docid": "9ea3086c0d114276f74e44f917059d25", "score": "0.4524205", "text": "def get_all_tests():\n return _get_tests('all.xml')", "title": "" }, { "docid": "c3de9957fffd219bb60da7bcf8222082", "score": "0.45203453", "text": "def find_host_unit_tests(module_info, path):\n logging.debug('finding unit tests under %s', path)\n found_unit_tests = []\n unit_test_names = module_info.get_all_unit_tests()\n logging.debug('All the unit tests: %s', unit_test_names)\n for unit_test_name in unit_test_names:\n for test_path in module_info.get_paths(unit_test_name):\n if test_path.find(path) == 0:\n found_unit_tests.append(unit_test_name)\n return found_unit_tests", "title": "" }, { "docid": 
"2d1ee6428883d93cebd6e47c5aa1fc0c", "score": "0.45135447", "text": "def all_tests(cls):\n return dict(_tests.ATest.test_classes)", "title": "" }, { "docid": "a8894f925b45b3316dea143383cdb5b9", "score": "0.45129296", "text": "def countMulttargets(self):\n return sum(cf._multTarget is not None for cf in self._cashFlows)", "title": "" }, { "docid": "6820d4d41903b22f0458744674feaaf2", "score": "0.45128325", "text": "def getTests(self):\n return self.__tests", "title": "" }, { "docid": "85e9cd6af0195e4c0f24f3ef5512fce8", "score": "0.45081648", "text": "def get_selected_tests(self, filter_func):\n\n j2env = (\n jinja2.Environment(\n loader=jinja2.FileSystemLoader(self.suite_path),\n keep_trailing_newline=True,\n )\n if USE_JINJA\n else None\n )\n\n for test_name in os.listdir(self.suite_path):\n if not is_test_from_dir(self.suite_path, test_name):\n continue\n if self.args.test and not any(\n re.search(pattern, test_name) for pattern in self.args.test\n ):\n continue\n if USE_JINJA and test_name.endswith(\".gen.sql\"):\n continue\n if not filter_func(test_name):\n continue\n test_name = self.render_test_template(j2env, self.suite_path, test_name)\n yield test_name", "title": "" }, { "docid": "f352a21391ee3aa04d5fb12e872df565", "score": "0.4498331", "text": "def get_tests_in_browsertest(file: str) -> Dict[str, Set[TestPlatform]]:\n tests: Dict[str, Set[TestPlatform]] = {}\n # Attempts to only match test test name in a test declaration, where the\n # name contains the test id prefix. Purposefully allows any prefixes on\n # the test name (like MAYBE_ or DISABLED_).\n for match in re.finditer(fr'{CoverageTest.TEST_ID_PREFIX}(\\w+)\\)', file):\n test_id = match.group(1)\n tests[test_id] = set(TestPlatform)\n test_name = f\"{CoverageTest.TEST_ID_PREFIX}{test_id}\"\n if f\"DISABLED_{test_name}\" not in file:\n continue\n enabled_platforms: Set[TestPlatform] = tests[test_id]\n for platform in TestPlatform:\n # Search for macro that specifies the given platform before\n # the string \"DISABLED_<test_name>\".\n macro_for_regex = re.escape(platform.macro)\n # This pattern ensures that there aren't any '{' or '}' characters\n # between the macro and the disabled test name, which ensures that\n # the macro is applying to the correct test.\n if re.search(fr\"{macro_for_regex}[^{{}}]+DISABLED_{test_name}\",\n file):\n enabled_platforms.remove(platform)\n if len(enabled_platforms) == len(TestPlatform):\n enabled_platforms.clear()\n return tests", "title": "" }, { "docid": "ae526ebc0f858a43f95621155b04b4fd", "score": "0.44975027", "text": "def FindTestModules():\n tests = []\n start_dir = os.path.dirname(os.path.abspath(__file__))\n for dir, subdirs, files in os.walk(start_dir):\n if dir.endswith('/.svn') or '/.svn/' in dir:\n continue\n tests.extend(ModuleName(os.path.join(dir, f), start_dir) for f \n in files if f.endswith('_test.py'))\n return tests", "title": "" }, { "docid": "3be626db7c4878e79956c890633ad620", "score": "0.449362", "text": "def has_test_case(view):\n for php_class in find_php_classes(view):\n if php_class[-4:] == 'Test':\n return True\n return False", "title": "" }, { "docid": "40fc8696f872550456185eb9309283ac", "score": "0.44888446", "text": "def filter_targets(self, targets, exclude): # type: (t.List[IntegrationTarget], t.Set[str]) -> None\n super().filter_targets(targets, exclude)\n\n arch = detect_architecture(self.config.python.path)\n\n if arch:\n self.skip(f'skip/{arch}', f'which are not supported by {arch}', targets, exclude)", "title": "" }, { "docid": 
"a0374bb0e42f8e8c6d71ba531692cfea", "score": "0.44878712", "text": "def compute_correct(self, outputs, targets, tolerance=None):\n tolerance = tolerance if tolerance is not None else self.tolerance\n correct = []\n for r in range(len(outputs[0])):\n row = []\n for c in range(len(outputs)):\n row.extend(list(map(lambda v: v <= tolerance, np.abs(outputs[c][r] - targets[c][r]))))\n correct.append(all(row))\n return correct", "title": "" }, { "docid": "a60841f6c043b23b9d649ddb3964259e", "score": "0.4482388", "text": "def is_suite_to_be_executed(name, testSuites):\r\n if len(testSuites) == 0:\r\n return True\r\n else:\r\n for test_case in testSuites:\r\n if str(test_case) == name:\r\n return True\r\n\r\n return False", "title": "" }, { "docid": "3a18f6ca5b40d0338a2163df7946d01d", "score": "0.44671527", "text": "def get_testable_functions():\n # Check the files that aren't the current file.\n curr_file = os.path.basename(__file__)\n files_to_check = [\n f for f in os.listdir() if f.endswith('.py') and f != curr_file\n ]\n\n # Collect a list of functions to test\n output = []\n for file in files_to_check:\n module_name = file[:-3]\n module = __import__(module_name)\n\n try:\n output += module.TESTABLE\n except AttributeError:\n # No testable functions in the file\n pass\n\n return output", "title": "" }, { "docid": "3c71baba1ba10731c0df83fe5d6604ed", "score": "0.44659096", "text": "def _find_framework_tests(rows):\r\n job_test_pattern = re.compile('SERVER|CLIENT\\\\_JOB\\.[\\d]')\r\n test_jobs = []\r\n for r in rows:\r\n test_name = r[0]\r\n if job_test_pattern.match(test_name):\r\n test_jobs.append(test_name)\r\n\r\n return test_jobs", "title": "" }, { "docid": "adda17059b4d04b248ebc251717c70c6", "score": "0.4462767", "text": "def has_passed(self):\n\n # Look through the test classes for this device,\n # and if any of them have no passed (i.e. 
not passed\n # required\n for test_class in self.test_classes:\n if test_class.is_required() and not test_class.has_passed():\n return False\n return True", "title": "" }, { "docid": "62717f425117151325c75bb236c1a66a", "score": "0.4461657", "text": "def targets(self):\n targets = list()\n if self.isValid():\n target_list = self.pbx_root_object.get(PBX_Constants.kPBX_PROJECT_targets, None)\n if target_list:\n targets.extend(target_list)\n return targets", "title": "" }, { "docid": "639e80cd6da61e445ccece0292f92a3a", "score": "0.4454716", "text": "def find_json():\n matches = []\n for root, _dirnames, filenames in os.walk('tests'):\n for filename in fnmatch.filter(filenames, '*.json'):\n matches.append(os.path.join(root, filename))\n return matches", "title": "" }, { "docid": "9385d81084997adb2b261566882095dc", "score": "0.4448311", "text": "def get_test_names(): \n tests = [] \n for root, dirnames, filenames in os.walk(tp.tests_to_run_dir): # @UnusedVariable\n for filename in fnmatch.filter(filenames, 'test_*.py'):\n tests.append(os.path.join(root, filename))\n return tests", "title": "" }, { "docid": "8716760daff9b25c04ff5653937769b6", "score": "0.44403902", "text": "def collect_tests(paths, logger=None, setup=None, teardown=None):\n if logger is None:\n logger = j.logger.logging\n\n result = []\n logger.info('Collecting tests from paths {}'.format(paths))\n for path in paths:\n if not j.sal.fs.exists(path):\n logger.error('Path {} does not exist'.format(path))\n continue\n if j.sal.fs.isFile(path):\n name = j.sal.fs.getBaseName(path)\n result.append(AYSTest(name=name, path=path))\n continue\n for dir_ in j.sal.fs.listDirsInDir(path):\n logger.debug('Creating group test for path {}'.format(dir_))\n result.append(AYSGroupTest(name=j.sal.fs.getBaseName(dir_), path=dir_))\n for file_ in sorted([file__ for file__ in j.sal.fs.listFilesInDir(path) if not j.sal.fs.getBaseName(file__).startswith('_') and\n (file__.endswith('{}yaml'.format(os.path.extsep)) or\n file__.endswith('{}bp'.format(os.path.extsep)))]):\n logger.debug('Creating test for path {}'.format(file_))\n result.append(AYSTest(name=j.sal.fs.getBaseName(file_), path=file_, setup=setup, teardown=teardown))\n return result", "title": "" }, { "docid": "e743d441b76d01e1895d65240b0de78c", "score": "0.4438747", "text": "def additional_tests():\r\n return make_suite('tests.')", "title": "" }, { "docid": "8f91f10a74ca7bb3a0d1b426e1134a4d", "score": "0.443836", "text": "def test_local_builds_only(self):\n\n arg_parser = arguments.get_parser()\n args = arg_parser.parse_args([\n 'build',\n '-H', 'this',\n '--local-builds-only',\n 'build_parallel'\n ])\n\n build_cmd = commands.get_command(args.command_name)\n build_ret = build_cmd.run(self.pav_cfg, args)\n\n build_cmd.outfile.seek(0)\n self.assertEqual(build_ret, 0, msg=build_cmd.outfile.read())\n\n for test in build_cmd.last_tests:\n test.wait(timeout=10)\n\n # Make sure we actually built separate builds\n builds = [test.builder for test in build_cmd.last_tests]\n build_names = set([b.name for b in builds])\n self.assertEqual(len(build_names), 2)\n\n for test in build_cmd.last_tests:\n if not test.skipped:\n self.assertEqual(test.status.current().state, STATES.BUILD_DONE,\n msg='Test {} status: {}'\n .format(test.id, test.status.current()))", "title": "" }, { "docid": "e961a0dd43f1ce08b7d416b6a636b473", "score": "0.4437065", "text": "def _get_test_classes_from_modules(modules):\n classes = []\n for module in modules:\n for attr in dir(module):\n val = getattr(module, attr)\n not_base_itself = 
not val == seltest.Base\n instance_of_base = (hasattr(val, '__bases__')\n and seltest.Base in val.__bases__)\n if not_base_itself and instance_of_base:\n classes.append(val)\n return classes", "title": "" }, { "docid": "72d2f1ed1b2e13e367e8d663e9903b8e", "score": "0.44370463", "text": "def get_test_methods(self, filter_required=None):\n res = []\n for test_class in self.test_classes:\n if test_class.is_required() == filter_required:\n continue\n for test_method in test_class.get_methods():\n res.append(test_method)\n return res", "title": "" }, { "docid": "d14b133a7e171a32f9aae76692e94ac2", "score": "0.44324496", "text": "def has(*commands):\n r = True\n for c in commands:\n if runp('which ' + c, check=True)[0] != 0:\n r = False\n break\n return r", "title": "" }, { "docid": "a47da154a950a22c9dd6116fe41288a3", "score": "0.4420631", "text": "def generate_rules() -> List[str]:\n cmake_rules = []\n\n for platform in test_definitions.CMakePlatform:\n cmake_rule = cmake_builder.rules.build_set(\n variable_name=f\"IREE_MODULE_COMPILE_CONFIG_ID_{platform.value.upper()}\",\n value=f'\"{test_definitions.PLATFORM_COMPILE_CONFIG_MAP[platform].id}\"')\n cmake_rules.append(cmake_rule)\n\n for test_config in test_definitions.TEST_CONFIGS:\n model = test_config.model\n runner_args = run_module_utils.build_run_flags_for_model(\n model=model,\n model_input_data=test_config.input_data) + test_config.extra_test_flags\n # TODO(#11136): Currently the DRIVER is a separate field in the CMake rule (\n # and has effect on test labels). Rules should be generated in another way\n # to avoid that. Generates the flags without the driver for now.\n runner_args += run_module_utils.build_run_flags_for_execution_config(\n test_config.execution_config, with_driver=False)\n cmake_rule = cmake_builder.rules.build_iree_benchmark_suite_module_test(\n target_name=test_config.name,\n model=f\"{model.id}_{model.name}\",\n driver=test_config.execution_config.driver.value,\n expected_output=test_config.expected_output,\n runner_args=runner_args,\n xfail_platforms=[\n platform.value for platform in test_config.xfail_platforms\n ],\n unsupported_platforms=[\n platform.value for platform in test_config.unsupported_platforms\n ])\n cmake_rules.append(cmake_rule)\n\n return cmake_rules", "title": "" }, { "docid": "3314476a8d96c5cb43b56ed75b718627", "score": "0.44169393", "text": "def test_output_checks(self):\n with self._isolate() as f:\n test_artifact = os.path.join(TEST_DATA_DIR, \"output_tests_tool.cwl\")\n test_command = [\n \"test\",\n \"--no-container\",\n \"--engine\", \"cwltool\",\n test_artifact,\n ]\n self._check_exit_code(test_command, exit_code=1)\n output_json_path = os.path.join(f, \"tool_test_output.json\")\n with open(output_json_path, \"r\") as f:\n output = json.load(f)\n assert \"tests\" in output\n tests = output[\"tests\"]\n # check out tests/data/output_tests_tool_test.yml\n expected_statuses = [\n \"success\",\n \"failure\",\n \"success\",\n \"success\",\n \"failure\",\n \"success\",\n \"failure\",\n \"success\",\n \"failure\",\n \"success\",\n \"failure\",\n ]\n for i in range(len(expected_statuses)):\n test_i = tests[i]\n data = test_i[\"data\"]\n expected_status = expected_statuses[i]\n assert data[\"status\"] == expected_status", "title": "" }, { "docid": "4fe142df74275f3677c0400e8f01df71", "score": "0.44075513", "text": "def test_contrib_tests_exist(self):\n\n contrib_tests_dir = os.path.dirname(os.path.realpath(contrib_tests.__file__))\n contrib_test_files = os.listdir(contrib_tests_dir)\n\n # Find all 
python files in the contrib dir and assert there's a corresponding test file\n for filename in os.listdir(self.CONTRIB_DIR):\n if filename.endswith(\".py\") and filename not in [\"__init__.py\"]:\n expected_test_file = f\"test_{filename}\"\n error_msg = (\n \"Every Contrib Rule must have associated tests. \"\n f\"Expected test file {os.path.join(contrib_tests_dir, expected_test_file)} not found.\"\n )\n self.assertIn(expected_test_file, contrib_test_files, error_msg)", "title": "" }, { "docid": "82e7b9b1036db0da616f01e0c154b0a7", "score": "0.44047672", "text": "def load_tests(loader, tests=None, patterns=None,excludes=None):\n classes = [cls for name, cls in inspect.getmembers(sys.modules[__name__],\n inspect.isclass)\n if issubclass(cls, unittest.TestCase)]\n\n suite = OrderedTestSuite()\n for test_class in classes:\n tests = loader.loadTestsFromTestCase(test_class)\n if patterns:\n tests = [test for test in tests if all(re.search(pattern, test.id()) for pattern in patterns)]\n if excludes:\n tests = [test for test in tests if not any(re.search(exclude_pattern,test.id()) for exclude_pattern in excludes)]\n suite.addTests(tests)\n return suite", "title": "" }, { "docid": "0ec3b90a66532279324e1ab53c1ece55", "score": "0.44033256", "text": "def matches_source_commit(self, builds):\n source_details = self.config.content.source.git\n alias = self.config.content.source.alias\n if source_details is Missing and alias is not Missing:\n source_details = self.runtime.group_config.sources[alias]\n if source_details is Missing:\n commit_hash = None # source is in distgit; just check if it has built\n else:\n _, commit_hash = self.runtime.detect_remote_source_branch(dict(source_details))\n return self._matches_commit(commit_hash, builds)", "title": "" } ]
45b3623ba61966a94edbc20b2bdf4fe8
Convenience method for merging replicates. Merge will extrapolate times to `number_per_run=1` and will not transfer any metadata, since it might differ between replicates.
[ { "docid": "4a14c333dc41744af2af5f93dce71346", "score": "0.44534874", "text": "def merge(measurements): # type: (Iterable[Measurement]) -> List[Measurement]\r\n grouped_measurements: DefaultDict[TaskSpec, List[Measurement]] = collections.defaultdict(list)\r\n for m in measurements:\r\n grouped_measurements[m.task_spec].append(m)\r\n\r\n def merge_group(task_spec: TaskSpec, group: List[Measurement]) -> Measurement:\r\n times: List[float] = []\r\n for m in group:\r\n # Different measurements could have different `number_per_run`,\r\n # so we call `.times` which normalizes the results.\r\n times.extend(m.times)\r\n\r\n return Measurement(\r\n number_per_run=1,\r\n raw_times=times,\r\n task_spec=task_spec,\r\n metadata=None,\r\n )\r\n\r\n return [merge_group(t, g) for t, g in grouped_measurements.items()]", "title": "" } ]
[ { "docid": "5c48bdd3d85f9a61a258cddfddf4a793", "score": "0.57867146", "text": "def merge_runs(ctx, args):\n from . import merge_impl\n\n merge_impl.main(args, ctx)", "title": "" }, { "docid": "9061efa9a9fc791c8dbbeddcbf57705d", "score": "0.57148755", "text": "def run_merge(self):\n log = self.app.timing_get_full_list()\n if len(log) < 2:\n return\n self.load(log[-1].code)\n if not log[-2].can_merge(log[-1]):\n raise QError('Cannot merge latest two work entries.')\n self.ticket.work_timing_merge()\n self.ticket.save()", "title": "" }, { "docid": "0ad5977298cb403442b92d8b3ad8f935", "score": "0.5713215", "text": "def perform_duplication():\n pass", "title": "" }, { "docid": "a05639b5310c22cb1bb702be5a2a9ac9", "score": "0.5424366", "text": "def merge_similar_runs():\n # create/ensure existence of log directories\n target_path_base = MERGED_LOGS_PATH\n source_path_base = \"./logs\"\n if not Path(source_path_base).is_dir():\n print(f\"Logfile directory {source_path_base} not found! Run experiments before using the evaluation tool\")\n raise SystemExit\n if not Path(target_path_base).is_dir():\n Path(target_path_base).mkdir()\n source_path_base = Path(source_path_base)\n # find all json files in ./logs\n log_files = list(source_path_base.glob('*.json'))\n log_map = defaultdict(lambda: [])\n # create list of logs for each unique settings key\n for idx, log in enumerate(log_files):\n with log.open() as json_file:\n json_data = json.load(json_file)\n key = (json_data[\"Strategy\"], json_data[\"Budget\"], json_data[\"Initial Split\"],\n json_data[\"Iterations\"], json_data[\"Batch Size\"], json_data[\"Target Layer\"], json_data[\"Model\"],\n json_data[\"Data Augmentation\"])\n log_map[key].append(json_data)\n # loop over each setting key\n for key, log_list in log_map.items():\n # initialize data containers\n acc = np.empty(shape=(0, key[3]+1))\n class_dist = np.empty(shape=(0, key[3]+1, 10))\n conf_mat = np.empty(shape=(0, key[3]+1, 10, 10))\n info_gain = np.empty(shape=(0, key[3]+1))\n # accumulate date over multiple runs\n for log in log_list:\n\n acc = np.append(acc, np.expand_dims(np.asarray(log[\"Accuracy\"]), axis=0), axis=0)\n class_dist = np.append(class_dist, np.expand_dims(np.asarray(log[\"Class Distribution\"]), axis=0), axis=0)\n conf_mat = np.append(conf_mat, np.expand_dims(np.asarray(log[\"Confusion Matrix\"]), axis=0), axis=0)\n info_gain = np.append(info_gain,\n np.asarray([scipy.stats.entropy(x, np.ones(10)/10)\n for x in np.asarray(log[\"Class Distribution\"])]).reshape((1, -1)), axis=0)\n # create mean and standard deviation info over runs\n acc_mean = np.mean(acc, axis=0)\n class_dist_mean = np.mean(class_dist, axis=0)\n acc_std = np.std(acc, axis=0)\n class_dist_std = np.std(class_dist, axis=0)\n conf_mat_mean = np.mean(conf_mat, axis=0)\n conf_mat_std = np.std(conf_mat, axis=0)\n info_gain_mean = np.mean(info_gain, axis=0)\n info_gain_std = np.std(info_gain, axis=0)\n # turn back to serializable format\n acc = acc.tolist()\n acc_mean = acc_mean.tolist()\n acc_std = acc_std.tolist()\n class_dist = class_dist.tolist()\n class_dist_mean = class_dist_mean.tolist()\n class_dist_std = class_dist_std.tolist()\n conf_mat = conf_mat.tolist()\n conf_mat_mean = conf_mat_mean.tolist()\n conf_mat_std = conf_mat_std.tolist()\n info_gain = info_gain.tolist()\n info_gain_mean = info_gain_mean.tolist()\n info_gain_std = info_gain_std.tolist()\n # create json structure\n merged_dict = {\"Strategy\": key[0], \"Budget\": key[1], \"Initial Split\": key[2],\n \"Iterations\": key[3], \"Batch 
Size\": key[4], \"Target Layer\": key[5], \"Model\": key[6],\n \"Data Augmentation\": key[7], \"Accuracy All\": acc,\n \"Accuracy Mean\": acc_mean, \"Accuracy Std\": acc_std,\n \"Class Distribution All\": class_dist, \"Class Distribution Mean\": class_dist_mean,\n \"Class Distribution Std\": class_dist_std, \"Confusion Matrix All\": conf_mat,\n \"Confusion Matrix Mean\": conf_mat_mean, \"Confusion Matrix Std\": conf_mat_std,\n \"Information Gain All\": info_gain, \"Information Gain Mean\": info_gain_mean,\n \"Information Gain Std\": info_gain_std}\n # generate a filename by settings\n target_file = Path(f\"{key[0]}_{key[1]}_{key[2]}_{key[3]}_{key[4]}_{key[5]}_{key[6]}\"\n f\"{'_data_augmentation' if key[7] else ''}.json\")\n # create json file\n with Path.joinpath(Path(target_path_base, target_file)).open('w', encoding='utf-8') as file:\n json.dump(merged_dict, file, ensure_ascii=False)", "title": "" }, { "docid": "1710fa9e72200696997e6931a86a9532", "score": "0.5357749", "text": "def _merge_data(self):\n self.logger_info(\"Merging into a single dataset.\")\n interim_path = self._get_data_name_folders_path(\"interim\")\n interim_agg_path = os.path.join(interim_path, self.aggregation_level)\n filenames = self._get_files_in_dir(interim_agg_path)\n list_data = [\n pd.read_csv(\n os.path.join(interim_agg_path, filename), low_memory=True\n ).infer_objects()\n for filename in tqdm(filenames, desc=\"Loading data\", leave=False)\n ]\n df_geo = list_data[0].copy()\n geo_col = [c for c in df_geo.columns if GEO_TAG in c]\n df_geo = df_geo[geo_col]\n df_geo.set_index(self._get_aggregation_level_id_col(), inplace=True)\n for df in list_data:\n df.set_index(self._get_aggregation_level_id_col(), inplace=True)\n df.drop(columns=geo_col, errors=\"ignore\", inplace=True)\n\n list_data.insert(0, df_geo)\n self.__processed_data = pd.concat(list_data, axis=1)\n self.__processed_data.reset_index(inplace=True)\n self._drop_duplicated_col_from_merge()", "title": "" }, { "docid": "c052211611bc75a335ca4d9367e37657", "score": "0.5354194", "text": "def split_count_matrix_by_replicate(\n count_matrix_file,\n rep1_file,\n rep2_file,\n pooled_file):\n assert \".mat\" in count_matrix_file\n \n # rep1\n pull_single_replicate(count_matrix_file, rep1_file, rep=\"b1\")\n\n # rep2\n pull_single_replicate(count_matrix_file, rep2_file, rep=\"b2\")\n\n # pooled\n pool_replicates(count_matrix_file, pooled_file)\n\n return None", "title": "" }, { "docid": "b977569157910b99f450b563a177fa3e", "score": "0.5276833", "text": "def test_pooling(self):\n directory = TempDirectory()\n dna = \"AAAAAAAAAA\"\n original_file_path = write_fixed_dna_fasta(dna, directory.path, \"original.fasta\")\n args = make_default_args(original_file_path)\n args.random_seed = 1\n args.num_sims = 2\n args.subset_len = 1\n\n args.num_subs = 1\n args.num_insertions = 0\n args.num_deletions = 0\n snpmutator.run_from_args(args)\n mutated_seq_record1 = read_fasta_seq_record(\"original_mutated_1.fasta\")\n mutated_seq_record2 = read_fasta_seq_record(\"original_mutated_2.fasta\")\n self.assertEqual(str(mutated_seq_record1.seq), 'AATAAAAAAA', \"Pooling SNP replicate 1 test failed, dna=%s mutated seq1=%s\" % (dna, str(mutated_seq_record1.seq)))\n self.assertEqual(str(mutated_seq_record2.seq), 'AACAAAAAAA', \"Pooling SNP replicate 2 test failed, dna=%s mutated seq2=%s\" % (dna, str(mutated_seq_record2.seq)))\n\n args.num_subs = 0\n args.num_insertions = 1\n args.num_deletions = 0\n snpmutator.run_from_args(args)\n mutated_seq_record1 = 
read_fasta_seq_record(\"original_mutated_1.fasta\")\n mutated_seq_record2 = read_fasta_seq_record(\"original_mutated_2.fasta\")\n self.assertEqual(str(mutated_seq_record1.seq), 'AAAGAAAAAAA', \"Pooling INS replicate 1 test failed, dna=%s mutated seq=%s\" % (dna, str(mutated_seq_record1.seq)))\n self.assertEqual(str(mutated_seq_record2.seq), 'AAACAAAAAAA', \"Pooling INS replicate 2 test failed, dna=%s mutated seq=%s\" % (dna, str(mutated_seq_record2.seq)))\n\n args.num_subs = 0\n args.num_insertions = 0\n args.num_deletions = 1\n snpmutator.run_from_args(args)\n mutated_seq_record1 = read_fasta_seq_record(\"original_mutated_1.fasta\")\n mutated_seq_record2 = read_fasta_seq_record(\"original_mutated_2.fasta\")\n self.assertEqual(str(mutated_seq_record1.seq), 'AAAAAAAAA', \"Pooling DEL replicate 1 test failed, dna=%s mutated seq=%s\" % (dna, str(mutated_seq_record1.seq)))\n self.assertEqual(str(mutated_seq_record2.seq), 'AAAAAAAAA', \"Pooling DEL replicate 2 test failed, dna=%s mutated seq=%s\" % (dna, str(mutated_seq_record1.seq)))", "title": "" }, { "docid": "b200fe13341ba077e267fad73d0175b6", "score": "0.52248764", "text": "def merge(self):\n return", "title": "" }, { "docid": "3d4f95fb2a70e17f50dcfd2b4f066657", "score": "0.52225995", "text": "def _merge_one_build(cls, aggregated_json, incremental_json,\n incremental_index, num_runs):\n\n for key in incremental_json.keys():\n # Merge json results except \"tests\" properties (results, times etc).\n # \"tests\" properties will be handled separately.\n if key == JSON_RESULTS_TESTS:\n continue\n\n if key in aggregated_json:\n aggregated_json[key].insert(\n 0, incremental_json[key][incremental_index])\n aggregated_json[key] = \\\n aggregated_json[key][:num_runs]\n else:\n aggregated_json[key] = incremental_json[key]", "title": "" }, { "docid": "8b6f9b1b71549d22fac6badd78f72f2e", "score": "0.5194163", "text": "def merge_time(self):\n\n self.run()\n\n if type(self.current) is not list:\n warnings.warn(message=\"There is only file in the dataset. No need to merge!\")\n return None\n\n cdo_command = \"cdo --sortname -mergetime\"\n\n run_this(cdo_command, self, output=\"one\")\n\n if session_info[\"lazy\"]:\n self._merged = True\n\n if cdo_version() in [\"1.9.3\"]:\n self.run()", "title": "" }, { "docid": "dda289223d7dc4df8e5a204efab72052", "score": "0.5129586", "text": "def merge(*args) -> None:", "title": "" }, { "docid": "a0acfb75d734633227caa5bd5c039bb0", "score": "0.5111439", "text": "def merge(self, int: int) -> None:\n ...", "title": "" }, { "docid": "bc67dcdeeb9a218c5c5a92c979ec183c", "score": "0.5107162", "text": "def merge(self, other):\n ...", "title": "" }, { "docid": "3458d3c93a93844b1aa13f482a9bfee5", "score": "0.50817776", "text": "def merge(self, other: 'Saver'):\n # No merging is supported for ONNX. 
self.path must be unique\n raise RuntimeError('merging not supported for ONNX exporter')", "title": "" }, { "docid": "1d569a1a685fe7d4a542d185121446c1", "score": "0.5050246", "text": "def merge_duplicates(self):\n if len(self.entries) == 0:\n self.log.error(\"WARNING: `entries` is empty, loading stubs\")\n if self.args.update:\n self.log.warning(\n \"No sources changed, entry files unchanged in update.\"\n \" Skipping merge.\")\n return\n self.entries = self.load_stubs()\n\n task_str = self.get_current_task_str()\n\n keys = list(sorted(self.entries.keys()))\n n1 = 0\n mainpbar = tqdm(total=len(keys), desc=task_str)\n while n1 < len(keys):\n name1 = keys[n1]\n if name1 not in self.entries:\n self.log.info(\"Entry for {} not found, likely already \"\n \"deleted in merging process.\".format(name1))\n n1 = n1 + 1\n mainpbar.update(1)\n continue\n allnames1 = set(self.entries[name1].get_aliases() +\n self.entries[name1].extra_aliases())\n\n # Search all later names\n for name2 in keys[n1 + 1:]:\n if name1 == name2:\n continue\n if name1 not in self.entries:\n self.log.info(\"Entry for {} not found, likely already \"\n \"deleted in merging process.\".format(name1))\n continue\n if name2 not in self.entries:\n self.log.info(\"Entry for {} not found, likely already \"\n \"deleted in merging process.\".format(name2))\n continue\n\n allnames2 = set(self.entries[name2].get_aliases() +\n self.entries[name2].extra_aliases())\n\n # If there are any common names or aliases, merge\n if len(allnames1 & allnames2):\n self.log.warning(\n \"Found two entries with common aliases \"\n \"('{}' and '{}'), merging.\".format(name1, name2))\n\n load1 = self.proto.init_from_file(\n self, name=name1)\n load2 = self.proto.init_from_file(\n self, name=name2)\n if load1 is not None and load2 is not None:\n # Delete old files\n self._delete_entry_file(entry=load1)\n self._delete_entry_file(entry=load2)\n self.entries[name1] = load1\n self.entries[name2] = load2\n priority1 = 0\n priority2 = 0\n for an in allnames1:\n if an.startswith(self.entries[name1]\n .priority_prefixes()):\n priority1 += 1\n for an in allnames2:\n if an.startswith(self.entries[name2]\n .priority_prefixes()):\n priority2 += 1\n\n if priority1 > priority2:\n self.copy_to_entry_in_catalog(name2, name1)\n keys.append(name1)\n del self.entries[name2]\n else:\n self.copy_to_entry_in_catalog(name1, name2)\n keys.append(name2)\n del self.entries[name1]\n else:\n self.log.warning('Duplicate already deleted')\n\n # if len(self.entries) != 1:\n # self.log.error(\n # \"WARNING: len(entries) = {}, expected 1. 
\"\n # \"Still journaling...\".format(len(self.entries)))\n self.journal_entries()\n\n if self.args.travis and n1 > self.TRAVIS_QUERY_LIMIT:\n break\n n1 = n1 + 1\n mainpbar.update(1)\n mainpbar.close()", "title": "" }, { "docid": "9ea82f635a4215dfa8d3cfef68c3b923", "score": "0.5047663", "text": "def merge_neuron_sets_repeatedly(new_neuron_set_1,\n new_neuron_set_2,\n alignment_min_threshold,\n overlap_min_threshold,\n **parameters):\n\n if new_neuron_set_1.size:\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_1\"] = new_neuron_set_1\n\n if new_neuron_set_2.size:\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_2\"] = new_neuron_set_2\n\n assert (new_neuron_set_1.dtype == new_neuron_set_2.dtype)\n\n new_neuron_set = numpy.hstack([new_neuron_set_1, new_neuron_set_2])\n\n if len(new_neuron_set_1) and len(new_neuron_set_2):\n logger.debug(\"Have 2 sets of neurons to merge.\")\n elif len(new_neuron_set_1) or len(new_neuron_set_2):\n logger.debug(\n \"Have 1 set of neurons to merge. Only the first set has neurons.\"\n )\n else:\n logger.debug(\"Have 0 sets of neurons to merge.\")\n\n original_new_neuron_set_size = 0\n\n while (new_neuron_set.size != 1) and \\\n (original_new_neuron_set_size != new_neuron_set.size):\n original_new_neuron_set_size = new_neuron_set.size\n\n new_neuron_set_flattened_image = xnumpy.array_to_matrix(\n new_neuron_set[\"image\"]\n )\n\n new_neuron_set_flattened_mask = xnumpy.array_to_matrix(\n new_neuron_set[\"mask\"]\n )\n\n # Measure the normalized dot product between any two neurons (i.e.\n # related to the angle of separation)\n new_neuron_set_angle = xnumpy.pair_dot_product_normalized(\n new_neuron_set_flattened_image,\n ord=2\n )\n new_neuron_set_angle = numpy.triu(new_neuron_set_angle, k=1)\n\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_angle\"] = new_neuron_set_angle\n\n # Measure the distance between the two masks\n # (note distance relative to the total mask content of each mask\n # individually)\n new_neuron_set_masks_overlaid = xnumpy.pair_dot_product_partially_normalized(\n new_neuron_set_flattened_mask,\n ord=1,\n float_type=numpy.float32\n )\n numpy.fill_diagonal(new_neuron_set_masks_overlaid, 0)\n\n new_neuron_set_masks_overlaid_1 = new_neuron_set_masks_overlaid\n new_neuron_set_masks_overlaid_2 = new_neuron_set_masks_overlaid.T\n\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_masks_overlaid_1\"] = new_neuron_set_masks_overlaid_1\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_masks_overlaid_2\"] = new_neuron_set_masks_overlaid_2\n\n # Now that the three measures for the correlation method have been\n # found, we want to know, which are the best correlated neurons between\n # the two sets using these measures. 
This done to find the neuron in\n # new_neuron_set_1 that best matches each neuron in new_neuron_set_2.\n new_neuron_set_angle_all_optimal_i = new_neuron_set_angle.argmax(\n axis=0\n )\n new_neuron_set_masks_overlaid_1_all_optimal_i = new_neuron_set_masks_overlaid_1.argmax(\n axis=0\n )\n new_neuron_set_masks_overlaid_2_all_optimal_i = new_neuron_set_masks_overlaid_2.argmax(\n axis=0\n )\n\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_angle_all_optimal_i\"] = new_neuron_set_angle_all_optimal_i\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_masks_overlaid_1_all_optimal_i\"] = \\\n new_neuron_set_masks_overlaid_1_all_optimal_i\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_masks_overlaid_2_all_optimal_i\"] = \\\n new_neuron_set_masks_overlaid_2_all_optimal_i\n\n # Get all the j indices\n new_neuron_set_all_j = numpy.arange(len(new_neuron_set))\n\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_all_j\"] = new_neuron_set_all_j\n\n # Get the maximum corresponding to the best matched pairs from before\n new_neuron_set_angle_maxes = new_neuron_set_angle[\n (new_neuron_set_angle_all_optimal_i, new_neuron_set_all_j,)\n ]\n new_neuron_set_masks_overlaid_1_maxes = new_neuron_set_masks_overlaid_1[\n (new_neuron_set_masks_overlaid_1_all_optimal_i, new_neuron_set_all_j,)\n ]\n new_neuron_set_masks_overlaid_2_maxes = new_neuron_set_masks_overlaid_2[\n (new_neuron_set_masks_overlaid_2_all_optimal_i, new_neuron_set_all_j,)\n ]\n\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_angle_maxes\"] = new_neuron_set_angle_maxes\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_masks_overlaid_1_maxes\"] = new_neuron_set_masks_overlaid_1_maxes\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_masks_overlaid_2_maxes\"] = new_neuron_set_masks_overlaid_2_maxes\n\n # Store a list of the optimal neurons in the existing set to fuse with\n # (by default set all values to -1)\n new_neuron_set_all_optimal_i = numpy.zeros(\n (len(new_neuron_set),), dtype=int\n )\n new_neuron_set_all_optimal_i -= 1\n\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_all_optimal_i_0\"] = new_neuron_set_all_optimal_i\n\n # Create the masks to use for getting the proper indices\n new_neuron_set_angle_maxes_significant = numpy.zeros(\n (len(new_neuron_set),), dtype=bool\n )\n new_neuron_set_masks_overlaid_1_maxes_significant = numpy.zeros(\n (len(new_neuron_set),), dtype=bool\n )\n new_neuron_set_masks_overlaid_2_maxes_significant = numpy.zeros(\n (len(new_neuron_set),), dtype=bool\n )\n\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_angle_maxes_significant_0\"] = new_neuron_set_angle_maxes_significant\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_masks_overlaid_1_maxes_significant_0\"] = \\\n new_neuron_set_masks_overlaid_1_maxes_significant\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_masks_overlaid_2_maxes_significant_0\"] = \\\n new_neuron_set_masks_overlaid_2_maxes_significant\n\n already_matched = numpy.zeros((len(new_neuron_set),), dtype=bool)\n\n # Get masks that indicate which measurements have the best matching\n # neuron\n new_neuron_set_angle_maxes_significant[new_neuron_set_angle_maxes > alignment_min_threshold] = True\n\n already_matched |= new_neuron_set_angle_maxes_significant\n 
already_matched[\n new_neuron_set_angle_all_optimal_i[new_neuron_set_angle_maxes_significant]\n ] |= True\n\n new_neuron_set_masks_overlaid_1_maxes_significant[\n ~already_matched &\n (new_neuron_set_masks_overlaid_1_maxes > overlap_min_threshold)\n ] = True\n\n already_matched |= new_neuron_set_masks_overlaid_1_maxes_significant\n already_matched[\n new_neuron_set_masks_overlaid_1_all_optimal_i[new_neuron_set_masks_overlaid_1_maxes_significant]\n ] |= True\n\n new_neuron_set_masks_overlaid_2_maxes_significant[\n ~already_matched &\n (new_neuron_set_masks_overlaid_2_maxes_significant > overlap_min_threshold)\n ] = True\n\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_angle_maxes_significant_1\"] = new_neuron_set_angle_maxes_significant\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_masks_overlaid_1_maxes_significant_1\"] = new_neuron_set_masks_overlaid_1_maxes_significant\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_masks_overlaid_2_maxes_significant_1\"] = new_neuron_set_masks_overlaid_2_maxes_significant\n\n # Using the masks construct the best match neuron index for each case.\n new_neuron_set_all_optimal_i[new_neuron_set_angle_maxes_significant] = new_neuron_set_angle_all_optimal_i[new_neuron_set_angle_maxes_significant]\n\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_all_optimal_i_1\"] = new_neuron_set_all_optimal_i\n\n new_neuron_set_all_optimal_i[new_neuron_set_masks_overlaid_1_maxes_significant] = new_neuron_set_masks_overlaid_1_all_optimal_i[new_neuron_set_masks_overlaid_1_maxes_significant]\n\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_all_optimal_i_2\"] = new_neuron_set_all_optimal_i\n\n new_neuron_set_all_optimal_i[new_neuron_set_masks_overlaid_2_maxes_significant] = new_neuron_set_masks_overlaid_2_all_optimal_i[new_neuron_set_masks_overlaid_2_maxes_significant]\n\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_all_optimal_i_3\"] = new_neuron_set_all_optimal_i\n\n\n # Separate all the best matches that were found from those that were\n # not. 
Also, remove the -1 as they have served their purpose.\n new_neuron_set_all_optimal_i_found = (\n new_neuron_set_all_optimal_i != -1\n )\n new_neuron_set_all_j_fuse = new_neuron_set_all_j[new_neuron_set_all_optimal_i_found]\n new_neuron_set_all_j_append = new_neuron_set_all_j[~new_neuron_set_all_optimal_i_found]\n new_neuron_set_all_optimal_i = new_neuron_set_all_optimal_i[new_neuron_set_all_optimal_i_found]\n\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_all_optimal_i_found\"] = new_neuron_set_all_optimal_i_found\n\n if new_neuron_set_all_j_fuse.size:\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_all_j_fuse\"] = new_neuron_set_all_j_fuse\n\n if new_neuron_set_all_j_append.size:\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_all_j_append\"] = new_neuron_set_all_j_append\n\n if new_neuron_set_all_optimal_i.size:\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_neuron_set_all_optimal_i_3\"] = new_neuron_set_all_optimal_i\n\n new_neuron_set_kept = numpy.ones(new_neuron_set.shape, dtype=bool)\n\n # Fuse all the neurons that can be from new_neuron_set_2 to the\n # new_neuron_set (composed of new_neuron_set_1)\n for i, j in iters.izip(\n new_neuron_set_all_optimal_i, new_neuron_set_all_j_fuse\n ):\n #fuse_neurons.recorders.array_debug_recorder = hdf5.record.HDF5EnumeratedArrayRecorder(\n # merge_neuron_sets_repeatedly.recorders.array_debug_recorder.hdf5_handle\n #)\n fuse_neurons.recorders.array_debug_recorder = merge_neuron_sets_repeatedly.recorders.array_debug_recorder\n\n new_neuron_set[i] = fuse_neurons(\n new_neuron_set[i],\n new_neuron_set[j],\n **parameters[\"fuse_neurons\"]\n )\n\n new_neuron_set_kept[j] = False\n\n new_neuron_set = new_neuron_set[new_neuron_set_kept]\n\n logger.debug(\n \"Fused \\\"\" + repr(len(new_neuron_set_all_j_fuse)) +\n \"\\\" neurons to the existing set.\"\n )\n\n if new_neuron_set.size:\n merge_neuron_sets_repeatedly.recorders.array_debug_recorder[\"new_merged_neurons_set\"] = new_neuron_set\n\n return(new_neuron_set)", "title": "" }, { "docid": "b35b6fb234bc2d1f74e097de01b3a888", "score": "0.50226676", "text": "def merge(alist, overwrite_=True):\n def verify(): \n n_rows = 0 \n row_names = []\n for i, p_obj in enumerate(alist): \n table = p_obj.table\n if i == 0: \n n_rows = table.shape[0] \n row_names = table.index \n else: \n assert table.shape[0] == n_rows\n assert all(row_names == table.index)\n return row_names\n import copy\n\n # emtpy list\n if len(alist) == 0:\n # do nothing \n print('(merge) No input data. 
Exiting ...')\n return PerformanceMetrics() # return a dummy object \n\n # design: assume that each df references different methods\n row_names = verify()\n\n # todo: do it according to the class protocol\n\n # create a new table and merge all the algorithm metrics (columns)\n p_new = PerformanceMetrics() # DataFrame()\n records = {} # \n for i, p_obj in enumerate(alist): \n table = p_obj.table\n for col in table.columns: \n if overwrite_ or (not col in p_new.table.columns): # add only if not existed\n p_new.table[col] = table[col] # if not incr_update, then update no matter what\n p_new.records.update(p_obj.records) # but this overwrites the key (and its value)\n \n p_new.table.index = row_names\n\n ## this operation does not merge the following base attributes\n # p_new.op \n # p_new.records \n \n return p_new", "title": "" }, { "docid": "65920feb16e7dd35a5a97f43086353c8", "score": "0.50209785", "text": "def set_number_of_identical_mdruns (multiples) :\n self.multiples = multiples", "title": "" }, { "docid": "999aa9d615fd8b254d41b9d03ed3f014", "score": "0.4987291", "text": "def duplicate_batch_id_csv(year, month, day, start, num):\r\n\r\n for i in range(start, start + num):\r\n with open(\"MED_DATA_\" + df.year_format(year,month,day) + df.time_format(i) + \".csv\", \"w\", newline=\"\") as csvfile:\r\n mywriter = csv.writer(csvfile, delimiter=\",\", quotechar='\"', quoting=csv.QUOTE_NONNUMERIC)\r\n\r\n header = [\"batch_id\", \"timestamp\", \"reading1\", \"reading2\", \"reading3\", \"reading4\", \"reading5\", \"reading6\",\r\n \"reading7\", \"reading8\", \"reading9\", \"reading10\"]\r\n choices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\r\n duplicate_id1 = ran.randint(1, 10)\r\n choices.remove(duplicate_id1)\r\n duplicate_index = ran.randint(0, 8)\r\n duplicate_id2 = choices[duplicate_index]\r\n dup1 = min(duplicate_id1, duplicate_id2)\r\n dup2 = max(duplicate_id1, duplicate_id2)\r\n\r\n mywriter.writerow(header)\r\n for n in range(0, 10):\r\n row = [ran.randint(1, 199), \"00:00:00\"]\r\n if n == dup1:\r\n dup_id = row[0]\r\n elif n == dup2:\r\n row[0] = dup_id\r\n\r\n for m in range(0, 10):\r\n row.append(round(ran.randint(0, 9900)/1000, 3))\r\n\r\n mywriter.writerow(row)", "title": "" }, { "docid": "b2c0b631254d856eb8d4da4735daf4cb", "score": "0.49846694", "text": "def merge(self, other):\n self.documents = self.documents + other.documents\n frames = [self.metadata, other.metadata]\n self._metadata = pd.concat(frames, ignore_index=True)\n self.dic.merge_with(other.dic)\n self._reset_index()", "title": "" }, { "docid": "001f95a0b1a9a612e1148703f02904c8", "score": "0.49611795", "text": "def merge(cls, to_merge):\n samples = to_merge[0].samples\n inst_class = to_merge[0].__class__\n \n for o in to_merge[1:]:\n if not isinstance(o, inst_class):\n raise ValueError(\"Operation not permitted for other object of type %s.\" % type(other))\n #fi\n \n if not all([ p[0] == p[1] for p in zip(samples, o.samples) ]):\n raise ValueError(\"The VCF objects do not describe the same samples!!!\")\n #fi\n #efor\n\n records = []\n for o in to_merge:\n records.extend(o.records)\n #efor\n \n records_obj = VCF2_records(records, samples)\n filt = VCF_filter(\"MERGE\", [\"%d objects\" % len(to_merge)])\n return inst_class(vcf_object=records_obj, filter_stack=[filt])", "title": "" }, { "docid": "2c746a5f9ecbb2cab7fad2d24931cc1c", "score": "0.4942957", "text": "def set_is_replicated(self, should_replicate):\n return None", "title": "" }, { "docid": "4f51416714f61d27c22e8b87545ee2af", "score": "0.4939774", "text": "def 
merge(self, ts: 'Timestamp') -> None:\n\n for (id, value) in ts.replicas.items():\n\n if id not in self.replicas:\n\n self.replicas[id] = value\n\n elif value > self.replicas[id]:\n\n self.replicas[id] = ts.replicas[id]", "title": "" }, { "docid": "ce677eb801cc08a5bb7de1b7bfa3f118", "score": "0.4930495", "text": "def merge(self) -> bool:\n ...", "title": "" }, { "docid": "563dda42dba3ce8d6c6418159aaac5fe", "score": "0.48937684", "text": "def merge(self, other):\n raise NotImplementedError()", "title": "" }, { "docid": "a2dac1e5154a62bf2037a469e3056dc9", "score": "0.4890549", "text": "def merged(cls, benches: ['MiniNASTabularBenchmark'], merge_fun=np.mean):\n b0 = benches[0]\n all_results = []\n for bench in benches:\n all_results.extend(bench.results.values())\n merged_results = MiniResult.merge_result_list(all_results, merge_fun=merge_fun, ensure_same_size=True)\n\n # update indices and reference dicts\n results, arch_to_idx, tuple_to_str, tuple_to_idx = {}, {}, {}, {}\n for i, r in enumerate(merged_results):\n r.arch_index = i\n results[i] = r\n arch_to_idx[r.arch_str] = i\n tuple_to_str[r.arch_tuple] = r.arch_index\n tuple_to_idx[r.arch_tuple] = i\n\n return cls(default_data_set=b0.default_data_set, default_result_type=b0.default_result_type,\n bench_name=\"Merged(%s)\" % \", \".join([b.bench_name for b in benches]),\n bench_description=b0.bench_description,\n value_space=b0.value_space, results=results, arch_to_idx=arch_to_idx,\n tuple_to_str=tuple_to_str, tuple_to_idx=tuple_to_idx)", "title": "" }, { "docid": "3858bc7d7e08ccef18f8983ad6d9ff0e", "score": "0.48817542", "text": "def merge_duplicates(data):\n prev = data[0]\n mark = []\n for entry in data[1:]:\n if check_duplicate(prev, entry):\n prev[\"metadata\"][\"played_duration\"] = (\n prev[\"metadata\"][\"played_duration\"]\n + entry[\"metadata\"][\"played_duration\"]\n )\n # mark entry for removal\n mark.append(entry)\n else:\n prev = entry\n # remove marked entries\n for entry in mark:\n data.remove(entry)\n return data", "title": "" }, { "docid": "6fd5d87c0514013bbbf13e04e34b63d3", "score": "0.48810023", "text": "def repeated_sim(n_runs=5, aggregate=True, **kwargs):\n sim_func = create_experiment_func(**kwargs)\n avg_results = rerun_experiment(sim_func, n_runs=n_runs, aggregate=aggregate)\n return avg_results", "title": "" }, { "docid": "bd35562109fc379b190cce147c804957", "score": "0.48596787", "text": "def merged(self, *others):\n species_dict = {}; share_dict = {}\n for deme in (self,)+others:\n for i in xrange(len(deme.species)):\n species = deme.species[i]\n name = str(species)\n if species_dict.has_key(name):\n share_dict[name] += deme.distribution[i]\n else:\n species_dict[name] = species\n share_dict[name] = deme.distribution[i]\n species = [s for s in self.setup.strategyList \\\n if species_dict.has_key(s.name)] # keep species order!\n dist = array([share_dict[s.name] for s in species])\n return self.new(species, dist)", "title": "" }, { "docid": "f273f1b44b0b86431a9af77cc8e20286", "score": "0.48587766", "text": "def merge_outcomes(self, label_merge_dict, recordZeroCnts=True):\n\n #static_self = self.copy()\n #static_self.done_adding_data() # makes static, so we can assume this below\n\n # strings -> tuple outcome labels in keys and values of label_merge_dict\n to_outcome = _ld.OutcomeLabelDict.to_outcome # shorthand\n label_merge_dict = {to_outcome(key): list(map(to_outcome, val))\n for key, val in label_merge_dict.items()}\n\n merge_dict_old_outcomes = [outcome for sublist in label_merge_dict.values() for outcome in 
sublist]\n if not set(self.get_outcome_labels()).issubset(merge_dict_old_outcomes):\n raise ValueError(\n \"`label_merge_dict` must account for all the outcomes in original dataset.\"\n \" It's missing directives for:\\n%s\" %\n '\\n'.join(set(map(str, self.get_outcome_labels())) - set(map(str, merge_dict_old_outcomes)))\n )\n\n new_outcomes = sorted(list(label_merge_dict.keys()))\n new_outcome_indices = _OrderedDict([(ol, i) for i, ol in enumerate(new_outcomes)])\n nNewOutcomes = len(new_outcomes)\n\n #Count the number of time steps so we allocate enough space\n nSteps = 0\n for key, dsrow in self.items():\n cur_t = None\n for t in dsrow.time:\n if t != cur_t:\n nSteps += 1\n cur_t = t\n\n #idea is that we create oliData, timeData, repData, and circuitIndices for the\n # merged dataset rather than looping over insertion, as this is faster\n oliData = _np.empty(nSteps * nNewOutcomes, self.oliType)\n repData = _np.empty(nSteps * nNewOutcomes, self.repType)\n timeData = _np.empty(nSteps * nNewOutcomes, self.timeType)\n\n oli_map = {} # maps old outcome label indices to new ones\n for new_outcome, old_outcome_list in label_merge_dict.items():\n new_index = new_outcome_indices[new_outcome]\n for old_outcome in old_outcome_list:\n oli_map[self.olIndex[old_outcome]] = new_index\n\n #Future - when recordZeroCnts=False these may not need to be so large\n new_olis = _np.array(range(nNewOutcomes), _np.int64)\n new_cnts = _np.zeros(nNewOutcomes, self.repType)\n\n if recordZeroCnts:\n def add_cnts(t, cnts, offset): # cnts is an array here\n new_cnts[:] = 0\n for nonzero_oli, cnt in cnts.items():\n new_cnts[nonzero_oli] = cnt\n timeData[offset:offset + nNewOutcomes] = t\n oliData[offset:offset + nNewOutcomes] = new_olis\n repData[offset:offset + nNewOutcomes] = new_cnts # a length-nNewOutcomes array\n return nNewOutcomes\n\n else:\n def add_cnts(t, cnts, offset): # cnts is a dict here\n nNewCnts = len(cnts)\n #new_olis = _np.empty(nNewCnts, _np.int64)\n #new_cnts = _np.empty(nNewCnts, self.repType)\n for ii, (nonzero_oli, cnt) in enumerate(cnts.items()):\n new_olis[ii] = nonzero_oli\n new_cnts[ii] = cnt\n timeData[offset:offset + nNewCnts] = t\n oliData[offset:offset + nNewCnts] = new_olis[0:nNewCnts]\n repData[offset:offset + nNewCnts] = new_cnts[0:nNewCnts]\n return nNewCnts # return the number of added counts\n\n k = 0 # beginning of current circuit data in 1D arrays: oliData, timeData, repData\n circuitIndices = _OrderedDict()\n for key, dsrow in self.items():\n\n last_t = dsrow.time[0]\n\n #Below code is faster version of: mapped_oli = [oli_map[x] for x in dsrow.oli]\n mapped_oli = dsrow.oli.copy()\n for from_oli, to_oli in oli_map.items():\n mapped_oli[dsrow.oli == from_oli] = to_oli\n\n reps = _np.ones(len(dsrow.time), self.timeType) if (self.repData is None) else dsrow.reps\n cnts = _DefaultDict(lambda: 0)\n\n i = 0 # offset to current timeslice\n for oli, t, reps in zip(mapped_oli, dsrow.time, reps):\n if t != last_t:\n i += add_cnts(last_t, cnts, k + i)\n last_t = t; cnts.clear()\n cnts[oli] += reps\n if len(cnts) > 0:\n i += add_cnts(last_t, cnts, k + i)\n\n circuitIndices[key] = slice(k, k + i)\n k += i\n\n merged_dataset = DataSet(oliData[0:k], timeData[0:k], repData[0:k], circuitIndices=circuitIndices,\n outcomeLabelIndices=new_outcome_indices, bStatic=True)\n return merged_dataset", "title": "" }, { "docid": "81e3ed6c391f2624f7af2662893a0c33", "score": "0.4857831", "text": "def _merge_parallel_results(self, key, max_samples=None):\n numbers = [name.split('.')[-2].split('_')[-1]\n for 
name in os.listdir(self.directory)\n if 'mem_{}'.format(key) in name]\n mem = np.zeros((0, len(self.train_set)))\n n_sources = len(self.train_set)\n idxs = np.zeros((0, n_sources), int)\n vals = np.zeros(len(self.train_set))\n counter = 0.\n for number in numbers:\n if max_samples is not None:\n if counter > max_samples:\n break\n samples_dir = os.path.join(\n self.directory,\n 'mem_{}_{}.pkl'.format(key, number)\n )\n print(samples_dir)\n dic = pkl.load(open(samples_dir, 'rb'))\n if not len(dic['mem_{}'.format(key)]):\n continue\n mem = np.concatenate([mem, dic['mem_{}'.format(key)]])\n idxs = np.concatenate([idxs, dic['idxs_{}'.format(key)]])\n counter += len(dic['mem_{}'.format(key)])\n vals *= (counter - len(dic['mem_{}'.format(key)])) / counter\n vals += len(dic['mem_{}'.format(key)]) / counter * np.mean(mem, 0)\n os.remove(samples_dir)\n merged_dir = os.path.join(\n self.directory,\n 'mem_{}_0000.pkl'.format(key)\n )\n pkl.dump({'mem_{}'.format(key): mem, 'idxs_{}'.format(key): idxs},\n open(merged_dir, 'wb'))\n return mem, idxs, vals", "title": "" }, { "docid": "ac8123df1b31d2e4b41c2a4837991507", "score": "0.4849359", "text": "def merge(self, other):\n e1 = self.epoch\n e2 = other.epoch if isinstance(other, PrecessingCoordinates) else None\n precess, e1, e2 = self.precession_required(e1, e2)\n\n self.epoch = e1 # Update the epoch if required\n if not precess:\n super().merge(other)\n return\n\n if e1.singular: # Precess epoch 2 onto epoch 1\n precessed = self.copy()\n self.convert(other, precessed)\n super().merge(precessed)\n return\n\n # If epoch 1 is an array and epoch 2 is not\n if e2.singular:\n # Need to convert all epoch2 times to an array.\n e2 = e2.copy()\n e2.equinox = Time(np.full(other.size, e2.equinox.value),\n scale=e2.equinox.scale,\n format=e2.equinox.format)\n\n size_1 = self.size\n size_2 = other.size\n super().merge(other)\n\n new_time_values = np.empty(size_1 + size_2, dtype=float)\n\n t_format = 'byear' if isinstance(e1, BesselianEpoch) else 'jyear'\n new_time_values[:size_1] = getattr(e1.equinox, t_format)\n new_time_values[size_1:] = getattr(e2.equinox, t_format)\n self.epoch.equinox = Time(new_time_values, scale=e1.equinox.scale,\n format=t_format)", "title": "" }, { "docid": "c24e20766b31078d7d240b40b639879f", "score": "0.48343483", "text": "def pool_replicates(mat_file, out_file):\n assert \".mat\" in mat_file\n data = pd.read_table(mat_file, index_col=0)\n\n samples = sorted(\n list(set([colname.split(\"_\")[0]\n for colname in data.columns])))\n for sample in samples:\n data[sample] = data[\"{}_b1\".format(sample)] + data[\"{}_b2\".format(sample)]\n data_pooled = data[samples].astype(int)\n data_pooled.to_csv(out_file, sep='\\t', compression=\"gzip\")\n\n return None", "title": "" }, { "docid": "aa3044db089c428e3471b5af5785d07b", "score": "0.4816423", "text": "def plan_merges(merge_pairs, body_sizes, size_threshold=1e7):\n assert isinstance(merge_pairs, np.ndarray)\n assert isinstance(body_sizes, pd.Series)\n assert body_sizes.index.name == 'body'\n assert body_sizes.name == 'size'\n simulated_sizes = body_sizes.to_dict()\n\n merge_pairs.sort(axis=1)\n merge_pairs = pd.DataFrame(merge_pairs, columns=['body1', 'body2'])\n merge_pairs = merge_pairs.drop_duplicates(['body1', 'body2']).copy()\n\n merges = defaultdict(lambda: [])\n remaps = {}\n merge_indexes = []\n\n for i, b1, b2 in tqdm_proxy(merge_pairs[['body1', 'body2']].itertuples(), total=len(merge_pairs)):\n b1 = remaps.get(b1, b1)\n b2 = remaps.get(b2, b2)\n if b1 == b2:\n continue\n s1, s2 = 
simulated_sizes[b1], simulated_sizes[b2]\n if s1 < size_threshold or s2 < size_threshold:\n merge_indexes.append(i)\n if s1 > s2:\n # Merge body2 into body 1\n remaps[b2] = b1\n for b in merges[b2]:\n remaps[b] = b1\n merges[b1].extend([b2, *merges[b2]])\n del merges[b2]\n simulated_sizes[b1] += s2\n else:\n # Merge body1 into body2\n remaps[b1] = b2\n for b in merges[b1]:\n remaps[b] = b2\n merges[b2].extend([b1, *merges[b1]])\n del merges[b1]\n simulated_sizes[b2] += s1\n\n return merge_indexes, merges, remaps, merge_pairs, simulated_sizes", "title": "" }, { "docid": "6a4457ea7b90fc3d209bec99457cabd2", "score": "0.4815523", "text": "def merge(cls, builder, aggregated, incremental, num_runs, sort_keys=False):\n\n if not incremental:\n logging.warning(\"Nothing to merge.\")\n return None\n\n logging.info(\"Loading incremental json...\")\n incremental_json = cls._load_json(incremental)\n if not incremental_json:\n return None\n\n logging.info(\"Checking incremental json...\")\n if not cls._check_json(builder, incremental_json):\n return None\n\n logging.info(\"Loading existing aggregated json...\")\n aggregated_json = cls._load_json(aggregated)\n if not aggregated_json:\n return incremental\n\n logging.info(\"Checking existing aggregated json...\")\n if not cls._check_json(builder, aggregated_json):\n return incremental\n\n logging.info(\"Merging json results...\")\n try:\n if not cls._merge_json(aggregated_json[builder], incremental_json[builder], num_runs):\n return None\n except Exception, err:\n logging.error(\"Failed to merge json results: %s\", str(err))\n return None\n\n aggregated_json[JSON_RESULTS_VERSION_KEY] = JSON_RESULTS_VERSION\n\n return cls._generate_file_data(aggregated_json, sort_keys)", "title": "" }, { "docid": "60d449679485f8fc6e17c80680c6f9d4", "score": "0.48126644", "text": "def _frame_merger(data, frames):\n fail = 0\n for idx, frame in enumerate(frames[1:], start=2):\n try:\n # concatenation handles 99% of the cases\n with dask.config.set(**{'array.slicing.split_large_chunks': False}):\n data = xr.concat([data, frame], dim='time')\n except (ValueError, NotImplementedError):\n try:\n # try merging the data, usually one of the data files is missing a variable from a co-located\n # sensor that the system was unable to find\n _, index = np.unique(data['time'], return_index=True)\n data = data.isel(time=index)\n with dask.config.set(**{'array.slicing.split_large_chunks': False}):\n data = data.merge(frame, compat='override')\n except (ValueError, NotImplementedError):\n # something is just not right with this data file\n fail += 1\n\n return data, fail", "title": "" }, { "docid": "bddac254f036f46c0d9a2108d5b85ae2", "score": "0.48090795", "text": "def _mergeResults ( self, result ) :\n self.output.update ( result ) \n print 'FIT-MERGE: ', len ( self.output )", "title": "" }, { "docid": "6527c8d23a4c8042d765492517e660e5", "score": "0.48043308", "text": "def merge(self, guid, diff, increment_seqno=True):\n record = self._storage.get(guid)\n seqno = None\n merged = False\n\n for prop, meta in diff.items():\n orig_meta = record.get(prop)\n if orig_meta is not None and orig_meta['mtime'] >= meta['mtime']:\n continue\n if increment_seqno:\n if not seqno:\n seqno = self._seqno.next()\n meta['seqno'] = seqno\n else:\n meta['seqno'] = (orig_meta or {}).get('seqno') or 0\n record.set(prop, **meta)\n merged = True\n\n if merged and record.consistent:\n props = {}\n if seqno:\n props['seqno'] = seqno\n self._index.store(guid, props, False,\n self._pre_store, self._post_store,\n # No 
need in after-merge event, further commit event\n # is enough to avoid events flow on nodes synchronization\n None, False)\n\n return seqno", "title": "" }, { "docid": "818811284ae5d2abc964c81b6085273e", "score": "0.47912434", "text": "def set_replicates(self, replicates):\n return None", "title": "" }, { "docid": "8fce1b69c35abc6cffd86ea26941a39d", "score": "0.4785784", "text": "def try_merge(self, other: MergedGroup) -> bool:\n if self._get_group_similarity(other) >= 0.5:\n self.members.update(other.members)\n # periods must be merged one by one to ensure time cohesion\n for new_period in other.periods:\n has_merged = False\n for period in self.periods:\n if period.try_merge(new_period):\n has_merged = True\n break\n if not has_merged:\n self.periods.append(new_period)\n return True\n return False", "title": "" }, { "docid": "a2b5f3f7f8c96550f7976975d542ccb2", "score": "0.47750998", "text": "def merge_overlapping_images(metadata,inputs):\n\n # only for Sentinel-2 at this stage (not sure if this is needed for Landsat images)\n sat = 'S2'\n filepath = os.path.join(inputs['filepath'], inputs['sitename'])\n filenames = metadata[sat]['filenames']\n # find the pairs of images that are within 5 minutes of each other\n time_delta = 5*60 # 5 minutes in seconds\n dates = metadata[sat]['dates'].copy()\n pairs = []\n for i,date in enumerate(metadata[sat]['dates']):\n # dummy value so it does not match it again\n dates[i] = pytz.utc.localize(datetime(1,1,1) + timedelta(days=i+1))\n # calculate time difference\n time_diff = np.array([np.abs((date - _).total_seconds()) for _ in dates])\n # find the matching times and add to pairs list\n boolvec = time_diff <= time_delta\n if np.sum(boolvec) == 0:\n continue\n else:\n idx_dup = np.where(boolvec)[0][0]\n pairs.append([i,idx_dup])\n # because they could be triplicates in S2 images, adjust the for consecutive merges\n for i in range(1,len(pairs)):\n if pairs[i-1][1] == pairs[i][0]:\n pairs[i][0] = pairs[i-1][0]\n\n # for each pair of image, create a mask and add no_data into the .tif file (this is needed before merging .tif files)\n for i,pair in enumerate(pairs):\n fn_im = []\n for index in range(len(pair)):\n # get filenames of all the files corresponding to the each image in the pair\n fn_im.append([os.path.join(filepath, 'S2', '10m', filenames[pair[index]]),\n os.path.join(filepath, 'S2', '20m', filenames[pair[index]].replace('10m','20m')),\n os.path.join(filepath, 'S2', '60m', filenames[pair[index]].replace('10m','60m')),\n os.path.join(filepath, 'S2', 'meta', filenames[pair[index]].replace('_10m','').replace('.tif','.txt'))])\n # read that image\n im_ms, georef, cloud_mask, im_extra, im_QA, im_nodata = SDS_preprocess.preprocess_single(fn_im[index], sat, False)\n # im_RGB = SDS_preprocess.rescale_image_intensity(im_ms[:,:,[2,1,0]], cloud_mask, 99.9)\n\n # in Sentinel2 images close to the edge of the image there are some artefacts,\n # that are squares with constant pixel intensities. They need to be masked in the\n # raster (GEOTIFF). 
It can be done using the image standard deviation, which\n # indicates values close to 0 for the artefacts.\n if len(im_ms) > 0:\n # calculate image std for the first 10m band\n im_std = SDS_tools.image_std(im_ms[:,:,0],1)\n # convert to binary\n im_binary = np.logical_or(im_std < 1e-6, np.isnan(im_std))\n # dilate to fill the edges (which have high std)\n mask10 = morphology.dilation(im_binary, morphology.square(3))\n # mask all 10m bands\n for k in range(im_ms.shape[2]):\n im_ms[mask10,k] = np.nan\n # mask the 10m .tif file (add no_data where mask is True)\n SDS_tools.mask_raster(fn_im[index][0], mask10)\n # create another mask for the 20m band (SWIR1)\n im_std = SDS_tools.image_std(im_extra,1)\n im_binary = np.logical_or(im_std < 1e-6, np.isnan(im_std))\n mask20 = morphology.dilation(im_binary, morphology.square(3))\n im_extra[mask20] = np.nan\n # mask the 20m .tif file (im_extra)\n SDS_tools.mask_raster(fn_im[index][1], mask20)\n # use the 20m mask to create a mask for the 60m QA band (by resampling)\n mask60 = ndimage.zoom(mask20,zoom=1/3,order=0)\n mask60 = transform.resize(mask60, im_QA.shape, mode='constant', order=0,\n preserve_range=True)\n mask60 = mask60.astype(bool)\n # mask the 60m .tif file (im_QA)\n SDS_tools.mask_raster(fn_im[index][2], mask60)\n else:\n continue\n\n # make a figure for quality control\n # fig,ax= plt.subplots(2,2,tight_layout=True)\n # ax[0,0].imshow(im_RGB)\n # ax[0,0].set_title('RGB original')\n # ax[1,0].imshow(mask10)\n # ax[1,0].set_title('Mask 10m')\n # ax[0,1].imshow(mask20)\n # ax[0,1].set_title('Mask 20m')\n # ax[1,1].imshow(mask60)\n # ax[1,1].set_title('Mask 60 m')\n\n # once all the pairs of .tif files have been masked with no_data, merge the using gdal_merge\n fn_merged = os.path.join(filepath, 'merged.tif')\n\n # merge masked 10m bands and remove duplicate file\n gdal_merge.main(['', '-o', fn_merged, '-n', '0', fn_im[0][0], fn_im[1][0]])\n os.chmod(fn_im[0][0], 0o777)\n os.remove(fn_im[0][0])\n os.chmod(fn_im[1][0], 0o777)\n os.remove(fn_im[1][0])\n os.chmod(fn_merged, 0o777)\n os.rename(fn_merged, fn_im[0][0])\n\n # merge masked 20m band (SWIR band)\n gdal_merge.main(['', '-o', fn_merged, '-n', '0', fn_im[0][1], fn_im[1][1]])\n os.chmod(fn_im[0][1], 0o777)\n os.remove(fn_im[0][1])\n os.chmod(fn_im[1][1], 0o777)\n os.remove(fn_im[1][1])\n os.chmod(fn_merged, 0o777)\n os.rename(fn_merged, fn_im[0][1])\n\n # merge QA band (60m band)\n gdal_merge.main(['', '-o', fn_merged, '-n', '0', fn_im[0][2], fn_im[1][2]])\n os.chmod(fn_im[0][2], 0o777)\n os.remove(fn_im[0][2])\n os.chmod(fn_im[1][2], 0o777)\n os.remove(fn_im[1][2])\n os.chmod(fn_merged, 0o777)\n os.rename(fn_merged, fn_im[0][2])\n\n # remove the metadata .txt file of the duplicate image\n os.chmod(fn_im[1][3], 0o777)\n os.remove(fn_im[1][3])\n\n print('%d Sentinel-2 images were merged (overlapping or duplicate)' % len(pairs))\n\n # update the metadata dict\n metadata_updated = copy.deepcopy(metadata)\n idx_removed = []\n idx_kept = []\n for pair in pairs: idx_removed.append(pair[1])\n for idx in np.arange(0,len(metadata[sat]['dates'])):\n if not idx in idx_removed: idx_kept.append(idx)\n for key in metadata_updated[sat].keys():\n metadata_updated[sat][key] = [metadata_updated[sat][key][_] for _ in idx_kept]\n\n return metadata_updated", "title": "" }, { "docid": "4ab24cf4dd99a590131e1c8f19e2a5aa", "score": "0.47450465", "text": "def testMergeBuildStats(self) -> None:\n base_map = data_types.TestExpectationMap({\n 'foo':\n data_types.ExpectationBuilderMap({\n data_types.Expectation('foo', 
['win'], 'Failure'):\n data_types.BuilderStepMap({\n 'builder':\n data_types.StepBuildStatsMap({\n 'step': data_types.BuildStats(),\n }),\n }),\n }),\n })\n merge_stats = data_types.BuildStats()\n merge_stats.AddFailedBuild('1', frozenset())\n merge_map = data_types.TestExpectationMap({\n 'foo':\n data_types.ExpectationBuilderMap({\n data_types.Expectation('foo', ['win'], 'Failure'):\n data_types.BuilderStepMap({\n 'builder':\n data_types.StepBuildStatsMap({\n 'step': merge_stats,\n }),\n }),\n }),\n })\n expected_stats = data_types.BuildStats()\n expected_stats.AddFailedBuild('1', frozenset())\n expected_base_map = {\n 'foo': {\n data_types.Expectation('foo', ['win'], 'Failure'): {\n 'builder': {\n 'step': expected_stats,\n },\n },\n },\n }\n base_map.Merge(merge_map)\n self.assertEqual(base_map, expected_base_map)", "title": "" }, { "docid": "ad08e2c615b6409af023ab6d2f74b642", "score": "0.47434458", "text": "def merge_batches_and_save_dataset():\n\n path = '/Users/andreidm/ETH/projects/normalization/data/'\n\n batches = []\n merged_mz = set()\n\n for bid in bids:\n\n data = get_all_data_from_h5(path + 'harm_4_{}_DATA.h5'.format(bid))\n batches.append({'data': data, 'id': bid})\n merged_mz.update(data['samples']['mzs'])\n\n merged_mz = sorted(list(merged_mz))\n annotation = []\n\n shared_mz_df = pandas.DataFrame()\n for mz in merged_mz:\n\n mz_df = pandas.DataFrame()\n for batch in batches:\n\n columns, names = get_shared_perturbations_ids_for_batch(batch)\n\n if mz in batch['data']['samples']['mzs']:\n # if this mz appears in batch, use intensities\n index = batch['data']['samples']['mzs'].index(mz)\n bdf = pandas.DataFrame([batch['data']['samples']['data'][index, columns]], columns=names)\n\n # each mz appears in one batch only, so annotation can be assigned only here\n anno_index = batch['data']['annotation']['mzs'].index(round(mz, 4))\n annotation.append(batch['data']['annotation']['names'][anno_index])\n else:\n # if not, fill up with zeros\n bdf = pandas.DataFrame([numpy.zeros(len(columns))], columns=names)\n\n mz_df = pandas.concat([mz_df, bdf], axis=1)\n\n shared_mz_df = pandas.concat([shared_mz_df, mz_df], ignore_index=True)\n\n assert len(merged_mz) == len(annotation)\n\n all_data = pandas.DataFrame({'name': annotation, 'mz': merged_mz, 'rt': [0 for x in merged_mz]})\n all_data = pandas.concat([all_data, shared_mz_df], axis=1)\n\n # collapse the same mzs\n all_data = collapse_same_mzs(all_data)\n\n # filter out small intensities\n filtered_data = all_data[(all_data.iloc[:, 3:] > min_relevant_intensity).all(axis=1)]\n\n # save\n all_data.to_csv(path + \"all_data.csv\", index=False)\n filtered_data.to_csv(path + \"filtered_data.csv\", index=False)", "title": "" }, { "docid": "9ed5e75297a344a31ddf90ba7f9bdc75", "score": "0.47304523", "text": "def _merge_single_child(self):\n assert len(self) == 1, \"bug: _merge_single_child called on loop with len != 1\"\n child = cast(Loop, self[0])\n\n # if the child has a fixed repetition count of 1 the measurements can be merged\n mergable_measurements = child.repetition_count == 1 and not child.volatile_repetition\n\n assert not self._measurements or mergable_measurements, \"bug: _merge_single_child called on loop with measurements\"\n assert not self._waveform, \"bug: _merge_single_child called on loop with children and waveform\"\n\n measurements = child._measurements\n if self._measurements:\n if measurements:\n measurements.extend(self._measurements)\n else:\n measurements = self._measurements\n\n if not self.volatile_repetition and not 
child.volatile_repetition:\n # simple integer multiplication\n repetition_definition = self.repetition_count * child.repetition_count\n elif not self.volatile_repetition:\n repetition_definition = child._repetition_definition * self.repetition_count\n elif not child.volatile_repetition:\n repetition_definition = self._repetition_definition * child.repetition_count\n else:\n # create a new expression that depends on both\n expression = 'parent_repetition_count * child_repetition_count'\n repetition_definition = VolatileRepetitionCount.operation(\n expression=expression,\n parent_repetition_count=self._repetition_definition,\n child_repetition_count=child._repetition_definition)\n\n self[:] = iter(child)\n self._waveform = child._waveform\n self._repetition_definition = repetition_definition\n self._measurements = measurements\n self._invalidate_duration()\n return True", "title": "" }, { "docid": "f2a8fc77a8c868356baf997ca41dc6fb", "score": "0.47293442", "text": "def run_replicates(iterations, n_replicates, test, parallel=False):\n df = pd.DataFrame(index=np.arange(len(iterations) * n_replicates))\n df['iterations'] = np.repeat(iterations, n_replicates)\n df['replicate'] = np.tile(np.arange(n_replicates), len(iterations))\n\n results = pints.evaluate(test, list(df['iterations']), parallel=parallel)\n assert len(results) > 0, 'Empty result set generated'\n for key in results[0].keys():\n df[key] = np.array([r[key] for r in results], copy=False)\n\n return df", "title": "" }, { "docid": "d7456688e34ffd581ad2a3a52347a38d", "score": "0.4712553", "text": "def merge_and_dedup(bams):\n\n if len(bams) == 1:\n return bams\n\n merged = tempfile.NamedTemporaryFile(delete=False, prefix='merged', suffix='.bam').name\n merged_and_deduped = tempfile.NamedTemporaryFile(delete=False, prefix='merged_and_duped', suffix='.bam').name\n metrics = tempfile.NamedTemporaryFile(delete=False, prefix='metrics', suffix='.txt').name\n\n shell('echo \"tempfiles created by merge_and_dedup: {merged} {merged_and_deduped} {metrics}\" {log}')\n\n if not keep_tempfiles:\n registered_for_deletion.extend([merged, merged_and_deduped, metrics])\n\n bams = ' '.join(bams)\n shell(\n 'samtools merge '\n '-f '\n '-@ {snakemake.threads} '\n '{merged} '\n '{bams} '\n '{log} '\n )\n shell(\n 'picard '\n '{java_args} '\n 'MarkDuplicates '\n 'INPUT={merged} '\n 'OUTPUT={merged_and_deduped} '\n 'METRICS_FILE={metrics} '\n 'REMOVE_DUPLICATES=true '\n '{log} '\n )\n return merged_and_deduped", "title": "" }, { "docid": "d8d4b2d8567999712bbada24312b16a1", "score": "0.4705641", "text": "def merge(args):\n import nifti_mrs.tools as nmrs_tools\n from nifti_mrs.nifti_mrs import NIFTI_MRS\n # 1. Load the files\n if len(args.files) < 2:\n raise ValueError('Files argument must provide two or more files to merge.')\n\n to_concat = []\n concat_names = []\n for fp in args.files:\n concat_names.append(fp.with_suffix('').with_suffix('').name)\n curr_file = NIFTI_MRS(fp)\n\n # Merging along a new axis\n if args.newaxis:\n if args.dim in curr_file.dim_tags:\n raise ValueError(f'--dim ({args.dim}) must be different from existing tags: {curr_file.dim_tags}.')\n if curr_file.ndim == 7:\n raise ValueError('Inputs use all three higher dimension already, cannot add new axis.')\n new_order = curr_file.dim_tags\n new_order[curr_file.ndim - 4] = args.dim\n curr_file = nmrs_tools.reorder(curr_file, new_order)\n to_concat.append(curr_file)\n\n # 2. Merge the files\n merged = nmrs_tools.merge(to_concat, args.dim)\n\n # 3. 
Save the output file\n if args.filename:\n file_out = args.output / args.filename\n else:\n file_out = args.output / ('_'.join(concat_names) + '_merged')\n merged.save(file_out)", "title": "" }, { "docid": "ddd6595e1011468914f6ffcdfb54dffc", "score": "0.47011703", "text": "def merge(self):\n self.ensure_has_same_campaigns()\n self._merge()", "title": "" }, { "docid": "bce74fd2d891688ed9b0486a0fca0949", "score": "0.46962276", "text": "def merge(self, session, source, dest):\n\n raise NotImplementedError()", "title": "" }, { "docid": "ac14df03347dc17ee9c2513bf39de371", "score": "0.46901122", "text": "def merge_shards(StreamName=None, ShardToMerge=None, AdjacentShardToMerge=None):\n pass", "title": "" }, { "docid": "d0f06e5ec3a697711b4342b4938596e1", "score": "0.4689531", "text": "def augment_photos_folder(self, batch_size=64, new_size=None):\n default_logger.info(\n f'Started augmentation with {self.workers} workers'\n )\n default_logger.info(f'Total images to augment: {self.total_images}')\n default_logger.info(f'Session assigned id: {self.session_id}')\n with ThreadPoolExecutor(max_workers=self.workers) as executor:\n while self.image_paths_copy:\n current_batch, current_paths = self.load_batch(\n new_size, batch_size\n )\n future_augmentations = {\n executor.submit(self.augment_image, image, path): path\n for image, path in zip(current_batch, current_paths)\n }\n for future_augmented in as_completed(future_augmentations):\n future_augmented.result()\n default_logger.info(f'Augmentation completed')\n augmentation_frame = pd.DataFrame(\n self.augmentation_data, columns=self.mapping.columns\n )\n saving_path = os.path.join(\n '..', 'Output', 'Data', f'augmented_data_plus_original.csv'\n )\n combined = pd.concat([self.mapping, augmentation_frame])\n for item in ['bx', 'by', 'bw', 'bh']:\n combined = combined.drop(combined[combined[item] > 1].index)\n combined.to_csv(saving_path, index=False)\n default_logger.info(f'Saved old + augmented labels to {saving_path}')\n adjusted_combined = adjust_non_voc_csv(\n saving_path, self.image_folder, self.image_width, self.image_height\n )\n adjusted_saving_path = saving_path.replace('augmented', 'adjusted_aug')\n adjusted_combined.to_csv(adjusted_saving_path, index=False)\n default_logger.info(\n f'Saved old + augmented (adjusted) labels to {adjusted_saving_path}'\n )\n return adjusted_combined", "title": "" }, { "docid": "0224e49cc7266ddea7a8cfe1769afa8e", "score": "0.4686847", "text": "def aggregate(self, data_source: str, agg_rep: str, agg_time: str = \"mean\") -> \"Measurement\":\n aggs = [x.aggregate(data_source, agg_time) for x in self.repetitions] # [:-1]\n new_df = pd.DataFrame(aggs)\n if agg_rep:\n new_df = new_df.aggregate(agg_rep)\n\n return new_df", "title": "" }, { "docid": "afd101da36fc603063563f357571fc6b", "score": "0.4683037", "text": "def merge(self, other):\n return CountersTracker(\n count=self.count + other.count,\n true_count=self.true_count + other.true_count,\n )", "title": "" }, { "docid": "b2c7b6aa99d4ce7983c78e29439249c4", "score": "0.46735698", "text": "def merge_templates_parallel(self, pairs):\n n_samples = 2000\n p_val_threshold = 0.9\n merge_pairs = []\n\n for pair in pairs:\n unit1, unit2 = pair\n\n fname_out = os.path.join(\n self.save_dir,\n 'unit_{}_{}.npz'.format(unit1, unit2))\n\n if os.path.exists(fname_out):\n if np.load(fname_out)['merge']:\n merge_pairs.append(pair)\n\n else:\n \n # get spikes times and soft assignment\n idx1 = self.spike_train[:, 1] == unit1\n spt1 = self.spike_train[idx1, 0]\n prob1 = 
self.soft_assignment[idx1]\n shift1 = self.shifts[idx1]\n scale1 = self.scales[idx1]\n n_spikes1 = self.n_spikes_soft[unit1]\n \n idx2 = self.spike_train[:, 1] == unit2\n spt2 = self.spike_train[idx2, 0]\n prob2 = self.soft_assignment[idx2]\n shift2 = self.shifts[idx2]\n scale2 = self.scales[idx2]\n n_spikes2 = self.n_spikes_soft[unit2]\n \n # randomly subsample\n if n_spikes1 + n_spikes2 > n_samples:\n ratio1 = n_spikes1/float(n_spikes1+n_spikes2)\n n_samples1 = np.min((int(n_samples*ratio1), n_spikes1))\n n_samples2 = n_samples - n_samples1\n\n else:\n n_samples1 = n_spikes1\n n_samples2 = n_spikes2\n idx1_ = np.random.choice(len(spt1), n_samples1, replace=False,\n p=prob1/np.sum(prob1))\n idx2_ = np.random.choice(len(spt2), n_samples2, replace=False,\n p=prob2/np.sum(prob2))\n spt1 = spt1[idx1_]\n spt2 = spt2[idx2_]\n shift1 = shift1[idx1_]\n shift2 = shift2[idx2_]\n scale1 = scale1[idx1_]\n scale2 = scale2[idx2_]\n\n ptp_max = self.ptps[[unit1, unit2]].max(0)\n mc = ptp_max.argmax()\n vis_chan = np.where(ptp_max > 1)[0]\n\n # align two units\n shift_temp = (self.templates[unit2, :, mc].argmin() - \n self.templates[unit1, :, mc].argmin())\n spt2 += shift_temp\n \n # load residuals\n wfs1, skipped_idx1 = self.reader_residual.read_waveforms(\n spt1, self.spike_size, vis_chan)\n spt1 = np.delete(spt1, skipped_idx1)\n shift1 = np.delete(shift1, skipped_idx1)\n scale1 = np.delete(scale1, skipped_idx1)\n \n wfs2, skipped_idx2 = self.reader_residual.read_waveforms(\n spt2, self.spike_size, vis_chan)\n spt2 = np.delete(spt2, skipped_idx1)\n shift2 = np.delete(shift2, skipped_idx2)\n scale2 = np.delete(scale2, skipped_idx2)\n \n # align residuals\n wfs1 = shift_chans(wfs1, -shift1)\n wfs2 = shift_chans(wfs2, -shift2)\n\n # make clean waveforms\n wfs1 += scale1[:, None, None]*self.templates[[unit1], :, vis_chan].T\n if shift_temp > 0:\n temp_2_shfted = self.templates[[unit2], shift_temp:, vis_chan].T\n wfs2[:, :-shift_temp] += scale2[:, None, None]*temp_2_shfted\n elif shift_temp < 0:\n temp_2_shfted = self.templates[[unit2], :shift_temp, vis_chan].T\n wfs2[:, -shift_temp:] += scale2[:, None, None]*temp_2_shfted\n else:\n wfs2 += scale2[:, None, None]*self.templates[[unit2],:,vis_chan].T\n\n \n # compute spatial covariance\n spatial_whitener = self.get_spatial_whitener(vis_chan)\n # whiten\n wfs1_w = np.matmul(wfs1, spatial_whitener)\n wfs2_w = np.matmul(wfs2, spatial_whitener)\n wfs1_w = np.matmul(wfs1_w.transpose(0,2,1),\n self.temporal_whitener).transpose(0,2,1)\n wfs2_w = np.matmul(wfs2_w.transpose(0,2,1),\n self.temporal_whitener).transpose(0,2,1)\n\n\n temp_diff_w = np.mean(wfs1_w, 0) - np.mean(wfs2_w,0)\n c_w = np.sum(0.5*(np.mean(wfs1_w, 0) + np.mean(wfs2_w,0))*temp_diff_w)\n dat1_w = np.sum(wfs1_w*temp_diff_w, (1,2))\n dat2_w = np.sum(wfs2_w*temp_diff_w, (1,2))\n dat_all = np.hstack((dat1_w, dat2_w))\n p_val = dp(dat_all)[1]\n\n if p_val > p_val_threshold:\n merge = True\n else:\n merge= False\n\n centers_dist = np.linalg.norm(temp_diff_w)\n\n\n if p_val > p_val_threshold:\n merge = True\n else:\n merge= False\n \n centers_dist = np.linalg.norm(temp_diff_w)\n np.savez(fname_out,\n merge=merge,\n dat1_w=dat1_w,\n dat2_w=dat2_w,\n centers_dist=centers_dist,\n p_val=p_val)\n\n if merge:\n merge_pairs.append(pair)\n\n return merge_pairs", "title": "" }, { "docid": "e92f449d20c92494bbd28d5c072dd12c", "score": "0.46673", "text": "def merge_replicates(regions_a, comps_a, regions_b, comps_b, contrib_a=1.0, contrib_b=1.0,\n offset_max=4.0, offset_step=0.5, scale_max=2.0, scale_step=0.1):\n \n 
best_score = -1.0\n best_offset = 0.0\n best_scale_start = -1.0\n best_scale_end = -1.0\n best_comps = None\n best_scores = None\n best_regions = None\n\n n_b = len(regions_b)\n points_b = np.arange(n_b)\n \n min_scale = 1.0/scale_max\n \n for offset in np.arange(-offset_max, offset_max, offset_step):\n\n for scale_start in np.arange(min_scale, scale_max, scale_step):\n \n for scale_end in np.arange(min_scale, scale_max, scale_step):\n \n stretch = np.interp(points_b, [0, n_b], [scale_start, scale_end])\n \n stretch_regions = (regions_b * stretch) + offset \n \n d = overlap_region_comps(regions_a, comps_a,\n stretch_regions, comps_b,\n contrib_a, contrib_b)\n \n sim_scores, merge_regions, merge_comps, sim_score, score_width, p = d\n \n if sim_score > best_score:\n best_scores = sim_scores\n best_score = sim_score\n best_scale_start = scale_start\n best_scale_end = scale_end\n best_offset = offset\n best_comps = merge_comps\n best_regions = merge_regions\n\n info(' best score: %5.2f at offset: %5.2f start scale: %5.2f end scale: %5.2f' % (best_score, best_offset, best_scale_start, best_scale_end))\n \n return best_regions, best_comps, best_score", "title": "" }, { "docid": "42369da84a5a5c15f30b04732a243739", "score": "0.46667022", "text": "def merge(self):\n with open(f\"{self.file_name}.mp4\", \"wb\") as merged_file:\n for ts_file in [\n open(f\"chunks\\/{self.file_name}-{chunk_number}.chunk.ts\")\n for chunk_number in range(self.total_chunks)\n ]:\n shutil.copyfileobj(ts_file, merged_file)", "title": "" }, { "docid": "9dc321b4c8efc9ef55b1637819911057", "score": "0.46645865", "text": "def test_merge_options(self):\n self.time.change_system_time(\"next friday 0800\")\n # m (The merge window) (Starts 1 day ago with a window size of 2)\n # <--->\n # Time/Day 1 2 3 4 5\n # 02:00 b b b b b (The last backup before the merge is the 5th Task)\n # 06:00 m (The merge is the 6th Task)\n\n # Define a schedule\n schedule = \\\n [\n (1, 'DAYS', '02:00'),\n (5, 'DAYS', '06:00'),\n ]\n\n # Make the second task a merge with offset_start = 1, offset_end = 2\n merge_map = {1: (1, 2)}\n\n schedule_test = ScheduleTest(self, [schedule], merge_map=merge_map)\n\n schedule_test.run(5)\n self.sleep(60) # Takes a moment to update the backups\n backups_before_merge = self.get_backups(\"active\", \"repo_name0\")\n self.assertEqual(len(backups_before_merge), 5)\n\n schedule_test.run(1)\n self.sleep(60) # Takes a moment to update the backups\n backups_after_merge = self.get_backups(\"active\", \"repo_name0\")\n\n # Check the merge backed up the correct backups\n self.assertEqual(len(backups_after_merge), 3)\n self.assertIn(backups_before_merge[0]._date, [backup._date for backup in backups_after_merge]) # Check backup 1 is not part of the merge\n self.assertIn(backups_before_merge[4]._date, [backup._date for backup in backups_after_merge]) # Check backup 5 is not part of the merge", "title": "" }, { "docid": "d54a21a3f46c5639b0ef7bf91a95f75c", "score": "0.46634975", "text": "def replicate(self, source, target, opts={}, **kwargs):\n\n params = {\n 'source': source,\n 'target': target\n }\n\n params.update(opts)\n if 'params' in kwargs:\n params.update(kwargs['params'])\n del kwargs['params']\n\n return self.post('_replicate', params=params, **kwargs)", "title": "" }, { "docid": "1ccfe1018eb7781938db12f74bb826e0", "score": "0.46618715", "text": "def parallel_merge(self, that):\n self.cov_sens = weighted_average(self.cov_sens, self.num_samples, that.cov_sens, that.num_samples)\n self.mean_sens = 
weighted_average(self.mean_sens, self.num_samples, that.mean_sens, that.num_samples) \n self.num_samples += that.num_samples", "title": "" }, { "docid": "7eea7ea3ed5c03af7e8c0ff0861d6fe5", "score": "0.46494043", "text": "def merge(self, match=[\"year\", \"month\", \"day\"]):\n\n if type(match) is str:\n match = [match]\n\n if type(match) is not list:\n raise TypeError(\"match supplied is not a list\")\n\n for mm in match:\n if type(mm) is not str:\n raise TypeError(f\"{mm} from match is not a list\")\n\n if type(match) is list:\n match = [y.lower() for y in match]\n\n if len([x for x in match if x not in [\"year\", \"month\", \"day\"]]) > 0:\n raise ValueError(\"match supplied is not valid\")\n\n # Force a release if needed\n self.run()\n\n if type(self.current) is not list:\n warnings.warn(\n message=\"There is only one file in the dataset. No need to merge!\"\n )\n return None\n\n # Make sure the times in the files are compatiable, based on the match criteria\n\n all_times = []\n for ff in self:\n cdo_result = subprocess.run(\n f\"cdo ntime {ff}\",\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n ).stdout\n cdo_result = str(cdo_result).replace(\"b'\", \"\").strip()\n ntime = int(cdo_result.split(\"\\\\\")[0])\n all_times.append(ntime)\n if len(set(all_times)) > 1:\n warnings.warn(\n message=\"The files to merge do not have the same number of time steps!\"\n )\n\n all_grids = []\n for ff in self:\n cdo_result = subprocess.run(\n f\"cdo griddes {ff}\",\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n ).stdout\n all_grids.append(cdo_result)\n\n if len(set(all_grids)) > 1:\n raise ValueError(\n \"The files in the dataset to do not have the same grid. Consider using regrid!\"\n )\n\n all_times = []\n for ff in self:\n cdo_result = subprocess.run(\n f\"cdo showtimestamp {ff}\",\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n ).stdout\n cdo_result = str(cdo_result).replace(\"b'\", \"\").strip()\n cdo_result = cdo_result.split()\n cdo_result = pd.Series((v for v in cdo_result))\n all_times.append(cdo_result)\n\n for i in range(1, len(all_times)):\n if (len(all_times[i]) != len(all_times[0])) and (len(all_times[i]) > 1):\n raise ValueError(\n \"You are trying to merge data sets with an incompatible number of time steps\"\n )\n\n # remove files with more than one time step in it\n all_times = [x for x in all_times if len(x) > 1]\n\n all_df = []\n if len(all_times) > 1:\n for i in range(0, len(all_times)):\n month = [datetime.strptime(v[0:10], \"%Y-%m-%d\").month for v in all_times[i]]\n year = [datetime.strptime(v[0:10], \"%Y-%m-%d\").year for v in all_times[i]]\n day = [datetime.strptime(v[0:10], \"%Y-%m-%d\").day for v in all_times[i]]\n i_data = pd.DataFrame({\"year\": year, \"month\": month, \"day\": day})\n i_data = i_data.loc[:, match]\n all_df.append(i_data)\n\n for i in range(1, len(all_df)):\n if all_df[0].equals(all_df[i]) == False:\n raise ValueError(\"Dates of data sets do not satisfy matching criteria!\")\n\n cdo_command = \"cdo -merge\"\n\n run_this(cdo_command, self, output=\"one\")\n\n if session_info[\"lazy\"]:\n self._merged = True\n\n if cdo_version() in [\"1.9.3\"]:\n self.run()", "title": "" }, { "docid": "f0ae060f81101cb5457805213c86bf42", "score": "0.46367767", "text": "def compete(self, num_runs):\n LOGGER.info('GAME {!r}\\nNUM RUNS {}'.format(self, num_runs))\n m = num_runs // 2\n self.runs(m)\n self.swap_agents()\n self.runs(num_runs-m, index_offset=m+1)\n self.swap_agents()\n log_info = 'GAME {!r}\\nNUM RUNS {} 
complete!'\n log_info += '\\nRESULTS\\nAGENT1 {!r}\\nAGENT2 {!r}\\n'\n LOGGER.info(log_info.format(self, num_runs, self._agent1, self._agent2))", "title": "" }, { "docid": "69bd9fbb36c815e713b858119767aa83", "score": "0.46341488", "text": "def merge_results(images, submissions, output):\n\n df_image = gpd.read_file(images)\n df_image.index = df_image['key']\n df_image = df_image.drop(columns=['key'])\n\n damage_features = labelling_options.keys()\n\n os.makedirs(submissions+'-processed', exist_ok=True)\n sub_proc_dir = submissions+'-processed'\n\n num_batches = len(list(set([re.findall(r\".*?_(\\d+)_.*\", x)[0] for x in os.listdir(submissions)])))\n print('found', num_batches, 'batches')\n\n # loop over batches\n for batch in range(num_batches):\n\n # initialize geodatframe for results\n df_image_batch = df_image.copy()\n\n # get files and annotator names of this batch\n files = [x for x in os.listdir(submissions) if str(batch) == re.findall(r\".*?_(\\d+)_.*\", x)[0]]\n names = list(set([x.split('.')[0].split('_')[-1] for x in files]))\n num_annotators = len(names)\n map_names_ids = {name: i for i, name in enumerate(names)}\n print('processing batch', batch, 'unique annotators', names)\n batch_keys = []\n\n # initialize empty columns in results geodatframe\n for i in range(num_annotators):\n for damage_feature in damage_features:\n df_image_batch[damage_feature + '_' + str(i)] = False\n\n # merge results from different files\n for file in files:\n\n name = file.split('.')[0].split('_')[-1]\n annotator_id = map_names_ids[name]\n\n with open(submissions+'/'+file) as json_file:\n data = json.load(json_file)\n\n for image in data['_via_img_metadata']:\n labels = data['_via_img_metadata'][image]['file_attributes']['damage_labels']\n for damage_feature in damage_features:\n if damage_feature in labels.keys():\n df_image_batch.at[image, damage_feature+'_'+str(annotator_id)] = True\n batch_keys = list(data['_via_img_metadata'])\n\n # keep only images in batch\n df_image_batch = df_image_batch[df_image_batch.index.isin(batch_keys)]\n\n # merge results from the diferrent annotators\n for damage_feature in damage_features:\n df_image_batch[damage_feature+'_merged'] = 'no damage'\n\n for image in df_image_batch.index:\n df = df_image_batch.loc[image].copy()\n for damage_feature in damage_features:\n labels = [damage_feature+'_'+str(i) for i in range(num_annotators)]\n values = df[labels].values\n if any(values):\n df_image_batch.at[image, damage_feature + '_merged'] = 'possible damage'\n if all(values):\n df_image_batch.at[image, damage_feature + '_merged'] = 'confirmed damage'\n\n # save processed batch results\n print('finished processing batch', batch, 'with', len(df_image_batch), 'entries')\n if len(df_image_batch) > 0:\n df_image_batch.to_file(sub_proc_dir+'/results_batch_'+str(batch)+'.geojson', driver='GeoJSON')\n\n # merge results from all batches\n gdf_results = gpd.GeoDataFrame()\n for file in os.listdir(sub_proc_dir):\n gdf_batch = gpd.read_file(sub_proc_dir+'/'+file)\n gdf_results = gdf_results.append(gdf_batch, ignore_index=True)\n gdf_merged = gpd.GeoDataFrame()\n gdf_merged['key'] = gdf_results['key']\n gdf_merged['geometry'] = gdf_results['geometry']\n gdf_merged['captured_at'] = gdf_results['captured_at']\n\n # save one layer per damage feature\n for damage_feature in damage_features:\n gdf_results_feature = gdf_results[['key', damage_feature+'_merged', 'geometry', 'captured_at']]\n gdf_results_feature = gdf_results_feature.rename(columns={damage_feature+'_merged': damage_feature})\n 
gdf_results_feature.to_file(output+'/results_'+damage_feature+'.geojson', driver='GeoJSON')\n gdf_merged[damage_feature] = gdf_results_feature[damage_feature]\n\n # merge damage features in one layer, save it\n gdf_merged['damage'] = \"no damage\"\n for ix, row in gdf_merged.iterrows():\n if any(['confirmed damage' in row[x] for x in light_features]) or any(['possible damage' in row[x] for x in medium_features]):\n gdf_merged.at[ix, 'damage'] = \"light damage\"\n if any(['confirmed damage' in row[x] for x in medium_features]) or any(['possible damage' in row[x] for x in heavy_features]):\n gdf_merged.at[ix, 'damage'] = \"moderate damage\"\n if any(['confirmed damage' in row[x] for x in heavy_features]):\n gdf_merged.at[ix, 'damage'] = \"severe damage\"\n\n gdf_merged = gdf_merged.drop(columns=light_features+medium_features+heavy_features)\n gdf_merged = gdf_merged[['key', 'damage', 'geometry', 'captured_at']]\n gdf_merged.to_file(output+'/results_merged.geojson', driver='GeoJSON')", "title": "" }, { "docid": "4ec086395bd313cb7719d2ba9815324d", "score": "0.46333608", "text": "def merge(items, target):\n item = items.pop()\n merged = {}\n k = '{}+{}'.format(item.remote_address, item.remote_port)\n merged[k] = item.copy()\n for item in items:\n k = '{}+{}'.format(item.remote_address, item.remote_port)\n if k not in merged:\n merged[k] = item.copy()\n continue\n if item.is_targeted(target):\n merged[k].client_address = item.client_address\n merged[k].count += item.count\n for t in item.METADATA_TYPES:\n merged[k].metadata[t] |= item.metadata_for(t)\n return merged.values()", "title": "" }, { "docid": "f7b6d91429bcadbd8cea35024841dacd", "score": "0.4631474", "text": "def merge_records(self, dest, record_style, includetar=True, overwrite=False, dryrun=False):\n \n # Get records in source database\n self_records, self_df = self.get_records(record_style, return_df=True)\n print(len(self_records), 'records in source')\n \n # Get records in destination database\n dest_records, dest_df = dest.get_records(record_style, return_df=True)\n print(len(dest_records), 'records in destination')\n \n # Identify records missing from destination\n if len(dest_records) > 0:\n missing = set(self_df.name[~self_df.name.isin(dest_df.name.tolist())])\n else:\n missing = set(self_df.name)\n print(len(missing), 'records missing from destination')\n \n if overwrite is True:\n print('comparing content of records')\n # Identify records that have changed\n changed = []\n for name in tqdm(self_df.name[~self_df.name.isin(missing)], desc=\"Comparing records\", ascii=True):\n self_record = self_records[self_df.name == name][0].model.json(ensure_ascii=False)\n dest_record = dest_records[dest_df.name == name][0].model.json(ensure_ascii=False)\n if self_record != dest_record:\n changed.append(name)\n print(len(changed), 'records in destination different in source')\n missing = missing.union(changed)\n \n if includetar is True:\n \n # Get metadata for tars in source database\n self_tar = []\n for frecord in self.mongodb[f'{record_style}.files'].find():\n self_tar.append(frecord)\n self_tar = pd.DataFrame(self_tar)\n print(len(self_tar), 'tars in source')\n \n # Get metadata for tars in destination database\n dest_tar = []\n for frecord in dest.mongodb[f'{record_style}.files'].find():\n dest_tar.append(frecord)\n dest_tar = pd.DataFrame(dest_tar)\n print(len(dest_tar), 'tars in destination') \n \n # Identify records missing from destination\n if len(dest_records) > 0:\n missingtar = 
set(self_tar.recordname[~self_tar.recordname.isin(dest_tar.recordname.tolist())])\n else:\n missingtar = set(self_tar.recordname)\n print(len(missingtar), 'tars missing from destination')\n missing = missing.union(missingtar)\n \n if len(missing) > 0:\n records = self_records[self_df.name.isin(missing)]\n if dryrun:\n print(len(records), 'to copy')\n return records\n else:\n self.copy_records(dest, records=records, includetar=includetar, overwrite=overwrite)\n else:\n print('No records to copy')\n if dryrun:\n return []", "title": "" }, { "docid": "2b353cf359933fa574641c26b724e9ae", "score": "0.46280736", "text": "def map_reduce_aggregation(self, last_update):\n try:\n # define the mapper,\n # emit the key and value to be used in reducer\n # the mapper should have the same form with reduced value\n mapper = Code(\n '''\n function () {\n // key is the text, value map \"documents\"\n // to the object with\n // documentId as key and \"document\"\n // in collection1 as content\n // also, move add timestamp into the \"document\" field\n var key = this.text;\n var value = {\"documents\": {}}\n value[\"documents\"][this.documentId] = this.document;\n value[\"documents\"][this.documentId][\"ts\"] = this.ts;\n\n // emit the mapped key and value\n emit(key, value);\n }\n '''\n )\n\n # define the reducer,\n # the reducer should always remain the same format no matter\n # how many times map reduce applied\n reducer = Code(\n '''\n // receive the key and value from the mapper\n function (key, values) {\n // define new object to receive\n // and add the newly reduced content\n var obj = {};\n obj[\"documents\"] = {};\n\n // merge function check the timestamp\n // of given documentId and document\n // replace the existing content if the timestamp is larger\n function merge(documentId, document) {\n // check if the documentId is in the object\n // if does, replace with new content\n if (documentId in obj[\"documents\"] &&\n obj[\"documents\"][documentId][\"ts\"] >\n document[\"ts\"]) {\n return;\n }\n obj[\"documents\"][documentId] = document;\n }\n\n // loop through the value collected,\n // and apply merge on each documentId\n for (var i = 0; i < values.length; i ++) {\n var value = values[i];\n for(let documentId in value[\"documents\"]) {\n merge(documentId, value[\"documents\"][documentId])\n }\n }\n return obj;\n }\n '''\n )\n\n # apply map reduce using the mapper and reducer,\n # with specified newly the time stamp\n return self.collection.map_reduce(\n mapper,\n reducer,\n out={\"reduce\": self.mr_collection.name},\n query={\"ts\": {\"$gte\": last_update}},\n )\n except Exception:\n raise DataBaseAggregationFail()", "title": "" }, { "docid": "311d447378a12be050664ca4db29486c", "score": "0.4611131", "text": "def merge(cls, batches, batch_size=None):\n batches = [batch for batch in batches if batch is not None]\n if len(batches) == 0:\n return None, None\n total_len = np.sum([len(batch) for batch in batches])\n if batch_size is None:\n batch_size = total_len\n elif not isinstance(batch_size, int) or batch_size < 1:\n raise ValueError(\"Batch size must be positive int\")\n indices = np.arange(total_len)\n\n data = []\n for comp in batches[0].components:\n data.append(np.concatenate([batch.get(component=comp) for batch in batches]))\n data = copy.deepcopy(data)\n\n new_indices = indices[:batch_size]\n new_batch = cls(bf.DatasetIndex(new_indices), unique_labels=batches[0].unique_labels)\n new_batch._data = tuple(comp[:batch_size] for comp in data) # pylint: disable=protected-access, 
attribute-defined-outside-init, line-too-long\n if total_len <= batch_size:\n rest_batch = None\n else:\n rest_indices = indices[batch_size:]\n rest_batch = cls(bf.DatasetIndex(rest_indices), unique_labels=batches[0].unique_labels)\n rest_batch._data = tuple(comp[batch_size:] for comp in data) # pylint: disable=protected-access, attribute-defined-outside-init, line-too-long\n return new_batch, rest_batch", "title": "" }, { "docid": "046b8b23e7576eecd051d84028cdc131", "score": "0.46095175", "text": "def merger(filepath, src, dst):\n # Load and merge the multitrack pianoroll\n multitrack = Multitrack(filepath)\n merged = get_merged(multitrack)\n\n # Save the merged multitrack pianoroll\n result_path = change_prefix(filepath, src, dst)\n make_sure_path_exists(os.path.dirname(result_path))\n merged.save(result_path)", "title": "" }, { "docid": "8d83c776e3d88c7be862176e4d080aa2", "score": "0.46063486", "text": "def merge(self, prefix):\n world_size = get_world_size()\n merged_file = prefix.rsplit(\".\", 1)[0] + \".all\"\n logger.info(f\"concat all results into:{merged_file}\")\n merged_fd = open(merged_file, \"w\")\n for rank in range(world_size):\n res_file = prefix + str(rank)\n assert op.exists(res_file), f\"No such file or directory: {res_file}\"\n with open(res_file, \"r\") as fin:\n for line_idx, line in enumerate(fin):\n merged_fd.write(line)\n logger.info(f\"merging {res_file} {line_idx+1} results\")\n merged_fd.close()\n return merged_file", "title": "" }, { "docid": "4c49a49707b56530215030a6638bd257", "score": "0.45907712", "text": "def replicate(self, args):\n (options, args) = MethodOptionParser(fixed=[\"glob...\"]).parse_args(args)\n\n self._Locations().forAllLocal([\"replicate\"] + args).collect(labelPrint=True)", "title": "" }, { "docid": "b618d4b2bb0491ffc4585b991c58b567", "score": "0.45877486", "text": "def merge(self, *args, max_concurrent=None):\n from ..operators.observable.merge import merge\n return merge(self, *args, max_concurrent=max_concurrent)", "title": "" }, { "docid": "12b88061064efbd011aa01365f815721", "score": "0.45815164", "text": "def merge(self):\n \n # Merge self.loc_df and self.pbp_df on playbyplayorder_id\n self.pbp_loc_merged_df = pd.merge(self.pbp_df, self.loc_df, on = [\"playbyplayorder_id\", \"row_type\", \"game_id\"])\n \n # Make changes in actiondescription columns to simplify\n self.pbp_loc_merged_df[\"actiondescription\"] = self.pbp_loc_merged_df[\"actiondescription\"].apply(lambda x: 'jump_shot' if 'Jump' in x else ('layup' if 'Layup' in x else ('other'))) \n \n # Inspect the data\n \"\"\"\n print (self.pbp_loc_merged_df.info())\n self.pbp_loc_merged_df.hist(bins = 50, figsize = (20, 15))\n plt.show()\n \"\"\"\n \n \n return self.pbp_loc_merged_df", "title": "" }, { "docid": "0532514b5df6ecaa1a92520d98f522a6", "score": "0.45799404", "text": "def merge(self, otherArray, acceptDuplicates=False):\n otherArray = otherArray.toFormat(self)\n if acceptDuplicates:\n cannotAppend = self.max() > otherArray.min()\n else:\n cannotAppend = self.max() >= otherArray.min()\n if cannotAppend:\n print self\n print otherArray\n raise Exception('Time ranges cannot be concatenated')\n self.array = np.hstack((self.array, otherArray.array))", "title": "" }, { "docid": "96ecf3c3898e4e97cbe273a75429fee2", "score": "0.45793575", "text": "def test_two_batches_binary_duplicate(self):\n community = DebugCommunity.create_community(self._dispersy, self._my_member)\n\n # create node and ensure that SELF knows the node address\n node = DebugNode(community)\n node.init_socket()\n 
node.init_my_member()\n\n global_time = 10\n # first batch\n message = node.create_full_sync_text(\"duplicates\", global_time)\n node.give_packets([message.packet for _ in xrange(10)])\n\n # only one message may be in the database\n times = [x for x, in self._dispersy.database.execute(u\"SELECT global_time FROM sync WHERE community = ? AND member = ? AND meta_message = ?\", (community.database_id, node.my_member.database_id, message.database_id))]\n self.assertEqual(times, [global_time])\n\n # second batch\n node.give_packets([message.packet for _ in xrange(10)])\n\n # only one message may be in the database\n times = [x for x, in self._dispersy.database.execute(u\"SELECT global_time FROM sync WHERE community = ? AND member = ? AND meta_message = ?\", (community.database_id, node.my_member.database_id, message.database_id))]\n self.assertEqual(times, [global_time])\n\n # cleanup\n community.create_dispersy_destroy_community(u\"hard-kill\")\n self._dispersy.get_community(community.cid).unload_community()", "title": "" }, { "docid": "83ec724a7715315b7820c50ed04e30cb", "score": "0.45779082", "text": "def merge(target, **kwargs):\n subspec = kwargs.pop('spec', T)\n init = kwargs.pop('init', dict)\n op = kwargs.pop('op', None)\n if kwargs:\n raise TypeError('unexpected keyword args: %r' % sorted(kwargs.keys()))\n spec = Merge(subspec, init, op)\n return glom(target, spec)", "title": "" }, { "docid": "1cff8787cfa049476f5e1a2a3cc8c620", "score": "0.45739704", "text": "def merge_wrapper(merge_config: str, source: List, destination: List, processes: int):\n try:\n merge(merge_config, source, destination, processes)\n exit(0)\n except Exception as me:\n get_logger().error(f\"kgx.merge error: {str(me)}\")\n exit(1)", "title": "" }, { "docid": "2c50d7801d79170c235c1dad174ff672", "score": "0.45722777", "text": "def test_should_generate_correct_result_if_original_document_modified_in_parallel(self):\n # original document\n lww = LWWElementSet()\n abc = \"ABCDEFG\"\n t = 0\n for s in abc:\n t = t + 1\n lww.add(Element(s, t))\n\n # person 1's updates\n lww1 = LWWElementSet()\n added = \"HIJKL\"\n removed = \"ACEG\"\n i = len(abc)\n for a in added:\n i = i + 1\n lww1.add(Element(a, i))\n\n for r in removed:\n i = i + 1\n lww1.remove(Element(r, i))\n\n # merge original and person 1's update\n result1 = lww + lww1\n # Original: ABCDEFG\n # Added: HIJKL Removed: ACEG\n self.assertEqual(str(''.join([x.item for x in result1.get_resultant_document()])), 'BDFHIJKL')\n\n # person 2's parallel updates\n lww2 = LWWElementSet()\n added = \"KLMNOP\"\n removed = \"BFG\"\n i = len(abc) # to simulate parallel time, starting i with same as person 1's i\n for a in added:\n i = i + 1\n lww2.add(Element(a, i))\n\n for r in removed:\n i = i + 1\n lww2.remove(Element(r, i))\n\n # merge original and person 2's update\n result2 = lww + lww2\n # Original: ABCDEFG\n # Added: KLMNOP Removed: BFG\n self.assertEqual(str(''.join([x.item for x in result2.get_resultant_document()])), 'ACDEKLMNOP')\n\n # final state of original document after, merge of both person's updates\n final = result1 + result2\n # Original: ABCDEFG\n # Person 1 : Added : HIJKL Removed : ACEG\n # Person 2 : Added : KLMNOP Removed : BFG\n self.assertEqual(str(''.join([x.item for x in final.get_resultant_document()])), 'DHIJKLMNOP')", "title": "" }, { "docid": "ce0ef033fd5c68e008c08731a0323569", "score": "0.45706043", "text": "def merge_whole_observations(observations: Iterable[WholeObs]) -> pd.DataFrame:\n return pd.DataFrame(dict(observations), index=[0])", 
"title": "" }, { "docid": "f45101aef97a6de68a097ecf1a44e2c7", "score": "0.45690897", "text": "def testMergeBuildStats(self):\n base_map = {\n 'foo': {\n data_types.Expectation('foo', ['win'], 'Failure'): {\n 'builder': {\n 'step': data_types.BuildStats(),\n },\n },\n },\n }\n merge_stats = data_types.BuildStats()\n merge_stats.AddFailedBuild('1')\n merge_map = {\n 'foo': {\n data_types.Expectation('foo', ['win'], 'Failure'): {\n 'builder': {\n 'step': merge_stats,\n },\n },\n },\n }\n expected_stats = data_types.BuildStats()\n expected_stats.AddFailedBuild('1')\n expected_base_map = {\n 'foo': {\n data_types.Expectation('foo', ['win'], 'Failure'): {\n 'builder': {\n 'step': expected_stats,\n },\n },\n },\n }\n queries._MergeExpectationMaps(base_map, merge_map)\n self.assertEqual(base_map, expected_base_map)", "title": "" }, { "docid": "5bd9543f8e7a47167d0391ce1c4ec2aa", "score": "0.4567705", "text": "def _process_repeats_impl(reads, repeats_dict, work_dir, all_labels,\n initial_file_names, return_queue):\n MIN_MULT = trestle_config.vals[\"min_mult\"]\n MAX_MULT = trestle_config.vals[\"max_mult\"]\n FLANKING_LEN = trestle_config.vals[\"flanking_len\"]\n ORIENT_CONFIG = trestle_config.vals[\"orientations_to_run\"]\n \n repeat_label, side_labels = all_labels\n (template_name, extended_name, repeat_reads_name, \n pre_partitioning_name) = initial_file_names\n \n reads_dict = {}\n for read_file in reads:\n reads_dict.update(fp.read_sequence_dict(read_file))\n #orig_graph = fp.read_sequence_dict(graph_edges)\n #graph_dict = {int(h.split('_')[1]):orig_graph[h] for h in orig_graph}\n \n if not reads_dict:\n raise ProcessingException(\"No reads found from {0}\".format(reads))\n #if not graph_dict:\n # raise ProcessingException(\"No edges found from {0}\".format(\n # graph_edges))\n \n repeat_list = []\n repeat_edges = {}\n all_edge_headers = {}\n for rep in sorted(repeats_dict, reverse=True):\n #Checks multiplicity of repeat and presence of reverse strand\n #One run processes both forward and reverse strand of repeat\n \n if rep <= 0:\n continue\n \n valid_repeat = True\n if -rep not in repeats_dict:\n logger.debug(\"Repeat {0} missing reverse strand\".format(rep))\n valid_repeat = False\n elif (repeats_dict[rep].multiplicity < MIN_MULT or\n repeats_dict[rep].multiplicity > MAX_MULT or\n repeats_dict[-rep].multiplicity < MIN_MULT or\n repeats_dict[-rep].multiplicity > MAX_MULT):\n logger.debug(\"Repeat {0} multiplicity not in range: {1}\".format(\n rep, repeats_dict[rep].multiplicity))\n valid_repeat = False\n #if rep not in graph_dict:\n # logger.debug(\"Repeat {0} missing from graph file\".format(rep))\n # valid_repeat = False\n if not valid_repeat:\n continue\n \n #Makes repeat dirs\n repeat_dir = os.path.join(work_dir, repeat_label.format(rep))\n if not os.path.isdir(repeat_dir):\n os.mkdir(repeat_dir)\n repeat_list.append(rep)\n \n run_orientations = []\n if ORIENT_CONFIG == \"forward\":\n run_orientations = [(\"forward\", rep)]\n elif ORIENT_CONFIG == \"reverse\":\n run_orientations = [(\"reverse\", -rep)]\n elif ORIENT_CONFIG == \"both\":\n run_orientations = [(\"forward\", rep), (\"reverse\", -rep)]\n for curr_label, curr_rep in run_orientations:\n orient_path = os.path.join(repeat_dir, curr_label)\n if not os.path.isdir(orient_path):\n os.mkdir(orient_path)\n template_path = os.path.join(orient_path, template_name)\n extended_path = os.path.join(orient_path, extended_name)\n repeat_reads_path = os.path.join(orient_path, repeat_reads_name)\n partitioning_path = os.path.join(orient_path, \n 
pre_partitioning_name)\n \n in_label = side_labels[0]\n out_label = side_labels[1]\n repeat_edges[curr_rep] = {in_label:[], out_label:[]}\n \n #(mult, all_reads_list, inputs_dict,\n # outputs_dict) = repeats_dict[curr_rep]\n #mult = repeats_dict[curr_rep].multiplicity\n all_reads_list = repeats_dict[curr_rep].all_reads\n inputs_dict = repeats_dict[curr_rep].in_reads\n outputs_dict = repeats_dict[curr_rep].out_reads\n \n template_dict = {}\n extended_dicts = {}\n repeat_reads_dict = {}\n #Partitioning parts: id_num, Partitioned/Tied/None, \n #edge_id, top_score, total_score, Header\n partitioning = {in_label:[], out_label:[]}\n read_id = 0\n \n template_seq = repeats_dict[curr_rep].sequences[\"template\"]\n #if curr_label == \"reverse\":\n # template_seq = fp.reverse_complement(graph_dict[rep])\n template_dict[curr_rep] = template_seq\n \n all_edge_headers[curr_rep] = {}\n out_headers = set()\n #Headers will be in the form -h or +h,\n #edge_dict is in the form >[Input,Output]_edge##_h,\n #rev_comp of read will be written if the header is -h\n for edge_id in inputs_dict: \n repeat_edges[curr_rep][in_label].append(edge_id)\n extended_dicts[(in_label, edge_id)] = {}\n \n headers = inputs_dict[edge_id]\n for header in headers:\n if (not header) or (header[0] != '+' and header[0] != '-'):\n raise ProcessingException(\n \"Input read format not recognized: {0}\".format(\n header))\n if header[1:] not in reads_dict:\n raise ProcessingException(\n \"Read header {0} not in any of {1}\".format(\n header[1:], reads))\n \n if header[1:] not in all_edge_headers[curr_rep]:\n status_label = \"Partitioned\"\n edge_label = str(edge_id)\n score = 1\n total_score = 0\n partitioning[in_label].append((read_id, status_label, \n edge_label, score, \n total_score, \n header[1:]))\n all_edge_headers[curr_rep][header[1:]] = read_id\n read_id += 1\n \n extend_in_header = \"Extended_Template_Input_{0}\".format(\n edge_id)\n #if edge_id > 0:\n # edge_seq = graph_dict[edge_id]\n #elif edge_id < 0:\n # edge_seq = fp.reverse_complement(graph_dict[-edge_id])\n edge_seq = repeats_dict[curr_rep].sequences[edge_id]\n extended_seq = edge_seq[-FLANKING_LEN:]\n extended_dicts[(in_label, edge_id)][extend_in_header] = (\n extended_seq + template_seq)\n \n \n for edge_id in outputs_dict: \n repeat_edges[curr_rep][out_label].append(edge_id)\n extended_dicts[(out_label, edge_id)] = {}\n \n headers = outputs_dict[edge_id]\n for header in headers:\n if (not header) or (header[0] != '+' and header[0] != '-'):\n raise ProcessingException(\n \"Output read format not recognized: {0}\".format(\n header))\n if header[1:] not in reads_dict:\n raise ProcessingException(\n \"Read header {0} not in any of {1}\".format(\n header[1:], reads))\n \n curr_read_id = read_id\n if header[1:] not in all_edge_headers[curr_rep]:\n status_label = \"None\"\n edge_label = \"NA\"\n score = 0\n total_score = 0\n partitioning[in_label].append((read_id, status_label, \n edge_label, score, \n total_score,\n header[1:]))\n \n all_edge_headers[curr_rep][header[1:]] = read_id\n read_id += 1\n else:\n curr_read_id = all_edge_headers[curr_rep][header[1:]]\n \n if header[1:] not in out_headers:\n status_label = \"Partitioned\"\n edge_label = str(edge_id)\n score = 1\n total_score = 0\n partitioning[out_label].append((curr_read_id, \n status_label, \n edge_label, score, \n total_score, \n header[1:]))\n out_headers.add(header[1:]) \n \n extend_out_header = \"Extended_Template_Output_{0}\".format(\n edge_id)\n #if edge_id > 0:\n # edge_seq = graph_dict[edge_id]\n #elif 
edge_id < 0:\n # edge_seq = fp.reverse_complement(graph_dict[-edge_id])\n edge_seq = repeats_dict[curr_rep].sequences[edge_id]\n extended_seq = edge_seq[:FLANKING_LEN]\n extended_dicts[(out_label, edge_id)][extend_out_header] = (\n template_seq + extended_seq)\n \n #Need to reiterate over in_headers to add in_headers to \n #out-partitioning while avoiding double-adding ones in both\n for edge_id in inputs_dict:\n headers = inputs_dict[edge_id]\n for header in headers:\n if header[1:] not in out_headers:\n curr_read_id = all_edge_headers[curr_rep][header[1:]]\n status_label = \"None\"\n edge_label = \"NA\"\n score = 0\n total_score = 0\n partitioning[out_label].append((curr_read_id, \n status_label, \n edge_label, score, \n total_score, \n header[1:]))\n \n \n for header in all_reads_list:\n if (not header) or (header[0] != '+' and header[0] != '-'):\n raise ProcessingException(\n \"All reads format not recognized: {0}\".format(header))\n if header[1:] not in reads_dict:\n raise ProcessingException(\n \"Read header {0} not in any of {1}\".format(\n header[1:], reads))\n \n seq = reads_dict[header[1:]]\n if header[0] == '-':\n seq = fp.reverse_complement(seq)\n repeat_reads_dict[header[1:]] = seq\n \n curr_read_id = read_id\n if header[1:] not in all_edge_headers[curr_rep]:\n all_edge_headers[curr_rep][header[1:]] = read_id\n read_id += 1\n \n status_label = \"None\"\n edge_label = \"NA\"\n score = 0\n total_score = 0\n partitioning[in_label].append((curr_read_id, status_label, \n edge_label, score, \n total_score, header[1:]))\n \n status_label = \"None\"\n edge_label = \"NA\"\n score = 0\n total_score = 0\n partitioning[out_label].append((curr_read_id, status_label, \n edge_label, score, \n total_score, header[1:]))\n \n if template_dict and template_dict.values()[0]:\n fp.write_fasta_dict(template_dict, template_path)\n for edge in extended_dicts:\n if extended_dicts[edge] and extended_dicts[edge].values()[0]:\n extended_edge_path = extended_path.format(edge[0], \n edge[1])\n fp.write_fasta_dict(extended_dicts[edge], \n extended_edge_path)\n if repeat_reads_dict and repeat_reads_dict.values()[0]:\n fp.write_fasta_dict(repeat_reads_dict, repeat_reads_path)\n for side in side_labels:\n _write_partitioning_file(partitioning[side], \n partitioning_path.format(side))\n \n if not template_dict:\n raise ProcessingException(\"No template {0} found\".format(\n curr_rep))\n for edge in extended_dicts:\n if not template_dict:\n raise ProcessingException(\n \"No extended template {0} {1} {2} found\".format(\n curr_rep, edge[0], edge[1]))\n if not repeat_reads_dict:\n raise ProcessingException(\"No repeat reads {0} found\".format(\n curr_rep))\n for side in side_labels:\n if not partitioning[side]:\n raise ProcessingException(\n \"Empty partitioning file {0}\".format(\n partitioning_path.format(side)))\n\n return_queue.put((repeat_list, repeat_edges, all_edge_headers))", "title": "" }, { "docid": "e468c8d3cbe67536990b1100cfb49d7a", "score": "0.45671993", "text": "def test_resample_iterations_same(PM_ds_initialized_1d, chunk, replace):\n ds = PM_ds_initialized_1d.isel(lead=range(3), init=range(5))\n if chunk:\n ds = ds.chunk()\n ds_r_idx = _resample_iterations_idx(ds, ITERATIONS, \"member\", replace=replace)\n ds_r = _resample_iterations(ds, ITERATIONS, \"member\", replace=replace)\n for d in ds.dims:\n xr.testing.assert_identical(ds_r[d], ds_r_idx[d])\n assert ds_r.tos.size == ds_r_idx.tos.size", "title": "" }, { "docid": "f6d5ea3f530d39e15f7e1d689b00b853", "score": "0.4566746", "text": "def 
duplicate(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"duplicate\"), kwargs)", "title": "" }, { "docid": "cf5a7dbded0db7bc743fd6800dc2299f", "score": "0.45605308", "text": "def duplicate(*args, **kwargs):\n\n pass", "title": "" }, { "docid": "e46d145a364469d25136f51cd2a1c2f4", "score": "0.45585206", "text": "def mosaic(*rasters, **kwargs):\n # use cellsize of first raster if not manually assigned\n## firstraster = rasters[0]\n## cellwidth = kwargs.get(\"cellwidth\")\n## cellheight = kwargs.get(\"cellheight\")\n## if not (cellwidth and cellheight):\n## cellwidth = firstraster.info[\"cellwidth\"]\n## cellheight = firstraster.info[\"cellheight\"]\n \n # align first band of all rasters and resample to output\n aligned = align_rasters(*rasters)\n #aligned = [resample(rast, cellwidth=cellwidth, cellheight=cellheight)\n # for rast in aligned]\n firstalign,firstmask = aligned[0]\n merged = firstalign.copy()\n for rast,mask in aligned[1:]:\n merged.grids[0].img.paste(rast.grids[0].img, (0,0), mask)\n\n return merged", "title": "" }, { "docid": "16ba118a41df66c31ebd35fea1616156", "score": "0.4554724", "text": "def merger(filepath, src, dst):\r\n # Load and merge the multitrack pianoroll\r\n multitrack = Multitrack(filepath)\r\n merged = get_merged(multitrack)\r\n\r\n # Save the merged multitrack pianoroll\r\n result_path = change_prefix(filepath, src, dst)\r\n make_sure_path_exists(os.path.dirname(result_path))\r\n merged.save(result_path)", "title": "" }, { "docid": "df4b293caefa2e12aa2eea261ab004b8", "score": "0.4552576", "text": "def do_task_merge (self, *unnamed_args, **named_args):\n # extra dereference because we are only interested in the first (only) job\n return list(p1 for (p1, ps) in self.get_param_iterator (*unnamed_args, **named_args)(None))[0]", "title": "" }, { "docid": "ffbdb4c61bdcd5e57924f26de3731f23", "score": "0.45515713", "text": "def test_merge_ins_with_duplicates(self, dir_paths):\n\n with pytest.raises(ValueError):\n list(\n merge.merge_samples(\n dir_paths, sample_names=['a', 'a'],\n with_expression=False)[0]) # yapf: disable", "title": "" }, { "docid": "cd306e22083867de6e94ee929366ce28", "score": "0.4550894", "text": "def _incorporate_dup(self):\n seq, cds_start, cds_stop, start, end = self._setup_incorporate()\n\n dup_seq = seq[start:end]\n seq[end:end] = dup_seq\n\n is_frameshift = len(dup_seq) % 3 != 0\n variant_start_aa = int(math.ceil((self._var_c.posedit.pos.end.base + 1) / 3.0))\n\n alt_data = AltTranscriptData(\n seq,\n cds_start,\n cds_stop,\n is_frameshift,\n variant_start_aa,\n self._transcript_data.protein_accession,\n is_ambiguous=self._ref_has_multiple_stops,\n )\n return alt_data", "title": "" }, { "docid": "637e8c8f0b23eb614d315e61f8342ca4", "score": "0.45382947", "text": "def resolve_duplicate(self, task, found_duplicates):\n raise NotImplementedError", "title": "" }, { "docid": "2adf6262c87afd19894f8e737a505d16", "score": "0.45308942", "text": "def _merge_non_test_data(cls, aggregated_json, incremental_json, num_runs):\n\n incremental_builds = incremental_json[JSON_RESULTS_BUILD_NUMBERS]\n aggregated_builds = aggregated_json[JSON_RESULTS_BUILD_NUMBERS]\n aggregated_build_number = int(aggregated_builds[0])\n # Loop through all incremental builds, start from the oldest run.\n for index in reversed(range(len(incremental_builds))):\n build_number = int(incremental_builds[index])\n logging.debug(\"Merging build %s, incremental json index: %d.\",\n build_number, index)\n\n # Return if not all build numbers in the incremental json 
results\n # are newer than the most recent build in the aggregated results.\n # FIXME: make this case work.\n if build_number < aggregated_build_number:\n logging.warning((\"Build %d in incremental json is older than \"\n \"the most recent build in aggregated results: %d\"),\n build_number, aggregated_build_number)\n return False\n\n # Return if the build number is duplicated.\n # FIXME: skip the duplicated build and merge rest of the results.\n # Need to be careful on skiping the corresponding value in\n # _merge_tests because the property data for each test could\n # be accumulated.\n if build_number == aggregated_build_number:\n logging.warning(\"Duplicate build %d in incremental json\",\n build_number)\n return False\n\n # Merge this build into aggreagated results.\n cls._merge_one_build(aggregated_json, incremental_json, index, num_runs)\n\n return True", "title": "" }, { "docid": "3f4bb9b1585cf64c6bda74d4022291e5", "score": "0.4530067", "text": "def merge_subreddit_threads(subreddit, file):\n RedditService.merge_csv_bulk(subreddit, file)", "title": "" }, { "docid": "28d7b3bb45c1b2ed71ced30da470ba9d", "score": "0.45209795", "text": "def test_not_all_same_pool(self):\n directory = TempDirectory()\n original_file_path, dna = write_random_dna_fasta(directory.path, \"original.fasta\", 1000)\n args = make_default_args(original_file_path)\n args.random_seed = 1\n args.num_sims = 3\n args.num_subs = 2\n args.num_insertions = 2\n args.num_deletions = 2\n args.subset_len = 500\n\n snpmutator.run_from_args(args)\n mutated_seq_record1 = read_fasta_seq_record(\"original_mutated_1.fasta\")\n mutated_seq_record2 = read_fasta_seq_record(\"original_mutated_2.fasta\")\n mutated_seq_record3 = read_fasta_seq_record(\"original_mutated_3.fasta\")\n self.assertNotEqual(str(mutated_seq_record1.seq), str(mutated_seq_record2.seq), \"Generated sequences 1 and 2 should be different.\")\n self.assertNotEqual(str(mutated_seq_record2.seq), str(mutated_seq_record3.seq), \"Generated sequences 2 and 3 should be different.\")\n self.assertNotEqual(str(mutated_seq_record1.seq), str(mutated_seq_record3.seq), \"Generated sequences 1 and 3 should be different.\")", "title": "" }, { "docid": "eaedcfab63725aa38e043baf37ce931d", "score": "0.45205456", "text": "def average_duplicates(args: Args):\n print('Loading data')\n header = get_header(args.data_path)\n data = get_data(path=args.data_path, smiles_columns=args.smiles_columns, target_columns=args.target_columns)\n print(f'Data size = {len(data):,}')\n\n # Map SMILES string to lists of targets\n smiles_in_order = []\n smiles_to_targets = defaultdict(list)\n for smiles, targets in zip(data.smiles(flatten=True), data.targets()):\n smiles_to_targets[smiles].append(targets)\n if len(smiles_to_targets[smiles]) == 1:\n smiles_in_order.append(smiles)\n\n # Find duplicates\n duplicate_count = 0\n stds = []\n new_data = []\n for smiles in smiles_in_order:\n all_targets = smiles_to_targets[smiles]\n duplicate_count += len(all_targets) - 1\n num_tasks = len(all_targets[0])\n\n targets_by_task = [[] for _ in range(num_tasks)]\n for task in range(num_tasks):\n for targets in all_targets:\n if targets[task] is not None:\n targets_by_task[task].append(targets[task])\n\n stds.append([np.std(task_targets) if len(task_targets) > 0 else 0.0 for task_targets in targets_by_task])\n means = [np.mean(task_targets) if len(task_targets) > 0 else None for task_targets in targets_by_task]\n new_data.append((smiles, means))\n\n print(f'Number of duplicates = {duplicate_count:,}')\n print(f'Duplicate 
standard deviation per task = {\", \".join(f\":{std:.4e}\" for std in np.mean(stds, axis=0))}')\n print(f'New data size = {len(new_data):,}')\n\n # Save new data\n with open(args.save_path, 'w') as f:\n f.write(','.join(header) + '\\n')\n\n for smiles, avg_targets in new_data:\n f.write(smiles + ',' + ','.join(str(value) if value is not None else '' for value in avg_targets) + '\\n')", "title": "" }, { "docid": "8de712a8f2bc6e9bf7c8e857f147baa0", "score": "0.45054013", "text": "def merge(self):\n if len(self.n_best) <= 1:\n return\n else:\n new_n_best = self.n_best[:1]\n\n for cur_idx in xrange(1, len(self.n_best)):\n cur_hyp = self.n_best[cur_idx]\n for new_idx, new_hyp in enumerate(new_n_best):\n if new_hyp[1] == cur_hyp[1]:\n # Merge, add the probabilities.\n new_hyp[0] += cur_hyp[0]\n break\n else:\n new_n_best.append(cur_hyp)\n\n self.n_best = sorted(new_n_best, reverse=True)\n return self", "title": "" }, { "docid": "13712eb74386e4f571d859c2d300e423", "score": "0.44971138", "text": "def get_merged(multitrack):\r\n track_lists_to_merge = [[] for _ in range(17)]\r\n for idx, track in enumerate(multitrack.tracks):\r\n if track.is_drum:\r\n track_lists_to_merge[0].append(idx)\r\n else:\r\n track_lists_to_merge[track.program//8 + 1].append(idx)\r\n\r\n tracks = []\r\n for idx, track_list_to_merge in enumerate(track_lists_to_merge):\r\n if track_list_to_merge:\r\n merged = multitrack[track_list_to_merge].get_merged_pianoroll('max')\r\n tracks.append(Track(merged, TRACK_INFO[idx][1], (idx == 0),\r\n TRACK_INFO[idx][0]))\r\n else:\r\n tracks.append(Track(None, TRACK_INFO[idx][1], (idx == 0),\r\n TRACK_INFO[idx][0]))\r\n return Multitrack(None, tracks, multitrack.tempo, multitrack.downbeat,\r\n multitrack.beat_resolution, multitrack.name)", "title": "" }, { "docid": "d71d2406211439375a6d54e94c0c8a6f", "score": "0.44917613", "text": "def get_merged(multitrack):\n track_lists_to_merge = [[] for _ in range(5)]\n for idx, track in enumerate(multitrack.tracks):\n if track.is_drum:\n track_lists_to_merge[0].append(idx)\n elif track.program//8 == 0:\n track_lists_to_merge[1].append(idx)\n elif track.program//8 == 3:\n track_lists_to_merge[2].append(idx)\n elif track.program//8 == 4:\n track_lists_to_merge[3].append(idx)\n elif track.program < 96 or 104 <= track.program < 112:\n track_lists_to_merge[4].append(idx)\n\n tracks = []\n for idx, track_list_to_merge in enumerate(track_lists_to_merge):\n if track_list_to_merge:\n merged = multitrack[track_list_to_merge].get_merged_pianoroll('max')\n tracks.append(Track(merged, TRACK_INFO[idx][1], (idx == 0),\n TRACK_INFO[idx][0]))\n else:\n tracks.append(Track(None, TRACK_INFO[idx][1], (idx == 0),\n TRACK_INFO[idx][0]))\n return Multitrack(None, tracks, multitrack.tempo, multitrack.downbeat,\n multitrack.beat_resolution, multitrack.name)", "title": "" }, { "docid": "ca1012b2aa5bbe2223394002e5651ee4", "score": "0.4489591", "text": "def aggregate(self, data_source: str, agg_rep: str = \"mean\", agg_time: str = \"mean\") -> pd.DataFrame:\n data = []\n for name, run in self.runs.items():\n tmp = run.aggregate(data_source, agg_rep=agg_rep, agg_time=agg_time)\n tmp[\"run\"] = name\n data.append(tmp)\n\n if agg_rep:\n data = pd.DataFrame(data)\n else:\n data = pd.concat(data)\n data = data.set_index(\"run\")\n return data", "title": "" }, { "docid": "b6ce7f1435bb9e4a51594ecd8dc341c8", "score": "0.4485769", "text": "def combine(self, source1, source2):\n \n \n i = 1\n while source1._values != [] or source2._values != []:\n if i == 1:\n 
self._values.append(deepcopy(source1._values.pop()))\n if source2._values != []:\n i = 2\n elif i == 2:\n self._values.append(deepcopy(source2._values.pop()))\n if source1._values != []:\n i = 1\n \n return", "title": "" }, { "docid": "0d8092f12b31cbb9fa3147ba0be55773", "score": "0.44767585", "text": "def build_repetition_counts(self):\n if self.repData is not None: return\n if self.bStatic:\n raise ValueError(\"Cannot build repetition counts in a static DataSet object\")\n self.repData = []\n for oliAr in self.oliData:\n self.repData.append(_np.ones(len(oliAr), self.repType))", "title": "" }, { "docid": "bf73fb6878d461f97bcb990f8a806967", "score": "0.44681942", "text": "def combine(self, dataBatch):\n self.randomMasks()\n self.data[\"frame\"][\"A\"][self.mask[\"frame\"][\"A\"]] = dataBatch.data[\"frame\"][\"A\"][\n self.mask[\"frame\"][\"A\"]\n ]\n self.data[\"frame\"][\"B\"][self.mask[\"frame\"][\"B\"]] = dataBatch.data[\"frame\"][\"B\"][\n self.mask[\"frame\"][\"B\"]\n ]\n self.data[\"latent\"][\"A\"][self.mask[\"latent\"][\"A\"]] = dataBatch.data[\"latent\"][\n \"A\"\n ][self.mask[\"latent\"][\"A\"]]\n self.data[\"latent\"][\"B\"][self.mask[\"latent\"][\"B\"]] = dataBatch.data[\"latent\"][\n \"B\"\n ][self.mask[\"latent\"][\"B\"]]\n self.data[\"location\"][\"A\"][self.mask[\"location\"][\"A\"]] = dataBatch.data[\n \"location\"\n ][\"A\"][self.mask[\"location\"][\"A\"]]\n self.data[\"location\"][\"B\"][self.mask[\"location\"][\"B\"]] = dataBatch.data[\n \"location\"\n ][\"B\"][self.mask[\"location\"][\"B\"]]\n # self.data[\"output\"][self.mask[\"output\"]] = dataBatch.data[\"output\"][self.mask[\"output\"]]", "title": "" }, { "docid": "5c7363d2d01355485af9117a5a7f5697", "score": "0.44589022", "text": "def merge(self, datasets=None, separate_datasets=False):\n self.logger.info(\"merging\")\n if separate_datasets:\n warnings.warn(\"The option seperate_datasets=True is\"\n \"not implemented yet. Performing merging, but\"\n \"neglecting the option.\")\n else:\n if datasets is None:\n datasets = list(range(len(self.datasets)))\n first = True\n for dataset_number in datasets:\n if first:\n dataset = self.datasets[dataset_number]\n first = False\n else:\n dataset = self._append(dataset, self.datasets[dataset_number])\n for raw_data_file, file_size in zip(self.datasets[dataset_number].raw_data_files,\n self.datasets[dataset_number].raw_data_files_length):\n dataset.raw_data_files.append(raw_data_file)\n dataset.raw_data_files_length.append(file_size)\n self.datasets = [dataset]\n self.number_of_datasets = 1\n return self", "title": "" } ]
a2705f464cacd4cac18658eb5c83b576
Get the abbreviated metaedges for an abbreviated metapath. Pass a hetio.MetaGraph object to `standardize_by` to standardize metaedge abbreviations based on the non-inverted orientation. Pass `text` to standardize by alphabetical/forward-direction arrangement of the abbreviation. Default (`None`) does not standardize.
[ { "docid": "d03621972f35fb52df1ff13fc44441a9", "score": "0.7505954", "text": "def metaedges_from_metapath(abbreviation, standardize_by=None):\n if isinstance(standardize_by, hetio.hetnet.MetaGraph):\n metapath = standardize_by.metapath_from_abbrev(abbreviation)\n return [metaedge.get_standard_abbrev() for metaedge in metapath]\n # Note that this is a valid regex module pattern but will not work in the\n # re module due to \"look-behind requires fixed-width pattern\".\n regex_string = r'(?<=^|[a-z<>])[A-Z][A-Z0-9]*[a-z<>]+[A-Z][A-Z0-9]*'\n pattern = regex.compile(regex_string)\n metaedge_abbrevs = pattern.findall(abbreviation, overlapped=True)\n if standardize_by is None:\n return metaedge_abbrevs\n elif standardize_by == 'text':\n metaedge_abbrevs = [arrange_metaedge(x) for x in metaedge_abbrevs]\n return metaedge_abbrevs\n else:\n raise ValueError('Invalid value for standardize_by')", "title": "" } ]
[ { "docid": "f621f2899b05db266c51a54b3830006c", "score": "0.5728711", "text": "def create_abbreviations(metagraph):\n kind_to_abbrev = find_abbrevs(metagraph.node_dict.keys())\n kind_to_abbrev = {kind: abbrev.upper()\n for kind, abbrev in kind_to_abbrev.items()}\n\n edge_set_to_keys = dict()\n for edge in list(metagraph.edge_dict.keys()):\n key = frozenset(list(map(str.lower, edge[:2])))\n value = edge[2]\n edge_set_to_keys.setdefault(key, list()).append(value)\n\n for edge_set, keys in list(edge_set_to_keys.items()):\n key_to_abbrev = find_abbrevs(keys)\n for key, abbrev in list(key_to_abbrev.items()):\n previous_abbrev = kind_to_abbrev.get(key)\n if previous_abbrev and len(abbrev) <= len(previous_abbrev):\n continue\n kind_to_abbrev[key] = abbrev\n\n return kind_to_abbrev", "title": "" }, { "docid": "2097d7789cb5d239344d5b74b592d78b", "score": "0.5544935", "text": "def arrange_metaedge(abbreviation):\n source, target = regex.split('[a-z<>]+', abbreviation)\n edge = regex.search('[a-z]+', abbreviation).group()\n if '<' in abbreviation or (source > target):\n source, target = target, source\n return '{}{}{}'.format(source, edge, target)", "title": "" }, { "docid": "35d9cf412a587d9f4ad373e1efc163bc", "score": "0.5312294", "text": "def get_abbreviations(self) -> dict:\n\n return list(filter(lambda l: l.get('heading') == \"Abbreviations\", self.get_sections_texts())).pop()", "title": "" }, { "docid": "c0d240365993df9e91a3fa5daea363d1", "score": "0.50795704", "text": "def validate_abbreviations(metagraph):\n valid = True\n metanodes = set(metagraph.get_nodes())\n metaedges = set(metagraph.get_edges(exclude_inverts=False))\n\n # Duplicated metanode and metaedge kinds\n metanode_kinds = {metanode.identifier for metanode in metanodes}\n metaedge_kinds = {metaedge.kind for metaedge in metaedges}\n duplicated_kinds = metanode_kinds & metaedge_kinds\n if duplicated_kinds:\n msg = 'Duplicated kinds between metanodes and metaedges: {}'\n print(msg.format(duplicated_kinds))\n valid = False\n\n # Check that metanodes do not have any duplicated abbreviations\n kind_to_abbrev = metagraph.kind_to_abbrev\n metanode_kind_to_abbrev = {\n k: v for k, v in kind_to_abbrev.items() if k in metanode_kinds}\n duplicated_metanode_abbrevs = get_duplicates(\n metanode_kind_to_abbrev.values())\n if duplicated_metanode_abbrevs:\n print('Duplicated metanode abbrevs:', duplicated_metanode_abbrevs)\n valid = False\n\n # Check metanode abbreviation violations\n for metanode in metanodes:\n abbrev = metanode.abbrev\n # metanode abbreviations should be uppercase\n if not abbrev.isupper():\n print('lowercase metanode abbreviation:', abbrev)\n valid = False\n # metanode abbreviation should not start with a digit\n if abbrev[0].isdigit():\n print('digit leading metanode abbreviation:', abbrev)\n valid = False\n\n # Check metaedge abbreviation violations\n for metaedge in metaedges:\n abbrev = metaedge.kind_abbrev\n # metaedge abbreviations should be lowercase\n if not abbrev.islower():\n print('uppercase metaedge abbreviation:', abbrev)\n valid = False\n # metaedge abbreviations should not contain digits\n if any(character.isdigit() for character in abbrev):\n print('digit in metaedge abbreviation:', abbrev)\n valid = False\n\n # Check that metaedges are not ambigious\n metaedge_abbrevs = [metaedge.abbrev for metaedge in metaedges]\n duplicated_meataedge_abbrevs = get_duplicates(metaedge_abbrevs)\n if duplicated_meataedge_abbrevs:\n msg = 'Duplicated metaedge abbreviations: {}'\n print(msg.format(duplicated_meataedge_abbrevs))\n 
valid = False\n\n return valid", "title": "" }, { "docid": "2f055d622863aa5fa66b6b2bca0be090", "score": "0.5062466", "text": "def knowledge_area_abbreviation(ka):\n\n return {\n 'abbr': ka.title,\n 'title': ka.title[ka.title.find('(')+1:-1]\n }", "title": "" }, { "docid": "100427076ad24cb402e00eb491b354d2", "score": "0.4881498", "text": "def _create_unit_abbreviations(text, *args):\n\n def _split(part):\n return [a for a, b in re.findall(r'(\\w+?)(\\b|(?<=[a-zäö])(?=[A-ZÄÖ]))', part)]\n\n def _variations(part):\n inner_parts = _split(part) + ['']\n\n spacecombos = list(itertools.product(['.', '. '], repeat=len(inner_parts) - 1))\n combined = [tuple(zip(inner_parts, spacecombo)) for spacecombo in spacecombos]\n combined_strings = [''.join(cc[0] + cc[1] for cc in inner).strip() for inner in combined]\n\n variations = list(set(combined_strings))\n variations += [' '.join(inner_parts)]\n variations += [''.join(inner_parts)]\n return sorted(variations)\n\n variation_lists = [_variations(part) + [part] for part in text.split('/')]\n\n combined_variations = sorted(set(['/'.join(combined).strip().replace(' /', '/')\n for combined in sorted(set(itertools.product(*variation_lists)))]))\n\n variationset = set(variation.strip() for var_list in variation_lists for variation in var_list\n if not re.search(r'^[0-9](\\.)?$', variation.strip()))\n\n return ' # '.join(combined_variations) + ' # ' + ' # '.join(sorted(variationset))", "title": "" }, { "docid": "badfc61b32a9af6dc5537ec839085433", "score": "0.48322722", "text": "def _handle_abbreviations(s):\n # Format: abbrev = \"meaning\" gender (kk|kvk|hk)\n a = s.split('=', maxsplit=1)\n abbrev = a[0].strip()\n m = a[1].strip().split('\\\"')\n par = \"\"\n if len(m) >= 3:\n # Something follows the last quote\n par = m[-1].strip()\n gender = \"hk\" # Default gender is neutral\n fl = None # Default word category is None\n if par:\n p = par.split(' ')\n if len(p) >= 1:\n gender = p[0].strip()\n if len(p) >= 2:\n fl = p[1].strip()\n Abbreviations.add(abbrev, m[1], gender, fl)", "title": "" }, { "docid": "c9dcd29c11145de62fbd00504d303d3c", "score": "0.47770673", "text": "def metaedge_id_from_abbreviation(metagraph, abbreviation):\n source_abbrev, target_abbrev = regex.split('[a-z<>]+', abbreviation)\n edge_abbrev = regex.search('[a-z<>]+', abbreviation).group()\n abbrev_to_kind = {v: k for k, v in metagraph.kind_to_abbrev.items()}\n source_kind = abbrev_to_kind[source_abbrev]\n target_kind = abbrev_to_kind[target_abbrev]\n metanode = metagraph.get_node(source_kind)\n for edge in metanode.edges:\n if edge.target.identifier != target_kind:\n continue\n if edge.kind_abbrev == edge_abbrev:\n kind = edge.kind\n break\n else:\n raise KeyError('edge abbreviation not found: {}'.format(edge_abbrev))\n if '>' in abbreviation:\n direction = 'forward'\n elif '<' in abbreviation:\n direction = 'backward'\n else:\n direction = 'both'\n return source_kind, target_kind, kind, direction", "title": "" }, { "docid": "8d6d0616dcb6e1d8bc83c9ceafd77e15", "score": "0.47619233", "text": "def normalized_concept_name(lang, text):\n if lang == 'en':\n stem = normalize_english(text) or text\n return normalize_text(stem)\n else:\n return normalize_text(text)", "title": "" }, { "docid": "6e2a056f75dccdefcf78a636fb8b68fd", "score": "0.47287285", "text": "def acronym(self, text):\r\n # Find the acronyms.\r\n acronyms = r'''(?P<acronym>[\\w]+)\\((?P<definition>[^\\(\\)]+?)\\)'''\r\n\r\n # Check all acronyms.\r\n for acronym, definition in re.findall(acronyms, text):\r\n caps_acronym = 
''.join(re.findall('[A-Z\\d]+', acronym))\r\n caps_definition = ''.join(re.findall('[A-Z\\d]+', definition))\r\n if caps_acronym and caps_acronym == caps_definition:\r\n text = text.replace('%s(%s)' % (acronym, definition), '<acronym title=\"%s\">%s</acronym>' % (definition, acronym))\r\n \r\n text = html_replace(r'''(^|\\s)([A-Z]{3,})\\b(?!\\()''', r'''\\1<span class=\"caps\">\\2</span>''', text)\r\n\r\n return text", "title": "" }, { "docid": "7b50836766b5307fdc8e08cc0db807c3", "score": "0.45906082", "text": "def extract_abbreviation(text):\n\tcur_offset = len(text)\n\tstart_index = -1\n\tgroup_count = 0\n\tbrace_count = 0\n\ttext_count = 0\n\t\n\twhile True:\n\t\tcur_offset -= 1\n\t\tif cur_offset < 0:\n\t\t\t# moved at string start\n\t\t\tstart_index = 0\n\t\t\tbreak\n\t\t\n\t\tch = text[cur_offset]\n\t\t\n\t\tif ch == ']':\n\t\t\tbrace_count += 1\n\t\telif ch == '[':\n\t\t\tif brace_count == 0: # unexpected brace\n\t\t\t\tstart_index = cur_offset + 1\n\t\t\t\tbreak\n\t\t\tbrace_count -= 1\n\t\telif ch == '}':\n\t\t\ttext_count += 1\n\t\telif ch == '{':\n\t\t\tif text_count == 0: # unexpected brace\n\t\t\t\tstart_index = cur_offset + 1\n\t\t\t\tbreak\n\t\t\ttext_count -= 1\n\t\telif ch == ')':\n\t\t\tgroup_count += 1\n\t\telif ch == '(':\n\t\t\tif group_count == 0: # unexpected brace\n\t\t\t\tstart_index = cur_offset + 1\n\t\t\t\tbreak\n\t\t\tgroup_count -= 1\n\t\telse:\n\t\t\tif brace_count or text_count:\n\t\t\t\t# respect all characters inside attribute sets\n\t\t\t\tcontinue\n\t\t\tif not is_allowed_char(ch) or (ch == '>' and is_ends_with_tag(text[0:cur_offset + 1])):\n\t\t\t\t# found stop symbol\n\t\t\t\tstart_index = cur_offset + 1\n\t\t\t\tbreak\n\t\t\n\tif start_index != -1 and start_index < len(text) and text_count == brace_count == group_count == 0:\n\t\treturn text[start_index:]\n\telse:\n\t\treturn ''", "title": "" }, { "docid": "7882f9436793ea14552680ef4428aefd", "score": "0.45717865", "text": "def normalize(text):\n text = araby.strip_harakat(text)\n text = araby.strip_tashkeel(text)\n text = araby.strip_small(text)\n text = araby.strip_tatweel(text)\n text = araby.strip_shadda(text)\n text = araby.strip_diacritics(text)\n text = araby.normalize_ligature(text)\n text = araby.normalize_teh(text)\n text = araby.normalize_alef(text)\n return text", "title": "" }, { "docid": "5bdf4f621d806277efbfb532c267668c", "score": "0.45079258", "text": "def _parse_abbreviation(self, abbr):\n if 'cfb/schools' not in str(abbr):\n return None\n abbr = re.sub(r'.*/schools/', '', str(abbr))\n abbr = re.sub(r'/.*', '', abbr)\n return abbr", "title": "" }, { "docid": "8e64303ee44eda48ea2edff8d211f6a4", "score": "0.4425654", "text": "def standardize_adata_gene_names(adata_in, sp_2_letter):\n assert sp_2_letter in [\"hs\",\"mm\"]\n geneset, _ = retrieve_TADMap_by_species(sp_2_letter)\n return _match_adata_to_geneset(adata_in, geneset)", "title": "" }, { "docid": "a20ee56c9ff80472faabe485053d094b", "score": "0.44184145", "text": "def clean_text(text):\n trantab = maketrans('%*@!$^+,:\\\"><&[()]\\';_=', ' ' * 21)\n # Hardcoded dictionary of some common abbreviations.\n hardcoded_dict = \\\n {'INV': 'Invasive',\n 'Inv': 'Invasive', 'AvDO2': 'avdo2', #'arteriojugular venous difference of oxygen'\n 'FiO2': 'fio2', 'CaO2': 'cao2', #'fraction inspired oxygen','arterial oxygen concentration'\n 's/p': 'status post', 'S/P': 'status post',\n 'D/C': 'discontinue', 'RLQ': 'Right Lower Quadrant of Abdomen',\n 'RUQ': 'Right Upper Quadrant of Abdomen',\n 'LLQ': 'Left Lower Quadrant of Abdomen',\n 'LUQ': 'Left 
Upper Quadrant of Abdomen',\n 'RUE': 'Right Upper Extremity',\n 'RLE': 'Right Lower Extremity', 'PO': 'by mouth',\n 'po': 'by mouth', 'airleak': 'air leak',\n 'mlns': 'ml normal saline solution', 'NS': 'normal saline solution',\n 'NSS': 'normal saline solution', 'nss': 'normal saline solution',\n 'ns': 'normal saline solution', '02sat': 'oxygen saturation',\n 'sat02': 'oxygen saturation', 'satO2': 'oxygen saturation',\n 'O2sat': 'oxygen saturation', 'LDH': 'Lactate dehydrogenase enzyme',\n 'art': 'arterial', 'ART': 'arterial', 'bp': 'blood pressure',\n 'BP': 'Blood Pressure',\n 'angio': 'angiography', 'NeoSure': 'neosure', 'NeoCate': 'neocate',\n 'PRBCS': 'Packed Red Blood Cells', 'PRBCs': 'Packed Red Blood Cells',\n 'PRBC\\'S': 'Packed Red Blood Cells',\n 'PRBC': 'Packed Red Blood Cells', 'prbc': 'Packed Red Blood Cells',\n 'BNP': 'Brain natiuretic Peptide',\n 'High PIP': 'High Peak Inspiratory Pressure',\n 'PIP': 'Peak Inspiratory Pressure',\n 'high pip': 'high peak inspiratory pressure',\n 'trach': 'tracheal', 'Trach': 'tracheal',\n 'baedp': 'Balloon Aortic End-Diastolic Pressure',\n 'baedp': 'balloon aortic end-diastolic pressure',\n 'kvo': 'keep vein open',\n 'KVO': 'Keep Vein Open', 'PTT': 'Partial Thromboplastin Time',\n 'ed': 'emergency department',\n 'LR': 'lactated Ringer\\'s solution',\n 'lr': 'lactated Ringer\\'s solution',\n '(L)': '(Left)', '(R)': '(Right)', 'Neg': 'negative',\n 'Insp': 'Inspiratory',\n 'Insp.': 'Inspiratory', 'LCW': 'Left Cardiac Work',\n 'LCWI': 'Left Cardiac Work Index',\n 'LVSW': 'Left Ventricular Stroke Work'}\n indx = []\n text_all = []\n acronyms = []\n for row in range(len(text)):\n if text.iloc[row] != '' and isinstance(text.iloc[row], str):\n indx.append(text.index.values[row])\n tmp = text.iloc[row].split(' ')\n for word in tmp:\n if sum(1 for c in word if c.isupper()) == len(word):\n acronyms.append(word)\n tmp = \" \".join(tmp)\n tmp = re.sub(r'\\b[a-z]', lambda m: m.group().upper(), tmp)\n text_all.append(tmp)\n text_split = pd.Series(text_all, index=indx)\n cleaned_text = text_split.str.split(\" \").apply(translate_words, args=[hardcoded_dict,])\n cleaned_text = cleaned_text.str.join(\" \").str.translate(trantab, '#').str.lower()\n return cleaned_text", "title": "" }, { "docid": "a2e8ceb335fff945717e2ab7a1aee2e1", "score": "0.43994313", "text": "def _calc_abbrev(transitions):\n for transition in transitions:\n format = transition.format\n delta_seconds = transition.deltaSeconds\n\n index = format.find('/')\n if index >= 0:\n if delta_seconds == 0:\n abbrev = format[:index]\n else:\n abbrev = format[index + 1:]\n elif format.find('%s') >= 0:\n letter = transition.letter\n if letter == '-': letter = ''\n abbrev = format % letter\n else:\n abbrev = format\n\n transition.abbrev = abbrev", "title": "" }, { "docid": "1bd6a77b4e43307d0eb503d88734453a", "score": "0.43914795", "text": "def _replace_abbreviations(self, text, split_multipart_abbrevs=True):\n replacements = {}\n text = self._replace_regex(text, self.single_letter_ellipsis, \"abbreviation\")\n text = self._replace_regex(text, self.and_cetera, \"abbreviation\")\n text = self._replace_regex(text, self.str_abbreviations, \"abbreviation\")\n text = self._replace_regex(text, self.nr_abbreviations, \"abbreviation\")\n text = self._replace_regex(text, self.single_token_abbreviation, \"abbreviation\")\n text = self._replace_regex(text, self.single_letter_abbreviation, \"abbreviation\")\n text = self.spaces.sub(\" \", text)\n text = self._replace_regex(text, self.ps, \"abbreviation\")\n\n def 
repl(match):\n instance = match.group(0)\n if instance not in replacements:\n # check if it is a multipart abbreviation\n if split_multipart_abbrevs and self.multipart_abbreviation.fullmatch(instance):\n parts = [p.strip() + \".\" for p in instance.strip(\".\").split(\".\")]\n replacements[instance] = self._multipart_replace(instance, parts, \"abbreviation\")\n else:\n replacement = replacements.setdefault(instance, self._get_unique_string())\n self.mapping[replacement] = Token(instance, \"abbreviation\")\n return \" %s \" % replacements[instance]\n text = self.abbreviation.sub(repl, text)\n # text = self._replace_set(text, self.simple_abbreviation_candidates, self.simple_abbreviations, \"abbreviation\", ignore_case=True)\n return text", "title": "" }, { "docid": "8558de506d06d52de9d7d428c52507bb", "score": "0.43890435", "text": "def combine_bolds(graph_text):\n if graph_text.startswith(\"(\"):\n graph_text = (\n graph_text.replace(\" \", \" \")\n .replace(\"(\", \"**(\", 1)\n .replace(\")\", \")**\", 1)\n .replace(\"** **\", \" \", 1)\n )\n return graph_text", "title": "" }, { "docid": "316e8d51f387390c203885819aba8fc0", "score": "0.43853647", "text": "def setUseAbbreviations(self, value):\n return self._set(useAbbreviations=value)", "title": "" }, { "docid": "a5ec5109322b3878b7f20b125c0ff180", "score": "0.43607435", "text": "def get_abbrev(self):\n return self.__abbrev", "title": "" }, { "docid": "84fcb33276077ad967e35272770d2137", "score": "0.43299747", "text": "def find_abbrevs(kinds):\n kind_to_abbrev = {kind: kind[0].lower() for kind in kinds}\n duplicates = get_duplicates(kind_to_abbrev.values())\n while duplicates:\n for kind, abbrev in list(kind_to_abbrev.items()):\n if abbrev in duplicates and len(abbrev) < len(kind):\n abbrev += kind[len(abbrev)].lower()\n kind_to_abbrev[kind] = abbrev\n duplicates = get_duplicates(kind_to_abbrev.values())\n return kind_to_abbrev", "title": "" }, { "docid": "c507b09a10d74174ed34ff6d7c54701e", "score": "0.429816", "text": "def handle_disambig(text):\n # find titles of the form Foo (bar)\n text = text.replace('_', ' ').replace('/', ' ')\n while ' ' in text:\n text = text.replace(' ', ' ')\n match = re.match(r'([^(]+) \\((.+)\\)', text)\n if not match:\n return text, None\n else:\n return match.group(1), 'n/' + match.group(2).strip(' _')", "title": "" }, { "docid": "bc8afb02c06fc27c2b9284fad40f3570", "score": "0.4142849", "text": "def __get__(self, c: \"NamingConventions\", owner) -> Optional[str]:\n\n p = c.metadata.platforms\n if not p:\n return None\n\n if not self.allow_unknown_abbreviations:\n unknowns = p.difference(self.known_abbreviations)\n if unknowns:\n raise ValueError(\n f\"We don't know the DEA abbreviation for platforms {unknowns!r}. \"\n f\"We'd love to add more! Raise an issue on Github: \"\n f\"https://github.com/GeoscienceAustralia/eo-datasets/issues/new' \"\n )\n\n abbreviations = sorted(\n self.known_abbreviations.get(s, s.replace(\"-\", \"\")) for s in p\n )\n\n if self.show_specific_platform and len(abbreviations) == 1:\n return abbreviations[0]\n\n # If all abbreviations are in a group, name it using that group.\n # (eg. 
\"ls\" instead of \"ls5-ls7-ls8\")\n for group_name, pattern in self.grouped_abbreviations.items():\n if all(pattern.match(a) for a in abbreviations):\n return group_name\n\n # Otherwise, there's a mix of platforms.\n\n # Is there a common constellation?\n constellation = c.metadata.properties.get(\"constellation\")\n if constellation:\n return constellation\n\n # Don't bother to include platform in name for un-groupable mixes of them.\n if not self.allow_unknown_abbreviations:\n raise NotImplementedError(\n f\"Satellite constellation abbreviation is not known for platforms {p}. \"\n f\"(for DEA derivative naming conventions.)\"\n f\" Is this a mistake? We'd love to add more! Raise an issue on Github: \"\n f\"https://github.com/GeoscienceAustralia/eo-datasets/issues/new' \"\n )\n return None", "title": "" }, { "docid": "44547b32c006301d879e7ad27bb7e368", "score": "0.413793", "text": "def bold_first_italics(graph_text):\n if graph_text.count(\"*\") > 1:\n return graph_text.replace(\"*\", \"**\", 2)\n else:\n return graph_text", "title": "" }, { "docid": "b2fc6339456fae323a4561a2624d7d3b", "score": "0.41293293", "text": "def normalize(self, text):\n if text is None:\n return None\n text = self.fix_text(text)\n\n # Normalize to canonical unicode (using NFKC by default)\n # if self.form is not None:\n # text = unicodedata.normalize(self.form, text)\n\n if self._remove_control_chars:\n text = self.remove_control_chars(text)\n\n # if self.fix_line_breaks:\n # text = text.replace('\\u2028', '\\n').replace('\\u2029', '\\n').replace('\\r\\n', '\\n').replace('\\r', '\\n')\n\n if self._fix_hyphens:\n text = self.fix_hyphens(text)\n\n if self._uncurl_quotes:\n text = self.uncurl_quotes(text)\n\n if self._fix_ellipsis:\n text = self.fix_ellipsis(text)\n\n if self._fix_slashes:\n text = self.fix_slashes(text)\n\n if self._fix_tildes:\n text = self.fix_tildes(text)\n\n if self._replace_tabs:\n text = self.replace_tabs(text, replacement_spaces=self._replacement_spaces)\n\n if self._fix_whitespaces:\n text = self.fix_whitespaces(text)\n\n if self._collapse_whitespaces:\n text = self.collapse_whitespaces(text)\n\n if self._strip:\n text = text.strip()\n\n if self._single_quotes_only:\n text = self.single_quotes_only(text)\n\n if self._regular_parentheses_only:\n text = self.regular_parentheses_only(text)\n\n if self._hanja2hangle:\n text = self.hanja2hangle(text)\n\n if self._fix_emoticons:\n text = self.emoticon_normalize(text, num_repeats=self._num_repeats)\n\n return text", "title": "" }, { "docid": "fdb5aa00eb2b2f4a0dac037fec227052", "score": "0.412086", "text": "def replaced_abbreviations(sentence: str,\n abbreviation_pattern_to_full_text_map: Dict[str, str]) -> str:\n assert sentence is not None\n assert abbreviation_pattern_to_full_text_map is not None\n\n return _replaced('abbreviation', sentence, abbreviation_pattern_to_full_text_map)", "title": "" }, { "docid": "5465138209d2f94b9a882c3524a1158b", "score": "0.41156703", "text": "def build_suffix_tree(text):\r\n result = []\r\n # Implement this function yourself\r\n tree = suffix_tree(text)\r\n for s in tree:\r\n result.append(s.label)\r\n \r\n return result", "title": "" }, { "docid": "71aa5d2c926b2de30434914e32385e84", "score": "0.41088402", "text": "def _parse_team_abbreviation(self, stats):\n team_tag = stats(PLAYER_SCHEME['team_abbreviation'])\n team = re.sub(r'.*/cbb/schools/', '', str(team_tag('a')))\n team = re.sub(r'/.*', '', team)\n return team", "title": "" }, { "docid": "e9971deaf5f7f34f3de4283dccbfa346", "score": "0.41034454", "text": 
"def test_abbreviate_returns_short_word(expected):\n # Setup - none necessary\n\n # Exercise\n result = wtlw.abbreviate(expected)\n\n # Verify\n assert result == expected\n\n # Cleanup - none", "title": "" }, { "docid": "ae81cf8715bf29b8b4592d329ed7f386", "score": "0.40943274", "text": "def _get_abbrev(self, field_id, raw_value):\n values = self.abbreviations.get(field_id, None)\n\n # convert raw_value to a string since that's always what we want for line name input,\n # and for easy comparison against abbreviation keys which are converted to strings in\n # __init__() to avoid problems on the JS side (can't create string values as numbers)\n raw_value = str(raw_value)\n if not values:\n return raw_value\n\n abbreviation = values.get(raw_value)\n if abbreviation:\n # tolerate values that may have been provided as ints, for example\n return str(abbreviation)\n return raw_value", "title": "" }, { "docid": "3793bb7a8e297fa442fd7a475acd0187", "score": "0.4085441", "text": "def title_except(\n s,\n exceptions=('the', 'of', 'for', 'and'),\n acronyms=('OIM', 'IT', 'PVS', 'SFM', 'OT', 'NP', 'FMDP', 'VRM', 'TEC', 'GIS', 'ODG'),\n):\n words = s.split()\n\n if words[0].startswith('A/'):\n words_title = ['A/' + words[0].replace('A/', '').capitalize()]\n elif words[0] in acronyms:\n words_title = [words[0]]\n else:\n words_title = [words[0].capitalize()]\n\n for word in words[1:]:\n word = word.lower()\n\n if word.startswith('('):\n pre = '('\n word = word.replace('(', '')\n else:\n pre = ''\n\n if word.endswith(')'):\n post = ')'\n word = word.replace(')', '')\n else:\n post = ''\n\n if word.upper() in acronyms:\n word = word.upper()\n elif word in exceptions:\n pass\n else:\n word = word.capitalize()\n\n words_title.append(pre + word + post)\n\n return ' '.join(words_title)", "title": "" }, { "docid": "cbec6f5e15acb9ab855a27f85ec70d60", "score": "0.4071853", "text": "def func_normalize_text(text):\n # type: (str)->str\n if six.PY2:\n if isinstance(text, str):\n text = text.decode('utf-8')\n return jaconv.h2z(text=re.sub(r'\\s', '', string=text), kana=True, ascii=True, digit=True)\n else:\n return jaconv.h2z(text=re.sub(r'\\s', '', string=text), kana=True, ascii=True, digit=True)", "title": "" }, { "docid": "dc997957fb59eb4600e1f0272a3b5e37", "score": "0.40477467", "text": "def normalize(self, text):\n return self.lemma_split(text)[0]", "title": "" }, { "docid": "c20dc1efee0a3775010034b1674f0a5c", "score": "0.40417555", "text": "def map_abbrev_info(abbrev):\n \n if PLOT_ABBREV_MAP.has_key(abbrev):\n plot_type, subtitle, offsets_scale, orientation = PLOT_ABBREV_MAP[abbrev]\n else:\n plot_type, subtitle, offsets_scale, orientation = abbrev, 'NoMatch', [-4.85, 1.25, 0.76], 'landscape'\n return plot_type, subtitle, offsets_scale, orientation", "title": "" }, { "docid": "242080837589c567c4e84c4b3c1ca5e6", "score": "0.4035622", "text": "def normalize_answer_allen(s):\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "7eef699801725fedeea546196cf68918", "score": "0.40242097", "text": "def generateAbbreviations(self, word: str):\n length = len(word)\n up = 1 << length\n res = []\n for num in range(up):\n tmp_str = ''\n counts = 0\n tmp_num = num\n for i in 
range(length-1, -1, -1):\n if tmp_num & 1:\n counts += 1\n else:\n tmp_str += str(counts)[::-1] if counts > 0 else ''\n tmp_str += word[i]\n counts = 0\n tmp_num = tmp_num >> 1\n tmp_str += str(counts)[::-1] if counts > 0 else ''\n res.append(tmp_str[::-1])\n return res", "title": "" }, { "docid": "a95b3776038009a7db839dbaacdc307d", "score": "0.40203413", "text": "def get_tcga_abbr(self, long_name):\n with self.cadb:\n cur = self.cadb.cursor()\n\n name = cur.execute(\"SELECT Abbr FROM TCGA WHERE longName = ?\", (long_name,)).fetchone()\n\n if not name:\n return None\n\n return name[0]", "title": "" }, { "docid": "d4cef77e6ce1040cdfd0e8da6665af60", "score": "0.4015586", "text": "def deanogham(string, alphabet=None):\n oghamdict = {\" \": \" \", \"-\": \" \", \"[\": \"[\", \"]\": \"]\", \"/\": \"/\", \".\": \".\", \"̣\": \"̣\", \"?\": \"?\", \"(\": \"(\", \")\": \")\",\n \"vac.\": \"vac.\", \"{\": \"{\", \"}\": \"}\", \"<\": \"<\", \">\": \">\", \"〚\": \"〚\", \"〛\": \"〛\"}\n if alphabet is None:\n alphabet = \"orthodox\"\n if alphabet != \"scholastic\":\n alphabet = \"orthodox\"\n if alphabet == \"orthodox\":\n oghamdict.update(aicmi)\n oghamdict.update(extendedaicmi)\n oghamdict.update(fada)\n elif alphabet == \"scholastic\":\n oghamdict.update(aicmi)\n oghamdict.update(forfeda)\n oghamdict.update(fada)\n \"\"\"Ogham does not distinguish between upper and lower case letters, all letters are upper case\"\"\"\n string = string.upper()\n \"\"\"all strings of ogham begin and end with designated markers\"\"\"\n string = (\"᚛\" + string[:] + \"᚜\")\n \"\"\"Identifies a list of characters which will not be removed during the conversion to ogham\"\"\"\n allowed = [\"᚛\", \" \", \"...\", \"…\", \"-\", \"\\n\", \"(\", \"᚜\", \"[\", \"]\", \".\"]\n for let in oghamdict:\n allowed.append(oghamdict.get(let))\n for item in removables:\n allowed.append(item)\n \"\"\"Finds and changes letter combinations which represent one ogham letter from latin alphabet to ogham\"\"\"\n for combo in lettercombos:\n if combo in oghamdict:\n if combo in string:\n comcount = string.count(combo)\n for i in range(comcount):\n compos = string.find(combo)\n string = string[:compos] + oghamdict.get(combo) + string[compos + len(combo):]\n \"\"\"Changes characters which exist in ogham from latin alphabet to ogham\"\"\"\n for character in string:\n charpos = string.find(character)\n if character in oghamdict:\n string = (string[:charpos]+oghamdict.get(character)+string[charpos+1:])\n \"\"\"Removes select non-letter characters\"\"\"\n if character in removables:\n string = (string[:charpos]+string[charpos+1:])\n \"\"\"Brackets off substrings of characters which cannot be translated to ogham\"\"\"\n newstring1 = string\n done1lets = []\n for character in newstring1:\n if character not in allowed:\n done1lets.append(character)\n charitir1 = done1lets.count(character)\n charno1 = charitir1 - 1\n charpos1 = findnth(newstring1, character, charno1)\n if newstring1[charpos1 - 1] in allowed:\n newstring1 = newstring1[:charpos1] + \"(\" + newstring1[charpos1:]\n newstring2 = newstring1\n done2lets = []\n for character in newstring2:\n if character not in allowed:\n done2lets.append(character)\n charitir2 = done2lets.count(character)\n charno2 = charitir2 - 1\n charpos2 = findnth(newstring2, character, charno2)\n if newstring2[charpos2 + 1] in allowed:\n newstring2 = newstring2[:charpos2 + 1] + \")\" + newstring2[charpos2 + 1:]\n if \"…\" in newstring2:\n splitstring = newstring2.split(\"…\")\n newstring2 = \"[...]\".join(splitstring)\n if 
\"[ ... ]\" in newstring2:\n splitstring = newstring2.split(\"[ ... ]\")\n newstring2 = \"[...]\".join(splitstring)\n if \"...\" in newstring2:\n splitstring = newstring2.split(\"...\")\n newstring2 = \"[...]\".join(splitstring)\n if \"\\n\" in newstring2:\n splitstring = newstring2.split(\"\\n\")\n newstring2 = \"᚜\\n᚛\".join(splitstring)\n if \"[[[\" in newstring2:\n splitstring = newstring2.split(\"[[[\")\n newstring2 = \"[\".join(splitstring)\n if \"]]]\" in newstring2:\n splitstring = newstring2.split(\"]]]\")\n newstring2 = \"]\".join(splitstring)\n if \"[[\" in newstring2:\n splitstring = newstring2.split(\"[[\")\n newstring2 = \"[\".join(splitstring)\n if \"]]\" in newstring2:\n splitstring = newstring2.split(\"]]\")\n newstring2 = \"]\".join(splitstring)\n if \".\" in newstring2:\n stoptest = \"\".join(newstring2.split(\"[...]\"))\n if \".\" in stoptest:\n splitstring = newstring2.split(\".\")\n newstring2 = \"(.)\".join(splitstring)\n if \"ᚃᚐᚉ(.)\" in newstring2:\n splitstring = newstring2.split(\"ᚃᚐᚉ(.)\")\n newstring2 = \"vac.\".join(splitstring)\n finalstring = newstring2\n return finalstring", "title": "" }, { "docid": "f36a62b3e63835e4386eeba30e430e28", "score": "0.4003113", "text": "def parse(text):\n\n return [tokens for tokens in G(text).summarize() if tokens is not None]", "title": "" }, { "docid": "d8ec716a9b4f4f26e85cc2f50e999880", "score": "0.4002351", "text": "def winning_abbr(self):\n if self.winner == HOME:\n if 'cfb/schools' not in str(self._home_name):\n return self._home_name.text()\n return utils._parse_abbreviation(self._home_name)\n if 'cfb/schools' not in str(self._away_name):\n return self._away_name.text()\n return utils._parse_abbreviation(self._away_name)", "title": "" }, { "docid": "051f1d7c15d5118c469dccf9d0f90fe7", "score": "0.3995614", "text": "def abbrev(branch):\n if isinstance(branch, Branch):\n return p4gf_util.abbrev(branch.branch_id)\n return p4gf_util.abbrev(branch)", "title": "" }, { "docid": "e1cb514557072800872316dfbd65d587", "score": "0.3995111", "text": "def ruleset_text(path):\n out = []\n for xpath, rules in standard_ruleset.items():\n try:\n # Use slice 1: to ensure we match /budget/ but not /total-budget/\n reduced_path = path.split(xpath[1:] + '/')[1]\n except IndexError:\n continue\n out += rules_text(rules, reduced_path)\n return out", "title": "" }, { "docid": "8b37dbcd334a7a90f18bdcf6bbd622e8", "score": "0.39807242", "text": "def Akiyama():\n\n return FreeGroupAutomorphism(\"a->b,b->ac,c->a\")", "title": "" }, { "docid": "56cd8a5ef8a9d7a54d2c29953fa0990a", "score": "0.39721972", "text": "def normalizeTitles (\r\n\r\n self,\r\n text = None\r\n ) :\r\n\r\n if utilities.isEmpty( text ) : return \"\"\r\n\r\n index = 0\r\n\r\n while True :\r\n\r\n index = text.find( self.headingCode + \" \", index )\r\n\r\n if index < 0 : break\r\n\r\n text = text.replace( self.headingCode + \" \", self.headingCode )\r\n\r\n index = 0\r\n\r\n while True :\r\n\r\n index = text.find( \" \" + self.headingCode, index )\r\n\r\n if index < 0 : break\r\n\r\n text = text.replace( \" \" + self.headingCode, self.headingCode )\r\n\r\n while True :\r\n\r\n index = text.find( \"=\" + self.heading5Code, index )\r\n\r\n if index < 0 : break\r\n\r\n text = text.replace( \"=\" + self.heading5Code, self.heading5Code )\r\n\r\n \r\n return text", "title": "" }, { "docid": "03bcbd1d2fb54cd0f378c582bd4093fe", "score": "0.39705688", "text": "def generateAbbreviations(self, word: str):\n length = len(word)\n up = 1 << length\n res = []\n for num in range(up):\n tmp_str = 
''\n counts = 0\n tmp_num = num\n for i in range(length):\n if tmp_num & 1:\n counts += 1\n else:\n tmp_str += str(counts) if counts > 0 else ''\n tmp_str += word[i]\n counts = 0\n tmp_num = tmp_num >> 1\n tmp_str += str(counts) if counts > 0 else ''\n res.append(tmp_str)\n return res", "title": "" }, { "docid": "7c1a39a1c870bcea19c31cfcbc1cb14c", "score": "0.39614338", "text": "def get_abbreviation(syntax, abbr):\n\treturn zen_resources.get_abbreviation(syntax, abbr)", "title": "" }, { "docid": "493ccb1af3bc6e29d2c07f38b110e675", "score": "0.39538717", "text": "def free_text_to_search_labels(self, text: str) -> List[str]:\n candidates = text.split(self._distinction_word)\n candidates = [c.strip() for c in candidates]\n\n joined_subsets = list()\n for c in candidates:\n text_list = [t for t in c.split(' ') if t not in self._distinction_word]\n subsets = list(chain.from_iterable(combinations(text_list, r) for r in range(len(text_list) + 1)))\n joined_subsets += [' '.join(s) for s in subsets if len(s) > 0]\n\n joined_subsets.append(text)\n return list(set(joined_subsets)) # prevent repeated elements", "title": "" }, { "docid": "f000bc7591490b9ce489707740175350", "score": "0.3952965", "text": "def fix_hyphens(self, text):\n # TODO: Better normalization of em/en dashes to '--' if surrounded by spaces or start/end?\n if text is None:\n return None\n for hyphen in HYPHENS | MINUSES:\n text = text.replace(hyphen, \"-\")\n text = text.replace(\"\\u00ad\", \"\")\n return text", "title": "" }, { "docid": "6ef834ec35c68462544c2a27b82a6bef", "score": "0.39505887", "text": "def _cleanup_label(label):\n conjunctions = ['and', 'but', 'yet', 'for', 'nor', 'so']\n little_preps = [\n 'at', 'by', 'in', 'of', 'on', 'to', 'up', 'as', 'it', 'or']\n articles = ['a', 'an', 'the']\n\n # remove the abbreviation\n lbl = label.split(r';')[0]\n\n fixedwords = []\n i = 0\n for wrd in lbl.split():\n i += 1\n # convert the roman numerals to numbers,\n # but assume that the first word is not\n # a roman numeral (this permits things like \"X inactivation\"\n if i > 1 and re.match(romanNumeralPattern, wrd):\n num = fromRoman(wrd)\n # make the assumption that the number of syndromes are <100\n # this allows me to retain \"SYNDROME C\"\n # and not convert it to \"SYNDROME 100\"\n if 0 < num < 100:\n # get the non-roman suffix, if present.\n # for example, IIIB or IVA\n suffix = wrd.replace(toRoman(num), '', 1)\n fixed = ''.join((str(num), suffix))\n wrd = fixed\n\n # capitalize first letter\n wrd = wrd.title()\n\n # replace interior conjunctions, prepositions,\n # and articles with lowercase\n if wrd.lower() in (conjunctions+little_preps+articles) and i != 1:\n wrd = wrd.lower()\n\n fixedwords.append(wrd)\n\n lbl = ' '.join(fixedwords)\n # print (label, '-->', lbl)\n return lbl", "title": "" }, { "docid": "f67336597d6bd84d0c038d4dfe53fd1e", "score": "0.39502653", "text": "def graph_text_summarization(self, max_sents=config[\"app\"][\"text_summarization\"][\"max_num_sentences\"], min_sents=config[\"app\"][\"text_summarization\"][\"min_num_sentences\"], measure=config[\"app\"][\"text_summarization\"][\"measure\"], order_by_occurence=config[\"app\"][\"text_summarization\"][\"order_by_occurence\"]):\n item = config[self.show][\"text_summ_name\"][self.doc_type]\n results = aws.get_dynamo_data(item=item, table=config[\"aws\"][\"Dynamo_Table\"], resource=self.dynamo)\n # print(results)\n if results == \"No Response\" or self.clean_sents:\n partition_key = config[\"aws\"][\"partition_key\"]\n results = {partition_key:item}\n 
self.get_sentence_embeddings()\n for title, sent_embed in self.sentence_embeddings.items():\n # do graph stuff\n dists = 1-pairwise_distances(list(sent_embed.values()), metric=\"cosine\")\n dists = pd.DataFrame(dists, index = list(sent_embed.keys()), columns=list(sent_embed.keys()))\n G = nx.Graph()\n G.add_nodes_from(list(dists.columns))\n for i in list(dists.columns):\n for j in list(dists.index):\n if i==j or math.isnan(dists[i][j]) or dists[i][j] < self.sent_threshold:\n pass\n else:\n G.add_edge(i, j, weight=dists[i][j])\n dists[j][i] = None\n if measure == \"pagerank\":\n gr_dict = nx.pagerank(G)\n elif measure == \"betweenness centrality\":\n gr_dict = nx.betweenness_centrality(G, weight=\"weight\")\n elif measure == \"load centrality\":\n gr_dict = nx.load_centrality(G, weight=\"weight\")\n\n for key, value in gr_dict.items():\n gr_dict[key] = value*SemanticAlgos.sentence_length_multiplier(key)\n\n temp_dict = sort_dict(gr_dict)\n\n doc_embed=self.doc_embeddings[title]\n sorted_gr_dict = temp_dict[0:max_sents]\n summ_embedding_dict={}\n\n # finding optimal length for each summarization\n print(\"Finding Optimal n...\")\n for i in range(min_sents, max_sents+1):\n test_dict = dict(sorted_gr_dict[0:i])\n # print(list(self.sentence_embeddings.items()))\n test_dict = {key:self.sentence_embeddings[title][key] for key in list(test_dict.keys())}\n summ_embedding = np.mean(np.array(list(test_dict.values())), axis = 0)\n summ_embedding_dict.update({str(i):summ_embedding})\n \n embed_list = [doc_embed]+list(summ_embedding_dict.values())\n dists = 1-pairwise_distances(embed_list, metric=\"cosine\")[0]\n sim_dict = dict(zip(range(min_sents, max_sents+1), dists[1:len(dists)]))\n loss = [SemanticAlgos.get_loss(key, val) for key, val in sim_dict.items()]\n n = list(sim_dict.keys())[loss.index(min(loss))]\n if isinstance(n, list):\n n = n[0]\n print(\"Optimal Number of Sentences: \" + str(n))\n sorted_gr_dict = dict(temp_dict[0:n])\n if order_by_occurence:\n summary_dict = {x:gr_dict[x] for x in list(gr_dict.keys()) if x in list(sorted_gr_dict.keys())}\n else:\n summary_dict = sorted_gr_dict\n results.update({title:list(summary_dict.keys())})\n dynamo_table = self.dynamo.Table(config[\"aws\"][\"Dynamo_Table\"])\n dynamo_table.put_item(Item=results)\n return(results)", "title": "" }, { "docid": "b030b2f0b4ce1441f4ca6bc8aad13271", "score": "0.3941901", "text": "def get_frequency(self, text):\n if not isinstance(text, unicode): text = text.decode('utf-8')\n words = self.tokenize(text).split(' ')\n for word in words:\n if self.canonicalize(word) in self.frequencies:\n return self.canonicalize(word)\n return None", "title": "" }, { "docid": "b3b697e6a1f29e7d03828db80b399f32", "score": "0.39381593", "text": "def get_hyponyms(word, max_depth=None):\n synset = make_synset(word)\n if synset:\n hyponyms = synsets_words(all_hyponyms(synset, max_depth))\n else:\n hyponyms = None\n return hyponyms", "title": "" }, { "docid": "396bfe742da53241e6f9657429669f7f", "score": "0.39328012", "text": "def __call__(self, text):\n return self.normalize(text)", "title": "" }, { "docid": "1cecf7fe048c0008c29af21fcbf47685", "score": "0.39302558", "text": "def antonyms(word):\n \n la = []\n \n syn = _load(word)\n dWord = syn.get(word)\n if dWord is not None:\n ctxKeys = dWord.keys()\n relev = 3\n while relev > 0:\n for key in ctxKeys:\n dCtx = dWord.get(key)\n if isinstance(dCtx,dict):\n dSyn = dCtx.get(\"antonyms\")\n if dSyn.has_key(str(relev)):\n la.extend(dSyn.get(str(relev)))\n else:\n 
la.extend(antonyms(dCtx.encode(\"utf-8\")))\n relev = 0\n relev -= 1\n \n return _removeDuplicates(la)", "title": "" }, { "docid": "f4410f126ad6159c7e86193d57ff7f7f", "score": "0.392774", "text": "def match_desc_all(self, string):\n ngram_descs = []\n node = self.desc_actrie\n for ch in string:\n node = node.move(ch)\n if node is None:\n node = self.desc_actrie\n else:\n ngram_descs.extend([match_string for match_string in \n node.generate_all_suffix_nodes_values()])\n return ngram_descs", "title": "" }, { "docid": "b47f5a6f97fabe75a2fe7395c446e302", "score": "0.39236796", "text": "def getBiTrigrams(self, text):\n bigram_measures = cl.BigramAssocMeasures()\n trigram_measures = cl.TrigramAssocMeasures()\n tk = RegexpTokenizer(r'\\w+')\n\n st = LancasterStemmer()\n\n text = tk.tokenize(text.lower())\n job_vec = [st.stem(word) for word in text if word not in self.stopWords]\n\n bigrams = []\n trigrams = []\n collocations = cl.BigramCollocationFinder.from_words(job_vec)\n tri_collocations = cl.TrigramCollocationFinder.from_words(job_vec)\n top10 = collocations.score_ngrams(bigram_measures.raw_freq)\n top10 = sorted(bigram for bigram,score in top10)\n tri_top10 = tri_collocations.score_ngrams(bigram_measures.raw_freq)\n tri_top10 = sorted(trigram for trigram,score in tri_top10)\n for coll in top10:\n bigrams.append(coll[0] + ' ' + coll[1])\n for tri_coll in tri_top10:\n trigrams.append(tri_coll[0] + ' ' + tri_coll[1] + ' ' + tri_coll[2])\n\n return bigrams, trigrams", "title": "" }, { "docid": "c8370e3255c5f4749ca3cb9130b1cbe4", "score": "0.39226186", "text": "def get_structural_variants(g):\n\n # get the genes.\n hugo = g['TRUE_HUGO_SYMBOL']\n k = list(hugo.keys())[0]\n genes = hugo[k]\n\n if not isinstance(genes, list):\n genes = [genes]\n\n # TODO add synonyms\n\n # encode as full search criteria.\n sv_clauses = []\n for gene in genes:\n abc = \"(.*\\W{0}\\W.*)|(^{0}\\W.*)|(.*\\W{0}$)\".format(gene)\n sv_clauses.append(re.compile(abc, re.IGNORECASE))\n\n # add it to filter and remove gene criteria.\n del g['TRUE_HUGO_SYMBOL']\n g['STRUCTURAL_VARIANT_COMMENT'] = {\"$in\": sv_clauses}\n\n return g", "title": "" }, { "docid": "614bf08d0b656d2c6b40941335e1396e", "score": "0.39219946", "text": "def build_suffix_tree(text):\n result = []\n stree = suffixTree(text)\n\n for node in nodes:\n result.append(node.label)\n\n return result", "title": "" }, { "docid": "62877535d02e176e6f6fd8db96ecfff6", "score": "0.39149085", "text": "def normalize(text, lang='', remove_articles=True):", "title": "" }, { "docid": "c6ce067c36b24aeaee7042edd83b78b3", "score": "0.3912082", "text": "def normalize_text(self, text: str) -> str:\n assert text is not None, \"row is None\"\n\n # text = row.text\n # logger.debug(row.describe())\n logger.info(f'normalizing text: {text}')\n\n if text is not None and len(text) > 0:\n\n text = remove_amazon_tags(text)\n text = remove_http_links(text)\n\n if self.to_lowercase:\n text = tu.make_lowercase(text)\n if self.remove_newlines:\n text = tu.remove_newlines(text)\n if self.remove_html_tags:\n text = tu.remove_html_tags(text)\n if self.remove_accented_chars:\n text = tu.remove_accented_chars(text)\n if self.expand_contractions:\n text = tu.expand_contractions(text)\n if self.remove_special_chars:\n text = tu.remove_special_chars(text)\n # we have to do this after expanding contractions so it doesn't remove words like don't or shouldn't\n if self.remove_alphanumeric_words:\n text = tu.remove_alphanumeric_words(text)\n if self.remove_stop_words:\n text = tu.remove_stop_words(text)\n 
if self.stem_text:\n text = tu.stem_text(text)\n if self.lemmatize_text:\n text = tu.lemmatize_text(text)\n\n text = expand_star_ratings(text)\n\n logger.info(f'finished normalizing text: {text}')\n return text", "title": "" }, { "docid": "88729d551a88a936da3f03940dfda93b", "score": "0.3909781", "text": "def home_away_abbrev(self):\n return 'H' if self.is_home else 'A'", "title": "" }, { "docid": "703b1e0dcc6298cc97246a039ad028d3", "score": "0.3907746", "text": "def extract_abbreviation(view, loc):\n pt = -1\n region = None\n\n if isinstance(loc, (list, tuple)):\n loc = to_region(loc)\n\n if isinstance(loc, int):\n # Character location is passed, extract from line\n pt = loc\n region = view.line(pt)\n elif isinstance(loc, sublime.Region):\n # Extract from given range\n pt = loc.end()\n region = loc\n else:\n return None\n\n text = view.substr(region)\n begin = region.begin()\n opt = get_options(view, pt)\n\n if opt['type'] == 'stylesheet':\n # No look-ahead for stylesheets: they do not support brackets syntax\n # and enabled look-ahead produces false matches\n opt['lookAhead'] = False\n\n if opt['syntax'] == 'jsx':\n opt['prefix'] = view.settings().get('emmet_jsx_prefix', None)\n\n abbr_data = extract(text, pt - begin, opt)\n\n if abbr_data:\n abbr_data['start'] += begin\n abbr_data['end'] += begin\n abbr_data['location'] += begin\n return abbr_data, opt", "title": "" }, { "docid": "ff0f53c90ab4a514a2579bbb439095a7", "score": "0.39073408", "text": "def team_abbreviation(self):\n return self._team_abbreviation", "title": "" }, { "docid": "1c9236a6f2d54c4b41c115ad0c661b65", "score": "0.39065203", "text": "def _run_strip_accents(self, text):\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)", "title": "" }, { "docid": "f3fe86f315a33b74f1f4939d9b25bf6f", "score": "0.38968197", "text": "def read_MAG_taxonomy(args):\n\n #gtdb_file = os.path.join(args.b,'nc_gtdbtk_MQNC/gtdbtk.bac120.summary.tsv')\n gtdb_file = '07_binannotation/bacteria/nc_gtdbtk_MQNC/mag_taxonomy.txt'\n lineage = ['superkingdom','phylum','class','order','family','genus','species']\n #dlineage = {lin:None for lin in lineage}\n\n if not os.path.exists:\n print('No GTDBTK taxonomy available? 
Generate this file', gtdb_file)\n sys.exit(1)\n parsed_motherbins = set()\n MAG_tax = dict()\n with open(gtdb_file,'r') as infile:\n header = infile.readline()\n for line in infile:\n line = line.strip().split('\\t')\n genomeid = line[0]\n motherbin = genomeid.split('_')[1]\n if motherbin in parsed_motherbins:\n continue\n parsed_motherbins.add(motherbin)\n MAG_tax[motherbin] = {lin:None for lin in lineage}\n tax = line[2]\n \n tax = tax.split(';')\n tax = [ x.split('__')[1] for x in tax ]\n for i,lin in enumerate(lineage):\n entry = tax[i]\n if entry == '':\n entry = 'NA'\n if entry == 'Bacteroidota':\n entry = 'Bacteroidetes'\n \n MAG_tax[motherbin][lin] = entry\n return MAG_tax", "title": "" }, { "docid": "5bd47f4ae79a5a8948e4725b51b5b4cb", "score": "0.38956478", "text": "def _run_strip_accents(self, text):\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)", "title": "" }, { "docid": "991ba8b3a8321e058451cccfe9d1ccc6", "score": "0.38897467", "text": "def _categorize(text, first, force=False):\n # If it's a regular word in second category, just add an 's'\n if force and not first and \"[\" not in text:\n return \"{}s\".format(text)\n\n # Handle words with optional suffices like `close[s]` and `sword[s]`\n while True:\n match = _RE_OPTIONAL_SUFFIX.search(text)\n if match is None:\n break\n\n before = text[0:match.start()]\n after = text[match.end():]\n if first:\n # Omit the optional part\n text = '%s%s' % (before, after)\n else:\n # Include the optional part\n text = '%s%s%s' % (before, match.group(1), after)\n\n # Handle irregular words like `[are|is]` and `sta[ff|aves]`\n while True:\n match = _RE_IRREGULAR.search(text)\n if match is None:\n break\n\n before = text[0:match.start()]\n after = text[match.end():]\n if first:\n # Use the first form\n text = '%s%s%s' % (before, match.group(1), after)\n else:\n # Use the second form\n text = '%s%s%s' % (before, match.group(2), after)\n\n return text", "title": "" }, { "docid": "ae6e3934ce42bc6aff6bfc9cf24be2ec", "score": "0.38846642", "text": "def process_fasta(text):\n\n lines = text.split('\\n')\n lines = filter(lambda x: not x.startswith('>'), lines)\n lines = [line.strip() for line in lines]\n text = ''.join(lines)\n text = text.upper()\n return re.sub(r'[^ACGT]', 'T', text)", "title": "" }, { "docid": "63e0efb65c721ab01eb80880b8155dde", "score": "0.3875231", "text": "def choose_title(text_blocks, config):\n ## Have to encode output when piping script. 
See: http://goo.gl/h0ql0\n for tb in text_blocks:\n if config.multiline:\n return ' '.join([t['text'] for t in tb['blockText']]).encode('utf-8')\n else:\n return tb['blockText'][0]['text'].encode('utf-8')\n return None", "title": "" }, { "docid": "d04541a4bd6ffc0b2f2275f5adc82109", "score": "0.3867616", "text": "def normalize_answer(s):\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "d04541a4bd6ffc0b2f2275f5adc82109", "score": "0.3867616", "text": "def normalize_answer(s):\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "d04541a4bd6ffc0b2f2275f5adc82109", "score": "0.3867616", "text": "def normalize_answer(s):\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "d04541a4bd6ffc0b2f2275f5adc82109", "score": "0.3867616", "text": "def normalize_answer(s):\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "a7a22a15ac518dcd8eaea3c9502c3b4a", "score": "0.3867443", "text": "def parse(grammar, text, keep_posleafs=False):\n chart = build_chart(grammar, text)\n ret_trees = chart[1, len(text), \"S\"]\n if not keep_posleafs:\n for tree in ret_trees:\n replace_leafs_by_words(tree, text)\n return ret_trees", "title": "" }, { "docid": "8a58070d1985fa536e0e8407835993b9", "score": "0.3866564", "text": "def is_abbreviation(self, word=\"\"):\n # Consider adding trim and remove last period to make the is_abbreviation function more universal\n return True if word.lower() in self.abbreviations else False", "title": "" }, { "docid": "e386ce1464c8b9ee21d6e0d3ae523041", "score": "0.3862285", "text": "def normalize_answer(s):\n\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "e386ce1464c8b9ee21d6e0d3ae523041", "score": "0.3862285", "text": "def normalize_answer(s):\n\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' 
'.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "0e1d4986683a43d8b142f8f9249fd8cc", "score": "0.38542402", "text": "def normalize(self, text):\n return self.stem_tokens(nltk.word_tokenize(text.lower().translate(remove_punctuation_map)))", "title": "" }, { "docid": "57b742da64a073217ad2653fb218fb0f", "score": "0.38534823", "text": "def run_strip_accents(self, text):\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)", "title": "" }, { "docid": "6342ed9b4ba9c8d447dc06481885a797", "score": "0.3851088", "text": "def normalize_answer(s):\n\n def remove_articles(text):\n return re.sub(r\"\\b(a|an|the)\\b\", \" \", text)\n\n def white_space_fix(text):\n return \" \".join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return \"\".join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "c1f867680295579a4b724bb77d6d1b05", "score": "0.3847006", "text": "def make_abbrevs(dbpath):\n with closing(sqlite3.connect(dbpath)) as dbconn:\n db = dbconn.cursor()\n if db.execute(\"select count(*) from sqlite_master where type='table' and name='abbrev'\").fetchone()[0] > 0:\n if ask_yes_no(sys.stderr, \"Table 'abbrev' already exists; do you wish to drop it?\"):\n db.execute('drop table abbrev')\n else:\n print >>sys.stderr, 'Aborting'\n return\n db.execute(\"\"\"create table abbrev (article_id integer,\n abbr text not null,\n exp text not null,\n foreign key (article_id) references article(id),\n unique(article_id, abbr, exp))\"\"\")\n db.execute('create index abbrev_article_id on abbrev(article_id)')\n db.execute('create index abbrev_abbr on abbrev(abbr)')\n db.execute('create index abbrev_exp on abbrev(exp)')\n\n\n # A generator function that yields abbreviations:\n def abbrevs():\n db2 = dbconn.cursor() # the other cursor is being used for inserting, must use a different one!\n\n # Go through all links and collect abbreviation--expansion pairs.\n\n links = db2.execute('select distinct L.text, A.title, A.id from link L join article a on L.tgt_id = A.id')\n\n n_done = 0\n for (a, b, idx) in links:\n if check_abbr(a, b):\n yield get_abbr(a, b, idx)\n n_done += 1\n\n if n_done % 1000 == 0:\n print >>sys.stderr, n_done, 'abbrevs collected'\n sys.stdout.flush()\n print >>sys.stderr, 'Done processing links'\n\n # Go through redirects.\n\n redirs = db2.execute('select distinct A.title, R.title, A.id from article A join article R on A.redirect_id = R.id')\n\n for (a, b, idx) in redirs:\n if check_abbr(a, b):\n yield get_abbr(a, b, idx)\n n_done += 1\n\n if n_done % 1000 == 0:\n print >>sys.stderr, n_done, 'abbrevs collected'\n print >>sys.stderr, 'Done processing redirects'\n\n db.executemany('insert or ignore into abbrev values(?,?,?)', abbrevs())\n dbconn.commit()", "title": "" }, { "docid": "cf027a9ccba026c3114cefa08075a1d7", "score": "0.3845125", "text": "def normalize_answer(self, s):\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n 
return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "cf027a9ccba026c3114cefa08075a1d7", "score": "0.3845125", "text": "def normalize_answer(self, s):\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "363e907b4dbc62fed3dc6a155b66c28c", "score": "0.38446075", "text": "def normalize_answer(s):\r\n\r\n def remove_articles(text):\r\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\r\n\r\n def white_space_fix(text):\r\n return ' '.join(text.split())\r\n\r\n def remove_punc(text):\r\n exclude = set(string.punctuation)\r\n return ''.join(ch for ch in text if ch not in exclude)\r\n\r\n def lower(text):\r\n return text.lower()\r\n\r\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "c466e4642be6020411d35b3987f2e164", "score": "0.38414463", "text": "def __get__(self, c: \"NamingConventions\", owner) -> Optional[str]:\n if not c.metadata.producer:\n return None\n\n try:\n return self.known_abbreviations[c.metadata.producer]\n except KeyError:\n raise NotImplementedError(\n f\"We don't know how to abbreviate organisation domain name {c.metadata.producer!r}. \"\n f\"We'd love to add more orgs! Raise an issue on Github: \"\n f\"https://github.com/GeoscienceAustralia/eo-datasets/issues/new' \"\n )", "title": "" }, { "docid": "b75023a02bef57c74c7677d70551afab", "score": "0.3837186", "text": "def _run_strip_accents(text):\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)", "title": "" }, { "docid": "1f4402714565dfd2b05e57ea33b264ab", "score": "0.38370943", "text": "def create_tags(text,a):\n a = a.lower()\n tags = []\n inside = False\n for w in text.split():\n w_stripped = w.strip()\n if w_stripped == 'BEG____END':\n continue\n if w_stripped.startswith(\"BEG__\") and w_stripped.endswith(\"__END\"):\n concept = w_stripped.split(\"_\")[2]\n if a in concept.lower():\n tags.append('B-ans')\n if inside: # something went wrong, leave as is\n print(\"Inconsistent markup.\")\n else:\n tags.append('O')\n elif w_stripped.startswith(\"BEG__\"):\n assert not inside\n inside = True\n concept = [w_stripped.split(\"_\", 2)[-1]]\n\n elif w_stripped.endswith(\"__END\"):\n if not inside:\n if a in w_stripped[:-5].lower():\n tags.append('I') #might be B\n else:\n tags.append('O')\n else:\n concept.append(w_stripped.rsplit(\"_\", 2)[0])\n if a in ' '.join(concept).lower():\n tags.append('B-ans')\n for w in concept:\n tags.append('I-ans')\n tags.pop(-1)\n else:\n for w in concept:\n tags.append('O')\n inside = False\n else:\n if inside:\n concept.append(w_stripped)\n else:\n tags.append('O')\n\n return ' '.join(tags)", "title": "" }, { "docid": "a9d4e3996d0614c5f72d66e061f441ed", "score": "0.38356382", "text": "def losing_abbr(self):\n if self.winner == HOME:\n if 'cfb/schools' not in str(self._away_name):\n return self._away_name.text()\n return utils._parse_abbreviation(self._away_name)\n if 'cfb/schools' not in str(self._home_name):\n return 
self._home_name.text()\n return utils._parse_abbreviation(self._home_name)", "title": "" }, { "docid": "b7e4d95cf6d2fe54591a0fab6005e583", "score": "0.3832994", "text": "def convertBezToOutline(ufoFontData, glyphName, bezString):\n # convert bez data to a UFO glif XML representation\n #\n # Convert all bez ops to simplest UFO equivalent\n # Add all hints to vertical and horizontal hint lists as encountered;\n # insert a HintMask class whenever a new set of hints is encountered\n # after all operators have been processed, convert HintMask items into\n # hintmask ops and hintmask bytes add all hints as prefix review operator\n # list to optimize T2 operators.\n # if useStem3 == 1, then any counter hints must be processed as stem3\n # hints, else the opposite.\n # Counter hints are used only in LanguageGroup 1 glyphs, aka ideographs\n\n bezString = re.sub(r\"%.+?\\n\", \"\", bezString) # supress comments\n bezList = re.findall(r\"(\\S+)\", bezString)\n if not bezList:\n return \"\", None, None\n flexList = []\n # Create an initial hint mask. We use this if\n # there is no explicit initial hint sub.\n hintMask = HintMask(0)\n hintMaskList = [hintMask]\n vStem3Args = []\n hStem3Args = []\n argList = []\n opList = []\n newHintMaskName = None\n inPreFlex = False\n hintInfoDict = None\n opIndex = 0\n curX = 0\n curY = 0\n newOutline = XMLElement(\"outline\")\n outlineItem = None\n seenHints = False\n\n for token in bezList:\n try:\n val = float(token)\n argList.append(val)\n continue\n except ValueError:\n pass\n if token == \"newcolors\":\n pass\n elif token in [\"beginsubr\", \"endsubr\"]:\n pass\n elif token in [\"snc\"]:\n hintMask = HintMask(opIndex)\n # If the new hints precedes any marking operator,\n # then we want throw away the initial hint mask we\n # made, and use the new one as the first hint mask.\n if opIndex == 0:\n hintMaskList = [hintMask]\n else:\n hintMaskList.append(hintMask)\n newHintMaskName = hintMask.pointName\n elif token in [\"enc\"]:\n pass\n elif token == \"div\":\n value = argList[-2] / float(argList[-1])\n argList[-2:] = [value]\n elif token == \"rb\":\n if newHintMaskName is None:\n newHintMaskName = hintMask.pointName\n hintMask.hList.append(argList)\n argList = []\n seenHints = True\n elif token == \"ry\":\n if newHintMaskName is None:\n newHintMaskName = hintMask.pointName\n hintMask.vList.append(argList)\n argList = []\n seenHints = True\n elif token == \"rm\": # vstem3's are vhints\n if newHintMaskName is None:\n newHintMaskName = hintMask.pointName\n seenHints = True\n vStem3Args.append(argList)\n argList = []\n if len(vStem3Args) == 3:\n hintMask.vstem3List.append(vStem3Args)\n vStem3Args = []\n\n elif token == \"rv\": # hstem3's are hhints\n seenHints = True\n hStem3Args.append(argList)\n argList = []\n if len(hStem3Args) == 3:\n hintMask.hstem3List.append(hStem3Args)\n hStem3Args = []\n\n elif token == \"preflx1\":\n # the preflx1/preflx2 sequence provides the same i as the flex\n # sequence; the difference is that the preflx1/preflx2 sequence\n # provides the argument values needed for building a Type1 string\n # while the flex sequence is simply the 6 rcurveto points. 
Both\n # sequences are always provided.\n argList = []\n # need to skip all move-tos until we see the \"flex\" operator.\n inPreFlex = True\n elif token == \"preflx2a\":\n argList = []\n elif token == \"preflx2\":\n argList = []\n elif token == \"flxa\": # flex with absolute coords.\n inPreFlex = False\n flexPointName = kBaseFlexName + str(opIndex).zfill(4)\n flexList.append(flexPointName)\n curveCnt = 2\n i = 0\n # The first 12 args are the 6 args for each of\n # the two curves that make up the flex feature.\n while i < curveCnt:\n curX = argList[0]\n curY = argList[1]\n showX, showY = convertCoords(curX, curY)\n newPoint = XMLElement(\n \"point\", {\"x\": \"%s\" % showX, \"y\": \"%s\" % showY})\n outlineItem.append(newPoint)\n curX = argList[2]\n curY = argList[3]\n showX, showY = convertCoords(curX, curY)\n newPoint = XMLElement(\n \"point\", {\"x\": \"%s\" % showX, \"y\": \"%s\" % showY})\n outlineItem.append(newPoint)\n curX = argList[4]\n curY = argList[5]\n showX, showY = convertCoords(curX, curY)\n opName = 'curve'\n newPoint = XMLElement(\n \"point\", {\"x\": \"%s\" % showX, \"y\": \"%s\" % showY,\n \"type\": opName})\n outlineItem.append(newPoint)\n opList.append([opName, curX, curY])\n opIndex += 1\n if i == 0:\n argList = argList[6:12]\n i += 1\n # attach the point name to the first point of the first curve.\n outlineItem[-6].set(kPointName, flexPointName)\n if newHintMaskName is not None:\n # We have a hint mask that we want to attach to the first\n # point of the flex op. However, there is already a flex\n # name in that attribute. What we do is set the flex point\n # name into the hint mask.\n hintMask.pointName = flexPointName\n newHintMaskName = None\n argList = []\n elif token == \"flx\":\n inPreFlex = False\n flexPointName = kBaseFlexName + str(opIndex).zfill(4)\n flexList.append(flexPointName)\n curveCnt = 2\n i = 0\n # The first 12 args are the 6 args for each of\n # the two curves that make up the flex feature.\n while i < curveCnt:\n curX += argList[0]\n curY += argList[1]\n showX, showY = convertCoords(curX, curY)\n newPoint = XMLElement(\n \"point\", {\"x\": \"%s\" % showX, \"y\": \"%s\" % showY})\n outlineItem.append(newPoint)\n curX += argList[2]\n curY += argList[3]\n showX, showY = convertCoords(curX, curY)\n newPoint = XMLElement(\n \"point\", {\"x\": \"%s\" % showX, \"y\": \"%s\" % showY})\n outlineItem.append(newPoint)\n curX += argList[4]\n curY += argList[5]\n showX, showY = convertCoords(curX, curY)\n opName = 'curve'\n newPoint = XMLElement(\n \"point\", {\"x\": \"%s\" % showX, \"y\": \"%s\" % showY,\n \"type\": opName})\n outlineItem.append(newPoint)\n opList.append([opName, curX, curY])\n opIndex += 1\n if i == 0:\n argList = argList[6:12]\n i += 1\n # attach the point name to the first point of the first curve.\n outlineItem[-6].set(kPointName, flexPointName)\n if newHintMaskName is not None:\n # We have a hint mask that we want to attach to the first\n # point of the flex op. However, there is already a flex\n # name in that attribute. 
What we do is set the flex point\n # name into the hint mask.\n hintMask.pointName = flexPointName\n newHintMaskName = None\n argList = []\n elif token == \"sc\":\n pass\n elif token == \"cp\":\n pass\n elif token == \"ed\":\n pass\n else:\n if inPreFlex and (token[-2:] == \"mt\"):\n continue\n\n if token[-2:] in [\"mt\", \"dt\", \"ct\", \"cv\"]:\n opIndex += 1\n else:\n raise BezParseError(\n \"Unhandled operation: '%s' '%s'.\" % (argList, token))\n dx = dy = 0\n opName = bezToUFOPoint[token]\n if token[-2:] in [\"mt\", \"dt\"]:\n if token in [\"mt\", \"dt\"]:\n curX = argList[0]\n curY = argList[1]\n else:\n if token in [\"rmt\", \"rdt\"]:\n dx = argList[0]\n dy = argList[1]\n elif token in [\"hmt\", \"hdt\"]:\n dx = argList[0]\n elif token in [\"vmt\", \"vdt\"]:\n dy = argList[0]\n curX += dx\n curY += dy\n showX, showY = convertCoords(curX, curY)\n newPoint = XMLElement(\n \"point\", {\"x\": \"%s\" % showX, \"y\": \"%s\" % showY,\n \"type\": \"%s\" % opName})\n\n if opName == \"move\":\n if outlineItem is not None:\n if len(outlineItem) == 1:\n # Just in case we see two moves in a row,\n # delete the previous outlineItem if it has\n # only the move-to\n log.info(\"Deleting moveto: %s adding %s\",\n xmlToString(newOutline[-1]),\n xmlToString(outlineItem))\n del newOutline[-1]\n else:\n # Fix the start/implied end path\n # of the previous path.\n fixStartPoint(outlineItem, opList)\n opList = []\n outlineItem = XMLElement('contour')\n newOutline.append(outlineItem)\n\n if newHintMaskName is not None:\n newPoint.set(kPointName, newHintMaskName)\n newHintMaskName = None\n outlineItem.append(newPoint)\n opList.append([opName, curX, curY])\n else:\n if token in [\"ct\", \"cv\"]:\n curX = argList[0]\n curY = argList[1]\n showX, showY = convertCoords(curX, curY)\n newPoint = XMLElement(\n \"point\", {\"x\": \"%s\" % showX, \"y\": \"%s\" % showY})\n outlineItem.append(newPoint)\n curX = argList[2]\n curY = argList[3]\n showX, showY = convertCoords(curX, curY)\n newPoint = XMLElement(\n \"point\", {\"x\": \"%s\" % showX, \"y\": \"%s\" % showY})\n outlineItem.append(newPoint)\n curX = argList[4]\n curY = argList[5]\n showX, showY = convertCoords(curX, curY)\n newPoint = XMLElement(\n \"point\", {\"x\": \"%s\" % showX, \"y\": \"%s\" % showY,\n \"type\": \"%s\" % opName})\n outlineItem.append(newPoint)\n else:\n if token in [\"rct\", \"rcv\"]:\n curX += argList[0]\n curY += argList[1]\n showX, showY = convertCoords(curX, curY)\n newPoint = XMLElement(\n \"point\", {\"x\": \"%s\" % showX, \"y\": \"%s\" % showY})\n outlineItem.append(newPoint)\n curX += argList[2]\n curY += argList[3]\n showX, showY = convertCoords(curX, curY)\n newPoint = XMLElement(\n \"point\", {\"x\": \"%s\" % showX, \"y\": \"%s\" % showY})\n outlineItem.append(newPoint)\n curX += argList[4]\n curY += argList[5]\n showX, showY = convertCoords(curX, curY)\n newPoint = XMLElement(\n \"point\", {\"x\": \"%s\" % showX, \"y\": \"%s\" % showY,\n \"type\": \"%s\" % opName})\n outlineItem.append(newPoint)\n elif token == \"vhct\":\n curY += argList[0]\n showX, showY = convertCoords(curX, curY)\n newPoint = XMLElement(\n \"point\", {\"x\": \"%s\" % showX, \"y\": \"%s\" % showY})\n outlineItem.append(newPoint)\n curX += argList[1]\n curY += argList[2]\n showX, showY = convertCoords(curX, curY)\n newPoint = XMLElement(\n \"point\", {\"x\": \"%s\" % showX, \"y\": \"%s\" % showY})\n outlineItem.append(newPoint)\n curX += argList[3]\n showX, showY = convertCoords(curX, curY)\n newPoint = XMLElement(\n \"point\", {\"x\": \"%s\" % showX, \"y\": 
\"%s\" % showY,\n \"type\": \"%s\" % opName})\n outlineItem.append(newPoint)\n elif token == \"hvct\":\n curX += argList[0]\n showX, showY = convertCoords(curX, curY)\n newPoint = XMLElement(\n \"point\", {\"x\": \"%s\" % showX, \"y\": \"%s\" % showY})\n outlineItem.append(newPoint)\n curX += argList[1]\n curY += argList[2]\n showX, showY = convertCoords(curX, curY)\n newPoint = XMLElement(\n \"point\", {\"x\": \"%s\" % showX, \"y\": \"%s\" % showY})\n outlineItem.append(newPoint)\n curY += argList[3]\n showX, showY = convertCoords(curX, curY)\n newPoint = XMLElement(\n \"point\", {\"x\": \"%s\" % showX, \"y\": \"%s\" % showY,\n \"type\": \"%s\" % opName})\n outlineItem.append(newPoint)\n if newHintMaskName is not None:\n # attach the pointName to the first point of the curve.\n outlineItem[-3].set(kPointName, newHintMaskName)\n newHintMaskName = None\n opList.append([opName, curX, curY])\n argList = []\n\n if outlineItem is not None:\n if len(outlineItem) == 1:\n # Just in case we see two moves in a row, delete\n # the previous outlineItem if it has zero length.\n del newOutline[-1]\n else:\n fixStartPoint(outlineItem, opList)\n\n # add hints, if any\n # Must be done at the end of op processing to make sure we have seen\n # all the hints in the bez string.\n # Note that the hintmasks are identified in the opList by the point name.\n # We will follow the T1 spec: a glyph may have stem3 counter hints or\n # regular hints, but not both.\n\n if (seenHints) or (len(flexList) > 0):\n hintInfoDict = XMLElement(\"dict\")\n\n idItem = XMLElement(\"key\")\n idItem.text = \"id\"\n hintInfoDict.append(idItem)\n\n idString = XMLElement(\"string\")\n idString.text = \"id\"\n hintInfoDict.append(idString)\n\n hintSetListItem = XMLElement(\"key\")\n hintSetListItem.text = kHintSetListName\n hintInfoDict.append(hintSetListItem)\n\n hintSetListArray = XMLElement(\"array\")\n hintInfoDict.append(hintSetListArray)\n # Convert the rest of the hint masks\n # to a hintmask op and hintmask bytes.\n for hintMask in hintMaskList:\n hintMask.addHintSet(hintSetListArray)\n\n if len(flexList) > 0:\n hintSetListItem = XMLElement(\"key\")\n hintSetListItem.text = kFlexIndexListName\n hintInfoDict.append(hintSetListItem)\n\n flexArray = XMLElement(\"array\")\n hintInfoDict.append(flexArray)\n addFlexHint(flexList, flexArray)\n\n return newOutline, hintInfoDict", "title": "" }, { "docid": "9070cbbabad88583cfc4e070ee679808", "score": "0.38319367", "text": "def given_names_or_middle_abbrev(tok):\r\n gnames = given_names(tok)\r\n if gnames is not None:\r\n return gnames\r\n if tok.kind != TOK.WORD:\r\n return None\r\n wrd = tok.txt\r\n if wrd.startswith('['):\r\n # Abbreviation: Cut off the brackets & trailing period\r\n wrd = wrd[1:-2]\r\n if len(wrd) > 2 or not wrd[0].isupper():\r\n if wrd not in { \"van\", \"de\", \"of\" }:\r\n # Accept \"Thomas de Broglie\", \"Ruud van Nistelroy\", \"Mary of Canterbury\"\r\n return None\r\n # One or two letters, capitalized: accept as middle name abbrev,\r\n # all genders and cases possible\r\n return [PersonName(name = wrd, gender = None, case = None)]", "title": "" }, { "docid": "f5d2587f91c5cb90845d2adbdef9db53", "score": "0.3831338", "text": "def semantic_head(self, text):\n return str(list(self.nlp(text).sents)[0].root)", "title": "" }, { "docid": "41447acb31f8332874b6c94d118e357f", "score": "0.38295537", "text": "def normalize_answer(s):\n def remove_articles(text):\n regex = re.compile(r'\\b(a|an|the)\\b', re.UNICODE)\n return re.sub(regex, ' ', text)\n def 
white_space_fix(text):\n return ' '.join(text.split())\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n def lower(text):\n return text.lower()\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "ba6e05aa4cbbb517b1a56b2d549689e9", "score": "0.3819811", "text": "def _str_title(self):\n tmp = \"a\"\n if isinstance(self.graph, UndirectedGraph):\n tmp = \"an\"\n return \"GMRF model on {} {}\".format(tmp, self.graph)", "title": "" }, { "docid": "77e39fce3500d51f478fcc6408fd277a", "score": "0.3816846", "text": "def normalize_answer(s):\n\n def remove_articles(text):\n regex = re.compile(r\"\\b(a|an|the)\\b\", re.UNICODE)\n return re.sub(regex, \" \", text)\n\n def white_space_fix(text):\n return \" \".join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return \"\".join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "77e39fce3500d51f478fcc6408fd277a", "score": "0.3816846", "text": "def normalize_answer(s):\n\n def remove_articles(text):\n regex = re.compile(r\"\\b(a|an|the)\\b\", re.UNICODE)\n return re.sub(regex, \" \", text)\n\n def white_space_fix(text):\n return \" \".join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return \"\".join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "77e39fce3500d51f478fcc6408fd277a", "score": "0.3816846", "text": "def normalize_answer(s):\n\n def remove_articles(text):\n regex = re.compile(r\"\\b(a|an|the)\\b\", re.UNICODE)\n return re.sub(regex, \" \", text)\n\n def white_space_fix(text):\n return \" \".join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return \"\".join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "2ec76bdd51bc3b8dd1dbafcb1d150ed6", "score": "0.38141522", "text": "def punkt(self, text):\n\t\tpunkt_param = PunktParameters()\n\t\tabbreviation = ['f', 'fr', 'k'] # depending on where sentence tokenizer is likely to split on abbrevations\n\t\tpunkt_param.abbrev_types = set(abbreviation)\n\t\ttokenizer = PunktSentenceTokenizer(punkt_param)\n\t\ttokenizer.train(text)\n\t\tsegmentedText = tokenizer.tokenize(text)\n\t\treturn segmentedText", "title": "" }, { "docid": "2fb16d18de7f6e0aa0488e6489f41c53", "score": "0.38106847", "text": "def constructTerm(term, label, id):\n termlist = []\n if term != '': \n listitem = {}\n listitem['label'] = label\n listitem['id'] = id\n patterns = []\n #for labels that are tree species or alternate names for tree species\n if label in ['TREE SPECIES', 'ALT TREE SPECIES']:\n for s in term.split(' '):\n pattern = {}\n #we will lowercase all words in the text and in patterns so we don't have to worry abut case matching\n pattern['LOWER'] = s.lower()\n patterns.append(pattern)\n listitem['pattern'] = patterns\n termlist.append(listitem.copy())\n #create a pattern with genus abbreviated, ex. \"p. 
strobus\"\n altitem = {}\n altitem['label'] = label\n altitem['id'] = id\n altpatterns = []\n for i, s in enumerate(term.split(' ')):\n altpattern = {}\n if i == 0:\n altpattern['LOWER'] = s[0].lower() + '.'\n altpatterns.append(altpattern)\n else:\n altpattern['LOWER'] = s.lower()\n altpatterns.append(altpattern)\n altitem['pattern'] = altpatterns\n termlist.append(altitem.copy())\n #for the labels that are common names, add patterns for matching pluralized form in addition to singular \n elif label == 'TREE COMMON NAME':\n for i, s in enumerate(term.split(' ')):\n if '-' in s:\n hyphenpatterns = tokenHyphen(s)\n patterns.extend(hyphenpatterns)\n else:\n pattern = {}\n pattern['LOWER'] = s.lower()\n patterns.append(pattern)\n listitem['pattern'] = patterns\n termlist.append(listitem.copy())\n patterns = []\n for i, s in enumerate(term.split()):\n pattern = {}\n #pluralize only the last token in the word\n if i != len(term.split())-1:\n if '-' in s:\n hyphenpatterns = tokenHyphen(s)\n patterns.extend(hyphenpatterns)\n else:\n pattern['LOWER'] = s.lower()\n patterns.append(pattern)\n else:\n pattern['LOWER'] = pluralize(s.lower())\n patterns.append(pattern)\n listitem['pattern'] = patterns\n termlist.append(listitem)\n else:\n listitem = None\n return termlist", "title": "" }, { "docid": "2a73f7fac50f276b40729910e45d9fe9", "score": "0.38031164", "text": "def compute_max_term_similarity(df):\n human_similarity, wup_term_sim, shp_term_sim, lch_term_sim = [], [], [], []\n # depth_max = 20 # Wordnet 3.0 depth\n depth_max = max_dataset_depth(df) # dataset taxonomy depth\n for index, row in df.iterrows():\n s_1 = wn.synsets(row['Word 1'])\n s_2 = wn.synsets(row['Word 2'])\n human_similarity.append(float(row['Human']))\n max_wup_similarity = -1\n max_shp_similarity = -1\n max_lch_similarity = -1\n for synset_1 in s_1:\n for synset_2 in s_2:\n lcs_tuple = lcs_path(synset_1, synset_2, 0)\n if lcs_tuple is not None:\n least_common_subsumer = lcs_tuple[0]\n synsets_shortest_path = lcs_tuple[1]\n wu_sim = wu_palmer_similarity(synset_1, synset_2, least_common_subsumer)\n shp_sim = shortest_path_similarity(synset_1, synset_2, synsets_shortest_path, depth_max)\n lch_sim = lc_similarity(synset_1, synset_2, synsets_shortest_path, depth_max)\n if wu_sim is not None:\n if wu_sim > max_wup_similarity:\n max_wup_similarity = wu_sim\n if shp_sim is not None:\n if shp_sim > max_shp_similarity:\n max_shp_similarity = shp_sim\n if lch_sim is not None:\n if lch_sim > max_lch_similarity:\n max_lch_similarity = lch_sim\n wup_term_sim.append(max_wup_similarity)\n shp_term_sim.append(max_shp_similarity)\n lch_term_sim.append(max_lch_similarity)\n return human_similarity, wup_term_sim, shp_term_sim, lch_term_sim", "title": "" }, { "docid": "59b59d72e55b349d638f3e7080069d80", "score": "0.38001633", "text": "def ambn_legacy(longer_set, shorter_set, nwords):\n if shorter_set == set() or nwords <= 0:\n shorter_set.clear()\n return None\n sd = form_subdict(shorter_set, nwords)\n for longer_tuple in longer_set:\n for sub_longer_string in it.combinations(longer_tuple, nwords):\n if sub_longer_string in sd:\n for shorter_tuple in sd[sub_longer_string]:\n shorter_set.discard(shorter_tuple)\n return None", "title": "" } ]
e7a0978044c28389b858fd2862c54aaa
Wrapper for main function
[ { "docid": "e696deb90cf7e8b40da734a53d2bb0d6", "score": "0.0", "text": "def main():\n # Constants\n y_label = \"Survived\"\n factor_cols = [\"Embarked\", \"Sex\"]\n\n # Load and clean training data\n print(\"Loading, cleaning training data...\")\n training_data = extract_clean_data(\"train.csv\")\n print(\"Done\")\n print()\n\n # These are the features we will feed back into the estimator to\n # yield predictions\n features = training_data.drop(y_label, axis=1).columns\n # train the rf model\n print(\"Training random forest classifier with test data...\")\n estimator = train_model_rf(training_data, y_label)\n print(\"Done\")\n print()\n\n # test the irf model on the testing data\n print(\"Loading test data...\")\n test_df = load_test_data(\"test.csv\", factor_cols)\n print(\"Done\")\n print()\n\n ####### RANDOM FOREST ######\n print(\"--Random forest classifier--\")\n print()\n\n # Create predictions from the rf model using the testing data\n print(\"Making predictions with RF model...\")\n predictions = predict_model(estimator, test_df, features)\n\n # Convert prediction nparray to pandas dataframe\n predict_df = pd.DataFrame(\n data=predictions,\n columns=[\"Survived\"],\n )\n print(\"Done\")\n print()\n\n # Write predictions to a CSV file based on the ID\n print(\"Writing random forest predictions to csv...\")\n write_pred(predict_df, test_df, \"model_output_rf.csv\",\n \"PassengerId\", \"Survived\")\n print(\"Done\")\n print()\n\n ###### Multilayer perceptron algorithm with backpropagation ######\n print(\"--MLP backprop--\")\n print()\n\n # train the MLP net\n print(\"Training MLP...\")\n mlp_model = train_model_mlp(training_data, y_label)\n print(\"Done\")\n print()\n\n # Create predictions with the MLP net\n print(\"Making predictions with the MLP...\")\n mlp_predictions = predict_model(mlp_model, test_df, features)\n print(\"Done\")\n print()\n\n # Convert predictions from multilayer perceptron to a pandas array\n mlp_predictions = pd.DataFrame(\n data=mlp_predictions,\n columns=[\"Survived\"],\n )\n\n # Write MLP predictions to csv file\n print(\"Writing MLP predictions to csv...\")\n write_pred(mlp_predictions, test_df, \"model_output_mlp.csv\",\n \"PassengerId\", \"Survived\")\n print(\"Done\")\n print()", "title": "" } ]
[ { "docid": "6a72f0523c7f4b19a3ff495c1baaa5c5", "score": "0.8710058", "text": "def main() -> None:", "title": "" }, { "docid": "6a72f0523c7f4b19a3ff495c1baaa5c5", "score": "0.8710058", "text": "def main() -> None:", "title": "" }, { "docid": "82c54a749d7508e492693a3059faed30", "score": "0.86150384", "text": "def main():\n return", "title": "" }, { "docid": "c8e238f9ea21384e3be28d52a1ee7939", "score": "0.84371597", "text": "def main():\n\tpass", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8426721", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8426721", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8426721", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8426721", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8426721", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8426721", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8426721", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8426721", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8426721", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8426721", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8426721", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8426721", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8426721", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8426721", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8426721", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8426721", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8426721", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8426721", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8426721", "text": "def main():", "title": "" }, { "docid": "2dc5d012a9a966271705c303b5947936", "score": "0.84225357", "text": "def run():\r\n main(sys.argv[1:])", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.83485705", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.83485705", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.83485705", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.83485705", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.83485705", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.83485705", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.83485705", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.83485705", "text": "def main():\n pass", "title": "" }, { 
"docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.83485705", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.83485705", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.83485705", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.83485705", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.83485705", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.83485705", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.83485705", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.83485705", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.83485705", "text": "def main():\n pass", "title": "" }, { "docid": "ff6b5f435e231cc978bcc5e9148a452d", "score": "0.83341634", "text": "def run():\n main(sys.argv[1:])", "title": "" }, { "docid": "ff6b5f435e231cc978bcc5e9148a452d", "score": "0.83341634", "text": "def run():\n main(sys.argv[1:])", "title": "" }, { "docid": "ff6b5f435e231cc978bcc5e9148a452d", "score": "0.83341634", "text": "def run():\n main(sys.argv[1:])", "title": "" }, { "docid": "ff6b5f435e231cc978bcc5e9148a452d", "score": "0.83341634", "text": "def run():\n main(sys.argv[1:])", "title": "" }, { "docid": "ff6b5f435e231cc978bcc5e9148a452d", "score": "0.83341634", "text": "def run():\n main(sys.argv[1:])", "title": "" }, { "docid": "ff6b5f435e231cc978bcc5e9148a452d", "score": "0.83341634", "text": "def run():\n main(sys.argv[1:])", "title": "" }, { "docid": "ff6b5f435e231cc978bcc5e9148a452d", "score": "0.83341634", "text": "def run():\n main(sys.argv[1:])", "title": "" }, { "docid": "ff6b5f435e231cc978bcc5e9148a452d", "score": "0.83341634", "text": "def run():\n main(sys.argv[1:])", "title": "" }, { "docid": "ff6b5f435e231cc978bcc5e9148a452d", "score": "0.83341634", "text": "def run():\n main(sys.argv[1:])", "title": "" }, { "docid": "ff6b5f435e231cc978bcc5e9148a452d", "score": "0.83341634", "text": "def run():\n main(sys.argv[1:])", "title": "" }, { "docid": "ff6b5f435e231cc978bcc5e9148a452d", "score": "0.83341634", "text": "def run():\n main(sys.argv[1:])", "title": "" }, { "docid": "a1d30f32de4c80c264091990269a232c", "score": "0.8323435", "text": "def main():\r\n _main(sys.argv[1:])", "title": "" }, { "docid": "04688cd98d9fcf835947139440a6440f", "score": "0.82870895", "text": "def main():\n\n pass\n\n return None", "title": "" }, { "docid": "c71f730bfc6af73fef41ca1abca8b850", "score": "0.82808316", "text": "def main(argv):\n pass", "title": "" }, { "docid": "ffb681e947cba9d9419a3633c2c476f0", "score": "0.82726336", "text": "def main():\n\n pass", "title": "" }, { "docid": "e9138dd83ef3265fc161c47bbf7720c4", "score": "0.8255691", "text": "def main() -> None:\n _main(sys.argv[1:])", "title": "" }, { "docid": "737edbcdda02609a89cb40f455a292fb", "score": "0.82446617", "text": "def main(args):\n pass", "title": "" }, { "docid": "50333a46fe2be3b14f8763c7450461cc", "score": "0.8144043", "text": "def main(args=None):\n pass", "title": "" }, { "docid": "0bdf51636417aa862634dcdbfb677537", "score": "0.80959386", "text": "def run():\n main()", "title": "" }, { "docid": "b99c2e6bdbb95793e972d25658af44fc", "score": "0.8027213", "text": "def main(args=None):\r\n return 0", 
"title": "" }, { "docid": "0c3c72134f4a85071342c80bc84b4c99", "score": "0.7980044", "text": "def main():\n args = parse_args(sys.argv[1:])\n return args.func(args)", "title": "" }, { "docid": "7ae943a9b1cbd5ad001bb773230f7539", "score": "0.7974459", "text": "def run():\n\n run_main(main)", "title": "" }, { "docid": "535c6e7d32028af3cc3d681a0804815d", "score": "0.7945748", "text": "def main(args=None):\n return 0", "title": "" }, { "docid": "01280920665150620ec0a7454b922752", "score": "0.79232717", "text": "def main(**kwargs):\n pass", "title": "" }, { "docid": "acad0bb197f1a4f7d0f6d5f45d69e8a3", "score": "0.79001534", "text": "def main():\n status, output = inner_main(argv[1:])\n print output\n return status", "title": "" }, { "docid": "a3a854789ab90f67053c08e0f263321b", "score": "0.7866427", "text": "def main_script():\n return main(sys.argv)", "title": "" }, { "docid": "ed70d5da1c8c3a511edfa7715065cd32", "score": "0.78030854", "text": "def main():\n args = parse_arguments(sys.argv[1:])\n return run_from_args(args)", "title": "" }, { "docid": "f9ac54a4f8091eb5279716e9e81f2035", "score": "0.77934515", "text": "def run():\n args = parse_args(sys.argv[1:])\n main(args)", "title": "" }, { "docid": "c64b60990ae613c240fec77a2ee01f8a", "score": "0.7774413", "text": "def main() -> \"NoReturn\":\n sys.exit(_main(sys.argv))", "title": "" }, { "docid": "7eb28b0be1a44b1f50cda97cc19964b9", "score": "0.76199293", "text": "def Main(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "07e2ff51ba6bb61e422038e757c2a2dd", "score": "0.7603682", "text": "def main() -> None:\n\n cli()", "title": "" }, { "docid": "54ae48e7fdd54b66f24f539b7b2a1373", "score": "0.75739485", "text": "def main() -> None:\n cli()", "title": "" }, { "docid": "7647a5f1dabbb857653cb396154c9e22", "score": "0.7455345", "text": "def main():\n args = construct_primary_parser().parse_args()\n #print(args)\n args.run(args)", "title": "" }, { "docid": "587a92e488ea62e3751c7c84e855da03", "score": "0.74534565", "text": "def main(args):\n print()", "title": "" }, { "docid": "4fcb658a1a6b1946b6b9105a58d3a715", "score": "0.7450844", "text": "def test(self):\n main()", "title": "" }, { "docid": "90402dc0093797f2ba79093e721f113f", "score": "0.7428213", "text": "def main(ctx):\n pass", "title": "" }, { "docid": "806bc7b2011f1e1761aa283f50419d0b", "score": "0.7419531", "text": "def main(config):\n pass", "title": "" }, { "docid": "8c9f56e83fe67caebd57b0fc586bcc6e", "score": "0.7403606", "text": "def main():\n example()", "title": "" }, { "docid": "8c9f56e83fe67caebd57b0fc586bcc6e", "score": "0.7403606", "text": "def main():\n example()", "title": "" }, { "docid": "1b1ec34c88d48f7c338bae01917ac061", "score": "0.737079", "text": "def main():\n parse_args(sys.argv[1:])\n return 0", "title": "" }, { "docid": "250722d7c0bd003e12168f094029549b", "score": "0.73671347", "text": "def main() -> None:\n cli() # pragma: no cover", "title": "" }, { "docid": "d3bc8d0ffe7ca1f67f6ed25dd23c3256", "score": "0.73642147", "text": "def main():\n return cli(obj={})", "title": "" }, { "docid": "78cb0337267c7abce9525c080e734c1f", "score": "0.7313906", "text": "def main(argv=None):\n if argv is None:\n argv = sys.argv[1:]\n opts = ParseArgs(argv)\n Main(opts.output)", "title": "" }, { "docid": "e541f5dbabd0ce644b46bde159cb34f3", "score": "0.73087066", "text": "def main(inf):", "title": "" }, { "docid": "819ce69933746e9f256af7e64e7a0e75", "score": "0.7271746", "text": "def main(cls):\n raise NotImplementedError", "title": "" }, { "docid": 
"473132bb3df4c2b080aa8aac580e379b", "score": "0.7250102", "text": "def main(self, arg1, *args):\n return", "title": "" }, { "docid": "b5a2fc76acdfda05b4a92720eec791ea", "score": "0.72320855", "text": "def main(ctx):", "title": "" }, { "docid": "8474ce52d1b55c8a758af1fd317d38fc", "score": "0.71971923", "text": "def main():\n print(test(sys.argv[1]))", "title": "" }, { "docid": "b485e28d8e74ac302e61b407e6691341", "score": "0.7168381", "text": "def main(self,argv):\n self.setup()\n self.runMain(argv)", "title": "" }, { "docid": "d348c6e88156af1ea516fa43898ac598", "score": "0.7097931", "text": "def main():\n\n # Setup root logger\n setup_logger()\n\n # Capture CLI arguments\n args = cli_args()", "title": "" }, { "docid": "3c09d0ce4a7f40daf04a8689f5294965", "score": "0.7096376", "text": "def run(prog): # -> None:\n ...", "title": "" }, { "docid": "a88ed6882106d5d5d93c1176c8397b21", "score": "0.7094608", "text": "def main():\n print(\"It works!!! ;-)\")\n ###TODO### do something with the various methods/functions of this file", "title": "" }, { "docid": "051ad50cbcc0799a8bde7236f6c6c908", "score": "0.70516443", "text": "def main_argv():\n main_parse_args(sys.argv[1:])", "title": "" }, { "docid": "d7cfb4c1af72c8a994c61686b4be676b", "score": "0.7050078", "text": "def main(self):\r\n\r\n raise NotImplementedError()", "title": "" }, { "docid": "1b51aae0dc8b05fbc34ddfd6ab779be7", "score": "0.7040667", "text": "def main():\n import src.main\n main.main()", "title": "" }, { "docid": "4fd0eeacf724d7fc9430becd3258c2b9", "score": "0.70096743", "text": "def main(args=None):\n print('Hello World')\n return 0", "title": "" }, { "docid": "ddb1af00b6532fa016ebe81d222f1d30", "score": "0.7007251", "text": "def main():\n run(gcd)", "title": "" }, { "docid": "d3cc1da141122120b390cbabb6d02456", "score": "0.6996888", "text": "def main():\n print(\"Call your main application code here This is a Cli\")", "title": "" }, { "docid": "bac6f3c0e5c1856438b6d2370ba3ad2c", "score": "0.69937587", "text": "def main():\n setup_args()\n\n args = parser.parse_args()\n run(args)", "title": "" }, { "docid": "fa1c196d6daef88aa8eef8a024680c1c", "score": "0.69899666", "text": "def main(arguments: typing.List[str] = None) -> None: # pragma: no-cover\n run(arguments)", "title": "" } ]
d3f1b8b59075de171a62c9387ab95df1
Execute the decorated test only if running in v2 mode. This function is intended to be applied to tests that exercise v2 only functionality. If the test is run in v1 mode it will simply be skipped. See go/tftestdecoratorcheatsheet for the decorators to use in different v1/v2/eager/graph combinations.
[ { "docid": "da0fa67a1dfe1d3c0c9f4710dd11ae72", "score": "0.8331696", "text": "def run_v2_only(func=None):\n\n def decorator(f):\n if tf_inspect.isclass(f):\n raise ValueError('`run_v2_only` only supports test methods.')\n\n def decorated(self, *args, **kwargs):\n if not tf2.enabled():\n self.skipTest('Test is only compatible with v2')\n\n return f(self, *args, **kwargs)\n\n return decorated\n\n if func is not None:\n return decorator(func)\n\n return decorator", "title": "" } ]
[ { "docid": "aac29693e6b53172179e0026e37e8d2c", "score": "0.60816467", "text": "def decorated(test):\n\n @wraps(test)\n def test_with_version(self):\n \"Only run the test if ES version is not less than specified.\"\n actual_version = self.get_es().info()['version']['number']\n\n if LooseVersion(actual_version) >= LooseVersion(minimum_version):\n test(self)\n else:\n raise SkipTest\n\n return test_with_version", "title": "" }, { "docid": "c8dc78299146570896d62215deb620a9", "score": "0.54928935", "text": "def skip_test():\n if not tvm.get_global_func(\"relay.ext.verilator\", True):\n print(\"Skip test because Verilator codegen is not available.\")\n return True\n if sys.platform == \"win32\":\n print(\"Skip test on Windows for now\")\n return True\n return False", "title": "" }, { "docid": "1ea8833179d348cdc9336c77890b39c1", "score": "0.53972596", "text": "def test_require_version_good(self):\n def func() :\n pass\n sys.version_info = (2, 5, 5, 'final', 4)\n current = sys.version_info[:3]\n compare = ('2.4', '2.5', '2.5.4', '2.5.5')\n for version in compare:\n decorator = require_version(version)\n self.assertEqual(func, decorator(func), '%s =< %s : function \\\n return by the decorator should be the same.' % (version,\n '.'.join([str(element) for element in current])))", "title": "" }, { "docid": "7db82c049f94c7b04bc29727886f94ee", "score": "0.5322788", "text": "def testrun_run2(self):\n self.maxDiff = None\n self.dsr_runtest(2, '-r', remlines=[])", "title": "" }, { "docid": "bfe2411e0d46d282fa793be73927d86f", "score": "0.5282247", "text": "def test_two_layers(self):\n with override_quantized_engine('fbgemm'):\n model = AnnotatedTwoLayerLinearModel()\n model = prepare(model)\n\n self.checkNoPrepModules(model)\n self.checkObservers(model)\n self.checkNoPrepModules(model.fc1)\n self.checkHasPrepModules(model.fc2)\n\n test_only_eval_fn(model, self.calib_data)\n model = convert(model)\n\n def checkQuantized(model):\n self.checkNoPrepModules(model)\n self.checkNoPrepModules(model.fc1)\n self.checkHasPrepModules(model.fc2)\n self.assertEqual(type(model.fc1), torch.nn.Linear)\n self.checkWrappedQuantizedLinear(model.fc2)\n test_only_eval_fn(model, self.calib_data)\n self.checkScriptable(model, self.calib_data)\n\n checkQuantized(model)\n\n # test one line API\n model = quantize(AnnotatedTwoLayerLinearModel(), test_only_eval_fn,\n self.calib_data)\n checkQuantized(model)", "title": "" }, { "docid": "6077b57dbb6ebdafa9be1d3b745bdd92", "score": "0.52524143", "text": "def test_2(self):\n pass", "title": "" }, { "docid": "6077b57dbb6ebdafa9be1d3b745bdd92", "score": "0.52524143", "text": "def test_2(self):\n pass", "title": "" }, { "docid": "52de41269bbb4235d7cceeb98c9c69b2", "score": "0.5246232", "text": "def require_version(minimum_version):\n\n def decorated(test):\n \"\"\"Decorator to only run the test if ES version is greater or\n equal than specified.\n\n \"\"\"\n\n @wraps(test)\n def test_with_version(self):\n \"Only run the test if ES version is not less than specified.\"\n actual_version = self.get_es().info()['version']['number']\n\n if LooseVersion(actual_version) >= LooseVersion(minimum_version):\n test(self)\n else:\n raise SkipTest\n\n return test_with_version\n\n return decorated", "title": "" }, { "docid": "e01fb4cf2a74d01b31386b144dd76e0e", "score": "0.5226082", "text": "def test_single_notebook_v2(self):\n self._copy_file(join(\"files\", \"test-v2.ipynb\"), \"p2.ipynb\")\n run_nbgrader([\"update\", \"p2.ipynb\"])", "title": "" }, { "docid": "f149365abb89e7ab04458f93e6ddd826", "score": 
"0.5168513", "text": "def test_2(self):\n\n pass", "title": "" }, { "docid": "db207465bf0f5723eb2e7111e83639ab", "score": "0.51523286", "text": "def test_2(self):\n tests = [\n ('model1', True),\n ('model2', True),\n ('model3', False)\n ]\n\n for test in tests:\n result = self._usecase.get_performance_metrics(test[0])\n self.assertEqual(result['success'], test[1])", "title": "" }, { "docid": "e9e273ba8be5bf83a2ee55f5f16a49a7", "score": "0.5058622", "text": "def test_tidy2(self):\n self.__check_analyzer_result('tidy2.out', 'test2.cpp_clang-tidy.plist',\n ['files/test2.cpp'], 'tidy2.plist')", "title": "" }, { "docid": "f6f120075f6594187ebe82c2e22b69e2", "score": "0.5049639", "text": "def pytest_tavern_before_runtest(spec):", "title": "" }, { "docid": "562df03e3997b672c3274dff8b94511d", "score": "0.50399023", "text": "def skip_if_not_4gpu():\n\n def _wrapper(func):\n if GPU_COUNT < 4:\n # skip if not enabled\n print(\"Skip {}\".format(func.__name__))\n func.benchmark_name = \"skip_\" + func.__name__\n return func\n\n return _wrapper", "title": "" }, { "docid": "dc9406d1aaa25b4734b05d91cbaede89", "score": "0.503286", "text": "def test_meta_block_stop_method(self, pytester: pytest.Pytester, adaptavist_mock: AdaptavistMock):\n\n pytester.makepyfile(\n \"\"\"\n import pytest\n\n def test_TEST_T123(meta_block):\n with meta_block(1) as mb_1:\n mb_1.check(False, action_on_fail=mb_1.Action.STOP_METHOD)\n mb_1.check(False, message_on_fail=\"THIS SHOULD NOT BE DISPLAYED\")\n with meta_block(2) as mb_2:\n mb_2.check(True)\n\n def test_TEST_T124(meta_block):\n with meta_block(1) as mb_1:\n mb_1.check(True)\n \"\"\"\n )\n pytester.runpytest(\"--adaptavist\")\n _, _, etss = adaptavist_mock\n assert etss.call_count == 2\n for call in etss.call_args_list:\n assert \"THIS SHOULD NOT BE DISPLAYED\" not in call.kwargs[\"comment\"]\n assert call.test_case_key != \"TEST-T123\" or call.step != 2", "title": "" }, { "docid": "13d211f882237877b3726b59741636d0", "score": "0.5016975", "text": "def test_T2():", "title": "" }, { "docid": "39428ba5b915a3ed18032afcc19d47a5", "score": "0.50154203", "text": "def for_testing_only(func, _testing_check=lambda: \"pytest\" in sys.modules):\n\n @functools.wraps(func)\n def _wrapper(*args, **kwargs):\n \"\"\"\n Function wrapper for `testing_only` decorator.\n \"\"\"\n if not _testing_check():\n warnings.warn(f\"{func.__name__} should only be used for testing purposes.\")\n return func(*args, **kwargs)\n\n return _wrapper", "title": "" }, { "docid": "166b6f14dbb4b5a7ffdb41bb581b3122", "score": "0.50115246", "text": "def test_skipif_markeval_namespace_multiple(self, pytester: Pytester) -> None:\n root = pytester.mkdir(\"root\")\n root.joinpath(\"__init__.py\").touch()\n root.joinpath(\"conftest.py\").write_text(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n\n def pytest_markeval_namespace():\n return {\"arg\": \"root\"}\n \"\"\"\n ),\n encoding=\"utf-8\",\n )\n root.joinpath(\"test_root.py\").write_text(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n\n @pytest.mark.skipif(\"arg == 'root'\")\n def test_root():\n assert False\n \"\"\"\n ),\n encoding=\"utf-8\",\n )\n foo = root.joinpath(\"foo\")\n foo.mkdir()\n foo.joinpath(\"__init__.py\").touch()\n foo.joinpath(\"conftest.py\").write_text(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n\n def pytest_markeval_namespace():\n return {\"arg\": \"foo\"}\n \"\"\"\n ),\n encoding=\"utf-8\",\n )\n foo.joinpath(\"test_foo.py\").write_text(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n\n @pytest.mark.skipif(\"arg == 'foo'\")\n def 
test_foo():\n assert False\n \"\"\"\n ),\n encoding=\"utf-8\",\n )\n bar = root.joinpath(\"bar\")\n bar.mkdir()\n bar.joinpath(\"__init__.py\").touch()\n bar.joinpath(\"conftest.py\").write_text(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n\n def pytest_markeval_namespace():\n return {\"arg\": \"bar\"}\n \"\"\"\n ),\n encoding=\"utf-8\",\n )\n bar.joinpath(\"test_bar.py\").write_text(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n\n @pytest.mark.skipif(\"arg == 'bar'\")\n def test_bar():\n assert False\n \"\"\"\n ),\n encoding=\"utf-8\",\n )\n\n reprec = pytester.inline_run(\"-vs\", \"--capture=no\")\n reprec.assertoutcome(skipped=3)", "title": "" }, { "docid": "86efda8f8f73c207ef2591396923f49d", "score": "0.500848", "text": "def new_test_decorator(path, ti, args):\n return self.on_test_visit(path, ti, args)", "title": "" }, { "docid": "c6f14d99bac5e68991162706a9999ede", "score": "0.5005335", "text": "def pytest_runtest_call(item):\n _check_latest_version_mark(item)", "title": "" }, { "docid": "3f7ca35d3ab8a9ab59721ce7cad5c811", "score": "0.5000307", "text": "def test_func_one_line_2():\n pass", "title": "" }, { "docid": "54d825588efd551dce34f56dadbabec8", "score": "0.49965397", "text": "def test_workload_migration_from_v2(self) -> None:\n self._assert_migrates(\n \"workload_v2_tuned.json\",\n \"workload_v3_tuned.json\",\n )", "title": "" }, { "docid": "43071de07db4dac4948dffc36e773429", "score": "0.49735293", "text": "def test_meta_block_stop_context(self, pytester: pytest.Pytester, adaptavist_mock: AdaptavistMock):\n pytester.makepyfile(\n \"\"\"\n import pytest\n\n def test_TEST_T123(meta_block):\n with meta_block(1) as mb_1:\n mb_1.check(False, action_on_fail=mb_1.Action.STOP_CONTEXT)\n mb_1.check(False, message_on_fail=\"THIS SHOULD NOT BE DISPLAYED\")\n with meta_block(2) as mb_2:\n mb_2.check(True)\n \"\"\"\n )\n report = pytester.runpytest(\"--adaptavist\", \"-vv\")\n _, _, etss = adaptavist_mock\n for call in etss.call_args_list:\n assert \"THIS SHOULD NOT BE DISPLAYED\" not in call.kwargs[\"comment\"]\n assert etss.call_count == 2\n assert etss.call_args.kwargs[\"step\"] == 2\n assert etss.call_args.kwargs[\"status\"] == \"Pass\"\n report.assert_outcomes(skipped=1)", "title": "" }, { "docid": "0ee1194507548bad5bc88d750f1312e1", "score": "0.49451458", "text": "def test__decorate_functions():\n pass", "title": "" }, { "docid": "b87dc36dc779585d59e0dfe7d7a33700", "score": "0.49425277", "text": "def decorated2(debugger, args, exe_ctx, result, dict):\n print(\"hello from decorated2\", file=result)", "title": "" }, { "docid": "4416636361457254311822dcc1145908", "score": "0.493362", "text": "def test_prevent_double_vote(test_app):\n pass", "title": "" }, { "docid": "51700f2c0c6ee3ce0eedb5c60372e5b1", "score": "0.49065796", "text": "def _GetTestConfigsV2():\n convert_offline = False\n # TODO(laigd): add support for static_engine.\n dynamic_engine = True\n # TODO(laigd): add support for calibration.\n no_calibration = False\n use_calibration = True\n\n # Add all possible test cases and let the derived test class to decide\n # whether to run specific ones with ShouldRunTest().\n #\n # Note:\n # - In TF2.0 the conversion always produce dynamic engine, and we don't test\n # the offline mode here.\n # - For simplicity we don't test online conversion which requires setting the\n # Grappler config in default eager context.\n # - INT8 without calibration behaves like FP32/FP16.\n opts = list(\n itertools.product([FP32, FP16], [convert_offline], [dynamic_engine],\n [no_calibration], [False, 
True]))\n # We always run calibration with offline tool.\n opts.append((INT8, convert_offline, dynamic_engine, use_calibration, False))\n opts.append((INT8, convert_offline, dynamic_engine, use_calibration, True))\n return opts", "title": "" }, { "docid": "97e0ad0eaf4ac2e97e3b48e107ac40c7", "score": "0.48922426", "text": "def custom_test_func(self):\n return True", "title": "" }, { "docid": "5d7947294088fc0e4f918149def067be", "score": "0.48794407", "text": "def Step2(self):\n if self.inRange('Step2', 'argument'): \n r = self.__impl.step2Behav(argument=self.actdict['Step2']['argument']['tok_list'])\n r['status'] = ['ProcedureExample', 'Step2', 'on']\n \n self.clear_tok('Step2')\n self.parseResult(r)\n return True \n else:\n insuff_pins = self.wrong_tok('Step2')\n for pin in insuff_pins:\n LOG.debug('Incorrect token number on input %s of action %s: %s'\n % (pin, \"Step2\", self.actdict['Step2'][pin]['tok_list']))\n return False", "title": "" }, { "docid": "499562d2c19edf0854866a01e6961f7e", "score": "0.48695183", "text": "def benchmark_graph_2_gpu(self):\n self._setup()\n FLAGS.num_gpus = 2\n FLAGS.enable_eager = False\n FLAGS.run_eagerly = False\n FLAGS.distribution_strategy = 'mirrored'\n FLAGS.model_dir = self._get_model_dir('benchmark_graph_2_gpu')\n FLAGS.batch_size = 128 * 2 # 2 GPUs\n self._run_and_report_benchmark()", "title": "" }, { "docid": "8aeb1bddd349b9f0a94f13bbbcf58617", "score": "0.48569664", "text": "def test_app_test(self):\n out = _run(\"entrypoint.sh test\")\n self.assertTrue(\"Ran 2 tests\" in out)", "title": "" }, { "docid": "8a7544598491a6251feb55a478548709", "score": "0.4852445", "text": "def test_should_not_run_product_version_default_filler(self):\n self.run_test('test_should_not_run', True, test_case_prefix='tinctest.test.test_version_check.MockTINCTestCaseWithGetProductVersion')", "title": "" }, { "docid": "bcf0dc93f13b6f174ecd93cc3404ec3f", "score": "0.48497897", "text": "def skip_wo_symlink_capability(func):\n @wraps(func)\n @attr('skip_wo_symlink_capability')\n def _wrap_skip_wo_symlink_capability(*args, **kwargs):\n if not has_symlink_capability():\n pytest.skip(\"no symlink capabilities\")\n return func(*args, **kwargs)\n return _wrap_skip_wo_symlink_capability", "title": "" }, { "docid": "1353e26d42e463a43206b0207d844346", "score": "0.4838966", "text": "def _set_v2_dtype_behavior(fn, enabled):\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n v2_dtype_behavior = base_layer_utils.V2_DTYPE_BEHAVIOR\n base_layer_utils.V2_DTYPE_BEHAVIOR = enabled\n try:\n return fn(*args, **kwargs)\n finally:\n base_layer_utils.V2_DTYPE_BEHAVIOR = v2_dtype_behavior\n\n return tf_decorator.make_decorator(fn, wrapper)", "title": "" }, { "docid": "4252e02421a3bdc73d435c0106bfcda8", "score": "0.4826109", "text": "def hasAttrMocks_2( self, *args, **kwargs ): #pylint: disable=unused-argument\n print(\"arguments\", args, kwargs)\n opsDict={\n 'extraCLIArguments': False,\n 'detectorModel': False,\n 'detectortype': True,\n 'eventType': True,\n 'outputFile': True,\n 'datatype': True,\n 'setOutputRecFile': False,\n 'setOutputDstFile': False,\n }\n self.assertIn( args[1], opsDict )\n return opsDict[ args[1] ]", "title": "" }, { "docid": "87ed44a3e5d59a77995a1fcb40e3975b", "score": "0.48225012", "text": "def _non_regression_test():\n\timport doctest\n\tdoctest.testmod()\n\treturn", "title": "" }, { "docid": "7ddc6b8408e577e0ef25dbc4be5ee272", "score": "0.481237", "text": "def run_through_wrapper(args):\n\n test_function = args.test_function.lower()\n if test_function not in 
test_dict.keys():\n raise Exception(\"'test_function' {0} not found in test suite\", format(test_function))\n tf = test_dict[test_function]\n try:\n input_df = pd.read_csv(args.input_file)\n except Exception as e:\n raise Exception(\"error reading input file {0}:{1}\".format(args.input_file, str(e)))\n # if len(input_df.columns) != tf.number_decision_vars():\n # raise Exception(\"Incorrect number of decision variables. Should be {} d. vars. got {} d. vars.\"\n # .format(len(input_df.columns), tf.number_decision_vars()))\n f1 = tf.f1(input_df.parval1)\n f2 = tf.f2(input_df.parval1)\n with open(args.output_file, 'w') as f:\n f.write(\"f1 {0:20.8E}\\n\".format(f1))\n f.write(\"f2 {0:20.8E}\\n\".format(f2))", "title": "" }, { "docid": "7d2265ab7e0a07f85934a21bf42137f5", "score": "0.47986087", "text": "def process3Dot2(self):\n if self.inRange('process3Dot2', 'argument'):\n r = self.__impl.process3Dot2Behav(argument=self.actdict['process3Dot2']['argument']['tok_list'])\n r['status'] = ['CapabilityExample', 'process3Dot2', 'on']\n \n self.clear_tok('process3Dot2')\n self.parseResult(r)\n return True \n else:\n insuff_pins = self.wrong_tok('process3Dot2')\n for pin in insuff_pins:\n LOG.debug('Incorrect token number on input %s of action %s: %s'\n % (pin, \"process3Dot2\", self.actdict['process3Dot2'][pin]['tok_list']))\n return False", "title": "" }, { "docid": "ce4103cb7f58d5a1e0dc8d817fd5549b", "score": "0.47566283", "text": "def skip_if_not_supported(is_route_flow_counter_supported):\n pytest_require(is_route_flow_counter_supported, 'route flow counter is not supported')", "title": "" }, { "docid": "c0832097ad55aecd37ff86a0e1e1d6fa", "score": "0.47554904", "text": "def test_func_true():\n return", "title": "" }, { "docid": "e43626b3c28d1c13c77ce35a448d4742", "score": "0.47539562", "text": "def test_multiple_decorators():\n assert decorated_with_mulitiple_decorators() == '^third^-second-*first*test*first*-second-^third^'", "title": "" }, { "docid": "76e268a065661b10c16b16acdcc03f7c", "score": "0.47493154", "text": "def test_meta_block_fail_method(self, pytester: pytest.Pytester, adaptavist_mock: AdaptavistMock):\n pytester.makepyfile(\n \"\"\"\n import pytest\n\n def test_TEST_T123(meta_block):\n with meta_block(1) as mb_1:\n mb_1.check(False, action_on_fail=mb_1.Action.FAIL_METHOD)\n with meta_block(2) as mb_2:\n mb_2.check(True)\n \"\"\"\n )\n pytester.runpytest(\"--adaptavist\")\n _, _, etss = adaptavist_mock\n assert etss.call_count == 1", "title": "" }, { "docid": "13d70a2fc67b54be036857986c5992f5", "score": "0.47421676", "text": "def test_no_cross_test_side_effects_part2(self, estimator_instance):\n assert not hasattr(estimator_instance, \"test__attr\")", "title": "" }, { "docid": "b4f8fdd2a09854cd9cd666bcf66e9649", "score": "0.47362146", "text": "def main():\n\n defaults = {\n \"TEST_STACK\": \"tst\",\n \"TEST_APP\": \"kubv2cache\" + KubeV2CacheTestScenario.DEFAULT_TEST_ID,\n }\n\n return citest.base.TestRunner.main(\n parser_inits=[KubeV2CacheTestScenario.initArgumentParser],\n default_binding_overrides=defaults,\n test_case_list=[KubeV2CacheTest],\n )", "title": "" }, { "docid": "04c160a44ee95909200182703d98775f", "score": "0.4727578", "text": "def test_trace__decorator():\n set_trace(helper_decorator, trace_func)\n with pytest.warns(UserWarning, match=\"^helper_decorator:100$\"):\n helper_decorator()", "title": "" }, { "docid": "bf4a626aa6ab62b59eab70ae1a525497", "score": "0.47220868", "text": "def benchmark_2_gpu(self):\n self._setup()\n FLAGS.num_gpus = 2\n FLAGS.data_dir = 
self.data_dir\n FLAGS.batch_size = 128\n FLAGS.train_epochs = 182\n FLAGS.model_dir = self._get_model_dir('benchmark_2_gpu')\n FLAGS.dtype = 'fp32'\n FLAGS.enable_eager = True\n self._run_and_report_benchmark()", "title": "" }, { "docid": "0072db3be970fe397b8863804df915bc", "score": "0.47206366", "text": "def skip_from_python(*ver):\r\n def skip_from_python_(f):\r\n def skip_from_python__(self):\r\n if sys.version_info[:len(ver)] >= ver:\r\n return self.skipTest(\"skipped because Python %s\"\r\n % \".\".join(map(str, sys.version_info[:len(ver)])))\r\n else:\r\n return f(self)\r\n\r\n return skip_from_python__\r\n return skip_from_python_", "title": "" }, { "docid": "6e46224b4062ea7c7b58c3c3daa3e922", "score": "0.47187048", "text": "def test_part2():", "title": "" }, { "docid": "72fdc080b424c465c1958e6bb21d199b", "score": "0.47152025", "text": "def skip_if_gpu():\n device = os.environ.get(\"DGL_BENCH_DEVICE\", \"cpu\")\n\n def _wrapper(func):\n if device == \"gpu\":\n # skip if not enabled\n func.benchmark_name = \"skip_\" + func.__name__\n return func\n\n return _wrapper", "title": "" }, { "docid": "8b9d8ac1103f88d0f50319d88ceddfc8", "score": "0.47080207", "text": "def test_with_python_api(self):\n self.build()\n self.inline_stepping()", "title": "" }, { "docid": "625f8c5fe1a30777b1ba613d27b7a2f0", "score": "0.46953195", "text": "def _verify_decorated_result(self, expected, args, result):\n self.assertTrue(result == expected, \"caching decorator spoil function. Arguments %s. Expected %s, got %s\" %\n (args, expected, result))", "title": "" }, { "docid": "756ff0570fc557d63ab026cb08d657f0", "score": "0.46647426", "text": "def skip_before_python(*ver):\r\n def skip_before_python_(f):\r\n def skip_before_python__(self):\r\n if sys.version_info[:len(ver)] < ver:\r\n return self.skipTest(\"skipped because Python %s\"\r\n % \".\".join(map(str, sys.version_info[:len(ver)])))\r\n else:\r\n return f(self)\r\n\r\n return skip_before_python__\r\n return skip_before_python_", "title": "" }, { "docid": "c5fa4e130d28bd81983c8146c879790a", "score": "0.4643112", "text": "def test_meta_block_check_stop_session(self, pytester: pytest.Pytester):\n pytester.makepyfile(\n \"\"\"\n import pytest\n\n def test_TEST_T121(meta_block):\n with meta_block(1) as mb_1:\n mb_1.check(True)\n\n def test_TEST_T123(meta_block):\n with meta_block(1) as mb_1:\n mb_1.check(False, action_on_fail=mb_1.Action.STOP_SESSION)\n\n def test_TEST_T124(meta_block):\n with meta_block(1) as mb_1:\n mb_1.check(True)\n \"\"\"\n )\n outcome = pytester.runpytest(\"--adaptavist\").parseoutcomes()\n assert outcome[\"passed\"] == 1\n assert outcome[\"blocked\"] == 2", "title": "" }, { "docid": "d7c1f6427058a36d4a66795334fea06e", "score": "0.46429762", "text": "def inception_twoview(pretrained=False, **kwargs):\n\n return InceptionTwoView(**kwargs)", "title": "" }, { "docid": "7e3fa0ddb4bbb08c7dc0379b60b46f10", "score": "0.46385658", "text": "def two_sided_test(x1, x2, test='t_test', value=0):\n\n x1 = np.asarray(x1)\n nobs1 = x1.shape[0]\n x1_mean = x1.mean(0)\n x1_var = x1.var(0) # sample variance\n x2 = np.asarray(x2)\n nobs2 = x2.shape[0]\n x2_mean = x2.mean(0)\n x2_var = x2.var(0) # sample variance\n var_pooled = nobs1 * x1_var + nobs2 * x2_var\n\n if test == 't_test':\n dof = nobs1 + nobs2 - 2\n elif test == 'z_test':\n dof = nobs1 + nobs2\n else:\n raise ValueError('Either t_test or z_test!')\n\n var_pooled = var_pooled/dof\n var_pooled = var_pooled*(1.0 / nobs1 + 1.0 / nobs2)\n std_diff = np.sqrt(var_pooled)\n\n if test == 't_test:':\n 
stat, pval = _two_sided_tstat_generic(x1_mean, x2_mean, std_diff, dof, diff=value)\n else:\n stat, pval = _two_sided_zstat_generic(x1_mean, x2_mean, std_diff, diff=value)\n\n return stat, pval", "title": "" }, { "docid": "e2285fd46964b6768a94cabf9ed10941", "score": "0.4635681", "text": "def test(self, strategy: S.Strategy, version=None):\n raise NotImplementedError", "title": "" }, { "docid": "700829de63c141a9071d773be9dd1671", "score": "0.46305203", "text": "def test_meta_block_assume(self, pytester: pytest.Pytester, adaptavist_mock: AdaptavistMock):\n pytester.makepyfile(\n \"\"\"\n import pytest\n\n def test_TEST_T123(meta_block):\n with meta_block(1) as mb_1:\n mb_1.check(False)\n with meta_block(2) as mb_2:\n mb_2.check(True)\n \"\"\"\n )\n pytester.runpytest(\"--adaptavist\")\n _, _, etss = adaptavist_mock\n assert etss.call_count == 2", "title": "" }, { "docid": "21699896a626c713c54c523da0414859", "score": "0.46058947", "text": "def _AddTests(test_class):\n _AddTestsFor(test_class, is_v2=False)\n _AddTestsFor(test_class, is_v2=True)", "title": "" }, { "docid": "fe4cbefff195f2bff40d629022f67553", "score": "0.46010798", "text": "def test_run_coverage_fail_under2(self):\n STATS = r\"\\s+8\\s+5\\s+38%\\s+1, 7-10\"\n TOTAL_STATS = r\"\\s+8\\s+5\\s+38%\\s\"\n\n proc = self.runIn(\"scenario/coverage_config_fail_under2\", \"-v\")\n self.assertProcOutputPattern(\n proc,\n \"part_covered_lib\",\n STATS,\n total_stats=TOTAL_STATS,\n assert_exit_status=1,\n )", "title": "" }, { "docid": "761bbaa8828877558e8de0b187f5cc4a", "score": "0.45935196", "text": "def test(self, label='fast', verbose=1, extra_argv=['--exe'],\n doctests=True, coverage=False):\n return super(_NoseTester, self).test(label=label, verbose=verbose,\n extra_argv=extra_argv,\n doctests=doctests, coverage=coverage)", "title": "" }, { "docid": "c09cc26f83b27ef3145c1964ab2efcda", "score": "0.45920393", "text": "def test(self, label='fast', verbose=1, extra_argv=['--exe'],\n doctests=True, coverage=False):\n return super(NoseTester, self).test(label=label, verbose=verbose,\n extra_argv=extra_argv,\n doctests=doctests, coverage=coverage)", "title": "" }, { "docid": "0f5532c513b9eeb466493c10407bf585", "score": "0.45915064", "text": "def Process2(self):\n if self.inRange('Process2', 'argument'):\n r = self.__impl.process2Behav(argument=self.actdict['Process2']['argument']['tok_list'])\n r['status'] = ['CapabilityExample', 'Process2', 'on']\n \n self.clear_tok('Process2')\n self.parseResult(r)\n return True \n else:\n insuff_pins = self.wrong_tok('Process2')\n for pin in insuff_pins:\n LOG.debug('Incorrect token number on input %s of action %s: %s'\n % (pin, \"Process2\", self.actdict['Process2'][pin]['tok_list']))\n return False", "title": "" }, { "docid": "035a46a599ce8db2565c988864421431", "score": "0.45821315", "text": "def _AddTestsFor(test_class, is_v2):\n opts = _GetTestConfigsV2() if is_v2 else _GetTestConfigsV1()\n for (precision_mode, convert_online, dynamic_engine, use_calibration,\n dynamic_shape) in opts:\n conversion = \"OnlineConversion\" if convert_online else \"OfflineConversion\"\n engine_type = \"DynamicEngine\" if dynamic_engine else \"StaticEngine\"\n calibration_type = \"UseCalibration\" if use_calibration else \"NoCalibration\"\n dynamic_shape_type = \"DynamicShape\" if dynamic_shape else \"ImplicitBatch\"\n test_name = \"%s_%s_%s_%s_%s_%s\" % (\"testTfTrtV2\" if is_v2 else \"testTfTrt\",\n conversion, engine_type, precision_mode,\n calibration_type, dynamic_shape_type)\n run_params = RunParams(\n 
convert_online=convert_online,\n precision_mode=precision_mode,\n dynamic_engine=dynamic_engine,\n test_name=test_name,\n use_calibration=use_calibration,\n is_v2=is_v2,\n dynamic_shape=dynamic_shape)\n if is_v2:\n setattr(test_class, test_name,\n test_util.run_v2_only(_GetTest(run_params)))\n else:\n setattr(test_class, test_name,\n test_util.run_v1_only(\"\", _GetTest(run_params)))", "title": "" }, { "docid": "6f37e50655c4b5b6ce1e19c55bd7cbee", "score": "0.45816487", "text": "def test_method_decorators_incorrect(\n assert_errors,\n parse_ast_tree,\n code,\n default_options,\n mode,\n):\n tree = parse_ast_tree(mode(code))\n\n visitor = FunctionDefinitionVisitor(default_options, tree=tree)\n visitor.run()\n\n assert_errors(visitor, [WrongDescriptorDecoratorViolation])", "title": "" }, { "docid": "be0f4f93a24bfe2673794b67731d608a", "score": "0.45760384", "text": "def test_strict_option_is_deprecated(pytester: Pytester) -> None:\n pytester.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.unknown\n def test_foo(): pass\n \"\"\"\n )\n result = pytester.runpytest(\"--strict\", \"-Wdefault::pytest.PytestRemovedIn8Warning\")\n result.stdout.fnmatch_lines(\n [\n \"'unknown' not found in `markers` configuration option\",\n \"*PytestRemovedIn8Warning: The --strict option is deprecated, use --strict-markers instead.\",\n ]\n )", "title": "" }, { "docid": "ab8a93fdf4e4d18c0172e39c7f1fe60e", "score": "0.45744228", "text": "def test_view_even(self):\n self.__base_view_test(self.object, 100, 10)\n self.__base_view_test(self.object, 100, 100)\n self.__base_view_test(self.object, 10, 9)\n self.__base_view_test(self.object, 9, 10)", "title": "" }, { "docid": "d80257266e946468aad174a20829d28b", "score": "0.4563983", "text": "def test_cam_rotor2(self):\n\n # The flags, execute the script, and then check the chi2 value.\n self.flags(opt=True)\n self.interpreter.run(script_file=self.cam_path+'rotor2.py')\n self.check_chi2(0.24293662685639067)", "title": "" }, { "docid": "e134f2319ecd8a3f5855917f483eb755", "score": "0.4560073", "text": "def test_step_over_with_python_api(self):\n self.build()\n self.inline_stepping_step_over()", "title": "" }, { "docid": "0d37925cdf2cb36c21839c459f07bad1", "score": "0.45436767", "text": "def test_2(self):\n self.assertTrue(False, \"This is a failing test\")", "title": "" }, { "docid": "f6d4ce1d42c5eef68eea704ba026c24e", "score": "0.45401827", "text": "def skip_if_tpc_disabled(f):\r\n def skip_if_tpc_disabled_(self):\r\n from psycopg2 import ProgrammingError\r\n cnn = self.connect()\r\n cur = cnn.cursor()\r\n try:\r\n cur.execute(\"SHOW max_prepared_transactions;\")\r\n except ProgrammingError:\r\n return self.skipTest(\r\n \"server too old: two phase transactions not supported.\")\r\n else:\r\n mtp = int(cur.fetchone()[0])\r\n cnn.close()\r\n\r\n if not mtp:\r\n return self.skipTest(\r\n \"server not configured for two phase transactions. 
\"\r\n \"set max_prepared_transactions to > 0 to run the test\")\r\n return f(self)\r\n\r\n skip_if_tpc_disabled_.__name__ = f.__name__\r\n return skip_if_tpc_disabled_", "title": "" }, { "docid": "fc40e51257ebc576a53aff7cdfe7b8b7", "score": "0.4538359", "text": "def test84_unrechable_func_3rd(self):\n input = \"\"\"\n void foo(){}\n void foo1(){foo();}\n void foo2(boolean f){foo2(foo3());}\n boolean foo3(){return foo3();}\n int main(){\n foo1();\n float main;\n main = 1e10-10e1;\n string foo2;\n foo2(true);\n do\n return 1;\n while(true);\n }\n \"\"\"\n expect = \"Type Mismatch In Expression: CallExpr(Id(foo2),[BooleanLiteral(true)])\"\n self.assertTrue(TestChecker.test(input,expect,484))", "title": "" }, { "docid": "9722af49e83711731b1c58715c190982", "score": "0.45316827", "text": "def test_meta_block_stop_exit_session(self, pytester: pytest.Pytester, adaptavist_mock: AdaptavistMock):\n pytester.makepyfile(\n \"\"\"\n import pytest\n\n def test_TEST_T123(meta_block):\n with meta_block(1) as mb_1:\n mb_1.check(False, action_on_fail=mb_1.Action.STOP_EXIT_SESSION)\n mb_1.check(False, message_on_fail=\"THIS SHOULD NOT BE DISPLAYED\")\n with meta_block(2) as mb_2:\n mb_2.check(True)\n\n def test_TEST_T124(meta_block):\n with meta_block(1) as mb_1:\n mb_1.check(True)\n \"\"\"\n )\n pytester.runpytest(\"--adaptavist\")\n _, _, etss = adaptavist_mock\n assert etss.call_count == 1\n assert etss.call_args.kwargs[\"status\"] == \"Blocked\"", "title": "" }, { "docid": "3aaf74e527af77be7059ddb7044ce2ae", "score": "0.45248854", "text": "def test_doc_directive2_md(testfile_creator, testfile_tester, testfile_checker):\n testfile = testfile_creator(\"doc/directive2.md\")\n result = testfile_tester(\n contents=testfile, pytest_options=[\"-v\", \"--doctest-modules\"]\n )\n nofail_noerror_nowarn(result)\n testfile_checker(\"doc/test_directive2.py\", testfile)", "title": "" }, { "docid": "7ea1ded73f0984658ed774f03fdc8480", "score": "0.4516789", "text": "def test_optimizer_from_api2():\n g = Generator(\n 1,\n 3,\n 2,\n 500,\n GD=400,\n GDD=400,\n TOD=800,\n FOD=7000,\n QOD=70000,\n resolution=0.05,\n normalize=True,\n )\n g.generate_freq()\n\n cf = CosFitMethod(*g.data)\n cf.guess_GD(900)\n with patch.object(FitOptimizer, \"update_plot\") as patched_obj:\n res = cf.optimizer(2, order=5, initial_region_ratio=0.01, extend_by=0.01)\n patched_obj.assert_called()\n np.testing.assert_array_almost_equal(res, [900, 400, 800, 7000, 70000])", "title": "" }, { "docid": "48388a21311e867fc39ef63c2e33eba1", "score": "0.4514874", "text": "def test_hidden_tests_autotest(self, db, course_dir):\n run_nbgrader([\"db\", \"assignment\", \"add\", \"ps1\", \"--db\", db, \"--duedate\",\n \"2015-02-02 14:58:23.948203 America/Los_Angeles\"])\n run_nbgrader([\"db\", \"student\", \"add\", \"foo\", \"--db\", db])\n run_nbgrader([\"db\", \"student\", \"add\", \"bar\", \"--db\", db])\n run_nbgrader([\"db\", \"student\", \"add\", \"baz\", \"--db\", db])\n with open(\"nbgrader_config.py\", \"a\") as fh:\n fh.write(\"\"\"c.ClearSolutions.code_stub=dict(python=\"# YOUR CODE HERE\")\"\"\")\n\n self._copy_file(join(\"files\", \"autotest-hidden.ipynb\"), join(course_dir, \"source\", \"ps1\", \"p1.ipynb\"))\n self._copy_file(join(\"files\", \"autotests.yml\"), join(course_dir, \"autotests.yml\"))\n run_nbgrader([\"generate_assignment\", \"ps1\", \"--db\", db])\n\n self._copy_file(join(\"files\", \"autotest-hidden-unchanged.ipynb\"), join(course_dir, \"submitted\", \"foo\", \"ps1\", \"p1.ipynb\"))\n self._copy_file(join(\"files\", 
\"autotest-hidden-changed-wrong.ipynb\"), join(course_dir, \"submitted\", \"bar\", \"ps1\", \"p1.ipynb\"))\n self._copy_file(join(\"files\", \"autotest-hidden-changed-right.ipynb\"), join(course_dir, \"submitted\", \"baz\", \"ps1\", \"p1.ipynb\"))\n\n # make sure submitted validates for both bar and baz (should only fail on hidden tests), but not foo (missing any input and visible type checks will fail)\n output = run_nbgrader([\n \"validate\", join(course_dir, \"submitted\", \"foo\", \"ps1\", \"p1.ipynb\"),\n ], stdout=True)\n assert output.splitlines()[0] == (\n \"VALIDATION FAILED ON 1 CELL(S)! If you submit your assignment \"\n \"as it is, you WILL NOT\"\n )\n output = run_nbgrader([\n \"validate\", join(course_dir, \"submitted\", \"bar\", \"ps1\", \"p1.ipynb\")\n ], stdout=True)\n assert output.strip() == \"Success! Your notebook passes all the tests.\"\n\n output = run_nbgrader([\n \"validate\", join(course_dir, \"submitted\", \"baz\", \"ps1\", \"p1.ipynb\")\n ], stdout=True)\n assert output.strip() == \"Success! Your notebook passes all the tests.\"\n\n # autograde\n run_nbgrader([\"autograde\", \"ps1\", \"--db\", db])\n assert os.path.exists(join(course_dir, \"autograded\", \"foo\", \"ps1\", \"p1.ipynb\"))\n assert os.path.exists(join(course_dir, \"autograded\", \"bar\", \"ps1\", \"p1.ipynb\"))\n assert os.path.exists(join(course_dir, \"autograded\", \"baz\", \"ps1\", \"p1.ipynb\"))\n\n # make sure hidden tests are placed back in autograded\n sub_nb = join(course_dir, \"autograded\", \"foo\", \"ps1\", \"p1.ipynb\")\n with io.open(sub_nb, mode='r', encoding='utf-8') as nb:\n source = nb.read()\n assert \"BEGIN HIDDEN TESTS\" in source\n sub_nb = join(course_dir, \"autograded\", \"bar\", \"ps1\", \"p1.ipynb\")\n with io.open(sub_nb, mode='r', encoding='utf-8') as nb:\n source = nb.read()\n assert \"BEGIN HIDDEN TESTS\" in source\n sub_nb = join(course_dir, \"autograded\", \"baz\", \"ps1\", \"p1.ipynb\")\n with io.open(sub_nb, mode='r', encoding='utf-8') as nb:\n source = nb.read()\n assert \"BEGIN HIDDEN TESTS\" in source\n\n # make sure autograded for foo does not validate, should fail on visible and hidden tests\n output = run_nbgrader([\n \"validate\", join(course_dir, \"autograded\", \"foo\", \"ps1\", \"p1.ipynb\"),\n ], stdout=True)\n assert output.splitlines()[0] == (\n \"VALIDATION FAILED ON 1 CELL(S)! If you submit your assignment \"\n \"as it is, you WILL NOT\"\n )\n # make sure autograded for bar does not, should fail on hidden tests\n output = run_nbgrader([\n \"validate\", join(course_dir, \"autograded\", \"bar\", \"ps1\", \"p1.ipynb\"),\n ], stdout=True)\n assert output.splitlines()[0] == (\n \"VALIDATION FAILED ON 1 CELL(S)! If you submit your assignment \"\n \"as it is, you WILL NOT\"\n )\n # make sure autograded for bar validates, should succeed on hidden tests\n output = run_nbgrader([\n \"validate\", join(course_dir, \"autograded\", \"baz\", \"ps1\", \"p1.ipynb\"),\n ], stdout=True)\n assert output.strip() == \"Success! 
Your notebook passes all the tests.\"\n\n with Gradebook(db) as gb:\n submission = gb.find_submission(\"ps1\", \"foo\")\n nb1 = submission.notebooks[0]\n assert nb1.score == 0\n submission = gb.find_submission(\"ps1\", \"bar\")\n nb1 = submission.notebooks[0]\n assert nb1.score == 0\n submission = gb.find_submission(\"ps1\", \"baz\")\n nb1 = submission.notebooks[0]\n assert nb1.score == 1", "title": "" }, { "docid": "f5484e6c3a0cb4e933fc56f47e4bb76e", "score": "0.45074752", "text": "async def test_properties_v2(event_loop, v2_server):\n async with v2_server:\n async with aiohttp.ClientSession(loop=event_loop) as websession:\n api = await API.login_via_credentials(TEST_EMAIL, TEST_PASSWORD, websession)\n systems = await api.get_systems()\n system = systems[TEST_SYSTEM_ID]\n\n keypad = system.sensors[\"195\"]\n assert keypad.data == 0\n assert not keypad.error\n assert not keypad.low_battery\n assert keypad.settings == 1\n\n # Ensure that attempting to access the triggered of anything but\n # an entry sensor in a V2 system throws an error:\n with pytest.raises(SimplipyError):\n assert keypad.triggered == 42\n\n entry_sensor = system.sensors[\"609\"]\n assert entry_sensor.data == 130\n assert not entry_sensor.error\n assert not entry_sensor.low_battery\n assert entry_sensor.settings == 1\n assert not entry_sensor.trigger_instantly\n assert not entry_sensor.triggered", "title": "" }, { "docid": "9b6dca19da4942f625c769bd1ca80be7", "score": "0.44941473", "text": "def test2():\n # Step one checkdpendencies\n # this will fetch and build all dependencies\n if not TaskingBenchmarks.check_dependencies():\n print(\"Dependency check failed\")\n return\n # Step2: Fetch the specified repo\n if not TaskingBenchmarks.fetch():\n print(\"Unable to fetch\")\n return\n if not TaskingBenchmarks.build():\n print(\"Unable to build\")", "title": "" }, { "docid": "fdd1e23d4903c77c51006da06b93c630", "score": "0.4490546", "text": "def skip_other_tests():\n import unittest\n from unittest import TextTestRunner as _TextTestRunner\n\n class CustomTestRunner(_TextTestRunner):\n def run(self, test):\n if test._tests:\n for t1 in test._tests:\n for t2 in t1._tests:\n if t2._testMethodName == self._special_name:\n return _TextTestRunner.run(self, t2)\n raise RuntimeError(\"couldn't isolate test\")\n\n def outer(fun, *args, **kwargs):\n # monkey patch unittest module\n unittest.TextTestRunner = CustomTestRunner\n if hasattr(unittest, 'runner'): # unittest2\n unittest.runner.TextTestRunner = CustomTestRunner\n CustomTestRunner._special_name = fun.__name__\n\n def inner(self):\n return fun(self, *args, **kwargs)\n return inner\n\n return outer", "title": "" }, { "docid": "a1bea1358896deb1a8bd0323381dfacb", "score": "0.44868478", "text": "def test_game_over_2(self):\n self.assertMain(\"main_game_over_2.in\", \"main_game_over_2.out\", stop_early=False)", "title": "" }, { "docid": "99bf04e63a7ba7dd4cff9f53341c77af", "score": "0.4483516", "text": "def test(args: Dict[str, Any], unknown: Optional[List[str]]) -> None:\n estimator = _get_estimator(args, unknown)\n estimator.test(summary=args['summary'], eager=args['eager'])", "title": "" }, { "docid": "a4316e3670458cf1b76f743f7ddbf8a4", "score": "0.44782817", "text": "def test_func(self):\n return True", "title": "" }, { "docid": "c7956a90cfc940154713321a85b67278", "score": "0.4477657", "text": "def test_1(self):\n pass", "title": "" }, { "docid": "c7956a90cfc940154713321a85b67278", "score": "0.4477657", "text": "def test_1(self):\n pass", "title": "" }, { "docid": 
"c7956a90cfc940154713321a85b67278", "score": "0.4477657", "text": "def test_1(self):\n pass", "title": "" }, { "docid": "642ead6ae647a310d017f7f50640361f", "score": "0.44763717", "text": "def skip_on_fail(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n instance = args[0]\n try:\n return func(*args, **kwargs)\n except Exception as e:\n log_message(instance, \"Skipping Test : %s\" % repr(e))\n instance.skipTest(e)\n\n return wrapper", "title": "" }, { "docid": "885686a329fb41ec60ccc94d5af04516", "score": "0.4473928", "text": "def test_2_tensorflow_mnist(self):\n classifier, sess = get_image_classifier_tf()\n self._test_attack(classifier, self.x_test_mnist, self.y_test_mnist, False)", "title": "" }, { "docid": "6171beb8e8ed3ca7b469475db1a38bfb", "score": "0.44624028", "text": "def test_version():\n with patch.object(firewalld, \"__firewall_cmd\", return_value=2):\n assert firewalld.version() == 2", "title": "" }, { "docid": "76fb681d55fc2efeac2dac6cd310874e", "score": "0.44618356", "text": "def test_meta_block_fail_exit_session(self, pytester: pytest.Pytester, adaptavist_mock: AdaptavistMock):\n pytester.makepyfile(\n \"\"\"\n import pytest\n\n def test_TEST_T123(meta_block):\n with meta_block(1) as mb_1:\n mb_1.check(False, action_on_fail=mb_1.Action.FAIL_EXIT_SESSION)\n mb_1.check(False, message_on_fail=\"THIS SHOULD NOT BE DISPLAYED\")\n with meta_block(2) as mb_2:\n mb_2.check(True)\n\n def test_TEST_T124(meta_block):\n with meta_block(1) as mb_1:\n mb_1.check(True)\n \"\"\"\n )\n pytester.runpytest(\"--adaptavist\")\n _, _, etss = adaptavist_mock\n assert etss.call_count == 1\n assert etss.call_args.kwargs[\"status\"] == \"Fail\"", "title": "" }, { "docid": "083774f46d48b811349db4e3dd659d13", "score": "0.4460597", "text": "def test_scenario():\r\n pass", "title": "" }, { "docid": "83b0ed59ba7db0d3a0c31921d84b781f", "score": "0.44539884", "text": "def test_profile_training_data(self, model_version):\n pd = pytest.importorskip(\"pandas\")\n np = pytest.importorskip(\"numpy\")\n\n cont_col = np.random.random(100)\n discrete_col = np.random.choice(5, 100)\n string_discrete_col = np.random.choice([\"a\", \"b\", \"c\", \"d\", \"e\"], size=100)\n string_freeform_col = [uuid.uuid4().hex.upper()[0:10] for _ in range(100)]\n other_col = [datetime.datetime.now() for x in range(100)]\n output_col = np.random.choice(2, 100)\n\n col_names = [\n \"Continuous_Numeric\",\n \"Discrete_Numeric\",\n \"Discrete_String\",\n \"Freeform_String\",\n \"Other\",\n \"Output_Col\",\n ]\n supported_col_names = [\"Continuous_Numeric\", \"Discrete_Numeric\", \"Output_Col\"]\n\n # create dataframes\n df = pd.DataFrame(\n list(\n zip(\n cont_col,\n discrete_col,\n string_discrete_col,\n string_freeform_col,\n other_col,\n output_col,\n )\n ),\n columns=col_names,\n )\n\n # log to model version with new method\n model_version.log_training_data_profile(\n df.loc[:, df.columns != \"Output_Col\"],\n pd.DataFrame(df[\"Output_Col\"]),\n )\n\n # get back attributes to validate\n attributes = model_version.get_attributes()\n key = _deployable_entity._FEATURE_DATA_ATTR_PREFIX + \"{}\"\n discrete_col_missing_summary = _utils.json_to_proto(\n model_version.get_attribute(key.format(\"2\")),\n FeatureDataInModelVersion, # missing value\n )\n discrete_col_distribution_summary = _utils.json_to_proto(\n model_version.get_attribute(key.format(\"3\")),\n FeatureDataInModelVersion, # missing value\n )\n\n # missing value, distribution summary for each supported column +\n # equal number of attributes for visualization\n 
assert len(attributes.keys()) == len(supported_col_names) * 2 * 2\n assert (\n discrete_col_distribution_summary.summary_type_name\n == \"verta.discreteHistogram.v1\"\n )\n assert (\n discrete_col_distribution_summary.profiler_name == \"BinaryHistogramProfiler\"\n )\n assert (\n len(\n json.loads(discrete_col_distribution_summary.content)[\n \"discreteHistogram\"\n ][\"buckets\"]\n )\n <= 5\n )\n\n assert (\n discrete_col_missing_summary.summary_type_name\n == \"verta.discreteHistogram.v1\"\n )\n assert discrete_col_missing_summary.profiler_name == \"MissingValuesProfiler\"\n assert (\n len(\n json.loads(discrete_col_missing_summary.content)[\"discreteHistogram\"][\n \"buckets\"\n ]\n )\n == 2\n )\n\n # reference distribution attributes can be fetched back as histograms\n for col in supported_col_names:\n key = _deployable_entity._TRAINING_DATA_ATTR_PREFIX + col + \"Distribution\"\n histogram = model_version.get_attribute(key)\n assert isinstance(histogram, _verta_data_type._VertaDataType)", "title": "" }, { "docid": "9f09cc56038c8f7eb4d4cf2b127ec5af", "score": "0.44474134", "text": "def check_admin_access_if_oss_fuzz(func):\n\n @functools.wraps(func)\n def wrapper(self):\n \"\"\"Wrapper.\"\"\"\n if utils.is_oss_fuzz():\n return check_admin_access(func)(self)\n\n return func(self)\n\n return wrapper", "title": "" }, { "docid": "8154041f6136e6ecfa4235eb81bab8bf", "score": "0.4443883", "text": "def test_user1_allowed_by_policy2(self):\n perm_cache = perm.PermissionCache(self.env, 'user2')\n self.assertIn('ACTION_2', perm_cache)\n self.assertEqual(1, self.ps.policies[0].call_count)\n self.assertEqual(1, self.ps.policies[1].call_count)\n self.assertEqual([\n ('policy1', 'ACTION_2', None),\n ('policy2', 'ACTION_2', True),\n ], self.decisions)", "title": "" }, { "docid": "41d0e5a516ab7ff4c7a472142a9c4a7b", "score": "0.4443878", "text": "def test_doc_example2_md(testfile_creator, testfile_tester, testfile_checker):\n testfile = testfile_creator(\"doc/example2.md\", skips=[\"Python 3.7\", \"LAST\"])\n result = testfile_tester(\n contents=testfile, pytest_options=[\"-v\", \"--doctest-modules\"]\n )\n nofail_noerror_nowarn(result)\n testfile_checker(\"doc/test_example2.py\", testfile)", "title": "" }, { "docid": "53fd2fb48439b0138236795795b17b28", "score": "0.44356787", "text": "def test__before_after__parametrize__many_functions(testdir):\n testdir.makepyfile(\"\"\"\n import pytest\n data = {p1: {p2: {\"before\": [0]*3, \"after\": [0]*3} for p2 in [7, 8]} for p1 in [1, 2]}\n data2 = {p1: {p2: {\"before\": [0]*3, \"after\": [0]*3} for p2 in [7, 8]} for p1 in [1, 2]}\n \n def teardown_module():\n assert data == {p1: {p2: {\"before\": [3, 2, 1], \"after\": [1, 2, 3]} for p2 in [7, 8]} for p1 in [1, 2]}\n assert data2 == {p1: {p2: {\"before\": [3, 2, 1], \"after\": [1, 2, 3]} for p2 in [7, 8]} for p1 in [1, 2]}\n\n @pytest.mark.parametrize(\"param1\", [1, 2])\n @pytest.mark.deployment_test\n @pytest.mark.parametrize(\"param2\", [7, 8])\n async def test_parametrize(param1, param2, deploy_status):\n data[param1][param2][deploy_status.status][0] += 1\n yield\n data[param1][param2][deploy_status.status][1] += 1\n yield\n data[param1][param2][deploy_status.status][2] += 1\n \n @pytest.mark.parametrize(\"param1\", [1, 2])\n @pytest.mark.deployment_test\n @pytest.mark.parametrize(\"param2\", [7, 8])\n async def test_parametrize2(param1, param2, deploy_status):\n data2[param1][param2][deploy_status.status][0] += 1\n yield\n data2[param1][param2][deploy_status.status][1] += 1\n yield\n 
data2[param1][param2][deploy_status.status][2] += 1\n \"\"\")\n result = testdir.runpytest('--deployment_test=echo 1')\n result.assert_outcomes(failed=0, error=0, passed=65, skipped=0)", "title": "" }, { "docid": "0a31b6417cdfa7ea254ed810bede0803", "score": "0.44356555", "text": "def test_hacking_support_python2():\n code = dedent(\"\"\"\\\n import ConfigParser\n import re\n import StringIO\n\n import thirdparty\n \"\"\")\n\n assert format_(code, formatter=hacking) == code", "title": "" }, { "docid": "35419cc9ac33e6f93de1c50dadac33b2", "score": "0.44351637", "text": "def callDeprecated(self, version, f, *args, **kwargs):\n result = f(*args, **kwargs)\n warningsShown = self.flushWarnings([self.callDeprecated])\n try:\n info = list(version)\n except TypeError:\n since = version\n replacement = None\n else:\n [since, replacement] = info\n\n if len(warningsShown) == 0:\n self.fail('%r is not deprecated.' % (f,))\n\n observedWarning = warningsShown[0]['message']\n expectedWarning = getDeprecationWarningString(\n f, since, replacement=replacement)\n self.assertEqual(expectedWarning, observedWarning)\n\n return result", "title": "" }, { "docid": "f263d4050f97a892a4bfa609d3e81dbd", "score": "0.44349003", "text": "def test_RestrictingNodeTransformer__visit_FunctionDef__2():\n result = compile_restricted_exec(\"def foo(_bad=1): pass\")\n assert result.errors == (functiondef_err_msg,)", "title": "" }, { "docid": "069f4afe28a06a33fd553efe3af27f0f", "score": "0.442796", "text": "def is_even(arg):\n if arg % 2 == 1:\n print(\"not even\")\n else:\n print(\"even\")", "title": "" }, { "docid": "f3acd0e2a125fbe7673f3b4c677e9e2d", "score": "0.4423457", "text": "def test_catch_violation(test_app):\n pass", "title": "" }, { "docid": "75b2a9fac3c57511880383b4012f005e", "score": "0.44215012", "text": "def test(self, *args, **kwargs):\n pass", "title": "" } ]
8815029ce05be0962e676d03a39cb105
get GS states behind and other states at front
[ { "docid": "f0fd1d1e4410d79516a30bb815aeb5d6", "score": "0.0", "text": "def ordered_states(state_list):\n labels = [s.label for s in state_list]\n indices = np.argsort(labels)\n state_list = np.array(state_list)[indices].tolist()\n\n ordered_list = []\n for state in state_list:\n if state.label == _GS_.label:\n ordered_list.append(state)\n else:\n ordered_list.insert(0, state)\n\n return tuple(ordered_list)", "title": "" } ]
[ { "docid": "8bb41154d12cc8e694ed65d92ee342b4", "score": "0.67085534", "text": "def states(self):", "title": "" }, { "docid": "c78ef67cf415cc2aeb46b1bde676b289", "score": "0.6554295", "text": "def getStates(self):\r\n return self.grid", "title": "" }, { "docid": "0463ff4107e45caa9895204945a627e5", "score": "0.6337517", "text": "def getGhostStartStates(self):\n util.raiseNotDefined()", "title": "" }, { "docid": "0463ff4107e45caa9895204945a627e5", "score": "0.6337517", "text": "def getGhostStartStates(self):\n util.raiseNotDefined()", "title": "" }, { "docid": "1eb158c83d6666f09ed5bd7645eb8088", "score": "0.63149667", "text": "def get_all_states():\n return get_running_states() + get_stopped_states()", "title": "" }, { "docid": "117e9c780d8fa4c1347ff05d79865beb", "score": "0.6154688", "text": "def get_states(self):\n return filter(\n lambda x: self.grid[x[0]][x[1]] != 'x',\n [(i, j) for i in range(self.height) for j in range(self.width)])", "title": "" }, { "docid": "b60da40e763516bf772eff3fe91f939f", "score": "0.6111885", "text": "def getStates(self):\n return self.states", "title": "" }, { "docid": "d114cbf595987696d50e5e38b2358131", "score": "0.6043684", "text": "def getstate(self):\n return self.mt[:], self.index", "title": "" }, { "docid": "d114cbf595987696d50e5e38b2358131", "score": "0.6043684", "text": "def getstate(self):\n return self.mt[:], self.index", "title": "" }, { "docid": "1b4bf3b0e91233f90983c9433aac3d46", "score": "0.60252845", "text": "def get_states(self):\n return self._states[:,0], self._states[:,1], self._states[:,2]", "title": "" }, { "docid": "df62b77c25327e3b8f3e4bc52a9796e4", "score": "0.5957939", "text": "def get_state(self) -> None:", "title": "" }, { "docid": "32ab105b06f43f2dfa9359c9f148cad9", "score": "0.5854525", "text": "def get_states(cls) -> List[Tuple[str, str]]:\n pass", "title": "" }, { "docid": "199cb400b25c0f3fd19b30ab4faae96c", "score": "0.57822436", "text": "def getState(self):", "title": "" }, { "docid": "0b3b68b0436071b0b96c94748abfe18c", "score": "0.5780284", "text": "def get_states(self):\n for model_name in self.model_names:\n if model_name == self._object_name:\n data = self.get_model_state(\n model_name, \"world\") # gazebo service client\n return np.array([\n data.pose.position.x,\n data.pose.position.y,\n data.pose.position.z,\n data.twist.linear.x,\n data.twist.linear.y,\n data.twist.linear.z\n ])", "title": "" }, { "docid": "626c258bbae1d31ae69bcde9ce658aa9", "score": "0.5776954", "text": "def states(self):\n raise NotImplementedError", "title": "" }, { "docid": "20dfd44155ea96aece2940b0d262088d", "score": "0.57434046", "text": "def get_crossover(self):\n self.crossSwitch.get_state()\n return self.crossSwitch.state", "title": "" }, { "docid": "6c56f3e996431266d4421b0ea6b15fb4", "score": "0.57357645", "text": "def states(self):\n return self.nodes()", "title": "" }, { "docid": "a755879344a9bf0ffc7e14bafd8a1f25", "score": "0.57251656", "text": "def states(self):\n return self._g.nodes", "title": "" }, { "docid": "b57103f7ecf8766e045810593cb52fa3", "score": "0.5711294", "text": "def states(self):\n return self._stateList", "title": "" }, { "docid": "b57103f7ecf8766e045810593cb52fa3", "score": "0.5711294", "text": "def states(self):\n return self._stateList", "title": "" }, { "docid": "041530e8c17e7bfd896f9a1940b99c6a", "score": "0.5693339", "text": "def states(self):\n return list(self.graph.states())", "title": "" }, { "docid": "ce716cb0f204b1d61bb294a1e246fb57", "score": "0.5690933", "text": "def get_states(self):\n return 
list(self.__transitions.keys())", "title": "" }, { "docid": "ac06a4310d7da6e89ff64986956e77b2", "score": "0.5670228", "text": "def _get_states(rev, act):\n if rev:\n initial_state = 'products'\n final_state = 'reactants'\n else:\n initial_state = 'reactants'\n final_state = 'products'\n # Overwrites the final state if necessary\n if act:\n final_state = 'transition state'\n return initial_state, final_state", "title": "" }, { "docid": "5ac2818ad3f06c961b3f45ab39874e9a", "score": "0.56573683", "text": "def backward_reachable(self, state):\n ancestors = nx.ancestors(self, state)\n return ancestors", "title": "" }, { "docid": "09ee3a6f17c3d03631b5dddeb0aa00c9", "score": "0.5645659", "text": "def find_next_states(self):\n\n next_states = []\n next_states.append(self.move_up())\n next_states.append(self.move_down())\n next_states.append(self.move_left())\n next_states.append(self.move_right())\n\n next_states = list(filter(None, next_states))\n\n return next_states", "title": "" }, { "docid": "66f2041d70f18d63454a972cc4102ef4", "score": "0.56241643", "text": "def getStates(self) -> List[int]:\n return list(set(itertools.chain.from_iterable(map(lambda tr: list(tr[1].values()) + [tr[0]],\n self.transitions.items()))))", "title": "" }, { "docid": "71e6abcebb62dfc83f9359aa6795ff29", "score": "0.5610207", "text": "def get_state(self):\n return {\"powers\": self._lifx.get_power_all_lights(),\n \"colors\": self._lifx.get_color_all_lights()}", "title": "" }, { "docid": "843cba809821dbe82c4cfdec79e0b5be", "score": "0.55871373", "text": "def get_states(self):\n if self.use_image_input is True:\n states = self.mask_fn(self.get_raw_img())\n states = np.array(states, dtype='uint8')\n else:\n states = np.array([self.agent.x,\n self.agent.y]) \n if self.use_discrete_state is True:\n states = np.floor(states)\n states = states.astype(int)\n return states", "title": "" }, { "docid": "0cbd33a3dbfcf8159700008bb383dbea", "score": "0.5576081", "text": "def __get_states(self):\n return [getattr(self.me, i) for i in dir(self.me) if i.startswith('MODE_')]", "title": "" }, { "docid": "f3e3ba6fcf0877d475b6c3c213d882c2", "score": "0.5570573", "text": "def getEventStates(self):\n return self.eventStateConversions", "title": "" }, { "docid": "b24967450be7eb246a3770372e5863be", "score": "0.55640405", "text": "def get_state(self):\n return np.concatenate([self.screen_history[hist] for hist in range(0, self.history)], axis=2)", "title": "" }, { "docid": "897167df41453f191dfe32d35cc4dbd6", "score": "0.55342996", "text": "def S(self):\r\n result = []\r\n nz = np.transpose( np.nonzero(self._map == 1) )\r\n for ind in nz:\r\n result.append( GWState( ind ) )\r\n return result", "title": "" }, { "docid": "d15c5763d4e186764189df63d7be1555", "score": "0.5526194", "text": "def getSharkState(self):\n sharkX = 10\n sharkY = 10\n sharkZ = -15\n sharkTheta = 0\n\n self.sharkXList += [sharkX]\n self.sharkYList += [sharkY]\n self.sharkZList += [sharkZ]\n\n return (sharkX, sharkY, sharkTheta)", "title": "" }, { "docid": "a4c57505cc9ad8bb6c42e02038dd2510", "score": "0.55120903", "text": "def getBoardState( self ):\n\t\treturn( ( self.baseBoard, self.lineBoard ) )", "title": "" }, { "docid": "9473aab9e70030e9ce1531aa58fb57e3", "score": "0.5509339", "text": "def get_state(obj):", "title": "" }, { "docid": "526221c50e68628a59782d3365497483", "score": "0.550275", "text": "def get_top(self):\n if self.Off:\n return self.Off\n elif self.Modify:\n return self.Modify\n else:\n return self.C", "title": "" }, { "docid": 
"46dee2b7096bdc6d29e03182a66f3ae6", "score": "0.54995865", "text": "def get_states_space(self):\n logging.DEBUG('states space: %s' % self.states_space)\n return self.states_space", "title": "" }, { "docid": "6fb3e869a3b7c76246e90c7c6c954100", "score": "0.5490537", "text": "def state(self):", "title": "" }, { "docid": "795e63c67637aeafb2c6fb229231c4ef", "score": "0.5489766", "text": "def getCurrentState(self):\n return (self.pi.input(self.forwardPin), self.pi.input(self.backwardPin))", "title": "" }, { "docid": "4e14a3b8f1e6f3baedd25ba4df5941d3", "score": "0.54765767", "text": "def render_backfaces(self):\n return bool(self.view.gxview.get_3d_group_flags(self.number) & 0b1)", "title": "" }, { "docid": "f1da7eb3db8b530665ab6886c5ba63ab", "score": "0.54675025", "text": "def states(self):\n return [(x, y) for x in range(0, self.max_cars_ + 1)\n for y in range(0, self.max_cars_ + 1)]", "title": "" }, { "docid": "6909fec7d4ccadaa7ae0620e9b43b379", "score": "0.5465265", "text": "def get_state(self, visual_perception=[0,0]):\n\n #state related to step counter\n internal_state = list(np.argwhere((self.state_space[0] - self.agent_state) <= 0)[-1])\n \n return internal_state + visual_perception", "title": "" }, { "docid": "d58f80f08a6352873259109eaf6f166d", "score": "0.54634565", "text": "def get_states(self):\n return np.array(self.states)", "title": "" }, { "docid": "13f0c6b8aec1e17acf78f601ffb31e6d", "score": "0.54610103", "text": "def states(self):\n return Tuple(*[arg[0] for arg in self.args])", "title": "" }, { "docid": "b5f91ea77a5b87a903b9a81b66167581", "score": "0.5458116", "text": "def getTurtleState(myTurtle):\n\n return (myTurtle.pencolor(), myTurtle.position(), myTurtle.heading())", "title": "" }, { "docid": "15452b10da419217d9ffaaf6ab6ce748", "score": "0.5448365", "text": "def getstate(self):\n return GPBase.getstate(self) + [self.Z,\n self.num_inducing,\n self.has_uncertain_inputs,\n self.X_variance]", "title": "" }, { "docid": "8889b77107c2d5b989bb0c8f4bdbfdc7", "score": "0.5438635", "text": "def get_state_space(self):\n state_list = []\n for dealer_hand in range(1,11):\n for hard_sum in range(1,22):\n state_list.append((dealer_hand, hard_sum, 0))\n state_list.append((dealer_hand, hard_sum, 1))\n state_list+=[lose_state, win_state, draw_state]\n return state_list", "title": "" }, { "docid": "e610433d5eede914516f446868932480", "score": "0.54343665", "text": "def get_state(obj, seen=None):", "title": "" }, { "docid": "903fbb34077d8a402d1bc49691c74a54", "score": "0.54273677", "text": "def get_states(self):\n states = self._get_states_from_api()\n return states", "title": "" }, { "docid": "2bbf527380094f80073fc58d664bfdc8", "score": "0.5424451", "text": "def getGameState(self):\n ### Student code goes here\n states = [[-1 for i in range(3)] for j in range(3)] \n \n at_str ='fact: '\n at_str += '(at ?tile ?posx ?posy)'\n bindings = self.kb.kb_ask(parse_input(at_str))\n \n for b in bindings:\n i1 = int(b['?posy'][3])-1\n i2 = int(b['?posx'][3])-1\n states[i1][i2] = int(b['?tile'][4])\n \n result = []\n for state in states:\n result.append(tuple(state))\n return tuple(result)", "title": "" }, { "docid": "db87627f5921bda656a9c805a87011bf", "score": "0.5420594", "text": "def get_victory_states(self):\n victory_states = [\n # rows\n self.cells[0:3],\n self.cells[3:6],\n self.cells[6:9],\n\n # cols\n self.cells[0::3],\n self.cells[1::3],\n self.cells[2::3],\n\n # diags\n self.cells[0::4],\n [self.cells[2], self.cells[4], self.cells[8]]\n ]\n return victory_states", "title": "" }, { "docid": 
"a288f181179a374796b5e4083b661a5d", "score": "0.54089415", "text": "def get_state(self):\n vertices = [x for x in self.vertices]\n v_labels = {k:v for k, v in self.v_labels.items()}\n edges = [x for x in self.edges]\n return vertices, v_labels, edges", "title": "" }, { "docid": "18d15d5fd4115a2d899857fb28611102", "score": "0.5405459", "text": "def get_states(self):\n states = ['AZ', 'CA', 'NY', 'NJ']\n return states", "title": "" }, { "docid": "367004c4acb6336273982f7b9e1c64c8", "score": "0.54026186", "text": "def GetVisitedColour(self):", "title": "" }, { "docid": "ca3d812ff27a82c908453739bc63c758", "score": "0.5402451", "text": "def __get_action_states(self, state):\n a_s = []\n for i in range(len(self.actions)):\n inc = self.neighbors[i]\n a = self.actions[i]\n nei_s = (state[0] + inc[0], state[1] + inc[1])\n if nei_s[0] >= 0 and nei_s[0] < self.height and nei_s[1] >= 0 and nei_s[\n 1] < self.width and self.grid[nei_s[0]][nei_s[1]] != 'x':\n a_s.append((a, nei_s))\n return a_s", "title": "" }, { "docid": "d0ef760d40493f30c4adbc755cfb41dd", "score": "0.5401083", "text": "def get_state(self) -> Dict:\n return self.game_state", "title": "" }, { "docid": "b3a20dc406b525e2edd2d98d19e9a533", "score": "0.53995454", "text": "def fBellStates(self):\n return {tuple(es.targets): es.fBellState for es in self.edges_specs}", "title": "" }, { "docid": "d5ebafc656e5bc9506657a4cbab1b58e", "score": "0.5398597", "text": "def getState():\n return state.getState(frame)", "title": "" }, { "docid": "419fbc5b28dea8fc36aedf43dc3f5702", "score": "0.5393231", "text": "def accessible_states(self):\n return set(i for i in self.prob)", "title": "" }, { "docid": "0a4603dfbe7631345a1d6ff859d75dbb", "score": "0.5381723", "text": "def ghostDirectionSuccessorStateAxioms(t, ghost_num, blocked_west_positions, blocked_east_positions):\n # iterate over \n # blocked east and blocked west disjoin \n pos_str = ghost_pos_str+str(ghost_num)\n east_str = ghost_east_str+str(ghost_num)\n lst = []\n lst3 = []\n for (x, y) in blocked_east_positions:\n lst.append(logic.PropSymbolExpr(pos_str, x, y, t))\n east = ~(logic.disjoin(lst))\n east = logic.PropSymbolExpr(east_str, t-1) & east\n lst2 = []\n for (x, y) in blocked_west_positions:\n lst2.append(logic.PropSymbolExpr(pos_str, x, y, t)) \n west = logic.disjoin(lst2)\n west = ~(logic.PropSymbolExpr(east_str, t-1)) & west\n return logic.PropSymbolExpr(east_str, t) % (east | west)", "title": "" }, { "docid": "dc665c7ed4b7eb677bd66a0931bdb3c6", "score": "0.5370432", "text": "def get_top_transition(self, transitions):", "title": "" }, { "docid": "cbb104d90650afe0ba88bbb707511411", "score": "0.53702575", "text": "def get_back(self):\n return self.__back", "title": "" }, { "docid": "b0f2d9d21182e170887351eeeb33938a", "score": "0.53696734", "text": "def get_state_keys(self):\r\n return []", "title": "" }, { "docid": "3fda34991ca8ab745e0e559c404fe543", "score": "0.53669214", "text": "def _backtrack( self, gamestate, stepsLeft ):\n modifier = (stepsLeft + 1) ** 4\n boards = gamestate[0]\n height = gamestate[1]\n turns = gamestate[2]\n bc = gamestate[3]\n currentp = turns & 1\n\n if self._canWin( boards[0] ): # 1. Check for win player 0\n ret = [modifier, 0]\n self._statedic[bc] = ret\n return ret\n\n if self._canWin( boards[1] ): # 2. Check for win player 1\n ret = [0, modifier]\n self._statedic[bc] = ret\n return ret\n\n if stepsLeft == 0: # 3. Recursion Anker\n self._statedic[bc] = [0, 0]\n return [0, 0]\n\n ret = [0, 0] # 4. 
Substates\n for col in range( 0, self._WIDTH ):\n if height[col] < self._maxHeight[col]:\n newboards = boards[:]\n newboards[currentp] = boards[currentp] ^ ( 1 << height[col] )\n newheight = height[:]\n newheight[col] += 1\n # No repeated function call needed if it is in dic already\n newbc = newboards[(turns + 1) & 1] + newboards[0] + newboards[1]\n if newbc in self._statedic:\n p1, p2 = self._statedic[newbc]\n else:\n newstate = ( newboards, newheight, turns + 1, newbc, )\n p1, p2 = self._backtrack( newstate, stepsLeft - 1 )\n ret[0] += p1\n ret[1] += p2\n\n self._statedic[bc] = ret\n return ret", "title": "" }, { "docid": "1d2ab444a403ae97ba644bf6de6c7da5", "score": "0.53641164", "text": "def state(self) -> Sequence[HostState]:\n return [h.state for h in self.hosts]", "title": "" }, { "docid": "9d9ff240049bbdd859b5710b0f6c4c33", "score": "0.53550065", "text": "def get_state(self) -> List[float]:\n raise NotImplementedError(\"Abstract method\")", "title": "" }, { "docid": "efff2db9e44149c6024b96ade8072875", "score": "0.53537565", "text": "def enumerateStates(self):\n states = []\n for i in range(self.width):\n for j in range(self.height):\n for k in range(self.depth):\n states.append((i, j, k))\n return states", "title": "" }, { "docid": "fcae298a30bea8e86b9d7fc8bcf10308", "score": "0.53488094", "text": "def enumerateStates(self):\n pass", "title": "" }, { "docid": "655312c0a76adeb058daa49c34c141c2", "score": "0.53405964", "text": "def get_state(self) -> GameState:\n pass", "title": "" }, { "docid": "255f4c2d50b504010c245536d74fcc77", "score": "0.5340384", "text": "def top(self):\n if self.tEvt['sType'] == \"init\":\n self.stateStart(self.Red)\n return 0\n else:\n return 0", "title": "" }, { "docid": "44fc540716132e9fd80f54de17a05645", "score": "0.53373474", "text": "def _get_current_state(self):\r\n\t\tcurrent_state = PRODUCTION.STATES.none\r\n\t\tfor production in self.get_productions():\r\n\t\t\tstate = production.get_animating_state()\r\n\t\t\tif state is not None and current_state < state:\r\n\t\t\t\tcurrent_state = state\r\n\t\treturn current_state", "title": "" }, { "docid": "d4881cfd3c13948ae160e723b297e47e", "score": "0.53339624", "text": "def read_state(froeling):\n return froeling.send_command(CMD_KESSELZUSTAND_ABFRAGEN)", "title": "" }, { "docid": "4b0212c86c6c657fe305361edfe3f3fb", "score": "0.533216", "text": "def get_state():\n state = {\n \"queue\": SOCO.get_queue(0, 1000),\n \"current_track_info\": SOCO.get_current_track_info(),\n }\n return state", "title": "" }, { "docid": "0283444d99ec3bb11b97f9a1b82b06bf", "score": "0.53286946", "text": "def actionsF_8p(state):\n ret_value = []\n r, c = findBlank_8p(state)\n if c != 0:\n ret_value.append((\"left\", 1))\n if c != 2:\n ret_value.append((\"right\", 1))\n if r != 0:\n ret_value.append((\"up\", 1))\n if r != 2:\n ret_value.append((\"down\", 1))\n return ret_value", "title": "" }, { "docid": "c7fa38c0bc5328c7f7ac20a09160b9b6", "score": "0.531447", "text": "def get_state(self):\n return self.game_state", "title": "" }, { "docid": "8276131f8590da718f21533fe6ee7819", "score": "0.531429", "text": "def back(self):\n return (self.direction+2)%4", "title": "" }, { "docid": "9fdf996e19ac51c5b595f5921634102b", "score": "0.5306996", "text": "def active_ofp_port_state(self,states):\r\n active = []\r\n for (state,bit) in of.ofp_port_state_rev_map.items():\r\n if states & bit:\r\n active.append(state)\r\n return active", "title": "" }, { "docid": "192d7e5f2ff0765ef5818d705ac99d17", "score": "0.530566", "text": "def get_state(self, 
model: Model, sfc_index: int):\n state = []\n node_len = len(model.topo.nodes)\n\n # first part: topo state\n # 1. node state\n max_v = 0\n for node in model.topo.nodes(data=True):\n if node[1]['computing_resource'] > max_v:\n max_v = node[1]['computing_resource']\n max_f = 0\n for node in model.topo.nodes(data=True):\n if node[1]['fail_rate'] > max_f:\n max_f = node[1]['fail_rate']\n for node in model.topo.nodes(data=True):\n state.append(node[1]['fail_rate'] / max_f)\n state.append((node[1]['computing_resource'] - node[1]['active'])/ max_v)\n if node[1]['reserved'] == float('-inf'):\n state.append(0)\n else:\n state.append(node[1]['reserved'] / max_v)\n\n # 2. edge state\n max_e = 0\n for edge in model.topo.edges(data=True):\n if edge[2]['bandwidth'] > max_e:\n max_e = edge[2]['bandwidth']\n max_l = 0\n for edge in model.topo.edges(data=True):\n if edge[2]['latency'] > max_l:\n max_l = edge[2]['latency']\n for edge in model.topo.edges(data=True):\n state.append(edge[2]['latency'] / max_l)\n state.append((edge[2]['bandwidth'] - edge[2]['active']) / max_e)\n if edge[2]['reserved'] == float('-inf'):\n state.append(0)\n else:\n state.append(edge[2]['reserved'] / max_e)\n\n # the sfcs located in this time slot state\n sfc = model.sfc_list[sfc_index] if sfc_index < len(model.sfc_list) else model.sfc_list[sfc_index - 1]\n state.append(sfc.computing_resource / max_v)\n state.append(sfc.tp / max_e)\n state.append(sfc.latency / max_l)\n state.append(sfc.update_tp / max_e)\n state.append(sfc.process_latency / max_l)\n state.append(sfc.s)\n state.append(sfc.d)\n return state, False\n\n #second part\n #current sfc hasn't been deployed\n # if sfc_index == len(model.sfc_list) - 1 or model.sfc_list[sfc_index].state == State.Undeployed:\n # sfc = model.sfc_list[sfc_index]\n # state.append(sfc.computing_resource)\n # state.append(sfc.tp)\n # state.append(sfc.latency)\n # state.append(sfc.update_tp)\n # state.append(sfc.process_latency)\n # state.append(sfc.s)\n # state.append(sfc.d)\n #\n # #current sfc has been deployed\n # elif model.sfc_list[sfc_index].state == State.Normal or model.sfc_list[sfc_index].state == State.Failed:\n # sfc = model.sfc_list[sfc_index + 1]\n # state.append(sfc.computing_resource)\n # state.append(sfc.tp)\n # state.append(sfc.latency)\n # state.append(sfc.update_tp)\n # state.append(sfc.process_latency)\n # state.append(sfc.s)\n # state.append(sfc.d)", "title": "" }, { "docid": "815557844c6bcb550524de127656580c", "score": "0.53013295", "text": "def getstate(self):\n return self.VERSION, super().getstate(), self.gauss_next", "title": "" }, { "docid": "19f6d849f64a832f94b052cf6de724b9", "score": "0.5298703", "text": "def _titles_states():\n titles_states = cache.get(\"titles_states\")\n if not titles_states:\n titles = []\n states = []\n # create a temp Set _states to hold states before compiling full list\n _states = set()\n for title in models.Title.objects.filter(has_issues=True).select_related():\n short_name = title.name.split(\":\")[0] # remove subtitle\n title_name = \"%s (%s)\" % (short_name,\n title.place_of_publication)\n titles.append((title.lccn, title_name))\n for p in title.places.all():\n _states.add(p.state)\n _states = [s for s in _states if s is not None]\n for state in _states:\n states.append((state, state))\n states = sorted(states)\n cache.set(\"titles_states\", (titles, states))\n else:\n titles, states = titles_states\n return (titles, states)", "title": "" }, { "docid": "2f4bab4dbc39d9c0ac3ac2f3ca959bde", "score": "0.52968234", "text": "def 
enumerateStates(self):\n states = []\n for i in range(self.width):\n for j in range(self.height):\n states.append((i, j))\n return states", "title": "" }, { "docid": "4165ecf97268685c590b6fee36923b57", "score": "0.5289068", "text": "def peekStyle(self):\n if not self.gState: # It's empty, answer None\n return None\n return self.gState[-1]", "title": "" }, { "docid": "cfeb1ebc636f81a5a6d25a6f1120e68d", "score": "0.5288051", "text": "def get_state(self):\n pass", "title": "" }, { "docid": "835782a8025671305b50db91e53d1103", "score": "0.5284202", "text": "def states(self) -> np.ndarray:\n return self.state", "title": "" }, { "docid": "814212fe0b09a1ec2d88428f5589afaa", "score": "0.52804744", "text": "def getstate(self):\n return getattr(self, '_state_', {})", "title": "" }, { "docid": "6e1c7035e25bdda70685a8c14c663ebb", "score": "0.5278259", "text": "def query_all_states(self):\n return self.state_dict.copy()", "title": "" }, { "docid": "a18343dfae8cc6f3f1a5006419aa91ee", "score": "0.5263659", "text": "def prev_state(self):\n record_size = len(self._prev_states)\n if record_size > 1:\n output = self._prev_states[record_size - 2]\n else:\n output = 'Card'\n return output", "title": "" }, { "docid": "a53f173778c3e4ba9d4c0ca5217f58bf", "score": "0.5257512", "text": "def game_state(self):\n return", "title": "" }, { "docid": "985338efa60924713e450571287257fb", "score": "0.5251592", "text": "def GetPrev(self):", "title": "" }, { "docid": "d86ee74b12a9c41107f65f504ef2632b", "score": "0.5245429", "text": "def get_states(self):\n return self.xmldoc.selectNodes(self.xpath_workflow_states())", "title": "" }, { "docid": "0bf43c0423734017ec79ab55b56fc1ea", "score": "0.5238171", "text": "def DetermineState(self):\n\t\tpass", "title": "" }, { "docid": "f22d788da20f7c4a9213740db7ac7129", "score": "0.5237069", "text": "def _get_state(self):\n return self.__state", "title": "" }, { "docid": "f22d788da20f7c4a9213740db7ac7129", "score": "0.5237069", "text": "def _get_state(self):\n return self.__state", "title": "" }, { "docid": "f22d788da20f7c4a9213740db7ac7129", "score": "0.5237069", "text": "def _get_state(self):\n return self.__state", "title": "" }, { "docid": "f22d788da20f7c4a9213740db7ac7129", "score": "0.5237069", "text": "def _get_state(self):\n return self.__state", "title": "" }, { "docid": "f22d788da20f7c4a9213740db7ac7129", "score": "0.5237069", "text": "def _get_state(self):\n return self.__state", "title": "" }, { "docid": "f22d788da20f7c4a9213740db7ac7129", "score": "0.5237069", "text": "def _get_state(self):\n return self.__state", "title": "" }, { "docid": "f22d788da20f7c4a9213740db7ac7129", "score": "0.5237069", "text": "def _get_state(self):\n return self.__state", "title": "" }, { "docid": "f22d788da20f7c4a9213740db7ac7129", "score": "0.5237069", "text": "def _get_state(self):\n return self.__state", "title": "" }, { "docid": "f22d788da20f7c4a9213740db7ac7129", "score": "0.5237069", "text": "def _get_state(self):\n return self.__state", "title": "" }, { "docid": "f22d788da20f7c4a9213740db7ac7129", "score": "0.5237069", "text": "def _get_state(self):\n return self.__state", "title": "" } ]
59c71601c11f442754b4945bc8de09ad
Switch the coordinate base of a wiggle file
[ { "docid": "2e3f63eb028b90fcdfc784afd6c1d18a", "score": "0.554798", "text": "def switch_base(opts):\n functional.switch_base(opts.i, opts.o, from_base=1-opts.to, to_base=opts.to)", "title": "" } ]
[ { "docid": "ab04d55cfd56dced47f1aa034242f7b2", "score": "0.57458204", "text": "def SetBase(self, base):", "title": "" }, { "docid": "d81d1619137b8199755643751174e59e", "score": "0.5698028", "text": "def setImageBase(self, base: ghidra.program.model.address.Address) -> None:\n ...", "title": "" }, { "docid": "7bc998c456c3d199e067c87530d066fa", "score": "0.5691871", "text": "def geometric(self, base):\n self._geo_base = base", "title": "" }, { "docid": "be64fd5d454af663995d06ae99250026", "score": "0.5601534", "text": "def raw_to_xy(self, start_loc):\r\n return start_loc % 8, start_loc // 8", "title": "" }, { "docid": "f60f4106c99851e69f71cf1b12c990f1", "score": "0.5571574", "text": "def _base(self):\n return self._pix['Base']", "title": "" }, { "docid": "02432f73f8173f597fc196f08d2aff1a", "score": "0.55301404", "text": "def set_base(self,basex=1.0,basey=1.0,**kwargs):\n\t\ttry:\n\t\t\tbasex = float(basex)\n\t\t\tbasey = float(basey)\n\t\t\tk = self._LAYER_OBJECTS[kwargs['ind']]\n\t\t\tk.set_base(basex,basey)\n\t\texcept:\n\t\t\tprint('kaplot: set_base error. basex or basey must be a valid float.')", "title": "" }, { "docid": "3c0652094ea088fa77b0208679c4bdb5", "score": "0.54455256", "text": "def change_starting_map(gamefile, map_number):\n offset_in_rom = STARTING_MAP_NUMBER_LOCATION[gamefile] + file_location[gamefile]\n new_map_bytes = str(map_number).encode()\n with open(DEST_ROM_PATH, 'rb+') as f:\n f.seek(offset_in_rom)\n f.write(new_map_bytes)", "title": "" }, { "docid": "b70a0478e23e81f99625748a7de95973", "score": "0.5387754", "text": "def set_base(self, new_base):\n\n if type(new_base) != int:\n raise TypeError('new_base must be int (was %s)' %\n str(type(new_base)))\n\n self.base = new_base", "title": "" }, { "docid": "f283306e9ae23841e95f298d54a20e6c", "score": "0.5381497", "text": "def map_path_base(self, x, y, zoom=7):\n if self.basemap:\n if not self.dry_run and not os.path.exists(self.data_dir):\n os.makedirs(self.data_dir)\n if self.stubmap:\n name = 'stubmap'\n else:\n name = 'basemap'\n return os.path.join(self.data_dir, name)\n else:\n output_dir = os.path.join(self.data_dir, str(zoom), str(x))\n if not self.dry_run and not os.path.exists(output_dir):\n os.makedirs(output_dir)\n return os.path.join(output_dir, '%d-%d' % (x, y))", "title": "" }, { "docid": "245bd749c5da3e3fad0203ad8efc231d", "score": "0.535356", "text": "def tweak(self):\n tweak_base(self.x, self.y)", "title": "" }, { "docid": "ea4638e63723ce0761e3d5eba4395293", "score": "0.5352342", "text": "def move_to_bin():\n \n div_steps()\n robot['robot_location'] = 'at_bin'\n print('move_to_bin')", "title": "" }, { "docid": "33d937259aa09cdc7cceed318d9432ce", "score": "0.5328326", "text": "def set_base(self, base: int):\n self.base = base\n return self.base", "title": "" }, { "docid": "5bcfa93668a7b05279660a2446d7c2b5", "score": "0.5326366", "text": "def setBase(self, base):\n self.base = base", "title": "" }, { "docid": "ed1fb2c6e084723955b99778c05584f3", "score": "0.5255352", "text": "def solute_coordinate_file(self):\n pass", "title": "" }, { "docid": "28697c900773dc7e8f6b972bfcf4262b", "score": "0.5235905", "text": "def set_base_xpos(self, pos):\n node = self.worldbody.find(\"./body[@name='base']\")\n node.set(\"pos\", array_to_string(pos - self.bottom_offset))", "title": "" }, { "docid": "8ad7535a0c7137416cf06e67ad0d286a", "score": "0.5155282", "text": "def rounddown(self, x, base):\n\n return int(base * math.floor(x / base))", "title": "" }, { "docid": "f3937b2f7889d645d65047a6f1658b1d", "score": "0.51517916", 
"text": "def rebin(self, factor):\n old_cdelt = self.get_step()\n\n if self.wcs.wcs.has_cd():\n self.wcs.wcs.cd = self.wcs.wcs.cd * factor\n else:\n self.wcs.wcs.cdelt = self.wcs.wcs.cdelt * factor\n self.wcs.wcs.set()\n cdelt = self.get_step()\n\n crpix = self.wcs.wcs.crpix[0]\n crpix = (crpix * old_cdelt - old_cdelt / 2.0 + cdelt / 2.0) / cdelt\n self.wcs.wcs.crpix[0] = crpix\n self.shape = self.shape // factor\n self.wcs.wcs.set()", "title": "" }, { "docid": "8fba78557e0283265c4130f44a4a1ccb", "score": "0.51236963", "text": "def change_base_note(self, new_base_note):\n if new_base_note is not None:\n if bool(new_base_note & (new_base_note - 1)) or not new_base_note:\n raise Exception(\"base_note can't be {}. \".format(base_note) +\n 'It must be a power of 2.')\n\n self.base_note = new_base_note\n self._bpm = (self.base_note//4) * self.bpm\n self._spb = 60000//self._bpm", "title": "" }, { "docid": "1ca553499cefdcb6297bd482a7825b14", "score": "0.5121909", "text": "def _sample2base(self, sample):\n return dot(self._A, sample)+self._center", "title": "" }, { "docid": "3b11f7d4b888eea2e16c405edead29d2", "score": "0.51117104", "text": "def convert_w_h_cx_cy(base):\n w = base[2] - base[0] + 1\n h = base[3] - base[1] + 1\n cx = base[0] + 0.5 * (w - 1)\n cy = base[1] + 0.5 * (h - 1)\n return w, h, cx, cy", "title": "" }, { "docid": "8930b44718247b689b0c44afa07ed481", "score": "0.509261", "text": "def lift_genome(chrom_bed_file):\n\n chain_file = \"hg16ToHg19.chain\"\n # TODO wrapping the liftover tool \n lifted_file = \"hg19_examples.bed\" \n\n return lifted_file", "title": "" }, { "docid": "c83c06fa77b5ac827a17bcb9c6f96177", "score": "0.50925434", "text": "def OffsetLogicalOrigin(self, xoff, yoff):", "title": "" }, { "docid": "0dc293ce2b3ad8293d859e92ddeaf0bb", "score": "0.50922585", "text": "def rebase(src_base: str, dest_base: str, src_path: str) -> str:\n return os.path.join(dest_base, os.path.relpath(src_path, src_base))", "title": "" }, { "docid": "a560765d5e820e5d8f3a162a4d759f77", "score": "0.50575143", "text": "def _base2oldsample(self, e):\n return dot(self._lastInvA, (e - self._lastCenter))", "title": "" }, { "docid": "22f2472b44855b2daa79cc781741a9e2", "score": "0.5040897", "text": "def update_location_from_file(self,xyfile,indices=[0,999]):\n return", "title": "" }, { "docid": "d077fc8dcb00b0b22019265fb5d50fd2", "score": "0.5038482", "text": "def set_base_xpos(self, pos):\n node = self.worldbody.find(\"./body[@name='base_footprint']\")\n node.set(\"pos\", array_to_string(pos - self.bottom_offset))", "title": "" }, { "docid": "ea04811c11efa155e168c117511e26d7", "score": "0.5035331", "text": "def input_coordinate_file(self):\n pass", "title": "" }, { "docid": "ea04811c11efa155e168c117511e26d7", "score": "0.5035331", "text": "def input_coordinate_file(self):\n pass", "title": "" }, { "docid": "ea04811c11efa155e168c117511e26d7", "score": "0.5035331", "text": "def input_coordinate_file(self):\n pass", "title": "" }, { "docid": "4892a32cefbc313fba5418573f251966", "score": "0.50179684", "text": "def get_current_base_position(self):\n basis = tf_wrapper.lookup_pose(self._origin, \"base_footprint\")\n t, r = [basis.pose.position.x, basis.pose.position.y, basis.pose.position.z], \\\n [basis.pose.orientation.x, basis.pose.orientation.y, basis.pose.orientation.z,\n basis.pose.orientation.w] # self.get_msg_translation_and_rotation(self._basis, \"map\")\n return self.base_position(t, r)", "title": "" }, { "docid": "3534f7b9539be5ff38831ed52f757e5b", "score": "0.5001732", "text": "def 
SetBitmapPosition(self, dir):", "title": "" }, { "docid": "de638dc8c4452b37d2deec8ac7715a4d", "score": "0.5000698", "text": "def open_scale_origin(self):\n self.nbr_pixels, self.meters_value, self.scale_factor = self.widget.airport_file.airport.factor\n self.origin_pos = self.widget.airport_file.airport.origin", "title": "" }, { "docid": "12f8e653672908cb0c37baa003d89cf2", "score": "0.49977872", "text": "def opcode9(self):\r\n value = self.getOpValueSingle()\r\n self.relativeBase += value\r\n if self.verboseOutput:\r\n print(f'\\tOpcode 9 adjusting relative base by {value} to {self.relativeBase}')\r\n\r\n self.sp += self.OP_LENGTH_2", "title": "" }, { "docid": "d1bf004b3a5d7b431846ab0a1409d6a6", "score": "0.4996715", "text": "def Base(self, base):\n self._Base = base \n self.column_quality_list = list()\n self.insert_list = list()\n self.target_column = None\n self.strand_list = list() # all the base mapped at the position\n self.mapping_quality_list = list()\n self.init()", "title": "" }, { "docid": "d7b89d5baf09610fbfc059455ad71c26", "score": "0.4978142", "text": "def base():\n return basis.base()", "title": "" }, { "docid": "e7a07abad534022fb70d0b5ed069811b", "score": "0.49582085", "text": "def setLatBase(self, base):\n self.base = numpy.array(base)\n detbase = numalg.det(self.base)\n if abs(detbase) < 1.0e-8:\n emsg = \"base vectors are degenerate\"\n raise LatticeError(emsg)\n elif detbase < 0.0:\n emsg = \"base is not right-handed\"\n raise LatticeError(emsg)\n self._a = a = math.sqrt(numpy.dot(self.base[0,:], self.base[0,:]))\n self._b = b = math.sqrt(numpy.dot(self.base[1,:], self.base[1,:]))\n self._c = c = math.sqrt(numpy.dot(self.base[2,:], self.base[2,:]))\n self._ca = ca = numpy.dot(self.base[1,:], self.base[2,:]) / (b*c)\n self._cb = cb = numpy.dot(self.base[0,:], self.base[2,:]) / (a*c)\n self._cg = cg = numpy.dot(self.base[0,:], self.base[1,:]) / (a*b)\n self._sa = sa = math.sqrt(1.0 - ca**2)\n self._sb = sb = math.sqrt(1.0 - cb**2)\n self._sg = sg = math.sqrt(1.0 - cg**2)\n self._alpha = math.degrees(math.acos(ca))\n self._beta = math.degrees(math.acos(cb))\n self._gamma = math.degrees(math.acos(cg))\n # cache the unit volume value\n Vunit = self.unitvolume\n # reciprocal lattice\n self._ar = ar = sa/(self.a*Vunit)\n self._br = br = sb/(self.b*Vunit)\n self._cr = cr = sg/(self.c*Vunit)\n self._car = car = (cb*cg - ca)/(sb*sg)\n self._cbr = cbr = (ca*cg - cb)/(sa*sg)\n self._cgr = cgr = (ca*cb - cg)/(sa*sb)\n self._sar = sar = math.sqrt(1.0 - car**2)\n self._sbr = sbr = math.sqrt(1.0 - cbr**2)\n self._sgr = sgr = math.sqrt(1.0 - cgr**2)\n self._alphar = math.degrees(math.acos(car))\n self._betar = math.degrees(math.acos(cbr))\n self._gammar = math.degrees(math.acos(cgr))\n # standard orientation of lattice vectors\n self.stdbase = numpy.array([\n [ 1.0/ar, -cgr/sgr/ar, cb*a ],\n [ 0.0, b*sa, b*ca ],\n [ 0.0, 0.0, c ]],\n dtype=float)\n # calculate unit cell rotation matrix, base = stdbase*baserot\n self.baserot = numpy.dot(numalg.inv(self.stdbase), self.base)\n self.recbase = numalg.inv(self.base)\n # bases normalized to unit reciprocal vectors\n self.normbase = self.base * [[ar], [br], [cr]]\n self.recnormbase = self.recbase / [ar, br, cr]\n # update metrics tensor\n self.metrics = numpy.array([\n [ a*a, a*b*cg, a*c*cb ],\n [ b*a*cg, b*b, b*c*ca ],\n [ c*a*cb, c*b*ca, c*c ]],\n dtype=float)\n return", "title": "" }, { "docid": "902ec3f42cdf17b362b8615fe27074e7", "score": "0.49489677", "text": "def base(self, value):\n self.__base = value", "title": "" }, { "docid": 
"0560fd0f8a4908523796c5a135c2b06c", "score": "0.49244136", "text": "def receptor_coordinate_file(self):\n pass", "title": "" }, { "docid": "cdb3827f2120e2897685cad1bcee1aee", "score": "0.4922065", "text": "def file_to_range(self, file_index):\n pass", "title": "" }, { "docid": "ac7953c7ca04691a17ffca0bdb139035", "score": "0.49171242", "text": "def get_base_path(wiki_name):\n return path.join(DATA_PATH, wiki_name)", "title": "" }, { "docid": "682b9686028d4bf4710a924288a596b9", "score": "0.49071854", "text": "def move_to_base_name(self, basename):\n for plot in self.plots:\n found = False\n cube = plot.split()\n if (cube[0] == 'total_density' or\n cube[0] == 'spin_density' or\n cube[0] == 'delta_density'):\n found = True\n old_name = cube[0] + '.cube'\n new_name = basename + '.' + old_name\n if cube[0] == 'eigenstate' or cube[0] == 'eigenstate_density':\n found = True\n state = int(cube[1])\n s_state = cube[1]\n for i in [10, 100, 1000, 10000]:\n if state < i:\n s_state = '0' + s_state\n old_name = cube[0] + '_' + s_state + '_spin_1.cube'\n new_name = basename + '.' + old_name\n if found:\n os.system('mv ' + old_name + ' ' + new_name)", "title": "" }, { "docid": "a76b08bfa73cd5c717a0b41c5a4aaa42", "score": "0.48922893", "text": "def input_coordinate_file(self, value):\n pass", "title": "" }, { "docid": "7a40b996ce13961bf4d977a8878a19c4", "score": "0.48889896", "text": "def assign_robot_to_bin(self,robot):\n\n bin_num_x = np.floor((robot.position[0]-self.barriers[0])/self.bin_size)\n bin_num_y = np.floor((robot.position[1]-self.barriers[2])/self.bin_size)\n\n robot.bin_index = np.array((bin_num_x,bin_num_y))\n robot.bin_index = np.clip(robot.bin_index,np.zeros(2),self.bin_layout-1)\n\n robot.bin_index = (int(robot.bin_index[0]),int(robot.bin_index[1]))\n return robot.bin_index", "title": "" }, { "docid": "76b60bbe490f7d620e7f290b0c87498e", "score": "0.48831135", "text": "def populateShpfileDbase(shpfile,zwerts):\n for polygon in range(shpfile.NPolygons):\n pol = shpfile.layer.GetNextFeature()\n luc = shpfile.get_value(pol, \"Kategorie\")\n # land uses code range is from 100 to 900 which translates\n # to 1st or 9th column in allowed thresholds in zwert\n # keep in mind, column indecies start from 0 ... 
\n luc = (int(luc) / 100) - 1\n for field, contname in zip(shpfile.fields[3:],\n zwerts.contnames):\n values=zwerts.targets_LUT[contname]\n value=values[luc]\n shpfile.set_value(pol, field, value)\n shpfile.layer.ResetReading()", "title": "" }, { "docid": "218d89f2513e53d01a4093aebdb8a4bf", "score": "0.48702544", "text": "def test_projected_to_base(self):\n\n seq = DNA.make_seq(\"AAAAAAAAATTTTTTTTT\", name=\"x\")\n layer_one = seq.add_feature(\"repeat\", \"frog\", [(1, 17)])\n layer_two = layer_one.add_feature(\"repeat\", \"frog\", [(2, 16)])\n got = layer_two._projected_to_base(seq)\n self.assertEqual(got.map.start, 3)\n self.assertEqual(got.map.end, 17)\n self.assertEqual(got.map.parent_length, len(seq))\n\n layer_three = layer_two.add_feature(\"repeat\", \"frog\", [(5, 10)])\n got = layer_three._projected_to_base(seq)\n self.assertEqual(got.map.start, 8)\n self.assertEqual(got.map.end, 13)\n self.assertEqual(got.map.parent_length, len(seq))\n\n layer_four = layer_three.add_feature(\"repeat\", \"frog\", [(0, 4)])\n layer_five = layer_four.add_feature(\"repeat\", \"frog\", [(1, 2)])\n got = layer_five._projected_to_base(seq)\n self.assertEqual(got.map.start, 9)\n self.assertEqual(got.map.end, 10)\n self.assertEqual(got.map.parent_length, len(seq))", "title": "" }, { "docid": "7f787f99cfd7ad5caada12f60a3e5e92", "score": "0.48577508", "text": "def setReferenceBase(self):\n bases = dict()\n bam = pysam.AlignmentFile(NanoSV.opts_bam, 'rb')\n for pileupcolumn in bam.pileup(self.chr,self.pos-1,self.pos, truncate=True):\n for pileupread in pileupcolumn.pileups:\n if not pileupread.is_del and not pileupread.is_refskip:\n if not pileupread.alignment.query_sequence[pileupread.query_position] in bases:\n bases[pileupread.alignment.query_sequence[pileupread.query_position]] = 0\n bases[pileupread.alignment.query_sequence[pileupread.query_position]] += 1\n if (len(bases.keys()) > 0):\n self.ref = sorted(bases, key=bases.__getitem__)[-1]", "title": "" }, { "docid": "966d02761e46f1dbe112d59b078643ce", "score": "0.4855075", "text": "def part1(_filename: str) -> int:\n # Too lazy to parse: These come from input.txt\n target_x = 3075\n target_y = 2981\n return part1(target_x, target_y)", "title": "" }, { "docid": "3843d8ed59310e79096f54829604987c", "score": "0.48522463", "text": "def _change_gaggle_direction(self):\r\n\t\tfor marth in self.marths.sprites():\r\n\t\t\tmarth.rect.x -= self.settings.gaggle_drop_speed\r\n\t\tself.settings.gaggle_direction *= -1", "title": "" }, { "docid": "bd4714124b03c1329d1a5fada0788349", "score": "0.48489535", "text": "def tilecoord(self, filename):\n raise NotImplementedError", "title": "" }, { "docid": "1d59d68765ee3849f366ea1033fa4c4b", "score": "0.48464084", "text": "def _adjust_weight(self, intputX, outputY):", "title": "" }, { "docid": "84e306c58f54f49766b78b93db49b780", "score": "0.4824492", "text": "def rebase(self, other):", "title": "" }, { "docid": "dfd100ba646f9789a8a687a1636b593a", "score": "0.4822244", "text": "def set_base_filename(self, base_filename):\n self.base_filename = base_filename", "title": "" }, { "docid": "37c7b2fe383e893b4186b80d1e24c6f5", "score": "0.4820089", "text": "def makeOffset2D(self):\n ...", "title": "" }, { "docid": "00c5560d4db0f24b6539c9afe3d1b672", "score": "0.48138458", "text": "def simplify_tile(self, tile):\n tileset = self.tilesets[tile['filename']]\n return (tileset.offset +\n tileset.width*tile['y']//self.resolution[1] +\n tile['x']//self.resolution[0])", "title": "" }, { "docid": "b77ef15a1ecd2783a9431ed334bf5857", "score": 
"0.4808851", "text": "def get_file(pos: int) -> float:\n return pos % 8", "title": "" }, { "docid": "b8c20c318589359ff58d1051d69f6841", "score": "0.4808703", "text": "def make_moving_base_robot(robotname,world):\n\tf = open(moving_base_template_fn,'r')\n\tpattern = ''.join(f.readlines())\n\tf.close()\n\tf2 = open(\"temp.rob\",'w')\n\tf2.write(pattern \n\t\t% (robot_files[robotname],robotname))\n\tf2.close()\n\tworld.loadElement(\"temp.rob\")\n\treturn world.robot(world.numRobots()-1)", "title": "" }, { "docid": "1ed86707c09df48b74eb7457394480b6", "score": "0.47946787", "text": "def baf_set_baseline(base):\n half = 0.5\n delta = half - base\n def baf_shift(x):\n if x <= base:\n return (x/base) * delta\n else:\n return ((1-x)/(1-base)) * delta\n return baf_shift", "title": "" }, { "docid": "d65755f04c512a5e071b7ae0b1d28a61", "score": "0.47808093", "text": "def updateCorner(point1,point2,base,index,M):\n tmp_dist = p2pDistance(point1,point2)\n #print(\"didtance:\",tmp_dist)\n if tmp_dist>base[index]:\n base[index] = tmp_dist\n #print(\"point1:x and y\",point1[0],\" \",point1[1])\n M[0] = point1[0]\n M[1] = point1[1]", "title": "" }, { "docid": "0062e8f6130322112d08bdfa8295c8a5", "score": "0.47687784", "text": "def put_in_minimal_position(self):", "title": "" }, { "docid": "0e4aded2f19adabf60fbbea37225af31", "score": "0.4766209", "text": "def assign_from_image(in_image, in_basefile):\n print('Copying projection from {} to {}'.format(in_basefile, in_image))\n # Open the image file, in update mode\n # so that the image can be edited. \n baseDS = gdal.Open(in_basefile, gdal.GA_ReadOnly)\n outputDS = gdal.Open(in_image, gdal.GA_Update)\n \n if (not baseDS is None) and (not outputDS is None):\n outputDS.SetProjection(baseDS.GetProjection())\n else:\n raise Exception(\"Could not open input / output file\")\n \n baseDS = None\n outputDS = None", "title": "" }, { "docid": "2a3a6023b9610571beadd7081abce1c1", "score": "0.47464126", "text": "def change_base(num, new_base):\n convert_string = '0123456789ABCDEF'\n if num%new_base == num:\n return convert_string[num]\n else:\n return change_base(num//new_base, new_base) + convert_string[num % new_base]", "title": "" }, { "docid": "7dd7355a334943583395dc48c49456ab", "score": "0.47438106", "text": "def base(self, base):\n\n self._base = base", "title": "" }, { "docid": "7dd7355a334943583395dc48c49456ab", "score": "0.47438106", "text": "def base(self, base):\n\n self._base = base", "title": "" }, { "docid": "c2d0491fa6df1f646c1c613e116119db", "score": "0.4741841", "text": "def change_block(self, move):\n fpath = os.path.split(os.path.abspath(self.file))[0] #file directory\n fname = os.path.split(os.path.abspath(self.file))[1] #current file\n pre, _ = fname.split('_B')\n flist = glob.glob(fpath+'/'+pre+'_B*.nwb')\n curr_ind = flist.index(self.file)\n if curr_ind+move == len(flist):\n new_file = flist[0]\n else:\n new_file = flist[curr_ind+move]\n self.open_another_file(filename=new_file)", "title": "" }, { "docid": "7695f2f867a0fb6a99143881874044fd", "score": "0.47293624", "text": "def base_path(self, base_path: str):\n\n self._base_path = base_path", "title": "" }, { "docid": "2b8e432cbb0fe37c7fca39d24816ccbb", "score": "0.47290847", "text": "def base(self):\n return 2", "title": "" }, { "docid": "816e0d994fd48f1264d9c3288a27a540", "score": "0.47285", "text": "def middledendrite(binned_distances_transposed, file_name):\n\n for row in binned_distances_transposed:\n if row[0] == row[1] == row[2]:\n mid_dendrite = 0\n elif row[0] == max(row):\n mid_dendrite = 1\n 
elif row[1] == max(row):\n mid_dendrite = 2\n elif row[2] == max(row):\n mid_dendrite = 3\n else: \n mid_dendrite = 0\n\n # convert WT values that start with 072 to the right color order\n if file_name[3:6] == '072':\n mid_dendrite = mydict[mid_dendrite] \n\n # convert WT values that start with 072 to the right color order\n if file_name[3:5] == '05' and file_name[7:9] == '18':\n mid_dendrite = mydict[mid_dendrite] \n mid_dendrite_col.append(mid_dendrite)\n\n\n return mid_dendrite_col", "title": "" }, { "docid": "2e12b66f50c9e168660626d6190cfe16", "score": "0.47195327", "text": "def normalized(file, ref_marker, target_marker):\n ref_x,ref_y,ref_z = coord(file,ref_marker)\n target_x, target_y,target_z = coord(file,target_marker)\n norm_x=target_x-ref_x\n norm_y=target_y-ref_y\n norm_z=target_z-ref_z\n \n return norm_x,norm_y,norm_z", "title": "" }, { "docid": "d61a2469e985d39807dc4114558103f8", "score": "0.47152665", "text": "def fetch_starting_at_coord(self,coord):\n #b2 = BAMFile(self.path,blockStart=coord[0],innerStart=coord[1],index_obj=self.index,reference=self._reference)\n b2 = BAMFile(self.path,BAMFile.Options(blockStart=coord[0],innerStart=coord[1],reference=self.reference))\n return b2", "title": "" }, { "docid": "ba7ae4c1b0bf1eb540217a8b6cacaa80", "score": "0.4713344", "text": "def set_mask(base_path):\n scene = bpy.context.scene\n nodes = scene.node_tree.nodes\n links = bpy.context.scene.node_tree.links\n\n map_value_node = bpy.context.scene.node_tree.nodes.new(\"CompositorNodeMapValue\")\n map_value_node.size[0] = 1 / 255\n links.new(nodes[\"Render Layers\"].outputs[\"IndexOB\"], map_value_node.inputs[0])\n\n file_output_node = bpy.context.scene.node_tree.nodes.new('CompositorNodeOutputFile')\n file_output_node.base_path = base_path\n links.new(map_value_node.outputs[0], file_output_node.inputs[0])\n\n return file_output_node", "title": "" }, { "docid": "029056e415b31983d2e0d67df9622660", "score": "0.47014517", "text": "def test_fp_base_setter():\n fp_encoder = FixedPointEncoder()\n fp_encoder.base = 3\n assert fp_encoder.base == 3\n assert fp_encoder.scale == 3 ** fp_encoder.precision", "title": "" }, { "docid": "34a7e6ae090cd3411081920759ab8eaf", "score": "0.47013396", "text": "def imread(idx):\n img_tgt = plt.imread(\"data/landmarks/\"+idx, format='RGB').astype(np.float)\n img_src = plt.imread(\"data/original/\"+idx, format='RGB').astype(np.float)\n return (img_src/127.5)-1, (img_tgt/127.5)-1", "title": "" }, { "docid": "3fadce1c49c0e191577335c1d3a1897b", "score": "0.46895203", "text": "def getImageBase(self) -> long:\n ...", "title": "" }, { "docid": "4c180433baa291a9793a5c70621a0dbc", "score": "0.46757668", "text": "def encode(x,y,nb):\n return 1 + x + y*9 + nb*81", "title": "" }, { "docid": "dfa478cfc49a0d75e5b06f2677cec5e0", "score": "0.46733135", "text": "def convert_to_tilenum(self, x, y, zoom):", "title": "" }, { "docid": "e4db78975e00eea0171f700997a326a3", "score": "0.46701685", "text": "def create_base(self, basename):\n query = f\"\"\"INSERT INTO `bases` (`base_id`, `name`, `latitude`, `longitude`) \n VALUES (NULL, '{basename}', NULL, NULL);\"\"\"\n self.execute_query(query)", "title": "" }, { "docid": "2abb01114c59be22ec00a25fb75641b2", "score": "0.46655333", "text": "def set_binning(self, bin_x, bin_y):\n self.bin_x = bin_x\n self.bin_y = bin_y\n self.set_new_update('new binning, x:{:1d} y:{:1d}'.format(bin_x,\n bin_y))", "title": "" }, { "docid": "1a6e1202f309c689c993e93e63f9bec2", "score": "0.46585587", "text": "def set_base(self, histogram):\n midpoint = 
np.int(histogram.shape[0] / 2)\n self.leftx_base = np.argmax(histogram[:midpoint])\n self.rightx_base = np.argmax(histogram[midpoint:]) + midpoint", "title": "" }, { "docid": "108c51d99128315f30ab42faf5dad8c0", "score": "0.4656858", "text": "def map_relative(self):\n raise NotImplementedError", "title": "" }, { "docid": "3f95b48c30e024b8443b9d7bf7710d34", "score": "0.46509933", "text": "def INDEX_TO_BASE(self):\n raise NotImplementedError()", "title": "" }, { "docid": "362204079a0262327d2b7447d3ea4b62", "score": "0.46507445", "text": "def lower_half(self, output_file, returnable=False):\n for row in self.infile:\n for column in row:\n column[0] = column[0]/2\n column[1] = column[1]/2\n column[2] = column[2]/2\n\n if returnable:\n return self.infile\n\n Image.fromarray(self.infile, \"RGB\").save(output_file)\n return None", "title": "" }, { "docid": "9258e42ef3649b85e13aac20b1958953", "score": "0.46479756", "text": "def getBasePixel(self, x, y):\n if x < 0 or y < 0 or x >= 128 or y >= 128:\n return 0\n return self.base[y * 128 + x]", "title": "" }, { "docid": "479e260710442f20aa0b5539c4f1c554", "score": "0.46450916", "text": "def offsetimg(fname, off=(3,3)):\n from PIL import ImageChops\n im = Image.open(fname)\n im = ImageChops.offset(im, off[0], off[1])\n im.save(fname)", "title": "" }, { "docid": "f889e7f130c29ea17e90d35b3b6c66af", "score": "0.46448028", "text": "def subregion_orig2(dirin=None, fittemp=None, outn=None, montage=True,correct=True,xy=True,RADEC=False):\n ccorr=correct\n if fittemp==None:\n template = raw_input(\"Ingrese el archivo template : \")\n else:\n template=fittemp\n\n os.system('fits in='+fittemp+' op=xyin out=template2.xy')\n if dirin==None:\n dir1=raw_input(\"Ingrese el directorio de las imagenes : \")\n else:\n dir1=dirin\n input1='0'\n \n os.system('cp '+dir1+'/*.fits tmp1.fits')\n name='tmp1.fits'\n wcs1=ast.astWCS.WCS(template)\n bigmap=[]\n wcs2=ast.astWCS.WCS(name) \n minmax1=wcs1.getImageMinMaxWCSCoords()\n minmax2=wcs2.getImageMinMaxWCSCoords()\n print 'template',minmax1\n print 'archivo', minmax2\n if RADEC==True:\n minra=ast.astCoords.convertCoords('J2000','GALACTIC',minmax2[0],minmax2[2],2000.)\n maxra=ast.astCoords.convertCoords('J2000','GALACTIC',minmax2[1],minmax2[3],2000.)\n minmax2=[minra[0],maxra[0],minra[1],maxra[1]]\n print minmax1, minmax2\n \n bigmap=n.append(bigmap,name)\n print bigmap\n print ccorr, correct\n if montage==True:\n makemosaico(bigmap, 'tempmosaico.fits', clean=True, correct=ccorr)\n if commands.getoutput('ls -1 dirtmp | wc -l')=='1' and montage==False:\n print \"Solo copiando archivo...\"\n os.system('cp '+str(bigmap)+' tempmosaico.fits')\n \n if outn==None:\n outname=raw_input('Ingrese el nombre del archivo de salida : ')\n else:\n outname=outn\n name=outname+'-'+input1\n\n ra1=commands.getoutput('gethd in=template2.xy/crval1 format=degrees')\n dec1=commands.getoutput('gethd in=template2.xy/crval2 format=degrees')\n ra1e,dec1e=ast.astCoords.convertCoords('GALACTIC', 'J2000', float(ra1), float(dec1), 2000.)\n delta1=n.abs(float(commands.getoutput('gethd in=template2.xy/cdelt1 format=degrees')))\n xpix=float(commands.getoutput('gethd in=template2.xy/naxis1'))\n ypix=float(commands.getoutput('gethd in=template2.xy/naxis2'))\n sizey=str(delta1*xpix)\n sizex=str(delta1*ypix)\n if xy==False:\n sizey=str(delta1*ypix)\n sizex=str(delta1*xpix)\n print ra1e, dec1e\n os.system('mSubimage tempmosaico.fits '+name+'.fits '+str(ra1e)+' '+str(dec1e)+' '+sizex+' '+sizey )\n #os.system('mSubimage tempmosaico.fits '+name+'.fits '+str(ra1)+' 
'+str(dec1)+' '+sizex+' '+sizey )\n print 'Archivo creado : '+name+\".fits\"\n return 0", "title": "" }, { "docid": "a6d3aa87ebd00dc89bbedaad8e2dc146", "score": "0.46433213", "text": "def initialize(self, base_file_name):\r\n self.base_file_name = base_file_name", "title": "" }, { "docid": "0398c5d31fa0a16f02e31b5763f3b7c5", "score": "0.4634871", "text": "def relative_orbit(self):\n if self.mission == \"S1A\":\n return ((self.absolute_orbit - 73) % 175) + 1\n elif self.mission == \"S1B\":\n return ((self.absolute_orbit - 27) % 175) + 1", "title": "" }, { "docid": "13bddd6e1154b3497c00bc775438ddfd", "score": "0.4632912", "text": "def basel_rot(ants_xyz,start_freq,ha,dec,lat):", "title": "" }, { "docid": "6b70a68d24ee3cb7232df7615b3b70b0", "score": "0.46217495", "text": "def dst():", "title": "" }, { "docid": "6b70a68d24ee3cb7232df7615b3b70b0", "score": "0.46217495", "text": "def dst():", "title": "" }, { "docid": "1ebfe5f7daa22ab9d7a21de221cca785", "score": "0.46217427", "text": "def translate(self, offset):\n self.xmin += offset\n self.xmax += offset", "title": "" }, { "docid": "8e29ca31755f17480d64d7825cbaa5ac", "score": "0.46186802", "text": "def getBaseIndex(blendShape, base):\n # Checks\n if not isBlendShape(blendShape):\n raise Exception('Object \"' + blendShape + '\" is not a valid blendShape node!')\n if not hasBase(blendShape, base):\n raise Exception('Obejct \"' + base + '\" is not a base geometry for blendShape \"' + blendShape + '\"!')\n\n # Get Base Index\n baseGeo = glTools.utils.deformer.getAffectedGeometry(blendShape)\n if not baseGeo.has_key(base):\n raise Exception('Unable to determine base index for \"' + base + '\" on blendShape \"' + blendShape + '\"!')\n baseGeo[base]\n\n # Return Result\n return baseGeo[base]", "title": "" }, { "docid": "b887e25facf99c6b7d1add1f9e4f9594", "score": "0.46153826", "text": "def BASE_TO_INDEX(self):\n raise NotImplementedError()", "title": "" }, { "docid": "db7d2bb1d62fff4bc247898eef9a2551", "score": "0.46100596", "text": "def set_baselines(self):\n if self.style.xbaseline:\n self.coords.coords[:, 0] += self.style.xbaseline\n self.coords.verts[:, 0] += self.style.xbaseline \n\n if self.style.ybaseline:\n self.coords.coords[:, 1] += self.style.ybaseline\n self.coords.verts[:, 1] += self.style.ybaseline", "title": "" }, { "docid": "c62517ea14cf88b4baca6a466a1f24d2", "score": "0.46095493", "text": "def output_coordinate_file(self):\n pass", "title": "" }, { "docid": "c62517ea14cf88b4baca6a466a1f24d2", "score": "0.46095493", "text": "def output_coordinate_file(self):\n pass", "title": "" }, { "docid": "320d18d454f986f94a09fcb1625a60cc", "score": "0.4605682", "text": "def change_of_basis(self, new_cell, new_origin=np.array([0., 0., 0.])):\n M = np.dot(self.cell.bg, new_cell.bg)\n P = np.linalg.inv(M)\n new_pos = np.dot(P, self.to_crys())\n return Coord(new_pos, cell=new_cell)", "title": "" }, { "docid": "acd928111eef732c9bd36e08aac435e4", "score": "0.46035394", "text": "def connect_base(current_commit, base_commit):\n current_files = utils.get_file_list(current_commit)\n base_files = utils.get_file_list(base_commit)\n tree = utils.git_mktree(current_files + base_files)\n return utils.git_commit(\n tree, [current_commit, base_commit],\n message=b'Connect history with base %s' % (base_commit.encode('ascii')))", "title": "" }, { "docid": "a80667bb5e53ec5f24e50ec3447cd3b3", "score": "0.4599338", "text": "def basemap_from_file(ifile, withgrid = False, **kwds):\n proj4 = getproj4(ifile, withgrid = withgrid)\n basemap_options = 
basemap_options_from_proj4(proj4, **kwds)\n if 'llcrnrx' in basemap_options:\n if 'urcrnrx' in kwds:\n basemap_options['urcrnrx'] = kwds['urcrnrx']\n elif 'width' in kwds:\n basemap_options['urcrnrx'] = basemap_options['llcrnrx'] + kwds['width']\n elif 'x' in ifile.variables:\n x = ifile.variables['x']\n urx = x.max() + np.mean(np.diff(x))\n basemap_options['urcrnrx'] = urx\n else:\n raise KeyError('When a false_easting is available, the file must contain an x variable or the user must supply width or urcrnrx')\n if 'llcrnry' in basemap_options:\n if 'urcrnry' in kwds:\n basemap_options['urcrnry'] = kwds['urcrnry']\n elif 'height' in kwds:\n basemap_options['urcrnry'] = basemap_options['llcrnry'] + kwds['height']\n elif 'y' in ifile.variables:\n y = ifile.variables['y']\n ury = y.max() + np.mean(np.diff(y))\n basemap_options['urcrnry'] = ury\n else:\n raise KeyError('When a false_northing is available, the file must contain a y variable or the user must supply height or urcrnry')\n\n from mpl_toolkits.basemap import Basemap\n print(basemap_options)\n bmap = Basemap(**basemap_options)\n return bmap", "title": "" }, { "docid": "b9ca481196edbe913f87f0ae3276b893", "score": "0.45972842", "text": "def getFileBase(self, filename: str, fileInc: int) -> str:\n\n return filename + \"{:02d}\".format(fileInc)", "title": "" }, { "docid": "8ae95d6b526ac29983c0bf5cc54f157d", "score": "0.45970538", "text": "def __init__(self, start_face):\n self.face = start_face\n self.path_dict = {\n \"E\": 0,\n \"S\": 0,\n \"W\": 0,\n \"N\": 0\n }", "title": "" } ]
1133fbd07407924ef118cf63f878dc14
Like get_observations() but handles pagination so you get all the results in one shot.
[ { "docid": "56a7866671c7575d9da7ba58c8febe31", "score": "0.7313944", "text": "def get_all_observations(params: Dict, user_agent: str = None) -> List[Dict[str, Any]]:\n\n # According to the doc: \"The large size of the observations index prevents us from supporting the page parameter\n # when retrieving records from large result sets. If you need to retrieve large numbers of records, use the\n # per_page and id_above or id_below parameters instead.\n\n results = [] # type: List[Dict[str, Any]]\n id_above = 0\n\n while True:\n iteration_params = merge_two_dicts(\n params,\n {\"order_by\": \"id\", \"order\": \"asc\", \"per_page\": PER_PAGE_RESULTS, \"id_above\": id_above,},\n )\n\n page_obs = get_observations(params=iteration_params, user_agent=user_agent)\n results = results + page_obs[\"results\"]\n\n if page_obs[\"total_results\"] <= PER_PAGE_RESULTS:\n return results\n\n sleep(THROTTLING_DELAY)\n id_above = results[-1][\"id\"]", "title": "" } ]
[ { "docid": "da7f164f0dfe32607542dab96d5e45d8", "score": "0.72840744", "text": "def observations(self):\n ro = self._create_basic_request_object('observations_get')\n\n ro.set_owner(self.owner_name)\n ro.set_description('retrieve observations for {}'.format(self._reference_indicator))\n\n for item in self._tc.result_pagination(ro, 'observation'):\n yield parse_observation(item)", "title": "" }, { "docid": "4d0415fb292780f619556091104e09d7", "score": "0.68137586", "text": "def get_observations():\n \n max_results = request.args.get('max_results', 10, type=int)\n page = request.args.get('page', 1, type=int)\n sort_tmp = request.args.get('sort', '_updated', type=str)\n \n sort = {}\n \n if sort_tmp[0] == '-':\n sort['field'] = sort_tmp[1:]\n sort['direction'] = -1\n else:\n sort['field'] = sort_tmp\n sort['direction'] = 1\n \n \n col = app.data.driver.db['observations']\n #db.companies.find().skip(NUMBER_OF_ITEMS * (PAGE_NUMBER - 1)).limit(NUMBER_OF_ITEMS )\n cursor = col.find({'$and': [{'workflow.state': {'$nin': ['closed', 'withdrawn']}}, \\\n {'$or': [{'acl.execute.users': {'$in': [app.globals['user_id']]}}, \\\n {'acl.execute.groups': {'$in': app.globals['acl']['groups']}}, \\\n {'acl.execute.roles': {'$in': app.globals['acl']['roles']}} ] } ] } ).sort(sort['field'], sort['direction'])\n total_items = cursor.count()\n \n _items = list(cursor.skip(max_results * (page - 1)).limit(max_results))\n \n \"\"\"\n #hateos\n _links = {\"self\": {\"title\": \"observations/todo\", \"href\": \"observations/todo?max_results=%i&page=%i\" % (max_results, page), \n \"next\": {},\n \"previous\": {},\n \"last\": {},\n \"first\": {},\n \"parent\": {}}}\n \"\"\" \n _meta = { 'page': page, 'max_results' : max_results, 'total' : total_items} \n result = {'_items' : _items, '_meta': _meta} \n return Response(json.dumps(result, default=json_util.default), mimetype='application/json')", "title": "" }, { "docid": "17698e94b79d1828613b962ba8f06bfa", "score": "0.67402494", "text": "def get_observations(self):\n return self.observations", "title": "" }, { "docid": "7f281678ac1953ea36fe7c9174690a20", "score": "0.66541165", "text": "def current_observations(cls):\n try:\n response = requests.get(cls.API_URL, headers=cls.API_HEADERS, timeout=15)\n response.raise_for_status()\n response.encoding = \"UTF8\"\n return csv.DictReader(\n response.text.splitlines(), delimiter=\";\", quotechar='\"'\n )\n except requests.exceptions.HTTPError:\n _LOGGER.error(\"While fetching data\")", "title": "" }, { "docid": "ad076bfe0e50e8ba908b1bbc9768011d", "score": "0.6569353", "text": "def cli_observations(ctx):\n pass", "title": "" }, { "docid": "dbbe09a2bb298e407cf9643b2bfc43d1", "score": "0.6232293", "text": "def collect_observations(observations_request):\r\n\r\n try:\r\n request = requests.get(url=observations_request) # json object\r\n except requests.HTTPError:\r\n print('HTTP error for the request: ' + str(observations_request))\r\n else:\r\n # make first request\r\n\r\n response_json = request.json()\r\n # collect data\r\n observations = response_json[\"value\"] # collect things from all responses. 
A list\r\n # print('type of observations', type(observations))\r\n # print(observations)\r\n make_request = True\r\n # retrieve data from all pages\r\n while make_request:\r\n if '@iot.nextLink' in response_json:\r\n request = requests.get(response_json['@iot.nextLink'])\r\n response_json = request.json()\r\n observations = observations + response_json['value']\r\n else:\r\n make_request = False\r\n return observations", "title": "" }, { "docid": "77a02e5cb2a8a277397336f3393862f0", "score": "0.61225486", "text": "def get_observations(self, *args: Any, **kwargs: Any) -> Observations:\n return Observations(self.sensors, *args, **kwargs)", "title": "" }, { "docid": "9d7ff1ed9c0ea03fe9c1d7eb568b7b89", "score": "0.60767525", "text": "def _get_obs(self):\n # TODO\n return observations", "title": "" }, { "docid": "d7ebf94d024624491d1557e78f5fd912", "score": "0.6041073", "text": "def get_obs(self):\n return self.observations", "title": "" }, { "docid": "eb8d42777424b498185338668f26fdd7", "score": "0.59841275", "text": "def get_observations(self, *args: Any, **kwargs: Any) -> Observations:\n return Observations(self.sensors, *args, **kwargs)", "title": "" }, { "docid": "f28fc5f288b003fe7ea29673231c9906", "score": "0.5980575", "text": "def observations(self):\n return self._observations", "title": "" }, { "docid": "0713bfac78af3bbdcd27d3ed9b9aa2c0", "score": "0.5959873", "text": "def scrap_obs(self):", "title": "" }, { "docid": "5c727a184ed392b244cd0ce19acab932", "score": "0.590692", "text": "def dot_observation_range(self):\n if self.single_submit is None:\n case_id = self.patient_casedoc._id\n observations = query_observations(case_id, self.start_date, self.end_date)\n else:\n observations = query_observations_singledoc(self.single_submit)\n\n return sorted(observations, key=lambda x: x['observed_date'])", "title": "" }, { "docid": "a3083883e5121b3a435f5908c25ac5f3", "score": "0.5822204", "text": "def __iter__(self):\n return self._observations.__iter__()", "title": "" }, { "docid": "ac79082e61132a8813933a1542598498", "score": "0.5698318", "text": "def observations(self):\n return self.agent_manager.get_observations()", "title": "" }, { "docid": "84934c12c86384a0242d9b730867938a", "score": "0.5675413", "text": "def collect_table_data():\n first_response = requests.get(\n \"https://www.ebi.ac.uk/ebisearch/ws/rest/embl-covid19/\"\n \"?size=1000&format=JSON&facetcount=11&query=id%3A%5B*%20TO%20*%5D\"\n ).json()\n results = list()\n for i in range(0, first_response['hitCount'], 1000):\n if i == 0:\n for record in first_response['entries']:\n results.append(record)\n else:\n response = requests.get(\n f\"https://www.ebi.ac.uk/ebisearch/ws/rest/embl-covid19/\"\n f\"?start={i}&size=1000&format=JSON&facetcount=11\"\n f\"&query=id%3A%5B*%20TO%20*%5D\").json()\n for record in response['entries']:\n results.append(record)\n return results", "title": "" }, { "docid": "5ae9330eb106be923991f1a64c37c41f", "score": "0.56370836", "text": "def get_observations(latitude, longitude):\n\n try:\n payload = dict()\n payload['latitude'] = latitude\n payload['longitude'] = longitude\n response = call_openwhisk('observations', payload)\n except Exception as e:\n raise APIException('KO', internal_details=str(e))\n\n return response", "title": "" }, { "docid": "e28f3f265381e6b947b14bf0a91f873a", "score": "0.5616497", "text": "def get_data(self):\n url = os.path.join(BASE_SW_API, \"people/\")\n json_response = self._fetch(url).json()\n total_count = json_response[\"count\"]\n all_results = json_response[\"results\"]\n\n # got 
thought all the page next by next until we get all objects or next_url is None\n while len(all_results) < total_count:\n next_url = json_response[\"next\"]\n if next_url is None:\n break\n json_response = self._fetch(next_url).json()\n all_results += json_response[\"results\"]\n return all_results", "title": "" }, { "docid": "29f837adcb57c49fcb4b62821dcffc70", "score": "0.561592", "text": "def get_allsamples(self):\n\n import web.apps.web_copo.templatetags.html_tags as htags\n\n df = pd.DataFrame()\n\n if self.accession:\n if isinstance(self.accession, str):\n self.accession = self.accession.split(\",\")\n\n object_ids = [ObjectId(x) for x in self.accession if x.strip()]\n records = cursor_to_list(Sample().get_collection_handle().find({\"_id\": {\"$in\": object_ids}}))\n\n if records:\n df = pd.DataFrame(records)\n df['accession'] = df._id.astype(str)\n df['label'] = df['name']\n df['desc'] = df['accession'].apply(lambda x: htags.generate_attributes(\"sample\", x))\n df['description'] = df['desc'].apply(lambda x: self.format_description(x))\n df['server-side'] = True # ...to request callback to server for resolving item description\n elif self.search_term:\n projection = dict(name=1)\n filter_by = dict()\n filter_by[\"name\"] = {'$regex': self.search_term, \"$options\": 'i'}\n\n sort_by = 'name'\n sort_direction = -1\n\n records = Sample(profile_id=self.profile_id).get_all_records_columns(filter_by=filter_by,\n projection=projection,\n sort_by=sort_by,\n sort_direction=sort_direction)\n if not records:\n # try getting all records\n del filter_by['name']\n records = Sample(profile_id=self.profile_id).get_all_records_columns(filter_by=filter_by,\n projection=projection,\n sort_by=sort_by,\n sort_direction=sort_direction)\n\n if records:\n df = pd.DataFrame(records)\n df['accession'] = df._id.astype(str)\n df['label'] = df['name']\n df['description'] = ''\n df['server-side'] = True # ...to request callback to server for resolving item description\n\n result = list()\n\n if not df.empty:\n df = df[['accession', 'label', 'description', 'server-side']]\n result = df.to_dict('records')\n\n return result", "title": "" }, { "docid": "2541fac64bdb547f0ccee103b56ebfda", "score": "0.56017536", "text": "def __next__(self):\n return self._observations.__next__()", "title": "" }, { "docid": "e037cdf107326dfcee7f4f12de325387", "score": "0.56010973", "text": "def get_observations(self, id):\n\n query_string = \"\"\"SELECT L.SUBJECT_ID, L.HADM_ID, L.ITEMID, COUNT(L.ITEMID) as TOTAL_ITEMCOUNT, COUNT(L.FLAG) AS TOTAL_ABNORMAL, \n MAX(DL.LABEL) AS LABEL, MAX(DL.FLUID) AS SPECIMEN, MAX(DL.CATEGORY) AS CATEGORY FROM `green-gasket-256323.mimiciii_fullyautomated.LABEVENTS` AS L \n JOIN `green-gasket-256323.mimiciii_fullyautomated.D_LABITEMS` AS DL ON L.ITEMID = DL.ITEMID WHERE HADM_ID IS NOT NULL AND SUBJECT_ID = {} \n GROUP BY SUBJECT_ID, HADM_ID, ITEMID;\"\"\"\n query_string = query_string.format(id)\n results = self.cl.queryRecords(query_string)\n\n patient_observation = []\n for row in results:\n res = {}\n for i in row.keys():\n if i not in res:\n res[i] = None\n if i in res and res[i] == None:\n res[i] = row[i]\n patient_observation.append(res)\n \n patient_observation_res = []\n\n for res in patient_observation:\n observation_json = {\n \"resourceType\" : \"Observation\",\n # from Resource: id, meta, implicitRules, and language\n # from DomainResource: text, contained, extension, and modifierExtension\n \"identifier\" : None, # Business Identifier for observation\n \"basedOn\" : None, # Fulfills plan, 
proposal or order\n \"partOf\" : None, # Part of referenced event\n \"status\" : \"registered\", # R! registered | preliminary | final | amended +\n \"category\" : res[\"CATEGORY\"], # Classification of type of observation\n \"code\" : res[\"ITEMID\"], # R! Type of observation (code / type)\n \"subject\" : res[\"SUBJECT_ID\"], # Who and/or what the observation is about\n \"focus\" : res[\"LABEL\"], # What the observation is about, when it is not about the subject of record\n \"encounter\" : res[\"HADM_ID\"], # Healthcare event during which this observation is made\n # effective[x]: Clinically relevant time/time-period for observation. One of these 4:\n \"effectiveDateTime\" : None,\n \"effectivePeriod\" : None,\n \"effectiveTiming\" : None,\n \"effectiveInstant\" : None,\n \"issued\" : None, # Date/Time this version was made available\n \"performer\" : None, # Who is responsible for the observation\n # value[x]: Actual result. One of these 11:\n \"valueQuantity\" : res[\"TOTAL_ITEMCOUNT\"],\n \"valueCodeableConcept\" : None,\n \"valueString\" : None,\n \"valueBoolean\" : None,\n \"valueInteger\" : res[\"TOTAL_ABNORMAL\"],\n \"valueRange\" : None,\n \"valueRatio\" : None,\n \"valueSampledData\" : None,\n \"valueTime\" : None,\n \"valueDateTime\" : None,\n \"valuePeriod\" : None,\n \"dataAbsentReason\" : None, # C? Why the result is missing\n \"interpretation\" : None, # High, low, normal, etc.\n \"note\" : None, # Comments about the observation\n \"bodySite\" : None, # Observed body part\n \"method\" : None, # How it was done\n \"specimen\" : res[\"SPECIMEN\"], # Specimen used for this observation\n \"device\" : None, # (Measurement) Device\n \"referenceRange\" : [{ # Provides guide for interpretation\n \"low\" : None, # C? Low Range, if relevant\n \"high\" : None, # C? High Range, if relevant\n \"type\" : None, # Reference range qualifier\n \"appliesTo\" :None, # Reference range population\n \"age\" : None, # Applicable age range, if relevant\n \"text\" : None # Text based reference range in an observation\n }],\n \"hasMember\" : None, # Related resource that belongs to the Observation group\n \"derivedFrom\" :None, # Related measurements the observation is made from\n \"component\" : [{ # Component results\n \"code\" : res[\"ITEMID\"], # R! Type of component observation (code / type)\n # value[x]: Actual component result. One of these 11:\n \"valueQuantity\" : None,\n \"valueCodeableConcept\" : None,\n \"valueString\" : None,\n \"valueBoolean\" : None,\n \"valueInteger\" : None,\n \"valueRange\" : None,\n \"valueRatio\" : None,\n \"valueSampledData\" : None,\n \"valueTime\" : None,\n \"valueDateTime\" : None,\n \"valuePeriod\" : None,\n \"dataAbsentReason\" : None, # C? 
Why the component result is missing\n \"interpretation\" : None, # High, low, normal, etc.\n \"referenceRange\" : None # Provides guide for interpretation of component result\n }]\n }\n patient_observation_res.append(observation_json)\n return patient_observation_res", "title": "" }, { "docid": "4591fdd8b7e579c70d27db83c8adc579", "score": "0.55799055", "text": "def get_observations(self, obs_type):\n return self.obs[obs_type]", "title": "" }, { "docid": "df981c12944d4180669c1c0ff7f16863", "score": "0.55657864", "text": "def get_data(self, limit=500):", "title": "" }, { "docid": "1c1e5113182724ff9f9363566b43f757", "score": "0.5517853", "text": "def prepare_observations_request(sensor_api_root, extent, phenomenon, page_size=200):\r\n\r\n if is_valid_wkt_polygon(extent):\r\n observations_request = sensor_api_root + \"/Things?$top=\"+ str(page_size) + \\\r\n \"&$filter=geo.intersects(Things/Locations/location,\" + \\\r\n \"geography'\" + extent + \"')\" + \\\r\n \" and Datastreams/ObservedProperty/name eq '\" + phenomenon + \"'\" + \\\r\n \"&$select=name,@iot.id&$expand=Datastreams([email protected],unitOfMeasurement;$filter=ObservedProperty/name eq '\" + phenomenon + \"';\" + \\\r\n \"$expand=Observations($orderby=phenomenonTime desc;$top=1)),Locations($select=location;$expand=HistoricalLocations($select=time;$orderby=time desc;$top=1))\"\r\n\r\n else:\r\n print('Extent definition is not valid')\r\n return\r\n return observations_request", "title": "" }, { "docid": "099c59bc3c28e07344dbe5bf9210fa55", "score": "0.5509803", "text": "def list_survey_observations(self, survey):\n observations = Observation.objects.filter(survey=survey)\n\n return observations", "title": "" }, { "docid": "166524ab52ff6dbde4c322e9e8acf176", "score": "0.5490312", "text": "def paging_results(self):\n\n raise NotImplementedError()", "title": "" }, { "docid": "1accd376abbb176208d7117b4c7337f9", "score": "0.5487095", "text": "def getObservations(self):\n \n # Sometimes the first station does not have any observations so\n # we have to loop until we get at least the temperature.\n stations = self.getStations()\n for stationId,stationName in stations:\n print(stationName)\n r = requests.get(baseuri + \"stations/%s/observations/latest\" % stationId)\n if r.status_code != 200: \n continue\n j = geojson.loads(r.text)\n #print(geojson.dumps(j,indent=4))\n temperature = None\n conditions = None\n try:\n properties = j[\"properties\"]\n temperature_c = (properties[\"temperature\"])[\"value\"]\n temperature = c_to_f(temperature_c)\n windspeed = ms_to_mph((properties[\"windSpeed\"])[\"value\"])\n description = properties[\"textDescription\"]\n conditions = \"%s, %s, %s.\" % (description, temperature, windspeed)\n self.stationName = stationName\n self.conditions = conditions\n pass\n except KeyError as e:\n print(stationName,\"empty\")\n pass\n if temperature:\n # Windspeed and description would be good but not worth waiting for\n break\n\n return conditions", "title": "" }, { "docid": "2fedef6fcd584d845217d2523ba304c5", "score": "0.548324", "text": "def get_samples(sample_per_client=1):\n page = 1\n total_pages = 1\n results = []\n # Multiple pages of results\n while page <= total_pages:\n params = {\n 'sample': sample_per_client,\n 'sample-group': 'data-center',\n 'page[size]': 1000,\n 'url': '*'\n }\n\n # Build the request to get the sample dois\n payload = urllib.parse.urlencode(params)\n url = API_ENDPOINT + '?%s' % payload\n with urllib.request.urlopen(url) as f:\n data = json.loads(f.read())\n\n # Extract just doi and url\n 
for work in data['data']:\n result = {\n 'doi': work['attributes']['doi'],\n 'url': work['attributes']['url']\n }\n results.append(result)\n\n total_pages = data['meta']['total-pages']\n\n page += 1\n\n return results", "title": "" }, { "docid": "950ab961078f2f31de0fba268ded310c", "score": "0.5476006", "text": "def get_observations(self, series):\n return series.findall(\".//Obs\")", "title": "" }, { "docid": "6bd3f6c292b09079bd3908b14b0068a2", "score": "0.54727274", "text": "def get_observations(self, track_id: str) -> Dict[str, pymap.Observation]:\n pass", "title": "" }, { "docid": "3f6a1b4e25a266e566f5a766d53f3c11", "score": "0.54560524", "text": "def all():\n url_params = connexion.request.args\n page = url_params.get('page', 0)\n limit = url_params.get('limit', 20)\n return jsonify({\"data\": [c.serialize() for c in models.Interviewer.query.offset(page).limit(limit).all()]})", "title": "" }, { "docid": "dbf8071600025542f8ea7b5649ef3572", "score": "0.54555935", "text": "def _fetch_data(\n self,\n included_mrns: Optional[Set] = None,\n limit: Optional[int] = None\n ):\n data = self._data\n if included_mrns:\n data = data[data.medical_record_number.isin(included_mrns)]\n return data[:limit]", "title": "" }, { "docid": "6ba91e901c8e4f0eb219d99bfe2cf861", "score": "0.5446574", "text": "def get_all(self, start=0, count=-1, filter='', sort='', scope_uris=''):\n return self._helper.get_all(start, count, filter=filter, sort=sort, scope_uris=scope_uris)", "title": "" }, { "docid": "e63be33386b44b4709f9edf4203ec052", "score": "0.5442254", "text": "def _get_data(self, paginate):\n queryset = self._get_queryset()\n\n if paginate and self.pagination:\n queryset = queryset[self.pagination.start:self.pagination.start + self.pagination.count]\n return list(queryset)", "title": "" }, { "docid": "b0773ab7029e01ef0c3c6990aa5e8d4c", "score": "0.54409903", "text": "def get_values(self,\n observations):\n\n return NotImplemented", "title": "" }, { "docid": "b10df6a62c25009f6d4423146e4dff23", "score": "0.54276747", "text": "def get_all_samples():\n response = {}\n for num,doc in enumerate(mongo_db['samples'].find(None,{'_id':0}).limit(5)):\n response[num] = doc\n return response", "title": "" }, { "docid": "5c17394fa4ae674f35973e97dca7ef92", "score": "0.5409065", "text": "def get(endpoint, **parameters):\n page_number = 1\n dataset = pd.DataFrame()\n\n while True:\n page = get_page(endpoint, page_number, **parameters)\n dataset = pd.concat([dataset, page])\n\n if len(page) == 0 or page_number == page.total_pages:\n return dataset\n\n page_number += 1", "title": "" }, { "docid": "b184f30cbba77e12b942916b63f80e5e", "score": "0.54063785", "text": "def get_observations_for_date_interval(self, start_date, end_date):\n\n # TODO: add exception handling when dates do not have records for in data_df\n start_row_id = self.data_df[self.data_df['Timestamp'] == start_date].index.tolist()[0]\n end_row_id = self.data_df[self.data_df['Timestamp'] == end_date].index.tolist()[0]\n\n df = self.data_df[start_row_id:end_row_id]\n\n # filter out production data from the result set to return\n df = df.filter(['Timestamp',\n 'Temperature, C (observation)',\n 'Wind speed, km/h (observation)',\n 'Wind direction, degrees (observation)'],\n axis=1)\n return df", "title": "" }, { "docid": "609283ab9019c266ce710b73b149dbb8", "score": "0.5394708", "text": "def test_get_series_observations(self):\n\n response = self.series_services.get_series_observations(\n series_id='GNPCA'\n )\n self.assertIsNotNone(response)", "title": "" }, { "docid": 
"1fcc9e92e8a2e3eafe107ca6ba00c477", "score": "0.53858954", "text": "def get_isasamples(self):\n\n import web.apps.web_copo.templatetags.html_tags as htags\n\n df = pd.DataFrame()\n\n if self.accession:\n if isinstance(self.accession, str):\n self.accession = self.accession.split(\",\")\n\n object_ids = [ObjectId(x) for x in self.accession if x.strip()]\n records = cursor_to_list(Sample().get_collection_handle().find({\"_id\": {\"$in\": object_ids}}))\n\n if records:\n df = pd.DataFrame(records)\n df['accession'] = df._id.astype(str)\n df['label'] = df['name']\n df['desc'] = df['accession'].apply(lambda x: htags.generate_attributes(\"sample\", x))\n df['description'] = df['desc'].apply(lambda x: self.format_description(x))\n df['server-side'] = True # ...to request callback to server for resolving item description\n\n elif self.search_term:\n projection = dict(name=1)\n filter_by = dict(sample_type=\"isasample\")\n filter_by[\"name\"] = {'$regex': self.search_term, \"$options\": 'i'}\n\n sort_by = 'name'\n sort_direction = -1\n\n records = Sample(profile_id=self.profile_id).get_all_records_columns(filter_by=filter_by,\n projection=projection,\n sort_by=sort_by,\n sort_direction=sort_direction)\n if not records:\n # try getting all records\n del filter_by['name']\n records = Sample(profile_id=self.profile_id).get_all_records_columns(filter_by=filter_by,\n projection=projection,\n sort_by=sort_by,\n sort_direction=sort_direction)\n if records:\n df = pd.DataFrame(records)\n df['accession'] = df._id.astype(str)\n df['label'] = df['name']\n df['description'] = ''\n df['server-side'] = True # ...to request callback to server for resolving item description\n\n result = list()\n\n if not df.empty:\n df = df[['accession', 'label', 'description', 'server-side']]\n result = df.to_dict('records')\n\n return result", "title": "" }, { "docid": "343b607e0c8567c2b3280916496e7391", "score": "0.5383245", "text": "def get_all(self, start=0, count=-1, filter='', sort='', query='', scope_uris=''):\n return self._helper.get_all(start, count, filter=filter, sort=sort, query=query, scope_uris=scope_uris)", "title": "" }, { "docid": "671644eb0bdc4d971672b84cbf3170e9", "score": "0.5362104", "text": "def test_observations_method(self):\n\n try:\n awx = AerisWeather(app_id=app_id,\n client_id=client_id,\n client_secret=client_secret)\n\n obs_sum_list = awx.observations_summary(location=None,\n action=RequestAction.OBSERVATIONS_SUMMARY.CLOSEST,\n filter_=[RequestFilter.OBSERVATIONS_SUMMARY.ALL_STATIONS],\n sort=None,\n params={ParameterType.OBSERVATIONS_SUMMARY.P: \"54601\"},\n query=None)\n\n for obs_sum in obs_sum_list: # type: ObservationsSummaryResponse\n assert obs_sum.id is not None\n\n loc = obs_sum.loc\n assert loc is not None\n assert type(loc) is AerisLocation\n assert obs_sum.loc.lat > 43\n\n place = obs_sum.place\n assert place is not None\n # assert place.name == \"la crosse\"\n assert place.state == \"wi\"\n\n periods = obs_sum.periods\n assert periods is not None\n\n temp = periods[0].temp\n assert type(temp) is ObservationsSummaryTemp\n assert temp.avgF > -10\n\n profile = obs_sum.profile\n assert profile is not None\n assert profile.elevFT > 600\n\n except URLError as url_err:\n print(\"URL Error: \" + url_err.reason)\n raise url_err\n\n except AerisError as aeris_err:\n print(\"AerisError: \" + str(aeris_err))\n raise aeris_err\n\n except Exception as ex:\n print(ex.args)\n raise ex", "title": "" }, { "docid": "20f040034505a17cce83ee6d9e7b6032", "score": "0.5349634", "text": "def getDocuments(n=100, 
start=0):\n\n base_url = \"https://cn.dataone.org/cn/v1/query/solr/\"\n fields = \",\".join([\"identifier\"])\n query_params = \"formatType:METADATA+AND+(datasource:*LTER+OR+datasource:*\"\\\n \"KNB+OR+datasource:*PISCO+OR+datasource:*GOA)+AND+-\"\\\n \"obsoletedBy:*\"\n rows = n\n start = start\n\n query_string = \"%s?fl=%s&q=%s&rows=%s&start=%s\" % (base_url,\n fields,\n query_params,\n rows,\n start)\n print(query_string)\n\n xmldoc = getXML(query_string)\n\n return(xmldoc)", "title": "" }, { "docid": "d294f0c318208a341af8fff5b8664d2b", "score": "0.53440946", "text": "def GenerateObservations(self, num_reads):\n n, prevalences = self.suite.SamplePosterior()\n\n names = self.GetNames()\n name_iter = SpeciesGenerator(names, n)\n\n items = zip(name_iter, prevalences)\n\n cdf = thinkbayes2.Cdf(dict(items))\n observations = cdf.Sample(num_reads)\n\n #for ob in observations:\n # print ob\n\n return n, observations", "title": "" }, { "docid": "58e3ab3b733bce7f73f89422e9553975", "score": "0.5340551", "text": "def _return_data(self, limit):\n data = copy.deepcopy(self._data)\n return data[\"results\"][:limit]", "title": "" }, { "docid": "c2b1434737fe1fbfc511249c6478eb59", "score": "0.5339006", "text": "def handle_get_dataset_paginated(project_id, experiment_id, operator_id, pagination_parameters):\n datasets = get_dataset_pagination(project_id=project_id, experiment_id=experiment_id, operator_id=operator_id,\n page=pagination_parameters.page, page_size=pagination_parameters.page_size)\n\n return datasets", "title": "" }, { "docid": "a5c68a16e0759286a582966e19a39011", "score": "0.5330591", "text": "def sample(self, n=10) -> List[dict]:\n return self.client._get_records_sync(\n self.name, params={\"with_meta\": \"yes\", \"count\": n, \"flatten\": \"yes\"}\n )", "title": "" }, { "docid": "919108741325028b2f8e93a45783bcc6", "score": "0.53253525", "text": "def fetch(self):\n self.total_pages = self.get_total_pages()\n for i in range(self.page, self.total_pages + 1):\n for item in self.get_data(i):\n yield item", "title": "" }, { "docid": "95da89fdc984821f24875585bb5c4c30", "score": "0.5324761", "text": "def observations(identifier, **kwargs):\r\n kwargs['series_id'] = identifier\r\n return Fred().series('observations', **kwargs)", "title": "" }, { "docid": "ba98e45cc1bec44da6332e95b6604a40", "score": "0.53190106", "text": "def _fetch_page(self) -> list:\n self.filters['offset'] = self._current_offset\n self._total_count, new_page = self._fetch_analyses_history(\n self._request_url_path, self.filters)\n\n if new_page:\n self._current_page = new_page\n self._update_current_page_metadata()\n return self._current_page", "title": "" }, { "docid": "041232468c18eb1eb3c4b54a26bb9806", "score": "0.52738976", "text": "def sel_observations(self, idx):\n raise NotImplementedError(\"sel_observations must be implemented on a model basis\")", "title": "" }, { "docid": "2f665cbf0cacfd92ad50a1bdbd2f4d3a", "score": "0.52661055", "text": "def _build_observations(self, state: np.array) -> List[np.array]:\n pass", "title": "" }, { "docid": "0eb37953c797fe848d0f078ab7f2e759", "score": "0.5260021", "text": "def process_observations(results):\n\n processed_map = {}\n for result in results:\n\n # Skip results where the publication state is working\n if result.get(\"publicationState\") == \"working\":\n continue\n\n # Skip where the result_field value is None\n if result[\"result_field\"] is None:\n continue\n\n data_path = result[\"result_field\"][\"dataPath\"].rstrip(\"/\")\n\n try:\n processed_map[data_path] = {\n 
\"title\": result[\"title\"],\n \"url\": f'https://catalogue.ceda.ac.uk/uuid/{result[\"uuid\"]}',\n \"record_type\": \"Dataset\",\n }\n except TypeError:\n continue\n\n return processed_map", "title": "" }, { "docid": "8637426c39daff22ddd89d16a371e719", "score": "0.5257472", "text": "async def latest_observations(\n place: str,\n starttime: Optional[str] = None,\n timestep: int = 10,\n aiohttp_kwargs: Any = None,\n) -> List[Observation]:\n sensor_parameters = \",\".join(OBSERVATION_PARAMS.keys())\n\n params = {\n \"request\": \"getFeature\",\n \"storedquery_id\": \"fmi::observations::weather::simple\",\n \"place\": place,\n \"parameters\": sensor_parameters,\n }\n\n if timestep % 10 != 0:\n raise ValueError(\"timestep must be divisable by 10\")\n\n if timestep != 10:\n params[\"timestep\"] = str(timestep)\n\n if starttime:\n params[\"starttime\"] = starttime\n\n url = WFS_URL + urlencode(params)\n unparsed_gml = await fetch(url, aiohttp_kwargs)\n\n return parse_latest_observations(unparsed_gml)", "title": "" }, { "docid": "25efe8a885d1ee6410fccbd8f095bd7d", "score": "0.5256837", "text": "def get_all(self):\n last_index = self.last_index\n self.last_index = 0\n res = self.get_next_batch(64)\n self.last_index = last_index\n \n return res", "title": "" }, { "docid": "5fb090d12d046c83eb4c123b135a083e", "score": "0.52458596", "text": "def get_results(self, request, term, page, context):\n raise NotImplementedError", "title": "" }, { "docid": "128698f0642ad3836a53456e2dcf4563", "score": "0.5245216", "text": "def observations(identifier, **kwargs):\n kwargs['series_id'] = identifier\n return Fred().series('observations', **kwargs)", "title": "" }, { "docid": "72be29c230633c4d1c3e7c2cea571da8", "score": "0.5220634", "text": "def sel_observations(self, idx):\n raise NotImplementedError(\"sel_observations method must be implemented for each subclass\")", "title": "" }, { "docid": "eca14ccd20c91e174ac3d753bfc83a0f", "score": "0.52206016", "text": "def observations(self):\n observations = []\n for observed_data in self._env.query(Filter('type', '=', 'observed-data')):\n if not self.revoked(observed_data):\n observations.append(observed_data)\n return observations", "title": "" }, { "docid": "5d7f15e6748cadd4b3d63b4998241062", "score": "0.51923966", "text": "def retrieve_reads():\n reads = query_premises.get_reads_for_gas_south_customers(YEAR, MONTH)\n reads = reads.append(query_premises.get_reads_for_gas_south_customers(YEAR, MONTH, year_prior=True))\n # # export as csv\n # reads.to_csv('reads.csv', index=False)\n reads = reads.reset_index(drop=True)\n return reads", "title": "" }, { "docid": "0570caef43e0929d4c2a2bea08c8b340", "score": "0.51902467", "text": "def get_all(self, start=0, count=-1, filter='', sort=''):\n return self._client.get_all(start, count, filter=filter, sort=sort)", "title": "" }, { "docid": "62398f5a08738907fc9d6fd287d43a43", "score": "0.51882225", "text": "def explore_dataset(request):\n \n response = query_individual_dataset('00000')\n\n return JsonResponse(response)", "title": "" }, { "docid": "e631fe5e79ea68fde485e3a644b68e8e", "score": "0.5181932", "text": "def get_all(method, *args, **kwargs):\n res = []\n kwargs['page'] = 1\n\n while True:\n response = method(*args, **kwargs)\n\n if not response:\n break\n\n res.extend(response)\n kwargs['page'] += 1\n\n return res", "title": "" }, { "docid": "335883fcfc21e7b3e1f3142eafbaf6b6", "score": "0.51795", "text": "def get_all(self, start=0, count=-1, filter='', sort=''):\n return self._client.get_all(start=start, count=count, 
filter=filter, sort=sort)", "title": "" }, { "docid": "17a18204c2c82c3c2ab72415e3521d3a", "score": "0.51743394", "text": "def get_geojson_observations(properties: List[str] = None, **kwargs) -> Dict[str, Any]:\n kwargs[\"mappable\"] = True\n observations = get_all_observations(kwargs)\n return as_geojson_feature_collection(\n (flatten_nested_params(obs) for obs in observations),\n properties=properties if properties is not None else DEFAULT_OBSERVATION_ATTRS,\n )", "title": "" }, { "docid": "b8da1da84b676e50fcffe956c3f2836a", "score": "0.5166958", "text": "def get(self,count=500):\n\n return self.filter_proxies()[:count]", "title": "" }, { "docid": "f81be40393f2c4d6f97b57f2def8730f", "score": "0.5163707", "text": "def fetchall(self):\n out = self._results[:]\n self._results = []\n return out", "title": "" }, { "docid": "7166bea11ea241b40fa515c344b601d8", "score": "0.5155878", "text": "def iter_records(self, **kwargs):\n return self._iter_records(**kwargs)", "title": "" }, { "docid": "78b5ee66b949bed8a6cc69dbe20a5ade", "score": "0.51556313", "text": "def get_objects(self):\n\n query = self.get_basequery()\n\n if getattr(self, 'paginate', True):\n page = self.get_page()\n pagination = query.paginate(page, per_page=self.get_per_page())\n\n return pagination.items, pagination\n\n return query.all(), None", "title": "" }, { "docid": "66bc3d445eb0a3b794e48cb853e10900", "score": "0.51527894", "text": "def feed(self, n=20, page=0):\n _objects = [i for i in self.posts.all()]\n for i in self.campaigns.all():\n _objects.append(i)\n _objects.extend(p for p in i.posts.all())\n _objects.sort(key=lambda o: o.pub_date, reverse=True)\n\n start = page * n\n end = start + n\n # this will never return an `IndexError`\n # slicing a list out-of-bounds returns a truncated slice, or an empty list\n return _objects[start:end]", "title": "" }, { "docid": "5d104086064a452946afa2e5861fc878", "score": "0.5152389", "text": "def observations_count(self):\n\n qs = Observation.objects.all()\n observations = set()\n\n for observation in qs:\n observation: Observation\n\n observations.add(observation.specimen_label)\n\n return observations", "title": "" }, { "docid": "a19d16431e4f32b184273b07c7896b33", "score": "0.5142812", "text": "def download_all(self):\n parser = PageParser(self.query,self.numResults,self.subcorpus,\n self.mode,self.nLeft,self.nRight)\n all_res = parser.extract_results()\n n_results = min(self.numResults,parser.occurrences)\n for i in range(self.per_page,n_results,self.per_page):\n parser.pagenum = i\n all_res += parser.extract_results()\n if len(all_res) > self.numResults:\n all_res = all_res[:self.numResults]\n return all_res", "title": "" }, { "docid": "cd926c005ff344fbee36cfbf72cb4cd0", "score": "0.51314485", "text": "def _fetch_all(self):\n if self._result_cache is None:\n self._result_cache, self._pagination_item = self.model.get(self.request_options)", "title": "" }, { "docid": "3df7e8b0ad4ccc49fabfb096c487f146", "score": "0.51308376", "text": "def get_all_pred_records():\n session = controller.connect_to_database()\n result = session.query(Prediction).all()\n preds = convert_pred_results(result)\n session.close()\n return preds", "title": "" }, { "docid": "acefbb95634855a960aa2b06fe50733a", "score": "0.5130649", "text": "def get_all_records(self):\n pass", "title": "" }, { "docid": "29d0a74cb95cabfb4f5e181ef36696a5", "score": "0.512669", "text": "def test_count_observations(self):\n parser = WebObsResultsParser(RESULTS_HTML)\n observations = parser.get_observations()\n 
self.assertEqual(len(observations), 1)", "title": "" }, { "docid": "a051f66df8b9d388076b1d5b714825cd", "score": "0.5122111", "text": "def get_all(self, q=None, limit=None):\r\n q = q or []\r\n\r\n if limit and limit < 0:\r\n raise ClientSideError(_(\"Limit must be positive\"))\r\n kwargs = _query_to_kwargs(q, storage.SampleFilter.__init__)\r\n f = storage.SampleFilter(**kwargs)\r\n return map(Sample.from_db_model,\r\n pecan.request.storage_conn.get_samples(f, limit=limit))", "title": "" }, { "docid": "f3240c57005d7f136ea27dfcf65b4f31", "score": "0.5119736", "text": "def get_all(self, start=0, count=-1, filter='', sort='', scope_uris=''):\n result = self._helper.get_all(start=start,\n count=count,\n filter=filter,\n sort=sort,\n scope_uris=scope_uris)\n\n return result", "title": "" }, { "docid": "dc4491b54f9637db755690a2497fce95", "score": "0.5110192", "text": "def observations(self, main_type, sub_type, unique_id, owner=None, params=None):\n params = params or {}\n\n if owner:\n params['owner'] = owner\n\n if not sub_type:\n url = f'/v2/{main_type}/{unique_id}/observations'\n else:\n url = f'/v2/{type}/{sub_type}/{unique_id}/observations'\n\n r = self._get(url, params)\n self.log.debug(f'status code: {r.status_code}')\n self.log.trace(f'url: {r.request.url}')\n return r", "title": "" }, { "docid": "12de52d51ccf25cd11e657bd2dec7111", "score": "0.508577", "text": "def get_socrata_dataset_in_chunks(soda_api_url, limit=10000, order_by=\":id\"):\n offset = 0\n data_set_df = None\n\n while True:\n dataset_url = soda_api_url + \"&$order=\" + order_by + \"&$offset=\" + str(offset) + \"&$limit=\" + str(limit) + \"&$$exclude_system_fields=false\"\n print(dataset_url)\n slice_df = pd.read_json(dataset_url)\n record_count = slice_df[order_by].count()\n if data_set_df is None:\n data_set_df = slice_df\n else:\n data_set_df = data_set_df.append(slice_df)\n\n if record_count < limit:\n break\n else:\n data_set_df\n\n offset += limit\n\n return data_set_df", "title": "" }, { "docid": "84f4dda8f08ed0332444be0e26073db9", "score": "0.5080683", "text": "def data_fetch(self):\n\t\tcurrent_date = date.today().strftime(\"%Y-%m-%d\")\n\t\tmaster_URL = \"\"\"{main_url}&end_at={end_dt_filter}\"\"\".format(main_url = self.URL, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t end_dt_filter = current_date)\n\t\t\n\t\tself.LOGGER.info(\"URL to fetch records --> {master_url}\\n\".format(master_url = master_URL))\n\t\t\n\t\ttry:\n\t\t\tself.LOGGER.info(\"Get request --> hitting the URL to fetch data\")\n\t\t\tresponse = requests.get(url = master_URL)\n\t\t\tself.LOGGER.info(\"Get request ::: Status --> Successful\\n\")\n\t\texcept Exception as e:\n\t\t\tself.LOGGER.info(\"Get request ::: Status --> Failed\")\n\t\t\tself.LOGGER.info(\"Reason ::: {failure_logs}\".format(failure_logs = str(e)))\n\t\t\traise \n\t\t\t\n\t\thistorical_records_json = response.json()\n\t\tself.LOGGER.info(\"Display sample records fetched from URL\\n\")\n\t\tprint(historical_records_json['rates'][list(historical_records_json['rates'].keys())[0]])\n\t\tprint(' ')\n\t\t\n\t\treturn historical_records_json", "title": "" }, { "docid": "35300ecf05e1564b8da63cb06dfd16be", "score": "0.50719196", "text": "def fetch_data(self):\n for t in [HOURLY, DAILY, MONTHLY, YEARLY]:\n self.get_data_per_period(t)", "title": "" }, { "docid": "496a9e23601670a04cf89d520120127d", "score": "0.5069737", "text": "def get_observations(self, input_image):\n output_image_default, observations = self.model.calculate(input_image, suppress=True)\n return output_image_default, 
self.transform_observations(observations)", "title": "" }, { "docid": "0c1983f50251ae73cfd9d38e66f512a7", "score": "0.5067351", "text": "def dates():\r\n # Query all dates and temperature observations for last year\r\n results = session.query(Measurement.date, Measurement.tobs).\\\r\n filter(Measurement.date.between('2017-01-01', '2017-12-31')).all()\r\n\r\n #Convert query results to dictionary\r\n all_observations = []\r\n for temp in results:\r\n temp_dict = {}\r\n temp_dict[\"date\"] = temp.date\r\n temp_dict[\"tobs\"] = temp.tobs\r\n all_observations.append(temp_dict)\r\n\r\n # Convert list of tuples into normal list\r\n return jsonify(all_observations)", "title": "" }, { "docid": "f2c44252487fa2f59a5d8f3e6a858284", "score": "0.50631493", "text": "def test_get_documents_offset_optional_params(index_with_documents):\n index = index_with_documents()\n response = index.get_documents()\n assert isinstance(response, list)\n assert len(response) == 20\n response_offset_limit = index.get_documents({\n 'limit': 3,\n 'offset': 1,\n 'attributesToRetrieve': 'title'\n })\n assert len(response_offset_limit) == 3\n assert response_offset_limit[0]['title'] == response[1]['title']", "title": "" }, { "docid": "bba4f577965bb164fa7aeb8833b7a741", "score": "0.50628805", "text": "def displayCurrentObservations(self):\n return self.__currentState.availableObservations()", "title": "" }, { "docid": "c51e88e9f78c002e44e6f3a94cce8d32", "score": "0.5060686", "text": "def generate_observations_rough(self, conditions):\n self.reward = self.calc_reward_function(conditions)\n\n # Check if we need to spin the tesselation\n if self.dither & (conditions.night != self.night):\n self._spin_fields()\n self.night = conditions.night.copy()\n\n # Let's find the best N from the fields\n order = np.argsort(self.reward)[::-1]\n # Crop off any NaNs\n order = order[~np.isnan(self.reward[order])]\n\n iter = 0\n while True:\n best_hp = order[iter*self.block_size:(iter+1)*self.block_size]\n best_fields = np.unique(self.hp2fields[best_hp])\n observations = []\n for field in best_fields:\n obs = empty_observation()\n obs['RA'] = self.fields['RA'][field]\n obs['dec'] = self.fields['dec'][field]\n obs['rotSkyPos'] = 0.\n obs['filter'] = self.filtername\n obs['nexp'] = self.nexp\n obs['exptime'] = self.exptime\n obs['field_id'] = -1\n obs['note'] = self.survey_name\n\n observations.append(obs)\n break\n iter += 1\n if len(observations) > 0 or (iter+2)*self.block_size > len(order):\n break\n return observations", "title": "" }, { "docid": "30b47084f734e6563987122c8bef6fc6", "score": "0.50575334", "text": "def get_historical_data():", "title": "" }, { "docid": "f401932db0f0d00b59bb5733a382891f", "score": "0.5055959", "text": "def retrieve_results(self):\n for a in data.get_user_top_albums(self.name,\"overall\"):\n self.all_albums.add((a[\"artist\"],a[\"name\"]))\n self.recent_albums = data.get_user_top_albums(self.name,\"12month\")\n return self._get_results()", "title": "" }, { "docid": "ce93bc6604d366fd82b2fe13be0cc16c", "score": "0.5038485", "text": "def __query_all_data(self, query_func, reset_data=True, **query_params):\n data = []\n\n self.params[\"page\"] = 1\n self.params[\"per_page\"] = 100\n\n while not self.try_query(query_func, **query_params):\n pass\n\n data.extend(self.query_result[\"data\"])\n\n while(self.__update_page_request()):\n while not self.try_query(query_func, **query_params):\n pass\n\n data.extend(self.query_result[\"data\"])\n\n if reset_data:\n self.data = data\n else:\n self.data.extend(data)", "title": "" 
}, { "docid": "7f1085ec73c83b089b8cb7d723ead536", "score": "0.50355464", "text": "def search(self, debug=True):\n inc=10000\n for price in range(0, 3000000, inc):\n if debug:\n print(\"From \" + str(price) + \" to \" + str(price + inc))\n res = self.__search(1, set_page_count=True, min_price=price, max_price=price + inc);\n if self.results is not None:\n print(self.results.shape)\n self.results = self.results.append(res)\n print(self.results.shape)\n else:\n self.results = res\n\n print(self.page_count)\n for page_number in range(2, self.page_count + 1):\n if debug:\n print(\"Getting page: \" + str(page_number))\n res = self.__search(page_number, min_price=price, max_price=price + inc)\n if self.results is not None:\n print(self.results.shape)\n self.results = self.results.append(res)\n print(self.results.shape)\n else:\n self.results = res\n\n self.results = self.results.drop_duplicates(subset=['id'], keep=False)\n self.results = self.results.reset_index(drop=True)\n self.results = self.results.sort_values(by='price')\n return self.results", "title": "" }, { "docid": "6b7fdb6e934f785dbcc9c3688c119029", "score": "0.502903", "text": "def fetchall(self, api: Airtable, **kwargs) -> GenAny:\n for page in api.get_iter(**kwargs):\n for record in page:\n yield Model.from_airtable(record)", "title": "" }, { "docid": "a244d0003c8d46d8be3844ca44ddccfa", "score": "0.50193834", "text": "def get_datarecords():\n try:\n return record_models.Dataset.objects.all()\n except:\n raise", "title": "" }, { "docid": "6df9be311a6101b53aeadf02ab9b1f37", "score": "0.50192374", "text": "def get_all(self): # noqa\n if self.number_calls is None:\n total_resources = self._get_count(self.url)\n logger.info(f\"there are {total_resources} matching resources for {self.url}\")\n self.number_calls = int(np.ceil(total_resources / PAGE_SIZE))\n\n if self.number_calls == 0:\n return self._get_data([])\n\n if self.parallel_requests:\n urls = []\n for i in range(self.number_calls):\n urls.append(\n (\n self.auth.token,\n f\"{self.url}&_getpagesoffset={i*PAGE_SIZE}&_count={PAGE_SIZE}\",\n )\n )\n\n p = multiprocessing.Pool()\n results = p.starmap(process_function, urls)\n p.close()\n self.pbar.update(self.bar_frac)\n else:\n results = []\n\n next_url = self.url\n while next_url:\n next_url = self._fix_next_url(next_url)\n response = self._get_response(next_url)\n page_results = self._get_data(response.results)\n\n results.append(page_results)\n next_url = response.next_url\n\n self._concat(results)\n\n return self.df", "title": "" }, { "docid": "4032bcc05ae057854fd4f8dc68c4b58b", "score": "0.5015355", "text": "def retrieve_documents(self, assessor, query_id, doc_retrieval_count):\n\n dao = DAO()\n\n if not dao.index_exists():\n dao.create_index()\n existing_doc_count = 0\n else:\n existing_doc_count = dao.get_doc_count(assessor, query_id)\n\n print 'existing count', existing_doc_count\n\n if existing_doc_count < doc_retrieval_count:\n dao.add_additional_docs(existing_doc_count, doc_retrieval_count, assessor, query_id)\n\n assessment_list = dao.retrieve_docs(assessor, query_id, doc_retrieval_count)\n\n doc_list = []\n\n for doc in assessment_list:\n\n doc_index_id = doc.get_index_id()\n doc_query_id = doc.get_query_id()\n doc_assessor_ = doc.get_assessor()\n doc_url = doc.get_document()\n doc_grade = doc.get_grade()\n\n doc_list.append({'index_id': doc_index_id,\n 'query_id': doc_query_id,\n 'assessor': doc_assessor_,\n 'document': doc_url,\n 'grade': doc_grade})\n\n return doc_list", "title": "" }, { "docid": 
"9c31f5097481405d172d8993e3ff3ed5", "score": "0.50085855", "text": "def _fetchResults(self, limit, context, obj, token=None):\n from google.appengine.ext.ndb import query as ndb_query\n\n query = self.getQuery(context, obj)\n assert isinstance(query, ndb_query.Query)\n\n kwargs = {\n 'produce_cursors': True,\n 'keys_only': self.getKeysOnly(context, obj),\n 'deadline': self.getDeadline(context, obj),\n 'read_policy': self.getReadPolicy(context, obj),\n }\n\n if token:\n kwargs['start_cursor'] = ndb_query.Cursor.from_websafe_string(token)\n\n results, cursor, more = query.fetch_page(limit, **kwargs)\n\n obj[CONTINUATION_MORE_RESULTS_KEY] = more\n obj[self.__NEXT_TOKEN] = more and cursor.to_websafe_string() or None\n return results", "title": "" }, { "docid": "6e99ebab3e00d753f99bd48feffc0af8", "score": "0.5001199", "text": "def get_entire_dataset(self, dataset_id, order_by):\n count = self.get_dataset_count(dataset_id)\n limit = 100000\n dataset = []\n for offset in range(0, count, limit):\n dataset.extend(self.client.get(dataset_id, order=order_by,\n offset=offset, limit=limit))\n return dataset", "title": "" }, { "docid": "5245604d0fd6ed5c7058c342a5c81f15", "score": "0.49992168", "text": "def get_sales():\n response = requests.get(base_url + \"/api/v1/sales?page=1\")\n data = response.json()\n sales_df = pd.DataFrame(data['payload']['sales'])\n \n total_pages = data['payload']['max_page']\n\n for i in range(2, total_pages+1): \n print(f\"Retrieving sales from page {i}\")\n response = requests.get(base_url + f\"/api/v1/sales?page={i}\")\n data = response.json()\n sales_df = pd.concat([sales_df, pd.DataFrame(data['payload']['sales'])])\n \n return sales_df.reset_index()", "title": "" }, { "docid": "8d5893867be77d59ae9da37669831492", "score": "0.49958277", "text": "def fetch_stix_objects_from_api(self, test: bool = False, **kwargs):\n data = []\n\n server = Server(url=self._base_url, auth=TokenAuth(key=self._api_key), verify=self._verify,\n proxies=self._proxies)\n\n for api_root in server.api_roots:\n for collection in api_root.collections:\n for bundle in as_pages(collection.get_objects, per_request=100, **kwargs):\n data.extend(bundle.get('objects'))\n if test:\n return data\n\n self.objects_data[kwargs.get('type')] = data", "title": "" }, { "docid": "dd56f6e661837a2fac6e2227dc68b366", "score": "0.49912074", "text": "def get_data(self):\n\n parameters = {\n \"action\": \"query\",\n \"prop\": \"extracts|info\",\n \"inprop\": \"url\",\n \"explaintext\": True,\n \"exsentences\": 2,\n \"exlimit\": 1,\n \"generator\": \"geosearch\",\n \"ggsradius\": 10000,\n \"ggscoord\": f\"{self.latitude}|{self.longitude}\",\n \"format\": \"json\",\n }\n\n headers = {\"date\": DATE, \"user-agent\": APP_NAME}\n response = requests.get(\n self.api_url, params=parameters, headers=headers, timeout=3\n )\n\n if response.status_code == 200:\n content = response.json()\n if content.get(\"query\", \"\") != \"\":\n if content.get(\"query\").get(\"pages\", \"\") != \"\":\n places = content.get(\"query\").get(\"pages\")\n places_list = []\n\n # select the nearest place\n for place in places:\n # select the smallest index\n places_list.append(\n (\n places.get(place).get(\"index\"),\n places.get(place).get(\"pageid\"),\n )\n )\n\n place_selected = min(places_list)\n pageid_selected = str(place_selected[1])\n\n return {\n \"title\": content.get(\"query\")\n .get(\"pages\")\n .get(pageid_selected)\n .get(\"title\", \"\"),\n \"extract\": content.get(\"query\")\n .get(\"pages\")\n .get(pageid_selected)\n .get(\"extract\", 
\"\"),\n \"fullurl\": content.get(\"query\")\n .get(\"pages\")\n .get(pageid_selected)\n .get(\"fullurl\", \"\"),\n }\n\n else:\n err = f\"Mediawiki API : '{response.status_code}' error occurred\"\n print(err)", "title": "" } ]
b8a317bbc561ef78b076ac96213c320d
Callback of stop order update.
[ { "docid": "3d97f6c0964c4dd3ffebfefaf4a1e725", "score": "0.7922227", "text": "def on_stop_order(self, stop_order: StopOrder):\n pass", "title": "" } ]
[ { "docid": "434387fcfcd9e863bf8d93d3048974d5", "score": "0.70248485", "text": "def triggerStopOrders(self):\n for soID,so in self.workingLimitOrdersDict.items():\n if so.direction == DIRECTION_LONG:\n triggered = (self.bar.High>=so.price)\n elif so.direction == DIRECTION_SHORT:\n triggered = (self.bar.Low<=so.price)\n else:\n triggered = False\n if triggered:\n #If the stop order is triggered\n so.status = SOSTATUS_TRIGGERED\n so.datetimeTriggered = self.datetime\n #Track the new info\n self.tracker.stopOrderTriggered(so)\n #Judge the new order\n if so.offset == OFFSET_CLOSE:\n #If the offset is close, then we have to make sure wether there's still nececssity to close the postition\n if (self.position_long>0 & so.direction == DIRECTION_SHORT):\n #There is long position in hand\n self.sendOrder(so.price,so.volume,orderType=so.orderType)\n elif (self.position_short>0 & so.direction == DIRECTION_LONG):\n #There is shor position in hand\n self.sendOrder(so.price,so.volume,orderType=so.orderType)\n elif so.offset == OFFSET_OPEN:\n #If the offset is open, then we can just send the new order out withou check the current position.\n self.sendOrder(so.price,so.volume,so.orderType)\n #Delete the triggered stop order from working dictionary\n del self.workingStopOrdersDict[so.soID]", "title": "" }, { "docid": "75f154a17e2e8e76f7f01b8c1f2de93b", "score": "0.6820954", "text": "def cancel_stop_order(self, **kwargs):\n return False", "title": "" }, { "docid": "6f6c9f48d0cd95588a37ef34052de0b4", "score": "0.67881554", "text": "def on_stop(self):\n\t\tpass;", "title": "" }, { "docid": "87f5523d085665e45a4df0a17ee30666", "score": "0.67418766", "text": "def stop(self):\n \"on stop hook\"\n return 0", "title": "" }, { "docid": "6848c5c655024817e9c3d6df41a4e819", "score": "0.67397654", "text": "def post_stop(self):", "title": "" }, { "docid": "d90f9295b193d0eccc0ce7908595e16e", "score": "0.67197305", "text": "def _on_stop_notifying(self):\n pass", "title": "" }, { "docid": "8a9c3169a7d23d27d088a97ef33b9650", "score": "0.66627276", "text": "def handle_stop(self):\n pass", "title": "" }, { "docid": "7f64f13a2b7b9f3336df93a349c077d5", "score": "0.66621625", "text": "def stop_unstop(self, status):\n\tself = frappe.get_doc(\"Production Order\", self)\n\tstatus = update_status(self, status)\n\tself.update_planned_qty()\n\tfrappe.msgprint(_(\"Production Order status is {0}\").format(status))\n\tself.notify_update()\n\tfrom myrador.myrador.doctype.production_order_operation_scheduler.production_order_operation_scheduler import make_from_production_order\n\tmake_from_production_order(self)", "title": "" }, { "docid": "2d767df9ef9fd8961bd7a52f887b3d1b", "score": "0.6642425", "text": "def on_stop(self):\n pass", "title": "" }, { "docid": "2d767df9ef9fd8961bd7a52f887b3d1b", "score": "0.6642425", "text": "def on_stop(self):\n pass", "title": "" }, { "docid": "e9d996a8ec02799a63adc8867e8974c5", "score": "0.6534456", "text": "def send_stop_order(self, **kwargs):\n return False", "title": "" }, { "docid": "25973ec0756c5860d81152d839f91281", "score": "0.6513099", "text": "def notify_stop(self, data):\n\n log.msg('Process %r stopped: %r' % (self.args[0], data))\n self.stop_data = data\n self.state = 'after'\n for i in range(len(self.stop_deferreds)):\n d = self.stop_deferreds.pop()\n d.callback(data)\n return data", "title": "" }, { "docid": "a0bd734c6b4b26c77d25741c49960941", "score": "0.64999044", "text": "def on_stop(self):\n self.write_log(\"策略停止\")\n\n self.put_event()", "title": "" }, { "docid": 
"c7a4003fe9057005424c94e4572373db", "score": "0.64891356", "text": "def on_stop(self):\n self.write_log(\"策略停止\")\n self.put_event()", "title": "" }, { "docid": "27fd8511351e5ca955f5b993dfd06929", "score": "0.6424197", "text": "def on_stop(self):\n pass\n # self.write_log(\"策略停止\")", "title": "" }, { "docid": "d23f6a2ec6fbbd159035d579c9f7552f", "score": "0.6414023", "text": "def do_stop(self):\r\n return", "title": "" }, { "docid": "b5759b0fd66923b6a2d66f465ba050bf", "score": "0.6384306", "text": "def stop(self):\n ...", "title": "" }, { "docid": "dbd4a552ab70a694b154ecd022dcfcaf", "score": "0.6329219", "text": "def onStop( self ):\n pass", "title": "" }, { "docid": "7ece1c08da600176d45b56fc15a10437", "score": "0.6316253", "text": "def on_stop(self):\n if self._return is None:\n Logger.logerr('%s Emergency stop on SM stop if state is active' % (self.name))\n if self._cmd_topic:\n self._pub.publish(self._cmd_topic, Twist())\n\n if self._cmd_topic_stamped:\n ts = TwistStamped() # Zero twist to stop if blocked\n ts.header.stamp = self._node.get_clock().now().to_msg() # update the time stamp\n self._pub.publish(self._cmd_topic_stamped, ts)", "title": "" }, { "docid": "17ead96cf7dfb340d485da4eb4729bae", "score": "0.63045806", "text": "def handleStop(self, msgTokens):\n responseText = \"Stopping\"\n self.sendText(responseText)\n self.d.getFeedback(ALL_OFF_COMMAND)\n responseText = \"Stopped\"\n self.sendText(responseText)\n self.setStatus(responseText)", "title": "" }, { "docid": "fd372c19fa04849c8b18fed941cc4038", "score": "0.6301418", "text": "def on_stop(self):\n self.write_log(\"策略停止\")", "title": "" }, { "docid": "fd372c19fa04849c8b18fed941cc4038", "score": "0.6301418", "text": "def on_stop(self):\n self.write_log(\"策略停止\")", "title": "" }, { "docid": "fd372c19fa04849c8b18fed941cc4038", "score": "0.6301418", "text": "def on_stop(self):\n self.write_log(\"策略停止\")", "title": "" }, { "docid": "fd372c19fa04849c8b18fed941cc4038", "score": "0.6301418", "text": "def on_stop(self):\n self.write_log(\"策略停止\")", "title": "" }, { "docid": "e81d39152b4c45d5971311988bc11b03", "score": "0.6275531", "text": "def stop(self):", "title": "" }, { "docid": "e81d39152b4c45d5971311988bc11b03", "score": "0.6275531", "text": "def stop(self):", "title": "" }, { "docid": "e81d39152b4c45d5971311988bc11b03", "score": "0.6275531", "text": "def stop(self):", "title": "" }, { "docid": "e81d39152b4c45d5971311988bc11b03", "score": "0.6275531", "text": "def stop(self):", "title": "" }, { "docid": "e81d39152b4c45d5971311988bc11b03", "score": "0.6275531", "text": "def stop(self):", "title": "" }, { "docid": "81ee52dad7ddabe59f920e96cd9a833b", "score": "0.626944", "text": "def stop():", "title": "" }, { "docid": "81ee52dad7ddabe59f920e96cd9a833b", "score": "0.626944", "text": "def stop():", "title": "" }, { "docid": "8e2d4521fb1b02fbb739a8b907b5479d", "score": "0.6255687", "text": "def stop(self) -> None:\n ...", "title": "" }, { "docid": "4bb365aadbb85584642cf289b2bbd2bb", "score": "0.6204263", "text": "def stop(self, *args, **kwargs):\n\n pass", "title": "" }, { "docid": "4913bb5fd88088655afaae1a1b0bfd02", "score": "0.6196648", "text": "def stop(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "55901d532d9dee40437b551b54de112a", "score": "0.61906916", "text": "def stop(self, irc, msg, args):\n irc.reply(\"Stopping market monitoring.\")\n self.e.set()", "title": "" }, { "docid": "f2ac4fa84609be0fc0e9933cf9292d45", "score": "0.6181572", "text": "def stop(self):\n\n\n pass", "title": "" }, { "docid": 
"d437781c2cba41937a97b8b29f96d486", "score": "0.61561567", "text": "def stop(self):\n for inx,stock in enumerate(self.stocks):\n print \"Cancelling:\\t%d = %s\" % (inx,stock)\n self.connection.cancelRealTimeBars(inx)\n self.connection.disconnect()", "title": "" }, { "docid": "7b05f9cf79060736d78dbc0116e36a7c", "score": "0.6137176", "text": "def stop():\n log(\"=========== hook: stop ===========\")", "title": "" }, { "docid": "82f26e753964ede5105dc15c4c582126", "score": "0.6099474", "text": "def stop(self):\r\n pass", "title": "" }, { "docid": "f54cd02f8386d02165ddfa4f7ebf8243", "score": "0.60975194", "text": "def stop(self, context):\n\t\tpass", "title": "" }, { "docid": "e53cd8689fbad2498dfb1c339e7b6289", "score": "0.60965055", "text": "def stop (self):\n pass", "title": "" }, { "docid": "984d4a06de6ae8ebec8f11097a3454ae", "score": "0.6096491", "text": "def _on_stop(self, component):\n self.on_stop()", "title": "" }, { "docid": "6fcc5621f459dd75cfd55a643c617eef", "score": "0.60955554", "text": "def on_stop(self):\n self.stop_serial_thread.set()", "title": "" }, { "docid": "782574e068ec542d8ed60776520d5c7d", "score": "0.6090423", "text": "def stop(self):\r\n\t\tself.stop = True", "title": "" }, { "docid": "66ae013f9db905dc73afbfd2fe5e7b09", "score": "0.6079601", "text": "def stop(self):\n\n pass", "title": "" }, { "docid": "66ae013f9db905dc73afbfd2fe5e7b09", "score": "0.6079601", "text": "def stop(self):\n\n pass", "title": "" }, { "docid": "ba43674de7f9ce040045202f6e8fa129", "score": "0.60584915", "text": "def __place_order_stop_loss(self, price, amount, side, pair, type, params,\n callback, meta_data={}, key_exchange=None):\n ex_instance = self.get_ex_instance(key_exchange)\n self._log(f'bot_fathermom---343Create stop loss order with params {params}, '\n f'bot_fathermom---344amount {amount}, side {side}, pair {pair}, type {type}, meta_data {meta_data}')\n profit_order_id = params[PARAMS_PROFIT_ORDER_ID]\n stop_loss_type = 'own_stoploss_{}'.format(type)\n self._log(f'bot_fathermom---347{self.alias}. 
Place order: '\n f'{pair}__{side}__{price}__{amount}__{type}__profit_order_id_{profit_order_id}')\n stop_loss_order_id = ex_instance.create_order(pair, stop_loss_type, side, amount, price,\n params={'stopPrice': price,\n 'profit-order-id': profit_order_id,\n 'bot_type': self.alias})\n if not stop_loss_order_id:\n self._log(f'bot_fathermom---354Create stop loss order is fail with params {params}, amount {amount}, side {side}, '\n f'bot_fathermom---355pair {pair}, type {type}, meta_data {meta_data}')\n return False\n # Thread(target=self.__subscribe_stop_loss_order_is_open,\n # args=(stop_loss_order_id, price, amount, side, params, callback, meta_data)).start()\n self.update_callback_follow_order_id(stop_loss_order_id, callback)\n self.submit_task(self.subscribe_order_status, stop_loss_order_id, price, amount, side, params, callback, meta_data)\n # two order, profit and stop loss order\n data_order = {\n KEY_GET_ORDER_ID: stop_loss_order_id,\n STOP_LOSS_ORDER_ID: stop_loss_order_id,\n KEY_GET_ORDER_AMOUNT: amount,\n KEY_GET_ORDER_SIDE: side,\n KEY_GET_ORDER_PRICE: price,\n KEY_GET_ORDER_STATUS: ORDER_PENDING,\n KEY_GET_ORDER_FILLED: 0.0,\n KEY_GET_ORDER_AVERAGE_PRICE: 0.0,\n FEES: 0.0,\n KEY_GET_ORDER_META_DATA: meta_data\n }\n return data_order", "title": "" }, { "docid": "e7e99aee81ac9dd8236f6d9fb4c621a9", "score": "0.6057225", "text": "def stop(self):\n self.logger.info(\"Requesting stop\")\n self.outstream.write(\"RELEASE\\n\")\n self.update_expected = True", "title": "" }, { "docid": "34700c8c69d05a00d2230364fd53eea8", "score": "0.604528", "text": "def on_cancel_order(self, data, request):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.6041199", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.6041199", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.6041199", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.6041199", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.6041199", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.6041199", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.6041199", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.6041199", "text": "def stop(self):\n pass", "title": "" }, { "docid": "8d03d6a52d83cee511ad5a7ab905a209", "score": "0.60286576", "text": "def stop(self, stop):\n\n self._stop = stop", "title": "" }, { "docid": "d34561e45ceedbe308c2c2ce411b4d4a", "score": "0.6028336", "text": "def _watch_and_cancel(self):\n paid = False\n for i in range(3600):\n time.sleep(2)\n if self.paid_query():\n paid = True\n print(\"paid!\")\n #self.sk.sendall(bytes(\"2\", \"utf-8\"))\n break\n print(\"not paid...\")\n\n # order is not paid in 30s , cancel this order\n if paid is False:\n self.del_trade()\n print(\"trade closed\")", "title": "" }, { "docid": "081f7b3d2b0e8d40dbbd0ec05f9875e7", "score": "0.60106117", "text": "def stop(self):\n self._stop = True\n self.log.info(\"stopping\")", "title": "" }, { "docid": "6ee091188ded64c63496e9652d5bf5b7", "score": "0.60074866", "text": "def stop(self):\n pass", "title": "" }, { "docid": "77b0dc53b52f69ad1391c675e0db9b1c", "score": "0.6003565", "text": "def stop(self):\n CBTLOG.debug('CBT: 
Stop requested')\n self._stop = True\n CBTLOG.info('CBT: Stopped')", "title": "" }, { "docid": "0cc1a31578749d85cff5449fe19c6853", "score": "0.59813696", "text": "def stop():\n pass", "title": "" }, { "docid": "0cc1a31578749d85cff5449fe19c6853", "score": "0.59813696", "text": "def stop():\n pass", "title": "" }, { "docid": "30a4403f6a943b77676e099336e046a3", "score": "0.59809023", "text": "def stop(self):\n self.stop_flag = True", "title": "" }, { "docid": "63175e83428c4d96fff99171cf5cc62a", "score": "0.5956462", "text": "def stop(self):\n \n pass", "title": "" }, { "docid": "49732add88d2cbc1a82e0299133185b6", "score": "0.59511447", "text": "def stop(self):\n self.stopping = True", "title": "" }, { "docid": "d2718b7fc5a726eb5551c85b23d8558a", "score": "0.5943539", "text": "def stop(self):\n\t\tself._stop.set()", "title": "" }, { "docid": "3c65c1de98123b5808c381501f3921ad", "score": "0.59385616", "text": "def stop(self):\n return", "title": "" }, { "docid": "3c65c1de98123b5808c381501f3921ad", "score": "0.59385616", "text": "def stop(self):\n return", "title": "" }, { "docid": "00d571f65e07e2a035d0a15d599fb6be", "score": "0.5921908", "text": "def stop(self):\n\t\tself.stopped = True", "title": "" }, { "docid": "bb5d62229087c1f383b5e95964af33ae", "score": "0.5920177", "text": "async def on_stop(self):\n self[\"start\"].disable()\n self[\"restart\"].disable()\n self[\"stop\"].disable()\n self[\"status\"].value = \"Stopping TalisMUD...\"\n await self.service.action_stop()\n await self.check_status()", "title": "" }, { "docid": "1ffe7053ad35c956494569353ec132ce", "score": "0.5912045", "text": "def _stop():\n shutdown_function()", "title": "" }, { "docid": "cad311cee1745f3cfc4f8095eb4cbd0a", "score": "0.59047234", "text": "def cancel_order(self, order_param):\n pass", "title": "" }, { "docid": "92cec20a3cf2667416c4d6cb8fd1da52", "score": "0.58867973", "text": "def stop(self):\n\t\tself.log_system(\"Stopping {0}\".format(type(self).__name__))", "title": "" }, { "docid": "bf784d686c8221f9b66d4bb056af12e4", "score": "0.58848065", "text": "def debconf_progress_stop(self):\n self._abstract('debconf_progress_stop')", "title": "" }, { "docid": "a68cfb9a162e7485df26595bbfb56deb", "score": "0.5884718", "text": "def put_stop_order_event(self, stop_order: StopOrder):\n event = Event(EVENT_CTA_STOPORDER, stop_order)\n self.event_engine.put(event)", "title": "" }, { "docid": "e0342310d12d9ad6356480e49128b000", "score": "0.58788544", "text": "def stop(self):\n self.handle(events.StopMessage)", "title": "" }, { "docid": "2cbd1f82bbc421d37718b63dfdaa4587", "score": "0.5876407", "text": "def stop_trading(context, data):\n context.trading_hours = False", "title": "" }, { "docid": "c00a6f049181bd1a0eaf519765ca5632", "score": "0.5876221", "text": "def stop(self):\n pass # pragma: no cover", "title": "" }, { "docid": "71b4baf2f010de9b324aa436079b92a1", "score": "0.58755034", "text": "def set_stop(self, stop_event):\n self.stop_time = stop_event.created\n self.check_stop_time = False\n self.acknowledged = True", "title": "" }, { "docid": "99536ce860c51025344bc48ba1519ceb", "score": "0.5872702", "text": "def stop(self):\n self.stop_event.set()", "title": "" }, { "docid": "99536ce860c51025344bc48ba1519ceb", "score": "0.5872702", "text": "def stop(self):\n self.stop_event.set()", "title": "" }, { "docid": "208555e051dafcbcfdaa7dfb46d6191e", "score": "0.581552", "text": "def stop(update: Update, context: CallbackContext):\r\n update.message.reply_text(\"Ok, let's start again.\\n Enter /start.\")\r\n \r\n return END", 
"title": "" }, { "docid": "13b225c044fdcc30677f72d01116b603", "score": "0.58083814", "text": "def on_pot_stop_clicked(self, data=None):\n try:\n dstat.state.ser.stop_exp()\n\n except AttributeError:\n pass\n except:\n logger.warning(sys.exc_info())", "title": "" }, { "docid": "e0e0ce84ca348f5c947f7a88e67ee6d1", "score": "0.58003694", "text": "async def send_cancel(self, order):", "title": "" }, { "docid": "a77106f162f403653bd5978cf19aa6d2", "score": "0.576561", "text": "def stop(self):\n raise NotImplementedError", "title": "" }, { "docid": "a77106f162f403653bd5978cf19aa6d2", "score": "0.576561", "text": "def stop(self):\n raise NotImplementedError", "title": "" }, { "docid": "a77106f162f403653bd5978cf19aa6d2", "score": "0.576561", "text": "def stop(self):\n raise NotImplementedError", "title": "" }, { "docid": "a77106f162f403653bd5978cf19aa6d2", "score": "0.576561", "text": "def stop(self):\n raise NotImplementedError", "title": "" }, { "docid": "a77106f162f403653bd5978cf19aa6d2", "score": "0.576561", "text": "def stop(self):\n raise NotImplementedError", "title": "" }, { "docid": "c4d01ef56e84eb969a091bd67c51e139", "score": "0.576151", "text": "def handle_stop(mqtt_sender):\n print('stop')\n mqtt_sender.send_message('stop')", "title": "" }, { "docid": "3d1a03a496a3f1b7aa845bb3cc6f742a", "score": "0.57544565", "text": "def stop_bleed(self):\n tickerhandler.remove(self, 30)", "title": "" }, { "docid": "8407929e49b3b1bce9e99a95f27697b0", "score": "0.5737226", "text": "def stop(self, onestop_id):\n return self.request('/api/v1/stops/%s'%onestop_id)", "title": "" }, { "docid": "9d678a65b1a76e45c8f464c95562b038", "score": "0.57354796", "text": "def observe_stop(self):\n if self.state=='after':\n return defer.succeed(self.stop_data)\n else:\n d = defer.Deferred()\n self.stop_deferreds.append(d)\n return d", "title": "" }, { "docid": "99d130d5ed7b7e21ad183d978e7c47ba", "score": "0.573341", "text": "def cancel_stop(self, bot, update):\n # reply if the user cancels the stop operation\n self.stop_train_flag = False\n update.message.reply_text('OK, training will not be stopped.', reply_markup=ReplyKeyboardRemove())\n\n return ConversationHandler.END", "title": "" }, { "docid": "6b28ec239180d0d3381e3f1a010006cc", "score": "0.57314247", "text": "def _perform_stop(self):\n if self.current:\n name = self.current.name\n if self.current.stop():\n self.bus.emit(Message(\"mycroft.stop.handled\",\n {\"by\": \"audio:\" + name}))\n\n self.current = None", "title": "" } ]
978f74abfb77ac10e4d0bc386de42f9c
Constructor for a generic pipeline
[ { "docid": "716e68cd8ffc66ec9e99b1381224e6e3", "score": "0.64695215", "text": "def __init__(self, name=None):\n if not name:\n # w/o a name, generate a random UUID to name this pipeline\n self.name = str(uuid.uuid4())\n else:\n self.name = name \n self.tasks = []\n self.result_map = {}\n self.total_pipeline_run_time = 0", "title": "" } ]
[ { "docid": "80e68f1c67c09d30204dd32ad82e5c03", "score": "0.7773082", "text": "def from_pipeline(cls, pipeline):\n self = object.__new__(cls)\n self.data = pipeline.data\n return self", "title": "" }, { "docid": "4ab57fb936d17c796e937df740d6fe7e", "score": "0.7675824", "text": "def __init__(self, pipeline, sink):\n self.pipeline = pipeline\n self.sink = sink", "title": "" }, { "docid": "4ab57fb936d17c796e937df740d6fe7e", "score": "0.7675824", "text": "def __init__(self, pipeline, sink):\n self.pipeline = pipeline\n self.sink = sink", "title": "" }, { "docid": "53f8d52867450fec71be098ec45cb0c5", "score": "0.7009929", "text": "def pipeline(self) -> Pipeline:\n pass", "title": "" }, { "docid": "c6ff9cf11aaa6322b0e357c7661779f5", "score": "0.7007379", "text": "def __init__(self, sub_pipeline_nodes):\n super(SubPipelineNode, self).__init__()\n \n self.sub_pipeline = Pipeline(sub_pipeline_nodes)", "title": "" }, { "docid": "b75fdc254400a82bb29126a5f31a27c3", "score": "0.7001686", "text": "def __init__(self, pipeline):\r\n RPObject.__init__(self)\r\n self.stages = []\r\n self.inputs = {}\r\n self.pipes = {}\r\n self.input_blocks = []\r\n self.previous_pipes = {}\r\n self.future_bindings = []\r\n self.defines = {}\r\n self.pipeline = pipeline\r\n self.created = False\r\n\r\n self._load_stage_order()\r\n\r\n # Register the manager so the pipe viewer can read our data\r\n PipeViewer.register_stage_mgr(self)", "title": "" }, { "docid": "9f04d94090948cb3f596ab3d3df69c37", "score": "0.6836815", "text": "def build_pipeline(self):\n pipe = pipeline(\n \"sentiment-analysis\", model=self.model, tokenizer=self.tokenizer, device=0\n )\n self.inference_pipeline = pipe", "title": "" }, { "docid": "9edf1b7e1568b2e41fe94c2c7d482dba", "score": "0.6678768", "text": "def make_pipeline(*steps):\n return Pipeline(pipeline._name_estimators(steps))", "title": "" }, { "docid": "9eb8f10acda30554347eea9fe6f8ca80", "score": "0.6673167", "text": "def _init_pipeline(self, cfg: Config) -> Callable:", "title": "" }, { "docid": "5bb81b058b03f58a8ba8836ae7a86d34", "score": "0.659589", "text": "def CreatePipeline(vectorizer, classifier):\n pipe = Pipeline([('vectorizer', vectorizer), ('clf', classifier)])\n return pipe", "title": "" }, { "docid": "7fc20891c8e4f5e35965e274f5ca2077", "score": "0.65810126", "text": "def __init__(self, pipeline, language=None, hint_language='en'):\n self.pipeline = pipeline\n self.language = language\n self.hint_language = hint_language", "title": "" }, { "docid": "9fd75375d7b004399c12d17409e8df14", "score": "0.65781236", "text": "def build_pipeline(self):\n object = self.source\n for pipe in self.pipeline:\n keywords = set(pipe.class_trait_names())\n keywords.remove('trait_added')\n keywords.remove('trait_modified')\n this_kwargs = {}\n for key, value in self.kwargs.items():\n if key in keywords:\n this_kwargs[key] = value\n object = pipe(object, **this_kwargs)._target\n return object", "title": "" }, { "docid": "06820e5968c794a983e1266e23e69502", "score": "0.65576994", "text": "def constructPipeline(self):\r\n # Create the pipeline instance\r\n self.player = gst.Pipeline()\r\n\r\n # Define pipeline elements\r\n self.filesrc = gst.element_factory_make(\"filesrc\")\r\n\r\n self.filesrc.set_property(\"location\", self.inFileLocation)\r\n\r\n self.decodebin = gst.element_factory_make(\"decodebin\")\r\n\r\n self.autoconvert = gst.element_factory_make(\"autoconvert\")\r\n\r\n self.audioconvert = gst.element_factory_make(\"audioconvert\")\r\n self.audioresample = 
gst.element_factory_make(\"audioresample\")\r\n self.audio_encoder = gst.element_factory_make(\"lame\")\r\n self.audiosink = gst.element_factory_make(\"filesink\")\r\n self.audiosink.set_property(\"location\", self.audioOutLocation)\r\n\r\n self.video_encoder = gst.element_factory_make(\"ffenc_mpeg4\")\r\n self.muxer = gst.element_factory_make(\"ffmux_mp4\")\r\n\r\n self.videosink = gst.element_factory_make(\"filesink\")\r\n self.videosink.set_property(\"location\", self.videoOutLocation)\r\n\r\n self.queue1 = gst.element_factory_make(\"queue\")\r\n self.queue2 = gst.element_factory_make(\"queue\")\r\n self.queue3 = gst.element_factory_make(\"queue\")\r\n\r\n # Add elements to the pipeline\r\n self.player.add(self.filesrc,\r\n self.decodebin,\r\n self.queue1,\r\n self.autoconvert,\r\n self.video_encoder,\r\n self.muxer,\r\n self.videosink,\r\n self.queue2,\r\n self.audioconvert,\r\n self.audio_encoder,\r\n self.audiosink,\r\n self.queue3\r\n )\r\n\r\n # Link elements in the pipeline.\r\n gst.element_link_many(self.filesrc, self.decodebin)\r\n\r\n gst.element_link_many(self.queue1,\r\n self.autoconvert,\r\n self.video_encoder,\r\n self.muxer,\r\n self.videosink)\r\n\r\n gst.element_link_many(self.queue2,\r\n self.audioconvert,\r\n self.audio_encoder,\r\n self.audiosink)", "title": "" }, { "docid": "b839ec90a635297d2d6c501624658751", "score": "0.65462023", "text": "def create_pipeline(**parameters):\n\n # height and depth\n height = parameters.get('height', 0)\n depth = parameters.get('depth', 0)\n\n # create a selector for ancestor and descendants\n height_depth_selector = HeightDepthSelector(depth=depth, height=height)\n\n # feature subset\n use_numeric = parameters.get('use_numeric', False)\n use_classes = parameters.get('use_classes', False)\n use_ids = parameters.get('use_ids', False)\n use_tags = parameters.get('use_tags', False)\n\n transformer_list = create_feature_transformers(use_classes, use_ids, use_numeric, use_tags, height, depth)\n\n estimator = Pipeline(steps=[\n ('verbosity', height_depth_selector),\n ('union', FeatureUnion(transformer_list=transformer_list)),\n ('normalizer', MaxAbsScaler()),\n ('reduce_dim', SelectPercentile(chi2)),\n ('classify', create_classifier(parameters.get('classify', 'logistic')))\n ])\n\n return estimator", "title": "" }, { "docid": "03e2c89f280f919d0e60dc7b9729e9c5", "score": "0.65412366", "text": "def construct_pipeline(self):\n\n model = getattr(clustering, self._algorithm)()\n param_map = [i.name for i in model.params]\n\n # Make sure that the params in self._params are the right for the algorithm\n dict_params_labels = dict(filter(\n lambda x: x[0] in param_map, self._dict_parameters.items())\n )\n dict_params_labels['featuresCol'] = 'scaled_features'\n\n # Model is set\n model = eval(\"clustering.\" + self._algorithm)(**dict_params_labels)\n dict_params_labels = dict(map(\n lambda i: (i.name, model.getOrDefault(i.name)), model.params)\n )\n # Add algorithm dict_params_labels\n dict_params_labels['algorithm'] = self._algorithm\n # dict_params_labels['seed'] = -1983291474829197226\n # dict_params_labels['initMode'] = 'random'\n stages = [model] # [vectorized_features, caster, scaling_model, model]\n self._dict_parameters.update(dict_params_labels) # dict gets updated\n\n return Pipeline(stages=stages)", "title": "" }, { "docid": "30d2d8b2d758c91092cc5019cca3998a", "score": "0.6538572", "text": "def init_pipeline():\n derive_step = ('derive', CreateDerivedFeatures())\n derive_features = Pipeline([derive_step])\n si_step_cat = ('si_cat', 
SimpleImputer(strategy='constant',\n fill_value='missing_value'))\n ohe_step_cat = ('ohe_cat', OneHotEncoder(handle_unknown='ignore',\n sparse=False))\n pipe_cat = Pipeline([si_step_cat, ohe_step_cat])\n si_step_num = ('si_num', SimpleImputer(strategy='constant',\n fill_value=-999))\n pipe_num = Pipeline([si_step_num])\n preprocessor = ColumnTransformer(\n transformers=[('cat', pipe_cat, cat_features),\n ('num', pipe_num, num_features)]\n )\n classifier = rf(n_estimators=20, max_depth=20)\n preprocessor_classifier = Pipeline([\n ('derive_features', derive_features),\n ('preprocessor', preprocessor),\n ('classifier', classifier)\n ])\n return preprocessor_classifier", "title": "" }, { "docid": "6c6eea39cc7cb80f59c45972e2bd751a", "score": "0.65279126", "text": "def __init__(self, **kwargs):\r\n super(TransformerBase, self).__init__()", "title": "" }, { "docid": "3a19edc4a71b4af38401a8f54589d2f5", "score": "0.6525941", "text": "def __init__(self, **kwargs):\n super(TransformerBase, self).__init__()", "title": "" }, { "docid": "b8c2a6819e382b375df2c93e9e5b2396", "score": "0.6523997", "text": "def from_json(data: List) -> \"Pipeline\":\n return Pipeline(data)", "title": "" }, { "docid": "2e311c62c5916052eb5d41618c09c0c6", "score": "0.64594287", "text": "def create_pipeline(self):\n nltk.download('stopwords')\n stop_words = set(stopwords.words('french'))\n self.pipeline = Pipeline([\n (\"custom_preprocessor\", CustomPreprocessor()),\n (\"tfidf\", TfidfVectorizer(ngram_range=(2, 2), stop_words=stop_words)),\n (\"ridge\", RidgeClassifier())\n ])", "title": "" }, { "docid": "e301c0229794e5d63a91cc4b35f50e6f", "score": "0.639483", "text": "def CreatePipe(*args, **kwargs): # real signature unknown\r\n pass", "title": "" }, { "docid": "5de00c4a50cbe585307a4d598ab42eec", "score": "0.63759685", "text": "def __init__(self):\n\n super(PipelineNode, self).__init__()\n self._cs_updates = dict()\n self.pipeline = None", "title": "" }, { "docid": "ac577a3a5ead1311cbdca7690228b807", "score": "0.63664734", "text": "def __init__(self, n_result=None, **kwargs):\n if n_result is not None and n_result < 1:\n raise ValueError(\"Expected number of outputs cannot be less than 1\")\n self.n_result = n_result\n PipeElement.__init__(self, **kwargs)", "title": "" }, { "docid": "4b4a7ff986742d3827cac2f171e28f9a", "score": "0.63310987", "text": "def pipeline(self, transaction=True):\r\n return Pipeline(\r\n self.connection,\r\n transaction,\r\n self.encoding,\r\n self.errors\r\n )", "title": "" }, { "docid": "5219714730b7fb513c7e0a002c6150c9", "score": "0.6316722", "text": "def __init__(\n self,\n pipeline=None,\n outsetname=\"\",\n token_type=\"Token\",\n spacetoken_type=\"SpaceToken\",\n sentence_type=\"Sentence\",\n nounchunk_type=\"NounChunk\",\n add_tokens=True,\n # add_spacetokens=True, # not sure how to do this yet\n add_entities=True,\n add_sentences=True,\n add_nounchunks=True,\n add_deps=True,\n ent_prefix=None,\n ):\n self.outsetname = outsetname\n\n self.token_type = token_type\n self.sentence_type = sentence_type\n self.add_entities = add_entities\n self.ent_prefix = ent_prefix\n self.spacetoken_type = spacetoken_type\n self.nounchunk_type = nounchunk_type\n self.add_tokens = add_tokens\n self.add_sentences = add_sentences\n self.add_nounchunks = add_nounchunks\n self.add_deps = add_deps\n if pipeline:\n self.pipeline = pipeline\n else:\n self.pipeline = spacy.load(\"en_core_web_sm\")", "title": "" }, { "docid": "44f2f31a9ce26a94ebb123a9da42a43e", "score": "0.6301211", "text": "def pipeline_instance(self, 
pipeline_function, config):\n pipeline_input_dataset = self.dataset_load(config.passthrough.input_dataset)\n\n runnable_pipeline = pipeline_function(pipeline_input_dataset)\n return runnable_pipeline", "title": "" }, { "docid": "52e5ee65edb464cd2bde5346c449e877", "score": "0.6290487", "text": "def construct_pipeline(self):\n\n if self.method == \"rotate\":\n self.rotation_objective = True\n self.atomic_objective = False\n self.bond_objective = False\n\n if self.method == \"no-bond\":\n self.rotation_objective = True\n self.atomic_objective = True\n self.bond_objective = False\n\n elif self.method == \"full\":\n self.rotation_objective = True\n self.atomic_objective = True\n self.bond_objective = True\n\n elif self.method == \"info\":\n self.create_atoms = True\n self.rotation_objective = False\n self.atomic_objective = False\n self.bond_objective = False\n\n # sanity check override\n if self.bond_objective == True or self.atomic_objective == True:\n self.create_atoms = True\n\n # TODO more bond", "title": "" }, { "docid": "7d1abe2fc5479a0e4d0716e38572fbda", "score": "0.6270422", "text": "def PipelineFactory(ModelName: str, dummy: bool, **kwargs): \n \n if ((ModelName == 'linear')or(ModelName == 'LS-SVM')):\n if (dummy):\n pipeline = Pipeline([\n ('std_scaler', StandardScaler(with_mean=False, with_std=False)),\n ])\n else:\n pipeline = Pipeline([\n ('std_scaler', StandardScaler()), #scaling to be centered on 0, with unit variance...since the values are quite different, this will help things\n ])\n elif ((ModelName == 'polynomial')or(ModelName == 'poly_enr')or(ModelName == 'poly_lasso')):\n pipeline = MyPolyPipeline(dummy, **kwargs) \n# elif ModelName == 'SVMLinear':\n# ModelClass = TSVMLinearModel(ObjName,Target, Feature, Target_test, Feature_test,**kwargs)\n# elif ModelName == 'SVMPolynomial':\n# ModelClass = TSVMPolynomialModel(ObjName,Target, Feature, Target_test, Feature_test,**kwargs)\n# elif ModelName == 'SVM_RBF':\n# ModelClass = TSVM_RBFModel(ObjName,Target, Feature, Target_test, Feature_test,**kwargs)\n else: #default option\n print(\"!!!!!!! ERROR : THE MODEL \",ModelName,\" HAS NOT BEEN IMPLEMENTED. 
DEFAULTING TO LINEAR PIPELINE MODEL.\")\n pipeline = Pipeline([\n ('std_scaler', StandardScaler(with_mean=False, with_std=False)),\n ])\n #and return to sender\n return pipeline", "title": "" }, { "docid": "09f6113e6b321b5a16ec55cc4f745985", "score": "0.6258991", "text": "def __init__(self):\n self._process_pipe = []\n self._gen = None", "title": "" }, { "docid": "12208371499f6c55a3cf3c9513e99e0d", "score": "0.62513494", "text": "def __init__(self): # Takes a git url to a rails app\n self.pipe = Pipeline()\n self.dockerfile_dir = None\n self.repository = None\n self.registry = None\n self.git_url = None\n self.image = None", "title": "" }, { "docid": "18813ace305c3ee5aa0ac50a606eea6e", "score": "0.6227089", "text": "def set_pipeline(self):\n '''returns a pipelined model'''\n dist_preprocess = Pipeline([\n ('transformer',DistanceTransformer()),\n ('scaler',RobustScaler())\n ])\n time_preprocess = Pipeline([\n ('transformer',TimeFeaturesEncoder()),\n ('encoder',OneHotEncoder(handle_unknown='ignore', sparse=False))\n ])\n\n time_column = ['pickup_datetime']\n dist_columns = ['pickup_longitude', 'pickup_latitude', 'dropoff_longitude', 'dropoff_latitude']\n\n # create preprocessing pipeline\n preprocess = ColumnTransformer([\n ('time', time_preprocess, time_column),\n ('dist', dist_preprocess, dist_columns)\n ])\n\n self.pipeline = Pipeline([\n ('preprocess', preprocess),\n ('estimator', RandomForestRegressor())\n ])\n\n return self.pipeline", "title": "" }, { "docid": "49658f72ba11103997b7949d953675aa", "score": "0.6212904", "text": "def __init__(self, name: str, image: str, command: str=None, arguments: str=None,\n file_inputs : Dict[_pipeline_param.PipelineParam, str]=None,\n file_outputs : Dict[str, str]=None, is_exit_handler=False):\n\n if not _pipeline.Pipeline.get_default_pipeline():\n raise ValueError('Default pipeline not defined.')\n\n self.human_name = name\n self.name = _pipeline.Pipeline.get_default_pipeline().add_op(self, is_exit_handler)\n self.image = image\n self.command = command\n self.arguments = arguments\n self.is_exit_handler = is_exit_handler\n self.memory_limit = None\n self.memory_request = None\n self.cpu_limit = None\n self.cpu_request = None\n self.volumes = []\n self.volume_mounts = []\n self.env_variables = []\n\n matches = []\n if arguments:\n for arg in arguments:\n match = re.findall(r'{{pipelineparam:op=([\\w-]*);name=([\\w-]+);value=(.*?)}}', str(arg))\n matches += match\n\n self.argument_inputs = [_pipeline_param.PipelineParam(x[1], x[0], x[2])\n for x in list(set(matches))]\n self.file_inputs = file_inputs\n self.file_outputs = file_outputs\n self.dependent_op_names = []\n\n self.inputs = []\n if self.argument_inputs:\n self.inputs += self.argument_inputs\n\n if file_inputs:\n self.inputs += list(file_inputs.keys())\n\n self.outputs = {}\n if file_outputs:\n self.outputs = {name: _pipeline_param.PipelineParam(name, op_name=self.name)\n for name in file_outputs.keys()}\n\n self.output=None\n if len(self.outputs) == 1:\n self.output = list(self.outputs.values())[0]", "title": "" }, { "docid": "4c25900a1fe710c041ddfc563df4eaf3", "score": "0.6209057", "text": "def set_pipeline(self):\n\n pipe_time = Pipeline([\n ('features', TimeFeaturesEncoder('pickup_datetime')),\n ('onehot', OneHotEncoder(handle_unknown='ignore'))\n ])\n\n X_distance = ['pickup_longitude', 'pickup_latitude',\n 'dropoff_longitude', 'dropoff_latitude']\n\n X_time = ['pickup_datetime']\n\n pipe_distance = Pipeline([\n ('distance_transformer', DistanceTransformer()),\n ('standardize', 
StandardScaler())\n ])\n\n pipe_preproc = ColumnTransformer(\n [(\"pipe_distance\", pipe_distance, X_distance),\n (\"pipe_time\", pipe_time, X_time)\n ])\n\n # Add the model of your choice to the pipeline\n\n final_pipe = Pipeline([\n ('pipelines_aggregated', pipe_preproc),\n ('model', LinearRegression())\n\n ])\n\n # display the pipeline with model\n\n\n return final_pipe", "title": "" }, { "docid": "7d3deb3e2815c2f56248aacce69e4038", "score": "0.6197476", "text": "def __init__(self, name: str):\n self.name = name # No use at the moment.\n self.data = [''] # Generic data object again.\n self.cache = newPipe('cache', 1)\n self.finished = True", "title": "" }, { "docid": "01d469c12f23a0d51a14e726b43f35f7", "score": "0.6164404", "text": "def __init__(self, pipeline: List[BuildStep], config_group_desc: str):\n self._config_group_desc = config_group_desc\n self._pipeline = pipeline\n self._config_parser_group: Optional[configargparse.ArgParser] = None\n self._all_pre_runs_skipped = False\n self._all_runs_skipped = False\n self._all_cleanups_skipped = False", "title": "" }, { "docid": "9ad83052451f70b1c2e906addd91b9f5", "score": "0.6143261", "text": "def build_pipe(self, hash_size = 100):\n \n self.read_yaml_file()\n self.read_csv()\n self.fill_na()\n self.data.drop(['msisdn'],axis=1,inplace=True)\n self.hash_list()\n self.pipeline(hash_size)\n \n self.full_pipeline = ColumnTransformer(\n transformers=[\n ('num', self.num_pipeline, self.num),\n ('cat', self.cat_pipeline, self.low_cat),\n ('hash', self.hash_pipeline, self.hash_features)\n ])\n \n self.X = self.data\n \n self.full_pipeline.fit(self.X)\n \n self.X = self.full_pipeline.transform(self.X)\n \n print(self.X.shape)\n return self.X, self.full_pipeline", "title": "" }, { "docid": "acceb46895d10f0468450c0c5a9a17b8", "score": "0.6137514", "text": "def __init__(self, timeout=0.5, tolerance=20):\n Pipe.__init__(self)\n self.timeout = timeout\n self.tolerance = tolerance\n self.cache = {}", "title": "" }, { "docid": "f2d10416ec60ef783ebf344ef3a3d5c0", "score": "0.6125807", "text": "def __init__(__self__, *,\n pipeline_definition: pulumi.Input[Union['PipelineDefinition0PropertiesArgs', 'PipelineDefinition1PropertiesArgs']],\n role_arn: pulumi.Input[str],\n parallelism_configuration: Optional[pulumi.Input['ParallelismConfigurationPropertiesArgs']] = None,\n pipeline_description: Optional[pulumi.Input[str]] = None,\n pipeline_display_name: Optional[pulumi.Input[str]] = None,\n pipeline_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input['PipelineTagArgs']]]] = None):\n pulumi.set(__self__, \"pipeline_definition\", pipeline_definition)\n pulumi.set(__self__, \"role_arn\", role_arn)\n if parallelism_configuration is not None:\n pulumi.set(__self__, \"parallelism_configuration\", parallelism_configuration)\n if pipeline_description is not None:\n pulumi.set(__self__, \"pipeline_description\", pipeline_description)\n if pipeline_display_name is not None:\n pulumi.set(__self__, \"pipeline_display_name\", pipeline_display_name)\n if pipeline_name is not None:\n pulumi.set(__self__, \"pipeline_name\", pipeline_name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "title": "" }, { "docid": "6efb3008aa86272828dad78b372b9d03", "score": "0.61052495", "text": "def _build_pipeline(self, cmd):\r\n _log.debug(cmd.replace(\"(\", \"\\\\(\").replace(\")\", \"\\\\)\")\\\r\n .replace(\";\", \"\\;\"))\r\n \r\n try:\r\n self.pipe = gst.parse_launch(cmd)\r\n except gobject.GError, e:\r\n raise 
PipelineException(_(\"Unable to construct pipeline! \") + \\\r\n str(e))\r\n \r\n bus = self.pipe.get_bus()\r\n bus.add_signal_watch()\r\n bus.connect(\"message\", self._on_message)", "title": "" }, { "docid": "ca1da7176a70b31419472fe877438b3c", "score": "0.6092936", "text": "def __init__(\n self,\n pipeline: Union[Pipeline, BaseEstimator] = PIPELINE,\n name: Optional[str] = \"stacking_estimator\",\n min_year=BEST_PARAMS[\"min_year\"],\n ) -> None:\n super().__init__(pipeline, name=name)\n\n self.min_year = min_year", "title": "" }, { "docid": "caf7fd96800b441f5af497f3f80cc5ab", "score": "0.6087005", "text": "def _init_pipeline(self, cfg: ConfigType) -> None:\n pipeline_cfg = cfg.test_dataloader.dataset.pipeline\n\n # For inference, the key of ``instances`` is not used.\n if 'meta_keys' in pipeline_cfg[-1]:\n pipeline_cfg[-1]['meta_keys'] = tuple(\n meta_key for meta_key in pipeline_cfg[-1]['meta_keys']\n if meta_key != 'instances')\n\n # Loading annotations is also not applicable\n idx = self._get_transform_idx(pipeline_cfg, 'LoadOCRAnnotations')\n if idx != -1:\n del pipeline_cfg[idx]\n\n self.file_pipeline = Compose(pipeline_cfg)\n\n load_img_idx = self._get_transform_idx(pipeline_cfg,\n 'LoadImageFromFile')\n if load_img_idx == -1:\n raise ValueError(\n 'LoadImageFromFile is not found in the test pipeline')\n pipeline_cfg[load_img_idx]['type'] = 'LoadImageFromNDArray'\n self.ndarray_pipeline = Compose(pipeline_cfg)", "title": "" }, { "docid": "6e81b0c7749414328c13f3c4e26aaf7b", "score": "0.60837454", "text": "def pipeline(self, transaction=True, shard_hint=None):\n return Pipeline(\n self,\n self.parser.response_callbacks,\n transaction,\n shard_hint)", "title": "" }, { "docid": "45241c8126bf94426270177254055c8d", "score": "0.60743284", "text": "def __init__(self, source_name = '', source_path = '', gt_path = '', flags = UNDEFINED):\n # defined by pipeline\n self.id = 0 \n # defined by DB module\n self.flags = flags\n self.source_name = source_name\n self.source_path = source_path\n self.gt_path = gt_path\n self.gt_labels = None # definet by IO module (reading GT)\n self.vout_path = '' # defined by VAD module\n self.vad_labels = None # defined by IO module (reading VAD output)\n self._length = 0 # data length in seconds\n self.set_length()", "title": "" }, { "docid": "c4b3fec5461b9a6afa6947319e5d39fa", "score": "0.6070282", "text": "def pipeline(self, hash_size):\n \n self.num_pipeline = Pipeline(steps= [('imputer', SimpleImputer(strategy='mean')), ('std_scaler', MinMaxScaler())])\n self.cat_pipeline = Pipeline(steps=[('imputer', SimpleImputer(strategy='constant', fill_value='Missing')),\n ('one_hot_encoding', OneHotEncoder(handle_unknown = \"ignore\", sparse = False))])\n self.hash_pipeline = Pipeline([('imputer', SimpleImputer(strategy='constant', fill_value='Missing')),\n ('hasher', FeatureHasher(n_features=hash_size, input_type='string'))])", "title": "" }, { "docid": "6489bf8abc4992145561f9f3af15b296", "score": "0.60686934", "text": "def _create_pipeline(self) -> codepipeline.Pipeline:\n source_output = codepipeline.Artifact()\n build_output = codepipeline.Artifact()\n return codepipeline.Pipeline(\n self,\n 'Pipeline',\n stages=[\n self._create_source_stage('Source', source_output),\n self._create_image_build_stage(\n 'Build', source_output, build_output),\n self._create_deploy_stage('Deploy', build_output)\n ]\n )", "title": "" }, { "docid": "7f7bc0e8b88903cf59599e66e908ad52", "score": "0.6038285", "text": "def __init__(self, pool: ThreadPoolExecutor = None):\n\n info('pipeline 
initialized: %s', self.__class__.__name__)\n\n self.SAMPLE_RATE = 16000\n self.v = visdom.Visdom()", "title": "" }, { "docid": "d4331c61b1232609ab49a09d6e2c7408", "score": "0.60352254", "text": "def __init__(\n self,\n pool_frequency=0.2, # type: float\n default_execution_queue=None, # type: Optional[str]\n pipeline_time_limit=None, # type: Optional[float]\n auto_connect_task=True, # type: Union[bool, Task]\n always_create_task=False, # type: bool\n add_pipeline_tags=False, # type: bool\n ):\n # type: (...) -> ()\n self._nodes = {}\n self._running_nodes = []\n self._start_time = None\n self._pipeline_time_limit = pipeline_time_limit * 60. if pipeline_time_limit else None\n self._default_execution_queue = default_execution_queue\n self._pool_frequency = pool_frequency * 60.\n self._thread = None\n self._stop_event = None\n self._experiment_created_cb = None\n self._add_pipeline_tags = add_pipeline_tags\n self._task = auto_connect_task if isinstance(auto_connect_task, Task) else Task.current_task()\n self._step_ref_pattern = re.compile(self._step_pattern)\n if not self._task and always_create_task:\n self._task = Task.init(\n project_name='Pipelines',\n task_name='Pipeline {}'.format(datetime.now()),\n task_type=Task.TaskTypes.controller,\n )\n\n # make sure all the created tasks are our children, as we are creating them\n if self._task:\n self._task.add_tags([self._tag])\n self._auto_connect_task = bool(auto_connect_task)", "title": "" }, { "docid": "36fe461431f07fbeac9613ea1cf339d9", "score": "0.601129", "text": "def pipeline(self) -> Union[Pipeline, Model]:\n pass", "title": "" }, { "docid": "253b481230c7cdf343b0257e3387f586", "score": "0.599986", "text": "def __init__(self, interaction: bool = False):\n if interaction:\n self.classifier = make_pipeline(\n StandardScaler(),\n PolynomialFeatures(degree=2, interaction_only=True),\n LogisticRegression(\n penalty=\"l1\",\n class_weight=\"balanced\",\n solver=\"saga\",\n max_iter=10_000,\n ),\n )\n else:\n self.classifier = make_pipeline(\n StandardScaler(), LogisticRegression(class_weight=\"balanced\")\n )", "title": "" }, { "docid": "c67572d34f6975339b13621cbebdd2dd", "score": "0.59774303", "text": "def __init__(self, data, transform=None):\n self.data = data\n self.transform = transform", "title": "" }, { "docid": "9d9c92da238108ce9e8377dc07d03455", "score": "0.5951545", "text": "def _init_pipeline(self, cfg: ConfigType) -> Compose:\n pipeline_cfg = cfg.test_dataloader.dataset.pipeline\n\n # For inference, the key of ``instances`` is not used.\n if 'meta_keys' in pipeline_cfg[-1]:\n pipeline_cfg[-1]['meta_keys'] = tuple(\n meta_key for meta_key in pipeline_cfg[-1]['meta_keys']\n if meta_key != 'instances')\n\n # Loading annotations is also not applicable\n idx = self._get_transform_idx(pipeline_cfg, 'LoadOCRAnnotations')\n if idx != -1:\n del pipeline_cfg[idx]\n\n for transform in self.loading_transforms:\n load_img_idx = self._get_transform_idx(pipeline_cfg, transform)\n if load_img_idx != -1:\n pipeline_cfg[load_img_idx]['type'] = 'InferencerLoader'\n break\n if load_img_idx == -1:\n raise ValueError(\n f'None of {self.loading_transforms} is found in the test '\n 'pipeline')\n\n return Compose(pipeline_cfg)", "title": "" }, { "docid": "50f7bb0af70194574f0027951c2f86cb", "score": "0.5947056", "text": "def __init__(self, n_cpu=-1, verbose=False, chunksize=100):\n self._pipe = []\n self._mol_feed = []\n self.n_cpu = n_cpu if n_cpu else -1\n self.num_input = 0\n self.num_output = 0\n self.verbose = verbose\n self.chunksize = chunksize", 
"title": "" }, { "docid": "aa5314cbe541c5930b27264230780065", "score": "0.5921661", "text": "def __init__(\n self, \n train=False, \n batch_size=32, \n # workers=4, \n size=384, \n ):\n pipe = ExternalSourcePipeline(\n train=train, \n size=size, \n batch_size=batch_size,\n )\n pipe.build()\n self.loader = DALIClassificationIterator(\n pipe,\n size=len(ExternalInputIterator(train, batch_size)),\n auto_reset=True,\n fill_last_batch=train, # want real accuracy on validiation\n last_batch_padded=True, # want epochs to have the same length\n )", "title": "" }, { "docid": "a065361fd340017a41fc2f621bd0bcec", "score": "0.5920541", "text": "def make_pipeline():\n \n # Base universe set to the Q500US\n base_universe = Q1500US()\n\n # Factor of yesterday's close price.\n yesterday_close = USEquityPricing.close.latest\n \n pipe = Pipeline(\n screen = base_universe,\n columns = {\n 'close': yesterday_close,\n }\n )\n return pipe", "title": "" }, { "docid": "2d982bee1283a20cfe6aa6ab03f49366", "score": "0.591779", "text": "def __init__(self, transforms, partition, params={}):\n super().__init__(transforms, params)\n self.partition = partition\n \n self.S1 = self.transforms[0]\n self.T1 = self.transforms[1]\n self.S2 = self.transforms[2]\n self.T2 = self.transforms[3]", "title": "" }, { "docid": "be7f9fb043918cf1344649a4ce40fa5b", "score": "0.5916246", "text": "def __init__(self, generator, *args, **kwargs):\n\t\tself.outpipe, inpipe = multiprocessing.Pipe(duplex=False)\n\t\tdef feed():\n\t\t\ti = generator(*args, **kwargs)\n\t\t\twhile 1:\n\t\t\t\ttry:\n\t\t\t\t\tinpipe.send(next(i))\n\t\t\t\texcept StopIteration:\n\t\t\t\t\tinpipe.send(StopIteration)\n\t\t\t\t\tbreak\n\t\tself.process = multiprocessing.Process(target=feed)\n\t\tself.process.start()", "title": "" }, { "docid": "50a315ac5636511eaa9d4b4cf70a75d4", "score": "0.59139866", "text": "def create_pipeline_for_kfold(self, args):\n return ClassificationPipeline(args=args)", "title": "" }, { "docid": "9075ccae3c5fb37770d40239b23ba280", "score": "0.59128517", "text": "def make_pipeline(\n parameter_task: ClassVar[PipelineInput],\n final_task: ClassVar[BaseTask],\n) -> ClassVar[BaseTask]:\n\n @luigi_util.requires(parameter_task, final_task)\n class Pipeline(BaseTask):\n def run(self):\n copy(self.input()[1].path, self.output().path)\n\n return Pipeline", "title": "" }, { "docid": "5dd087eff99a107a203e694cc7fac161", "score": "0.5909479", "text": "def __init__(self, out_pipe=None, metadata=False):\n super(CloudSyncProcess, self).__init__()\n\n # Instantiate objects\n self.plateRegionDetector = CarDetector()\n self.plateRecog = PlateRecognition()\n self.writePlate = WritePlate()\n self.createJSON = CreateJSON()\n self.uploadToS3 = UploadToS3()\n self.out_pipe = out_pipe\n self.inputs_queue = collections.deque(maxlen=50)\n self.state = True\n self.metadata = metadata", "title": "" }, { "docid": "f7cfe6444859f827afdb3d0a6d13cc48", "score": "0.59082115", "text": "def __init__(self, pipeline_dir, prepared_dir, classifier_dir, pretrained, normalizer_dir, model_yaml_path,\n encoder_level='char', decoder_level='char', onmt_dir='./OpenNMT-py', language='en'):\n self.pipeline_dir = pipeline_dir\n self.prepared_dir = prepared_dir\n self.classifier_dir = classifier_dir\n self.pretrained = pretrained\n self.normalizer_dir = normalizer_dir\n self.encoder_level = encoder_level\n self.decoder_level = decoder_level\n self.onmt_dir = onmt_dir\n\n check_folder(self.pipeline_dir)\n check_folder(self.pipeline_dir + '/tmp')\n self.Classifier = Classifier(pretrained, 
prepared_dir, classifier_dir)\n self.Normalizer = Normalizer(model_yaml_path, prepared_dir, normalizer_dir,\n norm_only=False if pretrained else True,\n onmt_dir=onmt_dir,\n encoder_level=encoder_level,\n decoder_level=decoder_level,\n language=language)", "title": "" }, { "docid": "173c5708a59c07d682463bda86fe9522", "score": "0.5895883", "text": "def construct(self):\n return self.cls(*self.args, **self.kwargs)", "title": "" }, { "docid": "2109fa5f14b17fa6b90b7b48133cfbbc", "score": "0.58690727", "text": "def __init__(self, tree, name):\n self.pipe_buffer = b\"\"\n super(OutputPipeBytes, self).__init__(tree, name)", "title": "" }, { "docid": "2bfafba3ec83afca28657b5d73395e89", "score": "0.5868343", "text": "def __init__(self, transforms):\n\n self.transforms = transforms", "title": "" }, { "docid": "ea32b51ad6208424eba83a9b1b5a30da", "score": "0.58639", "text": "def fill_pipeline(transform_list, sg_type, **kwargs):\n kwargs = _override_bad_defaults(dict(kwargs))\n function_list = L()\n settings = {}\n for f in transform_list:\n usable_kwargs = get_usable_kwargs(f, kwargs)\n function_list += f(**usable_kwargs)\n settings.update(usable_kwargs)\n warn_unused(kwargs, settings)\n return AudioToSpec(nn.Sequential(*function_list), settings={**sg_type, **settings})", "title": "" }, { "docid": "692a6c6140e319c628fb07735d23ab3a", "score": "0.5855708", "text": "def __init__(self, arguments: Sequence[T], cls: AggClass) -> None:", "title": "" }, { "docid": "f8f41748a02b08392d02ccfc87d05474", "score": "0.5849596", "text": "def __init__(self, **kwargs):\n if self.name is None:\n raise NotImplementedError('Pipeline steps must define a name attribute')\n\n if self.slug is None:\n raise NotImplementedError('Pipeline steps must define a slug attribute')\n\n self.debug = False\n self._debug_log = {}\n\n self.configure(**kwargs)", "title": "" }, { "docid": "b3fdae9a875bf684d535f486570875ef", "score": "0.5835248", "text": "def map(self, pipeline_stream): # pragma: no cover\n pass", "title": "" }, { "docid": "00622d6d2914bd47e18ea6423f11327a", "score": "0.5831007", "text": "def __init__(self, component_config: Dict[Text, Any] = None) -> None:\n\n super().__init__(component_config)\n self.nlp = stanza.Pipeline(\n lang=component_config[\"lang\"], # the language model from Stanza to user\n dir=component_config[\n \"cache_dir\"\n ], # the caching directory to load the model from\n processors=\"tokenize,pos,lemma\", # info: https://stanfordnlp.github.io/stanza/pipeline.html#processors\n tokenize_no_ssplit=True, # disable sentence segmentation\n )", "title": "" }, { "docid": "83bbe56baeb26ddf659a21d5353939f2", "score": "0.5820844", "text": "def build_pipeline(self, flawed_yaml=False, verbose=True) -> Pipeline:\n\n if verbose:\n print(\"Building pipeline...\")\n\n # check of the base level definition\n if self.parsed_yaml.get('pipeline'):\n if flawed_yaml:\n pipeline_content = list_of_dicts_to_dict(self.parsed_yaml['pipeline'])\n else:\n pipeline_content = self.parsed_yaml['pipeline']\n else:\n raise PipelineParsingError(\"Missing pipeline definition.\")\n\n if not is_subset_of(\n ['name', 'inputs', 'outputs', 'components'],\n list(pipeline_content.keys())\n ):\n raise PipelineParsingError(\"Pipeline missing key attributes.\")\n\n pipeline = Pipeline(pipeline_content['name'])\n # now we have the pipeline lets start to feed it\n task_factory_instance = TaskFactory()\n for component in pipeline_content['components']:\n\n if len(component) != 1:\n raise PipelineParsingError(\n \"Component root should be list of dicts of 
length one with its component.name as a key!\"\n )\n else:\n c_name = list(component.keys())[0]\n\n if flawed_yaml:\n component_dict = list_of_dicts_to_dict(component[c_name])\n else:\n component_dict = component[c_name]\n # checking if task contain all required sections\n if not is_subset_of(['runner', 'inputs', 'outputs'], list(component_dict.keys())):\n raise PipelineParsingError(f'Component {c_name} missing one of the key arguments!')\n\n task_from_component = task_factory_instance.spawn_task(c_name, **component_dict)\n pipeline.add_task(task_from_component)\n\n # check if all pipeline inputs are declared on command line and add them one by one\n for pipeline_input in pipeline_content['inputs']:\n if pipeline_input in self.command_line_inputs:\n consumable_from_input = Consumable(pipeline_input, self.command_line_inputs[pipeline_input])\n pipeline.add_input(consumable_from_input)\n else:\n raise PipelineParsingError(\n f\"Pipeline declared input {pipeline_input} not found in command line inputs!\")\n\n # this could be done in one shove, but there should be in principle CRUD\n # this way it would be easy to extend in future :)\n for pipeline_output in pipeline_content['outputs']:\n if pipeline_output not in pipeline.expected_outputs:\n pipeline.add_output(pipeline_output)\n\n return pipeline", "title": "" }, { "docid": "acdb59a215198e34b5e13d91111bfb68", "score": "0.58177304", "text": "def make_pipeline(imageset_path, output_dir):\n p = Pipeline(imageset_path, output_dir)\n p.random_distortion(probability=0.7, grid_width=4, grid_height=4, magnitude=8)\n p.flip_left_right(probability=0.5)\n p.flip_top_bottom(probability=0.5)\n p.zoom(probability=0.3, min_factor=1.1, max_factor=1.4)\n p.rotate(probability=0.5, max_left_rotation=10, max_right_rotation=10)\n return p", "title": "" }, { "docid": "9479f95dda4e9e23115ad62dd8728ccd", "score": "0.58167815", "text": "def _construct(self, **kwargs):\n pass", "title": "" }, { "docid": "1e29e6b5cadaa86cd8a59e4d6ef77ea1", "score": "0.58157235", "text": "def _build_meta(meta: str, pipelines: Iterable[\"Pipeline\"]) -> \"Pipeline\":\n return Pipeline(\n protocol=[\n {\n \"meta\": meta,\n \"pipelines\": [pipeline.protocol for pipeline in pipelines],\n },\n ],\n )", "title": "" }, { "docid": "300d768f18b7d7dca2acaabc06d628a8", "score": "0.5812503", "text": "def pipeline(self, transaction=True, shard_hint=None):\n return Pipeline(\n self.connection_pool, self.response_callbacks, transaction, shard_hint\n )", "title": "" }, { "docid": "8b860907e115c2b23437f9aa6da9ac7a", "score": "0.5802891", "text": "def create_pipeline(model, scaler=StandardScaler(), encoder=OneHotEncoder(handle_unknown='ignore')):\n \n num_pipeline = Pipeline([\n ('imputer', SimpleImputer(strategy='median')),\n ('scaler', scaler), \n ])\n\n cat_pipeline = Pipeline([\n ('imputer', SimpleImputer(strategy='most_frequent')),\n ('encoder', encoder),\n ])\n\n preprocessor = ColumnTransformer([\n ('num', num_pipeline, cfg.NUMERICAL),\n ('cat', cat_pipeline, cfg.CATEGORICAL),\n ])\n\n pipeline = Pipeline([\n ('preprocessor', preprocessor),\n ('model', model),\n ])\n\n return pipeline", "title": "" }, { "docid": "3fcdd338a6dadd3e1ee45fef95423858", "score": "0.57971466", "text": "def __init__(self, project_info):\n AnatomicalPipeline.__init__(self, project_info)\n\n if len(project_info.subject_sessions) > 0:\n subject_id = \"_\".join((self.subject, self.global_conf.subject_session))\n subject_session = self.global_conf.subject_session\n else:\n subject_id = self.subject\n subject_session = 
\"\"\n\n self.stages = {\n \"Segmentation\": SegmentationStageUI(\n subject=self.subject,\n session=subject_session,\n bids_dir=project_info.base_directory,\n output_dir=project_info.output_directory,\n ),\n \"Parcellation\": ParcellationStageUI(\n pipeline_mode=\"Diffusion\",\n subject=self.subject,\n session=subject_session,\n bids_dir=project_info.base_directory,\n output_dir=project_info.output_directory,\n ),\n }\n\n for stage in list(self.stages.keys()):\n if project_info.subject_session != \"\":\n self.stages[stage].stage_dir = os.path.join(\n self.base_directory,\n \"derivatives\",\n __nipype_directory__,\n self.subject,\n project_info.subject_session,\n self.pipeline_name,\n self.stages[stage].name,\n )\n else:\n self.stages[stage].stage_dir = os.path.join(\n self.base_directory,\n \"derivatives\",\n __nipype_directory__,\n self.subject,\n self.pipeline_name,\n self.stages[stage].name,\n )\n\n self._init_and_add_listeners_to_stage_traits_ui(subject_id=subject_id)", "title": "" }, { "docid": "81b1ba8c51b16f19b669688b046ad073", "score": "0.5785319", "text": "def __init__(self, testLabel=None, useCache=False, wwwCache=True, summaryProcessing=\"delay\",\n lazyPlot='sensor', *args, **kwargs):\n pipeBase.Task.__init__(self, *args, **kwargs)\n\n self.testSets = {}\n self.testLabel = testLabel\n\n # if we're not going to use the cached values\n # we'll have to clean the output directory on our first call\n self.useCache = useCache\n self.clean = not useCache\n self.wwwCache = wwwCache\n self.summaryProcessing = summaryProcessing\n\n self.summOpt = {\n 'delay' : 'delay', # make summary figures after the final CCD is processed\n 'summOnly' : 'summOnly', # only make summary figures\n 'none' : 'none' # don't make summary figures\n }\n \n\n options = ['none', 'sensor', 'all']\n if not lazyPlot in options:\n raise ValueError, \"lazyPlot must be: \"+ \",\".join(options) + \" You said: \"+lazyPlot\n \n self.lazyPlot = lazyPlot", "title": "" }, { "docid": "7fe785034ce07b8448dfa46ca1e74652", "score": "0.5779717", "text": "def __init__(self, *args, **kwargs):\n\n self.callback = kwargs.pop('callback', None)\n self.mapper = kwargs.pop('mapper', itertools.imap)\n self.cacher = kwargs.pop('cacher', {})\n self.loader = kwargs.pop('loader', None)\n self.fit_callback = kwargs.pop('fit_callback', None)\n self.max_iter = kwargs.pop('max_iter', 100)\n\n composite_grid = args[1]\n if isinstance(composite_grid, dict):\n grid = composite_grid\n self.transforms = {}\n else:\n grid, self.transforms = make_grid(*split_constraints_and_transforms(composite_grid),\n max_iter=self.max_iter)\n args = list(args)\n print('grid length: %d' % len(ParameterGrid(grid)))\n args[1] = grid\n args = tuple(args)\n\n super(GridSearchCVParallel, self).__init__(*args, **kwargs)", "title": "" }, { "docid": "c7703762039abeeb6c5ef3f897e95c91", "score": "0.5747235", "text": "def pipeline(self, transaction=True, shard_hint=None):\n if isinstance(self.client, redis.RedisCluster):\n p = ClusterPipeline(\n nodes_manager=self.client.nodes_manager,\n commands_parser=self.client.commands_parser,\n startup_nodes=self.client.nodes_manager.startup_nodes,\n result_callbacks=self.client.result_callbacks,\n cluster_response_callbacks=self.client.cluster_response_callbacks,\n cluster_error_retry_attempts=self.client.cluster_error_retry_attempts,\n read_from_replicas=self.client.read_from_replicas,\n reinitialize_steps=self.client.reinitialize_steps,\n lock=self.client._lock,\n )\n\n else:\n p = Pipeline(\n 
connection_pool=self.client.connection_pool,\n response_callbacks=self._MODULE_CALLBACKS,\n transaction=transaction,\n shard_hint=shard_hint,\n )\n\n p._encode = self._encode\n p._decode = self._decode\n return p", "title": "" }, { "docid": "2aea39b8aac4d1e8050f137b1e1fc1af", "score": "0.57460785", "text": "def new_pipeline(self, pipe_path):\r\n _new_pipe = read_config(pipe_path) # xml.etree object\r\n new_meths = [elem.attrib['method'] for elem in _new_pipe]\r\n _found_methods = self.scan_meths(_new_pipe)\r\n self.pipeline.container = [] # erasing current method container\r\n _new_container = self._get_meth_container(_found_methods)\r\n self.pipeline.load_new_pipeline(_new_container, _new_pipe)", "title": "" }, { "docid": "2d2143812ac1957bbfc9874b9251e8fb", "score": "0.5745986", "text": "def __init__(self, *pipeline_exprs, **kwargs):\n self.pipelines_exprs = pipeline_exprs\n self.setup_tables = kwargs.get('setup_tables', False)\n self.infrastructure_class = kwargs.get('infrastructure_class', None)\n\n self.session = kwargs.get('session', None)\n\n self.process_classes = OrderedDict()\n for pipeline_expr in self.pipelines_exprs:\n for process_class in pipeline_expr:\n process_name = process_class.__name__.lower()\n if process_name not in self.process_classes:\n self.process_classes[process_name] = process_class\n\n self.processes = {}\n self.is_session_shared = True\n\n # Determine which process follows which.\n self.followers = OrderedDict()\n # A following is a list of process classes followed by a process class.\n # Todo: Factor this out, it's confusing. (Only used in ActorModelRunner now).\n self.followings = OrderedDict()\n for pipeline_expr in self.pipelines_exprs:\n previous_name = None\n for process_class in pipeline_expr:\n process_name = process_class.__name__.lower()\n try:\n follows = self.followings[process_name]\n except KeyError:\n follows = []\n self.followings[process_name] = follows\n\n try:\n self.followers[process_name]\n except KeyError:\n self.followers[process_name] = []\n\n if previous_name and previous_name not in follows:\n follows.append(previous_name)\n followers = self.followers[previous_name]\n followers.append(process_name)\n\n previous_name = process_name", "title": "" }, { "docid": "926611e9e3041022bc0031f8fea5fa07", "score": "0.57436544", "text": "def pipeline1(images):\n # Pipeline parameters\n bin_thresholds = [np.percentile(images[0], 93)/np.max(images[0])]\n directions = [np.array([np.cos(t), np.sin(t)]) for t in np.linspace(0, 2 * np.pi, 8)[:-1]]\n n_iterations = np.linspace(1,21, 5).astype(int).tolist()\n \n features = [('bottleneck', Amplitude(metric='bottleneck', n_jobs=-1)), \n ('PE', PersistenceEntropy(n_jobs=-1))]\n\n # Make filtrations\n binned_steps = [('binarizer_{}'.format(t), Binarizer(threshold=t, n_jobs=-1)) for t in bin_thresholds]\n filtrations = [('height_{}'.format(d), HeightFiltration(direction=d, n_jobs=-1)) for d in directions]\n filtrations += [('erosion_{}'.format(i), ErosionFiltration(n_iterations= i, n_jobs=-1)) for i in n_iterations]\n filtrations += [('dilation_{}'.format(i), DilationFiltration(n_iterations= i, n_jobs=-1)) for i in n_iterations]\n\n # Make pipelines\n cubical_lower = ('cubical', CubicalPersistence(n_jobs=-1))\n\n partial_pipeline_steps = []\n partial_pipeline_steps.append([cubical_lower])\n partial_pipeline_steps.append([('inverter', Inverter(n_jobs=-1)), cubical_lower])\n\n for b, f in itertools.product(binned_steps, filtrations):\n partial_pipeline_steps.append([b,f, ('cubical', 
CubicalPersistence(n_jobs=-1))])\n\n\n feature_pipelines = []\n for s, f in itertools.product(partial_pipeline_steps, features):\n feature_pipelines.append(Pipeline(s + [f]))\n \n return feature_pipelines", "title": "" }, { "docid": "9f33513d206c5d8759a33d538baa6b53", "score": "0.5739117", "text": "def __init__(self,\n configuration=CONFIGURATION,\n components=COMPONENTS_CONFIG,\n crawler=None,\n http_client=None,\n task_queue=None,\n link_extractor=None,\n parse_link=None,\n pipeline=None,\n **kwargs):\n self.config = {}\n if verify_configuration(configuration):\n self.config.update(configuration)\n\n # cover the config item with kwargs\n to_remove = []\n for k, v in kwargs.items():\n if k in CONFIGURATION:\n to_remove.append(k)\n self.config.__setitem__(k, v)\n\n for k in to_remove:\n kwargs.pop(k)\n\n self.__dict__.update(kwargs)\n\n try:\n self.logger = dynamic_import(self.config.get('log_init_fn', None),\n ReturnType.FUNCTION,\n self.config)\n except Exception as e:\n print('The dynamic function call has an error: %s' % e)\n print('next, call the default function for initialize log system.')\n self.logger = _init_logging(self.config)\n\n self.crawler = crawler if crawler else dynamic_import(components['crawler'],\n ReturnType.CLASS,\n name=self.config['name'],\n roots=self.config['roots'],\n strict=self.config['strict'],\n max_redirect=self.config['max_redirect'],\n max_retries=self.config['max_retries'],\n task_queue=task_queue,\n http_client=http_client,\n logger=self.logger)\n\n if callable(parse_link):\n self.crawler.parse_link = parse_link\n\n if not isinstance(self.crawler, Crawler):\n raise ValueError('The crawler is invalid and must be a subclass of %s.%s, got %s.%s'\n % (Crawler.__module__,\n Crawler.__name__,\n self.crawler.__class__.__module__,\n self.crawler.__class__.__name__)\n )\n\n self.link_extractor = link_extractor if link_extractor else dynamic_import(components['link_extractor'],\n ReturnType.CLASS,\n allow=self.config['allowed_rule'],\n deny=self.config['denied_rule'],\n allow_domains=self.config[\n 'allow_domains'],\n deny_domains=self.config[\n 'deny_domains'])\n\n if not isinstance(self.link_extractor, LinkExtractor):\n raise ValueError('The link extractor is invalid and must be a subclass of %s.%s, got %s.%s'\n % (LinkExtractor.__module__,\n LinkExtractor.__name__,\n self.link_extractor.__class__.__module__,\n self.link_extractor.__class__.__name__)\n )\n\n self.pipeline = pipeline if pipeline else dynamic_import(components['pipeline'],\n ReturnType.CLASS)\n\n if not isinstance(self.pipeline, Pipeline):\n raise ValueError('The pipeline is invalid and must be a subclass of %s.%s, got %s.%s'\n % (Pipeline.__module__,\n Pipeline.__name__,\n self.pipeline.__class__.__module__,\n self.pipeline.__class__.__name__)\n )", "title": "" }, { "docid": "f74a4fbf555a764cc1b9f78de32f7817", "score": "0.5734905", "text": "def __init__(self, *args, **kwargs) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "121c31ab76197d858a401318f11adf46", "score": "0.573249", "text": "def __init__(self, stages: List[Tuple]):\n conns = [reversed(mp.Pipe(duplex=False)) for _i in range(len(stages) + 1)]\n conns = list(itertools.chain(*conns))\n self.send_ = conns[0]\n self.recv_ = conns[-1]\n self.cores = schedule_workers(1)[0]\n self.ps = [\n self._launch_stage(stage, conns[i * 2 + 1], conns[i * 2 + 2])\n for i, stage in enumerate(stages)\n ]", "title": "" }, { "docid": "4f4a2aa9955de3f7db102b2d1d431e1c", "score": "0.57299525", "text": "def __init__(self,\n model_uri: Text,\n 
name: Text = None,\n enable_cache: Optional[bool] = True,\n steps_dict: Dict[Text, BaseStep] = None,\n backend: OrchestratorBaseBackend = None,\n metadata_store: Optional[ZenMLMetadataStore] = None,\n artifact_store: Optional[ArtifactStore] = None,\n datasource: Optional[BaseDatasource] = None,\n pipeline_name: Optional[Text] = None):\n if model_uri is None:\n raise AssertionError('model_uri cannot be None.')\n self.model_uri = model_uri\n super(BatchInferencePipeline, self).__init__(\n name=name,\n enable_cache=enable_cache,\n steps_dict=steps_dict,\n backend=backend,\n metadata_store=metadata_store,\n artifact_store=artifact_store,\n datasource=datasource,\n pipeline_name=pipeline_name,\n model_uri=model_uri,\n )", "title": "" }, { "docid": "6d965b07ecbc0d41a1bf7a42e64f5d6a", "score": "0.5728661", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.inputs = 3\n self.outputs = 2", "title": "" }, { "docid": "c6b14b28102f59ae38dfd3edcbf66cd3", "score": "0.57243526", "text": "def __init__(self, pipeline_id, variant=\"TicTacToe\", lr=None, reg=None):\n # game environment\n if variant == \"Connect4\":\n self.input_shape = (6, 7, 3)\n self.num_possible_moves = 7\n self.game = game.Connect4Optimized()\n if variant == \"TicTacToe\":\n self.input_shape = (3, 3, 3)\n self.num_possible_moves = 9\n self.game = game.TicTacToeOptimized()\n\n self.variant = variant\n\n self.lr = lr\n if lr is None:\n self.lr = config.NEURAL_NETWORKS['learning_rate']\n\n self.reg = reg\n if reg is None:\n self.reg = config.NEURAL_NETWORKS['regularization_strength']\n\n # memory\n self.memory = memory.PositionMemory(variant=variant)\n\n # model\n self.pipeline_id = pipeline_id\n self.model = model.AZModel(\n memory=self.memory,\n input_shape=self.input_shape,\n num_possible_moves=self.num_possible_moves,\n model_id=self.pipeline_id,\n lr=self.lr,\n reg=self.reg\n )\n\n # agent\n self.agent = agent.AlphaZeroAgent(model=self.model, variant=variant)\n \n # trajectories counter\n self.seen_trajectories = None\n self.unique_trajectories = None\n \n # evaluation\n self.win_ratio = []\n self.draw_ratio = []\n\n # logger\n # self.logger = logs.get_logger()", "title": "" }, { "docid": "ad20a6c66e2d9b03ac3cfa07227a43d4", "score": "0.57240164", "text": "def set_pipeline(self):\n memory = self.kwargs.get(\"pipeline_memory\", None)\n if memory:\n memory = mkdtemp()\n #Feature Engineering\n pipe_time = make_pipeline(TimeFeaturesEncoder(time_column='pickup_datetime'), OneHotEncoder())\n pipe_distance = make_pipeline(DistanceTransformer(),StandardScaler())\n pipe_distance_to_center = make_pipeline(DistanceFromCenter(),StandardScaler())\n pipe_calculation_direction = make_pipeline(CalculationDirection(),StandardScaler())\n pipe_manhattan_dist = make_pipeline(MinkowskiDistance(p=1),StandardScaler())\n pipe_euclidian_dist = make_pipeline(MinkowskiDistance(p=2),StandardScaler())\n time_col = ['pickup_datetime']\n dist_cols = ['pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude']\n features = [\n ('time', pipe_time, time_col),\n ('distance', pipe_distance, dist_cols)\n ]\n if self.dist_to_center == True:\n self.mlflow_log_param('feature1', 'distance_to_center')\n features.append(\n ('distance_to_center', pipe_distance_to_center, dist_cols)\n )\n if self.calculation_direction == True:\n self.mlflow_log_param('feature2', 'calculation_direction')\n features.append(\n ('calculation_direction', pipe_calculation_direction, dist_cols)\n )\n if self.manhattan_dist == True:\n 
self.mlflow_log_param('feature3', 'manhattan_dist')\n features.append(\n ('manhattan_dist', pipe_manhattan_dist, dist_cols)\n )\n if self.euclidian_dist == True:\n self.mlflow_log_param('feature4', 'euclidian_dist')\n features.append(\n ('euclidian_dist', pipe_euclidian_dist, dist_cols)\n )\n\n feat_eng_pipeline = ColumnTransformer(features)\n\n # Main Pipeline\n self.mlflow_log_param('student_name', 'Felix Fähnrich')\n self.pipeline = Pipeline([\n ('feat_eng', feat_eng_pipeline),\n ('regressor', self.get_estimator())\n ])\n return self.pipeline", "title": "" }, { "docid": "d94270ff8263caede96f781706f47b7c", "score": "0.57115674", "text": "def test_custom_pipeline_is_pipeline():\n data = pycaret.datasets.get_data(\"juice\")\n pc = pycaret.classification.setup(\n data=data,\n custom_pipeline=Pipeline(\n [(\"scaler\", StandardScaler()), (\"pca\", PCA(n_components=5))]\n ),\n )\n X, _ = pc.pipeline.transform(pc.X, pc.y)\n assert X.shape[1] == 5", "title": "" }, { "docid": "46423198992f08d9b44899dd948a1c30", "score": "0.5710767", "text": "def __init__(self, img, camera_calibration_op):\n\t\tPipelineOp.__init__(self)\n\t\tself.__img = np.copy(img)\n\t\tself.__camera_calibration_op = camera_calibration_op", "title": "" }, { "docid": "d776f5c79c3579699bf4a0a7d15edb95", "score": "0.5706901", "text": "def __init__(self, filename='', flags=[]):\n\t\tif in_rlang():\n\t\t\tself._cmd_coro = self._cmd_rlang\n\t\t\treturn\n\t\ttry:\n\t\t\tif os.name == \"nt\":\n\t\t\t\tmypipename = os.environ['r2pipe_path']\n\t\t\t\twhile 1:\n\t\t\t\t\thPipe = windll.kernel32.CreateFileA(szPipename + mypipename, GENERIC_READ | GENERIC_WRITE, 0, None,\n\t\t\t\t\t OPEN_EXISTING, 0, None)\n\t\t\t\t\tif hPipe != INVALID_HANDLE_VALUE:\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"Invalid Handle Value\")\n\n\t\t\t\t\tif windll.kernel32.GetLastError() != ERROR_PIPE_BUSY:\n\t\t\t\t\t\tprint(\"Could not open pipe\")\n\t\t\t\t\t\treturn\n\t\t\t\t\telif (windll.kernel32.WaitNamedPipeA(szPipename, 20000)) == 0:\n\t\t\t\t\t\tprint(\"Could not open pipe\\n\")\n\t\t\t\t\t\treturn\n\n\t\t\t\twindll.kernel32.WriteFile(hPipe, \"e scr.color=false\\n\", 18, byref(cbWritten), None)\n\t\t\t\twindll.kernel32.ReadFile(hPipe, chBuf, BUFSIZE, byref(cbRead), None)\n\t\t\t\tself.pipe = [hPipe, hPipe]\n\t\t\t\tself._cmd_coro = self._cmd_pipe\n\t\t\telse:\n\t\t\t\tself.pipe = [int(os.environ['R2PIPE_IN']), int(os.environ['R2PIPE_OUT'])]\n\t\t\t\tself._cmd_coro = self._cmd_pipe\n\t\t\tself.url = \"#!pipe\"\n\t\t\treturn\n\t\texcept Exception:\n\t\t\tpass\n\n\t\tif filename.startswith(\"#!pipe\"):\n\t\t\traise Exception(\"ERROR: Cannot use #!pipe without R2PIPE_{IN|OUT} env\")", "title": "" }, { "docid": "77bc51dd2b6f78d01f7c94d43a425a69", "score": "0.57048976", "text": "def __new__(cls, *args, **kwargs):\n \n # TODO: Make work later?\n #\n # if callable(x):\n # def f(y):\n # s = Chained.__new__(cls, x.__call__(y), *args, **kwargs)\n # s.__init__(*args, **kwargs)\n # return s\n # return lambda y : f(y)\n \n label = False\n if 'label' in kwargs.keys() :\n label = kwargs['label']\n del kwargs['label']\n\n # Actually construct an instance of the child class\n result = super(Chained, cls).__new__(cls)# , *args, **kwargs)\n result._cached_properties = {}\n result._drs_multiplied = {}\n result._compute_dr_wrt_cached = {} \n result._nonuser_attributes = set(dir(result)) \n\n # These contain all the arguments to init. 
When \"differentiable_wrt\"\n # is called, those particular arguments will be removed from these lists.\n # This is done so that we can manage equality/redundancy operations\n result._static_args = args\n result._static_kwargs = kwargs\n\n if label :\n result._label = label\n \n \n return result", "title": "" }, { "docid": "c04fb196d8ca8ec9435534efdfcedb52", "score": "0.56996936", "text": "def create_pipeline(self, params=None):\n try:\n self.ingest_client.put_pipeline(\n self._name, load_json(self._pipeline_handler._json)\n )\n except Exception as e:\n raise (e)", "title": "" }, { "docid": "432853cdc3fe921ed32a5f241c032f4c", "score": "0.56964093", "text": "def __init__(self, proc):\n\n self._proc = proc\n self.captured = []", "title": "" }, { "docid": "3380d989199f071c63ac7b47504c85ad", "score": "0.5694548", "text": "def __init__(__self__, *,\n data_factory_id: pulumi.Input[str],\n sinks: pulumi.Input[Sequence[pulumi.Input['DataFlowSinkArgs']]],\n sources: pulumi.Input[Sequence[pulumi.Input['DataFlowSourceArgs']]],\n annotations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n description: Optional[pulumi.Input[str]] = None,\n folder: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n script: Optional[pulumi.Input[str]] = None,\n script_lines: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n transformations: Optional[pulumi.Input[Sequence[pulumi.Input['DataFlowTransformationArgs']]]] = None):\n pulumi.set(__self__, \"data_factory_id\", data_factory_id)\n pulumi.set(__self__, \"sinks\", sinks)\n pulumi.set(__self__, \"sources\", sources)\n if annotations is not None:\n pulumi.set(__self__, \"annotations\", annotations)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if folder is not None:\n pulumi.set(__self__, \"folder\", folder)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if script is not None:\n pulumi.set(__self__, \"script\", script)\n if script_lines is not None:\n pulumi.set(__self__, \"script_lines\", script_lines)\n if transformations is not None:\n pulumi.set(__self__, \"transformations\", transformations)", "title": "" }, { "docid": "ecf496058b26931fc6b3f3b8677a9bf9", "score": "0.5691514", "text": "def setup_pipeline(pipeline):\n pipeline.shear(probability=0.4, max_shear_left=15, max_shear_right=15)\n pipeline.flip_left_right(probability=0.5)\n pipeline.flip_top_bottom(probability=0.5)\n pipeline.rotate_random_90(probability=0.75)", "title": "" }, { "docid": "b4ff590fc6e9ad3c15809825d0594f28", "score": "0.5690381", "text": "def build_pipeline():\n pipeline = Pipeline([\n ('count_vectorizer', CountVectorizer(tokenizer=tokenize)),\n ('tfidf_transformer', TfidfTransformer()),\n ('classifier', MultiOutputClassifier(AdaBoostClassifier()))\n ])\n parameters = {\n 'classifier__estimator__n_estimators': [20, 50],\n 'classifier__estimator__learning_rate': [0.75, 1.0],\n 'count_vectorizer__max_features': [30000],\n }\n cv = GridSearchCV(pipeline, parameters,\n cv = 3,\n n_jobs=-1)\n return cv", "title": "" }, { "docid": "9410acaf5b71bd840693ad8250fc0930", "score": "0.5687941", "text": "def __init__(\n self,\n processes=None,\n partition_size=None,\n disable_compression=False,\n no_wrap=None,\n ):\n super(ParallelStream, self).__init__(\n disable_compression=disable_compression, no_wrap=no_wrap\n )\n self.processes = processes\n self.partition_size = partition_size", "title": "" }, { "docid": "b37f664a740fda711af676d6a08abf5b", "score": "0.56806326", "text": "def 
__init__(self, img, color_space, src_color_space='RGB', color_channel=-1):\n\t\tPipelineOp.__init__(self)\n\t\tself.__img = np.copy(img)\n\t\tself.__color_space = color_space.upper()\n\t\tself.__src_color_space = src_color_space.upper()\n\t\tself.__color_channel = color_channel", "title": "" }, { "docid": "33e3da071b0ac4d7416373ff08cb1364", "score": "0.5677405", "text": "def __init__(self, source=[], root=None):\n super().__init__(self._convert_iterable(source))", "title": "" } ]
6afc41a59914e0674305218c865b34ae
Calls openMVG to compute matches
[ { "docid": "4822d5fcfbdeda71bb433a957a3e5049", "score": "0.7459588", "text": "def openmvg_matches(pth_sfm, pth_matches, video_mode=None, force=False):\n\n cmd = \"{}/openMVG_main_ComputeMatches -i {} -o {} -v {}\".format(PATH_OMVG, pth_sfm,\n pth_matches,\n video_mode)\n if not video_mode:\n cmd = '{}/openMVG_main_ComputeMatches -i {} -o {}'.format(PATH_OMVG, pth_sfm,\n pth_matches)\n\n if force:\n cmd = cmd + \" -f 1\"\n os.system(cmd)", "title": "" } ]
[ { "docid": "4e08f7c38ca53640306f5e8ebfb60f23", "score": "0.5903164", "text": "def exec_matching(infr, vsone=False, prog_hook=None):\n if infr.verbose:\n print('[infr] exec_matching')\n #from ibeis.algo.hots import graph_iden\n ibs = infr.ibs\n aid_list = infr.aids\n cfgdict = {\n 'can_match_samename': True,\n 'K': 3,\n 'Knorm': 3,\n 'prescore_method': 'csum',\n 'score_method': 'csum'\n }\n # TODO: use current nids\n qreq_ = ibs.new_query_request(aid_list, aid_list, cfgdict=cfgdict)\n cm_list = qreq_.execute(prog_hook=prog_hook)\n infr.cm_list = cm_list\n infr.qreq_ = qreq_", "title": "" }, { "docid": "c5f52baa01ee86fba941529b30e01877", "score": "0.587764", "text": "def main():\n\n logging.info(\"matching process started at {0}\".format(datetime.now()))\n\n parser = argparse.ArgumentParser(\n description = 'Match patient phenotypes in the GeneYenta DB. If a patient ID is provided, match that patient to all other patients, otherwise match all patients to all other patients.'\n )\n\n parser.add_argument('-id', '--patient_id', nargs='?', const=0, default=0)\n parser.add_argument('-id2', '--patient_id2', nargs='?', const=0, default=0)\n parser.add_argument('-f', '--force', action='store_true')\n args = parser.parse_args()\n patient_id = args.patient_id\n patient_id2 = args.patient_id2\n force_match = args.force\n\n m = GYMatcher()\n \n process_attempts = 0\n existing_process = m.isMatchProcessing()\n while existing_process and process_attempts < PROCESS_RETRIES:\n process_attempts += 1\n time.sleep(PROCESS_WAIT_TIME)\n existing_process = m.isMatchProcessing()\n\n if existing_process:\n logging.error(\n \"matching process timed out waiting for a previous matching \" \\\n \"process to complete at {0}\".format(datetime.now()))\n m.notifyMatchProcessTimedOut()\n else:\n m.setMatchProcessStarted()\n\n if patient_id:\n patient = m.fetchPatient(patient_id)\n if patient_id2:\n patient2 = m.fetchPatient(patient_id2)\n m.matchPatientToPatient(patient, patient2, force_match)\n else:\n m.matchPatientToAll(patient, force_match)\n else:\n m.matchAllPatients(force_match)\n\n m.setMatchProcessFinished()\n\n logging.info(\"matching process completed at {0}\".format(datetime.now()))", "title": "" }, { "docid": "3daa1f6173e9497387d7e7b8f8641c37", "score": "0.5822397", "text": "def catalogmatch(conn, sources, catalog, imobj, search_radius, save):\n catalog_matched = []\n\n cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n \n match_logger.info('Attempting to match {} sources from this image to '\n 'the {} sky catalog...'.format(len(sources), catalog))\n\n # Print results without saving to database\n if not save:\n # Dump sources into a temporary table\n sql = (\n '''\n CREATE TEMP TABLE temp_source (\n src_id INTEGER,\n ra DOUBLE PRECISION,\n dec DOUBLE PRECISION\n );\n ''')\n cur.execute(sql)\n conn.commit()\n for src in sources:\n cur.execute('''INSERT INTO temp_source (\n src_id, ra, dec) VALUES (%s, %s, %s)''', (\n src.src_id, src.ra, src.dec))\n conn.commit()\n # Find nearest neighbor within FOV & \"match\" if within half a beam\n sql = '''SELECT a.src_id, bb.id AS catalog_src_id,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) AS sep,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) < %s AS match\n FROM temp_source AS a, LATERAL (\n SELECT b.* FROM radcat.{} AS b\n WHERE q3c_join(a.ra, a.dec, b.ra, b.dec, %s)\n ORDER BY q3c_dist(a.ra, a.dec, b.ra, b.dec) ASC LIMIT 1) AS bb'''\n values = (0.5*imobj.bmin, 2.*imobj.radius)\n cur.execute(psycopg2.sql.SQL(sql).format(\n psycopg2.sql.Identifier(catalog)), 
values)\n rows = cur.fetchall()\n cur.execute('DROP TABLE temp_source')\n conn.commit()\n\n match_logger.info('-------------------------------------------------'\n '-------------')\n match_logger.info('VLITE_src_id match catalog_src_id '\n 'separation (arcsec)')\n match_logger.info('-------------------------------------------------'\n '-------------') \n for row in rows:\n if row['match']:\n catalog_matched.append(row['catalog_src_id'])\n match_logger.info('{}\\t\\t{}\\t{}\\t{}'.format(\n row['src_id'], row['match'], row['catalog_src_id'], row['sep']))\n\n # Store results for insertion into database\n else:\n # Skip the sources which already have results for this catalog\n # (from a different image)\n assoc_ids = []\n for src in sources:\n already_matched = dbio.check_catalog_match(conn, src.id, catalog)\n if already_matched:\n continue\n else:\n assoc_ids.append(src.id)\n match_logger.info(' -- found previous matching results for {} sources'.\n format(len(sources) - len(assoc_ids)))\n\n # Find nearest neighbor within half a beam\n sql = '''SELECT a.id AS assoc_id, bb.*, \n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) AS sep\n FROM assoc_source AS a, LATERAL (\n SELECT b.* FROM radcat.{} AS b\n WHERE a.id IN %s AND q3c_join(a.ra, a.dec, b.ra, b.dec, %s)\n ORDER BY q3c_dist(a.ra, a.dec, b.ra, b.dec) ASC LIMIT 1) AS bb'''\n values = (tuple(assoc_ids), (0.5*(imobj.bmin/3600.)))\n cur.execute(psycopg2.sql.SQL(sql).format(\n psycopg2.sql.Identifier(catalog)), values)\n rows = cur.fetchall()\n\n matched_ids = []\n for row in rows:\n matched_ids.append(row['assoc_id'])\n csrc = catalogio.CatalogSource()\n dbclasses.dict2attr(csrc, row)\n catalog_matched.append(csrc)\n\n for src in sources:\n if src.id in matched_ids:\n # Found a match!\n try:\n src.nmatches += 1\n except TypeError:\n src.nmatches = 1\n else:\n if src.nmatches is None:\n src.nmatches = 0\n\n cur.close()\n\n match_logger.info (' -- number of matches: {}'.format(len(catalog_matched)))\n\n return sources, catalog_matched", "title": "" }, { "docid": "77a50d9e5c23b3664cf74a01a606ea2f", "score": "0.57784605", "text": "def openmvg_incremental(pth_sfm, pth_matches, pth_incr):\n\n cmd = \"{}/openMVG_main_IncrementalSfM -i {} -m {} -o {} \".format(PATH_OMVG, pth_sfm,\n pth_matches,\n pth_incr)\n os.system(cmd)", "title": "" }, { "docid": "e1b60aeb1d61e76cc2b04e76d97b863e", "score": "0.5617747", "text": "def main(interim_filepath, models_filepath, processed_filepath):\n transform_championship_matches(interim_filepath, models_filepath,\n processed_filepath)", "title": "" }, { "docid": "90b66ac40b92d0b62ffa96ad0e976bad", "score": "0.5608212", "text": "def _re_detectAndmatch(source_image, template_image, origin_image, query_image,\n source_image_match_keypoints): \n [h, w] = [template_image.shape[0], template_image.shape[1]]\n kp = source_image_match_keypoints\n dst_pts = np.float32([kp[m] for m in range(len(kp))]).reshape(-1, 1, 2)\n row, col, dim = dst_pts.shape\n if 0 < row < 33:\n _image_process(template_image, 0.1)\n match_value, match_posi, sift_similarity, max, k = [], [], [], 0.0, 0\n for i in range(row): #在匹配特征点附近进行模板搜索\n rect_img = _region_copy(source_image, dst_pts[i][0], w, h, 2.)\n val_1,kp_num = feature_similarity(rect_img,template_image,0.7)\n val_2,disp = template_match(rect_img,template_image,dst_pts[i][0],1)\n sift_similarity.append(val_1)\n match_value.append(val_2)\n match_posi.append(disp)\n if max < val_2:\n max = val_2\n k = i\n if DEBUG: print \"393: \", k, max, sift_similarity[k]\n if 0.9 < max and 0.09 < 
sift_similarity[k] or (0.8 < max and 0.5 < sift_similarity[k]):\n center_x = int(match_posi[k][0] + w/2)\n center_y = int(match_posi[k][1] + h/2)\n elif 0.2 < sift_similarity[k]:\n if 0.5 < sift_similarity[k]:\n val,disp = rotate_template_match(source_image, template_image)\n else:\n val,disp = template_match(source_image, template_image,[],1)\n if DEBUG: print \"402_value: \", val\n if val < 0.7: \n return None\n else:\n [center_x, center_y] = [int(disp[0]+w/2),int(disp[1]+h/2)]\n rect_img = _region_copy(origin_image,[center_x,center_y],w,h,1.5)\n val1 = hist_similarity(rect_img, query_image)\n if DEBUG: print \"409_hist_value: \", val1\n if val1 < 0.1 and val < 0.99: \n return None\n else:\n rect = _region_copy(source_image,[center_x,center_y],w,h,2)\n val_3,kp_num3 = feature_similarity(rect,template_image,0.7)\n if DEBUG: print \"415_sift_value: \", val_3, kp_num3\n if 0.0 < val_3: num = int(kp_num3/val_3) #计算good_match点数\n if val_3 < 0.15 or (num <= 5 and val < 0.92): return None\n else:\n value, posi = template_match(source_image, template_image,[],0)\n [center_x,center_y] = [posi[0]+int(w/2), posi[1]+int(h/2)]\n rect2 = _region_copy(source_image,[center_x,center_y],w,h,2.)\n val_4,kp_num4 = feature_similarity(rect2,template_image,0.7)\n if DEBUG: print \"423_value: \", value, val_4,kp_num4\n if value < 0.8 or val_4 == 0.0: #可以更高点\n return None\n else:\n rect_img = _region_copy(origin_image,[center_x,center_y],w,h,1.)\n hist_value = hist_similarity(rect_img, query_image)\n if DEBUG: print \"429_hist_value: \", hist_value\n if hist_value < 0.35: return None\n else:\n value, posi, scale = multi_scale_match(source_image, template_image)\n [center_x,center_y] = [int(posi[0]+scale*w/2), int(posi[1]+scale*h/2)]\n if DEBUG: print \"434_value: \", value\n if 0.9 < value:\n rect_img = _region_copy(origin_image,[center_x,center_y],\n int(w*scale), int(h*scale), 1.)\n temp = cv2.resize(query_image, (int(w*scale), int(h*scale)),\n cv2.cv.CV_INTER_LINEAR)\n hist_value = hist_similarity(rect_img, temp)\n if DEBUG: print \"441_hist_value: \", hist_value\n if hist_value < 0.35: return None\n else:\n return None \n top_x = int(center_x - w / 2)\n top_y = int(center_y - h / 2)\n if (top_x < 0) and (top_y < 0):\n return None\n else:\n return [int(center_x), int(center_y)]", "title": "" }, { "docid": "a317f21b547099760e60a9340cc0d716", "score": "0.5606128", "text": "def detect(self):\r\n\r\n # if not self.registered:\r\n # print(\"Call 'register()' first.\")\r\n # return\r\n\r\n # print(\"Start detection...\")\r\n # print(\"Press 'q' to quit.\\n\")\r\n\r\n bf = cv2.BFMatcher() # Prepare a Blute-Force (BF) matcher\r\n\r\n temp = True\r\n #while self.vidcap.isOpened():\r\n if temp == True:\r\n print(\"Into matching process\")\r\n frame = cv2.imread('box_in_scene.jpg') # trainI\r\n #frame = cv2.imread('ryan.jpg') # trainI\r\n frame = cv2.imread('chinchq.jpg') # trainI\r\n bkp_frame = cv2.imread('chinchq.jpg')\r\n #frame = cv2.imread('cblankcq.jpg')\r\n\r\n # Keypoint (kp) detection and calculate descriptors (des)\r\n kp, des = self.feature_detector.detectAndCompute(frame, None)\r\n winframe=imutils.resize(frame, 1000,1000)\r\n cv2.imshow(\" Actual Img\", winframe)\r\n cv2.waitKey(0)\r\n cv2.imshow(\" Template Image\", self.querying)\r\n cv2.waitKey(0)\r\n cv2.imshow(\" no .... 
\", self.querying)\r\n cv2.waitKey(0)\r\n # Apply blute-force knn matching between keypoints\r\n #matches = bf.knnMatch(self.des0, des, k=2)\r\n matches = bf.knnMatch(self.des0, des, k=2)\r\n\r\n #print(matches)\r\n # Adopt only good feature matches\r\n good = [[m] for m, n in matches if m.distance < self.ratio * n.distance]\r\n #print(good)\r\n #print(self.min_match_count)\r\n\r\n # Find Homography\r\n if len(good) > self.min_match_count:\r\n src_pts = np.float32([self.kp0[m[0].queryIdx].pt for m in good]).reshape(-1, 1, 2)\r\n #print(src_pts)\r\n dst_pts = np.float32([kp[m[0].trainIdx].pt for m in good]).reshape(-1, 1, 2)\r\n #print(dst_pts)\r\n\r\n\r\n M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\r\n\r\n h, w, _ = self.querying.shape # Assume color camera\r\n pts = np.float32([[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]).reshape(-1, 1, 2)\r\n #print(pts)\r\n dst = cv2.perspectiveTransform(pts, M)\r\n print(\"box coords\" + str(dst))\r\n\r\n for i, coords in enumerate(dst):\r\n #print(i, coords)\r\n if i == 0:\r\n x1, y1 = coords[0]\r\n elif i == 1:\r\n x2, y2 = coords[0]\r\n elif i == 2:\r\n x3, y3 = coords[0]\r\n else:\r\n x4, y4 = coords[0]\r\n\r\n #cropped_img = frame[508:675, 2936:3000]\r\n\r\n #cv2.imshow(\"Amount portion : \",cropped_img)\r\n #cv2.waitKey()\r\n # print(\"Y Axis\")\r\n # print(y1, y2, y3, y4)\r\n # print(\"X Axis\")\r\n # print(x1, x2, x3, x4)\r\n # print(int(x2)+100)\r\n cropped_img = bkp_frame[int(y1):int(y3), int(x2):]\r\n\r\n frame = cv2.polylines(frame, [np.int32(dst)], True, (0, 255, 0), 2, cv2.LINE_AA)\r\n #print(frame)\r\n\r\n # Visualize the matches\r\n #draw_params = dict(flags=2)\r\n draw_params = dict(matchColor=(0, 255, 0), singlePointColor=(0, 0, 255), flags=0)\r\n img = cv2.drawMatchesKnn(self.querying, self.kp0, frame, kp, good, None, **draw_params)\r\n\r\n #cv2.imshow(\"Detection (press 'q' to quit)\", img)\r\n\r\n #if cv2.waitKey(1) & 0xFF == ord('q'):\r\n # break\r\n\r\n #cv2.figure(figsize=IMAGE_SIZE)\r\n img=imutils.resize(img, 1000,2000)\r\n cv2.imshow(\"feature match : \",img)\r\n cv2.waitKey()\r\n\r\n cv2.imshow(\"Amount portion : \",cropped_img)\r\n cv2.waitKey()", "title": "" }, { "docid": "a6d57c0a254277638bb11e352cbc2b28", "score": "0.55065846", "text": "def extract_matches(path_mtchs):\n \n path_mtchs_bin = path_mtchs.replace('.txt','.bin')\n os.path.exists(path_mtchs_bin)\n cmd = \"{}/omvg-match {} > {}\".format(PATH_MATCH, path_mtchs_bin, path_mtchs)\n os.system(cmd)\n \n match_list = np.loadtxt(path_mtchs, dtype=int) \n return match_list", "title": "" }, { "docid": "c835f660e113ead16bc098ec02865fd5", "score": "0.54806924", "text": "def test_two_camera_matching(self):\n cpar = read_control_par(\"tests/testing_fodder/parameters/ptv.par\")\n vpar = read_volume_par(\"tests/testing_fodder/parameters/criteria.par\")\n\n cpar.num_cams = 2\n\n vpar.Zmin_lay[0] = -1\n vpar.Zmin_lay[1] = -1\n vpar.Zmax_lay[0] = 1\n vpar.Zmax_lay[1] = 1\n\n calib = read_all_calibration(cpar.num_cams)\n frm = generate_test_set(calib, cpar)\n\n corrected = correct_frame(frm, calib, cpar, 0.0001)\n corr_lists = safely_allocate_adjacency_lists(cpar.num_cams, frm.num_targets)\n\n match_pairs(corr_lists, corrected, frm, vpar, cpar, calib)\n\n # Assert each target has the real matches as candidates\n for cam in range(cpar.num_cams - 1):\n for subcam in range(cam + 1, cpar.num_cams):\n for part in range(frm.num_targets[cam]):\n correct_pnr = (\n corrected[cam][corr_lists[cam][subcam][part].p1].pnr\n if (subcam - cam) % 2 == 0\n else 15 - 
corrected[cam][corr_lists[cam][subcam][part].p1].pnr\n )\n found_correct_pnr = False\n for cand in range(MAXCAND):\n if (\n corrected[subcam][\n corr_lists[cam][subcam][part].p2[cand]\n ].pnr\n == correct_pnr\n ):\n found_correct_pnr = True\n break\n self.assertTrue(found_correct_pnr)\n\n # continue to the consistent_pair matching test\n con = [n_tupel() for _ in range(4 * 16)]\n tusage = safely_allocate_target_usage_marks(cpar.num_cams)\n\n # high accept corr bcz of closeness to epipolar lines.\n matched = consistent_pair_matching(\n corr_lists, cpar.num_cams, frm.num_targets, 10000.0, con, 4 * 16, tusage\n )\n\n assert matched == 16", "title": "" }, { "docid": "e13531e893bfbd5048dcb2212372f731", "score": "0.54779375", "text": "def test_vs_similarity():\n ref_mol = next(oddt.toolkit.readfile('sdf', xiap_crystal_ligand))\n receptor = next(oddt.toolkit.readfile('pdb', xiap_protein))\n\n # following toolkit differences is due to different Hs treatment\n vs = virtualscreening(n_cpu=1, chunksize=10)\n vs.load_ligands('sdf', xiap_actives_docked)\n vs.similarity('usr', cutoff=0.4, query=ref_mol)\n if oddt.toolkit.backend == 'ob':\n assert len(list(vs.fetch())) == 11\n else:\n assert len(list(vs.fetch())) == 6\n\n vs = virtualscreening(n_cpu=1)\n vs.load_ligands('sdf', xiap_actives_docked)\n vs.similarity('usr_cat', cutoff=0.3, query=ref_mol)\n if oddt.toolkit.backend == 'ob':\n assert len(list(vs.fetch())) == 16\n else:\n assert len(list(vs.fetch())) == 11\n\n vs = virtualscreening(n_cpu=1)\n vs.load_ligands('sdf', xiap_actives_docked)\n vs.similarity('electroshape', cutoff=0.45, query=ref_mol)\n if oddt.toolkit.backend == 'ob':\n assert len(list(vs.fetch())) == 55\n else:\n assert len(list(vs.fetch())) == 95\n\n vs = virtualscreening(n_cpu=1)\n vs.load_ligands('sdf', xiap_actives_docked)\n vs.similarity('ifp', cutoff=0.8, query=ref_mol, protein=receptor)\n assert len(list(vs.fetch())) == 33\n\n vs = virtualscreening(n_cpu=1)\n vs.load_ligands('sdf', xiap_actives_docked)\n vs.similarity('sifp', cutoff=0.8, query=ref_mol, protein=receptor)\n assert len(list(vs.fetch())) == 33\n\n # test wrong method error\n with pytest.raises(ValueError):\n vs.similarity('sift', query=ref_mol)", "title": "" }, { "docid": "065c3457834ef483e7c90328b58c4609", "score": "0.5468788", "text": "def cmd_similar(*args):\n global grid,cluster_viewer,flann\n assert type(grid)==gtk.ListStore\n selection = set([grid[i][2].id for i in cluster_viewer.get_selected_items()])\n selected = array([grid[i][2].image.ravel() for i in cluster_viewer.get_selected_items()])\n data = array([grid[i][2].image.ravel() for i in range(len(grid))])\n import pyflann\n flann = pyflann.FLANN()\n flann.build_index(selected)\n neighbors,dists = flann.nn_index(data)\n print dists[:10]\n grid.reorder([int(x) for x in argsort(dists)])\n cluster_viewer.unselect_all()\n for i in range(len(grid)):\n if grid[i][2].id in selection: cluster_viewer.select_path(i)\n cluster_viewer.scroll_to_path(1,1,0,0)\n return 1", "title": "" }, { "docid": "f0b67572bed7c56cc5393e3c00bfe3ed", "score": "0.54595566", "text": "def multiprocess_ext_fdr_calculation_mvz(comparison_list):\n # Activate to track comparisons.\n #increment()\n\n total_phenotype_matches = 0\n total_phenotype_nonmatches = 0\n\n species_a_genotype_id = comparison_list[0]\n species_a_phenotypes = read_only_mouse_geno_pheno_hash[comparison_list[0]]\n genotype_a_phenotype_count = len(species_a_phenotypes)\n\n # Genotype for species B\n species_b_genotype_id = comparison_list[1]\n species_b_phenotypes = 
read_only_zebrafish_geno_pheno_hash[comparison_list[1]]\n phenotype_matches = 0\n phenotype_non_matches = 0\n genotype_b_phenotype_count = len(species_b_phenotypes)\n\n for k in species_a_phenotypes:\n # Orthologs for species A\n species_a_phenotype = k\n for l in species_b_phenotypes:\n # Orthologs for species B\n species_b_phenotype = l\n\n ab_combo = species_a_phenotype+'_'+species_b_phenotype\n ba_combo = species_b_phenotype+'_'+species_a_phenotype\n if ab_combo in read_only_mvz_phenologs or ba_combo in read_only_mvz_phenologs:\n #print('species a ortholog:'+species_a_ortholog+' matches species b ortholog:'+species_b_ortholog)\n phenotype_matches += 1\n #print(species_a_ortholog+' == '+species_b_ortholog)\n total_phenotype_matches += 1\n else:\n #print('species a ortholog:'+species_a_ortholog+' does not match species b ortholog:'+species_b_ortholog)\n phenotype_non_matches += 1\n total_phenotype_nonmatches += 1\n\n if phenotype_matches > 0:\n #print('Matches: '+str(ortholog_matches))\n #print('Non-matches: '+str(ortholog_non_matches))\n m = float(genotype_b_phenotype_count)\n n = float(genotype_a_phenotype_count)\n N = float(len(read_only_mvz_phenologs))\n c = float(phenotype_matches)\n prb = float(hypergeom.pmf(c, N, m, n))\n #print(str(c)+', '+str(N)+', '+str(m)+', '+str(n))\n #print(prb)\n #phenolog_ext_p_value_list.append(prb)\n #total_hyp_calcs += 1\n\n return prb\n else:\n return", "title": "" }, { "docid": "522aea46e8fb2d4abeaba8eb31ed1c76", "score": "0.5446843", "text": "def test_match_window_multiple(self):\n yield self.introduce_nodes()\n\n self.nodes[2].overlay.settings.match_window = 0.5 # Wait 1 sec before accepting (the best) match\n\n order1 = yield self.nodes[0].overlay.create_bid(\n AssetPair(AssetAmount(10, 'DUM1'), AssetAmount(10, 'DUM2')), 3600)\n order2 = yield self.nodes[1].overlay.create_bid(\n AssetPair(AssetAmount(10, 'DUM1'), AssetAmount(10, 'DUM2')), 3600)\n\n yield self.sleep(0.3)\n\n # Make sure that the two matchmaker match different orders\n order1_tick = self.nodes[3].overlay.order_book.get_tick(order1.order_id)\n order2_tick = self.nodes[4].overlay.order_book.get_tick(order2.order_id)\n order1_tick.available_for_matching = 0\n order2_tick.available_for_matching = 0\n\n yield self.nodes[2].overlay.create_ask(AssetPair(AssetAmount(20, 'DUM1'), AssetAmount(20, 'DUM2')), 3600)\n\n yield self.sleep(1.5)\n\n # Verify that the trade has been made\n self.assertEqual(len(list(self.nodes[0].overlay.transaction_manager.find_all())), 1)\n self.assertEqual(len(list(self.nodes[1].overlay.transaction_manager.find_all())), 1)\n self.assertEqual(len(list(self.nodes[2].overlay.transaction_manager.find_all())), 2)", "title": "" }, { "docid": "5ae7796df3e1a1396a594b9e1d55d394", "score": "0.54371226", "text": "def match_to_gaia(imcat, refcat, product, output, searchrad=5.0):\n if isinstance(imcat, str):\n imtab = Table.read(imcat, format='ascii.ecsv')\n imtab.rename_column('X-Center', 'x')\n imtab.rename_column('Y-Center', 'y')\n else:\n imtab = imcat\n if 'X-Center' in imtab.colnames:\n imtab.rename_column('X-Center', 'x')\n imtab.rename_column('Y-Center', 'y')\n \n \n reftab = Table.read(refcat, format='ascii.ecsv')\n \n # define WCS for matching\n tpwcs = tweakwcs.FITSWCS(HSTWCS(product, ext=1))\n \n # define matching parameters\n tpmatch = tweakwcs.TPMatch(searchrad=searchrad)\n \n # perform match\n ref_indx, im_indx = tpmatch(reftab, imtab, tpwcs)\n print('Found {} matches'.format(len(ref_indx)))\n \n # Obtain tangent plane positions for both image sources and refeence 
sources\n im_x, im_y = tpwcs.det_to_tanp(imtab['x'][im_indx], imtab['y'][im_indx])\n ref_x, ref_y = tpwcs.world_to_tanp(reftab['RA'][ref_indx], reftab['DEC'][ref_indx])\n if 'RA' not in imtab.colnames:\n im_ra, im_dec = tpwcs.det_to_world(imtab['x'][im_indx], imtab['y'][im_indx])\n else:\n im_ra = imtab['RA'][im_indx]\n im_dec = imtab['DEC'][im_indx]\n \n\n # Compile match table\n match_tab = Table(data=[im_x, im_y, im_ra, im_dec, \n ref_x, ref_y, \n reftab['RA'][ref_indx], reftab['DEC'][ref_indx]],\n names=['img_x','img_y', 'img_RA', 'img_DEC', \n 'ref_x', 'ref_y', 'ref_RA', 'ref_DEC'])\n if not output.endswith('.ecsv'):\n output = '{}.ecsv'.format(output) \n match_tab.write(output, format='ascii.ecsv')", "title": "" }, { "docid": "7a63e81c817de61245fd28131a126254", "score": "0.54091597", "text": "def detect_objects(input_img, display=False, file=None):\r\n img2 = input_img.copy()\r\n match_these = {'pink': io.imread(r'D:\\python\\mikhailAssets\\templates\\pink\\wm_e_pi_j.jpg'),\r\n 'blue': io.imread(r'D:\\python\\mikhailAssets\\templates\\blue\\bm_e_pi_j.jpg'),\r\n 'cheese': io.imread(r'D:\\python\\mikhailAssets\\templates\\cheese\\c_e_pi_j.jpg')}\r\n\r\n # matching_methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED']\r\n # matching_methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',\r\n # 'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']\r\n matching_methods = ['skimage.feature.match_template']\r\n object_list = deepcopy(object_list_template)\r\n\r\n for key in match_these.keys():\r\n nn_output_legend = {'blue': 0,\r\n 'cheese': 1,\r\n 'pink': 2}\r\n # Contains scores from the neural network for the template matcher's guess\r\n # We will pack this array with scores for each method and use da best\r\n method_scores = {'skimage.feature.match_template': {'score': 0, 'center': (0, 0), 'size': (0, 0)},\r\n 'cv2.TM_CCOEFF': {'score': 0, 'center': (0, 0), 'size': (0, 0)},\r\n 'cv2.TM_CCOEFF_NORMED': {'score': 0, 'center': (0, 0), 'size': (0, 0)},\r\n 'cv2.TM_CCORR': {'score': 0, 'center': (0, 0), 'size': (0, 0)},\r\n 'cv2.TM_CCORR_NORMED': {'score': 0, 'center': (0, 0), 'size': (0, 0)},\r\n 'cv2.TM_SQDIFF': {'score': 0, 'center': (0, 0), 'size': (0, 0)},\r\n 'cv2.TM_SQDIFF_NORMED': {'score': 0, 'center': (0, 0), 'size': (0, 0)}}\r\n for met in matching_methods:\r\n # met = 'cv2.TM_CCOEFF_NORMED'\r\n # if key == 'cheese':\r\n # met = 'cv2.TM_CCOEFF'\r\n img = img2.copy()\r\n # if key == 'blue':\r\n # image_colorfulness(img)\r\n\r\n result = match_template(img, match_these[key])\r\n ij = np.unravel_index(np.argmax(result), result.shape)\r\n x, y = ij[0], ij[1]\r\n w, h = match_these[key].shape[0], match_these[key].shape[1]\r\n # Call the neural network to check validity\r\n guess = img2[w:x, h:y]\r\n # viewer = skimage.viewer.ImageViewer(guess)\r\n # viewer.show()\r\n\r\n #\r\n # cv2.imwrite(r'D:\\python\\mikhailAssets\\image_under_test.jpg', guess)\r\n # image_under_test = [tf.image.decode_jpeg(r'D:\\python\\mikhailAssets\\image_under_test.jpg', channels=0)]\r\n resized_guess = skimage.transform.resize(guess, (1, 64, 64, 3))\r\n nn_out = model.predict(resized_guess, steps=1)\r\n method_scores[met]['score'] = nn_out[0][nn_output_legend[key]]\r\n\r\n template_key = future_keys[key]\r\n method_scores[met][\"center\"] = ((x + w) / 2, (y + h) / 2)\r\n method_scores[met][\"size\"] = (w, h)\r\n # for idx, met_key in enumerate(method_scores.keys()):\r\n # best_score = 0\r\n # best_idx = 0\r\n # best_method = 'str'\r\n # if method_scores[met_key]['score'] > best_score:\r\n # 
best_method = met_key\r\n\r\n object_list[template_key][\"center\"] = ((x + w) / 2, (y + h) / 2)\r\n object_list[template_key][\"confidence\"] = nn_out[0][nn_output_legend[key]]\r\n object_list[template_key][\"size\"] = (w, h)\r\n #\r\n # object_list[template_key][\"center\"] = method_scores[best_method]['center']\r\n # object_list[template_key][\"size\"] = method_scores[best_method]['size']\r\n\r\n\r\n if display:\r\n print(object_list)\r\n\r\n fig = plt.figure(figsize=(8, 3))\r\n ax1 = plt.subplot(1, 3, 1)\r\n ax2 = plt.subplot(1, 3, 2)\r\n ax3 = plt.subplot(1, 3, 3, sharex=ax2, sharey=ax2)\r\n\r\n ax1.imshow(guess, cmap=plt.cm.gray)\r\n ax1.set_axis_off()\r\n ax1.set_title('template')\r\n\r\n ax2.imshow(input_img, cmap=plt.cm.gray)\r\n ax2.set_axis_off()\r\n ax2.set_title('image')\r\n # highlight matched region\r\n rect = plt.Rectangle((x, y), w, h, edgecolor='r', facecolor='none')\r\n ax2.add_patch(rect)\r\n\r\n ax3.imshow(result)\r\n ax3.set_axis_off()\r\n ax3.set_title('`match_template`\\nresult')\r\n # highlight matched region\r\n ax3.autoscale(False)\r\n ax3.plot(x, y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)\r\n\r\n plt.show()\r\n if file is not None:\r\n print(file)\r\n print(object_list)\r\n return object_list", "title": "" }, { "docid": "94a496a2151efde060b9c936f2d17447", "score": "0.5390066", "text": "def query(request):\n tank_ids = set(Tank.objects.filter( # Only tanks containing cukes\n cucumber__identifier__isnull=False).values_list(\n \"identifier\", flat=True))\n if request.method == \"POST\":\n form = MatchForm(request.POST, request.FILES)\n if form.is_valid():\n # Run CV model and render result data to match report\n logger.info(\"Match form validated.\"\n \" Rendering images to Match instances...\")\n match_id = MatchRecord.create(form.cleaned_data, request.FILES)\n return redirect('/match/result/?id=%s' % match_id)\n # Form not validated\n logger.error(\"MatchForm errors: %s\" % pprint.pformat(form.errors))\n return render(request, 'match/match.html', {\n \"form\": form,\n \"tanks\": tank_ids\n })\n # Assume GET\n if not tank_ids:\n return no_occupied_tanks(request)\n\n form = MatchForm()\n return render(request, 'match/match.html', {\n \"form\": form,\n \"tanks\": tank_ids\n })", "title": "" }, { "docid": "70d11f6ec0824a1461506c4cc669dbfc", "score": "0.53870344", "text": "def refineMatches_(matches, iteration, threshold):\n def calculateHomography(correspondences):\n #loop through the matched points and create assemble matrix\n aList = []\n for corr in correspondences:\n p1 = np.matrix([corr.item(0), corr.item(1), 1])\n p2 = np.matrix([corr.item(2), corr.item(3), 1])\n\n a1 = [p1.item(0), p1.item(1), 1, 0, 0, 0,\n -p2.item(0)*p1.item(0), -p2.item(0)*p1.item(1), -p2.item(0)]\n\n a2 = [0, 0, 0, p1.item(0), p1.item(1), 1,\n -p2.item(1)*p1.item(0), -p2.item(1)*p1.item(1), -p2.item(1)]\n\n aList.append(a1)\n aList.append(a2)\n\n matrixA = np.matrix(aList)\n\n #svd composition\n u, s, v = np.linalg.svd(matrixA)\n\n #reshape the min singular value into a 3 by 3 matrix\n h = np.reshape(v[8], (3, 3))\n\n #normalize and now we have h\n h = (1/h.item(8)) * h\n\n return h\n\n def geometricDistance(correspondence, h):\n\n p1 = np.transpose(np.matrix([correspondence[0].item(0), correspondence[0].item(1), 1]))\n estimatep2 = np.dot(h, p1)\n estimatep2 = (1/(estimatep2.item(2)+np.finfo(np.float32).eps.item()))*estimatep2\n\n p2 = np.transpose(np.matrix([correspondence[0].item(2), correspondence[0].item(3), 1]))\n error = p2 - estimatep2\n\n return 
np.linalg.norm(error)\n\n\n def ransac(points, iteration, threshold):\n maxInliers = []\n finalH = None\n\n for i in range(iteration):\n #find 4 random points to calculate a homography\n points1 = points[random.randrange(0, len(points))]\n points2 = points[random.randrange(0, len(points))]\n randomFour = np.vstack((points1, points2))\n points3 = points[random.randrange(0, len(points))]\n randomFour = np.vstack((randomFour, points3))\n points4 = points[random.randrange(0, len(points))]\n randomFour = np.vstack((randomFour, points4))\n\n #call the homography function on those points\n h = calculateHomography(randomFour)\n inliers = []\n\n for i in range(len(points)):\n d = geometricDistance(points[i], h)\n if d < 5: # this is also threshold, but we define this value as 5\n inliers.append(points[i])\n\n if len(inliers) > len(maxInliers):\n maxInliers = inliers\n finalH = h\n\n if len(maxInliers) > (len(points)*threshold):\n break\n\n return finalH, maxInliers\n \n # define matrix\n matches_mat = np.matrix(matches)\n\n # loop RANSAC until Homography matrix passes the criterion\n Done = False\n\n while Done == False:\n # we use only Homography matrix\n H, _ = ransac(points=matches_mat, iteration=iteration, threshold=threshold)\n\n D = H[0,0]*H[1,1]-H[0,1]*H[1,0]\n sx = np.sqrt(H[0,0]**2+H[1,0]**2)\n sy = np.sqrt(H[0,1]**2+H[1,1]**2)\n P = np.sqrt(H[2,0]**2+H[2,1]**2)\n\n # criterion for filtering false matches\n if D<=0 or sx<0.1 or sx>4 or sy<0.1 or sy>4:\n Done = False\n else:\n Done = True\n \n return H", "title": "" }, { "docid": "e42926a481ddc07fdbc80e3b9f9c7aca", "score": "0.5367625", "text": "def run_match(best, root_dir, c_chess_exe, concurrency, book_file_name, stockfish_base, stockfish_test):\n if stockfish_test is None:\n stockfish_test = stockfish_base\n\n pgn_file_name = os.path.join(root_dir, \"out.pgn\")\n command = \"{} -each tc=4+0.04 option.Hash=8 option.Threads=1 -gauntlet -games 200 -rounds 1 -concurrency {}\".format(\n c_chess_exe, concurrency\n )\n command = (\n command\n + \" -openings file={} order=random -repeat -resign 3 700 -draw 8 10\".format(\n book_file_name\n )\n )\n command = command + \" -engine cmd={} name=master\".format(stockfish_base)\n for net in best:\n command = command + \" -engine cmd={} name={} option.EvalFile={}\".format(\n stockfish_test, net, os.path.join(os.getcwd(), net)\n )\n command = command + \" -pgn {} 0 2>&1\".format(\n pgn_file_name\n )\n\n print(\"Running match with c-chess-cli ... {}\".format(pgn_file_name), flush=True)\n c_chess_out = open(os.path.join(root_dir, \"c_chess.out\"), 'w')\n process = subprocess.Popen(\"stdbuf -o0 \" + command, stdout=subprocess.PIPE, shell=True)\n seen = {}\n for line in process.stdout:\n line = line.decode('utf-8')\n c_chess_out.write(line)\n if 'Score' in line:\n epoch_num = re.search(r'epoch(\\d+)', line)\n if epoch_num.group(1) not in seen:\n sys.stdout.write('\\n')\n seen[epoch_num.group(1)] = True\n sys.stdout.write('\\r' + line.rstrip())\n sys.stdout.write('\\n')\n c_chess_out.close()\n if process.wait() != 0:\n print(\"Error running match!\")", "title": "" }, { "docid": "43c951317b2f500e862d8259a4c8f438", "score": "0.53550774", "text": "def test_pairwise_matching(self):\n cpar = read_control_par(\"tests/testing_fodder/parameters/ptv.par\")\n vpar = read_volume_par(\"tests/testing_fodder/parameters/criteria.par\")\n\n # /* Cameras are at so high angles that opposing cameras don't see each other\n # in the normal air-glass-water setting. 
*/\n cpar.mm.n2[0] = 1.0001\n cpar.mm.n3 = 1.0001\n\n calib = read_all_calibration(cpar.num_cams)\n frm = generate_test_set(calib, cpar)\n\n print(\"frame generated\\n\")\n print(\n \"%f %f %d\\n\"\n % (frm.targets[0][0].x, frm.targets[0][0].y, frm.targets[0][0].pnr)\n )\n print(\n \"%f %f %d\\n\"\n % (frm.targets[1][0].x, frm.targets[1][0].y, frm.targets[1][0].pnr)\n )\n\n corrected = correct_frame(frm, calib, cpar, 0.0001)\n corr_list = safely_allocate_adjacency_lists(cpar.num_cams, frm.num_targets)\n\n match_pairs(corr_list, corrected, frm, vpar, cpar, calib)\n\n # /* Well, I guess we should at least check that each target has the\n # real matches as candidates, as a sample check. */\n for cam in range(cpar.num_cams - 1):\n for subcam in range(cam + 1, cpar.num_cams):\n for part in range(frm.num_targets[cam]):\n # /* Complications here:\n # 1. target numbering scheme alternates.\n # 2. Candidte 'pnr' is an index into the x-sorted array, not\n # the original pnr.\n # */\n if (subcam - cam) % 2 == 0:\n correct_pnr = corrected[cam][\n corr_list[cam][subcam][part].p1\n ].pnr\n else:\n correct_pnr = (\n 15 - corrected[cam][corr_list[cam][subcam][part].p1].pnr\n )\n\n for cand in range(MAXCAND):\n if (\n corrected[subcam][corr_list[cam][subcam][part].p2[cand]].pnr\n == correct_pnr\n ):\n break\n\n self.assertFalse(cand == MAXCAND)", "title": "" }, { "docid": "fa856410fcb39549b1366ce630aec62a", "score": "0.53436446", "text": "def debug_find(file, target_program, similarity=0.8):\n myApp = App.focus(target_program)\n target_window = myApp.focusedWindow()\n print \"\"\n print \"\"\n print \"+ Sikuli match object for '%s' in window '%s'\" % (file, target_program)\n print \"+ with minimum similarity of %s:\" % similarity\n debug_matches = findAll_wrapper(target_window, Pattern(file).similar(similarity))\n for img_match in debug_matches:\n print img_match\n target_window.mouseMove(img_match)\n if isinstance(debug_matches, list) and len(debug_matches) == 0:\n print \"No matches!\"\n print \"\"\n print \"\"\n exit(0)", "title": "" }, { "docid": "6a34ad9a69819c867328a98c465196ba", "score": "0.5334633", "text": "def drawMatches(img1, kp1, img2, kp2, matches):\n\n # Create a new output image that concatenates the two images together\n # (a.k.a) a montage\n rows1 = img1.shape[0]\n cols1 = img1.shape[1]\n rows2 = img2.shape[0]\n cols2 = img2.shape[1]\n\n out = np.zeros((max([rows1,rows2]),cols1+cols2,3), dtype='uint8')\n\n # Place the first image to the left\n out[:rows1,:cols1] = np.dstack([img1, img1, img1])\n\n # Place the next image to the right of it\n out[:rows2,cols1:] = np.dstack([img2, img2, img2])\n\n # For each pair of points we have between both images\n # draw circles, then connect a line between them\n for mat in matches:\n\n # Get the matching keypoints for each of the images\n img1_idx = mat.queryIdx\n img2_idx = mat.trainIdx\n\n # x - columns\n # y - rows\n (x1,y1) = kp1[img1_idx].pt\n (x2,y2) = kp2[img2_idx].pt\n\n # Draw a small circle at both co-ordinates\n # radius 4\n # colour blue\n # thickness = 1\n cv2.circle(out, (int(x1),int(y1)), 4, (255, 0, 0), 1) \n cv2.circle(out, (int(x2)+cols1,int(y2)), 4, (255, 0, 0), 1)\n\n # Draw a line in between the two points\n # thickness = 1\n # colour blue\n cv2.line(out, (int(x1),int(y1)), (int(x2)+cols1,int(y2)), (255, 0, 0), 1)\n\n\n # Show the image\n cv2.imshow('Matched Features', out)\n cv2.waitKey(0)\n cv2.destroyWindow('Matched Features')\n\n # Also return the image if you'd like a copy\n return out", "title": "" }, { "docid": 
"cec16a7a73c67950c5c4ed8111ae0fa8", "score": "0.53271186", "text": "def run_overlay_resources_score_motifs(normal_expression_per_tissue_origin_per_TF,\n matching_cell_name_representative_dict, motifTFName_TFNames_matches_dict, \n cells_assays_dict, cell_tfs, tf_cells, assay_cells_datatypes, header):\n motif_files = []\n if not os.path.isdir(params['motif_sites_dir']) and os.path.isfile(params['motif_sites_dir']):\n motif_files = [params['motif_sites_dir']]\n params['motif_sites_dir'] = \".\"\n else:\n motif_files = os.listdir(params['motif_sites_dir'])\n \n chromatin_tracks_files = os.listdir(params['all_chromatin_makrs_all_cells_combined_dir_path'])\n if not os.path.exists(params['motifs_overlapping_tracks_output_dir']):\n os.mkdir(params['motifs_overlapping_tracks_output_dir'])\n motifs_overlapping_tracks_files = []\n scored_motifs_overlapping_tracks_files = []\n if get_value(params['run_in_parallel_param']) and len(motif_files)>1:\n p = Pool(int(params['number_processes_to_run_in_parallel']))\n for motif_file in motif_files:\n if motif_file.split('/')[-1] in chromatin_tracks_files:#it is assumed for every motif file name there exists a matching file name in the chromatin_tracks_input_dir\n motifs_overlapping_tracks_file = params['motifs_overlapping_tracks_output_dir']+'/' + '.'.join(motif_file.split('/')[-1].split('.')[0:-1])+'_overlapping_tracks' + '.bed7'\n scored_motifs_chromatin_tracks_output_file = '.'.join(motifs_overlapping_tracks_file.split('.')[0:-1]) + '_scored.bed10' \n if not (os.path.exists(motifs_overlapping_tracks_file) and os.path.exists(scored_motifs_chromatin_tracks_output_file)):\n if get_value(params['run_in_parallel_param']) and len(motif_files)>1:\n p.apply_async(overlay_resources_score_motifs, args=(params['motif_sites_dir']+'/'+motif_file, \n params['all_chromatin_makrs_all_cells_combined_dir_path']+'/'+motif_file.split('/')[-1], \n scored_motifs_chromatin_tracks_output_file, \n motifs_overlapping_tracks_file,\n normal_expression_per_tissue_origin_per_TF, \n matching_cell_name_representative_dict, motifTFName_TFNames_matches_dict, \n cells_assays_dict, cell_tfs, tf_cells, assay_cells_datatypes, header))\n else:\n overlay_resources_score_motifs(params['motif_sites_dir']+'/'+motif_file, \n params['all_chromatin_makrs_all_cells_combined_dir_path']+'/'+motif_file.split('/')[-1], \n scored_motifs_chromatin_tracks_output_file, \n motifs_overlapping_tracks_file,\n normal_expression_per_tissue_origin_per_TF,\n matching_cell_name_representative_dict, motifTFName_TFNames_matches_dict, \n cells_assays_dict, cell_tfs, tf_cells, assay_cells_datatypes, header)\n motifs_overlapping_tracks_files.append(motifs_overlapping_tracks_file)\n scored_motifs_overlapping_tracks_files.append(scored_motifs_chromatin_tracks_output_file)\n if get_value(params['run_in_parallel_param']) and len(motif_files)>1:\n p.close()\n p.join()\n return motifs_overlapping_tracks_files, scored_motifs_overlapping_tracks_files", "title": "" }, { "docid": "5fb8fedf77ad2f493cb8ba99d4b33e66", "score": "0.5326719", "text": "def _xmatch(self, level, atom):\n\t\tcp = portage.dep_getkey(atom)\n\t\tif level == \"match-all\":\n\t\t\tmatches = set()\n\t\t\tfor db in self._dbs:\n\t\t\t\tif hasattr(db, \"xmatch\"):\n\t\t\t\t\tmatches.update(db.xmatch(level, atom))\n\t\t\t\telse:\n\t\t\t\t\tmatches.update(db.match(atom))\n\t\t\tresult = list(x for x in matches if portage.cpv_getkey(x) == cp)\n\t\t\tdb._cpv_sort_ascending(result)\n\t\telif level == \"match-visible\":\n\t\t\tmatches = set()\n\t\t\tfor db in 
self._dbs:\n\t\t\t\tif hasattr(db, \"xmatch\"):\n\t\t\t\t\tmatches.update(db.xmatch(level, atom))\n\t\t\t\telse:\n\t\t\t\t\tdb_keys = list(db._aux_cache_keys)\n\t\t\t\t\tfor cpv in db.match(atom):\n\t\t\t\t\t\tmetadata = dict(izip(db_keys,\n\t\t\t\t\t\t\tdb.aux_get(cpv, db_keys)))\n\t\t\t\t\t\tif not self._visible(db, cpv, metadata):\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tmatches.add(cpv)\n\t\t\tresult = list(x for x in matches if portage.cpv_getkey(x) == cp)\n\t\t\tdb._cpv_sort_ascending(result)\n\t\telif level == \"bestmatch-visible\":\n\t\t\tresult = None\n\t\t\tfor db in self._dbs:\n\t\t\t\tif hasattr(db, \"xmatch\"):\n\t\t\t\t\tcpv = db.xmatch(\"bestmatch-visible\", atom)\n\t\t\t\t\tif not cpv or portage.cpv_getkey(cpv) != cp:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif not result or cpv == portage.best([cpv, result]):\n\t\t\t\t\t\tresult = cpv\n\t\t\t\telse:\n\t\t\t\t\tdb_keys = list(db._aux_cache_keys)\n\t\t\t\t\t# break out of this loop with highest visible\n\t\t\t\t\t# match, checked in descending order\n\t\t\t\t\tfor cpv in reversed(db.match(atom)):\n\t\t\t\t\t\tif portage.cpv_getkey(cpv) != cp:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tmetadata = dict(izip(db_keys,\n\t\t\t\t\t\t\tdb.aux_get(cpv, db_keys)))\n\t\t\t\t\t\tif not self._visible(db, cpv, metadata):\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif not result or cpv == portage.best([cpv, result]):\n\t\t\t\t\t\t\tresult = cpv\n\t\t\t\t\t\tbreak\n\t\telse:\n\t\t\traise NotImplementedError(level)\n\t\treturn result", "title": "" }, { "docid": "73953a0ef138a28fdfd8c29e3b963630", "score": "0.53184396", "text": "def evaluate_match(matched_PDB):\n match_name = os.path.basename(os.path.normpath(matched_PDB))\n\n # Parse matched_PDB to get ideal binding site name and residues\n match_pdb_name = os.path.basename(os.path.normpath(matched_PDB))\n pnc = re.split('_|-|\\.', match_pdb_name)\n match_name_dash_split = re.split('-|\\.', match_pdb_name)\n\n motif_index_list = match_name_dash_split[1].split('_')[1:-1]\n motif_index_string = '_'.join(motif_index_list)\n\n # Ideal Binding Site Name\n # todo: pnc list is all messed up for monomer scaffold set\n\n pnc_conformer = pnc[6] if pnc.index(ligand) == 5 else pnc[7]\n ideal_binding_site_name = '{}_{}-1_{}'.format(ligand, pnc_conformer, motif_index_string)\n\n # Motif Residues in Matched PDB\n motif_residue_ID_list = [a for a in re.split('(\\D+)', pnc[2]) if a != '']\n motif_residue_IDs = [(res_one_to_three[motif_residue_ID_list[indx]], motif_residue_ID_list[indx + 1]) for\n indx in range(0, len(motif_residue_ID_list), 2)]\n\n # --- Validate PDB quality... 
(HACKY BUT W/E IDGAF) --- #\n\n # This is necessary when I forget to adjust memory allocations for submitted jobs on the cluster and things die unexpectedly...\n row_dict = {'match_name': matched_PDB,\n 'ligand_shell_eleven': 0,\n 'interface_CB_contact_percentage': 0,\n 'motif_shell_CB': 0,\n 'residue_match_score': 9999,\n 'ligand_match_score': 9999,\n 'min_res_per_chain': 0,\n 'gurobi_motif_score': 0,\n 'ligand_CB_clashes': 9999\n }\n\n # Return if match PDB cannot be parsed by ProDy\n try:\n match_prody = prody.parsePDB(matched_PDB)\n except:\n return row_dict\n\n condition_list = [not match_prody,\n match_prody.select('name CB within 11 of resname {}'.format(ligand)) is None,\n match_prody.select('protein') is None,\n len(match_prody.select('protein')) < 200,\n match_prody.select('resname {}'.format(ligand)) is None,\n match_prody.select('name CB within 6 of resname {}'.format(ligand)) is None\n ]\n\n if any(condition_list):\n return row_dict\n\n # Return if motif residues are missing from match\n for res_tuple in motif_residue_IDs:\n if match_prody.select('resnum {} and not hydrogen and protein'.format(res_tuple[1])) is None:\n return row_dict\n\n if not monomer:\n\n # Return if match PDB is dimer but there's a chain missing (??)\n chains_in_dimer = list(set(match_prody.getChids()) - set('X'))\n if len(chains_in_dimer) != 2:\n return row_dict\n\n # Return if the interface doesn't exist (???)\n interface_cb = match_prody.select('(name CB and chain {}) within 8 of chain {} or\\\n (name CB and chain {}) within 8 of chain {}'.format(chains_in_dimer[0], chains_in_dimer[1], chains_in_dimer[1], chains_in_dimer[0]))\n\n if interface_cb is None:\n return row_dict\n\n # --- Calculate Match Metrics --- #\n\n # Calculate number of CB atoms within 10A of ligand\n # Percentage of CB atoms in the protein-protein interface (based on an 8A° threshold) that are within 6A of any ligand atom\n\n ligand_shell_eleven, interface_CB_contact_percentage, motif_shell_CB = filter.calculate_CB_stats(match_prody, motif_residue_IDs)\n\n # Calculate match score as defined by Roland\n # Calculate RMSD to ideal binding site (side chains only, not ligand)\n\n if args['<ideal_binding_site_dir>'] or args['fuzzballs']:\n residue_match_score, ligand_match_score = filter.calculate_rmsd_stats(match_prody, ideal_binding_site_name, motif_residue_IDs, match_name)\n else:\n residue_match_score, ligand_match_score = filter.calculate_rmsd_stats(match_prody, ideal_binding_site_name, motif_residue_IDs, match_name, ligand_match_score_only=True)\n\n # minimum number of motif residues per chain\n # todo: accomodate cases where all residues are on one chain! 
Currently returns 4 b/c list of res.getChIDs() is used to determine this\n motif_resnums = [res[1] for res in motif_residue_IDs]\n motif_residues = [match_prody.select('resnum {}'.format(motif_resnum)) for motif_resnum in motif_resnums]\n motif_residue_chain_list = [res.getChids()[0] for res in motif_residues]\n\n # todo: UM_1_D267F289Y271Q279_1_2BH1_TEP_0001-10-18-21-25_1 is empty??\n # todo: update to accomodate arbitrary number of motif residues\n try:\n min_res_per_chain = min([motif_residue_chain_list.count(chain) for chain in (set(motif_residue_chain_list) - set('X'))])\n except:\n min_res_per_chain = -1\n\n # Count CB that are within 2.4A of ligand\n clashing_CB_atoms = match_prody.select('name CB within 2.4 of chain X')\n ligand_CB_clashes = len(clashing_CB_atoms) if clashing_CB_atoms is not None else 0\n\n # Count number of motif residues that are within 2A of ligand\n clashing_residues = match_prody.select('protein within 2 of chain X')\n if clashing_residues == None:\n clashing_motif_resnums_count = 0\n else:\n clashing_resnums = set(clashing_residues.getResnums())\n matched_resnums = set([a[1] for a in motif_residue_IDs])\n clashing_motif_resnums = clashing_resnums & matched_resnums\n clashing_motif_resnums_count = len(clashing_motif_resnums)\n\n # Look up binding motif score in gurobi solutions\n # todo: pnc list is all messed up for monomer scaffold set\n current_conformer = '{}_{}'.format(ligand, pnc_conformer)\n index_list_string = '[1, {}]'.format(', '.join(motif_index_list))\n\n gurobi_score = 0\n if args['<gurobi_solutions_dir>']:\n gurobi_score_row = gurobi_solutions.loc[(gurobi_solutions['Residue_indicies'] == index_list_string) & (gurobi_solutions['Conformer'] == current_conformer)]\n gurobi_score = gurobi_score_row['Obj_score'].iloc[0]\n\n # Aggragate results\n row_dict = {'match_name': matched_PDB,\n 'ligand_shell_eleven': ligand_shell_eleven,\n 'interface_CB_contact_percentage': interface_CB_contact_percentage,\n 'motif_shell_CB': motif_shell_CB,\n 'residue_match_score': residue_match_score,\n 'ligand_match_score': ligand_match_score,\n 'min_res_per_chain': min_res_per_chain,\n 'gurobi_motif_score': gurobi_score,\n 'ligand_CB_clashes': ligand_CB_clashes,\n 'clashing_motif_resnums_count': clashing_motif_resnums_count\n }\n\n return row_dict", "title": "" }, { "docid": "821fb94584dd2e2e7218ba8bade16044", "score": "0.5317523", "text": "def _matches(image):\n for result in stbt.detect_match(image):\n if result.match:\n yield result", "title": "" }, { "docid": "7cec390aee5e5936fb1c00b0b803f193", "score": "0.5309397", "text": "def multiprocess_ext_fdr_calculation_hvm(comparison_list):\n # Activate to track comparisons.\n #increment()\n\n total_phenotype_matches = 0\n total_phenotype_nonmatches = 0\n\n species_a_genotype_id = comparison_list[0]\n species_a_phenotypes = read_only_human_geno_pheno_hash[comparison_list[0]]\n #print(species_a_phenotypes)\n genotype_a_phenotype_count = len(species_a_phenotypes)\n\n # Genotype for species B\n species_b_genotype_id = comparison_list[1]\n species_b_phenotypes = read_only_mouse_geno_pheno_hash[comparison_list[1]]\n #print(species_b_phenotypes)\n phenotype_matches = 0\n phenotype_non_matches = 0\n\n\n genotype_b_phenotype_count = len(species_b_phenotypes)\n\n for k in species_a_phenotypes:\n # Orthologs for species A\n #ortholog_matches = 0\n #ortholog_non_matches = 0\n\n species_a_phenotype = k\n for l in species_b_phenotypes:\n # Orthologs for species B\n species_b_phenotype = l\n\n ab_combo = 
species_a_phenotype+'_'+species_b_phenotype\n ba_combo = species_b_phenotype+'_'+species_a_phenotype\n if ab_combo in read_only_hvm_phenologs or ba_combo in read_only_hvm_phenologs:\n #print('species a ortholog:'+species_a_ortholog+' matches species b ortholog:'+species_b_ortholog)\n phenotype_matches += 1\n #print(species_a_ortholog+' == '+species_b_ortholog)\n total_phenotype_matches += 1\n else:\n #print('species a ortholog:'+species_a_ortholog+' does not match species b ortholog:'+species_b_ortholog)\n phenotype_non_matches += 1\n total_phenotype_nonmatches += 1\n\n if phenotype_matches > 0:\n #print('Matches: '+str(ortholog_matches))\n #print('Non-matches: '+str(ortholog_non_matches))\n m = float(genotype_b_phenotype_count)\n n = float(genotype_a_phenotype_count)\n N = float(len(read_only_hvm_phenologs))\n c = float(phenotype_matches)\n prb = float(hypergeom.pmf(c, N, m, n))\n #print(str(c)+', '+str(N)+', '+str(m)+', '+str(n))\n #print(prb)\n #phenolog_ext_p_value_list.append(prb)\n #total_hyp_calcs += 1\n\n return prb\n else:\n return", "title": "" }, { "docid": "0d332758f95e6450a763255de595a6ba", "score": "0.53064716", "text": "def match_images(image, capture):\n\n # If figure not found, this pair is returned\n NOT_FOUND = False, None\n\n # Feature extractor uses grayscale images\n img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n cap = cv2.cvtColor(capture, cv2.COLOR_BGR2GRAY)\n\n # Create a detector with the parameters\n # CURRENT RASPBERRY opencv version is 3.4.13\n # Initiate BRISK detector --> you could use any other detector, including NON binary features (SIFT, SURF)\n # but this is the best performing one in this version\n detector = cv2.SIFT_create()\n binary_features = False # True if detector = ORB, AKAZE, BRISK, and False otherwise\n\n # find the keypoints and corresponding descriptors\n kpImg, desImg = detector.detectAndCompute(img, None)\n kpCap, desCap = detector.detectAndCompute(cap, None)\n\n if desImg is None or desCap is None:\n # WARNING: empty detection?\n return NOT_FOUND\n if len(desImg) < MIN_MATCH_COUNT or len(desCap) < MIN_MATCH_COUNT:\n # WARNING: not enough FEATURES (im1: len(desImg), im2: len(desCap))\n return NOT_FOUND\n\n # A different matching process is applied whether binary features are used or not\n if binary_features:\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n matches = bf.match(desImg, desCap)\n goodMatches = sorted(matches, key=lambda x: x.distance)\n else:\n FLANN_INDEX_KDTREE = 0\n index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\n search_params = dict(checks=50)\n flann = cv2.FlannBasedMatcher(index_params, search_params)\n matches = flann.knnMatch(desImg, desCap, k=2)\n # store all the good matches as per Lowe's ratio test.\n goodMatches = []\n for m, n in matches:\n if m.distance < 0.7 * n.distance:\n goodMatches.append(m)\n\n # Show matches if requested\n if Cfg.image_match:\n img_tmp = cv2.drawMatches(image, kpImg, capture, kpCap, goodMatches, None)\n cv2.imshow(\"Matches\", img_tmp)\n\n # If enough matches found, figure is considered to be recognized\n if len(goodMatches) > MIN_MATCH_COUNT:\n img_pts = np.float32([kpImg[m.queryIdx].pt for m in goodMatches]).reshape(-1, 1, 2)\n cap_pts = np.float32([kpCap[m.trainIdx].pt for m in goodMatches]).reshape(-1, 1, 2)\n H_21, mask = cv2.findHomography(img_pts, cap_pts, cv2.RANSAC, 3.0)\n matchesMask = mask.ravel().tolist()\n num_robust_matches = np.sum(matchesMask)\n if num_robust_matches < MIN_MATCH_OBJECTFOUND:\n # NOT enough ROBUST matches found - num_robust_matches 
(required MIN_MATCH_OBJECTFOUND)\n return NOT_FOUND\n h, w = img.shape\n pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)\n box_corners = cv2.perspectiveTransform(pts, H_21)\n cv2.polylines(capture, [np.int32(box_corners)], True,\n color=(255, 255, 255), thickness=3)\n\n # Show matches if requested\n if Cfg.image_match:\n draw_params = dict(matchColor=(0, 255, 0), # draw matches in green color\n singlePointColor=None,\n matchesMask=matchesMask, # draw only inliers\n flags=2)\n img3 = cv2.drawMatches(image, kpImg, capture, kpCap, goodMatches, None, **draw_params)\n cv2.imshow(\"Matches\", img3)\n # ROBUST matches found - np.sum(matchesMask) (out of len(goodMatches)) --> OBJECT FOUND\"\n sumOfColumns = sum(box_corners, 0)[0]\n xCenter = sumOfColumns[0] / len(box_corners)\n yCenter = sumOfColumns[1] / len(box_corners)\n return True, (xCenter / Cfg.CAMERA_WIDTH, yCenter / Cfg.CAMERA_HEIGHT)\n else:\n # Not enough initial matches are found - len(goodMatches) (required MIN_MATCH_COUNT)\"\n return NOT_FOUND", "title": "" }, { "docid": "b55ee0f3b5a0973466b868ab539e7fb0", "score": "0.52671003", "text": "def Draw_Match(\n input_1=('ImagePin', 0), keypoints_1=('KeyPointsPin', 0),\n input_2=('ImagePin', 0), keypoints_2=('KeyPointsPin', 0),\n matches= ('FeatureMatchPin',0),\n output=(REF, ('ImagePin', 0)) ):\n img3 = cv2.drawMatchesKnn(input_1.image, keypoints_1.data[0], input_2.image, keypoints_2.data[0], matches,\n None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)\n output(img3)\n\n\n img(image)", "title": "" }, { "docid": "49227f5cc6f7abe81bce7c0592e86c2b", "score": "0.52657825", "text": "def execute(self, *args, **kwargs):\n self.match(*args, **kwargs)", "title": "" }, { "docid": "58407350444825e68c5dc3e5e4baefa6", "score": "0.52628636", "text": "def drawMatches(img1, kp1, img2, kp2, matches):\n\n # Create a new output image that concatenates the two images together\n # (a.k.a) a montage\n rows1 = img1.shape[0]\n cols1 = img1.shape[1]\n rows2 = img2.shape[0]\n cols2 = img2.shape[1]\n\n out = np.zeros((max([rows1,rows2]),cols1+cols2,3), dtype='uint8')\n\n # Place the first image to the left\n out[:rows1,:cols1] = np.dstack([img1, img1, img1])\n\n # Place the next image to the right of it\n out[:rows2,cols1:] = np.dstack([img2, img2, img2])\n\n # For each pair of points we have between both images\n # draw circles, then connect a line between them\n for mat in matches:\n\n # Get the matching keypoints for each of the images\n img1_idx = mat.queryIdx\n img2_idx = mat.trainIdx\n\n # x - columns\n # y - rows\n (x1,y1) = kp1[img1_idx].pt\n (x2,y2) = kp2[img2_idx].pt\n\n # Draw a small circle at both co-ordinates\n # radius 4\n # colour blue\n # thickness = 1\n \n i1 = random.randint(0, 255)\n i2 = random.randint(0, 255)\n i3 = random.randint(0, 255)\n \n cv2.circle(out, (int(x1),int(y1)), 4, (i1, i2, i3), 1) \n cv2.circle(out, (int(x2)+cols1,int(y2)), 4, (i1, i2, i3), 1)\n\n # Draw a line in between the two points\n # thickness = 1\n # colour blue\n cv2.line(out, (int(x1),int(y1)), (int(x2)+cols1,int(y2)), (i1, i2, i3), 1)\n\n\n # Show the image\n #cv2.imshow('Matched Features', out)\n #cv2.waitKey(0)\n #cv2.destroyWindow('Matched Features')\n\n # Also return the image if you'd like a copy\n return out", "title": "" }, { "docid": "a9fa272d73b033490b76e8dcddbfeba5", "score": "0.5256477", "text": "def matches(module, chunksize=200):\n matches = module.Match.all()\n print('{} matches'.format(len(matches)))\n random.shuffle(matches)\n\n # First 80% go to model\n 
model_edge = int(len(matches)*.8)\n # Second 10% go to verification\n verification_edge = int(len(matches)*.9)\n start = time.time()\n # Test\n print('{} matches for test'.format(len(matches[verification_edge:])))\n match_records = []\n with concurrent.futures.ProcessPoolExecutor() as executor:\n for match_record in executor.map(get_record, matches[verification_edge:], chunksize=chunksize):\n match_records.append(match_record)\n print('compiled {} match records for test'.format(len(match_records)))\n with open('{}/match_test_data.csv'.format(module.DATA_DIR), 'w') as f:\n writer = csv.writer(f)\n for record in match_records:\n if record[6] > 0:\n writer.writerow(record)\n print('Test took {} seconds with chunksize {}'.format(int(time.time() - start), chunksize))\n # Model\n print('{} matches for model'.format(len(matches[:model_edge])))\n start = time.time()\n match_records = []\n with concurrent.futures.ProcessPoolExecutor() as executor:\n for match_record in executor.map(get_record, matches[:model_edge], chunksize=chunksize):\n match_records.append(match_record)\n print('compiled {} match_records for model'.format(len(match_records)))\n with open('{}/match_model_data.csv'.format(module.DATA_DIR), 'w') as f:\n writer = csv.writer(f)\n for record in match_records:\n if record[6] > 0:\n writer.writerow(record)\n print('Model took {} seconds with chunksize {}'.format(int(time.time() - start), chunksize))\n # Verification\n print('{} matches for verification'.format(len(matches[model_edge:verification_edge])))\n start = time.time()\n match_records = []\n with concurrent.futures.ProcessPoolExecutor() as executor:\n for match_record in executor.map(get_record, matches[model_edge:verification_edge], chunksize=chunksize):\n match_records.append(match_record)\n print('compiled {} match_records for verification'.format(len(match_records)))\n with open('{}/match_verification_data.csv'.format(module.DATA_DIR), 'w') as f:\n writer = csv.writer(f)\n for record in match_records:\n if record[6] > 0:\n writer.writerow(record)\n print('Verification took {} seconds with chunksize {}'.format(int(time.time() - start), chunksize))", "title": "" }, { "docid": "adc52f697219056688d2c80100989219", "score": "0.525434", "text": "def rgdist(self, oov):\n oov_before_rgdist = oov\n if len(oov) > 3 and oov.isupper():\n oov = oov.lower()\n lgr.debug(\"ED LC O: [{0}], O_LC: [{1}]\".format(repr(oov_before_rgdist), repr(oov)))\n # Ordered regexes. 
Format: (incorrect, correct)\n # TODO: treat side effects like laa=>lada, mia=>mida, solaa=>solada\n # any stats (even unigram freq) may get rid of it wout extra lists\n # TODO: more precise regexes cos some (those w \"h\") are unlikely to bring good\n # candidates\n subs_tups = [('^wa', 'gua'), ('^we', 'bue'),\n ('gi', 'gui'), ('ge', 'gue'),\n ('q(?!ui)', 'que'),\n ('qe', 'que'), ('qi', 'qui'), ('ke', 'que'), ('ki', 'qui'),\n ('nio', u'ño'), ('nia', u'ña'), ('nyo', u'ño'), ('nya', u'ña'),\n ('x','ch'), ('y', 'll'), ('ll', 'y'),\n ('ao$','ado'),('io$','ido'),('aa$','ada'),('ia$','ida'),\n ('(?<!h)a','ha'), ('(?<!h)e','he'), ('(?<!h)i','hi'),\n ('(?<!h)o','ho'),('(?<!h)u','hu'),\n ('h$','s'),\n ('g', 'ge'), ('d', 'de'), ('p', 'pe'), ('t','te'),\n ('b','be'), ('q(?!u)','qu'), ('k','ca'), ('k', 'qu'), \n ('oy', 'oi'), ('ay', 'ai')]\n subs = dict(subs_tups)\n\n subs_keys = [tup[0] for tup in subs_tups]\n apptimes = {}\n result = {}\n cand = oov \n for reg in sorted(subs, key=lambda x: subs_keys.index(x)):\n cand_bef = cand\n patt = re.compile(reg, re.IGNORECASE)\n # recursive \n cand = re.sub(patt, subs[reg], cand)\n if not cand == cand_bef:\n apptimes.setdefault(cand, 0)\n # record how many times a rule has applied for cand\n apptimes[cand] += 1\n try:\n apptimes[cand] += apptimes[cand_bef]\n except KeyError:\n pass\n result.setdefault(cand, 0)\n result[cand] = -0.5 * apptimes[cand]\n for cand in result.keys():\n if tc.accent_check_in_regexes:\n acc_cand = self.accent_check(cand, oov)\n if acc_cand:\n bkp_times = result[cand]\n del result[cand]\n lgr.debug(\"ED (Rg) Deleted [{0}] from regex cands, Reason, Acc Cand [{1}]\".format(\n repr(cand), repr(acc_cand)))\n result[acc_cand] = bkp_times\n result[acc_cand] += -0.25\n if cand not in self.ivdico and cand in result:\n del result[cand]\n if cand == oov and cand in result:\n del result[cand]\n lgr.debug(\"RED RES {0}, APPT {1}\".format(repr(result), repr(apptimes)))\n return {\"cands\": result, \"apptimes\" : apptimes}", "title": "" }, { "docid": "afda66d29c36cb0cee829eebb4f8b846", "score": "0.5253097", "text": "def drawMatches(self, img1, kp1, img2, kp2, matches):\n\n # Create a new output image that concatenates the two images together\n # (a.k.a) a montage\n rows1 = img1.shape[0]\n cols1 = img1.shape[1]\n rows2 = img2.shape[0]\n cols2 = img2.shape[1]\n\n out = np.zeros((max([rows1,rows2]),cols1+cols2,3), dtype='uint8')\n\n # Place the first image to the left\n out[:rows1,:cols1,:] = np.dstack([img1, img1, img1])\n\n # Place the next image to the right of it\n out[:rows2,cols1:cols1+cols2,:] = np.dstack([img2, img2, img2])\n\n # For each pair of points we have between both images\n # draw circles, then connect a line between them\n for mat in matches:\n\n # Get the matching keypoints for each of the images\n img1_idx = mat.queryIdx\n img2_idx = mat.trainIdx\n\n # x - columns\n # y - rows\n (x1,y1) = kp1[img1_idx].pt\n (x2,y2) = kp2[img2_idx].pt\n\n # Draw a small circle at both co-ordinates\n # radius 4\n # colour blue\n # thickness = 1\n cv2.circle(out, (int(x1),int(y1)), 4, (255, 0, 0), 1) \n cv2.circle(out, (int(x2)+cols1,int(y2)), 4, (255, 0, 0), 1)\n\n # Draw a line in between the two points\n # thickness = 1\n # colour blue\n cv2.line(out, (int(x1),int(y1)), (int(x2)+cols1,int(y2)), (255, 0, 0), 1)\n\n # Show the image\n # cv2.imshow('Matched Features', out)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()", "title": "" }, { "docid": "97cc6f0e860bbdcf478644ae8170b05f", "score": "0.52530336", "text": "def main():\n\tobjects = 
[]\n\n\tmatcher = cv2.BFMatcher(cv2.NORM_HAMMING)\n\tdetector = cv2.FeatureDetector_create(\"ORB\")\n\textractor = cv2.DescriptorExtractor_create(\"ORB\")\n\tcamera = cv2.VideoCapture(\"test2.mp4\")\n\tglobal frameNumber\n\tframeNumber = 0\n\n\t# Colors for debugging, each object is given a color to differentiate in the debug image\n\tglobal colors\n\tcolors = [(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255), (0,255,255)]\n\tcolorIndex = 0\n\t#bst = BinarySearchTree()\n\t\n\n\t\n\twhile 1:\n\t\tret, frame = camera.read()\n\t\t\n\t\tsegmented = segmentation(frame)\n\t\tcv2.imwrite(\"%i%s\" % (frameNumber, 'labels.jpg'), segmented)\n\t\tsegments = extractSegments(frame, segmented)\n\n\t\tfeatures, shapes = featureExtractor(detector, extractor, segments)\n\t\tfeatureMatches = matchFinder(features, objects, frameNumber, colorIndex, matcher, shapes)\n\t\t#featureMatches = bst.startSearch(features, matcher, colorIndex)\n\t\t\n\n\t\t# Render object bounding box, keypoints and name if found in current frame\n\t\tlastName = \"\"\n\t\tfor match in featureMatches:\n\t\t\tfor pair in match.keypointPairs:\n\t\t\t\tcv2.line(frame, (int(pair[0].pt[0]), int(pair[0].pt[1])),(int(pair[1].pt[0]), int(pair[1].pt[1])), match.object.color, 1)\n\t\t\tcv2.rectangle(frame, match.min, match.max, match.object.color, 2)\n\t\t\tif lastName != match.object.name:\n\t\t\t\tcv2.putText(frame, match.object.name, match.min, cv2.FONT_HERSHEY_PLAIN, 2, match.object.color, 2)\n\t\t\t\tcv2.putText(frame, match.object.shape, match.min, cv2.FONT_HERSHEY_PLAIN, 2, match.object.color, 2)\n\n\t\t\tlastName = match.object.name\n\t\t\n\t\tcv2.imwrite(\"%i%s\" % (frameNumber, '.jpg'), frame)\n\t\tprint 'saving image', frameNumber\n\t\t\n\t\tfor i, segment in enumerate(segments):\n\t\t\tcv2.imwrite(\"%i%s%i%s\" % (frameNumber, '_seg', i, '.jpg'), segment)\n\t\t\n\t\tframeNumber += 1", "title": "" }, { "docid": "22cc2b1cc5aab120b450712f2862f45d", "score": "0.5249396", "text": "def _play_matches(self, chunk, build_results=True):\r\n interactions = defaultdict(list)\r\n index_pair, match_params, repetitions, seed = chunk\r\n p1_index, p2_index = index_pair\r\n if self.players[p1_index].name==\"M&J DQN learner w/ memory\":\r\n player1 = self.players[p1_index]\r\n else:\r\n player1 = self.players[p1_index].clone()\r\n if self.players[p2_index].name==\"M&J DQN learner w/ memory\":\r\n player2 = self.players[p2_index]\r\n else:\r\n player2 = self.players[p2_index].clone()\r\n match_params[\"players\"] = (player1, player2)\r\n match_params[\"seed\"] = seed\r\n match = Match_6505(**match_params)\r\n for _ in range(repetitions):\r\n match.play()\r\n \r\n if build_results:\r\n results = self._calculate_results(match.result)\r\n else:\r\n results = None\r\n \r\n interactions[index_pair].append([match.result, results])\r\n return interactions", "title": "" }, { "docid": "9e65ff80cb74b3f1f6605489a19f67e5", "score": "0.52170485", "text": "def gv_pair(q_kp, q_des, db_kp, db_des):\n M = None\n n_inliers = 0\n ransac_matches = []\n\n q_des = np.asarray(q_des,np.float32)\n db_des= np.asarray(db_des,np.float32)\n\n matcher = cv2.BFMatcher() # brute-force matcher\n matches = matcher.knnMatch(q_des, db_des, 2)\n good_matches = []\n for m,n in matches:\n if m.distance < 0.65*n.distance:\n good_matches.append([m])\n # find & compute homography if there are enough good_matches\n if len(good_matches) > MIN_MATCH_COUNT:\n src_pts = [q_kp[m[0].queryIdx] for m in good_matches]\n src_pts = np.float32(src_pts).reshape(-1,1,2)\n dst_pts = 
[db_kp[m[0].trainIdx] for m in good_matches]\n dst_pts = np.float32(dst_pts).reshape(-1,1,2)\n M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n if mask is None:\n return M, n_inliers, ransac_matches\n matchesMask = mask.ravel().tolist()\n n_inliers = len( [1 for m in matchesMask if m==1] )\n ransac_matches = [good_matches[i] for i in range(len(matchesMask)) \\\n if matchesMask[i]==1]\n return M, n_inliers, ransac_matches", "title": "" }, { "docid": "a254a3dde6a438a8b4128e60a7b1584e", "score": "0.5199831", "text": "def run(self):\n cmd = [\n *Match.rcss_cmd,\n 'server::team_l_start={}'.format(self.team_a),\n 'server::team_r_start={}'.format(self.team_b),\n ]\n print('Running match \"{}\"'.format(' '.join(cmd)))\n self.match_stdout = subprocess.check_output(cmd, universal_newlines=True).splitlines()", "title": "" }, { "docid": "0e575b5914cfff21f0522b6dc6a0b786", "score": "0.51806945", "text": "def test_match_regex():\n for obj, expected in MATCHING_TEST:\n query_json = {'object': obj}\n rv = client.post('/api/indicators/match',\n data=json.dumps(query_json),\n content_type='application/json')\n response = json.loads(rv.data)\n assert expected == response\n assert rv.status_code == 200", "title": "" }, { "docid": "3d37182184ffee7d969a83f770facc90", "score": "0.51729167", "text": "def _triggers_matching(rm, t):\n\n n = Choice.objects.filter(room_id=rm.id, t=t).exclude(desired_good=None).count()\n\n # Do matching if all users made their choice\n if n == rm.n_user:\n\n try:\n game.room.client.matching(rm=rm, t=t)\n\n except (psycopg2.OperationalError, django.db.utils.OperationalError):\n # If an error is raised\n # It means that another player has launched the matching\n # Then do nothing\n pass", "title": "" }, { "docid": "7b0fd8d637ce9cd9e8c05c5bd5ec767b", "score": "0.5170009", "text": "def MatchingAccuraciesHMM(data_items, M, C):\n for item in data_items:\n D, B, Match = JointViterbi(item.nouns, item.tags, item.X, item.Y, M, C)\n bestAlignment = GetBestAlignment(item.nouns, item.tags, B, D)\n PrintBestMatching(item.nouns, item.tags, bestAlignment, M, item.X, item.Y)\n print(\"==========\")", "title": "" }, { "docid": "de77f38956effdbe5f338aee1bd99ede", "score": "0.5130873", "text": "def main():\n print(len(sys.argv))\n if len(sys.argv) == 5:\n fu = sys.argv[1]\n fv = sys.argv[2]\n u0 = sys.argv[3]\n v0 = sys.argv[4]\n\n else:\n fu, fv, u0, v0 = 760, 760, 360, 360\n print(\"WARNING: You have not specified any parameters. 
Using Default\")\n print(\"Please use the following format: python src/code.py <fu> <fv> <u0> <v0>\")\n print(\"<fu, fv> represents the focal length and <u0, v0> represents the camera parameters\")\n print(\"These values default to <760, 760> and <360, 360> respectively\")\n\n # Camera Parameters = [[fu, 0, u0], [0, fv, v0], [0, 0, 1]]\n camera_parameters = np.array([[fu, 0, u0], [0, fv, v0], [0, 0, 1]])\n\n print(\"The camera parameters chosen are:\")\n print(\"Focal Length <fu, fv>: <\",fu,fv,\">\")\n print(\"Camera Values <u0, v0>: <\",u0,v0,\">\")\n\n orb = cv2.ORB_create()\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n\n dir_name = os.getcwd()\n model_co2 = cv2.imread(os.path.join(dir_name, 'reference/co2.jpg'), 0)\n model_mol = cv2.imread(os.path.join(dir_name, 'reference/mol.jpg'), 0)\n model_pi = cv2.imread(os.path.join(dir_name, 'reference/pi.jpg'), 0)\n model_pc = cv2.imread(os.path.join(dir_name, 'reference/laptop.jpg'), 0)\n\n kp_model_co2, des_model_co2 = orb.detectAndCompute(model_co2, None)\n kp_model_mol, des_model_mol = orb.detectAndCompute(model_mol, None)\n kp_model_pi, des_model_pi = orb.detectAndCompute(model_pi, None)\n kp_model_pc, des_model_pc = orb.detectAndCompute(model_pc, None)\n\n obj_co2 = OBJ(os.path.join(dir_name, 'models/co2.obj'), swapyz=True) \n obj_mol = OBJ(os.path.join(dir_name, 'models/mol.obj'), swapyz=True) \n obj_pi = OBJ(os.path.join(dir_name, 'models/pi.obj'), swapyz=True) \n obj_pc = OBJ(os.path.join(dir_name, 'models/laptop.obj'), swapyz=True) \n\n cap = cv2.VideoCapture(0)\n\n while True:\n ret, frame = cap.read()\n\n kp_frame, des_frame = orb.detectAndCompute(frame, None)\n if type(des_frame) != type(None):\n matches_co2 = bf.match(des_model_co2, des_frame)\n matches_mol = bf.match(des_model_mol, des_frame)\n matches_pi = bf.match(des_model_pi, des_frame)\n #Removed Temporarily for Demo purposes\n #matches_pc = bf.match(des_model_pc, des_frame)\n matches_pc = []\n\n else:\n #print(\"0 matches found:\",type(des_frame))\n matches_co2, matches_mol, matches_pi, matches_pc = [], [], [], []\n\n tmp_dict = {'co2':len(matches_co2),'mol':len(matches_mol),'pi':len(matches_pi),'pc':len(matches_pc)}\n max_val = max(tmp_dict.items(), key=operator.itemgetter(1))[0]\n\n if max_val == 'co2':\n matches, model = matches_co2, model_co2\n kp_model, des_model = kp_model_co2, des_model_co2\n obj = obj_co2\n scale_var = 3\n #print(\"Found a Co2: %s \" % len(matches_co2))\n\n elif max_val == 'mol':\n matches, model = matches_mol, model_mol\n kp_model, des_model = kp_model_mol, des_model_mol\n obj = obj_mol\n scale_var = 3\n #print(\"Found a Mol: %s \" % len(matches_mol))\n\n elif max_val == 'pi':\n matches, model = matches_pi, model_pi\n kp_model, des_model = kp_model_pi, des_model_pi\n obj = obj_pi\n scale_var = 3\n #print(\"Found a Pi: %s \" % len(matches_pi))\n\n elif max_val == 'pc':\n matches, model = matches_pc, model_pc\n kp_model, des_model = kp_model_pc, des_model_pc\n obj = obj_pc\n scale_var = 50\n #print(\"Found a Pyr: %s \" % len(matches_pc))\n\n else:\n #print(\"UNKNOWN: Something went wrong!\")\n pass\n\n matches = sorted(matches, key=lambda x: x.distance)\n\n if len(matches) > 10:\n src_pts = np.float32([kp_model[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)\n dst_pts = np.float32([kp_frame[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)\n\n homography, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n h, w = model.shape\n pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)\n dst = 
cv2.perspectiveTransform(pts, homography)\n frame = cv2.polylines(frame, [np.int32(dst)], True, 255, 3, cv2.LINE_AA) \n if len(matches) > MIN_MATCHES:\n projection = projection_matrix(camera_parameters, homography) \n frame = render(frame, obj, projection, model, scale_var)\n #matches arg\n frame = cv2.drawMatches(model, kp_model, frame, kp_frame, matches[:10], 0, flags=2)\n cv2.imshow('frame', frame)\n\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n\n if key & 0xFF == ord('h'):\n print(\"Ensure you are in the correct directory /Project/\")\n print(\"Please use the following format: python src/code.py <fu> <fv> <u0> <v0>\")\n print(\"<fu, fv> represents the focal length and <u0, v0> represents the camera parameters\")\n print(\"These values default to <760, 760> and <360, 360> respectively\")\n print(\"Pressing q will quit out of the program. Pressing h will bring up the help menu\")\n\n\n cap.release()\n cv2.destroyAllWindows()\n return 0", "title": "" }, { "docid": "d7fcf092cfeb623bb1483b630c336ba9", "score": "0.5100964", "text": "def get_matches(self):\n # initialize output\n self.feature_match_locs = -1 * np.ones((self.num_images, self.n_features, 2))\n\n # get features from first img, fill in self.features\n kp1, des1 = self.get_features(0)\n\n self.features['kp'] = kp1\n img_num = 0\n for feature_num, kp in enumerate(kp1):\n self.features['kp_np'].append(kp.pt)\n self.feature_match_locs[img_num, feature_num, 0] = kp.pt[0]\n self.feature_match_locs[img_num, feature_num, 1] = kp.pt[1]\n self.features['des'] = des1\n\n # iterate through all images and match with first\n for img_num in range(self.num_images):\n kp, des = self.get_features(img_num) \n matches = self.bf.match(des, self.features['des'])\n \n avg_dist = sum(m.distance for m in matches) / len(matches)\n\n for match in matches:\n if match.distance > 1.2*avg_dist:\n continue\n train_feature_num = match.trainIdx\n query_feature_num = match.queryIdx\n self.feature_match_locs[img_num, train_feature_num, 0] = \\\n kp[query_feature_num].pt[0]\n self.feature_match_locs[img_num, train_feature_num, 1] = \\\n kp[query_feature_num].pt[1]\n\n return self.feature_match_locs", "title": "" }, { "docid": "3a6b1b983b72979c37880988ce4b3d3a", "score": "0.50990665", "text": "def test_scores(self):\n obio = OboIO()\n terms = obio.get_graph(GOFILE2)\n gsgo = GsesameGO(terms)\n output = 0.8259052924791086\n self.assertEqual(output, gsgo.scores('0043229','0043231'))", "title": "" }, { "docid": "1198e7e3583bd578a3fc4c509fadcb87", "score": "0.50952303", "text": "def __call__(self, reduced_parameter_index):\n query_graph = self.GeneQuery(reduced_parameter_index)\n a = self.QueryFP1(query_graph.mgi(0))\n b = self.QueryFP2(query_graph.mgi(self.max_gpi))\n c = any ( self.QueryDoubleFP(query_graph.mgi(i)) for i in range(1,self.max_gpi) )\n return (a, b, c)", "title": "" }, { "docid": "4bef2647b9c947a86cb1d5afa6a17aa5", "score": "0.5092421", "text": "def matchAll(dsc1, dsc2):\n bfMatcher = cv.BFMatcher()\n matches = bfMatcher.knnMatch(dsc1,dsc2,k=2)\n return matches", "title": "" }, { "docid": "edc00dddafe4a21940c408d126ad63dc", "score": "0.50833166", "text": "def grid_search_custom_fn1_abcd(n_jobs=-1, num_matches=20, grid_file=None):\n cpu_agents = get_cpu_agents()\n if grid_file and isfile(grid_file):\n abcd = load_grids(grid_file)\n else:\n abcd = list(product(range(1, 10), range(1, 10),\n range(1, 5), range(1, 5)))\n total_matches = num_matches * len(cpu_agents) * 2\n total_wins = Parallel(n_jobs=n_jobs, verbose=10)(\n 
delayed(_eval_with_params)(\n cpu_agents, num_matches, custom_score, a=a, b=b, c=c, d=d\n )\n for a, b, c, d in abcd\n )\n for i, (a, b, c, d) in enumerate(abcd):\n print(\"a = {}, b = {}, c = {}, d = {}, win: {} / {}\".format(\n a, b, c, d, total_wins[i], total_matches))\n best = np.argmax(total_wins)\n print(\"-----------------------------------------------\")\n print(\"The best result: a = {}, b = {}, c = {}, d = {}\".format(*abcd[best]))\n print(\"-----------------------------------------------\")", "title": "" }, { "docid": "a8258de76dde4aa628f699057fdc9aed", "score": "0.5072679", "text": "def _find_match(self, **kwargs):\n main_lat_array = self.main_df['lat'].astype(float)\n main_lon_array = self.main_df['lon'].astype(float)\n main_depth_array = self.main_df['depth'].astype(float)\n\n\n print('Finding match...')\n self.matching_main_id_set = set() # All matches in main frame\n self.matching_match_id_list = [] # All matches in match frame\n self.matching_main_id_for_match_id = {}\n for time, lat, lon, depth, id in zip(self.match_df['time'],\n self.match_df['lat'].astype(float),\n self.match_df['lon'].astype(float),\n self.match_df['depth'].astype(float),\n self.match_df['visit_depth_id']):\n\n # Time\n time_boolean = (self.main_df['time'] >= (time-self.tolerance_time)) & (\n self.main_df['time'] <= (time+self.tolerance_time))\n\n # Distance\n # lat_array = np.array([float(item) if item else np.nan for item in self.main_df['lat']])\n # lon_array = np.array([float(item) if item else np.nan for item in self.main_df['lon']])\n\n dist_array = latlon_distance_array(lat, lon, main_lat_array, main_lon_array)\n dist_boolean = (dist_array <= self.tolerance_dist)\n\n # Depth\n depth_boolean = (main_depth_array >= (depth - self.tolerance_depth)) & (\n main_depth_array <= (depth + self.tolerance_depth))\n\n\n m_df = self.main_df.loc[time_boolean & dist_boolean & depth_boolean]\n if len(m_df):\n self.matching_match_id_list.append(id)\n self.matching_main_id_set.update(m_df['visit_depth_id'].values)\n self.matching_main_id_for_match_id[id] = m_df['visit_depth_id'].values", "title": "" }, { "docid": "277b180549f37dc227232b9713f1cd73", "score": "0.506596", "text": "def request_matches(self, **kwargs):\n endpoint = self.build_endpoint(**kwargs, endpoint_name=\"matches\")\n result = self.perform_get(built_uri=endpoint)\n total_result = []\n\n if result:\n for match in result:\n total_result.append({\n Match.HOME_TEAM: match[\"homeTeam\"][\"name\"],\n Match.AWAY_TEAM: match[\"awayTeam\"][\"name\"],\n Match.FULL_TIME_HOME_SCORE: match[\"homeGoals\"],\n Match.FULL_TIME_AWAY_SCORE: match[\"awayGoals\"],\n Match.COMPETITION: match[\"competition\"][\"name\"],\n Match.FLS_API_COMPETITION_ID: match[\"competition\"][\"dbid\"],\n Match.SEASON_YEAR: match[\"season\"][\"name\"],\n Match.FLS_MATCH_ID: match[\"dbid\"]\n })\n\n return total_result", "title": "" }, { "docid": "a7412a0a17920b31e51d1bf6c958182f", "score": "0.50593096", "text": "def organization_matches(org_id=1):\n try:\n # connect to database\n conn = pg.connect(host=URL,\n port=PORT,\n dbname=DBNAME,\n user=USER,\n password=PASS,\n connect_timeout=TIMEOUT)\n cursor = conn.cursor()\n\n # TODO : get mentees in need of match\n\n # TODO : for each mentee get mentors that would be possible matches (top 5)\n\n # TODO : write matches into match table\n\n # TODO : commit transaction\n\n cursor.close()\n conn.close()\n\n except:\n # TODO: handle exceptions\n return jsonify({'success': False,\n 'org_id' : org_id})\n\n return jsonify({'success' : True,\n 'org_id' 
: org_id})", "title": "" }, { "docid": "4b16b2ce5f0d10734976978269199358", "score": "0.5058551", "text": "def match(f, t, selected=False, iterate=None, minPoints=1, showMatrix=False,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tmove=True):\n\tif iterate is not None and not move:\n\t\traise MidasError(\"Cannot iterate if 'move' is false\")\n\tmobileAtoms, refAtoms, mobileMol, refMol = _atomSpecErrorCheck(f, t)\n\txfrel = mobileMol.openState.xform.inverse()\n\txfrel.multiply(refMol.openState.xform)\n\timport chimera.match\n\tfirstPass = 1\n\twhile 1:\n\t\tif len(refAtoms) < minPoints:\n\t\t\traise TooFewAtomsError(\"Too few corresponding atoms\"\n\t\t\t\t\" (%d) to match models\\n\" % len(refAtoms))\n\n\t\txform, rmsd = chimera.match.matchAtoms(refAtoms, mobileAtoms)\n\t\tif move:\n\t\t\txf = refMol.openState.xform\n\t\t\txf.multiply(xform)\n\t\t\tmobileMol.openState.xform = xf\n\t\t\tif selected:\n\t\t\t\t# transform all active models\n\t\t\t\ttfIds = [(refMol.id, refMol.subid),\n\t\t\t\t\t\t(mobileMol.id, mobileMol.subid)]\n\t\t\t\tfor id, subid in chimera.openModels.listIds():\n\t\t\t\t\topenState = chimera.openModels.openState(id, subid)\n\t\t\t\t\tif openState.active and (id, subid) not in tfIds:\n\t\t\t\t\t\topenState.xform = xf\n\t\tif iterate is None:\n\t\t\tbreak\n\t\telif iterate < 0.0:\n\t\t\traise MidasError(\"Iteration cutoff must be positive\")\n\n\t\tpairings = []\n\t\tfor i in range(len(refAtoms)):\n\t\t\tref = refAtoms[i]\n\t\t\tmobile = mobileAtoms[i]\n\t\t\tpairings.append((ref.xformCoord().sqdistance(\n\t\t\t\tmobile.xformCoord()), ref, mobile))\n\t\tpairings.sort()\n\n\t\tif firstPass:\n\t\t\tfirstPass = 0\n\t\t\tcutoff = iterate * iterate\n\n\t\tif pairings[-1][0] <= cutoff:\n\t\t\tbreak\n\n\t\t# cull 10% or...\n\t\tindex1 = int(len(refAtoms) * 0.9)\n\n\t\tfor i in range(len(refAtoms)-1, -1, -1):\n\t\t\tif pairings[i][0] <= cutoff:\n\t\t\t\tbreak\n\n\t\t# cull half the long pairings\n\t\tindex2 = int((i + len(refAtoms)) / 2)\n\n\t\t# whichever is fewer\n\t\tindex = max(index1, index2)\n\n\t\trefAtoms = []\n\t\tmobileAtoms = []\n\t\tfor i in range(index):\n\t\t\tdist, ref, mobile = pairings[i]\n\t\t\trefAtoms.append(ref)\n\t\t\tmobileAtoms.append(mobile)\n\n\tif len(refAtoms) < 3:\n\t\treplyobj.warning(\"This superposition uses less than 3 pairs of\"\n\t\t\t\" atoms is therefore not unique.\\n\")\n\tif showMatrix:\n\t\t# even when iterating, only last transformation computed matters\n\t\tfrom Matrix import transformation_description, xform_matrix\n\t\ttf = transformation_description(xform_matrix(xform))\n\t\txfrel.premultiply(xform)\n\t\ttl = transformation_description(xform_matrix(xfrel))\n\t\tmsg = ('Motion from original file coordinates\\n%s' % tf +\n\t\t 'Motion from last position\\n%s' % tl)\n\t\treplyobj.info(msg)\n\t_showStatus('RMSD between %d atom pairs is %.3f angstroms'\n\t\t\t\t\t\t% (len(refAtoms), rmsd))\n\treturn mobileAtoms, refAtoms, rmsd", "title": "" }, { "docid": "b3f83b3e52cc117719f19063707eba43", "score": "0.5058132", "text": "def drawMatches(img1, kp1, img2, kp2, matches):\n\n # Create a new output image that concatenates the two images together\n # (a.k.a) a montage\n rows1 = img1.shape[0]\n cols1 = img1.shape[1]\n rows2 = img2.shape[0]\n cols2 = img2.shape[1]\n\n out = np.zeros((max([rows1, rows2]), cols1 + cols2, 3), dtype='uint8')\n\n # Place the first image to the left\n out[:rows1, :cols1] = np.dstack([img1, img1, img1])\n\n # Place the next image to the right of it\n out[:rows2, cols1:] = np.dstack([img2, img2, img2])\n\n # For each pair of points we have 
between both images\n # draw circles, then connect a line between them\n for mat in matches:\n # Get the matching keypoints for each of the images\n img1_idx = mat.queryIdx\n img2_idx = mat.trainIdx\n\n # x - columns\n # y - rows\n (x1, y1) = kp1[img1_idx].pt\n (x2, y2) = kp2[img2_idx].pt\n\n # Draw a small circle at both co-ordinates\n # radius 4\n # colour blue\n # thickness = 1\n cv2.circle(out, (int(x1), int(y1)), 4, (255, 0, 0), 1)\n cv2.circle(out, (int(x2) + cols1, int(y2)), 4, (255, 0, 0), 1)\n\n # Draw a line in between the two points\n # thickness = 1\n # colour blue\n cv2.line(out, (int(x1), int(y1)), (int(x2) + cols1, int(y2)), (255, 0, 0), 1)\n\n # Show the image\n cv2.namedWindow('Features', cv2.WINDOW_NORMAL)\n cv2.imshow('Features', out)\n cv2.waitKey(0)\n cv2.destroyWindow('Matched Features')\n\n # Also return the image if you'd like a copy\n return out", "title": "" }, { "docid": "ae7fd09ce6d20965e85324d18c74bac7", "score": "0.5054768", "text": "def get_match_score(d1, d2):\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(d1, d2, k=2)\n\n sim = 0\n for m, n in matches:\n if m.distance < 0.70 * n.distance:\n sim += 1\n\n return sim", "title": "" }, { "docid": "9d38fadec59a9548db3c3f520f1e154c", "score": "0.50436985", "text": "def cvMatchTemplate(*args):\n return _cv.cvMatchTemplate(*args)", "title": "" }, { "docid": "95f8693525e6ac43ed0c01f32b57f135", "score": "0.5035006", "text": "def estimateMatch(direction, paramd, vvv=True):\n igr = np.array((paramd['x_match'], paramd['y_match'], paramd['z_match']))\n if np.linalg.norm(igr) == 0: # match is a ring so return the middle of it\n spx = paramd['r_match_inner'] + \\\n 0.5*(paramd['r_match_outer'] - paramd['r_match_inner'])\n else:\n name, r = getBearing(direction, paramd['geometry'])\n cross = np.linalg.norm(np.cross(igr, r))\n dot = np.linalg.norm(np.dot(igr, r))\n angle = np.arctan(cross/dot)\n spx = np.cos(angle)*np.linalg.norm(igr)\n if vvv:\n print('Ignition Center: ', igr)\n print('Estimated position', spx)\n return spx", "title": "" }, { "docid": "a986bc1b2c5c29b9adc0889a2e614a42", "score": "0.503088", "text": "def test_openings():\n ops = openings.Openings(\"eco/eco.json\")\n selected = ops.closest_match(\"kings pawn\")\n assert selected.name == \"King's Pawn\"\n assert selected.eco == \"B00\"\n assert selected.uci == \"e2e4\"\n assert selected.fen == \"rnbqkbnr/pppppppp/8/8/4P3/8/PPPP1PPP/RNBQKBNR b KQkq\"\n\n amar_gambit = ops.closest_match(\"Amar Gambit\")\n assert amar_gambit.as_pgn_moves() == \"1. Nh3 d5 2. g3 e5 3. f4 Bxh3 4. 
Bxh3 exf4\"\n\n assert ops.closest_match(\"foobar\") is None\n assert ops.closest_match(\"g4 english\").eco == \"A10\"", "title": "" }, { "docid": "a0b6956363a9701d9d1fb5023ec52fdd", "score": "0.5025812", "text": "def custom_match(num_matchs=20):\n agents = [\n Agent(AlphaBetaPlayer(score_fn=custom_score), \"AB_Custom_1\"),\n Agent(AlphaBetaPlayer(score_fn=custom_score_2), \"AB_Custom_2\"),\n Agent(AlphaBetaPlayer(score_fn=custom_score_3), \"AB_Custom_3\"),\n ]\n for i in range(1, 3):\n for j in range(i + 1, len(agents)):\n agent_i = agents[i]\n agent_j = agents[j]\n wins = {agent_i.player: 0, agent_j.player: 0}\n play_round(agent_i, [agent_j], wins, num_matches=num_matchs)\n print('{} vs {} = {} : {}'.format(\n agent_i.name,\n agent_j.name,\n wins[agent_i.player],\n wins[agent_j.player])\n )", "title": "" }, { "docid": "e5d4f1f3c2217fff59f046a4f37df22e", "score": "0.5021878", "text": "def match_rms(self, low, high, match):\n raise errors.Unimplemented()", "title": "" }, { "docid": "864d8a9abbca965f30dd7c48397bff5e", "score": "0.50196135", "text": "def run(self, use_mags=False, xmatchserver_user=None, **kwargs):\n prob_ratio_secondary = kwargs.pop('prob_ratio_secondary', 0.5)\n kwargs_prior, kwargs_run = self.parse_args(kwargs)\n\n self._match_raw = self._xmatch(xmatchserver_user, **kwargs_run)\n\n # match_file = 'tmp_match.fits'\n # self._match_raw = Table.read(match_file)\n\n if use_mags:\n self._priors = self._calc_priors(**kwargs_prior)\n\n return self._final_table(self._match_raw, prob_ratio_secondary)", "title": "" }, { "docid": "94bc55597936a6935d58ded2d66ca034", "score": "0.50181955", "text": "def test_replacements():\n # initialisation purpose only\n data_set = 'data/HDI/raw.csv'\n alts = dr.open_raw(data_set)[0]\n seed = 1\n method = 'mean'\n prometheeMV = prom.PrometheeMV(alts, seed=seed, method=method)\n alternatives = [[1],\n [0],\n ['*'],\n [2]]\n f = [myf]\n pref = [[[0, 1, '*', 0],\n [0, 0, '*', 0],\n ['*', '*', 0, '*'],\n [1, 1, '*', 0]]]\n\n for i in pref[0]:\n print(i)\n\n P = prometheeMV.compute_pairwise_comparisons(alternatives, f)\n print(\"second round\")\n for i in P[0]:\n print(i)", "title": "" }, { "docid": "972ac3483d94bb540d5e0ebf7f570997", "score": "0.5015136", "text": "def draw_matching_points(img, matches, train_locs, query_locs):\n for qry, trn in zip(matches[0], matches[1]):\n try:\n p2 = query_locs[qry]\n p1 = train_locs[trn]\n img = cv2.circle(img, (int(p1[1]), int(p1[0])), 2, (0, 0, 255), 2)\n img = cv2.circle(img, (int(p2[1]), int(p2[0])), 2, (0, 255, 0), 2)\n img = cv2.line(img, (int(p1[1]), int(p1[0])), (int(p2[1]), int(p2[0])), (255, 0, 0))\n except:\n import ipdb\n ipdb.set_trace()\n return img", "title": "" }, { "docid": "dbdf8c45f20ce917b06ca48cb34b743c", "score": "0.50135213", "text": "def drawMatches(img1, kp1, img2, kp2, matches):\n\n # Create a new output image that concatenates the two images together\n # (a.k.a) a montage\n rows1 = img1.shape[0]\n cols1 = img1.shape[1]\n rows2 = img2.shape[0]\n cols2 = img2.shape[1]\n\n out = np.zeros((max([rows1,rows2]),cols1+cols2,3), dtype='uint8')", "title": "" }, { "docid": "fd32ad47f8524795a1a59f570fe7f8b7", "score": "0.50050193", "text": "def match(target, source):\n match_translates(target, source)\n match_rotates(target, source)", "title": "" }, { "docid": "b48b0d8d7af75b81cb2ac07c8f4ae801", "score": "0.49917355", "text": "def drawmatches(name, img1, img2, kp1, kp2, verbosity=0):\n h1, w1 = img1.shape[:2]\n h2, w2 = img2.shape[:2]\n # Create storage for eventual matches\n view = np.zeros((max(h1, 
h2), w1 + w2, 3), np.uint8)\n view[:h1, :w1, 0] = img1\n view[:h2, w1:, 0] = img2\n view[:, :, 1] = view[:, :, 0]\n view[:, :, 2] = view[:, :, 0]\n\n for p1, p2 in izip(kp1, kp2):\n # draw the keypoints\n # print m.queryIdx, m.trainIdx, m.distance\n color = tuple([np.random.randint(0, 255)\n for _ in xrange(3)])\n cv2.line(view,\n (int(p1[0]),\n int(p1[1])),\n (int(p2[0] + w1),\n int(p2[1])),\n color)\n # Resize for easy display\n resize_and_display(name, view, 0.5, 0.5)", "title": "" }, { "docid": "7afd441ac2df235c68cfd694c57d6f57", "score": "0.49879152", "text": "def match(userid):", "title": "" }, { "docid": "539b55afd40af803a67f57a0cc3dd216", "score": "0.49792248", "text": "def _check_mp_match(\n self):\n self.log.debug('starting the ``_check_mp_match`` method')\n\n moverName = None\n\n # The self.movers are organised in order of increasing angular separation.\n # If the 1st row doesn't match, don't bother with the rest.\n if len(self.movers) > 0:\n # Get the offests. We don't care which direction they are in. Value\n # should be < 1.0 arcmins.\n if self.movers[0]['raOff'] and self.movers[0]['decOff']:\n # Get rid of the NESW designations\n raOff = decOff = None\n try:\n raOff = float(self.movers[0]['raOff'].replace('S', '').replace(\n 'N', '').replace('E', '').replace('W', ''))\n decOff = float(self.movers[0]['decOff'].replace(\n 'S', '').replace('N', '').replace('E', '').replace('W', ''))\n except ValueError, e:\n print \"Can't convert the offsets %s, %s. Unable to check them.\" % (self.movers[0]['raOff'], self.movers[0]['decOff'])\n self.moverName = moverName\n return\n\n if self.matchRadius:\n if raOff is not None and decOff is not None and raOff < self.matchRadius and decOff < self.matchRadius:\n moverName = self.movers[0]['designation']\n else:\n print raOff\n print decOff\n print self.matchRadius\n else:\n moverName = self.movers[0]['designation']\n\n self.moverName = moverName\n\n self.log.debug('completed the ``_check_mp_match`` method')\n return None", "title": "" }, { "docid": "11c6e8fdef6b3eff0ef7313aba663e87", "score": "0.49784687", "text": "def test_full_run():\n results = rcd.detect(\"tests/data\", video_start_threshold_percentile=50, min_detection_size_seconds=3)\n vid0 = results[\"video0.mp4\"]\n vid1 = results[\"video1.mp4\"]\n\n assert 5.2 < abs(vid0[0][0] - vid0[0][1]) < 5.8\n assert 4.5 < abs(vid0[1][0] - vid0[1][1]) < 5.5\n\n assert 5.2 < abs(vid1[0][0] - vid1[0][1]) < 5.8\n assert 4.5 < abs(vid1[1][0] - vid1[1][1]) < 5.5", "title": "" }, { "docid": "2fe09325191f667ec426ecfd86e84119", "score": "0.49761352", "text": "def test_call_open_reference_with_match_usearch61(self):\n\n app = Usearch61ReferenceOtuPicker(\n params={'save_intermediate_files': False,\n 'output_dir':\n self.output_dir,\n 'remove_usearch_logs': True,\n 'suppress_new_clusters':\n False\n })\n\n obs_clusters, failures = app(self.tmp_seq_filepath_97perc_id_rc,\n refseqs_fp=self.tmp_seqs_rc_single_seq)\n\n # Randomly selected match is used for equivalent matches, so need to\n # test for results without order affecting output\n expected_clusters = {'denovo0': ['usearch_ecoli_seq',\n 'usearch_ecoli_seq_1bp_change'],\n 'usearch_ecoli_seq_2bp_change_rc':\n ['usearch_ecoli_seq_2bp_change_rc']}\n\n for result in obs_clusters:\n for cluster in obs_clusters[result]:\n self.assertTrue(cluster in expected_clusters[result])\n\n expected_failures = []\n self.assertEqual(failures, expected_failures)", "title": "" }, { "docid": "f3f963c1cc188c01fadaee5c5fd14343", "score": "0.49644282", "text": "def 
assessStrategyGlobal(test_beginning_match,\n duration_train_matches,\n duration_val_matches,\n duration_test_matches,\n xgb_params,\n nb_players,\n nb_tournaments,\n features,\n data,\n model_name=\"0\"):\n ########## Training/validation/testing set generation\n \n # Number of matches in our dataset (ie. nb. of outcomes divided by 2)\n nm=int(len(features)/2)\n \n # Id of the first and last match of the testing,validation,training set\n beg_test=test_beginning_match\n end_test=min(test_beginning_match+duration_test_matches-1,nm-1)\n end_val=min(beg_test-1,nm-1)\n beg_val=beg_test-duration_val_matches\n end_train=beg_val-1\n beg_train=beg_val-duration_train_matches\n \n train_indices=range(2*beg_train,2*end_train+2)\n val_indices=range(2*beg_val,2*end_val+2)\n test_indices=range(2*beg_test,2*end_test+2)\n \n if (len(test_indices)==0)|(len(train_indices)==0):\n return 0\n \n # Split in train/validation/test\n xval=features.iloc[val_indices,:].reset_index(drop=True)\n xtest=features.iloc[test_indices,:].reset_index(drop=True)\n xtrain=features.iloc[train_indices,:].reset_index(drop=True)\n ytrain=pd.Series([1,0]*int(len(train_indices)/2))\n yval=pd.Series([1,0]*int(len(val_indices)/2))\n \n # We limit the number of players and tournaments one-hot encoded : we'll keep only the \n # players that won the most matches to avoid overfitting and make the process quicker\n # Biggest players :\n biggest_players=data.iloc[range(beg_train,end_train),:][[\"Winner\",\"Loser\"]]\n biggest_players=pd.concat([biggest_players.Winner,biggest_players.Loser],0)\n biggest_players=list(biggest_players.value_counts().index[:nb_players])\n player_columns=[el for el in xtrain.columns if el[:6]==\"player\"]\n to_drop_players=[el for el in player_columns if el[7:] not in biggest_players]\n # Biggest Tournaments\n biggest_tournaments=data.iloc[range(beg_train,end_train),:][\"Tournament\"]\n biggest_tournaments=list(biggest_tournaments.value_counts().index[:nb_tournaments])\n tournament_columns=[el for el in xtrain.columns if el[:10]==\"tournament\"]\n to_drop_tournaments=[el for el in tournament_columns if el[11:] not in biggest_tournaments]\n # We drop smallest Tournaments and players\n xtrain=xtrain.drop(to_drop_players+to_drop_tournaments,1)\n xval=xval.drop(to_drop_players+to_drop_tournaments,1)\n xtest=xtest.drop(to_drop_players+to_drop_tournaments,1)\n \n ### ML model training\n model=xgbModelBinary(xtrain,ytrain,xval,yval,xgb_params,sample_weights=None)\n \n # The probability given by the model to each outcome of each match :\n pred_test= model.predict(xgb.DMatrix(xtest,label=None)) \n # For each match, the winning probability the model gave to the players that won (should be high...) :\n prediction_test_winner=pred_test[range(0,len(pred_test),2)]\n # For each match, the winning probability the model gave to the players that lost (should be low...) 
:\n prediction_test_loser=pred_test[range(1,len(pred_test),2)]\n \n ### Odds and predicted probabilities for the testing set (1 row/match)\n odds=data[[\"PSW\",\"PSL\"]].iloc[range(beg_test,end_test+1)]\n implied_probabilities=1/odds\n p=pd.Series(list(zip(prediction_test_winner,prediction_test_loser,implied_probabilities.PSW,implied_probabilities.PSL)))\n\n ### For each match in the testing set, if the model predicted the right winner :\n right=(prediction_test_winner>prediction_test_loser).astype(int)\n ### For each match in the testing set, the confidence of the model in the outcome it chose\n def sel_match_confidence(x):\n if x[0]>x[1]:\n return x[0]/x[2] \n else:\n return x[1]/x[3] \n confidence=p.apply(lambda x:sel_match_confidence(x))\n \n ### The final confidence dataset \n confidenceTest=pd.DataFrame({\"match\":range(beg_test,end_test+1),\n \"win\"+model_name:right,\n \"confidence\"+model_name:confidence,\n \"PSW\":odds.PSW.values})\n confidenceTest=confidenceTest.sort_values(\"confidence\"+model_name,ascending=False).reset_index(drop=True)\n \n return confidenceTest", "title": "" }, { "docid": "f5c104024a710e4e7d8467dabe5ce1e6", "score": "0.49640077", "text": "def do_cloud_matching(self):\n logger.info('Running fcssm with dilation:\\n' +\n 'cloud: {c}\\n'.format(c=self.cloud_dilate) +\n 'shadow: {s}'.format(s=self.shadow_dilate) +\n 'snow: {sn}'.format(sn=self.snow_dilate)\n )\n\n # Run Fmask FCSSM\n self.fmask_result.do_fcssm(self.cloud_dilate,\n self.shadow_dilate,\n self.snow_dilate)\n\n # TODO if PREVIEW RESULT button: (else keep in memory)\n self.fcssm_filename, _tempfile = \\\n pyfmask_utils.temp_raster(self.fmask_result.fmask_final,\n self.fmask_result.geoT,\n self.fmask_result.prj)\n self.temp_files.append(_tempfile)\n\n # Open as raster layer\n rlayer_name = 'Fmask (cloud probability {p})'.format(p=self.cloud_prob)\n self.fmask_rlayer = qgis.core.QgsRasterLayer(self.fcssm_filename,\n rlayer_name)\n\n # Add to QGIS\n qgis.core.QgsMapLayerRegistry.instance().addMapLayer(\n self.fmask_rlayer)\n\n # Set symbology for new raster layer\n pyfmask_utils.apply_symbology(self.fmask_rlayer,\n self.symbology,\n self.enable_symbology,\n transparent=[255])\n\n # Refresh layer symbology\n self.iface.legendInterface().refreshLayerSymbology(self.fmask_rlayer)\n\n # Enable matching button\n self.allow_results(save=True)", "title": "" }, { "docid": "611aa37f3dbc9d441b80064fd4179459", "score": "0.49615738", "text": "def grid_search_custom_fn2_abc(n_jobs=-1, num_matches=20, grid_file=None):\n cpu_agents = get_cpu_agents()\n if grid_file and isfile(grid_file):\n abc = load_grids(grid_file)\n else:\n abc = list(product(range(1, 10), range(1, 10), range(1, 5)))\n total_matches = num_matches * len(cpu_agents) * 2\n total_wins = Parallel(n_jobs=n_jobs, verbose=10)(\n delayed(_eval_with_params)(\n cpu_agents, num_matches, custom_score_2, a=a, b=b, c=c\n )\n for a, b, c in abc\n )\n for i, (a, b, c) in enumerate(abc):\n print(\"a = {}, b = {}, c = {}, win: {} / {}\".format(\n a, b, c, total_wins[i], total_matches))\n best = np.argmax(total_wins)\n print(\"---------------------------------------\")\n print(\"The best result: a = {}, b = {}, c = {}\".format(*abc[best]))\n print(\"---------------------------------------\")", "title": "" }, { "docid": "ea3b8aac20b5e594ef3d690ad0dcb950", "score": "0.49561605", "text": "def match(cameta, cadata, ghcnmeta, ghcndata, table):\n\n cadict = cametaasdict(cameta)\n ghcndict = ghcnmetaasdict(ghcnmeta)\n\n drop_short(cadict, cadata)\n\n global wmocount\n wmomatch 
= 0\n wmofar = 0\n locnear = 0\n\n for match in itermatches(cadict, ghcndict):\n cast = match.cast\n ghcnst = match.ghcnst\n sep = locdist((cast['Latitude'], cast['Longitude']),\n (ghcnst.lat, ghcnst.lon))\n match.sep = sep\n if match.type == 'wmo':\n wmomatch += 1\n if sep > 0.015:\n wmofar += 1\n else:\n if sep < 0.015:\n locnear += 1\n castid = cast['id11']\n overlap,q,id12 = match_quality(cadata[castid][castid+'0'],\n ghcndata[ghcnst.uid])\n match.q = q\n wmo = blah_get(match.cast, 'WMO Identifier') or 'nowmo'\n print match.type, wmo, castid, id12, \\\n \"%.2f\" % sep, \"%4d\" % overlap, q\n if match.type == 'wmo' or match.q + match.sep < 1:\n newid = ghcnst.uid+'9'\n assert newid not in ghcndata[ghcnst.uid]\n table.write(\"%s %s\\n\" % (castid+'0', newid))\n\n print \"dropped stations\", dropcount\n print \"kept stations\", len(cadict)\n print \"WMO stations\", wmocount\n print \"WMO match\", wmomatch, \"(of those) not near:\", wmofar\n print \"LOCATION near\", locnear", "title": "" }, { "docid": "bdd17c680b208342998811cb808b02b9", "score": "0.4955032", "text": "def compute_sift_matches(f1, f2):\n img1 = cv2.imread(f1, cv2.IMREAD_GRAYSCALE)\n img2 = cv2.imread(f2, cv2.IMREAD_GRAYSCALE)\n\n # Initiate SIFT detector\n sift = cv2.SIFT()\n\n # find the keypoints and descriptors with SIFT\n kp1, des1 = sift.detectAndCompute(img1,None)\n kp2, des2 = sift.detectAndCompute(img2,None)\n\n # create BFMatcher object\n bf = cv2.BFMatcher(cv2.NORM_L1)\n # Match descriptors.\n matches = bf.knnMatch(des1,des2, k=2)\n # Sort them in the order of their distance.\n good = []\n for m,n in matches:\n if m.distance < 0.75*n.distance:\n good.append(m)\n # Draw first 10 matches.\n\n src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)\n dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)\n\n M, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)\n # Use custom version\n #dst_pts, src_pts = list(dst_pts), list(src_pts)\n #M = compute_homography(dst_pts, src_pts)\n\n (w, h) = img1.shape\n out = cv2.warpPerspective(img2, M, (h, w))\n #out = apply_homography(img2, M)\n cv2.imwrite(\"out/out_sift.png\", out)\n\n return out", "title": "" }, { "docid": "04f58d9d79bc6ae820e0d99a7c227c2e", "score": "0.49539405", "text": "def _play_matches(self, chunk, build_results=True):\r\n interactions = defaultdict(list)\r\n index_pair, match_params, repetitions, seed = chunk\r\n p1_index, p2_index = index_pair\r\n if \"learner\" in self.players[p1_index].name:\r\n player1 = self.players[p1_index]\r\n else:\r\n player1 = self.players[p1_index].clone()\r\n if \"learner\" in self.players[p2_index].name:\r\n player2 = self.players[p2_index]\r\n else:\r\n player2 = self.players[p2_index].clone()\r\n match_params[\"players\"] = (player1, player2)\r\n match_params[\"seed\"] = seed\r\n match = Match(**match_params)\r\n for _ in range(repetitions):\r\n match.play()\r\n \r\n if build_results:\r\n results = self._calculate_results(match.result)\r\n else:\r\n results = None\r\n \r\n interactions[index_pair].append([match.result, results])\r\n return interactions", "title": "" }, { "docid": "ece9f0c969782d87530d77948cd77959", "score": "0.49453643", "text": "def generate_all_matches (seq_data, pars_data):\r\n\t# Find all regions in the pars data\r\n\tregions = find_regions(pars_data)\r\n\t# Using these extract their sequences\r\n\tseq_regions = find_seq_regions(regions, seq_data)\r\n\t# Start at beginning and... match! Use a recursive method to generate full\r\n\t# matching tree. 
Return all possible solutions.\r\n\treturn match_recursion(regions, seq_regions, [], [], [], 0, 1)", "title": "" }, { "docid": "9176017a97c46db60b8737ddf1b6da5f", "score": "0.4940043", "text": "def compute_feature_contribution(db, model_path: str, venue_extractors, checkin_extractors):\n model_prefixes = [model_path + \"/\" + e[:-4:] for e in os.listdir(model_path) if \".mdl\" in e]\n\n total_results = {}\n\n prev_dbquery = \"\"\n for model_prefix in model_prefixes:\n # Find the query for given model and fetch relevant data from the DB\n dbquery = \"\"\n with open(model_prefix + \".desc\") as f:\n content = f.readlines()\n\n for line in content:\n if line[0:6] == \"Query:\":\n dbquery = line[7::]\n\n model = pickle.load(open(model_prefix + \".mdl\", \"rb\"))\n scaler = pickle.load(open(model_prefix + \".scaler\", \"rb\"))\n unigrams = pickle.load(open(model_prefix + \".unigrams\", \"rb\"))\n\n print(\"Processing {}.\".format(model_prefix))\n # Do not load the data twice if we are operating on the same data.\n if dbquery != prev_dbquery:\n raw_data = io.fetch_data_from_mongo(db[\"venues\"], db[\"checkins_gte5\"], dbquery, venue_extractors,\n checkin_extractors)\n\n # Normalize geographical coordinates\n raw_data[\"coordinates\"] = scaler.transform(raw_data[\"coordinates\"])\n\n # Construct sparse matrices\n features = [feature for feature in raw_data.keys() if feature not in [\"coordinates\", \"counts\", \"unigrams\"]]\n\n data = {\"coordinates\": raw_data[\"coordinates\"]}\n\n for feature in features:\n if feature == 'user' and os.path.isfile(model_prefix + \".svdfeatmap\"):\n svdfeatmap = pickle.load(open(model_prefix + \".svdfeatmap\", \"rb\"))\n\n data[feature] = io.get_sparse_occur_matrix(raw_data[feature], svdfeatmap)[:, 0:len(unigrams[feature])]\n else:\n unigram_ids = dict([(w, i) for i, w in enumerate(unigrams[feature])])\n data[feature] = io.get_sparse_occur_matrix(raw_data[feature], unigram_ids)\n\n # Compute likelihoods\n orig_ll = model.predict_log_probs(data)\n\n results = {\"orig\": orig_ll}\n for feature in features:\n betas = model.beta_arrays[feature]\n model.beta_arrays[feature] = Model.get_topic_unigram(model.m_arrays[feature], np.zeros_like(betas))\n ll = model.predict_log_probs(data)\n results[feature] = ll\n model.beta_arrays[feature] = betas\n\n without_geo = compute_probabilities_from_mixture(model, data)\n\n results[\"geo\"] = without_geo\n total_results[model_prefix] = results\n\n prev_dbquery = dbquery\n return total_results", "title": "" }, { "docid": "eaca5993c25e8260063c6c6141d30455", "score": "0.49215412", "text": "def run_tests():\n mh = MongoHelper()\n\n print(\"Getting airports near within 200 miles of: (-98.5034180, 33.9382331)\")\n res = mh.get_features_near_me('airports',(-98.5034180, 33.9382331),200)\n print(\"Found %d airports\" % len(res))\n print(\"\")\n\n print(\"Getting countries that have 'High income' in the INCOME_GRP field.\")\n res = mh.get_doc_by_keyword('countries','properties.INCOME_GRP','High income')\n print(\"Found %d countries\" % len(res))\n print(\"\")\n\n print(\"Getting earthquakes that had a magnitude of 5.5 (not a partial match like above), and don't pass in 5.5 as a string!\")\n res = mh.get_doc_by_keyword('earthquakes','properties.mag',5.5,False)\n print(\"Found %d earthquakes\" % len(res))\n print(\"\")\n\n print(\"Getting a state polygon.\")\n state = mh.get_state_poly('co')\n print(\"Found %d polygon in the result.\" % len(state['coordinates']))\n print(\"\")\n\n print(\"Getting all airports within the state poly from the 
previous query.\")\n res = mh.get_feature_in_poly('airports',state['coordinates'])\n print(\"Found %d airports in the polygon.\" % len(res))\n print(\"\")\n \n # Getting polygon data for Belgium\n country = mh.get_country_poly('BEL')\n\n # This query chokes on countries with type: MultiPolygon, but works on Polygon. \n # I'm investigating ... (If we think about it, we probaly just need run one \n # query per polygon within the \"multi\" polygon. \n print(\"Getting all airports within the country poly from the previous query.\")\n res = mh.get_feature_in_poly('airports',country['coordinates'])\n print(\"Found %d airports in the polygon.\" % len(res))\n print(\"\")\n\n print(\"Getting the country that encompasses the point [44.2968750,24.6669864]\")\n res = mh.get_poly_by_point('countries',[44.2968750,24.6669864])\n print(\"That country is: %s\" % (res['properties']['NAME_LONG']))\n\n res = mh.get_all('airports')\n print(len(res))", "title": "" }, { "docid": "46b4f4cd425c58fd7bfda8e319126331", "score": "0.4915057", "text": "def main():\n with read_std_files(OUT_FILE) as (qrys_file, docs_file, out_file):\n doc_count, token_count, word_map = map_docs(docs_file)\n avg_doc_len = token_count / float(doc_count)\n for doc_id, doc_tokens in tokenize(docs_file):\n doc_len = len(doc_tokens)\n doc_dct = dictify(doc_tokens)\n for query_id, query_tokens in tokenize(qrys_file):\n query_dct = dictify(query_tokens)\n similarity = sim(query_dct, doc_dct, doc_len, doc_count, avg_doc_len, word_map, k=TUNE_K)\n log(out_file, query_id, doc_id, similarity)", "title": "" }, { "docid": "ea4cc63751d612d52e01a5a3ca30dfdf", "score": "0.4911027", "text": "def main():\r\n num_students = 23\r\n num_simulations = 10000\r\n count = count_matches(num_students, num_simulations)\r\n \r\n \r\n print('After %d simulations' % num_simulations)\r\n print('with %d students' % num_students)\r\n print('there were %d simulations with at least one match' % count)", "title": "" }, { "docid": "9b5bd0ae5ca1d2bb8e13abb78446d3e2", "score": "0.49066353", "text": "def evaluate(self):\n best_fitness_score = 0.0\n index = 0\n for i, member in enumerate(self.population):\n if member.fitness > best_fitness_score:\n index = i\n best_fitness_score = member.fitness\n\n self.best_phrase = self.population[index].get_phrase()\n if best_fitness_score == self.perfect_match_score:\n self.evolved = True", "title": "" }, { "docid": "5a198e885baf6770f92a528d9d91d28e", "score": "0.4905658", "text": "def test_matchingmode():\r\n l = Lsystem()\r\n modes = { 0: PatternModule.eSimple, 1: PatternModule.eWithStar , 2: PatternModule.eWithStarNValueConstraint }\r\n l.set(lcode_matchingmode)\r\n for key,val in modes.items(): \r\n l.context().options.setSelection('Module matching',key)\r\n l.context()['mode'] = val\r\n l.iterate()", "title": "" }, { "docid": "035c26b4d8f8be9e4fec9617caff62bd", "score": "0.48890564", "text": "def calcul_matching(mot): # mot va etre en uppercase dans cette méthode\r\n scores=[(r,bleu([r], mot.upper(), smoothing_function=smoothie)) for r in ref ]\r\n scores = sorted(scores, key=lambda e: e[1]) # il faut prendre en compte en cas d'égalite\r\n return (mot, scores[len(scores) - 1]) # le mot , le mot de ref possible avec la similarité entre les 2\r", "title": "" }, { "docid": "7e71c37299f2e9c7785888adab57ed24", "score": "0.48881307", "text": "def FindSimilarLigand():\n if DEBUG: print \"FindSimilarLigand> find similar request\"\n pass", "title": "" }, { "docid": "26207be7a7443e6f9b7dec7def7010bc", "score": "0.48867023", "text": "def 
parse_openmvg_file(input_openMVG_file_path, image_dp, image_fp_type, suppress_distortion_warnings, op):\n op.report({'INFO'}, 'parse_openmvg_file: ...')\n op.report({'INFO'},'input_openMVG_file_path: ' + input_openMVG_file_path)\n input_file = open(input_openMVG_file_path, 'r')\n json_data = json.load(input_file)\n\n cams = OpenMVGJSONFileHandler.parse_cameras(\n json_data, image_dp, image_fp_type, suppress_distortion_warnings, op)\n view_index_to_absolute_fp = {\n cam.view_index: cam.get_absolute_fp() for cam in cams}\n points = OpenMVGJSONFileHandler.parse_points(\n json_data, op, view_index_to_absolute_fp)\n op.report({'INFO'},'parse_openmvg_file: Done')\n return cams, points", "title": "" }, { "docid": "8c9a87d0d3e3f6da41653d7009effe6f", "score": "0.48857182", "text": "def test_match_window_ask(self):\n yield self.match_window_impl(True)", "title": "" }, { "docid": "7f423f1ce4d7f06dbb41c4cb837d588b", "score": "0.48856187", "text": "def similarity_matches_route():\n data = SimilarityThresholdSchema().load(request.json)\n query = data[\"query\"]\n target = data.get(\"target\", None)\n query_mols, query_skipped = convert_compound_request(\n query, field=\"fingerprints\" if \"fingerprints\" in query else \"compounds\"\n )\n query_arena = make_fingerprint_arena(\n query_mols,\n fingerprint_type=data[\"fingerprint_type\"],\n fingerprint_args=data[\"fingerprint_args\"],\n )\n del query_mols\n target_arena = None\n if target is not None:\n target_mols, target_skipped = convert_compound_request(\n target, field=\"fingerprints\" if \"fingerprints\" in target else \"compounds\"\n )\n target_arena = make_fingerprint_arena(\n target_mols,\n fingerprint_type=data[\"fingerprint_type\"],\n fingerprint_args=data[\"fingerprint_args\"],\n )\n del target_mols\n matches = find_similarity_matches(\n query_arena,\n target_arena,\n threshold=data[\"threshold\"],\n n_threads=data[\"n_threads\"],\n )\n out = {\"query\": [], \"target\": [], \"score\": []}\n for k, v in matches.items():\n out[\"query\"].extend([k] * len(v))\n out[\"target\"].extend(v.keys())\n out[\"score\"].extend(v.values())\n SimilarityResultSchema().validate(out)\n return out", "title": "" }, { "docid": "68d4a498e596358085eab32283b7ef0d", "score": "0.48806268", "text": "def openmvg_features(pth_sfm, pth_features, force=False):\n\n cmd = '{}/openMVG_main_ComputeFeatures -i {} -p HIGH -o {}'.format(PATH_OMVG, pth_sfm, pth_features)\n if force: cmd = cmd + \" -f 1\"\n os.system(cmd)", "title": "" }, { "docid": "49d43898946812f383f54dc4214ca385", "score": "0.48725748", "text": "def process_matches(items, colour):\n matches = filter_by_colour(items, colour)\n for item in matches:\n photograph(item)\n matches += 1\n return len(matches)", "title": "" }, { "docid": "144d03746b35bf601db70a61572b08d7", "score": "0.48689666", "text": "def _score_match(matchinfo: bytes, form, query) -> float:\n # I don't know why the query comes in quoted, but let's remove the quotes\n query = query.strip('\"')\n try:\n if form == query:\n return 20\n if form.lower() == query.lower():\n return 10\n\n # Decode matchinfo blob according to https://www.sqlite.org/fts3.html#matchinfo\n offset = 0\n num_cols = int.from_bytes(matchinfo[offset : offset + 4], sys.byteorder)\n offset += 4\n tokens = int.from_bytes(matchinfo[offset : offset + 4], sys.byteorder)\n offset += num_cols * 4\n matched_tokens = int.from_bytes(matchinfo[offset : offset + 4], sys.byteorder)\n\n # print(matchinfo, form, query, matched_tokens, tokens)\n return matched_tokens / tokens\n except Exception as 
e:\n print(e)\n raise", "title": "" }, { "docid": "397c76eac6eff06719748208dc489dc0", "score": "0.4865622", "text": "def searchMatch(self, leftNode, rightNode, featureList, matcher, featureMatches, colorIndex):\n\t\t#matcher = cv2.BFMatcher object\n\t\tprint \"asdfasdf\", leftNode, rightNode\n\t\tif leftNode == None and rightNode == None:\n\t\t\treturn featureMatches\n\t\tnodes = [leftNode, rightNode]\n\t\tfor a, feature in enumerate(featureList):\n\n\t\t\tisKnownObject = False\n\t\t\t#Check if left or right object matches\n\t\t\tfor node in nodes:\n\t\t\t\tprint \"nodes\", nodes\n\t\t\t\t#Check that node is Object instances\n\t\t\t\tif isinstance(node, Object):\n\t\t\t\t\tprint \"isinstance node\", node\n\t\t\t\t\t#To limit processing power needed only n newest occurrences of an object are kept in the feature list\n\t\t\t\t\tif len(node.features) > 5:\n\t\t\t\t\t\tnode.features = node.features[1:]\n\t\t\t\t\tfor featureObject in node.features:\n\n\t\t\t\t\t\tif featureObject.descriptors != None and node.features != None:\n\t\t\t\t\t\t\tfor feature in node.features:\n\t\t\t\t\t\t\t\t#Filter keypoints and matches\n\t\t\t\t\t\t\t\tmatches = matcher.knnMatch(featureObject.descriptors, feature.descriptors, k=2)\n\t\t\t\t\t\t\t\tpairs = filterMatches(featureObject.keypoints, feature.keypoints, matches)\n\t\t\t\t\t\t\t\t#Feature is declared matching if n matched pairs remain\n\t\t\t\t\t\t\t\tif len(pairs) >= 10:\n\t\t\t\t\t\t\t\t\t#add new features to existing object\n\t\t\t\t\t\t\t\t\tnode.features.append(feature)\n\t\t\t\t\t\t\t\t\t#Add match to found matches\n\t\t\t\t\t\t\t\t\tfeatureMatches.append(Match(node, feature, pairs))\n\n\t\t\t\t\t\t\t\t\tisSameObject = True\n\t\t\t\t\t\t\t\t\tisKnownObject = True\n\n\t\t\tif not isKnownObject:\n\t\t\t\t#If the feature is not a known object, add it as the first occurrence of a new object\n\t\t\t\tobject = Object(str(frameNumber) + str(a), colors[colorIndex % len(colors)], feature)\n\t\t\t\t#Insert the node into bst\n\t\t\t\tself.insertObject(object)\n\n\t\t\t\tcolorIndex += 1\n\n\t\treturn featureMatches", "title": "" }, { "docid": "00c3e469670943dd41d8e7de2cd64e2e", "score": "0.4860551", "text": "def multiprocess_ext_fdr_calculation_hvz(comparison_list):\n # Activate to track comparisons.\n #increment()\n\n total_phenotype_matches = 0\n total_phenotype_nonmatches = 0\n\n species_a_genotype_id = comparison_list[0]\n species_a_phenotypes = read_only_human_geno_pheno_hash[comparison_list[0]]\n genotype_a_phenotype_count = len(species_a_phenotypes)\n\n # Genotype for species B\n species_b_genotype_id = comparison_list[1]\n species_b_phenotypes = read_only_zebrafish_geno_pheno_hash[comparison_list[1]]\n phenotype_matches = 0\n phenotype_non_matches = 0\n\n\n genotype_b_phenotype_count = len(species_b_phenotypes)\n\n for k in species_a_phenotypes:\n # Orthologs for species A\n species_a_phenotype = k\n for l in species_b_phenotypes:\n # Orthologs for species B\n species_b_phenotype = l\n\n ab_combo = species_a_phenotype+'_'+species_b_phenotype\n ba_combo = species_b_phenotype+'_'+species_a_phenotype\n if ab_combo in read_only_hvz_phenologs or ba_combo in read_only_hvz_phenologs:\n #print('species a ortholog:'+species_a_ortholog+' matches species b ortholog:'+species_b_ortholog)\n phenotype_matches += 1\n #print(species_a_ortholog+' == '+species_b_ortholog)\n total_phenotype_matches += 1\n else:\n #print('species a ortholog:'+species_a_ortholog+' does not match species b ortholog:'+species_b_ortholog)\n phenotype_non_matches += 1\n 
total_phenotype_nonmatches += 1\n\n if phenotype_matches > 0:\n m = float(genotype_b_phenotype_count)\n n = float(genotype_a_phenotype_count)\n N = float(len(read_only_hvz_phenologs))\n c = float(phenotype_matches)\n prb = float(hypergeom.pmf(c, N, m, n))\n\n return prb\n else:\n return", "title": "" }, { "docid": "cbcbe905a3f89a819c333392cb3bb08f", "score": "0.48574096", "text": "def manualMatching(self):\n\n best_filtered = pd.read_csv(self.best_filtered, index_col=None, dtype=self.df_dtypes)\n best_filtered['Manual_Match_N'] = ''\n best_filtered['Manual_Match_NA'] = ''\n\n if self.in_args.terminal_matching:\n # Iterate over the file, shuffled with sample, as best matches otherwise would show first:\n for index, row in best_filtered.sample(frac=1).iterrows():\n logging.info(\"\\nsource name: \" + str(row.src_name_adj))\n logging.info(\"\\nRegistry name: \" + str(row.reg_name_adj))\n logging.info(\"\\nLevenshtein distance: \" + str(row.leven_dist_N))\n match_options = [\"y\", \"n\", \"u\", \"f\"]\n match = input(\"\\nMatch? Yes, No, Unsure, Finished (Y/N/U/F):\")\n while match.lower() not in match_options:\n match = input(\"\\nMatch? Yes, No, Unsure, Finished (Y/N/U/F):\")\n\n if str(match).lower() != \"f\":\n best_filtered.at[index, 'Manual_Match_N'] = str(match).capitalize()\n continue\n else:\n break\n\n best_filtered.sort_values(by=['Cluster_ID'], inplace=True, axis=0, ascending=True)\n\n logging.info(\"Saving...\")\n best_filtered.to_csv(self.unverified_file, index=False, columns=self.dbUpload_cols)\n else:\n best_filtered.to_csv(self.unverified_file, index=False, columns=self.dbUpload_cols)", "title": "" }, { "docid": "28fc6bc303a462cfab7c6aebfe6ec6ce", "score": "0.48571292", "text": "def test_match_finds_best_matches(searcher: FuzzySearcher, nlp: Language) -> None:\n doc = nlp(\"chiken from Popeyes is better than chken from Chick-fil-A\")\n query = nlp(\"chicken\")\n assert searcher.match(doc, query, ignore_case=False) == [\n (0, 1, 92),\n (6, 7, 83),\n ]", "title": "" }, { "docid": "ad32a298d00a6f7dff23a0e50e7fa7d1", "score": "0.4853434", "text": "def cvMatchShapes(*args):\n return _cv.cvMatchShapes(*args)", "title": "" }, { "docid": "8f727befe76872bfbc06fba219c72354", "score": "0.48520866", "text": "def test_match(self):\n self.assertEqual([self.device], self.nd.match(nodename=self.nodename))\n self.assertEqual(self.nd.all(), self.nd.match(vendor='juniper'))\n self.assertEqual([], self.nd.match(vendor='cisco'))", "title": "" }, { "docid": "1aba6e8c78b5db2097fa0012c2b195a2", "score": "0.4850106", "text": "def matching(H,E,F,seqs,d,go, ge ,max_number_of_matching):\n seq1 = seqs[0]\n seq2 = seqs[1]\n H_arg_maxs = np.argwhere(H == np.max(H))\n array=[([\"\" for seq in seqs] ,tuple(arg_max) ) for arg_max in H_arg_maxs]\n ended_matching_array=[]\n while(array):\n next_array=[]\n for record in array:\n index = record[1]\n matching = record[0]\n if(H[index] == 0):\n ended_matching_array.append( ([matching[i] for i in range(len(seqs))],index ) )\n else:\n x = index[0]\n y = index[1]\n if(H[x,y] == H[x-1,y-1] + d[(seq1[x-1],seq2[y-1])] ):\n symbols = (seq1[x-1] , seq2[y-1])\n next_matching = [symbols[i] + matching[i] for i in range(len(seqs))]\n next_array.append((next_matching,(x-1,y-1)))\n if(H[x,y] == E[x,y]):\n codition = True\n symbols = (\"\",\"\")\n while(codition):\n symbols = (seq1[x-1] + symbols[0], \"_\" + symbols[1])\n if(H[x-1][y] + go + ge == E[x,y]):\n next_matching = [symbols[i] + matching[i] for i in range(len(seqs))]\n next_array.append((next_matching,(x - 1,y)))\n 
codition = (E[x,y] == E[x-1][y] + ge)\n x = x - 1 \n pass\n if(H[x,y] == F[x,y]):\n codition = True\n symbols = (\"\",\"\")\n while(codition):\n symbols = (\"_\" + symbols[0], seq2[y - 1] + symbols[1])\n if(H[x][y - 1] + go + ge == F[x ,y]):\n next_matching = [symbols[i] + matching[i] for i in range(len(seqs))]\n next_array.append((next_matching,(x ,y - 1)))\n codition = (F[x,y] == F[x][y - 1] + ge)\n y = y - 1\n array = next_array[0:max_number_of_matching - len(ended_matching_array)]\n return ended_matching_array", "title": "" }, { "docid": "7d45f170e3106bf6a2f3e10248a0bec6", "score": "0.48492488", "text": "def get_matches(self, feat1, feat2, cv_kpts1, cv_kpts2, dist_type, ransac=True,\n ratio=None, cross_check=True, info=''):\n matcher = cv2.BFMatcher(dist_type) #cv2.NORM_L2\n good_matches = []\n mask = None\n\n # start = time.time()\n if(cross_check):\n init_matches1 = matcher.knnMatch(feat1, feat2, k=2)\n init_matches2 = matcher.knnMatch(feat2, feat1, k=2)\n for i in range(len(init_matches1)):\n # cross check\n if cross_check and init_matches2[init_matches1[i][0].trainIdx][0].trainIdx == i:\n # ratio test\n if ratio is not None and init_matches1[i][0].distance <= ratio * init_matches1[i][1].distance:\n good_matches.append(init_matches1[i][0])\n elif ratio is None:\n good_matches.append(init_matches1[i][0])\n elif not cross_check:\n good_matches.append(init_matches1[i][0])\n else:\n raw_matches = matcher.knnMatch(feat1, feat2, k=2)\n for m, n in raw_matches:\n if m.distance < ratio * n.distance:\n good_matches.append(m)\n\n if(ransac):\n good_kpts1 = np.array([cv_kpts1[m.queryIdx].pt for m in good_matches])\n good_kpts2 = np.array([cv_kpts2[m.trainIdx].pt for m in good_matches])\n _, mask = cv2.findFundamentalMat(good_kpts1, good_kpts2, cv2.RANSAC, 4.0, confidence=0.999)\n\n # end = time.time()\n # print('Time cost in feature match ', end - start)\n\n n_inlier = np.count_nonzero(mask)\n # print(info, 'n_putative', len(good_matches), 'n_inlier', n_inlier)\n\n return good_matches, mask", "title": "" }, { "docid": "8b956a37f27743d792c989a17b722e17", "score": "0.48489034", "text": "def cpv_matches (self, cpv, criterion):\n\n raise NotImplementedError", "title": "" }, { "docid": "df37858a49d2d0e8bc0b3021aa58e21b", "score": "0.48435584", "text": "def match(image1,image2):\r\n im1, keypoints1, descriptors1 = ReadKeys(image1)\r\n im2, keypoints2, descriptors2 = ReadKeys(image2)\r\n \r\n i = 0 # this is our counter into the first image keyframe\r\n matched_pairs = [] # our match list \r\n\r\n for d1_row in descriptors1:\r\n # go through every keypoint descriptor\r\n best_list = []\r\n # save all the computed angles of dot prods here\r\n for d2_row in descriptors2:\r\n # go through every keypoint in the second image for this current keypoint and\r\n best_list.append(math.acos(np.dot(d1_row,d2_row)))\r\n # compute and save the angles of the dot products\r\n sorted_best_list = sorted(best_list)\r\n # after we're done going through them all, sort it to retrieve the top 2\r\n if sorted_best_list[0]/sorted_best_list[1]<0.6 :\r\n # 0.6 was used for part 3 as a threshold against scene/book\r\n # check that the ratio of the smallest to the next largest is less than a threshold\r\n keypoint2_index = best_list.index(sorted_best_list[0])\r\n # and then retrieve the keypoint\r\n matched_pairs.append([keypoints1[i],keypoints2[keypoint2_index]])\r\n # save it into our matched_pairs to use to draw later\r\n i += 1\r\n # increment the counter for indexing\r\n\r\n # RANSAC attempt begins here\r\n RANSAC_matches = 
[]\r\n\r\n for i in range(10):\r\n RANSAC_subset = []\r\n # clear our subset\r\n\r\n # do the random selection 10 times\r\n # pick a random match from matched_pairs\r\n random_match = matched_pairs[np.random.randint(0,len(matched_pairs),size=1)]\r\n\r\n # calculate the change of orientation for this random match picked\r\n random_match_coo = abs(np.degrees(random_match[0][3] - random_match[1][3]))\r\n # calculate the change of scale for this random match picked\r\n random_match_cos = random_match[0][2]/random_match[1][2]\r\n\r\n # go through all the match pairs\r\n for all_other_matches in matched_pairs:\r\n\r\n # calculate change of orientation, scale for these match pairs\r\n all_other_coo = abs(np.degrees(all_other_matches[0][3] - all_other_matches[1][3]))\r\n all_other_cos = all_other_matches[0][2]/all_other_matches[1][2]\r\n\r\n # if the change of orientation is within +/- a threshold\r\n # and the change of scale is within 1.5 times and 0.5 times the random_match\r\n if all_other_coo <= random_match_coo+30 and all_other_coo >= random_match_coo-30:\r\n if all_other_cos >= 0.5*random_match_cos and all_other_cos <= 1.5*random_match_cos:\r\n # add it into the subset\r\n RANSAC_subset.append(all_other_matches)\r\n\r\n # if the subset is larger than our current match set\r\n if len(RANSAC_subset) > len(RANSAC_matches):\r\n RANSAC_matches = RANSAC_subset\r\n # update our matches\r\n\r\n # update the match pairs with our largeset match\r\n matched_pairs = RANSAC_matches\r\n #\r\n # END OF SECTION OF CODE TO REPLACE\r\n #\r\n im3 = DisplayMatches(im1, im2, matched_pairs)\r\n return im3", "title": "" }, { "docid": "68a5767220611cb25accd8ce8bd10b57", "score": "0.48434007", "text": "def simulate_match(params, home_index, away_index):\n\n home_goals = np.random.poisson( lam = (params[0,home_index] * params[3,away_index]) )\n away_goals = np.random.poisson( lam = (params[1,home_index] * params[2,away_index]) )\n return int(home_goals), int(away_goals)", "title": "" }, { "docid": "15247eefbee7a7fe13ed421426df63a0", "score": "0.4842634", "text": "def test_with_simple_matches(self):\n repository = self.create_repository(tool_name='Git')\n review_request = self.create_review_request(repository=repository)\n\n diffset = self.create_diffset(review_request=review_request,\n revision=1)\n\n filediff1 = self.create_filediff(\n diffset=diffset,\n source_file='foo.txt',\n source_revision='123',\n dest_file='foo.txt',\n diff=b'diff1')\n\n filediff2 = self.create_filediff(\n diffset=diffset,\n source_file='foo2.txt',\n source_revision='123',\n dest_file='foo2.txt',\n diff=b'diff2')\n\n interdiffset = self.create_diffset(review_request=review_request,\n revision=2)\n\n interfilediff1 = self.create_filediff(\n diffset=interdiffset,\n source_file='foo.txt',\n source_revision='123',\n dest_file='foo.txt',\n diff=b'interdiff1')\n\n interfilediff2 = self.create_filediff(\n diffset=interdiffset,\n source_file='foo2.txt',\n source_revision='123',\n dest_file='foo2.txt',\n diff=b'interdiff2')\n\n matched_files = get_matched_interdiff_files(\n tool=repository.get_scmtool(),\n filediffs=[filediff1, filediff2],\n interfilediffs=[interfilediff1, interfilediff2])\n\n self.assertEqual(\n list(matched_files),\n [\n (filediff1, interfilediff1),\n (filediff2, interfilediff2),\n ])", "title": "" } ]
75bc406279890dfbecf82c2784606de1
Loads all data: first it creates a list of data points and then populates each data point with data by loading the contour and dicom files.
[ { "docid": "d1b8031ee16956ff92b461f44f8f4605", "score": "0.6711694", "text": "def load_all_data(dicom_dir, contour_dir, link_file):\n data_points = create_data_points(dicom_dir, contour_dir, link_file)\n return get_image_and_masks(data_points)", "title": "" } ]
[ { "docid": "05efec025234f1f56eeac96532b96fa5", "score": "0.73711884", "text": "def load_dataset(self, contour): \n self.dataset = []\n for patient_id in self.link_dict.keys():\n self.dataset += self.get_patient_data(patient_id, contour)", "title": "" }, { "docid": "e8ebb063f524c146e945c3c6c4ceea62", "score": "0.70130897", "text": "def loadData(self):\n for entry in self._dataToLoad:\n self._loadFile( entry, self.data, False)", "title": "" }, { "docid": "087dbd3f3dda6a974036acd1b4fa6a24", "score": "0.66223025", "text": "def loadData(self):\n for ptype,UIname,dec_factor in list(zip(self.ptypes,self.UInames,self.dec_factors)):\n print(\"Loading ptype %s\"%ptype)\n snapdict = openSnapshot(\n self.snapdir,\n self.snapnum,\n int(ptype[-1]), ## ptype should be PartType4,etc...\n keys_to_extract = ['Coordinates']+self.returnKeys\n )\n\n tracked_names,tracked_arrays,tracked_filter_flags,tracked_colormap_flags = [],[],[],[]\n for returnKey,filterFlag,colormapFlag,doMag,doLog in list(zip(\n self.returnKeys,self.filterFlags,self.colormapFlags,self.doMags,self.doLogs)):\n if returnKey in snapdict:\n arr = snapdict[returnKey]\n if doLog:\n arr = np.log10(arr)\n returnKey = 'log10%s'%returnKey\n elif doMag:\n arr = np.linalg.norm(arr,axis=1)\n returnKey = 'mag%s'%returnKey\n\n tracked_names += [returnKey]\n tracked_filter_flags += [filterFlag]\n tracked_colormap_flags += [colormapFlag]\n tracked_arrays+= [arr]\n \n self.particleGroups += [ParticleGroup(\n UIname,\n snapdict['Coordinates'],\n tracked_names = tracked_names,\n tracked_arrays = tracked_arrays,\n decimation_factor = dec_factor,\n tracked_filter_flags = tracked_filter_flags,\n tracked_colormap_flags = tracked_colormap_flags\n )]\n\n ## save the filenames that were opened (so you can re-open them yourself in that order)\n self.particleGroups[-1].filenames_opened = snapdict['fnames']\n\n ## add this particle group to the reader's options file\n self.options.addToOptions(self.particleGroups[-1])\n\n return self.particleGroups", "title": "" }, { "docid": "071745d074b8ddc1f39b55c08be53c80", "score": "0.6580832", "text": "def load_data(self):\n print \"Combining all data\"\n\n # Set up the output WCS object\n output_wcs = WCS(naxis=2)\n output_wcs.wcs.crpix = [self.output_naxis1 / 2.,\n self.output_naxis2 / 2.]\n output_wcs.wcs.crval = [self.output_center_ra, self.output_center_dec]\n output_wcs.wcs.cdelt = np.array([-self.output_pixel_scale,\n self.output_pixel_scale]) / 3600.\n output_wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n self.output_wcs = output_wcs\n\n # Load all of the data.\n # Note that we convert all of our ra and dec values into x and y\n # coordinates in arcseconds relative to (0., 0.). We flip the x-axis so\n # that our coordinates increase from left to right (or East to West).\n # TODO: We use double the memory that we need to currently. This should\n # be optimized to query all of the files for how many points they have,\n # and then load them one by one into preallocated arrays.\n self.ref_vals = np.hstack([i.vals for i in self.ref_images])\n self.ref_errs = np.hstack([i.errs for i in self.ref_images])\n self.ref_psfs = np.hstack([i.psfs for i in self.ref_images])\n self.ref_xs = np.hstack([i.ras for i in self.ref_images])\n self.ref_xs *= -np.cos(self.output_center_dec * np.pi / 180.) 
* 3600.\n self.ref_ys = np.hstack([i.decs for i in self.ref_images])\n self.ref_ys *= 3600.\n\n self.new_vals = np.hstack([i.vals for i in self.new_images])\n self.new_errs = np.hstack([i.errs for i in self.new_images])\n self.new_psfs = np.hstack([i.psfs for i in self.new_images])\n self.new_xs = np.hstack([i.ras for i in self.new_images])\n self.new_xs *= -np.cos(self.output_center_dec * np.pi / 180.) * 3600.\n self.new_ys = np.hstack([i.decs for i in self.new_images])\n self.new_ys *= 3600.\n\n # Generate KDTrees.\n # TODO: Try to use a single KDTree for both? Profile this, and if the\n # KDTree is limiting then that might be a good idea.\n print \"Generating KDTrees\"\n self.ref_kdtree = cKDTree(zip(self.ref_xs, self.ref_ys))\n self.new_kdtree = cKDTree(zip(self.new_xs, self.new_ys))\n\n # Load the bright objects\n self.bright_obj_xs = (-np.cos(self.output_center_dec * np.pi / 180.) *\n 3600. * self.bright_obj_ras)\n self.bright_obj_ys = self.bright_obj_decs * 3600.\n bright_obj_fluxes = [self._calc_value(x, y, radius=0.3, is_star=True)\n for x, y in zip(self.bright_obj_xs,\n self.bright_obj_ys)]\n self.bright_obj_fluxes = np.array(bright_obj_fluxes)\n\n print \"Done loading data\"", "title": "" }, { "docid": "927ac16f42cfa6fb9e640d2776802daa", "score": "0.6545536", "text": "def loadData(self):\n self.getXrfDetector()\n self.getXrdDetector()\n self.getXrdDetectorMask()\n self.getBeamMonitor()\n self.getScanGeometry()\n with h5py.File(self.dataPath, 'r') as h5In:\n for scan in self.scans:\n self.dataUrls.append(h5In.get('%s/measurement/%s' % (scan, self.xrddetector), getlink=True).path)", "title": "" }, { "docid": "06e06c108fdd6b805f7289d95958192a", "score": "0.6476002", "text": "def load_all_data(self):\n images = []\n measurements = []\n lines = self.__read_csv__(self.csv_file_path)\n for line in lines:\n file_name = line[0][0].split('/')[-1]\n imageFolder = line[0][0].split('/')[-2]\n csv_split_path = self.csv_file_path.split('/')\n # path format '../data/IMG/filename'\n center_image_path = csv_split_path[0] + '/' + csv_split_path[1] + '/' + imageFolder + '/' + file_name\n image = cv2.imread(center_image_path)\n images.append(image)\n measurement = float(line[3])\n measurements.append(measurement)\n\n X_train = np.array(images, dtype=float)\n y_train = np.array(measurements, dtype=float)\n\n return X_train, y_train", "title": "" }, { "docid": "15f210a9643fdd72cef75f1d784f1ee3", "score": "0.6451589", "text": "def load_data(self):\n try:\n import trimesh\n except ModuleNotFoundError:\n print(\"module trimesh not found. 
install using 'pip install trimesh' command\")\n\n if self.datadir is None:\n\n self.__load_modelnet()\n\n else:\n\n self.__load_custom_datafiles(self.datadir)\n\n data_voxels = []\n\n for file in tqdm(self.data_files, desc=\"rendering data\"):\n\n mesh = trimesh.load(file)\n voxel = mesh.voxelized(0.5)\n (x, y, z) = map(float, voxel.shape)\n zoom_fac = (self.side_length / x, self.side_length / y, self.side_length / z)\n voxel = ndimage.zoom(voxel.matrix, zoom_fac, order=1, mode=\"nearest\")\n data_voxels.append(voxel)\n\n data_voxels = np.array(data_voxels)\n data_voxels = data_voxels.reshape(\n (-1, self.side_length, self.side_length, self.side_length, 1)\n )\n return data_voxels", "title": "" }, { "docid": "1d2b2959bab306a7bab55c7bbf760f57", "score": "0.64324296", "text": "def create_data_points(dicom_dir, contour_dir, link_file):\n if not os.path.isdir(dicom_dir):\n logger.error('Dicom directory not found at: ' + dicom_dir)\n return\n\n if not os.path.isdir(contour_dir):\n logger.error('Contour directory not found at: ' + contour_dir)\n return\n\n if not os.path.isfile(link_file):\n logger.error('Link file not found at: ' + link_file)\n return\n\n result = []\n\n link_data = pd.read_csv(link_file)\n for index, row in link_data.iterrows():\n\n current_dicom_dir = os.path.join(dicom_dir, row['patient_id'])\n if not os.path.isdir(current_dicom_dir):\n logger.info('Skipping the data point, No dicom dir found at:' + current_dicom_dir)\n\n current_contour_dir = os.path.join(os.path.join(contour_dir, row['original_id']), 'i-contours')\n # print single_contour_dir\n if not os.path.isdir(current_contour_dir):\n logger.info('Skipping the data point, No contour dir found at: ' + current_contour_dir)\n\n for file_name in os.listdir(current_dicom_dir):\n if not file_name.endswith('.dcm'):\n continue\n\n dcm_file_number = file_name.replace('.dcm', '')\n contour_file_name = 'IM-0001-' + dcm_file_number.zfill(4) + '-icontour-manual.txt'\n\n full_dicom_file_path = os.path.join(current_dicom_dir, file_name)\n full_contour_file_path = os.path.join(current_contour_dir, contour_file_name)\n\n if not os.path.isfile(full_contour_file_path):\n logger.info('no contour file found against: ' + full_dicom_file_path)\n continue\n\n result.append(DataPoint(full_dicom_file_path, full_contour_file_path))\n\n return result", "title": "" }, { "docid": "d3912007c4d6599c6abcc0ca015a0c8f", "score": "0.64228433", "text": "def run(self):\n\n self.set_extents()\n self.load_basin_shapefiles()\n self.load_dem()\n self.load_vegetation()\n self.create_netcdf()", "title": "" }, { "docid": "293e5c67f88a124de2fc0c833223c501", "score": "0.63532233", "text": "def load(self):\n self.data = GEX.parse(self.path).data_df\n\n ##Dealing with cmapPy data type instability:\n rows = list(map(lambda x: x[2:-1], list(self.data.index)))\n self.data.index = rows\n columns = list(map(lambda x: x[2:-1], list(self.data)))\n self.data.columns = columns", "title": "" }, { "docid": "e64994bdf00f0946aaac87dbcbb7a095", "score": "0.62415665", "text": "def getData(self, roi_w, roi_h):\n file_manager = FileHandler()\n files_and_points = file_manager.pointsDictionary() # complete list of images and points\n\n first_img_name = next(iter(files_and_points.keys())) # first image name\n\n # getting the current parameters of the first image in the dictionary red from the path stored.\n # lines, samples, bands = get_parameters(files_and_points[first_img_name][0][4])\n lines, samples, bands = get_parameters(file_manager.get_img_path(first_img_name))\n\n cnt_imgs = 
-1\n\n if (roi_w is 0) or (roi_h is 0):\n full_image = True\n n_full_imgs = len(files_and_points)\n dataset = np.zeros([n_full_imgs, lines, samples, bands])\n dataset_ann = np.zeros([n_full_imgs, 1])\n printProgressBar(0, n_full_imgs, prefix='Progress:', suffix='Complete', length=50)\n else:\n full_image = False\n n_points = len(FileHandler.listSelectedPoints())\n dataset = np.zeros([n_points, roi_w, roi_h, bands])\n dataset_ann = np.zeros([n_points, 1])\n\n # iteration over each image which has associated N selected points in data\n for img_name, data_points in files_and_points.items():\n # print(img_name, \" \", data)\n\n # checking specific parameters of the current image\n params = get_parameters(file_manager.get_img_path(img_name))\n if params is not None:\n # retrieving data from parameters\n lines, samples, bands = params\n\n img_layers = np.zeros([lines, samples, bands])\n\n # reading all the wavelengths\n for i in range(0, bands):\n file_path_complete = file_manager.get_img_path(img_name) # with datacube extension\n img_layers[:, :, i] = ReadBilFile(file_path_complete, i + 1)\n\n if full_image:\n cnt_imgs = cnt_imgs + 1\n dataset[cnt_imgs] = img_layers\n for point in data_points:\n # checking if there is at least one ROI with \"Anomaly\" annotation\n dataset_ann[cnt_imgs] = True if point[2] == p_anomaly else None\n\n else:\n for point in data_points:\n cnt_imgs = cnt_imgs + 1\n # lines : number of row of the image\n # samples: number of columns of the image\n x1, y1, x2, y2 = DatasetGenerator.check_limits(int(point[0]), int(point[1]),\n roi_w, roi_h, samples, lines)\n dataset[cnt_imgs] = img_layers[y1:y2, x1:x2, :]\n dataset_ann[cnt_imgs] = True if point[2] == p_anomaly else False\n\n # Progress report\n if full_image:\n printProgressBar(cnt_imgs + 1, n_full_imgs, prefix='Progress:', suffix='Complete', length=50)\n # print(cnt_imgs)\n\n # print(dataset.shape)\n # print(dataset.max())\n\n with h5py.File(local_path_out + 'datacube_dataset.hdf5', 'w') as f:\n # datacube_images optimized for 2 bytes integer\n f.create_dataset('datacube_images', data=dataset, dtype='i2', compression=\"gzip\", compression_opts=9)\n f.create_dataset('datacube_annotations', data=dataset_ann, dtype='i1', compression=\"gzip\",\n compression_opts=9)\n\n # TODO raise an exception if there is any problem with the file or the number of bands read\n\n # return img_layers", "title": "" }, { "docid": "d7313cf85a315d125a2dd59570c290a1", "score": "0.62341446", "text": "def setData():\n global x_data\n global y_data\n global fCoefficients\n global fPrimeCoefficients\n x_path = join('..','Data','CrimeXData.bin')\n y_path = join('..','Data','CrimeYData.bin')\n c_path = join('..','Data','Coefficients.bin')\n cp_path = join('..','Data','FPrimeCoefficients.bin')\n with open(c_path,'rb') as file:\n fCoefficients = pickle.load(file)\n with open(cp_path,'rb') as file:\n fPrimeCoefficients = pickle.load(file)\n with open(x_path,'rb') as file:\n x_data = pickle.load(file)\n with open(y_path,'rb') as file:\n y_data = pickle.load(file)", "title": "" }, { "docid": "b94873e5a20516992d978f08d05a7277", "score": "0.6206832", "text": "def load_all_calib_data():\n\n return\n\n #DANGEROUS below here!!!\n calib_directories = glob.glob('/Volumes/HD3/strat_calib_data/*')\n\n for d in calib_directories:\n calib_id = int(d.split('/')[-1])\n filename = os.path.join(d, 'calib.csv')\n if os.path.exists(filename):\n print \"Importing %d/calib.csv\" % calib_id\n load_calib_data(filename, calib_id)", "title": "" }, { "docid": 
"eb1d1acdad0a8622852d85981bf3ca7a", "score": "0.61924255", "text": "def importDataset(self, verbose = False):\n # Get a list of the various txt files\n listOfPositionFiles = [] # actual cartesian position\n listOfForceFiles = [] # sensor forces\n listOfInputFiles = [] # f_ref \"forces\"\n\n full_positions = np.empty((1,3))\n full_forces = np.empty((1,3))\n full_inputs = np.empty((1,3))\n\n for (dirpath, dirnames, filenames) in os.walk(self.base_path):\n dirnames.sort()\n\n for txtname in fnmatch.filter(filenames, 'positions.txt'):\n listOfPositionFiles.append(os.path.join(dirpath, txtname))\n\n for txtname in fnmatch.filter(filenames, 'forces.txt'):\n listOfForceFiles.append(os.path.join(dirpath, txtname))\n\n for txtname in fnmatch.filter(filenames, 'f_ref.txt'):\n listOfInputFiles.append(os.path.join(dirpath, txtname))\n\n print(\"Retrieved %d position files, %d force files and %d input files from base directory '%s' \"\n %(len(listOfPositionFiles), len(listOfForceFiles), len(listOfInputFiles), self.base_path))\n\n # Read the appropriate columns and parse them into np arrays\n for file_idx, filename in enumerate(listOfForceFiles):\n time, seq, f_x, f_y, f_z, t_x, t_y, t_z = np.loadtxt(filename, usecols = (0, 1, 4, 5, 6, 7, 8, 9), skiprows = 1, delimiter = ',', unpack = True)\n f_x = f_x.reshape(-1,1)\n f_y = f_y.reshape(-1,1)\n f_z = f_z.reshape(-1,1)\n forces = np.concatenate((f_x, f_y, f_z), axis = 1)\n self.force_data.update({file_idx : forces})\n full_forces = np.append(full_forces, forces, axis = 0)\n\n full_forces = full_forces[1:,] # remove the first \"trash\" line that was created with np.empty\n\n for file_idx, filename in enumerate(listOfPositionFiles):\n time, seq, p_x, p_y, p_z = np.loadtxt(filename, usecols = (0, 1, 4, 5, 6), skiprows = 1, delimiter = ',', unpack = True)\n p_x = p_x.reshape(-1,1)\n p_y = p_y.reshape(-1,1)\n p_z = p_z.reshape(-1,1)\n positions = np.concatenate((p_x, p_y, p_z), axis = 1)\n self.position_data.update({file_idx : positions})\n full_positions = np.append(full_positions, positions, axis = 0)\n\n full_positions = full_positions[1:,] # remove the first \"trash\" line that was created with np.empty\n\n for file_idx, filename in enumerate(listOfInputFiles):\n time, seq, f_ref_x, f_ref_y, f_ref_z = np.loadtxt(filename, usecols = (0, 1, 4, 5, 6), skiprows = 1, delimiter = ',', unpack = True)\n f_ref_x = f_ref_x.reshape(-1,1)\n f_ref_y = f_ref_y.reshape(-1,1)\n f_ref_z = f_ref_z.reshape(-1,1)\n inputs = np.concatenate((f_ref_x, f_ref_y, f_ref_z), axis = 1)\n self.input_data.update({file_idx : inputs})\n full_inputs = np.append(full_inputs, inputs, axis = 0)\n\n full_inputs = full_inputs[1:,] # remove the first \"trash\" line that was created with np.empty\n\n faulty_files = 0\n for i in range(len(self.position_data)):\n if len(self.position_data[i]) == len(self.force_data[i]) and len(self.force_data[i]) == len(self.input_data[i]):\n if verbose:\n print(i, self.position_data[i].shape, self.force_data[i].shape, self.input_data[i].shape)\n else:\n faulty_files += 1\n print(\"In dataset %s there is a size mismatch. 
Pos : %d F_s %d F_ref %d \" %(listOfPositionFiles[i], self.position_data[i].shape[0], self.force_data[i].shape[0], self.input_data[i].shape[0]))\n print(\"%d files need to be fixed\" %faulty_files)\n full = np.concatenate((full_positions, full_forces, full_inputs), axis = 1)", "title": "" }, { "docid": "7346c908d80bf8039426e209159cf460", "score": "0.61921275", "text": "def load_data(self):\n\t data_samples, labels, htsoft = extract_imagedata(normalization=0)\n\t aug_data = data_samples.reshape((data_samples.shape[0], data_samples.shape[1], data_samples.shape[2], 1))\n\t total_batches = np.floor(data_samples.shape[0]/self.config.batch_size)\n\t num_train = np.floor(self.config.frac_train*total_batches) * self.config.batch_size\n\t num_train = int(num_train)\n\t self.X_train = aug_data[:num_train, :, :, :]\n\t self.Y_train = labels[:num_train, :]\n\t self.htsoft_train = htsoft[:, :num_train]\n\t self.X_test = aug_data[num_train:int(total_batches*self.config.batch_size), :, :, :]\n\t self.htsoft_test = htsoft[:, num_train:int(total_batches*self.config.batch_size)]\n\t self.Y_test = labels[num_train:int(total_batches*self.config.batch_size) , :]", "title": "" }, { "docid": "497da0735cf8d790b0965a3b96e4b397", "score": "0.61912876", "text": "def _load_data(self, data):\n if not isinstance(data, list):\n self.z = [data.T] # creates a list of length 1\n else:\n n = len(data)\n for snip in range(n):\n if snip == 0:\n self.z = [data[snip].T]\n else:\n self.z.append(data[snip].T) # time is a function of the columns, internally\n\n self.N = self.z[0].shape[0] # will be 8, the number of features", "title": "" }, { "docid": "2432e8a2f8405d4035d7b809c7e4043f", "score": "0.6178262", "text": "def load_data(self) -> None:", "title": "" }, { "docid": "2ca6a59ceac654e02f312b1642565d3a", "score": "0.6176496", "text": "def load_dataset(self, paths, files):\n patch_size = (self.patch_size, self.patch_size, 1)\n for file in files:\n # Load images in (H, W, C) format.\n img_s = np.expand_dims(io.imread(os.path.join(paths['data'], file)), -1)\n img_c = np.expand_dims(io.imread(os.path.join(paths['label'], file)), -1)\n\n # Convert to range [0, 1] in float32\n img_s = (img_s / 255.).astype('float32')\n img_c = (img_c / 255.).astype('float32')\n\n if self.augment_data:\n img_c, img_s = data_augmentation(img_c), data_augmentation(img_s)\n else:\n img_c, img_s = [img_c], [img_s]\n\n for c, s in zip(img_c, img_s):\n c_patches = create_patches(c, patch_size, step=self.patch_size)\n s_patches = create_patches(s, patch_size, step=self.patch_size)\n\n for data, label in zip(s_patches, c_patches):\n data = torch.from_numpy(data.transpose((2, 0, 1)))\n label = torch.from_numpy(label.transpose((2, 0, 1)))\n self.dataset['data'].append(data)\n self.dataset['label'].append(label)", "title": "" }, { "docid": "f8f87b9ed8fe95f15734b58ae277bda3", "score": "0.6173269", "text": "def _load_data(self):\n load_data = self.load_reader.vals4slice(de_load, self.start, data_max_date, step=1)\n date_range = pd.date_range(self.start, data_max_date, freq='1H')\n self.load_data = pd.DataFrame(data={'load': load_data}, index=date_range)\n self.ex_data = pd.DataFrame(data=[], index=date_range)\n \n if self.exog is None:\n return\n for var_name in self.exog:\n self._get_exog(var_name)", "title": "" }, { "docid": "9397379b816d245aa27748f2de562a12", "score": "0.6172759", "text": "def collect_data(self):\n if self.debug:\n print \"Collecting point cloud data\"\n try:\n os.mkdir(self.base_dir)\n os.chdir(self.base_dir)\n except OSError:\n 
os.chdir(self.base_dir)\n os.system(\"rm *.pcd\")\n os.system(\"kill $(pgrep XnSensorServer)\") #this is needed because the process from previous records does not die\n command = \"pcl_openni_recorder\"\n self.run_command(command,35)\n os.chdir(\"../\")", "title": "" }, { "docid": "c8b7358b58d12fe79a0c6a7c2d1d9fdf", "score": "0.61490434", "text": "def load_data(self):\n for year in self.years:\n self.__load_data(year)", "title": "" }, { "docid": "3f758d195a3596f5b040e73ffd03923a", "score": "0.6148267", "text": "def load_all_pose_data(self):\n for scene_name in self.scene_generator():\n self.get_pose_data(scene_name)", "title": "" }, { "docid": "409309487d17755b452786a5f25b1310", "score": "0.61253846", "text": "def loadChirpAndDispersion(self):\n filename = fnmatch.filter(os.listdir(self.directory), '*.dat')\n fullname = os.path.join(self.directory, filename[0])\n self.inputFilenames['chirp'] = fullname\n\n numPoints = int(os.path.getsize(fullname) / 4 / 4)\n mmap = np.fromfile(fullname, dtype=np.int32, count=numPoints, offset=0)\n weight = np.fromfile(fullname, dtype=np.float32, count=numPoints, offset=numPoints * 4)\n\n self.chirp = mmap + 1 - weight\n self.chirp = self.chirp / np.max(self.chirp)\n self.dispersion = np.fromfile(fullname, dtype=np.float32, count=numPoints * 2,\n offset=numPoints * 4 * 2)\n logging.info('Mapping and Disperion data loaded from: {}'.format(filename[0]))", "title": "" }, { "docid": "12fe92128ec11f218857d5c0591e2bc6", "score": "0.6099259", "text": "def load_data(self, path):\n self.train_labels = np.empty(0)\n self.test_labels = np.empty(0)\n self.validation_labels = np.empty(0)\n self.train_images = np.empty((0, self.resolution, self.resolution))\n self.validation_images = np.empty((0, self.resolution, self.resolution))\n self.test_images = np.empty((0, self.resolution, self.resolution))\n lst1=[]\n lst2=[]\n for dirs in os.listdir(path):\n for label in os.listdir(os.path.join(path, dirs)):\n label_path = os.path.join(path, dirs, label)\n for file in os.listdir(label_path):\n image=Image.open(os.path.join(label_path,file)).convert('F')\n lst1.append(np.array(image))\n lst2.append(label)\n if dirs == \"train\":\n self.train_images=np.array(lst1)\n self.train_labels=np.array(lst2)\n lst1.clear()\n lst2.clear()\n if dirs == \"test\":\n self.test_images=np.array(lst1)\n self.test_labels=np.array(lst2)\n lst1.clear()\n lst2.clear()\n if dirs == \"validation\":\n self.validation_images=np.array(lst1)\n self.validation_labels=np.array(lst2)\n lst1.clear()\n lst2.clear()\n print(\"LOADED\")\n return self.train_labels, self.validation_labels, self.test_labels, self.train_images, self.validation_images, self.test_images", "title": "" }, { "docid": "6fda13e6294154ad6b7f9ddc33763fe9", "score": "0.60990393", "text": "def __init__(self):\n try:\n path2013 = os.path.join(os.path.curdir,\n 'data\\\\VicRoadsAccidents\\\\2013',\n '2013.shp')\n path2014 = os.path.join(os.path.curdir,\n 'data\\\\VicRoadsAccidents\\\\2014',\n '2014.shp')\n path2015 = os.path.join(os.path.curdir,\n 'data\\\\VicRoadsAccidents\\\\2015',\n '2015.shp')\n path2016 = os.path.join(os.path.curdir,\n 'data\\\\VicRoadsAccidents\\\\2016',\n '2016.shp')\n path2017 = os.path.join(os.path.curdir,\n 'data\\\\VicRoadsAccidents\\\\2017',\n '2017.shp')\n path2018 = os.path.join(os.path.curdir,\n 'data\\\\VicRoadsAccidents\\\\2018',\n '2018.shp')\n\n self.gpd2013 = gpd.read_file(path2013)\n self.gpd2014 = gpd.read_file(path2014)\n self.gpd2015 = gpd.read_file(path2015)\n self.gpd2016 = gpd.read_file(path2016)\n 
self.gpd2017 = gpd.read_file(path2017)\n self.gpd2018 = gpd.read_file(path2018)\n\n self.pd2013 = pd.DataFrame(self.gpd2013)\n self.pd2014 = pd.DataFrame(self.gpd2014)\n self.pd2015 = pd.DataFrame(self.gpd2015)\n self.pd2016 = pd.DataFrame(self.gpd2016)\n self.pd2017 = pd.DataFrame(self.gpd2017)\n self.pd2018 = pd.DataFrame(self.gpd2018)\n except OSError:\n print('Please check the file path')", "title": "" }, { "docid": "ec4cb58f77f812354d501bbdb4cfa6b4", "score": "0.6094891", "text": "def load(self):\n self.load_features()\n self.load_images()", "title": "" }, { "docid": "d2af12682b4e348494f1b3f277cdd2bb", "score": "0.60718197", "text": "def load_data(self):\n self.data = io.imread(self.filename)", "title": "" }, { "docid": "d0d2bcc41c3fe6694b3a39959444147b", "score": "0.606857", "text": "def read_data(self):\n\n # Read file\n try:\n with open(self.path, newline='') as datafile:\n reader = csv.reader(datafile, delimiter=',')\n for row in reader:\n if len(row) == 5:\n if float(row[4]) < 2 and float(row[4]) >= self.limit:\n self.atoms.append(row[0])\n self.x.append(float(row[1]))\n self.y.append(float(row[2]))\n self.z.append(float(row[3]))\n self.t.append(float(row[4]))\n elif len(row) == 4:\n self.atoms.append(row[0])\n self.x.append(float(row[1]))\n self.y.append(float(row[2]))\n self.z.append(float(row[3]))\n else:\n print(\"Unknown amount of columns in xyz file (\", len(row), \")\")\n except FileNotFoundError:\n print(\"Coordinate file not found. Check the file path.\")\n\n # Convert to numpy arrays\n self.x = np.array(self.x)\n self.y = np.array(self.y)\n self.z = np.array(self.z)\n self.t = np.array(self.t)\n\n # Write xyz file\n if self.write == True:\n Data.write_to_file(self)\n print(\"VMD-compatible xyz file written.\")", "title": "" }, { "docid": "0e56d25a3a91fc1313d20a607ff9763b", "score": "0.60555005", "text": "def load_data(self):\n # Step 0: Load transformed data if it exists.\n if Path(self.transformed_data_path).is_file(): # found transformed data\n self.pd_data = load_pickle(self.transformed_data_path)\n # TODO: Here we should also load used label, numerical, and categorical column names, meaning they should be\n # saved. 
This allows:\n # 1) using data without prior knowledge of the mentioned class variables.\n # 2) rationalizing once we know something, we have the option to recover them without remembering them\n # 3) so no need to rely on any \"self\" variables, i.e., our saved data is self-sufficient\n # dump() the class instance as a YAML config document\n self.hash_sizes = [self.pd_data[categ_name].nunique() for categ_name in self.categ_names]\n return\n\n # Step 1: Load label, numerical, and categorical features\n label_ll = [list() for _ in range(self.label_num)] # list of label data\n numer_ll = [list() for _ in range(self.numer_num)] # list of each numerical feature's data\n categ_ll = [list() for _ in range(self.categ_num)] # list of each categorical feature's data\n # Define defaultdict where key=\"feature index\" & val=\"defaultdict\" where key=\"category\" & val=\"count\".\n categ_count_dict = defaultdict(lambda: defaultdict(int))\n\n with open(self.dataset_path, 'r') as ld:\n for line in ld:\n # Using zero \"0\" as substitute for missing values in:\n # 1) Numerical feature may corrupt data because 0 is numerical.\n # 2) Categorical feature will not corrupt data because the string \"0\" is a new category.\n # TODO: Need a principled way to determine missing values for numerical features.\n line = [v if v != \"\" else \"0\" for v in line.strip('\\n').split('\\t')]\n\n for i, j in enumerate(self.label_indices): # record label feature data\n label_ll[i].append(int(line[j])) # typecast from string to save memory\n for i, j in enumerate(self.numer_indices): # record numerical feature data\n numer_ll[i].append(int(line[j])) # typecast from string to save memory\n for i, j in enumerate(self.categ_indices): # record categorical feature data\n categ_ll[i].append(line[j]) # cannot typecast string data\n categ_count_dict[i][line[j]] += 1 # count category occurrences\n\n # Step 2: Create dictionary for indexing categories.\n if Path(self.fit_dictionary_path).is_file():\n categ_index_dict = load_pickle(self.fit_dictionary_path)\n else:\n # Define defaultdict where key=\"feature index\" & val=\"defaultdict\" where key=\"category\" & val=\"index\".\n # TODO: Implement as a parent-class function\n categ_index_dict = defaultdict(lambda: defaultdict(int))\n\n # TODO: Rename variables, e.g., feat_index > i\n for feat_index, categ_dict in categ_count_dict.items():\n categ_index = 0\n for categ, count in categ_dict.items():\n if count >= self.categ_filter: # index filtered categories at a later stage\n categ_index_dict[feat_index][categ] = categ_index\n categ_index += 1\n del categ_count_dict # release memory\n gc.collect()\n # Save fit dictionary.\n save_pickle(self.fit_dictionary_path, dict(categ_index_dict)) # cannot pickle defaultdict using lambda\n\n # Step 3: Index categories.\n for i, categ_l in enumerate(categ_ll):\n for j, c in enumerate(categ_l):\n if c in categ_index_dict[i]:\n categ_ll[i][j] = categ_index_dict[i][c] # index in-place\n else: # index filtered categories as the end index\n categ_ll[i][j] = len(categ_index_dict[i])\n del categ_index_dict # release memory\n gc.collect()\n\n # Step 4: Format data and obtain hash statistics.\n array_data = np.concatenate((np.asarray(label_ll).T, np.asarray(numer_ll).T, np.asarray(categ_ll).T), axis=1)\n del label_ll, numer_ll, categ_ll # release memory\n gc.collect()\n\n self.pd_data = pd.DataFrame(array_data, columns=self.used_columns_names)\n self.hash_sizes = [self.pd_data[categ_name].nunique() for categ_name in self.categ_names]\n del array_data # release 
memory\n gc.collect()\n\n for col_name, dtype in self.dtype_dict.items():\n self.pd_data[col_name] = self.pd_data[col_name].astype(dtype)\n # Save transformed data.\n save_pickle(self.transformed_data_path, self.pd_data)", "title": "" }, { "docid": "4327df5c665a0aaa9de860156f1b3c80", "score": "0.60475284", "text": "def __init__(self, filenames: list):\n self.x_dim = None\n self.y_dim = None\n self.all_records_num = None\n self.label_data = dict()\n self.index_data = []\n self.full_data_x = None\n self.full_data_y = None\n has_data = False\n for file_id, filename in enumerate(filenames):\n if isinstance(filename, (tuple, list)):\n if len(filename) >= 3:\n name_x, name_y, label = filename\n label = str(label)\n elif len(filename) == 2:\n name_x, name_y = filename\n label = None\n else:\n raise Exception(\"Incomplete data.\")\n dat_x = np.genfromtxt(name_x, delimiter=\",\")\n dat_y = np.genfromtxt(name_y, delimiter=\",\")\n if dat_y.ndim == 1:\n dat_y = dat_y[:, np.newaxis]\n if dat_x.shape[0] != dat_y.shape[0]:\n raise Exception(\"In file<{0}>, X and Y has different records count.\".format(file_id))\n if not has_data:\n self.x_dim = dat_x.shape[1:]\n self.y_dim = dat_y.shape[1:]\n self.all_records_num = dat_x.shape[0]\n self.full_data_x = dat_x\n self.full_data_y = dat_y\n else:\n if dat_x.shape[1:] != self.x_dim or dat_y.shape[1:] != self.y_dim:\n raise Exception(\"In file<{0}>, X or Y data dim doesn't match with other files.\".format(file_id))\n self.all_records_num += dat_x.shape[0]\n self.full_data_x = np.vstack(self.full_data_x, dat_x)\n self.full_data_y = np.vstack(self.full_data_y, dat_y)\n\n cur_data_dict = {\"X\": dat_x, \"Y\": dat_y}\n self.index_data.append(cur_data_dict)\n if label is not None:\n self.label_data[label] = cur_data_dict\n else:\n raise Exception(\"<{0}>, filename type not supported.\".format(file_id))", "title": "" }, { "docid": "9b51362af4fa34926dd3979d119cd9ce", "score": "0.6043146", "text": "def load_data(self):", "title": "" }, { "docid": "9b51362af4fa34926dd3979d119cd9ce", "score": "0.6043146", "text": "def load_data(self):", "title": "" }, { "docid": "0f0290827a6671068bb58a46dc4cc4e6", "score": "0.6038656", "text": "def load_data(self):\n images = list()\n labels = list()\n emotion_index_map = dict()\n label_directories = [dir for dir in os.listdir(self.datapath) if not dir.startswith('.')]\n for label_directory in label_directories:\n if self.target_emotion_map:\n if label_directory not in self.target_emotion_map.keys(): continue\n self._add_new_label_to_map(label_directory, emotion_index_map)\n label_directory_path = self.datapath + '/' + label_directory\n\n if self.time_delay:\n self._load_series_for_single_emotion_directory(images, label_directory, label_directory_path, labels)\n else:\n image_files = [image_file for image_file in os.listdir(label_directory_path) if not image_file.startswith('.')]\n self._load_images_from_directory_to_array(image_files, images, label_directory, label_directory_path, labels)\n\n vectorized_labels = self._vectorize_labels(emotion_index_map, labels)\n self._check_data_not_empty(images)\n return self._load_dataset(np.array(images), np.array(vectorized_labels), emotion_index_map)", "title": "" }, { "docid": "47bb46b10c300853ff86ab0058fdc3a6", "score": "0.6032076", "text": "def load_data(self):\n if not exists(self.path + self.file_name):\n if not exists(self.path + self.name + \".zip\"):\n #### Download ####\n url = f'http://85.215.86.232/tig/data/{self.name}.zip'\n r = requests.get(url, allow_redirects=True, stream=True)\n\n 
chunkSize = 1024\n pbar = tqdm(r.iter_content(chunk_size=chunkSize), unit=\"B\",\n total=int(r.headers['Content-Length']),\n desc=f'Download {self.name}',\n unit_scale=True, unit_divisor=1024)\n\n Path(self.path).mkdir(parents=True, exist_ok=True)\n with open(self.path + self.name + \".zip\", 'wb') as f:\n for chunk in pbar:\n if chunk: # filter out keep-alive new chunks\n pbar.update(len(chunk))\n f.write(chunk)\n log.info(\"Data set donwloaded! (%s%s.zip)\", self.path, self.name)\n else:\n log.info(\"Data exist already! (%s%s.zip)\", self.path, self.name)\n\n if not exists(self.path + self.name + '.npz'):\n #### Extract ####\n with ZipFile(file=self.path + self.name + '.zip') as zip_file:\n # Loop over each file\n for member in tqdm(iterable=zip_file.namelist(), total=len(zip_file.namelist()),\n desc=f'Extract {self.name} '):\n zip_file.extract(member, path=self.path)\n else:\n log.info(\"Data exist extracted! (%s%s.zip)\", self.path, self.name)\n\n if not exists(self.path + 'kinetics_class_dict.json'):\n #### Download ####\n url = 'http://85.215.86.232/tig/data/kinetics_class_dict.json'\n r = requests.get(url, allow_redirects=True, stream=True)\n\n pbar = tqdm(unit=\"B\", total=int(r.headers['Content-Length']) // 10**6,\n desc=f'Download {self.name} ')\n chunkSize = 1024\n\n Path(self.path).mkdir(parents=True, exist_ok=True)\n with open(self.path + 'kinetics_class_dict.json', 'wb') as f:\n for chunk in r.iter_content(chunk_size=chunkSize):\n if chunk: # filter out keep-alive new chunks\n pbar.update(len(chunk) // 10**6)\n f.write(chunk)\n log.info(\"Class dictionary donwloaded! (%skinetics_class_dict.json)\", self.path)\n else:\n log.info(\"Class dictionary already exist! (%skinetics_class_dict.json)\", self.path)\n\n log.info(\"Load data...\")\n data = np.load(self.path + self.file_name, allow_pickle=True, mmap_mode=\"r\")\n if self.lim is not None:\n self.x = np.asarray(list(data[\"x\"]))[:self.lim]\n self.y = np.asarray(list(data[\"y\"]))[:self.lim]\n else:\n self.x = np.asarray(list(data[\"x\"]))\n self.y = np.asarray(list(data[\"y\"]))\n\n le = preprocessing.LabelEncoder()\n self.y = le.fit_transform(self.y)\n\n with open(self.path + 'kinetics_class_dict.json', \"rb\") as f:\n self.classes = json.load(f)", "title": "" }, { "docid": "991303587063de38b4bacacbcd2f72bd", "score": "0.60309863", "text": "def get_local_data(self):\n try:\n h5 = h5py.File(os.path.join(self.directory, \"trajectory.h5\"), mode=\"r\")\n self.timepoints = [key for key in h5[\"concentration\"][\"co2\"].keys()]\n self.timepoints.sort(key=int)\n self.dims = list(h5[\"concentration\"][\"co2\"][\"0\"].shape)\n self.nutrients = list(h5[\"concentration\"].keys())\n self.collect_positions(h5)\n self.get_nutrient_grid(h5)\n h5.close()\n except:\n print(\"Missing HDF5 file\")\n\n self.biomass = pd.read_csv(\n os.path.join(self.directory, \"Results\", \"biomass.csv\"),\n usecols=[0, 1, 2],\n delimiter=\"\\t\",\n )\n self.ntypes = pd.read_csv(\n os.path.join(self.directory, \"Results\", \"ntypes.csv\"),\n usecols=[0, 1, 2],\n delimiter=\"\\t\",\n )\n self.avg_con = pd.read_csv(\n os.path.join(self.directory, \"Results\", \"avg_concentration.csv\"),\n usecols=[0, 2, 3, 4],\n delimiter=\"\\t\",\n names=[\"Time\", \"O2\", \"Sucrose\", \"CO2\"],\n skiprows=1,\n )\n f = open(os.path.join(self.directory, \"metadata.json\"), \"r\")\n self.metadata = json.load(f)\n f.close()\n if \"IPTG\" in self.metadata:\n self.IPTG = self.metadata[\"IPTG\"]\n self.sucRatio = self.metadata[\"IPTG\"]\n else:\n self.IPTG = 
self.metadata[\"SucRatio\"]\n # TODO replace sucRatio with IPTG\n self.sucRatio = self.metadata[\"SucRatio\"]\n self.convert_units_avg_con()\n self.convert_units_biomass()", "title": "" }, { "docid": "13d42c6533bdbd8ff51bd30770859732", "score": "0.60293514", "text": "def load_data(self):\n t0 = time.time()\n if not all([self.current_time_start, self.current_time_end]):\n raise Exception\n\n self.reset_data()\n\n # Load files within time range\n self.current_data['mit'] = self.get_data_within_time_range('mit', self.current_time_start, self.current_time_end)\n self.current_data['co2'] = self.get_data_within_time_range('co2', self.current_time_start, self.current_time_end)\n\n # Reset index\n self.current_data['mit'] = self.current_data['mit'].reset_index(drop=True)\n self.current_data['co2'] = self.current_data['co2'].reset_index(drop=True)\n\n # print('Load data')\n # print('mit', len(self.current_data['mit']))\n # print('co2', len(self.current_data['co2']))\n # print('Loaded in: {}'.format(time.time()-t0))", "title": "" }, { "docid": "5451f31a15b9d4eadcc7eb2095883fe7", "score": "0.6023418", "text": "def prepare_loaded_data(self, file_name):\n #utworzenie bufora\n buff = Buffer()\n data = CsvReader.read_from_file(file_name, 1)\n #zmiana czasu\n data = DataOperation.change_time_relative(data)\n #zmiana wysokosci na metry\n data = DataOperation.change_altitude_cm_m(data)\n #stworzenie zapisywacza\n saver = FileSaver(\"saved_data/dane.txt\")\n\n #kazda linijke z pliku csv buforujemy osobno\n for d in data:\n buff.set_data(d)\n buffered_data = buff.get_data()\n #sprawdzamy czy kazda linijka jest poprawnie zapisana\n if InputValidator.input_val(buffered_data):\n #zapisujemy kazda linijke do nowego pliku\n saver.save_data(buffered_data)\n\n #odczyt danych z pliku csv i wizualizacja\n r_data = CsvReader.read_from_file(saver.get_file_name())\n #tworzymy wizualizator, drugi parametr do interwal czasowy\n self.visualizer = PlotCreator(r_data, 1)\n self.data_loaded = True\n print \"Dane zaladowane\"", "title": "" }, { "docid": "84718ca56bf4cb30a69fa8a52a034f5a", "score": "0.6016522", "text": "def _loaddata(self):\n _header = self._header\n elemlist = self.elemlist\n frame = self.frame\n \n def nans(numitems):\n return [np.nan for i in xrange(numitems)]\n\n _elemid_lookup = dict(zip(_header.name, _header.id))\n \n # Data gets unpacked to a temporary dict tmpdata before building\n # Element objects\n # \n # We will unpack 1d elements to an array object and then\n # change them to 2d ndarrays after the data is loaded.\n #\n # Elements with more than 1 dimension will get unpacked\n # as lists of arrays and transformed to ndarrays after\n # the data is loaded\n #\n # The array.array objects are typed unlike regular lists.\n # This improves memory usage. The array.array objects also\n # have highly efficient appending unlike ndarrays\n \n # initialize temporary dict\n tmpdata = {} \n for name, typ, varrate, numvalues in \\\n zip(_header.name, _header.type, \n _header.varrateflag, _header.numvalues):\n if elemlist is None or any(fnmatch(name, wc) for wc in elemlist):\n if not varrate and numvalues == 1:\n tmpdata[name] = array(typ)\n else:\n tmpdata[name] = []\n\n # Intialize Frame arrays for CSSDC measures\n for name in tmpdata.keys():\n if _header.rate[_elemid_lookup[name]] != 1:\n tmpdata[name+'_Frames'] = array('i')\n\n fid = open(self.info.filename,'rb')\n read = fid.read # dots make things slow...\n read(self.cursor) # move to where the data begins\n\n # The daq files have a frame for every sample. 
Each frame\n # contains the variables states at the time the frame was\n # saved. \n #\n # we want to create arrays that can be indexed to decide\n # whether the variable needs stored. This way we don't\n # have to look through the elemlist for every variable on\n # every frame\n if elemlist is None: # elemlist empty \n mask = [True for i in xrange(len(_header.name))]\n else:\n mask = []\n for name in _header.name:\n mask.append(any(fnmatch(name, wc) for wc in elemlist))\n\n bombed = False\n # Python quirk no. 372837462: while 1 is faster than while True\n while 1:\n try:\n frame.code.append(unpack('i',read(4))[0])\n if frame.code[-1] == -2:\n break\n \n frame.frame.append(unpack('i', read(4))[0])\n frame.count.append(unpack('i', read(4))[0])\n\n # xrange (2.7, range in 3) is faster than range\n for j in xrange(frame.count[-1]):\n i = unpack('i', read(4))[0]\n\n if _header.varrateflag[i]:\n numitems = unpack('i', read(4))[0]\n else:\n numitems = _header.numvalues[i]\n \n size = _header.bytes[i]\n \n if mask[i]:\n name = _header.name[i]\n \n if numitems == 1:\n typ = _header.type[i]\n tmpdata[name].append(unpack(typ,read(size))[0])\n \n else: # numitems > 1\n typ = _header.nptype[i]\n tmpdata[name].append(fromfile(fid,typ,numitems))\n \n if _header.rate[i] != 1:\n tmpdata[name+'_Frames'].append(frame.frame[-1])\n \n else: # we don't need to read this element\n read(numitems*size) # seek is slow...\n # reduce calls to read\n except:\n msg = 'Failed loading file on frame %i.'%frame.frame[-1]\n msg += ' (stopped reading file)'\n warnings.warn(msg, RuntimeWarning)\n bombed = True\n break\n\n fid.close()\n\n # We made it through the daq file.\n # Now it is time to do some bookkeeping.\n frame.frame = np.array(frame.frame)\n self.f0 = f0 = frame.frame[0]\n self.fend = fend = frame.frame[-1]\n\n # If we bombed unpacking a frame we need to make sure\n # everything is aligned before we intialize Elements\n if bombed:\n # we will strip off the last frame just to make sure\n # everything is kosher\n n = min(len(frame.code),len(frame.frame),len(frame.count))-1\n frame.code = frame.code[:n]\n frame.frame = frame.frame[:n]\n frame.count = frame.count[:n]\n \n for name in tmpdata:\n if name.endswith('_Frames'):\n continue\n\n val = tmpdata[name]\n i = _elemid_lookup[name]\n if _header.rate[i] != 1:\n # CSSDC measures should always be okay. If they have 1\n # value they use read, if they have multiple values they\n # use fromfile. 
In either case if the unpacking fails\n # nothing will get appended to tmpdata and it will exit\n # before appending to the cooresponding _Frames array.\n #\n # At least that is what my mental interpreter thinks.\n # we will assert just to be on the safe side.\n assert len(tmpdata[name]) == len(tmpdata[name+'_Frames'])\n \n else:\n tmpdata[name] = np.array(tmpdata[name][:n], ndmin=2)\n \n # gets transposed in next for loop if numvalues > 1\n if _header.numvalues[i] > 1:\n assert tmpdata[name].shape[0] == frame.frame.shape[0]\n else:\n assert tmpdata[name].shape[1] == frame.frame.shape[0]\n\n # cast as Element objects\n # 'varrateflag' variables remain lists of lists\n #\n # There are obvious more compact ways to write this but I'm\n # paranoid about reference counting and garbage collection not\n # functioning properly\n for name, i, rate in zip(_header.name, _header.id, _header.rate):\n if elemlist is not None:\n if not any(fnmatch(name, wc) for wc in elemlist):\n continue\n \n # transpose Element with more than 1 row \n if _header.numvalues[i] > 1:\n tmpdata[name] = np.array(tmpdata[name]).transpose()\n \n if rate != 1:\n self[name] = \\\n Element(tmpdata[name],\n tmpdata[name+'_Frames'],\n rate=_header.rate[i],\n name=_header.name[i],\n dtype=_header.type[i],\n varrateflag=_header.varrateflag[i],\n elemid=_header.id[i],\n units=_header.units[i])\n \n del tmpdata[name+'_Frames']\n else:\n self[name] = \\\n Element(tmpdata[name],\n frame.frame[:],\n rate=_header.rate[i],\n name=_header.name[i],\n dtype=_header.type[i],\n varrateflag=_header.varrateflag[i],\n elemid=_header.id[i],\n units=_header.units[i])\n\n # delete tmpdata arrays as we go to save memory\n del tmpdata[name]\n \n del _header, self._header\n self._header = None", "title": "" }, { "docid": "6125bdb53d5aed8bcb4eb296a74625df", "score": "0.6012088", "text": "def loaddata(self, x_path, y_path):\n # load x-series and y-series data\n df_x = pd.read_csv(x_path, dtype={'id': str}, parse_dates=['ds'], date_parser=self.dateparse)\n df_y = pd.read_csv(y_path, dtype={'id': str}, parse_dates=['ds'], date_parser=self.dateparse)\n self.df_x = df_x[['id', 'ds', 'x']]\n self.df_y = df_y[['id', 'ds', 'y']]\n self.lg.logger.info(\"load data: {} | {}\".format(os.path.basename(x_path), os.path.basename(y_path)))", "title": "" }, { "docid": "cde7e3dc7a92ff340201336cab894c99", "score": "0.59944457", "text": "def load_data(self):\n if not os.path.isdir(self.path):\n print(\"Data for these parameters not generated!!!\")\n print(\"Generating data, this can take a while...\")\n generate_data(self.glove_dimension, self.max_words, self.full)\n print(\"Generating data done!\")\n self.embedding_matrix = np.load(os.path.join(self.path, \"embedding_matrix.npy\"))\n self.X_train = np.load(os.path.join(self.path, \"X_train.npy\"))\n self.Y_train = np.load(os.path.join(self.path, \"Y_train.npy\"))\n self.X_test = np.load(os.path.join(self.path, \"X_test.npy\"))", "title": "" }, { "docid": "c03747fd92b1d9c97efa0443bf7addd5", "score": "0.5991191", "text": "def _load_data(self):\n cols = [list() for _ in range(len(self.columns_names))]\n user2index_dict = dict() # needs renumber since CustomerID ranges from 1 to 2649429, with gaps\n\n for fp in self.dataset_path: # TODO: deal with list of paths\n\n with open(fp, 'r') as f:\n for line in f.readlines():\n if ':' in line:\n movie = int(line.strip(\":\\n\"))-1 # -1 because the sequential MovieIDs starts from 1\n else:\n user, rating = [int(v) for v in line.strip().split(',')[:2]]\n cols[1].append(movie)\n 
cols[2].append(rating)\n\n if user in user2index_dict.keys():\n cols[0].append(user2index_dict[user])\n else:\n cols[0].append(len(user2index_dict.keys())) # number users from 0\n user2index_dict[user] = len(user2index_dict.keys())\n\n # TODO: load date as well, later keep only selected data via self.used_columns_names\n self.pd_data = pd.DataFrame(dict(zip(self.columns_names, cols)))\n\n for col_name, dtype in self.dtype_dict.items():\n self.pd_data[col_name] = self.pd_data[col_name].astype(dtype)\n\n self.pd_data = self.pd_data[self.used_columns_names]\n self.user_num = self.pd_data[\"user_id\"].nunique()\n self.item_num = self.pd_data[\"item_id\"].nunique()", "title": "" }, { "docid": "ed5b989f5a48d68b557c919be221d493", "score": "0.59870875", "text": "def _read_files(self):\n data = []\n print(\"Leyendo los archivos...\",end=\" \")\n init = time.time()\n\n for ii in range(self.ndata):\n # Leo las coordenadas y el numero atomico\n file_name = self.path_C + str(ii+1) + \".xyz\"\n atomic_number, positions, how_many = self._read_xyz(file_name)\n\n # Leo Densidades, Energia XC, type gaussians y Nuc\n file_name = self.path_P + str(ii+1) + \".dat\"\n Exc, Pmat_fit, gtype, Nuc = self._read_rhoE(file_name)\n\n # Guardo los datos en un solo diccionario\n single_data = {\n \"targets\": Exc,\n \"atomic_number\": atomic_number,\n \"how_many\": how_many,\n \"Pmat_fit\": Pmat_fit,\n \"gtype\": gtype, \n \"Nuc\": Nuc,\n }\n\n if len(positions) != 0:\n single_data[\"positions\"] = positions\n \n # Guardo los datos de una molecula\n data.append(single_data)\n \n fin = time.time()\n print(str(np.round(fin-init,2))+\" s.\")\n\n return data", "title": "" }, { "docid": "2db3e6105fac9e46d4cbe6c60d3e1593", "score": "0.5980057", "text": "def __init__(self, data_dir):\n print('Loading original MNIST data from', data_dir)\n self.data = []\n for name, key in [('train', 'train'), ('test', 't10k')]:\n images = os.path.join(data_dir, '%s-images.idx3-ubyte' % key)\n labels = os.path.join(data_dir, '%s-labels.idx1-ubyte' % key)\n if not os.path.isfile(images) or not os.path.isfile(labels):\n print('Warning: Missing %s data' % name)\n else:\n self.data.append((name, images, labels))", "title": "" }, { "docid": "78fde79e39a1dde2c51dc5cdb21288eb", "score": "0.5975555", "text": "def load_data(self):\n dset = self.config.dataset\n dset.load()\n found = 0\n for i,cband in enumerate(dset.dmap):\n emin, emax, event_type = cband.emin(), cband.emax(), cband.event_class()\n if event_type == -2147483648: event_type=0 # previous bug in FITS setup\n try:\n band = self.find(emin, event_type)\n found +=1\n except Exception:\n continue\n band.load_data(cband)\n band.data_index = i # useful to look up associated index\n if found!=len(self):\n print ('{}: Loaded subset of bands {} instead of {}'.format( self.__repr__(), found, len(self)))\n self[:]= self[:found] \n self.config.emax=self[-1].emax\n self.has_data = True", "title": "" }, { "docid": "d90c82b56133369719db443f99ec3ac8", "score": "0.59707594", "text": "def load_all_pose_data(self):\n raise NotImplementedError('subclass must implement this method')", "title": "" }, { "docid": "7b07a36e9ebbe87d7bd6c2689315702e", "score": "0.5941635", "text": "def loadBasicData(self):\n train = pd.read_csv(os.path.join(self.path, 'train.csv'))\n \n s = LabelEncoder().fit(train.species) \n self.classes = list(s.classes_) \n classes_labels = s.transform(train.species)\n train = train.drop(['species'], axis=1)\n\n if self.pca:\n trainX = train.drop(['id'], axis=1)\n pca = PCA(n_components=0.9 
,svd_solver='full')\n pca.fit(trainX)\n trainX=pca.transform(trainX)\n train_df=pd.DataFrame.from_records(trainX)\n train_df.insert(loc=0, column='id', value=train['id'])\n train=train_df\n\n \n for train_index, test_index in StratifiedShuffleSplit(n_splits=1, test_size=self.nb_test_data, random_state=self.r).split(train, classes_labels): \n X_train, X_test = train.values[train_index], train.values[test_index] \n self.t_train, self.t_test = classes_labels[train_index], classes_labels[test_index]\n\n self.id_img_train = list(np.int_( X_train[:,0]))\n self.id_img_test = list(np.int_( X_test[:,0])) \n self.X_data_train = np.delete(X_train, 0, 1)\n self.X_data_test = np.delete(X_test, 0, 1)", "title": "" }, { "docid": "8af6c89f57159a47b73d405ef97c77ca", "score": "0.5937247", "text": "def load(self):\n\n meta_data = []\n if len(os.listdir(self.processed_data_dir)) != 0:\n\n print(\"Loading Saved Data from Disk.......\")\n\n train_dir = os.path.join(self.processed_data_dir, 'train')\n test_dir = os.path.join(self.processed_data_dir, 'test')\n\n dataset = DiskDataset(data_dir=self.processed_data_dir)\n train = DiskDataset(data_dir=train_dir)\n test = DiskDataset(data_dir=test_dir)\n\n meta_data = pickle.load(open(os.path.join(self.processed_data_dir, 'meta.pkl'), 'rb'))\n max_atom = meta_data[0]\n\n print(\"Transforming Data.\")\n if not self.transformer_types:\n if self.transformer_types == 'normalization_y':\n self.transformers += [\n NormalizationTransformer(transform_y=True, dataset=dataset)\n ]\n elif self.transformer_types == 'normalization_w':\n self.transformers += [\n NormalizationTransformer(transform_w=True, dataset=dataset)\n ]\n elif self.transformer_types == 'balancing_w':\n self.transformers += [\n BalancingTransformer(transform_w=True, dataset=dataset)\n ]\n elif self.transformer_types == 'balancing_y':\n self.transformers += [\n BalancingTransformer(transform_y=True, dataset=dataset)\n ]\n else:\n ValueError(\"Transformer type Not defined!{}\".format(self.transformer_types))\n\n else:\n print(\"Loading and Featurizing Data.......\")\n # loader = dc.data.CSVLoader(\n # tasks=self.tasks, smiles_field=self.smiles_field, featurizer=self.Featurizer)\n dataset = self.featurize(shard_size=2048)\n\n print(\"Transforming Data.\")\n if not self.transformer_types:\n if self.transformer_types == 'normalization_y':\n self.transformers += [\n NormalizationTransformer(transform_y=True, dataset=dataset)\n ]\n elif self.transformer_types == 'normalization_w':\n self.transformers += [\n NormalizationTransformer(transform_w=True, dataset=dataset)\n ]\n elif self.transformer_types == 'balancing_w':\n self.transformers += [\n BalancingTransformer(transform_w=True, dataset=dataset)\n ]\n elif self.transformer_types == 'balancing_y':\n self.transformers += [\n BalancingTransformer(transform_y=True, dataset=dataset)\n ]\n else:\n ValueError(\"Transformer type Not defined!{}\".format(self.transformer_types))\n\n if len(self.transformers) > 0:\n for transformer in self.transformers:\n # pass dataset through maybe more than one transformer\n dataset = transformer.transform(dataset)\n\n \"\"\"max_atom is the max atom of molecule in all_dataset \"\"\"\n max_atom = self.find_max_atom(dataset)\n meta_data.append(max_atom)\n meta_data.extend(self.tasks)\n with open(os.path.join(self.processed_data_dir, 'meta.pkl'), 'wb') as f:\n pickle.dump(meta_data, f)\n\n \"\"\"\n Split Dataset\n \"\"\"\n print(\"Splitting Date to Train/Validation/Testing\")\n splitters = {\n 'index': IndexSplitter(),\n 'random': 
RandomSplitter(),\n 'scaffold': ScaffoldSplitter()\n }\n\n if self.splitter not in splitters:\n raise ValueError(\"Splitter not defined!\")\n else:\n splitter = splitters[self.splitter]\n\n # create processed dirs as train, valid, test\n train_dir = os.path.join(self.processed_data_dir, 'train')\n test_dir = os.path.join(self.processed_data_dir, 'test')\n\n print(\"Saving Data at %s...\", self.processed_data_dir)\n train, test = splitter.train_test_split(\n dataset,\n train_dir=train_dir,\n test_dir=test_dir,\n frac_train=self.split_frac[0],\n )\n\n return self.tasks, (train, test), self.transformers, max_atom", "title": "" }, { "docid": "6758cc52f44c8b357ed3d7891c0b848e", "score": "0.59339476", "text": "def load_data_set_graphs():\n\n asc_graphs_array = []\n desc_graphs_array = []\n flat_graphs_array = []\n soy_graphs_array = []\n\n for asc_file in os.listdir(\"samples/ASC/graphs/\"):\n if asc_file.endswith(\".png\"):\n img_data = cv2.imread(\"samples/ASC/graphs/\" + asc_file, 0)\n asc_graphs_array.append(img_data)\n\n for desc_file in os.listdir(\"samples/DESC/graphs/\"):\n if desc_file.endswith(\".png\"):\n img_data = cv2.imread(\"samples/DESC/graphs/\" + desc_file, 0)\n desc_graphs_array.append(img_data)\n\n for flat_file in os.listdir(\"samples/FLAT/graphs/\"):\n if flat_file.endswith(\".png\"):\n img_data = cv2.imread(\"samples/FLAT/graphs/\" + flat_file, 0)\n flat_graphs_array.append(img_data)\n\n for soy_file in os.listdir(\"samples/SOY/graphs/\"):\n if soy_file.endswith(\".png\"):\n img_data = cv2.imread(\"samples/SOY/graphs/\" + soy_file, 0)\n soy_graphs_array.append(img_data)\n\n\n return asc_graphs_array, desc_graphs_array, flat_graphs_array, soy_graphs_array", "title": "" }, { "docid": "0c162860e049cdcd6df07e931a7de844", "score": "0.59312016", "text": "def _post_file_load_processing(self):\n # When loading files with just point arrays, create and\n # set the polydata vertices\n if self.n_points > 0 and self.n_cells == 0:\n verts = self._make_vertex_cells(self.n_points)\n self.verts = CellArray(verts, self.n_points, deep=False)", "title": "" }, { "docid": "bd3c8e433d4911cb2c46e6da8803b5d0", "score": "0.59306145", "text": "def build_data_loader(self):\n # Load dataset\n dataset = build_dataset(self.cfg)", "title": "" }, { "docid": "ea737d3461bc2ff86e2d0f6854012207", "score": "0.5930364", "text": "def load_list(self):\n file_names = ['cube1.nc', 'cube2.nc']\n file_names = [os.path.join(self.data_dir, f) for f in file_names]\n\n iris.load(file_names)", "title": "" }, { "docid": "717cdc4b17306d16580d1a7e88e55217", "score": "0.59258443", "text": "def load_data(data_dir):\n\n ### YOUR CODE HERE\n\n files = os.listdir(data_dir)\n\n x_train = np.zeros(shape=[0,32,32,3], dtype=float)\n y_train = np.asarray([], dtype=int)\n\n x_test = np.zeros(shape=[0,32,32,3], dtype=float)\n y_test = np.asarray([], dtype=int)\n\n def unpickle(file):\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n\n def get_xy_batch(cur_file_path):\n current_set = unpickle(cur_file_path)\n x_temp = np.array(current_set[b'data'], dtype=float) / 255.0\n x_temp = x_temp.reshape([-1, 3, 32, 32])\n x_temp = x_temp.transpose([0, 2, 3, 1])\n y_temp = np.array(current_set[b'labels'])\n\n return x_temp, y_temp\n\n for file in files:\n cur_file_path = os.path.join(data_dir, file)\n if 'data_batch' in file:\n x_temp, y_temp = get_xy_batch(cur_file_path)\n x_train = np.concatenate((x_train, x_temp), axis=0)\n y_train = np.concatenate((y_train, y_temp), axis=0)\n\n elif 'test_batch' in 
file:\n x_temp, y_temp = get_xy_batch(cur_file_path)\n x_test = np.concatenate((x_test, x_temp))\n y_test = np.concatenate((y_test, y_temp))\n\n ### END CODE HERE\n\n return x_train, y_train, x_test, y_test", "title": "" }, { "docid": "c3948cb7e1ae2894bb35ef23a00fc2da", "score": "0.59197044", "text": "def _load_data(self):\n raise NotImplementedError", "title": "" }, { "docid": "e62a9581844ef2e78c9d81499739e8b0", "score": "0.5912595", "text": "def load_data(filepath,fnames,jinds,iinds,varname,num_cores=20,dim4D=True, sum_over_depth=False, depth_lim=13, model_data=False, remove_clim=False,dt=1, depth_lim0=0):\n # create temp files to host the shared memory variables\n folder1 = tempfile.mkdtemp()\n folder2 = tempfile.mkdtemp()\n path1 = os.path.join(folder1, 'dum0.mmap')\n path2 = os.path.join(folder2, 'dum1.mmap')\n if dim4D: # incase the files have more than one timestep in each file\n vshape=(len(fnames),366,len(jinds))\n var_par=np.memmap(path1, dtype=float, shape=vshape, mode='w+')\n else: # incase there is only one timestep in a file\n vshape=(len(fnames),len(jinds))\n var_par=np.memmap(path1, dtype=float, shape=vshape, mode='w+')\n # nts will keep track of number of days in a year\n nts=np.memmap(path2, dtype=float, shape=(len(fnames)), mode='w+')\n fnames2=np.memmap(path2, dtype='U'+str(len(fnames[0])+1), shape=(len(fnames)), mode='w+')\n fnames2[:]=fnames #np.asarray(fnames[:])\n # launch the parallel reading\n Parallel(n_jobs=num_cores)(delayed(read_files)(j,nts,jinds,iinds,filepath,fnames2,var_par,varname,sum_over_depth, depth_lim, depth_lim0, model_data=model_data) for j,fname in enumerate(fnames))\n if dim4D:\n print('removing climatology')\n var_clim=np.nanmean(var_par,0)\n if remove_clim:\n print('removing climatology') \n # smooth the daily climatology with monthly filter, as the climatology will be still noisy at daily scales\n var_clim=np.concatenate([var_clim[-120//dt:,],var_clim,var_clim[:120//dt,]],axis=0)\n b,a=signal.butter(3,2./(30/dt))\n jnonan=np.where(np.isfinite(np.sum(var_clim,0)))\n var_clim[:,jnonan]=signal.filtfilt(b,a,var_clim[:,jnonan],axis=0)\n var_clim=var_clim[120//dt:120//dt+366//dt,]\n #\n # this is the on off switch for removing the climatology\n var_clim=var_clim*int(remove_clim) \n var=var_par[0,:int(nts[0]),:]-var_clim[:int(nts[0]),:]\n # concatenate the data - note that here nts is used to strip down the 366th day when it's not a leap year\n # and include the 366th day when it is a leap year\n for j in range(1,len(fnames)):\n print(j)\n var=np.concatenate([var,var_par[j,:int(nts[j]),:]-var_clim[:int(nts[j]),:]],axis=0)\n #\n else:\n # if only one timestep per file\n var=np.asarray(var_par)\n var[np.where(var==0)]=np.nan\n if remove_clim:\n print('removing climatology')\n year0=datetime.date(int(fnames[0][-20:-16]),int(fnames[0][-16:-14]),int(fnames[0][-14:-12])).isocalendar()[0]\n year1=datetime.date(int(fnames[-1][-20:-16]),int(fnames[-1][-16:-14]),int(fnames[-1][-14:-12])).isocalendar()[0]\n var2=np.ones((year1-year0+1,int(np.ceil(366./dt)),var.shape[1]))*np.nan\n #\n for j, fname in enumerate(fnames):\n year = int(fname[-20:-16])\n month = int(fname[-16:-14])\n day = int(fname[-14:-12])\n c,c1 = datetime.date(year,month,day).isocalendar()[:2]\n c = c-year0\n c1 = c1-1\n var2[c,c1,:] = var[j,:]\n #\n var_clim=np.nanmean(var2,0)\n ind=np.where(np.nansum(var2,-1)[0,:]>0)[0]\n var=var2[0,ind,:]-var_clim[ind,:]\n for j in range(1,var2.shape[0]):\n ind=np.where(np.nansum(var2,-1)[j,:]>0)[0]\n 
var=np.concatenate([var,var2[j,ind,:]-var_clim[ind,:]],axis=0)\n else:\n var_clim=None\n # \n print('close files')\n #\n try:\n shutil.rmtree(folder1)\n except OSError:\n pass\n try:\n shutil.rmtree(folder2)\n except OSError:\n pass\n #\n return var, var_clim", "title": "" }, { "docid": "11798ccc67e02663e8357fea9199550d", "score": "0.5912269", "text": "def load_data(data_dir):\n\n\t### YOUR CODE HERE\n\tfnames = os.listdir(data_dir)\n\tx_train = np.array([[]]).reshape(0,3072)\n\ty_train = np.array([])\n\tfor fn in fnames:\n\t\tif fn.endswith(\"html\") or fn.endswith(\"meta\"):\n\t\t\tcontinue\n\t\twith open(os.path.join(data_dir,fn), 'rb') as fo:\n\t\t\tds = pickle.load(fo, encoding='bytes')\n\t\t\txtemp = np.array(ds[b'data']) #.reshape(10000,3,32,32).transpose(0,2,3,1).astype(np.uint8)\n\t\t\tytemp = np.array(ds[b'labels'])\n\n\t\tif fn.startswith(\"test\"):\n\t\t\tx_test = xtemp\n\t\t\ty_test = ytemp\n\n\t\tif fn.startswith(\"data\"):\n\t\t\tx_train = np.concatenate((xtemp,x_train), axis=0)\n\t\t\ty_train = np.concatenate((ytemp,y_train), axis=0)\n\t### END CODE HERE\n\n\treturn x_train, y_train, x_test, y_test", "title": "" }, { "docid": "eef24da965880b538b996ae68c599f22", "score": "0.5905727", "text": "def load_data(self):\n self.idl.load_data()\n self.legacy.load_data()", "title": "" }, { "docid": "07618452435f299bce27fd188640865f", "score": "0.58980036", "text": "def load_data(self):\n\n # Now done at beginning to make sure it only does this one per target\n if glob.glob(self.params['path']+'%d_*' % self.target) != []:\n # Load light curve\n if not os.path.exists(self.params['path']+'%d_LC.txt' % self.target):\n if self.verbose:\n print('Error: %s%d_LC.txt not found' % (self.params['path'], self.target))\n return False\n else:\n self.get_file(self.params['path'] + '%d_LC.txt' % self.target)\n self.time = np.copy(self.x)\n self.flux = np.copy(self.y)\n self.cadence = int(np.nanmedian(np.diff(self.time)*24.0*60.0*60.0))\n if self.cadence/60.0 < 10.0:\n self.short_cadence = True\n else:\n self.short_cadence = False\n self.nyquist = 10**6/(2.0*self.cadence)\n if self.verbose:\n print('# LIGHT CURVE: %d lines of data read' % len(self.time))\n if self.short_cadence:\n self.fitbg['smooth_ps'] = 2.5\n # Load power spectrum\n if not os.path.exists(self.params['path'] + '%d_PS.txt' % self.target):\n if self.verbose:\n print('Error: %s%d_PS.txt not found' % (self.params['path'], self.target))\n return False\n else:\n self.get_file(self.params['path'] + '%d_PS.txt' % self.target)\n self.frequency = np.copy(self.x)\n self.power = np.copy(self.y)\n if self.keplercorr:\n self.remove_artefact(self.frequency, self.power)\n self.power = np.copy(self.y)\n if self.verbose:\n print('## Removing Kepler artefacts ##')\n if self.verbose:\n print('# POWER SPECTRUM: %d lines of data read' % len(self.frequency))\n self.oversample = int(round((1./((max(self.time)-min(self.time))*0.0864))/(self.frequency[1]-self.frequency[0])))\n self.resolution = (self.frequency[1]-self.frequency[0])*self.oversample\n\n if self.verbose:\n print('-------------------------------------------------')\n print('Target: %d' % self.target)\n if self.oversample == 1:\n print('critically sampled')\n else:\n print('oversampled by a factor of %d' % self.oversample)\n print('time series cadence: %d seconds' % self.cadence)\n print('power spectrum resolution: %.6f muHz' % self.resolution)\n print('-------------------------------------------------')\n # Create critically sampled PS\n if self.oversample != 1:\n self.freq = 
np.copy(self.frequency)\n self.pow = np.copy(self.power)\n self.frequency = np.array(self.frequency[self.oversample-1::self.oversample])\n self.power = np.array(self.power[self.oversample-1::self.oversample])\n else:\n self.freq = np.copy(self.frequency)\n self.pow = np.copy(self.power)\n self.frequency = np.copy(self.frequency)\n self.power = np.copy(self.power)\n if hasattr(self, 'findex'):\n if self.findex['do']:\n # Make a mask using the given frequency bounds for the find excess routine\n mask = np.ones_like(self.freq, dtype=bool)\n if self.params[self.target]['lowerx'] is not None:\n mask *= np.ma.getmask(np.ma.masked_greater_equal(self.freq, self.params[self.target]['lowerx']))\n else:\n mask *= np.ma.getmask(np.ma.masked_greater_equal(self.freq, self.findex['lower']))\n if self.params[self.target]['upperx'] is not None:\n mask *= np.ma.getmask(np.ma.masked_less_equal(self.freq, self.params[self.target]['upperx']))\n else:\n mask *= np.ma.getmask(np.ma.masked_less_equal(self.freq, self.findex['upper']))\n self.freq = self.freq[mask]\n self.pow = self.pow[mask]\n return True\n else:\n print('Error: data not found for target %d' % self.target)\n return False", "title": "" }, { "docid": "ea527761adf30721dfc4bd47d21a6514", "score": "0.58884585", "text": "def load_data():\n def un_pickle(file):\n with open(file, 'rb') as fo:\n return_dict = pickle.load(fo, encoding='latin1')\n return return_dict\n xTr = np.array([], dtype=np.float64).reshape(0, 3072)\n yTr = np.array([], dtype=np.int64).reshape(0)\n for i in range(1, 6):\n dataDict = un_pickle('cifar-10-batches-py/data_batch_'+str(i))\n xTr = np.concatenate((xTr, dataDict['data']), axis=0)\n yTr = np.concatenate((yTr, dataDict['labels']), axis=0)\n xTe = un_pickle('cifar-10-batches-py/test_batch')['data'].astype(np.float64)\n yTe = np.array(un_pickle('cifar-10-batches-py/test_batch')['labels'], dtype=np.int64)\n return (xTr, yTr), (xTe, yTe)", "title": "" }, { "docid": "8773ef4bdc17ea39aae9abc827e17b25", "score": "0.5883034", "text": "def load_data(args, data_dir=None):\n df_all = pd.DataFrame(columns=['center', 'left', 'right', 'steering', 'throttle', 'reverse', 'speed'])\n\n if data_dir:\n for part in data_dir:\n part = os.path.join(args.data_dir, part)\n for subdir, dirs, files in os.walk(part):\n for file in files:\n if file == 'driving_log.csv':\n logging.info(\"Loading : \" + os.path.join(subdir, file))\n try:\n if dirs[0] != 'IMG':\n logging.info(\"Missing IMG directory in \" + subdir)\n break\n except IndexError:\n logging.info(\"No directories!\")\n break\n df_new = pd.read_csv(os.path.join(os.getcwd(), os.path.join(subdir, file)),\n names=['center', 'left', 'right',\n 'steering', 'throttle', 'reverse', 'speed'])\n df_all = df_all.append(df_new)\n\n else:\n for subdir, dirs, files in os.walk(args.data_dir):\n for file in files:\n if file == 'driving_log.csv':\n logging.info(\"Loading : \" + os.path.join(subdir, file))\n try:\n if dirs[0] != 'IMG':\n logging.info(\"Missing IMG directory in \" + subdir)\n break\n except IndexError:\n logging.info(\"No directories!\")\n break\n df_new = pd.read_csv(os.path.join(os.getcwd(), os.path.join(subdir, file)),\n names=['center', 'left', 'right',\n 'steering', 'throttle', 'reverse', 'speed'])\n df_all = df_all.append(df_new)\n\n # Checking for invalid data\n if df_all.isnull().any().any():\n logging.info('Corrupted data')\n return -1\n else:\n logging.info('Data not corrupted')\n\n images_all = df_all[['center', 'left', 'right']].values\n labels = df_all[['steering', 'speed']].values\n\n 
x_data, y_data = divide_images(images_all, labels)\n\n return x_data, y_data", "title": "" }, { "docid": "0af3f302a0ef5c610eacd197a1ede5b3", "score": "0.5882122", "text": "def dataArr(filename):\r\n #Open the file\r\n f=h5py.File(filename,'r')\r\n \r\n #Initialize the data arrays\r\n cdata=[]\r\n idxset=[]\r\n vertices=[]\r\n \r\n #Open groups in the file\r\n for group in f.keys():\r\n# print('Group- '+group)\r\n \r\n #Get the group\r\n currGroup=f[group]\r\n \r\n #Open keys in the group\r\n for key in currGroup.keys():\r\n# print('Key- '+key)\r\n \r\n #Append the data to the respective arrays\r\n if key=='cdata(Complex)':\r\n cdataGroup=currGroup[key]\r\n \r\n imag=[]\r\n real=[]\r\n #Open the keys in cdata\r\n for subkey in cdataGroup.keys():\r\n# print('Subkey- '+subkey)\r\n \r\n #Get the real and imaginary parts of the array\r\n if subkey=='Imag':\r\n imag=cdataGroup[subkey][()]\r\n elif subkey=='Real':\r\n real=cdataGroup[subkey][()]\r\n \r\n #Convert lists to numpy arrays\r\n imag=np.array(imag)\r\n real=np.array(real)\r\n #Get the cdata value\r\n cdata=real+1j*imag\r\n \r\n elif key=='idxset':\r\n idxset=currGroup[key][()]\r\n elif key=='vertices':\r\n vertices=currGroup[key][()]\r\n \r\n #Remove the unwanted component from the vertices\r\n #Vertices are stored in a 3 element array containing (x,y,z) but we only\r\n #need 2 to make flat plots, therefore we need to remove one based on the\r\n #plane on which the data is being plotted\r\n xVals=[]\r\n yVals=[]\r\n newVertices=[]\r\n for vertex in vertices:\r\n xVals.append(vertex[0])\r\n yVals.append(vertex[2])\r\n newVertices.append([vertex[0],vertex[1]])\r\n vertices=newVertices\r\n \r\n #Convert to numpy arrays\r\n cdata=np.array(cdata)\r\n xVals=np.array(xVals)\r\n yVals=np.array(yVals)\r\n \r\n #Close the file\r\n f.close()\r\n \r\n return cdata, xVals, yVals", "title": "" }, { "docid": "3b87d24f5dbcf7555daea8fc8a209185", "score": "0.588185", "text": "def load_data(self):\n num_actual = len(list(CROPS_DIRNAME.glob('*.jpg')))\n num_target = len(self.iam_dataset.line_regions_by_id)\n if num_actual < num_target - 2: # There are a couple of instances that could not be cropped\n self.process_iam_paragraphs()\n\n self.x, self.y, self.ids = self.load_iam_paragraphs()\n self.train_ind, self.test_ind = get_random_split(self.x.shape[0])\n ids_train, ids_test = self.train_ind, self.test_ind\n self.x_train, self.y_train = self.x[ids_train], self.y[ids_train]\n self.x_test, self.y_test = self.x[ids_test], self.y[ids_test]", "title": "" }, { "docid": "f7893cd93900c09766e0a131fcbc6b56", "score": "0.58773375", "text": "def init_data():\n f, _ = my_bokeh_utils.string_to_function_parser(f_input.value, ['x', 'y'])\n contour_f.compute_contour_data(f)\n g, _ = my_bokeh_utils.string_to_function_parser(g_input.value, ['x', 'y'])\n contour_g.compute_contour_data(g, isovalue=[0])\n interactor.update_to_user_view()", "title": "" }, { "docid": "3f47ca393031185b349c408a4e6a20b9", "score": "0.58720464", "text": "def load_data(self) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "9dacafb09f8e025e7eb6be0b536b4ce6", "score": "0.5870313", "text": "def load_raw_data(self):\n for i in self.dictionary_np_to_ar[self.n_pivot]:\n file_name = (self.folder + '/result' + str(i) + '.txt')\n f = open(file_name)\n data = format_data(f, self.n_pivot)\n self.raw_data[i] = data", "title": "" }, { "docid": "1c198f9f0b9b98de591a2b5a6ea9c494", "score": "0.58690315", "text": "def get_data(self, contour_path, image_path, index):\n images = []\n contours = []\n # 
handle `/` missing\n if contour_path[-1] != '/':\n contour_path += '/'\n if image_path[-1] != '/':\n image_path += '/'\n # get contour file\n contour_file = self.get_contour_file(contour_path)\n # Find contour data\n contour_data = pydicom.read_file(contour_path + '/' + contour_file)\n # Get ROI names\n print(get_roi_names(contour_data))\n # get slice orders\n ordered_slices = self.slice_order(image_path)\n print(ordered_slices[:5])\n # get contour dict\n contour_dict = self.get_contour_dict(contour_file, contour_path, image_path, index)\n\n for k, v in ordered_slices:\n # get data from contour dict\n if k in contour_dict:\n images.append(contour_dict[k][0])\n contours.append(contour_dict[k][1])\n # get data from dicom.read_file\n else:\n fpaths = [image_path + f for f in os.listdir(image_path) if '.dcm' in f]\n for fpath in fpaths:\n img = pydicom.read_file(fpath)\n scan_ID = img.SOPInstanceUID\n if k == scan_ID:\n print(\"Got From Dicom\")\n img_arr = img.pixel_array\n contour_arr = np.zeros_like(img_arr)\n images.append(img_arr)\n contours.append(contour_arr)\n break\n else:\n print(\"Wrong File ID.\")\n\n return np.array(images), np.array(contours)", "title": "" }, { "docid": "0fb2505786c0c41d16e47e5e63141e57", "score": "0.5867914", "text": "def load_data_files(label, data_files):\n data_x = np.empty((0, NUM_FEATURES))\n data_y = np.empty((0))\n\n for filename in data_files:\n try:\n # data = np.loadtxt(BytesIO(zipped_dataset.read(filename)))\n data = np.loadtxt(filename)\n print('... file {0}'.format(filename))\n x, y = process_dataset_file(data, label)\n data_x = np.vstack((data_x, x))\n data_y = np.concatenate([data_y, y])\n # print(\"Data's shape yet: \", data_x.shape())\n except KeyError:\n print('ERROR: Did not find {0} in zip file'.format(filename))\n return data_x, data_y", "title": "" }, { "docid": "3ef1ea225fe60f8614de107081cbb54f", "score": "0.586448", "text": "def load_dataset(self):\n\n\t\t# Paths to the training and testing data for STL-10 dataset.\n\t\tpath = './dataset/stl-10'\n\t\ttrain_data_path = os.path.join(path, 'train_X.bin')\n\t\ttrain_label_path = os.path.join(path, 'train_y.bin')\n\t\ttest_data_path = os.path.join(path, 'test_X.bin')\n\t\ttest_label_path = os.path.join(path, 'test_y.bin')\n\n\t\t# Read the training data images and thier labels from the disk.\n\t\tself.train_data = self.read_images(train_data_path)\n\t\tself.train_labels = self.read_labels(train_label_path)\n\n\t\t# Read the test data images and thier labels from the disk.\n\t\tself.test_data = self.read_images(test_data_path)\n\t\tself.test_labels = self.read_labels(test_label_path)\n\n\t\treturn", "title": "" }, { "docid": "a5d23d3fd8c1c48eff2b95e0bed5ce17", "score": "0.58631", "text": "def __load_data(self, dirname):\n #dirname = \"cifar-10-batches-py\"\n origin = \"http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\"\n path = get_file(dirname, origin=origin, untar=True)\n\n nb_train_samples = 50000\n\n X_train = np.zeros((nb_train_samples, 3, 32, 32), dtype=\"uint8\")\n y_train = np.zeros((nb_train_samples,), dtype=\"uint8\")\n\n for i in range(1, 6):\n fpath = os.path.join(path, 'data_batch_' + str(i))\n data, labels = load_batch(fpath)\n X_train[(i - 1) * 10000: i * 10000, :, :, :] = data\n y_train[(i - 1) * 10000: i * 10000] = labels\n\n fpath = os.path.join(path, 'test_batch')\n X_test, y_test = load_batch(fpath)\n\n y_train = np.reshape(y_train, (len(y_train), 1))\n y_test = np.reshape(y_test, (len(y_test), 1))\n\n if K.image_data_format() == 'channels_last':\n X_train = 
X_train.transpose(0, 2, 3, 1)\n X_test = X_test.transpose(0, 2, 3, 1)\n\n return (X_train, y_train), (X_test, y_test)", "title": "" }, { "docid": "da0d95df0c52bc86033c5de18b70304c", "score": "0.58627766", "text": "def get_data(download=True, unzip=True, load=True, loadupto=6000):\n\n from os import path, makedirs, getcwd, listdir\n from zipfile import ZipFile\n\n filename = 'cocodataset'\n zdirname = path.join(getcwd(), filename + '.zip')\n fdirname = path.join(getcwd(), filename)\n\n # Download data\n if download is True:\n if path.exists(zdirname):\n print('Ignored download: Cannot download file that already exists!')\n else:\n url = 'http://images.cocodataset.org/zips/val2017.zip'\n filename = 'cocodataset'\n urlretrieve(url, zdirname)\n print('* File downloaded')\n\n # Unzip data\n if unzip is True:\n if path.exists(fdirname):\n print('Ignored unzip: Cannot unzip file that already exists!')\n else:\n dirname = path.join(getcwd(), filename)\n makedirs(dirname, exist_ok=True)\n with ZipFile(filename + '.zip', 'r') as zipfile:\n zipfile.extractall(fdirname)\n print('* File unzipped')\n\n # Load data\n if load is True:\n images = []\n shapes = []\n i = 1\n for fdir in listdir(fdirname):\n for file in listdir(path.join(fdirname, fdir)):\n fpath = path.join(*[fdirname, fdir, file])\n image = Image.open(fpath)\n image = np.array(image) # Convert to numpy array\n shape = np.array(image.shape)\n if len(shape) < 3: # Skip black and white images\n continue\n shapes.append(shape)\n images.append(image)\n if i >= loadupto:\n break\n i += 1\n\n # Trim all images to a the smallest size\n shapes = np.array(shapes)\n min_shape = np.amin(shapes, axis=0)\n min_shape = (124, 124, 3)\n data_imgs = np.array([image[: min_shape[0], : min_shape[1], : min_shape[2]] for image in images])\n print('* Loaded {0} images to numpy array'.format(i))\n\n return data_imgs", "title": "" }, { "docid": "3373dd382cf4dd644d2d7cc0f2ddd59b", "score": "0.5853483", "text": "def load_data(self, fname):\n self.hyperspec_data = np.zeros((10,10,34))#np.random.rand(10,10,34)\n self.display_image =np.zeros((10,10,34))# np.random.rand(10,10)\n self.spec_x_array = 3*np.arange(34)", "title": "" }, { "docid": "da8773f2f198f47b40ba8eebbbb85961", "score": "0.58465314", "text": "def _load_base_data(self, scp_fname, labels_fnames, labels_dtype):\n args = self.args\n if is_incremental(scp_fname):\n scps = io.read_lines(scp_fname)\n else:\n scps = [scp_fname]\n data_gens = []\n for scp in scps:\n data_gens.append(DataGenerator(\n MultiLabelTemporalData.from_kaldi,\n scp=scp, alipdfs=labels_fnames, num_pdfs=args.num_classes,\n context=args.context, padding='replicate',\n utt_feats_dict=self.ivectors, labels_dtype=labels_dtype\n ))\n return data_gens", "title": "" }, { "docid": "a4f3047a8fc11c017ed2ba0e252397d7", "score": "0.5832999", "text": "def _load_data(self, train_dicts=None, dev_dicts=None, test_dicts=None):\n logger.info(\"\\nLoading data into the data silo ...\"\n \"{}\".format(TRACTOR_SMALL))\n\n # dev data\n dev_file = self.processor.data_dir / self.processor.dev_filename\n logger.info(\"Loading dev set from: {}\".format(dev_file))\n self.data[\"dev\"], self.tensor_names = self._get_dataset(dev_file)\n\n # skip the test data\n self.data[\"test\"] = None\n\n if self.caching:\n self._save_dataset_to_cache()\n\n # derive stats and meta data\n self._calculate_statistics()\n\n self._initialize_data_loaders()", "title": "" }, { "docid": "31579b0a33b87140f506abf0c66d3daa", "score": "0.58303064", "text": "def loadData(tempdir=None, 
nsamples=100, npoints=100000):\n\n\t# def loadDataHelper(data):\n\t# \treturn data\n\n\tdata = np.zeros((0, npoints))\n\n\tif not tempdir:\n\t\ttempdir = TemporaryDirectory().name\n\t\ttry:\n\t\t\tif platform == \"win32\":\n\t\t\t\tsystem(\"wsl -r -N -c -np -P %s https://physionet.org/files/ptbdb/1.0.0/\" % tempdir)\n\t\t\tif platform == \"darwin\":\n\t\t\t\tsystem(\"wget -r -N -c -np -P %s https://physionet.org/files/ptbdb/1.0.0/\" % tempdir)\n\t\t\telse:\n\t\t\t\tprint(\"I don't know what platform you have... Tell Ethan on Slack.\")\n\t\t\t\treturn None, None\n\t\texcept Exception as e:\n\t\t\tprint(\"[%s]:[%s] %s\" % __name, __func__, e)\n\t\t\tprint(\"Make sure if you are on Windows that you have wsl install. If you are on Mac, please install wget.\")\n\t\t\treturn None, None\n\n\tfor dataloc in getDataFiles(tempdir):\n\t\tif data.shape[0] == nsamples:\n\t\t\tbreak\n\t\trec = rdrecord(dataloc, channels=[0])\n\t\tx = rec.p_signal\n\t\tif x is None:\n\t\t\tx = rec.d_signal\n\t\tif x is None:\n\t\t\tcontinue\n\t\tif len(x) < npoints:\n\t\t\tcontinue\n\t\tx_t = np.transpose(x)\n\t\tdata = np.vstack([data, x_t[:, 0:npoints]])\n\n\treturn data, tempdir", "title": "" }, { "docid": "b38ce6813b44b62c416fd97ac6ace511", "score": "0.58130115", "text": "def _load_data():\n import Oger as og\n import csv\n now = datetime.now()\n now -= timedelta(microseconds=now.microsecond)\n periods = 500\n o2 = og.datasets.mackey_glass(sample_len=periods,\n n_samples=1,\n seed=1)[0][0].flatten()\n o2 = _minmax_scale(o2, 96, 99)\n timestamp = pd.date_range(now, periods=periods, freq='500L')\n temperature = og.datasets.mackey_glass(sample_len=periods,\n n_samples=1,\n seed=3)[0][0].flatten()\n temperature = _minmax_scale(temperature, 36, 37.5)\n air_flow = og.datasets.mackey_glass(sample_len=periods,\n n_samples=1,\n seed=5)[0][0].flatten()\n air_flow = _minmax_scale(air_flow, 0, 100)\n heart_rate = og.datasets.mackey_glass(sample_len=periods,\n n_samples=1,\n seed=7)[0][0].flatten()\n heart_rate = _minmax_scale(heart_rate, 60, 70)\n data = zip(*[o2, temperature, air_flow, heart_rate])\n data = pd.DataFrame(data, columns=['o2', 'temperature',\n 'air_flow', 'heart_rate'],\n index=timestamp)\n data = data.resample('250L', fill_method='pad')\n last_row = data.xs(data.index[-1])\n last_row.name = data.index[-1] + timedelta(seconds=0.25)\n data = data.append(last_row)\n acc_magn = og.datasets.mackey_glass(sample_len=2 * periods,\n n_samples=1,\n seed=11)[0][0].flatten()\n acc_magn = _minmax_scale(acc_magn, 0, 5)\n data['acc_magn'] = acc_magn\n data = data.resample('10L', fill_method='pad')\n with open('ecg_v1.csv', 'rb') as f:\n reader = csv.reader(f)\n reader.next()\n ecg_v1 = [float(r[0]) for r in reader]\n last_rows = data.iloc[-24:]\n last_rows['index'] = last_rows.index + timedelta(seconds=0.01)\n last_rows.set_index('index', inplace=True)\n data = data.append(last_rows)\n data['ecg_v1'] = ecg_v1\n return data", "title": "" }, { "docid": "fc74a33af253554a3113c55ae006da61", "score": "0.58085763", "text": "def _get_all_datas(self) -> None:\n\n # init\n self.all_list = {\"train\": [], \"unknown\": [], \"known\": []}\n self.classes = {}\n\n path = Path(self.path)\n\n # directories in [image_path]\n dirs = [d for d in path.glob(\"*\") if d.is_dir()]\n\n # all extensions / all sub directories\n for label_idx, _dir in enumerate(dirs):\n xs = []\n\n for ext in self.extensions:\n tmp = [\n Data(x.as_posix(), label_idx, _dir.name)\n for x in _dir.glob(f\"*.{ext}\")\n if x.is_file()\n ]\n xs.extend(tmp)\n\n # adjust to limit 
size\n if self.limit_size is not None:\n random.shuffle(xs)\n xs = xs[: self.limit_size]\n\n # split dataset\n train, test = train_test_split(xs, test_size=self.test_size, shuffle=True)\n\n self.all_list[\"train\"].extend(train)\n self.all_list[\"unknown\"].extend(test)\n self.all_list[\"known\"].extend(random.sample(train, len(test)))\n\n self.classes[label_idx] = _dir.name\n\n self.train_size = len(self.all_list[\"train\"])\n self.unknown_size = len(self.all_list[\"unknown\"])\n self.known_size = len(self.all_list[\"known\"])\n\n self.all_size = self.train_size + self.unknown_size", "title": "" }, { "docid": "ceb46d7fcff3d74e2c7dcbbed3fa9794", "score": "0.57918715", "text": "def load(self, root):\n files = os.listdir(root)\n data_x = []\n data_y = []\n data_traj_x = []\n data_traj_y = []\n seq_continuity = []\n traj_continuity = []\n for i in files:\n # Load file\n tmp = self.read_file(os.path.join(root,i))\n # Remove time-stamp if need be\n tmp = self.remove_ts(tmp)\n # split the input and targets\n tmp_x, tmp_y = self.split_input_output(tmp)\n # generate trajectories\n traj_x, traj_y = self.trajectory_generator(tmp_x, tmp_y)\n # generates sequences for training\n tmp_x, tmp_y, seq_idx = self.sequence_generator(tmp_x, tmp_y)\n # append for concatenation\n data_traj_x.append(traj_x)\n data_traj_y.append(traj_y)\n data_x.append(tmp_x)\n data_y.append(tmp_y)\n seq_continuity.append(seq_idx)\n numpy_traj_x = np.concatenate((data_traj_x), axis=0)\n numpy_traj_y = np.concatenate((data_traj_y), axis=0)\n numpy_data_x = np.concatenate((data_x), axis=0)\n numpy_data_y = np.concatenate((data_y), axis=0)\n numpy_seq_c = np.concatenate((seq_continuity), axis=0)\n return numpy_data_x, numpy_data_y, numpy_traj_x, numpy_traj_y, numpy_seq_c", "title": "" }, { "docid": "3792f2d543748635903f34aa4f80cd07", "score": "0.57902795", "text": "def populate_data(self):\r\n print('FINISH: Load drone data')\r\n self.tree.delete(*self.tree.get_children())\r\n for drone in self.drones.list():\r\n values = (drone.id, drone.name, drone.class_type, drone.rescue, drone.operator)\r\n self.tree.insert('', 'end', values=values)", "title": "" }, { "docid": "181ef563a94caa35e8eccb2f9ca977f1", "score": "0.57892513", "text": "def get_data(self, year, month, day, run):\n labrad.cd(year, month, day, run, kind='images')\n images = _dv.dir()[1]\n for image in images:\n _dv.open(image)\n data = list(_dv.get())\n self.d['images'].append(data)\n self.d['sums'].append(sum(data))\n self.d['sterr'].append(_sem(data))\n self._get_params()", "title": "" }, { "docid": "a9f43efef1f6026ca5ca21e7287c7ee1", "score": "0.5788692", "text": "def load_data(self):\r\n\r\n X, Y = np.empty((self.num_instances, self.img_size, self.img_size, self.num_variables)), \\\r\n np.empty((self.num_instances, self.num_variables))\r\n print(X.shape)\r\n\r\n # Initialize PAA transformer\r\n paa = PiecewiseAggregateApproximation(window_size=None, output_size=self.img_size, overlapping=False)\r\n rp = RecurrencePlot()\r\n\r\n # For all instance\r\n start = time.time()\r\n for idx, row in enumerate(self.data.iterrows()):\r\n for i in range(self.num_variables):\r\n # Get current variable's series\r\n # Apply linear interpolation on missing values\r\n s = row[1][i].interpolate(limit_direction='both').to_numpy()[:self.ts_length]\r\n # Apply PAA and RP\r\n X[idx, :, :, i] = rp.transform(paa.transform(np.expand_dims(s[:-1], axis=0)))[0]\r\n Y[idx, i] = s[-1]\r\n end = time.time()\r\n print(f\"Data loaded in {end - start} seconds\")\r\n\r\n return X, Y", "title": "" }, { 
"docid": "98e27adf8a16a4a577cd1dcd9faa7043", "score": "0.57796514", "text": "def _load_data(self):\n m = ROW_END - ROW_START\n n = COL_END - COL_START\n t = self.data.time.size\n\n low_res = []\n for c in LOW_RES_CHANNELS:\n channel_name = f\"C{c:02}\"\n if channel_name in self.data:\n x = self.data[channel_name].data\n else:\n x = np.zeros((t, m, n), dtype=np.float32)\n x[:] = np.nan\n low_res.append(x)\n low_res = np.stack(low_res, axis=1)\n\n med_res = []\n for c in MED_RES_CHANNELS:\n channel_name = f\"C{c:02}\"\n if channel_name in self.data:\n x = self.data[channel_name].data\n else:\n x = np.zeros((t, 2 * m, 2 * n), dtype=np.float32)\n x[:] = np.nan\n med_res.append(x)\n med_res = np.stack(med_res, axis=1)\n\n hi_res = []\n for c in HI_RES_CHANNELS:\n channel_name = f\"C{c:02}\"\n if channel_name in self.data:\n x = self.data[channel_name].data\n else:\n x = np.zeros((t, 4 * m, 4 * n), dtype=np.float32)\n x[:] = np.nan\n hi_res.append(x)\n hi_res = np.stack(hi_res, axis=1)\n\n invalid = np.any(np.all(np.isnan(low_res), axis=1), axis=(-2, -1))\n invalid *= np.any(np.all(np.isnan(med_res), axis=1), axis=(-2, -1))\n invalid *= np.any(np.all(np.isnan(hi_res), axis=1), axis=(-2, -1))\n valid = ~invalid\n low_res = low_res[valid]\n med_res = med_res[valid]\n hi_res = hi_res[valid]\n\n low_res = self.normalizer[0](low_res)\n med_res = self.normalizer[1](med_res)\n hi_res = self.normalizer[2](hi_res)\n\n self.x = (torch.tensor(low_res), torch.tensor(med_res), torch.tensor(hi_res))", "title": "" }, { "docid": "aae977be42c250a4b0c917ff5e27310a", "score": "0.5768356", "text": "def load(self, root):\n files = os.listdir(root)\n data_x = []\n data_y = []\n data_traj_x = []\n data_traj_y = []\n for i in files:\n # Load file\n tmp = self.read_file(os.path.join(root,i))\n # Remove time-stamp if need be\n tmp = self.remove_ts(tmp)\n # split the input and targets\n tmp_x, tmp_y = self.split_input_output(tmp)\n # generate trajectories\n traj_x, traj_y = self.trajectory_generator(tmp_x, tmp_y)\n # generates sequences for training\n tmp_x, tmp_y = self.sequence_generator(tmp_x, tmp_y)\n # append for concatenation\n data_traj_x.append(traj_x)\n data_traj_y.append(traj_y)\n data_x.append(tmp_x)\n data_y.append(tmp_y)\n # concatenates as a numpy array\n numpy_traj_x = np.concatenate((data_traj_x), axis=0)\n numpy_traj_y = np.concatenate((data_traj_y), axis=0)\n numpy_data_x = np.concatenate((data_x), axis=0)\n numpy_data_y = np.concatenate((data_y), axis=0)\n return numpy_data_x, numpy_data_y, numpy_traj_x, numpy_traj_y", "title": "" }, { "docid": "3bb1a5c5ce86aaad64587bf638dbe90d", "score": "0.5767288", "text": "def __get_local_data__(self):\n if self.param.data_name in ['djia30', 'webtraffic', 'netflow', 'clockerr']:\n self.raw_data_array['data'] = np.load(os.path.join(self.param.data_path, self.param.data_name, 'series.npy'))\n self.raw_data_array['label'] = np.load(os.path.join(self.param.data_path, self.param.data_name, 'events.npy'))\n\n else:\n raise Exception('no data')", "title": "" }, { "docid": "df6cce4bd2439a921e7ddae19dc9c14d", "score": "0.5759458", "text": "def dataArr(filename):\r\n #Open the file\r\n f=h5py.File(filename,'r')\r\n \r\n #Initialize the data arrays\r\n cdata=[]\r\n idxset=[]\r\n vertices=[]\r\n \r\n #Open groups in the file\r\n for group in f.keys():\r\n# print('Group- '+group)\r\n \r\n #Get the group\r\n currGroup=f[group]\r\n \r\n #Open keys in the group\r\n for key in currGroup.keys():\r\n# print('Key- '+key)\r\n \r\n #Append the data to the respective arrays\r\n if 
key=='cdata(Complex)':\r\n cdataGroup=currGroup[key]\r\n \r\n imag=[]\r\n real=[]\r\n #Open the keys in cdata\r\n for subkey in cdataGroup.keys():\r\n# print('Subkey- '+subkey)\r\n \r\n #Get the real and imaginary parts of the array\r\n if subkey=='Imag':\r\n imag=cdataGroup[subkey][()]\r\n elif subkey=='Real':\r\n real=cdataGroup[subkey][()]\r\n \r\n #Convert lists to numpy arrays\r\n imag=np.array(imag)\r\n real=np.array(real)\r\n #Get the cdata value\r\n cdata=real+1j*imag\r\n \r\n elif key=='idxset':\r\n idxset=currGroup[key][()]\r\n elif key=='vertices':\r\n vertices=currGroup[key][()]\r\n \r\n #Remove the z component from the vertices\r\n xVals=[]\r\n yVals=[]\r\n newVertices=[]\r\n for vertex in vertices:\r\n xVals.append(vertex[0])\r\n yVals.append(vertex[1])\r\n newVertices.append([vertex[0],vertex[1]])\r\n vertices=newVertices\r\n \r\n #Convert to numpy arrays\r\n cdata=np.array(cdata)\r\n xVals=np.array(xVals)\r\n yVals=np.array(yVals)\r\n \r\n #Close the file\r\n f.close()\r\n \r\n return cdata, xVals, yVals", "title": "" }, { "docid": "71fdd269f8f8d31cf0393b344b97ff10", "score": "0.5757709", "text": "def load_data(files,varname,returndata,x,y):\n for i,f in enumerate(files):\n data=swim_io.read_nc(f,varname).data\n returndata[:,y[i],x[i]]=data", "title": "" }, { "docid": "591a196cd3c2413a4db125b1642c64f0", "score": "0.5750041", "text": "def _data_generation_(self, list_IDs_temp):\n # Initialization\n X = np.empty((self.batch_size, *self.dim))\n # Generate data\n for i, ID in enumerate(list_IDs_temp):\n # Load sample\n try:\n x = np.load(self.data_directory + str(ID) + self.npy)\n try:\n # pre-processing\n X[i,] = self._preprocess_sample_(x)\n\n except (ValueError, KeyError):\n warnings.warn('Shape or key-error {} sample'.format(ID))\n\n except FileNotFoundError:\n warnings.warn(ID, ' not found')\n\n return X, X", "title": "" }, { "docid": "0a4f22eb4f75dbd8eb1e0ad9c96a6bf5", "score": "0.57446283", "text": "def __load_data(self, delimiter, semantic_mapping):\n # List that contains all the frames of the datasets. Each dataset is a\n # list of frames of shape (num_peds, (frameID, pedID, x and y))\n self.__frames = []\n self.__navigation_map = []\n self.__top_left = []\n self.__semantic_map = []\n self.__homography_matrix = []\n semantic_map_labeled = {}\n homography_map = {}\n\n # Load and add the one hot encoding to the semantic maps\n for i, smap in enumerate(self.__semantic):\n # Load the semantic map\n semantic_map = np.load(smap)\n homography = np.loadtxt(self.__homography[i], delimiter=delimiter)\n filename = os.path.splitext(os.path.basename(smap))[0]\n semantic_map_labeled[filename] = semantic_map\n homography_map[filename] = homography\n\n for i, dataset_path in enumerate(self.__datasets):\n # Load the dataset. 
Each line is formed by frameID, pedID, x, y\n dataset = np.loadtxt(dataset_path, delimiter=delimiter)\n # Get the frames in dataset\n num_frames = np.unique(dataset[:, 0])\n # Initialize the array of frames for the current dataset\n frames_dataset = []\n # Load the navigation map\n navigation_map = np.load(self.__navigation[i])\n\n # Image has padding so we add padding to the top_left point.\n top_left = [\n np.floor(min(dataset[:, 2]) - self.neighborood_size / 2),\n np.ceil(max(dataset[:, 3]) + self.neighborood_size / 2),\n ]\n\n # For each frame add to frames_dataset the pedestrian that appears\n # in the current frame\n for frame in num_frames:\n # Get the pedestrians\n frame = dataset[dataset[:, 0] == frame, :]\n frames_dataset.append(frame)\n\n self.__frames.append(frames_dataset)\n self.__navigation_map.append(navigation_map)\n self.__top_left.append(top_left)\n self.__semantic_map.append(semantic_map_labeled[semantic_mapping[i]])\n self.__homography_matrix.append(homography_map[semantic_mapping[i]])", "title": "" }, { "docid": "6a470c6efad897bdd2e6e5f87684732d", "score": "0.5743957", "text": "def load_data(self):\n pass # stub", "title": "" }, { "docid": "ac10a8e42dc6d1ecbc7a45164a65708c", "score": "0.57406956", "text": "def load_data():\n # Clear all of the data structures\n log.info(\"Clearing data structures for full resync\")\n eps_by_host.clear()\n all_groups.clear()\n ips_by_endpointid.clear()\n\n result = client.read('/calico', recursive=True)\n\n # Iterate over all the leaves that we get back. For each leave we get the full path,\n # so we parse that to determine whether to process the key as network or endpoint API data.\n # The goal of this iteration is to get the data into a simple Python data structure,\n # as opposed to the slightly complicated etcd datastructure.\n for res in result.leaves:\n log.debug(\"Processing key %s\", res.key)\n keyparts = res.key.split(\"/\")\n\n try:\n if keyparts[2] == \"network\":\n log.debug(\"Network\")\n process_network_data(res, keyparts)\n elif keyparts[4] == \"workload\":\n log.debug(\"Endpoint\")\n process_endpoint_data(res, keyparts)\n else:\n log.debug(\"Ignoring key %s\", res.key)\n continue\n except IndexError:\n log.debug(\"Ignoring key %s\", res.key)\n continue\n log.info(\"Finished reading data. 
Database contains %s hosts and %s groups\",\n len(eps_by_host), len(all_groups))", "title": "" }, { "docid": "fd1016a1313c54680eb3daf9417d6a11", "score": "0.57384074", "text": "def load_dataset(self):\n # for mnist data use the function\n if self.dataset_name == 'mnist':\n self.data_X, self.data_y = load_mnist(self.y_dim, train=True) # load_mnist() imported from utils.py\n self.data_v=self.data_X[:,:,self.v_col] # extract columns of each image\n self.c_dim = self.data_X[0].shape[-1] # self.data_X[0] is first image, .shape[-1] is the dim of each element of the image\n self.v_dim = self.data_v.shape[2]\n print(\"Dim v: \",self.data_v.shape)\n else:\n self.data = glob(os.path.join(\"./data\", self.dataset_name, self.input_fname_pattern))\n imreadImg = imread(self.data[0])\n\n if len(imreadImg.shape) >= 3:\n # check if image is a non-grayscale image by checking channel number\n self.c_dim = imread(self.data[0]).shape[-1]\n else:\n self.c_dim = 1\n\n self.grayscale = (self.c_dim == 1)", "title": "" }, { "docid": "603b66c39825f716ed070c9341ff4bc6", "score": "0.5734043", "text": "def _load(dir, samples, points, obsolete, debug=False):\n # Initialize storage arrays\n pd_r = N.zeros((samples, points))\n ref_r = N.zeros((samples, points))\n v_r = N.zeros((samples, points))\n i_r = N.zeros((samples, points))\n pd_s = N.zeros((samples, points))\n ref_s = N.zeros((samples, points))\n v_s = N.zeros((samples, points))\n i_s = N.zeros((samples, points))\n \n # Loop over all acquired files\n for i in range(samples):\n if obsolete:\n reference = N.loadtxt(path.join(dir, 'Reference%g.dat' % i),\n delimiter='\\t', skiprows=2)\n signal = N.loadtxt(path.join(dir, 'Signal%g.dat' % i),\n delimiter='\\t', skiprows=2)\n else:\n reference = N.loadtxt(path.join(dir, 'Reference%g.dat' % i),\n delimiter='\\t', usecols=(1,2,3,4))\n signal = N.loadtxt(path.join(dir, 'Signal%g.dat' % i),\n delimiter='\\t', usecols=(1,2,3,4))\n\n pd_r[i,:] = reference[:,0]\n ref_r[i,:] = reference[:,1]\n v_r[i,:] = reference[:,2]\n i_r[i,:] = reference[:,3]\n \n pd_s[i,:] = signal[:,0]\n ref_s[i,:] = signal[:,1]\n v_s[i,:] = signal[:,2]\n i_s[i,:] = signal[:,3]\n \n return (pd_s, pd_r), (ref_s, ref_r)", "title": "" }, { "docid": "0a643e91249dc3c7e6751300e2ecfc35", "score": "0.57268494", "text": "def load_data(self):\n\n\t\tr = requests.get(RankDrugRev.data_url, verify = False)\n\t\tz = zipfile.ZipFile(io.BytesIO(r.content))\n\t\tdf_train = pd.read_csv(io.BytesIO(z.read(\"drugsComTrain_raw.tsv\")),sep = '\\t', parse_dates = [5])\n\t\tdf_train.rename(columns={\"Unnamed: 0\": \"id\"}, inplace = True)\n\n\t\tdf_test = pd.read_csv(io.BytesIO(z.read(\"drugsComTest_raw.tsv\")),sep = '\\t', parse_dates = [5])\n\t\tdf_test.rename(columns={\"Unnamed: 0\": \"id\"}, inplace = True)\n\t\n\t\tself.raw_data_train = df_train\n\t\tself.raw_data_test = df_test\n\n\t\tself._raw_data_loaded = True", "title": "" }, { "docid": "22fca985869efdbbaf4b86257661055e", "score": "0.57258135", "text": "def load_data(self):\n\t\tmodels_name = os.path.join(self.data_dir,'Public_Objects.p')\n\t\tmodels_file = open(models_name,'rb')\n\t\tself.models = pickle.load(models_file)\n\t\tmodels_file.close()\n\n\t\tsne_name = os.path.join(self.data_dir,'Public_Novas_Phase.p')\n\t\tsne_file = open(sne_name,'rb')\n\t\tself.novas = pickle.load(sne_file)\n\t\tsne_file.close()", "title": "" }, { "docid": "b102e355a9a66c48e7dd854605171b23", "score": "0.57233614", "text": "def load_data(self, DATA_DIR):\r\n # class files\r\n self.class_idxtonum = {}\r\n self.class_numtoidx = {}\r\n 
self.img_clsnum = {}\r\n self.model_clsnum = {}\r\n img_cls_path = DATA_DIR+'/Image_%s.cla' % self.mode.capitalize()\r\n model_cls_path = DATA_DIR+'/Model.cla'\r\n self.read_class_file(img_cls_path, 'img')\r\n self.read_class_file(model_cls_path, 'model')\r\n\r\n # img filenames\r\n self.IMG_DIR = os.path.join(DATA_DIR, 'IMAGES')\r\n class_keys = [x for x in self.class_numtoidx.keys()]\r\n self.img_filenames = []\r\n for key in class_keys:\r\n img_dir = os.path.join(self.IMG_DIR, self.class_numtoidx[key], self.mode.lower())\r\n filenames = [os.path.join(img_dir, x+'.png') for x in self.class_imgs[key]]\r\n filenames = sorted(filenames[:])\r\n if self.shuffle:\r\n shuffle(filenames)\r\n self.img_filenames.append(filenames)\r\n\r\n# # model filenames\r\n# self.MODEL_DIR = os.path.join(DATA_DIR, 'TARGET_MODELS/models')\r\n# self.model_filenames = glob.glob(self.MODEL_DIR+'/*.off') # or png\r\n\r\n # view filenames\r\n self.VIEW_DIR = os.path.join(DATA_DIR, 'VIEWS') #_GRAY_BLACK')\r\n self.view_len = 0\r\n self.view_filenames = {}\r\n for key in class_keys:\r\n view_dir = os.path.join(self.VIEW_DIR, self.class_numtoidx[key])\r\n filenames = glob.glob(view_dir+'/*.png')\r\n filenames = sorted(filenames[:])\r\n if self.shuffle:\r\n shuffle(filenames)\r\n model_name = self.class_numtoidx[key]\r\n self.view_filenames[model_name] = []\r\n self.view_len += self.view_num\r\n step = 1 #len(filenames)//self.view_num\r\n for idx in range(len(filenames)): #self.view_num):#filename in filenames[0:self.view_num]: # maybe view_num == len(filenames)\r\n self.view_filenames[model_name].append(filenames[min(idx*step, len(filenames)-1)].split('/')[-1].rstrip('.png'))\r\n# self.view_len += 1\r\n\r\n # for calculating epoch..\r\n# self.whole_batch_size = self.img_len if (self.train_mode == 'img' or self.mode == 'test') else self.model_len\r\n\r\n self.whole_batch_size = self.model_len if self.train_mode == 'view' else self.img_len", "title": "" }, { "docid": "d7983f22dbaeb857a4600fc7f005c6dc", "score": "0.57144225", "text": "def load_data(self):\n if hasattr(self, 'xtr'):\n return # has already been run\n\n with h5py.File('./data/Input/train/images_training.h5','r') as H:\n self.xtr = np.copy(H['datatrain'])\n\n with h5py.File('./data/Input/train/labels_training.h5','r') as H:\n self.ytr = np.copy(H['labeltrain'])\n\n with h5py.File('./data/Input/test/labels_testing_2000.h5', 'r') as H:\n self.yte = np.copy(H['labeltest'])\n\n with h5py.File('./data/Input/test/images_testing.h5', 'r') as H:\n self.xte = np.copy(H['datatest'])[:len(self.yte)]", "title": "" }, { "docid": "ea5fef430d34d95494c0e9b0b32fb410", "score": "0.5712898", "text": "def create_dataset(): \n # load the temperature data from years 1999-2009\n # don't include the 29. 
feb\n # use the maximum from the 4 values per day (maxtemp)\n years = list(range(1999, 2009))\n longitude = -79.63 + 180\n latitude = 43.68\n grib_folder = '/media/isa/VIS1/temperature/'\n maxtemp_data = np.empty((365, len(years)))\n\n # load the data\n for year_it in range(len(years)):\n f = open(grib_folder + str(years[year_it]) + '.grib')\n time_it = 0\n day_it = 0\n day_data = np.empty((4))\n \n while 1:\n gid = codes_grib_new_from_file(f)\n if (gid is None):\n break\n # check for feb 29\n dataDate = str(codes_get(gid, 'dataDate')).zfill(8)\n if (dataDate[4:6] == '02' and dataDate[6:8] == '29'):\n codes_release(gid)\n print ('skipping ' + dataDate)\n continue\n\n nearest = codes_grib_find_nearest(gid, latitude, longitude)[0]\n \n if (nearest.value == codes_get_double(gid, 'missingValue')):\n raise Warning('missing value!')\n \n day_data[time_it] = nearest.value\n \n time_it = time_it + 1\n if time_it == 4:\n maxtemp_data[day_it, year_it] = day_data.max()\n day_it = day_it + 1\n time_it = 0\n \n codes_release(gid)\n \n f.close()\n \n min_data, ptp_data = maxtemp_data.min(), maxtemp_data.ptp()\n maxtemp_data = (maxtemp_data - min_data) / ptp_data\n \n # x: [year0, year1, ...., year9]\n # y: the next day, ie year9 + 1 day\n # get x data: x is all samples but the last (it wouldn't have an corresponding y)\n data_x = maxtemp_data[:-1, :]\n # get y data: y is the next day\n data_y = np.empty((364))\n for d in range(364):\n data_y[d] = maxtemp_data[d + 1, -1]\n\n train_x, test_x, train_y, test_y = train_test_split(data_x, data_y, test_size=0.2)\n \n dataset = AttrDict()\n dataset.train_x = train_x\n dataset.test_x = test_x\n dataset.train_y = train_y\n dataset.test_y = test_y\n dataset.min_data = min_data\n dataset.ptp_data = ptp_data\n return dataset", "title": "" }, { "docid": "03a48649c1008eec000154e0fa10ab03", "score": "0.571136", "text": "def extractImageData(self): \n if len(self.id_img_train) == 0 or len(self.id_img_test) == 0 :\n self.loadBasicData()\n\n if len(self.X_img_train) == 0:\n self.X_img_train=self.extractImagesCaracteristics(self.id_img_train).to_numpy()\n\n if len(self.X_img_test) == 0:\n self.X_img_test=self.extractImagesCaracteristics(self.id_img_test).to_numpy()", "title": "" }, { "docid": "05efaa37c305e833a3dbc863a3c9785f", "score": "0.5711356", "text": "def readAllData(self):\n\n # Read basic data parameters (number of energy groups, assemblies, axial nodes, etc.)\n self.read1D()\n\n # Read the hex ordering map between DIF3D \"four color\" nodal and DIF3D GEODST\n # Also read index pointers to incoming partial currents on outer reactor surface\n # (these don't belong to any assembly)\n # Incoming partial currents are non-zero due to flux extrapolation\n ng = self.fc[\"ngroup\"] # number of energy groups\n imax = self.fc[\n \"ninti\"\n ] # number of triangular mesh cells in \"i\" direction (rhombus or rectangle cells)\n jmax = self.fc[\n \"nintj\"\n ] # number of triangular mesh cells in \"j\" direction (rhombus or rectangle cells)\n zmax = self.fc[\n \"nintk\"\n ] # number of axial nodes (same for each assembly in DIF3D)\n\n self.triangleFluxes = numpy.zeros((imax, jmax, zmax, ng))\n\n for g in range(ng): # loop through energy groups\n\n gEff = self.getEnergyGroupIndex(g)\n\n for z in range(zmax): # loop through axial nodes\n self.triangleFluxes[\n :, :, z, gEff\n ] = self.readTriangleFluxes() # read fluxes on this i-j plane\n\n self.f.close()", "title": "" }, { "docid": "622e5f7b457db2100d5619d8206ba1fc", "score": "0.5710531", "text": "def loadNext(self, files, 
c):\n\n # If I have already retrieved all data, return None\n if self.counters[c] == len(files):\n self.counters[c] += 1\n return None\n # Next time, it will start over\n elif self.counters[c] > len(files):\n self.counters[c] = 0\n\n target = files[self.counters[c]]\n study, timepoint, subject = target.split(\"/\")[-4:-1]\n id_ = study + \"_\" + timepoint + \"_\" + subject\n\n # This if controls that the behavior is different when the container\n # used when loadInMemory is not empty.\n # TODO:\n # - Put in a single container all data loaded.\n # - Read X, process Y, pass to the child class.\n # - Create child class to get X,Y,ids and return everything processed.\n try:\n X_train = self.X_container[id_][\"in_volume\"]\n Y_train = self.Y_container[id_][\"out_segmentation\"]\n except:\n # Read the actual data\n X_train = nib.load(target+\"scan.nii.gz\").get_data()\n\n X_train = np.moveaxis(X_train, -1, 0)\n X_train = np.expand_dims(X_train, axis=0)\n\n if c == 1:\n ext = \"_lesion\"\n else:\n ext = self.ext\n if os.path.isfile(target+\"scan\"+ext+\".nii.gz\"):\n Y_train = nib.load(target+\"scan\"+ext+\".nii.gz\").get_data()\n #Y_train = np.expand_dims(Y_train, -1)\n Y_train = np.stack([1.0*(Y_train==j) for j in range(2)], axis=0)\n else:\n Y_train = np.ones([2] + list(X_train.shape[2:]))\n Y_train[1,:,:,:] = 0\n #print(Y_train.shape)\n\n #Y_train = np.moveaxis(Y_train, -1, 0)\n Y_train = np.expand_dims(Y_train, 0)\n\n self.counters[c] += 1\n\n # The ID must be a list, so that I can later iterate over it\n #return X_train, Y_train, [id_]\n return X_train, Y_train, [id_]", "title": "" }, { "docid": "68d324729d695a57de8de489891b6847", "score": "0.5709875", "text": "def readSampledGEOS(self):\n col = 'aer_Nv'\n if self.verbose: \n print 'opening file',self.LBinFile.replace('%col',col)\n nc = Dataset(self.LBinFile.replace('%col',col))\n\n print 'File opened'\n\n for sds in self.SDS_AER:\n sds_ = sds\n if sds in ncALIAS:\n sds_ = ncALIAS[sds]\n if self.verbose: \n print 'Reading ',sds_ \n var = nc.variables[sds_][:]\n self.__dict__[sds] = var\n if self.verbose: \n print 'Finished Reading ',sds_\n\n nc.close()\n\n if self.verbose: \n print 'opening file',self.LCinFile\n nc = Dataset(self.LCinFile)\n\n for sds in self.SDS_CLD:\n sds_ = sds\n if sds in ncALIAS:\n sds_ = ncALIAS[sds]\n var = nc.variables[sds_][:]\n self.__dict__[sds] = var\n if self.verbose: \n print 'Finished Reading',sds_\n\n nc.close()", "title": "" } ]
b0863bb99a634fc50175ddef28794392
Draw a correlation heatmap and return the frame.
[ { "docid": "e8041ceb481120c7ee9fd76c08fa2068", "score": "0.69290173", "text": "def get_correlation(frame, outdir=None):\n\n corr = frame.corr()\n\n if outdir != None:\n corr.to_csv(outdir/'correlation.csv')\n plt.figure()\n sns.heatmap(corr, cmap=plt.cm.RdYlBu_r)\n plt.title('Correlation Heatmap')\n plt.tight_layout()\n plt.savefig(str(outdir/'correlation.png'))\n plt.close()\n\n return corr", "title": "" } ]
[ { "docid": "c32d7300ec62ae5ea0e58e19b2099924", "score": "0.74469304", "text": "def correlation_heatmap(df):\n _, ax = plt.subplots(figsize=(14, 12))\n colormap = sns.diverging_palette(220, 10, as_cmap=True)\n\n _ = sns.heatmap(df.corr().round(3),\n cmap=colormap,\n square=True,\n cbar_kws={'shrink': .9},\n ax=ax,\n annot=True,\n linewidths=0.1,\n vmax=1.0,\n linecolor='white',\n annot_kws={'fontsize': 12})\n\n plt.title(\"Feature's Pearson Correlation\", y=1.05, size=15)\n return", "title": "" }, { "docid": "2d2e09723c8aa1d2a583a7334e57a4d8", "score": "0.72475976", "text": "def plot_correlation_matrix(df):\n corr = df.corr()\n mask = np.zeros_like(corr, dtype=np.bool)\n mask[np.triu_indices_from(mask)] = True\n f, ax = plt.subplots(figsize=(12,8))\n cmap = sns.color_palette('coolwarm')\n sns.heatmap(corr, mask=mask, cmap=cmap, center=0, square=True, linewidths=.5,\n yticklabels=True, cbar_kws={'shrink':.5})\n plt.title('Correlation Matrix')\n plt.xticks(rotation=90, fontsize=7)\n plt.yticks(rotation=0, fontsize=7)\n plt.tight_layout()\n plt.show()", "title": "" }, { "docid": "fbd5ee95bc4598540e8173a90735cd22", "score": "0.7207547", "text": "def plot_corr(data) :\n \n corr = data.corr()\n mask = np.zeros_like(corr, dtype=bool)\n mask[np.triu_indices_from(mask)] = True\n palette = sns.diverging_palette(20, 220, n=256)\n \n fig = plt.figure(figsize=(16,16))\n sns.heatmap(corr,mask=mask, cmap=palette,annot=True)\n plt.tight_layout()", "title": "" }, { "docid": "3c42b518eaf55ce0de7c963bcdc4233f", "score": "0.7150536", "text": "def correlation_heatmap(df, *, font_size=12, ax=None):\n if ax is None:\n plt.gca()\n\n # Correlation matrix via Pandas (numeric data only)\n corr_matrix = df.corr()\n # Generate a mask for the upper triangle\n mask = np.zeros_like(corr_matrix, dtype=np.bool)\n mask[np.triu_indices_from(mask)] = True\n\n # Store heatmap from mask\n heat_plot = sns.heatmap(corr_matrix, mask=mask,\n cmap='RdBu_r', cbar_kws={\"shrink\": .6},\n annot=True, annot_kws={\"size\": font_size},\n vmax=1, vmin=-1, linewidths=.5,\n square=True, ax=ax)\n fig = heat_plot.figure\n return fig", "title": "" }, { "docid": "1980211836a385133a288765bb593a8e", "score": "0.7121204", "text": "def plot_corr_matrix(corr):\n fig, ax = plt.subplots()\n mask = np.zeros_like(corr, dtype=np.bool)\n mask[np.triu_indices_from(mask, k=1)] = True\n sns.heatmap(corr, vmin=-1, vmax=1, mask=mask,\n cmap=sns.diverging_palette(220, 10, as_cmap=True),\n linewidths=0.5, cbar=True, square=True, ax=ax)\n ax.set_title('Correlation Matrix')\n fig.tight_layout()\n return fig", "title": "" }, { "docid": "5fb4b269fb7616a4495f82e9b6c1735c", "score": "0.7109663", "text": "def plot_correlations(df):\n m_cor = df.corr()\n fig, ax = plt.subplots(figsize=(11, 9))\n # Add diverging colormap from red to blue\n cmap = sns.diverging_palette(10, 180, center='dark', as_cmap=True)\n sns.heatmap(m_cor,\n xticklabels=m_cor.columns, yticklabels=m_cor.columns,\n vmin=-1, vmax=1, cmap=cmap, square=True, cbar_kws={\"shrink\": .5}, ax=ax)\n plt.tight_layout()", "title": "" }, { "docid": "80e9ba64d1e48755b8ee468db4f4e038", "score": "0.70773", "text": "def plot_corelation():\n sns.set(color_codes=False, font_scale=1)\n df_cor = self.__df[[\"Close\", \"Adj Close\",\"Volume\",\"EMA5\",\"SMA20\",\"TSI\"]]\n corr = df_cor.corr()\n\n # Generate a mask for the upper triangle\n mask = np.triu(np.ones_like(corr, dtype=bool))\n\n # Set up the matplotlib figure\n f, ax = plt.subplots(figsize=(11, 9))\n\n # Generate a custom diverging colormap\n cmap = 
sns.diverging_palette(230, 20, as_cmap=True)\n\n # Draw the heatmap with the mask and correct aspect ratio\n sns.heatmap(corr, mask=mask,cmap= 'coolwarm', vmax=.3, center=0,annot=False,\n square=True, linewidths=.5, cbar_kws={\"shrink\": .5})", "title": "" }, { "docid": "4acee719983c88fe0c2a67cab4c4c556", "score": "0.70678544", "text": "def hof_correlation_plot(self, save=False):\n plt.figure('Correlation Heatmap', figsize=(16, 14),\n facecolor='white', edgecolor='black')\n rows, cols = (1, 1)\n ax0 = plt.subplot2grid((rows, cols), (0, 0))\n\n correlation = self.stats_fame.corr()\n cut = 0.5\n color_mask = (correlation[(correlation > -cut) & (correlation < cut)]\n .fillna(0)\n .astype(bool))\n correlation[color_mask] = 0\n\n cmap = mplcol.LinearSegmentedColormap.from_list(\n 'blue_white_blue', ['indianred'] + ['white'] * 3 + ['C0'])\n\n sns.heatmap(correlation, center=0,\n cmap=cmap,\n cbar_kws={'orientation': 'vertical'},\n linecolor='lightgray', linewidths=0.1, vmin=-1, vmax=1,\n ax=ax0)\n\n cbar = ax0.collections[0].colorbar\n cbar.set_ticks(np.arange(-1, 1.5, 0.5).tolist())\n cbar.ax.tick_params(labelsize=size['label'])\n cbar.outline.set_linewidth(1)\n cbar.outline.set_edgecolor('lightgray')\n\n ax0.set_title('Statistics Correlation (0.5 Threshold)',\n fontsize=size['title'])\n ax0.set_xticklabels(ax0.xaxis.get_majorticklabels(),\n fontsize=size['label'], rotation=90)\n ax0.set_yticklabels(ax0.yaxis.get_majorticklabels(),\n fontsize=size['label'], rotation=0)\n\n super_title = plt.suptitle('Hall of Fame Players',\n fontsize=size['super_title'],\n x=0.03, y=0.93)\n\n save_fig('hof_correlation', save, super_title)", "title": "" }, { "docid": "a97f49b29bf28021f9c7aece2580bb79", "score": "0.7011578", "text": "def plot_correlation_heatmap(df, fname=None, figsize=(10, 10)):\n corr = df.corr()\n fig = plt.figure(figsize=figsize)\n sns.heatmap(corr\n , xticklabels=corr.columns.values\n , yticklabels=corr.columns.values\n , cmap='bwr'\n , annot=True)\n\n if fname:\n fig.savefig(fname\n , bbox_inches='tight')", "title": "" }, { "docid": "a717b6f2ec1b256873c2060c5ace4e92", "score": "0.7002763", "text": "def correlations(df, cols):\r\n plt.figure(figsize=(16, 12))\r\n imgs = sns.heatmap(df[cols].corr(), cmap=\"YlGnBu\", annot=True, fmt='.2f', vmin=0)\r\n plt.savefig(\"correlation.png\")\r\n return imgs", "title": "" }, { "docid": "3eec0831f01e6a204299fafc4ea2bb55", "score": "0.69915277", "text": "def correlation_image(the_data, labels=None, cmap=0):\n colormap = [COLORMAP, COLORMAPGP][cmap]\n binned_data = np.digitize(the_data, np.linspace(-1, 1, len(colormap)))\n color = np.array(colormap)[np.rot90(binned_data, 2)-1].flatten().tolist()\n\n # Adjust the transparency value\n orig = np.rot90(np.abs(the_data), 2)\n alpha = orig + 0.3\n alpha[orig >= 0.25] = 0.8\n alpha[orig >= 0.6] = 1.0\n alpha = alpha.flatten().tolist()\n\n orientation = np.pi / 3\n if labels is None:\n labels = list(map(lambda x: str(x), range(the_data.shape[0])))\n orientation = 0.0\n\n xname, yname = list(zip(*product(labels, labels)))\n\n data = dict(xname=list(reversed(xname)), yname=list(reversed(yname)),\n colors=color, alphas=alpha,\n count=list(reversed(the_data.round(2).flatten())))\n\n # Create the figure\n p = figure(x_axis_location=\"above\", tools=\"hover,save\",\n y_range=list(reversed(labels)), x_range=labels,\n tooltips = [('Names', '@yname, @xname'), ('Correlation', '@count')])\n\n p.plot_width = 800\n p.plot_height = 800\n p.grid.grid_line_color = None\n p.axis.axis_line_color = None\n 
p.axis.major_tick_line_color = None\n p.axis.major_label_text_font_size = \"5pt\"\n p.axis.major_label_standoff = 0\n p.xaxis.major_label_orientation = orientation\n\n p.rect('xname', 'yname', 0.9, 0.9, source=data,\n color='colors', line_color=None, alpha='alphas',\n hover_line_color='black', hover_color='colors')\n\n return row(p, colormap_legend(cmap))", "title": "" }, { "docid": "1be1335bd5c7443dece7a476480725ad", "score": "0.69528145", "text": "def correlation_plot(x, h):\n corr = np.corrcoef(x.T)\n fig, ax = plt.subplots(figsize=(10, 10))\n \n sns.heatmap(corr, cmap=sns.diverging_palette(200, 10, n=200), center=0,\n square=True, linewidths=.2, vmin=-1, vmax=1, cbar_kws={\"shrink\": .5}, ax=ax)\n ax.set_xticks(np.arange(len(h)) + 0.5)\n ax.set_yticks(np.arange(len(h)) + 0.5)\n \n ax.set_xticklabels(h, rotation=90);\n ax.set_yticklabels(h, rotation=0);\n ax.set_ylim(-1, 31);", "title": "" }, { "docid": "9b4613e357d195c9bc0cc38b65e7b437", "score": "0.6837894", "text": "def plot_corr(df, sort=False, **kwargs):\r\n # TODO: sns.clustermap has a metric kwarg that may be interesting to test out and implement.\r\n # TODO: Bug, if plt.show() is used after this function two windows will open, one with\r\n # the desired heatmap and one empty.\r\n # Create the corrmat or clustermap in correlation.py insted?\r\n sns.set(context=\"paper\", font=\"monospace\")\r\n save_figure = kwargs.get(\"save_figure\", False)\r\n file_name = kwargs.get(\"file_name\", \"tmp_heatmap.png\")\r\n corr_method = kwargs.get(\"corr_method\", \"pearson\")\r\n abs_values = kwargs.get(\"abs_values\", False)\r\n return_corrmat = kwargs.get(\"return_corrmat\", False)\r\n\r\n corrmat = df.corr(method=corr_method)\r\n if abs_values:\r\n corrmat = corrmat.abs()\r\n if (sort):\r\n sort_method = kwargs.get(\"sort_method\", \"average\")\r\n metric = kwargs.get(\"metric\", \"euclidian\")\r\n cg = sns.clustermap(corrmat, method=sort_method, metric=metric,\r\n cmap=plt.cm.inferno, linewidths=.5)\r\n #Rotate x and y tick labels so long texts will fitt next to eachother.\r\n plt.setp(cg.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)\r\n plt.setp(cg.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)\r\n if save_figure:\r\n cg.savefig(file_name)\r\n else:\r\n f, sns_ax = plt.subplots(figsize=(12, 10))\r\n hmap = sns.heatmap(corrmat, ax=sns_ax, vmax=.8, linewidths=.5,\r\n square=True, cmap=plt.cm.inferno)\r\n\r\n colnames = corrmat.columns.values\r\n for i, colname in enumerate(colnames):\r\n if i and colname != colnames[i - 1]:\r\n ax.axhline(len(colnames) - i, c=\"black\")\r\n ax.axvline(i, c=\"black\")\r\n\r\n f.tight_layout()\r\n\r\n if return_corrmat:\r\n if sort: # Sort the columns and rows in the clustered fashion.\r\n corrmat = corrmat[cg.dendrogram_col.reordered_ind]\r\n corrmat = corrmat.reindex(corrmat.columns.values)\r\n return cg, corrmat\r\n else:\r\n if sort:\r\n return cg\r\n else:\r\n return f", "title": "" }, { "docid": "ac8aa4f4699da706637c2a55fc11753a", "score": "0.68348444", "text": "def plot_correlation(df: pd.DataFrame, only_save=True, dpi=300):\n # Compute the correlation matrix\n corr_all = df.corr()\n\n # Generate a mask for the upper triangle\n mask = np.zeros_like(corr_all, dtype=np.bool)\n mask[np.triu_indices_from(mask)] = True\n\n # Set up the matplotlib figure\n f, ax = plt.subplots(figsize=(7, 7))\n\n # Draw the heatmap with the mask and correct aspect ratio\n sns.heatmap(corr_all, mask=mask, square=True, linewidths=0.5, ax=ax, cmap=\"BuPu\")\n plt.title(\"Correlation Matrix Heatmap\")\n if 
only_save:\n plt.savefig(\"img/correlation_heatmap.png\", dpi=dpi)\n plt.close()\n else:\n plt.show()", "title": "" }, { "docid": "c21e06600ef92cf0003d31f3cd02a21b", "score": "0.6806044", "text": "def _heatmap(data_df, output_path):\n\n plt.figure(figsize=(20, 10))\n plot = sns.heatmap(data_df.corr(), annot=False,\n cmap='Dark2_r', linewidths=2)\n plot.figure.savefig(output_path)\n plt.clf()", "title": "" }, { "docid": "3b442ea219339fbd3fe7e543ac7b1311", "score": "0.67628884", "text": "def correlation_plot(data,output_path=None): \n \n corrmat = data.corr()\n fig, ax = plt.subplots(figsize=(15,15))\n sns.heatmap(corrmat,cmap=\"RdBu_r\",linewidths=.5,annot=True)\n if output_path is not None:\n output = os.path.join(output_path,'Corr_plot'+'.png')\n plt.savefig(output)\n print('Image saved at',str(output))", "title": "" }, { "docid": "bd9959d269cb351507c99c829aa99ddc", "score": "0.67297167", "text": "def corelationMatrix():\n try:\n corr_matrix = obj_Data.df.corr()\n fig = px.imshow(corr_matrix, title= \"Corelation Matrix\")\n return fig\n\n except:\n logging.exception('Something went wrong with Corelation Matrix plot')", "title": "" }, { "docid": "987f0a4f6db076988d2886aa91e368e9", "score": "0.6723992", "text": "def show_correlation_plot(self, correlations, data):\n\n plt.figure(figsize=(10, 8))\n ax = sns.heatmap(correlations,\n xticklabels=correlations.columns.values,\n yticklabels=correlations.columns.values, annot=True)\n\n plt.setp(ax.get_xticklabels(), rotation=45, horizontalalignment=\"right\",\n rotation_mode=\"anchor\")\n\n plt.tight_layout()\n plt.autoscale()\n plt.savefig(\"../data/04_analytics/correlation_plot.png\")\n plt.show()", "title": "" }, { "docid": "9e7b55ec4cda6f5711cce6980be37a9a", "score": "0.6681251", "text": "def heatmap_full(df):\r\n sns.set_style('whitegrid')\r\n plt.subplots(figsize = (14,8))\r\n # Generate mask for upper triangle\r\n m1 = np.zeros_like(df.corr(), dtype=np.bool) \r\n m1[np.triu_indices_from(m1)] = True\r\n sns.set(font_scale=0.8)\r\n sns.heatmap(df.corr(), cmap=sns.diverging_palette(20, 220, n=200), \r\n mask=m1, center=0, square=True, annot=True)\r\n plt.title(\"All features heatmap\", fontsize=15)", "title": "" }, { "docid": "5e1897d41791c864b803f7db55a85636", "score": "0.6593312", "text": "def generate_heatmap(dataset, title):\n corr = dataset.corr()\n sns.heatmap(corr, xticklabels=corr.columns,\n yticklabels=corr.columns)\n\n plt.title(title)\n plt.show()", "title": "" }, { "docid": "f6614f9ae02808b5b43f83645563d60c", "score": "0.65319544", "text": "def draw_corrmatrix(self, mode='stock'):\n fig, ax = plt.subplots(dpi=200, figsize=(6, 6))\n\n if mode == 'stock':\n im = ax.imshow(self.corrmatrix, vmin=-1, vmax=1)\n fig.colorbar(im, fraction=0.046, pad=0.04)\n ax.set_xlabel('Companies')\n ax.set_ylabel('Companies')\n return fig\n elif mode == 'category':\n im = ax.imshow(self.categ_corrmatrix, vmin=0, vmax=1)\n ax.set_xticks(range(len(self.categories)))\n ax.set_yticks(range(len(self.categories)))\n ax.set_xticklabels(self.categories, rotation=45,\n horizontalalignment='right', fontsize=8)\n ax.set_yticklabels(self.categories, fontsize=8)\n # Annotate grid\n for i in range(len(self.categories)):\n for j in range(len(self.categories)):\n ax.text(j, i, round(self.categ_corrmatrix[i, j], 2),\n ha=\"center\", va=\"center\", color=\"k\", fontsize=8)\n fig.colorbar(im, fraction=0.046, pad=0.04)\n return fig", "title": "" }, { "docid": "15434dd38172978aaf79f42bb83d51a8", "score": "0.6420136", "text": "def plot_correlation_matrix(list_df: 
List[pd.DataFrame], names: List[str]) -> None:\n corr = compute_correlation_matrix(list_df)\n\n corrMatrix = pd.DataFrame(corr, columns=names, index=names)\n if len(list_df) <= 4:\n plt.subplots(figsize=(15, 10))\n else:\n plt.subplots(figsize=(20, 10))\n\n sns.heatmap(corrMatrix, annot=True, fmt='g')\n plt.title('Correlation matrix')\n\n plt.show()", "title": "" }, { "docid": "13f0918b2812b18ee9763eec2ec5f3c6", "score": "0.64004743", "text": "def plot_single_mode_correlation(self, data, output_dir, addition):\n correlations = data[self.features].corr()\n fig = plt.figure(figsize=(10, 10))\n ax = fig.add_subplot(111)\n cax = ax.matshow(correlations, vmin=-1, vmax=1, cmap='viridis')\n ticks = np.arange(0, len(self.features), 1)\n plt.rc('axes', labelsize=8)\n ax.set_xticks(ticks)\n ax.set_yticks(ticks)\n ax.set_xticklabels(self.features, rotation=-90)\n ax.set_yticklabels(self.features)\n fig.colorbar(cax)\n fig.tight_layout()\n plot_out = os.path.join(\n output_dir, str(addition) + '_correlations.png')\n plt.savefig(plot_out, bbox_inches='tight')\n plt.close('all')", "title": "" }, { "docid": "4f61a76440ef496f0781b42d77a9b28e", "score": "0.6372198", "text": "def plot_cor_matrix(data):\n correlation_matrix = data.corr() # creating correlation_matrix\n # print(correlation_matrix)\n dataplot = sb.heatmap(correlation_matrix) # creating a heat map of the correlation_matrix\n mp.show() # showing the correlation_matrix\n sorted_mat = correlation_matrix.unstack().sort_values()\n # Retain upper triangular values of correlation matrix and\n # make Lower triangular values Null\n upper_corr_mat = correlation_matrix.where(np.triu(np.ones(correlation_matrix.shape), k=1).astype(bool))\n # Convert to 1-D series and drop Null values\n unique_corr_pairs = upper_corr_mat.unstack().dropna()\n # Sort correlation pairs\n sorted_mat1 = unique_corr_pairs.sort_values()\n print(sorted_mat1)\n return sorted_mat1", "title": "" }, { "docid": "47cdce25f8ae6ee0f7f78ace0cb91f10", "score": "0.63189405", "text": "def plot_correlations(matrix):\n\n pass", "title": "" }, { "docid": "e0ea48564c7b6d0c9b52fdcb54a0a49c", "score": "0.62723863", "text": "def correlation_matrix(self, data, col_y, corr_value = 0.95, corr_value_w_targhet = 0.95, plot_matr = 'yes'):\n corr = data.corr().abs()\n\n if plot_matr == 'yes':\n ds().nuova_fig(7, height =8, width =10)\n ds().titoli(titolo=\"Correlation matrix\")\n sns.heatmap(corr[(corr >= 0.5)], cmap='viridis', vmax=1.0, vmin=-1.0, linewidths=0.1, annot=True, annot_kws={\"size\": 8}, square=True, linecolor=\"black\");\n ds().aggiusta_la_finestra()\n st.pyplot()\n\n corr_with_target = corr[col_y]#correlation with the target\n relevant_feature_with_target = corr_with_target[corr_with_target < corr_value_w_targhet].sort_values(ascending = False).reset_index()\n\n upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(np.bool))#select upper triangle of correlation matrix\n correlation_between_parameters = [column for column in upper.columns if any(upper[column] > corr_value)]\n return relevant_feature_with_target, correlation_between_parameters", "title": "" }, { "docid": "780ae438bd821d468612b7ea31eba739", "score": "0.6259866", "text": "def _heatmap_plot(self,heatmap,title):\n fig, ax = plt.subplots(figsize=(self.plot_scale,self.plot_scale*(self.y_size/self.x_size)))\n plt.title(title)\n plt.xlabel(\"X\")\n plt.ylabel(\"Y\")\n im = plt.imshow(heatmap.T, extent=[0, plotcnf[\"scrsize\"][\"x\"], 0, plotcnf[\"scrsize\"][\"y\"]], origin='lower')\n plt.gca().invert_yaxis()\n divider = 
axes_grid1.make_axes_locatable(ax)\n cax = divider.append_axes('right', size='5%', pad=0.05)\n fig.colorbar(im, cax=cax)\n\n return fig", "title": "" }, { "docid": "5d9df5f5b0aa285fff9c5470be3c7c0e", "score": "0.6212737", "text": "def correlation(x_list, y_list, title):\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.xcorr(x_list, y_list, usevlines=True, maxlags=75, normed=True, lw=2) #\n ax1.grid(True)\n #ax1.axhline(0, color='black', lw=2)\n save(title)\n plt.close(fig)", "title": "" }, { "docid": "bdab770813d42d3832d82e22a17e6a95", "score": "0.6206435", "text": "def show_correlation(df, size=10):\n corr = df.corr()\n fig, ax = plt.subplots(figsize=(size, size))\n cax = ax.matshow(corr)\n fig.colorbar(cax)\n plt.xticks(range(len(corr.columns)), corr.columns, rotation = 90)\n plt.yticks(range(len(corr.columns)), corr.columns)\n plt.show()", "title": "" }, { "docid": "500c14209ca15688f2c48360230e7734", "score": "0.6198036", "text": "def correlations(data, **kwds):\n\t# simply call df.corr() to get a table of\n\t# correlation values if you do not need\n\t# the fancy plotting\n\tcorrmat = data.corr(**kwds)\n\t\n\tfig, ax1 = plt.subplots(ncols=1, figsize=(6,5))\n\t\n\topts = {'cmap': plt.get_cmap(\"RdBu\"),\n\t\t'vmin': -1, 'vmax': +1}\n\theatmap1 = ax1.pcolor(corrmat, **opts)\n\tplt.colorbar(heatmap1, ax=ax1)\n\n\tax1.set_title(\"Correlations\")\n\t\n\tlabels = corrmat.columns.values\n\tfor ax in (ax1,):\n\t\t# shift location of ticks to center of the bins\n\t\tax.set_xticks(np.arange(len(labels))+0.5, minor=False)\n\t\tax.set_yticks(np.arange(len(labels))+0.5, minor=False)\n\t\tax.set_xticklabels(labels, minor=False, ha='right', rotation=70)\n\t\tax.set_yticklabels(labels, minor=False)\n\t\n\t#plt.tight_layout()\n\n\t#fig.savefig(file)", "title": "" }, { "docid": "f6ab2cc003006260da744341f353c219", "score": "0.61778855", "text": "def correlation_heatmap(file: str, options: list):\n if \"/protein_table\" in options:\n with st.beta_expander(\"Correlation heatmap\"):\n df = cached_file(file)\n\n cols = [_ for _ in df.columns if \"LFQ\" in _]\n if len(cols) == 0:\n cols = df.columns\n\n if multiple_file_check(cols):\n df = np.log(df[cols])\n corr = df.corr()\n\n fig = make_subplots(rows=1, cols=1)\n fig.add_trace(\n trace=go.Heatmap(\n z=corr.values,\n x=corr.index.values,\n y=corr.columns.values,\n colorscale=\"Greys\",\n )\n )\n fig.update_layout(height=600, width=600)\n st.write(fig)", "title": "" }, { "docid": "b7ad3c53a74a5723e9b1bde0a6e8f842", "score": "0.61768585", "text": "def plot_corr(df, size=10):\n set_my_plt_style()\n corr = df.corr()\n # Re-order the rows and columns using clustering\n d = sch.distance.pdist(corr)\n L = sch.linkage(d, method='ward')\n ind = sch.fcluster(L, 0.5 * d.max(), 'distance')\n columns = [df.columns.tolist()[i] for i in list((np.argsort(ind)))]\n corr = corr.reindex(columns, axis=1)\n corr = corr.reindex(columns, axis=0)\n\n # Plot the correlation matrix\n fig, ax = plt.subplots(figsize=(size, size))\n cax = ax.matshow(corr, cmap=Curl_5_r.mpl_colormap, vmin=-1, vmax=1)\n plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)\n plt.yticks(range(len(corr.columns)), corr.columns)\n\n # Add the colorbar legend\n cbar = fig.colorbar(cax, ticks=[-1, -0.5, 0, 0.5, 1], aspect=10, shrink=.8)\n # plt.show()\n return fig", "title": "" }, { "docid": "80160f95563b86613f3d04c55bacef14", "score": "0.61421454", "text": "def correlation_plot(self, target = None, top_N = 10, ascending = False):\r\n corrmat = self.df.corr()\r\n\r\n if target is None:\r\n 
plt.figure(figsize=(12,9))\r\n sns.heatmap(corrmat, vmax=.8, square=True)\r\n else:\r\n plt.figure(figsize=(10,8))\r\n sns.heatmap(corrmat[[target]].sort_values(by=[target],ascending = ascending)[:top_N],\r\n vmin=-1,\r\n cmap='coolwarm',\r\n annot=True)", "title": "" }, { "docid": "7fea3cb690ab7105e2783934526ff559", "score": "0.6124578", "text": "def correlation_matrix(\n corr_mat: np.ndarray,\n labels: Union[List[str], np.ndarray],\n figure_path: pathlib.Path,\n) -> None:\n # rounding for test in CI to match reference\n fig, ax = plt.subplots(\n figsize=(round(5 + len(labels) / 1.6, 1), round(3 + len(labels) / 1.6, 1)),\n dpi=100,\n )\n im = ax.imshow(corr_mat, vmin=-1, vmax=1, cmap=\"RdBu\")\n\n ax.set_xticks(np.arange(len(labels)))\n ax.set_yticks(np.arange(len(labels)))\n ax.set_xticklabels(labels)\n ax.set_yticklabels(labels)\n for tick in ax.get_xticklabels():\n tick.set_rotation(45)\n tick.set_horizontalalignment(\"right\")\n\n fig.colorbar(im, ax=ax)\n ax.set_aspect(\"auto\") # to get colorbar aligned with matrix\n fig.tight_layout()\n\n # add correlation as text\n for (j, i), corr in np.ndenumerate(corr_mat):\n text_color = \"white\" if abs(corr_mat[j, i]) > 0.75 else \"black\"\n if abs(corr) > 0.005:\n ax.text(i, j, f\"{corr:.2f}\", ha=\"center\", va=\"center\", color=text_color)\n\n figure_path.parent.mkdir(parents=True, exist_ok=True)\n log.debug(f\"saving figure as {figure_path}\")\n fig.savefig(figure_path)\n plt.close(fig)", "title": "" }, { "docid": "78ef41f2a3aaadf36f546c855f935ef1", "score": "0.61020917", "text": "def fit(self, X, y=None):\n\n self.correlation = X.corr()\n# print(self.corr)\n\n # Generate a mask for the upper triangle\n self.mask = np.zeros_like(self.correlation, dtype=np.bool)\n self.mask[np.triu_indices_from(self.mask)] = True\n\n # Set up the matplotlib figure\n f, ax = plt.subplots(figsize=(11, 9))\n\n # Generate a custom diverging colormap\n self.cmap = sns.diverging_palette(\n h_neg=self.h_neg,\n h_pos=self.h_pos,\n as_cmap=self.as_cmap\n )\n\n # Draw the heatmap with the mask and correct aspect ratio\n sns.heatmap(\n self.correlation,\n mask=self.mask,\n cmap=self.cmap,\n vmax=.3,\n center=0,\n square=True,\n linewidths=.5,\n cbar_kws={\"shrink\": .5}\n )\n\n plt.show()\n\n return self", "title": "" }, { "docid": "2f101aed6225f5361cf064f0e2fc7061", "score": "0.59934545", "text": "def cor_heatmap_plot(self, heatmap_name, heatmap_val):\n\n # Split the data into SE and PE\n pconfig = {\n 'title': 'Pearson correlation',\n 'xlab': True,\n }\n self.add_section(\n description='Pearson correlation between log<sub>2</sub> normalised CPM values are calculated and clustered.',\n plot=heatmap.plot(heatmap_val, heatmap_name, pconfig=pconfig)\n )", "title": "" }, { "docid": "6f7a423fcdc41296fc42ccd4cec0c07c", "score": "0.59932196", "text": "def correlations(DataFrame):\n # copy df in\n df = DataFrame.copy()\n\n # get the correlations\n corrs = df.corr()\n\n # get only the correlations for the target variable\n corrs_target = corrs['fare_amount'].sort_values(ascending = True)\n\n # corrs['fare_amount'].plot.bar(color = 'b') annot = True, vmin = -1, vmax = 1,\n\n # plot the total heat map\n # plt.figure(figsize = (12, 12))\n # sns.heatmap(corrs, annot = True, fmt = '.3f')\n # plt.show()\n\n # barplot of the correlations between fare amount and features\n corrs_target.plot(kind=\"barh\", color = 'b')\n # sns.barplot(x = corrs_target.index.values, y = corrs_target.values)\n plt.show()\n\n\n return None", "title": "" }, { "docid": "c40f7e4e094c6ce9373cefb6a67ab15c", 
"score": "0.59882206", "text": "def create_corrolation_plot(data):\n corr = data.corr().reset_index().melt(id_vars=\"index\")\n corr.columns = [\"Variable 1\", \"Variable 2\", \"corr_values\"]\n corr[\"Correlation\"] = corr.corr_values.round(3)\n \n # Base chart \n cht = alt.Chart(corr).encode(\n x=\"Variable 1:N\", y=\"Variable 2:N\"\n ).properties(title=\"Correlation Plot\", width=700, height=700)\n \n # Text overlay \n txt = cht.mark_text().encode(\n text=\"Correlation\", \n color=alt.condition(\n alt.datum.Correlation > 0.25, \n if_true=alt.value('white'),\n if_false=alt.value('black')\n ))\n \n # Colored boxes \n rct = cht.mark_rect().encode(\n color=alt.Color(\"corr_values:Q\", scale=alt.Scale(scheme=\"purples\"),\n legend=None),\n tooltip=[\"Variable 1\", \"Variable 2\", \"Correlation\"]\n )\n cht = rct + txt\n cht = cht.configure_axis(\n ticks=False, title=None, labelPadding=10, labelFontSize=14\n ).configure_title(\n fontSize=20, anchor=\"middle\", dy=-20\n )\n return cht", "title": "" }, { "docid": "d1877b88b00abc491abf85e5c57b7ec4", "score": "0.59726924", "text": "def heatmap(self, stats, ax=None, **kwargs):", "title": "" }, { "docid": "6e0d49d3fd2e51844a2d787e2daaa04e", "score": "0.5960367", "text": "def render_heatmap(\n data: pd.DataFrame,\n ax_hm: plt.Axes = None,\n cmap: colors.Colormap = None,\n norm: colors.Normalize = colors.Normalize(),\n annotate: bool = True,\n annotation_valfmt: str = '{x:.0f}',\n add_sep_colorbar: bool = False,\n ax_cb: plt.Axes = None,\n colorbar_label: str = None,\n use_index_labels: bool = False,\n xlabel: str = None,\n ylabel: str = None,\n fig_canvas_title: str = None,\n fig_size: tuple = (8, 6),\n manipulate_ticks: bool = False,\n tick_label_prec: int = 3,\n xtick_label_prec: int = None,\n ytick_label_prec: int = None\n) -> (plt.Figure, plt.Figure):\n if isinstance(data, pd.DataFrame):\n if not isinstance(data.index, NumericIndex):\n raise pyrado.TypeErr(given=data.index, expected_type=NumericIndex)\n if not isinstance(data.columns, NumericIndex):\n raise pyrado.TypeErr(given=data.columns, expected_type=NumericIndex)\n # Extract the data\n x = data.columns\n y = data.index\n else:\n raise pyrado.TypeErr(given=data, expected_type=pd.DataFrame)\n\n # Create axes if not provided\n if ax_hm is None:\n fig_hm, ax_hm = plt.subplots(1, figsize=fig_size)\n else:\n fig_hm = ax_hm.figure\n\n if fig_canvas_title is not None:\n fig_hm.canvas.set_window_title(fig_canvas_title)\n\n # Create the image\n img = ax_hm.imshow(data, cmap=cmap, norm=norm, aspect=(x.max()-x.min())/(y.max()-y.min()), origin='lower',\n extent=[x.min(), x.max(), y.min(), y.max()]) # former: aspect='auto'\n\n # Set axes limits\n ax_hm.set_xlim(x.min(), x.max())\n ax_hm.set_ylim(y.min(), y.max())\n\n # Annotate the heat map\n if annotate:\n _annotate_heatmap(img, valfmt=annotation_valfmt)\n\n # Prepare the ticks\n if manipulate_ticks:\n _setup_index_axis(ax_hm.xaxis, x, use_index_labels,\n xtick_label_prec if xtick_label_prec is not None else tick_label_prec)\n _setup_index_axis(ax_hm.yaxis, y, use_index_labels,\n ytick_label_prec if ytick_label_prec is not None else tick_label_prec)\n\n ax_hm.stale = True # to cause redraw\n\n # Set the labels\n if xlabel is not None:\n ax_hm.set_xlabel(xlabel)\n if ylabel is not None:\n ax_hm.set_ylabel(ylabel)\n\n # Add color bar if requested\n if add_sep_colorbar:\n # Draw a new figure and re-plot the color bar there\n if ax_cb is None:\n fig_cb, ax_cb = plt.subplots(1, figsize=fig_size)\n else:\n fig_cb = plt.gcf()\n\n if colorbar_label is not 
None:\n colorbar.ColorbarBase(ax_cb, cmap=cmap, norm=norm, label=colorbar_label)\n else:\n colorbar.ColorbarBase(ax_cb, cmap=cmap, norm=norm)\n # if colorbar_label is not None:\n # fig_cb.colorbar(img, ax=ax_cb, label=colorbar_label) # plt.colorbar works, too\n # else:\n # fig_cb.colorbar(img, ax=ax_cb) # plt.colorbar works, too\n #\n # # Only show the color bar\n # ax_cb.remove()\n return fig_hm, fig_cb\n\n else:\n return fig_hm, None", "title": "" }, { "docid": "4a04fc16e30a5c0bfc7882688af5bcdb", "score": "0.59285873", "text": "def correlations(self):\n fontProperties = {'family':'sans-serif'}\n opts = {'cmap': plt.get_cmap(\"bwr\"), 'vmin': -1, 'vmax': +1}\n\n for c,target in enumerate(self.targets):\n\n saveAs = \"{0}/correlations_{1}_{2}\".format(self.output_dir,target.name,self.date)\n\n allkeys = target.df.keys()\n keys = []\n for key in allkeys:\n if key!='target': keys.append(key)\n t_ = target.df[keys]\n corrmat = t_.corr()\n\n # Save correlation matrix to CSV file\n corrmat.to_csv(\"{0}.csv\".format(saveAs))\n\n # Use matplotlib directly\n fig,ax = plt.subplots()\n\n heatmap1 = ax.pcolor(corrmat, **opts)\n cbar = plt.colorbar(heatmap1, ax=ax)\n\n cbar.ax.set_yticklabels( [i.get_text().strip('$') for i in cbar.ax.get_yticklabels()], **fontProperties )\n\n labels = corrmat.columns.values\n labels = [self.variable_labels[i].label for i in labels]\n\n # shift location of ticks to center of the bins\n ax.set_xticks(np.arange(len(labels))+0.5, minor=False)\n ax.set_xticklabels(labels, fontProperties, fontsize=14, minor=False, ha='right', rotation=70)\n\n ax.set_yticks(np.arange(len(labels))+0.5, minor=False)\n ax.set_yticklabels(labels, fontProperties, fontsize=14, minor=False)\n\n ## CMS/COM Energy Label + Signal name\n cms_stamp = hpl.CMSStamp(self.CMSlabelStatus)\n cms_stamp.coords = [0.02,1.00]\n cms_stamp.fontsize = 16\n cms_stamp.va = 'bottom'\n ax.text(0.02,1.00,cms_stamp.text,fontsize=cms_stamp.fontsize,\n ha=cms_stamp.ha,va=cms_stamp.va,transform=ax.transAxes)\n\n energy_stamp = hpl.EnergyStamp()\n energy_stamp.ha = 'right'\n energy_stamp.coords = [0.99,1.00]\n energy_stamp.fontsize = 16\n energy_stamp.va = 'bottom'\n ax.text(energy_stamp.coords[0],energy_stamp.coords[1],energy_stamp.text, \n fontsize=energy_stamp.fontsize,ha=energy_stamp.ha, va=energy_stamp.va, transform=ax.transAxes)\n\n ax.text(0.03,0.93,target.label,fontsize=16,ha='left',va='top',transform=ax.transAxes)\n\n\n plt.savefig(\"{0}.{1}\".format(saveAs,self.image_format),\n format=self.image_format,dpi=300,bbox_inches='tight')\n plt.close()\n\n return", "title": "" }, { "docid": "8e23ff82c707b850587fc9ad88046878", "score": "0.59212583", "text": "def plot_correlations(X, ids, weights=None, classes=None, round_threshold=0.0, targetdir=None, colorbar = False):\n N = X.shape[0]\n\n if classes is None:\n classes = np.zeros(N)\n num_classes = int(1)\n else:\n num_classes = len(np.unique(classes))\n\n figs = {}\n axs = {}\n \n for i in range(num_classes):\n\n label = f'all' if (num_classes == 1) else f'class_{i}'\n\n # Compute correlation matrix\n w = weights[classes==i] if weights is not None else None\n C = statstools.correlation_matrix(X=X[classes==i,:], weights=w)\n C[np.abs(C) < round_threshold] = np.nan\n C *= 100\n \n # Compute suitable figsize\n size = np.ceil(C.shape[0] / 3)\n \n # Plot it\n figs[label], axs[label] = plt.subplots(1,1, figsize=(size,size))\n\n axs[label].imshow(C)\n axs[label] = annotate_heatmap(X = C, ax = axs[label], xlabels = ids,\n ylabels = ids, decimals = 0, x_rot = 90, y_rot = 0, color 
= \"w\")\n axs[label].set_title(f'{label}: linear correlation $\\\\in$ [-100,100]', fontsize=10)\n \n if colorbar:\n cb = plt.colorbar()\n\n if targetdir is not None:\n fname = targetdir + f'{label}-correlation-matrix.pdf'\n print(__name__ + f'.plot_correlations: Save: \"{fname}\"')\n plt.savefig(fname=fname, pad_inches=0.2, bbox_inches='tight')\n\n return figs, axs", "title": "" }, { "docid": "847edc9560fd80bef55c0a7c1f4b1881", "score": "0.59083694", "text": "def plot_corr(self, values=True):\n plot_corr(self.data, values)", "title": "" }, { "docid": "7268ab825f669e8bc80a4361408d6432", "score": "0.5851477", "text": "def correlation_training(self):\n self.x=pd.DataFrame(self.x)\n dfcorr=self.x.corr()\n Tools.table(dfcorr,'.3f','fancy_grid','Correlation Matrix on Training Data', 60)\n c=corrplot.Corrplot(dfcorr)\n c.plot()\n plt.suptitle(\"Pair-Correlation Matrix on Training Data\")\n return plt.show()", "title": "" }, { "docid": "2b206ccc0170817ac6a3efe9d9e0b5c4", "score": "0.5848216", "text": "def correlation_matrix(data: pd.DataFrame, vmin: int = -1) -> str:\n fig_cor, axes_cor = plt.subplots()\n cmap_name = config[\"plot\"][\"correlation\"][\"cmap\"].get(str)\n cmap_bad = config[\"plot\"][\"correlation\"][\"bad\"].get(str)\n\n cmap = plt.get_cmap(cmap_name)\n if vmin == 0:\n cmap = get_cmap_half(cmap)\n cmap.set_bad(cmap_bad)\n\n labels = data.columns\n matrix_image = axes_cor.imshow(\n data, vmin=vmin, vmax=1, interpolation=\"nearest\", cmap=cmap\n )\n plt.colorbar(matrix_image)\n\n if data.isnull().values.any():\n legend_elements = [Patch(facecolor=cmap(np.nan), label=\"invalid\\ncoefficient\")]\n\n plt.legend(\n handles=legend_elements, loc=\"upper right\", handleheight=2.5,\n )\n\n axes_cor.set_xticks(np.arange(0, data.shape[0], float(data.shape[0]) / len(labels)))\n axes_cor.set_yticks(np.arange(0, data.shape[1], float(data.shape[1]) / len(labels)))\n\n font_size = get_correlation_font_size(len(labels))\n axes_cor.set_xticklabels(labels, rotation=90, fontsize=font_size)\n axes_cor.set_yticklabels(labels, fontsize=font_size)\n plt.subplots_adjust(bottom=0.2)\n\n return plot_360_n0sc0pe(plt)", "title": "" }, { "docid": "b3ec3b9394dd40e556f65007ba31ed14", "score": "0.5841142", "text": "def spearman_corr_heatmap(portfolio, data_source='price_data',\n attribute='changePercent', absolute=True):\n matrix = analysis.spearman_corr_coeffs(portfolio, data_source, attribute)\n\n mask = np.zeros_like(matrix)\n mask[np.triu_indices_from(mask)] = True\n ax = sns.heatmap(matrix,\n annot=True,\n mask=mask,\n cmap='bwr', vmin=-1, vmax=1)\n ax.set_title(\n f'Portfolio {attribute.capitalize()} Spearman Correlation Heat Map')\n\n return ax", "title": "" }, { "docid": "ab03d167a158bff55fd14d9e9672bbf5", "score": "0.5834672", "text": "def heatmap_carnivore(self):\n x = self.sim.animal_distribution\n carn = x.pivot('Row', 'Col', 'Carnivore').values\n plot = self.ax4.imshow(carn, vmax=self.cmax_animals['Carnivore'])\n self.ax4.set_title('Carnivore density map', y=-0.3)", "title": "" }, { "docid": "cad15024d8c46b1c9fb7e3701a804e83", "score": "0.5812108", "text": "def _test_heatmap():\n # Hit the drum at time t = 0\n drum = Drum()\n drum.hit((0.8, 0, 0), 1)\n\n # Calculate drum values\n t = 0\n points = drum.points\n values = drum.value(0)\n \n # Polar heatmap\n data = griddata(points, values, (drum.grid_r, drum.grid_theta), method='cubic', fill_value=0)\n ax1 = plt.subplot(projection=\"polar\")\n ax1.pcolormesh(drum.thetas, drum.rs, data.T)\n plt.show()", "title": "" }, { "docid": 
"164442cce44300142d9dd045f4e76f83", "score": "0.5804257", "text": "def corr_matrix(cls, config, df, path, title, method, footnote=None):\n\t\t#timestamp\n\t\t_t0 = datetime.datetime.now()\n\t\t_f = debug(message='t', source=\"timestamp\")\n\t\tconsole('running bokeh corr_matrix()', 'blue')\n\n\t\t# math\n\t\timport bisect\n\t\tfrom math import pi\n\t\tfrom numpy import arange\n\t\tfrom itertools import chain\n\t\tfrom collections import OrderedDict\n\t\t\n\t\t# bokeh\n\t\tfrom bokeh.models import ColorBar, LinearColorMapper, TapTool, HoverTool, Range1d, ColumnDataSource\n\t\tfrom bokeh.models.callbacks import CustomJS\n\t\tfrom bokeh.plotting import reset_output, figure\n\t\tfrom bokeh.embed import components\n\n\t\t#create color palette\n\t\tmatplotlib.use('Agg')\n\t\timport matplotlib.colors\n\t\tfrom matplotlib import cm as mpl_cmap\n\t\t\n\t\t#p-values\n\t\tif method == \"pearson\":\n\t\t\tfrom scipy.stats import pearsonr as cf\n\t\telif method == \"spearman\":\n\t\t\tfrom scipy.stats import spearmanr as cf\n\t\t\n\t\t#calculate p-values correlation coefficent\n\t\tdef get_pvalue(df):\n\t\t\tdf = df.dropna()._get_numeric_data()\n\t\t\tdfcols = pd.DataFrame(columns=df.columns)\n\t\t\tpvalues = dfcols.transpose().join(dfcols, how='outer')\n\t\t\tfor row in df.columns:\n\t\t\t\tfor column in df.columns:\n\t\t\t\t\tpvalues[row][column] = round(cf(df[row], df[column])[1], 4)\n\t\t\treturn pvalues\n\n\t\t#Gets bounds for quads with n features\n\t\tdef get_bounds(n):\n\t\t\tbottom = list(chain.from_iterable(\n\t\t\t\t[[ii]*nlabels for ii in range(nlabels)]))\n\t\t\ttop = list(chain.from_iterable(\n\t\t\t\t[[ii+1]*nlabels for ii in range(nlabels)]))\n\t\t\tleft = list(chain.from_iterable(\n\t\t\t\t[list(range(nlabels)) for ii in range(nlabels)]))\n\t\t\tright = list(chain.from_iterable(\n\t\t\t\t[list(range(1, nlabels+1)) for ii in range(nlabels)]))\n\t\t\tcorr_items = list(chain.from_iterable(\n\t\t\t\t[[ii+1]*nlabels for ii in range(nlabels)]))\n\t\t\treturn top, bottom, left, right, corr_items\n\t\t\n\t\t#Aligns color values from palette with the correlation coefficient and p-values\n\t\tdef get_colors_corr(corr_array, p_array, colors):\n\t\t\tc_corr = arange(-1, 1, 1/(len(colors)/2))\n\t\t\tcorr_color = []\n\t\t\tp_color = []\n\t\t\tcorr_list = []\n\t\t\tp_list = []\n\t\t\tfactor_list = []\n\t\t\titr = 0\n\t\t\tfor corr, pvalue in zip(corr_array,p_array):\n\t\t\t\tind = bisect.bisect_left(c_corr, corr)\n\t\t\t\t#colors\n\t\t\t\t##corr_color\n\t\t\t\tif (itr)%(nlabels+1)==0:\n\t\t\t\t\tcorr_color.append('#022f62')\n\t\t\t\telse:\n\t\t\t\t\tcorr_color.append(colors[ind-1])\n\t\t\t\t##pcolor\n\t\t\t\tif pvalue<=0.05:\n\t\t\t\t\tp_color.append(\"#F44336\")\n\t\t\t\telse:\n\t\t\t\t\tp_color.append(colors[ind-1])\n\t\t\t\t#append corr and pvalues\n\t\t\t\tcorr_list.append(corr)\n\t\t\t\tp_list.append(pvalue)\n\t\t\t\tfactor_list.append(['[%s, %s]'%(l_labels[itr][0],l_labels[itr][1])])\n\t\t\t\titr = itr + 1\n\t\t\treturn corr_color, corr_list, p_color, p_list, factor_list\n\t\t\n\t\t#create seaborn plots for each possible correlation combinations\n\t\tdef get_corr_plot(df, x, y):\n\t\t\timportlib.reload(plt); importlib.reload(sns)\n\n\t\t\t#----parameters\n\t\t\t#figure\n\t\t\tfig, ax = plt.subplots(1, 1, figsize=(10,10))\n\t\t\t#bounds\n\t\t\tplt.subplots_adjust(top=0.95, bottom=0.085, left=0.125, right=0.975, hspace=0.2, wspace=0.2)\n\t\t\t\n\t\t\t#----title\n\t\t\ttitle = \"x=%s, y=%s\"%(x,y)\n\t\t\t\n\t\t\t#----set style\n\t\t\t#pal = sns.color_palette(\"Set1\", n_colors=2, 
desat=.5)\n\t\t\tsns.set(style=config['style']['seaborn'], font=\"Helvetica\", font_scale=1.1)\n\t\t\tsns.despine(offset=10, trim=True)\n\t\t\t\n\t\t\t#----plot\n\t\t\t#if grouping #g = sns.lmplot(x, y, hue=groupby, data=df, palette=pal).set_title(title)\n\t\t\tsns.regplot(x, y, data=df, scatter=True, fit_reg=True, ci=95).set_title(title)\n\t\t\t\n\t\t\t#----add rho, p-value to plot\n\t\t\t#get values\n\t\t\tcorr_r = cf(df[x],df[y])\n\t\t\tcorr_t = \"r = %.2f, p < %.2f\"%(corr_r[0],corr_r[1])\n\t\t\t#place text 90% of max-x and 90% of max-y\n\t\t\tfig.text(0.85, 0.05, corr_t, size=16, horizontalalignment='center',\n\t\t\t\t\tverticalalignment='center', transform=ax.transAxes)\n\n\t\t\t#----save\n\t\t\t#check if path exists\n\t\t\tfile = \"%s.png\"%(title)\n\t\t\tpath = config['path']['output'] + \"/analysis/html/img/corr/\"\n\t\t\tif not os.path.exists(path):\n\t\t\t\tos.makedirs(path)\n\t\t\t#save\n\t\t\tfig.savefig(path + file, dpi=300)\n\n\t\t#reset\n\t\treset_output()\n\t\t\n\t\t#get color map\n\t\tRdYlBu = mpl_cmap.get_cmap('RdBu', 32)\n\t\tcolors_np = np.vstack(RdYlBu(np.linspace(0, 1, 32)))\n\t\tcmap = matplotlib.colors.ListedColormap(colors_np, name='RedBlue')\n\t\tcolors = [matplotlib.colors.rgb2hex(cmap(x/32)[:3]) for x in range(32)]\n\n\t\t# calculate correlation coefficients\n\t\t#coeff\n\t\tcorr_coeff = df.corr(method=method)\n\t\t#p-value\n\t\tp_value = get_pvalue(df)\n\n\t\t# get list and number of variables\n\t\tlabels = df.columns\n\t\tnlabels = len(corr_coeff)\n\n\t\t#create list of labels\n\t\t##x\n\t\tl_labels_x = labels.tolist() * nlabels\n\t\t##y\n\t\tl_labels_y = labels.tolist() * nlabels\n\t\t###split into chunks\n\t\tl_labels_y_split = np.hsplit(np.array(np.array_split(l_labels_y, nlabels)), nlabels)\n\t\tl_labels_y = np.concatenate(l_labels_y_split).ravel().tolist()\n\t\t##combine\n\t\tl_labels = list(zip(l_labels_x, l_labels_y))\n\n\t\t#create coeff label coordinates\n\t\tnum = np.arange(0, nlabels, 0.5).tolist()\n\t\tnum = [x for x in num if x not in np.arange(0, nlabels).tolist()]\n\t\t##x\n\t\tnum_x = [(x - 0.025) for x in num]\n\t\tnum_x = [x for x in num_x if x not in np.arange(0, nlabels).tolist()] * nlabels\n\t\t##y\n\t\tnum_y = [(x + .125) for x in num]\n\t\tnum_y = [y for y in num_y if y not in np.arange(0, nlabels).tolist()] * nlabels\n\n\t\t###split into chunks\n\t\tnum_y_split = np.hsplit(np.array(np.array_split(num_y, nlabels)), nlabels)\n\t\tnum_y = np.concatenate(num_y_split).ravel().tolist()\n\t\t##combine\n\t\t#num_labels = list(zip(num_x, num_y))\n\n\t\t#plot\n\t\ttools=\"box_select,save,reset\"\n\t\tcm = figure(tools=tools, plot_width=1200, plot_height=1200, x_range=(0, nlabels), y_range=(0, nlabels), \n\t\t\t\t\toutput_backend=\"webgl\")\n\n\t\t#hover\n\t\thover = HoverTool()\n\t\thover.tooltips = [\n\t\t\t(\"factor\", \"@factor\"),\n\t\t\t(\"rho\", \"@coeff_num\"),\n\t\t\t(\"p\", \"@p_num\"),\n\t\t]\n\t\t#cm.tools.append(hover)\n\t\tcm.hover.line_policy = \"nearest\"\n\n\t\t#legend\n\t\t#cm.toolbar.logo = None\n\n\t\t#grid\n\t\tcm.xgrid.grid_line_color = None\n\t\tcm.ygrid.grid_line_color = None\n\t\tcm.xaxis.major_label_orientation = pi/4\n\t\tcm.yaxis.major_label_orientation = pi/4\n\n\t\t#turn off scientific notation\n\t\tcm.left[0].formatter.use_scientific = False\n\n\t\t# prepare squares for plot\n\t\ttop, bottom, left, right, corr_items = get_bounds(nlabels)\n\t\tcolor_list,corr,p_color,pvalues,factor = get_colors_corr(corr_coeff.values.flatten(),p_value.values.flatten(),colors)\n\n\t\t#prepare corr_coeff label for plot\n\t\tcoeff_color 
= [\"#444444\"] * (nlabels * nlabels)\n\t\tcoeff_color = ['#f9f9f9' if cr >= np.float64(0.74) else cof for cof, cr in zip(coeff_color, corr)]\n\t\tcoeff_num = [\"%.2f\"%(x) for x in corr]\n\n\t\t#prepare pvalue for plot\n\t\tp_color = [\"#444444\"] * (nlabels * nlabels)\n\t\tp_color = ['#F44336' if p <= np.float64(0.05) else cof for pcol, cof, p in zip(p_color, coeff_color, pvalues)]\n\t\tp_num = [\"(%.2f)\"%(y) for y in pvalues]\n\n\t\t#data source\n\t\tsource = ColumnDataSource(data=dict(left=left, right=right, top=top, bottom=bottom,\n\t\t\t\t\t\t\t\t\t\t\tx=num_x, y=num_y, coeff_num=coeff_num, coeff_color=coeff_color,\n\t\t\t\t\t\t\t\t\t\t\tp_num=p_num, factor=factor, square_color=color_list, p_color=p_color))\n\t\t#create squares\n\t\tcm.quad(top='top', bottom='bottom', left='left', right='right', \n\t\t\t\tline_color='white', color='square_color', source=source)\n\n\t\t#create text\n\t\t#correlation coefficient\n\t\tcm.text(x='x', y='y', source=source, text_color=\"coeff_color\", text='coeff_num', \n\t\t\t\ttext_font_style='normal', text_font_size = '1.5em', text_align='center',\n\t\t\t\ty_offset=-5, x_offset=1, text_line_height=1)\n\t\t#p-value\n\t\tcm.text(x='x', y='y', source=source, text_color=\"p_color\", text='p_num', \n\t\t\t\ttext_font_style='normal', text_font_size = '1.0em', text_align='center',\n\t\t\t\ty_offset=20, x_offset=1, text_line_height=1)\n\n\t\t#callback\n\t\tlink = CustomJS(args = dict(source = source), code=\"\"\"\n\t\t\tconsole.log(\"second\")\n\t\t\tobj = cb_obj\n\t\t\ts = source\n\t\t\tdata = source.data\n\t\t\tattr = source.attributes\n\t\t\td = attr.data\n\n\t\t\t//selected item\n\t\t\tid = source.selected['1d'].indices\n\t\t\titem = d.factor[id][0].replace(/[[\\]]/g,'').match(/(\".*?\")|(\\S+)/g)\n\t\t\tconsole.log('id: ' + id + ', item: ' + item)\n\t\t\t\n\t\t\t//url\n\t\t\tpath = window.location.href\n\t\t\tcurrent = path.split(\"/\").slice(0,-1).join(\"/\")\n\t\t\turl = current + '/reg.html?x='+ item[0] + 'y=' + item[1]\n\t\t\twindow.open(url)\n\t\t\t//window.open(url, 'newwindow', config='height=720, width=1280')\n\t\t\"\"\")\n\t\tcm.add_tools(TapTool(callback=link)) \n\n\t\t#reverse y-axis\n\t\tcm.y_range = Range1d(nlabels, 0)\n\t\t#p.y_range = Range1d(monitorSize[1], 0)\n\n\t\t# Set ticks with labels\n\t\tticks = [tick+0.5 for tick in list(range(nlabels))]\n\t\ttick_dict = OrderedDict([[tick, labels[ii]] for ii, tick in enumerate(ticks)])\n\n\t\t# Create the correct number of ticks for each axis\n\t\tcm.xaxis.ticker = ticks\n\t\tcm.yaxis.ticker = ticks\n\n\t\t# Override the labels\n\t\tcm.xaxis.major_label_overrides = tick_dict\n\t\tcm.yaxis.major_label_overrides = tick_dict\n\n\t\t#color bar\n\t\tmapper = LinearColorMapper(\n\t\t\tpalette=colors,\n\t\t\tlow=-1, high=1\n\t\t)\n\t\tcolor_bar = ColorBar(color_mapper=mapper, location=(0, 0))\n\t\tcm.add_layout(color_bar, 'right')\n\n\t\t#create link for each comparison\n\t\tall_columns = [[x,y] for x in list(labels) for y in list(labels)]\n\t\tfor idx, clm in enumerate(all_columns):\n\t\t\tget_corr_plot(df=df, x=clm[0], y=clm[1])\n\n\t\t#get html\n\t\tscript, div = components(cm)\n\t\t\n\t\t##convert seperate plots and div to single string\n\t\tplots = (''.join(map(str, [div, '\\n', script])))\n\n\t\t#create html\n\t\thtml(cls, config, path=path, plots=plots, source='bokeh', title=title, footnote=footnote)\n\n\t\t#reset\n\t\treset_output()\n\n\t\t#--------finished\n\t\t#timestamp\n\t\tconsole('%s finished in %s msec'%(_f,((datetime.datetime.now()-_t0).total_seconds()*1000)), 'blue')\n\t\treturn cm", 
"title": "" }, { "docid": "579772c39464fd6b0bc295f06539f021", "score": "0.57749116", "text": "def draw_heatmap(df, figsize=(15, 6), cmap='YlOrBr', ylabel='', xlabel='', title=''):\n _, ax = plt.subplots(figsize=figsize)\n sns.heatmap(df, cmap=cmap, annot=False)\n ax.set_ylabel(ylabel, fontsize=15)\n ax.set_xlabel(xlabel, fontsize=15)\n ax.set_title(title, fontsize=20, weight='bold')\n plt.show()", "title": "" }, { "docid": "8868f3cd3d27717bebb45feec1b00367", "score": "0.57121176", "text": "def plot_corrcoef_raftscope(raftsfits, ROIrows, ROIcols, xylabels=None, title='', norm=True):\n datadir, dataname = os.path.split(raftsfits[0])\n dataname = os.path.splitext(dataname)[0]\n\n a = corrcoef_raftscope(raftsfits, ROIrows, ROIcols, norm)\n fig, ax = plt.subplots(figsize=(10, 8))\n if norm:\n cax = ax.imshow(a, cmap=plt.get_cmap('jet'), norm=mplcol.Normalize(vmax=1, clip=True), interpolation='nearest')\n else:\n cax = ax.imshow(a, cmap=plt.get_cmap('jet'), norm=mplcol.Normalize(vmax=20000, clip=True), interpolation='nearest')\n if norm:\n titlestr = \"Correlation for %s\"\n else:\n titlestr = \"Covariances for %s\"\n if title:\n ax.set_title(titlestr % title)\n else:\n ax.set_title(titlestr % dataname)\n ax.set_xticks(np.arange(0, 16*len(raftsfits), 16))\n ax.set_yticks(np.arange(0, 16*len(raftsfits), 16))\n if xylabels:\n ax.set_xticklabels(xylabels)\n ax.set_yticklabels(xylabels)\n cbar = fig.colorbar(cax, orientation='vertical')\n\n plt.savefig(os.path.join(datadir, \"corrscope-%s.png\" % dataname))\n plt.show()", "title": "" }, { "docid": "f75ad74a90af8d32921297c0d07628ab", "score": "0.57056934", "text": "def plot_corr(path):\n data, cols, rows = read_csv(path)\n\n plt.figure().canvas.set_window_title('Figure - Correlation (' + path + ')')\n for i in range(1, cols): plt.plot(data[0], data[i])\n \n plt.title('Correlation')\n plt.xlabel('Time (ns)')\n plt.ylabel('Correlation Coefficient')\n plt.gca().yaxis.grid(color='gray', linestyle='dashed')\n plt.show()", "title": "" }, { "docid": "0e43f09b6c7e5d4fe9dc3f6925a14d0a", "score": "0.57038325", "text": "def plot_correlation_matrices(chr_list):\n for ch in chr_list:\n ss_ch = BlockMatrix.read('gs://nbaya/sumstats_corr/'+variant_set+'_ss_correlation_chr{}.bm/'.format(ch))\n gt_ch = BlockMatrix.read('gs://nbaya/sumstats_corr/'+variant_set+'_gt_correlation_chr{}.bm/'.format(ch))\n M_max = int(1e4) #max number of variants to be taken from the block matrices (suggested: 2e4)\n M = ss_ch.shape[0] #dimension of block matrix\n# for idx in range(int(M/M_max)+1): #index of which disjoint window we are looking at in the block matrix\n for idx in range(0,int(M/M_max)+1): #index of which disjoint window we are looking at in the block matrix\n M0 = M_max*(idx) #start variant index for block matrix filtering\n M1 = min(M_max*(idx+1),M) #stop variant index for block matrix filtering\n ss_np = ss_ch[M0:M1,M0:M1].to_numpy()\n gt_np = gt_ch[M0:M1,M0:M1].to_numpy()\n print('\\nStarting variant window: ['+str(M0)+','+str(M1)+']')\n w = int(5e3) #window width of variants for correlation matrix (suggested: 2e3)\n for i in range(int((M1-M0-1)/w)+1):\n w0 = w*i #start variant index for window of correlation matrix\n w1 = min(w*(i+1),M1-M0) #stop variant index for window of correlation matrix\n full = (ss_np[w0:w1,w0:w1]+\n gt_np[w0:w1,w0:w1].T)\n np.fill_diagonal(full,1)\n fig,ax = plt.subplots()\n ax.imshow(full,cmap='bwr')\n ax.plot([0, w],[0, w],'k--',alpha=0.5,lw=2)\n plt.xlim([0,w])\n plt.ylim([w,0])\n ax.text(w*0.83,w*0.1,\"SS\",fontsize=60,alpha=0.5)\n 
ax.text(w*0.02,w*0.97,\"GT\",fontsize=60,alpha=0.5)\n plt.title('chr'+str(ch)+' '+variant_set+' variants ('+str(M0+w0)+'-'+str(M0+w1)+')')\n fig=plt.gcf()\n fig.set_size_inches(10,10)\n path=('gs://nbaya/sumstats_corr/plots/chr'+str(ch)+'_'+variant_set+\n '_'+str(M0+w0).zfill(len(str(M)))+'-'+str(M0+w1).zfill(len(str(M)))+'.png')\n with hl.hadoop_open(path, 'wb') as f: \n fig.savefig(f,dpi=600)\n plt.close()\n print('\\nFinished variant window: ['+str(M0)+','+str(M1)+']')", "title": "" }, { "docid": "dcc767bfe38719b5b3949d492864ed78", "score": "0.5673527", "text": "def constructHeatMap(data, show=False):\n plt.figure(figsize=(20, 10))\n sns.heatmap(data, cbar=False)\n if show:\n plt.show()", "title": "" }, { "docid": "0c79f04921fcf87404536d46e6198bbd", "score": "0.56536555", "text": "def heatmap_focused(df, target, n):\r\n n = n # Number of variables\r\n features = df.corr().nlargest(n, target)[target].index\r\n hm_data = np.corrcoef(df[features].values.T)\r\n sns.set(font_scale=0.8)\r\n sns.heatmap(hm_data, cbar=True, cmap=sns.diverging_palette(20, 220, n=200), \r\n annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, \r\n yticklabels=features.values, xticklabels=features.values).set_title(\"Main features heatmap\")", "title": "" }, { "docid": "20319472cf1b536362aed44fa5ae9394", "score": "0.562109", "text": "def draw_heatmap(*args, **kwargs):\n data = kwargs.pop('data')\n d = data.pivot(index=args[1], columns=args[0], values=args[2])\n sns.heatmap(d, **kwargs)", "title": "" }, { "docid": "593b87cdbd87f8d52b2d3fa009a1e457", "score": "0.561807", "text": "def heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw={}, cbarlabel=\"\", **kwargs):\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n # cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... 
and label them with the respective list entries.\n ax.set_xticklabels(col_labels, fontproperties=prop)\n ax.set_yticklabels(row_labels, fontproperties=prop)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n # plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\n # rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1] + 1) - .5, minor=True)\n ax.set_yticks(np.arange(data.shape[0] + 1) - .5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar", "title": "" }, { "docid": "6c1529a1fd1e65523056787b4f1956fa", "score": "0.5616676", "text": "def autocorr_raster_plot(ax, trange, spks=None, cor=None, **kwargs):\r\n if cor is None:\r\n if spks is None:\r\n raise IOError('Need to give cor or spks')\r\n cor = cc_func(np.array(spks), np.array(spks),\r\n trange=trange, keep_zero=False)\r\n\r\n ys = [np.zeros_like(v) + i for i, v in enumerate(cor)]\r\n d = dict(color='k', alpha=.1, s=1)\r\n d.update(kwargs)\r\n ax.scatter(flaten_list(cor) * 1000, flaten_list(ys), **d)\r\n ax.set_ylim(0, len(cor))\r\n ax.set_xlim(*np.array(trange) * 1000)\r\n return cor, ys", "title": "" }, { "docid": "401d989e2180c21973bc18f01de0c90d", "score": "0.5598288", "text": "def heatmap(data, row_labels, col_labels, ax=None, cbar_kw={}, cbarlabel=\"\", **kwargs):\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-45, ha=\"right\", rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle=\"-\", linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar", "title": "" }, { "docid": "bb7220f2c95686978d74ac3290bf0900", "score": "0.5581191", "text": "def heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw={}, cbarlabel=\"\", **kwargs):\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=00, va=\"bottom\")\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... 
and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=00, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar", "title": "" }, { "docid": "24b36632f737b0977ff0c8b357697087", "score": "0.5579378", "text": "def heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw={}, cbarlabel=\"\", **kwargs):\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar", "title": "" }, { "docid": "24b36632f737b0977ff0c8b357697087", "score": "0.5579378", "text": "def heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw={}, cbarlabel=\"\", **kwargs):\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... 
and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar", "title": "" }, { "docid": "66b878769a4292f453d388fc4af2f68d", "score": "0.5573627", "text": "def heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw={}, cbarlabel=\"\", **kwargs):\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1] + 1) - .5, minor=True)\n ax.set_yticks(np.arange(data.shape[0] + 1) - .5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar", "title": "" }, { "docid": "12b61a7c8106098b976abfd82b7465de", "score": "0.55722404", "text": "def topic_correlation(self, topic_matrix):\r\n data = pd.DataFrame(data=topic_matrix)\r\n corr = data.corr()\r\n fig, ax = plt.subplots(figsize=(16, 8), sharey='all', dpi=160)\r\n cax = ax.matshow(corr, cmap='coolwarm', vmin=-1, vmax=1)\r\n fig.colorbar(cax)\r\n ticks = np.arange(0, len(data.columns), 1)\r\n ax.set_xticks(ticks)\r\n plt.xticks(rotation=90)\r\n ax.set_yticks(ticks)\r\n ax.set_xticklabels(data.columns)\r\n ax.set_yticklabels(data.columns)\r\n\r\n fig.suptitle('Topic Correlation')\r\n\r\n if self.report:\r\n self.pdf.savefig(fig)\r\n\r\n if self._display:\r\n plt.show()\r\n else:\r\n plt.close(fig)", "title": "" }, { "docid": "728576c6989f4605462873087d0cc38d", "score": "0.55645734", "text": "def heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw={}, cbarlabel=\"\",lab_size=4, **kwargs):\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\",size=lab_size)\n cbar.ax.tick_params(labelsize=lab_size)\n\n # We want to show all ticks...\n ax.set_xticks(numpy.arange(data.shape[1]))\n ax.set_yticks(numpy.arange(data.shape[0]))\n # ... 
and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False,labelsize=lab_size)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-90, ha=\"right\",va='center',\n rotation_mode=\"anchor\", fontsize=lab_size)\n plt.setp(ax.get_yticklabels(), fontsize=lab_size)\n\n # Turn spines off and create white grid.\n #for edge, spine in ax.spines.items():\n # spine.set_visible(False)\n\n ax.set_xticks(numpy.arange(data.shape[1]+1)-.5, minor=True)\n ax.set_yticks(numpy.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"k\", linestyle='-', linewidth=0.1)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar", "title": "" }, { "docid": "ac5f741909215516197da6e8262f172e", "score": "0.55473465", "text": "def corr_plot(\n data, features=None, method=\"pearson\", plot_width=500, plot_height=400\n):\n # Defining numeric_features list\n numeric_features = [\n \"int16\",\n \"int32\",\n \"int64\",\n \"float16\",\n \"float32\",\n \"float64\",\n ]\n\n # Cheking user's inputs\n\n # Tests whether input data is of pd.DataFrame type\n if not isinstance(data, pd.DataFrame):\n raise TypeError(\"Please pass in a Pandas DataFrame for `data`\")\n\n # Tests whether input features is of the type list\n if features is not None:\n if not isinstance(features, list):\n raise TypeError(\"Please pass in a list for `features`\")\n\n # Tests whether input features has at least two features\n if features is not None:\n if len(features) < 2:\n raise ValueError(\"At least two features should be selected\")\n\n # Tests whether input method is of the type str\n if not isinstance(method, str):\n raise TypeError(\"Please pass in a str for `method`\")\n\n # Tests whether input method is one of the 3 available options\n if method not in (\"pearson\", \"spearman\", \"kendall\"):\n raise Exception(\n \"Please pick a correlation method: 'pearson', 'spearman' or\"\n \" 'kendall'\"\n )\n\n # Tests whether input plot width and height are of the type int\n if (not isinstance(plot_width, int)) or (not isinstance(plot_height, int)):\n raise TypeError(\"Both plot_width and plot_height must be integers\")\n\n # Subsetting the data dataframe\n if features is None:\n if data.select_dtypes(include=numeric_features).shape[1] < 2:\n raise ValueError(\n \"Dataframe should have at least two numerical features\"\n )\n data = data.select_dtypes(include=numeric_features)\n else:\n if data[features].select_dtypes(np.number).shape[1] < 2:\n raise ValueError(\n \"Dataframe should have at least two numerical features\"\n )\n data = data[features].select_dtypes(include=np.number)\n # Creating corr_df dataframe\n corr_df = data.corr(method).stack().reset_index(name=\"corr\")\n corr_df.loc[corr_df[\"corr\"] == 1, \"corr\"] = 0\n corr_df[\"abs\"] = corr_df[\"corr\"].abs()\n\n # Correlation plot\n corr_plot = (\n alt.Chart(\n corr_df, title=f\"{method} Correlations Plot for Numerical Features\"\n )\n .mark_circle()\n .encode(\n x=alt.X(\"level_0\", title=\"Numerical Features\"),\n y=alt.Y(\"level_1\", title=\"Numerical Features\"),\n size=alt.Size(\"abs\", title=\"Correlation Size\"),\n color=alt.Color(\n \"corr\",\n title=\"Correlation\",\n scale=alt.Scale(scheme=\"blueorange\"),\n ),\n tooltip=alt.Tooltip(\"corr\"),\n )\n .properties(width=plot_width, height=plot_height)\n )\n\n return 
corr_plot", "title": "" }, { "docid": "6e0dd156668d0d817225fdfba31479aa", "score": "0.55360407", "text": "def plot_correlation_with_lag(\n self,\n lag: Union[int, List[int]],\n cols: Optional[List[Any]] = None,\n ax: Optional[mpl.axes.Axes] = None,\n mode: str = \"ins\",\n ) -> pd.DataFrame:\n df = self._get_df(cols=cols, mode=mode)\n # Calculate correlation.\n corr_df = csigna.correlate_with_lag(df, lag=lag)\n cplott.plot_heatmap(corr_df, ax=ax)\n return corr_df", "title": "" }, { "docid": "8b096de850614fe846d29c9923804b1f", "score": "0.55310494", "text": "def plotRawCorrelations(figure, gridSystem, plotRow, seqA, seqB):\n\n # Calculate the correlations:\n valid = np.correlate(seqA, seqB, 'valid')\n same = np.correlate(seqA, seqB, 'same')\n full = np.correlate(seqA, seqB, 'full')\n\n # Plot the correlations:\n ax = figure.add_subplot(gridSystem[plotRow, :])\n ax.plot(valid, 'ro', rasterized=RASTERIZE_PLOTS)\n ax.set_title('Correlation: Valid')\n plotRow += 1\n\n ax = figure.add_subplot(gridSystem[plotRow, :])\n ax.plot(same, 'ro', rasterized=RASTERIZE_PLOTS)\n ax.set_title('Correlation: Same')\n plotRow += 1\n\n ax = figure.add_subplot(gridSystem[plotRow, :])\n ax.plot(full, 'ro', rasterized=RASTERIZE_PLOTS)\n ax.set_title('Correlation: Full')\n plotRow += 1\n\n return plotRow", "title": "" }, { "docid": "3f8bd423f555d556a8b408eec4ec3d48", "score": "0.5519529", "text": "def draw_heatmap(**kwargs):\n data = kwargs.pop('data')\n data = data.pivot(columns='Plant Name', index='Objective', values='Sobol Index')\n return sns.heatmap(data, **kwargs)", "title": "" }, { "docid": "3075e0ca41c3c6d8ee6df1639fc4cfa3", "score": "0.5497765", "text": "def heatmap(data, ax=None, cbar_kw={}, cbarlabel=\"\", **kwargs):\n plt.figure(figsize=(12, 6))\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs, aspect=\"auto\")\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=False, bottom=True,\n labeltop=False, labelbottom=True)\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1] + 1) - .5, minor=True)\n ax.set_yticks(np.arange(data.shape[0] + 1) - .5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar", "title": "" }, { "docid": "f89109d7d954090e5c2262d533c05113", "score": "0.54860026", "text": "def heatmap(data, row_labels, col_labels, ax=None, cbar_kw={}, cbarlabel=\"\", **kwargs):\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... 
and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\", rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im", "title": "" }, { "docid": "291efd02e5b6f3e191f197a73a2fd052", "score": "0.54723936", "text": "def plot_frame_cells(self, frame, coll_df, color_column, c_min= 0., c_max= 1., n_ticks= 5, figsize= (6, 10), polygon_lw= .1, color_map= cm.afmhot, title= ''):\n plt.figure(figsize= figsize)\n plt.title(title, fontsize= 25)\n self.show_image(frame)\n plt.gca().autoscale_view()\n plt.gca().set_aspect('equal')\n colors= color_map((coll_df[color_column].values-c_min)/(c_max - c_min)) \n coll= mc.PolyCollection(coll_df['plot_vertices'].values, lw= polygon_lw)\n coll.set(facecolors= colors)\n plt.gca().add_collection(coll)\n plt.xlim(0, 900)\n plt.ylim(300, 1800)\n plt.gca().invert_yaxis()\n plt.axis('off')\n divider= make_axes_locatable(plt.gca())\n cax= divider.append_axes('right', size= '5%', pad= 0.05)\n mm= cm.ScalarMappable(cmap= color_map)\n mm.set_array(colors)\n cbar= plt.colorbar(mm, cax= cax, cmap= color_map, ticks= np.linspace(0, 1, n_ticks + 1))\n cbar.ax.set_yticklabels(np.linspace(c_min, c_max, n_ticks + 1))", "title": "" }, { "docid": "edf60c3e152234bdfbc49baf11f3c073", "score": "0.54602236", "text": "def heatmap_creator(given_filter):\n\tdraw_heatmap(given_filter)", "title": "" }, { "docid": "e02de2f57e56d65687b232bee974c09c", "score": "0.54509777", "text": "def correlation(x, y, xlabel='x', ylabel='y'):\n \n plt.title(xlabel + ', ' + ylabel, fontsize = 24, fontweight = 'bold')\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.scatter(x, y);\n \n plt.show()\n \n # statistical tests\n pearson_coef, pearson_p = sp.stats.pearsonr(x, y)\n spearman_coef, spearman_p = sp.stats.spearmanr(x, y)\n \n correl_test_results = pd.DataFrame()\n correl_test_results.loc['Pearson', 'Coefficient'] = pearson_coef\n correl_test_results.loc['Pearson', 'p-value'] = pearson_p\n correl_test_results.loc['Spearman', 'Coefficient'] = spearman_coef\n correl_test_results.loc['Spearman', 'p-value'] = spearman_p\n\n IPython.display.display(correl_test_results)", "title": "" }, { "docid": "5a0a1b19054966840d1f84c908c69190", "score": "0.5446537", "text": "def plot_heatmap(df_to_plot):\n # create figure and axis\n fig, ax = plt.subplots(figsize=(20, 20))\n\n # Generate a custom diverging colormap\n cmap = sns.diverging_palette(50, 10, as_cmap=True)\n\n # Draw the heat map with the mask and correct aspect ratio\n sns.heatmap(df_to_plot,\n cmap=cmap,\n vmax=1,\n center=0,\n square=True,\n linewidths=.5,\n cbar_kws={\"shrink\": .5},\n annot=True,\n ax=ax)\n\n plt.show()", "title": "" }, { "docid": "18cdd250fd811795e003c09336e25843", "score": "0.54337734", "text": "def hist_fx_correlations_physical_plot(year: str, interval: str) -> None:\n\n function_name: str = \\\n hist_fx_correlations_physical_plot.__name__\n hist_data_tools_matrices_physical \\\n 
.hist_function_header_print_plot(function_name, year, kind='correlations')\n\n try:\n\n periods: int\n n_cols: int\n n_rows: int\n if interval == 'week':\n periods = 52\n n_cols = 13\n n_rows = 4\n elif interval == 'month':\n periods = 12\n n_cols = 4\n n_rows = 3\n elif interval == 'quarter':\n periods = 4\n n_cols = 2\n n_rows = 2\n else:\n periods = 1\n n_cols = 1\n n_rows = 1\n\n figure: plt.figure = plt.figure(figsize=(16, 9))\n cbar_ax: plt.axes = figure.add_axes([0.91, 0.3, 0.03, 0.4])\n\n for per in range(1, periods + 1):\n\n if per < 10:\n per_str: str = f'0{per}'\n else:\n per_str = f'{per}'\n\n # Load data\n corr: pd.DataFrame = pickle.load(open(\n f'../../hist_data/matrices_physical_{year}/hist_fx_matrices'\n + f'_physical_data/hist_fx_corr_physical_data_{year}_int'\n + f'_{interval}_{per_str}.pickle', 'rb'))\n\n ax_sub = plt.subplot(n_rows, n_cols, per)\n\n if interval in ('week', 'month'):\n sns.heatmap(corr, ax=ax_sub, cbar=per == 1,\n cbar_ax=None if (per-1) else cbar_ax,\n vmin=-1, vmax=1)\n\n else:\n sns.heatmap(corr, annot=True, ax=ax_sub, cbar=per == 1,\n cbar_ax=None if (per-1) else cbar_ax,\n vmin=-1, vmax=1)\n\n if interval == 'week':\n ax_sub.tick_params(axis='x', bottom=False, labelbottom=False)\n ax_sub.tick_params(axis='y', left=False, labelleft=False)\n\n plt.yticks(rotation=45)\n plt.xticks(rotation=45)\n\n figure.tight_layout(rect=[0, 0, .9, 1])\n\n # Plotting\n hist_data_tools_matrices_physical \\\n .hist_save_plot(function_name, figure, year, interval)\n\n plt.close()\n del corr\n del figure\n gc.collect()\n\n except FileNotFoundError as error:\n print('No data')\n print(error)\n print()", "title": "" }, { "docid": "da4d1664a826caa3cb0b75c48ccfab88", "score": "0.54255104", "text": "def corr_plot(self, parameters: list = None,\n inherit: plt = None,\n figsize: list = None,\n title: str = None,\n annot: bool = True,\n show: bool = True):\n\n # Check if y is list or str\n if not isinstance(parameters, list):\n self._raise_plot_format_error([\"parameters\"], \"list or None\")\n\n # previous configuration of plot\n plt = self._plot_prev_config(inherit, figsize, \"seaborn-dark\")\n fig, ax = plt.subplots()\n\n data = self[parameters].corr()\n score = data.values\n col = data.columns\n length = len(col)\n im = ax.imshow(score, cmap='rocket_r')\n ax.xaxis.set_ticks_position('top')\n ax.set_xticks(np.arange(length))\n ax.set_yticks(np.arange(length))\n ax.set_xticklabels(col)\n ax.set_yticklabels(col)\n fig.colorbar(im, pad=0.03)\n\n # the annotation part\n if annot:\n for i in range(length):\n for j in range(length):\n if score[i, j] > 0.4:\n color = \"w\"\n else:\n color = \"black\"\n ax.text(j, i, round(score[i, j], 2),\n ha=\"center\", va=\"center\", color=color)\n\n # return for advanced adjustment\n return self._plot_post_config(plt, '', title, '', '', show)", "title": "" }, { "docid": "cf991a923d449e7b829a72da3d5b5f87", "score": "0.5412111", "text": "def plot_correlaton_matrix(self, mask_autocorrelations: bool = False, **kwargs):\n max_ma_index = self.mini_arrays.max() + 1\n all_mas = np.arange(max_ma_index)\n matrix = np.full([max_ma_index, max_ma_index], np.nan, \"complex\")\n ma1, ma2 = np.tril_indices(self.mini_arrays.size, 0)\n for ma in all_mas:\n if ma not in self.mini_arrays:\n ma1[ma1 >= ma] += 1\n ma2[ma2 >= ma] += 1\n\n mask = None\n if mask_autocorrelations:\n mask = ma1 != ma2 # cross_correlation mask\n matrix[ma2[mask], ma1[mask]] = np.mean(self.value, axis=(0, 1))[mask]\n\n fig = plt.figure(figsize=kwargs.get(\"figsize\", (10, 10)))\n ax = 
fig.add_subplot(111)\n ax.set_aspect(\"equal\")\n\n data = np.absolute(matrix)\n if kwargs.get(\"decibel\", True):\n data = 10*np.log10(data)\n\n im = ax.pcolormesh(\n all_mas,\n all_mas,\n data,\n shading=\"nearest\",\n cmap=kwargs.get(\"cmap\", \"YlGnBu\"),\n vmin=kwargs.get(\"vmin\", np.nanmin(data)),\n vmax=kwargs.get(\"vmax\", np.nanmax(data))\n )\n ax.set_xticks(all_mas[::2])\n ax.set_yticks(all_mas[::2])\n ax.grid(alpha=0.5)\n\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.3)\n cbar = fig.colorbar(im, cax=cax)\n cbar.set_label(kwargs.get(\"colorbar_label\", \"dB\" if kwargs.get(\"decibel\", True) else \"Amp\"))\n \n # Axis abels\n ax.set_xlabel(f\"Mini-Array index\")\n ax.set_ylabel(f\"Mini-Array index\")\n\n # Title\n ax.set_title(kwargs.get(\"title\", \"\"))\n\n # Save or show the figure\n figname = kwargs.get(\"figname\", \"\")\n if figname != \"\":\n plt.savefig(\n figname,\n dpi=300,\n bbox_inches=\"tight\",\n transparent=True\n )\n log.info(f\"Figure '{figname}' saved.\")\n else:\n plt.show()\n plt.close(\"all\")", "title": "" }, { "docid": "13ad289c23981fce522b01a2d2ac978b", "score": "0.5408755", "text": "def correlationSpectroscopyInteractive(dataset, target, mode='SHY', correlationMethod='Pearson'):\n\tif mode.lower() == 'shy':\n\t\tcolour = _vcorrcoef(dataset.intensityData[dataset.sampleMask, :], target, method=correlationMethod)\n\n\tmagnitude = numpy.mean(dataset.intensityData[dataset.sampleMask, :],axis=0)\n\t\n\tplot = plotyShadedLineplot(dataset.featureMetadata.loc[dataset.featureMask, 'ppm'], magnitude[dataset.featureMask], colour[dataset.featureMask])\n\n\tif 'ppm' in dataset.featureMetadata.columns:\n\t\txaxis = 'reversed'\n\telse:\n\t\txaxis = 'auto'\n\t\n\tlayout = go.Layout(\n\t\ttitle=None,\n\t\tlegend=dict(\n\t\t\torientation=\"h\"),\n\t\thovermode = \"closest\",\n\t\tyaxis = dict(\n\t\t\tshowticklabels=False,\n\t\t\thoverformat = '.2f'\n\t\t),\n\t\txaxis=dict(\n\t\t\tautorange=xaxis, \n\t\t\ttitle='PPM',\n\t\t\thoverformat = '.2f'\n\t\t)\n\t)\n\n\tfigure = go.Figure(data=plot, layout=layout)\n\t\n\treturn figure", "title": "" }, { "docid": "4dee24c7c99015d58c4a736411182f6a", "score": "0.5406638", "text": "def plot_correlation(data, couplings, name, type_to_idx, subset=[0,1,2,3,4,5,11], \n filename='correlation_matrix.pdf', linkage=None):\n subset = np.asarray(subset, dtype=int)\n name = np.asarray(name)\n\n x = extract_weighted_subset(data, couplings, type_to_idx, subset=subset, metric='rmse')\n corr = np.corrcoef(x)\n\n if linkage not in (None, 'single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward'):\n print(\"Unknown linkage\", linkage)\n raise SystemExit\n\n # Unsorted plots\n if linkage is None:\n # Plot heatmap\n sns.heatmap((corr), square=True, linewidths=.25, cbar_kws={\"shrink\": .5},\n cmap=sns.diverging_palette(220, 10, as_cmap=True),\n yticklabels=name[subset], xticklabels=subset+1,\n center=0.5, vmax=1, vmin=0)\n # Sorted from clustering\n else:\n d = scipy.cluster.hierarchy.distance.pdist(x)#, 'cityblock')\n L = scipy.cluster.hierarchy.linkage(d, method=linkage, optimal_ordering=True)\n\n sns.clustermap((corr), square=True, linewidths=.25, cbar_kws={\"shrink\": .5},\n cmap = sns.diverging_palette(220, 10, as_cmap=True),\n yticklabels=name[subset], xticklabels=subset+1,\n center=0.5, vmax=1, vmin=0, row_linkage=L, col_linkage=L)\n #plt.xticks(rotation=-45)\n\n plt.yticks(rotation=0)\n\n plt.savefig(filename, bbox_inches = \"tight\")\n plt.clf()", "title": "" }, { "docid": 
"d493a9cbe7a1d28b6d58cbd4fe7aa6e2", "score": "0.5405102", "text": "def annotated_heatmap(data, row_labels, column_labels, subplot=None, colorbar_kw={}, colorbar_label=\"\",\n title=None, xlabel=None, ylabel=None,\n horizontal_ticks_angle=0, **kwargs):\n\n if not subplot:\n subplot = plt.gca()\n\n # Plot the heatmap\n image = subplot.imshow(data, **kwargs)\n\n # Create colorbar\n colorbar = subplot.figure.colorbar(image, ax=subplot, **colorbar_kw)\n colorbar.ax.set_ylabel(colorbar_label, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks...\n subplot.set_xticks(np.arange(data.shape[1]))\n subplot.set_yticks(np.arange(data.shape[0]))\n # ... and label them with the respective list entries.\n subplot.set_xticklabels(column_labels)\n subplot.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n #subplot.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(subplot.get_xticklabels(), rotation=horizontal_ticks_angle, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in subplot.spines.items():\n spine.set_visible(False)\n\n subplot.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n subplot.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n subplot.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n subplot.tick_params(which=\"minor\", bottom=False, left=False)\n\n if title is not None:\n plt.title(title)\n if xlabel is not None:\n plt.xlabel(xlabel)\n if ylabel is not None:\n plt.ylabel(ylabel)\n\n return image, colorbar", "title": "" }, { "docid": "f0e8cf54ff312d9d2181174df8919ddf", "score": "0.53944826", "text": "def heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw={}, cbarlabel=\"\", **kwargs):\n\n mpl.rcParams.update({'font.size':24, 'lines.linewidth':3, 'lines.markersize':15, 'font.family':'Times New Roman'}) # avoid type 3 (i.e. bitmap) fonts in figures\n mpl.rcParams['ps.useafm'] = True\n mpl.rcParams['pdf.use14corefonts'] = True\n mpl.rcParams['text.usetex'] = True\n\n if not ax:\n ax = plt.gca()\n\n # handle nans\n current_cmap = mpl.cm.get_cmap()\n current_cmap.set_bad(color='w')\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n# cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n# cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... 
and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n # ax.tick_params(top=True, bottom=False,\n # labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n# plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\n# rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for _, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1] + 1) - .5, minor=True)\n ax.set_yticks(np.arange(data.shape[0] + 1) - .5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n ax.invert_yaxis()\n\n ax.set_xlabel('$p_{1}$', labelpad=15)\n ax.set_ylabel('$p_{2}$', labelpad=15)\n\n return im # , cbar", "title": "" }, { "docid": "0bcbdb562ea536bf02f683b7f87355e5", "score": "0.5375192", "text": "def graph_heatmap():\n\n\tpath=\"RESULTS/Graficar_HeatMap\"\n\tpath2=\"RESULTS/Graficos\"\n\tif os.path.isdir(path2) == True:\n\t\tpass\n\telse:\n\t\tos.mkdir(path2)\n\n\tfor file1 in os.listdir(path):\n\t\tout=path2+ \"/\" + file1[:-9] + \"_heatmap.jpg\"\n\t\tinput_file1=open(path + \"/\" + file1, \"r\")\n\t\tz=list()\n\t\tfor line1 in input_file1:\n\t\t\tcolumns=line1.split(\"\\t\")\n\t\t\tz.append(float(columns[2]))\n\t\t\n\t\tcoverage=np.linspace(0,100,11)\n\t\tidentity=np.linspace(0,100,11)\n\t\tcantidad=np.array(z).reshape(11,11)\n\t\tz_min, z_max = np.abs(z).min(), np.abs(z).max()\n\n\t\tfig, ax = plt.subplots()\n\t\tlevels=MaxNLocator(nbins=10).tick_values(z_min, z_max)\n\t\tc=ax.contourf(coverage, identity, cantidad, levels=levels, \n\t\tcmap='GnBu', vmin=z_min, vmax=z_max)\n\t\ttitle='Nº de subjects para cada intervalo de \\ncobertura e identidad'\n\t\tax.set_title(title + \" del query \" + file1[:-9])\n\t\tax.axis([coverage.min(),coverage.max(), \n\t\t\t\tidentity.min(), identity.max()])\n\t\tfig.colorbar(c, ax=ax)\n\t\tplt.ylabel('Porcentaje de cobertura')\n\t\tplt.xlabel('Porcentaje de identidad')\n\t\tplt.savefig(out)", "title": "" }, { "docid": "f227e23b676adde8bdf857cea492b9b0", "score": "0.53670585", "text": "def construct_heatmap( datamatrix, genes, tissues, heatmap_file ):\n\t\n\tmy_vmax = 100\n\t\n\tprint \"number of genes for heatmap construction: \" + str( len( genes ) )\n\tdf = DataFrame( datamatrix, index=genes[::-1], columns=tissues).round( 0 )\n\t\n\tfig, ax = plt.subplots( )\n\t\n\tsns.heatmap( df, vmin=0, vmax= my_vmax, ax=ax, linewidths=0.3, annot=True, annot_kws={'fontsize':3}, cbar=False, fmt=\"g\", cmap='YlGnBu' )\t#binary\t#cmap='YlGnBu' = 1\n\t\n\tfor idx, gene in enumerate( genes ):\n\t\tax.text( -2.65, idx+0.8, gene, fontsize=3 )\n\t\n\tfor idx, tissue in enumerate( tissues ):\n\t\tax.text( idx+0.4, len( genes )+0.5, \"20\"+tissue[-2:] + \"-\" + tissue[2:4] + \"-\" + tissue[:2], rotation=90, fontsize=3 )\n\t\n\tax.set_yticklabels( [], rotation=0, fontsize=2 )\n\tax.set_xticklabels( [] , rotation=90, fontsize=3 )\n\t\n\tax.spines['bottom'].set_visible(False)\n\tax.spines['left'].set_visible(False)\n\t\n\tax.axes.get_yaxis().set_visible(False)\n\tax.axes.get_xaxis().set_visible(False)\n\t\n\tplt.yticks( rotation=0 )\n\tplt.subplots_adjust( left=0.135, right=0.99, top=0.99, bottom=0.0575, wspace=0.2 )\n\t\n\tplt.savefig( heatmap_file, dpi=900 )\n\tplt.savefig( heatmap_file.replace( \".pdf\", \".svg\" ) )", "title": "" }, { "docid": "489be190ef5475545b27b6f9460bd009", "score": "0.5357623", 
"text": "def vis_corr_photometry(self):\n # Look at photometry after subtracting expected magnitude\n # (i.e. subtract an approximate phase curve, but the ZTF predicted values do have errors sometimes)\n fig = plt.figure(figsize=(8, 6))\n for f in self.filterlist:\n o = self.lcobs.query('fid == @f')\n plt.errorbar(o.jd - self.lcobs.jd.iloc[0], o.magcorr, yerr=o.sigmamag, color=filtercolors[f],\n marker='.', linestyle='')\n plt.xticks(rotation=90)\n plt.xlabel('delta JD', fontsize='x-large')\n plt.ylabel('MagCorr (mag_obs - pred)', fontsize='x-large')\n plt.gca().invert_yaxis()\n plt.title(self.name)\n plt.grid(True, alpha=0.3)\n label = self._make_figurelabel()\n plt.figtext(0.15, 0.8, label)\n return fig", "title": "" }, { "docid": "dc9afac1b7f9bb81a270e60a2a8a57a2", "score": "0.53570837", "text": "def _corr_grid(corr: NDArray[float]) -> NDArray[float]:\n xs = np.arange(-corr.shape[0] / 2, corr.shape[0] / 2) + 0.5\n ys = np.arange(-corr.shape[1] / 2, corr.shape[1] / 2) + 0.5\n return np.meshgrid(xs, ys)", "title": "" }, { "docid": "c766738dc491739308744a5cf8d38ce6", "score": "0.53538287", "text": "def plotHeatmap(T, Y, ax = None):\n if ax is None:\n ax = plt.gca()\n cax1 = ax.imshow(Y, aspect='auto', interpolation='none', origin='lower',extent=[0,T[-1],len(Y),0],vmax=2, vmin=-2)\n ax.set_xlabel('Time (s)')\n ax.set_ylabel(\"Neuron\")\n return cax1", "title": "" }, { "docid": "c8f5f4b7e6b421696a0592aee556a47c", "score": "0.53533655", "text": "def plot_selectivity_heatmap(corrs, title, res_dir):\n\n # Plot each layer.\n for layer_name, lcorrs in corrs.groupby(level='layer'):\n\n # Create heatmap.\n lcorrs.index = lcorrs.index.droplevel()\n lcorrs = lcorrs.T\n wfig, hfig = np.sqrt(lcorrs.shape[1]), np.sqrt(lcorrs.shape[0])\n fig = plt.figure(figsize=(wfig, hfig))\n ax = fig.add_subplot(111)\n sns.heatmap(lcorrs, linewidths=.5, center=0, xticklabels=2, ax=ax)\n\n # Set labels.\n plt.yticks(rotation=0)\n ax.set_xlabel('Unit')\n ax.set_ylabel(corrs.columns.name)\n plt.title('%s in %s' % (title, layer_name))\n\n # Save plot.\n fname = '{}/{}.png'.format(res_dir, layer_name)\n utils.save_fig(fname, fig, close=True)", "title": "" }, { "docid": "0fccb04e486f7c59ed3cec2313dbdaf0", "score": "0.53528035", "text": "def plot(Xt, sample=0, colorscale='greys', origin='upper',\n plotly_params=None):\n return plot_heatmap(\n Xt[sample] * 1, colorscale=colorscale, origin=origin,\n title=f\"Binarization of image {sample}\",\n plotly_params=plotly_params\n )", "title": "" }, { "docid": "271f1b9cd4dc9c6df67cbf573e0b11f9", "score": "0.53442097", "text": "def update_heatmap_carn(self):\n x = self.sim.animal_distribution\n carn = x.pivot('Row', 'Col', 'Carnivore').values\n self.ax4.imshow(carn, vmax=self.cmax_animals['Carnivore'])", "title": "" }, { "docid": "46b7aa1b1dd19ff27daa2d7c5b7c33fa", "score": "0.53392583", "text": "def correlation_matrix(self):\n pass # implementation can be omitted", "title": "" }, { "docid": "ae50d63584b53b20167c5f7d05e6ca66", "score": "0.53233975", "text": "def draw_heat_map(center, df, radius = 16):\n\n from ipyleaflet import Map,Marker,CircleMarker,Heatmap\n from ipywidgets import HTML\n from ipyleaflet import Popup\n\n m = Map(center = center, zoom = 11)\n\n heatmap = Heatmap(\n locations=[(df.loc[i, \"lat\"], df.loc[i,\"lng\"]) for i in df.index],\n radius= radius\n )\n\n m.add_layer(heatmap);\n\n return display(m)", "title": "" }, { "docid": "1d0a410c59ade904d9b454048013fbdb", "score": "0.531939", "text": "def plot_cmap(matrix_values, figsize_w, figsize_h, filename):\n if 
figsize_w is not None and figsize_h is not None:\n plt.figure(figsize=(figsize_w,figsize_h))\n else:\n plt.figure()\n cmap = sns.diverging_palette(240, 10, sep=20, as_cmap=True)\n sns.heatmap(matrix_values, annot=True, fmt=\".2f\", cmap=cmap, vmin=-1, vmax=1)\n plt.savefig(filename)\n plt.show()\n return cmap", "title": "" }, { "docid": "262d8f1bf862cf176026079568b12242", "score": "0.53168684", "text": "def heatmap(data, key1, key2, values='value', xlabel=\"\", ylabel=\"\", log=False,\n carryover=False, draw_bars=True, cmap='CMRmap', disable_axes=True, **kwargs):\n\n def do_disable_ticks(ax):\n ax.xaxis.set_major_locator(plt.NullLocator())\n ax.yaxis.set_major_locator(plt.NullLocator())\n\n def do_disable_axes(ax):\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n\n #cmap = \"YlGnBu\"\n bar_col = \"silver\"\n\n\n # create a 2 x 3 subplot field\n # TODO: disable option\n if draw_bars:\n fig = plt.figure(frameon=False)\n gs = gridspec.GridSpec(2, 3, width_ratios=[5, 1, 1], height_ratios = [1, 5], figure=fig)\n ax_center = plt.subplot(gs[1,0], frameon=False)\n ax_top = plt.subplot(gs[0,0], frameon=False, sharex=ax_center)\n ax_right = plt.subplot(gs[1,1], frameon=False)\n ax_right2 = plt.subplot(gs[1,2], frameon=False, sharex=ax_center)\n else:\n fig = plt.figure()\n gs = gridspec.GridSpec(2, 1, height_ratios=[9, 1], width_ratios = [1], figure=fig)\n ax_center = plt.subplot(gs[0,0], frameon=False)\n ax_right2 = plt.subplot(gs[1,0], frameon=False)\n\n # prepare data\n matrix = data.pivot(key1, key2, values=values)\n if carryover:\n matrix = matrix.fillna(method='ffill')\n else:\n matrix = matrix.fillna(0)\n\n if draw_bars:\n top = data.groupby(key1)[values].sum()\n right = data.groupby(key2)[values].sum()[::-1]\n\n # plot\n hm_data = matrix.T\n hm_data = hm_data.sort_index(ascending=False)\n if log:\n norm = SymLogNorm(vmin=hm_data.min().min(), vmax=hm_data.max().max(),\n linthresh=1)\n hm = sns.heatmap(hm_data, ax=ax_center, cbar=False, cmap=cmap,\n norm=norm, linewidths=0, **kwargs)\n else:\n hm = sns.heatmap(hm_data, ax=ax_center, cbar=False, cmap=cmap, linewidths=0, **kwargs)\n # previously used imshow, didn't work as nicely\n ## ax_center.imshow(matrix.T, aspect='auto')\n if draw_bars:\n top.plot.bar(ax=ax_top, color=bar_col, align='edge', log=log)\n #ax_top.set_yscale('symlog')\n right.plot.barh(ax=ax_right, color=bar_col, align='edge', log=log)\n #ax_top.set_yscale('symlog')\n plt.colorbar(ax_center.get_children()[0], ax=ax_right2, orientation='vertical')\n\n # naming\n ax_center.set_xlabel(xlabel)\n ax_center.set_ylabel(ylabel)\n\n # remove axes and ticks where possible\n do_disable_axes(ax_right2)\n do_disable_axes(ax_top)\n do_disable_axes(ax_right)\n do_disable_ticks(ax_center)\n\n # remove spaces\n fig.tight_layout(pad = 0)\n fig.patch.set_visible(False)\n return fig, [ax_top, ax_right, ax_right2, ax_center], {key1:top, key2:right}\n else:\n plt.colorbar(ax_center.get_children()[0], ax=ax_right2, orientation='horizontal', fraction=1, aspect=40)\n fig.subplots_adjust(hspace=0)\n # naming\n ax_center.set_xlabel(xlabel)\n ax_center.set_ylabel(ylabel)\n\n # remove axes and ticks where possible\n if disable_axes:\n do_disable_axes(ax_right2)\n #do_disable_ticks(ax_center)\n\n fig.tight_layout(pad = 0)\n #fig.patch.set_visible(False)\n return fig, [ax_center], None", "title": "" }, { "docid": "78970ea2e2fdef31ce6027be88bf855e", "score": "0.531118", "text": "def heatmap(data, row_labels, col_labels, ax=None,\r\n cbar_kw={}, cbarlabel=\"\", title = \"Default\", x_title=\" 
\",y_title=\" \",saveFile = None, **kwargs):\r\n\r\n #plt.clf()\r\n if not ax:\r\n ax = plt.gca()\r\n\r\n # Plot the heatmap\r\n im = ax.imshow(data, **kwargs)\r\n\r\n # Create colorbar\r\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw) # pad=0.1\r\n #cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\r\n\r\n # We want to show all ticks...\r\n ax.set_xticks(np.arange(data.shape[1]))\r\n ax.set_yticks(np.arange(data.shape[0]))\r\n # ... and label them with the respective list entries.\r\n ax.set_xticklabels(col_labels)\r\n ax.set_yticklabels(row_labels)\r\n\r\n # Let the horizontal axes labeling appear on bottom\r\n ax.tick_params(top=False, bottom=True,\r\n labeltop=False, labelbottom=True)\r\n\r\n # Rotate the tick labels and set their alignment.\r\n plt.setp(ax.get_xticklabels(), rotation=30, ha=\"right\",\r\n rotation_mode=\"anchor\")\r\n\r\n # Turn spines off and create white grid.\r\n for edge, spine in ax.spines.items():\r\n spine.set_visible(False)\r\n\r\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\r\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\r\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\r\n ax.tick_params(which=\"minor\", bottom=False, left=False)\r\n \r\n # plt.title(title)\r\n # plt.xlabel(x_title)\r\n # plt.ylabel(y_title)\r\n ax.set_xlabel(x_title,fontsize=12)\r\n ax.set_ylabel(y_title,fontsize=12) # 10\r\n ax.set_title(title,fontsize=12) #10, pad=-10)\r\n \r\n \r\n if saveFile != None:\r\n plt.savefig(saveFile + '.png', bbox_inches='tight') \r\n\r\n return im, cbar", "title": "" }, { "docid": "42a0fa4fb075b58e6f7d0a4e36a260d0", "score": "0.5309405", "text": "def plot_autocorr_grid(self, data):\n if data.ndim == 1:\n self.plot_autocorr(data)\n else:\n nax = data.shape[0]\n if nax > self.max_grid_plot:\n print 'Too many plots requested.'\n return\n\n self._plot_grid(data, self.plot_autocorr)", "title": "" }, { "docid": "8af29cf3f086318471dd29d1df06c178", "score": "0.53038985", "text": "def plotDiffMatHeatmap(\n fa,\n fb,\n fo,\n start=0,\n end=-1,\n r=5000,\n cut=0,\n mcut=-1,\n na=\"\",\n nb=\"\",\n log=False,\n vmin=None,\n vmax=None,\n):\n labela, chroma, xya, tota = getData(fa, cut, mcut,start,end)\n labelb, chromb, xyb, totb = getData(fb, cut, mcut,start,end)\n if chroma != chromb:\n print(\"ERROR! 
%s and %s are not the same target chromosome, return.\" %\n (fa, fb))\n return\n if start == 0:\n start = min(np.min(xya), np.min(xyb))\n if end == -1:\n end = max(np.max(xya), np.max(xyb))\n if na == \"\":\n na = labela\n if nb == \"\":\n nb = labelb\n mata = getObsMat(xya, start, end, r)\n matb = getObsMat(xyb, start, end, r)\n sf = tota / totb\n mata = mata / sf\n if log:\n mat = np.log2((mata + 1) / (matb + 1))\n label = \"log2( %s/%s )\" % (na, nb)\n else:\n mat = mata - matb\n label = \"%s-%s\" % (na, nb)\n\n hights = 4\n hr = [6, 0.1]\n fig = pylab.figure(figsize=(4, hights))\n gs = mpl.gridspec.GridSpec(len(hr),\n 1,\n height_ratios=hr,\n top=0.95,\n bottom=0.05,\n left=0.1,\n right=0.9,\n wspace=0.05)\n pylab.suptitle(\"%s-%s, %s:%s-%s\" % (na, nb, chroma[0], start, end),\n fontsize=8)\n cmap = sns.color_palette(\"RdBu_r\", 11).as_hex()\n cmap[int(len(cmap) / 2)] = \"#FFFFFF\"\n cmap = ListedColormap(cmap)\n ax = fig.add_subplot(gs[-2])\n cax = fig.add_subplot(gs[-1])\n sns.set(font_scale=0.5)\n ax = sns.heatmap(mat,\n xticklabels=False,\n yticklabels=False,\n linewidths=0.0,\n square=True,\n cmap=cmap,\n ax=ax,\n center=0,\n vmin=vmin,\n vmax=vmax,\n cbar_ax=cax,\n cbar_kws={\n 'label': label,\n 'orientation': 'horizontal',\n \"shrink\": 0.5,\n \"fraction\": 0.2,\n \"anchor\": (0.0, 1.0)\n })\n cax.tick_params(labelsize=4)\n #draw the box\n ax.axvline(x=ax.get_xlim()[0], color=\"k\", linewidth=2)\n ax.axvline(x=ax.get_xlim()[1], color=\"k\", linewidth=2)\n ax.axhline(y=ax.get_ylim()[0], color=\"k\", linewidth=2)\n ax.axhline(y=ax.get_ylim()[1], color=\"k\", linewidth=2)\n pylab.savefig(fo + \"_compareMatrix.pdf\")", "title": "" }, { "docid": "8be07a705d11792466c562aaf5b72a6e", "score": "0.5281724", "text": "def my_draw(self):\n \n fig = Figure()\n canvas = FigureCanvas(fig)\n ax = fig.gca()\n \n ax.imshow(self.image_playground)\n \n rect = patches.Rectangle((self.agent_window[0],self.agent_window[1]),self.agent_window[2]-self.agent_window[0],self.agent_window[3]-self.agent_window[1],linewidth=1,edgecolor='r',facecolor='none')\n ax.add_patch(rect)\n\n for target in [self.targets[0]]:\n rect2 = patches.Rectangle((target[0],target[1]),target[2]-target[0],target[3]-target[1],linewidth=1,edgecolor='b',facecolor='none')\n ax.add_patch(rect2)\n \n canvas.draw() \n \n width, height = fig.get_size_inches() * fig.get_dpi()\n\n return np.fromstring(canvas.tostring_rgb(), dtype='uint8').reshape(int(height), int(width), 3)", "title": "" }, { "docid": "746eba819ec15a7b501c6fecaaf8b21c", "score": "0.52727544", "text": "def cmap_show(self):\r\n\r\n ##SHOW THE AVAILABLE COLOUR MAPS GRAPHICALLY\r\n\r\n ##CREATE A NEW FIGURE AND CLEAR IT\r\n\r\n fig=figure(3)\r\n clf()\r\n \r\n ##CREATE AN ARRAY THAT VARIES FROM ZERO TO ONE IN THE X AXIS,\r\n ##AND IS CONSTANT IN THE Y AXIS\r\n\r\n a=outer(ones(10),arange(0,1,0.01))\r\n\r\n i=0\r\n ##FOR EACH COLOUR \r\n for col in self.cmaps:\r\n subplot(len(self.cmaps),1,i)\r\n\r\n ##THIS PLOTS A BAND STRETCHING OVER THE WHOLE COLOUR MAP\r\n\r\n plot=imshow(a,aspect='auto',cmap=get_cmap(col),origin=\"lower\")\r\n\r\n ##NO TICKMARKS\r\n plot.axes.get_xaxis().set_ticks([])\r\n plot.axes.get_yaxis().set_ticks([])\r\n\r\n ##AND LABELS IT WITH THE COLOUR NAME\r\n annotate(col, (0,1),color='white')\r\n i=i+1\r\n ##AND RESET THE FIGURE NUMBER\r\n fig=figure(1)\r\n\r\n if(matplotlib.get_backend() != 'macosx'):\r\n show()", "title": "" }, { "docid": "22b6312f1a1f80c1bc8e7d4cde2cf88d", "score": "0.5267274", "text": "def plotXCorrel(data1,data2,units,title,filename):\n\n 
confid_int_a = 2.0/(math.sqrt(len(data1)))\n confid_int_b = -2.0/(math.sqrt(len(data1)))\n\n plt.xcorr(data1,data2,maxlags=None)\n plt.ylabel('Cross-correlation [-1,1]')\n plt.xlabel('Lag ('+units+')')\n plt.axhline(y = confid_int_a,ls='dashed')\n plt.axhline(y = confid_int_b,ls='dashed')\n plt.title(title)\n savefig(filename)\n plt.close()\n return", "title": "" } ]
a6f121548d76e6dc98afa53d020aa3df
Check if this task failed.
[ { "docid": "dd5c1773e64de9e555e6c0c34cc41a45", "score": "0.7249489", "text": "def failed(self) -> bool:\n return self.completed is not None and not self.completed", "title": "" } ]
[ { "docid": "6af889faa0e6ed8836e8b78b10e2a744", "score": "0.77773494", "text": "def task_is_failure(task):\n\n if task and task.state == 'FAILURE':\n return True\n return False", "title": "" }, { "docid": "c14544e02e3f0b540cbf63fd7c879100", "score": "0.76552993", "text": "def failed(self) -> bool:\n return self.__failed", "title": "" }, { "docid": "7e19d29a37d92d798d37f12f89e45dd7", "score": "0.75677633", "text": "def is_failed(self) -> bool:\n return self.execution_status == 'FAILED'", "title": "" }, { "docid": "1c8540d632463b524993ba06741e75ec", "score": "0.73092407", "text": "def failed(self):\n return bool(self._exception)", "title": "" }, { "docid": "fc523f80feb64eeff3b0c05a4104144f", "score": "0.72882026", "text": "def failed(self) -> bool:\n if self.status in RUNNING_STATUSES:\n self.refresh()\n return self.status in FAILED_STATUSES", "title": "" }, { "docid": "5beb2e4731aa3174bc0d8337c10df921", "score": "0.7171653", "text": "def _was_failed( self ):\n # I think it also makes sense to have a single failed flag, but note that any\n # lanes with status .done are still to be regarded as good. Ie. the interpretation\n # of this flag is that any 'started' lane is reallY a 'failed' lane.\n return self._exists( 'pipeline/failed' )", "title": "" }, { "docid": "d70a38943bed04322014fdff81e2e59f", "score": "0.7117338", "text": "def failed(self):\n return self._failed", "title": "" }, { "docid": "4a597cdc5ddcd738679ecd820493c8d2", "score": "0.70847005", "text": "def is_failure(self):\n return sum(1 for f in self.failures if f[\"message\"] or f[\"output\"]) > 0", "title": "" }, { "docid": "58d8be084c356d5759034698d40a8d5e", "score": "0.70551085", "text": "def is_failure(self, result):\n return len(result) == 0", "title": "" }, { "docid": "ae90a5373830f51699944957e8397245", "score": "0.6971109", "text": "def hasFailed(self,identifier):\n if identifier in self._threads:\n return self._threads[identifier].failed()\n return False", "title": "" }, { "docid": "e9e0401d9c198ca76249e16ba92137a7", "score": "0.69340616", "text": "def failed(self):\n return self._failed", "title": "" }, { "docid": "66ffa7d50127a6b83fa727d18a767d93", "score": "0.6869511", "text": "def has_failed(self):\n currenttime = time.time()\n return self.EnlapsedTime() > self.timeout", "title": "" }, { "docid": "af4d52a14262385e2a92ff7453e0dcb2", "score": "0.68377984", "text": "def is_failed(self) -> bool:\n return isinstance(self, Failed)", "title": "" }, { "docid": "3a9b72bee06fd05c11d8b7b1672b037a", "score": "0.68356586", "text": "def failure(self):\n return bool(self.errors or self.warnings)", "title": "" }, { "docid": "5aebdd214892064ec5063a0a09b0d9b1", "score": "0.67788523", "text": "def fail ( self ):\n return bool ( self.sync_status & SYNC_FAIL )", "title": "" }, { "docid": "f8f02663b6294599556cbd50ceff61ae", "score": "0.67611015", "text": "def is_failed(self, *, sync: bool = False) -> bool:\n\n if sync:\n self.sync()\n\n return self.state.lower() == defaults.states.FAILED", "title": "" }, { "docid": "8a2e82ce0dd074ca76b5caf70a589896", "score": "0.66841114", "text": "def alert_on_failure(self):\n return True", "title": "" }, { "docid": "d5808585b585c9d4db266a1dd3114fbf", "score": "0.6633913", "text": "def junit_is_failure(self):\n return sum(1 for f in self.failures if f[\"message\"] or f[\"output\"]) > 0", "title": "" }, { "docid": "1fd2183650df3dd9710abead250e998d", "score": "0.6626132", "text": "def fail(self):\n self.failed = True\n self.finish()", "title": "" }, { "docid": "656904b37e73ed9eff220f99b26bb8d9", "score": 
"0.66234434", "text": "def __process_failed_task(self):\n while self.run:\n # Block for one second\n try:\n task = self.qerr.get(True, 1)\n except Queue.Empty:\n continue\n\n source = os.path.join(self.in_dir, task[\"file\"])\n if not os.path.isfile(source):\n return False\n\n if self.save_failed:\n destination = os.path.join(self.err_dir, task[\"file\"])\n os.rename(source, destination)\n else:\n os.remove(source)", "title": "" }, { "docid": "7edabf11fde765ddf67ca6760fd651c1", "score": "0.6612919", "text": "def has_error(self) -> bool:\n return self.execution_output.status.code != 200", "title": "" }, { "docid": "e623becec9a85b1de0549b0253a82fe4", "score": "0.66118795", "text": "def failed(self):\n ...", "title": "" }, { "docid": "1754990def035604ee90a05856042e54", "score": "0.6586701", "text": "def fail(self) -> None:\n self.success = False", "title": "" }, { "docid": "edad59cd68541c16c51cc8f9d7a2ea34", "score": "0.6578857", "text": "def check_on_run(self, task_runner: Type[TaskRunner]) -> None:\n if not task_runner.is_ok(self.run_id) and self.get_state() not in ['SYSTEM_ERROR', 'EXECUTOR_ERROR', 'COMPLETE', 'CANCELED']:\n logger.error('Failing run %s because the task to run its leader crashed', self.run_id)\n self.state_machine.send_system_error()", "title": "" }, { "docid": "9d461de487271f519d2d9c5f3f85651f", "score": "0.6560773", "text": "def test_failure(self):\n self.assertEqual(Status.FAILURE, self.run_task('exit 1'))\n self.assertEqual(Status.FAILURE, self.run_task('asd78sad7ftaoq'))", "title": "" }, { "docid": "02329e22dd09ce4a89e849beb84e32f4", "score": "0.65443164", "text": "def analysis_failed(self) -> bool:\n return not self.analysis_completed and self.analysis_started", "title": "" }, { "docid": "7f22b1b421fac358ed6ca3306743549e", "score": "0.65239406", "text": "def failed(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"failed\")", "title": "" }, { "docid": "7f22b1b421fac358ed6ca3306743549e", "score": "0.65239406", "text": "def failed(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"failed\")", "title": "" }, { "docid": "af48c5d99a676c4331387765151477d7", "score": "0.6493763", "text": "def has_failed(self):\r\n for action in self.actions:\r\n if action.status in ('nok', 'cancelled'):\r\n return True\r\n return False", "title": "" }, { "docid": "e18569432063a661ef18c8fa40f4cbc5", "score": "0.64920485", "text": "def _test_run_with_failure(self, task_class, expected_message):\n task_entry = self._create_input_entry()\n self.define_option_problem(PROBLEM_URL_NAME)\n with pytest.raises(TestTaskFailure):\n self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id, expected_message)\n # compare with entry in table:\n entry = InstructorTask.objects.get(id=task_entry.id)\n assert entry.task_state == FAILURE\n output = json.loads(entry.task_output)\n assert output['exception'] == 'TestTaskFailure'\n assert output['message'] == expected_message", "title": "" }, { "docid": "edc94fb1be407a9cbdc69c16ed9afeb4", "score": "0.64901346", "text": "def task_failed(self, task):\n self._queue.discard_result(task.id, self.serialize(TASK_EXPIRED))", "title": "" }, { "docid": "dd68979529c5a9d97db6dc8491208012", "score": "0.6419343", "text": "def check_fail(cmd, **kwargs):\n ret = run(cmd, **kwargs)\n assert ret.returncode > 0\n return ret", "title": "" }, { "docid": "4ef3c7e18f6985928c1dc36acb710dd1", "score": "0.64110875", "text": "def check_failed(return_code):\n if return_code != 0:\n global _failed\n _failed = True", "title": "" }, { "docid": 
"80a150e0b8418005db42ec765f08fade", "score": "0.6391514", "text": "def test_wasSuccessfulFalseAfterFailures(self):\n try:\n self.fail(\"foo\")\n except self.failureException:\n self.result.addFailure(self.test, sys.exc_info())\n self.assertEquals(False, self.result.wasSuccessful())", "title": "" }, { "docid": "ad724086b71046669299c8977f29e0a4", "score": "0.6380098", "text": "def fail_task_not_found():\n return cc_html.fail_with_error_stay_logged_in(\"Task not found.\")", "title": "" }, { "docid": "53301e9db78067ce02605573d0aacbd1", "score": "0.63572764", "text": "def ErrorHasHappenedInJob(self) -> bool:", "title": "" }, { "docid": "a71ddc8bce0dc4669728ac812db66872", "score": "0.63400525", "text": "def junit_is_error(self):\n return sum(1 for e in self.errors if e[\"message\"] or e[\"output\"]) > 0", "title": "" }, { "docid": "766955bb69517e9dc80035c8bc6d9d18", "score": "0.62683487", "text": "def _create_execution_has_failed_tests_flag():\n return multiprocessing.Manager().Value('error', False)", "title": "" }, { "docid": "0ea9cb625f2b2b889622982a87e9006e", "score": "0.62444496", "text": "def allow_failure(self) -> Optional[bool]:\n return pulumi.get(self, \"allow_failure\")", "title": "" }, { "docid": "18f259ee2b1a7e2c9a419dea2b201ac7", "score": "0.61810863", "text": "def failure(self) -> _ErrorType:", "title": "" }, { "docid": "7c12f7a0082387d1c022a0fc24e2688f", "score": "0.6173469", "text": "def getFailureState(self, task):\n return self._failureTaskMap.get(task, None)", "title": "" }, { "docid": "ea54cbc55f0798af83efe6f3e0a21260", "score": "0.61614394", "text": "def has_error(self):\n\n return self._err is not None", "title": "" }, { "docid": "cffbacd9dc2b6d46b0b58f92f1f86432", "score": "0.6136735", "text": "def failed(cls, status: str) -> bool:\n return status == V1Statuses.FAILED or status == V1Statuses.UPSTREAM_FAILED", "title": "" }, { "docid": "a923426e753e783e8d80ccf9421c537b", "score": "0.61306083", "text": "def isServerFailed(self):\n isFailed = False\n with open(self.logPath) as f:\n for line in f:\n if line.lower().find(\"error\") >= 0:\n isFailed = True\n break\n return isFailed", "title": "" }, { "docid": "9b9bb48dbdafaa9edda3738c300f7a62", "score": "0.61281186", "text": "def lastStepErrorOk(self):\n return True", "title": "" }, { "docid": "6f71d64d874abf1e904612c24efc3f21", "score": "0.6114285", "text": "def check_cached_task_failures(task_name, task_uid):\n cache_key = f\"{task_uid}-task-attempts\"\n task_attempts = cache.get_or_set(cache_key, 0)\n task_attempts += 1\n cache.set(cache_key, task_attempts)\n if task_attempts > settings.MAX_TASK_ATTEMPTS:\n raise FailedException(task_name=task_name)", "title": "" }, { "docid": "dd85a184380a03a710a7e6577b92ad0f", "score": "0.6103362", "text": "def testFailed(self, test):\n pass", "title": "" }, { "docid": "3ab71d7f49ac818c93b575480c5f5d96", "score": "0.6100009", "text": "def failed(self):\n self.drive.debug(debug_modules=True) #Prints some debugging info\n self.next_state('finish')", "title": "" }, { "docid": "c92c16d8f6842771ead98ffdbdb26aeb", "score": "0.60963756", "text": "def success(self):\n return not self.failure", "title": "" }, { "docid": "4bf622094eceebe2bacf95bf168162cf", "score": "0.6074788", "text": "def test_fail(self):\r\n # Grrr, we seem to get stdout here, not stderr.\r\n self._count_eq('FAIL: ', 1)", "title": "" }, { "docid": "26ec531d19342eb225baf3cdc7107d9a", "score": "0.60615087", "text": "def was_successful(self) -> bool:", "title": "" }, { "docid": "74cc636bdecde54ebf7ecdaa8c7710e1", "score": "0.604986", 
"text": "def check_actual():\n if get_actual() is None:\n print('No actual task. Use \"task --create <task>\" to create one.')\n return False\n\n if not os.path.exists(get_task_path(get_actual()['task'])):\n print('Actual task points to invalid directory \"'\n + get_actual()['task'] +\n '\". Please set correct task with task -a <Folder/Task>.')\n return False\n\n return True", "title": "" }, { "docid": "82e8077d97025afcd0ca3139da17a391", "score": "0.6027442", "text": "def check_task_complete(self):\n complete = True\n error_message = 'The following errors are preventing saving: '\n if len(self.window.username_textbox.text()) > 0:\n complete *= True\n else:\n complete *= False\n error_message += 'username is blank, '\n if self.window.consent_checkbox.isChecked():\n complete *= True\n else:\n complete *= False\n error_message += 'consent was not provided, '\n if self.window.age_spinbox.value() > 17:\n complete *= True\n else:\n complete *= False\n error_message += 'must be an adult (18+) to participate, '\n if str(self.window.edu_combobox.currentText()) != '':\n complete *= True\n else:\n complete *= False\n error_message += 'education level was not provided, '\n print(self.window.edu_combobox.currentText())\n if str(self.window.gender_combobox.currentText()) != '':\n complete *= True\n else:\n complete *= False\n error_message += 'gender was not provided, '\n print(self.window.gender_combobox.currentText())\n trial = self.ff_urn_draw_count + self.random_urn_draw_count\n trial_delta_req = trial - self.req_trials\n trial_delta_max = trial - self.max_trials\n print(self.req_trials, self.max_trials, trial)\n if trial == self.req_trials:\n complete *= True\n elif trial < self.req_trials:\n error_message += (str(abs(trial_delta_req)) +\n 'too few marble draws executed, ')\n elif trial > self.max_trials:\n error_message += (str(abs(trial_delta_max)) +\n 'too many marble draws executed (restart the app), ') \n error_message = error_message[:-2] + '.'\n return (complete, error_message)", "title": "" }, { "docid": "f83a72df38791eab503e97c0b75d5f05", "score": "0.6026283", "text": "def _test_run_with_long_error_msg(self, task_class):\n task_entry = self._create_input_entry()\n self.define_option_problem(PROBLEM_URL_NAME)\n expected_message = \"x\" * 1500\n with pytest.raises(TestTaskFailure):\n self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id, expected_message)\n # compare with entry in table:\n entry = InstructorTask.objects.get(id=task_entry.id)\n assert entry.task_state == FAILURE\n assert 1023 > len(entry.task_output)\n output = json.loads(entry.task_output)\n assert output['exception'] == 'TestTaskFailure'\n assert output['message'] == (expected_message[:(len(output['message']) - 3)] + '...')\n assert 'traceback' not in output", "title": "" }, { "docid": "8983d062cb66c268463de0224f5b5fb3", "score": "0.6020274", "text": "def success(self):\n return self.errors == 0", "title": "" }, { "docid": "8c2c33df072fc44ede86a2e4d2f96b3a", "score": "0.6020236", "text": "def lastStepErrorOk(self):\n self.computeErrorEstimate()\n return True", "title": "" }, { "docid": "8719df88b00ff5b35c1c9f4391600727", "score": "0.60163635", "text": "def is_error(self):\n return sum(1 for e in self.errors if e[\"message\"] or e[\"output\"]) > 0", "title": "" }, { "docid": "af80ed16679122971fa4857cbfe9181f", "score": "0.60003185", "text": "def failed(self, failed):\n\n self._failed = failed", "title": "" }, { "docid": "d133efadfdea7384789e9af51706a9e1", "score": "0.5986603", "text": "def 
failed_tests(self, *args):\n return self.__failing_unittests", "title": "" }, { "docid": "a26979e7e83329b9cf7e2d657d2e858f", "score": "0.5974275", "text": "def is_failed(behave_data):\n return \"failed\" in map(lambda i: i['status'], behave_data)", "title": "" }, { "docid": "a1db33606ef4f133ef43703d56165424", "score": "0.59515023", "text": "def __call__(self):\n for task in self.tasks:\n task()\n if not self.dry_run and task.returncode != 0:\n self.failed_task = task\n break\n if self.failed_task is not None and not self.ignore_errors:\n raise JobRuntimeError", "title": "" }, { "docid": "a1db33606ef4f133ef43703d56165424", "score": "0.59515023", "text": "def __call__(self):\n for task in self.tasks:\n task()\n if not self.dry_run and task.returncode != 0:\n self.failed_task = task\n break\n if self.failed_task is not None and not self.ignore_errors:\n raise JobRuntimeError", "title": "" }, { "docid": "80aac3eff5010422861e6f08efa07872", "score": "0.5950959", "text": "def pass_fail(self, result):\n if result == 'PASS':\n return 1\n if result == 'FAIL':\n return 0\n else:\n return None", "title": "" }, { "docid": "10650502661f3fc2c48fd1affdf5e808", "score": "0.59481573", "text": "def _can_override_failed(self, job: dict) -> Tuple[bool, str]:\n try:\n container_reason = job[\"attempts\"][-1][\"container\"][\"reason\"]\n except (KeyError, IndexError):\n container_reason = \"\"\n\n if DOCKER_INSPECT_ERROR in container_reason:\n redun_job = self.pending_batch_jobs[job[\"jobId\"]]\n assert redun_job.task\n if redun_job.task.script:\n # Script tasks will report their status in a status file.\n status_file = File(\n aws_utils.get_job_scratch_file(\n self.s3_scratch_prefix, redun_job, aws_utils.S3_SCRATCH_STATUS\n )\n )\n if status_file.exists():\n return status_file.read().strip() == \"ok\", container_reason\n else:\n # Non-script tasks only create an output file if it is successful.\n output_file = File(\n aws_utils.get_job_scratch_file(\n self.s3_scratch_prefix, redun_job, aws_utils.S3_SCRATCH_OUTPUT\n )\n )\n return output_file.exists(), container_reason\n\n return False, container_reason", "title": "" }, { "docid": "49a270fef06b767611dfdb709b83e77e", "score": "0.59427625", "text": "def check_result(self):\n for json_path in [self.task_json, self.asset_json,\n self.tips_json]:\n if not os.path.exists(json_path):\n msg = \"Json file is not generated: {0}\".format(json_path)\n return False, msg\n return True, None", "title": "" }, { "docid": "dabdcf46cb02cec23db880fb0340704d", "score": "0.59305775", "text": "def on_failure(self, f):\n self.connect_signal(task_failure, f)\n return f", "title": "" }, { "docid": "ae2cce7b5fe83240814497e7c048c95b", "score": "0.5928106", "text": "def validate(self, task, tasks):", "title": "" }, { "docid": "86c4d8b991abd7fbc9e1b22ccd21d8c0", "score": "0.59032226", "text": "def _test_run_with_short_error_msg(self, task_class):\n task_entry = self._create_input_entry()\n self.define_option_problem(PROBLEM_URL_NAME)\n expected_message = \"x\" * 900\n with pytest.raises(TestTaskFailure):\n self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id, expected_message)\n # compare with entry in table:\n entry = InstructorTask.objects.get(id=task_entry.id)\n assert entry.task_state == FAILURE\n assert 1023 > len(entry.task_output)\n output = json.loads(entry.task_output)\n assert output['exception'] == 'TestTaskFailure'\n assert output['message'] == expected_message\n assert output['traceback'][(- 3):] == '...'", "title": "" }, { "docid": 
"2b51d7a1612b80ba4244c1b2c0c6aa76", "score": "0.58968526", "text": "def return_work_failed(self, results, worker_key):\n pass", "title": "" }, { "docid": "3ed8639cdf87da9e291aa27ae715dca9", "score": "0.58961254", "text": "def failure_reason(self):\n return self.__failure_reason", "title": "" }, { "docid": "22ba3a4627a235aec5a1957960bf8d2b", "score": "0.5888151", "text": "def test_invalid_feed_error_message(self):\n task = self.run_task(gen_repo(importer_config={'feed': utils.uuid4()}))\n\n with self.subTest(comment='check task error description'):\n tokens = ['scheme', 'must', 'be', 'http', 'https', 'file']\n self.assertTrue(\n all(\n [\n token\n in task['error']['description'].lower()\n for token in tokens\n ]\n )\n )", "title": "" }, { "docid": "6312ee3d349848cb0bfff0409433f936", "score": "0.58830255", "text": "def errorExists(self):\n\t\ttry: return self.error != None\n\t\texcept AttributeError: return False", "title": "" }, { "docid": "37c678212eda8ee2e351011dbc2dd8b3", "score": "0.5874994", "text": "def test_run_raises(self):\n with pytest.raises(AssertionError):\n self.task.run(\"something-else\")", "title": "" }, { "docid": "1a9bc03c1dcc461b0d27f5083acdfd8d", "score": "0.5874528", "text": "def wait_until_fails(self, command: str) -> str:\n output = \"\"\n\n def check_failure(_: Any) -> bool:\n nonlocal output\n status, output = self.execute(command)\n return status != 0\n\n with self.nested(\"waiting for failure: {}\".format(command)):\n retry(check_failure)\n return output", "title": "" }, { "docid": "b8d6a6a0359d6ebb0ff5b814e857379e", "score": "0.5871044", "text": "def is_auth_failure(self):\r\n return self.auth_failure", "title": "" }, { "docid": "16c75b2e5f2335661ec6c21d0bb6fad8", "score": "0.5853189", "text": "def test_handle_failed_db_update_task(self):\n\n # Get database update task\n when = now()\n self.assertFalse(self.system_task_mgr._is_db_update_completed)\n task = self.system_task_mgr.get_tasks_to_schedule(when)[0]\n self.assertTrue(task.id.startswith(DB_UPDATE_TASK_ID_PREFIX))\n task_1_id = task.id\n\n # Fail task after running and get different task next time\n task.agent_id = self.agent_id\n self.task_mgr.launch_tasks([task], now())\n update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.RUNNING, now())\n self.task_mgr.handle_task_update(update)\n self.system_task_mgr.handle_task_update(update)\n update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.FAILED, now())\n self.task_mgr.handle_task_update(update)\n self.system_task_mgr.handle_task_update(update)\n\n # No new database update right away\n tasks = self.system_task_mgr.get_tasks_to_schedule(when + datetime.timedelta(seconds=5))\n self.assertListEqual([], tasks)\n self.assertFalse(self.system_task_mgr._is_db_update_completed)\n\n # After error threshold, we should get new database update task\n new_time = when + SystemTaskManager.DATABASE_UPDATE_ERR_THRESHOLD + datetime.timedelta(seconds=5)\n task = self.system_task_mgr.get_tasks_to_schedule(new_time)[0]\n self.assertNotEqual(task.id, task_1_id)\n self.assertTrue(task.id.startswith(DB_UPDATE_TASK_ID_PREFIX))\n self.assertFalse(self.system_task_mgr._is_db_update_completed)", "title": "" }, { "docid": "1ca706a87eb8d39e63eba3db3f8b1d61", "score": "0.584821", "text": "def fail(self, tup_id):\n pass", "title": "" }, { "docid": "c96ffcfc6083d9b6bd02b7df05a3bb8c", "score": "0.58464134", "text": "def to_failed(self):\n self.status = FAILED", "title": "" }, { "docid": 
"d676ac1272b0abe3f74d61fea035cea4", "score": "0.58377147", "text": "def set_fail(self, e, batch):\n print(\"FAIL\", e, batch)\n self.failed = True", "title": "" }, { "docid": "1899e8d662047656c2825bd8fdfeb3c9", "score": "0.58320653", "text": "def __failure(self):\n self._failure_count += 1\n if self._failure_count >= self._failure_threshold:\n self.open()", "title": "" }, { "docid": "05c3080134c510665562180944dc2b07", "score": "0.58244705", "text": "def has_error(self):\n self.get_info()\n\n if 'status' not in self.info:\n return False\n if 'hasError' not in self.info['status']:\n return False\n\n return self.info['status']['hasError']", "title": "" }, { "docid": "e4b995ce0e7a52d7fd9fe54576e4e3ed", "score": "0.58240205", "text": "def _assert_remote_failure( self, role ):\n self._ssh( role, 'true' )\n try:\n self._ssh( role, 'false' )\n self.fail( )\n except SystemExit as e:\n self.assertEqual( e.code, 1 )", "title": "" }, { "docid": "8c2c2ffa74393384cde1772d3c118997", "score": "0.5823604", "text": "async def check_failure(ctx: Context, e: errors.CheckFailure) -> None:\n\n bot_missing_errors = (\n errors.BotMissingPermissions,\n errors.BotMissingRole,\n errors.BotMissingAnyRole,\n )\n\n if isinstance(e, bot_missing_errors):\n ctx.bot.stats.incr(\"errors.bot_permission_error\")\n await ctx.send(\n \"Sorry, it looks like I don't have the permissions or roles I need to do that.\"\n )\n elif isinstance(e, errors.NoPrivateMessage):\n await ctx.send(e)", "title": "" }, { "docid": "1be4799a29146d58bd769492904265d2", "score": "0.58189017", "text": "def check_failures(self) -> None:\n # If the dtypes do not exist return\n if self.dtypes is None:\n return\n # If the upload_failed path does not exist return\n if not os.path.isdir(f\"STORE/upload_failed/{self.name}/{self.stream_type}/\"):\n return\n # Get the list of failed uploads\n failed_list = os.listdir(f\"STORE/upload_failed/{self.name}/{self.stream_type}/\")\n # If there are no failed uploads return\n if len(failed_list) == 0:\n return\n logger = get_logger()\n logger.info((f\"uploading {len(failed_list)} failed upload files found in \"\n \"STORE/upload_failed/{self.name}/{self.stream_type}/\")\n )\n # Compose all the failed uploads into a single DataFrame\n df = self.static_compose_df_from_dir(path=f\"STORE/upload_failed/{self.name}/{self.stream_type}/\",\n fmt=\"parquet\")\n # If the DataFrame is None return\n if df is None:\n return\n # Attempt to case the DataFrame to its expected types\n try:\n df = df.astype(self.dtypes)\n except Exception as e:\n logger.exception(e)\n return\n if self.is_store_instance:\n self.stream(df)\n else:\n self.q.put(df)", "title": "" }, { "docid": "a048bc7fdbec4a4db61d9a07c4e9b9bc", "score": "0.5809967", "text": "def verify(self):\r\n errors = []\r\n for task in self._tasks:\r\n try:\r\n task.verify()\r\n except AssertionError, e:\r\n error = str(e)\r\n if not error:\r\n raise RuntimeError(\"Empty error message from %r\" % task)\r\n errors.append(error)\r\n if errors:\r\n message = [str(self.path)]\r\n for error in errors:\r\n lines = error.splitlines()\r\n message.append(\"- \" + lines.pop(0))\r\n message.extend([\" \" + line for line in lines])\r\n raise AssertionError(os.linesep.join(message))", "title": "" }, { "docid": "a048bc7fdbec4a4db61d9a07c4e9b9bc", "score": "0.5809967", "text": "def verify(self):\r\n errors = []\r\n for task in self._tasks:\r\n try:\r\n task.verify()\r\n except AssertionError, e:\r\n error = str(e)\r\n if not error:\r\n raise RuntimeError(\"Empty error message from %r\" % task)\r\n 
errors.append(error)\r\n if errors:\r\n message = [str(self.path)]\r\n for error in errors:\r\n lines = error.splitlines()\r\n message.append(\"- \" + lines.pop(0))\r\n message.extend([\" \" + line for line in lines])\r\n raise AssertionError(os.linesep.join(message))", "title": "" }, { "docid": "a048bc7fdbec4a4db61d9a07c4e9b9bc", "score": "0.5809967", "text": "def verify(self):\r\n errors = []\r\n for task in self._tasks:\r\n try:\r\n task.verify()\r\n except AssertionError, e:\r\n error = str(e)\r\n if not error:\r\n raise RuntimeError(\"Empty error message from %r\" % task)\r\n errors.append(error)\r\n if errors:\r\n message = [str(self.path)]\r\n for error in errors:\r\n lines = error.splitlines()\r\n message.append(\"- \" + lines.pop(0))\r\n message.extend([\" \" + line for line in lines])\r\n raise AssertionError(os.linesep.join(message))", "title": "" }, { "docid": "e697a4c69b7cbc701fb132b2b2e95e5b", "score": "0.5808749", "text": "def set_test_failure_tryjob_result(self):\n self._set_failure_type('TEST_FAILURE')", "title": "" }, { "docid": "be25f632ad9d1db01fefa79cc68bfbbc", "score": "0.5806069", "text": "def was_successful(self):\n return False", "title": "" }, { "docid": "5fbe07e0c89dea38ba860d2b178932a5", "score": "0.58057576", "text": "async def test_failed_tests(self):\n self.set_source_parameter(\"test_result\", [\"failed\"])\n response = await self.collect(get_request_text=self.JUNIT_XML)\n self.assert_measurement(\n response,\n value=\"1\",\n total=\"5\",\n entities=[dict(key=\"tc3\", name=\"tc3\", class_name=\"cn\", test_result=\"failed\")],\n )", "title": "" }, { "docid": "42ddfa5af8d99d7c862caa7a0b29ba54", "score": "0.5797261", "text": "def assertInstanceFailed(self, instance_name, exc_type):\n # type: (Text, type) -> None\n self.assertInstanceStatus(instance_name, TaskInstance.STATUS_FAILED)\n\n self.assertEqual(\n self.manager.storage[instance_name]\n .metadata[TaskInstance.META_EXCEPTION_TYPE],\n\n '{module}.{name}'.format(\n module = exc_type.__module__,\n name = exc_type.__name__,\n ),\n )", "title": "" }, { "docid": "c2a09ddc2fd7c85dc1dee510dbf74327", "score": "0.5795169", "text": "def _failure(self):\n return []", "title": "" }, { "docid": "cf815204a163b4a1e63de0cf8259616d", "score": "0.5795137", "text": "def failed(self, error_message):\n self.start_now = False\n self.status = self.STATUS.ERROR\n self.error_message = error_message", "title": "" }, { "docid": "2aef2ed837325698e5b93c7f6358c487", "score": "0.5793467", "text": "def test_failed_jobs(self):\n assert self.failed in self.all_statuses, 'A core assumption in the Batch Executor has changed. 
' \\\n 'What happened to the list of statuses or the running state?'\n running_job = BatchJob('CCC', self.failed)\n self.assertEqual(running_job.get_job_state(), State.FAILED)", "title": "" }, { "docid": "acd7cdb5bc0a386169f64121a78fef07", "score": "0.5791061", "text": "def timed_out(self):\n elapsed = time.time() - self.start_time\n if elapsed >= self.task_manager_timeout:\n logger.warning(f\"{self.prefix} manager has run for {elapsed} which is greater than TASK_MANAGER_TIMEOUT of {settings.TASK_MANAGER_TIMEOUT}.\")\n return True\n return False", "title": "" }, { "docid": "421e03bf72b382467e9d90870db04523", "score": "0.5784614", "text": "def failure_reason(self):\n return self._failure_reason", "title": "" }, { "docid": "0f193af9e19e9d3f2d26ab4a35d4940a", "score": "0.5783912", "text": "def check_ok (result):\n\treturn not result is None and (not \"due to errors\" in result)", "title": "" }, { "docid": "56934ccec973b433634b6d6b9bfae2fa", "score": "0.57652456", "text": "def pass_fail(self, success):\n return \"PASS\" if success else \"FAIL\"", "title": "" }, { "docid": "2fca8beb6d41b456d3322880cf35ee08", "score": "0.5754738", "text": "def handle_fail():\n fail = True\n while not task_queue.empty():\n try:\n task_queue.get(False)\n except Empty:\n continue\n task_queue.task_done()", "title": "" }, { "docid": "99b186a000b9ed8c925842b54a61d362", "score": "0.5750136", "text": "def on_batch_job_failure(cls):\n # TODO(sll): Alert the site admin via email.\n logging.error('Job %s failed.' % cls.__name__)\n job_status = cls._register_end_of_batch_job_and_return_status()\n if job_status == job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_RUNNING:\n cls._kickoff_batch_job_after_previous_one_ends()", "title": "" } ]
1f741524474d86566b65cfc9c0d6fb19
Gets the mapping of pickleable python attribute name and values.
[ { "docid": "00f6279d2da02e8235168a49f842043f", "score": "0.5921677", "text": "def _get_python_attributes(self):\r\n python_attrs = dict()\r\n for key, value in vars(self).iteritems():\r\n if key.startswith('_'):\r\n continue\r\n if isinstance(value, hou.Node):\r\n continue\r\n python_attrs[key] = value\r\n return python_attrs", "title": "" } ]
[ { "docid": "26012275e08f96726959da3317a3d693", "score": "0.67514306", "text": "def attributes(self) -> t.Dict[str, str]:", "title": "" }, { "docid": "9449c140f2f9e78549c886a816759fab", "score": "0.6637769", "text": "def get_attributes(self) -> Mapping[str, typing.Any]:\n return MappingProxyType(self._get_attributes())", "title": "" }, { "docid": "6c806a0459284571b04f0623a17eab0b", "score": "0.658317", "text": "def attr_info(self) -> typing.Mapping[str, AttributeInfo]:\n return MappingProxyType(\n {\n attr_name: AttributeInfo(self.bind[attr_name].attrs)\n for attr_name in self.list_attributes()\n }\n )", "title": "" }, { "docid": "51c007b7108c51d14690d61111aa92af", "score": "0.6560819", "text": "def ldap_attribute_map(cls) -> Dict[str, str]:\n return {k: v.ldap_key for (k, v) in vars(cls).items()\n if isinstance(v, Attribute)}", "title": "" }, { "docid": "91f34724c159ef254b9c63175b743c12", "score": "0.6513589", "text": "def get_stored_dict(self):\n return {x: getattr(self, x)() for x in self._stored_attrs}", "title": "" }, { "docid": "5f123556f6ed6c30dcf09f5bc3ecedbb", "score": "0.6438715", "text": "def _getAttrMap(self):\r\n if not getattr(self, 'attrMap'):\r\n self.attrMap = {}\r\n for (key, value) in self.attrs:\r\n self.attrMap[key] = value \r\n return self.attrMap", "title": "" }, { "docid": "d807f7f2d164507698d46ae77018957f", "score": "0.6434606", "text": "def attrs(self) -> typing.Mapping[str, DatasetAttribute]:\n return self._mapper.view()", "title": "" }, { "docid": "d9a7890e12a11d59d799df0a8358f28c", "score": "0.6351924", "text": "def _get_attributes(self) -> typing.Dict[str, typing.Any]:\n raise NotImplementedError()", "title": "" }, { "docid": "50850f487491251292d53f748f62098f", "score": "0.6341671", "text": "def get_attributes(cls) -> Dict[str, Attribute[Any]]:\n attributes = OrderedDict()\n for name, value in vars(cls).items(): # noqa: WPS421\n if isinstance(value, Attribute):\n attributes[name] = value\n return attributes", "title": "" }, { "docid": "240330e66cae1561a4870b80a79cfa19", "score": "0.6235785", "text": "def as_dict(self):\n return {attr : getattr(self, attr) for attr in self.attrs}", "title": "" }, { "docid": "384a9e218a9e61959ef8d6a04243a626", "score": "0.621797", "text": "def get_attribute_data(self):\n ret = {}\n\n for attribute in Attribute.objects.all().prefetch_related(\"value_choices\"):\n deserialized_value = None\n\n if attribute.value_type == Attribute.TYPE_GEOMETRY:\n geometry = ProjectAttributeMultipolygonGeometry.objects.filter(\n attribute=attribute, project=self\n ).first()\n if not geometry:\n continue\n deserialized_value = geometry.geometry\n elif attribute.value_type in [Attribute.TYPE_IMAGE, Attribute.TYPE_FILE]:\n try:\n deserialized_value = ProjectAttributeFile.objects.get(\n attribute=attribute, project=self\n ).file\n except ProjectAttributeFile.DoesNotExist:\n deserialized_value = None\n elif attribute.identifier in self.attribute_data:\n deserialized_value = attribute.deserialize_value(\n self.attribute_data[attribute.identifier]\n )\n\n ret[attribute.identifier] = deserialized_value\n return ret", "title": "" }, { "docid": "0782d6881d4de0ed8829253ed48c3b33", "score": "0.6196533", "text": "def as_dict(self):\n d = {}\n for a in self.InstAttr | self.MetaAttr:\n d[a] = getattr(self, a)\n return d", "title": "" }, { "docid": "e2621f47a85e4dcfc50d02e03270dde9", "score": "0.6196316", "text": "def get_attrs(self) -> Mapping:\n return self.dict(include=set(self._attributes))", "title": "" }, { "docid": "44f6e08da925aecf2752ed9833816054", 
"score": "0.6184532", "text": "def attributes(self):\n if self._attributes is None:\n self._attributes = {}\n rows = db.model_query(db.Attribute).filter_by(\n uuid=self.uuid)\n for row in rows:\n self._attributes.setdefault(row.name, []).append(row.value)\n return self._attributes", "title": "" }, { "docid": "58f50a06749369b5711039460e0d6635", "score": "0.6173073", "text": "def attr(self) -> google.protobuf.internal.containers.MessageMap[builtins.str, tensorflow.core.framework.attr_value_pb2.AttrValue]:", "title": "" }, { "docid": "fc754400d72b4344b0313d0f3e59f64a", "score": "0.61281246", "text": "def get_attributes(self):\n\n return {\n 'compound': self.compound,\n 'properties': self.properties,\n 'T_range': self.T_range,\n 'n_points': self.n_points,\n 'steps': self.steps,\n 'swap_freq': self.swap_freq,\n 'biasing_factor': self.biasing_factor}", "title": "" }, { "docid": "b8e6a56ff173911dc3cb05977a57c309", "score": "0.61229056", "text": "def to_map(self, key_attribute: str):\n map_ = {}\n for item in self.list:\n key = getattr(item, key_attribute, None)\n if key:\n map_[key] = item\n\n return map_", "title": "" }, { "docid": "93987efe254a21c65b206ae12652e158", "score": "0.60838", "text": "def stored_attributes(self):\n result = []\n result.append('key')\n result.extend(self.get_attribute_names_defined_in_store())\n return result", "title": "" }, { "docid": "14a0e8be1762011f1b6ed47fb86c0815", "score": "0.60629773", "text": "def attributes_dict(self):\n h = {}\n for k in self.attributes.keys():\n h[k] = getattr(self, k)\n for k in self.lists.keys():\n h[k] = getattr(self, k)\n for k in self.references.keys():\n h[k] = getattr(self, k)\n return h", "title": "" }, { "docid": "5e7f238902bddae9e0a6c2e102987f16", "score": "0.60626084", "text": "def identifiers(self) -> typing.Mapping[str, str]: # pylint: disable=function-redefined\n return {\n attr_name: getattr(self, attr_name)\n for attr_name in self.info.get(\"identifiers\", self.info.get(\"params\", []))\n if attr_name in self.bind\n }", "title": "" }, { "docid": "6e430e6d15d5c8496767b28ee1628aec", "score": "0.6055799", "text": "def json(self):\n\t\treturn {p: self.__getattribute__(p) for p in self.__slots__}", "title": "" }, { "docid": "4d873a5043d6c4950f24b7bcf22ec799", "score": "0.6011129", "text": "def get_attributes(self):\n attributes = OrderedDict()\n attributes['init'] = self._init\n attributes['lb'] = self._lb\n attributes['ub'] = self._ub\n return attributes", "title": "" }, { "docid": "2d3aaf7228394033604e05de8516fba2", "score": "0.5997634", "text": "def attributes(cls):\n return dict(cls._attributes)", "title": "" }, { "docid": "e3c04fd050657d7178ee9b8a5175d903", "score": "0.5995523", "text": "def as_dict(self) -> t.Dict[str, t.Any]:\n\n return {k: getattr(self, k, None) for k in self.fields_map.keys()}", "title": "" }, { "docid": "591760bf7f597bc9d95481341038581f", "score": "0.59728444", "text": "def get_all_attributes(self):\r\n return self.__key + self.__value", "title": "" }, { "docid": "ec2d2bc25bb81d039de2bdcd699964a8", "score": "0.5952853", "text": "def __dict__(self):\n return dict([(field, getattr(self, \"%s\" %field))\n for field in self.__class__.DICT_FIELDS])", "title": "" }, { "docid": "2e67f823017cc019158ce7329e2e3549", "score": "0.59505326", "text": "def extract_mapping(self):\n return {\n self.type_name: self.fields\n }", "title": "" }, { "docid": "d7687de833741aede5959dc91989de6a", "score": "0.59490645", "text": "def items(self) -> Dict:\n\n return {key: getattr(self.attribute, key)\n for key in 
dir(self.attribute)\n if not key.startswith(\"_\")}", "title": "" }, { "docid": "5e43861f026c6e427469ec8a1cb21132", "score": "0.58994234", "text": "def _attrs(self):\n return self._dets.keys()", "title": "" }, { "docid": "9f5845575e7c910eb9966d7fc1d4594c", "score": "0.5897431", "text": "def state_attributes(self):\r\n return {self.id: self.value}", "title": "" }, { "docid": "030d2b2ab611343f46ef360862047723", "score": "0.5890485", "text": "def map_attribute(self, attribute, value):\n return {}", "title": "" }, { "docid": "030d2b2ab611343f46ef360862047723", "score": "0.5890485", "text": "def map_attribute(self, attribute, value):\n return {}", "title": "" }, { "docid": "2ddc0886368a97497ca1d68b13aced34", "score": "0.5881178", "text": "def values(self):\r\n return [getattr(self, k) for k in self.keys()]", "title": "" }, { "docid": "47828414495de3388024e5b9e62e2749", "score": "0.58804315", "text": "def get_attributes(self):\n for _, an_object in self.objects_data.items():\n return list(an_object[\"attributes\"].keys())", "title": "" }, { "docid": "298839d297eec87c3650fd888371e046", "score": "0.5874971", "text": "def toDict(self: object) -> dict:\n\n return {key: self.__getattribute__(key) for key in self.__slots__}", "title": "" }, { "docid": "8ae795368b438b54ac8a029ffb2eda8b", "score": "0.5861259", "text": "def attr_dict(self):\n \n base_keys = set(Info().__dict__.keys())\n self_keys = set(self.__dict__.keys())\n res = {}\n for key in self_keys.difference(base_keys):\n val = self.__dict__[key]\n try:\n res[key] = val.attr_dict()\n except AttributeError:\n res[key] = val\n \n return(res)", "title": "" }, { "docid": "93e3c04639db2c8f05e599a26dd74f29", "score": "0.5848899", "text": "def _getAttributeNames(self, *args, **kwargs):\n return map(_namify, self.keys())", "title": "" }, { "docid": "9fe19048c6efa3c780c5bdf2d5756718", "score": "0.5836906", "text": "def get_attributes (self):\n return (self.attr)", "title": "" }, { "docid": "959be0b60913afda19e98ad6e61ef0e7", "score": "0.583006", "text": "def list_attributes(self) -> List[str]:\n return list(self.attrs.keys())", "title": "" }, { "docid": "75562bbfa0bda5b3d65674b043095101", "score": "0.5819719", "text": "def get_attribute_names_defined_in_store(self):\n pass", "title": "" }, { "docid": "398bbeeb0936e837a978a4cbbb050f29", "score": "0.58195925", "text": "def list_attrs(self):\n return self._attributes.keys()", "title": "" }, { "docid": "2f9c60ec85dfd737ea7bd15c88bdc963", "score": "0.5804851", "text": "def instance_attributes(self):\n return self.infer().instance_attributes()", "title": "" }, { "docid": "15b23b9a59f1aa39d8b5cdf3351eb748", "score": "0.5787162", "text": "def asdict(self):\n return attr.asdict(self)", "title": "" }, { "docid": "b9ab4be0fc865e071d657d9b0590d25c", "score": "0.57617927", "text": "def _values(self):\n return [self._node.getAttribute(k) for k in self._node.attributes.keys()]", "title": "" }, { "docid": "9fe6c903dfa6da3c050e5c0ca91e954f", "score": "0.576031", "text": "def maps(self) -> Tuple[Dict[str, str], Dict[str, str], Dict[str, float]]:\n sources = {}\n targets = {}\n weights = {}\n for e in self.edges.data():\n label = e[2][self.attr_label]\n sources[label] = e[0]\n targets[label] = e[1]\n weights[label] = e[2][self.attr]\n return sources, targets, weights", "title": "" }, { "docid": "271b84d1b89b665b117b3c1aeaa9d2f4", "score": "0.5759584", "text": "def attrs(self):\n return self.__dict__.keys()", "title": "" }, { "docid": "2afcb181b0b2f4467a484a5e9318a8b6", "score": "0.57474965", "text": "def 
object_as_dict(obj):\n return {c.key: getattr(obj, c.key)\n for c in inspect(obj).mapper.column_attrs}", "title": "" }, { "docid": "de3600a662a8244107357fd0e4a7c10e", "score": "0.57365453", "text": "def toDict(self):\n dict = {}\n for attr, value in self.__dict__.items():\n corrected_name = attr[1:] # remove first underscore\n dict[corrected_name] = value.__str__()\n return dict", "title": "" }, { "docid": "deb684948980db8e30bb8a211de71c62", "score": "0.5725433", "text": "def to_dictionary(self):\n my_attri = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n val_attr = 0\n dic_ret = {}\n\n for i in range(len(my_attri)):\n val_attri = getattr(self, my_attri[i])\n dic_ret[my_attri[i]] = val_attri\n\n return dic_ret", "title": "" }, { "docid": "0ef553a53de21530358f21fe6ad3f8c6", "score": "0.572354", "text": "def to_dict(self):\n return dict((str(f.field.name), unicode(f.value)) \\\n for f in self.attributes.values() \\\n if f.value is not None)", "title": "" }, { "docid": "69dd37a171feb4284d1814e5c530b7ea", "score": "0.5721035", "text": "def attributes(cls):\n return tuple(cls.__dict__.get(\"__fields__\", {}).keys())", "title": "" }, { "docid": "9b19ebf24ac628e7e26724394ed4bdf7", "score": "0.57191956", "text": "def _get_field_data(self):\n return {field_name: getattr(self, field_name) for field_name in self._meta.fields.keys()}", "title": "" }, { "docid": "6e69870c5e38483a59f2ec49041f27bf", "score": "0.56885177", "text": "def get_var_mapping_dict(self):\r\n # type: () -> dict\r\n return self._map_variable_dict", "title": "" }, { "docid": "bf15a63f9551f3d51c2115c5f13c2fce", "score": "0.5674434", "text": "def as_dict(self) -> dict:\n\n return {k: getattr(self, k) for k in list(vars(self).keys())}", "title": "" }, { "docid": "1b5fe9fc43051c434a353077a6a49c5e", "score": "0.5659958", "text": "def get_dict(self) -> dict:\n d = {}\n for attribute, value in self.__dict__.items():\n d[attribute] = value\n\n return d", "title": "" }, { "docid": "987a817a109f495a131799d19da19932", "score": "0.5657744", "text": "def get_attributes(self):\n attribute_map = {\n 'urn:oid:0.9.2342.19200300.100.1.1': 'uwnetid',\n 'urn:oid:1.3.6.1.4.1.5923.1.1.1.1': 'affiliations',\n 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6': 'eppn',\n 'urn:oid:1.3.6.1.4.1.5923.1.1.1.9': 'scopedAffiliations'\n }\n return {attribute_map.get(key, key): value\n for key, value in super().get_attributes().items()}", "title": "" }, { "docid": "5722c78f1ead7f56a65af45c2b06be68", "score": "0.56549287", "text": "def attrs(self):\n return self._obj.coords[GEO_MAP_COORD].attrs", "title": "" }, { "docid": "7cec7998715a91132817b5febdc96b30", "score": "0.5651105", "text": "def names(self) -> List[str]:\n\n return list(self.attribute.keys())", "title": "" }, { "docid": "3d0c81c5c2e8cfef739ee8c3cc1be3fe", "score": "0.5648379", "text": "def to_map(self):\n obj = {}\n self._add_defined_attribs(obj, self._yaml_map().keys())\n return obj", "title": "" }, { "docid": "344060bddb57a44a7fd8f7c75452ef1e", "score": "0.56431675", "text": "def __getstate__(self):\n return {k: getattr(self, k) for k in self.__slots__}", "title": "" }, { "docid": "bbe61037919116b6e4948e869f9cc449", "score": "0.5637252", "text": "def test_can_pickle_attributedictionary(self):\n d = AttributeDictionary(hello=\"world\")\n self.assertEqual(d['hello'],\"world\")\n pickled = pickle.dumps(d)\n unpickled = pickle.loads(pickled)\n self.assertTrue(isinstance(unpickled,AttributeDictionary))\n self.assertEqual(unpickled['hello'],\"world\")\n self.assertEqual(unpickled.hello,\"world\")", "title": "" }, { 
"docid": "7ce099564c4fc3741529be96145f64b9", "score": "0.56315374", "text": "def export(self) -> Dict[str, Any]:\n values: Dict[str, Any] = {}\n for attribute, value in self._values.items():\n if isinstance(value, Exportable):\n exported = value.export()\n if exported is not None:\n values[attribute.name] = exported\n elif value is not None:\n values[attribute.name] = value\n values.update(self._extra)\n return values", "title": "" }, { "docid": "e02f0b7d7d6a52e5fc177e67a52fc39a", "score": "0.5627427", "text": "def get_attribute_values(obj):\n return [value for key, value in obj.__dict__.items() if not key.startswith(\"__\")]", "title": "" }, { "docid": "5ef69c9015a3c5ac1bccb6f8bc51cc23", "score": "0.56218684", "text": "def to_dict(self) -> dict:\n return {field.name: getattr(self, field.name) for field in fields(self)}", "title": "" }, { "docid": "d2cb52f25592e8cd4359329262b6cd8b", "score": "0.5614424", "text": "def column_name_mapping_dict(self):\n return self.__column_name_mapping_dict.output", "title": "" }, { "docid": "5c3cc5cedadc6a455c1a5c56b1256b4a", "score": "0.5609309", "text": "def trainable_variables(self):\n # Override this because reflection seems not to work.\n return dict((k, v)\n for k, v in self.keyed_variables.items()\n if self.task_attribute_name in k)", "title": "" }, { "docid": "4a5a99650bd5c76ae50c62e0572146de", "score": "0.55903196", "text": "def __serialize__(self):\n return {k: v for k, v in self.items()}", "title": "" }, { "docid": "bbe18e7d5659d0151cba64ecd1ee1e78", "score": "0.55876184", "text": "def get_custom_attributes(self):\n return self[\"custom_attributes\"]", "title": "" }, { "docid": "3a253684180363c3e9e1735eec27a157", "score": "0.55866903", "text": "def dataclass_quick_asdict(obj) -> Dict[str, Any]:\n d = {f.name: getattr(obj, f.name) for f in dataclasses.fields(obj)}\n return d", "title": "" }, { "docid": "301259dfdfdde15a152609404a29f6bc", "score": "0.5578841", "text": "def _items(self):\n return [(k, self._node.getAttribute(k)) for k in self._node.attributes.keys()]", "title": "" }, { "docid": "b38f12b4c7e8b0dbf2418ff3bf162e25", "score": "0.55710065", "text": "def __getstate__(self):\n attrs = {}\n if hasattr(self, \"__dict__\"):\n attrs.update(self.__dict__)\n slots = []\n c = self.__class__\n while c is not None:\n if hasattr(c, \"__slots__\"):\n slots.extend(c.__slots__)\n c = c.__base__\n for name in slots:\n if hasattr(self, name):\n attrs[name] = getattr(self, name)\n return attrs", "title": "" }, { "docid": "5467039058f1d3f85303e1e421f5a9fe", "score": "0.5557673", "text": "def as_dict(self) -> dict[str, Any]:\n attrs = vars(self)\n return {key: attrs[key] for key in attrs if not key.startswith(\"_\")}", "title": "" }, { "docid": "1ed1d7153b8ca75c13bad8c387279a5b", "score": "0.5557543", "text": "def to_json(self):\n return dict((attr, getattr(self, attr))\n for attr in self.__attributes__)", "title": "" }, { "docid": "1ed1d7153b8ca75c13bad8c387279a5b", "score": "0.5557543", "text": "def to_json(self):\n return dict((attr, getattr(self, attr))\n for attr in self.__attributes__)", "title": "" }, { "docid": "6c4468d356ce3eb8dfe201c3e4172cb8", "score": "0.5555928", "text": "def attributes(self, alias=None):\n cls = _inheritor(_Attribute, self.ID)\n return OrderedDict(\n (name, cls(name, source=(alias or self._name)))\n for name in self._schema.columns(self._name)['name']\n )", "title": "" }, { "docid": "fc1e2dc2a004c0684443c61c4e145f76", "score": "0.5554138", "text": "def attr_names(self):\n l = self.attrs.keys()\n l.sort()\n return l", "title": 
"" }, { "docid": "fc4f8ef97a55a55ba41c35249067996c", "score": "0.55302197", "text": "def getAttributes(self):\n\t\treturn self._attributes", "title": "" }, { "docid": "6d7966d57dd19ac870eecdfc77aa6ec6", "score": "0.5521307", "text": "def _toDict(self):\n return {\n 'name': self._name,\n 'colors': self.getColormapLUT(),\n 'vmin': self._vmin,\n 'vmax': self._vmax,\n 'autoscale': self.isAutoscale(),\n 'normalization': self.getNormalization(),\n 'autoscaleMode': self.getAutoscaleMode(),\n }", "title": "" }, { "docid": "1bddb2dc34b25fbd8d554df54702165f", "score": "0.55179185", "text": "def state_attributes(self):\r\n data = {}\r\n data[self.id] = self.value\r\n data[self.name + \"_modes\"] = self.values\r\n return data", "title": "" }, { "docid": "ac0f804e8d418b1e279c333a79c0afbc", "score": "0.5505109", "text": "def attrs(self):\n assert isinstance(self._attrs, list)\n return dict(self._attrs)", "title": "" }, { "docid": "8d17b4cc12250e5d40fb7622b18f2b26", "score": "0.5500228", "text": "def keys_python(self) -> Iterable[str]:\n yield from dejsonify_attribute_names(self.keys())", "title": "" }, { "docid": "8d17b4cc12250e5d40fb7622b18f2b26", "score": "0.5500228", "text": "def keys_python(self) -> Iterable[str]:\n yield from dejsonify_attribute_names(self.keys())", "title": "" }, { "docid": "a1714d49568ad2509605777f2d840c17", "score": "0.5483594", "text": "def get_mapping(self):", "title": "" }, { "docid": "2f3ef00fe73d775c04f2097a37bd030f", "score": "0.5481292", "text": "def to_dict(self):\n return {'name': self.name, 'value': self.value}", "title": "" }, { "docid": "0951a32461a4ec6f999cf54c848e1917", "score": "0.5478671", "text": "def signable_dict(self):\n\n return attr.asdict(self)", "title": "" }, { "docid": "99e8834a52ddb61940511f8e103d2bd9", "score": "0.54667675", "text": "def get(self):\n\n d = {}\n for i in self.__list:\n d[i[0]] = getattr(self, i[0])\n return d", "title": "" }, { "docid": "2af288108c5712db6c82ed4c05ed859b", "score": "0.54587424", "text": "def get_class_attributes(self):\n return {\n \"base\": self.base,\n \"internal_type\": self.internal_type,\n \"precision_fractional\": self.precision_fractional,\n }", "title": "" }, { "docid": "7478e09b4e4c96346c16bea888693b7c", "score": "0.5455201", "text": "def __getstate__(self):\n state = {}\n for entry in self.__slots__:\n state[entry] = getattr(self, entry)\n\n return state", "title": "" }, { "docid": "8cb735772f7fdb94941f55277d366878", "score": "0.54495835", "text": "def as_dict(self):\n \n return {c.name: getattr(self, c.name) for c in self.__table__.columns}", "title": "" }, { "docid": "881a27352016a5006e9f74b08f318ea3", "score": "0.5440619", "text": "def __getstate__(self):\n state={}\n for attrname in self._expected_attrs:\n state[attrname]=getattr(self,attrname)\n return state", "title": "" }, { "docid": "a460d55adbdd2d9f78bec303b04bd69e", "score": "0.54390496", "text": "def to_dict(self) -> dict:\n mapper = self.__mapper__\n attribute_keys = mapper.columns.keys() + mapper.relationships.keys()\n\n return {k: getattr(self, k, None) for k in attribute_keys}", "title": "" }, { "docid": "562184d8d770035fe5317be12d692f6f", "score": "0.54379374", "text": "def to_dict(self):\n data = {\n \"type\": self.__class__.__name__\n }\n\n for key in self.__class__.DATA_KEYS:\n data[key] = getattr(self, key)\n\n return data", "title": "" }, { "docid": "e6fed61bb57d9d74027ac4eeeb32c8dd", "score": "0.5436603", "text": "def get_mbean_attributes(self):\n _method_name = 'get_mbean_attributes'\n _logger.entering(class_name=self.__class__.__name__, 
method_name=_method_name)\n map_to_list = list()\n attributes = self.__get_mbean_info_map()\n if len(attributes) > 0:\n map_to_list = [attribute for attribute in attributes.iterkeys()]\n map_to_list.sort()\n _logger.exiting(class_name=self.__class__.__name__, method_name=_method_name, result=len(map_to_list))\n return map_to_list", "title": "" }, { "docid": "b8e6ee57d60a5d384d854a9efa4c3737", "score": "0.54333323", "text": "def get_attributes(self):\n return self.attribs", "title": "" }, { "docid": "f5c8b3ae8d1dba96db6d379d02303085", "score": "0.54287755", "text": "def as_dict(self):\n\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}", "title": "" }, { "docid": "68b5cd9a262481536074ecc991200e11", "score": "0.5409635", "text": "def AttrDict(slots = tuple()):\n return _get_attr_dict_factory(slots)()", "title": "" }, { "docid": "052f974cb84d7a51d6880cc433f9db05", "score": "0.54028714", "text": "def getAttrsAsDict(self, attrs):\n\t\treturn dict((k.split(\":\")[-1], v) for k, v in attrs.items())", "title": "" }, { "docid": "05d26097faa72a8a6f61f8f847431166", "score": "0.5399544", "text": "def _get_type_attribute_dict(self, type_name: str) -> Dict[str, Dict]:\n return self._get_type_info(type_name)[constants.ATTR_INFO_KEY]", "title": "" }, { "docid": "34138c00d46b665c0405daf20ea86678", "score": "0.53984386", "text": "def get_meta_attribute_info(self):\n return self.__meta_attribute_info", "title": "" }, { "docid": "f502343df565934f8a988965cd93c912", "score": "0.53967124", "text": "def get_label_map(self):\n trainable_classes = self.read_trainable_classes()\n label_map, reverse_label_map = {}, {}\n index = 0\n\n for key, cls_name in trainable_classes.items():\n label_map[key] = index\n reverse_label_map[index] = key\n index += 1\n\n return label_map, reverse_label_map", "title": "" } ]
daf973153a7da1e68c0c9d42349d0b2c
Function to calculate the conditional priors p(xp|xeff,q) on a set of {xp,xeff,q} posterior samples. INPUTS
[ { "docid": "f87f1caa5f85a8a02591eeabab79177f", "score": "0.5392014", "text": "def joint_prior_from_isotropic_spins(q,aMax,xeffs,xps,ndraws=10000,bw_method='scott'):\n\n # Convert to arrays for safety\n xeffs = np.reshape(xeffs,-1)\n xps = np.reshape(xps,-1)\n \n # Compute marginal prior on xeff, conditional prior on xp, and multiply to get joint prior!\n p_chi_eff = chi_effective_prior_from_isotropic_spins(q,aMax,xeffs)\n p_chi_p_given_chi_eff = np.array([chi_p_prior_given_chi_eff_q(q,aMax,xeffs[i],xps[i],ndraws,bw_method) for i in range(len(xeffs))])\n joint_p_chi_p_chi_eff = p_chi_eff*p_chi_p_given_chi_eff\n\n return joint_p_chi_p_chi_eff", "title": "" } ]
[ { "docid": "3df82a3c097cc9531b6d612c43603726", "score": "0.6071597", "text": "def chi_p_prior_given_chi_eff_q(q,aMax,xeff,xp,ndraws=10000,bw_method='scott'):\n\n # Draw random spin magnitudes.\n # Note that, given a fixed chi_eff, a1 can be no larger than (1+q)*chi_eff,\n # and a2 can be no larger than (1+q)*chi_eff/q\n a1 = np.random.random(ndraws)*aMax\n a2 = np.random.random(ndraws)*aMax\n\n # Draw random tilts for spin 2\n cost2 = 2.*np.random.random(ndraws)-1.\n\n # Finally, given our conditional value for chi_eff, we can solve for cost1\n # Note, though, that we still must require that the implied value of cost1 be *physical*\n cost1 = (xeff*(1.+q) - q*a2*cost2)/a1 \n\n # While any cost1 values remain unphysical, redraw a1, a2, and cost2, and recompute\n # Repeat as necessary\n while np.any(cost1<-1) or np.any(cost1>1): \n to_replace = np.where((cost1<-1) | (cost1>1))[0] \n a1[to_replace] = np.random.random(to_replace.size)*aMax\n a2[to_replace] = np.random.random(to_replace.size)*aMax\n cost2[to_replace] = 2.*np.random.random(to_replace.size)-1. \n cost1 = (xeff*(1.+q) - q*a2*cost2)/a1 \n \n # Compute precessing spins and corresponding weights, build KDE\n # See `Joint-ChiEff-ChiP-Prior.ipynb` for a discussion of these weights\n Xp_draws = chi_p_from_components(a1,a2,cost1,cost2,q)\n jacobian_weights = (1.+q)/a1\n prior_kde = gaussian_kde(Xp_draws,weights=jacobian_weights,bw_method=bw_method)\n\n # Compute maximum chi_p\n if (1.+q)*np.abs(xeff)/q<aMax:\n max_Xp = aMax\n else:\n max_Xp = np.sqrt(aMax**2 - ((1.+q)*np.abs(xeff)-q)**2.)\n\n # Set up a grid slightly inside (0,max chi_p) and evaluate KDE\n reference_grid = np.linspace(0.05*max_Xp,0.95*max_Xp,50)\n reference_vals = prior_kde(reference_grid)\n\n # Manually prepend/append zeros at the boundaries\n reference_grid = np.concatenate([[0],reference_grid,[max_Xp]])\n reference_vals = np.concatenate([[0],reference_vals,[0]])\n norm_constant = np.trapz(reference_vals,reference_grid)\n\n # Interpolate!\n p_chi_p = np.interp(xp,reference_grid,reference_vals/norm_constant)\n return p_chi_p", "title": "" }, { "docid": "61ec7af6b4c3bc911ad127608246f670", "score": "0.6038365", "text": "def P(self,x):\n indx = np.arange(len(x))\n ps = self.pX[x,indx] #probability of xi for each i\n joint = ps.prod()\n pi = np.true_divide(joint,ps) # will be nan for elements for which ps is 0 -(should never happen if sampling)\n for j in np.where(np.isnan(pi))[0]:\n pi[j] = np.prod(ps[indx != j]) \n pij = np.vstack((pi,pi))\n pij[1-x,indx] = 0\n pij = pij.reshape((len(x)*2,)) #flatten first N-1 will be px=0,2nd px=1\n pobserve = joint# the probability of x given do()\n result = np.hstack((pij,pobserve))\n return result", "title": "" }, { "docid": "ee8e5436eadc7abc54b3589b8a0c9c10", "score": "0.5970335", "text": "def ppf(self, q, *args, **kwargs):\n return self.scipy_distribution.ppf(q, *args, **kwargs)", "title": "" }, { "docid": "3fb61e35f7c700c13f41535bc1474682", "score": "0.5799747", "text": "def evaluate_p_all(self, p_in):\n return np.exp(self.evaluate_logp_all(p_in))", "title": "" }, { "docid": "853f2e1b6db1dcf8b3a9c0bac2ad768b", "score": "0.57089645", "text": "def bigauss(x, p):\n return (x < p[1]).choose([\n p[0] * exp(-twolntwo * (x - p[1])**2 / (p[2] * (1 + p[3])**2)) + p[4],\n p[0] * exp(-twolntwo * (x - p[1])**2 / (p[2] * (1 - p[3])**2)) + p[4]\n ])", "title": "" }, { "docid": "cae99ff6f62dc61786fbcff772f08977", "score": "0.5703504", "text": "def compute_pvalue(expression, thresh, alpha=0.05):\n\n #count the cases\n #ndown = [1 if exp_i <= 
thresh_i else 0 for exp_i in zip(expression, thresh_i)]\n ndown = expression[expression <= - thresh].shape[0]\n nup = expression[expression >= thresh].shape[0]\n nneutral = expression.shape[0] - ndown - nup\n\n #get the one with most peptide\n ns = np.array([nup, ndown, nneutral])\n max_idx = np.argmax(ns)\n\n\n if max_idx == 0:\n direction = \"up\"\n\n elif max_idx == 1:\n direction = \"down\"\n\n else:\n direction = \"neutral\"\n\n #n number of trails for binomial\n ntrials = len(expression)\n #k success\n ksuccess = ns[max_idx]\n x = np.arange(ksuccess, ntrials + 1)\n\n #compute the pvalue as number of cases as extreme (and higher)\n #as the one observed\n pvalue = np.sum(stat.binom.pmf(x, ntrials, alpha))\n\n res_vec = pd.Series([pvalue, direction, ntrials, ksuccess])\n res_vec.index([\"pvalue\", \"direction\", \"ntrials\", \"ksuccess\"])\n return(res_vec)", "title": "" }, { "docid": "001751d13783817a650eb4e3fd46ec9f", "score": "0.5665702", "text": "def chi_p_prior_from_isotropic_spins(q,aMax,xs):\n\n # Ensure that `xs` is an array and take absolute value\n xs = np.reshape(xs,-1)\n\n # Set up various piecewise cases\n pdfs = np.zeros(xs.size)\n caseA = xs<q*aMax*(3.+4.*q)/(4.+3.*q)\n caseB = (xs>=q*aMax*(3.+4.*q)/(4.+3.*q))*(xs<aMax)\n\n # Select relevant effective spins\n x_A = xs[caseA]\n x_B = xs[caseB]\n\n pdfs[caseA] = (1./(aMax**2*q))*((4.+3.*q)/(3.+4.*q))*(\n np.arccos((4.+3.*q)*x_A/((3.+4.*q)*q*aMax))*(\n aMax\n - np.sqrt(aMax**2-x_A**2)\n + x_A*np.arccos(x_A/aMax)\n )\n + np.arccos(x_A/aMax)*(\n aMax*q*(3.+4.*q)/(4.+3.*q)\n - np.sqrt(aMax**2*q**2*((3.+4.*q)/(4.+3.*q))**2 - x_A**2)\n + x_A*np.arccos((4.+3.*q)*x_A/((3.+4.*q)*aMax*q))\n )\n )\n \n pdfs[caseB] = (1./aMax)*np.arccos(x_B/aMax)\n\n return pdfs", "title": "" }, { "docid": "5cfeaae4232a479df9325334fc6e2ea9", "score": "0.56629515", "text": "def exbayesdices():\n hypos = [4,6,8,12,20]\n def dlike(d,h):\n if (d>h): return 0.\n return 1./(1.*h)\n ys = [1./len(hypos)]*len(hypos)\n zs = zip(hypos,ys)\n xfun = psutils.Funxy_xdiscrete(zs)\n pb = psbayes.PBayes(xfun,hypos,dlike)\n datas = [6,6,8,7,7,5,4]\n pb.evidence(datas)\n print 'posterior ',zip(hypos,pb.posterior(hypos))\n return", "title": "" }, { "docid": "4fcb96a0e8e07a29bc2939206f43548c", "score": "0.56107455", "text": "def calculate_probability(x1=0, x2=0, x3=0, x4=0):\n\n\tprobability = 1.0\n\t\n\t# Do not edit any code outside the edit region\n\t# Edit region starts here\n\t#########################\n\t# Your code goes here\n\tif 'p_table' not in calculate_probability.__dict__:\n\t\tp_table = -np.ones([4,4,4,4])\n\tif p_table[x1,x2,x3,x4] != -1:\n\t\treturn p_table[x1,x2,x3,x4]\n\tpara = [x1,x2,x3,x4]\n\tif 0 not in para:\n\t\tp_table[x1,x2,x3,x4] = p_x1[x1-1]*p_x2_given_x1[x1-1, x2-1]*p_x3_given_x2[x2-1,x3-1]*p_x4_given_x3[x3-1, x4-1]\n\t\treturn p_table[x1,x2,x3,x4]\n\telse:\n\t\tfor i,x in enumerate(para):\n\t\t\tif x==0:\n\t\t\t\ttemp_para = para[:]\n\t\t\t\ttemp_p = 0.0\n\t\t\t\tfor j in range(1,4):\n\t\t\t\t\ttemp_para[i] = j\n\t\t\t\t\ttemp_p += calculate_probability(temp_para[0],temp_para[1],temp_para[2],temp_para[3])\n\t\t\t\tp_table[x1,x2,x3,x4] = temp_p\n\t\t\t\treturn temp_p\n\t#########################\n\t# Edit region ends here\n\n\treturn probability", "title": "" }, { "docid": "ee0a181ae490e18644d4fd216bf30b1b", "score": "0.56059885", "text": "def calc_prior(p,fitfunction, params):\n \n plike = 0.\n \n if fitfunction.__name__ == 'greybody':\n # param ordering is [amp,Tdust,L0,beta]\n # setting priors as uniform\n if p[params.index('amp')] < 0: plike 
-= np.inf\n if (p[params.index('Td')]<15) or (p[params.index('Td')]>300): plike -= np.inf\n if (p[params.index('L0')]<50) or (p[params.index('L0')]>500): plike -= np.inf\n if (p[params.index('beta')]<0.) or (p[params.index('beta')]>4.): plike -= np.inf\n return plike\n elif fitfunction.func_name == 'greybody_powerlaw':\n # param ordering is[amp,Tdust,L0,beta,alpha,z]\n if p[0] < 0: plike -= np.inf\n if (p[1]<15) or (p[1]>300): plike -= np.inf\n if (p[2]<50e-6) or (p[2]>500e-6): plike -= np.inf\n if (p[3]<0.) or (p[3]>4.): plike -= np.inf\n if (p[4]<0.25) or (p[4]>3.): plike -= np.inf\n if (p[5]<0.) or (p[5]>10.): plike -= np.inf\n return plike", "title": "" }, { "docid": "8a144c8c81f4c20e003d8410e02767fc", "score": "0.5593349", "text": "def posterior(x, n, p1, p2):\n if not isinstance(n, int) or n < 1:\n raise ValueError(\"n must be a positive integer\")\n if type(x) != int or x < 0:\n err = \"x must be an integer that is greater than or equal to 0\"\n raise ValueError(err)\n if x > n:\n raise ValueError(\"x cannot be greater than n\")\n if not isinstance(P, np.ndarray) or len(P.shape) != 1:\n raise TypeError(\"P must be a 1D numpy.ndarray\")\n if not isinstance(p1, float) or p1 < 0 or p1 > 1:\n raise TypeError(\"p1 must be a float in the range [0, 1]\")\n if not isinstance(p2, float) or p2 < 0 or p2 > 1:\n raise TypeError(\"p2 must be a float in the range [0, 1]\")\n if p2 <= p1:\n raise ValueError(\"p2 must be greater than p1\")\n return p1", "title": "" }, { "docid": "9a9d60666907e4e3c0b5a7ba1824cb44", "score": "0.55737007", "text": "def x2p(X = Math.array([]), tol = 1e-5, perplexity = 30.0):\n\n\t# Initialize some variables\n\tprint \"Computing pairwise distances...\"\n\t(n, d) = X.shape;\n\tsum_X = Math.sum(Math.square(X), 1);\n\tD = Math.add(Math.add(-2 * Math.dot(X, X.T), sum_X).T, sum_X);\n\tP = Math.zeros((n, n));\n\tbeta = Math.ones((n, 1));\n\tlogU = Math.log(perplexity);\n\n\t# Loop over all datapoints\n\tfor i in range(n):\n\n\t\t# Print progress\n\t\tif i % 500 == 0:\n\t\t\tprint \"Computing P-values for point \", i, \" of \", n, \"...\"\n\n\t\t# Compute the Gaussian kernel and entropy for the current precision\n\t\tbetamin = -Math.inf;\n\t\tbetamax = Math.inf;\n\t\tDi = D[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))];\n\t\t(H, thisP) = Hbeta(Di, beta[i]);\n\n\t\t# Evaluate whether the perplexity is within tolerance\n\t\tHdiff = H - logU;\n\t\ttries = 0;\n\t\twhile Math.abs(Hdiff) > tol and tries < 50:\n\n\t\t\t# If not, increase or decrease precision\n\t\t\tif Hdiff > 0:\n\t\t\t\tbetamin = beta[i].copy();\n\t\t\t\tif betamax == Math.inf or betamax == -Math.inf:\n\t\t\t\t\tbeta[i] = beta[i] * 2;\n\t\t\t\telse:\n\t\t\t\t\tbeta[i] = (beta[i] + betamax) / 2;\n\t\t\telse:\n\t\t\t\tbetamax = beta[i].copy();\n\t\t\t\tif betamin == Math.inf or betamin == -Math.inf:\n\t\t\t\t\tbeta[i] = beta[i] / 2;\n\t\t\t\telse:\n\t\t\t\t\tbeta[i] = (beta[i] + betamin) / 2;\n\n\t\t\t# Recompute the values\n\t\t\t(H, thisP) = Hbeta(Di, beta[i]);\n\t\t\tHdiff = H - logU;\n\t\t\ttries = tries + 1;\n\n\t\t# Set the final row of P\n\t\tP[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))] = thisP;\n\n\t# Return final P-matrix\n\tprint \"Mean value of sigma: \", Math.mean(Math.sqrt(1 / beta));\n\treturn P;", "title": "" }, { "docid": "57f50fff7694e62384e28b9cdbbfd7de", "score": "0.5565294", "text": "def x2p(X = Math.array([]), tol = 1e-5, perplexity = 30.0):\n\n\t# Initialize some variables\n\tprint (\"Computing pairwise distances...\")\n\t(n, d) = X.shape;\n\tsum_X = Math.sum(Math.square(X), 
1);\n\tD = Math.add(Math.add(-2 * Math.dot(X, X.T), sum_X).T, sum_X);\n\tP = Math.zeros((n, n));\n\tbeta = Math.ones((n, 1));\n\tlogU = Math.log(perplexity);\n\n\t# Loop over all datapoints\n\tfor i in range(n):\n\n\t\t# Print progress\n\t\tif i % 500 == 0:\n\t\t\tprint (\"Computing P-values for point \", i, \" of \", n, \"...\")\n\n\t\t# Compute the Gaussian kernel and entropy for the current precision\n\t\tbetamin = -Math.inf;\n\t\tbetamax = Math.inf;\n\t\tDi = D[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))];\n\t\t(H, thisP) = Hbeta(Di, beta[i]);\n\n\t\t# Evaluate whether the perplexity is within tolerance\n\t\tHdiff = H - logU;\n\t\ttries = 0;\n\t\twhile Math.abs(Hdiff) > tol and tries < 50:\n\n\t\t\t# If not, increase or decrease precision\n\t\t\tif Hdiff > 0:\n\t\t\t\tbetamin = beta[i].copy();\n\t\t\t\tif betamax == Math.inf or betamax == -Math.inf:\n\t\t\t\t\tbeta[i] = beta[i] * 2;\n\t\t\t\telse:\n\t\t\t\t\tbeta[i] = (beta[i] + betamax) / 2;\n\t\t\telse:\n\t\t\t\tbetamax = beta[i].copy();\n\t\t\t\tif betamin == Math.inf or betamin == -Math.inf:\n\t\t\t\t\tbeta[i] = beta[i] / 2;\n\t\t\t\telse:\n\t\t\t\t\tbeta[i] = (beta[i] + betamin) / 2;\n\n\t\t\t# Recompute the values\n\t\t\t(H, thisP) = Hbeta(Di, beta[i]);\n\t\t\tHdiff = H - logU;\n\t\t\ttries = tries + 1;\n\n\t\t# Set the final row of P\n\t\tP[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))] = thisP;\n\n\t# Return final P-matrix\n\tprint(\"Mean value of sigma: \", Math.mean(Math.sqrt(1 / beta)))\n\treturn P;", "title": "" }, { "docid": "ca4f15080d0c717db14905503a17c1ac", "score": "0.55517834", "text": "def _set_priors(self):\n bool_arr = np.array([self.MAP_uniform_prior, self.MAP_use_log_Teff_log_g_prior])\n n_bool = np.sum(bool_arr * 1)\n if n_bool != 1:\n logger.error('_set_priors: set only one of \"MAP_uniform_prior\" or \"MAP_use_log_Teff_log_g_prior\" to True:')\n sys.exit(1)\n\n work_y = self.get('MAP_work_frequencies')\n m, K = work_y.shape\n\n if self.MAP_uniform_prior:\n prior = old_div(np.ones(m), float(m))\n elif self.MAP_use_log_Teff_log_g_prior:\n # get observed log_Teff and log_g together with their errors from the star\n if self.log_Teff_err_lower == 0 or self.log_Teff_err_upper == 0:\n logger.error('_set_priors: set log_Teff_err_lower and log_Teff_err_upper first')\n sys.exit(1)\n\n obs_log_Teff = self.log_Teff\n obs_log_Teff_err = np.max([ self.log_Teff_err_lower, self.log_Teff_err_upper ])\n obs_log_g = self.log_g\n obs_log_g_err= np.max([ self.log_g_err_lower, self.log_g_err_upper ])\n if not all([ obs_log_Teff != 0, obs_log_Teff_err != 0, \n obs_log_g != 0, obs_log_g_err != 0 ]):\n logger.error('_set_priors: Specify the log_Teff, log_g and their errors properly')\n sys.exit(1)\n\n lrn_log_Teff = self.learning_log_Teff[:]\n lrn_log_g = self.learning_log_g[:]\n\n prior_log_Teff = utils.gaussian(x=lrn_log_Teff, mu=obs_log_Teff, sigma=obs_log_Teff_err)\n prior_log_g = utils.gaussian(x=lrn_log_g, mu=obs_log_g, sigma=obs_log_g_err)\n\n prior = prior_log_Teff * prior_log_g\n\n if utils.has_nan(prior):\n logger.error('_set_priors: NaN detected')\n sys.exit(1)\n\n prior = utils.substitute_inf(prior)\n ln_prior = np.log(prior)\n\n self.set('MAP_prior', prior)\n self.set('MAP_ln_prior', ln_prior)", "title": "" }, { "docid": "9bd3465cf64e2a2dae0f90b2c2bb63b1", "score": "0.5542093", "text": "def probit(x):\n eps = np.finfo(float).eps\n result = 0.5 + 0.5 * pt.erf(x / pt.sqrt(2))\n result = pt.switch(pt.eq(result, 0), eps, result)\n result = pt.switch(pt.eq(result, 1), 1 - eps, result)\n\n return result", "title": 
"" }, { "docid": "4eb157e75fd97440ef81c2e98c077cbf", "score": "0.55243176", "text": "def exbayesexpo():\n tgs = []\n NS = [10,100,1000,10000]\n RS = [(0.1,3.),(0.4,1.6),(0.8,1.2),(0.94,1.06)]\n for i in range(len(NS)):\n N = NS[i]\n u0,uf = RS[i]\n fu = pspdf.pdf_uniform(u0,uf)\n fe = pspdf.pdf_exponential(1.)\n hypos = fu.rvariables()\n dlike = pslib.exponential\n pb = psbayes.PBayes(fu,hypos,dlike)\n data = fe.random(N)\n x0,xf = fe.domain()\n pb.evidence(data)\n xs = pb.hypothesis()\n zs = zip(xs,pb.posterior(xs))\n xpf = pspdf.pdf_points(zs)\n print ' N points ',N\n print ' average ',xpf.average(),' rms ',xpf.rms()\n print ' contaiment ',xpf.inverse_comulative(0.5-0.341),xpf.inverse_comulative(0.5+0.341) \n tg = psplot.tgraph(zs,title='posterior_N'+str(N))\n h = psplot.thisto(data)\n tgs.append((h,tg))\n return tgs", "title": "" }, { "docid": "085cc3c6e04ab13f78f32bcc5daafc90", "score": "0.5499824", "text": "def prob(x, xi):\n return sum([1 for x0 in x if x0 == xi])/len(x)", "title": "" }, { "docid": "4bf07dd22601f20def3de8502b68014f", "score": "0.5489826", "text": "def x2p(X=np.array([]), tol=1e-5, perplexity=30.0):\n\n # Initialize some variables\n print \"Computing pairwise distances...\"\n (n, d) = X.shape\n sum_X = np.sum(np.square(X), 1)\n D = np.add(np.add(-2 * np.dot(X, X.T), sum_X).T, sum_X)\n P = np.zeros((n, n))\n beta = np.ones((n, 1))\n logU = np.log(perplexity)\n\n # Loop over all datapoints\n for i in range(n):\n\n # Print progress\n if i % 500 == 0:\n print \"Computing P-values for point \", i, \" of \", n, \"...\"\n\n # Compute the Gaussian kernel and entropy for the current precision\n betamin = -np.inf\n betamax = np.inf\n Di = D[i, np.concatenate((np.r_[0:i], np.r_[i+1:n]))]\n (H, thisP) = Hbeta(Di, beta[i])\n\n # Evaluate whether the perplexity is within tolerance\n Hdiff = H - logU\n tries = 0\n while np.abs(Hdiff) > tol and tries < 50:\n\n # If not, increase or decrease precision\n if Hdiff > 0:\n betamin = beta[i].copy()\n if betamax == np.inf or betamax == -np.inf:\n beta[i] = beta[i] * 2\n else:\n beta[i] = (beta[i] + betamax) / 2\n else:\n betamax = beta[i].copy()\n if betamin == np.inf or betamin == -np.inf:\n beta[i] = beta[i] / 2\n else:\n beta[i] = (beta[i] + betamin) / 2\n\n # Recompute the values\n (H, thisP) = Hbeta(Di, beta[i])\n Hdiff = H - logU\n tries = tries + 1\n\n # Set the final row of P\n P[i, np.concatenate((np.r_[0:i], np.r_[i+1:n]))] = thisP\n\n # Return final P-matrix\n print \"Mean value of sigma: \", np.mean(np.sqrt(1 / beta))\n return P", "title": "" }, { "docid": "230700129186c1eb55e343bcc422e9a8", "score": "0.5466392", "text": "def _ppf(self, p, betaL, betaH, mL, mH):\n\n def inttail(beta, m):\n return m / beta / (m - 1) * np.exp(-0.5 * beta * beta)\n\n def intcore(betaL, betaH):\n return _norm_pdf_C * (_norm_cdf(betaH) - _norm_cdf(-betaL))\n\n def hightail(p, betaL, betaH, mL, mH):\n CL = inttail(betaL, mL)\n CH = inttail(betaH, mH)\n C = CL + CH\n N = 1 / (C + intcore(betaL, betaH))\n eb2H = np.exp(-0.5 * betaH * betaH)\n return -(\n mH / betaH\n - betaH\n - ((mH - 1) * (mH / betaH) ** (-mH) / eb2H * (1 - p) / N)\n ** (1 / (1 - mH))\n )\n\n def lowtail(p, betaL, betaH, mL, mH):\n CL = inttail(betaL, mL)\n CH = inttail(betaH, mH)\n C = CL + CH\n N = 1 / (C + intcore(betaL, betaH))\n eb2L = np.exp(-0.5 * betaL * betaL)\n return (\n mL / betaL\n - betaL\n - ((mL - 1) * (mL / betaL) ** (-mL) / eb2L * p / N) ** (1 / (1 - mL))\n )\n\n def core(p, betaL, betaH, mL, mH):\n CL = inttail(betaL, mL)\n CH = inttail(betaH, mH)\n C = CL + CH\n N 
= 1 / (C + intcore(betaL, betaH))\n return _norm_ppf(_norm_cdf(-betaL) + (1 / _norm_pdf_C) * (p / N - CL))\n\n def ppf_greater(p, betaL, betaH, mL, mH):\n N = 1.0 / (inttail(betaL, mL) + intcore(betaL, betaH) + inttail(betaH, mH))\n pbetaH = 1 - (N * (mH / betaH) * np.exp(-0.5 * betaH * betaH) / (mH - 1))\n return _lazywhere(\n p > pbetaH, (p, betaL, betaH, mL, mH), f=hightail, f2=core\n )\n\n N = 1.0 / (inttail(betaL, mL) + intcore(betaL, betaH) + inttail(betaH, mH))\n pbetaL = N * (mL / betaL) * np.exp(-0.5 * betaL * betaL) / (mL - 1)\n return _lazywhere(\n p < pbetaL, (p, betaL, betaH, mL, mH), f=lowtail, f2=ppf_greater\n )", "title": "" }, { "docid": "3e211be2e3b6314ced1a5d6c3e4d89e8", "score": "0.5454346", "text": "def peval(x, p):\n A,B,C,D = p\n return logistic4(x, A, B, C, D)", "title": "" }, { "docid": "595bfeaf3fb9b98b5c8136490cdb474c", "score": "0.5447155", "text": "def peval(x, p):\r\n A,B,C,D = p\r\n return logistic4(x, A, B, C, D)", "title": "" }, { "docid": "4f1e337e48961b7bed9f1afe705ea28c", "score": "0.54432005", "text": "def pr(self, source):\n\n H_0 = self.PROB - self.EPSILON\n H_1 = self.PROB + self.EPSILON\n beta = self.ALPHA\n a = np.log(beta / (1 - self.ALPHA))\n b = np.log((1 - beta) / self.ALPHA)\n k = 0\n\n w_sum = 0.0\n w_sum_true = 0.0\n s = Sampler()\n s.create(source)\n li = s.take(10)\n count = 0.0\n for i in li:\n if i >= 0.5:\n count = count + 1\n count = count / len(li)\n num_samples = 0\n while (num_samples < self.INITIAL_SAMPLE_SIZE):\n if li[num_samples] > 0.5:\n k = k + 1\n w_sum_true = w_sum_true + count\n w_sum = w_sum + count\n num_samples += 1\n test = None\n while (self.NUMM_SAMPLES <= self.MAX_SAMPLES):\n log_likelihood = w_sum_true * \\\n np.log(H_1 / H_0) + (w_sum - w_sum_true) * np.log((1 - H_1) / (1 - H_0))\n\n if (log_likelihood >= b):\n test = True\n elif (log_likelihood <= a):\n test = False\n else:\n i = 0\n while (i < self.SAMPLE_SIZE_STEP):\n count = 0.0\n li = s.Take(10)\n for it in li:\n if it >= 0.5:\n count = count + 1\n count = count / len(li)\n\n if li[i] > 0.5:\n k = k + 1\n w_sum_true = w_sum_true + count\n w_sum = w_sum + count\n i = i + 1\n self.NUMM_SAMPLES += self.SAMPLE_SIZE_STEP\n test = False\n\n return test", "title": "" }, { "docid": "c1e6fcfd98cafcf44dc3228ab4d0f08a", "score": "0.5438436", "text": "def x2p(X=np.array([]), tol=1e-5, perplexity=30.0):\n\n # Initialize some variables\n print(\"Computing pairwise distances...\")\n (n, d) = X.shape\n sum_X = np.sum(np.square(X), 1)\n D = np.add(np.add(-2 * np.dot(X, X.T), sum_X).T, sum_X)\n P = np.zeros((n, n))\n beta = np.ones((n, 1))\n logU = np.log(perplexity)\n\n # Loop over all datapoints\n for i in range(n):\n\n # Print progress\n if i % 500 == 0:\n print(\"Computing P-values for point %d of %d...\" % (i, n))\n\n # Compute the Gaussian kernel and entropy for the current precision\n betamin = -np.inf\n betamax = np.inf\n Di = D[i, np.concatenate((np.r_[0:i], np.r_[i+1:n]))]\n (H, thisP) = Hbeta(Di, beta[i])\n\n # Evaluate whether the perplexity is within tolerance\n Hdiff = H - logU\n tries = 0\n while np.abs(Hdiff) > tol and tries < 50:\n\n # If not, increase or decrease precision\n if Hdiff > 0:\n betamin = beta[i].copy()\n if betamax == np.inf or betamax == -np.inf:\n beta[i] = beta[i] * 2.\n else:\n beta[i] = (beta[i] + betamax) / 2.\n else:\n betamax = beta[i].copy()\n if betamin == np.inf or betamin == -np.inf:\n beta[i] = beta[i] / 2.\n else:\n beta[i] = (beta[i] + betamin) / 2.\n\n # Recompute the values\n (H, thisP) = Hbeta(Di, beta[i])\n Hdiff = H - logU\n 
tries += 1\n\n # Set the final row of P\n P[i, np.concatenate((np.r_[0:i], np.r_[i+1:n]))] = thisP\n\n # Return final P-matrix\n print(\"Mean value of sigma: %f\" % np.mean(np.sqrt(1 / beta)))\n return P", "title": "" }, { "docid": "94cfd45cec2e7d2819e9a69135b9d4f6", "score": "0.54333556", "text": "def test_correct_conditional_prob_h(self):\n pass", "title": "" }, { "docid": "f75311b537c5e377c6b9205d54e1e34d", "score": "0.54235107", "text": "def piecewise_p(x, val_a=25, val_b=300, val_c=100):\n if x > 8:\n p = val_a\n elif x > 6:\n p = val_b\n elif x > 4:\n p = val_c\n elif x > 2:\n p = val_a\n else:\n p = val_c\n\n return p", "title": "" }, { "docid": "de21e17695d6f2c468fbb851ce196cff", "score": "0.54168653", "text": "def qftProbability(N,list_Primes,k):\n \n summation = 0\n for prime in list_Primes:\n summation += cmath.exp(2*math.pi*1j*prime*k/N)\n return abs(summation)**2", "title": "" }, { "docid": "640586056718454210659fda2ed61040", "score": "0.5415285", "text": "def _pmf(self, x):\n if (x is True) or (x == 1):\n return self.proba_is_true\n\n if (x is False) or (x == 0):\n return 1 - self.proba_is_true\n\n return 0.", "title": "" }, { "docid": "80129b59897d8105f4bac314de4e112d", "score": "0.5411826", "text": "def multiple_comparisons(p, n):\n if p > 1e-6: #if p is large and n small, calculate directly\n return 1 - (1-p)**n\n else: \n return one_minus_exp(-n * p)", "title": "" }, { "docid": "3a06cc42d6d2c9ffb274226b2aa2853b", "score": "0.54018855", "text": "def qftPeaks(N,list_Primes, pi_N, t1):\n \n weights_tuple = []\n weights_tuple_normalized = []\n for k in range(0,N):\n if k % 5000 == 0:\n t2 = time.time()\n print k, \"([{:.3f} s])\".format(t2 - t1)\n t1 = t2\n sys.stdout.flush()\n weight = qftProbability(N, list_Primes, k)\n weights_tuple.append([k,weight])\n weights_tuple_normalized.append([k, weight/ float(N*pi_N)]) \n return weights_tuple, weights_tuple_normalized, t1", "title": "" }, { "docid": "92bfe324bc5894234197e13ee2cf0d28", "score": "0.53984207", "text": "def _calc_p(self,factors):\n logp = self.calc_logp(factors)\n return np.exp(logp)", "title": "" }, { "docid": "f40044b227b0311340c0d895247eaca5", "score": "0.5397628", "text": "def x_prob(x_sample, a):\n x_sample = np.array(x_sample)\n Pax = 1 #initialize P(a|x)\n for _x in x_sample:\n N = np.sqrt(np.log(a)/2*np.pi) #Normalization factor\n Pxa = N * a**(-_x**2 / 2) #P(x|a)\n Pa = 1 #P(a) is independent of 'a'\n Pax = Pax * Pxa * Pa \n return Pax", "title": "" }, { "docid": "ed21da6e1c09a4a45fd098433327459b", "score": "0.5389835", "text": "def pesq_example() -> tuple:\n from torchmetrics.audio.pesq import PerceptualEvaluationSpeechQuality\n\n p = lambda: torch.randn(8000)\n t = lambda: torch.randn(8000)\n\n # plot single value\n metric = PerceptualEvaluationSpeechQuality(8000, \"nb\")\n metric.update(p(), t())\n fig, ax = metric.plot()\n\n # plot multiple values\n metric = PerceptualEvaluationSpeechQuality(16000, \"wb\")\n vals = [metric(p(), t()) for _ in range(10)]\n fig, ax = metric.plot(vals)\n\n return fig, ax", "title": "" }, { "docid": "3747db3821c4500c863dd6559b15905a", "score": "0.5384205", "text": "def computeQ(data):\n Q = 0\n # Start with the expectation of the sum of priors over all tasks\n Q += (data.probZ * np.log(data.priorZ)).sum()\n\n # the expectation of the sum of posteriors over all tasks\n ab = np.dot(np.array([np.exp(data.beta)]).T, np.array([data.alpha]))\n\n # logSigma = - np.log(1 + np.exp(-ab))\n logSigma = logsigmoid(ab) # logP\n idxna = np.isnan(logSigma)\n if np.any(idxna):\n 
logger.warning('an invalid value was assigned to np.log [computeQ]')\n logSigma[idxna] = ab[idxna] # For large negative x, -log(1 + exp(-x)) = x\n\n # logOneMinusSigma = - np.log(1 + np.exp(ab))\n logOneMinusSigma = logsigmoid(-ab) - np.log(float(data.numClasses - 1)) # log((1-P)/(K-1))\n idxna = np.isnan(logOneMinusSigma)\n if np.any(idxna):\n logger.warning('an invalid value was assigned to np.log [computeQ]')\n logOneMinusSigma[idxna] = -ab[idxna] # For large positive x, -log(1 + exp(x)) = x\n\n for k in range(data.numClasses):\n delta = (data.labels == k + 1)\n Q += (data.probZ[:, k] * logSigma.T).T[delta].sum()\n oneMinusDelta = (data.labels != k + 1) & (data.labels != 0) # label == 0 -> no response\n Q += (data.probZ[:, k] * logOneMinusSigma.T).T[oneMinusDelta].sum()\n\n # Add Gaussian (standard normal) prior for alpha\n Q += np.log(sp.stats.norm.pdf(data.alpha - data.priorAlpha)).sum()\n\n # Add Gaussian (standard normal) prior for beta\n Q += np.log(sp.stats.norm.pdf(data.beta - data.priorBeta)).sum()\n\n if debug:\n logger.debug('a[0]={} a[1]={} a[2]={} b[0]={}'.format(data.alpha[0], data.alpha[1],\n data.alpha[2], data.beta[0]))\n logger.debug('Q={}'.format(Q))\n if np.isnan(Q):\n return -np.inf\n return Q", "title": "" }, { "docid": "b0467d3c7e727051f737479890812320", "score": "0.5382626", "text": "def psi_prior(psi):\n a = 2\n b = 40\n return get_list_val(scipy.stats.beta.pdf(psi, a, b))", "title": "" }, { "docid": "c13605bd1524c4498af355c65a00e00a", "score": "0.53797555", "text": "def x_prime_log_prior(self, x_prime):\n log_p = np.zeros(x_prime.size)\n for r in self.values():\n log_p += r.x_prime_log_prior(x_prime)\n return log_p", "title": "" }, { "docid": "acff8e79ed8c5513ce0b551c459e8389", "score": "0.5374655", "text": "def ppf(self, q):\n f = lambda x: self.cdf(x) - q\n return sp.optimize.newton(func=f, x0=self.mean(), fprime=self.pdf)", "title": "" }, { "docid": "fce89d7e326eeabfd0fa30e078a0ece4", "score": "0.53650934", "text": "def prob_early(pmf):\n return prob_bin(pmf, 0, 37)", "title": "" }, { "docid": "bf8e673281fb8ce252f20a1a671a2c71", "score": "0.53595436", "text": "def test_correct_p_values(self):\n exp = [0.003, 0.006, 0.003]\n obs = self.mc._correct_p_values([0.001, 0.002, 0.001])\n self.assertFloatEqual(obs, exp)", "title": "" }, { "docid": "c87a94eb6c938fc60bf7f168f355ff6c", "score": "0.53517747", "text": "def multinomial_predict(X, prior_0, prior_1, p_label_0, p_label_1):\n a = prior_0\n b = prior_1\n # print X.shape, p_label_1.shape, prior_1\n p_0 = np.exp(prior_0 + np.dot(X, p_label_0))\n p_1 = np.exp(prior_1 + np.dot(X, p_label_1))\n p_spam = [ p_1[i]/(p_0[i] + p_1[i]) if(p_0[i] + p_1[i]) > 0 else 0 for i in range(len(p_0))]\n\n return np.uint8(p_1 > p_0), np.array(p_spam)", "title": "" }, { "docid": "25b87bc33128b4ddb663f70b558b0ae9", "score": "0.5351664", "text": "def project_probs(P):\n values = np.random.normal(1, 0.5, P)\n values[values<0] = 0\n project_probs = values / np.sum(values)\n return values, project_probs", "title": "" }, { "docid": "940308d045ac04799b242c98638d5af8", "score": "0.5348281", "text": "def Prox(x, s, mu, p, q):\n \n X = smat(x[0:-2])\n S = smat(s[0:-2])\n \n xl = min(np.linalg.eigvals(X))\n sl = min(np.linalg.eigvals(S))\n \n if xl <= 0 or sl <= 0 or x[-2] <= 0 or x[-1] <= 0 or s[-2] <= 0 or s[-1] <= 0:\n return float('inf')\n \n D = NTScaling(X, S)\n d = np.sqrt(x[-2:] / s[-2:])\n \n V = Rotate(S, D) / np.sqrt(mu)\n [evals, evecs] = np.linalg.eigh(V)\n phi0 = Phi(evals, p, q)\n phi1 = Phi(d * s[-2:] / np.sqrt(mu), p, q)\n \n 
return phi0 + phi1", "title": "" }, { "docid": "a15b9856de81c6ffb33c754dfae57b57", "score": "0.5343132", "text": "def _p(self, x: np.ndarray) -> np.ndarray:\n p = np.zeros((self.n_samples, self.n_samples))\n for i, j in combinations(range(self.n_samples), 2):\n if i != j:\n pij = self._pij(x, i, j)\n pji = self._pij(x, j, i)\n # Ensure that all the pairwise probabilities sum to 1.\n pair_value = (pij + pji) / (2 * self.n_samples)\n p[i, j] = pair_value\n p[j, i] = pair_value\n return p", "title": "" }, { "docid": "70f1640a7a112a41903fb998eaa2fda1", "score": "0.53339154", "text": "def calcProb(tp):\n lamb = float(tp[0])\n thre = float(tp[1])\n maxi = thre*10000.\n# fPoi = ROOT.TF1(\"fPoi\", \"TMath::Poisson(x, [0])\", 0, maxi)\n fPoiI = ROOT.TF1(\"fPoi\", \"TMath::PoissonI(x, [0])\", 0, maxi)\n# fPoi.SetParameter(0, lamb)\n fPoiI.SetParameter(0, lamb)\n# return [fPoi.Integral(thre, maxi)/fPoi.Integral(0, maxi), fPoiI.Integral(thre, maxi)/fPoiI.Integral(0, maxi)]\n print lamb, maxi, thre\n return fPoiI.Integral(thre, maxi)/fPoiI.Integral(0, maxi)", "title": "" }, { "docid": "b4d048495ef14d746bfbf4d72da3f1a0", "score": "0.5322259", "text": "def default_priors(x, y, model_function):\n priors = [_x0_prior(x), _dx_prior(x), _A_prior(y)]\n if 'multi' in model_function:\n priors += [_x0_prior(x), _dx_prior(x), _A_prior(y)]\n elif 'thick' in model_function:\n priors += [_tau_prior()]\n elif 'hermite' in model_function:\n priors += [_h3_prior(), _h4_prior()]\n if '_cont' in model_function:\n priors += [_cont_prior(y)]\n return priors", "title": "" }, { "docid": "bba2424f5ac051f842ff3e6698b63d22", "score": "0.531893", "text": "def chi_effective_prior_from_isotropic_spins(q,aMax,xs):\n\n # Ensure that `xs` is an array and take absolute value\n xs = np.reshape(np.abs(xs),-1)\n\n # Set up various piecewise cases\n pdfs = np.ones(xs.size,dtype=complex)*(-1.)\n caseZ = (xs==0)\n caseA = (xs>0)*(xs<aMax*(1.-q)/(1.+q))*(xs<q*aMax/(1.+q))\n caseB = (xs<aMax*(1.-q)/(1.+q))*(xs>q*aMax/(1.+q))\n caseC = (xs>aMax*(1.-q)/(1.+q))*(xs<q*aMax/(1.+q))\n caseD = (xs>aMax*(1.-q)/(1.+q))*(xs<aMax/(1.+q))*(xs>=q*aMax/(1.+q))\n caseE = (xs>aMax*(1.-q)/(1.+q))*(xs>aMax/(1.+q))*(xs<aMax)\n caseF = (xs>=aMax)\n\n # Select relevant effective spins\n x_A = xs[caseA]\n x_B = xs[caseB]\n x_C = xs[caseC]\n x_D = xs[caseD]\n x_E = xs[caseE]\n\n pdfs[caseZ] = (1.+q)/(2.*aMax)*(2.-np.log(q))\n\n pdfs[caseA] = (1.+q)/(4.*q*aMax**2)*(\n q*aMax*(4.+2.*np.log(aMax) - np.log(q**2*aMax**2 - (1.+q)**2*x_A**2))\n - 2.*(1.+q)*x_A*np.arctanh((1.+q)*x_A/(q*aMax))\n + (1.+q)*x_A*(Di(-q*aMax/((1.+q)*x_A)) - Di(q*aMax/((1.+q)*x_A)))\n )\n\n pdfs[caseB] = (1.+q)/(4.*q*aMax**2)*(\n 4.*q*aMax\n + 2.*q*aMax*np.log(aMax)\n - 2.*(1.+q)*x_B*np.arctanh(q*aMax/((1.+q)*x_B))\n - q*aMax*np.log((1.+q)**2*x_B**2 - q**2*aMax**2)\n + (1.+q)*x_B*(Di(-q*aMax/((1.+q)*x_B)) - Di(q*aMax/((1.+q)*x_B)))\n )\n\n pdfs[caseC] = (1.+q)/(4.*q*aMax**2)*(\n 2.*(1.+q)*(aMax-x_C)\n - (1.+q)*x_C*np.log(aMax)**2.\n + (aMax + (1.+q)*x_C*np.log((1.+q)*x_C))*np.log(q*aMax/(aMax-(1.+q)*x_C))\n - (1.+q)*x_C*np.log(aMax)*(2. 
+ np.log(q) - np.log(aMax-(1.+q)*x_C))\n + q*aMax*np.log(aMax/(q*aMax-(1.+q)*x_C))\n + (1.+q)*x_C*np.log((aMax-(1.+q)*x_C)*(q*aMax-(1.+q)*x_C)/q)\n + (1.+q)*x_C*(Di(1.-aMax/((1.+q)*x_C)) - Di(q*aMax/((1.+q)*x_C)))\n )\n\n pdfs[caseD] = (1.+q)/(4.*q*aMax**2)*(\n -x_D*np.log(aMax)**2\n + 2.*(1.+q)*(aMax-x_D)\n + q*aMax*np.log(aMax/((1.+q)*x_D-q*aMax))\n + aMax*np.log(q*aMax/(aMax-(1.+q)*x_D))\n - x_D*np.log(aMax)*(2.*(1.+q) - np.log((1.+q)*x_D) - q*np.log((1.+q)*x_D/aMax))\n + (1.+q)*x_D*np.log((-q*aMax+(1.+q)*x_D)*(aMax-(1.+q)*x_D)/q)\n + (1.+q)*x_D*np.log(aMax/((1.+q)*x_D))*np.log((aMax-(1.+q)*x_D)/q)\n + (1.+q)*x_D*(Di(1.-aMax/((1.+q)*x_D)) - Di(q*aMax/((1.+q)*x_D)))\n )\n\n pdfs[caseE] = (1.+q)/(4.*q*aMax**2)*(\n 2.*(1.+q)*(aMax-x_E)\n - (1.+q)*x_E*np.log(aMax)**2\n + np.log(aMax)*(\n aMax\n -2.*(1.+q)*x_E\n -(1.+q)*x_E*np.log(q/((1.+q)*x_E-aMax))\n )\n - aMax*np.log(((1.+q)*x_E-aMax)/q)\n + (1.+q)*x_E*np.log(((1.+q)*x_E-aMax)*((1.+q)*x_E-q*aMax)/q)\n + (1.+q)*x_E*np.log((1.+q)*x_E)*np.log(q*aMax/((1.+q)*x_E-aMax))\n - q*aMax*np.log(((1.+q)*x_E-q*aMax)/aMax)\n + (1.+q)*x_E*(Di(1.-aMax/((1.+q)*x_E)) - Di(q*aMax/((1.+q)*x_E)))\n )\n\n pdfs[caseF] = 0.\n\n # Deal with spins on the boundary between cases\n if np.any(pdfs==-1):\n boundary = (pdfs==-1)\n pdfs[boundary] = 0.5*(chi_effective_prior_from_isotropic_spins(q,aMax,xs[boundary]+1e-6)\\\n + chi_effective_prior_from_isotropic_spins(q,aMax,xs[boundary]-1e-6))\n\n return np.real(pdfs)", "title": "" }, { "docid": "32391e786e38d197735f19c6c3c35b75", "score": "0.5317258", "text": "def AdditiveEquiprobabilities():\r\n pass", "title": "" }, { "docid": "11d7c537bd574a1460ee81d6db268927", "score": "0.5313194", "text": "def prob59():", "title": "" }, { "docid": "4745082d1b07736369556a66635d0f6e", "score": "0.53005433", "text": "def get_p(features):\r\n # Compute the raw value\r\n raw_prediction = intercept + coefficients_broadcast.value.dot(features)\r\n # Bound the raw value between 20 and -20\r\n raw_prediction = min(20, max(-20, raw_prediction))\r\n # Return the probability\r\n probability = 1.0 / (1 + exp(- raw_prediction))\r\n return probability", "title": "" }, { "docid": "ce6e143ed471a178aa534f1b2099b409", "score": "0.5298077", "text": "def test_correct_conditional_prob_v(self):\n pass", "title": "" }, { "docid": "7a527689900d1a0bb7454209c4af3a70", "score": "0.5294685", "text": "def ppf(x, mu=0, sigma=1):\n return mu - sigma * math.sqrt(2) * erfcinv(2 * x)", "title": "" }, { "docid": "29b591350716fe9fcac754b32d4aed89", "score": "0.5281783", "text": "def f_per_particle(self,m, alpha, X, y, P):\n total_features = X.shape[1]\n # Get the subset of the features from the binary mask\n if np.count_nonzero(m) == 0:\n X_subset = X\n else:\n X_subset = X[:,m==1]\n\n ratio_selected_features = X_subset.shape[1]/total_features\n \n #Particle fittness error/loss computed using cross validation\n fitness_error = make_scorer(self.__objective_fcn, ratio_selected_features=ratio_selected_features, P=P, alpha=alpha)\n scores = cross_val_score(self.regressor, X_subset, y, cv=10, scoring=fitness_error) #does first 4 steps of k-fold CV\n particle_fitness_err_mean = scores.mean()\n\n #Stdev \n particle_fitness_err_stdev = np.std(scores)\n\n return (particle_fitness_err_mean, particle_fitness_err_stdev)", "title": "" }, { "docid": "0f4f7314a053c38a83d7fa54e418dd59", "score": "0.52714187", "text": "def perplexity(self, data):\n test_tokens = self._preprocess(data, self.n)\n N = len(test_tokens)\n\n probs = 0.0\n for word in test_tokens:\n probs -= 
self.get_single_probability(word, log=True)\n\n return math.exp(probs/N)", "title": "" }, { "docid": "98c6bfcb4315d422571af70bcb76e4f6", "score": "0.5263221", "text": "def sample(preds, temperature=1.0):\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return probas", "title": "" }, { "docid": "e9027b01ff967e5b02c69efaa4c8cd86", "score": "0.5261927", "text": "def ppf(self, p):\n d = self.loc[p.index, p.columns]\n icdf_arr = d.mu + d.sigma * np.sqrt(2) * erfinv(2 * p.values - 1)\n return pd.DataFrame(icdf_arr, index=p.index, columns=p.columns)", "title": "" }, { "docid": "c57344492bc9af651a3ecbfc95d995f9", "score": "0.5257148", "text": "def evaluate_p(self, X, best_p, p_range=(0, 1), sample_density=100):\n return np.mean((best_p - self.estimate_best_p(X, p_range, sample_density)) ** 2)", "title": "" }, { "docid": "18203532f60f34602f01f74f01c3ce76", "score": "0.52518934", "text": "def extract_priors(options):\n # Initialize empty dictionary to store priors \n options['priors_definitions'] = dict()\n options['priors_params'] = dict()\n\n if len(options['priors']) < 4:\n raise ValueError('''Please provide list of four arguments for \n priors. Less than four arguments provided.''')\n elif len(options['priors']) > 4:\n raise ValueError('''Please provide list of four arguments for \n priors. More than four arguments provided.''')\n elif len(options['priors']) == 4: \n NoneType = type(None)\n # Check whether arguments in list are of type integer or float\n for i in range(0,4):\n if isinstance(options['priors'][i], (str, NoneType)) is False:\n raise ValueError('''User Error: Please provide numerical values or NoneType \n for priors of parameters of the model.''')\n \n if options['priors'][i] is not None:\n if i == 0:\n # Extract prior for alpha parameter\n options['priors_definitions']['scale'] = \" \".join(re.findall(\"[a-zA-Z]+\", \n options['priors'][0]))\n \n alpha_prior_values = re.findall(r'[-+]?\\d*\\.\\d+|\\d+', options['priors'][0])\n options['priors_params']['scale'] = [np.float(x) for x in alpha_prior_values]\n elif i == 1:\n # Extract prior for beta parameter\n options['priors_definitions']['slope'] = \" \".join(re.findall(\"[a-zA-Z]+\", \n options['priors'][1]))\n \n beta_prior_values = re.findall(r'[-+]?\\d*\\.\\d+|\\d+', options['priors'][1])\n options['priors_params']['slope'] = [np.float(x) for x in beta_prior_values] \n elif i == 2:\n # Extract prior for gamma parameter\n options['priors_definitions']['gamma'] = \" \".join(re.findall(\"[a-zA-Z]+\", \n options['priors'][2])) \n gamma_prior_values = re.findall(r'[-+]?\\d*\\.\\d+|\\d+', options['priors'][2])\n options['priors_params']['gamma'] = [np.float(x) for x in gamma_prior_values]\n elif i == 3:\n # Extract prior for lambda parameter\n options['priors_definitions']['lambda'] = \" \".join(re.findall(\"[a-zA-Z]+\", \n options['priors'][3]))\n \n lambda_prior_values = re.findall(r'[-+]?\\d*\\.\\d+|\\d+', options['priors'][3])\n options['priors_params']['lambda'] = [np.float(x) for x in lambda_prior_values]\n \n return options", "title": "" }, { "docid": "992b75ebef63d1911731e82c10726dc9", "score": "0.52492476", "text": "def ppf(self, x, range):\n min, max = range\n pmin = min ** (-self.power)\n pmax = max ** (-self.power)\n scaled = pmin - x*(pmin - pmax)\n return scaled ** (-1 / self.power)", "title": "" }, { "docid": "696b90a8ad7327e37c2ae29814e7a865", "score": "0.52484584", 
"text": "def upP(self,q,p, *args) :\n\t\treturn array( [ p[1]**2 * self.Rp(q[0]) / (self.R(q[0])**3) , 0] )", "title": "" }, { "docid": "2894f5144b19a0f0973502e54fdde66c", "score": "0.5244034", "text": "def evaluate_p(self, p_in):\n return np.exp(self.evaluate_logp(p_in))", "title": "" }, { "docid": "795b66a6542544630181011a59623c96", "score": "0.5243482", "text": "def predict_proba(self, X):\n values = self.decision_function(X)\n likelihood = np.exp(values - values.min(axis=1)[:, np.newaxis])\n # compute posterior probabilities\n return likelihood / likelihood.sum(axis=1)[:, np.newaxis]", "title": "" }, { "docid": "91861f70e05e530c827be2a6260c8124", "score": "0.5236747", "text": "def test_guess_priors():\n kernel = Exponentiation(\n ConstantKernel(constant_value_bounds=\"fixed\") * Matern()\n + WhiteKernel()\n + CompoundKernel([RBF(), Matern()]),\n 2.0,\n )\n\n priors = guess_priors(kernel)\n\n assert len(priors) == 4\n expected = [\n -1.737085713764618,\n -4.107091211892862,\n -1.737085713764618,\n -1.737085713764618,\n ]\n for p, v in zip(priors, expected):\n assert_almost_equal(p(0.0), v)", "title": "" }, { "docid": "177a646c01a4ff035d425e2bf266daf5", "score": "0.5236103", "text": "def prior(z,m,info='hdfn',nt=6,ninterp=0,x=None,y=None): \n if info=='none' or info=='flat': return\n #We estimate the priors at m_step intervals\n #and keep them in a dictionary, and then\n #interpolate them for other values\n m_step=0.1\n accuracy=str(len(str(int(1./m_step)))-1)#number of decimals kept\n\n exec(\"from desc_bpz.prior_{} import *\".format(info), globals())\n global prior_dict\n try:\n len(prior_dict)\n except NameError:\n prior_dict={}\n\n #The dictionary keys are values of the \n #magnitud quantized to mstep mags\n #The values of the dictionary are the corresponding\n #prior probabilities.They are only calculated once \n #and kept in the dictionary for future\n #use if needed. \n forma='%.'+accuracy+'f'\n m_dict=forma %m \n if m_dict not in prior_dict or info=='lensing': #if lensing, the magnitude alone is not enough\n if info!='lensing':\n prior_dict[m_dict]=function(z,float(m_dict),nt)\n else:\n prior_dict[m_dict]=function(z,float(m_dict),nt,x,y) \n if ninterp:\n pp_i=prior_dict[m_dict]\n nz=pp_i.shape[0]\n nt=pp_i.shape[1]\n nti=nt+(nt-1)*int(ninterp)\n tipos=arange(nt)*1.\n itipos=arange(nti)*1./(1.+float(ninterp))\n buffer=zeros((nz,nti))*1.\n for iz in range(nz):\n buffer[iz,:]=match_resol(tipos,pp_i[iz,:],itipos)\n prior_dict[m_dict]=buffer\n return prior_dict[m_dict]", "title": "" }, { "docid": "078a73db8a4de3e09e29f2467dfb6e1b", "score": "0.5231932", "text": "def ppf(self, q):\n\n if np.any(q < 0.) 
or np.any(q > 1.):\n msg = 'Provided quantiles are out of allowed range of [0, 1]: {!r}'\n raise ValueError(msg.format(q))\n\n # ---------\n # Exercise:\n # ---------\n # Angle PDf | part c) (exercises/angle_pdf.py)\n #\n\n # define N\n N = self.k/(2 * (1 - np.exp(- np.pi * self.k)))\n\n # make value array\n values = np.zeros_like(q)\n\n # in case q is just one value\n if np.isscalar(q):\n if q == 0.:\n values = -np.pi\n elif q == 0.5:\n values = 0.\n elif q == 1.:\n values = np.pi\n elif q > 0 and q < 0.5:\n values = np.log(self.k * q / N + np.exp(- np.pi * self.k)) /self.k\n elif q > 0.5 and q < 1:\n values = - np.log(1 - (self.k/N) * (q - 1/2))/self.k\n \n else:\n # special cases first\n values[q == 0.0] = -np.pi\n values[q == 0.5] = 0.\n values[q == 1.0] = np.pi\n\n mask_lower = np.logical_and(q > 0, q < 0.5) # formula for lower percentage\n values[mask_lower] = np.log(self.k * q[mask_lower] / N + np.exp(- np.pi * self.k)) /self.k\n\n mask_higher = np.logical_and(q > 0.5, q < 1.) # formula for higher percentage\n values[mask_higher] = - np.log(1 - (self.k/N) * (q[mask_higher] - 1/2))/self.k\n\n return values", "title": "" }, { "docid": "dbc049ede849ba998d043a10b91cc05a", "score": "0.5231932", "text": "def x_prime(x):\r\n r1 = np.random.uniform(0, 1)\r\n xprime = x + (0.5 - r1) * min([3, 10**5 - x, x + 10**5])\r\n \r\n return xprime", "title": "" }, { "docid": "dd43580827e54111be148c42500113fc", "score": "0.5221587", "text": "def call_post_prob(self, val, post_cutoff):\n number_state = len(self.prior_state)\n prob = []\n for i in range(0, number_state):\n gauss_pmf = self.get_gauss_pmf_cdf(\n val, self.mu_state[i], self.sigma_state[i]\n )[0]\n prob.append(gauss_pmf * self.prior_state[i])\n sum_prob = float(sum(prob))\n post_prob = [float(a) / sum_prob for a in prob]\n max_prob = max(post_prob)\n if max_prob >= post_cutoff:\n return post_prob.index(max_prob)\n else:\n return None", "title": "" }, { "docid": "9d30b7ff4e1b862cc98f37cef75fd122", "score": "0.5219188", "text": "def posteriors(likelihoods, priors):\n #Check that there is a prior for each likelihood\n if len(likelihoods) != len(priors):\n raise ValueError, \"Lists not equal lengths.\"\n #Posterior probability is defined as prior * likelihood\n return [l * p for l, p in zip(likelihoods, priors)]", "title": "" }, { "docid": "bea50441112c0cd9bb0ba347e550899b", "score": "0.5215778", "text": "def _ex_set(p):\n k = GF(p)\n res = [ k(0), k(1), k(2), k(4) ]\n R = k['X']\n f = R([1,-3,1]) #(X**2 - 3*X+1)\n ro = f.roots()\n for a in ro:\n if a[0] not in res:\n res.append(a[0])\n return res", "title": "" }, { "docid": "ce8023d4eb64fdad7925f147727ce5db", "score": "0.52146107", "text": "def prime_net(self, primer):\n x = np.zeros((self.vocab_size, 1))\n x[self.char_to_ix[primer[0]]] = 1\n\n for i in range(len(primer)):\n self.h = np.tanh(np.dot(self.Wxh, x) + np.dot(self.Whh, self.h) + self.bh)\n y = np.dot(self.Why, self.h) + self.by\n p = np.exp(y) / np.sum(np.exp(y)) # Probabilities\n ix = self.char_to_ix[primer[i]]\n x = np.zeros((self.vocab_size, 1))\n x[ix] = 1\n return x", "title": "" }, { "docid": "769735c92a1ff5f88a73c1d640aa4793", "score": "0.52032566", "text": "def chisq_prob(data=None, model=None):\n chisq = np.sum(np.square(data - model) / model)\n p_value = np.exp(-0.5 * chisq)\n\n return p_value, chisq", "title": "" }, { "docid": "857773373d58102c233dcee263b77c6d", "score": "0.5193262", "text": "def log_prior(self, x):\n # Check if the values are in the priors bounds, will return -inf if not\n log_p = 
np.log(self.in_bounds(x), dtype=\"float\")\n # Uniform on x\n log_p -= np.log(self.bounds[\"x\"][1] - self.bounds[\"x\"][0])\n # Gaussian on y\n log_p += norm(scale=5).logpdf(x[\"y\"])\n return log_p", "title": "" }, { "docid": "85fefad4488eb215edc2f1120ee6f7d1", "score": "0.518961", "text": "def pred_evaluation(prepare_data, data, iterator):\n recall = 0.0\n mrr = 0.0\n evalutation_point_count = 0\n # pred_res = []\n # att = []\n\n for _, valid_index in iterator:\n x, mask, y = prepare_data([data[0][t] for t in valid_index],\n np.array(data[1])[valid_index])\n preds = sess.run(output_probs,feed_dict={input_items: x,target_items : y,input_item_mask :mask,keep_prob_1:1.0,keep_prob_2:1.0})\n # weights = f_weight(x, mask)\n targets = y\n ranks = (preds.T > np.diag(preds.T[targets])).sum(axis=0) + 1\n rank_ok = (ranks <= 20)\n\n # pred_res += list(rank_ok)\n recall += rank_ok.sum()\n mrr += (1.0 / ranks[rank_ok]).sum()\n evalutation_point_count += len(ranks)\n # att.append(weights)\n recall = numpy_floatX(recall) / evalutation_point_count\n mrr = numpy_floatX(mrr) / evalutation_point_count\n eval_score = (recall, mrr)\n return eval_score", "title": "" }, { "docid": "80bfcecff56948735b3b960991527f7a", "score": "0.5183773", "text": "def prior(hypercube):\n\n fitting_par = [0.0] * nfit\n for i, x in enumerate(hypercube):\n fitting_par[i] = PC_priors.UniformPrior(parameters_minmax[i,0], parameters_minmax[i,1])(x)\n\n return fitting_par", "title": "" }, { "docid": "035b4f9267cc79352c932415a871d77e", "score": "0.5176513", "text": "def get_conditional(self, x, Px, y, Py, Pxy, yo, Pyo):\n\n # Add measurement noise\n Py1 = Py + Pyo\n # Kalman gain\n print(Pxy)\n K = [email protected](Py)\n print(\"K\", K)\n \n x_prime = x + K@(yo - y)\n Px_prime = Px - K@[email protected]\n\n return x_prime, Px_prime", "title": "" }, { "docid": "421bf6edd0b56b25a75b427e109d58fa", "score": "0.51719713", "text": "def predictedRating(x, P, Q):\n #### TO DO\n p = P[x[0]-1]\n q = Q[x[1]-1]\n # q_t = q.T\n # q_t = q.transpose()\n predicted = np.matmul(p, q)\n l = predicted\n # print(l)\n # print(x[2])\n return (x[0],x[1],x[2],l)", "title": "" }, { "docid": "4c723c6b76f134210c91be7d64e09724", "score": "0.5169575", "text": "def check_preqs(preqs, metrics) -> List[CheckedPreq]:\n\n checked_preqs = []\n\n def add_preq(key: str, msg: str, error: bool, ctx: dict):\n checked_preqs.append(CheckedPreq(key=key, msg=msg, error=error, ctx=ctx))\n\n for pkey, rng_str in preqs.items():\n ctx = metrics.get(\"ctx\", {})\n mval = metrics.get(pkey, None)\n if mval is None:\n add_preq(\n key=pkey, error=True, ctx=ctx, msg=\"expected to be measured, but wasn't\"\n )\n continue\n\n try:\n rng = Range(rng_str)\n except InvalidRangeError as ex:\n add_preq(\n key=pkey, error=True, ctx=ctx, msg=f'invalid range \"{rng_str}\": {ex}'\n )\n continue\n\n if not rng.contains(mval):\n add_preq(\n key=pkey,\n error=True,\n ctx=ctx,\n msg=f\"{rng.format_val(mval)} in {rng} failed\",\n )\n continue\n\n add_preq(\n key=pkey,\n error=False,\n ctx=ctx,\n msg=f\"{rng.format_val(mval)} in {rng} satisfied\",\n )\n\n return checked_preqs", "title": "" }, { "docid": "87afda02ac70f0a95b5cdee2014fed08", "score": "0.5167855", "text": "def get_pdfvals(xarr, qarr, pdfset):\n lhapdf_pdf = lhapdf.mkPDF(pdfset)\n res = {}\n for x, q in zip(xarr, qarr):\n dict_update(res, lhapdf_pdf.xfxQ2(x, q))\n return res", "title": "" }, { "docid": "2a680069624b98fb0b0577dac28c4ee4", "score": "0.5161852", "text": "def posterior(x, n, P, Pr):\n if not isinstance(n, int) or (n <= 0):\n 
raise ValueError('n must be a positive integer')\n if not isinstance(x, int) or (x < 0):\n err = 'x must be an integer that is greater than or equal to 0'\n raise ValueError(err)\n if x > n:\n raise ValueError('x cannot be greater than n')\n if not isinstance(P, np.ndarray) or len(P.shape) != 1:\n raise TypeError('P must be a 1D numpy.ndarray')\n if np.any(P > 1) or np.any(P < 0):\n raise ValueError('All values in P must be in the range [0, 1]')\n if not isinstance(Pr, np.ndarray) or (P.shape != Pr.shape):\n err = 'Pr must be a numpy.ndarray with the same shape as P'\n raise TypeError(err)\n if np.any(Pr > 1) or np.any(Pr < 0):\n raise ValueError('All values in Pr must be in the range [0, 1]')\n\n E = np.sum(Pr)\n if not np.isclose(E, 1):\n raise ValueError('Pr must sum to 1')\n\n fact = (np.math.factorial(n)) / \\\n (np.math.factorial(x) * np.math.factorial(n - x))\n fact *= (np.power(P, x)) * (np.power((1 - P), (n - x)))\n inter = fact * Pr\n marg = np.sum(inter)\n return inter / marg", "title": "" }, { "docid": "891faa0c1aa22d38a02ef73613460574", "score": "0.5161048", "text": "def true_posterior_prob(self, thetas):\n return self.true_posterior.pdf(thetas)", "title": "" }, { "docid": "8cf1d78f0c1b7487c7d1eb1a71f0b5c8", "score": "0.51605046", "text": "def prob54():", "title": "" }, { "docid": "ae1791d42735332d54a9f8186b1bd15a", "score": "0.51601565", "text": "def posterior(x, n, p1, p2):\n if not isinstance(n, int) or n <= 0:\n raise ValueError('n must be a positive integer')\n if not isinstance(x, int) or x < 0:\n raise ValueError(\n 'x must be an integer that is greater than or equal to 0')\n if x > n:\n raise ValueError('x cannot be greater than n')\n if not isinstance(p1, float) or (p1 > 1) or (p1 < 0):\n raise ValueError('p1 must be a float in the range [0, 1]')\n if not isinstance(p2, float) or (p2 > 1) or (p2 < 0):\n raise ValueError('p2 must be a float in the range [0, 1]')\n if p2 <= p1:\n raise ValueError('p2 must be greater than p1')\n\n P1 = special.btdtr(x+1, n-x+1, p1)\n P2 = special.btdtr(x+1, n-x+1, p2)\n\n return P2-P1", "title": "" }, { "docid": "01227567adb8dc53729f3fc9fd65b0ed", "score": "0.5159808", "text": "def logprior(p):\n res = [_.logpdf(__) for _, __ in zip(priorList, p)]\n res = sum(res)\n if not np.isfinite(res):\n res = BADV\n return res", "title": "" }, { "docid": "80c8b271fa9d62c7166fd51f0e1740e5", "score": "0.51560634", "text": "def generate_preconditioner(self, f_all):\n hessian = []\n a = f_all.min.hess\n b = f_all.real.hess\n c = f_all.max.hess\n size = len(f_all.min.hess)\n for i in range(size):\n hessian.append([])\n for j in range(size):\n gmax = max(a[i,j],b[i,j],c[i,j])\n gmin = min(a[i,j],b[i,j],c[i,j])\n gmid = (gmin+gmax)*0.5\n hessian[i].append(gmid)\n hessian = np.asarray(hessian)\n return hessian", "title": "" }, { "docid": "7f853c8793702b1737badab05d415ae1", "score": "0.5148506", "text": "def classify(X,mu,sigma,Pi):\n N,D = X.shape\n K = len(Pi)\n return np.array([argmax([Pi[k]*norm_pdf_multivariate(X[n],mu[k],sigma[k]) for k in range(K)]) for n in range(N)])", "title": "" }, { "docid": "84828a4b289040f92710686d1cb2d5ec", "score": "0.5144026", "text": "def ProcessPQ(joints, marg, feature_length):\n #variable defining the heap\n pq = []\n\n for i in range(feature_length):\n\tfor j in range(i+1, feature_length):\n\t I = 0\n\t for x_u, p_x_u in marg[i].iteritems():\n\t\tfor x_v, p_x_v in marg[j].iteritems():\n\t\t if (x_u, x_v) in joints[(i, j)]:\n\t\t\tp_x_uv = joints[(i, j)][(x_u, x_v)]\n\t\t\tI += p_x_uv * (math.log(p_x_uv, 2) - 
math.log(p_x_u, 2) - math.log(p_x_v, 2))\n\t heapq.heappush(pq, (-I, i, j))\n \n return pq", "title": "" }, { "docid": "1778c91b326f790f0743f46d3aa0baf3", "score": "0.51428276", "text": "def get_q_probs(self):\n return F.softmax(self.q_logits, dim=0)", "title": "" }, { "docid": "36326af82d9a74250499eb2300a1bdf4", "score": "0.51424074", "text": "def ApproximatePrecessingFisherMatrixElementFactor(P, p1,p2,m,s,psd,phase_deriv_cache=None,omit_geometric_factor=False,break_out_beta=False,**kwargs):\n\n # Create basic infrastructure for IP. This requires VERY DENSE sampling...try to avoid\n # hF0 = lalsimutils.complex_hoff(P)\n # IP = lalsimutils.CreateCompatibleComplexIP(hF0,psd=psd,**kwargs)\n # dPsi2F_func = interp1d(fvals,dPsi2F, fill_value=0, bounds_error=False)\n # dalphaF_func= interp\n\n# print \" called on \", p1,p2, m, s\n\n # This geometric stuff is all in common. I should make one call to generate Sh once and for all\n beta_0 = P.extract_param('beta')\n thetaJN_0 = P.extract_param('thetaJN')\n mc_s = P.extract_param('mc')/lal.MSUN_SI *lalsimutils.MsunInSec\n d_s = P.extract_param('dist')/lal.C_SI # distance in seconds\n\n if omit_geometric_factor:\n geometric_factor=1\n else:\n geometric_factor = np.abs(lal.SpinWeightedSphericalHarmonic(thetaJN_0,0,-2,2,int(m)) * lal.WignerDMatrix(2,m,s,0,beta_0,0))**2\n if np.abs(geometric_factor) < 1e-5:\n return 0,0 # saves lots of time\n\n # Create phase derivatives. Interpolate onto grid? OR use lower-density sampling\n if phase_deriv_cache:\n if not (p1 in phase_deriv_cache.keys()):\n phase_deriv_cache[p1] = PhaseDerivativeSeries(P,p1)\n else:\n fvals,fmax_safe, dPsi2F,dPhiF, dalphaF,dgammaF = phase_deriv_cache[p1] \n if not (p2 in phase_deriv_cache.keys()):\n phase_deriv_cache[p2] = PhaseDerivativeSeries(P,p2)\n else:\n fvals,fmax_safe, dPsi2F_2,dPhiF_2, dalphaF_2,dgammaF_2 =phase_deriv_cache[p2] \n else:\n fvals,fmax_safe, dPsi2F,dPhiF, dalphaF,dgammaF = PhaseDerivativeSeries(P,p1)\n fvals,fmax_safe, dPsi2F_2,dPhiF_2, dalphaF_2,dgammaF_2 = PhaseDerivativeSeries(P,p2)\n\n dropme = np.logical_or(fvals <P.fmin, fvals>0.98*fmax_safe)\n\n Shvals = np.array(map(psd, np.abs(fvals)))\n Shvals[np.isnan(Shvals)]=float('inf')\n phase_weights = 4* (np.pi*mc_s*mc_s)**2/(3*d_s*d_s) *np.power((np.pi*mc_s*np.maximum(np.abs(fvals),P.fmin/2)),-7./3.)/Shvals # hopefully one-sided. 
Note the P.fmin removes nans \n phase_weights[np.isnan(phase_weights)]=0\n phase_weights[dropme] =0\n\n dalphaF[dropme] = 0\n dPsi2F[dropme] = 0\n dgammaF[dropme]=0\n\n dalphaF_2[dropme] = 0\n dPsi2F_2[dropme] = 0\n dgammaF_2[dropme]=0\n\n rhoms2 = np.sum(phase_weights*P.deltaF)*geometric_factor\n\n if (beta_0) < 1e-2: # Pathological, cannot calculate alpha or gamma\n ret_weights = P.deltaF*phase_weights*geometric_factor*( dPsi2F)*(dPsi2F_2)\n ret = np.sum(ret_weights)\n else:\n if not break_out_beta:\n ret_weights = P.deltaF*phase_weights*geometric_factor*( dPsi2F- 2 *dgammaF + m*s*dalphaF)*(dPsi2F_2 - 2*dgammaF_2+m*s*dalphaF_2)\n ret = np.sum(ret_weights)\n else:\n # Warming: this uses the explicit assumption \\gamma - -alpha cos beta\n # Warning: you probably never want this unless geometric_factor=1\n if not omit_geometric_factor:\n print(\" You are extracting a breakdown of the fisher matrix versus beta, but are not fixing the geometric factor...are you sure?\")\n ret_00 = np.sum(P.deltaF*phase_weights*geometric_factor*( dPsi2F)*(dPsi2F_2 ))\n ret_01 = np.sum(P.deltaF*phase_weights*geometric_factor*( dPsi2F)*(dalphaF_2 ))\n ret_10 = np.sum(P.deltaF*phase_weights*geometric_factor*( dalphaF)*(dPsi2F_2 ))\n ret_11 = np.sum(P.deltaF*phase_weights*geometric_factor*( dalphaF)*(dalphaF_2))\n print(\" -- submatrix \", p1,p2, ret_00, ret_01, ret_10, ret_11)\n return rhoms2,{\"00\":ret_00, \"01\":ret_01, \"10\": ret_10, \"11\": ret_11}\n\n# print \" Internal fisher element\", geometric_factor, rhoms2, ret\n # plt.plot(fvals,ret_weights)\n # plt.ylim(0,np.max(ret_weights))\n # plt.show()\n \n return rhoms2, ret", "title": "" }, { "docid": "d5a77645e4f56495b85092cd33e91714", "score": "0.51353604", "text": "def prob():\n nx = 2 #number of variables [x1, x2]\n np = 2 #number of parameters [p1, p2]\n neq = 0 #number of equality constraints\n niq = 2 #number of inequality constraints\n name = \"Problem 1\"\n return nx, np, neq, niq, name", "title": "" }, { "docid": "aad40a8d315c31681e6f22f707229fbc", "score": "0.512335", "text": "def parzen_estimation(x_samples, point_x, h):\n k_n = 0\n for row in x_samples:\n x_i = (point_x - row[:, np.newaxis]) / (h)\n for row in x_i:\n if np.abs(row) > (1 / 2):\n break\n else:\n k_n += 1\n return h, (k_n / len(x_samples)) / (h ** point_x.shape[1])", "title": "" }, { "docid": "19dd1e86d0dbd8a04ab880ab92b564bc", "score": "0.5118249", "text": "def gen_p(a, b, x1, x2):\n x = np.arange(x1, x2, 0.1)\n p = a*x*(b - x)\n return np.stack((x, p))", "title": "" }, { "docid": "3101507d4ff26c6124c0aaf2f717ce74", "score": "0.5116654", "text": "def _evaluate_values(props, h, P):\r\n output = []\r\n for i in range(len(h)):\r\n tmp = [h[i], P[i]]\r\n if 611.23 <= P[i] <= 100e6 and 1000. 
<= h[i] <= 4500e3:\r\n region = _region_ph(P[i], h[i])\r\n if region in [1, 2, 3]:\r\n for prop in props:\r\n if prop != 's':\r\n tmp.append(_eval(spl_objs[prop], h[i]*scaling['h'], P[i]*scaling['P']) / scaling[prop])\r\n else:\r\n if region == 1:\r\n tmp.append(0)\r\n elif region == 2:\r\n tmp.append(1)\r\n elif region == 3:\r\n tmp.append(-1)\r\n elif region == 4:\r\n hl, hv = _hl_p(P[i]), _hv_p(P[i])\r\n vtmp, ltmp = [hv], [hl]\r\n if hl == hv: x = 1.\r\n else: x = (h[i] - hl) / (hv - hl)\r\n Tl, Tv = _eval(spl_objs['T'], hl*scaling['h'], P[i]*scaling['P']) / scaling['T'], _eval(spl_objs['T'], hv*scaling['h'], P[i]*scaling['P']) / scaling['T']\r\n dl, dv = _eval(spl_objs['d'], hl * scaling['h'], P[i] * scaling['P']) / scaling['d'], _eval(spl_objs['d'], hv * scaling['h'], P[i] *scaling['P']) / scaling['d']\r\n vl, vv = _eval(spl_objs['v'], hl * scaling['h'], P[i] * scaling['P']) / scaling['v'], _eval(spl_objs['v'], hv * scaling['h'], P[i] *scaling['P']) / scaling['v']\r\n T = Tl + x * (Tv - Tl)\r\n d = dl * dv / (dv + x * (dl - dv))\r\n for prop in props:\r\n if prop == 'T':\r\n tmp.append(T)\r\n ltmp.append(Tl)\r\n vtmp.append(Tv)\r\n elif prop == 'd':\r\n tmp.append(d)\r\n ltmp.append(dl)\r\n vtmp.append(dv)\r\n elif prop == 'v':\r\n tmp.append(1./_visc(d,T))\r\n ltmp.append(vl)\r\n vtmp.append(vv)\r\n else: tmp.append(x)\r\n tmp.append(vtmp)\r\n tmp.append(ltmp)\r\n else:\r\n for prop in props:\r\n tmp.append(numpy.nan)\r\n else:\r\n for prop in props:\r\n tmp.append(numpy.nan)\r\n output.append(tmp)\r\n return output", "title": "" }, { "docid": "270d3da4d2f6d9601e522c79eb4401d6", "score": "0.5115687", "text": "def p_value(self):\n h = np.hstack(self.hist)\n f = np.hstack(self.fit_function(x=self.between, **self.coeff))\n b = h >= 5\n h = h[b]\n f = f[b]\n return chisquare(h, f, self.n_coeff).pvalue", "title": "" }, { "docid": "76e59bca3d25f8aa10435ef3f2563f6b", "score": "0.5115008", "text": "def e_step(gaussians, prior, data, posterior=None):\n if posterior==None: \n posterior = Probmap(prior)\n\n mask = prior.mask()\n for tissue in tissues: \n posterior[tissue][mask] = prior[tissue][mask] * gaussians[tissue].pdf(data)\n \n posterior.normalize()\n return posterior", "title": "" }, { "docid": "40a84fbd98b825679ccbec70b5ef4d66", "score": "0.5112961", "text": "def pred_evaluation(self, f_pred_prob, prepare_data, data, iterator):\n recall = 0.0\n mrr = 0.0\n evalutation_point_count = 0\n # pred_res = []\n # att = []\n \n for _, valid_index in iterator:\n x, mask, y = prepare_data([data[0][t] for t in valid_index],\n np.array(data[1])[valid_index])\n preds = f_pred_prob(x, mask)\n # weights = f_weight(x, mask)\n targets = y\n ranks = (preds.T > np.diag(preds.T[targets])).sum(axis=0) + 1\n rank_ok = (ranks <= 20)\n # pred_res += list(rank_ok)\n recall += rank_ok.sum()\n mrr += (1.0 / ranks[rank_ok]).sum()\n evalutation_point_count += len(ranks)\n # att.append(weights)\n \n recall = self.numpy_floatX(recall) / evalutation_point_count\n mrr = self.numpy_floatX(mrr) / evalutation_point_count\n eval_score = (recall, mrr)\n \n # ff = open('/storage/lijing/mydataset/res_attention_correct.pkl', 'wb')\n # pickle.dump(pred_res, ff)\n # ff.close()\n # ff2 = open('/storage/lijing/mydataset/attention_weights.pkl', 'wb')\n # pickle.dump(att, ff2)\n # ff2.close()\n \n return eval_score", "title": "" }, { "docid": "3b9f3320362fd87a531c8692771d3057", "score": "0.5109721", "text": "def get_predicted_probabilities(self, clf, test_x):\n if hasattr(clf, \"predict_proba\"):\n predicted_prob = 
clf.predict_proba(test_x)[:, 1]\n else:\n prob = clf.decision_function(test_x)\n predicted_prob = (prob - prob.min()) / (prob.max() - prob.min())\n\n return predicted_prob", "title": "" }, { "docid": "ab6d15cb14b8773dbf255bd4ace75318", "score": "0.5099645", "text": "def conditional_prob(self):\n return self.opt_exp", "title": "" }, { "docid": "3fb695102ffd54256a04f0b7220ae188", "score": "0.509872", "text": "def test_select_proportional_to_weight(self):\n th_rng = RandomStreams(12345)\n\n p = tensor.fmatrix()\n n = tensor.iscalar()\n m = th_rng.multinomial_wo_replacement(pvals=p, n=n)\n\n f = function([p, n], m, allow_input_downcast=True)\n\n n_elements = 100\n n_selected = 10\n mean_rtol = 0.0005\n numpy.random.seed(12345)\n pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)\n pvals /= pvals.sum(1)\n avg_pvals = numpy.zeros((n_elements,), dtype=config.floatX)\n\n for rep in range(10000):\n res = f(pvals, n_selected)\n res = numpy.squeeze(res)\n avg_pvals[res] += 1\n avg_pvals /= avg_pvals.sum()\n avg_diff = numpy.mean(abs(avg_pvals - pvals))\n assert avg_diff < mean_rtol", "title": "" }, { "docid": "893a9590b9282da583be185f837c91a2", "score": "0.5097317", "text": "def test_h_p(self):\n qs = QState(qubit_num=1).h(0).p(0, phase=0.25)\n actual = qs.amp\n expect = np.array([0.70710678, 0.5+0.5j])\n ans = equal_vectors(actual, expect)\n self.assertEqual(ans,True)", "title": "" }, { "docid": "9cbbda24fd499cba2d2c0851518153d0", "score": "0.5097116", "text": "def probability_improvement(x):\n x_scaled = self.scaler.transform(x.reshape(1, -1))\n too_close = np.array([True if np.linalg.norm(\n x_scaled[0][no_discrete] - p[no_discrete], -1) < 0.02\n else False for p in self.points]).any()\n if too_close:\n return np.inf\n\n pred, sigma = self.pred_sigma(x)\n pred = sign * pred\n std_dev = np.sqrt(sigma)\n pi = norm.cdf((target - pred) / std_dev)\n\n return - pi", "title": "" }, { "docid": "ea8cd74ca5ca4d4f64ebffb40709cf69", "score": "0.5093838", "text": "def setup():\n Q = pick_numbers()\n target = np.random.randint(1,1000)\n Q.sort()\n return Q, target", "title": "" }, { "docid": "ec20ddd7428fa09c1e8bc289e92f5bbb", "score": "0.5093456", "text": "def posterior(x, n, p1, p2):\n if type(n) is not int or n < 1:\n raise ValueError(\"n must be a positive integer\")\n if type(x) is not int or x < 0:\n err = \"x must be an integer that is greater than or equal to 0\"\n raise ValueError(err)\n if x > n:\n raise ValueError(\"x cannot be greater than n\")\n if type(p1) is not float or p1 < 0 or p1 > 1:\n raise ValueError(\"p1 must be a float in the range [0, 1]\")\n if type(p2) is not float or p2 < 0 or p2 > 1:\n raise ValueError(\"p2 must be a float in the range [0, 1]\")\n if p2 <= p1:\n raise ValueError(\"p2 must be greater than p1\")\n\n f1 = x + 1\n f2 = n - x + 1\n alfa = special.btdtr(f1, f2, p2)\n beta = special.btdtr(f1, f2, p1)\n return alfa - beta", "title": "" } ]
fa3504efd3ea644eaeb652cc41290809
Returns the energy (in keV) associated with a given X-ray line. For example, if xray_line = 'Mn_Ka' this function returns 5.8987
[ { "docid": "ddb0535509d1c85afca40d5ae14511c0", "score": "0.8575026", "text": "def _get_energy_xray_line(xray_line):\n element, line = _get_element_and_line(xray_line)\n return elements_db[element]['Atomic_properties']['Xray_lines'][\n line]['energy (keV)']", "title": "" } ]
[ { "docid": "d3e919af76ff67321b75206d1b772a8f", "score": "0.7023946", "text": "def line_energy(element,line):\n z = elementDB[element][\"Z\"]\n if not isinstance(line,LinePair):\n line = _lookupxlsubline(line)\n return xraylib.LineEnergy(z,line.subline)", "title": "" }, { "docid": "d9a8e10e1068b90fd3237243e4aedec8", "score": "0.631008", "text": "def energy(x):\n th1, th1d, th2, th2d = x.T\n # Potential\n V = -(M1+M2)*L1*G*np.cos(th1) - M2*L2*G*np.cos(th2)\n # Kinetic\n T = 0.5*M1* (L1*th1d)**2 + \\\n 0.5*M2*((L1*th1d)**2 + (L2*th2d)**2 + \\\n 2*L1*L2*th1d*th2d*np.cos(th1-th2))\n # Sum\n return T + V", "title": "" }, { "docid": "249b5de5d7e34b2e58c010c1d08d776a", "score": "0.62636083", "text": "def get_energy(input_line):\n start_extract = input_line.find('[')\n stop_extract = input_line.find(']')\n if stop_extract == -1 or start_extract == -1:\n return 0\n data = input_line[start_extract+1:stop_extract]\n values = data.split(', ')\n \n energy = int(values[2]) * (2**24) + int(values[3]) * (2**16) + int(values[4]) * (2**8) + int(values[5])\n energy = float(energy)/3\n regex = re.compile('.*source: (\\d+).*')\n matches = regex.match(input_line)\n if matches == None:\n return 0\n groups = matches.groups()\n if groups == None:\n return 0\n src = groups[0]\n \n return (src, energy,)", "title": "" }, { "docid": "230e63090ac4c1c6597d03582614d0b9", "score": "0.5991744", "text": "def calc_xrfdetector_efficiency(exptdesc,line_energy):\n \n if isinstance(line_energy, (list, tuple, np.ndarray)):\n trans_curve=np.ones_like(line_energy)\n else:\n trans_curve=1.0\n if isinstance(exptdesc, dict):\n try:\n detector = exptdesc[\"Acquisition_instrument\"][\"XRF\"][\"Detector\"]\n if(\"Attenuators\" in detector):\n adict = detector[\"Attenuators\"]\n for attenuator in adict.keys():\n thickness = adict[attenuator][\"thickness\"]\n composition = adict[attenuator][\"composition\"]\n if(composition in exptdesc[\"Materials\"]):\n density = exptdesc[\"Materials\"][composition][\"Density\"]\n else:\n density = exptdesc[\"Elements\"][composition][\"Density\"]\n # outgoing angle correction...\n # The thickness may be x microns but if x-rays aren't \n # normal to it (on average) there will be some correction \n \n mass_thickness = density*thickness\n cross_section=calcTotalMassAttenuationCoefficients(composition, \n 1.0 ,line_energy,exptdesc[\"Materials\"],\n exptdesc[\"Elements\"])\n if(attenuator!='sensor'):\n trans_curve = trans_curve * \\\n np.exp(-mass_thickness*cross_section)\n else:\n trans_curve = trans_curve * \\\n (1.0-np.exp(-mass_thickness*cross_section))\n return trans_curve\n \n except KeyError:\n print \"Main expt keys not present...Materials,Elements,\\\n Acquisition_instrument/XRF...returning ones \"\n return trans_curve\n \n else:\n return trans_curve", "title": "" }, { "docid": "3d366c0ac1fbab7de59df4be85c5b5cc", "score": "0.59856224", "text": "def hartree2ev(x):\n return x * HARTREE_TO_EV", "title": "" }, { "docid": "4fbdd2abbc7f5f9c0113a832256cd23f", "score": "0.58669424", "text": "def energy(self):\n return self.spectral_axis.to(u.eV, u.spectral())", "title": "" }, { "docid": "0c88723ee48867a4418567684e63eb41", "score": "0.58260596", "text": "def compute_mean_teagerkaiser_energy(x):\n import numpy as np\n\n if isinstance(x, list):\n x = np.asarray(x)\n\n tk_energy = np.mean((x**2)[1:-1] - x[2:] * x[:-2])\n\n return tk_energy", "title": "" }, { "docid": "0aeff2fb2d91bc16781a18743bc8ffff", "score": "0.5727968", "text": "def energy(self):\n return 0.5 * self.mass * (self.rtd1._x ** 2 + self.rtd1._y 
** 2)", "title": "" }, { "docid": "6145ab5563bbb35c2584081627b57633", "score": "0.5714193", "text": "def Energy( self, x):\n lp = 0.0 #Log posterior\n ca = x[0]\n ce = x[1]\n indx = 2\n for i, md in enumerate(self.mdlist):\n ################### a e s\n lp += md.LogPost( x[indx], x[indx+1], x[indx+2:(indx+md.n)])\n ei = x[indx+1]\n ##Hyper prior for E_i\n lp += (ca-1)*log(ei)-ei*(ca/ce)\n indx = indx+md.n\n ### The normalization constant in each hyper prior for E_i\n lp += self.L*(ca*log(ca) - ca*log(ce) - gammaln(ca))\n ### HERE THE HYPER-HYPER PRIOR FOR E ~ U(0,10) (constant), AND A ~exp(b)\n lp += self.Cte - ca/self.b\n return -lp #Return energy", "title": "" }, { "docid": "a929d2c18f112957e00659f102899084", "score": "0.57131916", "text": "def predict_local_energy(self, x_t: AtomicEnvironment) -> float:\n\n k_v = self.en_kern_vec(x_t)\n pred_mean = np.matmul(k_v, self.alpha)\n\n return pred_mean", "title": "" }, { "docid": "f2eb89b4ba309f6e75bb52021c4e3b80", "score": "0.56488097", "text": "def kernel(self):\n self.e, self.xy = self.__kernel__()\n self.e = self.e*nist.HARTREE2EV\n print('Excited State energies (eV) \\n', self.e)\n return self.e, self.xy", "title": "" }, { "docid": "0af4197e3b87ab0f224d2194a0f4c25c", "score": "0.5636421", "text": "def energy_elec(ks, dm_kpts=None, h1e_kpts=None, vhf=None):\n if h1e_kpts is None: h1e_kpts = ks.get_hcore(ks.cell, ks.kpts)\n if dm_kpts is None: dm_kpts = ks.make_rdm1()\n if vhf is None or getattr(vhf, 'ecoul', None) is None:\n vhf = ks.get_veff(ks.cell, dm_kpts)\n\n weight = getattr(ks.kpts, \"weights_ibz\",\n np.array([1.0/len(h1e_kpts),]*len(h1e_kpts)))\n e1 = np.einsum('k,kij,kji', weight, h1e_kpts, dm_kpts)\n tot_e = e1 + vhf.ecoul + vhf.exc + vhf.E_U\n ks.scf_summary['e1'] = e1.real\n ks.scf_summary['coul'] = vhf.ecoul.real\n ks.scf_summary['exc'] = vhf.exc.real\n ks.scf_summary['E_U'] = vhf.E_U.real\n logger.debug(ks, 'E1 = %s Ecoul = %s Exc = %s EU = %s', e1, vhf.ecoul,\n vhf.exc, vhf.E_U)\n return tot_e.real, vhf.ecoul + vhf.exc + vhf.E_U", "title": "" }, { "docid": "780f53e2340efefa9c06ea52937c8ee9", "score": "0.5621941", "text": "def energy(self):\n return 0.5 * self.mass * (self.vx ** 2 + self.vy ** 2)", "title": "" }, { "docid": "87ab10286dfb8e74bfb985a0d44912a0", "score": "0.5618177", "text": "def get_energy(self, x):\n # Get the checkpoints on either side of x, search by distance\n cp2_i = next(itertools.dropwhile(lambda elem: elem[1][1] < x, enumerate(self.valid_checkpoints)), [-1])[0]\n\n # Return muon energy before track begins, return 0 beyond track\n if cp2_i == 0:\n return self.checkpoints[0][0]\n if cp2_i < 0:\n return 0\n\n cp1_i = cp2_i - 1\n cp1 = self.valid_checkpoints[cp1_i]\n cp2 = self.valid_checkpoints[cp2_i]\n\n if x == cp1[1]:\n return cp1[0]\n if x == cp2[1]:\n return cp2[0]\n\n # Get the loss rate and losses between the checkpoints\n loss_rate = self.loss_rates[cp1_i]\n losses_begin, losses_end = self.loss_ranges[cp1_i]\n\n # Get the sum of losses between x and the checkpoint before x\n stoch_loss_since_cp1 = 0\n if losses_begin != losses_end:\n i_loss_before_x = next(itertools.dropwhile(lambda loss: loss[1][1] <= x, enumerate(self.losses[losses_begin:losses_end])), [losses_end-losses_begin])[0] - 1\n if i_loss_before_x >= 0:\n stoch_loss_since_cp1 = self.losses[losses_begin+i_loss_before_x][3]\n\n # (E at last cp) - (stoch losses since last cp) - (loss rate * distance from last cp)\n energy = cp1[0] - stoch_loss_since_cp1 - (x - cp1[1]) * loss_rate\n\n return energy", "title": "" }, { "docid": 
"0d099d01fda756a2547cabc3e87f6baf", "score": "0.5570865", "text": "def _E(self, x, vx):\n # note that v and x dot are not the same: v includes the y direction!\n return self.m*(self.g*self._h(x)+0.5*(1+self._dhdx(x)**2)*vx**2)", "title": "" }, { "docid": "744d705282f7ed4eb7bcc960b8aadacb", "score": "0.551524", "text": "def get_energy(lines):\n for line in reversed(lines):\n if 'CCSD(T) energy' in line:\n ccsdpt_energy = line.split()[2]\n if 'CCSD energy' in line:\n ccsd_energy = line.split()[2]\n if 'Total MP2 energy' in line:\n mp2_energy = line.split()[4]\n if 'E(SCF)=' in line:\n hf_energy = line.split()[1]\n\n return ccsdpt_energy, ccsd_energy, mp2_energy, hf_energy", "title": "" }, { "docid": "bae59b2be22580445f449c5fc7cb1c47", "score": "0.54818887", "text": "def EnergyVdot(x):\n # P1, P2, z1, z2, L, hp, Km, epsilon, rho, nu, g, boo = constants\n # f, Vdot, Re = variables \n P1 = constants[\"P1\"]\n rho = constants[\"rho\"]\n g = constants[\"g\"]\n z1 = constants[\"z1\"]\n hp = constants[\"hp\"]\n P2 = constants[\"P2\"]\n z2 = constants[\"z2\"]\n Km = constants[\"Km\"]\n L = constants[\"L\"]\n D = constants[\"D\"]\n return np.emath.sqrt((P1/(rho*g)+z1+hp-P2/(rho*g)-z2)/(x[0]*L/D+Km)*(math.pi**2*g*D**4)/8)", "title": "" }, { "docid": "f383575560fa3c78cbcfe444acdb0628", "score": "0.54405713", "text": "def energy(self, density):\n return 0.0", "title": "" }, { "docid": "783ce31ec43e455953bf4226d9231e2d", "score": "0.54314387", "text": "def GeV_to_km(x):\n return x*2*10**(-19)", "title": "" }, { "docid": "25d996aa7391af62afae6038be951a5c", "score": "0.5415439", "text": "def _get_element_and_line(xray_line):\n lim = xray_line.find('_')\n if lim == -1:\n raise ValueError(\"Invalid xray-line: %\" % xray_line)\n return xray_line[:lim], xray_line[lim + 1:]", "title": "" }, { "docid": "74dd33850c1aeadf22839f49da117dff", "score": "0.54123163", "text": "def line_search() -> float:\n prev_alpha = HW1.alpha0\n alpha = HW1.alpha0 + 1\n phi_alpha = HW1.phi(HW1.alpha0) # This is overwritten inside the loop\n\n phi_al_0 = HW1.phi(HW1.alpha0)\n d_phi_al_0 = HW1.d_phi(HW1.alpha0)\n for i in range(1, HW1.max_line_search_attempts + 1):\n prev_phi_alpha = phi_alpha\n\n phi_alpha = HW1.phi(alpha)\n if phi_alpha > phi_al_0 + HW1.C1 * alpha * d_phi_al_0 \\\n or (i > 1 and phi_alpha >= prev_phi_alpha):\n return HW1.zoom(prev_alpha, alpha)\n\n d_phi_alpha = HW1.d_phi(alpha)\n if abs(d_phi_alpha) <= -HW1.C2 * d_phi_al_0:\n return alpha\n if d_phi_alpha >= 0:\n return HW1.zoom(alpha, prev_alpha)\n\n prev_alpha = alpha\n alpha *= 2\n assert False", "title": "" }, { "docid": "a0f798ca5e05e7c74626a2c220fa3bdb", "score": "0.53941935", "text": "def energy_func(self, x, atomlist, I):\n # fetch options from command line\n (options,args) = parser.parse_args(self.getEnergies)\n (scf_options,args) = parser.parse_args(self.dftb2.runSCC)\n options.update(scf_options)\n atpos = XYZ.vector2atomlist(x, atomlist)\n self.setGeometry(atpos)\n self.getEnergies(**options)\n return self.Omega[I]", "title": "" }, { "docid": "9a7ee2850ccdbf9309260252dcd06cd2", "score": "0.5378639", "text": "def getLowGrayLevelEmphasisFeatureValue(self):\n pg = self.coefficients['pg']\n ivector = self.coefficients['ivector']\n Nz = self.coefficients['Nz']\n\n lgle = numpy.sum(pg / (ivector[None, :] ** 2), 1) / Nz\n return lgle", "title": "" }, { "docid": "5a4e732fe2d260bdde8e998fc21d8a0e", "score": "0.5353623", "text": "def energy_expr(self):\n\n pass", "title": "" }, { "docid": "e51b5cc9ef9b564030253a5bf2a3f284", "score": "0.5352645", "text": "def 
energy(frame):\n return np.sum(np.double(frame) ** 2)", "title": "" }, { "docid": "695fd3140c32005cff142943ab29dc9e", "score": "0.53491306", "text": "def xray_transmission(path_length_m, energy_kev, material='air stp'):\n\t\t\n\tcoefficients = mass_attenuation_coefficicent(energy_kev, material=material)\n\ttransmission = np.exp(-coefficients * density_cgs.get(material) * path_length_m * 100.0)\n\t\n\treturn transmission", "title": "" }, { "docid": "091ed37518044feb362d5c7914b3033e", "score": "0.5335636", "text": "def getHexEnergy(c, *args ):\n volume = args[0]\n qe = args[1]\n qe.structure.lattice.a = np.sqrt(volume/c)\n qe.structure.lattice.c = c\n qe.structure.saveStructureToPWSCF()\n qe.structure.lattice.printBase()\n# a = np.sqrt(volume/c)\n# geometry = ['Al 0.000000000 0.0000000000000000 0.000000000',\n# 'B 0.500000000 0.2886751345948129 '+str(c/a/2.),\n# 'B 0.000000000 0.5773502691896257 '+str(c/a/2.)]\n# varnameValue(task.pwscfInput, 'celldm(1)', a)\n# varnameValue(task.pwscfInput, 'celldm(2)', a)\n# varnameValue(task.pwscfInput, 'celldm(3)', c/a)\n# atomic_positions(task.pwscfInput, geometry)\n \n qe.pwscfLauncher()\n return qe.getTotalEnergy()[0]", "title": "" }, { "docid": "a46d2c63da3e52b485b98cc38dab5715", "score": "0.53311014", "text": "def cktelement_energymeter(self):\n result = ctypes.c_char_p(self.dssObj.CktElementS(ctypes.c_int32(4), ctypes.c_int32(0)))\n return result.value.decode('ascii')", "title": "" }, { "docid": "a508b2e8923c18b681ea588068f2ebbd", "score": "0.5327071", "text": "def get_x_at(y, line):\n x1, y1, x2, y2 = [float(a) for a in line]\n \n \n # Vertical line case, return one of the x's\n if (x1 == x2):\n return int(x1)\n \n # Calculate slope\n m = (y2 - y1) / (x2 - x1)\n \n # Check if line is horizontal before the next division\n if (m == 0):\n return y\n \n # Calculate the intercept, and x value given y.\n c = y2 - m * x2\n return int((y - c) / m)", "title": "" }, { "docid": "57e2cc25be55a8bc20fddeca5920f19a", "score": "0.5326914", "text": "def get_energies(filename):\n\n f = open(filename, \"r\")\n lines = f.readlines()\n f.close()\n\n energies = dict()\n\n for line in lines:\n tokens = line.split()\n\n xyz_name = tokens[0]\n hof = float(tokens[1]) - float(tokens[2])\n\n energies[xyz_name] = hof\n\n return energies", "title": "" }, { "docid": "87407792ea402d4b32fa2cadbe9fa48b", "score": "0.5324538", "text": "def edge_energy(element,shell):\n\n z = elementDB[element][\"Z\"]\n xlshell = _lookupxlshell(shell)\n return xraylib.EdgeEnergy(z,xlshell.shell)", "title": "" }, { "docid": "ed7697fb727721c0fdc8e5f5a58a99aa", "score": "0.5304294", "text": "def energy(self):\n\n e = self.compute_performance()\n e *= 10000\n\n return e", "title": "" }, { "docid": "f80b055988dd1e1372c29b913f8ee165", "score": "0.5301904", "text": "def get_k_energies_and_positions(eltname, spectrum):\n try:\n energy_kalpha = emission[eltname]['ka1']\n energy_kbeta = emission[eltname]['kb']\n except KeyError:\n raise KeyError(\"element identifier not found: \" + eltname)\n\n n_kalpha = np.argmax(spectrum)\n offset = n_kalpha + 20\n n_kbeta = np.argmax(spectrum[offset:]) + offset\n\n return energy_kalpha, energy_kbeta, n_kalpha, n_kbeta", "title": "" }, { "docid": "bcc22878b7c3dfac40f453330cf26a93", "score": "0.53016156", "text": "def energy(x, lmbd=1.0, ptype='log'):\n\n if ptype == 'quad':\n return lmbd * np.sum(x**2)\n elif ptype == 'log':\n return np.log(np.sqrt(np.sum(x**2, axis=1))) / lmbd\n else:\n raise RuntimeError(\"Invalid potential\")", "title": "" }, { "docid": 
"5c6e3cf5873288460a3bb018ee90daa3", "score": "0.5269032", "text": "def GetKineticEnergy(self):\n return 0.5 * np.dot(self.momentum, self.momentum) / self.mass", "title": "" }, { "docid": "eb0f47c87a4131fd7f06a6929d7a40f6", "score": "0.52613586", "text": "def find_energy(self):\n kin = .5*self.mu*self.v**2\n pot = -self.G(self.m_r*self.m_p)/(np.linalg.norm(self.r))\n self.E = kin + pot", "title": "" }, { "docid": "3683760246a7f6367671927ef5eee039", "score": "0.52603227", "text": "def energy(self):\n return (self.n + 0.5) * self.omega", "title": "" }, { "docid": "da1cf6712b59aa381190feefb33d95b1", "score": "0.52373976", "text": "def park1_euc(x):\n max_val = 25.5872304\n x1 = x[0]\n x2 = x[1]\n x3 = x[2]\n x4 = x[3]\n ret1 = (x1/2) * (np.sqrt(1 + (x2 + x3**2)*x4/(x1**2 + 0.00001)) - 1)\n ret2 = (x1 + 3*x4) * np.exp(1 + np.sin(x3))\n return min(ret1 + ret2, max_val)", "title": "" }, { "docid": "9d6cc8752b683dbe4896b692bd00cd26", "score": "0.522561", "text": "def avg_line_x(self, extremity):\n # I want to use this regardless of current page state\n if isinstance(self.children[0], Area):\n lines = []\n for area in self.children:\n lines.extend(area.children)\n if isinstance(self.children[0], Line):\n lines = self.children\n if extremity == 'START':\n x = [l.children[0].box.x0 for l in lines]\n if extremity == 'END':\n x = [l.children[-1].box.x1 for l in lines]\n return np.median(x)", "title": "" }, { "docid": "c2ac3257abf0b8c34cab22a67826923a", "score": "0.5211104", "text": "def x(self):\n return self._kml['x']", "title": "" }, { "docid": "181d6c5c54c4f5867437ec0a3991ba47", "score": "0.52106774", "text": "def en_kern_vec(self, x: AtomicEnvironment):\n\n ds = [1, 2, 3]\n size = len(self.training_data) * 3\n k_v = np.zeros(size, )\n\n for m_index in range(size):\n x_2 = self.training_data[int(math.floor(m_index / 3))]\n d_2 = ds[m_index % 3]\n k_v[m_index] = self.energy_force_kernel(x_2, x, d_2,\n self.hyps, self.cutoffs)\n\n return k_v", "title": "" }, { "docid": "8fdec80d7cc61ce4934a9647c922e3fd", "score": "0.5195022", "text": "def line(x):\n \t\treturn b * x + A", "title": "" }, { "docid": "01622c7e86dedf84575cfd26a43e7bfd", "score": "0.51923674", "text": "def energy(self):\n return self.mass * self.speed() * self.speed()", "title": "" }, { "docid": "5ecd2daafb3944e5e343ac2728c70b56", "score": "0.5192153", "text": "def kinenergy(p,m=0.511):\n ene = energy(p,m)\n te = ene-m\n return te", "title": "" }, { "docid": "8bc65518fa2c641ee0c4d46d40153001", "score": "0.5190681", "text": "def h_value(cls, line, m):\n for x1, y1, x2, y2 in line:\n assert (x1 != x2)\n return y1 + (m - x1) * (y2 - y1) / (x2 - x1)", "title": "" }, { "docid": "acfc2b86a8de2c784518956aade51cfb", "score": "0.5189933", "text": "def getSmallDependenceLowGrayLevelEmphasisFeatureValue(self):\n ivector = self.coefficients['ivector']\n jvector = self.coefficients['jvector']\n Nz = self.coefficients['Nz']\n\n sdlgle = numpy.sum(self.P_gldm / ((ivector[None, :, None] ** 2) * (jvector[None, None, :] ** 2)), (1, 2)) / Nz\n return sdlgle", "title": "" }, { "docid": "5ad7399bd8479c8b83c7efbb8fe901a4", "score": "0.51886326", "text": "def calcStrainEnergy(u_el, K_el) -> float:\n return u_el.T @ K_el @ u_el", "title": "" }, { "docid": "d7448919359568982c71e5f4396a61f9", "score": "0.5180682", "text": "def calc_escape_peak_ratios(lineEnergy,detectorelement='Si'):\n \n if(detectorelement=='Si'):\n #\n # For Si the K peak is 95% of the transition\n # and the photoionization to total cross section is ~ 95% \n # Si escape peak is typically only 
0.2-1% (bigger at lower energies) \n #\n jump = xraylib.JumpFactor(14,xraylib.K_SHELL)\n fluy = xraylib.FluorYield(14,xraylib.K_SHELL)\n corr = fluy*(jump-1.0)/jump\n corr_photo = \\\n xraylib.CS_Photo(14,lineEnergy)/xraylib.CS_Total(14,lineEnergy)\n corr_trans = xraylib.RadRate(14,xraylib.KA_LINE)\\\n +xraylib.RadRate(14,xraylib.KB_LINE)\n mu_si= xraylib.CS_Total(14,lineEnergy)\n mu_internal = xraylib.CS_Total(14,1.73998)\n r = mu_internal/mu_si\n eta = corr_trans*corr_photo*corr*0.5*(1.0-r*log(1.0+1.0/r))\n ratio = eta/(1.0-eta)\n #\n # escape peak sigma should be narrower than the main peak.\n #\n return ratio\n else:\n # \n # Ge detector...\n # Ge has a large escape peak ratio ~ 5-15% and a Ka and kb component\n #\n if(lineEnergy < 11.5):\n return 0.0,0.0\n jump = xraylib.JumpFactor(32,xraylib.K_SHELL)\n fluy = xraylib.FluorYield(32,xraylib.K_SHELL)\n corr = fluy*(jump-1.0)/jump\n corr_photo = \\\n xraylib.CS_Photo(32,lineEnergy)/xraylib.CS_Total(32,lineEnergy)\n corr_trans_ka = xraylib.RadRate(32,xraylib.KA_LINE)\n corr_trans_kb =xraylib.RadRate(32,xraylib.KB_LINE)\n mu_ge= xraylib.CS_Total(32,lineEnergy)#\n # one for the Ka and one for the Kb peak...\n mu_internal_ka = \\\n xraylib.CS_Total(32,xraylib.LineEnergy(32,xraylib.KA_LINE))\n r_ka = mu_internal_ka/mu_ge\n eta_ka = corr_trans_ka*corr_photo*corr*0.5*(1.0-r_ka*log(1.0+1.0/r_ka))\n ratio_ka = eta_ka/(1.0-eta_ka)\n\n mu_internal_kb = \\\n xraylib.CS_Total(32,xraylib.LineEnergy(32,xraylib.KB_LINE))\n r_kb = mu_internal_kb/mu_ge\n eta_kb = corr_trans_kb*corr_photo*corr*0.5*(1.0-r_kb*log(1.0+1.0/r_kb))\n ratio_kb = eta_kb/(1.0-eta_kb)\n\n return ratio_ka,ratio_kb", "title": "" }, { "docid": "708e58948affff4c743dce881075879b", "score": "0.51802236", "text": "def _get_xray_lines_family(xray_line):\n return xray_line[:xray_line.find('_') + 2]", "title": "" }, { "docid": "b9b4a790fa79b335348334c5074fe23f", "score": "0.51762104", "text": "def erfc(x: float) -> float:\n ...", "title": "" }, { "docid": "af10012bdd8d8507b0e2d9347f27fe0f", "score": "0.51752347", "text": "def _energy_func(self, x, atomlist, which_en):\n atpos = XYZ.vector2atomlist(x, atomlist)\n self.setGeometry(atpos)\n self.getEnergy()\n # UGLY: getEnergies() is called twice, first inside getEnergy() and then here again\n if which_en == \"Ebs\":\n en = self.getEnergies()[0] # band structure gradient\n elif which_en == \"Ecoulomb\":\n en = self.getEnergies()[1]\n elif which_en == \"Eelec\":\n en = self.getEnergies()[2]\n elif which_en == \"Enuc\":\n en = self.getEnergies()[3]\n elif which_en == \"Etot\": \n en = self.getEnergies()[4]\n return en", "title": "" }, { "docid": "59f5f691ded5df8ef79f97d7e092ea64", "score": "0.5165331", "text": "def hx(x):\n\n return (x[0]**2 + x[2]**2)**.5", "title": "" }, { "docid": "e597909c75f0ed61d1781da82df4a407", "score": "0.5157375", "text": "def get_energy(pardict):\n return orm.Float(pardict['EtotRyd'] * 13.605693122994)", "title": "" }, { "docid": "7a6f1dd86c431306fee8e72f7bea80aa", "score": "0.5155683", "text": "def _compute_sp_energies(self, path, **kwds):\n xtbcalc = XtbCalculator(\n charge=self._charge, spin=self._spin, opt=False\n ) # Add ekstra kwds such as solvent\n path_sp_energies = []\n for path_point in path:\n sp_results = xtbcalc(\n self._atmoic_symbols, path_point, namespace=\"sp_calc\"\n )\n path_sp_energies.append(sp_results['energy'][\"elec_energy\"])\n return np.asarray(path_sp_energies)", "title": "" }, { "docid": "e21a0575165008019852a2f8e3e478c3", "score": "0.5149577", "text": "def 
getHighGrayLevelEmphasisFeatureValue(self):\n pg = self.coefficients['pg']\n ivector = self.coefficients['ivector']\n Nz = self.coefficients['Nz']\n\n hgle = numpy.sum(pg * (ivector[None, :] ** 2), 1) / Nz\n return hgle", "title": "" }, { "docid": "8bb544ff327ca2db4a1f32f055babb8c", "score": "0.5148058", "text": "def get_energy(self,uav_mass):\n if self.energy is None:\n self.energy = self.get_length3d() * uav_mass * 9.81 * self.height\n return self.energy", "title": "" }, { "docid": "8c3bb71ca6be7407c48724c6d04a8867", "score": "0.5134457", "text": "def v_value(cls, line, m):\n for x1, y1, x2, y2 in line:\n assert (y1 != y2)\n return x1 + (m - y1) * (x2 - x1) / (y2 - y1)", "title": "" }, { "docid": "9ebf17dd64556fae6f6616c0566d0ea2", "score": "0.5124515", "text": "def get_total_energy(self):\n results = self._node.xpath( \"energy/i[@name='e_fr_energy']\")\n if results:\n return float(results[0].text)\n else:\n raise LookupError('Value not found')", "title": "" }, { "docid": "e94a8614d2b0025dfa9f51ff34629041", "score": "0.50993466", "text": "def get_energy(file_path):\n\n f = open(file_path, 'r')\n for line in f:\n if 'SCF Done' in line:\n l = line.split()\n f.close()\n return float(l[4])", "title": "" }, { "docid": "fcffc26826a7e25f978040a334bf6472", "score": "0.5095197", "text": "def getX0(R):\n print(\"Enter the entries of x0 in a single line ({} values separated by spaces) : \".format(R))\n # User input of entries in a\n # single line separated by space\n entries = list(map(float, input().split()))\n vector = np.array(entries).reshape(R, 1)\n print(\"You entered:\\n {}\".format(vector))\n return vector", "title": "" }, { "docid": "76ddac358658422df912475ae8fe4189", "score": "0.50913167", "text": "def energy(density, coeff=1.0):\n\n # Imports\n from numpy import array, sum\n \n # Assign density list to an numpy array\n density = array(density)\n\n # Check density\n if density.ndim != 1:\n raise ValueError(\"Density should be an a *1-dimensional* array.\")\n\n if any(density < 0):\n raise ValueError(\"Density should be an array of *positive integers*.\")\n\n if density.dtype.kind != 'i' and len(density) > 0:\n raise TypeError(\"Density should be an array of *integers*.\")\n\n # Main code in return\n return coeff * sum(density * (density - 1)) / 2", "title": "" }, { "docid": "27c1ed1e2e2b1ddc682ee0e3b332f005", "score": "0.5081118", "text": "def erfcx(x):\n return exp(x**2)*(1-erf(x))", "title": "" }, { "docid": "d3a8f3017830179280ee379caedbbc58", "score": "0.5079506", "text": "def get_atomization_energy(name):\n assert name in extra or name in g2\n d = data[name]\n e = d['enthalpy']\n z = d['ZPE']\n dh = d['thermal correction']\n ae = -e + z + dh\n for a in string2symbols(d['symbols']):\n h = data[a]['enthalpy']\n dh = data[a]['thermal correction']\n ae += h - dh\n return ae", "title": "" }, { "docid": "64c91b6a6c7c6436dcf9265b7a5f2564", "score": "0.5079131", "text": "def get_energy(phi_variable):\n return (- _MU_0 * msi * h * cos(phi_variable) * sign\n + k * (sin(phi_variable - theta)**2)\n - j_n * _MU_0 * msi * msi_n * cos(phi_variable - phi_n)\n - j_p * _MU_0 * msi * msi_p * cos(phi_variable - phi_p))", "title": "" }, { "docid": "dc1a92599297742f27dc7418b012ce96", "score": "0.50788057", "text": "def evaluate_epsilon(L_disk, M_BH, eta, R_tilde):\n M_8 = (M_BH / (1e8 * M_sun)).to_value(\"\")\n L_Edd = 1.26 * 1e46 * M_8 << u.Unit(\"erg s-1\")\n l_Edd = (L_disk / L_Edd).to_value(\"\")\n xi = np.power(l_Edd / (M_8 * eta), 1 / 4)\n return 2.7 * 1e-4 * xi * np.power(R_tilde, -3 / 4)", "title": 
"" }, { "docid": "4e57345c39206f9f3c311003ca75ebbe", "score": "0.50734854", "text": "def kinetic_energy_history(self):\n return (self.velocity_history**2).sum(axis=-1) * self.eff_m / 2", "title": "" }, { "docid": "e570fb41815681874e39ff228bbcf245", "score": "0.5069858", "text": "def getDispersionEnergy(self):\n if hasattr(self, \"dispersion\"):\n return self.dispersion.getEnergy(self.atomlist)\n else:\n return 0.0", "title": "" }, { "docid": "396485a769689e33fac251c4c0341283", "score": "0.5060783", "text": "def K_iso(self):\n return numpy.trace(self.K)/3.0", "title": "" }, { "docid": "851f598b746e688de445ab93d4f4c38f", "score": "0.5051419", "text": "def park2_euc(x):\n max_val = 5.925698\n x1 = x[0]\n x2 = x[1]\n x3 = x[2]\n x4 = x[3]\n ret = (2.0/3.0) * np.exp(x1 + x2) - x4*np.sin(x3) + x3\n return min(ret, max_val)", "title": "" }, { "docid": "9cc237897c05fab6510d2dbbd0ce288f", "score": "0.5048679", "text": "def E1(x):\n A0=-0.57721566\n A1= 0.99999193\n A2=-0.24991055\n A3= 0.05519968\n A4=-0.00976004\n A5= 0.00107857\n B1=8.5733287401\n B2=18.059016973\n B3=8.6347608925\n B4=0.2677737343\n C1=9.5733223454\n C2=25.6329561486\n C3=21.0996530827\n C4=3.9584969228\n \n x2=x**2\n x3=x**3\n x4=x**4\n x5=x**5\n ep1A=-jnp.log(x)+A0+A1*x+A2*x2+A3*x3+A4*x4+A5*x5\n ep1B=jnp.exp(-x)/x*\\\n (x4+B1*x3+B2*x2+B3*x+B4)/\\\n (x4+C1*x3+C2*x2+C3*x+C4)\n ep=jnp.where(x<=1.0, ep1A, ep1B)\n return ep", "title": "" }, { "docid": "ac47782cd2a6c901371efedec2f888ba", "score": "0.5040625", "text": "def erfint(x):\n return x * erf(x) - 1.0 / np.sqrt(np.pi) * (1.0 - np.exp(-x**2))", "title": "" }, { "docid": "c3ab5ca24e3f959332754db830950b6c", "score": "0.50399417", "text": "def getLargeDependenceLowGrayLevelEmphasisFeatureValue(self):\n ivector = self.coefficients['ivector']\n jvector = self.coefficients['jvector']\n Nz = self.coefficients['Nz']\n\n ldlgle = numpy.sum(self.P_gldm * (jvector[None, None, :] ** 2) / (ivector[None, :, None] ** 2), (1, 2)) / Nz\n return ldlgle", "title": "" }, { "docid": "52a216846ebc86322b40c3cb2f413253", "score": "0.50382006", "text": "def energy(data):\n N = len(data)\n KE = 0.5*sum([data[i][2]**2+data[i][3]**2 for i in range(N)])\n PE = sum(sum([[-1.0/(d(data[i][0:2],data[j][0:2])) for i in range(j)] for j in range(N)]))\n return KE+PE", "title": "" }, { "docid": "e3e16aee7ab3c3cac9599f2e30c14dab", "score": "0.5036348", "text": "def findElasticLinePosition(spectrum,tofRange=(0,5000)):\n peak = FindHighestPeak(spectrum,tofRange=tofRange,dx=dt) #find the strongest peak in the TOF range\n #Fit the line to a Gaussian. 
Initial guess from output from FindHighestPeak\n try:\n centre = peak['centre']; sigma = peak['width']/2.35 #From FWHM to standard deviation\n except TypeError:\n print 'could not find peak for spectrum index ',spectrum.getDetector(0).getID()\n raise\n startX = centre-sigma; endX = centre+sigma #set the fitting boundaries\n funcStr = 'name=Gaussian, Height=%f, PeakCentre=%f, Sigma=%f'%(peak['height'], centre, sigma)\n gfit = Fit(funcStr, spectrum.getName(), StartX=startX, EndX=endX, CreateOutput='1')\n #Retrieve the optimized fitting parameters\n fittedParams = { 'Height':0, 'PeakCentre':0, 'Sigma':0}\n for iRow in range( gfit[3].rowCount() - 1):\n row = gfit[3].row( iRow )\n fittedParams[ row['Name'] ] = row['Value']\n return fittedParams['PeakCentre'], fittedParams['Height'],fittedParams['Sigma']", "title": "" }, { "docid": "744c8f8af57f48f78b88d253f92e53e6", "score": "0.5031463", "text": "def read_energy(self):\n with open(self.label + '/deMon.out', 'r') as f:\n text = f.read().upper()\n\n lines = iter(text.split('\\n'))\n\n for line in lines:\n if line.startswith(' TOTAL ENERGY ='):\n self.results['energy'] = float(line.split()[-1]) * Hartree\n break\n else:\n raise RuntimeError", "title": "" }, { "docid": "b8e92bfba2b26c039a6450e89b3c4a39", "score": "0.5015484", "text": "def KE(c, x, m, h, v, u):\n xc, mc, vc, hc = x[c], m[c], v[c], h[c]\n\n ## velocity w.r.t. com velocity of clump\n v_well = vc - np.average(vc, weights=mc,axis=0)\n vSqr = np.sum(v_well**2,axis=1)\n return (mc*(vSqr/2 + u[c])).sum()", "title": "" }, { "docid": "22f441446e03d9a78a2703fc679308ae", "score": "0.50140053", "text": "def energy(data):\n energy = np.sum(data ** 2) / len(data)\n return energy", "title": "" }, { "docid": "996fade952f2cfd18f6a095dacedc1f3", "score": "0.5008575", "text": "def resolution(self, energies_in_kev):\n return 0.6 * np.sqrt(energies_in_kev)", "title": "" }, { "docid": "9ced5e96cc232e3f1461bb0ef56c9f49", "score": "0.5008167", "text": "def resolution(self, energies_in_kev):\n return 0.7 * np.sqrt(energies_in_kev)", "title": "" }, { "docid": "fd0e87e8999fb994258b664b71da30e8", "score": "0.5004573", "text": "def nm2eV(E_nm):\n E_freq = c / E_nm * 1e9\n E_J = E_freq * h\n E_eV = E_J / Cb\n return E_eV", "title": "" }, { "docid": "fce146f15db02bb71d70b434576caf04", "score": "0.4991882", "text": "def energy_density(self):\n self._H_func.vector()[:] = self.compute_field()\n nodal_E = df.assemble(self._nodal_E).array() * self.unit_length ** self.dim\n return nodal_E / self._nodal_volumes", "title": "" }, { "docid": "3862d0ad180f7dc045f45f6aa1dffff7", "score": "0.4978468", "text": "def er(self,wl):\n n,k = self.n(wl)\n return (n + 1j*k)**2", "title": "" }, { "docid": "107b2466be284dd9e402ee6fab74a328", "score": "0.49767685", "text": "def getMeanIonizationEnergy_eV(atomicNumber):\n\n if atomicNumber <= 13.0:\n Value = 11.5*atomicNumber\n else:\n if math.pow(atomicNumber, 0.19) > 0.0:\n Value = 9.76*atomicNumber + 58.8/math.pow(atomicNumber, 0.19)\n else:\n Value = 0.0\n\n return Value", "title": "" }, { "docid": "5c5ec7d63ab2da699c9c7e7533cd2607", "score": "0.4974487", "text": "def kinetic_energy(m:'in KG', v:'in M/S')->'Joules':\n \n return 1/2*m*v**2", "title": "" }, { "docid": "7ac77bb5db19c83f47e76da7234b8cbc", "score": "0.49684364", "text": "def K(self):\n return numpy.array(self.magres_isc['K'])", "title": "" }, { "docid": "22e9670fe9442422558a698ca1be0a8e", "score": "0.49683946", "text": "def x(self):\n return self['x']", "title": "" }, { "docid": "a4421409adb026e24061aa5a65412a6b", "score": 
"0.49642023", "text": "def kinetic_energy(self):\n # Apply atom mask\n momenta = self.momenta * self.atom_masks\n\n kinetic_energy = 0.5 * torch.sum(\n torch.sum(momenta ** 2, 3) / self.masses[..., 0], 2\n )\n return kinetic_energy.detach()", "title": "" }, { "docid": "400daf6e04a1562afeb86363f1940283", "score": "0.49579078", "text": "def _traget_energy_function(self, x, lambda_value: float = 0.0):\n x = np.asarray([x.reshape(-1, 3)]) * unit.angstrom\n force_energy = self.calculate_force(x, lambda_value)\n F_flat = -np.array(\n force_energy.force.value_in_unit(\n unit.kilojoule_per_mole / unit.angstrom\n ).flatten(),\n dtype=np.float64,\n )\n self.memory_of_energy.append(force_energy.energy)\n self.memory_of_restrain_contribution.append(\n force_energy.restraint_energy_contribution\n )\n return (force_energy.energy.value_in_unit(unit.kilojoule_per_mole), F_flat)", "title": "" }, { "docid": "8bc064a549a2b0375131e991e795b456", "score": "0.49577746", "text": "def error(line,data):\n \n err = np.sum((data[:,1]-line[0]*data[:,0]-line[1])**2)\n \n return err", "title": "" }, { "docid": "0b69cb0aa26e21b885b5c0c78d7db3cf", "score": "0.49555", "text": "def K(x_0, x):\n\tret = 1/np.sqrt(2*np.pi)*np.exp(-(x_0*np.ones(x.shape[0]) - x)**2/2)\n\treturn ret", "title": "" }, { "docid": "2773cef6ad4a90ea883a665c4dd79f1b", "score": "0.49485254", "text": "def get_ee_energy_density(self):\n if self.ee_energy_density is None:\n self.ee_energy_density = self.get_ha_energy_density()\\\n + self.get_fx_energy_density()\n return self.ee_energy_density", "title": "" }, { "docid": "2773cef6ad4a90ea883a665c4dd79f1b", "score": "0.49485254", "text": "def get_ee_energy_density(self):\n if self.ee_energy_density is None:\n self.ee_energy_density = self.get_ha_energy_density()\\\n + self.get_fx_energy_density()\n return self.ee_energy_density", "title": "" }, { "docid": "2d0a98bff6f9cb46f2269173ab447b31", "score": "0.49450573", "text": "def ev2kcalmol(x):\n return x * EV_TO_KCALMOL", "title": "" }, { "docid": "c5d6dec265d058233ebc3b00e459e1ca", "score": "0.49439126", "text": "def kinetic_energy(self):\n return 0.5*self.mass*self.vel**2", "title": "" }, { "docid": "47fd66c4b04d3a28cc53496792f49ed8", "score": "0.49437764", "text": "def energy(self, n):\n mass = 1.0\n return (n**2 * self.hbar**2 * np.pi**2) / (2.0 * mass * self.length**2)", "title": "" }, { "docid": "94071f200353e2e54c2a5411f2438cb2", "score": "0.4940596", "text": "def HAtomEnergy(n,Z=1):\n E = c.m_e * np.power(c.c,2) / 2. 
* np.power(c.alpha,2) * np.power(Z,2)/np.power(n,2)\n E = -E.to(u.eV)\n return E", "title": "" }, { "docid": "08ea87a0ec4a4bbb3ae472f2234221b2", "score": "0.49387676", "text": "def get_xray_lines_near_energy(energy, width=0.2, only_lines=None):\n only_lines = _parse_only_lines(only_lines)\n valid_lines = []\n E_min, E_max = energy - width / 2., energy + width / 2.\n for element, el_props in elements_db.items():\n # Not all elements in the DB have the keys, so catch KeyErrors\n try:\n lines = el_props['Atomic_properties']['Xray_lines']\n except KeyError:\n continue\n for line, l_props in lines.items():\n if only_lines and line not in only_lines:\n continue\n line_energy = l_props['energy (keV)']\n if E_min <= line_energy <= E_max:\n # Store line in Element_Line format, and energy difference\n valid_lines.append((element + \"_\" + line,\n np.abs(line_energy - energy)))\n # Sort by energy difference, but return only the line names\n return [line for line, _ in sorted(valid_lines, key=lambda x: x[1])]", "title": "" }, { "docid": "2a4722231918385ffa66baf81a4eb22e", "score": "0.493853", "text": "def energia(m: float, c: float) -> float:\r\n \"\"\"Luis Angel Vealazquez Jimenez\"\"\"\r\n \"\"\"GITI7092e\"\"\"\r\n opera = float\r\n opera = m * (c * c)\r\n return opera", "title": "" }, { "docid": "881d977d09904d4d5ab4a676f4f4bd82", "score": "0.49368802", "text": "def getLargeDependenceHighGrayLevelEmphasisFeatureValue(self):\n ivector = self.coefficients['ivector']\n jvector = self.coefficients['jvector']\n Nz = self.coefficients['Nz']\n\n ldhgle = numpy.sum(self.P_gldm * ((jvector[None, None, :] ** 2) * (ivector[None, :, None] ** 2)), (1, 2)) / Nz\n return ldhgle", "title": "" }, { "docid": "af7947bea3234122379adbc551d15221", "score": "0.49359745", "text": "def energy(self):\n return self.discrete_op.energy", "title": "" } ]
03e7c53a79ac094226a73f5970f9b23b
Utility function to get the timeout value used in requests
[ { "docid": "c8799e09a6258eab949006f89eb0cde2", "score": "0.81352955", "text": "def _timeout(self, timeout):\n if timeout is None:\n return self._request_timeout\n return timeout", "title": "" } ]
[ { "docid": "22dfdd2cc0a669d48d7d0ff56ef6578b", "score": "0.8522555", "text": "def gettimeout(self): # real signature unknown; restored from __doc__\n return timeout", "title": "" }, { "docid": "ddc80f25e3f51af1150befad01524a0a", "score": "0.8129886", "text": "def get_timeout(self):\n return self._get_timeout()", "title": "" }, { "docid": "1119d2363cc3ffbc1e0f0d770186e5c3", "score": "0.8098538", "text": "def get_timeout(self):\n return self.timeout", "title": "" }, { "docid": "bd86d1cf5da2fd3a2aa0b6f4c3e312e5", "score": "0.80850226", "text": "def request_timeout(self) -> Optional[int]:\n return pulumi.get(self, \"request_timeout\")", "title": "" }, { "docid": "a22d53d02edfa2f14a3eea72c6778cff", "score": "0.8067339", "text": "def getdefaulttimeout(): # real signature unknown; restored from __doc__\n return timeout", "title": "" }, { "docid": "f95eb10bb25e6061d2ca929427113219", "score": "0.8016301", "text": "def timeout(self):\n return self.data.get('timeout')", "title": "" }, { "docid": "d0010cdcd065dddc301a53e3df0ace5f", "score": "0.80120677", "text": "def get_timeout(self):\n return self.socket.get_timeout()", "title": "" }, { "docid": "30dca7cfab1ade3067967ed2a8a87d99", "score": "0.8003331", "text": "def timeout(self) -> int:\n return pulumi.get(self, \"timeout\")", "title": "" }, { "docid": "30dca7cfab1ade3067967ed2a8a87d99", "score": "0.8003331", "text": "def timeout(self) -> int:\n return pulumi.get(self, \"timeout\")", "title": "" }, { "docid": "30dca7cfab1ade3067967ed2a8a87d99", "score": "0.8003331", "text": "def timeout(self) -> int:\n return pulumi.get(self, \"timeout\")", "title": "" }, { "docid": "30dca7cfab1ade3067967ed2a8a87d99", "score": "0.8003331", "text": "def timeout(self) -> int:\n return pulumi.get(self, \"timeout\")", "title": "" }, { "docid": "bfea1d13ea2fab18e3a48c3b4212e970", "score": "0.7936762", "text": "def get_timeout(self):\n return None", "title": "" }, { "docid": "60df2e1c11d4cfcabb259e52272a5d41", "score": "0.7925852", "text": "def get_timeout(self):\n return self._socket.gettimeout()", "title": "" }, { "docid": "d5c4c64becb812e0f2316b73a1f821a1", "score": "0.78546286", "text": "def get_timeout(self):\n return self.config['timeout']", "title": "" }, { "docid": "78b8f9d8b8edb2eeaf879bd4a9a2b7d8", "score": "0.7758228", "text": "def timeout(self):\n return self.__timeout", "title": "" }, { "docid": "d4bd040bf4ced14f3c63b2422ec51cba", "score": "0.7730424", "text": "def timeout(self):\n return self._timeout", "title": "" }, { "docid": "c1a995d0f7fe96cdbb30917e0fd17750", "score": "0.77082044", "text": "def timeout(self) -> Optional[int]:\n return pulumi.get(self, \"timeout\")", "title": "" }, { "docid": "c1a995d0f7fe96cdbb30917e0fd17750", "score": "0.7707487", "text": "def timeout(self) -> Optional[int]:\n return pulumi.get(self, \"timeout\")", "title": "" }, { "docid": "c1a995d0f7fe96cdbb30917e0fd17750", "score": "0.7707487", "text": "def timeout(self) -> Optional[int]:\n return pulumi.get(self, \"timeout\")", "title": "" }, { "docid": "c1a995d0f7fe96cdbb30917e0fd17750", "score": "0.7707487", "text": "def timeout(self) -> Optional[int]:\n return pulumi.get(self, \"timeout\")", "title": "" }, { "docid": "c1a995d0f7fe96cdbb30917e0fd17750", "score": "0.7707487", "text": "def timeout(self) -> Optional[int]:\n return pulumi.get(self, \"timeout\")", "title": "" }, { "docid": "c1a995d0f7fe96cdbb30917e0fd17750", "score": "0.7707487", "text": "def timeout(self) -> Optional[int]:\n return pulumi.get(self, \"timeout\")", "title": "" }, { "docid": 
"c1a995d0f7fe96cdbb30917e0fd17750", "score": "0.7707487", "text": "def timeout(self) -> Optional[int]:\n return pulumi.get(self, \"timeout\")", "title": "" }, { "docid": "c1a995d0f7fe96cdbb30917e0fd17750", "score": "0.7707487", "text": "def timeout(self) -> Optional[int]:\n return pulumi.get(self, \"timeout\")", "title": "" }, { "docid": "ed9b29587579e035132d51ede9994bdf", "score": "0.7676719", "text": "def get_timeout(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b1e99e96586cce422430d9bbbbe4f3af", "score": "0.76634705", "text": "def get_timeout(self):\n val = self['tpsjob'][0]\n try:\n timeout = int(float(val) * 60)\n except Exception, msg:\n raise Exception(\"%s\\nException : %s\" % (val, msg))\n return timeout", "title": "" }, { "docid": "a60e240c79ce9973eead7df0496940e5", "score": "0.76230234", "text": "def timeout(self) -> str:\n return self._timeout", "title": "" }, { "docid": "133b41c200138a815d843897b7e0a74d", "score": "0.7581552", "text": "def timeout(self) -> typing.Optional[aws_cdk.core.Duration]:\n return self._values.get('timeout')", "title": "" }, { "docid": "550347e0896e08e727c104d34357e587", "score": "0.7537911", "text": "def get_timeout(self):\n return self.instr.timeout", "title": "" }, { "docid": "550347e0896e08e727c104d34357e587", "score": "0.7537911", "text": "def get_timeout(self):\n return self.instr.timeout", "title": "" }, { "docid": "ba32ed04593993a5b0e5918e92d9001a", "score": "0.7509191", "text": "def get_timeout():\n try: # gipc-based implementation\n from gevent import Timeout\n return Timeout(TIMEOUT_IN_SEC), Timeout\n except ImportError:\n from Queue import Empty\n timeout = TIMEOUT_IN_SEC\n return timeout, Empty", "title": "" }, { "docid": "b8ff97fdc87fc444d169b339baef574c", "score": "0.74755865", "text": "def _get_wait_time(self):\n return self._MAX_REQUEST_WAIT_TIME", "title": "" }, { "docid": "c50f0f1a2d3e98275015c8bc94d56557", "score": "0.7439643", "text": "def getTimeout(self) -> float:\n with self._queueMutex:\n return self._timeout / 1e6", "title": "" }, { "docid": "534ef1aad77e96af17aea932f3270672", "score": "0.74292386", "text": "def timeout_seconds(self) -> Optional[float]:\n return pulumi.get(self, \"timeout_seconds\")", "title": "" }, { "docid": "19e2e1bf2177988660dd61775c08f368", "score": "0.74152976", "text": "def timeout_secs(self) -> \"int\":\n return self._attrs.get(\"timeoutSecs\")", "title": "" }, { "docid": "19e2e1bf2177988660dd61775c08f368", "score": "0.74152976", "text": "def timeout_secs(self) -> \"int\":\n return self._attrs.get(\"timeoutSecs\")", "title": "" }, { "docid": "b34ebc9f91dbe04ff095f95ede9948b0", "score": "0.73758566", "text": "def timeout(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"timeout\")", "title": "" }, { "docid": "b34ebc9f91dbe04ff095f95ede9948b0", "score": "0.73758566", "text": "def timeout(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"timeout\")", "title": "" }, { "docid": "b34ebc9f91dbe04ff095f95ede9948b0", "score": "0.73758566", "text": "def timeout(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"timeout\")", "title": "" }, { "docid": "53ede45f707d021030648513452aa2e1", "score": "0.7372905", "text": "def get_timeout(title):\n route_dict = get_route_dict(title)\n timeout = route_dict['timeout']\n if not timeout:\n timeout = config['timeout']\n return timeout", "title": "" }, { "docid": "f5753d551fb684b74bc02a065fa53d27", "score": "0.7327633", "text": "def timeout_in_seconds(self) -> Optional[pulumi.Input[float]]:\n 
return pulumi.get(self, \"timeout_in_seconds\")", "title": "" }, { "docid": "00371a9ff9cc08b4395c2295fd787fcd", "score": "0.732669", "text": "def timeout(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"timeout\")", "title": "" }, { "docid": "f7671fb9089dbd1133eb39f180814cec", "score": "0.72947246", "text": "def _get_timeout(self):\n scheduler = Scheduler.get_scheduler()\n self.logger.info(f\"Using scheduler interface {scheduler}\")\n timeout = scheduler.get_remaining_seconds()\n self.logger.info(f\"Obtained timeout from scheduler {timeout}\")\n return timeout", "title": "" }, { "docid": "bf264350ead57a512efd0c526ea8b5fc", "score": "0.72519463", "text": "def timeout_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"timeout_seconds\")", "title": "" }, { "docid": "bf264350ead57a512efd0c526ea8b5fc", "score": "0.72519463", "text": "def timeout_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"timeout_seconds\")", "title": "" }, { "docid": "bf264350ead57a512efd0c526ea8b5fc", "score": "0.72519463", "text": "def timeout_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"timeout_seconds\")", "title": "" }, { "docid": "bf264350ead57a512efd0c526ea8b5fc", "score": "0.72519463", "text": "def timeout_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"timeout_seconds\")", "title": "" }, { "docid": "bf264350ead57a512efd0c526ea8b5fc", "score": "0.72519463", "text": "def timeout_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"timeout_seconds\")", "title": "" }, { "docid": "bf264350ead57a512efd0c526ea8b5fc", "score": "0.72519463", "text": "def timeout_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"timeout_seconds\")", "title": "" }, { "docid": "2879606dc03d572e59250b84df348ccc", "score": "0.7251322", "text": "def timeout(self):\n return current_app.config[\"IIIF_CACHE_TIME\"]", "title": "" }, { "docid": "4bc028165f9c56811afc5f7cc417f97c", "score": "0.72327924", "text": "def get_sanity_test_timeout_sec(self):\n self.sanity_test_timeout_sec", "title": "" }, { "docid": "9eddb382370d827c05a371b2c04414ed", "score": "0.7215822", "text": "def timeout(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"timeout\")", "title": "" }, { "docid": "9eddb382370d827c05a371b2c04414ed", "score": "0.7215822", "text": "def timeout(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"timeout\")", "title": "" }, { "docid": "8fed0ac0d90dc174f669c3e76d08149a", "score": "0.71421504", "text": "def poll_timeout(self):\n return self._poll_timeout", "title": "" }, { "docid": "ae7af5505e1c252789c65ca4e3de2bd9", "score": "0.7110401", "text": "def get_timeout(self):\r\n a = self.get_attributes('VisibilityTimeout')\r\n return int(a['VisibilityTimeout'])", "title": "" }, { "docid": "b2c57189c55628b28b8c8a502e72a76a", "score": "0.70609283", "text": "def get_client_timeout():\n if os.getenv(\"ARKOUDA_CLIENT_TIMEOUT\"):\n return int(os.getenv(\"ARKOUDA_CLIENT_TIMEOUT\"))\n return None", "title": "" }, { "docid": "1596f25e6d03ff3da85dd17f57aa5454", "score": "0.7036144", "text": "def zGetTimeout(self):\n return self.conversation.GetDDETimeout()", "title": "" }, { "docid": "1c0f37a59ab956832980d827b9cb79c3", "score": "0.70185834", "text": "def getTimeOut(self, filesize):\n\n timeout_max = 6*3600 # 6 hours\n timeout_min = self.timeout #5*60 # 5 mins\n\n timeout = timeout_min + int(filesize/0.4e6) # approx < 0.4 Mb/sec\n\n return max(timeout, timeout_max)", 
"title": "" }, { "docid": "8e1fb691ff38dbc1f82daf38dbe751e3", "score": "0.70138425", "text": "def discoverabletimeout(self):\n return self.adapter_property.Get(self.ADAPTER_IFACE,\n 'DiscoverableTimeout')", "title": "" }, { "docid": "023379605b054fa87fd74bc58b4516a6", "score": "0.6989225", "text": "def idle_timeout(self) -> str:\n return pulumi.get(self, \"idle_timeout\")", "title": "" }, { "docid": "2e267f59c5156d07d1a7b6b6ee9ae1a6", "score": "0.69802403", "text": "def test_get_timeout():\n (read, connect) = AWSClient.get_timeout(None)\n assert read == 60 and connect == 10\n (read, connect) = AWSClient.get_timeout(\"100\")\n assert read == 100 and connect == 10\n (read, connect) = AWSClient.get_timeout(\"200,2\")\n assert read == 200 and connect == 2\n (read, connect) = AWSClient.get_timeout(60)\n assert read == 60 and connect == 10\n (read, connect) = AWSClient.get_timeout(u\"60, 10\") # testing for unicode variable\n assert read == 60 and connect == 10", "title": "" }, { "docid": "e9374ad6bac74e2eec806a7c27a6cb0e", "score": "0.69736826", "text": "def getConnectTimeout():\n return 10000", "title": "" }, { "docid": "a34221e0ec8445cbd4646f74287a07aa", "score": "0.6961935", "text": "def session_timeout(self):\n return self._session_timeout", "title": "" }, { "docid": "1e8489b081a101b9b902b550c7b9b34d", "score": "0.6946508", "text": "def dtp_timeout(self):\n return self.pi.dtp_timeout", "title": "" }, { "docid": "8fdeb393b27b4c2ddbbc424fc3196100", "score": "0.69389987", "text": "def getReadTimeout():\n return 60000", "title": "" }, { "docid": "9dbd56cfcf0144ee0b680ed77051bdbb", "score": "0.69289154", "text": "def serverless_connection_timeout_in_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"serverless_connection_timeout_in_seconds\")", "title": "" }, { "docid": "9dbd56cfcf0144ee0b680ed77051bdbb", "score": "0.69289154", "text": "def serverless_connection_timeout_in_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"serverless_connection_timeout_in_seconds\")", "title": "" }, { "docid": "4abeaecc948304b6d5c27eb8eda3edc0", "score": "0.69252473", "text": "def per_try_timeout(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"per_try_timeout\")", "title": "" }, { "docid": "b1adc6ac79d89d6add8f77c9a810fb2e", "score": "0.6906411", "text": "def wlan_timeout():\n return json.dumps(dict(\n timeout=rfid_handler.get_wlan_time_left() if rfid_handler else 0\n ))", "title": "" }, { "docid": "a6575be5c28be32c3ec636cb27ae8e7c", "score": "0.6884021", "text": "def dtp_timeout(self):\n return self.factory.dtp_timeout", "title": "" }, { "docid": "1e5220b09480fb9a635d9eb8104a5633", "score": "0.6882846", "text": "def timeout(self):\n return self._get_conf('timeout', DEFAULT_WORKER_TIMEOUT)", "title": "" }, { "docid": "a1786311b352952226494368afcac4d5", "score": "0.6865132", "text": "def serverless_connection_timeout_in_seconds(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"serverless_connection_timeout_in_seconds\")", "title": "" }, { "docid": "6ae49abaab331b208a46422c4f5f854a", "score": "0.6850533", "text": "def running_timeout(self) -> str:\n return pulumi.get(self, \"running_timeout\")", "title": "" }, { "docid": "928b0fb9d40b8a72a0c930fce49e069c", "score": "0.68328464", "text": "def msg_timer_timeout(self):\n return self._msg_timer_timeout", "title": "" }, { "docid": "81a57f40f8ae05b1a9548192354d7908", "score": "0.68312556", "text": "def pool_timeout(self):\n return self._pool_timeout", "title": "" }, { "docid": 
"2c88ac3aa2f0c023394229258ffbd4ec", "score": "0.682287", "text": "def get_web_session_timeout(self):\n url = self.base_url + \"/web_session_timeout\"\n response = self.ctx.session.get(url)\n response.raise_for_status()\n return response.json()", "title": "" }, { "docid": "3a6aa9ee782768cebd22ce8408ac2d7e", "score": "0.6786987", "text": "async def _get_timeouts(self):\n return {\"\": self._timeout or self._TIMEOUT}", "title": "" }, { "docid": "3f23a65b2504e03583fd0bef62127206", "score": "0.6757808", "text": "def connection_idle_timeout(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"connection_idle_timeout\")", "title": "" }, { "docid": "3f23a65b2504e03583fd0bef62127206", "score": "0.6757808", "text": "def connection_idle_timeout(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"connection_idle_timeout\")", "title": "" }, { "docid": "ba4636aea33f24a22fbfbef1522834c7", "score": "0.6757743", "text": "def get_testability_timeout(self: \"SeleniumTestability\") -> str:\n return secs_to_timestr(self.timeout)", "title": "" }, { "docid": "9679178ae33a57a6ab709943c11b07ab", "score": "0.67545974", "text": "def connection_idle_timeout(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"connection_idle_timeout\")", "title": "" }, { "docid": "f93ec0b0c378b53bf2673b63f3823ec3", "score": "0.6744489", "text": "def getPingTimeout(self):\n\n return self._pingTimeout", "title": "" }, { "docid": "789ed5bae039ffeada114808340a6e79", "score": "0.6741402", "text": "def serial_timeout(self):\n return self._serial_timeout", "title": "" }, { "docid": "92ba9083406e37404fcb24c38c3ebe88", "score": "0.6738017", "text": "def readTimeout(self) -> float:\n\n return self._device.timeout", "title": "" }, { "docid": "f8e6de2504500dfc7fefc9bf775814ec", "score": "0.6725182", "text": "def graceful_timeout(self) -> str:\n return self._graceful_timeout", "title": "" }, { "docid": "b992db6230b18e26f845bf3d9aed4235", "score": "0.671494", "text": "def soft_timeout(self):\n return self._soft_timeout", "title": "" }, { "docid": "10972bf2ba7b2f6c23c55704198381aa", "score": "0.6698095", "text": "def timeout_fn():\n return evaluated_last_ckpt", "title": "" }, { "docid": "72a3de76dc4803476fca8283062b6f44", "score": "0.6671137", "text": "def idle_client_timeout(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"idle_client_timeout\")", "title": "" }, { "docid": "bfe43ad728a919ed4f4814a64cb0a398", "score": "0.66660804", "text": "def get_timeout(value):\n\n maximum_dbus_timeout_ms = 1073741823\n\n # Ensure the input str is not a float\n if isinstance(value, float):\n raise StratisCliEnvironmentError(\n \"The timeout value provided is a float; it should be an integer.\"\n )\n\n try:\n timeout_int = int(value)\n\n except ValueError:\n raise StratisCliEnvironmentError(\n \"The timeout value provided is not an integer.\"\n )\n\n # Ensure the integer is not too small\n if timeout_int < -1:\n raise StratisCliEnvironmentError(\n \"The timeout value provided is smaller than the smallest acceptable value, -1.\"\n )\n\n # Ensure the integer is not too large\n if timeout_int > maximum_dbus_timeout_ms:\n raise StratisCliEnvironmentError(\n \"The timeout value provided exceeds the largest acceptable value, %s.\"\n % maximum_dbus_timeout_ms\n )\n\n # Convert from milliseconds to seconds\n return timeout_int / 1000", "title": "" }, { "docid": "716203da652647470a631274320af539", "score": "0.6642015", "text": "def ping_timeout(self):\n return self.settings.get( # type:ignore[attr-defined]\n \"ws_ping_timeout\", 
max(3 * self.ping_interval, WS_PING_INTERVAL)\n )", "title": "" }, { "docid": "a145822d5517f64b87471f071a44795e", "score": "0.6641549", "text": "def get_max_response_time(self):\n \n return self.max_response_time", "title": "" }, { "docid": "b3681f3fda2633748cb8fc951fbd26b6", "score": "0.6629686", "text": "def initialization_timeout(self) -> int:\n return pulumi.get(self, \"initialization_timeout\")", "title": "" }, { "docid": "9307181b6cc7ddfae6d3569b7c4012f0", "score": "0.6587432", "text": "def idle_client_timeout(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"idle_client_timeout\")", "title": "" }, { "docid": "9307181b6cc7ddfae6d3569b7c4012f0", "score": "0.6587432", "text": "def idle_client_timeout(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"idle_client_timeout\")", "title": "" }, { "docid": "b415ef151d775e6694ab0faf13dc6f91", "score": "0.6584212", "text": "def _timeoutCall(self):\n return self._TimeoutMixin__timeoutCall", "title": "" }, { "docid": "b415ef151d775e6694ab0faf13dc6f91", "score": "0.6584212", "text": "def _timeoutCall(self):\n return self._TimeoutMixin__timeoutCall", "title": "" }, { "docid": "0f942e39065687c986163a69bd579ebb", "score": "0.65440726", "text": "def _current_wait_time(self):\n connection_timeout = self.connection_timeout or DEFAULT_TIMEOUT\n if self.reconnect_strategy is None:\n return self.connection_timeout\n elif self.reconnect_strategy == 'exp_backoff':\n r = random.uniform(1.0, 2.0)\n return min(r * connection_timeout * math.pow(2.0, self.unsuccessful_connects),\n self.upper_backoff_bound)\n elif self.reconnect_strategy == 'constant':\n return connection_timeout\n else:\n raise ValueError('Reconnect strategy must be None, \"exp_backoff\", or \"constant\"')", "title": "" }, { "docid": "c6264ddd4306aab3b77a03f9b12543f0", "score": "0.65381277", "text": "def dpd_timeout_seconds(self) -> Optional[int]:\n return pulumi.get(self, \"dpd_timeout_seconds\")", "title": "" }, { "docid": "4385912d98d66562da295c168d2dd713", "score": "0.65243006", "text": "def ldap_connection_timeout(self):\n return self._ldap_connection_timeout", "title": "" }, { "docid": "87137de1e01cf3950df00abeb951d47b", "score": "0.650621", "text": "def default_command_timeout(self):\n return self._timeout", "title": "" } ]
291fe1dfc69deae35dbac946b041f543
Actually set the volume via `amixer`.
[ { "docid": "6339ec27cb24476ee916f2cb7eda5b98", "score": "0.8421663", "text": "def set_volume(self):\n subprocess.call([\"amixer\", \"set\", \"PCM\", \"--\", \"%d%%\" % (self.volume,)])", "title": "" } ]
[ { "docid": "008be544c722e9ddde9fbf00f2f58a24", "score": "0.7889102", "text": "def set_volume(self, volume):\r\n if self.is_active:\r\n if 0 < volume < 1:\r\n volume = int(volume*100)\r\n self.sp.volume(volume)", "title": "" }, { "docid": "2fba4be3c921afd787309e29022ba900", "score": "0.78266275", "text": "def set_volume(vol):\n command = \"osascript -e 'set volume output volume {}'\".format(vol)\n return run_command(command)", "title": "" }, { "docid": "17a92497da1a931189edc4069e41768c", "score": "0.7718421", "text": "def set_volume(channel, volume):\n\n call(\"set_volume\", channel, volume)", "title": "" }, { "docid": "85020a794ef428b5ab62258eacc87d11", "score": "0.7597933", "text": "def volume(self, volume):\n self._volume = volume", "title": "" }, { "docid": "8aed53508e501e3fd414dde3dcbe6629", "score": "0.7555571", "text": "def setVolume(self, track, volume):\n self.processor.setVolume(track, volume)", "title": "" }, { "docid": "a77eebb4be0eddcd4e09b5ca45cc7062", "score": "0.7488555", "text": "def set_volume(self, volume, which='_all_'):\n pass", "title": "" }, { "docid": "e3e22004b19cd0d4f7438497a2ecb4e9", "score": "0.74149466", "text": "def volume(self, volume):\n\n self._volume = volume", "title": "" }, { "docid": "e3e22004b19cd0d4f7438497a2ecb4e9", "score": "0.74149466", "text": "def volume(self, volume):\n\n self._volume = volume", "title": "" }, { "docid": "e3e22004b19cd0d4f7438497a2ecb4e9", "score": "0.74149466", "text": "def volume(self, volume):\n\n self._volume = volume", "title": "" }, { "docid": "e3e22004b19cd0d4f7438497a2ecb4e9", "score": "0.74149466", "text": "def volume(self, volume):\n\n self._volume = volume", "title": "" }, { "docid": "331c0562044e5871037ef7abff63d860", "score": "0.7388495", "text": "def set_volume(self, volume):\n self.volume = volume\n self.player.set_property('volume', self.volume)", "title": "" }, { "docid": "aaa23d6bd0d9e1d4fbea8ef0d192b06c", "score": "0.73611593", "text": "def set_volume(self, volume):\n self.sound.set_volume(volume)", "title": "" }, { "docid": "1892cc989506c486bde4dcdb6cd38617", "score": "0.7300104", "text": "def volume(self,volume:float):\n mixer.music.set_volume(volume)", "title": "" }, { "docid": "f437a287534568b916f341fe0193cc86", "score": "0.726341", "text": "def setVolume(self, volume=0.7):\n\t\tself.volume=volume\n\t\tpygame.mixer.music.set_volume(self.volume)", "title": "" }, { "docid": "0917e7983072f31f0f2e06f757f9f44b", "score": "0.72532773", "text": "async def set_volume(self, zone: int, volume: int) -> None:\n await self._protocol.send(_format_set_volume(zone, volume))", "title": "" }, { "docid": "ab687f26011a091f0256e8fbe37e925c", "score": "0.72436464", "text": "def set_volume(self, new_volume):\n self._post_request('core.mixer.set_volume', volume=new_volume)", "title": "" }, { "docid": "437f3324009aa974d91360deb07db72e", "score": "0.7240415", "text": "def set_volume(self, val):\n \n self.__volume = val\n \n pygame.mixer.music.set_volume(val)", "title": "" }, { "docid": "f7f8d3972edcb24ae20584ec66e20c15", "score": "0.7206662", "text": "def set_volume(self, zone: int, volume: int) -> None:\n self._process_request(_format_set_volume(zone, volume))", "title": "" }, { "docid": "23bd999770657977f801d19005bd7193", "score": "0.71519357", "text": "def set_volume(v):\n if type(v) != float:\n if type(v) == int:\n v = float(v)\n else:\n raise TypeError('Has to be a number')\n\n if not 0 <= v <= 1:\n raise ValueError('Has to be between 0 or 1')\n \n musicapp.setVolume_(v)", "title": "" }, { "docid": 
"2f72bd6fb5082e5440830c4cc1430eed", "score": "0.714975", "text": "def volume(self, value):\n value = min(1, max(0, value))\n logger.info('set volume %s', value)\n mixer.music.set_volume(value)", "title": "" }, { "docid": "af71d2c1d6d88671527c74be25b90ab7", "score": "0.7133459", "text": "def set_volume(self, volume):\n if self.musicpresent==True:\n pygame.mixer.music.set_volume(volume)\n else:\n self.soundeffect.set_volume(volume)", "title": "" }, { "docid": "968e85692da9767843e4df9ed1007676", "score": "0.7110554", "text": "def set_volume(self, volume):\n svc_type = 'urn:schemas-upnp-org:service:RenderingControl:1'\n self._send_cmd('SetVolume', service_type=svc_type, Channel='Master',\n DesiredVolume=volume)\n return None", "title": "" }, { "docid": "192c37b439bfb454e6b1acc2aa458b8a", "score": "0.7057902", "text": "def onSetVolume(self, event):\r\n self.currentVolume = self.volumeCOP.GetValue()\r\n self.mediaPlayer.SetVolume(float(self.currentVolume / 100))", "title": "" }, { "docid": "5adf6481a26f38c66b9071d315a18c52", "score": "0.70216686", "text": "def set_volume_level(self, volume):\n if 'volume' in self._status:\n self._client.setvol(int(volume * 100))", "title": "" }, { "docid": "c55a020175a712a9c94196a7106a5c43", "score": "0.70117956", "text": "def set_volume_level(self, volume: int) -> None:\n self._spotify.volume(int(volume * 100))", "title": "" }, { "docid": "6781af140174c4d98bdf2d9fa04506d0", "score": "0.70045114", "text": "def set_master_volume(self, val):\n vol = self.calc_vol_for_mixer(val)\n\n self.main.log.write(log.MESSAGE, \"[ALSA] setting mixer volume to \"\n \"%d (=%d)...\" % (val, vol))\n alsaaudio.Mixer(self.mixer).setvolume(vol)", "title": "" }, { "docid": "303f51146a9c7f9a75da086662673041", "score": "0.69950217", "text": "def set_volume(channel, volume):\n return fmod.FSOUND_SetVolume(channel, volume)", "title": "" }, { "docid": "aa8e2798de841504538c20aba77833c7", "score": "0.69819754", "text": "def set_amp_volume(self, val):\n vol = self.calc_vol_for_mixer(val)\n\n self.main.log.write(log.MESSAGE, \"[ALSA] setting amp volume to \"\n \"%d (=%d)...\" % (val, vol))\n alsaaudio.Mixer(self.amp).setvolume(vol)", "title": "" }, { "docid": "da49e0abd5a7a01fb83d652c832b2adf", "score": "0.69744354", "text": "def set_aspirate_volume(self, probe_num, volume, block = True):\n self.wait_for_buffered()\n device_id = self.syringe[probe_num]['device_id']\n probe_letter = \"L\"\n if self.syringe[probe_num]['side'] is 'right':\n probe_letter = \"R\"\n suffix = ''\n if not volume % 1 > 0:\n suffix = '.0'\n if (volume % 1) == (self.get_syringe_pump_status(probe_num)[1] % 1):\n return\n self.buffered(('A%s' % (probe_letter)) + str(volume) + suffix, \n device_id)\n self.syringe[probe_num]['next_operation'] = -volume\n while self.get_syringe_pump_status(probe_num)[0] != 'H':\n self.sleep(.05, '[aspirate block]')", "title": "" }, { "docid": "76be7b622a86ee14e8b5289fbc91fb5a", "score": "0.6914168", "text": "def cmd_increase_volume(self):\n pass", "title": "" }, { "docid": "fa50de71da3ecf0675de15e0a2314d35", "score": "0.68965644", "text": "def setVolume(volume=50):\n if volume > 100:\n volume = 100\n elif volume < 0:\n volume = 0\n tkSnack.audio.play_gain(volume)", "title": "" }, { "docid": "9c5065ef38807e4bafc9ca749bf97a43", "score": "0.68597615", "text": "def set_volume_value(self, index, value):\n self._volumes[index] = value", "title": "" }, { "docid": "b677af321513591268b75dbfc92685ea", "score": "0.6856387", "text": "def SetVol(self, e):\n \n vol = self.sldr_vol.GetValue()\n print 'cur 
vol val:', vol\n vol /= 100.0\n print 'volume to be set:', vol\n self.mediaPlayer.SetVolume(vol)", "title": "" }, { "docid": "2332a50c1717014305494b8df427aacf", "score": "0.67961496", "text": "async def volume(self, ctx, volume: int):\r\n\r\n if ctx.voice_client is None:\r\n return await ctx.send(\"Not connected to a voice channel.\")\r\n\r\n ctx.voice_client.source.volume = volume / 100\r\n await ctx.send(\"Changed volume to {}%\".format(volume))", "title": "" }, { "docid": "a0819431df8a60fef963ea11e9ba4792", "score": "0.6790397", "text": "async def volume(self, ctx, volume: int):\n\n if ctx.voice_client is None:\n return await ctx.send(\"Not connected to a voice channel.\")\n\n ctx.voice_client.source.volume = volume / 100\n await ctx.send(\"Changed volume to {}%\".format(volume))", "title": "" }, { "docid": "f7ca074a8cabee2c635233fd8e6b1bd6", "score": "0.6781557", "text": "def volume(object,val):\n if object[1]==1:\n mixer.music.set_volume(0.0)\n elif object[1]==0:\n mixer.music.set_volume(1.0)", "title": "" }, { "docid": "2a639ecc2bbb0d011498f30beaa28106", "score": "0.6757883", "text": "def __init__(self):\n self.volume = 70 # in percent\n self.set_volume()", "title": "" }, { "docid": "43d80db5c4c3e13cdec95e29bf1fd7ef", "score": "0.67351687", "text": "def set_volume(self):\n self.town_theme.set_volume(0.5)\n self.fireball.set_volume(1)\n self.monster_kill.set_volume(0.75)", "title": "" }, { "docid": "237ff89a06fbcd5aa86639f053f78645", "score": "0.67323464", "text": "def root_volume(self, root_volume):\n self._root_volume = root_volume", "title": "" }, { "docid": "e1b4f58ee196e30815c6bcd8cd6b5de2", "score": "0.67320853", "text": "def set_volume(self, volume_perc):\n res = requests.get(Song.SET_VOLUME.format(self.room.ip_address, volume_perc))\n return res.json()", "title": "" }, { "docid": "e1b4f58ee196e30815c6bcd8cd6b5de2", "score": "0.67320853", "text": "def set_volume(self, volume_perc):\n res = requests.get(Song.SET_VOLUME.format(self.room.ip_address, volume_perc))\n return res.json()", "title": "" }, { "docid": "278a8b26706357ea83fac3e2e5e3a672", "score": "0.66813874", "text": "def OnSetVolume(self):\r\n volume = self.volume_var.get()\r\n # vlc.MediaPlayer.audio_set_volume returns 0 if success, -1 otherwise\r\n if volume > 100:\r\n volume = 100\r\n if self.player.audio_set_volume(volume) == -1:\r\n self.errorDialog(\"Failed to set volume\")", "title": "" }, { "docid": "1b9f9e0261882c66993e5e5bc97a1fed", "score": "0.66727656", "text": "def updateVolume(val):\n\n global VOLUME\n\n VOLUME = SLIDERS[0][\"volume\"].val", "title": "" }, { "docid": "3cfa2f27415b1561afbd1c314f25e7af", "score": "0.6633869", "text": "def change_volume_control(self, available):\n self.device.volume_control(available)", "title": "" }, { "docid": "3bec790d112ba64dd8e045a9b131ce8f", "score": "0.66276306", "text": "def set_music_volume(self, value):\n pygame.mixer.music.set_volume(value / 100)", "title": "" }, { "docid": "cce0893cf7c11424100e1a66b406ae13", "score": "0.6608898", "text": "async def volume(self, ctx, value : int):\n global player\n player.volume = value / 100\n await self.bot.say('Set the volume to {:.0%}'.format(player.volume))", "title": "" }, { "docid": "33fe494f6e947056f8803193af7b8fde", "score": "0.6606079", "text": "def bcp_set_volume(self, track, value):\n if track == 'master':\n self.sound.set_volume(value)\n\n #if track in self.sound.tracks:\n #self.sound.tracks[track]\n\n # todo add per-track volume support to sound system", "title": "" }, { "docid": "f1163fea68ab6b1720e21f210e02faf9", 
"score": "0.66035086", "text": "async def _set_music_master_volume(self, request, volume):\n await self.music_manager.set_master_volume(self.discord_context, request, volume)", "title": "" }, { "docid": "9515ff4d06c5e76fdc18aaba69dd3309", "score": "0.6602625", "text": "def on_volume(self, event):\n volume = event.GetPosition()\n self.video.SetVolume(volume / 100.0)\n ev.ConfigUpdate(params={u'volume': volume})", "title": "" }, { "docid": "337668e4cf78c272dcee082cb0d03a51", "score": "0.6550764", "text": "async def async_set_volume_level(self, volume):\n self._player.set_volume(volume * 100)", "title": "" }, { "docid": "f61cef59c11318f098cac9869676c193", "score": "0.65102637", "text": "def create_volume(self, volume):\n pass", "title": "" }, { "docid": "74891d7725ca1d812d84cc3f0a003851", "score": "0.65026903", "text": "def volumes(self, volumes):\n\n self._volumes = volumes", "title": "" }, { "docid": "d8132297e80518b6be8322392f5f8fdd", "score": "0.64582324", "text": "async def volume(self, ctx, volume: int=None):\n\n\t\tplayer = music.get_player(guild_id=ctx.guild.id)\n\t\tif player is None: return await ctx.send(\"I am not in a Voice Channel.\")\n\t\tplayer.change_volume(float(volume/100))\n\t\tawait ctx.send(\"Changed volume to {}%\".format(volume))", "title": "" }, { "docid": "2c87324a037bc00e0629b78185d89c94", "score": "0.6456995", "text": "def volume(self, value: float) -> None:\n self.call_service(\n \"media_player/volume_set\",\n entity_id=self.args[\"speaker\"],\n volume_level=value,\n )", "title": "" }, { "docid": "0df1bd34b39c630920b574d4c7c8b917", "score": "0.64529353", "text": "def set_secondary_volume(channel, volume, delay):\n\n call(\"set_secondary_volume\", channel, volume, delay)", "title": "" }, { "docid": "85479b69ba79bce6a77af38762a8b6df", "score": "0.6432605", "text": "async def volume(self, ctx: Context, *, volume=None):\n \n try:\n volume = int(volume)\n if not ctx.voice_state.is_playing:\n await ctx.send(embed=NOTHING_PLAYING_ERROR)\n\n elif volume < 0 or volume > 100:\n await ctx.send(embed=INVALID_VOLUME_ERROR(volume))\n\n else:\n ctx.voice_state.volume = volume / 100\n await ctx.send('Volume of the player set to {}%'.format(volume))\n except ValueError:\n await ctx.send(embed=get_error_message(\n \"That's an invalid volume :eyes:\"\n ))", "title": "" }, { "docid": "6b4232ed23fcc7a55eb05e1914c609bf", "score": "0.63723564", "text": "def volume_unit(self, volume_unit):\n\n self._volume_unit = volume_unit", "title": "" }, { "docid": "f1eea926e74578a4e22b8cc362dee63c", "score": "0.63495463", "text": "async def set_default_volume(self, volume):\n return await self._post('audio/volume', dict(Volume=min(max(volume, 0), 100)))", "title": "" }, { "docid": "2eabf48ebacb6b00224923db482e7c6f", "score": "0.6322727", "text": "def set_all_volume(self) -> None:\n for speaker in self.speakers:\n speaker.volume = speaker.default_volume", "title": "" }, { "docid": "7c2d3fbd5717935cc27eb46cc65d99b2", "score": "0.62947226", "text": "def volume(self, volume=False):\n if volume:\n action = '\"urn:schemas-upnp-org:service:RenderingControl:1#SetVolume\"'\n\n body = ('<u:SetVolume xmlns:u=\"urn:schemas-upnp-org:service:'\n 'RenderingControl:1\"><InstanceID>0</InstanceID><Channel>Master'\n '</Channel><DesiredVolume>' + repr(volume) +\n '</DesiredVolume></u:SetVolume>')\n\n response = self.__send_command(SoCo.RENDERING_ENDPOINT, action, body)\n\n if (response == ('<s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/'\n 'envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org'\n 
'/soap/encoding/\"><s:Body><u:SetVolumeResponse '\n 'xmlns:u=\"urn:schemas-upnp-org:service:RenderingControl'\n ':1\"></u:SetVolumeResponse></s:Body></s:Envelope>')):\n return True\n else:\n return self.__parse_error(response)\n else:\n action = '\"urn:schemas-upnp-org:service:RenderingControl:1#GetVolume\"'\n\n body = ('<u:GetVolume xmlns:u=\"urn:schemas-upnp-org:service:'\n 'RenderingControl:1\"><InstanceID>0</InstanceID><Channel>Master'\n '</Channel></u:GetVolume>')\n\n response = self.__send_command(SoCo.RENDERING_ENDPOINT, action, body)\n\n dom = XML.fromstring(response)\n\n volume = dom.findtext('.//CurrentVolume')\n\n return int(volume)", "title": "" }, { "docid": "aea56d71cc6a5e159b46c3d8b2914d96", "score": "0.62866116", "text": "def restore_volume(self):", "title": "" }, { "docid": "b7f9c2b27296e6b4ca0dab36b8a0d22d", "score": "0.625767", "text": "def set_dispense_volume(self, probe_num, volume):\n self.wait_for_buffered()\n device_id = self.syringe[probe_num]['device_id']\n probe_letter = \"L\"\n if self.syringe[probe_num]['side'] is 'right':\n probe_letter = \"R\"\n suffix = ''\n if not volume % 1 > 0:\n suffix = '.0' \n if (volume % 1) == (self.get_syringe_pump_status(probe_num)[1] % 1):\n return\n self.buffered(('D%s' % (probe_letter)) + str(volume) + suffix, \n device_id)\n self.syringe[probe_num]['next_operation'] = volume\n while self.get_syringe_pump_status(probe_num)[0] != 'H':\n self.sleep(.05, '[aspirate block]')", "title": "" }, { "docid": "81aafd4fe39b1ad7d617b0ff5f53f4d5", "score": "0.6251954", "text": "async def vol(self, ctx, volume: int):\n player: DefaultPlayer = self.bot.lavalink.player_manager.get(ctx.guild.id)\n if not player.is_connected:\n # We can't disconnect, if we're not connected.\n return await ctx.send('Not connected.')\n\n await player.set_volume(volume)\n dt = datetime.now()\n embed = discord.Embed(color=discord.Color.blurple(), datetime=dt)\n embed.title = 'Player Volume Changed'\n embed.description = \"Changed volume to {}%\".format(volume)\n return await ctx.send(embed=embed)", "title": "" }, { "docid": "b90d45ce72bc960a33b933918e8f47c0", "score": "0.62444043", "text": "def setmusic():\n G_CHNL.set_volume(PLYR_VOL_DICT[PLYR][0] * theVolume())\n D_CHNL.set_volume(PLYR_VOL_DICT[PLYR][1] * theVolume())\n G_CHNL.play(G_DICT[glob.LEVEL], -1)\n D_CHNL.play(D_DICT[glob.LEVEL], -1)", "title": "" }, { "docid": "b46ec3fb7c41f9cdee20d1eff76676a0", "score": "0.6229608", "text": "def set_master_volume(volume):\n return fmod.FSOUND_SetSFXMasterVolume(volume)", "title": "" }, { "docid": "c5476788472d8e624c50784345e26a5b", "score": "0.6171493", "text": "async def async_set_volume_level(self, volume):\n await self._device.async_set_volume_level(volume)", "title": "" }, { "docid": "ac028782f0894bfec440a1f1b169791d", "score": "0.6147204", "text": "def setRoomVolume(name_udn, volume):\n returndata = {}\n returndata[\"success\"] = False\n room = __getSingleRoom(name_udn)\n if room != None:\n room.volume = volume\n returndata[\"success\"] = True\n return json.dumps(returndata)", "title": "" }, { "docid": "4dd0bf8d5a3df2e408d10fcf7f5bad9b", "score": "0.61338377", "text": "async def volume(self, ctx, vol: float):\n if vol > 50.0:\n embed = discord.Embed(title=\"It is unsafe to go that high!\", colour=RED)\n else:\n voice = get(self.bot.voice_clients, guild=ctx.guild)\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = vol / 10\n embed = discord.Embed(title=\"Volume changed\", description=f\"New volume: {vol}%\")\n 
embed.set_author(name=ctx.author.nick if ctx.author.nick else ctx.author.name, icon_url=ctx.author.avatar_url)\n await ctx.message.delete()\n await ctx.send(embed=embed)", "title": "" }, { "docid": "7094b3da35fd791acb4f448b5241796f", "score": "0.61272246", "text": "async def async_set_volume_level(self, volume):\n await self._volume_entity.async_set_volume_level(self.hass, volume)", "title": "" }, { "docid": "af89e802748b0e022f424dbac07e7b01", "score": "0.6107115", "text": "def volume(self):\n raise NotImplementedError", "title": "" }, { "docid": "af89e802748b0e022f424dbac07e7b01", "score": "0.6107115", "text": "def volume(self):\n raise NotImplementedError", "title": "" }, { "docid": "9c2ea392b82d9e94a41c5e3448546ca6", "score": "0.6106013", "text": "def mute_volume(self, mute):\n if 'volume' in self._status:\n if mute:\n self._muted_volume = self.volume_level\n self.set_volume_level(0)\n else:\n self.set_volume_level(self._muted_volume)\n self._muted = mute", "title": "" }, { "docid": "4cddbc8d1c32fb8c8903ceb48fa89ae5", "score": "0.61047566", "text": "def volume_name(self, volume_name):\n\n self._volume_name = volume_name", "title": "" }, { "docid": "cbdabc8893d1c7660446ec58e3385c69", "score": "0.60872823", "text": "def SetSpeechVolume(self, vol):\n try:\n fcn = '[SetSpeechVolume(\"{}\")]'.format(vol)\n self._get_request(fcn)\n except Exception as e:\n print(e)\n print(\"Exception in SetSpeechVolume function\")\n return", "title": "" }, { "docid": "04c4da02639838f123245ec4572cdd28", "score": "0.6070909", "text": "def attach_volume(self, context, volume, instance_uuid, host_name,\n mountpoint):\n pass", "title": "" }, { "docid": "79cfdbd86ed4d126ddf59d9d530d42f6", "score": "0.6068145", "text": "def updateVolume(self, newVolume):\n boundedVolume = self.restrictToBounds(newVolume)\n if(boundedVolume != self.volume):\n output = self.amixer(\"set 'PCM' unmute {}%\".format(boundedVolume))\n self.synchronize(output)", "title": "" }, { "docid": "c2a1f50781e785e6e8db4d59bab16b42", "score": "0.60619235", "text": "def volumes(self, volumes):\n\n self._volumes = volumes", "title": "" }, { "docid": "53076d4d33b3a1c6ffa6fd68ccfff8b3", "score": "0.6044846", "text": "def set_velocity(self, velocity):\n self.volume = min(127, velocity)", "title": "" }, { "docid": "78bc764e3cea0e437905106c27cc6dcd", "score": "0.60245776", "text": "async def volume(self, ctx, value: int):\r\n if value > 100:\r\n value = 100\r\n elif value < 10:\r\n value = 10\r\n\r\n state = self.get_voice_state(ctx.message.server)\r\n if state.is_playing():\r\n player = state.player\r\n player.volume = value / 100\r\n await self.bot.say('Set the volume to {:.0%}'.format(player.volume))", "title": "" }, { "docid": "4f0f64197800b34dd7cfd1fc2aef59e8", "score": "0.6007267", "text": "def volume_controller(level):\n sys.stdout.write(\"\\r\")\n if operating_system == 'Darwin':\n level = round((8 * level) / 100)\n os.system(f'osascript -e \"set Volume {level}\"')\n elif operating_system == 'Windows':\n if 'SetVol.exe' not in current_dir:\n # Courtesy: https://rlatour.com/setvol/\n sys.stdout.write(\"\\rPLEASE WAIT::Downloading volume controller for Windows\")\n os.system(\"\"\"curl https://thevickypedia.com/Jarvis/SetVol.exe --output SetVol.exe --silent\"\"\")\n sys.stdout.write(\"\\r\")\n os.system(f'SetVol.exe {level}')", "title": "" }, { "docid": "441c831fb84ad0c2a71f4b5ccdb48f01", "score": "0.6002478", "text": "def modify_volume_attribute(self, VolumeId: str, AutoEnableIO: Dict = None, DryRun: bool = None):\n pass", "title": "" }, { "docid": 
"991bf53113384ae994a1e0dcc17d3fd6", "score": "0.6002317", "text": "def _set_volume(self, station: RadioStation):\n frequency_distance = abs(self.current_freq - station.frequency)\n amp = self._get_media_amplification(frequency_distance)\n\n if amp >= 20:\n # Fully tuned into station\n self.noise_player.stop()\n else:\n if not self.noise_player.is_playing():\n self.noise_player.play()\n if amp <= -20:\n # Fully outside station bounds\n self.music_player.stop()\n else:\n if not self.music_player.is_playing():\n self.music_player.play()\n time_offset = self._get_station_offset(station)\n self.music_player.get_media_player().set_time(int(time_offset))\n\n # Set the volume using the equalizer\n self.equalizer.set_preamp(amp)\n self.music_player.get_media_player().set_equalizer(self.equalizer)\n self.equalizer.set_preamp(-amp)\n self.noise_player.get_media_player().set_equalizer(self.equalizer)", "title": "" }, { "docid": "3219c0567c403878cb33bd7dc154c723", "score": "0.5995478", "text": "def send_channel_volume(self, value=127, ch=None, delay=0):\n self.send_control_change(CHANNEL_VOLUME, value, ch=ch, delay=delay)", "title": "" }, { "docid": "2f5915bd3ccf1e06372a8f64fa5ec5f4", "score": "0.59922343", "text": "def volume_calculation(self, volume_calculation):\n\n self._volume_calculation = volume_calculation", "title": "" }, { "docid": "19c38d2aafdcff923d965d06dc2d3ec5", "score": "0.59802496", "text": "def setZoneVolume(name_udn, volume):\n returndata = {}\n returndata[\"success\"] = False\n zone = __getSingleZone(name_udn)\n if zone != None:\n zone.volume = volume\n returndata[\"success\"] = True\n return json.dumps(returndata)", "title": "" }, { "docid": "91c48851e27d497e0c388a9d54822a71", "score": "0.59186894", "text": "def ComAudioOptionsAddVolume(builder, volume):\n return AddVolume(builder, volume)", "title": "" }, { "docid": "cf38def09b3503faf8f7c6401ae32777", "score": "0.5916988", "text": "def rename_volume_setup(self):\n\t\t\n\t\t#\n\t\t#---------------------- Select Volume To Rename -----------------------\n\t\t#\n\t\t\n\t\tpass", "title": "" }, { "docid": "cbe3611beeedae2ae25862345e89e08c", "score": "0.5911226", "text": "def volume_up(self):\n if 'volume' in self._status:\n current_volume = int(self._status['volume'])\n\n if current_volume <= 100:\n self._client.setvol(current_volume + 5)", "title": "" }, { "docid": "ce39694f7124d8b5a86cfc41d61aa02a", "score": "0.58853155", "text": "def initMusic():\n FX_CHNL.set_volume(theVolume())\n FX_2_CHNL.set_volume(theVolume())\n setmusic()", "title": "" }, { "docid": "4735132ab78985bbad5d182c0b98fd84", "score": "0.5880529", "text": "def lower_volume(self):", "title": "" }, { "docid": "fac1d3667f536934365a545bbc9b1462", "score": "0.58379817", "text": "def __init__(self, volume, width, height):\n\n self.volume = volume\n self.width = width\n self.height = height", "title": "" }, { "docid": "fac1d3667f536934365a545bbc9b1462", "score": "0.58379817", "text": "def __init__(self, volume, width, height):\n\n self.volume = volume\n self.width = width\n self.height = height", "title": "" }, { "docid": "6c9101cabb595c87d56bbbe7128d635b", "score": "0.58368075", "text": "def get_volume():\n command = \"osascript -e 'set ovol to output volume of\"\\\n \"(get volume settings)'\"\n return run_command(command)", "title": "" }, { "docid": "fa909285a16236759025b62760ae1681", "score": "0.5830253", "text": "def set_write_volume(self, level=0, mute=False):\n if mute:\n return bgapi_wrapper('uuid_audio {uuid} start write mute'.format(\n uuid=self.uuid))\n elif level 
== 0:\n return bgapi_wrapper('uuid_audio {uuid} stop'.format(\n uuid=self.uuid))\n else:\n return bgapi_wrapper('uuid_audio {uuid} start write level '\n '{level}'.format(uuid=self.uuid, level=level))", "title": "" }, { "docid": "540d5b062f94fb3aa1213c1762175036", "score": "0.5813251", "text": "def correct_volume(self, vol, writing):\n return vol", "title": "" }, { "docid": "25b21c7a95431f25f4dc9676bcbeb4fe", "score": "0.5796543", "text": "def updateVolumeBar(self):\n self.mixer = alsaaudio.Mixer()\n volumes = self.mixer.getvolume()\n mutes = self.mixer.getmute()\n\n # update on changes and prolong living time of self.\n if self.masterVol != volumes[0]:\n self.masterVol = volumes[0]\n self.prolongLiving()\n\n if self.masterMute != mutes[0]:\n self.masterMute = mutes[0]\n self.prolongLiving()\n\n if(self.masterMute == 1):\n self.volumeBar.set_fraction(0)\n self.label.set_markup(\"<span foreground='white' size='small'>0</span>\")\n else:\n self.volumeBar.set_fraction(self.masterVol/100)\n if(self.masterVol == 100):\n self.label.set_markup(\"<span foreground='white' size='xx-small'>\" + str(self.masterVol) + \"</span>\")\n else:\n self.label.set_markup(\"<span foreground='white' size='small'>\" + str(self.masterVol) + \"</span>\")\n\n\n\n return True", "title": "" }, { "docid": "5482abdccaf69a01a0496a5247bf678b", "score": "0.5790783", "text": "def attach_volume(self, context, connection_info, instance, mountpoint,\n disk_bus=None, device_type=None, encryption=None):\n raise NotImplementedError()", "title": "" }, { "docid": "9b4e9da98553ce20abd7346232161a30", "score": "0.5775059", "text": "def run(self,inputVolume,outputVolume):\n return True", "title": "" }, { "docid": "71819554ac626229b9628f327dd49ac2", "score": "0.5772074", "text": "def __call__(self, name, vol=None, hdr=None):\n _Volume.__call__(self, name, vol=vol, hdr=hdr)\n self._bgd.set_volume(self._vol, self._hdr)\n self._grid_transform()\n self._update()", "title": "" }, { "docid": "67f82b6a2a87e267c703b5ce72876925", "score": "0.5764509", "text": "def increase(self):\n if self.volume == 0:\n self.updateVolume(VOLUME_MIN)\n else:\n self.updateVolume(self.volume + VOLUME_INCREMENT)", "title": "" } ]
a3dff4a0b5a3b63b7a71b29519ee5309
Moves duplicate files to a separate directory, preserving the subdirectory structure within it.
[ { "docid": "320ae484e2d52eae10f58cdc2fc7bd2b", "score": "0.7337616", "text": "def __call__(self, directory_to_move_duplicates_to=None):\n if directory_to_move_duplicates_to is None:\n directory_to_move_duplicates_to = os.path.join(self.directory, \"duplicates\")\n directory_to_move_duplicates_to = os.path.join(os.path.abspath(directory_to_move_duplicates_to), \"\")\n # ^ ensures correct formatting\n\n classes = self.get_equivalence_classes()\n for equiv_class in classes:\n best_file = self.get_best_file_from_class(equiv_class)\n for file in equiv_class:\n if file == best_file:\n continue\n dest = file.replace(self.directory, directory_to_move_duplicates_to)\n os.makedirs(os.path.dirname(dest), exist_ok=True)\n shutil.move(file, dest)", "title": "" } ]
[ { "docid": "b996a5c6073bbe3944cfbade4eb38d7a", "score": "0.65459585", "text": "def scan_and_remove_duplicate(from_dir, debug=False):\n all_files = defaultdict(list)\n for root, dirs, files in os.walk(from_dir):\n for file in files:\n full_path = os.path.join(root, file)\n md5 = hashlib.md5(file_as_bytes(open(full_path, 'rb'))).hexdigest()\n all_files[md5].append(full_path)\n for v in all_files.values():\n duplicated_files = v[1:]\n if duplicated_files:\n for f in duplicated_files:\n print('Remove diplicated file: {} (original: {})'.format(f, v[0]))\n if not debug:\n os.remove(f)", "title": "" }, { "docid": "7ac9be7e6ee8bd07dc43eb76e37893c9", "score": "0.6354505", "text": "def restore_dublicates(path: str):\n old_path = os.path.join(path, 'dublicates')\n files = os.listdir(old_path)\n for p in files:\n if p.endswith('jpg'):\n try:\n shutil.move(os.path.join(old_path, p), os.path.join(path, p.split('jpg_')[1]))\n except:\n pass\n if len(os.listdir(old_path)) == 0:\n shutil.rmtree(old_path)", "title": "" }, { "docid": "aea4690d0b925107ee06c5696fa0caee", "score": "0.6176697", "text": "def find_duplicates(path):\n # find groups of files consisting of more than 1 item (suspected duplicates)\n bgroups = []\n dir_contents = {}\n for dirname, dirnames, filenames in os.walk(path):\n groups = group_files(dirname, filenames)\n for group in groups.items():\n if len(group[1]) > 1:\n bgroups.append((dirname, group))\n dir_contents[dirname] = (dirnames, sorted(groups.keys()))\n\n # check suspected duplicate files\n duplicates = []\n for dirname, group in bgroups:\n files = sorted(group[1], key=lambda p: p[1], reverse=True)\n while len(files) > 1:\n champ = files[0]\n suspects = files[1:]\n f1 = os.path.join(dirname, champ[0])\n unconfirmed = []\n for suspect in suspects:\n f2 = os.path.join(dirname, suspect[0])\n if filecmp.cmp(f1, f2, shallow=False):\n duplicates.append(f2)\n print '\"{}\" is a duplicate of \"{}\" in \"{}\"'.format(suspect[0], champ[0], dirname)\n else:\n unconfirmed.append(suspect)\n\n files = unconfirmed\n\n for item in dir_contents.items():\n parent = item[0]\n dirs = item[1][0]\n files = item[1][1]\n for d1 in range(len(dirs)-1):\n for d2 in range(d1+1, len(dirs)):\n dir1 = os.path.join(parent, dirs[d1])\n dir2 = os.path.join(parent, dirs[d2])\n if compare_dirs(dir_contents, dir1, dir2):\n if os.stat(dir1).st_mtime > os.stat(dir2).st_mtime:\n master = dirs[d1]\n dup = dirs[d2]\n dup_fullpath = dir2\n else:\n master = dirs[d2]\n dup = dirs[d1]\n dup_fullpath = dir1\n\n if dup_fullpath not in duplicates:\n duplicates.append(dup_fullpath)\n print 'Directory \"{}\" is a duplicate of \"{}\" in \"{}\"'.format(dup, master, parent)\n\n return duplicates", "title": "" }, { "docid": "91d411182d5fda79c8085e5692040b8b", "score": "0.6167216", "text": "def extract_from_directories():\n directories = glob.glob(\"*/\")\n for directory in directories:\n files_to_extract = get_music_in_directory(directory)\n for file in files_to_extract:\n shutil.move(file, os.getcwd())\n os.rmdir(directory)", "title": "" }, { "docid": "6a7b23b33982645b72acd806c2e67ef2", "score": "0.59987", "text": "def move_tree(self, src: pm.path, dst: pm.path):\n self._log_command(f\"\"\"Recursively move files from \\\"{src}\\\" to \\\"{dst}\\\"\"\"\")\n self.copy_tree(src, dst)\n self.remove_tree(src)", "title": "" }, { "docid": "15c68ef7178a48b5350e02f69a5a91ab", "score": "0.59847116", "text": "def move_files_to_subdirectory(file_list, working_directory, subdirectory):\n # create subdirectory, if it does not exist\n if not 
os.path.exists(working_directory + os.sep + subdirectory):\n os.makedirs(working_directory + os.sep + subdirectory)\n\n # move files\n for file in file_list:\n os.rename(working_directory + os.sep + file, working_directory + os.sep + subdirectory + os.sep + file)", "title": "" }, { "docid": "021533fb2f7bbb6c9384b53e9771437b", "score": "0.5971916", "text": "def move_files(abs_dirname):\n\n files = [os.path.join(abs_dirname, f) for f in os.listdir(abs_dirname)]\n\n i = 0\n curr_subdir = None\n\n for f in files:\n # create new subdir if necessary\n if i % N == 0:\n subdir_name = os.path.join(abs_dirname, '{0:03d}'.format(i / N + 1))\n os.mkdir(subdir_name)\n curr_subdir = subdir_name\n\n # move file to current dir\n f_base = os.path.basename(f)\n shutil.move(f, os.path.join(subdir_name, f_base))\n i += 1", "title": "" }, { "docid": "6f7ee5c037340cf43113a88964e5db64", "score": "0.59539324", "text": "def replicate_all_files(src_dir, copy_file, use_gitignore=True, debugging=False):\n spec = get_pathspec(src_dir, use_gitignore)\n for filename in pathspec.util.iter_tree(src_dir):\n if not spec.match_file(filename):\n copy_file(os.path.join(src_dir, filename))", "title": "" }, { "docid": "95dae4519c2739bc862bbfb601d30fcb", "score": "0.5923724", "text": "def moveResultsTo(self, dir, trimCommonPaths = False):\n allChanges = set()\n self._scanChanges(self._dir, allChanges)\n\n prefix = self._dir\n if trimCommonPaths:\n prefix = None\n for c in allChanges:\n if prefix is None:\n prefix = c\n else:\n while not c.startswith(prefix):\n prefix = os.path.dirname(prefix)\n\n if prefix is None:\n prefix = self._dir\n elif not os.path.isdir(prefix):\n # Only one file remains, the common path is its folder\n prefix = os.path.dirname(prefix)\n\n allErrors = []\n for c in allChanges:\n # Add 1 to len(prefix) so that we don't include the preceding slash,\n # which would make os.path.join treat it as an absolute\n target = os.path.join(dir, os.path.relpath(c, prefix))\n safeMake(os.path.dirname(target))\n nRetries = 4\n for _retry in range(nRetries):\n try:\n os.rename(c, target)\n except OSError as e:\n if e.errno == 16 and _retry != nRetries - 1:\n # Device busy, will retry\n pass\n elif e.errno == 2:\n # 2 - No such file; was temporary\n break\n else:\n allErrors.append(\"{0}: {1}\".format(c, ''.join(\n traceback.format_exception_only(\n *sys.exc_info()[:2]))))\n break\n else:\n break\n\n # Will retry\n time.sleep(1.)\n\n if allErrors:\n raise Exception(\"Errors during file move:\\n\\n{0}\".format(\n '\\n\\n'.join(allErrors)))", "title": "" }, { "docid": "65f87e6299d2fc20993347197d05c0ac", "score": "0.58844805", "text": "def move(src_path, dst_path, src_root=None):\n root_ext = os.path.splitext(dst_path)\n i = 0\n while os.path.isfile(dst_path):\n # Recursively avoid the collision\n i += 1\n dst_path = root_ext[0] + \" ({})\".format(i) + root_ext[1]\n # Move file, make directories if needed\n os.makedirs(os.path.dirname(dst_path), exist_ok=True)\n shutil.move(src_path, dst_path)\n # Delete directory if necessary (recursively)\n directory = os.path.dirname(src_path)\n while (src_root is not None and src_path.startswith(src_root) and directory != src_root):\n delete_directory_if_empty_or_hidden(directory)\n directory = os.path.dirname(directory)", "title": "" }, { "docid": "52771a455b04ecfb99cfedd413a7f5af", "score": "0.5881753", "text": "def rsync_replacement(src: str, dst: str) -> None:\n if not os.path.exists(dst):\n os.makedirs(dst)\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = 
os.path.join(dst, item)\n if os.path.isdir(s):\n rsync_replacement(s, d)\n else:\n shutil.copy2(s, d)", "title": "" }, { "docid": "e85f61418c77edaddb953ae5c446fa83", "score": "0.5869827", "text": "def put_originals_in_subdirectory(file_paths):\n directory, file_name, extension = split_file_path(file_paths[0])\n\n destination_directory = os.path.join(directory, 'originals')\n if not os.path.exists(destination_directory):\n os.mkdir(destination_directory)\n\n for file_path in file_paths:\n os.rename(\n src=file_path,\n dst=os.path.join(destination_directory, os.path.basename(file_path))\n )", "title": "" }, { "docid": "be6ad133f0e79fff064882ad7af311f8", "score": "0.58319145", "text": "def move_files(dir_name):\n file_ls = glob.glob('*dat') \n plot_ls = glob.glob('*png')\n file_ls += plot_ls\n os.makedirs(dir_name)\n for file in file_ls:\n shutil.move(str(file), dir_name)", "title": "" }, { "docid": "6a8678a748f1e58b1d315cddd85ca690", "score": "0.5827099", "text": "def _directory_processor(srcdir, new_suffix=\"part_\"):\n weeks = []\n srcdir = srcdir.rstrip(\"/\")\n for f in os.listdir(srcdir):\n # for example, stub_00_01.md\n w = f.split(\"_\")[1]\n # gives w as 00\n if w not in weeks:\n weeks.append(w)\n for w in weeks:\n newdir = f\"{srcdir}/{new_suffix}{w}\"\n # for example, testdir/week_00\n if os.path.isdir(newdir):\n print(f\"{newdir} already exists...\")\n else:\n os.makedirs(newdir)\n for f in os.listdir(srcdir):\n if f.endswith(\".md\"):\n # For example stub_00_01.md\n w = f.split(\"_\")[1]\n # for example w as 00\n os.rename(f\"{srcdir}/{f}\", f\"{srcdir}/{new_suffix}{w}/{f}\")\n # so testdir/stub_00_01.md becomes testdir/week_00/stub_00_01.md", "title": "" }, { "docid": "9ee90884dc4e19739f4f328838543136", "score": "0.57634467", "text": "def test_dir_to_child(self):\n root = tempfile.mkdtemp(dir=tmpdir)\n src = os.path.join(root, 'subdir', 'folder')\n src2 = os.path.join(root, 'subdir', 'folder', 'file.txt')\n dst = os.path.join(root, 'subdir', 'folder', 'subfolder')\n util.fs.mkdir(src)\n util.fs.save(src2, DUMMY_BYTES)\n with self.assertRaises(util.fs.FSMoveInsideError):\n util.fs.move(src, dst)\n\n # verify no unexpected content change\n self.assertEqual(\n glob_files(src),\n {os.path.join(src, ''), src2},\n )", "title": "" }, { "docid": "220eeecdd4fb57a2455d1c2be0aa7962", "score": "0.57633305", "text": "def _copy_into_tempdir(self, old):\n extraction_tempdir = tempfile.mkdtemp()\n old.extractall(extraction_tempdir)\n old_filesdir = os.path.join(extraction_tempdir, self.basename)\n for item in os.listdir(old_filesdir):\n shutil.move(os.path.join(old_filesdir, item), self.tempdir)\n shutil.rmtree(extraction_tempdir)", "title": "" }, { "docid": "8f5a5ee3ebb174f04d4012f84daa6433", "score": "0.57532245", "text": "def handleFolder(foo, dirName, names):\n for name in names:\n path = os.path.join(dirName, name)\n # move all files to target folder\n if os.path.isfile(path):\n shutil.move(path, outFolder)", "title": "" }, { "docid": "16adf5eb74570eb7c08b5f0a731d0775", "score": "0.57263696", "text": "def _copy_template_dir(self):\n for (dirpath, dirnames, filenames) in os.walk(self.template_dir):\n for filename in filenames:\n if filename in FILES_TO_IGNORE:\n continue\n path = os.path.join(dirpath, filename)\n self.zip_file.write(path, os.path.relpath(path, self.template_dir))", "title": "" }, { "docid": "098a0433691ec1708e51503fba6114e7", "score": "0.5724657", "text": "def safe_move(file_path, out_dir):\n name = os.path.basename(file_path)\n if not os.path.exists(os.path.join(out_dir, 
name)):\n shutil.move(file_path, os.path.join(out_dir, name))\n else:\n base, extension = os.path.splitext(name)\n i = 1\n while os.path.exists(os.path.join(out_dir, f'{base}_{i}{extension}')):\n i += 1\n shutil.move(file_path, os.path.join(out_dir, f'{base}_{i}{extension}'))", "title": "" }, { "docid": "87d27d2d4da39de449db1f72c2b16c4a", "score": "0.5722395", "text": "def squash_directory(directory_path):\n file_list = []\n for file in os.listdir(directory_path):\n file_path = os.path.join(directory_path, file)\n if file.startswith(\"squash\"):\n logger.info(\"Directory (%s) already squashed.\", directory_path)\n return\n elif os.path.isfile(file_path):\n file_list.append(file_path)\n\n with open(\n os.path.join(directory_path, \"squash_\" + str(uuid.uuid1())) + \".csv\",\n \"w\",\n ) as new_file:\n for file in file_list:\n logger.info(\"getting content of: %s\", file)\n with open(file, \"r\") as reader:\n new_file.write(reader.read())\n\n for file in file_list:\n logger.info(\"removing file: %s\", file)\n os.remove(file)\n\n logger.info(\"successfully squashed: %s\", directory_path)", "title": "" }, { "docid": "29480d16126ec1092de7aaf1c9b92f8b", "score": "0.5713722", "text": "def newFolderByExtension(folder_path):\n new_folder_path = folder_path + \"_sorted\"\n print(\"new_folder_path:\", new_folder_path)\n\n for root, dirs, files in os.walk(folder_path):\n for file in files:\n print(file)\n print(root)\n print(dirs)\n file_ext = splitext(file)[1].strip(\".\")\n if file_ext == \"\":\n file_ext = \"NoExtension\"\n sub_folder = join(new_folder_path,file_ext)\n if not exists(sub_folder):\n print(\"new sub fodler:\", sub_folder)\n makedirs(sub_folder)\n if not exists(sub_folder):\n print(\"not created:\",sub_folder)\n copyfile(file,join(sub_folder,file))", "title": "" }, { "docid": "917c26372b8c92f66f610cbfd796d788", "score": "0.5692911", "text": "def recursive_overwrite(src, dest, ignore=None):\n if os.path.isdir(src):\n if not os.path.isdir(dest):\n os.makedirs(dest)\n files = os.listdir(src)\n if ignore is not None:\n ignored = ignore(src, files)\n else:\n ignored = set()\n for f in files:\n if f not in ignored:\n recursive_overwrite(os.path.join(src, f),\n os.path.join(dest, f),\n ignore)\n else:\n try:\n shutil.copyfile(src, dest)\n except Exception as e:\n print(e)", "title": "" }, { "docid": "9f1946139ca7c06167d88cdf2f2eb6c6", "score": "0.56908065", "text": "def mergefolders(root_src_dir, root_dst_dir, replace=False):\n for src_dir, dirs, files in os.walk(root_src_dir):\n dst_dir = src_dir.replace(root_src_dir, root_dst_dir, 1)\n if not os.path.exists(dst_dir):\n os.makedirs(dst_dir)\n for file_ in files:\n src_file = os.path.join(src_dir, file_)\n dst_file = os.path.join(dst_dir, file_)\n if os.path.exists(dst_file) and replace:\n os.remove(dst_file)\n shutil.copy(src_file, dst_dir)\n else:\n if not os.path.exists(dst_file):\n shutil.copy(src_file, dst_dir)", "title": "" }, { "docid": "829e156532efbe54409d891b416d23dd", "score": "0.5683604", "text": "def test_dir_to_child_self(self):\n root = tempfile.mkdtemp(dir=tmpdir)\n src = os.path.join(root, 'subdir', 'folder')\n src2 = os.path.join(root, 'subdir', 'folder', 'file.txt')\n dst = os.path.join(root, 'subdir', 'folder')\n util.fs.mkdir(src)\n util.fs.save(src2, DUMMY_BYTES)\n with self.assertRaises(util.fs.FSMoveInsideError):\n util.fs.move(src, dst)\n\n # verify no unexpected content change\n self.assertEqual(\n glob_files(src),\n {os.path.join(src, ''), src2},\n )", "title": "" }, { "docid": "0ecefdbfbcfd057eecfc6a9454e58289", 
"score": "0.56791705", "text": "def _overwriteExistingFiles(self):\n\n\t\tshutil.rmtree(self.output_folder)\n\t\tfiletools.checkDir(self.output_folder)", "title": "" }, { "docid": "3842a01b9961fcd693b2d5d0c8af25cb", "score": "0.5654301", "text": "def clean(self):\n\n dest = self._make_extraction_dir()\n p_join = os.path.join\n for name in self.names:\n try:\n shutil.move(p_join(self.root, name), p_join(dest, name))\n except IOError as e:\n if self.partial:\n pass\n else:\n raise e", "title": "" }, { "docid": "d87c946e5e8b1fabcdb3c3682bec33e9", "score": "0.56516594", "text": "def move_files(source_directory, destination_directory):\n \n files_in_source_dir = [f for f in listdir(source_directory) if isfile(join(source_directory, f))]\n \n for file in files_in_source_dir:\n shutil.copy(join(source_directory, file), destination_directory)", "title": "" }, { "docid": "4663709dcbfa036b5965b4d8e5e29623", "score": "0.5651226", "text": "def clean_up(name):\n if os.path.isdir(name):\n shutil.rmtree(name)\n\n make_destination_dir(name)", "title": "" }, { "docid": "976e62572ab1520701b2d44a0ef4ba42", "score": "0.56466764", "text": "def shutilDirectory(inPath, outPath, connection_info):\n files = os.listdir(inPath)\n filePaths = [os.path.join(inPath, i) for i in files]\n uniqueName = os.path.split(inPath)[1]\n for i in filePaths:\n tf = Insitu(i, connection_info)\n tf.parse()\n startDate = datetime.datetime.strftime(tf.keyDict[\"tstart\"], \"%Y%m%d\")\n endDate = datetime.datetime.strftime(tf.keyDict[\"tend\"], \"%Y%m%d\")\n tfNewName = uniqueName + \"_\" + startDate + \"_\" + endDate\n tfNewPath = os.path.join(outPath,tfNewName)\n shutil.copy(i, tfNewPath)", "title": "" }, { "docid": "952e320d7a106b1ea931b4770710b809", "score": "0.56431067", "text": "def loop_through_copy_files_to_one_dir(looped_dir, target_dir, include_link=False):\n if not os.path.isdir(looped_dir):\n raise Exception(\"looped_dir: a directory.\")\n if not os.path.isdir(target_dir):\n raise Exception(\"target_dir: a directory.\")\n for thing in os.listdir(looped_dir):\n thing = os.path.join(looped_dir, thing)\n if os.path.isdir(thing):\n loop_through_copy_files_to_one_dir(thing, target_dir)\n elif os.path.isfile(thing):\n shutil.move(thing, os.path.join(target_dir, parent_dir_and_name(thing)[1]))\n elif include_link:\n shutil.move(thing, os.path.join(target_dir, parent_dir_and_name(thing)[1]))\n return", "title": "" }, { "docid": "6e520c8a6da76f6ab4e09f4a1d80da94", "score": "0.5641401", "text": "def findDup(parentFolder):\n dups = {}\n for root, subDirs, fileList in os.walk(parentFolder):\n print(f'Scanning {root}...')\n for filename in fileList:\n # Get the path to the file\n path = os.path.join(root, filename)\n # Get the hash with 'hashfile' function\n fileHash = hashfile(path)\n if fileHash in dups:\n dups[fileHash].append(path)\n else:\n dups[fileHash] = [path]\n return dups", "title": "" }, { "docid": "62b3c1e68389a94cd9c555b78cb71f3c", "score": "0.5640435", "text": "def _create_destination_dir_and_move_image_files(self, destination):\n nii_rel_path = self.assembly_nifti_rel_path if destination == 'assembly_bids' else self.trashbin_nifti_rel_path\n json_rel_path = re.sub(r\"\\.nii(\\.gz)?$\", '.json', nii_rel_path) if self.json_path else None\n bval_rel_path = re.sub(r\"\\.nii(\\.gz)?$\", '.bval', nii_rel_path) if self.bval_path else None\n bvec_rel_path = re.sub(r\"\\.nii(\\.gz)?$\", '.bvec', nii_rel_path) if self.bvec_path else None\n\n absolute_dir_path = os.path.join(self.data_dir, os.path.dirname(nii_rel_path))\n 
self.create_dir(absolute_dir_path)\n\n file_type_to_move_list = [\n {\n 'original_file_path': self.nifti_path,\n 'new_file_path': os.path.join(self.data_dir, nii_rel_path)\n }\n ]\n if self.json_path:\n file_type_to_move_list.append(\n {\n 'original_file_path': self.json_path,\n 'new_file_path': os.path.join(self.data_dir, json_rel_path)\n }\n )\n if self.bval_path:\n file_type_to_move_list.append(\n {\n 'original_file_path': self.bval_path,\n 'new_file_path': os.path.join(self.data_dir, bval_rel_path)\n }\n )\n if self.bvec_path:\n file_type_to_move_list.append(\n {\n 'original_file_path': self.bvec_path,\n 'new_file_path': os.path.join(self.data_dir, bvec_rel_path)\n }\n )\n\n for file_dict in file_type_to_move_list:\n original_file_path = file_dict['original_file_path']\n new_file_path = file_dict['new_file_path']\n\n message = f\"Moving file {original_file_path} to {new_file_path}\"\n self.log_info(message, is_error='N', is_verbose='Y')\n self.move_file(original_file_path, new_file_path)\n\n if destination == 'assembly_bids':\n self.json_file_dict['file_blake2b_hash'] = self.nifti_blake2\n if self.json_path:\n self.json_file_dict['bids_json_file'] = json_rel_path\n self.json_file_dict['bids_json_file_blake2b_hash'] = self.json_blake2\n if self.bval_path:\n self.json_file_dict['check_bval_filename'] = bval_rel_path\n self.json_file_dict['check_bval_filename_blake2b_hash'] = self.bval_blake2\n if self.bvec_path:\n self.json_file_dict['check_bvec_filename'] = bvec_rel_path\n self.json_file_dict['check_bvec_filename_blake2b_hash'] = self.bvec_blake2", "title": "" }, { "docid": "7df815c4afff2afe7b72c60738bd6e0f", "score": "0.5626348", "text": "def copy_dir(source_dir, dest_dir):\n for root, _, files in walk(source_dir):\n src_root = Path(root)\n dest_root = Path(dest_dir, src_root.relative_to(source_dir))\n Path(dest_root).mkdir(parents=True, exist_ok=True)\n for f in files:\n src_file = Path(src_root, f)\n dest_file = Path(\n dest_root, f.replace(\".testjava\", \".java\").replace(\".testkt\", \".kt\")\n )\n dest_file.write_text(src_file.read_text())", "title": "" }, { "docid": "c409b7973b50750f996d13c72243f17e", "score": "0.5621315", "text": "def _copy_files(context, files):\n for file_task in files:\n if not file_task.dst:\n file_task.dst = file_task.src\n if os.path.isdir(file_task.src):\n context.remove_tree(file_task.dst)\n context.copytree_to(file_task.src, file_task.dst)\n else:\n context.copy_to(file_task.src, file_task.dst)", "title": "" }, { "docid": "d59091e33010f403fbfc1145ee5c36d4", "score": "0.55967206", "text": "def move_files(path):\n moved = 0\n for f in os.listdir(path):\n prefix = path + '/'\n current_dir = prefix + f\n for key in extensions.keys():\n if f.endswith(extensions[key]):\n try:\n os.rename(current_dir, prefix + key + '/' + f)\n except OSError:\n for i in range(10000):\n try:\n os.rename(current_dir, prefix + key + '/' + 'copy_' + str(i) + f)\n except:\n continue\n break\n moved += 1\n return moved", "title": "" }, { "docid": "3a38d1741e87af55c27da21178e65f9c", "score": "0.5596366", "text": "def copy_files(source_dir, dest_dir):\n for files in os.scandir(source_dir):\n filename = source_dir + files.name\n copy2(filename, dest_dir)", "title": "" }, { "docid": "d05f1a869b8c3bdf36344278c2ac5715", "score": "0.55857086", "text": "def _cleanup_filled_directory(\n self, symbol, dest_node, src_entries, copy_source\n ):\n\n cvs_paths = src_entries.keys()\n cvs_paths.sort()\n for cvs_path in cvs_paths:\n if isinstance(cvs_path, CVSDirectory):\n # Path is a 
CVSDirectory:\n try:\n dest_subnode = dest_node[cvs_path]\n except KeyError:\n # Path doesn't exist yet; it has to be created:\n dest_node = self._fill_directory(\n symbol, None, src_entries[cvs_path], None\n ).parent_mirror_dir\n else:\n # Path already exists, but might have to be cleaned up:\n dest_node = self._fill_directory(\n symbol, dest_subnode, src_entries[cvs_path], copy_source\n ).parent_mirror_dir\n else:\n # Path is a CVSFile:\n self._fill_file(\n symbol, cvs_path in dest_node, src_entries[cvs_path], copy_source\n )\n # Reread dest_node since the call to _fill_file() might have\n # made it writable:\n dest_node = self._mirror.get_current_path(\n dest_node.cvs_path, dest_node.lod\n )\n\n return dest_node", "title": "" }, { "docid": "0292f6a0ebb0b4bbe8d28a95817c2e2e", "score": "0.5579431", "text": "def file_arrangement(abs_dir_path):\n if not os.path.isdir(abs_dir_path):\n return\n suffix_type = []\n sep = os.path.sep\n suffix_format = re.compile('(.*)\\.(.*)') # name in group(1) and suffix in group(2)\n # using to split extension also can use os.path.splitext(path)\n for root, dirs, files in os.walk(abs_dir_path):\n dir_name = os.path.basename(root) # current directory name\n for file_name in files:\n curr_path = root + sep + file_name\n mapping = suffix_format.search(file_name)\n if mapping is None:\n new_path_name = abs_dir_path + sep + dir_name + '_' + file_name\n shutil.copy(curr_path, new_path_name)\n continue\n name = mapping.group(1)\n suffix = mapping.group(2)\n if suffix not in suffix_type:\n suffix_type.append(suffix)\n os.makedirs(abs_dir_path + sep + suffix)\n new_path_name = abs_dir_path + sep + suffix + sep + dir_name + '_' + file_name\n # that is put in to abs_dir_path\\suffix\\dir_name_filename\n shutil.copy(curr_path, new_path_name)", "title": "" }, { "docid": "8da16925dc8b72df454aed9b0800de6e", "score": "0.5579215", "text": "def copy_tree_over(src, dest):\n # From https://stackoverflow.com/questions/9160227/dir-util-copy-tree-fails-after-shutil-rmtree/28055993 : \n # If you copy folder, then remove it, then copy again it will fail, because it caches all the created dirs. 
\n # To workaround you can clear _path_created before copy:\n distutils.dir_util._path_created = {}\n distutils.dir_util.copy_tree(src, dest)", "title": "" }, { "docid": "2616a910f42bc65ffa285978c5dd41f1", "score": "0.55754566", "text": "def copytree_preserve_existing(os_service, src, dst):\n oss = os_service\n helper = Helpers()\n helper.copy_directory_tree(oss, src, dst)\n\n file_it = FileIterator(os_service, src)\n for file_sub_path in file_it:\n source_file_path = os.path.join(src, file_sub_path)\n dest_file_path = os.path.join(dst, file_sub_path)\n if not oss.exists(dest_file_path):\n oss.copyfile(source_file_path, dest_file_path)", "title": "" }, { "docid": "aabe8267416d2bc6588b7369860bb5c1", "score": "0.5570711", "text": "def copy_dirs(self, dst, subs='', src_sub='', dst_sub=''):\n # Join paths (if specified)\n src = self.src\n if subs:\n src = os.path.join(src, subs)\n dst = os.path.join(dst, subs)\n if dst_sub:\n dst = os.path.join(dst, dst_sub)\n if src_sub:\n src = os.path.join(src, src_sub)\n print('\\nComparing...\\n')\n print('\\nSource: %s' % src)\n print('Destination: %s\\n' % dst)\n for src_root, src_dirs, src_files in os.walk(src, topdown=True):\n\n dst_root = os.path.join(dst, os.path.relpath(src_root, src))\n dirs = filecmp.dircmp(src_root, dst_root)\n\n # Find old files and delete them from destination\n for item in dirs.right_only:\n try:\n print('Removing ' + item)\n except UnicodeEncodeError:\n # Prevents the program from stopping in the event of an\n # awkward file name\n print('Removing file (Unicode error)')\n dst_path = os.path.join(dst_root, item)\n if os.path.isdir(dst_path):\n shutil.rmtree(dst_path)\n else:\n os.remove(dst_path)\n\n # Find new files and add them to destination\n for item in dirs.left_only:\n try:\n print('Adding ' + item)\n except UnicodeEncodeError:\n # Prevents the program from stopping in the event of an\n # awkward file name\n print('Adding file (Unicode error)')\n src_path = os.path.join(src_root, item)\n if os.path.isdir(src_path):\n shutil.copytree(src_path, os.path.join(dst_root, item))\n else:\n shutil.copy2(src_path, os.path.join(dst_root, item))\n\n # Once clearing and adding has completed, update existing files\n print('\\nUpdating: ')\n if plat != 'win32':\n os.system(\n \"\"\"rsync -r -u -v --links \"{0}\"/* \"{1}\" \"\"\".format(src, dst))\n else:\n os.system(\"\"\"xcopy /I /E /Y /D \"{0}\" \"{1}\" \"\"\".format(src, dst))", "title": "" }, { "docid": "f6ba29e8b3369f5c134f8674fb030863", "score": "0.55684304", "text": "def move(sfile, source_dir, directory):\n sbase = osp.splitext(osp.split(sfile)[1])[0]\n already_here = any(sbase in dfile for dfile in dest_files)\n if not already_here:\n shutil.copy(osp.join(source_dir, sfile), directory)\n logger.info(\"Copied {} into pylinac directory\".format(sfile))", "title": "" }, { "docid": "7c11a41bc54a09cc18d12eb2e559434c", "score": "0.55585176", "text": "def moveResultsToMain(self):\n # Delete input file\n os.remove(self.subDir+self.simName+'\\\\'+self.simName+'.inp')\n # Move files\n allFiles = os.listdir(self.subDir+self.simName+'\\\\')\n for fName in allFiles:\n os.rename(self.subDir+self.simName+'\\\\'+fName, self.subDir+fName)\n # Delete (empty) simulation folder\n os.rmdir(self.subDir+self.simName)\n # Brief pause\n time.sleep(5)", "title": "" }, { "docid": "d1ba59211445527a8926e400decf0aa8", "score": "0.55563086", "text": "def make_files_match(from_dir, to_dir, do_commits = True, verbose=False, delete_missing_from = False, \n do_delete_files = True):\n\n # if from dir doesn't 
exist, delete to dir\n # This is used by pull_step.\n if (not os.path.exists(from_dir)):\n if delete_missing_from and os.path.exists(to_dir):\n shutil.rmtree(to_dir)\n return\n\n\n # if to_dir doesn't exist, create it - we are probably pushing\n # for the first time - Issue #2\n if (not os.path.exists(to_dir)):\n try:\n os.makedirs(to_dir, 0755)\n except:\n pass\n\n # What files is git managing for us in each directory?\n if not os.path.exists(from_dir):\n from_files = [ ]\n else:\n from_files = git.list_files(from_dir)\n to_files = git.list_files(to_dir)\n\n from_files_set = set(from_files)\n to_files_set = set(to_files)\n\n # We need to remove \"by hand\" those files which are no longer meant to\n # be in the \"to\" directory, so work out which files that is\n deleted_files = to_files_set - from_files_set\n\n if verbose:\n print 'Making files the same'\n print '====================='\n print 'From', from_dir\n print 'To ', to_dir\n if (len(from_files) > 0):\n max_len = max(len(n) for n in from_files)\n else:\n max_len = 1\n for name in sorted(from_files_set | to_files_set):\n if name in from_files_set and name in to_files_set:\n print '%-*s %s'%(max_len, name, name)\n elif name in from_files_set:\n print '%-*s'%(max_len, name)\n else:\n print '%-*s %s'%(max_len, ' ', name)\n print '====================='\n\n # If we copy over everything from the \"from\" directory to the \"to\"\n # directory, and \"git add\" them all, then that will cope with changes to\n # existing files, and also add in any new files. We then need to remember\n # to \"git rm\" any files that are meant to have gone away, at which point we\n # can commit all the changes.\n #\n # We use rsync for our copy because it will handle things like:\n #\n # rsync -a --relative four/jim four/bob <weld_root>/.weld/bases/project124\n #\n # and put \"jim\" and \"bob\" into <weld_root>/.weld/bases/project124/four\n if os.path.exists(from_dir):\n cmd = ['rsync', '-a', '--relative']\n cmd += from_files\n cmd += [to_dir]\n run_silently(cmd, cwd=from_dir, verbose=verbose)\n\n if len(from_files) >0:\n git.add(to_dir, from_files, verbose=verbose)\n if do_commits:\n git.commit_using_message(to_dir, \"Add files from %s\"%from_dir, verbose=verbose)\n\n if deleted_files and do_delete_files:\n git.rm(to_dir, list(deleted_files))\n if do_commits: \n git.commit_using_message(to_dir, \"Delete files no longer in %s\"%from_dir)", "title": "" }, { "docid": "3f2aa5c46b28e9b21b8c5c50bc334787", "score": "0.5544597", "text": "def move_files(files, destdir):\n ensure_dir(destdir)\n for f in files:\n if f.startswith(destdir):\n continue\n subprocess.call([\"mv\", \"-f\", f, destdir])", "title": "" }, { "docid": "71603c45a0219c0bf7e5e23c80132c6e", "score": "0.5533806", "text": "def RelocateNonRepoFiles(all_files):\n tempDir = os.path.join(u'scripts', u'temp')\n rm(tempDir)\n os.makedirs(tempDir)\n\n non_repo = GetNonRepoFiles(all_files)\n if non_repo:\n lprint(\" Relocating non-repository files:\")\n for path in non_repo:\n lprint(\" \", path)\n src = os.path.join(mopy, path)\n dst = os.path.join(tempDir, path)\n if os.path.isdir(src):\n shutil.move(src, dst)\n elif os.path.isfile(src):\n dirname = os.path.dirname(dst)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n shutil.move(src, dst)", "title": "" }, { "docid": "9864431a8adcaca6fb82d9e420bd838f", "score": "0.5531388", "text": "def prep_tree_for_tar(repodir, subdir, outdir, dstname):\n src = os.path.join(repodir, subdir)\n if not os.path.exists(src):\n sys.exit(\"%s: No such file or directory\" 
% src)\n\n dst = os.path.join(outdir, dstname)\n if os.path.exists(dst) and \\\n (os.path.samefile(src, dst) or\n os.path.samefile(os.path.dirname(src), dst)):\n sys.exit(\"%s: src and dst refer to same file\" % src)\n\n #shutil.copytree(src, dst, symlinks=True)\n gitlock = os.path.join(src, '.git/index.lock')\n count = 0\n while os.path.exists(gitlock):\n time.sleep(5)\n count = count + 1\n if count > 100:\n break\n safe_run(['cp', '-a', src, dst], cwd=os.getcwd(),interactive=sys.stdout.isatty())\n\n return dst", "title": "" }, { "docid": "84e8d8b04e948e1c30aa4bc7df405edf", "score": "0.5518604", "text": "def move_files(src, dst, group):\n print(\"Moving!\")\n print(group)\n for fname in group:\n print(src+ '/' + fname)\n os.rename(src + '/' + fname, dst + '/' + fname)", "title": "" }, { "docid": "0ed4254726c1bfa74a01b65d24e97c5f", "score": "0.5513786", "text": "def gc_mvall( scratchdir ):\n sdirs = glob.glob(scratchdir)\n for scratchdir in sdirs:\n gcdir = scratchdir.replace( '/scratch/', '/scratch/_gc/' )\n print \"scratchdir=\",scratchdir\n print \"gcdir =\",gcdir\n if os.path.isdir(scratchdir):\n if os.path.isdir(gcdir):\n print \"WARNING, gcdir %s already exists,\\n will not move from scratchdir %s\"%\\\n (gcdir,scratchdir)\n else:\n shutil.move( scratchdir, gcdir )\n else:\n #raise Exception(\"source directory %s doesn't exist\"%scratchdir)\n print \"WARNING\", \"source directory %s doesn't exist\"%scratchdir\n print \"Nothing will be moved from scratch to scratch/_gs.\"\n if not os.path.isdir(glob.glob(gcdir)[0]):\n raise Exception(\"gcdir %s doesn't exist\"%gcdir)", "title": "" }, { "docid": "7b53a4ac23d2d6b8c876aaafab56bfdf", "score": "0.5511875", "text": "def _copy_dir_recursive(src_rel, dst_rel):\n src = os.path.expanduser(src_rel)\n dst = os.path.expanduser(dst_rel)\n try:\n shutil.copytree(src, dst)\n logger.info('Files copied from %s to %s', src, dst)\n except OSError as exc:\n if exc.errno == errno.ENOTDIR:\n shutil.copy(src, dst)\n logger.info('Files copied from %s to %s', src, dst)\n else:\n raise", "title": "" }, { "docid": "2bb807e1ca46d9cdfbf6ba7dd8928e8c", "score": "0.5511476", "text": "def movefiles(sub, newid, cid):\n # construct full CISC ID (with sub-)\n cid_full = \"sub-\" + cid\n for root, dirs, files in os.walk(sub):\n for f in files:\n # Only continue with the file if it's NOT a stupid annoying mac file\n if \"DS_Store\" not in f:\n # Construct full path\n fullf = os.path.join(root, f)\n # Create new path by replacing sub-CISCID with sub- ADIEID\n newf = fullf.replace(cid_full, newid)\n # Check that directories already exist - should've been created in F3\n if not os.path.isdir(os.path.split(newf)[0]):\n print(\n \"WARNING: '{}' does NOT exists\".format(\n os.path.split(newf)[0]\n )\n )\n # If directory does exist, proceed with move and rename\n else:\n print(\"Attempting to move\", fullf, \"to\", newf, \"\\n\")\n shutil.move(fullf, newf)\n elif \"DS_Store\" in f:\n continue", "title": "" }, { "docid": "d2f051c566763bc0cec954eb9772a164", "score": "0.5510668", "text": "def remove_entries(dest):\n for entry in os.listdir(dest):\n fullpath = os.path.join(dest, entry)\n if os.path.isfile(fullpath):\n os.remove(fullpath)\n else:\n shutil.rmtree(fullpath)", "title": "" }, { "docid": "775041c355bd41c2c34db388a53e54ab", "score": "0.5499051", "text": "def rsync_files(src_file: Union[str, PathLike], dest_file: Union[str, PathLike]) -> None:\n copy2(src_file, dest_file)\n src_folder = dirname(src_file)\n if not src_folder:\n src_folder = '.'\n copystat(src_folder, 
dirname(dest_file))", "title": "" }, { "docid": "e5ae744e8068cab08b1d8555209a7d1a", "score": "0.5497786", "text": "def upload_tree(self, src, dst, ignore=None):\n names = os.listdir(src)\n if ignore is not None:\n ignored_names = ignore(src, names)\n else:\n ignored_names = set()\n\n try:\n dst = dst.replace('\\\\', '/')\n self.conn.mkd(dst)\n except error_perm:\n pass\n\n errors = []\n for name in names:\n if name in ignored_names:\n continue\n src_name = os.path.join(src, name)\n dst_name = os.path.join(dst, name)\n try:\n if os.path.islink(src_name):\n pass\n elif os.path.isdir(src_name):\n self.upload_tree(src_name, dst_name, ignore)\n else:\n # Will raise a SpecialFileError for unsupported file types\n self.put(src_name, dst_name)\n except Exception as why:\n errors.append((src_name, dst_name, str(why)))\n\n return dst", "title": "" }, { "docid": "0382cbf8d9c1895f0fb360781da2f9b3", "score": "0.54899615", "text": "def single_folder_2_planet_order_hierarchy(input_dir, target_dir):\r\n for root, dirs, files in os.walk(os.path.normpath(input_dir), topdown=False):\r\n for file_name in files:\r\n # This selects only the actual image *.tif file\r\n if \"AnalyticMS_clip\" in file_name and file_name.endswith(\".tif\"):\r\n logging.info(\"Found: {}\".format(file_name))\r\n file_name_split = file_name.split(\"_\")\r\n # The date_id in the ISO format YYYYMMDD becomes a folder\r\n date_id = file_name_split[0]\r\n date_dir = os.path.join(target_dir, date_id)\r\n logging.info(\"Writing date directory as: {}\".format(date_dir))\r\n if not os.path.isdir(date_dir):\r\n os.mkdir(date_dir)\r\n # The img_id is formatted as YYYYMMDD_id_sat\r\n img_id = \"_\".join(file_name_split[0:3])\r\n logging.info(\"Writing image folder as: {}\".format(img_id))\r\n img_dir = os.path.join(date_dir, img_id)\r\n if not os.path.isdir(img_dir):\r\n os.mkdir(img_dir)\r\n logging.info(\"Writing to image directory: {}\".format(img_dir))\r\n # loop for copying all metadata files as well with the same img_id\r\n for file_name2 in files:\r\n if img_id in file_name2:\r\n source_dir = os.path.join(input_dir, file_name2)\r\n shutil.copy2(source_dir, img_dir) \r\n return 0", "title": "" }, { "docid": "6ea009c9c62d1858746e62f28c2d19d3", "score": "0.5485185", "text": "def copydir(self, src, dst, replace=True):\n for file in os.listdir(src):\n if os.path.isfile(os.path.join(src, file)):\n if os.path.exists(os.path.join(dst, file)) and not replace:\n self.logger.debug(\"> Skipped file \\\"\" + os.path.join(src, file) + \"\\\": Already exists.\")\n elif os.path.exists(os.path.join(dst, file)):\n os.unlink(os.path.join(dst, file))\n shutil.copy2(os.path.join(src, file), dst)\n self.logger.debug(\"> Replaced file \\\"\" + os.path.join(src, file) + \"\\\".\")\n else:\n shutil.copy2(os.path.join(src, file), dst)\n self.logger.debug(\"> Copied file \\\"\" + os.path.join(src, file) + \"\\\".\")\n elif os.path.isdir(os.path.join(dst, file)):\n self.copydir(os.path.join(src, file), os.path.join(dst, file))\n else:\n os.makedirs(os.path.join(dst, file))\n self.copydir(os.path.join(src, file), os.path.join(dst, file))", "title": "" }, { "docid": "f8909620f8f6b691a824f7e780eca88b", "score": "0.54825467", "text": "def cleanup_project_dir(self):\n contents = os.listdir(self.project.root)\n num = 0\n for name in contents:\n if name.lower().endswith(\".asc\"):\n os.rename(self.project.root + name, self.project.data_dir + name)\n num += 1\n\n if num > 0:\n messagebox.showinfo(\n \"New Project\",\n \"{} ASC files were moved to {}\".format(num, 
self.project.data_dir),\n )", "title": "" }, { "docid": "cf82e570a7ed52e0b94b1afdee663101", "score": "0.5452133", "text": "def copy_directory(source, dest):\n for root, dirs, files in os.walk(source):\n if not os.path.isdir(root):\n os.makedirs(root)\n\n for file in files:\n rel_path = root.replace(source, '').lstrip(os.sep)\n dest_path = os.path.join(dest, rel_path)\n\n if not os.path.isdir(dest_path):\n os.makedirs(dest_path)\n if(dirs and files):\n shutil.copyfile(os.path.join(root, file),\n os.path.join(dest_path, file))", "title": "" }, { "docid": "255b76c568aa4b73ed9b4a2bf2e5bd91", "score": "0.5451473", "text": "def find_duplicates(data_dir):\n duplicates = []\n im_hashes = dict()\n\n def append_stuff(directory, im_hashes):\n \"\"\"\n Return a list of absolute paths to all duplicate files in directory.\n \"\"\"\n print(directory)\n for file_name in os.listdir(directory):\n file_name = f'{directory}/{file_name}'\n if os.path.isfile(file_name):\n with open(file_name, 'rb') as file:\n file_hash = hashlib.md5(file.read()).hexdigest()\n try:\n temp = im_hashes[file_hash]\n del temp\n duplicates.append(file_name)\n print(file_name.split('/')[-1])\n except KeyError:\n im_hashes[file_hash] = file_name\n else:\n append_stuff(file_name, im_hashes)\n append_stuff(data_dir, im_hashes)\n return duplicates", "title": "" }, { "docid": "c5ec29f44525327d2df19fe422b88f50", "score": "0.544854", "text": "def clean_files(self) -> None:\n api_dir = os.path.join(self._src_dir, \"_api\")\n\n shutil.rmtree(api_dir, ignore_errors=True)\n shutil.rmtree(self._out_dir, ignore_errors=True)\n os.makedirs(api_dir, exist_ok=True)\n os.makedirs(self._out_dir, exist_ok=True)\n\n print(f\"Recreated content of the {shlex.quote(self._out_dir)} and {shlex.quote(api_dir)} folders\")", "title": "" }, { "docid": "49097edb46a99cc8a7e7e9d1e4a52354", "score": "0.5430227", "text": "def moveFilesToTestDirectory(directory):\n amount_of_files = getAmountOffFilesInDirectory(directory)\n amount_of_files_moved = amount_of_files * 0.7 # Gets finds which number is 70% of the files\n files_counted = 0 # Counts the amount of files\n for file_name in os.listdir(directory):\n # Once loop reaches past the 70% mark it begins to move training files to the test file directory\n if files_counted >= amount_of_files_moved:\n class_name = directory.split('/')[1] # Gets name of the class I.E. 
Digimon or Pokemon\n shutil.move(directory + '/' + file_name, 'test/' + class_name + '/' + file_name) # Moves files\n files_counted += 1", "title": "" }, { "docid": "5ceff1e84e3fb9b5dfa67b280823a45d", "score": "0.5419111", "text": "def move(src, dst):\n\n try:\n os.rename(src, dst)\n except OSError:\n if os.path.isdir(src):\n if destinsrc(src, dst):\n raise Error, \"Cannot move a directory '%s' into itself '%s'.\" % (src, dst)\n copytree(src, dst, symlinks=True)\n rmtree(src)\n else:\n copy2(src,dst)\n os.unlink(src)", "title": "" }, { "docid": "5a77427c2dd2ada459d01903eb2ed1e9", "score": "0.54185", "text": "def move_files(original_fold, data_fold, data_filename):\n with open(data_filename) as f:\n for line in f.readlines():\n vals = line.split('/')\n dest_fold = os.path.join(data_fold, vals[0])\n if not os.path.exists(dest_fold):\n os.mkdir(dest_fold)\n shutil.move(os.path.join(original_fold, line[:-1]), os.path.join(data_fold, line[:-1]))", "title": "" }, { "docid": "87f3d2e56e9d512ce978f5d4d2a731ab", "score": "0.5409844", "text": "def merge_multi_folder_2_single_folder(input_dir, merged_dir):\r\n for root, dirs, files in os.walk((os.path.normpath(input_dir)), topdown=False):\r\n for file_name in files:\r\n logging.info(\"Found: {}\".format(file_name))\r\n source_dir = os.path.join(root, file_name)\r\n shutil.copy2(source_dir, merged_dir)\r\n logging.info(\"Wrote: {} to merged directory\".format(file_name))\r\n return 0", "title": "" }, { "docid": "acf82960079d6018c53bfa1d8c2198d1", "score": "0.54056674", "text": "def extract_step(self):\n super(EB_OpenFOAM, self).extract_step()\n # make sure that the expected subdir is really there after extracting\n # if not, the build scripts (e.g., the etc/bashrc being sourced) will likely fail\n openfoam_installdir = os.path.join(self.installdir, self.openfoamdir)\n if not os.path.exists(openfoam_installdir):\n self.log.warning(\"Creating expected directory %s, and moving everything there\" % openfoam_installdir)\n try:\n contents_installdir = os.listdir(self.installdir)\n source = os.path.join(self.installdir, contents_installdir[0])\n # it's one directory but has a wrong name\n if len(contents_installdir) == 1 and os.path.isdir(source):\n target = os.path.join(self.installdir, self.openfoamdir)\n self.log.debug(\"Renaming %s to %s\", source, target)\n os.rename(source, target)\n else:\n mkdir(openfoam_installdir)\n for fil in contents_installdir:\n if fil != self.openfoamdir:\n source = os.path.join(self.installdir, fil)\n target = os.path.join(openfoam_installdir, fil)\n self.log.debug(\"Moving %s to %s\", source, target)\n shutil.move(source, target)\n os.chdir(openfoam_installdir)\n except OSError as err:\n raise EasyBuildError(\"Failed to move all files to %s: %s\", openfoam_installdir, err)", "title": "" }, { "docid": "088f78b7fe91603e6456e91e534cab66", "score": "0.53969646", "text": "def file_move_dir(mv_input_img_address, mv_output_img_address):\n single_char_list = glob.glob(os.path.join(mv_input_img_address, \"*\"))\n last_name_of_output_file = int(((os.listdir(mv_output_img_address))[-1]).split(sep='.')[0])\n\n for img_index in single_char_list:\n # input_image = DOT_IMG_ADDRESS + str(img_index) + \".jpg\"\n # mv_input_image = os.path.join(DOT_INPUT_IMG_ADDRESS, \"{}.png\".format(str(img_index).zfill(6)))\n mv_raw_img = cv2.imread(img_index)\n DEBUG = False\n if DEBUG:\n cv2.imshow('mv_raw_img', mv_raw_img)\n cv2.waitKey()\n img_output_address = os.path.join(mv_output_img_address,\n \"{}.png\".format(str(last_name_of_output_file + 
1).zfill(6)))\n cv2.imwrite(img_output_address, mv_raw_img)\n last_name_of_output_file += 1\n for img_index in single_char_list:\n # input_image = DOT_IMG_ADDRESS + str(img_index) + \".jpg\"\n # mv_input_image = os.path.join(DOT_INPUT_IMG_ADDRESS, \"{}.png\".format(str(img_index).zfill(6)))\n # if os.path.exists(mv_input_image):\n os.remove(img_index)\n # print(os.path.split(img_index)[:-1])\n os.removedirs(mv_input_img_address)", "title": "" }, { "docid": "b5d27487fcea039f1f673e4983aa6a0e", "score": "0.539501", "text": "def move_songs():\n\n OLD_DIR = os.path.expanduser(\"~/.playx\")\n NEW_DIR = os.path.expanduser(\"~/.playx/songs\")\n\n if not os.path.isdir(NEW_DIR):\n os.makedirs(NEW_DIR)\n\n for file in os.listdir(OLD_DIR):\n if file.endswith(\"mp3\"):\n move(os.path.join(OLD_DIR, file), os.path.join(NEW_DIR, file))", "title": "" }, { "docid": "b975b474aa96c27d12f7da2e3b88a698", "score": "0.5394871", "text": "def move_files(self, photo_paths, move_to):\n\n update_folders = []\n moved = 0\n for fullpath in photo_paths:\n photo_info = self.database_exists(fullpath)\n if photo_info:\n new_path = os.path.join(photo_info[2], move_to)\n try:\n if not os.path.isdir(new_path):\n os.makedirs(new_path)\n except:\n self.popup_message(text='Error: Could Not Create Folder', title='Error')\n break\n photo_path = os.path.join(photo_info[2], photo_info[0])\n current_folder, current_file = os.path.split(photo_path)\n new_photo_path = os.path.join(new_path, current_file)\n new_fullpath = os.path.join(move_to, current_file)\n backup_path = photo_info[10]\n if os.path.exists(backup_path):\n new_backup_path = os.path.join(new_path, '.originals')\n new_backup_file = os.path.join(new_backup_path, current_file)\n try:\n os.makedirs(new_backup_path)\n os.rename(backup_path, new_backup_file)\n except:\n self.popup_message(text='Error: Could Not Move Backup File', title='Error')\n break\n if not os.path.exists(new_backup_file):\n self.popup_message(text='Error: Could Not Move Backup File', title='Error')\n break\n photo_info[10] = new_backup_file\n if os.path.exists(photo_path):\n try:\n os.rename(photo_path, new_photo_path)\n except:\n self.popup_message(text='Error: Could Not Move File', title='Error')\n break\n if not os.path.exists(new_photo_path):\n self.popup_message(text='Error: Could Not Move File', title='Error')\n break\n\n self.database_item_update(photo_info)\n self.database_item_rename(fullpath, new_fullpath, move_to)\n update_folders.append(photo_info[1])\n moved = moved + 1\n if moved:\n self.message(\"Moved \"+str(moved)+\" files.\")\n update_folders.append(move_to)\n self.update_photoinfo(folders=update_folders)", "title": "" }, { "docid": "77d140b223afe752df41227c612548c3", "score": "0.5384689", "text": "def clear_destination_dir (self):\n \n for path, dirs, files in os.walk(self.DESTINATION_DIR, topdown=False):\n for file in files:\n os.remove(os.path.join(path, file))", "title": "" }, { "docid": "20bb9ad0107696dbead529ef90f28c08", "score": "0.53845114", "text": "def _copy_new_files(directory, config):\n def move(sfile, source_dir, directory):\n \"\"\"The function to move files from the source to the destination\"\"\"\n sbase = osp.splitext(osp.split(sfile)[1])[0]\n already_here = any(sbase in dfile for dfile in dest_files)\n if not already_here:\n shutil.copy(osp.join(source_dir, sfile), directory)\n logger.info(\"Copied {} into pylinac directory\".format(sfile))\n\n # return if no sources are configured or are not real directories\n source_undefined = config['general']['sources'] is None\n 
if source_undefined:\n return\n sources_are_dirs = all(osp.isdir(source) for source in config['general']['sources'])\n if not sources_are_dirs:\n return\n\n # move new files into destination directory\n dest_files = os.listdir(directory)\n with concurrent.futures.ThreadPoolExecutor(4) as exec:\n for source_dir in config['general']['sources']:\n logger.info(\"Querying new files from {}\".format(source_dir))\n source_files = os.listdir(source_dir)\n time.sleep(0.5)\n if config['general']['rolling-window-days'] > 0:\n window_cutoff = datetime.datetime.timestamp(datetime.datetime.now() - datetime.timedelta(days=config['general']['rolling-window-days']))\n source_files = [f for f in source_files if osp.getmtime(osp.join(source_dir, f)) > window_cutoff]\n for file in source_files:\n exec.submit(move, file, source_dir, directory)", "title": "" }, { "docid": "197d2ca14c339db2efd242892850f953", "score": "0.53807014", "text": "def copy_over(temp_dir, paths, artefact=\"model\", dir=\"models\"):\n\n for path in paths:\n print(\"Importing %s %s\" % (artefact, str(Path(path).resolve().relative_to(Path(temp_dir).resolve()))))\n dest_path = Path(dir, Path(path).name)\n if dest_path.exists():\n print(\"Skipping `%s` as it already exists\" % str(dest_path))\n else:\n shutil.copytree(path, str(dest_path))", "title": "" }, { "docid": "090bc18a430f0cf4d076f6288befabde", "score": "0.53780043", "text": "def copy_folder(self, src, dst):\n\n src_list, dst_list = [], []\n for folder, _, files in os.walk(src):\n rfolder = folder.replace(src, dst)\n command = \"mkdir -p {0}\".format(rfolder)\n self.ssh.exec_command(command)\n src_list.extend([os.path.join(folder, f) for f in files if not f.endswith(\".pyc\")])\n dst_list.extend([os.path.join(rfolder, f) for f in files if not f.endswith(\".pyc\")])\n\n self.ssh.put_file(src_list, dst_list)", "title": "" }, { "docid": "45a12b318adc73b9d513335ae5987dea", "score": "0.53752345", "text": "def move_clean(images, destination):\n\tfor image in images:\n\t\tshutil.move(image, destination)", "title": "" }, { "docid": "4c88a28034af4d2c2c4cb3db93e89e0a", "score": "0.5372583", "text": "def rename_directories(self):\r\n\r\n folder_classes = os.listdir(PATH_TO_DATA)\r\n\r\n # Append [class, image] of dataset as entry of new array\r\n for directory in folder_classes:\r\n new_directory = directory[:2]\r\n new_directory = new_directory.strip(\"_\")\r\n new_directory = int(new_directory) - 1\r\n new_directory = str(new_directory) + \"_\" + directory[3:]\r\n os.rename(os.path.join(PATH_TO_DATA, directory), os.path.join(PATH_TO_DATA, new_directory))", "title": "" }, { "docid": "13ff59f56d42bc8e0aa08f79e1ae7490", "score": "0.53669804", "text": "def remove_duplicates(directory_path):\n print(f'Indexing images...')\n images = glob.glob(os.path.join(directory_path, '*.jpg'))\n set_img = set()\n for image in images:\n img = tuple(average_image(cv.imread(image)))\n if img not in set_img:\n set_img.add(img)\n else:\n os.remove(image)\n print(f'Removing {image} ...')", "title": "" }, { "docid": "a1587752159c3e1440ae80f86c6a44df", "score": "0.53663343", "text": "def upsert_parent_dirs(filepath):\n comps = [c for c in filepath.split('/') if c]\n comps.pop()\n\n if filepath.startswith('/'):\n path = '/'\n else:\n path = ''\n\n for dir in comps:\n path += (dir + '/')\n\n if not os.path.exists(path):\n try:\n os.mkdir(path)\n except:\n pass", "title": "" }, { "docid": "f9a592076341ed2ea6a1f930b2dbe58d", "score": "0.5356832", "text": "def move_output_files():\n # Create output folder if it doesn't 
exist\n if not os.path.exists(OUTPUT_FILES_FOLDER):\n os.makedirs(OUTPUT_FILES_FOLDER)\n\n # Move the map files to the Output folder\n os.rename(os.path.join(PROGRAM_PATH, \"map_WHITE.png\"), os.path.join(OUTPUT_FILES_FOLDER, \"map_WHITE.png\"))\n os.rename(os.path.join(PROGRAM_PATH, \"map_BROWN.png\"), os.path.join(OUTPUT_FILES_FOLDER, \"map_BROWN.png\"))\n os.rename(os.path.join(PROGRAM_PATH, \"map_RED.png\"), os.path.join(OUTPUT_FILES_FOLDER, \"map_RED.png\"))\n os.rename(os.path.join(PROGRAM_PATH, \"map_SCORE-O.png\"), os.path.join(OUTPUT_FILES_FOLDER, \"map_SCORE-O.png\"))", "title": "" }, { "docid": "4840d38a6c95750734d2063d6fe86043", "score": "0.5349183", "text": "def move_files(scan_dir, filepaths):\n for f in filepaths:\n _, ext = os.path.splitext(f)\n name = os.path.basename(f)\n new_path = os.path.join(scan_dir, ext[1:], name)\n os.rename(f, new_path)", "title": "" }, { "docid": "4f53f6129526bd6d97c1856bc6d2bda5", "score": "0.53444535", "text": "def __move_up(self):\n for filename in self._selected:\n index = self._files.index(filename)\n files = OrderedSet()\n if index == 0:\n return\n\n if index > 1:\n files = self._files[:index - 1]\n files = list(files) + [self._files[index], self._files[index - 1]] + self._files[index + 1:]\n\n self._files = files\n self.__update_tree()", "title": "" }, { "docid": "cbb582de6847d80d3215489706a6a0ba", "score": "0.53393376", "text": "def create_output_directories(self):\r\n check_or_create_dir(self.dst_path)\r\n\r\n if self.medias:\r\n check_or_create_dir(join(self.dst_path, self.settings[\"GALLERY_THUMB_DIR\"]))\r\n\r\n if self.medias and self.settings[\"KEEP_ORIG\"]:\r\n self.orig_path = join(self.dst_path, self.settings[\"orig_dir\"])\r\n check_or_create_dir(self.orig_path)", "title": "" }, { "docid": "d19c19bd38282419100b378a25f51e13", "score": "0.5339199", "text": "def resave_all_archives(self,new_dirpath):\n\t\tif self.zippath!=None:\n\t\t\told_dirpath,f=os.path.split(self.zippath)\n\t\telse : old_dirpath=None\n\t\tassert os.path.abspath(new_dirpath)!=os.path.abspath(old_dirpath)\n\t\tfor arch_name in self.list_archives:\n\t\t\tif not os.path.exists(os.path.join(new_dirpath,TMP_FILE_MARK+arch_name)):\n\t\t\t\tos.mkdir(os.path.join(new_dirpath,TMP_FILE_MARK+arch_name))\n\t\t\t\n\t\t\tfor file in os.listdir(os.path.join(old_dirpath,TMP_FILE_MARK+arch_name)):\n\t\t\t\tfilepath=os.path.join(old_dirpath,TMP_FILE_MARK+arch_name,file)\n\t\t\t\tshutil.copy(filepath,os.path.join(new_dirpath,TMP_FILE_MARK+arch_name))", "title": "" }, { "docid": "09737768c435f6d46e6e85affe62bba4", "score": "0.5338812", "text": "def shuffle_folders(folder):\n for s in os.listdir(folder):\n if os.path.isfile(folder + '/' + s):\n shuffle_file(folder, s)\n print 'finish shuffling folder: %s' % s", "title": "" }, { "docid": "dfc00f904f6ca172850ab2304c7826e7", "score": "0.53345555", "text": "def recorrer(carpeta):\r\n\tlista = os.listdir(carpeta)\r\n\t#print (lista)\r\n\tfor elemento in lista:\r\n\t\tnuevo = os.path.join(carpeta,os.path.basename(elemento))\r\n\t\tif os.path.isdir(nuevo):\r\n\t\t\trecorrer(nuevo)\t\t\t\r\n\t\telse:\r\n\t\t\tdestino = os.path.join(os.getcwd(),pathDestino)\r\n\t\t\tprint(\"hashing \", nuevo)\r\n\t\t\tappendhash(genhash(nuevo).upper(), nuevo, filehash)", "title": "" }, { "docid": "89395491c8df71babf3a9bea570db48e", "score": "0.5331348", "text": "def move_malicious(abs_dirname, ratio):\n train_malicious = 'train/malicious/'\n shutil.rmtree(train_malicious)\n os.makedirs(train_malicious)\n val_malicious = 'validation/malicious/'\n 
shutil.rmtree(val_malicious)\n os.makedirs(val_malicious)\n files = [os.path.join(abs_dirname, f) for f in os.listdir(abs_dirname)]\n for f in files[0:ratio]:\n # create new subdir if necessary\n #subdir_name = os.path.join(abs_dirname, '{0:03d}'.format(i / N + 1))\n #os.mkdir('train/' + file_type)\n #curr_subdir = subdir_name\n\n # move file to current dir\n f_base = os.path.basename(f)\n\n copyfile(f, train_malicious + f_base)\n\n for f in files[ratio:]:\n # create new subdir if necessary\n #subdir_name = os.path.join(abs_dirname, '{0:03d}'.format(i / N + 1))\n #os.mkdir('validation/' + file_type )\n f_base = os.path.basename(f)\n\n copyfile(f, val_malicious + f_base)\n #copyfile(f, 'validation/malicious/' + f_base)", "title": "" }, { "docid": "7c4fd08a297e91d04592b7414fe28635", "score": "0.53109795", "text": "def dir_structure(image_dir):\n image_dir_files = os.listdir(image_dir)\n fake_dir = '/input'\n assert len(image_dir_files) != 0, f\"Image directory {image_dir_files} is empty.\"\n if os.path.exists(image_dir+fake_dir):\n if os.path.isdir(image_dir+fake_dir):\n print('Image directory structure test: OK.')\n else:\n print(\"Rearranging image dir structure, this might take a while...\")\n os.mkdir(image_dir+fake_dir)\n for i in image_dir_files:\n src = os.path.join(image_dir, i)\n dst = os.path.join(image_dir+fake_dir, i)\n move(src, dst)\n print(\"Done. Directory structure is now OK.\")", "title": "" }, { "docid": "5aa8f40c3fd01c5825432c96448de9cb", "score": "0.53109616", "text": "def copying_files(self):\n to_copy_folders = os.listdir(os.getcwd() + '/to_copy/')\n\n for folder in to_copy_folders:\n\n to_copy_files = os.listdir(os.getcwd() + '/to_copy/' + folder)\n for files in to_copy_files:\n if not files.startswith('.'):\n if not os.path.exists(self.app_location + '/' + folder + '/' + files):\n if not folder.startswith('.'):\n shutil.copyfile(os.getcwd() + '/to_copy/' + folder + '/' + files, self.app_location + '/' + folder + '/' + files)", "title": "" }, { "docid": "663274598bed082be9e720a68a287238", "score": "0.5310453", "text": "def move(src, dst, ignore=None, logger=None):\n\n src = Path(src)\n dst = Path(dst)\n print('src,dst',src.absolute(),dst.absolute())\n if not (src.is_dir() and dst.is_dir()):\n raise NotADirectoryError(f\"src: {src} and dst: {dst} must be directories\")\n\n # check if src and dst exist\n # read errors are already skipped by os.walk\n # and are directories\n # add logger to handle_exception\n for rpath, dirs, files in os.walk(src, topdown=False, onerror=handle_exception):\n for fi in files:\n # path to src \n fi_src = Path(rpath).absolute() / fi\n #if not fi_src.exists():\n # raise Exception(\"HELLOOO\", src, rpath, fi, fi_src)\n dir_dst = (dst / Path(rpath).relative_to(src)).absolute()\n fi_dst = dir_dst / fi\n print(\"dir dst\", dir_dst, dir_dst.exists())\n print(\"fi_src, fi dst\", fi_src, fi_dst)\n \n try:\n dir_dst.mkdir(exist_ok=True)\n os.rename(fi_src, fi_dst)\n # if dst is different device then copy and remove\n # TODO: use error code instead of broad OSError\n # log this activity\n except OSError:\n try:\n scopy(fi_src, fi_dst, follow_symlinks=False)\n set_owner_mode_xattr(fi_src, fi_dst)\n os.unlink(fi_src)\n # catch all exceptions\n except Exception as ex:\n handle_exception(ex, fi_src, fi_dst, logger=logger)\n except Exception as ex:\n handle_exception(ex, fi_src, fi_dst, logger)\n # set dst dir properties and remove src\n for di in dirs:\n # source dir path\n dir_src = Path(rpath).absolute() / di\n # check if dir exists? 
it must when os.walk if not top down\n dir_dst = dst / Path(rpath).relative_to(src) / di\n # to ensure not deleteting directories when not ignored\n if ignore and ignore(dir_dst):\n continue\n try:\n # handle if dst exists?\n set_owner_mode_xattr(dir_src, dir_dst)\n os.rmdir(dir_src)\n # if dir is not empty and other exceptions\n except Exception as ex:\n handle_exception(ex)\n # set permissions and delete", "title": "" }, { "docid": "bc3a97f9025bdb7ccf7941c7f00c3ea8", "score": "0.53066707", "text": "def move(old, new):\n if os.path.exists(new):\n count = 1\n root, ext = os.path.splitext(new)\n while True:\n new = \"%s_%i%s\" % (root, count, ext)\n if os.path.exists(new):\n count += 1\n else:\n break\n os.rename(old, new)\n else:\n os.rename(old, new)", "title": "" }, { "docid": "675bee6d62cdeeb13bb7009764e9bd4a", "score": "0.53064406", "text": "def copytree_replace_existing(os_service, src, dst):\n oss = os_service\n helper = Helpers()\n helper.copy_directory_tree(oss, src, dst)\n\n file_it = FileIterator(oss, src)\n for file_sub_path in file_it:\n source_file_path = os.path.join(src, file_sub_path)\n dest_file_path = os.path.join(dst, file_sub_path)\n oss.copyfile(source_file_path, dest_file_path)", "title": "" }, { "docid": "89497c3cb44718886a567f7b0d0a7bba", "score": "0.53050506", "text": "def scan_directory(dir_):\n for root, dirs, fname in os.walk(dir_):\n for f in fname:\n try:\n path_ = os.path.join(dir_, f)\n date_ = get_date_pfx(f)\n if valid_format(date_):\n new = get_datedir(date_, dir_)\n if not os.path.exists(new):\n os.makedirs(new)\n if os.path.exists(path_):\n os.rename(path_, name_new_file(new, f))\n except Exception as ex:\n print str(ex)\n continue", "title": "" }, { "docid": "96b6b58eeb478bf211cb42b22f56bbb2", "score": "0.5304466", "text": "def copyDirectoryContents(srcDir: RelFilePathT, destDir: RelFilePathT) -> None:\n filePathList = os.listdir(srcDir)\n for filePath in filePathList:\n absFilePath = osp.join(srcDir, filePath)\n subp.run(f\"cp -r {absFilePath} {destDir}\", shell=True)", "title": "" }, { "docid": "b462e79ceca0a566939f2d11feff446f", "score": "0.5302334", "text": "def main():\n print(\"Starting directory is: {}\".format(os.getcwd()))\n\n # Change to desired directory\n os.chdir('FilesToSort')\n\n # Print a list of all files in current directory\n print(\"Files in {}:\\n{}\\n\".format(os.getcwd(), os.listdir('.')))\n\n #breakpoint()\n\n # Make a new directory\n # The next time you run this, it will crash if the directory exists\n\n # Getting the extensions available:\n extension_list = []\n extension_exist = True\n\n for filename in os.listdir('.'):\n\n if filename == \".DS_Store\":\n continue\n\n extension = os.path.splitext(filename)[1]\n\n if len(extension_list) == 0:\n extension_list.append(extension)\n\n for each in extension_list:\n\n if extension == each:\n extension_exist = True\n break\n else:\n extension_exist = False\n\n if extension_exist == False:\n extension_list.append(extension)\n\n # Create extension dirs:\n extension_dict = dict()\n\n for each in extension_list:\n\n if each == \"\":\n continue\n\n key_to_add = input(\"Add {}\".format(each))\n\n if not (key_to_add in extension_dict):\n extension_dict[key_to_add] = []\n extension_dict[key_to_add].append(each)\n else:\n extension_dict[key_to_add].append(each)\n\n print(extension_dict)\n #breakpoint()\n\n for key in extension_dict.keys():\n if key != \"\":\n try:\n os.mkdir(key)\n except:\n os.rmdir(key)\n os.mkdir(key)\n\n # Loop through each file in the (current) directory\n for filename 
in os.listdir('.'):\n\n if filename == \".DS_Store\":\n continue\n\n extension = os.path.splitext(filename)[1]\n dir_to_move = \"\"\n # Ignore directories, just process files\n if os.path.isdir(filename):\n continue\n\n #find the dir to move:\n for key, value in extension_dict.items():\n if extension in extension_dict[key]:\n dir_to_move = key\n\n shutil.copy(filename, dir_to_move + '/' + filename)", "title": "" }, { "docid": "573a721b444eb8267b2484828b521252", "score": "0.52874583", "text": "def test_move_files(self):\n # Initialize key variables\n source_filenames = {}\n target_filenames = {}\n\n #################################################\n # Test with invalid source directory\n #################################################\n\n invalid_path = '/tmp/{}.{}'.format(\n self.random_string,\n self.random_string)\n\n with self.assertRaises(SystemExit):\n general.move_files(invalid_path, '/tmp')\n\n #################################################\n # Test with invalid destination directory\n #################################################\n\n invalid_path = '/tmp/{}.{}'.format(\n self.random_string,\n self.random_string)\n\n with self.assertRaises(SystemExit):\n general.move_files('/tmp', invalid_path)\n\n #################################################\n # Test with valid directory\n #################################################\n\n # Create a source directory\n source_dir = '/tmp/{}.1'.format(self.random_string)\n if os.path.exists(source_dir) is False:\n os.makedirs(source_dir)\n\n # Create a target directory\n target_dir = '/tmp/{}.2'.format(self.random_string)\n if os.path.exists(target_dir) is False:\n os.makedirs(target_dir)\n\n # Place files in the directory\n for count in range(0, 4):\n filename = ''.join([random.choice(\n string.ascii_letters + string.digits) for n in range(15)])\n source_filenames[count] = '{}/{}'.format(source_dir, filename)\n target_filenames[count] = '{}/{}'.format(target_dir, filename)\n open(source_filenames[count], 'a').close()\n\n # Check files in directory\n self.assertEqual(os.path.isfile(source_filenames[count]), True)\n\n # Delete files in directory\n general.move_files(source_dir, target_dir)\n\n # Check that files are not in source_dir\n for filename in source_filenames.values():\n self.assertEqual(os.path.isfile(filename), False)\n\n # Check that files are in in target_dir\n for filename in target_filenames.values():\n self.assertEqual(os.path.isfile(filename), True)\n\n # Delete directory\n shutil.rmtree(source_dir)\n\n # Delete directory\n shutil.rmtree(target_dir)", "title": "" }, { "docid": "7f3b270e73e784c64bdd46fb0d8466b1", "score": "0.5283628", "text": "def reorg_files(raw_dir):\n os.chdir(raw_dir)\n\n if not Path('People.csv').is_file():\n unzip_dir = raw_dir / 'baseballdatabank-master' / 'core'\n\n # move the unzipped csv files to the current working directory\n for root, dirs, files in os.walk(unzip_dir):\n for file in files:\n shutil.move(root + '/' + file, '.')\n\n # rm the extract directory\n shutil.rmtree('baseballdatabank-master')\n\n msg = '\\n'.join(os.listdir('.'))\n logger.info(f'{raw_dir} contents:\\n {msg}')", "title": "" }, { "docid": "8b1abbb6e682699d0ec361b1555bf918", "score": "0.52821255", "text": "def mv2scratch( filename, dirpath ):\n scpath = dirpath.replace('/scratch/_gc/','/scratch/',1)\n oldpath = os.path.join(dirpath,filename)\n #newpath = os.path.join(scpath,filename)\n print \"moving from\",oldpath,\"\\nto\",scpath\n if not os.path.isdir(scpath):\n os.makedirs(scpath)\n shutil.move( oldpath, scpath 
)\n listgood( filename )", "title": "" }, { "docid": "6635cfecfb52ed40319d0404c2099d8e", "score": "0.5281678", "text": "def convert_and_move(filename, date, atmos_cor):\n # TODO: Something is wrong with this function, getting false positives\n # for files already existing, so the conversion process is aborted\n # when it shouldn't be\n\n logger.debug('Starting conversion process for {}'.format(filename))\n tqdm.tqdm.write('Starting conversion process for {}'.format(filename))\n\n # Get tile id using re, break it out into zone num, tile ids, MGRS format\n tile = get_utm_tile(filename)\n\n if not tile:\n tile_zone = 'none'\n tile_band = 'none'\n tile_squareID = 'none'\n else:\n tile_zone = tile['zone']\n tile_band = tile['band']\n tile_squareID = tile['square']\n # check if converted directory already exists\n destination_dir = \"S2_{}_{}_{}_{}\".format(tile_zone,\n tile_band,\n tile_squareID,\n date.strftime('%Y%m%d_%H%M'))\n\n # Check if the simplified directory already exists\n if Path.exists(Path(FINAL_DIR, destination_dir)):\n logger.debug('Final result directory already exists, checking file'\n 'extension of first file to see if it matches the current'\n 'atmospheric correction value')\n\n file_list_iter = os.scandir(Path(FINAL_DIR, destination_dir))\n\n\n for file_name in file_list_iter:\n if file_name.name[-7:-5] == str(atmos_cor):\n logger.debug('That resolution of atmos correction already'\n 'exists..., aborting the conversion...')\n logger.debug('Checking if the .zip still exists...')\n\n if not (Path(FINAL_DIR, destination_dir + '.zip').exists()):\n zip_directory(destination_dir)\n\n return \"already exists\"\n\n if not file_name.name[-7:-5] in ['10','20','60'] and atmos_cor == 0:\n logger.debug('Uncorrected files already exist, aborting...')\n\n logger.debug('Checking if the .zip still exists...')\n\n if not (Path(FINAL_DIR, destination_dir + '.zip').exists()):\n zip_directory(destination_dir)\n\n return \"already exists\"\n\n logger.info('Final result directory does not exist. 
Beginning conversion')\n logger.debug('atmos_cor: {}'.format(atmos_cor))\n\n if atmos_cor in [10, 20, 60]:\n # Convert filename to L2A with .SAFE suffix\n file_string = filename.replace('L1C', 'L2A')\n else:\n file_string = filename\n\n file_string += '.SAFE'\n\n # Make sure the temporary data folder we are converting exists\n if not os.path.isdir(os.path.join(BUNDLE_DIR, TEMP_DIR, file_string)):\n logger.warning('Expected data directory to be converted does not exist'\n ' exiting...')\n tqdm.tqdm.write('Expected data directory to convert does not exist, stopping...')\n\n return 'failed'\n\n granule_dir = Path('temp', file_string, 'GRANULE')\n\n # Iterate over each granule, if there are multiple\n for dir in granule_dir.iterdir():\n\n logger.debug(str(dir))\n if atmos_cor in [10, 20, 60]:\n convert_dir = Path(dir, 'IMG_DATA', \"R{}m\".format(atmos_cor))\n else:\n convert_dir = Path(dir, 'IMG_DATA',)\n\n logger.debug(convert_dir)\n file_list = convert_dir.glob('*.jp2')\n\n # For the actual img data, use the convert_jp2 function\n for file in file_list:\n logger.debug(file)\n dest_path = Path(FINAL_DIR, destination_dir)\n try:\n conv.convert_jp2_to_tif(file,\n destination_dir,\n atmos_cor,\n dest_path)\n except Exception as e:\n logger.error('Something went wrong with the conversion')\n logger.error(str(e))\n return 'failure'\n\n # Copy and rename the metadata file in the GRANULE\n shutil.copy2(str(Path(dir, 'MTD_TL.xml')),\n str(Path(FINAL_DIR,\n destination_dir,\n destination_dir + \"_metadata.xml\")))\n\n logger.info('converted jp2 to tif successfully, zipping result')\n zip_directory(destination_dir)\n\n return 'success'", "title": "" }, { "docid": "e985ef77f29f78ddf4b26a0879bc8e75", "score": "0.5277273", "text": "def setup_destination_directory(self):\n\n new_subpath = self.dir_to_import[len(self.top_of_repo):]\n\n destination_directory = os.path.join(self.destination_directory, new_subpath)\n\n if not os.path.exists(destination_directory):\n os.makedirs(destination_directory)\n\n _log.info('Tests will be imported into: %s', destination_directory)", "title": "" }, { "docid": "946be042ff7540732aababf327aa6f5f", "score": "0.527297", "text": "def copy_files(src_folder: str, dest_folder: str) -> None:\n shutil.copytree(src_folder, dest_folder, dirs_exist_ok=True)", "title": "" }, { "docid": "48f71ed72d55817281cee9ad5801f8da", "score": "0.526326", "text": "def fix_directories(input_dir_path, output_dir_path):\n if not input_dir_path:\n input_dir_path = output_dir_path\n input_dir_path = os.path.realpath(input_dir_path)\n output_dir_path = os.path.realpath(output_dir_path)\n return input_dir_path, output_dir_path", "title": "" }, { "docid": "a6c922808be1ebe1c983b95050534aa5", "score": "0.5263", "text": "def clean_up_files(output_dir):\n\t\n\tpass\n\treturn", "title": "" } ]
dd403594a26d4ed6b1599a1e49a12d9a
Draws the points onto the GUI canvas
[ { "docid": "3d0fe5e512da5b59f5dc44f3070a7bab", "score": "0.6598843", "text": "def on_paint(self, event):\n dc = wx.PaintDC(self.panel)\n dc.SetPen(wx.Pen('black', 1))\n dc.SetBrush(wx.Brush('black', 1))\n for point in self.annealing.state.points:\n dc.DrawCircle(point[0], point[1], 2)", "title": "" } ]
[ { "docid": "ab4edc0d7656931e0deb95b2ce5bd993", "score": "0.74049175", "text": "def _draw(self):\n if self.colorTable != {}: CT = self.colorTable\n else:\n CT = self._genColorTable()\n self.plotter.clear()\n for p in self.points:\n self.plotter.setColor(CT[p])\n self.plotter.spot(p, self.spotWidth)", "title": "" }, { "docid": "99b08b619d77280ec9c902bc57ae0553", "score": "0.72308236", "text": "def draw(self):\n self.canvas.draw()", "title": "" }, { "docid": "c9e5dea799389f8e2047272ac01f5e9e", "score": "0.7219184", "text": "def drawPoint(self, qp):\r\n size = self.size()\r\n\r\n # Loops through every point in the window\r\n for i in range(self.num_xpoints):\r\n for j in range(self.num_ypoints):\r\n\r\n # Evenly distribute the points in both the x and y directions\r\n x_coord = ((size.width() - 100) / 21.0) * (i + 1) + 50\r\n y_coord = ((size.height() - 100) / 21.0) * (j + 1) + 50\r\n\r\n # Colours are determined by colourList\r\n qp.setPen(QPen(self.colourList[i][j], 1, Qt.SolidLine))\r\n qp.setBrush(QBrush(self.colourList[i][j], Qt.SolidPattern))\r\n\r\n # We draw the points as rectangles, and updates their locations\r\n qp.drawRect(x_coord, y_coord, 10, 10)\r\n self.pointLocation[i][j] = (x_coord, y_coord)", "title": "" }, { "docid": "fc54db68b3e8c1662bd6927189be6cd9", "score": "0.71158445", "text": "def DrawPoint(self, *args, **kw):", "title": "" }, { "docid": "c262c67fa87a29c83822da9ade0ff382", "score": "0.7056661", "text": "def onPaint(self, event):\r\n self.SetCurrent(self.context)\r\n\r\n # Make sure the canvas has been initialised;\r\n if not self.bInitialised:\r\n self.initialiseGL()\r\n\r\n # OpenGL, in this case, uses a \"double bufferring\" system:\r\n # it displays one buffer whilst drawing to the other.\r\n # Clear the current back buffer\r\n GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\r\n\r\n # Draw the points stored in the vertex array onto the buffer\r\n GL.glDrawArrays(GL.GL_LINE_STRIP, 0, self.numPoints)\r\n\r\n # Now make the back buffer the front buffer\r\n # i.e. 
this is now displayed on the screen\r\n GL.glFlush()\r\n self.SwapBuffers()", "title": "" }, { "docid": "bde0c050f4d3cba08435fd4920e2665e", "score": "0.70482403", "text": "def draw(self):\n\n glBegin(GL_POINTS)\n glColor((1, 1, 1))\n glVertex3fv(self.get_point())\n glEnd()", "title": "" }, { "docid": "0cb4e5cde4d1cd8a51168c87213f6ded", "score": "0.7002541", "text": "def draw(self):\n xlim_max = max([find_max(0, self.graph), 0])\n xlim_min = min([find_min(0, self.graph), 0])\n ylim_max = max([find_max(1, self.graph), 0])\n ylim_min = min([find_min(1, self.graph), 0])\n\n # Add some parameters to make graph in center of picture\n plt.xlim(xlim_min - 1, xlim_max + 2)\n plt.ylim(ylim_min - 1, ylim_max + 2)\n plt.gca().set_aspect('equal', adjustable='box')\n\n # Add list with steps on axes to see coordinates of points\n x_steps = []\n y_steps = []\n for i in range(xlim_min - 1, xlim_max + 2):\n x_steps.append(i)\n for i in range(ylim_min - 1, ylim_max + 2):\n y_steps.append(i)\n\n plt.xticks(x_steps)\n plt.yticks(y_steps)\n\n start_x = 0\n start_y = 0\n for i in range(self.graph.num_rows()):\n x1, y1 = [], []\n for j in range(self.graph.num_cols()):\n if self.graph[(i, j)]:\n x1.append(self.graph[(i, j)][0])\n y1.append(self.graph[(i, j)][1])\n\n plt.plot(x1, y1, 'b', marker='o')\n plt.show()", "title": "" }, { "docid": "4f2b43d544a6200347ba850c5049e246", "score": "0.6836888", "text": "def draw(self):\n points = [{'pos': list(self.primitive), 'color': self.color, 'name': self.name}]\n self.guids = compas_rhino.draw_points(points, layer=self.layer, clear=False, redraw=False)", "title": "" }, { "docid": "71ec4a486281bf80e3853a9ccf6b6615", "score": "0.68199426", "text": "def drawPoint(self, x, y):\n self.plotArea.create_oval(self.xToCanvas(x), self.yToCanvas(y), self.xToCanvas(x), self.yToCanvas(y))", "title": "" }, { "docid": "a82bb6a62b328cd0db8a90baafcf0015", "score": "0.6798981", "text": "def display(self):\n point1, point2 = self.points\n pyxel.line(\n x1=point1.x + self.x,\n y1=point1.y + self.y,\n x2=point2.x + self.x,\n y2=point2.y + self.y,\n col=self.colour,\n )", "title": "" }, { "docid": "8088b60d7bbe481f7a3529b1420da40a", "score": "0.679548", "text": "def draw(self):", "title": "" }, { "docid": "8088b60d7bbe481f7a3529b1420da40a", "score": "0.679548", "text": "def draw(self):", "title": "" }, { "docid": "8088b60d7bbe481f7a3529b1420da40a", "score": "0.679548", "text": "def draw(self):", "title": "" }, { "docid": "8088b60d7bbe481f7a3529b1420da40a", "score": "0.679548", "text": "def draw(self):", "title": "" }, { "docid": "ed27d331a9844a73940780db25ac219f", "score": "0.6770996", "text": "def draw(self):\n if (not self._surface):\n self._surface = display.set_mode((self.SIZE, self.SIZE))\n self._surface.fill(self.BACKGROUND_COLOR)\n for point in self.points:\n color = self.NOT_STAR_COLOR\n if (point.star):\n color = self.STAR_COLOR\n draw.circle(self._surface, color, point.position.round.tuple, point.radius, self.BORDER_THICKNESS)\n display.flip()\n for e in event.get():\n if e.type == QUIT:\n quit()\n exit(0)", "title": "" }, { "docid": "d5d6b53e7db2b0e688e08bf38669af21", "score": "0.67390406", "text": "def draw_raw_points(self):\n # wp_xyz_list = list(self.nx_graph_topology.nodes(data='xyz')\n xp_xyz_list = list()\n for vertex_id in self.nx_graph_topology.nodes():\n xp_xyz_list.append(self.node_id_map_topology[vertex_id].transform.location.x)\n xp_xyz_list.append(self.node_id_map_topology[vertex_id].transform.location.y)\n\n # all_elements1 = xp_xyz_list[::2]\n # p1 = xp_xyz_list[::2]\n 
# p2 = xp_xyz_list[1::2]\n\n segment_list = list()\n\n for edge in self.nx_graph_topology.edges():\n segment_list.append(edge)\n\n fig, ax = plt.subplots(1, 1)\n ax.scatter(xp_xyz_list[::2], xp_xyz_list[1::2], s=5)\n ax.set_title('Global navi waypoints')\n\n for i in range(len(segment_list)):\n ax.plot([self.node_id_map_topology[segment_list[i][0]].transform.location.x,\n self.node_id_map_topology[segment_list[i][1]].transform.location.x],\n [self.node_id_map_topology[segment_list[i][0]].transform.location.y,\n self.node_id_map_topology[segment_list[i][1]].transform.location.y],\n 'k-',\n lw=1)\n\n def onclick(event):\n print('%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %\n ('double' if event.dblclick else 'single', event.button,\n event.x, event.y, event.xdata, event.ydata))\n if self.clicks < self.click_max:\n self.clicks += 1\n self.nav_points_map.append((event.xdata, event.ydata))\n\n # return event.xdata, event.ydata\n\n cid = fig.canvas.mpl_connect('button_press_event', onclick)\n\n plt.show()\n # wp_xyz_list = list(self.nx_graph_topology.nodes())", "title": "" }, { "docid": "835162cc6ec678130acb4549fc546d02", "score": "0.6735644", "text": "def display(self):\n\n for point1, point2 in zip(self.points, self.points[1:] + [self.points[0]]):\n pyxel.line(\n x1=point1.x + self.x,\n y1=point1.y + self.y,\n x2=point2.x + self.x,\n y2=point2.y + self.y,\n col=self.colour,\n )\n\n if self.accelerating:\n self.display_acceleration()", "title": "" }, { "docid": "aab025ba212b74433fa00ac1d62d1700", "score": "0.67278993", "text": "def draw(self):\n pass", "title": "" }, { "docid": "aab025ba212b74433fa00ac1d62d1700", "score": "0.67278993", "text": "def draw(self):\n pass", "title": "" }, { "docid": "aab025ba212b74433fa00ac1d62d1700", "score": "0.67278993", "text": "def draw(self):\n pass", "title": "" }, { "docid": "aab025ba212b74433fa00ac1d62d1700", "score": "0.67278993", "text": "def draw(self):\n pass", "title": "" }, { "docid": "aab025ba212b74433fa00ac1d62d1700", "score": "0.67278993", "text": "def draw(self):\n pass", "title": "" }, { "docid": "cb1d6b2102c44e483c249400d5919ad3", "score": "0.6715999", "text": "def draw(self):\n ...", "title": "" }, { "docid": "33f4281a21d1a02b232c2b8863ac85c2", "score": "0.6613458", "text": "def draw(self):\n\t\tpass", "title": "" }, { "docid": "be473ae37043ea89cc5c9545242228ef", "score": "0.65960264", "text": "def draw(self) -> None:\n pass", "title": "" }, { "docid": "268810559c00cd7c910337cb3cb15745", "score": "0.65592295", "text": "def draw(self, **kwargs):\n pass", "title": "" }, { "docid": "1054bb296adbfe9ace7edb2581942013", "score": "0.6534617", "text": "def draw(self, win, center):", "title": "" }, { "docid": "d85526c6016741641619de885617786b", "score": "0.65293276", "text": "def DrawLines(self, points, xoffset=0, yoffset=0):", "title": "" }, { "docid": "1ce46b0f9e34733de8e52d4f71577ada", "score": "0.6499565", "text": "def plot(self, pr):\n x, y = self.transform(pr)\n self.gfx.drawPoint(self.tr.X(x), self.tr.Y(y))", "title": "" }, { "docid": "98da1f37efdd481191de1335bcd1cf79", "score": "0.64826924", "text": "def _draw(self):\n pass", "title": "" }, { "docid": "9809598353cdbae9d3ae7426cb029b30", "score": "0.6477086", "text": "def draw(self):\n fig, ax = plt.subplots(nrows=1, ncols=1)\n ax.plot(self.x, self.y, linestyle='', marker='s')\n line = list()\n if self.m is not None and self.b is not None:\n for i in self.x:\n line.append(self.m * i + self.b)\n ax.plot(self.x, line, color='red')\n plt.show()", "title": "" }, { "docid": 
"dc397592cec484bfb48738feadc3d36c", "score": "0.6470176", "text": "def draw(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "fc31efc4a783dc12aeca2d1f74a98b5b", "score": "0.6464472", "text": "def draw(self):\n self.prepare()\n self.show()", "title": "" }, { "docid": "ade6194f48ec419ba1139664290dda0e", "score": "0.645895", "text": "def paint(self):\r\n if self.ready:\r\n self.canvas.delete(tkinter.ALL)\r\n self.visit(self.tree.root)\r\n \r\n if self.newRegion:\r\n self.canvas.create_rectangle(self.newRegion.x_min, self.toTk(self.newRegion.y_min),\r\n self.newRegion.x_max, self.toTk(self.newRegion.y_max), \r\n outline='Black', dash=(2, 4))\r\n \r\n if self.selectedRegion:\r\n self.canvas.create_rectangle(self.selectedRegion.x_min, self.toTk(self.selectedRegion.y_min),\r\n self.selectedRegion.x_max, self.toTk(self.selectedRegion.y_max), \r\n outline='Red', dash=(2, 4))\r\n else:\r\n self.label = tkinter.Label(self.w, width=100, height = 40, text=\"Click To Add Points\")\r\n self.label.bind(\"<Button-1>\", self.prepare)\r\n self.label.pack()\r\n self.ready = True", "title": "" }, { "docid": "352d0d2f8a3bdc7ab4f3518cee23db5d", "score": "0.64500105", "text": "def update_canvas(code, point = Point(0,0)):\n fig = pylab.figure(figsize=[4, 4], dpi=100)\n ax = fig.gca()\n\n if code == \"np\": # new point\n ax.plot(points_x, points_y, 'ro')\n\n if code == \"dp\": # draw path\n ax.plot(points_x, points_y, 'ro')\n x, y = curve_to_arrays(point, 1000, 0.001)\n ax.plot(x, y, 'r--')\n\n canvas = agg.FigureCanvasAgg(fig)\n canvas.draw()\n renderer = canvas.get_renderer()\n raw_data = renderer.tostring_rgb()\n\n size = canvas.get_width_height()\n\n surf = pygame.image.fromstring(raw_data, size, \"RGB\")\n screen.blit(surf, (0, 0))\n pygame.display.flip()\n\n return ax", "title": "" }, { "docid": "d3811d976b0b791cf6be9488d36be261", "score": "0.6440813", "text": "def draw(self, output=None):", "title": "" }, { "docid": "6583158c485f0a678c827ef842da6ed0", "score": "0.6435612", "text": "def _draw(self):\r\n pass", "title": "" }, { "docid": "070971094cde182e950222e0f92d6d98", "score": "0.6429595", "text": "def start_draw(event):\r\n pos_x.append(event.xdata)\r\n pos_y.append(event.ydata)\r\n \r\n \"\"\"keshidane khotoot va kamel kardane shekl\"\"\"\r\n \r\n if len(pos_x)>1:\r\n plt.scatter(pos_x, pos_y, color =\"red\")\r\n plt.draw()\r\n if abs(event.xdata-pos_x[0]) > 0.05*pos_x[0] or \\\r\n abs(event.ydata-pos_y[0]) > 0.05*pos_y[0]:\r\n plt.plot(pos_x, pos_y, color = \"dodgerblue\")\r\n \r\n else:\r\n plt.scatter(pos_x[0], pos_y[0], color =\"red\")\r\n plt.draw()\r\n plt.plot([pos_x[-2], pos_x[0]], [pos_y[-2], pos_y[0]], \r\n color = \"dodgerblue\") \r\n fig.canvas.mpl_disconnect(button)\r\n pos_x.pop()\r\n pos_y.pop()\r\n pos_x.append(pos_x[0])\r\n pos_y.append(pos_y[0])\r\n plt.fill(pos_x, pos_y, color = \"cyan\") \r\n\r\n else : \r\n plt.scatter(pos_x[0], pos_y[0], color =\"red\")\r\n plt.draw()\r\n print([event.xdata, event.ydata])", "title": "" }, { "docid": "5eda2fc38ed3bd4888cc9ab5db5a8b82", "score": "0.6411792", "text": "def draw(self, setlim=True):\n\n # Don't plot if variable full of Nan\n if np.count_nonzero(~np.isnan(self.x)) == 0 or \\\n np.count_nonzero(~np.isnan(self.y)) == 0: return\n\n self.canvas.mpl_connect('motion_notify_event', self.ShowPosition)\n\n self.x = ma.masked_invalid(self.x)\n self.y = ma.masked_invalid(self.y)\n if setlim:\n self.xlim = [np.min(self.x), np.max(self.x)]\n self.ylim = [np.min(self.y), np.max(self.y)]\n title = self.title\n plotCurv(self, x=self.x, y=self.y, 
title=title, xlabel=self.xlabel,\n ylabel=self.ylabel, xlim=self.xlim, ylim=self.ylim)\n self.canvas.draw()\n self.canvas.Refresh()\n self.Show()", "title": "" }, { "docid": "0c4fa495be0a85dd1516384cfe48a274", "score": "0.64081395", "text": "def _draw(self):\n\n self.lgnd_crvs = []\n self.lgnd_pnts = []\n self.lgnd_lbls = []\n\n for ii,ll in enumerate(self.lgnd):\n if False == self._reverse:\n yy = len(self.lgnd)-ii-1\n else:\n yy = ii\n add_curve( [0.25,1], [yy,yy], self.lgnd[ii] )\n self.lgnd_crvs.append( self._get_current_object_name(\"Curve\") )\n set_curve(\"symbol.style=none\")\n pnt = self._get_point_properties( self.lgnd[ii].symbol ) \n add_point( 0.625, yy, pnt )\n self.lgnd_pnts.append( self._get_current_object_name(\"Point\") )\n add_label( 1.25, yy, str(ii), \"valign=0.5\" )\n self.lgnd_lbls.append( self._get_current_object_name(\"Label\") )", "title": "" }, { "docid": "5bfcd1996c4999315e0efede413b8d06", "score": "0.64076024", "text": "def showPoints(self, context):\n for point in self.points:\n point.show(context)", "title": "" }, { "docid": "c3435ae8fff08384eb1fc4a0a0240309", "score": "0.64066726", "text": "def update_canvas(self):\n velocity_data.append(self.current_velocity)\n altitude_data.append(self.current_altitude)\n sub_velocity.clear()\n sub_altitude.clear()\n sub_velocity.plot(velocity_data)\n sub_altitude.plot(altitude_data)\n self.canvas_left.draw()\n self.canvas_right.draw()\n if self.update_plot:\n self.frame_upper_middle.after(100, self.update_canvas)", "title": "" }, { "docid": "21b719e687b52b31446ddcbaa82982a6", "score": "0.6400952", "text": "def draw(self, canvas): \n self.draw_self(canvas) \n self.draw_children(canvas)", "title": "" }, { "docid": "f852c2a6cdb09e67c2c41e5f408d0bef", "score": "0.63838935", "text": "def draw(self):\n self.image.draw()\n self.text.draw()", "title": "" }, { "docid": "161c0e174ba79e0ebd9f718bf8ea59fa", "score": "0.63780683", "text": "def ButtonPress(self, event):\n self.clicked = True\n self.update_plot()\n self.newx = event.x\n self.newy = event.y\n self.canvas.draw_line(self.newx, self.newy)\n self.canvas.index(self.newx)\n self.create_ppdata()\n self.update_label()", "title": "" }, { "docid": "bcdc49685711a9176d4e572f31fd71d1", "score": "0.63757294", "text": "def paintPlug(self, canvas, x, y):\n pass", "title": "" }, { "docid": "66a12b3760e508b89672a24b93ba155f", "score": "0.6375435", "text": "def plot_points(self):\n self.operator._set_monitor_plot_points(self.plot_points_spinbox.value())\n self.plot_points_spinbox.setValue(self.operator.properties['monitor']['plot_points'])\n set_spinbox_stepsize(self.plot_points_spinbox)", "title": "" }, { "docid": "078df1510309d220eaef839c875ba6da", "score": "0.6368254", "text": "def DrawXYZ(self):\r\n self.xyPanel.Refresh()", "title": "" }, { "docid": "9065c246c1f3087d6f1b5fb53725f2d5", "score": "0.6358699", "text": "def draw(self):\n raise NotImplementedError(\"draw() needs to be implemented\")", "title": "" }, { "docid": "df8c3e2fa8b51b2cbd13381be4400c8b", "score": "0.6354665", "text": "def draw(points, **scatter_params):\n if scatter_params is None:\n scatter_params = {label: 'Data points'}\n X = points[:,0] / points[:,2]\n Y = points[:,1] / points[:,2]\n\n # Plot the points\n plt.scatter(X, Y, **scatter_params)", "title": "" }, { "docid": "19052fea20ee3399147bde6ea5459129", "score": "0.63519806", "text": "def update_plot(self):\n\n self.fig.canvas.draw()", "title": "" }, { "docid": "19052fea20ee3399147bde6ea5459129", "score": "0.63519806", "text": "def update_plot(self):\n\n 
self.fig.canvas.draw()", "title": "" }, { "docid": "19052fea20ee3399147bde6ea5459129", "score": "0.63519806", "text": "def update_plot(self):\n\n self.fig.canvas.draw()", "title": "" }, { "docid": "19052fea20ee3399147bde6ea5459129", "score": "0.63519806", "text": "def update_plot(self):\n\n self.fig.canvas.draw()", "title": "" }, { "docid": "19052fea20ee3399147bde6ea5459129", "score": "0.63519806", "text": "def update_plot(self):\n\n self.fig.canvas.draw()", "title": "" }, { "docid": "19052fea20ee3399147bde6ea5459129", "score": "0.63519806", "text": "def update_plot(self):\n\n self.fig.canvas.draw()", "title": "" }, { "docid": "757daf6c0f7046c81698f02deb847f6e", "score": "0.6343511", "text": "def draw(self, context):\n pass", "title": "" }, { "docid": "757daf6c0f7046c81698f02deb847f6e", "score": "0.6343511", "text": "def draw(self, context):\n pass", "title": "" }, { "docid": "c21f2e007aac4aded15febc7bb46fae4", "score": "0.6339141", "text": "def draw(self):\n pg.draw.circle(self.surface, self.color, self.xy, self.rect_radius)", "title": "" }, { "docid": "995653a4c3eada74acf8f08629fdc553", "score": "0.6324916", "text": "def render(self):\r\n plt.draw() #Draw and display the plot in new window\r\n plt.show()", "title": "" }, { "docid": "c050d90f80f2c14dfd8e6830eae7c6da", "score": "0.6316409", "text": "def draw_canvas(self):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n maxtag = np.amax(self.lattice)\n # rect = np.ma.masked_equal(self.lattice, 0)\n rect = np.copy(self.lattice)\n rect[rect == 0] = -1\n palette = plt.cm.gray\n palette.set_over('b', 1.0)\n palette.set_under('w', 1.0)\n palette.set_bad('w', 1.0)\n for i, tag in enumerate(list(self.ptag)):\n rect[rect == tag] = maxtag + i + 2\n # ax.matshow(rect, cmap=plt.cm.jet)\n ax.matshow(rect, cmap=palette,\n norm=colors.Normalize(vmin=1, vmax=maxtag + 1, clip=False))\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n plt.show()", "title": "" }, { "docid": "7456fb950a8eeb0506fbba06672076c5", "score": "0.6276118", "text": "def paintEvent(self, event):\r\n # Initializes the Painter object\r\n qp = QPainter()\r\n qp.begin(self)\r\n\r\n self.drawSpace(qp) # Draws the box for the points\r\n self.drawPoint(qp) # Draw the points\r\n self.drawTheCircle(qp) # Draws the circles if there are any\r\n qp.end()", "title": "" }, { "docid": "b110fe784790956532cd53f254d13057", "score": "0.62706745", "text": "def points(self, points):\n self.cpoints(points)", "title": "" }, { "docid": "f90e1bc9431039b7e16da0590efa9ee0", "score": "0.6267569", "text": "def draw_callback(self, *args):\n #print \"callback data %s\" % data_tuple\n print \"callback args %s\" % args\n data = args[0]\n tool = data[0]\n print tool\n if (tool == 'clear'):\n self.backend.clear()\n if (tool == 'line'):\n #print \"start_x=%d, start_y=%d, end_x=%d, end_y=%d\" % (data[1].x(), data[1].y(), data[2].x(), data[2].y())\n self.backend.line((data[1].x(), data[1].y()), (data[2].x(), data[2].y()))\n if (tool == 'circle'):\n origo = (data[1].x(), data[1].y())\n r = int(round(helpers().point_distance(origo, (data[1].x(), data[1].y()))))\n self.backend.circle(r, origo)", "title": "" }, { "docid": "b62b03fcc367215a7258ca8d8e79d261", "score": "0.6247265", "text": "def Draw(self):\r\n pass", "title": "" }, { "docid": "ccf3ca4d4b419ebb57b7cd36db2747dd", "score": "0.6245673", "text": "def refresh_plot(self):\n self.view.canvas.draw()", "title": "" }, { "docid": "9f8eb339cc032364b9f25c9a52e327e4", "score": "0.6222222", "text": "def drawPoints(self, pointPen, roundCoordinates=1):\n pointPen.beginPath()\n 
for (x, y), segmentType, args, kwargs in self.contour:\n # identifiers are not needed here\n # but the could cause errors, so remove them\n kwargs[\"identifier\"] = None\n pointPen.addPoint((x, y), segmentType, *args, **kwargs)\n pointPen.endPath()", "title": "" }, { "docid": "5b083cca31ca184dd4c924406b372dcc", "score": "0.62093407", "text": "def update_xy_plot():\n\n\tlines.set_xdata(self.current_frame.x)\n\tlines.set_ydata(self.current_frame.y)\n\tfigure.canvas.draw()", "title": "" }, { "docid": "babaf50d7133a36211550ee5235b72d9", "score": "0.62000763", "text": "def draw(self, backend):\n # Drawn by world\n pass", "title": "" }, { "docid": "72cce35f85a4d52d1b7dd112e8134cda", "score": "0.6190761", "text": "def draw(self):\n\t\ttry:\n\t\t\tself.canvas.draw()\n\t\texcept Exception as e:\n\t\t\twarnings.warn(\"Error while drawing matplotlib figure: \\n\" + str(e))\n\t\tself._repaint_signal.signal.emit(\"emitting\")", "title": "" }, { "docid": "b06b71e63824937258a37680935ddabc", "score": "0.6189219", "text": "def draw(self, position):\n # Getting value at position\n vector = self.value(position)\n x = vector[0]\n y = vector[1]\n # Using self.ped to draw vector\n self.ped.goto(position[0], position[1])\n self.ped.pendown()\n self.ped.goto(position[0]+x, position[1]+y)\n self.ped.penup()", "title": "" }, { "docid": "6dbe24102f50864754172e4508a5a3a2", "score": "0.6179086", "text": "def draw(self, window):\n window.blit(self.image, (self.x, self.y))", "title": "" }, { "docid": "99d6091d9dd0c26c1f197ca88dc8b68c", "score": "0.6175171", "text": "def display(self, canvas, x, y, width, height):\n # Do we need this?\n pass", "title": "" }, { "docid": "bc46e144c200bad26ab72357e17f16cf", "score": "0.6169833", "text": "def draw(self, shape):", "title": "" }, { "docid": "f471475ddc961393cd14236ca728b190", "score": "0.61688405", "text": "def draw():\n pass", "title": "" }, { "docid": "a3974fd9d3dab3a4f36c43adf99f4de0", "score": "0.61666214", "text": "def draw_gui(self):\r\n self.draw_button_view()\r\n # self.draw_tree_widget()\r\n pass", "title": "" }, { "docid": "c71cd077f04629119cbadfc5aa5a3201", "score": "0.6166349", "text": "def updatecanvas(self):\r\n # reseting canvas (add this before calling the draw shapes)\r\n self.canvas.delete('all')\r\n\r\n self.draw_shape(True, 37, 42)\r\n self.draw_shape(True, 43, 48)\r\n self.draw_shape(False, 1, 17)\r\n self.draw_shape(True, 18, 22)\r\n self.draw_shape(True, 23, 27)\r\n self.draw_shape(True, 37, 42)\r\n self.draw_shape(False, 28, 31)\r\n self.draw_shape(True, 32, 36)\r\n self.draw_shape(True, 49, 60)\r\n self.draw_shape(True, 61, 68)\r\n\r\n self.canvas.update()", "title": "" }, { "docid": "d06f3ed18c3670c6ec37f887e4e619de", "score": "0.61662865", "text": "def show_points(self):\r\n self.screen.blit(self.player_points_image, self.player_points_rect)\r\n self.screen.blit(self.ai_points_image, self.ai_points_rect)", "title": "" }, { "docid": "82f11dcca2e67e07d1c0bd788dc947b7", "score": "0.61629325", "text": "def draw_from_points(self,cv_image, points):\n for (x, y, w, h), n in points:\n cv.Rectangle(cv_image, (x, y), (x + w, y + h), 255)\n return cv_image", "title": "" }, { "docid": "9337bff3cc7b9225c9f76e21437676ad", "score": "0.61591405", "text": "def show_curves_callback(self):\n self.plotter.update()\n self.fig_canvas.draw()", "title": "" }, { "docid": "865f8f1ed1b82ad509ba0292a459553c", "score": "0.6152894", "text": "def draw(self):\n\t\tself.graphic.blit(self.viewport.x + self.offset_x, self.viewport.y + self.offset_y)", "title": "" }, { "docid": 
"d126640838abb8f840a9e58262d5e38d", "score": "0.61479783", "text": "def draw(self):\n self.batch.draw()", "title": "" }, { "docid": "0cd7d739302137da2c84c061bddb1596", "score": "0.614265", "text": "def draw(self):\n self.canvas.coords(self.hunger_bar, 150 * self.id + 50, 50, 150 * self.id + 50 + 100 * self.hunger / self.max_value, 100)\n self.canvas.coords(self.stamina_bar, 150 * self.id + 50, 150, 150 * self.id + 50 + 100 * self.stamina / self.max_value, 200)\n self.hunger_text.set(\"Hunger: %s / %s \"%(self.hunger, self.max_value))\n self.stamina_text.set(\"Stamina: %s / %s \"%(self.stamina, self.max_value))\n self.food_text.set(\"Remaining food: %s\"%self.food)\n preg = bool(self.until_birth)\n self.birth_text.set(\"Is \" + \"not\" * (not preg) + \" pregnant.\" + preg * (\" Time till giving birth: %s\"%self.until_birth))", "title": "" }, { "docid": "4b1b5245d2b005b8370871ba90be65d5", "score": "0.613991", "text": "def draw(self):\n arcade.create_rectangle_filled(\n center_x=self.x,\n center_y=self.y,\n width=self.width,\n height=self.height,\n color=arcade.color.WHITE,\n ).draw()\n\n if self.turning_point:\n self.turning_point.draw()", "title": "" }, { "docid": "3cf9eac49b0db4f49ec81cef5ec4f14d", "score": "0.61281675", "text": "def draw(self,window):\n\t\tself.main.draw(window) \n\t\tself.outer.draw(window)\n\t\tself.text.draw(window)\n\t\tself.window = window", "title": "" }, { "docid": "27ece5f215feb2407de9febd926b7cfb", "score": "0.61274135", "text": "def draw(self, screen):\n pg.draw.circle(screen,\n self.color,\n (int(self.x), int(self.y)),\n self.size,\n self.thickness\n )", "title": "" }, { "docid": "4eadb654d940f11afb7778f68d855cf8", "score": "0.6120313", "text": "def draw(self, canvas):\n for node in self.graph.nodes():\n self.graph.node[node]['object'].draw(canvas)\n\n self.set_linewidth()\n\n for edge in self.graph.edges():\n self.graph[edge[0]][edge[1]]['object'].draw(canvas)", "title": "" }, { "docid": "d4213f10c55aa9c454633a2ed1b155d6", "score": "0.6111755", "text": "def draw(self, window):\n self.rect.draw(window)\n self.text.draw(window)", "title": "" }, { "docid": "20583142a8556330d4a240087b8882fc", "score": "0.6111176", "text": "def draw(self):\r\n #draw a dot at the flying object center point, dot is blue and 5px\r\n arcade.draw_point(self.center.x, self.center.y, arcade.color.BLUE_SAPPHIRE, 5)", "title": "" }, { "docid": "7a903fe00d10aa75f620d2979dec7dc6", "score": "0.6111115", "text": "def draw_circle(self):", "title": "" }, { "docid": "e5901c5cbff840c396b94954aaaeeae6", "score": "0.6105898", "text": "def update_gps(self, idx):\n if self.gps_data is not None:\n self.gps_plot.set_xdata(self.gps_data[0][:idx])\n self.gps_plot.set_ydata(self.gps_data[1][:idx])\n self.canvas.draw()\n else:\n pass", "title": "" }, { "docid": "4a718d76d0065b8edc33d35bad45850b", "score": "0.6104651", "text": "def draw(self):\n\n #drawing the screen\n self.screen.fill((0,51,102))\n\n #drawing the walls\n self.model.walls.draw(self.screen)\n\n #drawing the cat\n self.model.cat.playerrepresentation.draw(self.screen)\n \n #drawing the circles\n for circle in self.model.allcircles:\n circle.draw(self.screen)", "title": "" }, { "docid": "a9cfc95b8758d271873a87e041ffee23", "score": "0.6095452", "text": "def draw(self):\n self.vertex_list.draw(self.mode)", "title": "" }, { "docid": "952bc52cb245e6afe73922aebd1daa77", "score": "0.60936254", "text": "def draw(self, screen):\n pg.draw.polygon(screen, self.color, self.vertexes)", "title": "" }, { "docid": "85f13af7a7db9af601389edb08919372", "score": 
"0.60898936", "text": "def DrawSpline(self, points):", "title": "" }, { "docid": "6f7247ed9eb09adbd813c190cd950975", "score": "0.6089498", "text": "def ButtonMotion(self, event):\n if self.clicked == True: \n self.update_plot()\n self.newx = event.x\n self.newy = event.y\n self.canvas.draw_line(self.newx, self.newy)\n self.canvas.index(self.newx)#for the black line\n self.create_ppdata()\n self.update_label()\n else:\n return", "title": "" }, { "docid": "dcc1e446c80b2b50b02e7f8d805bdaa0", "score": "0.60818344", "text": "def draw(self, canvas):\n\t\tcanvas.create_polygon([pixelFromPosition(vertex) for vertex in self.transformedVertices], **self.properties)", "title": "" }, { "docid": "41a827aca9d45e1536df2eb960985010", "score": "0.6079189", "text": "def draw(self, win):\n pygame.draw.circle(win, self.color, (self.x, self.y), self.radius)", "title": "" }, { "docid": "b15382ff7e5413c58f939ddcb600f9cc", "score": "0.6074624", "text": "def plot(self):", "title": "" }, { "docid": "2420cbdf43383fcb21537d6b12e5d0d2", "score": "0.6072332", "text": "def figure(self, points):\n edges = len(points)-1 # number of edges\n for edge in range(edges):\n self.line(points[edge], points[edge+1])", "title": "" } ]
768e00ec8ddd94a666938045bfd89fe0
Matches the NDC to db
[ { "docid": "671597d49d82516294ea16dbefe37f43", "score": "0.58281344", "text": "def match_ndc(ndc):\n\n def find_recent_db():\n \"\"\" returns most recent db csv file. Gives error pop up if no db file found \"\"\"\n files = glob.glob('ndcdb-*.csv')\n if len(files) == 0:\n error_msg = \"No NDC database file found (prefixed with ndcdb-[date].csv). This means that either \" \\\n \"the file doesn't exist or was renamed to something else. Please generate that file\"\n popupmsg(error_msg)\n print(error_msg)\n else:\n pattern = re.compile(r\"^ndcdb-(\\d{4}-\\d{2}-\\d{2}).csv$\")\n dates_list = []\n for file in files:\n str_date = pattern.match(file).groups()[0]\n date = datetime.strptime(str_date, '%Y-%m-%d')\n dates_list.append(date)\n\n formatted_date = datetime.strftime(max(dates_list).date(), '%Y-%m-%d')\n\n recent_db_file = 'ndcdb-' + formatted_date + '.csv'\n lastupdated_label[\"text\"] = \"Using data last updated on: \" + formatted_date\n return recent_db_file\n\n recent_file = find_recent_db()\n df = pd.read_csv(recent_file)\n print('Using db from this file: ', recent_file)\n\n # print(\"First entry of ndc package code no hyphen: \", df['NDCPACKAGECODE_nohyphen'][0], \", and type: \", type(df['NDCPACKAGECODE_nohyphen'][0]))\n # print(\"First entry of ALL_NDC: \", df['ALL_NDC'][0], \", and type: \", type(df['ALL_NDC'][0]))\n\n # below will be if ndc in df['NDCPACKAGECODE_nohyphen'].values: -- if ndc is found in a list of NDCs with no hyphen\n\n # 012037086010010821TXZ811R4ZKNC\t1723022810MT005\n # if df['ALL_NDC'].apply(lambda x: ndc in x).sum() == 1:\n # str_to_list = lambda x: x.strip('][').split(', ')\n if df['ALL_NDC_NO_HYPHEN'].apply(lambda x: ndc in x).any():\n index = df.loc[df['ALL_NDC_NO_HYPHEN'].apply(lambda x: ndc in x)].index[0]\n\n # assign \"raw_ndc\" with hyphens by looking through column with list of all ndcs under a package\n ndc_list = str_to_list(df['ALL_NDC'][index])\n for check_ndc in ndc_list:\n if ndc == remove_hyphen(check_ndc):\n raw_ndc = check_ndc\n\n pack_desc = df['PACKAGEDESCRIPTION'][index]\n brand = df['PROPRIETARYNAME'][index]\n generic = df['NONPROPRIETARYNAME'][index]\n dosage_form = df['DOSAGEFORMNAME'][index]\n route = df['ROUTENAME'][index]\n mfg = df['LABELERNAME'][index]\n strength = df['ACTIVE_NUMERATOR_STRENGTH'][index]\n str_units = df['ACTIVE_INGRED_UNIT'][index]\n pharm_class = df['PHARM_CLASSES'][index]\n\n def eleven_dig(ten_digit):\n \"\"\" changes 10 digit ndc to 11 digit ndc adding the zero to the appropriate place \"\"\"\n lst = ten_digit.split(\"-\")\n lst[0] = lst[0].zfill(5)\n lst[1] = lst[1].zfill(4)\n lst[2] = lst[2].zfill(2)\n return ''.join(lst)\n\n ten_dig = eleven_dig(raw_ndc)\n return True, raw_ndc, ten_dig, pack_desc, brand, generic, dosage_form, route, mfg, strength, \\\n str_units, pharm_class\n else:\n error_msg = \"NDC doesn't exist in database? If drug is new or unofficial (or maybe OTC? not sure if those are\" \\\n \" in there.., may require updating database?\"\n print(error_msg)\n popupmsg(error_msg)", "title": "" } ]
[ { "docid": "e0c3d258c19d4d7fa03901c154ec7bfa", "score": "0.5899773", "text": "def inspect_db(self):", "title": "" }, { "docid": "4c75ae12209357a13cd26ecb99d48a79", "score": "0.56975514", "text": "def getdb(self):\n if self.cl._db is not None:\n return self.cl._db\n return discretezoo.DEFAULT_DB", "title": "" }, { "docid": "2d7600e1e532f9e6b12b0494dbf3a5dd", "score": "0.5669286", "text": "def ReadNDCDatabase(SearchText, SearchType, SearchSize):\n\n NDCRecords = []\n NDCEnhancedArray = []\n\n\n if SearchType==\"ndc\":\n SearchText = SearchText.upper()\n SearchText = SearchText.strip()\n SearchError = False\n CleanNDC = SearchText.replace(\"-\", \"\")\n if len(CleanNDC) == 9:\n qNDC = NDCLookup.query(NDCLookup.ninedigitndc == CleanNDC)\n else:\n qNDC = NDCLookup.query(NDCLookup.ndc == CleanNDC)\n\n elif SearchType == \"name\":\n SearchText = SearchText.upper()\n SearchText = SearchText.strip()\n SearchError = len(SearchText) == 0\n qNDC = NDCLookup.query(NDCLookup.proprietaryname == SearchText)\n\n elif SearchType == \"ingredient\":\n QueryString = IngredientQuery(SearchText)\n logging.info(\"Executing Ingredient Search: %s\", QueryString)\n SearchError = len(QueryString) == 0 \n qNDC= NDCLookup.gql(QueryString)\n\n elif SearchType == \"image\":\n SearchText = SearchText.upper()\n SearchText = SearchText.strip()\n QueryString = NDC9ArrayQuery(SearchText)\n logging.info(\"Executing NDC9 Search: %s\", QueryString)\n SearchError = len(QueryString) == 0\n qNDC= NDCLookup.gql(QueryString)\n\n elif SearchType == \"active\":\n SearchText = SearchText.upper()\n SearchText = SearchText.strip()\n SearchError = len(SearchText) == 0\n qNDC = NDCLookup.query(NDCLookup.substancename == SearchText)\n \n else:\n SearchError = True\n\n if not(SearchError) : NDCRecords = qNDC.fetch(limit=int(SearchSize))\n\n for NDCRecord in NDCRecords:\n NDCRecord.ndc = NDCTenDigitFormat(NDCRecord.ndc, NDCRecord.format)\n NDCRecord.proprietaryname = NDCRecord.proprietaryname.title()\n NDCRecord.nonproprietaryname = NDCRecord.nonproprietaryname.title()\n NDCRecord.packagedescription = NDCRecord.packagedescription.title()\n\n substancelist = []\n\n for substance in NDCRecord.substancelist:\n substancelist.append(substance.title())\n\n NDCRecord.substancelist = substancelist\n\n\n TemplateValues = {\"NDCRecordArray\": NDCRecords,\n \"SearchText\": SearchText,\n \"SearchType\": SearchType,\n \"SearchSize\": SearchSize}\n\n return TemplateValues", "title": "" }, { "docid": "808d9a25b42b6f3d15a009c3bbb5e49e", "score": "0.5438229", "text": "def getMainDbDN(self):\n \n \n if self.conn.search(search_base=\"cn=config\", search_scope=LEVEL,\n search_filter=\"(olcDbDirectory=/opt/gluu/data/main_db)\",\n attributes='*'):\n if self.conn.response:\n return self.conn.response[0]['dn']", "title": "" }, { "docid": "faa84fb710c5caa6a1b2549f07ff4aea", "score": "0.53561217", "text": "def db_for_read(self, model, **hints):\n return random.choice(['replica_1'])", "title": "" }, { "docid": "e00622aa8d0d49fff6a6a11e28c41937", "score": "0.5321135", "text": "def db(self) -> ConfigNodePropertyString:\n return self._db", "title": "" }, { "docid": "abef234fddbac44609b1aafb0e2cce6d", "score": "0.53086776", "text": "def get_db():\n if not hasattr(g, 'mongo_db'):\n g.mongo_db = pymongo.MongoClient('db')\n return g.mongo_db.filterforge", "title": "" }, { "docid": "6099d13b58ec8776840d902d254ade31", "score": "0.5238932", "text": "def get_db():\n pass", "title": "" }, { "docid": "ef33563c491377b11678fe20f4dfc96f", "score": "0.5220712", "text": "def 
db_for_read(self, model, **hints):\n if model._meta.app_label == 'fblog':\n return 'fblog_db'\n return None", "title": "" }, { "docid": "389799e6ff7064b6ebf88447c09f4e1e", "score": "0.52157825", "text": "def _db_exist(self, source):\n\t\tpass", "title": "" }, { "docid": "b72f57ab10518ae657f2cbf0f2e1abe7", "score": "0.51940084", "text": "def dispatch_database():\n pass", "title": "" }, { "docid": "75c9dd6cbc580743d1a9d43792f6ae33", "score": "0.51888806", "text": "def db(self):\r\n return self.__db", "title": "" }, { "docid": "fd2ed4dd2b09b95e6f22bfbd3c33c0b1", "score": "0.5169626", "text": "def get_db_info(db_coll_dict,config_path):\n ip,username,password=load_mongo_account(config_path)\n client=MongoClient(ip,username=username,password=password)\n detail_dict={}\n for db in db_coll_dict.keys():\n meta_client=client[db][db+\".meta\"]\n detail_dict[db]={}\n\n detail_dict[db]['episodes']=[i for i in client[db].list_collection_names() if \".meta\" not in i and \".files\" not in i and \".chunks\" not in i and \".vis\" not in i and \".scans\" not in i ]\n\n # Add task description\n detail_dict[db]['task_description']=get_task_description(meta_client)\n\n # Add class and id info for entities\n detail_dict[db]['entities']={}\n class_info=get_all_class(meta_client)\n for each_object in class_info:\n obj_id=each_object['id']\n obj_class=each_object['class']\n if obj_class not in detail_dict[db]['entities'].keys():\n detail_dict[db]['entities'][obj_class]=[obj_id]\n else:\n detail_dict[db]['entities'][obj_class].append(obj_id)\n \n # Add skel entities\n detail_dict[db]['skels']={}\n bone_info=get_bone(meta_client)\n for each_bone in bone_info:\n bone=each_bone['bone']\n skel=each_bone['skel']\n if skel not in detail_dict[db]['skels'].keys():\n detail_dict[db]['skels'][skel]=[bone]\n else:\n detail_dict[db]['skels'][skel].append(bone)\n\n # Add camera view\n detail_dict[db]['camera_views']=get_camera_view(meta_client)\n # pprint.pprint(detail_dict)\n \n return detail_dict", "title": "" }, { "docid": "cea77859580f584522b8b98ba0892c08", "score": "0.5169591", "text": "def legacy_db_configured(self):\n if (\n self.kv.get(\"db_host\")\n and self.kv.get(\"db_port\")\n and self.kv.get(\"db_db\")\n and self.kv.get(\"db_user\")\n and self.kv.get(\"db_pass\")\n ):\n return True\n return False", "title": "" }, { "docid": "acb9ce2f999a8235e840fd94d2128e30", "score": "0.5157214", "text": "def dbc_db(self):\n return self.__dbc_db", "title": "" }, { "docid": "c986b9db2d7cd8d6e5c55fb2b65759b5", "score": "0.5132187", "text": "def db(self):\n return self._conf['db']", "title": "" }, { "docid": "59c589aa63afdf12dde81088a8648e97", "score": "0.51253587", "text": "def get_db_state():\n print \"\\nreference implementation of get_db_state\\n\"\n return None", "title": "" }, { "docid": "f8b6921814cd96eaf5c4e4836d56d0d0", "score": "0.5109895", "text": "def CGD(cls):\n cls.Namespace(\"http://www.candidagenome.org/cgi-bin/locus.pl?dbid=\")", "title": "" }, { "docid": "30d7f1379c3f7f0f80343538209cc154", "score": "0.5109482", "text": "def check_mongodb_state(config_path):\n ip,username,password=load_mongo_account(config_path)\n client=pymongo.MongoClient(ip,username=username,password=password,serverSelectionTimeoutMS=100)\n try:\n print(client.list_database_names())\n return True\n except Exception as e:\n return False", "title": "" }, { "docid": "5a52a415663f3ab4721fe938d3b92013", "score": "0.5103297", "text": "def test_ngen_with_known_db(runner):\n result = runner.invoke(cli.main, ['list', '-d' , '~/.keepassxc/worth.kdbx'])\n 
assert result.exit_code == 0", "title": "" }, { "docid": "5fc530f67300c30c79dc8432d0146653", "score": "0.5096329", "text": "def db_for_read(self, model, **hints):\n if model._meta.app_label == 'auth':\n return 'auth_db'\n return None", "title": "" }, { "docid": "0db5d66430443a9b54386c3076b54d8b", "score": "0.5089351", "text": "def __init__(self,db):\n self.db = db", "title": "" }, { "docid": "d1d22064ce85a9d9e5421703f72a124a", "score": "0.5073341", "text": "def create_db_from_dict(self, ref_dict, database = 'smart_meter'):\n\n\n\n\t\tprint(\"Database {} has been created from the dictionary\".format(database))", "title": "" }, { "docid": "e7129aa5fee58918ea3e721d4b5062e8", "score": "0.50717753", "text": "def __init__(self, db):\n self._db = db", "title": "" }, { "docid": "e7129aa5fee58918ea3e721d4b5062e8", "score": "0.50717753", "text": "def __init__(self, db):\n self._db = db", "title": "" }, { "docid": "6e422fbc834c5ce8795ad8afb0331382", "score": "0.50672495", "text": "def _get_database_id(self, db_name):\n for expert_database in Database.objects.all():\n if re.match(expert_database.label, db_name, re.IGNORECASE):\n return expert_database.id\n return None", "title": "" }, { "docid": "be694c1d9a541835fe2b57be9e5d36ea", "score": "0.506357", "text": "def _init_db(self):\n raise NotImplementedError(\"Use subclasses of Database.\")", "title": "" }, { "docid": "14377ae4a1264e70fb40459e55696168", "score": "0.5059923", "text": "def checkAccesslogDB(self):\n \n return self.conn.search(search_base='cn=config',\n search_filter='(olcSuffix=cn=accesslog)',\n search_scope=SUBTREE, attributes=[\"*\"])", "title": "" }, { "docid": "f884bbe9255899e5ccad50d1aa7da105", "score": "0.50476664", "text": "def db():\n pass", "title": "" }, { "docid": "4ec067f043854af130dc68d6310bb290", "score": "0.5025044", "text": "def db_for_read(self, model, **hints):\n if model._meta.app_label == 'mapdata':\n return 'msemap_db'\n return None", "title": "" }, { "docid": "782deb8c0adabe9849cfdf86e50738ea", "score": "0.501365", "text": "def db_show(self, name) -> bool:\n found = False\n dbs = self._index_local_databases()\n\n for db in dbs:\n if db == name:\n found = True;\n\n updated = datetime.datetime.fromtimestamp(dbs[db]['last modified']).strftime('%Y-%m-%d %H:%M:%S')\n checked = datetime.datetime.fromtimestamp(dbs[db]['last checked']).strftime('%Y-%m-%d %H:%M:%S')\n self.logger.info(f\"Database: {db}\")\n if dbs[db]['last modified'] == 0:\n self.logger.info(\" last modified: not downloaded\")\n else:\n self.logger.info(f\" last modified: {updated}\")\n if dbs[db]['last checked'] == 0:\n self.logger.info(\" last checked: n/a\")\n else:\n self.logger.info(f\" last checked: {checked}\")\n self.logger.info(f\" url: {dbs[db]['url']}\")\n if db.endswith(\".cvd\"):\n self.logger.info(f\" local version: {dbs[db]['local version']}\")\n if len(dbs[db]['CDIFFs']) > 0:\n self.logger.info(f\" CDIFFs: \\n{json.dumps(dbs[db]['CDIFFs'], indent=4)}\")\n return True\n\n if not found:\n self.logger.error(f\"No such database: {name}\")\n return found", "title": "" }, { "docid": "bebd245ae8ad61090e8d6a3ec3a0f552", "score": "0.4993931", "text": "def test_003_dbtype(self) -> None:\n self.assertEqual(self.ds.dbtype, \"dynamo\")", "title": "" }, { "docid": "e536412c7386152d4bae645d367cb220", "score": "0.4969846", "text": "def db():\n raise NotImplementedError()", "title": "" }, { "docid": "1061a95e9c40d3e60d39cc5f11b0033a", "score": "0.49446747", "text": "def get_db():\n if not hasattr(g, 'db_con'):\n g.db_con = connect_db()\n return g.db_con", 
"title": "" }, { "docid": "3bca35078fe59913957c9f19e658127a", "score": "0.49268067", "text": "def set_candb(self, db):\n self.candb = db", "title": "" }, { "docid": "9bcdaf6f50aeeb2c3ed60f75fc79ce09", "score": "0.49224553", "text": "def db(self):\n# log.info('fetching request.db')\n return DBSession", "title": "" }, { "docid": "aec98e1603bb3a5d602c25f3edde63ad", "score": "0.4914233", "text": "def mogo_db_info(self) -> str:\n return self.config['database']['mongo']['information']", "title": "" }, { "docid": "cf985bf73df44a6316c55391fcc69e3e", "score": "0.4912167", "text": "def db_variables():\n\n return (\"azurehack.n8sw8.gcp.mongodb.net\",\"backend\",\"azure2020\",\"admin\",\"azurehack\")", "title": "" }, { "docid": "a465304cf06a63ebd646ac049c4a6702", "score": "0.490552", "text": "def dbformat(self):\n\n pass", "title": "" }, { "docid": "7ebea65531837f781815f8a5d7cdeae1", "score": "0.48924413", "text": "def getDbCount(self):\n\t\tc= self.ds.value(\"dbCounter\",0).toInt()[0]\n\t\treturn c", "title": "" }, { "docid": "c8a44427758ee3855d217be560bc8631", "score": "0.4883515", "text": "def db_for_write(self, model, **hints):\n if model._meta.app_label == 'fblog':\n return 'fblog_db'\n return None", "title": "" }, { "docid": "3e2aff155f9b1b697a164fdacc3ab9f6", "score": "0.4879035", "text": "def get_vesync_database_info_from_nacos(self) -> dict:\n nacos_client = nacos.NacosClient(self.nacos_server.host, namespace=self.stage_namespace_id)\n self.set_nacos_client_debug(nacos_client)\n configs = nacos_client.get_config(settings.VESYNC_DATABASE_DATA_ID,\n settings.VESYNC_DATABASE_GROUP,\n no_snapshot=True)\n logger.debug(f\"configs (data id: {settings.VESYNC_DATABASE_DATA_ID}, group: {settings.VESYNC_DATABASE_GROUP}): \"\n f\"{configs}\")\n configs = common.load_properties_from_string(configs)\n database_info = {\n \"host\": configs[settings.KEY_TO_VESYNC_DATABASE_HOST],\n \"port\": int(configs[settings.KEY_TO_VESYNC_DATABASE_PORT]),\n \"user\": configs[settings.KEY_TO_VESYNC_DATABASE_USER],\n \"password\": configs[settings.KEY_TO_VESYNC_DATABASE_PASSWORD],\n \"database\": configs[settings.KEY_TO_VESYNC_DATABASE_NAME],\n \"charset\": \"utf8\",\n \"cursorclass\": pymysql.cursors.DictCursor\n }\n logger.info(f\"database info used to connect: {database_info}\")\n return database_info", "title": "" }, { "docid": "ee111f5320870999c82485c6eb76104f", "score": "0.48773244", "text": "def internalDB_process(self, *args, **kwargs):\n d_ret = self.dcm.internalDB_process(*args, **kwargs)\n return d_ret", "title": "" }, { "docid": "300a2074558c1aa272dc718eae3183ec", "score": "0.48739982", "text": "def preferred_dc(self):\n return self._preferred_dc", "title": "" }, { "docid": "a779cf9d9baf7039fa83e9cf136e8ffe", "score": "0.4869784", "text": "def _get_db(cls):\r\n return get_db(cls._meta.get(\"db_alias\", DEFAULT_CONNECTION_NAME ))", "title": "" }, { "docid": "d5c97ce33139911dad3ffd7a0a2253c7", "score": "0.48628843", "text": "def __init__(self, db) -> None:\n self._db = db", "title": "" }, { "docid": "214f1e1b15658dd10a2111c5317b104e", "score": "0.48577946", "text": "def filter_databases(rc):\n dbs = rc.databases\n public_only = rc._get(\"public_only\", False)\n if public_only:\n dbs = [db for db in dbs if db[\"public\"]]\n dbname = rc._get(\"db\")\n if dbname is not None:\n dbs = [db for db in dbs if db[\"name\"] == dbname]\n elif len(dbs) == 1:\n rc.db = dbs[0][\"name\"]\n rc.databases = dbs", "title": "" }, { "docid": "7079b600de1b490e5a7b5d5d76912821", "score": "0.485489", "text": "def _initdb(self, db=None):\n 
if self._db is not None:\n return\n if db is None:\n self._db = discretezoo.DEFAULT_DB\n else:\n self._db = db", "title": "" }, { "docid": "d05dfa6c864d6c3db4bc0ae5b5620b9e", "score": "0.48525262", "text": "def load(self, db):\n\t\tpass", "title": "" }, { "docid": "b10a78718803ce0ec49b89b80a3f64c4", "score": "0.48498744", "text": "def db(self) -> Any:\n return self._db", "title": "" }, { "docid": "a688e70742fe7fb640e8b7e42eed47fc", "score": "0.48465395", "text": "def get_master_mongo_conn(env='dev'):\n if env == 'prod':\n MONGO_URI = 'mongodb://chefd_prod:[email protected]:35459/'\n MONGO_URI += 'prod_chefd_1_gb?replicaSet=rs-ds135459'\n else:\n MONGO_URI = 'mongodb://staging_chefd:[email protected]:41098/'\n MONGO_URI += 'staging-chefd-replica'\n client = pymongo.MongoClient(MONGO_URI)\n db = client.get_default_database()\n return db", "title": "" }, { "docid": "c1a9648f2d860cb95a6f5dd2859d1061", "score": "0.48406836", "text": "def db(self, db: ConfigNodePropertyString):\n\n self._db = db", "title": "" }, { "docid": "7c40713744c3a79de6b7d1dc95549911", "score": "0.4830416", "text": "def _get_db():\n global db_conn\n\n if not db_conn:\n db_conn = _connect_db(DSN)\n return db_conn", "title": "" }, { "docid": "45adfa4a8a41cc5ced4962b86c5acad3", "score": "0.4827729", "text": "def db_for_read(self, model, **hints):\n if model._meta.db_table in self.call_center_tables:\n return 'call_center'\n return None", "title": "" }, { "docid": "6bf6b0932ec62e7aa0d6b98cf3ea7b1a", "score": "0.48261854", "text": "def get_database_for(self, id):\n url=\"https://app.crepc.sk/oai\"\n params={\"verb\":\"GetRecord\",\n \"metadataPrefix\":\"xml-crepc2\",\n \"identifier\":\"oai:crepc.sk:database/\"+id\n }\n r = requests.get(url = url, params = params)\n return r.content.decode(\"utf-8\")", "title": "" }, { "docid": "31a7f676bab2ed16a7cf35919b03e346", "score": "0.4823183", "text": "def _get_nsd_catalog(self):\n return self.__nsd_catalog", "title": "" }, { "docid": "e2178508a7adc4d9478519f598f8e8e0", "score": "0.48205787", "text": "def before_request():\n g.db = connect_db(app.config['ACCOUNT'], app.config['DBNAME'])\n #Entry.set_db(g.db)", "title": "" }, { "docid": "1049a65383a146966505e8e8bcc12bd6", "score": "0.48180845", "text": "def connect_db():\n pass", "title": "" }, { "docid": "6c25bd4fb73f28e625785e71da259f68", "score": "0.4813999", "text": "def db(self) -> Database[DBRecord]:\n raise NotImplementedError", "title": "" }, { "docid": "31240513e1360995e5ebc17db19072c7", "score": "0.4811872", "text": "def db_for_read(self, model, **hints):\n if model._meta.app_label == 'travel_buddy':\n return 'travel_buddy_db'\n return None", "title": "" }, { "docid": "5e7792b1126518cc230e39697c64c0ad", "score": "0.48093012", "text": "def test_db(config_parser):\n mongo_host = config_parser.get('mongo','db-host')\n mongo_db = config_parser.get('mongo','db-name')\n mongo_port = config_parser.getint('mongo','db-port')\n mongo_user = config_parser.get('mongo','username')\n mongo_pwd = config_parser.get('mongo','password')\n mongo_auth = config_parser.get('mongo','auth-db')\n client = MongoClient(mongo_host)\n db = client[mongo_db]\n db.authenticate(mongo_user,mongo_pwd,source=mongo_auth)\n logger.debug(\"Successfully connected to %s\"%db)\n return db", "title": "" }, { "docid": "bbabd11b6efa7da267c65435dd760e60", "score": "0.4806655", "text": "def get_uri_db_tny(self,id_db_tny):\n \n if id_db_tny:\n if id_db_tny in [\"softrans_sb\",\"softlog\"]:\n db = 
self.get_session.query(self.stringconexoes_model).filter(self.stringconexoes_model.banco_dados==id_db_tny).first()\n else:\n db = self.get_session.query(self.stringconexoes_model).filter(self.stringconexoes_model.banco_dados==\"softlog_\" + id_db_tny).first()\n \n if db is not None:\n return db.db_uri_db_pg()\n\n if id_db_tny:\n db = self.get_session.query(self.stringconexoes_model).filter(self.stringconexoes_model.id_string_conexao==id_db_tny).first()\n else:\n return None\n\n return db.db_uri_db_pg()", "title": "" }, { "docid": "ae7dc1377dd58edfea824a5393775a31", "score": "0.4789945", "text": "def db_for_write(self, model, **hints):\n if model._meta.app_label == 'auth':\n return 'auth_db'\n return None", "title": "" }, { "docid": "f5cd05e589f912f524c8410fc99add35", "score": "0.47840783", "text": "def __init__(self, db_con_str=\"dbname=catalogdb\"):\n self.conn = psycopg2.connect(db_con_str)", "title": "" }, { "docid": "8776bb80d80432ccdeee4801baba1114", "score": "0.47730455", "text": "def get_db():\n if not hasattr(g, 'db_conn'):\n g.db_conn = db_connect()\n return g.db_conn", "title": "" }, { "docid": "adf227e79ef42ab8883ed455430fb593", "score": "0.4772139", "text": "def _get_nsd(self):\n return self.__nsd", "title": "" }, { "docid": "76847efaba7e5a8392258df064fcac5b", "score": "0.47715753", "text": "def get_database(bases_list, database_name, desired_key):\n\tthere = False\n\tfor base in bases_list:\n#\t\tprint base.name\n#\t\tprint database_name\n\t\tif str(base.name) == database_name:\n\t\t\td = base\n\t\t\tthere = True\n\tif not there:\n#\t\tprint 'not there'\n\t\td = Database(parent = desired_key)\n\t\td.name = database_name\n\t\td.content = db.Text('')\n\treturn d", "title": "" }, { "docid": "5b1703b356641fa0f1b0d8fdf2659578", "score": "0.47707888", "text": "def db_for_read(self, model, **hints):\n if model._meta.app_label in DATABASE_DICT:\n return DATABASE_DICT[model._meta.app_label]\n return None", "title": "" }, { "docid": "db556679b20f0c272a5b0281b319a053", "score": "0.47697708", "text": "def test_db_table(self):\n\n\t\tself.assertEqual(self.record._meta.db_table,\"reviews\")", "title": "" }, { "docid": "249e6e635ff6c09ab7eec0bc32fde578", "score": "0.47613108", "text": "def get_connection(**kwargs):\n try:\n logging.debug(\"Connecting to mapd db...\")\n con = pymapd.connect(\n user=kwargs[\"db_user\"],\n password=kwargs[\"db_passwd\"],\n host=kwargs[\"db_server\"],\n port=kwargs[\"db_port\"],\n dbname=kwargs[\"db_name\"],\n )\n logging.info(\"Succesfully connected to mapd db\")\n return con\n except (pymapd.exceptions.OperationalError, pymapd.exceptions.Error):\n logging.exception(\"Error connecting to database.\")\n return False", "title": "" }, { "docid": "b24b310e971351286e27d62cd9fbf645", "score": "0.47595018", "text": "def dc(self):\n pass\n # TODO Client disconnection", "title": "" }, { "docid": "26de2e1fb255c2a5a43aef5fa1145655", "score": "0.4756963", "text": "def testDetermineDatabaseType(self):\n test_extractor = catalog_extractor.EseDbCatalogExtractor()\n\n database_type = test_extractor._DetermineDatabaseType([\n 'SystemIndex_0A', 'SystemIndex_Gthr'])\n self.assertEqual(database_type, 'search')", "title": "" }, { "docid": "b5470e0f8db47cbfa374940c7064db0c", "score": "0.47476617", "text": "def accesslogDBEntry(self, replicator_dn, \n log_dir=\"/opt/gluu/data/accesslog\"):\n\n attributes = {'objectClass': ['olcDatabaseConfig', 'olcMdbConfig'],\n 'olcDatabase': '{2}mdb',\n 'olcDbDirectory': log_dir,\n 'OlcDbMaxSize': 1073741824,\n 'olcSuffix': 'cn=accesslog',\n 
'olcRootDN': 'cn=admin, cn=accesslog',\n 'olcRootPW': ldap_encode(self.passwd),\n 'olcDbIndex': ['default eq', 'objectClass,entryCSN,entryUUID,reqEnd,reqResult,reqStart,reqDN'],\n 'olcLimits': 'dn.exact=\"{0}\" time.soft=unlimited time.hard=unlimited size.soft=unlimited size.hard=unlimited'.format(replicator_dn),\n\n }\n #check if accesslogdb entry is allread exists. If not exists, create it.\n if not self.checkAccesslogDBEntry():\n return self.conn.add('olcDatabase={2}mdb,cn=config',\n attributes=attributes)", "title": "" }, { "docid": "17eb67d6b93aec15e1ad14cdb6106aad", "score": "0.47474527", "text": "def configureDBSnapshot(self):\n tag = { \"DDDB\": self.getProp('DDDBtag')\n , \"LHCBCOND\": self.getProp('CondDBtag')\n , \"SIMCOND\" : self.getProp('CondDBtag')\n , \"ONLINE\" : 'fake'\n }\n\n # https://savannah.cern.ch/bugs/?94454#comment12\n from Configurables import MagneticFieldSvc\n MagneticFieldSvc().UseSetCurrent = True\n\n from Configurables import CondDB\n cdb = CondDB()\n cdb.Tags = tag\n cdb.setProp('IgnoreHeartBeat', True)\n self.setOtherProps( cdb, [ 'UseDBSnapshot',\n 'DBSnapshotDirectory',\n 'PartitionName' ])\n\n # So, here is the problem: we don't want to run the risk that\n # the CondDB() configurable (which configures /after/ us)\n # overwrites our conditions. Yet, we don't want to miss the\n # default conditions (e.g. velo stepper motor, magnetic field)\n # either. if we add our conditions to its\n # RunChangeHandlerConditions list, then we a) need to fix the\n # path and b) don't know what happens for conditions that\n # appear twice, because we don't control the ordering of the\n # list. So, the hack is:\n # - don't set 'EnableRunChangeHandler'\n # - copy what is hidden behind that flag in CondDB()._configureDBSnapshot\n # - do the test of the RunChangeHandler configuration ourselves:\n cdb.setProp('EnableRunChangeHandler', False)\n from Configurables import RunChangeHandlerSvc\n rch = RunChangeHandlerSvc()\n ApplicationMgr().ExtSvc.append(rch)\n baseloc = self.getProp( \"DBSnapshotDirectory\" )\n rch.Conditions = dict( (c,'/'.join([baseloc,f])) for f,cs in cdb.getProp(\"RunChangeHandlerConditions\").iteritems() for c in cs )\n \n #path = self.getProp('DBSnapshotDirectory') + \"/..\"*4 + \"/group/online/AligWork/current/\"\n allconds = {\n 'Velo' : [\n 'Conditions/Alignment/Velo/VeloSystem',\n 'Conditions/Alignment/Velo/VeloRight',\n 'Conditions/Alignment/Velo/VeloLeft']\n + ['Conditions/Alignment/Velo/Module%02d'%i for i in range(0, 42)] \n + ['Conditions/Alignment/Velo/Detector%02d-%02d' % (i, (1 + i / 2) % 2) for i in range(0, 42)],\n 'IT' : []\n + [ 'Conditions/Alignment/IT/ITSystem' ]\n + [ 'Conditions/Alignment/IT/ITT%d' % i for i in range(1,4) ]\n + [ 'Conditions/Alignment/IT/ITT%d%sBox' % (i,b) for i in range(1,4) for b in ['Top','Bottom','ASide','CSide' ] ]\n + [ 'Conditions/Alignment/IT/ITT%d%sLayer%s' % (i,b,l) for i in range(1,4) for b in ['Top','Bottom','ASide','CSide' ] for l in ['X1','U','V','X2' ] ]\n + [ 'Conditions/Alignment/IT/ITT%d%sLayer%sLadder%d' % (i,b,l,a) for i in range(1,4) for b in ['Top','Bottom','ASide','CSide' ] for l in ['X1','U','V','X2' ] for a in range(1,8) ],\n # + [ 'Conditions/Alignment/IT/ITT%d%sLayer%sLadder%dSector' % (i,b,l,a) for i in range(1,4) for b in ['Top','Bottom','ASide','CSide' ] for l in ['X1','U','V','X2' ] for a in range(1,8) ]\n # + [ 'Conditions/Alignment/IT/ITT%d%sLayer%sLadder%dSector_Sensor1' % (i,b,l,a) for i in range(1,4) for b in ['Top','Bottom','ASide','CSide' ] for l in ['X1','U','V','X2' ] for a in 
range(1,8) ]\n # + [ 'Conditions/Alignment/IT/ITT%d%sLayer%sLadder%dSector_Sensor2' % (i,b,l,a) for i in range(1,4) for b in ['ASide','CSide' ] for l in ['X1','U','V','X2' ] for a in range(1,8) ] ,\n 'OT' : []\n + [ 'Conditions/Alignment/OT/OTSystem' ]\n + [ 'Conditions/Alignment/OT/T%d' %i for i in range(1,4) ]\n + [ 'Conditions/Alignment/OT/T%d%s' % (i,l) for i in range(1,4) for l in ['X1','U','V','X2' ] ]\n + [ 'Conditions/Alignment/OT/T%d%sQ%d' % (i,l,q) for i in range(1,4) for l in ['X1','U','V','X2' ] for q in range(0,4) ]\n + [ 'Conditions/Alignment/OT/T%d%sQ%dM%d' % (i,l,q,m) for i in range(1,4) for l in ['X1','U','V','X2' ] for q in range(0,4) for m in range(1,10) ],\n 'TT' : []\n + [ 'Conditions/Alignment/TT/TTSystem' ]\n + [ 'Conditions/Alignment/TT/TT%s' % i for i in ['a','b' ] ]\n + [ 'Conditions/Alignment/TT/TT%sLayer' % (l) for l in ['aX','aU','bV','bX' ] ]\n + [ 'Conditions/Alignment/TT/TT%sLayerR%dModule%d%s' % (l,r,m,w) for w in ['T','B'] for l in ['aX','aU','bV','bX'] for r in range(1,4) for m in range(1,4)]\n + [ 'Conditions/Alignment/TT/TT%sLayerR%dModule%d%s' % (l,r,m,w) for w in ['T','B'] for l in ['bV','bX'] for r in range(1,4) for m in range(4,6)]\n + [ 'Conditions/Alignment/TT/TT%sLayerR%dModule%d%s' % (l,r,m,w) for w in ['T','B'] for l in ['aX','aU','bV','bX'] for r in [1,3] for m in range(6,7)]\n + [ 'Conditions/Alignment/TT/TT%sLayerR%dModule%d%s' % (l,r,m,w) for w in ['T','B'] for l in ['aX','aU'] for r in [1,3] for m in range(4,6)],\n 'Muon' : []\n + ['Conditions/Alignment/Muon/MuonSystem']\n + [ 'Conditions/Alignment/Muon/M%sStation' % i for i in range(1,6) ]\n + [ 'Conditions/Alignment/Muon/M%sASide' % i for i in range(1,6) ]\n + [ 'Conditions/Alignment/Muon/M%sCSide' % i for i in range(1,6) ]\n }\n\n ## This is a bit dirty, since we're supposed to control TAlignment. 
We\n ## know that this is set from top level, so let's give it a try anyway\n ta = TAlignment()\n sdToWrite = set(ta.getProp(\"WriteCondSubDetList\"))\n pat = self.getProp(\"OnlineAligWorkDir\") + \"/xml/%s.xml\" \n conditionmap = dict((pat % sd, f) for (sd, f) in allconds.iteritems() if sd in sdToWrite)\n \n # add to the existing map\n rch.Conditions = dict(rch.Conditions.items() + dict( (c,f) for f,cs in conditionmap.iteritems() for c in cs ).items() )\n\n from Configurables import MagneticFieldSvc\n MagneticFieldSvc().UseSetCurrent = True", "title": "" }, { "docid": "157a6f85df20d249486b84e76c7046f1", "score": "0.47464782", "text": "def mongo_list_dbs():\n client = mongo_connect()\n for dbname in client.list_database_names():\n if dbname in ['admin', 'local']:\n continue\n db = client[dbname]\n gpup = db['gpu_procs'].find_one()\n if gpup is not None:\n gpup = gpup['gpu_procs']\n print(f\"DB: {dbname}\\n GPU proc count: {gpup}\")\n for colname in db.list_collection_names():\n col = db[colname]\n print(f\" Col: {colname}, count: {col.count()}\")\n client.close()", "title": "" }, { "docid": "c1c546ff3533607a7c08e2103a8e3296", "score": "0.47407824", "text": "def get_kdb():\n return current_app.extensions['kdb'].connection", "title": "" }, { "docid": "2be622382abe530d362506be550627f9", "score": "0.47388348", "text": "def checkAccesslogDBEntry(self):\n \n return self.conn.search(search_base='cn=config',\n search_filter='(olcSuffix=cn=accesslog)',\n search_scope=SUBTREE, attributes=[\"*\"])", "title": "" }, { "docid": "07435a72d7afb184ec87d740dcaa0776", "score": "0.47269005", "text": "def get_graph_db(self):\n return self.get_variable(\"general\", \"graph_db\")", "title": "" }, { "docid": "6a7daa68cfeb920c088725c5d078cf2c", "score": "0.47193885", "text": "def __init__(self, dsn, dsnDict, console):\n super(MySQLdbStorage, self).__init__(dsn, dsnDict, console)", "title": "" }, { "docid": "17388a380ba9911fda750bbbcd80851e", "score": "0.47184587", "text": "def __init__(self):\n self.db = database.db_connection()", "title": "" }, { "docid": "397399c38248591d8b2403a3d550fa74", "score": "0.47183996", "text": "def exist_care(mongoconn,db_name,carer_id,cared_id):\n factor = {'carer_id':carer_id,'cared_id':cared_id}\n res = mongoconn[db_name]['care_record'].find_one(factor)\n if res is not None:\n return res['_id']\n return None", "title": "" }, { "docid": "22ea32a1bbe0a4a2789cd49b063ec39e", "score": "0.47158706", "text": "def allow_syncdb(self, db, model):\n\n if db in DATABASE_DICT.values():\n return DATABASE_DICT.get(model._meta.app_label) == db\n elif model._meta.app_label in DATABASE_DICT:\n return False\n return None", "title": "" }, { "docid": "efcd17c7e244e8ec6c9d39fa2d9120a0", "score": "0.47150096", "text": "def database(self):\n return self.info_query(\"SELECT DATABASE()\")[0]", "title": "" }, { "docid": "5894d4b1c01c5091c9cb8d691e58dcd9", "score": "0.47148642", "text": "def get_db():\n if not hasattr(g, 'mongo'):\n g.mongo = mongo # Flask-Mongo connection\n return g.mongo.db", "title": "" }, { "docid": "aed0cd135cb4c801742f2e69a2f69c51", "score": "0.47099882", "text": "def _db_read_nonprimary(self, cur=None):\n return False", "title": "" }, { "docid": "123972ebf820beee8b10c334341d80ce", "score": "0.4706701", "text": "def ready(self, dbname):\r\n return dbname in self", "title": "" }, { "docid": "c60a20f7558c1cc5b1e8c76a71b50a9e", "score": "0.47008118", "text": "def get_custom_db(firmware_version, _db):\r\n if _db:\r\n if firmware_version in _db:\r\n return _db[firmware_version]\r\n return 
None", "title": "" }, { "docid": "9e94f29ce5612aafc0907a72795eaea6", "score": "0.4698479", "text": "def _configure_nve_db(self, vni, device_id, mcast_group, host_id):\n host_connections = self._get_switch_info(host_id)\n for switch_ip, intf_type, nexus_port in host_connections:\n nxos_db.add_nexusnve_binding(vni, switch_ip, device_id,\n mcast_group)", "title": "" }, { "docid": "76312512dc2ded0f5ad963b73f24aaad", "score": "0.4696835", "text": "def __init__(self):\n self._db_params = self._configuration_database()", "title": "" }, { "docid": "9c8bb710b4a166859cc2182dcc78304d", "score": "0.46957707", "text": "def get_db(server_id):\n DATABASE = \"DATABASE\" + str(server_id)\n print(DATABASE)\n \"\"\"The internal LocalStack that holds AppContext instances...application context binds an application object implicitly to the current thread\"\"\"\n tracktop = _app_ctx_stack.top\n \"\"\"If the database connection is not initiated then initiate connection. We are using sqllite3 for the database connection.\"\"\"\n \"\"\"detect_types in connect makes the sqlite3 module parse the declared type for each column it returns.\"\"\"\n if not hasattr(tracktop, 'track_db0') and server_id == 0:\n tracktop.track_db0 = sqlite3.connect(app.config[DATABASE], detect_types=sqlite3.PARSE_DECLTYPES)\n tracktop.track_db0.row_factory = sqlite3.Row\n if not hasattr(tracktop, 'track_db1') and server_id == 1:\n tracktop.track_db1 = sqlite3.connect(app.config[DATABASE], detect_types=sqlite3.PARSE_DECLTYPES)\n tracktop.track_db1.row_factory = sqlite3.Row\n if not hasattr(tracktop, 'track_db2') and server_id == 2:\n tracktop.track_db2 = sqlite3.connect(app.config[DATABASE], detect_types=sqlite3.PARSE_DECLTYPES)\n tracktop.track_db2.row_factory = sqlite3.Row\n \"\"\" check server id return the track database accordingly\"\"\"\n if server_id == 0:\n return tracktop.track_db0\n elif server_id == 1:\n return tracktop.track_db1\n else:\n return tracktop.track_db2", "title": "" }, { "docid": "27e1ffbd14a947894b7936437351383b", "score": "0.4693095", "text": "def test_get_db():\n db_test = User._get_db() # Get pymongo.database.Database instance\n assert isinstance(db_test, Database)", "title": "" }, { "docid": "7364862c404d1c3dc4e933539b7d50ea", "score": "0.468888", "text": "def get_db():\n if not hasattr(g, 'db'):\n g.db = connect_db()\n return g.db", "title": "" }, { "docid": "305a8e9dcd23d67e92986ad73c269c1e", "score": "0.46874708", "text": "def get_mongo_conn():\n MONGO_URI = 'mongodb://saran:[email protected]:13736/ingredientmaster'\n client = pymongo.MongoClient(MONGO_URI)\n db = client.get_database('ingredientmaster')\n return db", "title": "" }, { "docid": "f4a815af9118297f580ce6106690b9df", "score": "0.46834752", "text": "def test_ngen_with_unknown_db(runner):\n result = runner.invoke(cli.main, ['list', 'fake.'])\n assert result.exit_code == 2", "title": "" }, { "docid": "37d90dc6bdb91cbc0cf53e1ed35489fd", "score": "0.46785578", "text": "def db_for_read(self, model, **hints):\r\n if model._meta.app_label == 'virvo':\r\n return 'virvo'\r\n return None", "title": "" }, { "docid": "9c227f6c0dcc3bdd23a06f9623fb35d2", "score": "0.4678507", "text": "def test_db_host_get(self):\r\n\r\n db_path = os.path.join(self.res_dir, 'regular.sqlite')\r\n db_obj = db.DataBaseHost.get(db_path)\r\n\r\n self.assertEqual(db.DataBaseHost.get(db_path), db_obj)\r\n\r\n db_obj.close()", "title": "" }, { "docid": "c5f02e637e32b8ccef59b854bba5f422", "score": "0.46758056", "text": "def connections(self):\n tag, has_dimension = {}, False\n try:\n 
self.es_tag = pd.read_sql(\"SELECT * FROM es_connection\", con).to_dict('results')[-1]\n dimensions = pd.read_sql(\"SELECT * FROM data_connection\", con).to_dict('results')[0]\n if dimensions['dimension'] not in ['None', None]:\n has_dimension = True\n except Exception as e:\n print(e)\n return has_dimension", "title": "" }, { "docid": "4bdf84a3c72f6ab54c9d1a3335be458a", "score": "0.4670276", "text": "def load_db(self, cursor, connection):", "title": "" }, { "docid": "0560115b04f42f82e10f0b7c43c687eb", "score": "0.46691918", "text": "def _lookup_cddb(self):\n import DiscID, CDDB\n if self.cddevice: dev = DiscID.open(self.cddevice)\n else: dev = DiscID.open()\n cdid = DiscID.disc_id(dev)\n tracks = cdid[1]\n (status, info) = CDDB.query(cdid)\n if status == 200:\n (status, info) = CDDB.read(info['category'], \\\n info['disc_id'])\n elif status == 210 or status == 211:\n (status, info) = CDDB.read(info[0]['category'], \\\n info[0]['disc_id'])\n else:\n return None\n\n ret = cdinfo()\n for key in info.keys():\n if key.startswith('TTITLE'):\n n = int(re.findall(\"TTITLE([0-9]+)\", key)[0])\n ret.titles[n] = info[key].encode('ascii', errors='replace')\n elif key == 'DTITLE':\n (artist, album) = info[key].split('/')\n ret.artist = artist.strip()\n ret.album = album.strip()\n elif key == 'DYEAR':\n ret.releasedate = info[key]\n wx.LogMessage(\"cddb succeeded\")\n return ret", "title": "" }, { "docid": "cc3e31a4747600f576f65d8a3a1a9855", "score": "0.46684363", "text": "def db_for_write(self, model, **hints):\n if model._meta.app_label == 'mapdata':\n return 'msemap_db'\n return None", "title": "" } ]
c6ccb384e1ab09d3b8f9ca491cfa70c2
Fetch and format documents screen. Content is supplied from self.DATA_INDEX. App handles formatting and display
[ { "docid": "933d4bea7adbf38994604d04c98390c4", "score": "0.6130742", "text": "def setup_document(self, *args, **kwargs):\n doc_dict = self.DATA_INDEX.current_doc\n doc_idx, group_len = self.DATA_INDEX.current_index\n self.current_document_title = doc_dict['Job_Title']\n self.current_document_topics = self.format_topic_header(doc_dict)\n self.current_document_text = self.format_doc_text(doc_dict)\n self.current_document_idx = doc_idx\n self.current_document_idx_max = group_len\n Clock.schedule_once(self.highlight_doc_label, 0.5)", "title": "" } ]
[ { "docid": "792b5fddc5dab6657fbe27fdc6166895", "score": "0.63426816", "text": "def process_data(self):\n\n # Drivers License\n if self.Data.DriversLicence is not None:\n doc = Document.DriversLicense(None, self.Data.DriversLicence, self.photoImage)\n self.Documents.append(doc)\n # PASSPORT\n if self.Data.Passport is not None:\n doc = Document.Passport(None, self.Data.Passport, self.photoImage)\n self.Documents.append(doc)\n # Badge\n if self.Data.Badge is not None:\n doc = Document.Badge(None, self.Data.Badge, self.photoImage)\n self.Documents.append(doc)\n # INSURANCE\n if self.Data.Insurance is not None:\n doc = Document.Insurance(None, self.Data.Insurance, self.photoImage)\n self.Documents.append(doc)\n # CLAIM\n if self.Data.Claim is not None:\n doc = Document.InsuranceClaim(None, self.Data.Claim, self.photoImage)\n self.Documents.append(doc)\n # CONVERSATION\n if self.Data.Conversation is not None:\n self.Conversation = self.Data.Conversation.ListOfText", "title": "" }, { "docid": "b8c7ff139f5f401b50255ef3eff805fb", "score": "0.6150794", "text": "def fetch(self):\n next_batch_first_index = int(self.request.get('documentPointer', 0))\n self.request.response.setHeader('X-Theme-Disabled', 'True')\n return self.render_batch(next_batch_first_index)", "title": "" }, { "docid": "68b44b639903a0e3b0fe38b033a24ac9", "score": "0.612292", "text": "def document_retrieval(logger):\n # override args\n document_retrieval_main(args.database, 7, args.infile, args.outfile, args.path_wiki_titles, True, True)", "title": "" }, { "docid": "c6342b18bcf0c51508a3c96256c1f27a", "score": "0.5973333", "text": "def document_detail(request, id):\n\n document = SavedData.objects.get(row_id=id)\n abstract_list = []\n for paragraph in document.abstracts.split('\\n'):\n abstract_list.append(paragraph)\n\n return render(request, 'gui/document_detail.html', {\n 'document': document, 'abstracts': abstract_list})", "title": "" }, { "docid": "ca3d20faa9020d8bba14297de7585234", "score": "0.58931184", "text": "def documents(res):\n keys = ['description', 'source', 'usage']\n global gresults\n desc_translations = gresults[res]['desc_sentence_code']\n translation = {k:v for k, v in gresults[res].items() if 'segmentation' in k or 'sentence' in k or 'translation' in k}\n idiom = {k:v for k, v in gresults[res].items() if k not in translation.keys()}\n idiomtitle = idiom['name']\n for term in idiom:\n if type(idiom[term]) is AttrList:\n s = \"\\n\"\n for item in idiom[term]:\n s += item + \",\\n \"\n idiom[term] = s\n # fetch the movie from the elasticsearch index using its id\n chengyu = Idiom.get(id=res, index='idioms_search')\n chengyudic = chengyu.to_dict()\n index = 0\n description_dict = dict()\n for key, value in chengyudic.items():\n if 'translation' in key:\n translation[key] = value\n else:\n idiom[key] = value\n '''\n if key == 'description':\n print(value)\n for v in value:\n description_dict[str(index)] = v\n index += 1\n idiom = {k: v for k, v in idiom.items() if k not in translation.keys()}\n \n print(description_dict)\n '''\n # idiom['runtime'] = str(chengyudic['runtime']) + \" min\" if filmdic['runtime'] > -1 else \"Unknown\"\n return render_template('page_targetArticle.html', idiom=idiom, name=idiomtitle, keys=keys, translation=translation)", "title": "" }, { "docid": "31243f106593f9d8f7ff6d186de115f1", "score": "0.5892145", "text": "def documentsProcessing(self, personalData: PersonalData = PersonalData.all):\n\n pass", "title": "" }, { "docid": "90dbae986b4555469cf978a47ec2ba05", "score": "0.5855406", "text": 
"async def docs(self, ctx: commands.Context, *args: Optional[str]) -> None:\n\n data = db_parser.search_for_docs(\" \".join(args))\n if not data:\n return await ctx.send(f\"{ctx.author.mention} No Results Found!\")\n\n topic, descp, argums, returs, link = data\n embed = discord.Embed(title=topic, color=discord.Color.random())\n embed.add_field(name=\"Description\",\n value=f\"```\\n{descp}```\", inline=False)\n embed.add_field(name=\"Arguments\",\n value=f\"```python\\n{argums}```\", inline=False)\n embed.add_field(\n name=\"Returns\", value=f\"```python\\n{returs}```\", inline=False)\n embed.add_field(name=\"Link\", value=link, inline=False)\n await ctx.send(ctx.author.mention, embed=embed)", "title": "" }, { "docid": "03030ef76c56000a4b45898c9270674f", "score": "0.5854476", "text": "def database(request):\n\n #entries = Entry.objects.all().order_by('-rating') #Hier dus normalized?\n documents = SavedData.objects.all().order_by('-normalized_number_of_mentions')\n\n return render(request, 'gui/database.html', {'database': documents})\n # return render(request, 'interface/database.html')", "title": "" }, { "docid": "3bffabe261d821e4e8e5b5e13a955b3b", "score": "0.584822", "text": "def index(self):\n self.log.info('Fetching document ids that match the criteria...')\n doc_ids = self._get_doc_ids()\n\n self.log.info('Found documents: %d' % len(doc_ids))\n\n with ThreadPoolExecutor(max_workers=self.annotation_indexer_config.threads) as executor:\n executor.map(self._process_document, doc_ids)", "title": "" }, { "docid": "8b2676797ad1ab06ccc8a81e6732dc76", "score": "0.58295107", "text": "def doc_text_datablock():\n global api_doc_\n\n space_data = bpy.context.space_data\n\n try:\n doc_text = bpy.data.texts['api_doc_']\n space_data.text = doc_text\n doc_text.clear()\n except:\n bpy.data.texts.new(name='api_doc_')\n doc_text = bpy.data.texts['api_doc_']\n space_data.text = doc_text\n\n doc_text.write(text=api_doc_)\n return {'FINISHED'}", "title": "" }, { "docid": "874a90a3891dce0a220aa375504aa169", "score": "0.58218527", "text": "def index(self):\r\n self.subsection = 'Contents'\r\n return self.get_flatpage()", "title": "" }, { "docid": "874a90a3891dce0a220aa375504aa169", "score": "0.58218527", "text": "def index(self):\r\n self.subsection = 'Contents'\r\n return self.get_flatpage()", "title": "" }, { "docid": "874a90a3891dce0a220aa375504aa169", "score": "0.58218527", "text": "def index(self):\r\n self.subsection = 'Contents'\r\n return self.get_flatpage()", "title": "" }, { "docid": "9dce775017cf295e9f2413558e9cad1f", "score": "0.5771713", "text": "def index_documents(self, data):\n\t\tbulk(self.client, actions=data)", "title": "" }, { "docid": "dcda86bb55f1f1e6b89451750c227636", "score": "0.5751059", "text": "def index():\n page = request.args.get('page', 1)\n documents = documentcloud.search(DEFAULT_SEARCH, page=page)\n return render_template('index.html', documents=documents)", "title": "" }, { "docid": "ec0baf7230eaa3ae2ecdb86646a898a9", "score": "0.57493937", "text": "def _get_doc_details(self, dbx):\n result, response = dbx.paper_docs_download(\n self.paper_id, ExportFormat.markdown)\n if response.status_code == 200:\n response.close()\n else:\n raise dropbox.exceptions.APIError\n return (result.title, result.revision)", "title": "" }, { "docid": "48f1c5ea49b6322a076aad4bc8c21fb8", "score": "0.5695987", "text": "def documents():\n documents = Document.query.all() # return a list with all values\n return render_template('documents.html', name=current_user.username, 
role=current_user.role_code, \n documents=documents) # name parameter send to html the value of the current logged_in user", "title": "" }, { "docid": "70afe7b7e7bfcae26bff735a82ac405e", "score": "0.56497496", "text": "def doc_list():\n user = g.current_user\n email = user.email\n\n #Find all the user's documents\n docs = Page.objects(email=email, resource__page_number=1)\n docs = docs.order_by('resource__title','resource__author')\n \n if not docs:\n msg = \"Couldn't find user's documents\"\n response = resource_not_found(msg)\n return response\n \n #Collect document names into a response\n works = []\n for doc in docs:\n work = {\n \"title\": doc.resource.title,\n \"author\": doc.resource.author,\n \"language\": doc.resource.language\n }\n works.append(work)\n \n response = jsonify({\"works\": works})\n return response, HTTPStatus.OK.value", "title": "" }, { "docid": "97f83c15711b740a070a3cb4a3c51f70", "score": "0.56411767", "text": "def index(self):\n return self.draw(self.requests())", "title": "" }, { "docid": "f0aa97ce39fa4d6ad81a893df92822ae", "score": "0.5613384", "text": "def documents(self):\n for index in range(self.count()):\n yield self.widget(index)", "title": "" }, { "docid": "f0aa97ce39fa4d6ad81a893df92822ae", "score": "0.5613384", "text": "def documents(self):\n for index in range(self.count()):\n yield self.widget(index)", "title": "" }, { "docid": "c65d04f9277270ff16897b1152a078ca", "score": "0.5607856", "text": "def readDocs(self):\n DataPathList = glob.glob(self.pathDocs+'*.html')\n DataPathList.sort()\n\n self.docs = []\n \n h = 0\n for docPath in DataPathList:\n f = codecs.open(docPath, 'r')\n text = get_text(f.read())\n\n self.docs.append({\n 'id': str(h),\n 'text': self.removePonctuation(re.split('\\s|\\n',text))\n })\n\n print(re.split('\\s|\\n',text))\n # print(re.split('\\s|, |\\*|\\n',text))\n\n h += 1", "title": "" }, { "docid": "ac96580a00be97566b2eef2311e380e8", "score": "0.5547341", "text": "def main():\n creds = None\n # The file token.json stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.json'):\n creds = Credentials.from_authorized_user_file('token.json', SCOPES)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=8000)\n # Save the credentials for the next run\n with open('token.json', 'w') as token:\n token.write(creds.to_json())\n\n service = build('docs', 'v1', credentials=creds)\n\n # Retrieve the documents contents from the Docs service.\n document = service.documents().get(documentId=DOCUMENT_ID).execute()\n\n document_data = [\n {\n 'text': \"Document created by MayankDoc\",\n 'type': \"watermark\",\n },\n {\n 'text': \"Growth Roadmap Review: 10:30am-11:30am\",\n 'type': \"event\",\n },\n { \n 'text': \"Growth Roadmap Review\",\n 'type': \"h1\",\n },\n {\n 'text': \"Agenda\",\n 'type': \"h2\",\n },\n {\n 'text': \"Roadmap Study Hall - 15 minutes\",\n 'type': \"p\",\n },\n {\n 'text': \"Roadmap Discussion - 20 minutes\",\n 'type': \"p\",\n },\n {\n 'text': \"Backlog Review - 20 minutes\",\n 'type': \"p\",\n },\n {\n 'text': \"Follow Ups\",\n 'type': \"h2\",\n },\n {\n 'text': \"\",\n 'type': \"check\",\n },\n {\n 'text': \"Roadmap\",\n 'type': \"h2\",\n },\n {\n 'text': 
\"Channels\",\n 'type': \"p\",\n },\n {\n 'text': \"Rose Liu\",\n 'type': \"request\",\n },\n {\n 'text': \"👍\",\n 'type': 'emoji',\n 'data': ['Mayank Jain', 'Yee Chen', 'Mike Jiao'],\n },\n {\n 'text': \"Core Growth\",\n 'type': \"p\",\n },\n {\n 'text': \"Mike Jiao\",\n 'type': \"request\",\n },\n {\n 'text': \"👍\",\n 'type': 'emoji',\n 'data': [],\n },\n {\n 'text': \"International\",\n 'type': \"p\",\n },\n {\n 'text': \"Jason Lee\",\n 'type': \"request\",\n },\n {\n 'text': \"👍\",\n 'type': 'emoji',\n 'data': [],\n },\n {\n 'text': \"SEO\",\n 'type': \"p\",\n },\n {\n 'text': \"Chuck Kao\",\n 'type': \"request\",\n },\n {\n 'text': \"👍\",\n 'type': 'emoji',\n 'data': [],\n },\n {\n 'text': \"Key Discussion\",\n 'type': \"h2\",\n },\n {\n 'text': \"Can we do anything to accelerate subreddit notifications?\",\n 'type': \"discussion\",\n },\n {\n 'text': \"Action Items\",\n 'type': \"h2\",\n },\n {\n 'text': \"\",\n 'type': \"check\",\n },\n {\n 'text': \"Signoff\",\n 'type': \"h2\",\n },\n {\n 'text': \"KD Bhulani\",\n 'type': \"signoff\",\n },\n {\n 'text': \"Vee Sahgal\",\n 'type': \"signoff\",\n },\n {\n 'text': \"Yee Chen\",\n 'type': \"signoff\",\n }\n ]\n\n requests = []\n\n for item in document_data:\n docs_item = translate_to_doc(item)\n requests += docs_item\n\n print(requests[::-1])\n\n result = service.documents().batchUpdate(\n documentId=DOCUMENT_ID, body={'requests': requests[::-1]}).execute()\n\n print('The title of the document is: {}'.format(document.get('title')))", "title": "" }, { "docid": "3e99ad10bc90a3e0e0437d86dc2bdd01", "score": "0.5540074", "text": "def documents(self):\n logging.info(\"Extracting docs from [%s]\", self._wikinews_archive)\n file_list = glob.glob(self._wikinews_archive)\n assert file_list, self._wikinews_archive\n for archive in file_list:\n with bz2file.open(archive, \"rt\", encoding=\"utf-8\", errors=\"strict\") as xf:\n # One line of json as produced by wikiextractor.\n for line in xf:\n record = json.loads(line)\n\n # Extract page title.\n title = html.unescape(record[\"title\"])\n curid = record[\"id\"] # pageid, e.g. 73052\n revid = record[\"revid\"] # revision, e.g. 730271\n url = record[\"url\"] # e.g. https://de.wikinews.org/wiki?curid=73052\n logging.debug(\"Got title: %s\", title)\n\n # Skip pages that don't have text.\n wiki_doc = record.get(\"text\")\n if not wiki_doc:\n self._counter[\"no text\"] += 1\n self._filtered_no_text.append(url)\n logging.debug(\"Skip: no text element\")\n continue\n\n # Apply manual fixes.\n wiki_doc = _apply_wiki_overrides(wiki_doc, url)\n\n # Create internal document identifier.\n docid = f\"{self._language}-{curid}\"\n logging.debug(\"Found (%s): %s\", docid, title)\n self._counter[\"found\"] += 1\n\n yield docid, title, url, curid, revid, wiki_doc", "title": "" }, { "docid": "7345e6b67d4c52977714a913deed0b8b", "score": "0.55391085", "text": "def render_batch(self, first):\n items = self.items.get_batch(first, self.batchsize)\n if items:\n return self.previews_template(\n previews=map(self.items.get_infos_for, items))\n else:\n # We have to return an empty string if we have no more documents\n # to render. 
Otherwise plone.protect will log a error-warning:\n # WARNING plone.protect error parsing dom, failure to add csrf\n # token to response.\n return ''", "title": "" }, { "docid": "27c60046b0ad130539897a9c11716c5f", "score": "0.5526614", "text": "def fetch(self):\n self.request.response.setHeader('X-Theme-Disabled', 'True')\n # The HTML stripped in order to have empty response content when\n # there are no tags at all, so that diazo does not try to\n # parse it.\n if int(self.request.get('documentPointer', 0)) >= self.number_of_documents():\n # We have to return an empty string if we have no more documents\n # to render. Otherwise plone.protect will log a error-warning:\n # WARNING plone.protect error parsing dom, failure to add csrf\n # token to response\n return ''\n return self.previews_template().strip()", "title": "" }, { "docid": "84eadb4d5c6c853026c26c372024a1c0", "score": "0.5504719", "text": "def gener_doc(dataset):\n # check or create doc folder\n folder = get_dataset_folder(dataset.dataset_id) + '/docs'\n if not os.path.exists(folder):\n os.makedirs(folder)\n os.makedirs(folder + '/_build')\n os.makedirs(folder + '/_static')\n os.makedirs(folder + '/_templates')\n\n # generate conf.py\n render('conf.txt', folder + '/conf.py', dataset=dataset)\n render('make.bat', folder + '/make.bat', dataset=dataset)\n render('makefile.txt', folder + '/Makefile', dataset=dataset)\n\n # generate index\n render('index.rst', folder + '/index.rst', dataset=dataset)\n\n # dataset data and features\n search = get_search_rounds(dataset.dataset_id)\n if len(search) > 0:\n best = get_best_models(dataset.dataset_id)\n best_pp = get_best_pp(dataset.dataset_id)\n # separate models (level 0) from ensembles (level 1)\n best1 = [b for b in best if b['level'] == 1]\n best2 = [b for b in best if b['level'] == 2]\n print(len(best1), len(best2))\n print(best1[:2])\n render('dataset.rst', folder + '/dataset.rst', dataset=dataset, best1=best1, best2=best2, best_pp=best_pp,\n n_searches1=len(search[search.level == 1]),\n n_searches2=len(search[search.level == 2]))\n\n # then for the best rounds\n N_ROUNDS = 5\n for round_id in list([b['round_id'] for b in best1[:N_ROUNDS]]) + list([b['round_id'] for b in best2[:N_ROUNDS]]):\n round = search[search.round_id == int(round_id)].to_dict(orient='records')[0]\n pipeline = [s for s in round['pipeline'] if s[0] not in ['NO-SCALE', 'PASS']]\n params = get_round_params(search, round_id)\n features = get_feature_importance(dataset.dataset_id, round_id)\n render('round.rst', folder + '/round_%s.rst' % round_id, dataset=dataset, round=round,\n pipeline=pipeline, features=features, params=params, cols=params.keys())\n else:\n # return render_template('dataset.html', dataset=dataset, n_searches1=0)\n render('dataset.rst', folder + '/dataset.rst', dataset=dataset, n_searches1=0)\n\n # then generate html and pdf with make\n if sys.platform == 'linux':\n subprocess.call(['sh', '../scripts/gen_doc.sh', os.path.abspath(get_dataset_folder(dataset.dataset_id)+'/docs')])\n else:\n os.system('call ../scripts/gen_doc ' + os.path.abspath(get_dataset_folder(dataset.dataset_id)+'/docs'))\n\n # generate zip file of the html site\n with zipfile.ZipFile(get_dataset_folder(dataset.dataset_id) + '/doc.zip', 'w') as z:\n root = get_dataset_folder(dataset.dataset_id) + '/docs/_build/html/'\n for dir in ['', '_static/', '_images/', '_sources/']:\n for f in glob.glob(root + dir + '*.*'):\n z.write(f, dataset.dataset_id + '/' + dir + os.path.basename(f))", "title": "" }, { "docid": 
"47c1681b45562e77bd47a79588dbfb7e", "score": "0.5497769", "text": "def get_results():\n documents = business_layer.BusinessLayer().get_results(request.args)\n return render_template('titles.html', documents=documents)", "title": "" }, { "docid": "85b2a3a202118eceb26728f5dabeb650", "score": "0.549089", "text": "def get_data():\n return [\n\t\t{\n\t\t\t\"label\": _(\"OCR Read\"),\n\t\t\t\"items\": [\n\t\t\t\t{\n\t\t\t\t\t\"type\": \"doctype\",\n\t\t\t\t\t\"name\": \"OCR Read\",\n\t\t\t\t\t\"description\": _(\"OCR Read\"),\n\t\t\t\t}\n\t\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"label\": _(\"OCR Import\"),\n\t\t\t\"items\": [\n\t\t\t\t{\n\t\t\t\t\t\"type\": \"doctype\",\n\t\t\t\t\t\"name\": \"OCR Import\",\n\t\t\t\t\t\"description\": _(\"OCR Import\"),\n\t\t\t\t}\n\t\t\t\t]\n\t\t}\n ]", "title": "" }, { "docid": "f1c92d9525875014e93a9664c3d37d4f", "score": "0.54637945", "text": "def load_document(self):\n pass", "title": "" }, { "docid": "03108348e8effdc7a8f959723fc822fa", "score": "0.54457164", "text": "def documentations():\n return render_template('documentations.html')", "title": "" }, { "docid": "95528f562106ea181314684df03a5059", "score": "0.54443765", "text": "def get_all_docs(self):\n request = urllib2.Request(self.url_address +'_all_docs')\n request.get_method = lambda : 'GET'\n request.add_header('Content-Type', 'text/plain')\n url = self.opener.open(request)\n return json.loads(url.read())", "title": "" }, { "docid": "d1e12668f86567450fb00a986101e681", "score": "0.54393756", "text": "def get_data(self):\n\n self.code_names, self.categories = self.app.get_data()\n cur = self.app.conn.cursor()\n sql = \"select owner from code_image union select owner from code_text union select owner from code_av\"\n cur.execute(sql)\n result = cur.fetchall()\n self.coders = [\"\"]\n for row in result:\n self.coders.append(row[0])\n cur.execute('select id, length(fulltext) from source where (mediapath is Null or substr(mediapath,1,5)=\"docs:\")')\n self.file_summaries = cur.fetchall()", "title": "" }, { "docid": "78ebb6d8137ed79dddce842297ec7844", "score": "0.5438988", "text": "def pull( request ) :\n\n log = logging.getLogger(__name__)\n\n log.debug( 'pulls the database content for an entry' )\n\n idx = request.GET.get('id', 'None')\n\n if idx == 'None' : return JsonResponse({'error_flag':True})\n\n obj = models.Element.objects.get( id=int(idx) )\n document_objects = models.Document.objects.filter(element=obj)\n\n context = { 'id' : idx,\n 'name': obj.name,\n 'description' : obj.description,\n 'category' : obj.category.id,\n 'document' : list(document_objects.values_list( 'id',\n 'type__name',\n 'author__username',\n 'link',\n 'date')),\n 'error_flag' : False}\n\n print context\n return JsonResponse(context)", "title": "" }, { "docid": "f0224254bb37161210cb0afaaa062fe1", "score": "0.54367024", "text": "def _process_document(self, src_doc_id):\n\n self.log.info('Processing document with id: ' + src_doc_id)\n doc = self.annotation_indexer_config.source_indexer.get_doc(src_doc_id)\n\n\n # check whether there is document content to process\n if (isinstance(doc, dict) and (self.annotation_indexer_config.source_text_field not in doc.keys() or doc[self.annotation_indexer_config.source_text_field] is None)) or \\\n len(doc[self.annotation_indexer_config.source_text_field]) < self.MIN_TEXT_LEN:\n self.log.info('- skipping: no content')\n return\n \n try:\n # check whether the document has been already processed\n if self.annotation_indexer_config.skip_doc_check and self._document_already_processed(doc):\n 
self.log.info('doc id : ' + str(src_doc_id) + ' - skipping: document already processed')\n return\n \n # get the text\n doc_text = doc[self.annotation_indexer_config.source_text_field]\n \n # query the NLP service and retrieve back the annotations\n self.log.info('- querying the NLP service')\n \n nlp_response = self.annotation_indexer_config.nlp_service.query(text=doc_text)\n\n self.log.info(\"Finished processing NLP for document with id: \" + src_doc_id)\n\n if \"result\" in nlp_response.keys():\n result = nlp_response[\"result\"]\n\n if 'annotations' not in result.keys() or result['annotations'] is None or result is None:\n self.log.error(\" - no annotations available in the NLP result payload\")\n return \n\n if 'entities' not in result[\"annotations\"].keys() or result[\"annotations\"][\"entities\"] is None :\n self.log.error(\" - no annotation entities available in the NLP result payload\")\n return\n\n result = result['annotations']['entities']\n \n elif \"entities\" in nlp_response.keys():\n # Entities are present alone only when using GATE-NLP MODE ENDPOINT\n if nlp_response[\"entities\"] is not None:\n result = nlp_response[\"entities\"]\n else:\n self.log.error(\" - no annotation entities available in the NLP result payload\")\n return\n\n elif \"result\" not in nlp_response.keys() or \"entities\" not in nlp_response.keys():\n self.log.error(\" - no result payload returned from NLP service\")\n return\n\n if self.annotation_indexer_config.nlp_service.use_bulk_indexing:\n self._index_annotations_bulk(result, doc, src_doc_id)\n else:\n self._index_annotations(result, doc, src_doc_id)\n \n except Exception as e:\n self.log.error(repr(e))", "title": "" }, { "docid": "76433654d516e453aadfb4b412e834bb", "score": "0.54329705", "text": "def docs(self, request):\n return self.doc.generate()", "title": "" }, { "docid": "43e19798b1d61d4d1834ae1278fbee6f", "score": "0.54181135", "text": "def _fetch_docs(self, user, slice_data=True, format_docs=True, *args, **kwargs):\n query = self._construct_query(*args, **kwargs)\n docs = self.model.objects.find_by_user(user, **query)\n if slice_data:\n start = request.args.get('start', 0)\n limit = request.args.get('limit', DEFAULT_LIMIT)\n docs = docs[start:start + limit]\n if format_docs:\n docs = self._format_multiple_docs(docs)\n return docs", "title": "" }, { "docid": "3ab3ca959c96b98d815a2ebe5a523678", "score": "0.5410954", "text": "def document(document_id):\n return render_template('document.html', document_id=document_id)", "title": "" }, { "docid": "c3ff7964c2c4336a5e0d2b3036465aa0", "score": "0.5407521", "text": "def _ListAllDocuments(self):\n feed = self.gd_client.GetDocumentListFeed()\n self._PrintFeed(feed)", "title": "" }, { "docid": "f704914d586e2c74825f087aa4672fce", "score": "0.5399946", "text": "def action_show_documents(self):\n ids = []\n approver = self.mapped('approver_ids').filtered(\n lambda approver: approver.user_id == self.env.user\n )\n print(approver, '==============approver')\n print(approver.approve_attachment_id,'===========approve_attachment_id')\n print(approver.refused_attachment_id,'===========refused_attachment_id')\n print(approver.cancel_attachment_id,'===========cancel_attachment_id')\n\n if approver.approve_attachment_id:\n ids.append(approver.approve_attachment_id.id)\n if approver.refused_attachment_id:\n ids.append(approver.refused_attachment_id.id)\n if approver.cancel_attachment_id:\n ids.append(approver.cancel_attachment_id.id)\n print(ids, '==============ids')\n return {\n 'name': _('Documents'),\n 'type': 
'ir.actions.act_window',\n 'view_mode': 'tree,form',\n 'view_id': False,\n 'res_model': 'ir.attachment',\n 'domain': [('id', 'in', ids)]\n }", "title": "" }, { "docid": "3eb126e1f1e4160d610ca9b6f059d1c9", "score": "0.5393111", "text": "def parse_docs_from_page(self, page_url: str, page_text: str) -> Iterable[Document]:\n\n # parse html response\n page2 = requests.get(page_url)\n soup2 = bs4.BeautifulSoup(page2.content, 'html.parser')\n doc_num = []\n pdf = []\n doc_title = []\n datelist = []\n doctype = []\n parsed_docs = []\n webpart2 = soup2.find(\"div\", {\"class\": \"skin-pane2 col-md-8\"})\n meta = webpart2.find_all(\"div\")\n for row in meta:\n if ((remove_html_tags((str(row))).isspace()) or not remove_html_tags((str(row)))):\n continue\n for cell in row.find_all('p'):\n # print(cell.find(\"strong\"))\n words = ''\n links = cell.find_all(\"a\")\n link_list = list(links)\n # print(links)\n nums = []\n pdf_links = [link['href'] for link in link_list if \"pdf\" in link['href'] or \"aspx\" in link['href']]\n if not pdf_links:\n continue\n pdf.append(str(pdf_links[0]))\n # print(pdf_links)\n words = remove_html_tags((str(cell.find(\"strong\")))).encode('ascii', 'ignore').decode('ascii').lstrip(\n \" \").rstrip(\" \")\n words = \" \".join(words.split())\n # print(words)\n # else:\n # print(cell)\n doc_num.append(words)\n pdf.append(str(pdf_links[0]))\n title = [text for text in cell.find_all(text=True) if text.parent.name != \"strong\"]\n doc_title.append(title)\n pub_links = []\n [pub_links.append(x) for x in pdf if x not in pub_links]\n document_name = []\n [document_name.append(x) for x in doc_num if x not in document_name]\n document_type = []\n [document_type.append(' '.join(x.split()[0:2])) for x in document_name]\n document_number = []\n [document_number.append(' '.join(x.split()[2:])) for x in document_name]\n document_title = []\n [document_title.append(x) for x in doc_title if x not in document_title]\n document_title = [item for sublist in document_title for item in sublist]\n document_title = [str(item).encode('ascii', 'ignore').decode('ascii').lstrip(\" \").rstrip(\" \") for item in\n document_title]\n final = list(itertools.zip_longest(document_type, document_number, document_title, pub_links))\n final = [list(x) for x in final]\n for item in final:\n doc_name = item[0]+' '+item[1]\n if (item[2] is None):\n doc_title=\"\"\n else:\n doc_title = item[2]\n doc_num = item[1]\n doc_type = item[0]\n publication_date = \"N/A\"\n if item[3].startswith(\"https\"):\n cac_login_required=True\n url = item[3]\n url = url.replace(\" \",\"%20\")\n else:\n cac_login_required=False\n url = \"https://www.usar.army.mil\"+item[3]\n url = url.replace(\" \",\"%20\")\n pdf_di = DownloadableItem(doc_type='pdf', web_url=url)\n version_hash_fields = {\n \"item_currency\": str(url).split('/')[-1], # version metadata found on pdf links\n \"document_title\": doc_title.strip(),\n \"document_number\": doc_num.strip()\n }\n if (str(doc_type).startswith(\"USAR\") == False):\n doc_title = doc_name\n doc_num = \"\"\n doc_type = \"USAR Doc\"\n version_hash_fields = {\n \"item_currency\": str(url).split('/')[-1], # version metadata found on pdf links\n \"document_title\": doc_title.strip(),\n \"document_number\": doc_num.strip()}\n\n doc = Document(\n doc_name=doc_name.strip(),\n doc_title=re.sub('\\\\\"', '', doc_title),\n doc_num=doc_num.strip(),\n doc_type=doc_type.strip(),\n publication_date=publication_date,\n cac_login_required=cac_login_required,\n crawler_used=\"Army_Reserve\",\n 
source_page_url=page_url.strip(),\n version_hash_raw_data=version_hash_fields,\n downloadable_items=[pdf_di]\n )\n\n parsed_docs.append(doc)\n return parsed_docs", "title": "" }, { "docid": "b1df67cc8547c85426ee5fc1cb079396", "score": "0.53898543", "text": "def do_GET(self):\n\n try:\n uri = None\n query_string = None\n if self.path and self.path[0] == '/':\n uri, sep, query_string = self.path[1:].partition('?')\n \n tab = self.server.window.get_tab_from_location(Gio.File.new_for_uri(uri))\n if tab:\n buf = tab.get_view().get_buffer()\n md_text = buf.get_text(buf.get_start_iter(), buf.get_end_iter(), False)\n try:\n md_html = markdown.markdown(md_text, self.server.markdown_extensions)\n except NameError:\n self.wfile.write(\n \"Python-Markdown not installed. Please install \"\n \"before using this plugin. For up-to-date code, \"\n \"install from https://github.com/waylan/Python-Markdown\")\n return\n self.wfile.write(\"<head>\")\n self.wfile.write(get_style(self.css_path.format(self.general_css)))\n highlight_css = None\n if self.server.settings:\n highlight_css = get_style(self.css_path.format(\n self.server.settings.get_string('code-theme')))\n if not highlight_css:\n highlight_css = get_style(\n self.css_path.format(self.default_highlight_css))\n if query_string: # Override code-theme if provided in query\n params = urlparse.parse_qs(query_string)\n for css_name in params.get('style', []):\n highlight_css = get_style(self.css_path.format(css_name))\n if highlight_css:\n break\n self.wfile.write(highlight_css)\n self.wfile.write(\"</head>\")\n self.wfile.write(md_html)\n else:\n self.wfile.write(\"Could not match file {0}.\".format(uri))\n except Exception as err:\n self.wfile.write(\"Exception occurred while rendering: \" + str(err))", "title": "" }, { "docid": "1d346dc4722e86adf58cf71fd840de9b", "score": "0.53839433", "text": "def view(self, files):", "title": "" }, { "docid": "185eba95491f7916c9d7f81310623e34", "score": "0.5372585", "text": "def GetDocument(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "2e9660cfa662d730db8123ba754f479e", "score": "0.53709745", "text": "def loadDocument(self, filename):\n \n #for index in range(self.mdiArea.count()):\n \n # document = self.mdiArea.widget(index)\n # if document:\n # if filename == document.filename:\n # self.mdiArea.setCurrentIndex(index)\n # document.reload()\n # return\n # Else load from file and create new document tab.\n\n self.statusBar.showMessage(self.tr(\"Loading...\"), 2500)\n\n document = Document(filename, self)\n index = self.mdiArea.addDocument(document)\n #self.mdiArea.setCurrentIndex(index)\n index.show()\n\n # After loading a conenction file, it is possible to refresh the current module.\n self.refreshAct.setEnabled(True)\n self.statusBar.showMessage(self.tr(\"Successfully loaded file\"), 2500)\n\n # Enable close action\n #self.closeAct.setEnabled(self.mdiArea.count())", "title": "" }, { "docid": "9c638ea0d31fcdba25e946363a273ecb", "score": "0.53628904", "text": "def onOpen(self):\n \n if self.document_type == 1:\n\n my_list=[]\n #self.actionUpdate.triggered.connect(self.onEdit)\n fileName = \"VAS\"\n the_doc = VAS_view(self)\n self.count = self.count +1 \n the_doc.fileName = fileName + str(self.count)\n sub = self.mdiArea.addSubWindow(the_doc)\n sub.show()\n \n #self.loadDocument(fileName)\n\n if self.document_type == 2:\n fileName = \"VCCP\"\n the_doc = VCCP_view(self)\n self.count = self.count + 1 \n the_doc.fileName = fileName + str(self.count) \n sub = self.mdiArea.addSubWindow(the_doc)\n sub.show()\n 
#self.loadDocument(fileName)\n\n if self.document_type == 3:\n fileName = 'VCRH'\n the_doc = VCRH_Edit(self)\n self.count = self.count + 1 \n the_doc.fileName = fileName + str(self.count) \n sub = self.mdiArea.addSubWindow(the_doc)\n sub.show()\n\n if self.document_type == 4:\n fileName = 'VCRLD'\n the_doc = VCRLD_Edit(self)\n self.count = self.count + 1 \n the_doc.fileName = fileName + str(self.count) \n sub = self.mdiArea.addSubWindow(the_doc)\n sub.show()\n\n if self.document_type == 5:\n fileName = 'VProj'\n the_doc = VProj_Edit(self)\n self.count = self.count + 1 \n the_doc.fileName = fileName + str(self.count) \n sub = self.mdiArea.addSubWindow(the_doc)\n sub.show()\n\n if self.document_type == 6:\n fileName = 'EditLayers'\n the_doc = RPE_Edit(self)\n self.count = self.count + 1 \n the_doc.fileName = fileName + str(self.count) \n sub = self.mdiArea.addSubWindow(the_doc)\n sub.show()\n\n if self.document_type == 7:\n fileName = 'Split_Section'\n the_doc = splitSections(self)\n self.count = self.count+1\n the_doc.fileName = fileName + str(self.count) \n sub = self.mdiArea.addSubWindow(the_doc)\n sub.show()\n\n if self.document_type == 8:\n fileName = 'Tweak_Section'\n the_doc = tweakSections(self)\n self.count = self.count+1\n the_doc.fileName = fileName + str(self.count) \n sub = self.mdiArea.addSubWindow(the_doc)\n sub.show()\n\n if self.document_type == 9:\n fileName = 'Move_Section'\n the_doc = moveSections(self)\n self.count = self.count+1\n the_doc.fileName = fileName + str(self.count) \n sub = self.mdiArea.addSubWindow(the_doc)\n sub.show()\n\n if self.document_type == 10:\n fileName = 'Copy_Section'\n the_doc = copySections(self)\n self.count = self.count+1\n the_doc.fileName = fileName + str(self.count) \n sub = self.mdiArea.addSubWindow(the_doc)\n sub.show()", "title": "" }, { "docid": "f505aba27a6032eef6309e79387e2be2", "score": "0.53608304", "text": "def index():\n return render_template('records.html')", "title": "" }, { "docid": "681341909be97b3e0051c95525b9b5ec", "score": "0.535427", "text": "def make_docs_data_view(api_info):\n return get_schema_view(\n api_info,\n generator_class=ApiSchemaGenerator,\n public=True,\n permission_classes=(permissions.AllowAny,),\n ).without_ui(cache_timeout=get_docs_cache_timeout())", "title": "" }, { "docid": "5e4fb43f1b9662e7a24eadd19b6df66d", "score": "0.53498733", "text": "def processUserRequest(documentList):\n queryWordList = getQueryWordListFromUser()\n numOfDocs = int(input('How many documents to retrieve?\\n'))\n return displayResults(queryWordList,documentList,numOfDocs)", "title": "" }, { "docid": "3a6d7a5f4072b977e9aaea891d0c1f29", "score": "0.5340088", "text": "def index (request):\n try:\n a = Article.objects.get (title=\"HT Wiki\")\n return preview (request, a.id)\n except ObjectDoesNotExist:\n return preview (request, -1)", "title": "" }, { "docid": "4b98a2dae2b656389fd5c05f3e8611fb", "score": "0.5321335", "text": "def make_index(self, doc_name, protocol, number_ini, number_fin):\n temp = string.ascii_uppercase\n upper = temp.replace(\"N\", \"NÑ\")\n\n document = word.Documento(doc_name)\n\n for letter in upper:\n data = self.database.get_table_by_letter(protocol, letter,\n number_ini, number_fin)\n if data:\n page = []\n for doc in data:\n number = doc[0]\n body = doc[1]\n if doc[2] == \"\":\n body += \". \"\n else:\n body += \"; \"\n body += doc[2]\n body += \". 
\"\n body += doc[3]\n body += \".\"\n page.append([number, body])\n document.write_page(letter, page)", "title": "" }, { "docid": "aa1a2aad44cc8df79f8891cfaebb4069", "score": "0.5319376", "text": "def IndexReader(self, dir):\n output_details_review_name = \"details_review_output.pkl\"\n output_index_file_name =\"dictionary_index.pkl\"\n self.dictionary_reviews = save_obj(dir+\"//\"+output_details_review_name)\n self.dictionary_text = save_obj(dir+\"//\"+output_index_file_name)", "title": "" }, { "docid": "6b692e1dec4c083a6b11fdcbe76f652e", "score": "0.5308543", "text": "def documents(self):\n catalog = self.context.portal_catalog\n query = {'isWorkingCopy': 0,\n 'path': {'depth': 1,\n 'query': '/'.join(self.context.getPhysicalPath())},\n 'portal_type': ['opengever.document.document',\n 'ftw.mail.mail']}\n documents = catalog(query)[:10]\n\n document_list = [{\n 'Title': document.Title,\n 'getURL': document.getURL,\n 'alt': document.document_date and\n document.document_date.strftime('%d.%m.%Y') or '',\n 'css_class': get_css_class(document),\n 'portal_type': document.portal_type,\n } for document in documents]\n\n return document_list", "title": "" }, { "docid": "c94e5011fe4810ba5178b7a261cd0d67", "score": "0.53033286", "text": "def get(self, request):\n\n records = Document.objects.all()\n uploaded_files = []\n for record in records:\n uploaded_files.append(\n {\n \"id\": record.id,\n \"file_name\": str(record.document).split(\"/\")[1],\n \"uploaded_at\": record.uploaded_at,\n }\n )\n response = render(\n request,\n \"data_enrichment/list_view.html\",\n {\n \"uploaded_files\": uploaded_files,\n },\n )\n return response", "title": "" }, { "docid": "5cfcdecf1ac8caba4681a4525a2726b0", "score": "0.5300169", "text": "def user_documents(request, userid):\n logger.debug('reading ' + userid + '\\'s documents.')\n documents, remains = Document.objects.recent(userid)\n context = {}\n context['documents'] = documents\n context['hasMoreDocuments'] = remains > 0\n context['page'] = 'my'\n return render(request, 'document/mydocuments.html', context)", "title": "" }, { "docid": "e738d6ffcdbdbad1bd3f5425ddb61587", "score": "0.52995944", "text": "def load_main_page(self, domain_filter=None):\n\n unsorted_docs = {}\n req = requests.get(IHE_TF_URL)\n if req.status_code == 200:\n soup = BeautifulSoup(req.text, \"html5lib\")\n links = list(filter(lambda x: x.get(\"href\"), soup.find_all(\"a\")))\n pdf_list = list(filter(lambda x: x.get(\"href\").endswith(\".pdf\"), links))\n\n print(\"Get information about documents\")\n for link in pdf_list:\n docinfo = self.get_infos(link.text, link.get(\"href\"), domain_filter)\n unsorted_docs[docinfo[\"filename\"]] = docinfo\n print(f\"\\n{len(unsorted_docs)} documents found in IHE website.\")\n self.classify(unsorted_docs)\n for key, value in self.doc.items():\n print(f\"{key}: {len(value)} documents\")", "title": "" }, { "docid": "6d3951aadb6120d656de97b219489998", "score": "0.52897406", "text": "def displayJournalEntries(self):", "title": "" }, { "docid": "625d6c91d1ff2c5ce00d0aafae9b9905", "score": "0.52878875", "text": "def _index(self, content: Content):\n pass", "title": "" }, { "docid": "23e88f145fa38e464ddd8b649d6ed1f1", "score": "0.5277039", "text": "def doc_show(context, request):\n context = ContextWrapper(context)\n stage = context.stage\n name, version = context.project, context.version\n doc_info = get_doc_info(context, request)\n version_links = []\n latest_doc_info = get_doc_info(context, request, version='latest', check_content=False)\n if 
latest_doc_info['doc_version'] != doc_info['doc_version']:\n version_links.append(dict(\n title=\"Latest documentation\",\n url=request.route_url(\n \"docviewroot\", user=stage.user.name, index=stage.index,\n project=name, version='latest', relpath=\"index.html\")))\n try:\n stable_doc_info = get_doc_info(context, request, version='stable', check_content=False)\n if stable_doc_info['doc_version'] not in (doc_info['doc_version'], latest_doc_info['doc_version']):\n version_links.append(dict(\n title=\"Stable documentation\",\n url=request.route_url(\n \"docviewroot\", user=stage.user.name, index=stage.index,\n project=name, version='stable', relpath=\"index.html\")))\n except (HTTPFound, HTTPNotFound):\n pass\n return dict(\n title=\"%s-%s Documentation\" % (name, version),\n base_url=request.route_url(\n \"docroot\", user=stage.user.name, index=stage.index,\n project=name, version=version, relpath=''),\n baseview_url=request.route_url(\n \"docviewroot\", user=stage.user.name, index=stage.index,\n project=name, version=version, relpath=''),\n url=request.route_url(\n \"docroot\", user=stage.user.name, index=stage.index,\n project=name, version=version,\n relpath=doc_info['relpath'], _query=request.query_string),\n version_mismatch=doc_info['version_mismatch'],\n version_links=version_links,\n doc_version=doc_info['doc_version'])", "title": "" }, { "docid": "97afbf74ad4bf28f958d7300588eda06", "score": "0.52533275", "text": "def docs():\n adapters = sorted(AdapterBase.registry.keys())\n\n return render_template(\"docs.html\", adapters=adapters)", "title": "" }, { "docid": "5907cd4e7c91e248687f3841964a5c52", "score": "0.5252057", "text": "def getAllDocs(idNum):\n \n # connect to database\n conn, cursor = getConnectionAndCursor()\n\n # build SQL\n sql = \"\"\"\n SELECT *\n FROM documents\n WHERE class_num = %s\n \"\"\"\n\n # execute the query\n parameters = (int(idNum), )\n cursor.execute(sql,parameters)\n\n # get the data from the database:\n data = cursor.fetchall()\n\n # clean up\n conn.close()\n cursor.close()\n \n return data", "title": "" }, { "docid": "c16991b2bfbea818783936950dadf8d9", "score": "0.52500963", "text": "def get_document_details():\n document = business_layer.BusinessLayer().get_details(request.args.get('id'))\n return render_template('details.html', details=document)", "title": "" }, { "docid": "16a62ad6994d86f26bf0eb55568402ea", "score": "0.5246594", "text": "def document_access(paper, document):\n path = os.path.join(\"documents\", files[str(document)])\n app.logger.debug(path)\n content = open(path, \"rb\").read()\n headers = {\n \"X-TODO\": \"Last-Modified\",\n \"Content-Length\": len(content)\n }\n return Response(\n headers=headers,\n content_type=model[\"documents\"][document][\"mime_type\"],\n response=content)", "title": "" }, { "docid": "7d2eaae4b4d35c32b6922fde40ae8125", "score": "0.52339095", "text": "def documents_processor(urls, job_id, client_id):\n global workfiles\n workfiles = []\n for url in urls:\n try:\n result = api_call_manager(add_api_key(url))\n process_results(result)\n except:\n logger.error('Error - URL processing failed')\n result = json.loads(json.dumps({\"job_id\" : job_id, \"type\": \"docs\", \"data\" : workfiles, \"client_id\" : str(client_id), \"version\" : version}))\n return result", "title": "" }, { "docid": "ad05b1f9d088c6670d450085741f1bc2", "score": "0.5233528", "text": "def view_content(self, lines=None):\n\n if lines is None:\n text = self.storage.page_text(self.title)\n else:\n text = ''.join(lines)\n return self.highlight(text, 
mime=self.mime)", "title": "" }, { "docid": "b0c4c04445b1595c4bed67f098267834", "score": "0.52280146", "text": "def store_new_docs_in_db(self):\n self.status = self.model_status.PROCESSING\n self.docs_to_process = self.search_docs_in_processed_docs()\n self.length_of_docs_to_process = len(self.docs_to_process)\n if self.length_of_docs_to_process <= 0:\n return json.dumps({'status':'ERROR','user_prompt':'There are no documents to process'}) \n for doc in list(self.docs_to_process):\n self.doc_in_process = doc\n self.current_process = 'Getting paragraphs and footnotes from {}.'.format(self.doc_in_process)\n paragraphs_from_doc_status = self.get_paragraphs_from_doc(doc)\n if paragraphs_from_doc_status['status'] == 'ok':\n name_of_table, paragraphs, footnotes = paragraphs_from_doc_status['data']\n else:\n os.remove(config.TEXTS_PATH + \"/\" + self.doc_in_process)\n for doc in self.processed_docs:\n rest, ext = doc.split(\".\")\n table_name = \"[\" + rest + \"_\" + ext + \"]\"\n erase_table_and_words_for_doc(table_name)\n os.remove(config.TEXTS_PATH + \"/\" + doc)\n return paragraphs_from_doc_status\n self.current_process = 'Storing paragraphs and footnotes in database.'\n self.create_table_for_doc(name_of_table, paragraphs, footnotes)\n self.current_process = 'Storing words in database'\n self.fill_words_db(doc)\n self.processed_docs.append(doc)\n self.docs_to_process.remove(doc)\n self.status = self.model_status.DONE\n \n return {'status':'ok', 'user_prompt':''}", "title": "" }, { "docid": "4d2f64c27211418e1a53196938305a0c", "score": "0.5225968", "text": "def action_show_all_documents(self):\n ids = []\n approvers = self.mapped('approver_ids')\n for approver in approvers:\n if approver.approve_attachment_id:\n ids.append(approver.approve_attachment_id.id)\n if approver.refused_attachment_id:\n ids.append(approver.refused_attachment_id.id)\n if approver.cancel_attachment_id:\n ids.append(approver.cancel_attachment_id.id)\n return {\n 'name': _('Documents'),\n 'type': 'ir.actions.act_window',\n 'view_mode': 'tree,form',\n 'view_id': False,\n 'res_model': 'ir.attachment',\n 'domain': [('id', 'in', ids)]\n }", "title": "" }, { "docid": "a456bf4bb206ca072ee10f8c8b7c6d1b", "score": "0.5222932", "text": "def get_docs(args):\n DbSession = get_db_session()\n numbers = sort_preserve_order(args.number) # Remove duplicate arguments\n docs = []\n dne = []\n if args.is_also:\n for number in numbers:\n std = query_std(DbSession, number)\n if std is None:\n dne.append(\"STD {} does not exist.\".format(number))\n else:\n aliases = query_std_is_also(DbSession, number)\n docs.extend(aliases)\n else:\n for number in numbers:\n rfc = query_std(DbSession, number)\n if rfc is not None:\n docs.append(rfc)\n else:\n dne.append(\"STD {} does not exist.\".format(number))\n\n # Display found documents\n show_docs(sort_preserve_order(docs), args.editor, args.pager)\n # Display messages about nonexistent documents\n for msg in dne:\n print(msg)\n\n # Exit successfully\n sys.exit(0)", "title": "" }, { "docid": "aa2c17de9ea403aec14be4f8d500c66a", "score": "0.5220909", "text": "def index(content, override_body=None):", "title": "" }, { "docid": "6ee0aa568c6e76e3297047e4db12632f", "score": "0.52024424", "text": "def index(self):\n return self.render_default()", "title": "" }, { "docid": "d41eed24f367099fe6149ae00bce8bd7", "score": "0.5197253", "text": "def index(self):\n first_three_books_reviews = self.models['Review'].get_first_three_books_reviews(3)\n book_ids = [r['bookId'] for r in first_three_books_reviews]\n all_books 
= self.models['Book'].get_all_books(book_ids)\n return self.load_view('/books/index.html', first_three_books_reviews = first_three_books_reviews, all_books = all_books)", "title": "" }, { "docid": "57116bc7bc637b6894fd387e07727313", "score": "0.5196881", "text": "def documentation_view(request): \r\n context = {\r\n 'docs':docs,\r\n } \r\n return render_to_response('document.html', context, RequestContext(request))", "title": "" }, { "docid": "5c41b7441bcdbc92921a10db0d6ce1d5", "score": "0.51937556", "text": "def frontpage_content(self, primary=False):\r\n return \"<div><h2>Module %s</h2><p>Handles %s documents</p></div>\" % (module_dir, rdf_type)", "title": "" }, { "docid": "f3c680fa83d37d450bb0e6394583f945", "score": "0.5187936", "text": "def display_file(self, path, pages=None):\n\n self.master.title(\"tkDocViewer: \" + path)\n self.doc_viewer.display_file(path, pages)", "title": "" }, { "docid": "290e58446456587dab96a229d5ded81b", "score": "0.5182599", "text": "def get(self):\n result_set, total = self.execute_query()\n key_values = self.get_key_values(result_set)\n keys = key_values.keys()\n keys.sort()\n\n headers = []\n for key in keys:\n sample_value = key_values[key][0]\n headers.append({\n 'name': key,\n 'type': DataType.get(sample_value).name(),\n })\n\n entities = []\n edit_path = self.base_path() + DatastoreEditHandler.PATH\n for entity in result_set:\n attributes = []\n for key in keys:\n if entity.has_key(key):\n raw_value = entity[key]\n value = DataType.get(raw_value).format(raw_value)\n short_value = DataType.get(raw_value).short_format(raw_value)\n else:\n value = ''\n short_value = ''\n attributes.append({\n 'name': key,\n 'value': value,\n 'short_value': short_value,\n })\n entities.append({\n 'key': str(entity.key()),\n 'key_name': entity.key().name(),\n 'key_id': entity.key().id(),\n 'shortened_key': str(entity.key())[:8] + '...',\n 'attributes': attributes,\n 'edit_uri': edit_path + '?key=' + str(entity.key()) + '&kind=' + urllib.quote(self.request.get('kind')) + '&next=' + urllib.quote(self.request.uri),\n })\n\n start = self.start()\n num = self.num()\n max_pager_links = 8\n current_page = start / num\n num_pages = int(math.ceil(total * 1.0 / num))\n page_start = max(math.floor(current_page - max_pager_links / 2), 0)\n page_end = min(page_start + max_pager_links, num_pages)\n\n pages = []\n for page in range(page_start + 1, page_end + 1):\n pages.append({\n 'number': page,\n 'start': (page - 1) * num,\n })\n current_page += 1\n\n in_production = self.in_production()\n if in_production:\n kinds = None\n else:\n kinds = self.get_kinds()\n\n values = {\n 'request': self.request,\n 'in_production': in_production,\n 'kinds': kinds,\n 'kind': self.request.get('kind'),\n 'order': self.request.get('order'),\n 'headers': headers,\n 'entities': entities,\n 'message': self.request.get('msg'),\n 'pages': pages,\n 'current_page': current_page,\n 'num': num,\n 'next_start': -1,\n 'prev_start': -1,\n 'start': start,\n 'total': total,\n 'start_base_url': self.filter_url(['kind', 'order', 'order_type',\n 'num']),\n 'order_base_url': self.filter_url(['kind', 'num']),\n }\n if current_page > 1:\n values['prev_start'] = int((current_page - 2) * num)\n if current_page < num_pages:\n values['next_start'] = int(current_page * num)\n\n self.generate('datastore.html', values)", "title": "" }, { "docid": "df88b7f54247f7b2ad9e0e6f667c6c66", "score": "0.5179713", "text": "def viewDocument(self, format, path):\n self.ui.tabWidget.setStyleSheet('QTabWidget::pane {background: none;}')\n\n # 
Read document\n fh = open(path, 'r')\n data = fh.read()\n fh.close()\n\n # Create a new editor and configure with current settings\n frame = QWidget(self.ui.tabWidget)\n label = self.formatLabels[format]\n if format == self.handler.HTML:\n editor = PyQt4.QtWebKit.QWebView(frame)\n editor.setHtml(data)\n else:\n editor = LinedEditor(parent=frame)\n font = QFont(self.settings.value('appearance/previewFontFamily').toString(),\n self.settings.value('appearance/previewFontSize').toInt()[0])\n editor.setFont(font)\n editor.setPlainText(data)\n if format == self.handler.XML:\n if not os.access(path, os.W_OK):\n # Read only!\n editor.setReadOnly(True)\n label += ' (Read-only)'\n lineNumbers = self.settings.value('appearance/previewLineNumbersXml').toBool()\n self.xmlEditor = editor\n # Create callback\n self.connect(editor, SIGNAL('textChanged()'), self.xmlChanged)\n self.xml_modified = False\n else:\n lineNumbers = self.settings.value('appearance/previewLineNumbersText').toBool()\n editor.setReadOnly(True)\n editor.enableLineNumbers = lineNumbers\n self.textEditors.append(editor)\n\n # Add editor to the tab\n layout = QVBoxLayout()\n layout.setContentsMargins(1, 1, 1, 1)\n layout.addWidget(editor)\n frame.setLayout(layout) \n self.ui.tabWidget.addTab(frame, label)", "title": "" }, { "docid": "63d757188b53e65d2dcbde844c8b866c", "score": "0.5179206", "text": "def store_documents():\n pass", "title": "" }, { "docid": "0ff044d4a0845f53af0f6a85e6e42384", "score": "0.5170479", "text": "def index(self):\n files = self.get_files()\n for file in files:\n self.process_file(file)\n return None", "title": "" }, { "docid": "d0bb0f547efd37ffcc71138e51d64260", "score": "0.51645446", "text": "def retrieve_new_documents(self, limit=100):\n today = dt.datetime.combine(dt.date.today(), dt.time.min)\n\n entry_path = etree.XPath(\"//div[@class = 'SearchResult']\")\n date_path = etree.XPath(\n \"\"\"\n .//dl/dd[preceding-sibling::dt[contains(text(), 'Date') or\n contains(text(), 'Datum')]]/text()\n \"\"\")\n doc_path = etree.XPath(\n \"\"\"\n .//ul[contains(@class, 'SearchResultDoc')]/li\n /a[contains(@href, 'PDF') or contains(@href, 'HTML')]/@href\n \"\"\")\n title_path = etree.XPath(\".//h2/a[@class = 'title']/text()\")\n detail_path = etree.XPath(\".//h2/a[@class = 'title']/@href\")\n\n timestamp = int(round(time.time() * 1000))\n url_tmpl = (\"https://eur-lex.europa.eu/search.html?lang=de&qid=\"\n f\"{timestamp}&type=quick&scope=EURLEX&sortOneOrder=desc\"\n \"&sortOne=DD&locale=de&page={}\")\n\n has_unseen_documents = True\n doc_count = 0\n page = 1\n\n while (doc_count < limit) and has_unseen_documents:\n search_url = url_tmpl.format(page)\n logging.info(f\"Crawling page '{search_url}' (page {page})\")\n res = _retry_connection(search_url, \"get\")\n html_string = res.content\n tree = html.fromstring(html_string)\n\n for entry in entry_path(tree):\n if not isinstance(entry, list):\n entry = [entry]\n\n date_string = _flat_map(date_path, entry)[0]\n match = re.search(r\"(\\d+\\/\\d+\\/\\d+)\", date_string)\n\n doc_date = dt.datetime.min\n if match:\n doc_date = dt.datetime.strptime(match[1], \"%d/%m/%Y\")\n if len(_flat_map(doc_path, entry)) == 0:\n continue\n link = _make_resource_path(_flat_map(doc_path, entry)[0],\n \"https://eur-lex.europa.eu\")\n detail = _make_resource_path(_flat_map(detail_path, entry)[0],\n \"https://eur-lex.europa.eu\")\n title = _flat_map(title_path, entry)[0]\n\n doc = {\"url\": link, \"detail_url\": detail, \"date\": doc_date,\n \"title\": title, \"crawl_date\": today}\n\n 
logging.debug(f\"Process Document: {link} - {doc_date.date()}\")\n\n num_docs = self.collection.count_documents({\"url\": link})\n\n if num_docs > 0:\n logging.debug(f\"Document was crawled before: '{link}'\")\n # check whether this document had a date before the crawl\n # date, if not, break.\n duplicate_doc = self.collection.find_one({\"url\": link})\n\n if duplicate_doc[\"date\"] >= duplicate_doc[\"crawl_date\"]:\n logging.debug(\"Document date lies in the future.\"\n \" Continue...\")\n continue\n\n logging.debug(\"Break!\")\n has_unseen_documents = False\n break\n\n logging.debug(f\"Found new document: {link}.\")\n res = self.collection.insert_one(doc)\n doc_count += 1\n page += 1\n logging.info(f\"Found {doc_count} new or potentially modified docs.\")\n return doc_count", "title": "" }, { "docid": "f0216e63e586ed1464e2115ef9ac2772", "score": "0.5163617", "text": "def listDoc(self, dbName, reverse=False, startKey=0, count=-1):\r\n # Responses: {u'rows': [{u'_rev': -1825937535, u'_id': u'mydoc'}],\r\n # u'view': u'_all_docs'}, 404 Object Not Found\r\n uri = \"/%s/_all_docs\" % (dbName,)\r\n args = {}\r\n if reverse:\r\n args[\"reverse\"] = \"true\"\r\n if startKey > 0:\r\n args[\"startkey\"] = int(startKey)\r\n if count >= 0:\r\n args[\"count\"] = int(count)\r\n if args:\r\n uri += \"?%s\" % (urlencode(args),)\r\n return self.get(uri\r\n ).addCallback(self.parseResult)", "title": "" }, { "docid": "7e9d1fe5f139ac6329ad057af3865af4", "score": "0.515995", "text": "def index(self):\r\n data = []\r\n fn = PAGES / \"index.html\"\r\n if sys.version < '3':\r\n f_in = io.TextWrapper(fn.open(), encoding='utf-8')\r\n else:\r\n f_in = fn.open(encoding='utf-8')\r\n with f_in:\r\n for line in f_in:\r\n data.append(line)\r\n return data", "title": "" }, { "docid": "b00248b7a2adc46987290de0e7f3d459", "score": "0.5154607", "text": "def enrich_documents(self, limit=100):\n entry_path = etree.XPath(\"//dl[contains(@class, 'NMetadata')]/dd\")\n key_path = etree.XPath(\"normalize-space(./preceding-sibling::dt[1])\")\n value_path = etree.XPath(\"\"\"\n normalize-space(\n string-join(\n ./text() | .//*[self::span[@lang] or\n self::a[not(child::span)] or\n self::i[not(child::span)]]/text(), \"#\"\n )\n )\n \"\"\")\n\n success_count = 0\n cursor = (self.collection.find({\"metadata\": {\"$exists\": False}})\n # newest first, and limit by limit\n .sort([(\"date\", -1)]).limit(limit))\n # extract additional metadata\n for index, document in enumerate(cursor):\n logging.info(f\"Processing document number {index}...\")\n\n res = _retry_connection(document[\"detail_url\"], \"get\")\n\n tree = html.fromstring(res.content)\n\n metadata = {}\n for idx, entry in enumerate(entry_path(tree)):\n key = key_path(entry).strip(\" .:,;!?-_#\")\n val = value_path(entry).strip(\" .:,;!?-_#\").split(\"#\")\n if len(key) == 0 or key in metadata:\n key += str(idx)\n if len(val) == 0:\n continue\n metadata[key] = val\n\n document[\"metadata\"] = metadata\n # update document in the database\n result = self.collection.update_one({\"_id\": document[\"_id\"]},\n {\"$set\": document})\n success_count += result.modified_count\n return success_count", "title": "" }, { "docid": "91d2f8fa0d4a9322b56547b6dd8b95ae", "score": "0.5149818", "text": "def open(request, doc):\n userid = request.session['userid']\n member = Member.objects.get(user_id=userid)\n context = {}\n context['document'] = createDummyDocument(member, 1)[0]\n return render(request, 'document/document.html', context)", "title": "" }, { "docid": "670f1d784df2703a85f152d3389fa018", 
"score": "0.51390713", "text": "def update(self):\n\n row = self.parent.get_row_by_file(self.filepath)\n\n # update results\n self.display_results = [\n self.filepath,\n self.parent_folder,\n self.filename,\n self.inspection.get_display('timecode_start'),\n self.inspection.get_display('content_start_timecode'),\n self.inspection.get_display('framecount'),\n self.inspection.get_display('full_duration'),\n self.inspection.get_display('content_duration'),\n self.inspection.get_display('slate'),\n self.inspection.get_display('black_at_tail'),\n self.inspection.get_display('slate_key_number'),\n self.inspection.get_display('op48_audio'),\n self.inspection.get_display('op59_audio'),\n self.inspection.get_display('audio_peak'),\n self.inspection.get_display('resolution'),\n self.inspection.get_display('fps'),\n self.inspection.get_display('video_bitrate'),\n self.inspection.get_display('video_codec'),\n self.inspection.get_display('audio_codec'),\n self.inspection.get_display('audio_bitrate'),\n self.inspection.get_display('audio_sample_rate'),\n self.inspection.get_display('slate_date'),\n self.inspection.get_display('slate_aspect'),\n self.inspection.get_display('slate_duration'),\n ]\n\n for col, v in enumerate(self.display_results):\n self.parent.dataview.SetValue(v, row, col)", "title": "" }, { "docid": "14edc14e7de3f9c80bca3f1efda45568", "score": "0.51333195", "text": "def open_document(self):\n\n # Read the DXF file\n document = readfile(self.dxf_path)\n modelspace = document.modelspace()\n # Convert DXF to pyleecan objects\n self.line_list = dxf_to_pyleecan_list(modelspace)\n # Display\n self.selected_list = [False for line in self.line_list]\n self.update_graph()", "title": "" }, { "docid": "b319ba6a16ba660df2d32315964fbda6", "score": "0.513093", "text": "def __build_document(self):\n with open('consolidated_nyt.tsv') as r:\n self.documents = r.read().splitlines()\n self.documents = preprocess_documents(self.documents)\n self.number_of_documents = len(self.documents)\n self.vocabulary = corpora.Dictionary(self.documents)\n self.vocabulary_size = len(self.vocabulary)\n print(\"Number of documents:\" + str(len(self.documents)))\n print(\"Vocabulary size:\" + str(self.vocabulary_size))", "title": "" }, { "docid": "0f3be98afefdaec1996913b2b1e625b2", "score": "0.5124302", "text": "def main():\n\tstc.html(HTML_BANNER)\n\t#menu = [\"Home\",\"About\"]\n\tst.markdown(\n f\"\"\"\n<style>\n .reportview-container .main .block-container{{\n max-width: {1500}px;\n padding-top: {1}rem;\n padding-right: {1}rem;\n padding-left: {1}rem;\n padding-bottom: {1}rem;\n }}\n \n</style>\n\"\"\",\n unsafe_allow_html=True,\n )\n\t\n\n\n\tchoice = \"Home\"#st.sidebar.selectbox(\"Menu\",menu)\n\n\tif choice == 'Home':\n\t\tst.subheader(\"Text Analysis\")\n\t\traw_text = st.text_area('Enter Text Here')\n\t\t\n\n\t\tif ( len(raw_text) > 2 & st.button(\"Analyze\")):\n\t\t\tcol1,col2,col3 = st.beta_columns(3)\n\t\t\tprocess_text = nfx.remove_stopwords(raw_text)\n\t\t\twith col1:\n\n\n\t\t\t\twith st.beta_expander(\"Preview Tagged Text\"):\n\t\t\t\t\ttagged_docx = generate_tags(raw_text)\n\t\t\t\t\tprocessed_tag_docx = mytag_visualizer(tagged_docx)\n\t\t\t\t\tstc.html(processed_tag_docx,scrolling=True)\n\n\t\t\t\twith st.beta_expander(\"Plot Mendelhall Curve\"):\n\t\t\t\t\tplot_mendelhall_curve_2(raw_text)\n\n\n\t\t\t\twith st.beta_expander(\"NER\"):\n\t\t\t\t\t\n\t\t\t\t\t# st.write(most_common_tokens)\n\t\t\t\t\tdocx = nlp(raw_text)\n\t\t\t\t\thtml = displacy.render(docx,style=\"ent\")\n\t\t\t\t\thtml = 
html.replace(\"\\n\\n\",\"\\n\")\n\t\t\t\t\tst.write(HTML_WRAPPER.format(html),unsafe_allow_html=True)\n\t\t\t\t\t#spacy_streamlit.visualize_ner(docx, labels=nlp.get_pipe(\"ner\").labels)\n\n\t\t\twith col2:\n\t\t\t\twith st.beta_expander('Process Text'):\n\t\t\t\t\tst.write(process_text)\n\n\t\t\t\twith st.beta_expander(\"Most Common Words\"):\n\t\t\t\t\t#st.write(raw_text)\n\t\t\t\t\tplot_most_common_tokens(process_text)\n\n\t\t\t\twith st.beta_expander(\"Plot Wordcloud\"):\n\t\t\t\t\tst.info(\"word Cloud\")\n\t\t\t\t\tplot_wordcloud(process_text)\n\n\t\t\t\t\n\t\t\t\n\t\t\twith col3:\n\t\t\t\twith st.beta_expander(\"Tokenizer\"):\n\t\t\t\t\tdocx = nlp(raw_text)\n\t\t\t\t\tspacy_streamlit.visualize_tokens(docx, attrs=[\"text\", \"pos_\", \"dep_\", \"ent_type_\"])\n\n\t\t\t\twith st.beta_expander(\"Sentiment Analysis\"):\n\t\t\t\t\tsentiment(raw_text)\n\n\t\t\t\twith st.beta_expander(\"Summarizer\"):\n\t\t\t\t\tsummarizer(raw_text)\n\n\n\n\t\telif (len(raw_text) == 1 ):\n\t\t\tst.warning(\"Insufficient Text, Minimum must be more than 1\") \n\t\t\t\n\n\n\t\t\t\n\n\n\t\t\n\n\telif choice == \"About\":\n\t\tst.subheader(\"Text Analysis NLP App\")", "title": "" }, { "docid": "4e46ba637e33b4d42ba461a7a021670d", "score": "0.51233447", "text": "def docs():", "title": "" }, { "docid": "140c0ea39a5918f4999d57e96cd8b79f", "score": "0.511046", "text": "def index():\n g.lcursor.execute('SELECT * FROM articles ORDER BY id DESC LIMIT 3')\n return render_template('main_index.html',\n articles_data=g.lcursor.fetchall())", "title": "" }, { "docid": "acfe4e54b84c3e3f952b6d19592528b1", "score": "0.51098657", "text": "def home_page():\n files = list_conversions()\n return render_template('index.html', files=files)", "title": "" }, { "docid": "99549e25377f27ae27a4a43017c5b58a", "score": "0.51065785", "text": "def render_qweb_doc(self, docids, data=None):\n\t\t# If the report is using a custom model to render its html, we must use it. 
otherwise, fallback on the generic html rendering.\n\t\treport_model_name = 'report.%s' % self.report_name\n\t\treport_model = self.env.get(report_model_name)\n\t\t\n\t\tif report_model is not None:\n\t\t\tdata = report_model.make_ppt(data)\n\t\telse:\n\t\t\tdocs = self.env[self.model].browse(docids)\n\t\t\tdata = {\n\t\t\t\t'doc_ids': docids,\n\t\t\t\t'doc_model': self.model,\n\t\t\t\t'docs': docs,\n\t\t\t}\n\t\t\treturn docs.doc5()\n\t\treturn data", "title": "" }, { "docid": "4ca339f1e3c9784ae4fb07e533a3ee6e", "score": "0.51034266", "text": "def loadDocs(dataFormat, path=None, ignoreEntities=[]):\n assert dataFormat == 'bioc'\n if dataFormat == 'bioc':\n assert not path is None\n docs = loadDataFromBioC(path, ignoreEntities=ignoreEntities)\n assert isinstance(docs, list)\n for doc in docs:\n assert isinstance(doc, kindred.Document)\n return docs", "title": "" }, { "docid": "f39fe1d8d6a2f93052194cafed4f2419", "score": "0.50999314", "text": "def _get_documents(self, url):\n\n documents = []\n with urllib.request.urlopen(url) as response:\n response = response.read().decode('utf-8')\n\n for doc_raw in json.loads(response)['data']:\n\n timestamp = doc_raw['created_utc']\n datetime_utc = datetime.utcfromtimestamp(timestamp)\n datetime_est = datetime_utc.replace(tzinfo=timezone.utc).astimezone(tz=None)\n date_str = datetime_est.strftime('%Y-%m-%d')\n\n if 'permalink' in doc_raw:\n url = f'https://www.reddit.com{doc_raw[\"permalink\"]}'\n else:\n url = 'n/a'\n\n documents.append({\n 'date': date_str,\n 'author': doc_raw['author'],\n 'subreddit': doc_raw['subreddit'],\n 'score': doc_raw['score'],\n 'url': url,\n 'text': html.unescape(doc_raw['body']),\n })\n\n print(len(documents))\n\n return documents", "title": "" }, { "docid": "89e1e6aea0291a0961f22be40eb07e78", "score": "0.50967455", "text": "async def frame_data_doc(self, interaction: discord.Interaction):\n link = ('https://docs.google.com/spreadsheets/d/'\n '19UtK7xG2c-ehxdlhCFKMpM4_IHSG-EXFgXLJaunE79I')\n patch = requests.get(url='https://rivals.academy/library/pomme/data.json').json()['patch']\n embed = discord.Embed(\n url=link,\n title=f'Rivals of Aether Academy Frame Data - Updated for {patch}',\n description='Data extracted manually in-game and from dev-mode files by SNC. '\n 'Extra information provided by Menace13 and Youngblood. 
')\n embed.set_thumbnail(url='https://i.imgur.com/nMS0QPT.png')\n await interaction.response.send_message(content=link, embed=embed)", "title": "" }, { "docid": "be10cd8180521b550baf725a9dbc21d8", "score": "0.5095544", "text": "def app_documents(id):\n documents = Document.query.filter_by(application_id=id) # return a list with all values\n return render_template('app_documents.html', name=current_user.username, role=current_user.role_code,\n documents=documents) # name parameter send to html the value of the current logged_in user", "title": "" }, { "docid": "e9b63e19004eb1170330d7bfdb4a8b8c", "score": "0.5094836", "text": "def get_index():\n\n # version of the data contained in the dataset description\n db = current.db\n qry = (db.published_datasets.id == db.dataset_files.dataset_id)\n val = dataset_query_to_json(qry, \n fields = [('published_datasets', 'publication_date'), \n ('published_datasets', 'zenodo_concept_id'), \n ('published_datasets', 'zenodo_record_id'), \n ('published_datasets', 'dataset_access'), \n ('published_datasets', 'dataset_embargo'), \n ('published_datasets', 'dataset_title'), \n ('published_datasets', 'most_recent'), \n ('dataset_files', 'checksum'),\n ('dataset_files', 'filename'),\n ('dataset_files', 'filesize')])\n \n # repackage the db output into a single dictionary per file\n entries = val['entries'].as_list()\n [r['published_datasets'].update(r.pop('dataset_files')) for r in entries]\n val['entries'] = [r['published_datasets'] for r in entries]\n \n # Find the hashes\n index_hash = hashlib.md5(json(val).encode('utf-8')).hexdigest()\n \n # Use the file hash of the static gazetteer geojson\n gazetteer_file = os.path.join(current.request.folder, 'static', 'files', 'gis', 'gazetteer.geojson')\n with open(gazetteer_file) as f:\n gazetteer_hash = hashlib.md5(f.read().encode('utf-8')).hexdigest()\n\n # Use the file hash of the static locations alias csv\n location_aliases_file = os.path.join(current.request.folder, 'static', 'files', 'gis', 'location_aliases.csv')\n with open(location_aliases_file) as f:\n location_aliases_hash = hashlib.md5(f.read().encode('utf-8')).hexdigest()\n \n return dict(hashes=dict(index=index_hash, \n gazetteer=gazetteer_hash,\n location_aliases=location_aliases_hash),\n index=val)\n\n \n \n # return the dictionary - this will be json serialised by the API when it is returned\n return val", "title": "" }, { "docid": "5d0d755a343f7cf1ec9c811977d6f28d", "score": "0.5094192", "text": "def __call__(self, **kwargs):\n resp = get_docs(self._r_session,\n self.url,\n self.design_doc.encoder,\n **kwargs)\n return response_to_json_dict(resp)", "title": "" }, { "docid": "d3890f22cd8cf051498cbe2771247b1f", "score": "0.5093888", "text": "def document(self):\n return publish_doctree(self.body)", "title": "" }, { "docid": "9a91470aa975607e47481880d12436b1", "score": "0.5089864", "text": "def get(self):\n res = self.plag_dao.get_docs(page=int(request.args.get(\"page\", 1)),\n per_page=int(request.args.get(\"per_page\", 10)), all='all' in request.args)\n docs_info = dict(data=[d.to_dict() for d in res['data']], count=res['count'])\n print(docs_info)\n return Response(data=docs_info)", "title": "" }, { "docid": "39f2c6a73418a355cd0021c7fde50a91", "score": "0.50828326", "text": "def getWikidataContent(self):", "title": "" } ]
8abadc08d2224205c0322496613c7174
Returns followers/following, number of posts
[ { "docid": "268b1f4295be22b013434acc84c45951", "score": "0.0", "text": "def get_instagram_stats(self, username):\n user = self.get_user(username)\n user_info = self.get_user_profile(user[\"id\"])\n return user_info[\"data\"][\"counts\"]", "title": "" } ]
[ { "docid": "0c002c2cc4835dc347d3d1959dbbd10a", "score": "0.7136349", "text": "def followers_count(self):\n user_type = ContentType.objects.get_for_model(self)\n return UserInterest.objects\\\n .filter(interest_type='follow', content_type__pk=user_type.id, object_id=self.id)\\\n .count()", "title": "" }, { "docid": "57f18077da40654c6db6a29b111e57c2", "score": "0.7008957", "text": "def follow_index(request):\n post_list = Post.objects.filter(author__following__user = request.user).select_related(\"author\", \"group\")\n page_number = request.GET.get('page', 1)\n paginator = Paginator(post_list, 4) \n page = paginator.get_page(page_number)\n post_count = page.object_list.count()\n context = { \n \"page\": page,\n \"post_count\": post_count,\n }\n return render(request, \"follow.html\", context)", "title": "" }, { "docid": "a4a172ad8ac928b5a1c706b29bef0da5", "score": "0.6932286", "text": "def follower_count(self):\n return self.followers.count()", "title": "" }, { "docid": "4409acd21253b05c46d62a8d0844666e", "score": "0.688722", "text": "def getNumFollowers(self):\n\n return getTwitterDump(self, 'u')[u'followers_count']", "title": "" }, { "docid": "52810e0d79662222a11f7f0bdc18da74", "score": "0.6844079", "text": "def followed_posts(self):\n # the join operation creates a temporary table which merges the post and followers table data\n # the join is merged based on the condition passed as an argument ie the conditions to filter user's followed posts\n followed = Post.query.join(followers, (followers.c.followed_id == Post.user_id)).filter(followers.c.follower_id==self.id) \n\n # the user will also want to see her personal posts in the blog posts. \n # To do that the user's posts are queried and are then merged with the other posts already queried using the union operation\n # then the posts are sorted based on recency \n own = Post.query.filter_by(user_id = self.id)\n return followed.union(own).order_by(Post.timestamp.desc()) # Post.timestamp.desc() sorts the posts by time on a descending order.\n # Due to this, the most recent post will be rendered first", "title": "" }, { "docid": "10c70740fbab0709787211d81a0c031a", "score": "0.67538553", "text": "def followed_posts(self):\n # this method returns a query object and NOT the result, similar to 'lazy' = 'dynamic' in relationship\n # this is good practice since the caller can tach on additional queries\n return Post.query.join(followers,\n (followers.c.followed_id == Post.user_id)).filter(\n followers.c.follower_id == self.id).order_by(Post.timestamp.desc())", "title": "" }, { "docid": "d8579337648252898ac46ef9aa59083d", "score": "0.67458844", "text": "def follow_counts(request,userobj):\n\n following_count = Follow.objects.filter(follower = userobj).count()\n follower_count = Follow.objects.filter(following = userobj).count()\n\n\n if request.user.username == userobj.username:\n \n follow_count = {\"following_count\":following_count,\"follower_count\":follower_count}\n return follow_count\n \n return follower_count,following_count", "title": "" }, { "docid": "a0134e84dbde29970c1f5e99f27636b0", "score": "0.66715217", "text": "def get_follower_count(tweet):\n if is_original_format(tweet):\n return tweet[\"user\"][\"followers_count\"]\n else:\n return tweet[\"actor\"][\"followersCount\"]", "title": "" }, { "docid": "0417c32dd66b1a7c39f46dacc8fd0a9c", "score": "0.66394466", "text": "def num_pages(following_count):\n \n n = following_count / settings.TWITTER_FRIENDS_PAGE_LENGTH\n if following_count % settings.TWITTER_FRIENDS_PAGE_LENGTH > 0:\n n += 
1\n return n", "title": "" }, { "docid": "2abdb9fad9e9acf57df9624247dbfceb", "score": "0.6569586", "text": "def get_following_count(tweet):\n if is_original_format(tweet):\n return tweet[\"user\"][\"friends_count\"]\n else:\n return tweet[\"actor\"][\"friendsCount\"]", "title": "" }, { "docid": "87b311ad9f5e84b54be3a9ac75a6ef4c", "score": "0.65661263", "text": "def alcance(followers):\r\n pot = 0\r\n for follower in followers:\r\n pot += follower[\"followers_count\"]\r\n return pot + len(followers)", "title": "" }, { "docid": "e19f209e9f3d0753ca3e8317148d76b6", "score": "0.6421191", "text": "def follow_index(request):\n following_posts = Post.objects.filter(author__following__user=request.user)\n paginator = Paginator(following_posts, settings.PER_PAGE_INDEX)\n page_number = request.GET.get(\"page\")\n page = paginator.get_page(page_number)\n return render(\n request,\n \"follow.html\",\n {\"page\": page, \"paginator\": paginator},\n )", "title": "" }, { "docid": "f162726dd7f206979c6886feda0854b0", "score": "0.63893497", "text": "def GetNumberOfPosts(self):\n\n return len(self.all_posts)", "title": "" }, { "docid": "a04c7b699ca24cd4e8b58cdf5f0359ae", "score": "0.63271505", "text": "def followers(self):\n url = '{0}/followers'.format(self.get_url())\n return http.Request('GET', url), parsers.parse_json", "title": "" }, { "docid": "aaf747f85eefdcc4cc743ec59d47dcf4", "score": "0.6303726", "text": "def getFollowerCount(connection, userID):\n\n\t\tvariables = [userID]\n\n\t\treturn TableTools.getCount(connection, UsersTableTools._FOLLOWS_TABLE,\n\t\t\t\"flwee = :1\", variables)", "title": "" }, { "docid": "fa8de2714618d6a5ebf4b34661719621", "score": "0.62701845", "text": "def following_posts(request):\n if request.user.is_authenticated:\n\n userobj = util.get_user_obj_by_userId(request.user.id)\n posts = paginate(request,get_all_posts_of_user_network(request,userobj))\n post_liked_ids = get_myliked_post(request).values_list(\"id\",flat= True)\n return render (request, \"network/following.html\",{\n \"allposts\": posts,\n \"post_liked_ids\":post_liked_ids,\n\n })\n else:\n\n return HttpResponseRedirect(reverse(\"network:login\"))", "title": "" }, { "docid": "0d929c65aa503c6bc5deaaca6117f26c", "score": "0.62489426", "text": "def followers(github, user, pager):\n github.followers(user, pager)", "title": "" }, { "docid": "7221e8e8810ecfb407ac6f89380f4667", "score": "0.6238051", "text": "def get_followers(profile: str, session) -> Optional[float]:\n url = f'https://www.instagram.com/{profile}'\n r = session.get(url)\n if r.ok:\n instagram = BeautifulSoup(r.text, features=\"lxml\")\n scripts = instagram.select('script[type=\"text/javascript\"]')\n json_s = scripts[3]\n json_s = json_s.text\n json_s = json_s[len('window._sharedData = '):-1]\n json_s = json.loads(json_s)\n entry_data = json_s['entry_data']\n profile_page = entry_data['ProfilePage'][0]\n graph_ql = profile_page['graphql']\n user = graph_ql['user']\n user['edge_owner_to_timeline_media'] = None\n with open(f'user_information_{profile}.json', 'w') as f:\n f.write(json.dumps(user, indent=4, sort_keys=True))\n follow_count = user['edge_followed_by']\n final: float = float(follow_count['count'])\n print(f'Finished for {profile} - Followers {final}')\n return final\n print(f\"Error. 
It wasn't possible to get the following count\")\n return 0", "title": "" }, { "docid": "042a8dd59b121bc4f51586643dc6d45a", "score": "0.6230698", "text": "def get_followed_user_count( self, user ):\n count = self.filter( user=user, content_type=ContentType.objects.get_for_model( User ) ).count()\n return count", "title": "" }, { "docid": "5d1901c1c4a8d090323a621c7f4b7237", "score": "0.6223728", "text": "def followers(user):\n return {\"followers\": Follow.objects.followers(user)}", "title": "" }, { "docid": "5f17e16bbd25f3fc6d7e46546f625866", "score": "0.6220345", "text": "def follower_ids_count(self):\n return len(self._follower_ids)", "title": "" }, { "docid": "6fd8cc0b9e997787fb48ab365964cd55", "score": "0.6214273", "text": "def user_posts_statistics(user, days_ago, reverse=None):\n if reverse is None:\n reverse = False\n\n start_time = datetime.now() - timedelta(days=days_ago)\n\n hit_query = Hit.objects.filter(post__publisher__id=user.id, \n created__date__gte=start_time).values(created_date=F(\"created__date\")) \\\n .annotate(number=Count(\"pk\")).order_by(\"-created_date\")\n\n like_query = Like.objects.filter(post__publisher__id=user.id, \n created__date__gte=start_time, status=True).values(created_date=F(\"updated__date\")) \\\n .annotate(number=Count(\"pk\")).order_by(\"-created_date\")\n\n download_query = Download.objects.filter(post__publisher__id=user.id, \n created__date__gte=start_time).values(created_date=F(\"created__date\")) \\\n .annotate(number=Count(\"pk\")).order_by(\"-created_date\")\n\n created_dates = hit_query.values(\"created_date\").union(\n like_query.values(\"created_date\"), \n download_query.values(\"created_date\")\n ).values_list(\"created_date\").order_by(\"-created_date\")\n\n hits = list(map(lambda date: hit_query.get(created_date=date[0])[\"number\"]\n if hit_query.filter(created_date=date[0]).exists() else 0, created_dates.iterator()))\n\n likes = list(map(lambda date: like_query.get(created_date=date[0])[\"number\"]\n if like_query.filter(created_date=date[0]).exists() else 0, created_dates.iterator()))\n\n downloads = list(map(lambda date: download_query.get(created_date=date[0])[\"number\"]\n if download_query.filter(created_date=date[0]).exists() else 0, created_dates.iterator()))\n\n dates = list(map(lambda date_: date.fromgregorian(date=date_[0])\n .strftime(\"%Y/%m/%d\"), created_dates.iterator()))\n\n if reverse:\n hits, likes, downloads, dates = hits[::-1], likes[::-1], downloads[::-1], dates[::-1]\n\n return {\n \"dates\": dates,\n \"hits\": hits,\n \"likes\": likes,\n \"downloads\": downloads\n }", "title": "" }, { "docid": "8da1157043230ddf65c26a374a4acb34", "score": "0.6208782", "text": "def add_total_followers(G, d):\n for influencer in d:\n d[influencer]['num_follow'] = len(G.in_edges(influencer))\n return d", "title": "" }, { "docid": "8692d716cd1c411fec554de892e82767", "score": "0.61743456", "text": "def fetch_posts(self):\n requests = []\n friends = self.person.friends\n for friend in friends:\n requests.append({\n 'id': friend.fb_id,\n 'request': str(friend.fb_id) + '/posts'\n })\n progress = {\n 'from': 20, 'to': 50,\n 'description': 'Collecting your and your friends posts'\n }\n self.store_api_response('posts', requests, progress)", "title": "" }, { "docid": "9157a0b04516d3f46926c4d97b3dad68", "score": "0.61446565", "text": "def get_nb_posts(self):\n i = 0\n try:\n messages = self.html.find(class_=\"pagination\").text.split()\n while True:\n try:\n # if messages[i] == 'messages' or messages[i] == 'message':\n # check if 
'message' of 'messages'\n if (re.match(r\"message[s]?\", messages[i]) and\n messages[i - 1] != \"premier\"):\n self.messages = int(messages[i - 1])\n break\n else:\n i += 1\n except Exception:\n # print(e)\n print(\"Error in Topic get_nb_post\")\n self.messages = 1\n break\n except AttributeError:\n self.messages = 1\n return self.messages", "title": "" }, { "docid": "b984f96b7e4d3f77a1acbd91875c91a0", "score": "0.6084396", "text": "def get_post_count(self):\n return self.get_number_with_id(self.post_id)", "title": "" }, { "docid": "2af5695213583d10708481fb2fe46146", "score": "0.60823214", "text": "def get_followers(user):\n res = urllib2.urlopen(\"https://api.github.com/users/\" + user)\n soup = json.load(res)\n\n logger = logging.getLogger('github api')\n logger.info('printing','followers')\n\n print \"followers:\", soup['followers']\n print \"following:\", soup['following']", "title": "" }, { "docid": "1201b7acc5e3141fe467ea3e9ee92026", "score": "0.6074673", "text": "def getFollowingCount(connection, userID):\n\n\t\tvariables = [userID]\n\n\t\treturn TableTools.getCount(connection, UsersTableTools._FOLLOWS_TABLE,\n\t\t\t\"flwer = :1\", variables)", "title": "" }, { "docid": "c8bcdc6525e647236f9dd75882856369", "score": "0.60707694", "text": "def get_followers(username, count=5000):\n follower_usernames = get_follower_usernames(username, count=count)\n return get_users_for_usernames(follower_usernames)", "title": "" }, { "docid": "c213733bae05eaea82f6a983e3c636d5", "score": "0.6023095", "text": "def get_top_posts(\n L: Instaloader,\n context: CallbackContext,\n chat_id: int,\n messages: dict,\n profile_url: str,\n top_n=10,\n lookback_posts=100,\n) -> tuple:\n try:\n username = re.findall(\n r\"(?:(?:http|https):\\/\\/)?(?:www.)?(?:instagram.com|instagr.am)\\/([A-Za-z0-9-_.]+)\",\n profile_url,\n )[0]\n profile = Profile.from_username(L.context, username)\n\n n = 0\n posts_photos = []\n posts_videos = []\n\n for post in profile.get_posts():\n if n > lookback_posts:\n break\n\n link = f\"https://www.instagram.com/p/{post.shortcode}\"\n\n if post.is_video:\n posts_videos.append(\n {\"Post URL\": link, \"Views\": post.video_view_count,}\n )\n else:\n posts_photos.append(\n {\"Post URL\": link, \"Likes\": post.likes,}\n )\n n = n + 1\n\n if len(posts_photos) == 0 and len(posts_videos) == 0:\n raise ValueError(\"Profile is closed.\")\n\n if len(posts_photos) > 0:\n df_photos = (\n pd.DataFrame(posts_photos)\n .sort_values(\"Likes\", ascending=False)\n .head(n=top_n)\n .set_index(\"Post URL\")\n )\n df_photos[\"Likes\"] = df_photos[\"Likes\"].apply(\n lambda x: f\"{thousands_sep(x)} likes\"\n )\n\n top_photos = \"Top posts by likes:\\n\" + tabulate(df_photos, tablefmt=\"plain\")\n\n context.bot.send_message(\n chat_id=chat_id, text=top_photos, disable_web_page_preview=True\n )\n\n if len(posts_videos) > 0:\n df_videos = (\n pd.DataFrame(posts_videos)\n .sort_values(\"Views\", ascending=False)\n .head(n=top_n)\n .set_index(\"Post URL\")\n )\n df_videos[\"Views\"] = df_videos[\"Views\"].apply(\n lambda x: f\"{thousands_sep(x)} views\"\n )\n\n top_videos = \"Top videos by views:\\n\" + tabulate(\n df_videos, tablefmt=\"plain\"\n )\n context.bot.send_message(\n chat_id=chat_id, text=top_videos, disable_web_page_preview=True\n )\n\n result = True\n traceback = \"Success\"\n\n except Exception as e:\n context.bot.send_message(chat_id=chat_id, text=messages[\"error\"])\n result = False\n traceback = str(e)\n\n return result, traceback", "title": "" }, { "docid": "1b1692a0cb74128d3f824d1f8f4c6711", 
"score": "0.6010295", "text": "def followers(self):\n if not self._username:\n raise Error('microblogging.API instance must be authenticated.')\n url = self.get_url('/statuses/followers.json')\n json = self._fetch_url(url)\n data = simplejson.loads(json)\n return [User.get_from_json(x) for x in data]", "title": "" }, { "docid": "d8bcc9ebb01a69b902db975d40a894f0", "score": "0.59414184", "text": "def print_post_list(list_of_users):\n print(f'users by their posts')\n for i in range(len(list_of_users)):\n print(f'user_id = {list_of_users[i].id}, total_posts = {list_of_users[i].number_of_posts}, '\n f'list of user_posts ={list_of_users[i].post_per_likes_list}')", "title": "" }, { "docid": "f943d9feec2817d7e23853cbed99c54b", "score": "0.5936655", "text": "def get_following(self, username, num_followers, print_progress=False):\n friend_list = []\n count = 0\n for user in limit_handled(Cursor(self.api.friends, screen_name=username).items(num_followers)):\n cur_following = Format.format_username(user.screen_name)\n friend_list.append(cur_following)\n if print_progress:\n print('get_follower', count, cur_following)\n count += 1\n return friend_list", "title": "" }, { "docid": "95d95c24bb2a8201592f321a156ff9db", "score": "0.59198076", "text": "def get_people(self) -> Dict[str, int]:\r\n users = dict()\r\n\r\n for post in self.ps.search_submissions(after=self.start, subreddit=self.sub,\r\n filter=['author']):\r\n if post.author in users:\r\n users[post.author] += 1\r\n else:\r\n users[post.author] = 1\r\n\r\n for post in self.ps.search_comments(after=self.start, subreddit=self.sub,\r\n filter=['author']):\r\n if post.author in users:\r\n users[post.author] += 1\r\n else:\r\n users[post.author] = 1\r\n\r\n return users", "title": "" }, { "docid": "467ac85d518b06d9286de6db84732b6c", "score": "0.590841", "text": "def list(self, request, **kwargs):\n user = request.query_params.get('user')\n if self.queryset.filter(me=user).count() == 0:\n return Response({'status': 'no following list'})\n else:\n users_followers = Following.objects.get(me=user)\n data = {\"id\": users_followers.id, \"following\": users_followers.usernames}\n return Response(data)", "title": "" }, { "docid": "a64caf46f408cc13ed0e8e71353e1a1a", "score": "0.58928674", "text": "def count(self):\n return query.get_total_post_count(self.base_url, self.params)", "title": "" }, { "docid": "1b74eb6d00389a35daa0a7a7c551639a", "score": "0.58847", "text": "def getNewsFeed(self, userId: int) -> List[int]:\n feed = []\n for p in self.post[::-1]:\n if self.post_user[p] in self.follows[userId] or self.post_user[\n p] == userId:\n feed.append(p)\n if len(feed) == 10:\n break\n\n return feed", "title": "" }, { "docid": "b0286bc6d43f25979ef090b8b02b8878", "score": "0.5870798", "text": "def max_followers_among_all_platforms(self):\n plats = self.platforms()\n max_num_followers = 0\n for p in plats:\n if p.num_followers > max_num_followers:\n max_num_followers = p.num_followers\n return max_num_followers", "title": "" }, { "docid": "98932cd379b29a0c154bb9b2ed28e482", "score": "0.5852411", "text": "def test_get_followers_list(self):\n self.api.get_followers_list()\n self.api.get_followers_list(screen_name='twitter')", "title": "" }, { "docid": "fa89954078e93d8789337f2c5403e06a", "score": "0.5850981", "text": "def ListFollowers(self):\n try:\n i = 1\n follower_list = []\n while (True):\n param = {\"access_token\":self.personal_access_token, \"per_page\": 1, \"page\": i}\n r = requests.get(self.url_address+\"user/followers\", params = param)\n if 
(r.status_code!=200):\n raise ClientError(r.status_code)\n if (r.text == '[]'):\n break\n else:\n follower = json.loads(r.text[1:-1])\n follower_dict = {\"login\": follower[\"login\"], \"url\": follower[\"url\"]} \n follower_list.append(follower_dict)\n i = i+1 \n return json.dumps(follower_list)\n except ClientError as err:\n print(err.status)\n print(err.message)\n print(err.status, err.message)\n print(err)", "title": "" }, { "docid": "8950fc59a4207e0dd9cb78d5721acc91", "score": "0.58497524", "text": "def show_tweets_high_followed_users(output_file, follower_num):\r\n pass", "title": "" }, { "docid": "af87a02ff7dba95abf478afa73cea272", "score": "0.5844426", "text": "def get_all_count(self):\n html_text = self.get_html()\n soup = BeautifulSoup(html_text, \"lxml\")\n count_of_post = soup.find(id=self.post_id).find(\"span\", {\"class\": \"number\"}).string\n count_of_theme = soup.find(id=self.theme_id).find(\"span\", {\"class\": \"number\"}).string\n count = int(count_of_post) + int(count_of_theme)\n return count", "title": "" }, { "docid": "7602aa226400f56d1587b2f2a8b2ce4e", "score": "0.5817576", "text": "def test_get_followers_ids(self):\n self.api.get_followers_ids()\n self.api.get_followers_ids(screen_name='twitter')", "title": "" }, { "docid": "60e573d9894df3cc431486d569a76ef5", "score": "0.58127546", "text": "def get_post_count(tags):\n\n page = 1\n\n while True:\n resp = get_page(tags, page)\n\n if len(resp) == 0:\n earliest_empty_page = page\n break\n else:\n latest_populated_page = page\n\n page *= 2\n\n while (earliest_empty_page - latest_populated_page) > 1:\n page = int((latest_populated_page + earliest_empty_page) / 2)\n resp = get_page(tags, page)\n\n if len(resp) == 0:\n earliest_empty_page = page\n else:\n latest_populated_page = page\n\n return (\n (latest_populated_page - 1) * LIMIT +\n len(get_page(tags, latest_populated_page))\n )", "title": "" }, { "docid": "295d36aef624de05188846e671059461", "score": "0.5810799", "text": "def total_likes(self):\n return self.likes.count()\n #return int(3)", "title": "" }, { "docid": "d6b9eb9f30ab39daf22ab5c7b799dd9e", "score": "0.5809124", "text": "def get(self, request, *args, **kwargs):\n username = kwargs.get(\"username\")\n follower = self.get_object(username)\n followers = follower.get_following()\n return Response(self.serializer_class(followers, many=True).data)", "title": "" }, { "docid": "37528a2fe66a7367091762bb2796ba7f", "score": "0.58006597", "text": "def get_following(self, username):\n user = self.get_user(username)\n user_id = user['id']\n params = {'access_token': os.getenv('INSTAGRAM_ACCESS_TOKEN'), 'count': 100}\n url = self.api_url + 'users/' + user_id + '/follows?'\n response = requests.get(url, params=params).json()\n if response['meta']['code'] != 200:\n raise MediaMissingException('Unable to access account!')\n return response['data']", "title": "" }, { "docid": "fd515e8aa767c3bbf5e11c39a5684106", "score": "0.5780504", "text": "def get_followers(self, username):\n user = self.get_user(username)\n user_id = user['id']\n params = {'access_token': os.getenv('INSTAGRAM_ACCESS_TOKEN'), 'count': 100}\n url = self.api_url + 'users/' + user_id + '/followed-by?'\n response = requests.get(url, params=params).json()\n if response['meta']['code'] != 200:\n raise MediaMissingException('Unable to access account!')\n return response['data']", "title": "" }, { "docid": "2dbb94b03eccac94bac3aa105fbcce19", "score": "0.5778688", "text": "def get_follower_requests():\n return get_all_requests()", "title": "" }, { "docid": 
"99f63e215e3e7e1ca3dd9c57524ae9a9", "score": "0.57725054", "text": "def following(user):\n return {\"following\": Follow.objects.following(user)}", "title": "" }, { "docid": "2a67e84eff2c07b94c4a04ab793cc1a5", "score": "0.57575667", "text": "def calculate_num_likes(self, posts=None):\n\n if self.platform_name in [\"Custom\", \"Blogspot\", \"Wordpress\"]:\n return 0\n else:\n if posts:\n return posts.aggregate(Sum('engagement_media_numlikes'))['engagement_media_numlikes__sum']\n else:\n return self.posts_set.aggregate(Sum('engagement_media_numlikes'))['engagement_media_numlikes__sum']", "title": "" }, { "docid": "381ba006c45ee484b621729316336a9b", "score": "0.57518095", "text": "def followers(user):\n try:\n return Relationship.objects.get_followers_for_user(user)\n except AttributeError:\n pass", "title": "" }, { "docid": "bcb28bc1d1d8b4e1bf96776af2e41edf", "score": "0.57358927", "text": "def unfollower_ids_count(self):\n return len(self.unfollower_ids)", "title": "" }, { "docid": "f45f17ece7f7ddce2c57b89d7d12eb19", "score": "0.5726824", "text": "def get(self, request, username):\n try:\n the_user = User.objects.get(username=username)\n except User.DoesNotExist:\n raise UserNotFound\n to_check = Profile.objects.get(user_id=the_user.id)\n my_follows = to_check.w_following.all()\n serializer = self.get_serializer(my_follows, many=True)\n return Response({\"followers\": serializer.data, \"count\":\n len(serializer.data)},\n status=status.HTTP_200_OK)", "title": "" }, { "docid": "fcdd9ca1dfeecfbc75619dea8b54894f", "score": "0.5719411", "text": "def calls_authors(*args):\n calls_posts = CallsPost.objects.published()\n authors = User.objects.filter(callsposts__in=calls_posts)\n return list(authors.annotate(post_count=Count(\"callsposts\")))", "title": "" }, { "docid": "36530e41b1fd674e4fbc6d3bfdf6b903", "score": "0.5716276", "text": "def notifications_count(request):\n if 'user' not in request or request.user.is_anonymous:\n count = 0\n else:\n count = FeedEntry.objects.filter(\n user=request.user, is_read=False, category='notification',\n ).count()\n return {\n 'notifications_count': count,\n }", "title": "" }, { "docid": "625c7311f470d221efebd9f79dac725c", "score": "0.5714881", "text": "def get_follower_usernames(username, count=5000):\n return _get_friend_or_follower_usernames(FOLLOWERS, username, count)", "title": "" }, { "docid": "ad64a9be4a1bb81d069794bbcffad9d8", "score": "0.5711323", "text": "def list_private(self, request, *args, **kwargs):\n posts = []\n followees = [uf.user for uf in UserFollower.objects.filter(follower=self.request.user)]\n [posts.extend(Post.objects.filter(author=user)) for user in followees]\n posts.sort(key=lambda post: post.date)\n return Response(serializers.PostSerializer(posts, many=True, context={'request': request}).data)", "title": "" }, { "docid": "0746d9e27c6c1e43d15f6fc7d4746d08", "score": "0.57094544", "text": "def get(self, request, *args, **kwargs):\n username = kwargs.get(\"username\")\n creator = self.get_object(username)\n followers = creator.get_followers()\n return Response(self.serializer_class(followers, many=True).data)", "title": "" }, { "docid": "f59dbf4ca1658e486a7ae96312337ef8", "score": "0.57048434", "text": "def getNewsFeed(self, userId: int) -> List[int]:\n q = [(idx, tweet) for idx, tweet in self.users[userId]['tweets']]\n heapq.heapify(q)\n for followeeId in self.users[userId]['followed']:\n for idx, tweet in self.users[followeeId]['tweets']:\n if (idx, tweet) not in q:\n heapq.heappush(q, (idx, tweet))\n r = []\n while q and len(r) < 
self.numTweets:\n r.append(heapq.heappop(q)[1])\n return r", "title": "" }, { "docid": "aba357fadf166ade093f8eeb1441d9a7", "score": "0.570118", "text": "def get_followers(api,user):\n return api.followers(user)", "title": "" }, { "docid": "011d099a3d9d87fb2fbbf0fc5d1fccbf", "score": "0.56876034", "text": "def nr_user_stories(self) -> Tuple[int, List[str]]:\n raise NotImplementedError", "title": "" }, { "docid": "f61ce95420fde73aaade126299f50c5b", "score": "0.568267", "text": "def get_followers(self, username, num_followers, print_progress=False):\n follower_list = []\n count = 1\n for follower in limit_handled(Cursor(self.api.followers, screen_name=username).items(num_followers)):\n cur_follower = Format.format_username(follower.screen_name)\n follower_list.append(cur_follower)\n if print_progress:\n print('get_follower', count, cur_follower)\n count += 1\n return follower_list", "title": "" }, { "docid": "d9671557405c13d64fa38a0aa11f141b", "score": "0.5650465", "text": "def testGetAllFollowedReturnsNextPageID(self):\n time = datetime.utcfromtimestamp(1336604400)\n objectID = ObjectAPI(self.user).create(u'about')\n TagValueAPI(self.user).set({objectID: {u'username/follows': None}})\n comments = SecureCommentAPI(self.user)\n for i in range(26):\n comments.create(u'Comment', u'username', about=[u'about'],\n when=time - timedelta(minutes=i),\n url='http://example.com/comment')\n self.store.commit()\n\n result = yield self.invoke('getAllFollowed', username='username')\n result = loads(result)['result']\n self.assertEqual(1336602960, result['nextPageID'])\n self.assertEqual(25, len(result['comments']))", "title": "" }, { "docid": "132fc38061f936f8643c51b56c5bff9f", "score": "0.56404454", "text": "def get_followers(self):\n return User.find({'following': {'$in': self.categories}})", "title": "" }, { "docid": "f17e39e0406b0a8b491a922c61defc75", "score": "0.56342286", "text": "def getCount():\n return twitterdb.statuses.count()", "title": "" }, { "docid": "ab9d5d916c8246a582ee76550dd95890", "score": "0.56341815", "text": "def get_follows(self):\n follows = []\n if self._status == DataLocations.CACHED:\n follows = self.__dict__.get('follows')\n else:\n # Get the users from the feed.\n # TODO: Only works for local users.\n items = []\n if self._status == DataLocations.LOCAL:\n location = SettingsManager.get_user(self.user_id)['follows_location']\n item_els = self._get_attr_el(location, self.get_follows.binding, '//channel/item')\n follows = [_recursive_dict(item)[1] for item in item_els]\n\n follows = [User(remote_url=item['user_link']) for item in follows]\n cache_users(follows)\n return follows", "title": "" }, { "docid": "882476cbf914044b07373dd0f087d88b", "score": "0.5633867", "text": "def number_of_messages(idea):\n posts = Post.objects(ideas=str(idea.id))\n return posts", "title": "" }, { "docid": "77d9a22a559125d6de82ec702e8a4d71", "score": "0.5618872", "text": "def followed(self):\n return self.filter('followed')", "title": "" }, { "docid": "145cecf9a92b5f257316a03685b37652", "score": "0.56128067", "text": "def show_followers(request, username):\n\n try:\n user = User.objects.get(username__iexact=username)\n except User.DoesNotExist:\n raise Http404\n\n following = user.followers.following()\n\n data = {\n 'users' : following,\n 'user_profile': user\n }\n\n return render(\n request,\n templates['PROFILE_FOLLOWERS'],\n data,\n )", "title": "" }, { "docid": "214748cf295686f0568e174560438eed", "score": "0.5603917", "text": "def get_followers(self, url: str) -> Dict:\n\n user_data = 
self.get_user_info(url=url, target=\"followers\")\n\n query_url = f\"{self.BASE_URL}{self.GRAPHQL_QUERY}\"\n after = \"\"\n\n user_followers = {\n \"count\": user_data.followed_by,\n \"usernames\": list(),\n \"followers\": list(),\n }\n while True:\n params = {\n \"query_hash\": self.followers_query_hash,\n \"id\": user_data.user_id,\n \"include_reel\": False,\n \"fetch_mutual\": False,\n \"first\": 50,\n \"after\": after or \"\",\n }\n data = self._make_request(url=query_url, params=params)[\"data\"][\"user\"][\"edge_followed_by\"]\n self._extract_usernames(users=data[\"edges\"], result=user_followers[\"usernames\"])\n after = self._has_next_page(data)\n if after is None:\n break\n\n self._extract_users_by_usernames(usernames=user_followers[\"usernames\"], result=user_followers[\"followers\"])\n logging.info(msg=f'User {url} followers. Count: {len(user_followers[\"followers\"])}')\n\n return user_followers", "title": "" }, { "docid": "ba83893bb81cab812f42904d87260ae6", "score": "0.5595626", "text": "def followers(self):\n return(\n User.select().join(\n Relationship, on=Relationship.from_user\n ).where(\n Relationship.to_user == self\n )\n )", "title": "" }, { "docid": "ba83893bb81cab812f42904d87260ae6", "score": "0.5595626", "text": "def followers(self):\n return(\n User.select().join(\n Relationship, on=Relationship.from_user\n ).where(\n Relationship.to_user == self\n )\n )", "title": "" }, { "docid": "63c6c199c1bda619e5a21b11d48a83d3", "score": "0.559146", "text": "def get_usertofollow_row_count(self):\n\n return self._session.query(UserToFollow).count()", "title": "" }, { "docid": "db185a484f5db98a4a3f7c4264cadbc2", "score": "0.5589288", "text": "def followers(self):\n\n return (\n User.select().join(\n Relationship, on=Relationship.from_user\n ).where(\n Relationship.to_user == self # el usuario que inicio sesion\n )\n )", "title": "" }, { "docid": "669c2541f5d7e873b2ca2164e9c1a7db", "score": "0.55855876", "text": "def find_post_stats(post_data):\n num_posts = len(post_data)\n\n # calculate stats about post creator\n creator_id = post_data[0]['poster_id']\n num_creator_posts = 0\n for post in post_data:\n if post['poster_id'] == creator_id:\n num_creator_posts += 1\n\n num_posters = len({post['poster_id'] for post in post_data})\n\n # calculate stats for reaching certain post milestones\n topic_created_date = datetime.datetime.fromisoformat(post_data[0]['post_date'])\n post_25_delta = None\n post_50_delta = None\n if len(post_data) >= 25:\n post_25_date = datetime.datetime.fromisoformat(post_data[24]['post_date'])\n post_25_delta = str(post_25_date - topic_created_date)\n if len(post_data) >= 50:\n post_50_date = datetime.datetime.fromisoformat(post_data[49]['post_date'])\n post_50_delta = str(post_50_date - topic_created_date)\n\n post_stats = {'num_posts': num_posts,\n 'num_posters': num_posters,\n 'num_creator_posts': num_creator_posts,\n 'post_25_delta': post_25_delta,\n 'post_50_delta': post_50_delta}\n\n return post_stats", "title": "" }, { "docid": "e0874024d66ad14d809ce00b8e6bf058", "score": "0.5580903", "text": "def followers_view(request, username, *args, **kwargs):\n\n try:\n user = User.objects.get(username=username)\n except:\n return Response({\n 'details': 'Username not found'\n }, status=status.HTTP_404_NOT_FOUND)\n\n users_id_who_follow = [user.id for user in Profile.objects.get(user=user).follower.all()]\n followers = Profile.objects.filter(user__id__in=users_id_who_follow)\n serializer = ProfileSerializers(followers, context={'request':request}, many=True)\n\n 
return Response(serializer.data, status=200)", "title": "" }, { "docid": "1359561a775053646f65616ffc8093e7", "score": "0.5562729", "text": "def get_following(se, proxy: dict) -> dict:\n try:\n with se.get(_ROOT_URL + 'bookmark.php',\n params={'type': 'user'},\n headers={'User-Agent': random.choice(misc.USER_AGENT)},\n proxies=proxy,\n timeout=5) as fo_res:\n fo_html = BeautifulSoup(fo_res.text, 'lxml')\n fo_node = fo_html.find_all('div', class_='userdata')\n if not fo_node:\n raise exception.ResponseError('Cannot fetch following info.')\n\n fo_info = {ele.a['data-user_id']: ele.a['data-user_name'] for ele in fo_node}\n return fo_info\n except requests.Timeout:\n raise requests.Timeout('Timeout during getting following info.')\n except exception.ResponseError:\n raise", "title": "" }, { "docid": "cc0548d9839411a76bca1c3e789344a4", "score": "0.5547523", "text": "def download_followers_list(self, user_id, username):\n followers = []\n end_cursor = ''\n has_next_page = True\n\n self.session.headers['Referer']= f'https://www.instagram.com/{username}/followers'\n\n first_page_url = f'https://www.instagram.com/graphql/query/?query_hash=c76146de99bb02f6415203be841dd25a&' \\\n f'variables=%7B%22id%22%3A%22{user_id}%22%2C%22include_reel%22%3Atrue%2C%22' \\\n f'fetch_mutual%22%3Atrue%2C%22first%22%3A24%7D'\n\n res = self.session.get(first_page_url, headers=self.session.headers)\n self.session.headers['X-CSRFToken'] = res.cookies['csrftoken']\n\n print('getting followers ... ', end='')\n while has_next_page:\n try:\n has_next_page = bool(\n res.json()['data']['user']['edge_followed_by']['page_info']['has_next_page'])\n except KeyError as kerr:\n pass\n try:\n for node in res.json()['data']['user']['edge_followed_by']['edges']:\n followers.append(\n (node['node']['id'], node['node']['username']))\n except Exception as err:\n print()\n\n if has_next_page:\n end_cursor = res.json()[\n 'data']['user']['edge_followed_by']['page_info']['end_cursor'].replace('==', '')\n next_page = f'https://www.instagram.com/graphql/query/?query_hash=c76146de99bb02f6415203be841dd25a' \\\n f'&variables=%7B%22id%22%3A%22{user_id}%22%2C%22' \\\n f'include_reel%22%3Atrue%2C%22fetch_mutual%22%3Afalse%2C%22first%22%3A14%2C%22' \\\n f'after%22%3A%22{end_cursor}%3D%3D%22%7D'\n\n # time.sleep(randint(2,5)) # delay, prevents instagram ban\n res = self.session.get(next_page, headers=self.session.headers)\n self.session.headers['X-CSRFToken'] = res.cookies['csrftoken']\n\n else:\n print(colored('DONE', 'green'))\n\n return followers", "title": "" }, { "docid": "b4addb4162b8cdc0401c0452c3b309e8", "score": "0.55358255", "text": "def followers(self):\n if self._followers is None:\n data = yield 'users/{user}/followers'.format(user=self.username)\n self._followers = []\n for user in data:\n user_data = user.pop('user')\n date = user.pop('followed_at')\n self._followers.append(User(followed_at=date, **user_data))\n yield self._followers", "title": "" }, { "docid": "352c5074447cb46e7d80de8be1e2005a", "score": "0.55306405", "text": "def getNumLikes(self):\n try:\n friendCount = getFacebookDump(self)[u'likes']\n \n except KeyError:\n dump = json.loads(urllib.urlopen(\\\n \"https://graph.facebook.com/fql?q=SELECT%20friend_count%20FROM%20user%20WHERE%20uid=\"\\\n +self.getFacebook()).readline())\n friendCount = dump[u'data'][0][u'friend_count']\n \n return friendCount", "title": "" }, { "docid": "eb8b5c88090b5e5e4d755a20588c602d", "score": "0.55284023", "text": "def __init__(self):\n self.post = []\n self.follows = defaultdict(list)\n 
self.post_user = {}", "title": "" }, { "docid": "b293dcfd2204a7f9da73df19445c12d1", "score": "0.55264217", "text": "def getNewsFeed(self, userId):\n heap = []\n if userId in self.tweets:\n for i in self.tweets[userId]:\n heapq.heappush(heap, i)\n if len(heap) > 10:\n heapq.heappop(heap)\n if userId in self.followers:\n for i in self.followers[userId]:\n for j in self.tweets[i]:\n heapq.heappush(heap, j)\n if len(heap) > 10:\n heapq.heappop(heap)\n if not heap:\n return []\n res = []\n while heap:\n res.append(heapq.heappop(heap)[1])\n return res[::-1]", "title": "" }, { "docid": "844e66f02114db0e34be5aa7e9bf8077", "score": "0.5514456", "text": "def followers_ids(auth, **params):\n\n maxitems = params.pop(\"maxitems\", 0)\n if maxitems > 0:\n return cursor_iter(followers_ids, maxitems, auth, params)\n\n endpoint = \"https://api.twitter.com/1.1/followers/ids.json\"\n\n params.setdefault(\"count\", 5000)\n\n data, status_code, error_code = rest_call(endpoint, auth, params)\n try:\n next_cursor = data[\"next_cursor\"]\n count = len(data[\"ids\"])\n except KeyError:\n next_cursor, count = 0, 0\n\n meta = {\n \"code\": status_code,\n \"error_code\": error_code,\n \"next_cursor\": next_cursor,\n \"count\": count,\n }\n\n return data, meta", "title": "" }, { "docid": "7232970fe9a42719d88994dd51de94e4", "score": "0.5514358", "text": "def _get_followers(self, direction=\"follower\", last_user=\"\", what=\"blog\", limit=100):\n if not self.hive.is_connected():\n raise OfflineHasNoRPCException(\"No RPC available in offline mode!\")\n followers_list = []\n limit_reached = True\n cnt = 0\n while limit_reached:\n self.hive.rpc.set_next_node_on_empty_reply(False)\n if self.hive.rpc.get_use_appbase():\n query = {'account': self.name, 'start': last_user, 'type': what, 'limit': limit}\n if direction == \"follower\":\n followers = self.hive.rpc.get_followers(query, api='follow')\n if isinstance(followers, dict) and 'followers' in followers:\n followers = followers['followers']\n elif direction == \"following\":\n followers = self.hive.rpc.get_following(query, api='follow')\n if isinstance(followers, dict) and 'following' in followers:\n followers = followers['following']\n else:\n if direction == \"follower\":\n followers = self.hive.rpc.get_followers(self.name, last_user, what, limit, api='follow')\n elif direction == \"following\":\n followers = self.hive.rpc.get_following(self.name, last_user, what, limit, api='follow')\n if cnt == 0:\n followers_list = followers\n elif followers is not None and len(followers) > 1:\n followers_list += followers[1:]\n if followers is not None and len(followers) >= limit:\n last_user = followers[-1][direction]\n limit_reached = True\n cnt += 1\n else:\n limit_reached = False\n\n return followers_list", "title": "" }, { "docid": "e0532a410bafc34c6637d8ab6c0ef76e", "score": "0.55119175", "text": "def getFollowers(channel):\n\tapi = getApi()\n\ttry:\n\t\tusers = api.GetFollowers()\n\t\tf = [u.screen_name for u in users]\n\t\tsendToChannel(channel, string.join(f).replace(' ',', '))\n\texcept:\n\t\tsendToChannel(channel, \"Hurr\")", "title": "" }, { "docid": "f5debfb84f14c3a93558b9e269a55770", "score": "0.55074036", "text": "def getNewsFeed(self, userId):\n feed = self.feeds[userId][:]\n for user in self.follows[userId]:\n feed += self.feeds[user]\n feed = [x[0] for x in heapq.nlargest(10,feed,key=lambda x:x[1])]\n # feed = sorted(feed, key=lambda x: x[1], reverse=True)\n # feed = [x[0] for x in feed[:10]]\n return feed", "title": "" }, { "docid": 
"d74da5d08c78207d51782fec0e20990a", "score": "0.550385", "text": "def get_liker_frequencies(posts_likers):\n\n liker_counter = Counter()\n\n for post_id, post_likers in posts_likers.items():\n for post_liker in post_likers:\n liker_pk = str(post_liker['pk'])\n liker_counter.update({liker_pk: 1})\n\n\n return liker_counter", "title": "" }, { "docid": "d4980c63c0459b480ff2714d5176002e", "score": "0.55018276", "text": "def get_followed_by_user(self, url: str) -> Dict:\n\n user_data = self.get_user_info(url=url, target=\"followed_by_user\")\n\n query_url = f\"{self.BASE_URL}{self.GRAPHQL_QUERY}\"\n after = \"\"\n\n user_follow = {\n \"count\": user_data.follow,\n \"usernames\": list(),\n \"followed\": list(),\n }\n while True:\n params = {\n \"query_hash\": self.followed_by_user_query_hash,\n \"id\": user_data.user_id,\n \"first\": 50,\n \"after\": after if after else \"\",\n }\n\n data = self._make_request(url=query_url, params=params)[\"data\"][\"user\"][\"edge_follow\"]\n self._extract_usernames(users=data[\"edges\"], result=user_follow[\"usernames\"])\n\n after = self._has_next_page(data)\n if after is None:\n break\n\n self._extract_users_by_usernames(usernames=user_follow[\"usernames\"], result=user_follow[\"followed\"])\n logging.info(msg=f'Followed by user {url}. Count: {len(user_follow[\"followed\"])}')\n\n return user_follow", "title": "" }, { "docid": "760843f7236d4fde0fed15c4a5f57843", "score": "0.55013126", "text": "def getNewsFeed(self, userId: int) -> List[int]:\n if userId not in self.user:\n return list()\n else:\n # 取出自己最近10条推文\n ans = self.user[userId].tweet[-10:][::-1]\n # 取出自己followee\n for followeeId in self.user[userId].followee:\n if followeeId in self.user:\n # follow人的最近十条推文\n opt = self.user[followeeId].tweet[-10:][::-1]\n i, j, combined = 0, 0, list()\n while i < len(ans) and j < len(opt):\n if self.tweetTime[ans[i]] > self.tweetTime[opt[j]]:\n combined.append(ans[i])\n i += 1\n else:\n combined.append(opt[j])\n j += 1\n combined.extend(ans[i:])\n combined.extend(opt[j:])\n ans = combined[:10]\n return ans", "title": "" }, { "docid": "1e1208668d51f4e5fe3d773c12aa8f58", "score": "0.5499597", "text": "def calculate_num_shares(self, posts=None):\n if posts is None:\n posts = self.posts_set.all()\n\n if self.platform_name == \"Facebook\":\n return posts.aggregate(Sum('engagement_media_numfbshares'))['engagement_media_numfbshares__sum']\n if self.platform_name == \"Pinterest\":\n return posts.aggregate(Sum('engagement_media_numrepins'))['engagement_media_numrepins__sum']\n if self.platform_name == \"Twitter\":\n return posts.aggregate(Sum('engagement_media_numshares'))['engagement_media_numshares__sum']\n return 0", "title": "" }, { "docid": "e958d7308828ebb79fe9a6b80ce80185", "score": "0.54984397", "text": "def getNewsFeed(self, userId: 'int') -> 'List[int]':\n # extract the user\n # go through each user's followers\n # add their tweets to an array --> array of arrays\n # put most recent tweet from each array into max heap\n # removing from max heap ten times\n\n user = self.users[userId]\n collected_tweets = []\n result = []\n\n for followerId in user.following:\n follower = self.users[followerId]\n collected_tweets.append(follower.tweets)\n\n maxheap = Heap(\"max\")\n\n for index in range(len(collected_tweets)):\n current_tweets = collected_tweets[index]\n last_tweet = current_tweets[-1]\n maxheap.insert([last_tweet.timestamp, last_tweet.tweetId, index])\n\n count_in_newsfeed = 10\n\n while (maxheap.size() > 0 and count_in_newsfeed > 0):\n timestamp, tweetId, user = 
maxheap.remove_peak()\n result.append(tweetId)\n if len(collected_tweets[user]) > 0:\n last_tweet = collected_tweets[user].pop()\n maxheap.insert([last_tweet.timestamp, last_tweet.tweetId, user])\n count_in_newsfeed -= 1", "title": "" }, { "docid": "4a4b8c032d3fb1a9e7855d7c16c1fc8e", "score": "0.5485443", "text": "def get_all_posts(access_token, owner_id, count=100, offset=0):\n \n all_posts = []\n while True:\n time.sleep(random.random())\n wall = getjson(\"https://api.vk.com/method/wall.get\", {\n \"owner_id\" : owner_id,\n \"count\": count,\n \"access_token\": access_token,\n \"offset\": offset,\n \"v\": '5.131'\n })\n count_posts = wall['response']['count']\n posts = wall['response']['items']\n\n all_posts.extend(posts)\n\n if len(all_posts) >= count_posts:\n break\n else:\n offset += 100\n\n return all_posts, count_posts", "title": "" }, { "docid": "2753e129586ed28c5b7fb8188ea9d109", "score": "0.54846305", "text": "def users_i_follow(self):\n return _get_session().query(Connection).filter_by(following=self).all()", "title": "" }, { "docid": "05bac14dc609d63cba5e319271545e1e", "score": "0.5480283", "text": "def users_following_me(self):\n return _get_session().query(Connection).filter_by(followed=self).all()", "title": "" }, { "docid": "5a7aefe392d87e5f10ea033e32968599", "score": "0.5470377", "text": "def users_count(self):\n return self._data.get('related_field_counts', {}).get('users', 0)", "title": "" }, { "docid": "3e52dbeec36cd2adbbcd2be7fab37680", "score": "0.5470254", "text": "def get_users_followed(self, username):\n path = f'/api/user/{username}/following'\n return self._r.get(path, stream=True, fmt=NDJSON,\n converter=models.User.convert)", "title": "" }, { "docid": "ef873da48e20e6c7152301d0d7246cd8", "score": "0.5465221", "text": "def new_follower_ids_count(self):\n return len(self.new_follower_ids)", "title": "" }, { "docid": "d9a736f544d5aec0cd05df518afae463", "score": "0.5462012", "text": "def getNewsFeed(self, user):\n import heapq\n h, tweets = [], self.tweets\n people = self.followee.get(user, set()) | set([user])\n for person in people:\n if person in tweets and tweets[person]:\n time, tweet = tweets[person][-1]\n h.append((time, tweet, person, len(tweets[person]) - 1))\n heapq.heapify(h)\n news = []\n for _ in range(10):\n if h:\n time, tweet, person, idx = heapq.heappop(h)\n news.append(tweet)\n if idx:\n new_time, new_tweet = tweets[person][idx - 1]\n heapq.heappush(h, (new_time, new_tweet, person, idx - 1))\n return news", "title": "" }, { "docid": "72a4a548da874fcfe29c6586c7e80fc7", "score": "0.546016", "text": "def get_users_following(self, username):\n path = f'/api/user/{username}/followers'\n return self._r.get(path, stream=True, fmt=NDJSON,\n converter=models.User.convert)", "title": "" }, { "docid": "b7235885f72e0d9dd8dc0e28fda14a3f", "score": "0.5451477", "text": "def get_unfollowers_number(message, user_id, username):\n doesnt_follow_back = model.get_unfollowers(user_id, username)\n bot.edit_message_reply_markup(chat_id=message.chat.id,\n message_id=message.message_id,\n reply_markup=None)\n\n keyboard = types.InlineKeyboardMarkup()\n\n if doesnt_follow_back.__len__() > 0:\n keyboard.add(types.InlineKeyboardButton(text=f'{emojize(\" :heavy_plus_sign:\", use_aliases=True)}gimme list',\n callback_data=f'list_unfollowing_id:{user_id}_page:0'),\n types.InlineKeyboardButton(text=f'{emojize(\" :pencil2:\", use_aliases=True)}another one',\n callback_data=f'get_instagram_username'))\n else:\n keyboard.add(types.InlineKeyboardButton(text=f'{emojize(\" :pencil2:\", 
use_aliases=True)}another one', callback_data=f'get_instagram_username'))\n\n bot.send_message(chat_id=message.chat.id,\n text=f'{emojize(\" :pig_nose:\", use_aliases=True)}doesnt follow u back: {doesnt_follow_back.__len__()} users',\n reply_markup=keyboard)", "title": "" } ]
53bf878325c3da25c2bdf2b255812f70
Wraps up the different methods to get the complex trajectories in the experiment
[ { "docid": "2cd396a60027d05589a8224e8825f982", "score": "0.6717981", "text": "def get_complex_trajectories(experiment):\n raco_before,raco_after=experiment.trajectory_racomodation()\n \n list_of_raccomodated = []\n for i,(frame,label,number) in enumerate(raco_after):\n index = raccomodate_after_to_before(frame,label,number,raco_before,experiment)\n if index!=-1:\n list_of_raccomodated.append((i,index))\n \n list_of_racc_disposable = list_of_raccomodated[:]\n total_merged_trajectories = [] #List of complex trajectories\n for i in range(len(list_of_racc_disposable)):\n comp_traj = []\n if i>=len(list_of_racc_disposable):\n break\n assemble_complex_trajectories(comp_traj,i,list_of_racc_disposable,raco_before,raco_after,experiment)\n total_merged_trajectories.append(comp_traj)\n return total_merged_trajectories", "title": "" } ]
[ { "docid": "704c0d232adc815ba1223712ec930fbb", "score": "0.5725358", "text": "def test_step_methods_in_each_level(self):\n _, model, _ = mv_simple()\n _, model_coarse, _ = mv_simple_coarse()\n _, model_very_coarse, _ = mv_simple_very_coarse()\n with model:\n initial_point = model.initial_point\n initial_point_size = sum(initial_point[n.name].size for n in model.value_vars)\n s = np.ones(initial_point_size) + 2.0\n sampler = MLDA(\n coarse_models=[model_very_coarse, model_coarse],\n base_S=s,\n base_sampler=\"Metropolis\",\n )\n assert isinstance(sampler.step_method_below, MLDA)\n assert isinstance(sampler.step_method_below.step_method_below, Metropolis)\n assert np.all(sampler.step_method_below.step_method_below.proposal_dist.s == s)\n\n sampler = MLDA(coarse_models=[model_very_coarse, model_coarse], base_S=s)\n assert isinstance(sampler.step_method_below, MLDA)\n assert isinstance(sampler.step_method_below.step_method_below, DEMetropolisZ)\n assert np.all(sampler.step_method_below.step_method_below.proposal_dist.s == s)", "title": "" }, { "docid": "a86620ffdc3a1c24164d2818c31cf511", "score": "0.56778145", "text": "def show_complex_trajectory(comp_traj,experiment,wait=50):\n path_im = experiment.path\n path_body = experiment.body_path\n path_arm = experiment.arm_path\n previous = 'trajectory'\n \n width = width_monitor\n height = height_monitor\n cv2.namedWindow(\"Trajectory\", cv2.WINDOW_NORMAL)\n cv2.resizeWindow('Trajectory', width, height)\n for i,elt in enumerate(comp_traj):\n if type(elt)==tuple:\n #raccomodation\n if i>0:\n if type(comp_traj[i-1])==tuple:\n previous='tuple'\n else:\n previous='trajectory'\n \n if previous=='trajectory':\n #looks after\n stop_frame = 239\n if i!=len(comp_traj)-1:\n index_next1,index_next_2,_ = comp_traj[i+1]\n next_traj = experiment.trajectories[index_next1][index_next_2]\n stop_frame = next_traj.beginning\n \n last_frame = comp_traj[i-1].end\n next_frame = last_frame+1\n label = elt[2]\n while next_frame<stop_frame and experiment.arm_tracker.next_cell(next_frame,label)!=-1:\n \n img = m.open_frame(path_im,next_frame+1)\n arms = m.open_frame(path_arm,next_frame+1)\n mask = (arms==(label+1)).astype(np.uint8)*255\n label = experiment.arm_tracker.next_cell(next_frame,label)\n next_frame+=1\n overlaid = m.cv_overlay_mask2image(mask,img,\"red\")\n cv2.imshow(\"Trajectory\",overlaid)\n cv2.waitKey(wait)\n else:\n cells_list = elt.cells \n for cell in cells_list:\n frame_number = cell.frame_number+1\n img = m.open_frame(path_im,frame_number)\n body = m.open_frame(path_body,frame_number)\n arms = m.open_frame(path_arm,frame_number)\n mask = (body==(cell.body+1)).astype(np.uint8)*255\n mask_arms = np.zeros(mask.shape,dtype=np.uint8)\n for arm in cell.arms:\n mask_arms+=(arms==(arm+1)).astype(np.uint8)*255\n overlaid = m.cv_overlay_mask2image(mask,img,\"green\")\n overlaid = m.cv_overlay_mask2image(mask_arms,overlaid,\"red\")\n cv2.imshow(\"Trajectory\",overlaid)\n \n cv2.waitKey(wait)\n \n #cv2.destroyAllWindows()", "title": "" }, { "docid": "7f92352eedb7758a9aec30927601bd34", "score": "0.5551739", "text": "def test_nested_cv():\n _experiment(3, 3)", "title": "" }, { "docid": "dfc53a0493bb5da14f0ea8943c6e4aa5", "score": "0.5491269", "text": "def sample_trajectories(self, env):\n# return self.model.predict(self.x_test)\n return self.i*2", "title": "" }, { "docid": "5c20a7742907d598a43254ed2e2ea301", "score": "0.5468829", "text": "def assemble_complex_trajectories(complex_trajectory,z,list_of_raccom,raco_before,raco_after,experiment):\n l,k = 
list_of_raccom.pop(z)\n traj1 = experiment.trajectories[raco_before[k][0]][raco_before[k][1]]\n if len(complex_trajectory)==0:\n traj0 = experiment.trajectories[raco_after[l][0]][raco_after[l][1]]\n complex_trajectory.extend([traj0,raco_after[l],raco_before[k],traj1])\n else:\n complex_trajectory.extend([raco_after[l],raco_before[k],traj1])\n \n for l_prime, (i1,i2,label) in enumerate(raco_after):\n #Find if there is a racomodation after\n if traj1==experiment.trajectories[i1][i2]:\n #Find if this raccomoadtion can itself be raccomodated\n candidates = [i for i,(x,y) in enumerate(list_of_raccom) if x==l_prime]\n if len(candidates)==1:\n assemble_complex_trajectories(complex_trajectory,candidates[0],list_of_raccom,raco_before,raco_after,experiment)\n else:\n complex_trajectory.append((i1,i2,label))\n return\n return", "title": "" }, { "docid": "759175b20f27fa8aaeaaa570473ca19d", "score": "0.54281145", "text": "def show(self,experiment,wait=50):\n path_im = experiment.path\n path_body = experiment.body_path\n path_arm = experiment.arm_path\n previous = 'trajectory'\n \n monitor = get_monitors()[0]\n width = monitor.width\n height = monitor.height\n cv2.namedWindow(\"Complex Trajectory\", cv2.WINDOW_NORMAL)\n cv2.resizeWindow('Complex Trajectory', width, height)\n for i,elt in enumerate(self):\n if type(elt)==tuple:\n #raccomodation\n if i>0:\n if type(self[i-1])==tuple:\n previous='tuple'\n else:\n previous='trajectory'\n \n if previous=='trajectory':\n #looks after\n stop_frame = 239\n if i!=len(self)-1:\n index_next1,index_next_2,_ = self[i+1]\n next_traj = experiment.trajectories[index_next1][index_next_2]\n stop_frame = next_traj.beginning\n \n last_frame = self[i-1].end\n next_frame = last_frame+1\n label = elt[2]\n while next_frame<stop_frame and experiment.arm_tracker.next_cell(next_frame,label)!=-1:\n \n img = m.open_frame(path_im,next_frame+1)\n arms = m.open_frame(path_arm,next_frame+1)\n mask = (arms==(label+1)).astype(np.uint8)*255\n label = experiment.arm_tracker.next_cell(next_frame,label)\n next_frame+=1\n overlaid = m.cv_overlay_mask2image(mask,img,\"red\")\n cv2.imshow(\"Complex Trajectory\",overlaid)\n cv2.waitKey(wait)\n else:\n cells_list = elt.cells \n for cell in cells_list:\n frame_number = cell.frame_number+1\n img = m.open_frame(path_im,frame_number)\n body = m.open_frame(path_body,frame_number)\n arms = m.open_frame(path_arm,frame_number)\n mask = (body==(cell.body+1)).astype(np.uint8)*255\n mask_arms = np.zeros(mask.shape,dtype=np.uint8)\n for arm in cell.arms:\n mask_arms+=(arms==(arm+1)).astype(np.uint8)*255\n overlaid = m.cv_overlay_mask2image(mask,img,\"green\")\n overlaid = m.cv_overlay_mask2image(mask_arms,overlaid,\"red\")\n cv2.imshow(\"Complex Trajectory\",overlaid)\n \n cv2.waitKey(wait)\n \n cv2.destroyWindow(\"Complex Trajectory\")", "title": "" }, { "docid": "cb410d6b976f3fc8447c3d0238c2da1c", "score": "0.5422803", "text": "def new_method():\n channels = [\n 'RADAR_FRONT',\n 'RADAR_FRONT_LEFT',\n 'RADAR_FRONT_RIGHT',\n 'RADAR_BACK_LEFT',\n 'RADAR_BACK_RIGHT',\n ]\n samples = scene_in_sample_data\n scene_points = list()\n for sample in samples:\n x, y, z, vx_comp, vy_comp, pointclouds = list(), list(), list(), list(), list(), list()\n ego_pose_coords = []\n for channel in channels:\n pc = sample[channel]['radar_point_cloud']\n radar_token = sample['data'][channel]\n current_radar = nusc.get('sample_data', radar_token)\n ego_pose = nusc.get('ego_pose', current_radar['ego_pose_token'])\n calibrated_sensor = nusc.get('calibrated_sensor', 
current_radar['calibrated_sensor_token'])\n sensor_to_car = transform_matrix(calibrated_sensor['translation'],\n Quaternion(calibrated_sensor['rotation'], inverse=False))\n car_to_world = transform_matrix(ego_pose['translation'], Quaternion(ego_pose['rotation'], inverse=False))\n\n sensor_to_world = car_to_world @ sensor_to_car\n\n pc.transform(sensor_to_world)\n\n pointclouds.append(pc)\n\n ego_pose_coords = ego_pose['translation']\n\n # combine radar\n\n for i in range(pc.points.shape[1]):\n x.append(pc.points[0][i])\n y.append(pc.points[1][i])\n z.append(pc.points[2][i]) # redundant?\n vx_comp.append(pc.points[7][i])\n vy_comp.append(pc.points[8][i])\n scene_points.append([\n np.asarray(x),\n np.asarray(y),\n np.asarray(z),\n np.asarray(vx_comp),\n np.asarray(vy_comp),\n np.asarray(pointclouds),\n np.asarray(ego_pose_coords)\n ])\n\n return np.asarray(scene_points, dtype=object)", "title": "" }, { "docid": "c30310d1c2bb8c61ced40beae38996a3", "score": "0.5397106", "text": "def executeTrajectory(self):", "title": "" }, { "docid": "97e1f2612efb18295f7d4a45e811a8dd", "score": "0.5390741", "text": "def test_trajectories():\n trajectory_true_out = np.load(\"trajectory_particle_2642532_out.npy\")\n trajectory_true_in = np.load(\"trajectory_particle_12599035.npy\")\n\n def trajectories_test():\n p = parameters.InitialConditionsParameters(\n initial_snapshot=\"/Users/lls/Documents/CODE/Nina-Simulations/double/ICs_z99_256_L50_gadget3.dat\",\n final_snapshot=\"/Users/lls/Documents/CODE/Nina-Simulations/double/snapshot_104\",\n min_halo_number=0, max_halo_number=400,\n ids_type='all', num_particles=None)\n\n trajectory_test = trajectories.Trajectories(init_parameters=p, particles=[12599035, 2642532], num_of_filters=20,\n num_particles=1)\n trajectory_in = trajectory_test.delta_in\n trajectory_out = trajectory_test.delta_out\n return trajectory_in, trajectory_out\n\n trajectory_test_in, trajectory_test_out = trajectories_test()\n\n assert np.allclose(trajectory_true_in, trajectory_test_in)\n assert np.allclose(trajectory_true_out, trajectory_test_out)", "title": "" }, { "docid": "e58452ebce45030b9745ea519ed6e550", "score": "0.5386184", "text": "def __init__(self, wl, n0, n1, n2, theta0, d, ninterface0, ninterface1):\n self.n0 = n0 # Refractive index of first medium, either glass of air\n self.n1 = n1 # Refractive index of intermediate medium.\n self.n2 = n2 # Refractive index of last medium usually h2o or d2o\n # A helping list of indeces\n self.n = (n0, n1, n2)\n # Incidencea angle of promary beam\n self.theta0 = theta0 * np.pi/180 # transforms theta1 to rad\n # Calculate angles with snellius. This doesnt work with the complex\n # ns, atleas it doesnt give the samre sult if I take real part\n # of self.sintheta or self.costheta\n #self.theta1 = np.arcsin(self.n0.real/self.n1.real*np.sin(self.theta0))\n #self.theta2 = np.arcsin(self.n1.real/self.n2.real*np.sin(self.theta1))\n # Because n_j is complex we use costheta or sintheta during\n # calculations. 
costheta and sintheta can be complex numbers.\n self.sintheta0 = np.sin(self.theta0)\n self.sintheta1 = self.calc_sintheta(1)\n self.sintheta2 = self.calc_sintheta(2)\n self.sintheta = (self.sintheta0, self.sintheta1, self.sintheta2)\n self.costheta0 = np.cos(self.theta0)\n self.costheta1 = self.calc_costheta(1)\n self.costheta2 = self.calc_costheta(2)\n # Helpful list of angles\n #self.theta = (self.theta0, self.theta1, self.theta2)\n self.costheta = (self.costheta0, self.costheta1, self.costheta2)\n # Array of wavelengths\n self.wl = wl\n # Thickness of intermediate material\n self.d = d\n # Phase difference due to thickness of intermediate material\n self.beta = 2 * np.pi/self.wl * self.n1 * self.d * self.costheta1\n\n # This is not fully correct because I would need a self.theta1 for VIS\n # and one for IR but I dont see why this is the case?\n self.delta = (2*np.pi*self.n2*d/self.wl) * \\\n (1/self.costheta1-(self.sintheta1/self.costheta1))\n\n # Has todo with where sfg is generated\n self.ninterface0 = ninterface0\n self.ninterface1 = ninterface1", "title": "" }, { "docid": "bc798bb23c69175a349b089f135a7802", "score": "0.53129655", "text": "def generate_subtr_examples():\n generate_generic_examples('subtr', subtr_example)", "title": "" }, { "docid": "977cc38d953458966edef71a38876c26", "score": "0.5290277", "text": "def __init__(self, traj, robot):\n # type: (orpy.RaveTrajectory, orpy.Robot) -> None\n super(RaveTrajectoryWrapper, self).__init__()\n self.traj = traj #: init\n self.spec = traj.GetConfigurationSpecification()\n self._dof = robot.GetActiveDOF()\n\n self._interpolation = self.spec.GetGroupFromName('joint').interpolation\n if self._interpolation not in ['quadratic', 'cubic']:\n raise ValueError(\n \"This class only handles trajectories with quadratic or cubic interpolation\"\n )\n self._duration = traj.GetDuration()\n all_waypoints = traj.GetWaypoints(0, traj.GetNumWaypoints()).reshape(\n traj.GetNumWaypoints(), -1)\n valid_wp_indices = [0]\n self.ss_waypoints = [0.0]\n for i in range(1, traj.GetNumWaypoints()):\n dt = self.spec.ExtractDeltaTime(all_waypoints[i])\n if dt > 1e-5: # If delta is too small, skip it.\n valid_wp_indices.append(i)\n self.ss_waypoints.append(self.ss_waypoints[-1] + dt)\n\n self.n_waypoints = len(valid_wp_indices)\n self.ss_waypoints = np.array(self.ss_waypoints)\n self.s_start = self.ss_waypoints[0]\n self.s_end = self.ss_waypoints[-1]\n\n self.waypoints = np.array([\n self.spec.ExtractJointValues(all_waypoints[i], robot,\n robot.GetActiveDOFIndices())\n for i in valid_wp_indices\n ])\n self.waypoints_d = np.array([\n self.spec.ExtractJointValues(all_waypoints[i], robot,\n robot.GetActiveDOFIndices(), 1)\n for i in valid_wp_indices\n ])\n\n # Degenerate case: there is only one waypoint.\n if self.n_waypoints == 1:\n pp_coeffs = np.zeros((1, 1, self.dof))\n for idof in range(self.dof):\n pp_coeffs[0, 0, idof] = self.waypoints[0, idof]\n # A constant function\n self.ppoly = PPoly(pp_coeffs, [0, 1])\n\n elif self._interpolation == \"quadratic\":\n self.waypoints_dd = []\n for i in range(self.n_waypoints - 1):\n qdd = ((self.waypoints_d[i + 1] - self.waypoints_d[i]) /\n (self.ss_waypoints[i + 1] - self.ss_waypoints[i]))\n self.waypoints_dd.append(qdd)\n self.waypoints_dd = np.array(self.waypoints_dd)\n\n # Fill the coefficient matrix for scipy.PPoly class\n pp_coeffs = np.zeros((3, self.n_waypoints - 1, self.dof))\n for idof in range(self.dof):\n for iseg in range(self.n_waypoints - 1):\n pp_coeffs[:, iseg, idof] = [\n self.waypoints_dd[iseg, idof] / 
2,\n self.waypoints_d[iseg, idof],\n self.waypoints[iseg, idof]\n ]\n self.ppoly = PPoly(pp_coeffs, self.ss_waypoints)\n\n elif self._interpolation == \"cubic\":\n self.waypoints_dd = np.array([\n self.spec.ExtractJointValues(all_waypoints[i], robot,\n robot.GetActiveDOFIndices(), 2)\n for i in valid_wp_indices\n ])\n self.waypoints_ddd = []\n for i in range(self.n_waypoints - 1):\n qddd = ((self.waypoints_dd[i + 1] - self.waypoints_dd[i]) /\n (self.ss_waypoints[i + 1] - self.ss_waypoints[i]))\n self.waypoints_ddd.append(qddd)\n self.waypoints_ddd = np.array(self.waypoints_ddd)\n\n # Fill the coefficient matrix for scipy.PPoly class\n pp_coeffs = np.zeros((4, self.n_waypoints - 1, self.dof))\n for idof in range(self.dof):\n for iseg in range(self.n_waypoints - 1):\n pp_coeffs[:, iseg, idof] = [\n self.waypoints_ddd[iseg, idof] / 6,\n self.waypoints_dd[iseg, idof] / 2,\n self.waypoints_d[iseg, idof],\n self.waypoints[iseg, idof]\n ]\n self.ppoly = PPoly(pp_coeffs, self.ss_waypoints)\n\n self.ppoly_d = self.ppoly.derivative()\n self.ppoly_dd = self.ppoly.derivative(2)", "title": "" }, { "docid": "6d89fb770fdfd9c339b4264c793de4f4", "score": "0.5281159", "text": "def compute_ros_trajectory(self):\n raise NotImplementedError", "title": "" }, { "docid": "d7cd7c00e710a94945a313344c062efb", "score": "0.5252293", "text": "def test_composition(self):\n light = RTAPhase(\n prop_wload=PeriodicWload(\n duty_cycle_pct=10,\n duration=1.0,\n period=10e-3,\n )\n )\n\n ramp = DutyCycleSweepPhase(\n start=10,\n stop=90,\n step=20,\n period=50e-3,\n duration=1,\n duration_of='step',\n )\n\n heavy = RTAPhase(\n prop_wload=PeriodicWload(\n duty_cycle_pct=90,\n duration=0.1,\n period=100e-3,\n )\n )\n\n profile = {\"test\": light + ramp + heavy}\n\n exp_phases = [\n # Light phase:\n {\n \"loop\": 100,\n \"run\": 1000,\n \"timer\": {\n \"period\": 10000,\n \"ref\": \"unique\"\n }\n },\n # Ramp phases:\n {\n \"loop\": 20,\n \"run\": 5000,\n \"timer\": {\n \"period\": 50000,\n \"ref\": \"unique\"\n }\n },\n {\n \"loop\": 20,\n \"run\": 15000,\n \"timer\": {\n \"period\": 50000,\n \"ref\": \"unique\"\n }\n },\n {\n \"loop\": 20,\n \"run\": 25000,\n \"timer\": {\n \"period\": 50000,\n \"ref\": \"unique\"\n }\n },\n {\n \"loop\": 20,\n \"run\": 35000,\n \"timer\": {\n \"period\": 50000,\n \"ref\": \"unique\"\n }\n },\n {\n \"loop\": 20,\n \"run\": 45000,\n \"timer\": {\n \"period\": 50000,\n \"ref\": \"unique\"\n }\n },\n # Heavy phase:\n {\n \"loop\": 1,\n \"run\": 90000,\n \"timer\": {\n \"period\": 100000,\n \"ref\": \"unique\"\n }\n }]\n\n self._do_test(profile, exp_phases)", "title": "" }, { "docid": "6e7e19ac68f4d29ed7cdaab31d4fdcd4", "score": "0.5246129", "text": "def get_trajectory(self) -> list:\n pass", "title": "" }, { "docid": "253cf464c7523299d4a210659fc3915f", "score": "0.5242545", "text": "def supercomplex(self):\n return self.supercomplex_", "title": "" }, { "docid": "caa34b02e2411e07924cf17b8e01da4e", "score": "0.5225824", "text": "def generate_trajectories(self, duration=0.5, time_step=0.01, y0=None):\n\n return np.stack([\n primitive.generate_trajectory(duration=duration, time_step=time_step, y0=y0)\n for primitive in self.primitives\n ])", "title": "" }, { "docid": "c9e63a54e404aec6c1f4d9a627303990", "score": "0.52136993", "text": "def dti_tracking_analysis():", "title": "" }, { "docid": "d8bf7156c305480e8c912938511e85c9", "score": "0.52131766", "text": "def Stepper(object):\n\n def __init__(self, flow, parameters):\n super(Stepper, self).__init__()\n self.flow = flow\n\n def r_next(self, r, w, v, 
order=1):\n \"\"\" Get the next particle position r, given the current location, the velocity difference w and the fluid velocity v.\n\n We use the scheme from Daitsche (2013; http://arxiv.org/pdf/1210.2576.pdf) which is third-order accurate in the timestep\n\n Parameters:\n r - the current position of the particle\n w - the current velocity mismatch of the particle\n v - the current velocity of the fluid flow at r\n\n Returns:\n the new position for the particle\n \"\"\"\n pass\n\n def w_next(self, r, v, order=1):\n \"\"\" Get the next particle velocity difference, w, given the current location, the velocity of the fluid flow at \n \"\"\"\n pass\n\n def velocity_kernel(self, x, t):\n \"\"\" Return the velocity part of the the Maxey-Riley equations \n \n The velocity kernel is:\n\n \\[\n G(t) = (R-1)\\frac{du}{dt} - Rw\\cdot \\nabla u - \\frac{R}{S}w\n \\]\n \"\"\"\n pass\n\n def history_kernel(self, x, t):\n \"\"\" Return the history part of the Maxey-Riley equations\n \n The history kernel is:\n\n \\[\n H(t) = -R \\sqrt{\\frac{3}{\\pi S}} \\int_{t_0}^{t+\\delta t} \\frac{w(\\tau)}{\\sqrt{t-\\tau}}d\\tau\n \\]\n\n (see Daitsche, 2013).\n \"\"\"\n pass", "title": "" }, { "docid": "4daec651d3acee79b9e97901d977779e", "score": "0.5195512", "text": "def generateRealizations(self):", "title": "" }, { "docid": "5f9113969a9668b2d2f8e8ae706ae6b9", "score": "0.51625925", "text": "def experiment_main():\n\n\tgenerator_specs = {\"original_dim\": original_dim, \"intermediate_dim\": 8, \"latent_dim\": latent_dim, \"epochs\": 100, \"dropout\": 0.2,\\\n\t\t\t\t\t\t\"experiment\": \"German\", \"feature_names\": features}\n\n\tprint ('---------------------')\n\tprint ('Training adversarial models....')\n\tprint ('---------------------')\n\n\t# Adversarial models\n\tadv_models = dict()\n\tadv_models[\"Perturbation\"] = Adversarial_IME_Model(racist_model_f(), innocuous_model_psi()).train(xtrain, ytrain,\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeature_names=features, perturbation_multiplier=1)\n\tadv_models[\"DropoutVAE\"] = Adversarial_IME_Model(racist_model_f(), innocuous_model_psi(), generator = \"DropoutVAE\", generator_specs = generator_specs).\\\n \t\t\t\ttrain(xtrain, ytrain, feature_names=features, dummy_idcs=dummy_idcs, integer_idcs=integer_attributes, perturbation_multiplier=1)\n\tadv_models[\"ForestFill\"] = Adversarial_IME_Model(racist_model_f(), innocuous_model_psi(), generator = \"Forest\", generator_specs = generator_specs).\\\n \t\t\t\ttrain(xtrain, ytrain, feature_names=features, dummy_idcs=dummy_idcs, integer_idcs=integer_attributes, perturbation_multiplier=1)\n\n\tfor adversarial in [\"Perturbation\", \"DropoutVAE\", \"ForestFill\"]:\n\t\tadv_model = adv_models[adversarial]\n\n\t\tprint ('---------------------')\n\t\tprint (f'Training explainers with adversarial {adversarial}....')\n\t\tprint ('---------------------')\n\n\t\t# Explainers\n\t\tadv_kernel_explainers = dict()\n\t\tadv_kernel_explainers[\"Perturbation\"] = shap.SamplingExplainer(adv_model.predict, xtrain)\n\t\tadv_kernel_explainers[\"DropoutVAE\"] = shap.SamplingExplainer(adv_model.predict, xtrain, generator=\"DropoutVAE\", generator_specs=generator_specs,\\\n\t\t\t\t\t\t\t\tdummy_idcs=dummy_idcs, integer_idcs=integer_attributes, instance_multiplier = 1000)\n\t\tadv_kernel_explainers[\"ForestFill\"] = shap.SamplingExplainer(adv_model.predict, xtrain, generator=\"Forest\", generator_specs=generator_specs,\\\n\t\t\t\t\t\t\t\tdummy_idcs=dummy_idcs, integer_idcs=integer_attributes)\n\n\t\tfor explainer in [\"Perturbation\", \"DropoutVAE\", 
\"ForestFill\"]:\n\t\t\tadv_kernel_explainer = adv_kernel_explainers[explainer]\n\t\t\texplanations = adv_kernel_explainer.shap_values(xtest, fill_data=True, data_location=\"...\\Data/german_forest_ime.csv\", distribution_size=1000)\n\n\t\t\t# format for display\n\t\t\tformatted_explanations = []\n\t\t\tfor exp in explanations:\n\t\t\t\tformatted_explanations.append([(features[i], exp[i]) for i in range(len(exp))])\n\n\t\t\tprint (f\"IME Ranks and Pct Occurances one unrelated feature, adversarial: {adversarial}, explainer: {explainer}:\")\n\t\t\tsummary = experiment_summary(formatted_explanations, features)\n\t\t\tprint (summary)\n\t\t\tprint (\"Fidelity:\",round(adv_model.fidelity(xtest),2))\n\n\t\t\tfile_name = f\"../Results/GermanIme/germanImeSummary_adversarial_{adversarial}_explainer_{explainer}.csv\"\n\t\t\twith open(file_name, \"w\") as output:\n\t\t\t\tw = csv.writer(output)\n\t\t\t\tfor key, val in summary.items():\n\t\t\t\t\tw.writerow([key] + [pair for pair in val])", "title": "" }, { "docid": "09b511b3c4be9e61639555ab960e0717", "score": "0.5159934", "text": "def test():\n\treturn [\"vice.toolkit.interpolation.interp_scheme_1d\",\n\t\t[\n\t\t\ttest_initialize(),\n\t\t\ttest_attributes(),\n\t\t\ttest_call(),\n\t\t\ttest_getitem()\n\t\t]\n\t]", "title": "" }, { "docid": "66c454a7889a057059967dba8b7a1dd5", "score": "0.5155187", "text": "def __complex__(self):\n return complex(self.re,self.im)", "title": "" }, { "docid": "7a244ff8995f32ce73bd7ef6363d75dc", "score": "0.5128895", "text": "def getTestCasesForTestPlan(self):", "title": "" }, { "docid": "99f45c51764bf81394b64b56a6b5765d", "score": "0.5119969", "text": "def __init__(self, *args, **kwargs):\n# verbose = True\n verbose = False\n\n pigasus.__init__(self, *args, **kwargs)\n\n # ...\n # non homogeneous Dirichlet Boundary conditions\n # ...\n try:\n self.bc_dirichlet = self.testcase['bc_dirichlet']\n self.Dirichlet = True\n except:\n self.bc_dirichlet = {}\n self.Dirichlet = False\n # ...\n\n # ...\n # neumann Boundary conditions\n # ...\n try:\n self.bc_neumann = self.testcase['bc_neumann']\n self.Neumann = True\n except:\n self.bc_neumann = {}\n self.Neumann = False\n # ...\n\n # ...\n try:\n func_mass = self.testcase['b']\n self.withMass = True\n except:\n self.withMass = False\n # ...\n\n # ...\n try:\n func_adv = self.testcase['v']\n self.withAdvection = True\n except:\n self.withAdvection = False\n # ...\n\n # ...\n try:\n func_tadv = self.testcase['w']\n try:\n func_dw = self.testcase['dw']\n except:\n print(\"Warning: You should give the (div w) term. 
Pigasus is not yet smart enough!\")\n # ...\n if self.dim == 1:\n def func_dw(x):\n eps = 1.e-3\n return [(w(x+eps)[0]-w(x)[0])/eps]\n if self.dim == 2:\n def func_dw(x,y):\n eps = 1.e-3\n return [ (w(x+eps,y)[0]-w(x,y)[0])/eps \\\n + (w(x,y+eps)[1]-w(x,y)[1])/eps]\n if self.dim == 3:\n def func_dw(x,y,z):\n eps = 1.e-3\n return [ (w(x+eps,y,z)[0]-w(x,y,z)[0])/eps \\\n + (w(x,y+eps,z)[1]-w(x,y,z)[1])/eps \\\n + (w(x,y,z+eps)[2]-w(x,y,z)[2])/eps]\n # ...\n self.withTAdvection = True\n self.withMass = True\n except:\n self.withTAdvection = False\n # ...\n\n # ...\n try:\n func_stiff = self.testcase['A']\n self.withStiffness = True\n except:\n self.withStiffness = False\n # ...\n\n # ...\n try:\n func_D2 = self.testcase['D2']\n self.withD2 = True\n except:\n self.withD2 = False\n # ...\n\n # ...\n self.withMetric = False\n try:\n Metric = self.testcase['metric']\n if Metric is not None:\n self.withMetric = True\n except:\n pass\n # ...\n\n # ...\n withAllDirichlet = False\n try:\n if self.testcase['AllDirichlet'] is not None:\n withAllDirichlet = self.testcase['AllDirichlet']\n except:\n pass\n # ...\n\n # ...\n try:\n solverInfo = self.testcase['solverInfo']\n if solverInfo is not None:\n self.solverInfo = {}\n except:\n self.solverInfo = None\n # ...\n\n # ... set geometry\n if self.geometry is None:\n try:\n V = kwargs['V']\n self.geometry = V.geometry\n except:\n raise(\"Unable to find a geometry for the current basicPDE\")\n # ...\n\n self.withAllDirichlet = withAllDirichlet\n\n # ...\n list_DirFaces = []\n for i in range(0, self.geometry.npatchs):\n list_DirFaces.append([])\n if withAllDirichlet:\n list_extFaces = self.geometry.external_faces\n for extFaces in list_extFaces:\n patch_id = extFaces[0]\n face_id = extFaces[1]\n list_DirFaces[patch_id].append(face_id)\n else:\n try:\n list_DirFaces = self.testcase['Dirichlet']\n except:\n pass\n # ...\n\n self.list_DirFaces = list_DirFaces\n\n # ...\n self.UseDuplicateFaces = False\n list_DuplicatedFaces = []\n list_DuplicataFaces = []\n list_DuplicatedFacesPeriodic = []\n try:\n list_connectivity = self.testcase['connectivity']\n except:\n list_connectivity = self.geometry.connectivity\n\n for dict_con in list_connectivity:\n list_DuplicatedFaces.append(dict_con['original'])\n list_DuplicataFaces.append(dict_con['clone'])\n try:\n list_DuplicatedFacesPeriodic.append(dict_con['periodic'])\n except:\n list_DuplicatedFacesPeriodic.append(False)\n\n if len(list_DuplicatedFaces) > 0:\n self.UseDuplicateFaces = True\n # ...\n\n self.list_DuplicataFaces = list_DuplicataFaces\n self.list_DuplicatedFaces = list_DuplicatedFaces\n self.list_DuplicatedFacesPeriodic = list_DuplicatedFacesPeriodic\n\n # ...\n self.meanConstraint = False\n nExtFaces = len(self.geometry.external_faces)\n nBCNeumann = len(self.bc_neumann)\n nDuplicatedFaces = len(self.list_DuplicataFaces)\n if (nExtFaces == nBCNeumann) \\\n or (nExtFaces == nDuplicatedFaces) \\\n or (nExtFaces == nBCNeumann+nDuplicatedFaces) \\\n or (nExtFaces == 0):\n self.meanConstraint = True\n# print (\" self.meanConstraint \", self.meanConstraint)\n # ...\n\n #-----------------------------------\n nrb = self.geometry[0]\n list_n = nrb.shape\n list_p = nrb.degree\n\n # ...\n try:\n n_gauss = kwargs['n_gauss']\n lpi_ordergl = n_gauss\n except:\n lpi_ordergl = list_p\n\n _system = matrix()\n _slv = solver(matrix=_system, solverInfo=self.solverInfo)\n #-----------------------------------\n\n #-----------------------------------\n if self.dim == 1:\n func_zero = lambda x : [ 0. 
]\n func_one = lambda x : [ 1. ]\n if self.dim == 2:\n func_zero = lambda x,y : [ 0. ]\n func_one = lambda x,y : [ 1. ]\n if self.dim == 3:\n func_zero = lambda x,y,z : [ 0. ]\n func_one = lambda x,y,z : [ 1. ]\n #-----------------------------------\n\n # ...\n try:\n V = kwargs['V']\n except:\n #-----------------------------------\n # space declaration\n #-----------------------------------\n list_DirFaces_V = list_DirFaces\n if self.Dirichlet:\n list_DirFaces_V = []\n for i in range(0,self.geometry.npatchs):\n list_DirFaces_V.append([])\n list_extFaces = self.geometry.external_faces\n for extFaces in list_extFaces:\n patch_id = extFaces[0]\n face_id = extFaces[1]\n list_DirFaces_V[patch_id].append(face_id)\n self.list_DirFaces_V = list_DirFaces_V\n\n if verbose:\n print(\"* DirFaces_V \", self.list_DirFaces_V)\n print(\"* DuplicatedFaces \", list_DuplicatedFaces)\n print(\"* DuplicataFaces \", list_DuplicataFaces)\n\n# print(\"* DuplicatedFaces \", list_DuplicatedFaces)\n# print(\"* DuplicataFaces \", list_DuplicataFaces)\n# print(\"* DuplicatedFacesPeriodic \", list_DuplicatedFacesPeriodic)\n\n V = space(geometry=self.geometry)\n V.dirichlet(faces=list_DirFaces_V)\n if self.UseDuplicateFaces:\n V.duplicate( faces_base = list_DuplicatedFaces \\\n , faces = list_DuplicataFaces \\\n , isPeriodic = list_DuplicatedFacesPeriodic)\n V.set_boundary_conditions()\n if self.withMetric:\n V.create_grids(type=\"legendre\", k=lpi_ordergl, metric=Metric)\n else:\n V.create_grids(type=\"legendre\", k=lpi_ordergl)\n #-----------------------------------\n # ...\n\n # ...\n try:\n GrV = kwargs['GrV'] # [V,V] graph\n except:\n #-----------------------------------\n # graph declaration\n #-----------------------------------\n GrV = graph(spaces=[V,V])\n #-----------------------------------\n # ...\n\n # ...\n #-----------------------------------\n # Matrix declaration\n #-----------------------------------\n Matrix_V = matrix(graph=GrV)\n MatProj_V = matrix(graph=GrV)\n #-----------------------------------\n # ...\n\n # ...\n if self.Dirichlet:\n try:\n W = kwargs['W']\n except:\n #-----------------------------------\n # space declaration\n #-----------------------------------\n W = space(geometry=self.geometry)\n W.dirichlet(faces=[[]] * self.geometry.npatchs)\n if self.UseDuplicateFaces:\n W.duplicate( faces_base = list_DuplicatedFaces \\\n , faces = list_DuplicataFaces )\n W.set_boundary_conditions()\n# W.create_grids(type=\"legendre\", k=lpi_ordergl)\n W.grids = V.grids\n #-----------------------------------\n # ...\n\n # ...\n if self.Dirichlet:\n try:\n GrVW = kwargs['GrVW']\n except:\n #-----------------------------------\n # graph declaration\n #-----------------------------------\n GrVW = graph(spaces=[V,W])\n #-----------------------------------\n # ...\n\n # ...\n if self.Dirichlet:\n #-----------------------------------\n # Matrix declaration\n #-----------------------------------\n Matrix_VW = matrix(graph=GrVW)\n #-----------------------------------\n # ...\n\n # ...\n try:\n func_u = self.testcase['u']\n except:\n func_u = func_zero\n\n try:\n U_V = kwargs['U_V']\n except:\n U_V = field(space=V, func = func_u)\n\n if self.Dirichlet:\n try:\n U_W = kwargs['U_W']\n except:\n U_W = field(space=W, func = func_u)\n # ...\n\n # ...\n try:\n func_f = self.testcase['f']\n except:\n func_f = func_zero\n\n try:\n F_V = kwargs['F_V']\n except:\n F_V = field(space=V, func = func_f)\n\n if self.Dirichlet:\n func_w = func_one\n try:\n G_W = kwargs['G_W']\n except:\n G_W = field(space=W, func = func_w)\n # 
...\n\n # ... Temp field\n T_V = field(space=V, func = func_zero)\n # ...\n\n # ...\n if self.Dirichlet:\n try:\n U_W = kwargs['U_W']\n except:\n U_W = field(space=W, func = func_u)\n # ...\n\n # ...\n try:\n Projector_V = kwargs['Projector_V']\n except:\n if V.dim == 1:\n func_one = lambda x : [ 1. ]\n if V.dim == 2:\n func_one = lambda x,y : [ 1. ]\n if V.dim == 3:\n func_one = lambda x,y,z : [ 1. ]\n Projector_V = oper(spaces=[V, V], type=MASS , func=func_one)\n Projector_V.addto(MatProj_V)\n # ...\n\n # ...\n try:\n trial = kwargs['trial']\n except:\n trial = V\n # ...\n\n # ...\n try:\n M_V = kwargs['M_V']\n except:\n if self.withMass:\n M_V = oper(spaces=[V, trial], type=MASS , func=func_mass)\n M_V.addto(Matrix_V)\n # ...\n if self.withTAdvection:\n Mdw_V = oper(spaces=[V, trial], type=MASS , func=func_dw)\n Mdw_V.addto(Matrix_V)\n # ...\n\n # TODO what to do with the trial space for VW?\n try:\n M_VW = kwargs['M_VW']\n except:\n if self.withMass and self.Dirichlet:\n M_VW = oper(spaces=[V, W], type=MASS , func=func_mass)\n M_VW.addto(Matrix_VW)\n # ...\n\n # ...\n try:\n A_V = kwargs['A_V']\n except:\n if self.withAdvection:\n A_V = oper(spaces=[V, trial], type=ADVECTION , func=func_adv)\n A_V.addto(Matrix_V)\n\n try:\n A_VW = kwargs['A_VW']\n except:\n if self.withAdvection and self.Dirichlet:\n A_VW = oper(spaces=[V, W], type=ADVECTION , func=func_adv)\n A_VW.addto(Matrix_VW)\n # ...\n\n # ...\n try:\n At_V = kwargs['At_V']\n except:\n if self.withTAdvection:\n At_V = oper(spaces=[V, trial], type=ADVECTION , func=func_tadv, transpose=True)\n At_V.addto(Matrix_V)\n\n try:\n At_VW = kwargs['At_VW']\n except:\n if self.withTAdvection and self.Dirichlet:\n At_VW = oper(spaces=[V, W], type=ADVECTION , func=func_tadv, transpose=True)\n At_VW.addto(Matrix_VW)\n # ...\n\n # ...\n try:\n S_V = kwargs['S_V']\n except:\n if self.withStiffness:\n S_V = oper(spaces=[V, trial], type=STIFFNESS , func=func_stiff)\n S_V.addto(Matrix_V)\n\n try:\n S_VW = kwargs['S_VW']\n except:\n if self.withStiffness and self.Dirichlet:\n S_VW = oper(spaces=[V, W], type=STIFFNESS , func=func_stiff)\n S_VW.addto(Matrix_VW)\n # ...\n\n # ...\n try:\n B_V = kwargs['B_V']\n except:\n if self.withD2:\n B_V = oper(spaces=[V, trial], type=SECOND_DERIV, func=func_D2)\n B_V.addto(Matrix_V)\n\n try:\n B_VW = kwargs['B_VW']\n except:\n if self.withD2 and self.Dirichlet:\n B_VW = oper(spaces=[V, W], type=SECOND_DERIV, func=func_D2)\n B_VW.addto(Matrix_VW)\n # ...\n\n # ...\n self.list_G_V_BC_faces = []\n try:\n V_BC = kwargs['V_BC']\n except:\n if self.Neumann:\n list_V_BC = []\n list_G_V_BC = []\n #-----------------------------------\n # boundary space declaration\n #-----------------------------------\n for key, func_g in self.bc_neumann.items():\n patch_id = int(key[0])\n face_id = int(key[1])\n\n self.list_G_V_BC_faces.append([patch_id,face_id])\n\n nrb = self.geometry[patch_id]\n\n axis, side = face_to_bc(face_id)\n\n nrb_bnd = nrb.extract_face(axis, side)\n geo = cad_geometry()\n geo.append(nrb_bnd)\n\n lpi_ordergl = nrb_bnd.degree\n\n # ...\n V_BC = space(geometry=geo)\n V_BC.dirichlet(faces=[[]])\n V_BC.set_boundary_conditions()\n V_BC.create_grids(type=\"legendre\", k=lpi_ordergl)\n # ...\n\n # ...\n g = boundary_function(self.geometry\\\n , patch_id\\\n , face_id\\\n , func_g)\n G_V_BC = field(space=V_BC, pfunc=g)\n # ...\n\n # ...\n list_V_BC.append(V_BC)\n list_G_V_BC.append(G_V_BC)\n # ...\n #-----------------------------------\n # ...\n\n # ...\n try:\n Mean_V = kwargs['Mean_V']\n except:\n if 
self.meanConstraint:\n if self.dim == 1:\n func_one = lambda x : [ 1. ]\n if self.dim == 2:\n func_one = lambda x,y : [ 1. ]\n if self.dim == 3:\n func_one = lambda x,y,z : [ 1. ]\n Mean_V = field(space=V, func = func_one)\n # ...\n\n # ...\n try:\n N_U = kwargs['N_U']\n except:\n #-----------------------------------\n # NORM\n #-----------------------------------\n if self.Dirichlet:\n N_U = norm(field=U_W, type=NORM_L2, exact=func_u)\n else:\n N_U = norm(field=U_V, type=NORM_L2, exact=func_u)\n #-----------------------------------\n # ...\n\n #-----------------------------------\n # Save access for data\n #-----------------------------------\n self._slv = _slv\n self._system = _system\n self.Matrix_V = Matrix_V\n self.MatProj_V = MatProj_V\n self.GrV = GrV\n\n self.spaces = []\n self.norms = []\n self.fields = []\n self.graphs = [self.GrV]\n self.matrices = [self.Matrix_V]\n\n self.V = V\n self.spaces += [self.V]\n\n if self.withMetric:\n self.Metric = Metric\n\n if self.Dirichlet:\n self.W = W\n self.GrVW = GrVW\n self.spaces += [self.W]\n self.graphs += [self.GrVW]\n\n self.N_U = N_U\n self.norms += [self.N_U]\n\n # ... rhs\n self.F_V = F_V\n self.fields += [self.F_V]\n # ... L2 projector\n self.Projector_V = Projector_V\n\n if self.Dirichlet:\n self.G_W = G_W\n # insteed fo assembling this field, we will rise it from the\n # boundary\n# self.fields += [self.G_W]\n # ...\n\n # ... unknown\n self.U_V = U_V\n self.fields += [self.U_V]\n if self.Dirichlet:\n self.U_W = U_W\n# self.fields += [self.U_W]\n # ...\n\n # ... Temp field\n self.T_V = T_V\n # ...\n\n # ... Mass operator\n if self.withMass:\n self.M_V = M_V\n if self.withTAdvection:\n self.Mdw_V = Mdw_V\n if self.Dirichlet:\n self.M_VW = M_VW\n # ...\n\n # ... Advection operator\n if self.withAdvection:\n self.A_V = A_V\n if self.Dirichlet:\n self.A_VW = A_VW\n # ...\n\n # ... Transpose Advection operator\n if self.withTAdvection:\n self.At_V = At_V\n if self.Dirichlet:\n self.At_VW = At_VW\n # ...\n\n # ... Stiffness operator\n if self.withStiffness:\n self.S_V = S_V\n if self.Dirichlet:\n self.S_VW = S_VW\n # ...\n\n # ... Second Derivative operator\n if self.withD2:\n self.B_V = B_V\n if self.Dirichlet:\n self.B_VW = B_VW\n # ...\n\n # ...\n if self.Dirichlet:\n self.Matrix_VW = Matrix_VW\n self.matrices += [self.Matrix_VW]\n # ...\n\n # ...\n if self.Neumann:\n self.list_V_BC = list_V_BC\n for S in list_V_BC:\n self.spaces+= [S]\n\n self.list_G_V_BC= list_G_V_BC\n for G in list_G_V_BC:\n self.fields+= [G]\n # ...\n\n # ... 
Mean Constraint in the case of Pure Neumann and Periodic bc\n if self.meanConstraint:\n self.Mean_V = Mean_V\n self.fields += [self.Mean_V]\n # ...\n\n self.forceAssembly = False\n self.Assembled = False\n\n #-----------------------------------", "title": "" }, { "docid": "616dcff6eec4e549342268ce1cfb980b", "score": "0.5114144", "text": "def train(self):", "title": "" }, { "docid": "616dcff6eec4e549342268ce1cfb980b", "score": "0.5114144", "text": "def train(self):", "title": "" }, { "docid": "286828bd63348181bfb65a5621cb59ee", "score": "0.50948656", "text": "def sample_trajectory(pi, model, env, horizon=150, rolloutSize=50, render=False):\n if render:\n env.setRender(True)\n else:\n env.setRender(False)\n\n ac = env.action_space.sample() # not used, just so we have the datatype\n new = True # marks if we're on first timestep of an episode\n ob = env.reset()\n cF = env.getContactForce()\n num_options = pi.num_options\n cur_ep_ret = 0 # return in current episode\n cur_ep_len = 0 # len of current episode\n ep_rets = [] # returns of completed episodes in this segment\n ep_lens = [] # lengths of ...\n batch_size = int(horizon * rolloutSize)\n # Initialise history of arrays\n obs = np.array([ob for _ in range(batch_size)])\n cFs = np.array([cF for _ in range(batch_size)])\n rews = np.zeros(batch_size, 'float32')\n news = np.zeros(batch_size, 'int32')\n opts = np.zeros(batch_size, 'int32')\n activated_options = np.zeros((batch_size, num_options), 'float32')\n\n acs = np.array([ac for _ in range(batch_size)])\n model.currentMode = env.mode\n option, active_options_t = pi.get_option(ob)\n\n opt_duration = [[] for _ in range(num_options)]\n sample_index = 0\n curr_opt_duration = 0\n\n success = 0\n successFlag = False\n while sample_index < batch_size:\n ac = pi.act(True, ob, option)\n if math.isnan(ac[0]) or math.isnan(ac[1]):\n #print(\"Resolving NAN !! 
\")\n continue\n obs[sample_index] = ob\n news[sample_index] = new\n opts[sample_index] = option\n acs[sample_index] = ac\n activated_options[sample_index] = active_options_t\n\n # Step in the environment\n ob, rew, new, _ = env.step(ac)\n rews[sample_index] = rew\n cFs[sample_index] = env.getContactForce()\n #print(cFs[sample_index])\n curr_opt_duration += 1\n # check if current option is about to end in this state\n nbeta = pi.get_tpred(ob)\n tprob = nbeta[option]\n\n if render:\n env.render()\n\n # Check for termination\n if tprob >= pi.term_prob:\n opt_duration[option].append(curr_opt_duration)\n curr_opt_duration = 0.\n model.currentMode = model.getNextMode(ob)\n option, active_options_t = pi.get_option(ob)\n\n cur_ep_ret += rew\n cur_ep_len += 1\n\n dist = env.getGoalDist()\n if np.linalg.norm(dist) < 0.005 and not successFlag:\n success = success + 1\n successFlag = True\n\n sample_index += 1\n\n if new or (sample_index > 0 and sample_index % horizon == 0):\n render = False\n env.setRender(False)\n opt_duration[option].append(curr_opt_duration)\n curr_opt_duration = 0.\n ep_rets.append(cur_ep_ret)\n ep_lens.append(cur_ep_len)\n cur_ep_ret = 0\n cur_ep_len = 0\n ob = env.reset()\n model.currentMode = env.mode\n option, active_options_t = pi.get_option(ob)\n successFlag = False\n new = True\n\n env.close()\n print(\"Selected options\")\n for o in range(0, num_options):\n print(\"Option: \", o, \" - \", sum(opt_duration[o]))\n\n print(\"\\n Maximum Reward this iteration: \", max(ep_rets), \" \\n\")\n rollouts = {\"ob\": obs, \"rew\": rews, \"new\": news, \"ac\": acs, \"opts\": opts, \"ep_rets\": ep_rets, \"ep_lens\": ep_lens, \"opt_dur\": opt_duration, \"activated_options\": activated_options, \"success\": success, \"contactF\": cFs}\n\n return rollouts", "title": "" }, { "docid": "efc20144a6ca92706653364c876660c1", "score": "0.5094545", "text": "def _calculate_trajectories(self):\n print \"calculate initial trajectories\"\n trajectories = []\n for data in self.model_data:\n trajectories.append(pyBalloon.pyb_traj.calc_movements(data,\n self.loc0, BALLOON))\n pyBalloon.pyb_io.save_kml(PARAMETERS['kml_file'], trajectories)", "title": "" }, { "docid": "a97d17f00cd4b3dc18f749efca4b0005", "score": "0.50894016", "text": "def sample_trajectories(num_traj, num_steps, env, controller, scaler, transformer, scaler2, transformer2, show_visual=False, first_run=False, normalize_inputs=True, normalize_outputs=True):\n return_vec = np.array([])\n reward_dict = defaultdict(list)\n for traj_num in range(num_traj):\n ob = env.reset()\n obs, next_obs, acs, rewards, returns, reward_to_go = [], [], [], [], [], []\n steps = 0\n ret = 0\n while True:\n if(show_visual or traj_num==0):\n env.render()\n obs.append(ob)\n if(first_run):\n ac = env.action_space.sample() \n if ac[0]<0:\n ac[0] = 0\n else:\n inputs = np.expand_dims(ob.astype(np.float32),0) \n inputs = inputs + 1e-3*np.random.standard_normal(inputs.shape[1:])\n # normalize inputs\n if(normalize_inputs):\n inputs = transformer.transform(inputs)\n inputs = scaler.transform(inputs)\n ac = controller(inputs)[0]\n ac = np.array(ac)\n acs.append(ac)\n #if ac[0]<0:\n # ac[0] = 0\n if(np.any(np.isnan(ac))):\n print('nan error')\n pdb.set_trace()\n if(not isinstance(env.action_space, gym.spaces.Discrete)):\n ac = np.minimum(ac,env.action_space.high)\n ac = np.maximum(ac,env.action_space.low)\n action = ac\n if(normalize_outputs):\n action = scaler2.inverse_transform(action)\n action = transform2.inverse_transform(action)\n ob, reward, done, info = 
env.step(action)\n\n reward = -1e1*ob[0]**2 # custom: penalize deviation from x=0\n reward = -1e1*ob[2]**2 # custom: penalize deviation from x=0\n reward += -1e1*ob[4]**2\n reward += -1e1*ob[5]**2\n #reward += -1e1*(ob[1]-0.1)**2\n reward += 100 # reward staying in episode\n next_obs.append(ob)\n rewards.append(reward)\n ret += reward\n returns.append(ret)\n steps += 1\n\n if done or steps>num_steps:\n print(\"Episode {} finished after {} timesteps\".format(traj_num, steps))\n break\n\n # backwards pass to calculate reward-to-go\n reward_to_go = np.full(len(rewards),np.nan)\n discount = 0.2\n reward_to_go[-1] = rewards[-1]\n for i in range(2, reward_to_go.shape[0]+1):\n reward_to_go[-(i)] = rewards[-(i)] + discount*reward_to_go[-(i-1)] \n for idx in range(len(reward_to_go)):\n reward_dict[str(idx)].append(reward_to_go[idx])\n\n print('run return: {}'.format(returns[-1]))\n if(traj_num==0):\n trajectories = {\"observations\" : np.array(obs),\n \"next_observations\": np.array(next_obs),\n \"rewards\" : np.array(rewards),\n \"actions\" : np.array(acs),\n \"returns\" : np.array(returns),\n \"reward_to_go\" : np.array(reward_to_go),\n \"step\" : np.arange(steps)}\n else:\n traj = {\"observations\" : np.array(obs),\n \"next_observations\": np.array(next_obs),\n \"rewards\" : np.array(rewards),\n \"actions\" : np.array(acs),\n \"returns\" : np.array(returns),\n \"reward_to_go\" : np.array(reward_to_go),\n \"step\" : np.arange(steps)}\n for k in traj:\n trajectories[k] = np.append(trajectories[k],traj[k],axis=0)\n return_vec = np.append(return_vec, returns[-1])\n\n for k in reward_dict:\n reward_dict[k] = np.array(reward_dict[k])\n return trajectories, return_vec, reward_dict", "title": "" }, { "docid": "67c5d1d409843ce6b3eaa8aa377943ae", "score": "0.50839204", "text": "def __init__(self, N_X: int, N_Y: int, \n t: float, u: float, mu: float=None, t_ij=None):\n self.N_X = N_X \n self.N_Y = N_Y \n\n self.t = t\n self.t_ij = t_ij or np.ones((N_Y, N_X), dtype='complex128')\n self.u = u \n self.mu = mu \n\n self.lattice = [[ { \n 'i': i, \n 'j': j, \n 'c-up': CreationOperator('up', self.N_Y*i+j, self.N_Y*self.N_X), \n 'c-down': CreationOperator('down', self.N_Y*i+j, self.N_Y*self.N_X), \n 'a-up': AnnihilationOperator('up', self.N_Y*i+j, self.N_Y*self.N_X), \n 'a-down': AnnihilationOperator('down', self.N_Y*i+j, self.N_Y*\n self.N_X) \n } for j in range(self.N_X) ] for i in range(self.N_Y) ]\n\n self.Hs = [ \n # Get horizontal, even hopping terms\n self._sum_pair_operators(lambda i, j: 1-j%2, lambda i, j: (i, j+1)), \n # Get horizontal, odd hopping terms \n self._sum_pair_operators(lambda i, j: j%2, lambda i, j: (i, j+1)), \n # Get vertical, even hopping terms \n self._sum_pair_operators(lambda i, j: 1-i%2, lambda i, j: (i+1, j)), \n # Get vertical, odd hopping terms \n self._sum_pair_operators(lambda i, j: i%2, lambda i, j: (i+1, j)), \n # Get interacting U term \n self._sum_interacting_term()\n ]\n\n self.H = sum(self.Hs)", "title": "" }, { "docid": "88ab7bd815e9a08a39e490f748853eec", "score": "0.5069024", "text": "def functional_test():\n print(\"Expected results: \", grids[1]['expected'])\n print(\"output: \", spiral(grids[1]['grid']))\n print()\n print(\"Expected results: \", grids[2]['expected'])\n print(\"output: \", spiral(grids[2]['grid']))\n print()\n print(\"Expected results: \", grids[3]['expected'])\n print(\"output: \", spiral(grids[3]['grid']))", "title": "" }, { "docid": "0874ff421a9b282c9f05c2fd41d80939", "score": "0.5066498", "text": "def test_artefact_kernik(self):\n\n simple_protocol 
= protocols.VoltageClampProtocol([\n protocols.VoltageClampStep(voltage=-80, duration=1000),\n protocols.VoltageClampStep(voltage=-40, duration=1000)])\n\n fig, axs = plt.subplots(4, 1, sharex=True, figsize=(12, 8))\n\n for i, alpha in enumerate([0, .4, .7, .9]):\n if i == 0:\n baseline_model = kernik.KernikModel()\n else:\n baseline_model = kernik.KernikModel(is_exp_artefact=True, exp_artefact_params={'alpha': alpha})\n tr = baseline_model.generate_response(simple_protocol,\n is_no_ion_selective=False)\n if i == 0:\n axs[0].plot(tr.t, tr.command_voltages, 'k')\n axs[0].plot(tr.t, tr.y)\n axs[1].plot(tr.t, tr.current_response_info.get_current_summed())\n axs[2].plot(tr.t, tr.current_response_info.get_current('I_Na'), label=f'alpha={alpha}')\n if i != 0:\n axs[3].plot(tr.t, tr.current_response_info.get_current('I_seal_leak'), label=f'alpha={alpha}')\n\n axs[2].legend()\n plt.show()\n\n #plt.plot(baseline_model.t, baseline_model.y[0, :])\n #plt.plot(baseline_model.t, baseline_model.y[26, :])\n #plt.show()\n\n plt.show()", "title": "" }, { "docid": "6ccce2a8a4026e23be6189b40a3e273e", "score": "0.50654083", "text": "def test_18(self):\n d = {'EBstep': -1,\n 'L1item': 'L1_J100',\n 'chainCounter': 727,\n 'chainName': '2hypochain',\n 'chainParts': [{'L1item': '',\n 'TLA': '1i2c500m700TLA',\n 'addInfo': [],\n 'bConfig': ['split'],\n 'bMatching': [],\n 'bTag': 'bmv2c2060',\n 'bTracking': '',\n 'calib': 'em',\n 'chainPartName': 'j175_bmv2c2060_split',\n 'cleaning': 'noCleaning',\n 'dataScouting': '',\n 'dataType': 'tc',\n 'etaRange': '0eta320',\n 'extra': '',\n 'jetCalib': 'subjes',\n 'multiplicity': '1',\n 'recoAlg': 'a4',\n 'scan': 'FS',\n 'signature': 'Jet',\n 'threshold': '0',\n 'topo': [],\n 'trigType': 'j'},\n {'L1item': '',\n 'TLA': '',\n 'addInfo': [],\n 'bConfig': ['split'],\n 'bMatching': [],\n 'bTag': 'bmv2c2050',\n 'bTracking': '',\n 'calib': 'em',\n 'chainPartName': 'j50_bmv2c2050_split',\n 'cleaning': 'noCleaning',\n 'dataScouting': '',\n 'dataType': 'tc',\n 'etaRange': '0eta320',\n 'extra': '',\n 'jetCalib': 'subjes',\n 'multiplicity': '1',\n 'recoAlg': 'a4',\n 'scan': 'FS',\n 'signature': 'Jet',\n 'threshold': '50',\n 'topo': [],\n 'trigType': 'j'}],\n 'groups': ['RATE:MultiBJet', 'BW:Bjet'],\n 'signature': 'Jet',\n 'signatures': '',\n 'stream': ['Main'],\n 'topo': [],\n 'topoStartFrom': False,\n 'topoThreshold': None}\n \n generateHLTChainDef(d)", "title": "" }, { "docid": "ff148eab0bf0cb7c69f4b5eea743b2ad", "score": "0.5046788", "text": "def test_experiments_multiaxisgonio():\n # beam along +z\n gonio_1 = GoniometerFactory.from_dict(\n {\n \"axes\": [\n [\n 1.0 / sqrt(2.0),\n 0.0,\n -1.0 / sqrt(2.0),\n ],\n [1.0, 0.0, 0.0],\n ],\n \"angles\": [0.0, 0.0],\n \"names\": [\"GON_PHI\", \"GON_OMEGA\"],\n \"scan_axis\": 1,\n }\n )\n gonio_2 = GoniometerFactory.from_dict(\n {\n \"axes\": [\n [\n 1.0 / sqrt(2.0),\n 0.0,\n -1.0 / sqrt(2.0),\n ],\n [1.0, 0.0, 0.0],\n ],\n \"angles\": [0.0, 0.0],\n \"names\": [\"GON_PHI\", \"GON_OMEGA\"],\n \"scan_axis\": 0,\n }\n )\n\n experiments = ExperimentList()\n for g in [gonio_1, gonio_2]:\n experiments.append(\n Experiment(\n beam=Beam(s0=(0.0, 0.0, 2.0)),\n goniometer=g,\n scan=Scan(image_range=[1, 90], oscillation=[0.0, 1.0]),\n )\n )\n\n return experiments", "title": "" }, { "docid": "10d342c92727e48097935a8cd2fdc489", "score": "0.50422406", "text": "def exercise():\n\n uc = uctbx.unit_cell( \"96 74 77 90 113 90\")\n xs = crystal.symmetry(uc, \"C2\")\n trlw = reticular_twin_laws( xs, max_index=2, max_delta=1.5 )\n #trlw.show()\n m = [2, 1, 0,0, 1, 
0,0, 0, 1]\n for ii, jj in zip( trlw.derived_laws[0].m, m):\n assert(ii==jj)\n\n tl = [-1, 0, 0, 0, -1, 0,-1, 0, 1]\n dl = trlw.derived_laws[0].twin_laws[0]\n for ii, jj in zip(tl,dl):\n assert(ii==jj)\n\n uc = uctbx.unit_cell( \"10.079 10.079 48.409 90 90 120\" )\n xs = crystal.symmetry( uc, \"R32\")\n rtl = reticular_twin_laws( xs , max_index=8, max_delta=1.5)\n for tli in rtl.derived_laws:\n for ttl in tli.twin_laws:\n assert(ttl.determinant()==1) #check that these twin laws have det equal to 1", "title": "" }, { "docid": "ca50f2ac29602900038b4ea1f4aa14d0", "score": "0.5036874", "text": "def test_inner_cv():\n _experiment(3, None)", "title": "" }, { "docid": "dc1e604b16631f9455fa03d2099f0cef", "score": "0.50362664", "text": "def GenSinRoutineTraj(self, seq, ax, amplitudes, period, j_0):\n\n traj = JointTrajectory()\n traj.header.seq = seq\n traj.header.frame_id = 'base_link'\n traj.joint_names = ['x','y','z','dummy','dummy1','yaw','joint1','joint2','joint3','joint4','joint5']\n traj.header.stamp = rospy.get_rostime()\n \n Hz = 100 # Number of traj points per second as required by the controller\n T = float(period) # casting to avoid possible error that could be \n w = 2*math.pi*(1/T) # \n small_pause = 1.0 # Pause between fowards and backwards movements on same degree of freedom\n big_pause = 1.0 # Pause between movements ivolving different degrees of freedom\n points_no = int((T + small_pause + big_pause)*Hz) #\n t_s = np.linspace(0, period+small_pause+big_pause, points_no) #\n\n last_t = t_s[-1]\n count = 0.0\n\n for axis in ax:\n #v = [] # Used for debugging\n #s = [] # Used for debugging\n #a = [] # Used for debugging\n \n k = ax.index(axis)\n A = float(amplitudes[k])\n\n if not(axis == 'x' or axis == 'y' or axis == 'z' or axis == 'yaw' or axis == 'man_yaw', axis == 'man_pitch', axis == 'man_pitch_base'):\n continue\n\n for i in range(len(t_s)):\n tmp_point = JointTrajectoryPoint()\n\n if (t_s[i] >= T/2.0 and t_s[i] <= (T/2.0 + small_pause)): # Setting values for the points of desired trajectory\n s_s = 2.0*A/w \n v_s = 0.0 # which include position, speed and acceleration\n a_s = 0.0 # \n # \n elif t_s[i] >= (T + small_pause): # \n s_s = 0.0 # \n v_s = 0.0 # \n a_s = 0.0 # \n # \n elif t_s[i] < T/2.0: #\n v_s = A * math.sin( w*t_s[i] ) # \n s_s =-A/w * math.cos( w*t_s[i] ) + A/w # \n a_s = A*w * math.cos( w*t_s[i] ) # \n #\n elif t_s[i] > (T/2.0 + small_pause) and t_s[i] < (T + small_pause): # \n v_s = A * math.sin( w*(t_s[i] - small_pause)) # \n s_s =-A/w * math.cos( w*(t_s[i] - small_pause)) + A/w # \n a_s = A*w * math.cos( w*(t_s[i] - small_pause)) #\n \n if axis == 'x':\n # x y z dummy dummy1 yaw joint1 joint2 joint3 joint4 joint5 \n tmp_point.positions = [j_0[0]+s_s, j_0[1] , j_0[2] , j_0[3], j_0[4], j_0[5] , j_0[6], j_0[7], j_0[8], j_0[9], j_0[10]]\n tmp_point.velocities = [v_s , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ]\n tmp_point.accelerations = [a_s , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ]\n elif axis == 'y':\n # x y z dummy dummy1 yaw joint1 joint2 joint3 joint4 joint5 \n tmp_point.positions = [j_0[0] , s_s+j_0[1], j_0[2] , j_0[3], j_0[4], j_0[5] , j_0[6], j_0[7], j_0[8], j_0[9], j_0[10]]\n tmp_point.velocities = [0.0 , v_s , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ]\n tmp_point.accelerations = [0.0 , a_s , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ]\n \n elif axis == 'z':\n # x y z dummy dummy1 yaw joint1 joint2 joint3 joint4 joint5 \n tmp_point.positions = [j_0[0] , j_0[1] , s_s+j_0[2], j_0[3], j_0[4], j_0[5] 
, j_0[6], j_0[7], j_0[8], j_0[9], j_0[10]]\n tmp_point.velocities = [0.0 , 0.0 , v_s , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ]\n tmp_point.accelerations = [0.0 , 0.0 , a_s , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ]\n\n elif axis == 'yaw':\n # x y z dummy dummy1 yaw joint1 joint2 joint3 joint4 joint5 \n tmp_point.positions = [j_0[0] , j_0[1] , j_0[2] , j_0[3], j_0[4], s_s+j_0[5], j_0[6], j_0[7], j_0[8], j_0[9], j_0[10]]\n tmp_point.velocities = [0.0 , 0.0 , 0.0 , 0.0 , 0.0 , v_s , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ]\n tmp_point.accelerations = [0.0 , 0.0 , 0.0 , 0.0 , 0.0 , a_s , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ]\n\n elif axis == 'man_yaw':\n # x y z dummy dummy1 yaw joint1 joint2 joint3 joint4 joint5 \n tmp_point.positions = [j_0[0] , j_0[1] , j_0[2] , j_0[3], j_0[4], j_0[5], j_0[6], j_0[7], s_s+j_0[8], j_0[9], j_0[10]]\n tmp_point.velocities = [0.0 , 0.0 , 0.0 , 0.0 , 0.0 , v_s , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ]\n tmp_point.accelerations = [0.0 , 0.0 , 0.0 , 0.0 , 0.0 , a_s , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ]\n\n elif axis == 'man_pitch':\n # x y z dummy dummy1 yaw joint1 joint2 joint3 joint4 joint5 \n tmp_point.positions = [j_0[0] , j_0[1] , j_0[2] , j_0[3], j_0[4], j_0[5], j_0[6], s_s+j_0[7], j_0[8], j_0[9], j_0[10]]\n tmp_point.velocities = [0.0 , 0.0 , 0.0 , 0.0 , 0.0 , v_s , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ]\n tmp_point.accelerations = [0.0 , 0.0 , 0.0 , 0.0 , 0.0 , a_s , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ]\n\n elif axis == 'man_pitch_base':\n # x y z dummy dummy1 yaw joint1 joint2 joint3 joint4 joint5 \n tmp_point.positions = [j_0[0] , j_0[1] , j_0[2] , j_0[3], j_0[4], j_0[5], s_s+j_0[6], j_0[7], j_0[8], j_0[9], j_0[10]]\n tmp_point.velocities = [0.0 , 0.0 , 0.0 , 0.0 , 0.0 , v_s , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ]\n tmp_point.accelerations = [0.0 , 0.0 , 0.0 , 0.0 , 0.0 , a_s , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 ]\n \n tmp_point.time_from_start.secs = int( t_s[i] + last_t * count)\n tmp_point.time_from_start.nsecs = int((t_s[i] + last_t * count)*1000000000 - int(t_s[i] + last_t * count)*1000000000)\n traj.points.append(tmp_point)\n \n #print(\"t_s :\", t_s[i]) # Used for debugging\n #print(\"last_t :\", last_t) # Used for debugging\n #print(\"count :\", count) # Used for debugging\n #print(\"secs :\", tmp_point.time_from_start.secs) # Used for debugging\n #print(\"nsecs :\", tmp_point.time_from_start.nsecs) # Used for debugging\n\n \n #v.append(v_s) # Used for debugging\n #s.append(s_s) # Used for debugging\n #a.append(a_s) # Used for debugging\n #print(t_s[i]) # Used for debugging \n\n count = count + 1.0 \n\n #plt.figure(int(count)) # Used for debugging\n #plt.plot(t_s, s, t_s, v, t_s, a) # Used for debugging\n #plt.axis('equal') # Used for debugging\n #plt.show() # Used for debugging\n\n return traj", "title": "" }, { "docid": "508eea8d2dc041e3f191ff3955cdbfa0", "score": "0.5035858", "text": "def evaluate(self, edict):\n # can we run SupervisedLearning.evaluate? Should this be an evaluateLocal?\n ## set up the results dict with the correct dimensionality\n ### actually, let's wait for the first sample to come in.\n self.raiseADebug('Evaluating interpolated ROM ...')\n results = None\n ## TODO set up right for ND??\n forcedMax = self._maxCycles if self._maxCycles is not None else np.inf\n numMacro = min(len(self._macroSteps), forcedMax)\n macroIndexValues = []\n for m, (macroStep, model) in enumerate(sorted(self._macroSteps.items(), key=lambda x: x[0])):\n if m + 1 > numMacro:\n break\n # m is an index of the macro step, in order of the macro values (e.g. 
in order of years)\n # macroStep is the actual macro step value (e.g. the year)\n # model is the ClusterROM instance for this macro step\n macroIndexValues.append(macroStep)\n self.raiseADebug(f' ... evaluating macro step \"{macroStep}\" ({m+1} / {numMacro})')\n subResult = model.evaluate(edict) # TODO same input for all macro steps? True for ARMA at least...\n indexMap = subResult.get('_indexMap', {})\n # if not set up yet, then frame results structure\n if results is None:\n results = {}\n finalIndexMap = indexMap # in case every rlz doesn't use same order, which would be lame\n pivotID = model._templateROM.pivotParameterID\n indices = set([pivotID, self._macroParameter])\n for indexes in finalIndexMap.values():\n indices.update(set(indexes))\n #pivotVals = subResult[pivotID]\n #numPivot = len(pivotVals)\n for target, values in subResult.items():\n # if an index, just set the values now # FIXME assuming always the same!\n ## FIXME thing is, they're not always the same, we're clustering, so sometimes there's diff num days!\n ## TODO for now, we simply require using a classifier that always has the same number of entries.\n if target in [pivotID, '_indexMap'] or target in indices:\n results[target] = values\n else:\n # TODO there's a strange behavior here where we have nested numpy arrays instead of\n # proper matrices sometimes; maybe it has to be this way for unequal clusters\n # As a result, we use the object dtype, onto which we can place a whole numpy array.\n results[target] = np.zeros([numMacro] + list(values.shape), dtype=object)\n # END setting up results structure, if needed\n # FIXME reshape in case indexMap is not the same as finalIndexMap?\n for target, values in subResult.items():\n if target in [pivotID, '_indexMap'] or target in indices:# indexMap:\n continue\n indexer = tuple([m] + [None]*len(values.shape))\n try:\n results[target][indexer] = values\n except ValueError:\n self.raiseAnError(RuntimeError, 'The shape of the histories along the pivot parameter is not consistent! Try using a clustering classifier that always returns the same number of clusters.')\n results['_indexMap'] = {} #finalIndexMap\n for target, vals in results.items():\n if target not in indices and target not in ['_indexMap']: # TODO get a list of meta vars?\n default = [] if vals.size == 1 else [pivotID]\n results['_indexMap'][target] = [self._macroParameter] + list(finalIndexMap.get(target, default))\n results[self._macroParameter] = macroIndexValues\n return results", "title": "" }, { "docid": "0f136b73f4404ddd880e683bd259c82f", "score": "0.50345784", "text": "def __iter__(self):\n return iter(self.trajs)", "title": "" }, { "docid": "89db482fc26b10feea8c0fe58467adeb", "score": "0.50141114", "text": "def classify_complex_trajectory(traj,experiment):\n show_complex_trajectory(complex_trajectories[i],experiment,50)\n possible_answers = ['r','w','t','m','q','e']\n inp = ''\n while(not inp in possible_answers):\n inp= raw_input(\"\"\"how would you classify this image: ramified, withdrawal, transitional,\n motile, error (r/w/t/m/e)? 
Press q to see the sequence again\\n\"\"\")\n \n if inp=='q':\n inp = classify_complex_trajectory(traj,experiment)\n return inp", "title": "" }, { "docid": "5673b36d312fc9fba76f347d0fd28b86", "score": "0.50132704", "text": "def implementation(self):", "title": "" }, { "docid": "84f4f2177b68931bc77087ed9eab420a", "score": "0.5013004", "text": "def simulate(self) -> None:", "title": "" }, { "docid": "a280af291eb7ee3a4be49f4da39da9a8", "score": "0.5007999", "text": "def compute_rave_trajectory(self, robot):\n\n traj = orpy.RaveCreateTrajectory(robot.GetEnv(), \"\")\n spec = robot.GetActiveConfigurationSpecification('cubic')\n spec.AddDerivativeGroups(1, False)\n spec.AddDerivativeGroups(2, True)\n\n traj.Init(spec)\n deltas = [0]\n for i in range(len(self.ss_waypoints) - 1):\n deltas.append(self.ss_waypoints[i + 1] - self.ss_waypoints[i])\n if len(self.ss_waypoints) == 1:\n q = self.eval(0)\n qd = self.evald(0)\n qdd = self.evaldd(0)\n traj.Insert(traj.GetNumWaypoints(),\n list(q) + list(qd) + list(qdd) + [0])\n else:\n qs = self.eval(self.ss_waypoints)\n qds = self.evald(self.ss_waypoints)\n qdds = self.evaldd(self.ss_waypoints)\n for (q, qd, qdd, dt) in zip(qs, qds, qdds, deltas):\n traj.Insert(traj.GetNumWaypoints(),\n q.tolist() + qd.tolist() + qdd.tolist() + [dt])\n return traj", "title": "" }, { "docid": "e49acd5e9611c77f759d081ab6c31e20", "score": "0.49908498", "text": "def runPRINCIPLE(self):\n\n # index of stations with good obs\n tymes = np.arange(self.ntyme)[self.nobs>0]\n\n self.I = []\n self.Q = []\n self.U = []\n self.reflectance = []\n self.surf_reflectance = []\n self.ROT = []\n for t in tymes: \n tau = self.tau[t][:,:,self.iGood[t]]\n ssa = self.ssa[t][:,:,self.iGood[t]]\n pmom = self.pmom[t][:,:,self.iGood[t],:,:]\n pe = self.pe[t][:,self.iGood[t]]\n ze = self.ze[t][:,self.iGood[t]]\n te = self.te[t][:,self.iGood[t]]\n\n # Initiate output arrays\n nlev = tau.shape[0]\n self.I_ = np.ones([self.nstations,self.npp])*MISSING\n self.Q_ = np.ones([self.nstations,self.npp])*MISSING\n self.U_ = np.ones([self.nstations,self.npp])*MISSING\n self.reflectance_ = np.ones([self.nstations,self.npp])*MISSING\n self.surf_reflectance_ = np.ones([self.nstations,self.npp])*MISSING\n self.ROT_ = np.ones([self.nstations,nlev])*MISSING\n \n # Get VLIDORT wrapper name from dictionary\n vlidortWrapper = WrapperFuncs[self.albedoType]\n\n\n # do principle plane\n for pi, pp in enumerate(self.pp_angles): \n # Solar Geometry\n sza = self.SZA[t][self.iGood[t]]\n saa = self.SAA[t][self.iGood[t]]\n\n #viewing geometry\n vza = sza - pp \n\n raa = np.zeros(saa.shape)\n\n # if pointing towards the sun\n iGood = vza >= 0\n raa[iGood] = 180.0\n\n #if poinitng away from the sun\n iGood = vza < 0\n raa[iGood] = 0.0\n\n # make all vza's positive\n vza[iGood] = np.abs(vza[iGood])\n\n # Limit viewing angles to less than 80\n # also limits to pointing above the horizon\n iGood = vza < 80\n nobs = np.sum(iGood)\n\n if nobs >0:\n # run VLIDORT \n # get args list for each surface model\n if self.albedoType == 'MODIS_BRDF':\n kernel_wt = self.kernel_wt[t][:,:,self.iGood[t]]\n param = self.RTLSparam[t][:,:,self.iGood[t]] \n \n args = [self.channel,tau[:,:,iGood], ssa[:,:,iGood], pmom[:,:,iGood,:,:], \n pe[:,iGood], ze[:,iGood], te[:,iGood], \n kernel_wt, param, \n sza[iGood], raa[iGood], vza[iGood], \n MISSING,\n self.verbose]\n\n # Call VLIDORT wrapper function\n I, reflectance, ROT, surf_reflectance, Q, U, rc = vlidortWrapper(*args) \n \n elif self.albedoType == 'MODIS_BRDF_BPDF':\n # For albedo\n kernel_wt = 
self.kernel_wt[t][:,:,self.iGood[t]]\n RTLSparam = self.RTLSparam[t][:,:,self.iGood[t]] \n RTLSparam = np.append(RTLSparam,np.zeros([1,1,self.nobs[t]]),axis=0) \n\n # For BPDF\n BPDFparam = self.BPDFparam[t][:,:,self.iGood[t]]\n\n # Loop through one by one\n # Some land covers do not have polarization (i.e. urban)\n I = np.zeros([nobs,1])\n Q = np.zeros([nobs,1])\n U = np.zeros([nobs,1])\n reflectance = np.zeros([nobs,1])\n surf_reflectance = np.zeros([nobs,1])\n ROT = np.zeros([nlev,nobs,1])\n\n index = np.arange(self.nobs[t])\n index = index[iGood]\n\n for pindex,p in enumerate(index):\n if BPDFparam[2,0,p] == MISSING: \n args = [self.channel,\n tau[:,:,p:p+1], \n ssa[:,:,p:p+1], \n pmom[:,:,p:p+1,:,:], \n pe[:,p:p+1], \n ze[:,p:p+1], \n te[:,p:p+1], \n kernel_wt[:,:,p:p+1], \n RTLSparam[:,:,p:p+1], \n sza[p:p+1], \n raa[p:p+1], \n vza[p:p+1], \n MISSING,\n self.verbose]\n\n BRDFvlidortWrapper = WrapperFuncs['MODIS_BRDF']\n # Call VLIDORT wrapper function\n I_, reflectance_, ROT_, surf_reflectance_, Q_, U_, rc = BRDFvlidortWrapper(*args) \n\n else:\n args = [self.channel,\n tau[:,:,p:p+1], \n ssa[:,:,p:p+1], \n pmom[:,:,p:p+1,:,:], \n pe[:,p:p+1], \n ze[:,p:p+1], \n te[:,p:p+1], \n kernel_wt[:,:,p:p+1], \n RTLSparam[:,:,p:p+1], \n BPDFparam[:,:,p:p+1],\n sza[p:p+1], \n raa[p:p+1], \n vza[p:p+1], \n MISSING,\n self.verbose]\n\n # Call VLIDORT wrapper function\n I_, reflectance_, ROT_, surf_reflectance_, Q_, U_, rc = vlidortWrapper(*args)\n \n I[pindex:pindex+1,:] = I_\n Q[pindex:pindex+1,:] = Q_\n U[pindex:pindex+1,:] = U_\n reflectance[pindex:pindex+1,:] = reflectance_\n surf_reflectance[pindex:pindex+1,:] = surf_reflectance_\n ROT[:,pindex:pindex+1,:] = ROT_\n \n elif self.albedoType == 'LAMBERTIAN':\n albedo = self.albedo[t][self.iGood[t],:]\n \n args = [self.channel,tau[:,:,iGood], ssa[:,:,iGood], pmom[:,:,iGood,:,:], \n pe[:,iGood], ze[:,iGood], te[:,iGood], \n albedo[iGood,:], \n sza[iGood], raa[iGood], vza[iGood], \n MISSING,\n self.verbose]\n\n # Call VLIDORT wrapper function\n I, reflectance, ROT, Q, U, rc = vlidortWrapper(*args) \n surf_reflectance = albedo[iGood,:]\n\n # Store values in initialized arrays\n II = np.arange(self.nstations)\n II = II[self.iGood[t]]\n II = II[iGood]\n if pi == 0:\n self.ROT_[II,:] = np.squeeze(ROT).T\n\n self.I_[II,pi] = np.squeeze(I)\n self.reflectance_[II,pi] = np.squeeze(reflectance)\n self.surf_reflectance_[II,pi] = np.squeeze(surf_reflectance)\n self.Q_[II,pi] = np.squeeze(Q)\n self.U_[II,pi] = np.squeeze(U) \n\n #Store full principle plane for writing later\n self.I.append(self.I_)\n self.Q.append(self.Q_)\n self.U.append(self.U_)\n self.reflectance.append(self.reflectance_)\n self.surf_reflectance.append(self.surf_reflectance_)\n self.ROT.append(self.ROT_)\n\n self.writeNCpp()", "title": "" }, { "docid": "061ad9c6ed7dc12b344760d7b2a0bb66", "score": "0.49899322", "text": "def _create_objectives(self):\n # Define structures for each contrast.\n self.structures = [self._using_conditions(contrast_sld)\n for contrast_sld in self.contrast_slds]\n\n # Iterate over each measured structure.\n self.objectives = []\n for i, structure in enumerate(self.structures):\n # Define the model.\n model = refnx.reflect.ReflectModel(structure,\n scale=self.scale,\n bkg=self.bkgs[i],\n dq=self.dq)\n # Load the measured data.\n filename = '{}.dat'.format(self.labels[i])\n file_path = os.path.join(self.data_path, filename)\n data = refnx.dataset.ReflectDataset(file_path)\n\n # Combine model and data into an objective that can be fitted.\n 
self.objectives.append(refnx.analysis.Objective(model, data))", "title": "" }, { "docid": "77fc64bf0c6514e53061580d515d36c9", "score": "0.49797496", "text": "def run(self):\n trajectories=self.get_option('trajectories',required=True).split(',')\n failure=False\n self.start_pdf()\n reference_plotted=dict()\n for traj in trajectories:\n for runmode in self._filter_runmodes(section=traj):\n for n in range(len(self._get_columns(traj,runmode))):\n self.figureLegendRight(ylabel='value '+str(n+1), title=self.test, n=n)\n\n data,timeArray,data_label=self._get_data(section=traj,runmode=runmode,n=n)\n reference,reference_label=self._get_reference(section=traj,runmode=runmode,n=n)\n if (reference_label,n) not in reference_plotted :\n self.plot(timeArray,reference(timeArray),label=reference_label)\n reference_plotted[(reference_label,n)]=True\n self.plot(timeArray,data(timeArray),label=data_label)\n logging.debug(\"Evaluating {0}, value number {1}.\".format(data_label,n+1))\n eps=self._get_eps(runmode, traj, n)\n if not self._regression(reference,data,timeArray,eps):\n logging.debug(\"====== FAILED ======\")\n failure=True\n self.close_pdf()\n if failure:\n sys.exit(-1)", "title": "" }, { "docid": "19e9c18eddd096275be7fffd3fd6ba82", "score": "0.4978773", "text": "def get_tdr_dynamical(self):\n \n nkpt = self.eigr2d.nkpt\n nband = self.eigr2d.nband\n natom = self.eigr2d.natom\n ntemp = self.ntemp\n \n self.tdr = zeros((ntemp, nkpt, nband), dtype=complex)\n \n bose = self.ddb.get_bose(self.temperatures)\n \n fan_term = zeros((ntemp, nkpt, nband), dtype=complex)\n ddw_term = zeros((ntemp, nkpt, nband), dtype=complex)\n fan_add = zeros((ntemp, nkpt, nband),dtype=complex)\n ddw_add = zeros((ntemp, nkpt, nband),dtype=complex)\n \n # Sternheimer contribution\n \n # nmode, nkpt, nband\n fan_stern, ddw_stern = self.get_fan_ddw_sternheimer()\n \n fan_term = einsum('ijk,il->ljk', fan_stern, 2*bose+1.0)\n ddw_term = einsum('ijk,il->ljk', ddw_stern, 2*bose+1.0)\n \n # Active space contribution\n fan_num, ddw_num = self.get_fan_ddw_active()\n \n # jband\n occ = self.get_occ_nospin()\n \n delta_E_ddw = (einsum('ij,k->ijk', self.eig0.EIG[0,:,:].real, ones(nband)) -\n einsum('ij,k->ikj', self.eig0.EIG[0,:,:].real, ones(nband)) -\n einsum('ij,k->ijk', ones((nkpt, nband)), (2*occ-1)) * self.smearing * 1j)\n \n # ntemp,ikpt,iband,jband\n tmp = einsum('ijkl,lm->mijk', ddw_num, 2*bose+1.0)\n ddw_add = einsum('ijkl,jkl->ijk', tmp, 1.0 / delta_E_ddw)\n \n # ikpt,iband,jband\n delta_E = (einsum('ij,k->ijk', self.eig0.EIG[0,:,:].real, ones(nband)) -\n einsum('ij,k->ikj', self.eigq.EIG[0,:,:].real, ones(nband)) -\n einsum('ij,k->ijk', ones((nkpt,nband)), (2*occ-1)) * self.smearing * 1j)\n \n omega = self.ddb.omega[:].real # imode\n \n # imode,ntemp,jband\n num1 = (einsum('ij,k->ijk', bose, ones(nband)) + 1.0 -\n einsum('ij,k->ijk', ones((3*natom, ntemp)), occ))\n \n # ikpt,iband,jband,imode\n deno1 = (einsum('ijk,l->ijkl', delta_E,ones(3*natom)) -\n einsum('ijk,l->ijkl', ones((nkpt, nband, nband)), omega))\n \n # (imode,ntemp,jband)/(ikpt,iband,jband,imode) ==> imode,ntemp,jband,ikpt,iband\n invdeno1 = np.real(deno1) / (np.real(deno1) ** 2 + np.imag(deno1) ** 2)\n div1 = einsum('ijk,lmki->ijklm', num1, invdeno1)\n #div1 = einsum('ijk,lmki->ijklm', num1, 1.0 / deno1)\n \n # imode,ntemp,jband\n num2 = (einsum('ij,k->ijk', bose, ones(nband)) +\n einsum('ij,k->ijk', ones((3*natom, ntemp)), occ))\n \n # ikpt,iband,jband,imode\n deno2 = (einsum('ijk,l->ijkl', delta_E, ones(3*natom)) +\n einsum('ijk,l->ijkl', ones((nkpt, nband, 
nband)), omega))\n \n # (imode,ntemp,jband)/(ikpt,iband,jband,imode) ==> imode,ntemp,jband,ikpt,iband\n invdeno2 = np.real(deno2) / (np.real(deno2) ** 2 + np.imag(deno2) ** 2)\n div2 = einsum('ijk,lmki->ijklm', num2, invdeno2)\n #div2 = einsum('ijk,lmki->ijklm', num2, 1.0 / deno2)\n \n # ikpt,iband,jband,imode\n fan_add = einsum('ijkl,lmkij->mij', fan_num, div1 + div2)\n \n\n fan_term += fan_add\n ddw_term += ddw_add\n \n self.tdr = (fan_term - ddw_term) * self.wtq\n \n self.tdr = self.eig0.make_average(self.tdr)\n \n # nkpt, nband, ntemp\n self.tdr = np.einsum('kij->ijk', self.tdr)\n \n return self.tdr", "title": "" }, { "docid": "d2aad0814721edf48e74e73d7dd7da52", "score": "0.49773735", "text": "def analyse( self ) :\n\n TracksLocation = 'Rec/Track/Best'\n \n tracks = self.get( TracksLocation )\n\n print '# tarcks ', tracks.size()\n\n ## get the relation table\n table = self.get( 'Relations/Rec/Track/Default' )\n\n print '# #tracks/links : ', tracks.size() , table.relations().size() \n \n t2mc = table\n mc2t = table.inverse()\n\n for t in tracks :\n\n ## get all related MC-partiles: \n mcps = table.relations ( t )\n if not mcps.empty() :\n ## print number of related Mc-particles, ID of the first one and track key \n mcp = mcps[0]._to() \n print ' #links ' , mcps.size() , mcp.pname() , mcp.key() , t.key () \n\n \n # get all MC-particles \n mcparticles = self.mcselect('all', MCALL )\n\n for mcp in mcparticles :\n\n trks = mc2t.relations ( mcp )\n if not trks.empty() :\n # print number of tracks, partile name and track key \n trk = trks[0]._to()\n print ' #tracks ', trks.size() , mcp.pname() , mcp.key() , trk.key(), trks[0].weight() \n \n \n ## \n self.setFilterPassed( True ) \n return SUCCESS", "title": "" }, { "docid": "cc0e135fbaacf294c97e7094588d965f", "score": "0.49733922", "text": "def breakdown(self,verbose=0):\n children = []\n for tensorcontraction in self.list:\n if (len(tensorcontraction.tensors) == 1):\n label = 0\n else:\n label = len(tensorcontraction.tensors) - 2\n newchild = tensorcontraction.breakdown(label,self.excitationtensortypes(),verbose)\n children.append(newchild)\n result = OperationTree(NoOperation(),[],children)\n return result", "title": "" }, { "docid": "7f10184c98d5e7e18184629f30fb040f", "score": "0.49541593", "text": "def dynamic_programming_on_trellis(self, instance, run_forward_alg=True):\n\t\t#TODO:Initialize trellis and backtrace pointers \n\t\ttrellis = numpy.zeros((len(instance.data),self.label_codebook.size()))\n\t\tbacktrace_pointers = numpy.zeros((len(instance.data),self.label_codebook.size()))\n\t\t#TODO:Traverse through the trellis here\n\t\t#observation_prob_vector = self.emission_matrix[self.feature_codebook.get_index(instance.data[0][0]),:]*self.emission_matrix[self.feature_codebook.get_index(instance.data[0][1]),:]\n\t\tobservation_prob_vector = self.emission_matrix[self.feature_codebook.get_index(instance.data[0][1]),:]\n\t\tif run_forward_alg == True:\n\t\t\t#the first element of the alpha values\n\t\t\ttrellis[0]=self.init_pro*observation_prob_vector\n\t\t\tfor i in range(1,len(instance.data)):\n\t\t\t\t#observation_prob_vector = self.emission_matrix[self.feature_codebook.get_index(intance.data[i][0]),:]*self.emission_matrix[self.feature_codebook.get_index(intance.data[i][1]),:]\n\t\t\t\tobservation_prob_vector = self.emission_matrix[self.feature_codebook.get_index(intance.data[i][0]),:]\n\t\t\t\ttrellis[i]=(trellis[i-1]*self.transition_matrix).sum(1)*observation_prob_vector\n\t\t\treturn 
trellis\n\t\telse:\n\t\t\ttrellis[0]=self.init_pro*observation_prob_vector\n\t\t\t#backtrace_pointers[0] = ('START','START','START')\n\t\t\tfor i in range(1,len(instance.data)):\n\t\t\t\t#observation_prob_vector = self.emission_matrix[self.feature_codebook.get_index(instance.data[i][0]),:]*self.emission_matrix[self.feature_codebook.get_index(instance.data[i][1]),:]\n\t\t\t\tobservation_prob_vector = self.emission_matrix[self.feature_codebook.get_index(instance.data[i][1]),:]\n\t\t\t\t#here is an important place,we have to select the max then multiply the emission probability\n\t\t\t\t#if _DEBUG==True:\n\t\t\t\t\t#import pdb\n\t\t\t\t\t#pdb.set_trace()\n\t\t\t\talpha_list = trellis[i-1]*self.transition_matrix\n\t\t\t\ttrellis[i]=(alpha_list).max(1)*observation_prob_vector\n\t\t\t\tbacktrace_pointers[i]=numpy.argmax(alpha_list,1)\t\t\n\n\t\t\treturn (trellis, backtrace_pointers)", "title": "" }, { "docid": "19e1b898e87a48da62bd73c4a827a195", "score": "0.49519736", "text": "def process_experiment((path, object_timeline)):\n print('Processing', path)\n bag = open_bag(path)\n if bag is None:\n return None\n object_stats = ObjectStats()\n time_taken_processor = processors.TimeTaken(object_timeline)\n camera_movement_processor = processors.CameraMovementTime(object_timeline)\n marker_movement_processor = processors.MarkerMovementTime(object_timeline)\n grasp_count_processor = processors.GraspCount(object_timeline)\n for topic, message, time in bag.read_messages(topics=EXPERIMENT_TOPICS):\n model = message_factory.model(message)\n if model is None:\n continue\n time_taken_processor.update(topic, model, time)\n camera_movement_processor.update(topic, model, time)\n marker_movement_processor.update(topic, model, time)\n grasp_count_processor.update(topic, model, time)\n bag.close()\n time_taken_processor.update_last()\n object_stats.update(time_taken_processor.object_stats())\n object_stats.update(camera_movement_processor.object_stats())\n object_stats.update(marker_movement_processor.object_stats())\n object_stats.update(grasp_count_processor.object_stats())\n for obj, stats in object_stats.items():\n stats['other_time'] = (\n stats['time_taken'] - stats['camera_movement_time'] -\n stats['marker_movement_time']\n )\n\n timeline = build_timeline(\n marker_movement_processor.timeline(),\n camera_movement_processor.timeline())\n \n return object_stats, timeline", "title": "" }, { "docid": "55692e654c8f39497dddcf62d8b3eb54", "score": "0.49491498", "text": "def getIntelligence():", "title": "" }, { "docid": "a3d88562b59168f20fe776a30f24c0d6", "score": "0.49472186", "text": "def __init__(self, A, y, lamb=0.5, complex=True):\n \n self.A = A\n self.y = y\n self.lamb = lamb\n self.m, self.n = A.shape\n if len(y.shape) > 1:\n self.p = y.shape[1]\n else:\n self.p = 1\n \n \n #Run som sanity checks\n assert(isinstance(A,np.ndarray))\n assert(isinstance(y,np.ndarray))\n assert(self.m == self.y.shape[0])\n assert(self.lamb > 0)\n if not complex:\n assert(not np.iscomplexobj(A))\n assert(not np.iscomplexobj(y))\n \n\n if complex:\n self.A_backward = self._Hermitian(self.A)\n else:\n self.A_backward = self.A.T\n \n #get M as A^H*A which is used more than once \n M = self.A_backward.dot(self.A)\n \n self.alpha = 1/np.linalg.norm(A,ord = 2)**2 \n\n \n #Compute matricies involved in gradient step in CISTA\n self.Phi = np.eye(self.n) - self.alpha*M\n self.phi_y = self.alpha*self.A_backward.dot(self.y)", "title": "" }, { "docid": "4560f63789dc6a53a43dfcd25c3e63bf", "score": "0.49415022", "text": "def 
test_access_to_internal_models(self):\n est = LinearIntentToTreatDRIV(LinearRegression(), LogisticRegression(C=1000), WeightedLasso(),\n featurizer=PolynomialFeatures(degree=2, include_bias=False))\n Y = np.array([1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2])\n T = np.array([1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2])\n Z = np.array([1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2])\n X = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6]).reshape(-1, 1)\n est.fit(Y, T, Z=Z, X=X)\n assert isinstance(est.original_featurizer, PolynomialFeatures)\n assert isinstance(est.featurizer, Pipeline)\n assert isinstance(est.model_final, StatsModelsLinearRegression)\n for mdl in est.models_Y_X:\n assert isinstance(mdl, LinearRegression)\n for mdl in est.models_T_XZ:\n assert isinstance(mdl, LogisticRegression)\n np.testing.assert_array_equal(est.cate_feature_names(['A']), ['A', 'A^2'])\n np.testing.assert_array_equal(est.cate_feature_names(), ['x0', 'x0^2'])\n\n est = LinearIntentToTreatDRIV(LinearRegression(), LogisticRegression(C=1000), WeightedLasso(),\n featurizer=None)\n est.fit(Y, T, Z=Z, X=X)\n assert est.original_featurizer is None\n assert isinstance(est.featurizer, FunctionTransformer)\n assert isinstance(est.model_final, StatsModelsLinearRegression)\n for mdl in est.models_Y_X:\n assert isinstance(mdl, LinearRegression)\n for mdl in est.models_T_XZ:\n assert isinstance(mdl, LogisticRegression)\n np.testing.assert_array_equal(est.cate_feature_names(['A']), ['A'])", "title": "" }, { "docid": "98645f0392d6c8979e13ba7cd1a09848", "score": "0.49414748", "text": "def qft(qubits):\n\n def qft_function(amplitudes):\n\n new_amplitudes = []\n num_points = len(amplitudes)\n\n # for each element in the transformed vector\n for i in range(0, num_points):\n\n summation = complex(0, 0)\n\n # for each element in the input vector\n for j in range(0, num_points):\n\n angle = (2.0 * pi * i * j)/num_points\n\n summation += amplitudes[j] * cmath.exp(-1j * angle)\n\n new_amplitudes.append(summation)\n\n print(new_amplitudes)\n\n return new_amplitudes\n\n qubits.manipulate(qft_function)", "title": "" }, { "docid": "fabe0a185e8ec4c6e753e358242616f8", "score": "0.4939888", "text": "def __init__(self, trajectories, returns, config, iter_ids, iter_ius, k):\n self.trajectories = trajectories\n self.returns = returns\n self.config = config\n self.iter_ids = iter_ids\n self.iter_ius = iter_ius\n self.k = k", "title": "" }, { "docid": "349eb70503aa9b4ac811f391bf06fe71", "score": "0.49392518", "text": "def build_experiments(self):\n\n # We set the camera\n # This single RGB camera is used on every experiment\n\n camera = Camera('CameraRGB')\n camera.set(FOV=90)\n camera.set_image_size(800, 600)\n camera.set_position(1.44, 0.0, 1.2)\n camera.set_rotation(0, 0, 0)\n\n if self._city_name == 'Town01':\n poses_tasks = self._poses_town01()\n vehicles_tasks = [0, 20, 100]\n pedestrians_tasks = [0, 50, 250]\n else:\n poses_tasks = self._poses_town02()\n vehicles_tasks = [0, 15, 70]\n pedestrians_tasks = [0, 50, 150]\n\n experiments_vector = []\n\n for weather in self.weathers:\n\n for iteration in range(len(poses_tasks)):\n poses = poses_tasks[iteration]\n vehicles = vehicles_tasks[iteration]\n pedestrians = pedestrians_tasks[iteration]\n\n conditions = CarlaSettings()\n conditions.set(\n SendNonPlayerAgentsInfo=True,\n NumberOfVehicles=vehicles,\n NumberOfPedestrians=pedestrians,\n WeatherId=weather\n )\n\n conditions.set(DisableTwoWheeledVehicles=True)\n # Add all the cameras that were set for this experiments\n\n conditions.add_sensor(camera)\n\n experiment = 
Experiment()\n experiment.set(\n Conditions=conditions,\n Poses=poses,\n Task=iteration,\n Repetitions=1\n )\n experiments_vector.append(experiment)\n\n return experiments_vector", "title": "" }, { "docid": "e0aa1640ea5bf7eabc230edc85f9edc0", "score": "0.49386477", "text": "def train(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "617b4d809a821c260ff3d6995a9531cb", "score": "0.49346885", "text": "def sample_trajectories(self, itr, env, info_name=None, render=False):\n timesteps_this_batch = 0\n paths = []\n total_rew = 0\n total_info, avg_info = 0, 0\n while True:\n animate_this_episode = (len(paths)==0 and (itr % 1 == 0))\n path = self.sample_trajectory(env, animate_this_episode)\n paths.append(path)\n timesteps_this_batch += len(path[\"reward\"])\n total_rew += np.sum(path[\"reward\"])\n if info_name is not None:\n info_dict_list = path['info']\n for dict in info_dict_list:\n info = dict[info_name]\n total_info += info\n if timesteps_this_batch > self.min_timesteps_per_batch:\n break\n avg_info = total_info / len(paths)\n avg_rew = total_rew / len(paths)\n return paths, timesteps_this_batch, avg_rew, avg_info", "title": "" }, { "docid": "355db437f33cc925d1670ac165563e56", "score": "0.4933888", "text": "def algebra_generators(self):", "title": "" }, { "docid": "23308677ea588bda9020f4ef18c9ee6b", "score": "0.49320087", "text": "def test_traj(ic, tend=20):\n ode_sys.set(ics=ic, tdata=[0,tend])\n traj = ode_sys.compute('test')\n pts = traj.sample()\n plt.plot(pts['phi'], pts['nu'], 'k:', lw=1)\n plt.plot(pts['phi'][0], pts['nu'][0], 'ko')", "title": "" }, { "docid": "5701eee23f66440b5e41da09c8c9db38", "score": "0.49281594", "text": "def integrations(self):\n raise NotImplementedError(\"TODO: Not implemented yet\")", "title": "" }, { "docid": "a57d4f949642b92eec617547c7bc2a98", "score": "0.49259016", "text": "def test_trajectory_logicnetwork(self):\n net = LogicNetwork([((1, 2), {'01', '10'}),\n ((0, 2), {'01', '10', '11'}),\n ((0, 1), {'11'})])\n state = [0, 1, 0]\n got = trajectory(net, state, 3)\n self.assertEqual([[0, 1, 0], [1, 0, 0], [0, 1, 0], [1, 0, 0]], got)\n self.assertEqual([0, 1, 0], state)\n\n got = list(trajectory(net, state, 3, encode=True))\n self.assertEqual([2, 1, 2, 1], got)", "title": "" }, { "docid": "eb4dc0a3bb75fcdd20c206ab8ac77386", "score": "0.492501", "text": "def bode_data_analysis(source, port, real_world, debug):\n\n exp_id = input('EXPRERIMENT ID : ')\n experiment = Experiment.from_id(exp_id, real_world.data_folder)\n n_experiments = len(experiment.data['entree_sinus']['measures'])\n amplitudes = []\n phases = []\n freqs = []\n \n # fig, axes = pl.subplots(n_experiments)\n\n for i in range(n_experiments):\n pos_measures = np.array(\n list(map(float, experiment.data['entree_sinus']['measures'][i]['pos_measures']))\n )[30:]\n speed_measures = np.array(\n list(map(float, experiment.data['entree_sinus']['measures'][i]['speed_measures']))\n )[30:]\n speed_orders = np.array(\n list(map(float, experiment.data['entree_sinus']['measures'][i]['speed_orders']))\n )[30:]\n times = np.array(\n list(map(float, experiment.data['entree_sinus']['measures'][i]['times']))\n )[30:]\n order_freq = float(experiment.data['entree_sinus']['measures'][i]['order_freq'])\n order_amplitude = float(experiment.data['entree_sinus']['measures'][i]['order_amplitude'])\n\n pos_spectrum = np.fft.rfft(pos_measures)\n speed_spectrum = np.fft.rfft(speed_measures)\n freq_range = np.fft.rfftfreq(times.size, sample_time(times))\n\n amplitudes.append(amplitude(\n times,\n 
speed_measures, \n 1/order_freq\n ))\n # phases.append(phase(\n # times,\n # speed_measures,\n # speed_orders,\n # 1/order_freq\n # ))\n freqs.append(order_freq)\n\n freqs = np.array(freqs)\n amplitudes = np.array(amplitudes)/(order_amplitude/real_world.pwm_incs_ratio) # convert in pwm\n #phases = np.array(phases)\n\n j = complex(0, 1)\n\n def func(f, K, xi, f0):\n return np.abs(K/(1 + (2*xi)*j*(f/f0) - (f/f0)**2))\n \n (K, xi, f0), pcov = curve_fit(func, freqs, amplitudes, bounds=([0, 0.6, 1], [np.inf, np.inf, 10]))\n print(K, xi, f0)\n print(np.sqrt(np.diag(pcov)))\n\n #pl.subplot('211')\n pl.title('Gain en vitesse de la corbeille en fonction de la fréquence')\n pl.semilogx(\n freqs,\n 20*np.log10(amplitudes)\n )\n pl.semilogx(\n np.geomspace(freqs[0], freqs[-1], 500),\n 20*np.log10([func(freq, K, xi, f0) for freq in np.geomspace(freqs[0], freqs[-1], 500)])\n )\n pl.legend(['Expérience', 'Régression'])\n \n pl.xlabel('Fréquence (Hz)')\n pl.ylabel('Gain (dB)')\n pl.grid(True, color='0.7', linestyle='-', which='both', axis='both')\n\n precision = 3\n text = ' K = {} \\n Xi = {}\\n F0 = {}'.format(\n round(K, precision), round(xi, precision), round(f0, precision)\n )\n\n pl.text(\n 2*10**(-1), -20, \n text,\n color='white',\n fontsize=12,\n bbox=dict(facecolor='black', edgecolor='black', pad=10.0)\n )\n\n # pl.subplot('212')\n # pl.title('Phase en vitesse de la corbeille en fonction de la fréquence')\n # pl.semilogx(\n # freqs,\n # phases*(180/pi)\n # )\n pl.show()", "title": "" }, { "docid": "f131c45b99c92ffec02790385d53d316", "score": "0.49191204", "text": "def generate(s):\n dic={\n \"INPUT\": [\"V0\", \"P0\", \"Q0\"],\n\n\n \"V0\": {\n \"u\": \"V1\",\n \"type\": \"PREDICT\",\n \"final_fc\": False,\n },\n\n \"P0\": {\n \"u\": \"P1\",\n \"type\": \"PREDICT\",\n \"final_fc\": True,\n \"bias\": True\n\n },\n\n \"Q0\": {\n \"u\":\"P2\",\n \"type\": \"PREDICT\",\n \"final_fc\": True,\n \"units\":6,\n \"classify\": True\n\n },\n\n \"SETTINGS\": s.__dict__\n }\n\n\n\n #Visual path\n tau=s.tau_start\n VISUAL_UNITS=s.VISUAL_UNITS\n for i in range(1,s.N_V_LAYERS+1):\n dic[\"V%d\"%i] =dict(\n d=\"V%d\"%(i-1),\n #l=\"P%d\"%i,\n u=\"V%d\"%(i+1),\n type=\"MSTRNN\",\n units=VISUAL_UNITS,\n filter=s.FILTER,\n tau=tau\n )\n tau += s.tau_delta\n VISUAL_UNITS += s.VISUAL_UNITS_DELTA\n\n\n #Proprioceptive path\n tau=s.tau_start\n for i in range(1, s.N_P_LAYERS + 1):\n dic[\"P%d\" % i] = dict(\n d=\"P%d\" % (i - 1),\n #l=\"Q%d\" % i,\n u=\"P%d\" % (i + 1),\n type=\"CTRNN\",\n units=s.PROP_UNITS,\n tau=tau\n )\n tau+=s.tau_delta\n\n #Control path\n tau=s.tau_start\n for i in range(1, s.N_Q_LAYERS + 1):\n dic[\"Q%d\" % i] = dict(\n d=\"Q%d\" % (i - 1),\n #l=\"P%d\" % i,\n u=\"Q%d\" % (i + 1),\n type=\"CTRNN\",\n units=s.Q_UNITS,\n tau=tau\n )\n tau+=s.tau_delta\n\n if \"P%d\"%i in dic: dic[\"P%d\"%i].pop(\"u\")\n if \"Q%d\"%i in dic: dic[\"Q%d\"%i].pop(\"u\")\n if \"V%d\"%i in dic: dic[\"V%d\"%i].pop(\"u\")\n\n #Override default tau\n if s.TAU_LIST != []:\n for i,tau in enumerate(s.TAU_LIST):\n dic['V%d'%i]['tau'] = tau\n\n #Override default filter\n if s.FILTER_LIST and s.FILTER_FOR_LIST!=s.FILTER:\n for l in (s.FILTER_LIST):\n dic['V%d'%l]['filter'] = s.FILTER_FOR_LIST\n\n\n #Delete input and links if a channel isnt present\n if s.N_V_LAYERS==0:\n dic['INPUT'] = [input for input in dic['INPUT'] if input != 'V0']\n dic.pop('V0')\n\n if s.N_P_LAYERS==0:\n dic['INPUT'] = [input for input in dic['INPUT'] if input != 'P0']\n dic.pop('P0')\n\n\n if s.N_Q_LAYERS==0:\n dic['INPUT'] = [input for input in dic['INPUT'] 
if input != 'Q0']\n dic.pop('Q0')\n\n\n LATERAL = 1\n if LATERAL:\n for i in range(0, s.N_V_LAYERS+1):\n dic['V%d'%i]['l'] = ['P%d'%i]\n dic['P%d'%i]['l'] = ['V%d'%i]\n\n\n\n print(dic)\n with open('net.json', 'w') as outfile:\n json.dump(dic, outfile)", "title": "" }, { "docid": "800816e338e316b6a33f14ac08f888b1", "score": "0.4918915", "text": "def sub_directions(self,t,j):\n raise NotImplementedError", "title": "" }, { "docid": "59f25435a4876a46376a3c02d23316cc", "score": "0.49185294", "text": "def test_get_elements_from_classification(self):\n pass", "title": "" }, { "docid": "7079f739fbc7bcac6c1f56fac213aacb", "score": "0.4916937", "text": "def test_Part3PETCT(self,enableScreenshotsFlag=0,screenshotScaleFactor=1):\r\n logic = RSNAQuantTutorialLogic()\r\n logic.enableScreenshots = enableScreenshotsFlag\r\n logic.screenshotScaleFactor = screenshotScaleFactor\r\n\r\n self.delayDisplay(\"Starting the test\")\r\n\r\n #\r\n # first, get some data\r\n #\r\n import SampleData\r\n extractPath = SampleData.downloadFromURL(\r\n fileNames='dataset3_PETCT.zip',\r\n uris='http://slicer.kitware.com/midas3/download?items=124185',\r\n checksums='SHA256:11e81af3462076f4ca371b632e03ed435240042915c2daf07f80059b3f78f88d')[0]\r\n\r\n self.delayDisplay(\"Loading PET_CT_pre-treatment.mrb\")\r\n preTreatmentPath = extractPath + '/PET_CT_pre-treatment.mrb'\r\n slicer.util.loadScene(preTreatmentPath)\r\n logic.takeScreenshot('PETCT-LoadedPre','Loaded pre-treatement scene',-1)\r\n\r\n try:\r\n mainWindow = slicer.util.mainWindow()\r\n layoutManager = slicer.app.layoutManager()\r\n threeDView = layoutManager.threeDWidget(0).threeDView()\r\n redWidget = layoutManager.sliceWidget('Red')\r\n redController = redWidget.sliceController()\r\n greenWidget = layoutManager.sliceWidget('Green')\r\n greenController = greenWidget.sliceController()\r\n yellowWidget = layoutManager.sliceWidget('Yellow')\r\n yellowController = yellowWidget.sliceController()\r\n viewNode = threeDView.mrmlViewNode()\r\n cameras = slicer.util.getNodes('vtkMRMLCameraNode*')\r\n for cameraNode in cameras.values():\r\n if cameraNode.GetActiveTag() == viewNode.GetID():\r\n break\r\n\r\n threeDView.resetFocalPoint()\r\n slicer.util.clickAndDrag(threeDView,button='Right')\r\n redWidget.sliceController().setSliceVisible(True)\r\n yellowWidget.sliceController().setSliceVisible(True)\r\n logic.takeScreenshot('PETCT-ConfigureView','Configure View',-1)\r\n\r\n mainWindow.moduleSelector().selectModule('Volumes')\r\n compositNode = redWidget.mrmlSliceCompositeNode()\r\n compositNode.SetForegroundOpacity(0.2)\r\n logic.takeScreenshot('PETCT-ShowVolumes','Show Volumes with lesion',-1)\r\n\r\n compositNode.SetForegroundOpacity(0.5)\r\n logic.takeScreenshot('PETCT-CTOpacity','CT1 volume opacity to 0.5',-1)\r\n\r\n yellowWidget.sliceController().setSliceVisible(False)\r\n greenWidget.sliceController().setSliceVisible(True)\r\n logic.takeScreenshot('PETCT-ShowSlices','Show axial and sagittal slices',-1)\r\n\r\n self.delayDisplay('SUV Computation')\r\n if not hasattr(slicer.modules, 'petstandarduptakevaluecomputation'):\r\n self.delayDisplay(\"PET SUV Computation not available, skipping the test.\")\r\n return\r\n\r\n slicer.util.selectModule('PETStandardUptakeValueComputation')\r\n\r\n parameters = {\r\n \"PETDICOMPath\": extractPath + '/' + 'PET1',\r\n \"PETVolume\": slicer.util.getNode('PET1'),\r\n \"VOIVolume\": slicer.util.getNode('PET1-label'),\r\n }\r\n\r\n suvComputation = slicer.modules.petstandarduptakevaluecomputation\r\n self.CLINode1 = None\r\n 
self.CLINode1 = slicer.cli.runSync(suvComputation, self.CLINode1, parameters, delete_temporary_files=False)\r\n\r\n # close the scene\r\n slicer.mrmlScene.Clear(0)\r\n\r\n self.delayDisplay(\"Loading PET_CT_post-treatment.mrb\")\r\n postTreatmentPath = extractPath + '/PET_CT_post-treatment.mrb'\r\n slicer.util.loadScene(postTreatmentPath)\r\n logic.takeScreenshot('PETCT-LoadedPost','Loaded post-treatement scene',-1)\r\n\r\n compositNode.SetForegroundOpacity(0.5)\r\n logic.takeScreenshot('PETCT-CT2Opacity','CT2 volume opacity to 0.5',-1)\r\n\r\n redController.setSliceOffsetValue(-165.01)\r\n logic.takeScreenshot('PETCT-LarynxUptake','Mild uptake in the larynx and pharynx',-1)\r\n\r\n redController.setSliceOffsetValue(-106.15)\r\n logic.takeScreenshot('PETCT-TumorUptake','No uptake in the area of the primary tumor',-1)\r\n\r\n self.delayDisplay('Test passed!')\r\n except Exception as e:\r\n import traceback\r\n traceback.print_exc()\r\n self.delayDisplay('Test caused exception!\\n' + str(e))", "title": "" }, { "docid": "abf87eb7bcf1cff737f68a8e40ae2b94", "score": "0.4903885", "text": "def compute_all_trajectories(self):\n self.trajectories = []\n for frame_nb in range(self.n_frames-1):\n trajectory_list = self.compute_trajectories_in_frame(frame_nb)\n self.trajectories.append(trajectory_list)", "title": "" }, { "docid": "fda00ef9a9cff3608ad21bc174f550f0", "score": "0.49032158", "text": "def experiment1():\n\tagent1 = epsilon_greedy_agent(10, epsilon = 0, initial_value = 0)\n\tagent2 = epsilon_greedy_agent(10, epsilon = 0.01, initial_value = 0)\n\tagent3 = epsilon_greedy_agent(10, epsilon = 0.1, initial_value = 0)\n\tagent4 = epsilon_greedy_agent(10, epsilon = 0.5, initial_value = 0)\n\ttb = bandit_problem_test_bed(10)\n\ttb.test(agent1,time_steps = 1000, runs = 2000)\n\ttb.test(agent2,time_steps = 1000, runs = 2000)\n\ttb.test(agent3,time_steps = 1000, runs = 2000)\n\ttb.test(agent4,time_steps = 1000, runs = 2000)\n\ttb.show_figures()", "title": "" }, { "docid": "b1a0fe3ed7786f9aae852d81d57400d9", "score": "0.48963913", "text": "def main():\n # movement_experiment()\n # degree_experiment()\n # test_go_inches()\n # test_spin_degree()\n # turn_degree_experiment()\n # test_turn_degrees()\n # test_polygon()\n # test_calibrate()\n # test_raise_and_close()\n # test_move_arm_to_position()\n big_test()", "title": "" }, { "docid": "af9b69510f32fa63acd591f49b99f68f", "score": "0.48952454", "text": "def __init__(self, spectrum):\n TRTHeader = spectrum.find('./TRTHeaderedClass')\n hardware_header = TRTHeader.find(\n \"./ClassInstance[@Type='TRTSpectrumHardwareHeader']\")\n detector_header = TRTHeader.find(\n \"./ClassInstance[@Type='TRTDetectorHeader']\")\n esma_header = TRTHeader.find(\n \"./ClassInstance[@Type='TRTESMAHeader']\")\n # what TRT means?\n # ESMA could stand for Electron Scanning Microscope Analysis\n spectrum_header = spectrum.find(\n \"./ClassInstance[@Type='TRTSpectrumHeader']\")\n xrf_header = TRTHeader.find(\"./ClassInstance[@Type='TRTXrfHeader']\")\n\n # map stuff from harware xml branch:\n self.hardware_metadata = dictionarize(hardware_header)\n self.amplification = self.hardware_metadata['Amplification'] # USED\n\n # map stuff from detector xml branch\n self.detector_metadata = dictionarize(detector_header)\n self.detector_type = self.detector_metadata['Type'] # USED\n\n # decode silly hidden detector layer info:\n det_l_str = self.detector_metadata['DetLayers']\n dec_det_l_str = codecs.decode(det_l_str.encode('ascii'), 'base64')\n mini_xml = 
ET.fromstring(unzip_block(dec_det_l_str))\n self.detector_metadata['DetLayers'] = {} # Overwrite with dict\n for i in list(mini_xml):\n self.detector_metadata['DetLayers'][i.tag] = dict(i.attrib)\n\n # map stuff from esma xml branch:\n if esma_header:\n self.esma_metadata = dictionarize(esma_header)\n if xrf_header:\n xrf_header_dict = dictionarize(xrf_header)\n self.esma_metadata = {\n 'PrimaryEnergy':xrf_header_dict['Voltage'],\n 'ElevationAngle':xrf_header_dict['ExcitationAngle']\n }\n # USED:\n self.hv = self.esma_metadata['PrimaryEnergy']\n self.elev_angle = self.esma_metadata['ElevationAngle']\n date_time = gen_iso_date_time(spectrum_header)\n if date_time is not None:\n self.date, self.time = date_time\n\n self.spectrum_metadata = dictionarize(spectrum_header)\n self.offset = self.spectrum_metadata['CalibAbs']\n self.scale = self.spectrum_metadata['CalibLin']\n\n # main data:\n self.data = np.fromstring(spectrum.find('./Channels').text,\n dtype='Q', sep=\",\")", "title": "" }, { "docid": "e00d1a81081a1e05af508f39b58c36f0", "score": "0.48899361", "text": "def explore(self) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "1bda39ff39926510df4c8eb4468d4fed", "score": "0.48849887", "text": "def train(self):\r\n pass", "title": "" }, { "docid": "1bda39ff39926510df4c8eb4468d4fed", "score": "0.48849887", "text": "def train(self):\r\n pass", "title": "" }, { "docid": "063a15a2747790b6266a42900d9e3ca2", "score": "0.48812932", "text": "def extractFeatures(self):\n self.features = [] #Not a good idea\n self.features.append(self.subEx())\n self.features.append(self.subLen())\n self.features.append(self.subLang())\n self.features.append(self.subCod())\n self.features.append(self.subPunct())\n\n self.features.append(self.recCnt())\n self.features.extend(self.recParts())\n self.features.append(self.recGeoVec())\n self.features.append(self.recTimeDelta())\n self.features.extend(self.recWith())\n self.features.extend(self.recTZs())\n\n self.features.append(self.dateBucket())\n self.features.append(self.dateTZ())\n self.features.append(self.dateRecDelta())\n\n self.features.append(self.fromEx())\n self.features.append(self.fromLen())\n self.features.append(self.fromExAlias())\n self.features.append(self.fromDomain())\n self.features.append(self.fromDomainFQN())\n self.features.append(self.fromErrorsToDom())\n self.features.append(self.fromReplyToDom())\n #self.features.append(self.fromExDomain())\n\n self.features.append(self.toCnt())\n self.features.append(self.toDomain())\n self.features.append(self.toMsgidDom())\n #self.features.append(self.toExDomain())\n self.features.append(self.toFromDom())\n self.features.append(self.msgidDomain())\n self.features.append(self.msgidFromDom())\n\n self.features.append(self.attachCnt())\n self.features.extend(self.extractAttFeatures())\n\n self.features.extend(self.cType())\n self.features.extend(self.cTypeActual())\n\n self.features.append(self.partCnt())\n self.features.extend(self.partFeatures())\n self.features.append(self.partStruct())\n\n self.features.append(self.bodyLang())\n self.features.append(self.bodyLength())\n\n self.features.extend(self.xMailerTokens())\n\n self.features.extend(self.multipleFeatures())\n self.features.extend(self.genFeatures())\n\n self.features.append(self.replyToErrorsToDom())\n return self.features", "title": "" }, { "docid": "3b93e0a669ba5c95516188b6a1ad94c2", "score": "0.48805553", "text": "def run(model1):\r\n\tstep = 0\r\n\tlt=0\r\n\tz=0\r\n\tflag=0\r\n\r\n\ttls_data = [\r\n\t\t['yrrr', 
'Grrr'],\r\n\t\t['ryrr', 'rGrr'],\r\n\t\t['rryr', 'rrGr'],\r\n\t\t['rrry', 'rrrG']\r\n\t] \r\n\t\r\n\tprint(\"simulation started ..............\")\r\n\ttraci.trafficlight.setPhase(\"0\", 6)\r\n\twhile traci.simulation.getMinExpectedNumber() > 0:\r\n\t\ttraci.simulationStep()\r\n\t\t#sim.work()\r\n\r\n\t\t#For normal algorithm without model\r\n\t\t#For getting number of vehicle/lane and find maximum\r\n\t\tL1 = int(get_vehicle_numbers(\"1i_0\"))\r\n\t\tL2 = int(get_vehicle_numbers(\"2i_0\"))\r\n\t\tL3 = int(get_vehicle_numbers(\"3i_0\"))\r\n\t\tL4 = int(get_vehicle_numbers(\"4i_0\"))\r\n\r\n\t\ttotal_l = L1 + L2 + L3 + L4\r\n\r\n\t\tx = [L1, L2, L3 ,L4]\r\n\t\tx = np.asarray(x).astype('float32')\r\n\t\tdata_dict2= {\r\n\t\t\t'L1':L1,\r\n\t\t\t'L2':L2,\r\n\t\t\t'L3':L3,\r\n\t\t\t'L4':L4,\r\n\t\t}\r\n\t\t\r\n\t\tdata_dict = {\r\n\t\t\t\"1i_0\":L1,\r\n\t\t\t\"2i_0\":L2,\r\n\t\t\t\"3i_0\":L3,\r\n\t\t\t\"4i_0\":L4,\r\n\t\t}\r\n\r\n\t\tlm = max(data_dict.items(), key=operator.itemgetter(1))[0]\r\n\t\tlm_val = max(data_dict.items(), key=operator.itemgetter(1))[1]\r\n\t\t#print(lm_val)\r\n\r\n\t\tif lm == '1i_0':\r\n\t\t\ti = 3\r\n\t\telif lm == '2i_0':\r\n\t\t\ti = 1\r\n\t\telif lm == '3i_0':\r\n\t\t\ti = 2\r\n\t\telif lm == '4i_0':\r\n\t\t\ti = 0\r\n\r\n\t\tmet1 = metrics.Accuracy()\r\n\t\tmet2 = metrics.Accuracy()\r\n\r\n\t\tif flag == 0:\r\n\t\t\tprint(\"in if\")\r\n\t\t\tk_pred = model1.predict_one(data_dict2)\r\n\t\t\tprint(k_pred)\r\n\r\n\t\t\tif k_pred != None:\r\n\t\t\t\tyellow_select = tls_data[ypred][0]\r\n\t\t\t\tgreen_select = tls_data[ypred][1]\r\n\t\t\t\tprint(yellow_select, green_select)\r\n\r\n\t\t\t\tphaseDuration(junction=\"0\", phase_state=yellow_select, phase_time=6)\r\n\t\t\t\tphaseDuration(junction=\"0\", phase_state=green_select, phase_time=30)\r\n\t\t\t\tflag = 30\r\n\t\t\t\r\n\t\telse:\r\n\t\t\tflag -= 1\r\n\t\t\t\r\n\t\t#For ARDUINO :::\r\n\t\tph = str(traci.trafficlight.getPhase(\"0\"))\r\n\t\tvalue = write_read(ph)\r\n\t\tstep += 1\r\n\ttraci.close()\r\n\tsys.stdout.flush()", "title": "" }, { "docid": "db6cdd6d5533ddb681060d297062426d", "score": "0.4880063", "text": "def __call__(self, solution):\n raise NotImplementedError", "title": "" }, { "docid": "443b091bef3a93fe7690ef1fe8c5cdb1", "score": "0.48790157", "text": "def _112_complex_1_3(self):\n asyncio.gather(self.__complex(stage=[1, 2, 3]))", "title": "" }, { "docid": "0646ad35b0db78e8565f460dafc73a71", "score": "0.48549443", "text": "def _do_lc(self):\n\t\t########## LC step #######\n\t\t## Basic LC step, combined. Store all the results in the instance.\n\t\t(self.axComb, self.bxComb, self.ktComb, self.kt_unfitComb,\n\t\t self.UComb, self.XComb, self.VComb, self.lnmxAdjustedComb, self.lnmxComb) = lcInfer(\n\t\t\tself.nmxComb, ageCutoff=self.ageCutoff, lifeTableParams=self.lifeTableParams, flattenBx=self.flattenBx) \n\n\t\t## Basic LC step, combined. Store all the results in the instance.\n\t\t(self.axFem, self.bxFem, self.ktFem, self.kt_unfitFem,\n\t\t self.UFem, self.XFem, self.VFem, self.lnmxAdjustedFem, self.lnmxFem) = lcInfer(\n\t\t\tself.nmxFem, ageCutoff=self.ageCutoff, lifeTableParams=self.lifeTableParams, flattenBx=self.flattenBx) \n\t\t\n\t\t## Basic LC step, combined. 
Store all the results in the instance.\n\t\t(self.axMale, self.bxMale, self.ktMale, self.kt_unfitMale,\n\t\t self.UMale, self.XMale, self.VMale, self.lnmxAdjustedMale, self.lnmxMale) = lcInfer(\n\t\t\tself.nmxMale, ageCutoff=self.ageCutoff, lifeTableParams=self.lifeTableParams, flattenBx=self.flattenBx) \n\t\t\t\n\t\tjumpoffAxFem = N.log(self.nmxFem[-1,0:self.ageCutoffIndex].ravel())\n\t\tjumpoffAxMale = N.log(self.nmxMale[-1,0:self.ageCutoffIndex].ravel())\n\n\t\t## Derive current year stuff from LC inference results (kt etc).\n\t\t## Note use of list comprehensions.\n\t\tself.e0sFromEmpiricalNmxComb = N.array([LcUtil.lifeTable(nmxp=nmxRow, **self.lifeTableParams)\n\t\t\t\t\t\t\t\t\t\t\t\tfor nmxRow in self.nmxComb])\n\t\tself.e0sFromEmpiricalNmxMale = N.array([LcUtil.lifeTable(nmxp=nmxRow, **self.lifeTableParams)\n\t\t\t\t\t\t\t\t\t\t\t\tfor nmxRow in self.nmxMale])\n\t\tself.e0sFromEmpiricalNmxFem = N.array([LcUtil.lifeTable(nmxp=nmxRow, **self.lifeTableParams)\n\t\t\t\t\t\t\t\t\t\t\t\tfor nmxRow in self.nmxFem])\n\t\tself.nmxsFromKtCurrentComb = project_nmx(kt=self.ktComb, bx=self.bxComb, ax=self.axComb,\n\t\t\t\t\t\t\t\t\t\t\t ageCutoff=self.ageCutoff)\n\t\tself.nmxsFromKtCurrentMale = project_nmx(kt=self.ktComb, bx=self.bxComb, ax=self.axMale,\n\t\t\t\t\t\t\t\t\t\t\t ageCutoff=self.ageCutoff)\n\t\tself.nmxsFromKtCurrentFem = project_nmx(kt=self.ktComb, bx=self.bxComb, ax=self.axFem,\n\t\t\t\t\t\t\t\t\t\t\t ageCutoff=self.ageCutoff)\n\t\tself.nmxsFromKtCurrent_unfitComb = project_nmx(kt=self.kt_unfitComb, bx=self.bxComb, ax=self.axComb,\n\t\t\t\t\t\t\t\t\t\t\t\t ageCutoff=self.ageCutoff) \n\t\tself.nmxsFromKtCurrent_unfitMale = project_nmx(kt=self.kt_unfitComb, bx=self.bxComb, ax=self.axMale,\n\t\t\t\t\t\t\t\t\t\t\t\t ageCutoff=self.ageCutoff) \n\t\tself.nmxsFromKtCurrent_unfitFem = project_nmx(kt=self.kt_unfitComb, bx=self.bxComb, ax=self.axFem,\n\t\t\t\t\t\t\t\t\t\t\t\t ageCutoff=self.ageCutoff) \n\t\tself.e0sFromKtCurrentComb = N.array([LcUtil.lifeTable(nmxp=nmxRow, **self.lifeTableParams)\n\t\t\t\t\t\t\t\t\t\t for nmxRow in self.nmxsFromKtCurrentComb])\n\t\tself.e0sFromKtCurrentMale = N.array([LcUtil.lifeTable(nmxp=nmxRow, **self.lifeTableParams)\n\t\t\t\t\t\t\t\t\t\t for nmxRow in self.nmxsFromKtCurrentMale])\n\t\tself.e0sFromKtCurrentFem = N.array([LcUtil.lifeTable(nmxp=nmxRow, **self.lifeTableParams)\n\t\t\t\t\t\t\t\t\t\t for nmxRow in self.nmxsFromKtCurrentFem])\n\t\tself.e0sFromKtCurrent_unfitComb = N.array([LcUtil.lifeTable(nmxp=nmxRow, **self.lifeTableParams)\n\t\t\t\t\t\t\t\t\t\t for nmxRow in self.nmxsFromKtCurrent_unfitComb])\n\t\tself.e0sFromKtCurrent_unfitMale = N.array([LcUtil.lifeTable(nmxp=nmxRow, **self.lifeTableParams)\n\t\t\t\t\t\t\t\t\t\t for nmxRow in self.nmxsFromKtCurrent_unfitMale])\n\t\tself.e0sFromKtCurrent_unfitFem = N.array([LcUtil.lifeTable(nmxp=nmxRow, **self.lifeTableParams)\n\t\t\t\t\t\t\t\t\t\t for nmxRow in self.nmxsFromKtCurrent_unfitFem])\n\t\tassert len(self.e0sFromKtCurrentComb) >= 1, \\\n\t\t\t AssertionError(\"self.e0sFromKtCurrentComb: %s\" % self.e0sFromKtCurrentComb)\n\t\tassert type(self.e0sFromKtCurrentComb) == N.ndarray, \\\n\t\t\t AssertionError(\"Weird type: %s\" % type(self.e0sFromKtCurrentComb))\n\n\t\t######## Simulation ##############\n\t\t## Get random walk parameters\n\t\tself.diffedKt = S.diff(self.ktComb)\n\t\tself.drift = S.average(self.diffedKt)\n\t\tself.stdErrorEq = S.std(self.diffedKt)\n\t\tself.stdErrorCoeff = self.stdErrorEq/S.sqrt(len(self.diffedKt))\n\t\t\n\t\t## Multi run simulation with above 
paramenters\n\t\tself.kt_simul = sim_kt(SEC=self.stdErrorCoeff, SEE=self.stdErrorEq, drift=self.drift,\n\t\t\t\t\t\t\t numRuns=self.numRuns, stepsForward=self.stepsForward, sortflag=False)\n\t\tself.nmx_projectedFem = project_nmx(kt=self.kt_simul, bx=self.bxComb, ax=jumpoffAxFem,\n\t\t\t\t\t\t\t\t\t\t ageCutoff=self.ageCutoff)\n\t\tself.nmx_projectedMale = project_nmx(kt=self.kt_simul, bx=self.bxComb, ax=jumpoffAxMale,\n\t\t\t\t\t\t\t\t\t\t ageCutoff=self.ageCutoff)\n\t\tself.nmx_projected_stochastic_median_final_F = project_nmx(kt=N.median(N.sort(self.kt_simul,0)),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t bx=self.bxComb, ax=jumpoffAxFem, ageCutoff=self.ageCutoff)\n\t\tself.nmx_projected_stochastic_median_final_M = project_nmx(kt=N.median(N.sort(self.kt_simul,0)),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t bx=self.bxComb, ax=jumpoffAxMale, ageCutoff=self.ageCutoff)\n\n\t\tself.e0s_projectedFem = lots_e0s(self.percentileIndices, self.nmx_projectedFem, self.lifeTableParams)\n\t\tself.e0s_projectedMale = lots_e0s(self.percentileIndices, self.nmx_projectedMale, self.lifeTableParams)\n\t\t\n\t\t## Derive analytic mean and x% forecast interval of kt\n\t\tx = S.arange(0.0, float(self.stepsForward+1))\n\t\tself.KtStdError = ((x*(self.stdErrorEq**2.0)) +\n\t\t\t\t\t\t (x*self.stdErrorCoeff)**2.0)**.5 # kt.stderr <- ( (x*see^2) + (x*sec)^2 )^.5\n\t\ttArray = N.array([self.drift] * (self.stepsForward+1))\n\t\ttArray[0] = 0.0\n\t\tself.meanKtProjected = N.cumsum(tArray)\n\t\tself.upperKtProjected = self.meanKtProjected + (self.zscore * self.KtStdError)\n\t\tself.lowerKtProjected = self.meanKtProjected - (self.zscore * self.KtStdError)\n\n\t\tself.nmx_projectedMedian_F = project_nmx(kt=self.meanKtProjected, bx=self.bxComb, ax=jumpoffAxFem,\n\t\t\t\t\t\t\t\t\t\t\t\t ageCutoff=self.ageCutoff)\n\t\tself.nmx_projectedMedian_M = project_nmx(kt=self.meanKtProjected, bx=self.bxComb, ax=jumpoffAxMale,\n\t\t\t\t\t\t\t\t\t\t\t\t ageCutoff=self.ageCutoff)\n\t\tself.nmx_projected_median_final_F = self.nmx_projectedMedian_F[-1,:]\n\t\tself.nmx_projected_median_final_M = self.nmx_projectedMedian_M[-1,:]\n\n\t\t\n\t\t## Derive the projected e0s, F\n\t\tself.upperE0ProjectedFem = LcUtil.multiKt2e0(self.upperKtProjected, ax=jumpoffAxFem, bx=self.bxComb,\n\t\t\t\t\t\t\t\t\t\t\t\t lifeTableParams=self.lifeTableParams)\n\t\tself.lowerE0ProjectedFem = LcUtil.multiKt2e0(self.lowerKtProjected, ax=jumpoffAxFem, bx=self.bxComb,\n\t\t\t\t\t\t\t\t\t\t\t\t lifeTableParams=self.lifeTableParams)\n\t\tself.meanE0ProjectedFem = LcUtil.multiKt2e0(self.meanKtProjected, ax=jumpoffAxFem, bx=self.bxComb,\n\t\t\t\t\t\t\t\t\t\t\t\t lifeTableParams=self.lifeTableParams)\n\t\t## Derive the projected e0s, M\n\t\tself.upperE0ProjectedMale = LcUtil.multiKt2e0(self.upperKtProjected, ax=jumpoffAxMale, bx=self.bxComb,\n\t\t\t\t\t\t\t\t\t\t\t\t lifeTableParams=self.lifeTableParams)\n\t\tself.lowerE0ProjectedMale = LcUtil.multiKt2e0(self.lowerKtProjected, ax=jumpoffAxMale, bx=self.bxComb,\n\t\t\t\t\t\t\t\t\t\t\t\t lifeTableParams=self.lifeTableParams)\n\t\tself.meanE0ProjectedMale = LcUtil.multiKt2e0(self.meanKtProjected, ax=jumpoffAxMale, bx=self.bxComb,\n\t\t\t\t\t\t\t\t\t\t\t\t lifeTableParams=self.lifeTableParams)\n\t\t\n\t\treturn\t\t\t\t\t\t\t# Don't return anything useful", "title": "" }, { "docid": "433e817aad6d5f6ae055547a3bdf57cc", "score": "0.48529863", "text": "def get_discrete_loads(self):\n # input values derived\n x1, x2, x3, xa, xa1, xa2, theta, d1, d3, E, G, P, la, step = self.get_discrete_input()\n Q_l, Mqz_l, ddq_x2, ddq_xa2, ddq_x1, ddq_x3 = 
self.get_distributed_loads_aero()\n dsch, dsca_y, dsca_z, Izz, Iyy, J, z = self.get_geometry()\n\n # function\n cte_v = -1 / (E * Izz) # cte in v deflection formula\n cte_w = -1 / (E * Iyy) # cte in w deflection formula\n cte_T = 1 / (G * J) # cte in torsion equation\n\n # order variables matrix\n # F_z1 , F_z2, F_z3, F_a, F_y1, F_y2, F_y3, c1, c2, c3, c4, c5\n left_column = np.array([\n [la - x1, la - x2, la - x3, np.cos(theta) * (la - xa1), 0, 0, 0, 0, 0, 0, 0, 0],\n # Moment around y equation at la\n [0, 0, 0, np.sin(theta) * (la - xa1), la - x1, la - x2, la - x3, 0, 0, 0, 0, 0],\n # Moment around z equation at la\n [-dsch, dsch, -dsch, dsca_y * np.cos(theta) + dsca_z * np.sin(theta), 0, 0, 0, 0, 0, 0, 0, 0],\n # Torque aka Moment around x at la\n [1, 1, 1, np.cos(theta), 0, 0, 0, 0, 0, 0, 0, 0], # force/shear z\n [0, 0, 0, np.sin(theta), 1, 1, 1, 0, 0, 0, 0, 0], # force/shear y\n [0, 0, 0, cte_v / 6 * np.sin(theta) * (x2 - xa1) ** 3, cte_v / 6 * (x2 - x1) ** 3, 0, 0, x2, 1, 0, 0, 0],\n # v deflection at x2\n [0, 0, 0, cte_v / 6 * np.sin(theta) * (xa2 - xa1) ** 3, cte_v / 6 * (xa2 - x1) ** 3,\n cte_v / 6 * (xa2 - x2) ** 3, 0, xa2, 1, 0, 0, 0], # v deflection at xa2\n [cte_w / 6 * (x2 - x1) ** 3, 0, 0, cte_w / 6 * np.cos(theta) * (x2 - xa1) ** 3, 0, 0, 0, 0, 0, x2, 1, 0],\n # w deflection at x2\n [0, 0, 0, 0, 0, 0, 0, x1, 1, 0, 0, (z - x1)], # v + theta(z-x) deflection at x1\n [-cte_T * dsch * (x3 - x1) * (z - x3), cte_T * dsch * (x3 - x2) * (z - x3), 0,\n cte_v / 6 * np.sin(theta) * (x3 - xa1) ** 3 + cte_T * (dsca_y * np.cos(theta) + dsca_z * np.sin(theta)) * (\n z - x3),\n cte_v / 6 * (x3 - x1) ** 3, cte_v / 6 * (x3 - x2) ** 3, 0, x3, 1, 0, 0, (z - x3)],\n # v + theta(z-x) deflection at x3\n [0, 0, 0, 0, 0, 0, 0, 0, 0, x1, 1, (z - x1)], # w + theta(z-x) deflection at x1\n [cte_w / 6 * (x3 - x1) ** 3 - cte_T * dsch * (x3 - x1) * (z - x3),\n cte_w / 6 * (x3 - x2) ** 3 + cte_T * dsch * (x3 - x2) * (z - x3), 0,\n cte_v / 6 * np.cos(theta) * (x3 - xa1) ** 3 + cte_T * (dsca_y * np.cos(theta) + dsca_z * np.sin(theta)) * (\n z - x3),\n 0, 0, 0, 0, 0, x3, 1, (z - x3)] # w + theta(z-x) deflection at x3\n ])\n\n right_column = np.array([\n [P * np.cos(theta) * (la - xa2)], # Moment around y equation at la\n [P * np.sin(theta) * (la - xa2) + Mqz_l], # Moment around z equation at la\n [P * (dsca_y * np.cos(theta) + dsca_z * np.sin(theta))], # Torque aka Moment around x at la\n [P * np.cos(theta)], # force/shear z\n [P * np.sin(theta) + Q_l], # force/shear y\n [cte_v * ddq_x2], # v deflection at x2 qq at x2\n [cte_v * ddq_xa2], # v deflection at xa2 qq at x2\n [0], # w deflection at x2\n [d1 * np.sin(theta) + cte_v * ddq_x1], # v deflection at x1 qq at x1\n [d3 * np.sin(theta) + cte_v * (1 / 6 * P * np.sin(theta) * (x3 - xa2) ** 3 + ddq_x3)\n + (cte_T * (P * (dsca_y * np.cos(theta) + dsca_z * np.sin(theta)) * (x3 - x1))) * (z - x3)],\n # v deflection at x3 qqt qq at x3\n [d1 * np.cos(theta)], # w deflection at x1\n [d3 * np.cos(theta) + cte_w / 6 * P * np.cos(theta) * (x3 - xa2) ** 3\n + (cte_T * (P * (dsca_y * np.cos(theta) + dsca_z * np.sin(theta)) * (x3 - x1))) * (z - x3)]\n # w deflection at x3\n ])\n\n F_z1, F_z2, F_z3, F_a, F_y1, F_y2, F_y3, c1, c2, c3, c4, c5 = np.linalg.solve(left_column, right_column)\n\n return F_z1, F_z2, F_z3, F_a, F_y1, F_y2, F_y3, c1, c2, c3, c4, c5", "title": "" }, { "docid": "23c07d3e9f8ba2e638eeee3801fe36bc", "score": "0.48499912", "text": "def test_analysis_get(self):\n pass", "title": "" }, { "docid": "d3af1717b4ff9b5929126f12402d4027", "score": "0.4847316", 
"text": "def extraction(self):\r\n #TODO - \r\n listtags = []\r\n resultvalues = []\r\n modname = self.extfile\r\n methodlist = []\r\n #Load each extractor code\r\n #Case 1: Extractor = External library with methods and standard parameters - inputfile\r\n #Import external library\r\n mod = __import__(modname)\r\n dirlist = dir(mod)\r\n for met in dirlist:\r\n if \"__\" not in met and met not in ['re','os']:\r\n methodlist.append(met)\r\n for method in methodlist:\r\n result = \"\"\r\n #Load each desired method from external module\r\n loading = getattr(mod, method)\r\n #Get the result from method invocation using infile as standard parameter\r\n result = loading(self, self.infile)\r\n #Test if result is a dictionary\r\n if type(result) == dict:\r\n #Adds each subtuple to the tags and results list\r\n tags = result.keys()\r\n tags.sort()\r\n #Add the new tags\r\n listtags.extend(tags)\r\n #Add the new values\r\n for tag in tags:\r\n if (result[tag]) == str:\r\n resultvalues.append(result[tag])\r\n else:\r\n resultvalues.append(str(result[tag]))\r\n else:\r\n #Converts unknown format to string\r\n result = str(result)\r\n \r\n #Adds method name to listtags and result to resultvalues\r\n listtags.append(method)\r\n resultvalues.append(result)\r\n #Case 2: Extractor = Legacy code to be executed by shell\r\n #TODO\r\n #Case 3: Extractor = python class\r\n #TODO \r\n #Standard extraction output\r\n \r\n# begin-vitor\r\n if self.extfile == \"PA_Readseq\":\r\n \tlisttags.append(\"PHYLIP\")\r\n \tfilename = self.infile.split(\"/\")[len(self.infile.split(\"/\"))-1]\r\n \tresultvalues.append(filename + \".phylip\")\r\n elif self.extfile == \"PA_Mafft\":\r\n \tlisttags.append(\"MAFFT_FILE\")\r\n \tfilename = self.infile.split(\"/\")[len(self.infile.split(\"/\"))-1]\r\n \tresultvalues.append(filename + \".mafft\")\r\n elif self.extfile == \"PA_Modelgenerator\":\r\n \tlisttags.append(\"MG\")\r\n \tfilename = self.infile.split(\"/\")[len(self.infile.split(\"/\"))-1]\r\n \tresultvalues.append(filename + \".mg.modelFromMG.txt\")\r\n elif self.extfile == \"PA_Raxml\":\r\n \tlisttags.append(\"RAXML\")\r\n \tfilename = self.infile.split(\"/\")[len(self.infile.split(\"/\"))-1]\r\n \tresultvalues.append(\"RAxML_bipartitions.\" + filename + \".phylip_tree3\")\r\n# end-vitor\r\n \t\r\n \t\r\n outlist = self.buildOutputFile(tags=listtags, provdata = resultvalues)\r\n output = open(self.outfile, \"w\")\r\n for line in outlist:\r\n output.write(line)", "title": "" }, { "docid": "924973242f8b1dabba767053790ea5f9", "score": "0.48471954", "text": "def get_trajectories(env, policy_model, n_episode, episode_length):\n paths = []\n epoch_return = 0\n episode_count = 0\n for i_episode in range(n_episode):\n episode_return = 0\n episode_count += 1\n obs_list = []\n log_prob_list = []\n action_list = []\n reward_list = []\n mask_list = []\n observation = env.reset()\n\n for t in range(episode_length):\n # env.render()\n obs_list.append(observation)\n obs_tensor = torch.Tensor(observation).view(1,-1)\n\n mu, log_sigma = policy_model(obs_tensor)\n\n normal_dist = Normal(mu,log_sigma.exp())\n\n action = normal_dist.sample()\n action_list.append(action.reshape(-1))\n\n ## the log prob here is the log prob of taking the set of actions, so we take sum of log\n ## you can also take product of exp of log to get same results\n log_prob = normal_dist.log_prob(action)\n log_prob = torch.sum(log_prob)\n log_prob_list.append(log_prob)\n\n ## make sure action is within env's specifics\n action = torch.clamp(action, -1, 1).reshape(-1)\n 
observation, reward, done, info = env.step(action.data.numpy())\n\n if done:\n mask_list.append(0) ## used in calculating advantage\n else:\n mask_list.append(1)\n\n episode_return += reward\n reward_list.append(reward)\n if done:\n break\n epoch_return += episode_return\n ## now we have finished one episode, we now assign reward (all the data points in\n ## the same trajectory have the same reward)\n path = {'obs':obs_list,'mask':mask_list, 'log_probs':log_prob_list, 'rewards':reward_list,'actions':action_list,'episode_return':episode_return}\n paths.append(path)\n return paths, epoch_return/n_episode\n #REturns the paths and", "title": "" }, { "docid": "5d5e3f93067c12da57d20071e09ebdc0", "score": "0.48467577", "text": "def trajectory(self):\n return Trajectory.fit(self.xyz)", "title": "" }, { "docid": "7429f4cde8403c409ab98f94ca80a05c", "score": "0.48438308", "text": "def __call__(self, observation: Mapping[str, np.ndarray],\n **kwargs) -> np.ndarray:\n\n # Prepares observation for the neural-network.\n observation[\"overhead_features\"] = observation[\n \"bird_view_camera_cityscapes\"]\n for attr in observation:\n if not isinstance(observation[attr], np.ndarray):\n observation[attr] = np.atleast_1d(observation[attr])\n observation[attr] = observation[attr][None, ...].astype(np.float32)\n\n # Makes `goal` 2D.\n observation[\"goal\"] = observation[\"goal\"][..., :2]\n # Convert image to CHW.\n observation[\"lidar\"] = np.transpose(observation[\"lidar\"], (0, 3, 1, 2))\n # Processes observations for the `ImitativeModel`.\n observation = {\n key: torch.from_numpy(tensor).to(self._device) # pylint: disable=no-member\n for (key, tensor) in observation.items()\n }\n observation = self._model.transform(observation)\n\n # Queries model.\n plan = self._model(num_steps=kwargs.get(\"num_steps\", 20),\n epsilon=kwargs.get(\"epsilon\", 1.0),\n lr=kwargs.get(\"lr\", 5e-2),\n **observation).detach().cpu().numpy()[0] # [T, 2]\n\n # TODO(filangel): clean API.\n # Interpolates plan.\n player_future_length = 40\n increments = player_future_length // plan.shape[0]\n time_index = list(range(0, player_future_length, increments)) # [T]\n plan_interp = scipy.interpolate.interp1d(x=time_index, y=plan, axis=0)\n xy = plan_interp(np.arange(0, time_index[-1]))\n\n # Appends z dimension.\n z = np.zeros(shape=(xy.shape[0], 1))\n return np.c_[xy, z]", "title": "" }, { "docid": "7aa47e88009d625d48219a36b3cd0153", "score": "0.48420858", "text": "def AddToTrajectory(self,sample):\n #Replacing the action with the hierarchical action.\n nestedMethodSample = [sample[0]] + [sample[5]] + sample[2:5] +sample[6:]\n self.nestedMethod.AddToTrajectory(nestedMethodSample)", "title": "" }, { "docid": "48ecf4a4f59196f16eded5c607b3a066", "score": "0.48416775", "text": "def test_interface():\n import pKaTool.pKa_calc\n X=pKaTool.pKa_calc.Monte_Carlo_Mult_CPP()\n\n X.intrinsic_pKa={':0001:ASP':[0.0,4.0,5.0]}\n X.charged_state={':0001:ASP':[0,1,1]}\n X.acid_base={':0001:ASP':-1}\n X.intene_mult={':0001:ASP':{':0001:ASP':[[0,0,0],[0,0,0],[0,0,0]]}}\n X._calc_pKas(0.0,10.0,0.5)\n return", "title": "" }, { "docid": "ca29e47b3ba298f8a68f04964b6826ab", "score": "0.4839466", "text": "def construct_traj_set(self, planning_problem_list):\n trajectories = []\n cnt = 1\n total_path_len = 0\n # Add code here to create a traj set #\n for planning_problem in planning_problem_list: \n print(cnt)\n cnt+=1 \n traj,traj_dist = planning_problem.get_traj(trajectories)\n total_path_len += traj_dist\n if(len(traj)==0):\n print(\"TRAJS NOT FOUND, BREAK\")\n 
return [], 0, False\n trajectories.append(traj)\n \n\n return trajectories, total_path_len, True", "title": "" }, { "docid": "75c716746377eb39778f331cc9fd2da2", "score": "0.4837887", "text": "def perform_exploration():\n model = un.NeuronModel(file=\"tabak.py\", name=\"tabak\")\n\n original_g_BKs = np.linspace(0, 1, nr_points_exploration)\n g_BKs = scale_conductance(original_g_BKs)\n\n # g_SK\n original_g_SKs = np.linspace(1, 3, nr_points_exploration)\n g_SKs = scale_conductance(original_g_SKs)\n\n event_durations_SK = np.zeros((len(original_g_BKs), len(original_g_SKs)))\n for i, g_BK in enumerate(tqdm(g_BKs, desc=\"Varying g_BK\")):\n for j, g_SK in enumerate(tqdm(g_SKs, desc=\"Varying g_SK\")):\n time, voltage, info = model.run(g_BK=g_BK,\n g_SK=g_SK,\n discard=discard,\n noise_amplitude=noise_amplitude,\n simulation_time=simulation_time)\n\n tmp_duration = np.mean(duration(time, voltage))\n\n if np.isnan(tmp_duration):\n tmp_duration = -1\n\n event_durations_SK[i, j] = tmp_duration\n\n\n original_g_Ks = np.linspace(1.5, 4.5, nr_points_exploration)\n g_Ks = scale_conductance(original_g_Ks)\n\n event_durations_K = np.zeros((len(original_g_BKs), len(original_g_Ks)))\n for i, g_BK in enumerate(tqdm(g_BKs, desc=\"Varying g_BK\")):\n for j, g_K in enumerate(tqdm(g_Ks, desc=\"Varying g_K\")):\n time, voltage, info = model.run(g_BK=g_BK,\n g_K=g_K,\n discard=discard,\n noise_amplitude=noise_amplitude,\n simulation_time=simulation_time)\n\n\n tmp_duration = np.mean(duration(time, voltage))\n\n if np.isnan(tmp_duration):\n tmp_duration = -1\n\n event_durations_K[i, j] = tmp_duration\n\n np.save(os.path.join(data_folder, \"original_g_BKs\"), original_g_BKs)\n np.save(os.path.join(data_folder, \"original_g_SKs\"), original_g_SKs)\n np.save(os.path.join(data_folder, \"original_g_Ks\"), original_g_Ks)\n np.save(os.path.join(data_folder, \"event_durations_SK\"), event_durations_SK)\n np.save(os.path.join(data_folder, \"event_durations_K\"), event_durations_K)\n\n return original_g_BKs, original_g_SKs, original_g_Ks, event_durations_SK, event_durations_K", "title": "" }, { "docid": "0f7b2398fd8c47565c574bd8990d20f7", "score": "0.48374778", "text": "def test2():\n print('test2 ............................................................',\n \"\"\"\n To understand and to confirm the correctnes of the SIMULINAC plots,\n this test checks the influence of the twiss-alpha parameter using a \n thin lens FODO.\n \"\"\")\n # initial conditions for twiss parameters\n beta = 1. # [m]\n alfa = -0.5\n epsi = 1.e-6 # emittance [m*rad]\n # other initials\n NC=56 # NC cells\n T = 25. # kin. energy [MeV]\n T=T*1.e-3 # [GeV] kin.energy \n betakin=M.sqrt(1.-(1+T/E0)**-2) # beta kinetic\n E=E0+T # [GeV] energy\n k=0.2998*Bg/(betakin*E) # [1/m^2]\n L=0.596 # distance between quads [m]\n Ql=0.04 # full quad length [m]\n f=1./(k*Ql) # ff = -fd = f\n mfodo1 = Mq(-2*f,Ql/2.) # 1/2 qd\n mfodo2 = mmult(mfodo1,Md(L/2.))\n mfodo3 = mmult(mfodo2,Mq(2*f,Ql/2.)) # 1/2 qf\n m11 = mfodo3[0][0]\n m12 = mfodo3[0][1]\n m21 = mfodo3[1][0]\n m22 = mfodo3[1][1]\n mfodor = np.array([[m22,m12],[m21,m11]]) # reverse\n \n mfodo = mmult(mfodo3,mfodor)\n trace = abs(mfodo.trace())\n \n mfodo2 = Mfodo(2*f,L/2.) 
# Wiedemann\n my_debug('matrix probe: mfodo-mfodo2 must be zero matrix')\n zero = mfodo-mfodo2\n my_debug(f'{abs(zero[0][0]):.5f} {abs(zero[0][1]):.5f}')\n my_debug(f'{abs(zero[1][0]):.5f} {abs(zero[1][1]):.5f}')\n \n # now in slices\n slices = 20\n ld = L/slices\n lq = Ql/slices\n f = 1./(k*lq)\n md = Md(ld/2) # element\n mqf = Mq(+2*f,lq/2) # element\n mqd = Mq(-2*f,lq/2) # element\n twd = twmatrix(md) # twiss-matrix\n twqf = twmatrix(mqf) # twiss-matrix\n twqd = twmatrix(mqd) # twiss-matrix\n lattice = [] # element list\n twlattice = [] # twiss matrix list\n pos = [0.] # position list\n s = 0. # postion (abzsisse)\n mfodo5 = np.array([[1.,0.],[0.,1.]])# unit matrix\n for m in range(NC):\n mfodo6 = mfodo5.dot(mfodo2) # chain Wiedemann cells\n mfodo5 = mfodo6\n for n in range(1,slices+1): # chain slices\n s += lq\n pos.append(s)\n lattice.append(mqd)\n twlattice.append(twqd)\n for n in range(1,slices+1):\n s += ld\n pos.append(s)\n lattice.append(md)\n twlattice.append(twd)\n for n in range(1,slices+1):\n s += lq\n pos.append(s)\n lattice.append(mqf)\n twlattice.append(twqf)\n for n in range(1,slices+1):\n s += lq\n pos.append(s)\n lattice.append(mqf)\n twlattice.append(twqf)\n for n in range(1,slices+1):\n s += ld\n pos.append(s)\n lattice.append(md)\n twlattice.append(twd)\n for n in range(1,slices+1):\n s += lq\n pos.append(s)\n lattice.append(mqd)\n twlattice.append(twqd)\n # full lattice matrix\n mfodo3 = np.array([[1.,0.],[0.,1.]])\n for node in lattice:\n mfodo4 = mfodo3.dot(node)\n mfodo3 = mfodo4\n my_debug('lattice probe: mfodo3-mfodo5 must be zero matrix')\n zero = mfodo3-mfodo5\n my_debug(f'{abs(zero[0][0]):.5f} {abs(zero[0][1]):.5f}')\n my_debug(f'{abs(zero[1][0]):.5f} {abs(zero[1][1]):.5f}')\n \n twx = Twiss(beta,alfa,epsi)\n # C track\n track_point_C = np.array(twx.y1())\n # S track\n track_point_S = np.array(twx.y4())\n\n points_c = [track_point_C]\n points_s = [track_point_S]\n for node in lattice:\n C_point = node.dot(track_point_C)\n points_c.append(C_point)\n track_point_C = C_point\n S_point = node.dot(track_point_S)\n points_s.append(S_point)\n track_point_S = S_point\n\n # enveloppe \n beta0, alfa0, gamma0 , epsi0 = twx()\n twiss_point = np.array([beta0,alfa0,gamma0]) # twiss track\n twpoints = [twiss_point]\n for bmx in twlattice:\n twissp = bmx.dot(twiss_point)\n twpoints.append(twissp)\n twiss_point = twissp\n\n # trajectories\n c_trk = [v[0] for v in points_c]\n s_trk = [v[0] for v in points_s]\n # enveloppe (beta*emittance)^(1/2)\n sgx = [M.sqrt(v[0]*epsi0) for v in twpoints]\n \n plt.figure('x & sgx')\n ax1 = plt.subplot(111)\n ax1.set_title(F'alpha = {alfa:2.1f}')\n ax1.plot(pos,c_trk, label='C')\n ax1.plot(pos,s_trk, label='S')\n ax1.plot(pos,sgx, label='env')\n ax1.legend(loc='lower right',fontsize='small')\n plt.show()\n return", "title": "" }, { "docid": "1d71f0d0082f3527649c54dc841786fe", "score": "0.4835147", "text": "def testTransformation3():\n\n\n\n i = 0\n while i < 40:\n i = i + 1\n\n N = 20\n model_points = np.ones((3, N))\n\n variance = 1/40 * i\n var = np.array([0.1, 0.1, variance])\n C = eye(3,3) #np.outer(var, var)\n # C = eye(3, 3)\n C[0,1] = variance\n C[1,0] = variance\n C[2,1] = variance\n C[1,2] = variance\n C[0,2] = variance/10\n C[2,0] = variance/10\n\n model_points = np.random.multivariate_normal([0,0,0], C, 15) * 200\n model_points = model_points.T\n\n planar, ratio = IsPlanar(model_points)\n\n # flat = False\n # while not flat:\n # # Camera A\n # model_points = ((2 * np.random.rand(3, 20)) - 1) * 500\n # # model_points = 
np.random.normal(0, 20, (3, 100))\n #\n # planar, ratio = IsPlanar(model_points)\n #\n # flat = ratio > 0.8\n\n # Ground truth transformation parameters\n # x y z\n R_params = [23, -12, 4]\n t_params = [44, -102, 12]\n transform_parms = R_params + t_params\n transfomed_points, R, t = transform(transform_parms, model_points, False, 0, 5)\n\n # fx fy cx cy k0 k1\n project_params = [100, 100, 50, 50, 0, 0]\n image_points = projective_transform(project_params, transfomed_points)\n\n # Seed Params\n # seed = np.zeros(12) # transform_parms + (np.random.normal(0, 11, 6))\n # seed = seed + [0,0,0,0,0,0,10, 10, 5, 5, 0, 0]\n\n noiseval = 5 #i * 1\n transformNoise = transform_parms + np.random.normal(0, noiseval, 6)\n projectNoise = project_params[1:] + np.random.normal(0, noiseval, 5)\n seed = np.concatenate([projectNoise, transformNoise])\n # seed = seed + (np.random.normal(0, 0.1, 11))\n\n\n # Run LMA\n out = LMA.LM(seed, (model_points, image_points),\n projective_error_function,\n lambda_multiplier=2, kmax=2000, eps=0.1)\n\n\n print(\"\\n\\nRMS ERR: \\t{}\".format(out[0]))\n print(\"Eigen Ratio: \\t{}, {}\".format(ratio, variance))\n print(\"Noise Val: {}\".format(noiseval))\n print(\"Projection: \\t{}\".format(out[1][0:5]))\n print(\"Angle: \\t{}\".format(out[1][5:8]))\n print(\"Translation: \\t{}\".format(out[1][8:11]))\n print(\"Reason: \\t{}\".format(out[2]))\n\n\n\n print(model_points)", "title": "" }, { "docid": "b44b817af2255dbfa9e1f6b8a61ba589", "score": "0.48300886", "text": "def get_trajectory_estimates(self, policy: Controller):\n\n state_mean = self.start_mean\n state_cov = self.start_cov\n\n # container required plotting later on\n state_means_container = []\n state_covs_container = []\n action_means_container = []\n action_covs_container = []\n\n state_means_container.append(state_mean)\n state_covs_container.append(state_cov)\n\n for t in range(self.args.horizon):\n state_next_mean, state_next_cov, action_mean, action_cov = self.rollout(policy, state_mean, state_cov)\n\n state_means_container.append(state_next_mean)\n state_covs_container.append(state_next_cov)\n action_means_container.append(action_mean)\n action_covs_container.append(action_cov)\n\n state_mean = state_next_mean\n state_cov = state_next_cov\n\n return np.array(state_means_container), np.array(state_covs_container), np.array(action_means_container),\\\n np.array(action_covs_container)", "title": "" }, { "docid": "3c9ce2a511927b6d35c65c5446485c90", "score": "0.4827862", "text": "def _generate_trajectory(self):\n transitions = []\n state = self.env.reset()\n for i in range(HORIZON):\n # if i < HORIZON / 2:\n # action = [0.1, 0.1]\n # else:\n action = self._expert_control(state, i)\n next_state, cost, done, _ = self.env.step(action)\n transitions.append([state, action, cost, next_state, done])\n state = next_state\n assert done, \"Did not reach the goal set on task completion.\"\n V = self.env.values()\n for i, t in enumerate(transitions):\n t.append(V[i])\n # self.env.plot_trajectory()\n return transitions", "title": "" }, { "docid": "3d05ab063f0369f231d396df2abd3cc5", "score": "0.4827535", "text": "def algorithms(dxm_state):", "title": "" }, { "docid": "0dfd9a4b6b2f19ab1b792bbfe54dd1a2", "score": "0.4825209", "text": "def gather(self):", "title": "" }, { "docid": "4feb76db7004c1ae9d6f30bdbb820c68", "score": "0.48250794", "text": "def calcOneStructure(loopInfo): \n # Generate initial structure and minimize.\n #===========================================================================\n # Generate initial structure 
by randomizing torsion angles.\n if args.unfold == 'yes':\n import monteCarlo\n monteCarlo.randomizeTorsions(dyn)\n\n # Then set torsion angles from restraints (this shortens high T dynamics).\n protocol.fixupCovalentGeom(maxIters=100, useVDW=1)\n import torsionTools\n if DIHE:\n import torsionTools\n torsionTools.setTorsionsFromTable(DIHE)\n\n # Initialize parameters\n InitialParams(rampedParams) # Parameters for SA.\n InitialParams(highTempParams1) # Reset some rampedParams.\n\n if args.resetCenter == 'yes':\n setCenter(immx_com, Zpos) # Translate selected center of mass to IMMx Zpos.\n\n # Torsion angle minimization.\n #==========================================================================\n protocol.initMinimize(dyn,\n potList=pots,\n numSteps=100,\n printInterval=50)\n dyn.run()\n\n # High temperature dynamics.\n #===========================================================================\n # Start with REPEL to remove clashes then phase out\n if repelStart == 'yes':\n # High temperature dynamics stage 1.\n protocol.initDynamics(dyn,\n potList=pots, # potential terms to use.\n bathTemp=ini_temp, # set bath temperature.\n initVelocities=1, # uniform initial velocities.\n finalTime=30, # run for finalTime or\n numSteps=3001, # numSteps * 0.001, whichever is less.\n printInterval=100) # printing rate in steps.\n dyn.setETolerance(ini_temp/100) # used to det. stepsize, dflt [temp/1000].\n dyn.run()\n \n # High temperature dynamics stage 2.\n InitialParams(highTempParams2)\n if args.resetCenter == 'yes':\n setCenter(immx_com, Zpos) # translate selected center of mass to IMMx Zpos.\n protocol.initDynamics(dyn,\n potList=pots, # potential terms to use.\n bathTemp=ini_temp, # set bath temperature.\n initVelocities=1, # uniform initial velocities.\n finalTime=30, # run for finalTime or\n numSteps=3001, # numSteps * 0.001, whichever is less.\n printInterval=100) # printing rate in steps.\n dyn.setETolerance(ini_temp/100) # used to det. stepsize, dflt [temp/1000].\n dyn.run()\n\n # High temperature dynamics stage 3.\n InitialParams(highTempParams)\n if args.resetCenter == 'yes':\n setCenter(immx_com, Zpos) # translate selected center of mass to IMMx Zpos.\n protocol.initDynamics(dyn,\n potList=pots, # potential terms to use.\n bathTemp=ini_temp, # set bath temperature.\n initVelocities=1, # uniform initial velocities.\n numSteps=highTempSteps, # numSteps * 0.001, whichever is less.\n finalTime=highTempSteps/100,\t # run for finalTime or\n printInterval=100) # printing rate in steps.\n dyn.setETolerance(ini_temp/100) # used to det. 
stepsize, dflt [temp/1000].\n dyn.run()\n\n # Initialize integrator and loop for simulated annealing and run.\n #===========================================================================\n # Dynamics for annealing.\n if args.resetCenter == 'yes':\n setCenter(immx_com, Zpos) # translate selected center of mass to IMMx Zpos.\n protocol.initDynamics(dyn,\n potList=pots,\n finalTime=0.4, # run for finalTime or\n numSteps=annealSteps, # numSteps*0.001, whichever is less.\n printInterval=100) \n\n # Set up cooling loop and run.\n from simulationTools import AnnealIVM\n AnnealIVM(initTemp=ini_temp,\n finalTemp=fin_temp,\n tempStep=step_temp,\n ivm=dyn,\n rampedParams=rampedParams\n ).run()\n\n # Run Ez-Potential to position Z-axis\n #===========================================================================\n if args.ezPot:\n from xplor import select\n m = IVM(xplor.simulation)\n protocol.initMinimize(m,numSteps=10)\n m.setVerbose(m.verbose() | m.printNodeDef)\n m.setStepType(\"MinimizeCG\")\n m.setNumSteps(10)\n m.setDEpred(1)\n m.setETolerance(1e-7)\n m.setPrintInterval(1)\n groupList=m.groupList()\n groupList.append(select(args.ezPot))\n m.setGroupList(groupList)\n m.setHingeList([('translate', select(args.ezPot)),])\n m.potList().removeAll()\n m.potList().add(Ezt)\n m.run()\n\n if args.relax == 'yes':\n InitialParams(relaxParams)\n protocol.initDynamics(dyn,\n potList=pots, # potential terms to use.\n bathTemp=args.relaxTemp, # set bath temperature.\n initVelocities=1, # uniform initial velocities.\n finalTime=30, # run for finalTime or\n numSteps=args.relaxSteps, # numSteps * 0.001, whichever is less.\n printInterval=100) # printing rate in steps.\n dyn.setETolerance(args.relaxTemp/100) # used to det. stepsize, dflt [temp/1000].\n dyn.run()\n \n # Final minimization.\n #===========================================================================\n # Torsion angle minimization.\n protocol.initMinimize(dyn,\n numSteps=500, # dflt [500 steps]\n potList=pots,\n printInterval=50)\n dyn.run()\n\n # Final Cartesian minimization.\n protocol.initMinimize(minc,\n numSteps=500, # dflt [500 steps]\n potList=pots,\n dEPred=10)\n minc.run()\n\n # Recenter coordinates in XY plane.\n setCenterXY() # translate protein coordinates to XY center.\n \n # Do analysis and write structure when this routine is finished.\n pass", "title": "" }, { "docid": "bae36d63197e84f10f88daf33608947d", "score": "0.4824436", "text": "def test_tdp(self):\n \n expstring = \"\"\"\n 0 4 4 -3.96678714134e-19 0.0\n 25 4 4 0.0123395314303 1.10216297674e-16\n 50 4 4 0.0521821949151 4.41386645353e-16\n 75 4 4 0.0375170438965 3.03228186587e-16\n 0 3 3 -2.15151658263e-18 0.0\n 25 3 3 0.0542304209663 3.27264685377e-16\n 50 3 3 0.133087497663 5.56816047005e-16\n 75 3 3 0.114486463162 5.08756535778e-16\n 0 0 5 -0.855304990279 0.0\n 25 0 5 0.338783123139 0.205218959392\n 50 0 5 -0.0404678523217 -0.0413353312194\n 75 0 5 -0.0747762812157 0.00493941699829\n \"\"\"\n \n with energy_units(\"1/cm\"):\n mol1 = Molecule([0.0, 12000.0])\n mod1 = Mode(300.0)\n mol1.add_Mode(mod1)\n mod1.set_nmax(0, 1)\n mod1.set_nmax(1, 8)\n mod1.set_HR(1,0.2)\n \n mol1.set_dipole((0,1), [1.0, 0.0, 0.0])\n \n time = TimeAxis(0.0, 100, 1.0)\n params = dict(ftype=\"OverdampedBrownian\",\n cortime=30.0, reorg=20, T=300,\n matsubara=30)\n \n cfce = CorrelationFunction(time, params=params)\n \n params = dict(ftype=\"OverdampedBrownian\",\n cortime=30.0, reorg=50, T=300,\n matsubara=30)\n \n cfc1 = CorrelationFunction(time, params=params)\n \n mol1.set_mode_environment(0, 
0, corfunc=cfce)\n mol1.set_mode_environment(0, 1, corfunc=cfce)\n mol1.set_transition_environment((0,1), cfc1)\n \n rhoi = mol1.get_excited_density_matrix(condition=\"delta\")\n \n prop = mol1.get_ReducedDensityMatrixPropagator(time,\n relaxation_theory=\"stR\",\n time_dependent=True)\n HH = mol1.get_Hamiltonian()\n #rhoi.data[:,:] = 0.0\n with eigenbasis_of(HH):\n rhoi.data[0,4] = 1.0\n rhoi.data[0,3] = 1.0\n \n rhot1 = prop.propagate(rhoi, Nref=1)\n\n HH.set_rwa([0,1]) \n \n rhot2 = prop.propagate(rhoi, Nref=1)\n \n rhot2.convert_from_RWA(HH)\n \n #\n # checking that rotating wave approximation is the same as standard \n #\n #numpy.testing.assert_allclose(rhot1.data, rhot2.data, rtol=8.0e-2)\n \n \n _show_plot_ = False\n if _show_plot_:\n import matplotlib.pyplot as plt\n with eigenbasis_of(HH):\n for ii in range(HH.dim):\n plt.plot(time.data, numpy.real(rhot1.data[:,ii,ii]))\n plt.plot(time.data, numpy.real(rhot2.data[:,ii,ii]),\"--\")\n \n plt.show()\n \n _create_data_ = False\n if _create_data_:\n \n elements = [(4,4), (3,3), (0,5)]\n for el in elements:\n for tt in range(0, 100, 25):\n print(tt, el[0], el[1],\n numpy.real(rhot2.data[tt,el[0],el[1]]),\n numpy.imag(rhot2.data[tt,el[0],el[1]]))\n \n # \n # compare to precalculated data\n #\n expected = {}\n il = 0\n for line in expstring.splitlines():\n rel = line.strip()\n if len(rel) > 0:\n nmbrs = rel.split(\" \")\n tmi = int(nmbrs[0])\n i1 = int(nmbrs[1])\n i2 = int(nmbrs[2])\n re = float(nmbrs[3])\n im = float(nmbrs[4])\n expected[il] = (tmi, i1, i2, re, im)\n il += 1\n\n _perform_test_ = True\n if _perform_test_:\n for ks in expected:\n dats = expected[ks]\n tt = dats[0]\n i1 = dats[1]\n i2 = dats[2]\n numpy.testing.assert_allclose(dats[3], \n numpy.real(rhot2.data[tt,i1,i2]),\n atol=1.0e-6)\n numpy.testing.assert_allclose(dats[4], \n numpy.imag(rhot2.data[tt,i1,i2]),\n atol=1.0e-6)", "title": "" }, { "docid": "10749cb54c17dc10f1c4429fd97e3a40", "score": "0.4823318", "text": "def __call__(self):\n raw_s_t, s_t, s_t_tensors = self._raw_s_t, self._s_t, self._s_t_tensors\n a_t_tensors, a_t, q_t, q_vals = self._action_f(s_t_tensors, self._t, raw_s_t)\n raw_s_t_1, raw_r_t, done, _ = self._env.step(a_t)\n r_t = torch.tensor([raw_r_t], dtype=torch.float, device=self._device)\n self._n_trans.append(\n (s_t_tensors, a_t_tensors, r_t, q_t)\n )\n # track funtions\n for track_f in self._track_fs:\n track_f(self._tracker, self._t)\n self._t += 1\n s_t_1, s_t_1_tensors = self._get_s_t_1(raw_s_t_1, done)\n\n done_cond = done or self._env.epi_step > self._max_step_per_epi\n\n # Generate nstep trans result\n if len(self._n_trans) == self._n_steps:\n r_t_n = self._aggr_nsteps_return()\n s_0, a_0, _, __ = self._n_trans.pop(0)\n n_step_trans = [(s_0, a_0, r_t_n, s_t_1_tensors)]\n else:\n n_step_trans = []\n\n ###\n # Handle done or max step epi\n ###\n if self._env.epi_step > self._max_step_per_epi:\n print(\"Episode forcefully terminated\")\n if done_cond:\n if self._eps_config is not None:\n track_keys = [\"reward\", \"step\", \"eps\", \"total_epi\"]\n track_vals = [self._env.epi_reward, self._env.epi_step, self._eps_config.eps_schedule_f(self._t), self._epi]\n else:\n track_keys = [\"reward\", \"step\", \"total_epi\"]\n track_vals = [self._env.epi_reward, self._env.epi_step, self._epi]\n self._tracker.tracks(track_keys, self._t, track_vals)\n self._epi += 1\n # Reste s_t if done or max step\n while len(self._n_trans) > 0:\n r_t_n = self._aggr_nsteps_return()\n s_0, a_0, _, __ = self._n_trans.pop(0)\n n_step_trans.append((s_0, a_0, r_t_n, 
s_t_1_tensors))\n self._raw_s_t = self._env.reset()\n self._s_t = self._q_net.prep(self._raw_s_t)\n self._s_t_tensors = prep_one_trans_device_tensors(self._s_t, self._device)\n else:\n self._raw_s_t, self._s_t, self._s_t_tensors = raw_s_t_1, s_t_1, s_t_1_tensors \n return n_step_trans, self._t", "title": "" } ]
c187fe94c06737319ce6a05f9f97bdf6
Decodes the auth token
[ { "docid": "02be4b20298fe9880c620e1a3a35e5a0", "score": "0.7484995", "text": "def decode_auth(auth_token):\n try:\n payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))\n return payload['sub']\n except jwt.exceptions.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "title": "" } ]
[ { "docid": "ca7dd14a048ddd0283f1e392a638ab5b", "score": "0.7767273", "text": "def decode_auth_token(auth_token):\n try:\n pyload= jwt.decode(auth_token, JWT_SECRET_KEY)\n return pyload['id']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "title": "" }, { "docid": "47f7e3b1d77cde15dded9aca66f6afdb", "score": "0.775636", "text": "def decode_auth_token(auth_token: Union[bytes, str]\n ) -> Union[ObjectId, str]:\n try:\n payload = jwt.decode(\n jwt=auth_token, key=app.config.get('SECRET_KEY', None)\n )\n return ObjectId(payload['sub'])\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please login again.'\n except jwt.InvalidTokenError:\n logger.info('Invalid token error.')\n apm.capture_exception()\n return 'Invalid token. Please log in again.'", "title": "" }, { "docid": "4d3b77bfec4b9e0be6e1e46f867ffff1", "score": "0.7524331", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, \"webProyect2020\")\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "title": "" }, { "docid": "0c0b1047167d9f3bc66c06efceab5272", "score": "0.74842906", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, key)\n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Token blacklisted. Please log in again.'\n else:\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "title": "" }, { "docid": "81d159fd9c74fb03ff33b917b1d7ee44", "score": "0.74725163", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, app.config['SECRET_KEY'])\n is_active_token = Active_Sessions.check_active_session(auth_token)\n if not is_active_token:\n return 'Token invalid.'\n else:\n return payload['sub'] \n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "title": "" }, { "docid": "331f1f08e7f52d2eab8b90d8bf7663c4", "score": "0.7468616", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, inject.instance(\"jwt_secret\"))\n return payload['sub']\n except jwt.ExpiredSignatureError:\n raise TokenExpired()\n except jwt.InvalidTokenError:\n raise NoToken()", "title": "" }, { "docid": "441cdb0048645ecd6c30bf2d84924a60", "score": "0.74175173", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, key)\n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Token blacklisted. Please log in again.'\n else:\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "title": "" }, { "docid": "c90e7539a0bd25eaac5f6cc9067e7d16", "score": "0.7406942", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))\n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Token blacklisted. 
Please log in again.'\n else:\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "title": "" }, { "docid": "ae1fd64c3e51b59691bb8f4001472212", "score": "0.7402157", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, current_app.config['SECRET_KEY'])\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired'\n except jwt.InvalidAlgorithmError:\n return 'Invalid Token'", "title": "" }, { "docid": "36ee1b4527439abbef33d7beeb990a12", "score": "0.7401791", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))\n is_blacklisted_token = ExpiredToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Token blacklisted. Please log in again.'\n else:\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "title": "" }, { "docid": "feeeb29f0084e44be01e0c7d16c2414d", "score": "0.73797643", "text": "def decode_auth_token(token):\n try:\n payload = jwt.decode(token, app.config['SECRET_KEY'], algorithms='HS256')\n is_token_blacklisted = BlackListToken.check_blacklist(token)\n if is_token_blacklisted:\n return 'Token was Blacklisted, Please login In'\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired, Please sign in again'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please sign in again'", "title": "" }, { "docid": "8dbbf3d24afc1306e1b7ff0230d1391e", "score": "0.7377921", "text": "def decode_auth_token(auth_token):\n try:\n if is_blacklisted(auth_token):\n return 'Token has been blacklisted. Please log in again'\n payload = jwt.decode(auth_token, 'SECRET_KEY', algorithm='HS256')\n session['user_id'] = str(payload.get('sub'))\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Token Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "title": "" }, { "docid": "3fd711b1b43a7068e4d130e115c921eb", "score": "0.7376567", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, flask.current_app.config.get(\n 'SECRET_KEY'))\n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Token blacklisted. Please log in again.'\n else:\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. 
Please log in again.'", "title": "" }, { "docid": "3e010f9e5586b9f2180fbbd689cdea53", "score": "0.73390514", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, SECRET_KEY)\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Expired'\n except jwt.InvalidTokenError:\n return None", "title": "" }, { "docid": "9aebaf475fac6a775fa6ee637a427634", "score": "0.7335341", "text": "def decode_auth_token(token):\n try:\n payload = jwt.decode(token, os.environ['SECRET_KEY'])\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Expired Token'\n except jwt.InvalidTokenError:\n return 'Invalid Token'", "title": "" }, { "docid": "5bf20f71092695aee29b53eb73b39ac7", "score": "0.7331229", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, SECRET_KEY)\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return \"Signature expired. Please sign-in again\"\n except jwt.InvalidTokenError:\n return \"Invalid token. Please sign-in again\"", "title": "" }, { "docid": "4a37b4375e19577cacfc89dab6fc03bb", "score": "0.73310727", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, app.config.get(\"SECRET_KEY\"))\n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return \"Token blacklisted. Please log in again.\"\n else:\n return payload[\"sub\"]\n except jwt.ExpiredSignatureError:\n return \"Signature expired. Please log in again.\"\n except jwt.InvalidTokenError:\n return \"Invalid token. Please log in again.\"", "title": "" }, { "docid": "c5a491ecaafbc13cb06e8b0be9c17f85", "score": "0.7321038", "text": "def decode_auth_token(token):\n try:\n payload = jwt.decode(token, app.config['SECRET_KEY'], algorithms='HS256')\n is_token_blacklisted = BlacklistedToken.check_blacklist(token)\n if is_token_blacklisted:\n return 'Token was blacklisted, please sign in again'\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired, Please sign in again'\n except jwt.InvalidSignatureError:\n return 'Signature verification failed'", "title": "" }, { "docid": "5a1e4c82c794381a520349cb3c0f227d", "score": "0.7318235", "text": "def decode_token(token):\n public_key = config('PUBLIC_KEY')\n try:\n return jwt.decode(token[7:], verify=False)\n except jwt.exceptions.ExpiredSignatureError:\n data = {\n 'status': 'error',\n 'error': 'token_expired',\n 'message': 'Get a new token'\n }\n raise exceptions.AuthenticationFailed(\n data, status.HTTP_401_UNAUTHORIZED)\n except jwt.exceptions.InvalidTokenError:\n data = {\n 'status': 'error',\n 'error': 'Invalid token',\n 'message': 'Ensure you are using a Goauth token'\n }\n raise exceptions.AuthenticationFailed(\n data, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "b6f5b4559acf507b2a39ced1f3375dc1", "score": "0.7254783", "text": "def decode(token):\n return jwt.decode(token, TOKEN_SECRET, algorithms=['HS256'])", "title": "" }, { "docid": "916870ecba0337096950d2cc9dadd3a4", "score": "0.72450095", "text": "def decode_auth_token(token):\n try:\n payload = jwt.decode(token, app.config['SECRET_KEY'], algorithms='HS256')\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired, Please sign in again'\n except jwt.InvalidTokenError:\n return 'Invalid token. 
Please sign in again'", "title": "" }, { "docid": "fdac84473a6485769daed2d9da3dcaf8", "score": "0.7175359", "text": "def decode_token(token_str):\n tokenParts = token_str.split(\".\")\n if len(tokenParts) < 3:\n raise Exception(\"Invalid JWT. Could not split into parts.\")\n padding = \"====\"\n infoStr = tokenParts[1] + padding[0 : len(tokenParts[1]) % 4]\n jsonStr = base64.urlsafe_b64decode(infoStr)\n return json.loads(jsonStr)", "title": "" }, { "docid": "8b001e7d7dcd187851e8040a6f0e7ded", "score": "0.7137952", "text": "def decode_password_reset_token(auth_token: bytes) -> Union[ObjectId, str]:\n try:\n payload = jwt.decode(\n jwt=auth_token, key=app.config.get('SECRET_KEY', None)\n )\n return ObjectId(payload['sub'])\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please get a new token.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please get a new token.'", "title": "" }, { "docid": "3922070d8c43974436c031c85057d7f2", "score": "0.71127415", "text": "def jwt_decode_token(token):\n return jwt_lib.decode(token, current_app.config['JWT_SECRET_KEY'],\n algorithms=current_app.config['JWT_ALGORITHMS'])", "title": "" }, { "docid": "c139a1bd9babb41202a610a02ac06995", "score": "0.7100378", "text": "def decode(self, token):\n json_string = base64.b64decode(token.encode('utf-8')).decode('utf-8')\n decoded_token = json.loads(json_string)\n\n # Remove the encoding metadata as it is read since it will no longer\n # be needed.\n encoded_keys = decoded_token.pop('boto_encoded_keys', None)\n if encoded_keys is None:\n return decoded_token\n else:\n return self._decode(decoded_token, encoded_keys)", "title": "" }, { "docid": "e6c8ecccb1d3433cf04a81166b313da9", "score": "0.7038034", "text": "def decode_token_or_400(auth_token):\n serializer = URLSafeSerializer(current_app.config['SECRET_KEY'])\n try:\n return serializer.loads(auth_token)\n except BadData as ex:\n generic_error_handler(ex)\n abort(400)", "title": "" }, { "docid": "d58167db9decee5c9e9e3abeeec5f669", "score": "0.69546914", "text": "def jwt_decode_handler(token):\n options = {\n \"verify_exp\": api_settings.JWT_VERIFY_EXPIRATION,\n }\n\n import jwt\n\n return jwt.decode(\n token,\n str(api_settings.JWT_SECRET_KEY),\n str(api_settings.JWT_VERIFY),\n options=options,\n leeway=api_settings.JWT_LEEWAY,\n audience=api_settings.JWT_AUDIENCE,\n issuer=api_settings.JWT_ISSUER,\n algorithms=[api_settings.JWT_ALGORITHM],\n )", "title": "" }, { "docid": "274bd0406d6c4793aab90eb1d32a164c", "score": "0.6913792", "text": "def decode(self, token: bytes) -> Mapping[str, Any]:\n payload = jwt.decode(\n token,\n key=self.secret,\n options={'verify_exp': False}\n )\n payload['exp'] = datetime.utcfromtimestamp(payload['exp'])\n payload['iat'] = datetime.utcfromtimestamp(payload['iat'])\n return payload", "title": "" }, { "docid": "8d82a205fd149fab00bc6f577e72699e", "score": "0.6904669", "text": "def decode_authorization_message(EncodedMessage=None):\n pass", "title": "" }, { "docid": "33c8246834a7fca3a3c35ad4cae29e68", "score": "0.6841103", "text": "def decode_token(token):\n try:\n payload = jwt.decode(token, os.environ.get('SECRET_KEY'))\n return payload['id']\n except jwt.ExpiredSignatureError:\n return 'Token has already Expired, Please login again'\n except jwt.InvalidTokenError:\n return 'Invalid token, please login again'", "title": "" }, { "docid": "318009e033be9b1cb0680229195ba56f", "score": "0.6821571", "text": "def decode_user_token_id(self, token):\n \"\"\" Method to return the token to its readable state \"\"\"\n 
decoded = jwt.decode(token, 'mylovelydaughters', algorithms=\"HS256\")\n return decoded['id']", "title": "" }, { "docid": "1cce190851c860e6de0a86c2e60ddd17", "score": "0.6805388", "text": "def for_value(cls, token_value):\n\n try:\n contents = base64.b64decode(token_value)\n except TypeError:\n # Not proper base64 encoded value.\n logging.info(\"Tried to decode auth token that isn't \" +\n \"base64 encoded\")\n return None\n\n parts = contents.split(\"\\n\")\n if len(parts) != 3:\n # Wrong number of parts / malformed.\n logging.info(\"Tried to decode malformed auth token\")\n return None\n user_id, timestamp, signature = parts\n return cls(user_id, timestamp, signature)", "title": "" }, { "docid": "f8fdb0d6833d11421ec6d20eeccc49a0", "score": "0.6546678", "text": "def get_token(self):\n response = self.Login(\"aminah\", \"12345\")\n data = json.loads(response.data)\n return data['token']", "title": "" }, { "docid": "d8528c1a3c6ea703c964b24e8b6bb888", "score": "0.64676774", "text": "async def get_payload_from_token(request: Request):\n http_bearer = HTTPBearer()\n auth_credentials: HTTPAuthorizationCredentials = await http_bearer(request)\n token = auth_credentials.credentials\n payload = jwt.decode(token, options=dict(verify_signature=False))\n realm = Auth.get_realm_from_token(token)\n return {\"realm\": realm, \"username\": payload[\"preferred_username\"]}", "title": "" }, { "docid": "80c553a01ad6db958610c82f3dd46385", "score": "0.644588", "text": "def loads(self, raw_token):\n try:\n parsed = jwt.decode(\n raw_token, key=self.secret_key, algorithms=[self.algorithm]\n )\n except jwt.ExpiredSignatureError:\n raise tokens.TokenError.expired()\n except jwt.DecodeError: # pragma: no branch\n raise tokens.TokenError.invalid()\n # ideally we never end up here as DecodeError should\n # catch everything else, however since this is the root\n # exception for PyJWT we'll catch it down and\n # and re-raise our own\n except jwt.InvalidTokenError: # pragma: no cover\n raise tokens.TokenError.bad()\n else:\n return tokens.Token(user_id=parsed[\"id\"], operation=parsed[\"op\"])", "title": "" }, { "docid": "57e7b6e67fb24c547c5fb3fb8abec916", "score": "0.64371026", "text": "def decode_token(token: str) -> Optional[dict]:\n try:\n with open(settings.AUTH_PUBLIC_KEY_PATH, \"rb\") as public_key:\n return jwt.decode(\n jwt=token,\n key=public_key.read(),\n algorithms=[JWT_ALGORITHM],\n audience=settings.AUTH_ACCESS_TOKEN_AUDIENCE,\n )\n except jwt.exceptions.InvalidTokenError:\n return None", "title": "" }, { "docid": "ca76cdc71bf41ab86190bddb0a16e2af", "score": "0.64095944", "text": "def decode(token, expiring=True):\n try:\n if expiring:\n val = signing.loads(token, max_age=settings.TOKEN_LIFETIME)\n else:\n val = signing.loads(token)\n except Exception as e:\n LOG.info(\"The token seems to be invalid (%s)\" % str(e))\n val = None\n return val", "title": "" }, { "docid": "a78ebff2d9fa3f840751c6ce53eca048", "score": "0.629871", "text": "def decode(self, token, verify=True):\n try:\n return jwt.decode(token, self.verifying_key, algorithms=[self.algorithm], verify=verify)\n except InvalidTokenError:\n raise TokenBackendError(_('Token is invalid or expired'))", "title": "" }, { "docid": "b51b2c046ad0fcaf606ec3adae616a4f", "score": "0.62967557", "text": "def deserialize(cls, token):\n s = Serializer(current_app.config['SECRET_KEY'])\n data = s.loads(token)\n return cls.get(data.get('confirm'))", "title": "" }, { "docid": "38011d4eeeaa04ed396275c22f1d6be3", "score": "0.626913", "text": "def get_auth_token():\n 
token = g.user.generate_auth_token()\n return jsonify({'token': token.decode('ascii')})", "title": "" }, { "docid": "826cf725f69300f9d14cf849bc6e952b", "score": "0.6251695", "text": "def get_account_auth_token(self):", "title": "" }, { "docid": "d1e30299b4efa25cadeaea3e749b9658", "score": "0.62305206", "text": "def info_from_BearerAuth(token: str) -> Optional[TokenInfo]:\n return decode_api_key_v2(token, Const.AUTH_PUBLIC_KEY)", "title": "" }, { "docid": "3674a276006504a5ec07191905183eaf", "score": "0.62135303", "text": "def verify_token(auth_token):\n try:\n payload = jwt.decode(auth_token, SECRET_KEY, algorithms='HS256')\n return payload['sub']\n except jwt.ExpiredSignatureError:\n raise Exception('Signature expired. Please log in again.')\n except jwt.InvalidTokenError:\n raise Exception('Invalid token. Please log in again.')", "title": "" }, { "docid": "ba076a3f7faad7a05fe8d68965032396", "score": "0.6193463", "text": "def parse_auth_token(token: str) -> str:\n \n if not token.startswith('Bearer '):\n return None\n \n return token.split('Bearer ')[1]", "title": "" }, { "docid": "6609595c91bc6bbd53036643882a135b", "score": "0.6150235", "text": "def get_token_auth_cookie():\n\n auth = request.headers.get(\"Authorization\", None)\n if not auth:\n\n abort(401)\n raise AuthError({\"code\": \"authorization_missing\",\n \"description\":\n \"Authorization is expected\"}, 401)\n\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n abort(401)\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must start with\"\n \" Bearer\"}, 401)\n elif len(parts) == 1:\n abort(401) # No token Sent\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Token not found\"}, 401)\n elif len(parts) > 2:\n abort(401)\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must be\"\n \" Bearer token\"}, 401)\n\n token = parts[1]\n return token", "title": "" }, { "docid": "166c8ed075dd94b63f251c168f412bb6", "score": "0.6149182", "text": "def decodeJWTView(request):\n input_jwt = request.POST.get('jwt', None)\n if not input_jwt:\n webapp2.abort(404)\n\n try:\n input_jwt = unicodedata.normalize(NORMAL_FORM, input_jwt).encode(\n 'ascii', 'ignore')\n # Append extra characters to make original string base 64 decodable.\n input_jwt += '=' * (4 - (len(input_jwt) % 4))\n decoded_jwt = base64.urlsafe_b64decode(input_jwt)\n except JSONDecodeError as e:\n result = {'error': True}\n webapp2.abort(404)\n else:\n decoded_jwt = json.loads(decoded_jwt)\n return webapp2.Response(json.dumps(decoded_jwt, indent=8))", "title": "" }, { "docid": "feea660bb72555785886b69abd4ed779", "score": "0.6142326", "text": "def get_auth_token(self):\n data = [str(self.user_data[\"_id\"]), self.user_data[\"password\"], self.user_data[\"role\"]]\n login_serializer = TimedJSONWebSignatureSerializer(current_app.config[\"SECRET_KEY\"],\n expires_in=current_app.config[\"COOKIE_DURATION\"])\n return login_serializer.dumps(data)", "title": "" }, { "docid": "22645774a81bce45200340d841ee36e0", "score": "0.61396885", "text": "def get_auth_token(self):\n data = (self.id, sha1(self.password).hexdigest())\n return login_serializer.dumps(data)", "title": "" }, { "docid": "fa26d52faac9a4de2593311af18400db", "score": "0.6108767", "text": "def jwt_authenticate(request: HttpRequest) -> Dict[str, Any]:\n token = get_raw_token_from_request(request)\n validated_token = tokens.decode_token(token)\n return validated_token", "title": "" }, { "docid": 
"b192865d55b0f632716c78c553be4515", "score": "0.6107343", "text": "def decode_token(self, firebase_token):\n scope_name = settings.SCOPE_NAME\n try:\n claims = firebase_auth.verify_id_token(\n firebase_token,\n check_revoked=api_settings.FIREBASE_CHECK_JWT_REVOKED\n )\n if scope_name != '':\n if claims.get(scope_name) is None or claims.get(scope_name) is False:\n raise exceptions.AuthenticationFailed(\n '! Token claim permission denied !'\n )\n return claims\n except ValueError as exc:\n raise exceptions.AuthenticationFailed(\n 'JWT was found to be invalid, or the App’s project ID cannot '\n 'be determined.'\n )\n except firebase_auth.AuthError as exc:\n if exc.code == 'ID_TOKEN_REVOKED':\n raise exceptions.AuthenticationFailed(\n 'Token revoked, inform the user to reauthenticate or '\n 'signOut().'\n )\n else:\n raise exceptions.AuthenticationFailed(\n 'Token is invalid.'\n )", "title": "" }, { "docid": "1174e90d8260a51604fd76772ecb9068", "score": "0.61054146", "text": "def ulogin_response(self, token, host):\n response = requests.get(\n settings.TOKEN_URL,\n params={\n 'token': token,\n 'host': host\n })\n content = response.content.decode('utf8')\n return json.loads(content)", "title": "" }, { "docid": "f6980f9368bccf84be8096806995bf47", "score": "0.6101621", "text": "def process_token(encoded_token):\n keyset = requests.get(CERNER_JWKS).json()\n try:\n token = jwt.decode(encoded_token, keyset,\n audience=MY_ORIGIN,\n issuer=TRUSTED_ORIGIN,\n options= {\n 'verify_iat': True,\n 'verify_exp': True,\n }\n )\n\n # OK, this is schenanigans to always \"be\" a test user in the FHIR sandbox no matter\n # what test user you're really using. You won't do this in a real app :) \n token['sub'] = DEMO_USER_FPA\n return token\n\n except (jwt.JWTError, jwt.ExpiredSignatureError, jwt.JWTClaimsError) as e:\n app.logger.info('BCS Token process failure: %s', e)\n abort(403)\n return", "title": "" }, { "docid": "4b0f4e7036171fca63e896703fe2691c", "score": "0.60998386", "text": "def _verified_token(self,encoded_token: bytes) -> Dict[str,Union[str,int,bool]]:\n # raise an error if secret key doesn't exist\n if not self._secret_key:\n raise RuntimeError(\n \"AUTHJWT_SECRET_KEY must be set when using symmetric algorithm {}\".format(self._algorithm)\n )\n\n try:\n return jwt.decode(\n encoded_token,\n self._secret_key,\n algorithms=self._algorithm\n )\n except jwt.exceptions.ExpiredSignatureError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.exceptions.DecodeError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.exceptions.InvalidAlgorithmError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.exceptions.InvalidKeyError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.exceptions.InvalidTokenError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.exceptions.InvalidIssuerError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.exceptions.InvalidAudienceError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.exceptions.InvalidIssuedAtError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.exceptions.InvalidSignatureError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.exceptions.ImmatureSignatureError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.exceptions.MissingRequiredClaimError as err:\n raise HTTPException(status_code=422,detail=str(err))", 
"title": "" }, { "docid": "0926f3050b67f2748f563be6d13c5c0d", "score": "0.60977006", "text": "def get_token(self):\n payload = self.register_user(self.email, self.password)\n data = json.loads(payload.data.decode())\n payload = self.login_user(self.email, self.password)\n data = json.loads(payload.data.decode())\n return data.get('token')", "title": "" }, { "docid": "db0e612c699517da33c9095dad9c972c", "score": "0.6050448", "text": "def response_auth(status, message, token, status_code):\n return {'status': status, \n 'message': message, \n 'auth_token': token.decode(\"utf-8\")\n }, status_code", "title": "" }, { "docid": "2a4f6c27b10cb76d7568acec8daf92bc", "score": "0.6030416", "text": "def access_token(self):\n return json_loads(self.token_json)['access_token']", "title": "" }, { "docid": "ba66b4fc96a14b521a7771550b9bbcf6", "score": "0.60283047", "text": "def decode_id(token_id: int) -> int:\n decoded_type = token_id >> 128\n return decoded_type", "title": "" }, { "docid": "a485afa395cabb3b0561e97981b4723f", "score": "0.601571", "text": "def exchange_token():\n return jsonify(dict(token=current_user.token.decode('ascii')))", "title": "" }, { "docid": "54910ec2563080952cd0ff346bf27a27", "score": "0.60035264", "text": "def decode_authorization_message(self, encoded_message):\r\n params = {\r\n 'EncodedMessage': encoded_message,\r\n }\r\n return self.get_object(\r\n 'DecodeAuthorizationMessage',\r\n params,\r\n DecodeAuthorizationMessage,\r\n verb='POST'\r\n )", "title": "" }, { "docid": "0f6771a9e098f2e0a77fb902079ec614", "score": "0.5995791", "text": "def get_token(self):\n data = json.dumps({'username': 'User1', 'password': 'p@ssw0rd'})\n response = self.app.post('/auth/login/', data=data,\n content_type='application/json')\n token = json.loads(response.data).get('token')\n return token", "title": "" }, { "docid": "a6145b76fbc1e6e5db879fd5118a1783", "score": "0.5989262", "text": "def verify_token(token: str) -> Union[Dict[str, Any], None]:\n\n try:\n payload = jwt.decode(token, settings.SECRET_KEY, algorithms=['HS256'])\n except jwt.exceptions.InvalidSignatureError:\n return None\n except jwt.exceptions.ExpiredSignatureError:\n return None\n except jwt.exceptions.DecodeError:\n return None\n\n return payload", "title": "" }, { "docid": "f5410a5a46cb55ece4b6612f1b2852d8", "score": "0.5975735", "text": "def to_python(self, value):\n try:\n value = fernet.decrypt(force_bytes(value))\n except InvalidToken:\n return value\n else:\n return force_str(value)", "title": "" }, { "docid": "b83fc705882368a0beefb4f4f2f38349", "score": "0.5970702", "text": "def token(self):\n if not self.auth_token or not self.auth_token.valid:\n self.auth_token = self._fetch_token()\n\n return self.auth_token", "title": "" }, { "docid": "9228b3dd0d3a8510ebb06c87b99ecb9a", "score": "0.596466", "text": "def handle_token_auth(token_auth):\r\n # print(\"Handle token auth\", token_auth)\r\n token_auth = token_auth[len(\"MSAuth1.0 \"):]\r\n token_auth = token_auth.split(\",\")\r\n if (len(token_auth) != 3):\r\n print('Auth token contains {} segments'.format(len(token_auth)))\r\n return False\r\n\r\n tokens = parse_tokens(token_auth) \r\n #print(tokens)\r\n if (len(tokens) != 3):\r\n print(\"malformed authorization header: header conatins {} values, 3 expected\".format(len(tokens)), )\r\n return False\r\n print(\"Tokens : \", tokens)\r\n #access_token_body = jwt.get_unverified_claims(tokens.get(\"accesstoken\"))\r\n\r\n # print(\"JWT AccessToken Body: \", access_token_body)\r\n if 
verify_token(tokens.get(\"actortoken\")):\r\n return True\r\n return False", "title": "" }, { "docid": "a565c330876235d7945faa0b3f700665", "score": "0.59575313", "text": "def decode_base64_authorization_header(\r\n self,\r\n base64_authorization_header: str\r\n ) -> str:\r\n try:\r\n utf_val = base64_authorization_header.encode('utf-8')\r\n decode = b64decode(utf_val).decode('utf-8')\r\n return decode\r\n except (AttributeError, ValueError) as a:\r\n return None", "title": "" }, { "docid": "aeaef4564278658bf73b400174abd849", "score": "0.595356", "text": "def get_token(self,data):\n data = a2b_hex(data)\n safe = self.encrypt(data)\n safe = b2a_hex(safe)\n return safe", "title": "" }, { "docid": "0f3f3307cc0d5eedabd6c644336894c0", "score": "0.5949489", "text": "def _dehydrate_token(token):\n session_token = {}\n session_token[\"key\"] = token.key\n session_token[\"secret\"] = token.secret\n return session_token", "title": "" }, { "docid": "897fac6270ce7b7bb9f8bcb467eded0f", "score": "0.59484315", "text": "def decode_secret_token(secret_token):\r\n secret_token = urllib.parse.unquote(secret_token)\r\n decrypted = hangman.decrypt(secret_token)\r\n return decrypted.split(';')", "title": "" }, { "docid": "0f8d67be792163694859d04b1dc7e0f4", "score": "0.592503", "text": "def parsed_token(payload: dict):\n token = scitokens.utils.demo.token(payload)\n return scitokens.SciToken.deserialize(token)", "title": "" }, { "docid": "a284cdabeb71571c78362d72d6cff91d", "score": "0.5921979", "text": "def get_auth_token(self):\n # The data keys should be as short as possible to keep the token short\n data = {\n # The user's id\n 'i': self.id,\n # Time of issue\n 't': time(),\n # Last characters of user's hashed password, makes sure the key is automatically\n # expired if the user changes password\n 'p': self.password_hash[-10:],\n }\n serializer = URLSafeSerializer(current_app.config['SECRET_KEY'])\n return serializer.dumps(data)", "title": "" }, { "docid": "61b106c03a3412979ea4b3bdff057ba0", "score": "0.59084606", "text": "def get_encoded_auth_token(self):\n auth_token = '{}:{}'.format(self.client_id, self.client_secret).encode('ascii')\n return base64.b64encode(auth_token).decode('ascii')", "title": "" }, { "docid": "64d6cef4aaa613091c02ab9b0b4fee31", "score": "0.5905913", "text": "def get_auth_token():\n url = 'https://{}/dna/system/api/v1/auth/token'.format(DNAC_URL) # Endpoint URL\n hdr = {'content-type' : 'application/json'} # Define request header\n resp = requests.post(url, auth=HTTPBasicAuth(DNAC_USER, DNAC_PASS), headers=hdr) # Make the POST Request\n token = resp.json()['Token'] # Retrieve the Token\n return token # Create a return statement to send the token back for later use", "title": "" }, { "docid": "987f595cb7d716ccc559d2a3d013a813", "score": "0.5903592", "text": "def load_token(token):\n\n #The Token itself was generated by User.get_auth_token. So it is up to\n #us to known the format of the token data itself.\n\n #The Token was encrypted using itsdangerous.URLSafeTimedSerializer which\n #allows us to have a max_age on the token itself. 
When the cookie is stored\n #on the users computer it also has a exipry date, but could be changed by\n #the user, so this feature allows us to enforce the exipry date of the token\n #server side and not rely on the users cookie to exipre.\n max_age = current_app.config[\"COOKIE_DURATION\"].total_seconds()\n\n #Decrypt the Security Token, data = [username, hashpass]\n try:\n login_serializer = URLSafeTimedSerializer(current_app.config[\"SECRET_KEY\"])\n data = login_serializer.loads(token, max_age=max_age)\n except:\n return None\n #Find the User\n user_id = data[0]\n hashed_password = data[1]\n user = User.get(user_id)\n\n #Check Password and return user or None\n if user and hashed_password == user.password:\n return user\n return None", "title": "" }, { "docid": "42908960d2b652161cd088bfc8c8a041", "score": "0.5890365", "text": "def unpack(self, token, timestamp=None):\n if not token:\n raise KeyError\n\n _jwe_header = _jws_header = None\n\n # Check if it's an encrypted JWT\n darg = {}\n if self.allowed_enc_encs:\n darg[\"enc\"] = self.allowed_enc_encs\n if self.allowed_enc_algs:\n darg[\"alg\"] = self.allowed_enc_algs\n try:\n _decryptor = jwe_factory(token, **darg)\n except (KeyError, HeaderError):\n _decryptor = None\n\n if _decryptor:\n # Yes, try to decode\n _info = self._decrypt(_decryptor, token)\n _jwe_header = _decryptor.jwt.headers\n # Try to find out if the information encrypted was a signed JWT\n try:\n _content_type = _decryptor.jwt.headers[\"cty\"]\n except KeyError:\n _content_type = \"\"\n else:\n _content_type = \"jwt\"\n _info = token\n\n # If I have reason to believe the information I have is a signed JWT\n if _content_type.lower() == \"jwt\":\n # Check that is a signed JWT\n if self.allowed_sign_algs:\n _verifier = jws_factory(_info, alg=self.allowed_sign_algs)\n else:\n _verifier = jws_factory(_info)\n\n if _verifier:\n _info = self._verify(_verifier, _info)\n else:\n raise Exception()\n _jws_header = _verifier.jwt.headers\n else:\n # So, not a signed JWT\n try:\n # A JSON document ?\n _info = json.loads(_info)\n except JSONDecodeError: # Oh, no ! Not JSON\n return _info\n except TypeError:\n try:\n _info = as_unicode(_info)\n _info = json.loads(_info)\n except JSONDecodeError: # Oh, no ! 
Not JSON\n return _info\n\n # If I know what message class the info should be mapped into\n if self.msg_cls:\n _msg_cls = self.msg_cls\n else:\n try:\n # try to find a issuer specific message class\n _msg_cls = self.iss2msg_cls[_info[\"iss\"]]\n except KeyError:\n _msg_cls = None\n\n timestamp = timestamp or utc_time_sans_frac()\n\n if \"nbf\" in _info:\n nbf = int(_info[\"nbf\"])\n if timestamp < nbf - self.skew:\n raise VerificationError(\"Token not yet valid\")\n\n if \"exp\" in _info:\n exp = int(_info[\"exp\"])\n if timestamp >= exp + self.skew:\n raise VerificationError(\"Token expired\")\n else:\n exp = None\n\n if \"iat\" in _info:\n iat = int(_info[\"iat\"])\n if self.allowed_max_lifetime and exp:\n if abs(exp - iat) > self.allowed_max_lifetime:\n raise VerificationError(\"Token lifetime exceeded\")\n\n if _msg_cls:\n vp_args = {\"skew\": self.skew}\n if self.iss:\n vp_args[\"aud\"] = self.iss\n _info = self.verify_profile(_msg_cls, _info, **vp_args)\n _info.jwe_header = _jwe_header\n _info.jws_header = _jws_header\n return _info\n else:\n return _info", "title": "" }, { "docid": "4fbbd59bdfffa0d3db59501397f82702", "score": "0.58806986", "text": "def getAuthToken():\n # Effettua la richiest di POST autenticandosi con username e password in basicAuth\n response = requests.post(api, auth=HTTPBasicAuth(USERNAME, PASSWORD), verify=False)\n # Ottiene il token dalla risposta in Json\n token = response.json()['Token']\n\n # Ritorna il token per poterlo riutilizzare\n return token", "title": "" }, { "docid": "9fcba151110d399dfd4c81547569ce20", "score": "0.58735234", "text": "def decode(self, data):\n pass", "title": "" }, { "docid": "77dccdeaa5a448ab8f706c3acc262b0e", "score": "0.58663964", "text": "def _decode_jwt(jwt):\n #pubkey = settings.AGAVE_JWT_PUBKEY\n pubkey = 'MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCUp/oV1vWc8/TkQSiAvTousMzO\\nM4asB2iltr2QKozni5aVFu818MpOLZIr8LMnTzWllJvvaA5RAAdpbECb+48FjbBe\\n0hseUdN5HpwvnH/DW8ZccGvk53I6Orq7hLCv1ZHtuOCokghz/ATrhyPq+QktMfXn\\nRS4HrKGJTzxaCcU7OQIDAQAB'\n try:\n key_der = b64decode(pubkey)\n key = load_der_public_key(key_der, backend=default_backend())\n except (TypeError, ValueError, UnsupportedAlgorithm):\n logger.exception('Could not load public key.')\n return {}\n\n try:\n decoded = pyjwt.decode(jwt, key, issuer=settings.AGAVE_JWT_ISSUER)\n except pyjwt.exceptions.DecodeError as exc:\n logger.exception('Could not decode JWT. 
%s', exc)\n return {}\n return decoded", "title": "" }, { "docid": "922732d43df5eae78c5d76bdef7dddb0", "score": "0.586006", "text": "def _decrypt(self, rj, token):\n if self.iss:\n keys = self.key_jar.get_jwt_decrypt_keys(rj.jwt, aud=self.iss)\n else:\n keys = self.key_jar.get_jwt_decrypt_keys(rj.jwt)\n return rj.decrypt(token, keys=keys)", "title": "" }, { "docid": "e85956ab2619cafdc37045a67c9e90b3", "score": "0.5857477", "text": "def get_auth(self, auth_token):\n\n data = {\n 'grant_type': 'authorization_code',\n 'code': str(auth_token),\n 'redirect_uri': self.redirect_uri,\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n }\n\n post_request = requests.post(self.SPOTIFY_TOKEN_URL, data=data)\n\n response_data = json.loads(post_request.text)\n self._access_token = response_data['access_token']\n self.authorization_header = {\n \"Authorization\": f\"Bearer {self._access_token}\"}\n\n session['access_token'] = response_data['access_token']\n\n return dict(\n access_token=response_data['access_token'],\n refresh_token=response_data['refresh_token'],\n token_type=response_data['token_type'],\n expires_in=response_data['expires_in'],\n )", "title": "" }, { "docid": "f0466580c2d31c475b233e785ca6af28", "score": "0.5851844", "text": "def _decode(self, token, encoded_keys):\n for key in encoded_keys:\n encoded = self._path_get(token, key)\n decoded = base64.b64decode(encoded.encode('utf-8'))\n self._path_set(token, key, decoded)\n return token", "title": "" }, { "docid": "8b3713b61b1bb445ed0136704cfeb38c", "score": "0.5841084", "text": "def decrypt(self, rawdata):\n if self.version:\n data = base64.b64decode(rawdata[19:])\n else:\n data = rawdata\n\n data = self.cipher.decrypt(data)\n try:\n return json.loads(data[: data.rfind(b\"}\") + 1])\n except:\n return data", "title": "" }, { "docid": "cab2ff380924a91e4c1031ee4d10118d", "score": "0.5833197", "text": "def get_raw_token(header):\n parts = header.split()\n\n if len(parts) == 0:\n # Empty AUTHORIZATION header sent\n return None\n\n if str(parts[0]) != \"b'Bearer'\":\n # Assume the header does not contain a JSON web token\n return None\n\n if len(parts) != 2:\n raise exceptions.AuthenticationFailed(\n _('Authorization header must contain two space-delimited values'),\n code='bad_authorization_header',\n )\n\n return parts[1]", "title": "" }, { "docid": "ace98bf7f987be58e2d5ab23653fa3b9", "score": "0.58189744", "text": "def validate_token(self, token):\n\n try:\n payload = jwt.decode(token, settings.SECRET_KEY, algorithms=['HS256'])\n type = payload['type']\n\n if type != 'email_confirmation':\n raise jwt.PyJWTError\n\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('The validation time has expired.')\n\n except jwt.PyJWTError:\n raise serializers.ValidationError('Invalid token.')\n\n else:\n\n self.context['payload'] = payload\n\n return token", "title": "" }, { "docid": "96c18e1043a0cebf0d79423662646aa8", "score": "0.58126706", "text": "def token():\n return json.dumps({\n \"refresh\": \"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ0b2tlbl90eXBlIjoicmVmcmVzaCIsImV4cCI6MTU4Nzk5MjIyNCwianRpIjoiN2EwMzdlNGYxOGFmNDVmOWE1YzcxZDRhYzhiMDg1ODEiLCJ1c2VyX2lkIjoxLCJjYXJyZXJhcyI6WyJBIiwiQiIsIkMiLCJEIiwiRSIsIkYiLCJHIiwiSCIsIkkiLCJKIiwiSjIiLCJLIiwiTCIsIk4yIiwiUCIsIlEiLCJPIiwiUiIsIlMiLCJOIiwiTSIsIlQiLCJBMSIsIkcxIiwiVSIsIlYiLCJWMSIsIlciLCJIMSJdLCJ1c2VybmFtZSI6ImFkbWluIn0.2QgqtOEsSreA-yp43qJsTyno_K6ptyB_WkbPUS1doCQ\",\n \"access\": 
\"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ0b2tlbl90eXBlIjoiYWNjZXNzIiwiZXhwIjoyNTM0OTc1OTk5LCJqdGkiOiJlNTAyNTYzZWE5MmI0MDYwOTczYTAxNDE5ODkxMDU4OSIsInVzZXJfaWQiOjEsImNhcnJlcmFzIjpbIlRFU1QiXSwidXNlcm5hbWUiOiJ0ZXN0In0.1pto6DgaLX_GetMuJNQ8pkL9jMStUDOWqkk8P3Y5PXM\"\n })", "title": "" }, { "docid": "8e347cebff8f423cc8c533a6cb0b04ad", "score": "0.57975435", "text": "def auth_token(self) -> AuthToken:\n return self._auth_token", "title": "" }, { "docid": "ffc6ad0f1088a20cafc8672ac38be09e", "score": "0.57819307", "text": "def getToken(response):\n\n resp_dict = json.loads(response.text)\n\n try:\n token = resp_dict[\"token\"]\n except KeyError:\n print('Retrieval unsuccessful.')\n return None\n\n return token", "title": "" }, { "docid": "8c0bf0a189596ae9fb197d7fdf88e721", "score": "0.5773634", "text": "def auth_token():\n return check_output([\"heroku\", \"auth:token\"]).rstrip().decode(\"utf8\")", "title": "" }, { "docid": "5bac3280023752ae4c25e1bf3eca097d", "score": "0.57712096", "text": "def authorization_token(self) -> str:\n return pulumi.get(self, \"authorization_token\")", "title": "" }, { "docid": "5b40ec94e3892c0eb7895d3a8f70c5dc", "score": "0.5768494", "text": "def check_jwt_token(token):\n # jwt.decode will verify the expiration date of the token\n # We won't have the secret so we can't verify the signature, but we should verify everything else\n return decode(token, options={'verify_signature': False})", "title": "" }, { "docid": "9e151914abb69857cca81b80fda98f31", "score": "0.57606655", "text": "def decrypt(self, token: ByteString) -> str:\n return self.clf.decrypt(token).decode()", "title": "" }, { "docid": "5dcd6b16b67c13c414af2be23217917b", "score": "0.5749952", "text": "def parse_tokens(token_auth):\r\n tokens = {}\r\n for item in token_auth:\r\n token_type = item.split('=')\r\n if (len(token_type) != 2):\r\n print(\"malformed authorization header: no '=' sign\", )\r\n tokens = {}\r\n break\r\n token_type[1] = token_type[1].replace(\"\\\"\", \"\")\r\n token = token_type[1].split()\r\n # print (token_type[0] , \" : \", token)\r\n if len(token) == 1:\r\n tokens[token_type[0]] = token[0]\r\n if len(token) == 2:\r\n tokens[token_type[0]] = token[1]\r\n return tokens", "title": "" }, { "docid": "19edc6d0d232825d38e4c03a36dc0288", "score": "0.5747173", "text": "def get_token():\n if g.current_user.is_anonymous() or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token':g.current_user.generate_auth_token(expiration=3600),\n 'expiration':3600})", "title": "" }, { "docid": "f5b5f971cd4892f93b2f1a6e8c3b352b", "score": "0.5743919", "text": "def encode_auth_token(self):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(\n days=app.config.get('AUTH_TOKEN_EXPIRY_DAYS'),\n seconds=app.config.get('AUTH_TOKEN_EXPIRY_SECONDS')\n ),\n 'iat': datetime.datetime.utcnow(),\n 'sub': self.id\n }\n return jwt.encode(\n payload,\n app.config['SECRET_KEY'],\n algorithm='HS256'\n ).decode('utf-8')\n except Exception as e:\n return e", "title": "" }, { "docid": "b7dcff8707cfdbdac98b8f2463f21710", "score": "0.5735489", "text": "def verify_token(token):\n try:\n # try to decode the token using our SECRET variable\n payload = jwt.decode(token, current_app.config.get('SECRET_KEY'))\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # the token is expired, return an error string\n return \"Expired token. Please login to get a new token.\"\n except jwt.InvalidTokenError:\n # the token is invalid, return an error string\n return \"Invalid token. 
Please register or login.\"", "title": "" }, { "docid": "b7dcff8707cfdbdac98b8f2463f21710", "score": "0.5735489", "text": "def verify_token(token):\n try:\n # try to decode the token using our SECRET variable\n payload = jwt.decode(token, current_app.config.get('SECRET_KEY'))\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # the token is expired, return an error string\n return \"Expired token. Please login to get a new token.\"\n except jwt.InvalidTokenError:\n # the token is invalid, return an error string\n return \"Invalid token. Please register or login.\"", "title": "" }, { "docid": "ce310ff12bb8ebe2a4faa219b6b06bdc", "score": "0.5721923", "text": "def decode(data: bytes) -> bytes:", "title": "" }, { "docid": "104078b94f2ed76585c006e443b967e3", "score": "0.57185364", "text": "def decode_base64_authorization_header(self,\n base64_authorization_header: str\n ) -> str:\n if base64_authorization_header is None:\n return None\n if not isinstance(base64_authorization_header, str):\n return None\n try:\n baseEncode = base64_authorization_header.encode('utf-8')\n baseDecode = b64decode(baseEncode)\n decodedValue = baseDecode.decode('utf-8')\n return decodedValue\n except Exception:\n return None", "title": "" }, { "docid": "e2443ce8530f1edbb3628016acb202c2", "score": "0.57102567", "text": "def decode_token(cls, key: str, token: str, verify: bool = True):\n data = jwt.decode(token, key, algorithms=['HS256'], verify=verify)\n # Enforce conversion to satisfy typing\n data = dict(data)\n return cls(**data)", "title": "" }, { "docid": "364fb98c15541832fb844660d25bad68", "score": "0.5709821", "text": "def get_token_auth_header():\n\n auth = request.headers.get(\"Authorization\", None)\n if not auth:\n\n abort(401)\n raise AuthError({\"code\": \"authorization_header_missing\",\n \"description\":\n \"Authorization header is expected\"}, 401)\n\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n abort(401)\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must start with\"\n \" Bearer\"}, 401)\n elif len(parts) == 1:\n abort(401) # No token Sent\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Token not found\"}, 401)\n elif len(parts) > 2:\n abort(401)\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must be\"\n \" Bearer token\"}, 401)\n\n token = parts[1]\n return token", "title": "" }, { "docid": "037aa45e6203dbe33ff1df33cfae9cc7", "score": "0.570254", "text": "def _get_id_token_claims_without_verification(bearer_token: str):\n\ttry:\n\t\theader, payload, signature = bearer_token.split(\".\")\n\texcept IndexError:\n\t\tL.warning(\"Cannot parse ID token: Wrong number of '.'.\")\n\t\traise aiohttp.web.HTTPBadRequest()\n\n\ttry:\n\t\tclaims = json.loads(base64.b64decode(payload.encode(\"utf-8\")))\n\texcept binascii.Error:\n\t\tL.warning(\"Cannot parse ID token: Payload is not base 64.\")\n\t\traise aiohttp.web.HTTPBadRequest()\n\texcept json.JSONDecodeError:\n\t\tL.warning(\"Cannot parse ID token: Payload cannot be parsed as JSON.\")\n\t\traise aiohttp.web.HTTPBadRequest()\n\n\treturn claims", "title": "" } ]
68f06fdad826c9bce003a7c53d88e283
Compute inverse CDF for a given value.
[ { "docid": "cd4d641d3fb9a0113530e0471fedd092", "score": "0.5649419", "text": "def at(self, value):\n assert(0 <= value <= 1)\n return bisect.bisect_right(self.cdf, value)", "title": "" } ]
[ { "docid": "2b8d83cb66b7de2be6af77838e1cc5db", "score": "0.82533187", "text": "def inverse_cdf(self, value):\n raise NotImplementedError", "title": "" }, { "docid": "8a47abed2865294dce40758967737770", "score": "0.66790795", "text": "def inverse_cdf(self, u):\n return norm.ppf(u, loc=self.mean, scale=self.std)", "title": "" }, { "docid": "a9a830e214f7a650caa327b2d1f4ac96", "score": "0.66787773", "text": "def invCDF_1(y):\n return -np.log(1-y)", "title": "" }, { "docid": "d7022d7be1b09e8ba349c501494e1d34", "score": "0.66091365", "text": "def cdf(self, value):\n name = self.name + '_cdf'\n\n if not isinstance(value, framework.Variable):\n raise TypeError(\n f\"Expected type of value is Variable, but got {type(value)}\"\n )\n\n value = self._check_values_dtype_in_probs(self.loc, value)\n loc, scale, value = paddle.broadcast_tensors(\n [self.loc, self.scale, value]\n )\n\n return (\n paddle.atan(\n paddle.divide(paddle.subtract(value, loc), scale), name=name\n )\n / np.pi\n + 0.5\n )", "title": "" }, { "docid": "40e91250d64c3c5e9f43ca81a41e391a", "score": "0.63846", "text": "def get_cdf(self, value):\n raise NotImplementedError", "title": "" }, { "docid": "c6047928f1a0bf01ad3dca31a7b390a9", "score": "0.62599695", "text": "def cdf(self, x):\n return unif_cdf(self.a, self.b, x)", "title": "" }, { "docid": "f559e6ea45d8d55c8fe7e87557ed27dc", "score": "0.625413", "text": "def _brentq_cdf(self, value):\n # The decorator expects an instance method, but usually are decorated before being bounded\n bound_cdf = partial(scalarize(GaussianKDE.cumulative_distribution), self)\n\n def f(x):\n return bound_cdf(x) - value\n\n return f", "title": "" }, { "docid": "5c784ab4bc2633b6d4ade664346c1b2e", "score": "0.6242908", "text": "def cdf(self, x):\n if x < 0:\n return 0\n else:\n cdf = -Exponential.e ** (-self.lambtha * x) + 1\n return cdf", "title": "" }, { "docid": "a76ba5429e3c5a643ad74bc6d634d299", "score": "0.61770535", "text": "def CDF_1(x):\n return 1 - np.exp(-x)", "title": "" }, { "docid": "6732bc03fa0ae822f4946c92cfbf0485", "score": "0.6118077", "text": "def cdf(self, x):\n if x < 0:\n return (0)\n else:\n a = 1 - 2.7182818285 ** (-1 * self.lambtha * x)\n return (a)", "title": "" }, { "docid": "57ae1c4403d0b5c307d1aaecba343f5c", "score": "0.6087419", "text": "def cdf(self, x):\n return tdist_cdf(self.v, x)", "title": "" }, { "docid": "4767c864c9811fbe5a37503a53df38db", "score": "0.60097516", "text": "def normal_standard_cdf(val):\n return 1/2 * (1 + torch.erf(val/np.sqrt(2)))", "title": "" }, { "docid": "6d33c8243ff8ebfb342a0c95777c3627", "score": "0.59783274", "text": "def cdf(self, x):\n return chisq_cdf(self.v, x)", "title": "" }, { "docid": "eec7024ebfc0f5fc21efbd5d9cb54fe9", "score": "0.59385973", "text": "def cdf(self, x):\n return hyper_cdf(self.s, self.f, self.n, x)", "title": "" }, { "docid": "75da5cf5c374d0b2496416fc37f45451", "score": "0.59324825", "text": "def invlogccdf(self, lq):\n return tdist_invlogccdf(self.v, lq)", "title": "" }, { "docid": "9442406366674371eb915880830b967e", "score": "0.59259665", "text": "def _f(self, c):\n if c == 0:\n return 0\n return c**(-2)", "title": "" }, { "docid": "7aea6173176a473366ea4878c8688175", "score": "0.59132576", "text": "def cdf(self, X):\n cdf = stats.norm.cdf(X)\n return cdf", "title": "" }, { "docid": "a039dd5da32dbbbbdb2ae176aea8c767", "score": "0.58947", "text": "def invlogcdf(self, lq):\n return tdist_invlogcdf(self.v, lq)", "title": "" }, { "docid": "78204983c4ee3949a16da8d44ca7a491", "score": "0.58927554", "text": "def cdf(self, x):\n 
return fdist_cdf(self.v1, self.v2, x)", "title": "" }, { "docid": "884b9b999c62aba0bb60ebff57796763", "score": "0.58926636", "text": "def cdf(self, x):\n return weibull_cdf(self.alpha, self.theta, x)", "title": "" }, { "docid": "336546d3ca733513d6dfc572132079fc", "score": "0.5885892", "text": "def cdf(self, x):\r\n return self._dist.cdf(x)", "title": "" }, { "docid": "e89852dd709a70a0e7b6939954e757c9", "score": "0.5881372", "text": "def erfcinv(y):\r\n return -ndtri(0.5*y)/sqrt(2)", "title": "" }, { "docid": "287713aa939d51923fe31093fb032cc7", "score": "0.5875282", "text": "def inv_exp_cdf(x, rate=1):\n return -np.log(1. - x) / rate", "title": "" }, { "docid": "a31ec526f51ae6064d6cb8b4f7a3593b", "score": "0.5861798", "text": "def cdf(x, lmbd):\n\treturn 1 - np.exp(-lmbd * x)", "title": "" }, { "docid": "440903ad02fdcfc1b840e26b7e0cb508", "score": "0.584286", "text": "def make_inv_cdf(cdf):\n inv_cdf = np.zeros((cdf['n_items'] + 1))\n cdf_ = np.int64(cdf['cdf'] * cdf['n_items'])\n\n idx, vals = np.unique(cdf_, return_index=True)\n \n for i in range(len(idx)-1):\n inv_cdf[idx[i]:idx[i+1]] = vals[i] * np.ones(len(inv_cdf[idx[i]:idx[i+1]]))\n inv_cdf[idx[i+1]] = vals[i+1]\n \n return {'inv_cdf': inv_cdf, 'n_items': cdf['n_items']}", "title": "" }, { "docid": "6c8c145a53072816676ae58df79afcd9", "score": "0.5829142", "text": "def invlogccdf(self, lq):\n return fdist_invlogccdf(self.v1, self.v2, lq)", "title": "" }, { "docid": "4b8b649fefedae2623d47a644e16a7a9", "score": "0.58269393", "text": "def cdf(x):\n\n y = math.fabs(x)\n\n if y < 7.07106781186547:\n a = (0.0352624965998911, 0.700383064443688,\n 6.37396220353165, 33.912866078383,\n 112.079291497871, 221.213596169931,\n 220.206867912376\n )\n b = (0.0883883476483184, 1.75566716318264,\n 16.064177579207, 86.7807322029461,\n 296.564248779674, 637.333633378831,\n 793.826512519948, 440.413735824752)\n\n aa = (((((a[0] * y + a[1]) * y + a[2]) * y + a[3]) * y + a[4]) * y + a[5]) * y + a[6]\n bb = ((((((b[0] * y + b[1]) * y + b[2]) * y + b[3]) * y + b[4]) * y + b[5]) * y + b[6]) * y + b[7]\n\n n = math.exp(- y * y / 2) * (aa / bb)\n\n elif 7.07106781186547 <= y <= 37:\n c = y + 1 / (y + 2 / (y + 3 / (y + 4 / (y + 0.65))))\n n = math.exp(- y * y / 2) / (2.506628274631 * c)\n\n else: # y > 37\n n = 0\n\n return (1 - n) if x > 0 else n", "title": "" }, { "docid": "4b8d573dc1b8c53581051664196f7bae", "score": "0.5817679", "text": "def invert_value(self, value):\n return self.mean - (value - self.mean)", "title": "" }, { "docid": "bed10650d8cfe3b58e58a53b3c14db01", "score": "0.58132035", "text": "def _denormalizeThroughCDF(self, data, params):\n denormed = self.normEngine.cdf(data)\n denormed = self._sampleICDF(denormed, params)\n return denormed", "title": "" }, { "docid": "33853d941a4f7f3c0964f0dc6d636b74", "score": "0.5811901", "text": "def invlogccdf(self, lq):\n return chisq_invlogccdf(self.v, lq)", "title": "" }, { "docid": "03985ea515768430eca0b720e85ea54b", "score": "0.5804701", "text": "def compute_cdf(x, y):\n\n\n return CDF", "title": "" }, { "docid": "b982bec9b493bb1759ab5c42e92922f3", "score": "0.5798588", "text": "def cdf(self, x):\n y = 0.5 * betainc(self.m / 2.0, 0.5, np.sin(np.pi * x) ** 2)\n return np.where(x < 0.5, y, 1 - y)", "title": "" }, { "docid": "dc75942917a4f289bba880f0876455f0", "score": "0.5794821", "text": "def cdf(self, x):\n expression = (x - self.mean)/(self.stddev * (2 ** (1/2)))\n return (1/2) * (1 + self.get_erf(expression))", "title": "" }, { "docid": "7310adb4afb1906e15dd72242364b3b2", "score": "0.5776293", "text": 
"def getCdf(self):\r\n return self.cdf", "title": "" }, { "docid": "4093e060178e3c2a053786f58645f1ec", "score": "0.5768438", "text": "def get_CLD_inverse(self, normalize=False):\n # Invert it to make a probability -> energy converter\n return self.get_CLD(normalize=normalize).inverse()", "title": "" }, { "docid": "5dc76d08961eaff25614faf1496d16ca", "score": "0.57647336", "text": "def invlogcdf(self, lq):\n return unif_invlogcdf(self.a, self.b, lq)", "title": "" }, { "docid": "64056fecc104306f530a2146cdc7ed27", "score": "0.5759312", "text": "def invlogccdf(self, lq):\n return hyper_invlogccdf(self.s, self.f, self.n, lq)", "title": "" }, { "docid": "0d8c4f805e94d51f24219a9c738ae568", "score": "0.5752328", "text": "def ccdf(self, x):\n return tdist_ccdf(self.v, x)", "title": "" }, { "docid": "2006bc4936193e310540a5ecdf2e6e53", "score": "0.57477903", "text": "def invlogcdf(self, lq):\n return chisq_invlogcdf(self.v, lq)", "title": "" }, { "docid": "8b685ae1a3bff722919fb8d355a3f05e", "score": "0.57215595", "text": "def ccdf(self, x):\n return unif_ccdf(self.a, self.b, x)", "title": "" }, { "docid": "411dfbb9b0348e77687e5b890f2f08a0", "score": "0.5690793", "text": "def invlogcdf(self, lq):\n return hyper_invlogcdf(self.s, self.f, self.n, lq)", "title": "" }, { "docid": "4cb2e08cbc66b6a8b37d5b7e51d3ef4f", "score": "0.5682713", "text": "def invlogccdf(self, lq):\n return unif_invlogccdf(self.a, self.b, lq)", "title": "" }, { "docid": "e4b4d0162b77d28552207c18782b57cc", "score": "0.5679432", "text": "def InverseStudentT(dof, probability):\n\n assert 0 <= probability <= 1\n\n if probability == 1:\n return float(\"inf\")\n if probability == 0:\n return float(\"-inf\")\n if probability == 0.5:\n return 0.0\n\n def f(x):\n return StudentTCDF(dof, x)\n\n return findRoot(probability, -10**4, 10**4, f)", "title": "" }, { "docid": "e04bfdda3564323549832b7f46c6ab52", "score": "0.56676", "text": "def invlogccdf(self, lq):\n return binom_invlogccdf(self.n, self.p, lq)", "title": "" }, { "docid": "d338b18a35c2a142c2fe7f955d882f9c", "score": "0.5656897", "text": "def invlogcdf(self, lq):\n return fdist_invlogcdf(self.v1, self.v2, lq)", "title": "" }, { "docid": "38c6870130d99ee1241eb97208858f92", "score": "0.564027", "text": "def normal_cdf(x):\n return quad(norm.pdf, -5, x)[0]", "title": "" }, { "docid": "8c549931d67e20ae6b200c750945ee1a", "score": "0.56320333", "text": "def cdf(self, x):\n return norm_cdf(self.mu, self.sigma, x)", "title": "" }, { "docid": "17292149269746f37e001b67f7dea9ab", "score": "0.56288797", "text": "def cdf(self, x):\r\n return self._cdf(x,\r\n np.vectorize(lambda v: (self._dist_samples <= v).mean()))", "title": "" }, { "docid": "e77c8c53c3f250d1437aca3789c77594", "score": "0.5614364", "text": "def invlogccdf(self, lq):\n return pois_invlogccdf(self.mu, lq)", "title": "" }, { "docid": "98e0f3de5826b0c642c08926b0975ee2", "score": "0.5608365", "text": "def cdf(value, df, loc, scale, skewness):\n dtype = dtype_util.common_dtype(\n [value, df, loc, scale, skewness], tf.float32)\n numpy_dtype = dtype_util.as_numpy_dtype(dtype)\n\n value, df, loc, scale, skewness = [\n tf.convert_to_tensor(param, dtype=dtype)\n for param in (value, df, loc, scale, skewness)]\n\n one = numpy_dtype(1.)\n two = numpy_dtype(2.)\n\n t = standardize(value, loc=loc, scale=scale, skewness=skewness)\n t_cdf = student_t.stdtr(df, t=t)\n\n squared_skewness = tf.math.square(skewness)\n return tf.math.reciprocal(one + squared_skewness) * tf.where(\n t < 0.,\n two * t_cdf,\n one - squared_skewness + two * squared_skewness 
* t_cdf)", "title": "" }, { "docid": "eff103fdd6db2b6427f142439a960a96", "score": "0.56069505", "text": "def inverse(self, value: Union[int, float, np.ndarray, to.Tensor]) -> Union[int, float, np.ndarray, to.Tensor]:\n inverse_fcn = getattr(self._wrapped_env, \"inverse\", None)\n if callable(inverse_fcn):\n return inverse_fcn(value)\n else:\n # Arrived at the inner env, no transformation found\n return value", "title": "" }, { "docid": "79b39ea37a8d29b5f5659439f61d6577", "score": "0.5598078", "text": "def cdf(self, x):\n return pois_cdf(self.mu, x)", "title": "" }, { "docid": "f4bb407dad7ff71cdfb68d86ec76a493", "score": "0.55946785", "text": "def cdf(self, x):\n return beta_cdf(self.alpha, self.beta, x)", "title": "" }, { "docid": "7a88609931ee52e232a6a01f47c2df0c", "score": "0.55895454", "text": "def ccdf(self, x):\n return chisq_ccdf(self.v, x)", "title": "" }, { "docid": "9330d575685a5304b26fa25d45a72356", "score": "0.5583527", "text": "def inverse(n):\r\n return 1.0 / n", "title": "" }, { "docid": "e259d4341143ee50ecdef6008ac3764e", "score": "0.55821663", "text": "def cdf(self, x):\n return cauchy_cdf(self.mu, self.sigma, x)", "title": "" }, { "docid": "dcceb3de94dd77028b0fe447669a1e26", "score": "0.5571044", "text": "def f_distribution_critical_value(f_value, df_numerator, df_denominator, loc=0, scale=1):\n return float(FDistribution.pdf(f_value, df_numerator, df_denominator, loc, scale))", "title": "" }, { "docid": "171db98af1d41539fd1a4753f46559ad", "score": "0.5568167", "text": "def cdf(self, x):\n return binom_cdf(self.n, self.p, x)", "title": "" }, { "docid": "8d99c36bdc39b58aa16b93ebde53a23e", "score": "0.55600554", "text": "def invlogccdf(self, lq):\n return geom_invlogccdf(self.p, lq)", "title": "" }, { "docid": "c0771297c20464eb7f8453801d347ceb", "score": "0.55588114", "text": "def _invf(self, fY):\n Y = fY ** 3\n Y[Y < 0.008856] = (fY[Y < 0.008856] - 4 / 29) * (108 / 841)\n return Y", "title": "" }, { "docid": "94311dbf4c2abbf539cda3777fcff80c", "score": "0.55569875", "text": "def inverse(self):\n return self.distribution.inverse", "title": "" }, { "docid": "756b2519c94a5e038aa4d582202ac77d", "score": "0.55568653", "text": "def invlogccdf(self, lq):\n return beta_invlogccdf(self.alpha, self.beta, lq)", "title": "" }, { "docid": "5a7fc7effe3f4c9df630e195259fae4a", "score": "0.5555484", "text": "def cdf(self, x):\n return exp_cdf(self.theta, x)", "title": "" }, { "docid": "2e886380cbf848f5a05d4ce3d34c35dc", "score": "0.55547625", "text": "def invlogccdf(self, lq):\n return cauchy_invlogccdf(self.mu, self.sigma, lq)", "title": "" }, { "docid": "68255f0f34bf10f818b16d82e7d274c7", "score": "0.5553923", "text": "def invcdf(p, loc=0, scale=1):\n with mp.extradps(5):\n p = _validate_p(p)\n loc, scale = _validate_params(loc, scale)\n if p == 0:\n return mp.zero\n if p == 1:\n return mp.inf\n x0, x1 = _find_bracket(lambda x: cdf(x, loc, scale), p, 0, mp.inf)\n if x0 == x1:\n return x0\n try:\n x = mp.findroot(lambda x: cdf(x, loc, scale) - p, x0=(x0, x1),\n solver='secant')\n except Exception:\n x = mp.findroot(lambda x: cdf(x, loc, scale) - p, x0=(x0 + x1)/2,\n solver='newton')\n return x", "title": "" }, { "docid": "667f8b8ceff4ac8b83ae4368d8a18f59", "score": "0.5553153", "text": "def infomation(val):\n return -val*(math.log(val, 2))", "title": "" }, { "docid": "25dc70568ca80852c1520b3e7cf77507", "score": "0.5545433", "text": "def cdf(self, x):\n\n if self.shapeParam != 0:\n return 1 - (1 + self.shapeParam * x / self.scaleParam) ** (-1 / self.shapeParam)\n\n else:\n return 1 - 
np.exp(-x / self.scaleParam)", "title": "" }, { "docid": "2830ebe5c55894d4fd57d100002c4c98", "score": "0.5542253", "text": "def _cdf(self, value, mean=None, sd=None):\n value = self._check_value(value, 'value')\n value = self.cast(value, self.dtype)\n mean, sd = self._check_param_type(mean, sd)\n sqrt2 = self.sqrt(self.const(2.0, mstype.float32))\n adjusted = (value - mean) / (sd * sqrt2)\n return 0.5 * (1.0 + self.erf(adjusted))", "title": "" }, { "docid": "53efa7306d9de168571cecd08a7344c1", "score": "0.55303943", "text": "def ccdf(self, x):\n return hyper_ccdf(self.s, self.f, self.n, x)", "title": "" }, { "docid": "e74add657ff9d6fe695b70de154e40df", "score": "0.55253017", "text": "def inverse(x): \n return x ** 2", "title": "" }, { "docid": "54f8066fd2b6b34c5847cac5df4a3033", "score": "0.5516761", "text": "def ccdf(self, x):\n return weibull_ccdf(self.alpha, self.theta, x)", "title": "" }, { "docid": "9927443112ea9d6f42099dc2491e86ac", "score": "0.55085015", "text": "def calculate_inverse_density(cluster):\n inverse_density = cluster['volume'] / cluster['size']\n return inverse_density", "title": "" }, { "docid": "3b0f16e23f36d99c3ecd0ec978883b5c", "score": "0.5508163", "text": "def cdf(self, x):\n\n a = 1 + self.erf((x - self.mean) / (self.stddev * (2 ** 0.5)))\n return a / 2", "title": "" }, { "docid": "f8229739df569c68d4c64bbaaa40d784", "score": "0.55026054", "text": "def calculate_cdf(histogram):\n # Get the cumulative sum of the elements\n cdf = histogram.cumsum()\n \n # Normalize the cdf\n normalized_cdf = cdf / float(cdf.max())\n \n return normalized_cdf", "title": "" }, { "docid": "b7c4f45e2fcf6f3ed027952df8369dfe", "score": "0.5499215", "text": "def cdfXY(inputV):\n # used to do this with statsmodels.distributions.ECDF but this removes that dependency:\n def ecdf(x):\n xs = np.sort(x)\n ys = np.arange(1, len(xs)+1)/float(len(xs))\n return xs, ys\n return ecdf(inputV)", "title": "" }, { "docid": "e0efa01b3014a1bcd80e4d68b5fd78fe", "score": "0.54928744", "text": "def ccdf(self, x):\n return fdist_ccdf(self.v1, self.v2, x)", "title": "" }, { "docid": "d3b65b38638d33d8687cdb5c64154e64", "score": "0.5489231", "text": "def inverse_transforme_method(u_value: float, lambda_value: float):\n\n try:\n return -(log(1-u_value)/lambda_value)\n except ValueError:\n return 0", "title": "" }, { "docid": "d746bd0d451daa9b2ff927b4dc2bd0ff", "score": "0.5487835", "text": "def get_density_func(cdf, var):\r\n\r\n pdf = cdf\r\n\r\n for i in range(len(var)):\r\n pdf = diff(pdf, var[i])\r\n \r\n return pdf", "title": "" }, { "docid": "137176572c6621c9eb7efbfab9407f90", "score": "0.54775673", "text": "def cdf(self, x):\n return gamma_cdf(self.alpha, self.beta, x)", "title": "" }, { "docid": "7c636de8ec16ca889e5eafb346ee394d", "score": "0.54764175", "text": "def invlogcdf(self, lq):\n return beta_invlogcdf(self.alpha, self.beta, lq)", "title": "" }, { "docid": "480c72583a2e36f1ce1efd1fa1df3e1f", "score": "0.54662704", "text": "def cdf(self, x):\n return lognormal_cdf(self.mu, self.sigma, x)", "title": "" }, { "docid": "6f7335ac823d7fc32b0ea476792725d1", "score": "0.54565454", "text": "def invlogcdf(self, lq):\n return geom_invlogcdf(self.p, lq)", "title": "" }, { "docid": "31ff97ebd8dd1da9593b1c3426f377f4", "score": "0.54556006", "text": "def cdf(self, x):\n return logis_cdf(self.mu, self.theta, x)", "title": "" }, { "docid": "ddcec04e32ccfa9bc0f30644bec33bbf", "score": "0.5453972", "text": "def ecdf(x):\n nobs = len(x)\n return arange(1,nobs+1)/float(nobs)", "title": "" }, { "docid": 
"d117e79033fc7ad85dc4d45d4da58899", "score": "0.5453101", "text": "def compute_inverse(\n b_func, epsilon, delta\n):\n sol = optimize.root_scalar(\n lambda v: b_func(v, epsilon) - delta, bracket=(0, 5000)\n )\n return sol.root", "title": "" }, { "docid": "f74e6ea083396b4d9964a5d30797d470", "score": "0.5446779", "text": "def invlogcdf(self, lq):\n return pois_invlogcdf(self.mu, lq)", "title": "" }, { "docid": "31f87e969f8bf886aea8a0bafba815c9", "score": "0.54462343", "text": "def invlogccdf(self, lq):\n return norm_invlogccdf(self.mu, self.sigma, lq)", "title": "" }, { "docid": "f5b06b53db76a3f48c4de7b6f00be539", "score": "0.54456246", "text": "def invlogcdf(self, lq):\n return binom_invlogcdf(self.n, self.p, lq)", "title": "" }, { "docid": "d64a82ddb645de1a9f6ed1658d313e55", "score": "0.5436837", "text": "def invcdf(p, mu=0, sigma=1):\n with mp.extradps(mp.dps):\n p = _validate_p(p)\n mu = mp.mpf(mu)\n sigma = mp.mpf(sigma)\n\n a = mp.erfinv(2*p - 1)\n x = mp.sqrt(2)*sigma*a + mu\n return x", "title": "" }, { "docid": "8c7392df177d14f992faea1019012fb9", "score": "0.5434988", "text": "def logccdf(self, x):\n return chisq_logccdf(self.v, x)", "title": "" }, { "docid": "6994d24f59877082fbfd2e50ba1dc94b", "score": "0.5425989", "text": "def invlogccdf(self, lq):\n return exp_invlogccdf(self.theta, lq)", "title": "" }, { "docid": "d45bfed2c1b83a280a680656c44b91d7", "score": "0.5424483", "text": "def df(x):\n return (2*x - x**2 * cot(x))/sin(x)", "title": "" }, { "docid": "16d33a192cb34ca689df5a7d39724160", "score": "0.54218924", "text": "def icdf(self, p, mode='toobig'):\n assert 0 <= p <= 1, \"Desired CDF prob must be between 0 and 1 inclusive\"\n\n if mode=='toobig':\n for i in range(self.size):\n x = self.domain[i]\n # if cdf=p, we're done.\n # if cdf>p, that means we just went past a gap, and we're now\n # returning the smallest too-big value.\n if self.cdf(x) >= p:\n return x\n else:\n # This should never happen since p is no more than 1.0, and\n # always the last item in the domain has CDF=1.0.\n assert False, \"Didn't find value with prob at least %s\" % p\n elif mode=='toosmall':\n # go in descending order\n # list(range(5,-1,-1)) => [5, 4, 3, 2, 1, 0]\n for i in range(self.size-1, -1, -1):\n x = self.domain[i]\n if self.cdf(x) <= p:\n return x\n else:\n # The problem now is, the user requested a CDF less then the\n # CDF of the smallest value in the support (and we don't have a\n # separate notion of the domain).\n # Not sure what the right behavior is. 
The 'toosmall'\n # criterion doesn't make sense here.\n assert False, \"Didn't find value with prob <= %s\" % p\n else:\n assert False, \"bad mode given\"", "title": "" }, { "docid": "654cf3a02a9980f2c5dde1d42467fb3f", "score": "0.5415278", "text": "def invlogccdf(self, lq):\n return nbinom_invlogccdf(self.r, self.p, lq)", "title": "" }, { "docid": "b01688fe13a2017bee16b385e985ee9e", "score": "0.54141307", "text": "def invlogccdf(self, lq):\n return gamma_invlogccdf(self.alpha, self.beta, lq)", "title": "" }, { "docid": "763d32cb85924b9ec17d2de3cd1f7ccb", "score": "0.5406932", "text": "def pdf_to_cdf(pdf):\n cdf = np.cumsum(pdf)\n cdf /= cdf[-1]\n\n return cdf", "title": "" }, { "docid": "1fdbd0c5d2de135c6b52864e27d459ec", "score": "0.54047555", "text": "def invlogccdf(self, lq):\n return weibull_invlogccdf(self.alpha, self.theta, lq)", "title": "" }, { "docid": "1c5818d5ec689dbac9d65e47071086c1", "score": "0.53933424", "text": "def cdf(self, x):\r\n raise NotImplementedError", "title": "" }, { "docid": "7c647257663ad44e7275fa66b04cc5f7", "score": "0.53895634", "text": "def logccdf(self, x):\n return fdist_logccdf(self.v1, self.v2, x)", "title": "" }, { "docid": "fc78717523efa3cc775c3db0352f2ac8", "score": "0.53876674", "text": "def invlogccdf(self, lq):\n return lognormal_invlogccdf(self.mu, self.sigma, lq)", "title": "" } ]
4086f19c350b78f3c7029a5d86742ae7
This class holds all the models for a dataset
[ { "docid": "5f8fa3d84739edffa3cf68ed1dc1e208", "score": "0.0", "text": "def __init__(self, id):\n self.id = id\n self.models = BTrees.OOBTree.BTree()\n self.constructor_id = -1\n self.dataset_id = -1\n self.group_connection_id = -1", "title": "" } ]
[ { "docid": "ea662cd6f629aaa29b6be895da859b88", "score": "0.7318091", "text": "def _get_model_dataset(self):\n\n # Instantiate the model object\n model = getattr(\n importlib.import_module('models.' + self._args.model),\n self._args.model)()\n\n # Instantiate the input object\n dataset = getattr(\n importlib.import_module('inputs.' + self._args.dataset),\n self._args.dataset)()\n return model, dataset", "title": "" }, { "docid": "78607a83b3243243d67fdae05282e466", "score": "0.70887065", "text": "def trainModel(self, data, attributes, class_label):", "title": "" }, { "docid": "c4f2545c36c8fd09d700b889b736423e", "score": "0.67790407", "text": "def models(self, models):\n if models is not None:\n models = DatasetModels(models)\n models = models.select(datasets_names=self.name)\n for model in models:\n model._exposure = self.data.exposure\n self._models = models", "title": "" }, { "docid": "63a0615905b22dac2ac78a21faf4a3b0", "score": "0.6748353", "text": "def get_dataset(self):\n\n # https://developer.nvidia.com/blog/preparing-state-of-the-art-models-for-classification-and-object-detection-with-tlt/\n train_download = not os.path.exists(os.path.join(self.load_path, \"train\"))\n trainval_2012 = datasets.VOCDetection(os.path.join(self.load_path, \"train\"), image_set='trainval',\n transform=transforms.Compose([transforms.ToTensor()]),\n target_transform=None, download=train_download)\n trainval_2007 = datasets.VOCDetection(os.path.join(self.load_path, \"train\"), image_set='trainval',\n year='2007',\n transform=transforms.Compose([transforms.ToTensor()]),\n target_transform=None, download=train_download)\n test_download = not os.path.exists(os.path.join(self.load_path, \"test\"))\n valset = datasets.VOCDetection(os.path.join(self.load_path, \"test\"), image_set='test',\n year='2007',\n transform=transforms.Compose([transforms.ToTensor()]),\n target_transform=None, download=test_download)\n train_loader_2007 = torch.utils.data.DataLoader(trainval_2007, batch_size=1, shuffle=False, num_workers=2)\n train_loader_2012 = torch.utils.data.DataLoader(trainval_2012, batch_size=1, shuffle=False, num_workers=2)\n val_loader = torch.utils.data.DataLoader(valset, batch_size=1, shuffle=False, num_workers=2)\n\n check = 0\n directories = [os.path.join(self.save_path, \"train\"), os.path.join(self.save_path, \"test\")]\n for directory in directories:\n if os.path.exists(directory):\n check += 1\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n if check != len(directories):\n indices_data = {}\n # create folders to save data\n for loader_name, loader in [('train', train_loader_2007),\n ('train', train_loader_2012),\n ('test', val_loader)]:\n for (img, annotation) in tqdm(loader):\n\n #print(annotation)\n # there may be multiple labels, they are concatenated to: 'label1_label2_'\n label = ''\n int_label = []\n\n elems = annotation['annotation']['object']\n # if only 1 label - it is a dictionary, but not list of dictionaries\n # for consistency reasons and to be able to use the loop later\n if not isinstance(elems, list):\n elems = [elems]\n\n # get bboxes, compute object size, add all object sizes and divide by img size (h*w)\n obj_sizes = 0\n num_instances = 0\n\n for elem in elems:\n # every name is in a list\n # there may be multiple instances of the same object\n # those are disregarded for label\n\n if not (bool(int(elem['difficult'][0])) and loader_name == 'test'):\n if not str(self.class_to_idx[elem['name'][0]]) in label:\n label += str(self.class_to_idx[elem['name'][0]]) + '_'\n 
int_label.append(self.class_to_idx[elem['name'][0]])\n\n num_instances += 1\n # percentage of objects in the image: sum obj_size/img_size\n obj_sizes += (int(elem['bndbox']['xmax'][0]) - int(elem['bndbox']['xmin'][0])) * \\\n (int(elem['bndbox']['ymax'][0]) - int(elem['bndbox']['ymin'][0]))\n obj_sizes /= float(int(annotation['annotation']['size']['width'][0]) *\n int(annotation['annotation']['size']['height'][0]))\n\n img_name = label + '_' + annotation['annotation']['filename'][0]\n\n directory = os.path.join(os.path.join(self.save_path, loader_name), label)\n if not os.path.exists(directory):\n os.makedirs(directory)\n save_image(img, os.path.join(directory, img_name))\n\n indices_data[os.path.join(directory, img_name)] = (int_label,\n obj_sizes, num_instances)\n\n # store img_paths which serves as indices and the labels for further analysis\n indices_data = collections.OrderedDict(sorted(indices_data.items()))\n\n dataframe = pd.DataFrame({'img_paths': list(indices_data.keys()),\n 'labels': np.array(list(indices_data.values()), dtype=object)[:, 0],\n 'obj_sizes': np.array(list(indices_data.values()), dtype=object)[:, 1],\n 'num_instances': np.array(list(indices_data.values()), dtype=object)[:, 2]})\n DatasetMetrics.indices_paths(self.name, dataframe)\n\n train_transform = transforms.Compose([\n # you can add other transformations in this list\n # resize (256 x remaining larger size) and RandomCrop(224)\n # like in https://papers.nips.cc/paper/2012/file/c399862d3b9d6b76c8436e924a68c45b-Paper.pdf\n # https://arxiv.org/pdf/1409.1556.pdf\n transforms.Resize(256), # resize smaller size to 256\n transforms.RandomCrop(self.args.patch_size), # 224\n transforms.ToTensor()\n ])\n\n test_transform = transforms.Compose([\n # you can add other transformations in this list\n # resize (256 x remaining larger size) and RandomCrop(224)\n transforms.Resize(256), # resize smaller size to 256\n transforms.CenterCrop((self.args.patch_size, self.args.patch_size)), # 224\n transforms.ToTensor()\n ])\n\n if self.args.compute_dataset_metrics is True:\n # when computing dataset metrics, an original image should be used\n # - without randomness of RandomCrop\n train_transform = transforms.Compose([\n transforms.ToTensor()\n ])\n test_transform = transforms.Compose([\n transforms.ToTensor()\n ])\n\n # if not already set, set batch-size to 1 for computing the metrics\n # due to different image sizes\n self.args.batch_size = 1\n\n # load the image dataset from folder with indices\n trainset = IndxImageFolder(root = os.path.join(self.save_path, \"train\"), transform=train_transform,\n num_classes=len(self.class_to_idx), multilabel=self.args.multilabel)\n valset = IndxImageFolder(root=os.path.join(self.save_path, \"test\"), transform=test_transform,\n num_classes=len(self.class_to_idx), multilabel=self.args.multilabel)\n\n return trainset, valset", "title": "" }, { "docid": "0a41ce9ccec715c381387cff5211410f", "score": "0.66953593", "text": "def get_data_model(cls):\n raise NotImplementedError", "title": "" }, { "docid": "0a41ce9ccec715c381387cff5211410f", "score": "0.66953593", "text": "def get_data_model(cls):\n raise NotImplementedError", "title": "" }, { "docid": "bbdf9b93d03bce8db2031663b7338bfe", "score": "0.6614376", "text": "def parse_for_model(self):\n print('Pre-processing data for model...')\n if any(self.dataset):\n model_data = self.dataset.copy()\n else:\n raise ValueError('First make dataset then pre-process it for Model')\n self.room_encoder, self.ap_encoder = LabelEncoder(), LabelEncoder()\n 
model_data.room_id = self.room_encoder.fit_transform(model_data.room_id)\n model_data.ap_id = self.ap_encoder.fit_transform(model_data.ap_id)\n features = model_data.iloc[:, :-1].values\n target = model_data.iloc[:, -1].values\n \"\"\" Splitting data into train, validation and test \"\"\"\n x_train, x_test, y_train, y_test = train_test_split(features, target, test_size=0.1)\n x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.1)\n self.model_data = (x_train, x_valid, x_test, y_train, y_valid, y_test)", "title": "" }, { "docid": "9d66fbbf227dfc6ab99b6adc4154fab0", "score": "0.6558669", "text": "def create_models(self):\n self.__handle_csv()\n self.__export_models()", "title": "" }, { "docid": "c4b1b0f46bd362c5e19c033c27465c81", "score": "0.65492105", "text": "def createModel(self, dataFile, classifierType):", "title": "" }, { "docid": "f7237f86667dc30cba8ddf3d8bf1b2f2", "score": "0.6529856", "text": "def model(self):\n raise NotImplementedError", "title": "" }, { "docid": "978af77a2915f0e622eab5eae1c62efa", "score": "0.6520754", "text": "def fit_storage(self, data):\n models = []\n for epoch in range(self.k):\n sample = self.selector(data) # data with a subset of attributes\n models.append(self.learner(sample))\n model = BaggedModel(data.domain, models)\n model.name = self.name\n return model", "title": "" }, { "docid": "0dc1e7bf48bfccc497a9ba779bf3051d", "score": "0.6504034", "text": "def __init__(self, datasets: List[IterableDataset]):\n raise NotImplementedError", "title": "" }, { "docid": "06055d9094aca48587e82926dfd159f1", "score": "0.6475266", "text": "def iter_data():\n for v, q, a in dataset:\n yield (Model(v, ontology), q.split(), a)", "title": "" }, { "docid": "b829e70cc66c36e149238672495316cf", "score": "0.64297676", "text": "def __init__(self, dataset):\r\n self.dataset = dataset", "title": "" }, { "docid": "ff42f9452ff2ff43fccb37ec127c6f25", "score": "0.64160985", "text": "def __init__(self, data, models: Iterable[BaseModel], combiner: Combiner):\n assert models, \"The list of models cannot be empty\"\n assert combiner, \"You need to provide a Combiner\"\n super().__init__(data)\n\n self.models: Iterable[BaseModel] = models\n self.combiner: Combiner = combiner\n self.model_weights: List[float] = []", "title": "" }, { "docid": "bf177400dca7c09077803697a591fbcb", "score": "0.6382948", "text": "def __init__(self, dataset):\n self.dataset = dataset", "title": "" }, { "docid": "1542ec9e58a9ab3a9bdbf240d0a86296", "score": "0.6374203", "text": "def model(self):", "title": "" }, { "docid": "1542ec9e58a9ab3a9bdbf240d0a86296", "score": "0.6374203", "text": "def model(self):", "title": "" }, { "docid": "cde3fdf752b361890fb16ed1a03bfdfb", "score": "0.63718194", "text": "def make_model(self):\n raise NotImplementedError", "title": "" }, { "docid": "e582a024b4c2a2b480a7cd12fe7423d9", "score": "0.63537705", "text": "def datasets(self):\n \n class CategoryData(DataSet):\n def data(self):\n return (\n ('gray_stuff', dict(id=1, name='gray')),\n ('yellow_stuff', dict(id=2, name='yellow')),\n )\n return [CategoryData]", "title": "" }, { "docid": "0f755d2b043bf15c2afae59e5c3f6173", "score": "0.6344792", "text": "def get_models():\n models = {}\n models['LR'] = LogisticRegression()\n models['LDA'] = LinearDiscriminantAnalysis()\n models['KNN'] = KNeighborsClassifier()\n models['CART'] = DecisionTreeClassifier()\n models['NB'] = GaussianNB()\n models['SVM'] = SVC()\n return models", "title": "" }, { "docid": "aaf7cceb0afba15f05425acaa63f41fd", "score": "0.63406163", 
"text": "def run_models():\n\n ### list to contain all individual model results\n models = []\n\n ### placeholder example\n example_model_results = ['model name', 'model description', 'data name',\n 'data description', 0.0, 0.0]\n models.append(example_model_results)\n\n ### contributors add models below #########################################\n\n\n\n ###########################################################################\n\n return models", "title": "" }, { "docid": "f1934063c18e2a0a1c18c148dec977bc", "score": "0.63323915", "text": "def train_model(self):\n\n\t\tself.X_train = []\n\t\tself.Y_train = []\n\n\t\tself.X_test = []\n\t\tself.Y_test = []\n\n\t\t#We are currently using a decision tree, however this can be quite modular\n\t\tif self.model == \"none\":\n\t\t\tself.model = DecisionTreeRegressor()\n\n\n\t\tfor rollout in self.rollouts:\n\n\t\t\tif uniform() > 0.2:\n\t\t\t\ttrain = True\n\t\t\telse:\n\t\t\t\ttrain = False\n\n\t\t\tfor datum in rollout[0]:\n\n\t\t\t\tobservations = datum['state']\n\t\t\t\tactions = datum['action']\n\n\t\t\t\tfor i in range(len(actions)):\n\n\t\t\t\t\ta_ = actions[i].get_value()\n\n\t\t\t\t\ts_ = observations[i]\n\n\t\t\t\t\tif train:\n\t\t\t\t\t\tself.X_train.append(s_)\n\t\t\t\t\t\tself.Y_train.append(a_)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.X_test.append(s_)\n\t\t\t\t\t\tself.Y_test.append(a_)\n\n\t\t#self.standardize()\n\t\tself.model.fit(self.X_train,self.Y_train) \n\n\t\tif self.il_config['save_model']:\n\t\t\tif not os.path.exists(self.file_path+'/model'):\n\t\t\t\tos.makedirs(self.file_path+'/model')\n\t\t\tmodel_file_path = self.file_path+'/model/model_iter_'+str(self.iter_count)\n\t\t\tnp.save(model_file_path,self.model)\n\t\t\t#pickle.dump(self.model,open(model_file_path,'w'))\n\t\t\tself.iter_count += 1", "title": "" }, { "docid": "9c3c85163fb50701d083797cf97f50d6", "score": "0.63247085", "text": "def create_models(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "bc83fb92976bcfac8275c96158219377", "score": "0.6268053", "text": "def define_models():\n models = []\n models.append(('LR', LogisticRegression(class_weight='balanced', max_iter=2500)))\n models.append(('RF', RandomForestClassifier(class_weight='balanced',criterion='entropy')))\n models.append(('ET', ExtraTreesClassifier(class_weight='balanced',criterion='entropy')))\n models.append(('XG', xgb.XGBClassifier(class_weight='balanced',criterion='entropy')))\n models.append(('KNN', BalancedBaggingClassifier(base_estimator=KNeighborsClassifier(),\n sampling_strategy='auto',\n replacement=False)))\n models.append(('NB', MultinomialNB()))\n models.append(('MLP','mlp'))\n models.append(('EMB','emb'))\n models.append(('CON','con'))\n return models", "title": "" }, { "docid": "96c3794cfa7ffbf05ac22d738f2e71b9", "score": "0.6264579", "text": "def models_from_data():\n input_filepath = os.path.join(data_path, \"spring_mass_1D_inputs.txt\")\n output1_filepath = os.path.join(data_path, \"spring_mass_1D_outputs_1.0.txt\")\n output2_filepath = os.path.join(data_path, \"spring_mass_1D_outputs_0.1.txt\")\n output3_filepath = os.path.join(data_path,\n \"spring_mass_1D_outputs_0.01.txt\")\n\n model1 = ModelFromData(input_filepath, output1_filepath, cost=1.)\n model2 = ModelFromData(input_filepath, output2_filepath, cost=4.)\n model3 = ModelFromData(input_filepath, output3_filepath, cost=16.)\n\n return [model1, model2, model3]", "title": "" }, { "docid": "4aaba645fdb0cec6dae95b2e4a0f1fb2", "score": "0.62255543", "text": "def create_model(self):", "title": "" }, { "docid": 
"5f09930f19ef6429118e256b7677532b", "score": "0.620214", "text": "def __init__(self, dataset, env):\n super(EncodingSimple, self).__init__()\n self.ds = dataset\n self.env = env\n self.model = None\n self.nsteps = int(self.ds.numAttr * self.ds.numTuple / self.env['batch_size'])\n self.dataset = self.ds.df.groupby(np.arange(self.ds.numTuple) // env['batch_size'])", "title": "" }, { "docid": "753557d3114d7e7592ff4eda2be697ca", "score": "0.6187025", "text": "def build_model(self):\n raise NotImplementedError", "title": "" }, { "docid": "66a113b3151bfab1ce7ac2e641a755fd", "score": "0.61857843", "text": "def get_dsets(self):\n\t\tdata_class = get_dataset(self.name, self.name, self.img_dir, self.LDS_type, self.is_target)\n\n\t\tself.num_classes, train_dataset, val_dataset, test_dataset, self.train_transforms, self.test_transforms = data_class.get_data()\n\n\t\tself.train_dataset = DatasetWithIndicesWrapper(self.name, train_dataset.data, train_dataset.targets, self.train_transforms, self.test_transforms)\n\t\tself.val_dataset = DatasetWithIndicesWrapper(self.name, val_dataset.data, val_dataset.targets, self.test_transforms, self.test_transforms)\n\t\tself.test_dataset = DatasetWithIndicesWrapper(self.name, test_dataset.data, test_dataset.targets, self.test_transforms, self.test_transforms)\n\n\t\treturn self.train_dataset, self.val_dataset, self.test_dataset", "title": "" }, { "docid": "32e73b2d4f8a6d244c3fc8e9b5468357", "score": "0.61855143", "text": "def TrainModel(self):\n # Initialize the Needed Classifier\n self.classifier = NaiveBayesClassifier.train(self.train_features)\n #self.classifier = MaxentClassifier.train(self.train_features,algorithm=\"iis\")", "title": "" }, { "docid": "8eab8763d224c83136e21721f2cb316b", "score": "0.6182225", "text": "def get_models(self):\n nb = GaussianNB()\n svc = SVC(C=100, probability=True)\n knn = KNeighborsClassifier(n_neighbors=3)\n lr = LogisticRegression(C=100, random_state=SEED)\n nn = MLPClassifier((80, 10), early_stopping=False, random_state=SEED)\n gb = GradientBoostingClassifier(n_estimators=100, random_state=SEED)\n rf = RandomForestClassifier(n_estimators=10, max_features=3, random_state=SEED)\n\n models = {\n # 'svm': svc,\n 'knn': knn,\n 'naive bayes': nb,\n # 'mlp-nn': nn,\n 'random forest': rf,\n 'gbm': gb,\n 'logistic': lr,\n }\n\n return models", "title": "" }, { "docid": "f5448a0232066ce34452fd732a809ab5", "score": "0.61742306", "text": "def __init__(self,batch_size: int, epochs: int, dataset, labels, dataset_test, labels_test, model):\n self.batch_size = batch_size\n self.epochs = epochs\n self.dataset = dataset\n self.labels = labels\n self.dataset_test = dataset_test\n self.labels_test = labels_test\n self.model = model\n self.__save_folder = \"savedModels/\"", "title": "" }, { "docid": "a588726c6a87ae362966334b1ecf1a7e", "score": "0.6171815", "text": "def dataset_setup(self):\n settings = self.settings\n if settings.crowd_dataset == CrowdDataset.ucf_qnrf:\n self.dataset_class = UcfQnrfFullImageDataset\n self.train_dataset = UcfQnrfTransformedDataset(middle_transform=data.RandomHorizontalFlip(),\n seed=settings.labeled_dataset_seed,\n number_of_examples=settings.labeled_dataset_size)\n self.train_dataset_loader = DataLoader(self.train_dataset, batch_size=settings.batch_size,\n pin_memory=self.settings.pin_memory,\n num_workers=settings.number_of_data_workers)\n self.validation_dataset = UcfQnrfTransformedDataset(dataset='test', seed=101)\n elif settings.crowd_dataset == CrowdDataset.shanghai_tech:\n self.dataset_class = 
ShanghaiTechFullImageDataset\n self.train_dataset = ShanghaiTechTransformedDataset(middle_transform=data.RandomHorizontalFlip(),\n seed=settings.labeled_dataset_seed,\n number_of_examples=settings.labeled_dataset_size,\n map_directory_name=settings.map_directory_name,\n image_patch_size=self.settings.image_patch_size,\n label_patch_size=self.settings.label_patch_size)\n self.train_dataset_loader = DataLoader(self.train_dataset, batch_size=settings.batch_size,\n pin_memory=self.settings.pin_memory,\n num_workers=settings.number_of_data_workers)\n self.validation_dataset = ShanghaiTechTransformedDataset(dataset='test', seed=101,\n map_directory_name=settings.map_directory_name,\n image_patch_size=self.settings.image_patch_size,\n label_patch_size=self.settings.label_patch_size)\n elif settings.crowd_dataset == CrowdDataset.ucf_cc_50:\n seed = 0\n self.dataset_class = UcfCc50FullImageDataset\n self.train_dataset = UcfCc50TransformedDataset(middle_transform=data.RandomHorizontalFlip(),\n seed=seed,\n test_start=settings.labeled_dataset_seed * 10,\n inverse_map=settings.inverse_map,\n map_directory_name=settings.map_directory_name)\n self.train_dataset_loader = DataLoader(self.train_dataset, batch_size=settings.batch_size,\n pin_memory=self.settings.pin_memory,\n num_workers=settings.number_of_data_workers)\n self.validation_dataset = UcfCc50TransformedDataset(dataset='test', seed=seed,\n test_start=settings.labeled_dataset_seed * 10,\n inverse_map=settings.inverse_map,\n map_directory_name=settings.map_directory_name)\n else:\n raise ValueError('{} is not an understood crowd dataset.'.format(settings.crowd_dataset))", "title": "" }, { "docid": "684fc3c7d4134977533b41111ccd1990", "score": "0.61668634", "text": "def build_model(self):\r\n self.source_images, self.source_labels = self.dataloader.get_model_inputs()\r\n self.target_images, self.target_labels = self.dataloader.get_model_inputs()\r\n\r\n source_model = SimpleModel(self.source_images, self.source_labels, F.output_dim, scope='source_regressor')\r\n target_model = SimpleModel(self.target_images, self.target_labels, F.output_dim, scope='target_regressor')\r\n \r\n self.source_out, _ = source_model.get_model()\r\n self.target_out, _ = target_model.get_model()\r\n\r\n self.get_loss()", "title": "" }, { "docid": "9517f49d612808a2e00ebe8d56c3bf34", "score": "0.616244", "text": "def loadModel(self, dataFile):", "title": "" }, { "docid": "dbc6e9c2e814b8fe7c46b56dad66f833", "score": "0.61484253", "text": "def init_datasets(self):\n phases = self.data_dict[\"row_id\"].keys()\n tensor_dict_dict = {\n key: {\n \"features\": self.data_dict[\"features\"][key],\n \"labels\": torch.tensor(self.data_dict[\"labels\"][key], dtype=torch.long),\n \"row_id\": torch.tensor(self.data_dict[\"row_id\"][key], dtype=torch.long),\n }\n for key in phases\n }\n if self.config_dict.get(\"include_group_in_dataset\"):\n for key in phases:\n tensor_dict_dict[key][\"group\"] = torch.as_tensor(\n np.copy(self.data_dict[\"group\"][key]), dtype=torch.long\n )\n if self.config_dict.get(\"weight_var_name\") is not None:\n for key in phases:\n tensor_dict_dict[key][\"weights\"] = torch.as_tensor(\n np.copy(self.data_dict[\"weights\"][key]), dtype=torch.float\n )\n\n dataset_dict = {\n key: ArrayDataset(\n tensor_dict=tensor_dict_dict[key],\n sparse_mode=self.config_dict.get(\"sparse_mode\"),\n )\n for key in phases\n }\n\n return dataset_dict", "title": "" }, { "docid": "2da6dd6b2b574d6bd5ee5854d4228782", "score": "0.61387175", "text": "def setUp(self):\n\n def 
rf_model_builder(**model_params):\n rf_params = {\n k: v for (k, v) in model_params.items() if k != 'model_dir'\n }\n model_dir = model_params['model_dir']\n sklearn_model = sklearn.ensemble.RandomForestRegressor(**rf_params)\n return dc.models.SklearnModel(sklearn_model, model_dir)\n\n self.rf_model_builder = rf_model_builder\n self.train_dataset = dc.data.NumpyDataset(X=np.random.rand(50, 5),\n y=np.random.rand(50, 1))\n self.valid_dataset = dc.data.NumpyDataset(X=np.random.rand(20, 5),\n y=np.random.rand(20, 1))", "title": "" }, { "docid": "f8efb4ef3ff6ee80b98640449350d866", "score": "0.6137208", "text": "def __init__(\n self,\n model: Model,\n dataset: Dataset[BatchType],\n evaluator: Evaluator,\n ) -> None:\n self.model = model\n self.evaluator = evaluator\n self.dataset = dataset", "title": "" }, { "docid": "c91a4df2326338a7e34dca2e97cbc0cf", "score": "0.61296207", "text": "def make_model(self, data):\n return self.MODEL(**data)", "title": "" }, { "docid": "56716aa7d12063260cc467faa28bca09", "score": "0.6127462", "text": "def _build_models(self):\n with tf.variable_scope('model'):\n meval = Model(self.hparams, mode='test')\n meval.build()\n self._saver = meval.saver\n\n self.meval = meval", "title": "" }, { "docid": "20966932a11c048d77d6df10cf4d0425", "score": "0.612052", "text": "def loading_data_set(self):\n # Data loading code\n if self.framework_type == FrameworkType.Classification:\n # TODO\n # change dataset in outer method\n dataset = SimpsonsDataset(cfg=self.config)\n batch_size = 16\n validation_split = .2\n shuffle_dataset = True\n random_seed = 42\n\n # Creating data indices for training and validation splits:\n dataset_size = len(dataset)\n indices = list(range(dataset_size))\n split = int(np.floor(validation_split * dataset_size))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices, val_indices = indices[split:], indices[:split]\n\n # Creating PT data samplers and loaders:\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(val_indices)\n\n data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n sampler=train_sampler)\n data_loader_eval = torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n sampler=valid_sampler)\n else:\n # TODO\n # change dataset in outer method\n train_dataset = VOCSegmentation(split=DataSplit.Train, cfg=self.config)\n eval_dataset = VOCSegmentation(split=DataSplit.Eval, cfg=self.config)\n data_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=20,\n shuffle=True,\n num_workers=1,\n pin_memory=True)\n data_loader_eval = torch.utils.data.DataLoader(\n eval_dataset, batch_size=1,\n num_workers=1)\n print('Loading data...')\n\n print('Done...')\n return data_loader, data_loader_eval", "title": "" }, { "docid": "95e26ba5f2cae45acfd565e48ac3151e", "score": "0.611596", "text": "def model(self):\n return self.modeldata", "title": "" }, { "docid": "5c369a134e23790b69260a4ea5e29dbc", "score": "0.6112193", "text": "def setup_models():\n global classifiers\n\n grega_clf = GregaClassifier(GREGA_MODEL_PATH)\n classifiers.append(grega_clf)\n\n uci_clf = UciClassifier(UCI_MODEL_PATH)\n classifiers.append(uci_clf)", "title": "" }, { "docid": "b7d56da32154c25fac256253749948a5", "score": "0.6110365", "text": "def init_model(self):\n self.places = tools.keydefaultdict(Place)\n self.users = tools.keydefaultdict(User)\n\n self.predictions = {}", "title": "" }, { "docid": "77024290e71f74a808038d96ff416d2f", "score": "0.61031425", "text": "def 
__init__(self, dataset, env):\n super(Encoding, self).__init__()\n self.ds = dataset\n self.env = env\n self.model = None\n self.nsteps = int(self.ds.numAttr * self.ds.numTuple / self.env['batch_size'])\n self.dataset = self.ds.df.groupby(np.arange(self.ds.numTuple) // env['batch_size'])", "title": "" }, { "docid": "4caaec4a00e7405258dcbf9867ef2a5c", "score": "0.6095743", "text": "def load_datasets(self):\n raise NotImplementedError", "title": "" }, { "docid": "ac98fb551a4ce9e036e46ec5575288cb", "score": "0.60925215", "text": "def create_model(self, parameters={}):\n if self.model_name == \"DBSCAN\":\n self.model = cluster.DBSCAN()\n elif self.model_name == \"KMeans\":\n self.model = cluster.KMeans()\n elif self.model_name == \"hdbscan\":\n self.model_glo = hdbscan\n self.model = self.model_glo.HDBSCAN()\n\n for key, value in parameters.items():\n setattr(self.model, key, value)\n return", "title": "" }, { "docid": "9a0c5cf1d4c54e24357ce7666a642389", "score": "0.6091835", "text": "def __init__(self, data, model, config):\n\n self.data = data\n self.model = model\n self.config = config", "title": "" }, { "docid": "d0e5d6c7f3b067aafa93b7c4a3a3591e", "score": "0.6085573", "text": "def init_models(self):\n pass", "title": "" }, { "docid": "9782b40687ea1edc2c65290c85b04cec", "score": "0.60783416", "text": "def __init__(self, x_train, x_test, y_train, y_test, y_preds, model_def, fitted_model, original_data):\r\n self.x_train = x_train\r\n self.x_test = x_test\r\n self.y_train = y_train\r\n self.y_test = y_test\r\n self.y_preds = y_preds\r\n self.model_def = model_def\r\n self.fitted_model = fitted_model\r\n self.original_data = original_data", "title": "" }, { "docid": "330d6d1bd7873aad992bbd40c66b13a5", "score": "0.6070308", "text": "def train_model(self):\n # load manually labeled data\n print('Loading manually labeled data for model training...')\n manual_data = pd.read_csv(os.path.join(os.path.dirname(__file__), config['labeled_data']))\n manual_data['duplicates'] = manual_data['duplicates'].apply(self._format_duplicates)\n manual_data = manual_data[['properties.address.formatted_address', 'properties.name',\n 'lat', 'lng', 'id', 'duplicates']]\n manual_data = gpd.GeoDataFrame(manual_data,\n geometry=gpd.points_from_xy(manual_data['lng'],\n manual_data['lat']))\n\n # process manually labeled data\n print('Processing manually labeled data for model training...')\n train_test_data = self._process_manual_data(manual_data)\n train_test_data = pd.DataFrame(train_test_data, columns=['address_similarity', 'address_str_similarity',\n 'name_similarity', 'label'])\n\n # perform data sampling to balance class distribution\n print('Performing data sampling to balance class distribution...')\n train_datasets, test_data = self._perform_sampling(train_test_data)\n\n # train models\n print('Begin model training...')\n gb_models = self._train(train_datasets, 'GB')\n rf_models = self._train(train_datasets, 'RF')\n xgboost_models = self._train(train_datasets, 'XGB')\n\n # evaluate model performance on hold out set\n print('Perform model evaluation...')\n y_pred_gb = self._predict(gb_models, test_data[['address_similarity', 'address_str_similarity',\n 'name_similarity']])\n y_pred_rf = self._predict(rf_models, test_data[['address_similarity', 'address_str_similarity',\n 'name_similarity']])\n y_pred_xgboost = self._predict(xgboost_models, test_data[['address_similarity', 'address_str_similarity',\n 'name_similarity']])\n self._evaluate(test_data['label'], y_pred_gb, 'Gradient Boosting')\n 
self._evaluate(test_data['label'], y_pred_rf, 'Random Forest')\n self._evaluate(test_data['label'], y_pred_xgboost, 'XGBoost')\n\n # save trained models locally\n print('Saving trained models locally...')\n if args.best_algorithm == 'GB':\n models = gb_models\n elif args.best_algorithm == 'RF':\n models = rf_models\n elif args.best_algorithm == 'XGB':\n models = xgboost_models\n else:\n raise ValueError('{} is not supported.'.format(args.best_algorithm))\n for i in range(len(models)):\n dump(models[i], os.path.join(os.path.dirname(__file__),\n config['models_directory'] + 'model_{}.joblib'.format(i+1)))", "title": "" }, { "docid": "591c7647a6d517ac685fb0e5d2b3bda4", "score": "0.6068656", "text": "def __init__(self,model_parameters): \n self.counter = Counter()\n self.corpus = CorpusReader()\n self.nlp = TextProcess()\n self.train_features = []\n self.classifier = None\n self.test_model = []\n self.model_parameters = {}", "title": "" }, { "docid": "5ee5312ed36537bca50e5b39334e1124", "score": "0.606777", "text": "def __init__(self):\n super(Blender, self).__init__()\n self.models = []\n for m in models_names:\n name = str(m).split()[1]\n model = m().cuda()\n model.eval()\n\n model = nn.DataParallel(model)\n model.load_state_dict(torch.load('models/full_data_{}.pth'.format(name)))\n for p in model.parameters():\n p.requires_grad = False\n self.models.append(model)\n self.weighing = nn.Sequential(\n nn.BatchNorm1d(len(models_names)*17),\n # nn.ReLU(),\n nn.Linear(in_features=len(models_names)*17, out_features=256, bias=False),\n nn.BatchNorm1d(256),\n nn.ReLU(),\n nn.Linear(in_features=256, out_features=17)\n )", "title": "" }, { "docid": "c9b188d25dda6d073ee68db475e46974", "score": "0.6066141", "text": "def build_model(self):\n ...", "title": "" }, { "docid": "9584d7224b58807ce741f74522817fff", "score": "0.60589874", "text": "def getModelsData(self):\n return self.models_data", "title": "" }, { "docid": "23a5a982c7f35aa3095efa3e008f3523", "score": "0.60569793", "text": "def get_dataset(self):\n transform = transforms.Compose([\n # you can add other transformations in this list\n transforms.ToTensor()\n ])\n\n trainset = datasets.CIFAR10('datasets/CIFAR10/train/', train=True, transform=transform,\n target_transform=None, download=True)\n\n valset = datasets.CIFAR10('datasets/CIFAR10/test/', train=False, transform=transform,\n target_transform=None, download=True)\n\n train_loader = torch.utils.data.DataLoader(trainset, batch_size=1, shuffle=False, num_workers=2)\n val_loader = torch.utils.data.DataLoader(valset, batch_size=1, shuffle=False, num_workers=2)\n\n check = 0\n directories = [\"datasets/CIFAR10_indx/train\", \"datasets/CIFAR10_indx/test\"]\n for directory in directories:\n if os.path.exists(directory):\n check += 1\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n if check != len(directories):\n # create folders to save data\n indices_data = {}\n\n indx = 0\n for loader_name, loader in [('train', train_loader), ('test', val_loader)]:\n for (img, label) in loader:\n img_name = str(int(label))+'_'+str(int(indx))+'.jpg'\n\n directory = os.path.join(\"datasets/CIFAR10_indx/\" + loader_name,\n format(int(label), '06d'))\n if not os.path.exists(directory):\n os.makedirs(directory)\n save_image(img, os.path.join(directory, img_name))\n # img_path is the index for the images\n indices_data[os.path.join(directory, img_name)] = int(label)\n indx += 1\n\n # store img_paths which serves as indices and the labels for further analysis\n indices_data = 
collections.OrderedDict(sorted(indices_data.items()))\n dataframe = pd.DataFrame({'img_paths': list(indices_data.keys()),\n 'labels': list(indices_data.values())})\n DatasetMetrics.indices_paths(self.name, dataframe)\n\n if self.randomize_labels:\n evaluation = DatasetMetrics.load_evaluation_metrics(self.name)\n self.random_labels = corrupt_labels(evaluation['labels'].to_dict(),\n self.num_classes,\n self.corrupt_prob)\n else:\n self.random_labels = None\n\n # load the image dataset from folder with indices\n trainset = IndxImageFolder(root=\"datasets/CIFAR10_indx/train\", transform = transform,\n random_labels=self.random_labels)\n valset = IndxImageFolder(root=\"datasets/CIFAR10_indx/test\", transform=transform,\n random_labels=self.random_labels)\n\n return trainset, valset", "title": "" }, { "docid": "ebd20a286c5098c978a68f56cbec530e", "score": "0.6056964", "text": "def init_model():", "title": "" }, { "docid": "42750721fcac919b0e53ec669f66fed3", "score": "0.6055318", "text": "def data_model_class(self) -> DataModel:\n return BaicellsQRTBTrDataModel", "title": "" }, { "docid": "3e6a407a877c9f15b575895edb24acb3", "score": "0.60502315", "text": "def models(self) -> Model:\n return [\n self.repository.models[step.model_name]\n for step in self.config.ensemble_scheduling.step\n ]", "title": "" }, { "docid": "fb3f32100853d930f7b71ae70c748741", "score": "0.6041466", "text": "def get_datasets(self):\n d = {\n \"X_train\": self.X_train,\n \"X_test\": self.X_test,\n \"Y_train\": self.Y_train,\n \"Y_test\": self.Y_test\n }\n return d", "title": "" }, { "docid": "380f48cf7489a9e0a784af2ab9f3310c", "score": "0.604073", "text": "def get_model(name, dataset):\n field_dims = dataset.field_dims\n print('field_dims={}, {}'.format(type(field_dims), field_dims.tolist()))\n\n if name == 'lr':\n return LogisticRegressionModel(field_dims)\n elif name == 'fm':\n return FactorizationMachineModel(field_dims, embed_dim=16)\n elif name == 'hofm':\n return HighOrderFactorizationMachineModel(field_dims, order=3, embed_dim=16)\n elif name == 'ffm':\n return FieldAwareFactorizationMachineModel(field_dims, embed_dim=4)\n elif name == 'fnn':\n return FactorizationSupportedNeuralNetworkModel(field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2)\n elif name == 'wd':\n return WideAndDeepModel(field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2)\n elif name == 'ipnn':\n return ProductNeuralNetworkModel(field_dims, embed_dim=64, mlp_dims=(400, 400, 400), method='inner', dropout=0.2)\n elif name == 'opnn':\n return ProductNeuralNetworkModel(field_dims, embed_dim=16, mlp_dims=(16,), method='outer', dropout=0.2)\n elif name == 'dcn':\n return DeepCrossNetworkModel(field_dims, embed_dim=16, num_layers=3, mlp_dims=(16, 16), dropout=0.2)\n elif name == 'nfm':\n return NeuralFactorizationMachineModel(field_dims, embed_dim=64, mlp_dims=(64,), dropouts=(0.2, 0.2))\n elif name == 'ncf':\n # only supports MovieLens dataset because for other datasets user/item colums are indistinguishable\n assert isinstance(dataset, MovieLens20MDataset) or isinstance(dataset, MovieLens1MDataset)\n return NeuralCollaborativeFiltering(field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2,\n user_field_idx=dataset.user_field_idx,\n item_field_idx=dataset.item_field_idx)\n elif name == 'fnfm':\n return FieldAwareNeuralFactorizationMachineModel(field_dims, embed_dim=64, mlp_dims=(64,32,), dropouts=(0.2, 0.2))\n elif name == 'dfm':\n return DeepFactorizationMachineModel(field_dims, embed_dim=64, mlp_dims=(400, 400, 400), dropout=0.2)\n elif 
name == 'xdfm':\n return ExtremeDeepFactorizationMachineModel(\n field_dims, embed_dim=16, cross_layer_sizes=(16, 16), split_half=False, mlp_dims=(16, 16), dropout=0.2)\n elif name == 'afm':\n return AttentionalFactorizationMachineModel(field_dims, embed_dim=16, attn_size=16, dropouts=(0.2, 0.2))\n elif name == 'afi':\n return AutomaticFeatureInteractionModel(\n field_dims, embed_dim=16, atten_embed_dim=64, num_heads=2, num_layers=3, mlp_dims=(400, 400), dropouts=(0, 0, 0))\n elif name == 'afn':\n print(\"Model:AFN\")\n return AdaptiveFactorizationNetwork(\n field_dims, embed_dim=64, LNN_dim=1500, mlp_dims=(400, 400, 400), dropouts=(0, 0, 0))\n else:\n raise ValueError('unknown model name: ' + name)", "title": "" }, { "docid": "6aca67178bd2ddeaa4da34ac11fcc372", "score": "0.60254836", "text": "def provides(self):\n\n # models\n possible_values = [3, 5, 7, 9, 18, 27]\n\n value = [name(n=i, version=1) for i in possible_values]\n\n possible_values.append(111)\n value.extend([name(n=i, version=2) for i in possible_values])\n\n # TODO: automate this\n backends = []\n datasets = self.available_datasets()\n\n from deeprace.models.keras_details import resnet_details as keras_resnet\n if keras_resnet.can_train() != []:\n backends.extend(keras_resnet.can_train())\n\n from deeprace.models.keras_details import tfkeras_resnet_details as tfkeras_resnet\n if tfkeras_resnet.can_train() != []:\n backends.extend(tfkeras_resnet.can_train())\n\n from deeprace.models.tf_details import resnet_details as tf_resnet\n if tf_resnet.can_train() != []:\n backends.extend(tf_resnet.can_train())\n\n # datasets\n\n return value, backends, datasets", "title": "" }, { "docid": "a723b465c8ed5475e830b4986f61abbf", "score": "0.60243565", "text": "def fit_storage(self, data):\n\n models = []\n X = data.X\n Y, self.num_classes = get_eye_matrix(data.Y)\n\n Fs = np.zeros((X.shape[0], self.num_classes))\n Ps = self.softmax1(Fs)\n\n grads = self.loss(Y, Ps)\n\n for i in range(self.n_estimators):\n model_per_class = []\n for j in range(Y.shape[1]):\n newdata = Orange.data.Table(data.X, grads[:, j])\n model = self.learner(newdata)\n tmp = model(newdata)\n Fs[:, j] += tmp\n Ps = self.softmax1(Fs)\n #grads[:, j] = self.loss(Y[:, j], Ps[:, j])\n grads = self.loss(Y, Ps)\n model_per_class.append(model)\n models.append(model_per_class)\n\n self.models = models\n return self", "title": "" }, { "docid": "e7577867bb39f31392b945092b703672", "score": "0.6023276", "text": "def __init__(self, saved_model_dir_path: str, saved_dataset_custodian_dir_name_or_path: str = None,\n saved_dc_is_path: bool = False):\n self.saved_model_dir_path = saved_model_dir_path\n print(\"Reading model from\", self.saved_model_dir_path, \"...\")\n # Load model itself:\n self.model: tfkeras.Model = load_model_from_path(self.saved_model_dir_path)\n # Read associated data/information:\n print(\"Model info:\")\n saved_model_dir_path_obj = pathlib.Path(saved_model_dir_path)\n self.model_params = from_json(read_file_content(saved_model_dir_path_obj.parents[0].joinpath(\"params.json\")))\n self.model_scores = from_json(read_file_content(\n saved_model_dir_path_obj.parents[0].joinpath(saved_model_dir_path_obj.name + \"_scores.json\")))\n self.train_threshold = self.model_scores[\"train\"][\"scores\"].get(\"threshold\", None)\n self.validation_threshold = self.model_scores[\"validation\"][\"scores\"].get(\"threshold\", None)\n assert saved_model_dir_path_obj.parents[1].name == \"GridSearch\"\n\n print(\" Parameters:\", self.model_params)\n print(\" Scores:\", 
self.model_scores)\n print(\" Validation threshold:\", self.validation_threshold)\n\n # Load model's associated dataset custodian since is necessary in both cases:\n model_dc = DatasetCustodian.load(saved_model_dir_path_obj.parents[2], is_path=True)\n\n if saved_dataset_custodian_dir_name_or_path is None:\n # No dataset is given. Use model's dataset:\n self.dataset_custodian: DatasetCustodian = model_dc\n self.model_to_dataset_mapping = None\n else:\n # Dataset is given. Load it:\n dataset_dc: DatasetCustodian = DatasetCustodian.load(saved_dataset_custodian_dir_name_or_path,\n is_path=saved_dc_is_path)\n # Check if mapping is necessary\n if dataset_dc.get_labels() != model_dc.get_labels():\n if len(dataset_dc.get_labels()) == len(model_dc.get_labels()):\n # Len matches and score calculation is possible. However, results may be senseless if labels do not\n # match.\n print(\"WARNING: Model labels\", model_dc.get_labels(), \"do not match dataset labels\",\n dataset_dc.get_labels())\n elif len(dataset_dc.get_labels()) < len(model_dc.get_labels()):\n # Model can distinguish more classes than dataset has. Try to automatically create mapping:\n self.model_to_dataset_mapping = {}\n for model_label in model_dc.get_labels():\n if model_label in dataset_dc.get_labels():\n self.model_to_dataset_mapping[model_dc.label_to_int_mapping[model_label]] \\\n = dataset_dc.label_to_int_mapping[model_label]\n else:\n self.model_to_dataset_mapping[model_dc.label_to_int_mapping[model_label]] \\\n = dataset_dc.label_to_int_mapping[dataset_dc.get_no_bug_label()]\n print(\"WARNING: Model predicts more classes than present in the dataset.\",\n \"Created model-to-dataset mapping, which maps each non-present class to non-vulnerable\",\n \"class:\", self.model_to_dataset_mapping)\n elif len(dataset_dc.get_labels()) > len(model_dc.get_labels()):\n print(\"WARNING: Model predicts less classes than present in the dataset.\")\n\n # Use requested dataset:\n self.dataset_custodian: DatasetCustodian = dataset_dc\n\n self.dataset_custodian_info \\\n = from_json(read_file_content(os.path.join(self.dataset_custodian.prepared_data_dir_path, \"info.json\")))\n print(\"Dataset info:\", self.dataset_custodian_info)\n self.dataset_custodian.print_dataset_statistics()", "title": "" }, { "docid": "ad61dfee683dd4305036ff041f6dc2ac", "score": "0.6021987", "text": "def _load_model(self):\n # Setup teacher to label samples\n self.model = serial.load(self.teacher_path)", "title": "" }, { "docid": "de3e75865e21e796de903d0d661cd72c", "score": "0.6013955", "text": "def __init__(self):\n self.new_dataset()", "title": "" }, { "docid": "1ed14c73e18a5f011792314e96f8fb6f", "score": "0.6009634", "text": "def load_dataset(self):\n rng = np.random.RandomState(seed=self.seed['val'])\n\n #if self.args.sets_are_pre_split == True:\n data_image_paths, index_to_label_name_dict_file, label_to_index = self.load_datapaths()\n dataset_splits = dict()\n for key, value in data_image_paths.items():\n key = self.get_label_from_index(index=key)\n bits = key.split(\"/\")\n set_name = bits[0]\n class_label = bits[1]\n if set_name not in dataset_splits:\n dataset_splits[set_name] = {class_label: value}\n else:\n dataset_splits[set_name][class_label] = value\n# else:\n# data_image_paths, index_to_label_name_dict_file, label_to_index = self.load_datapaths()\n# total_label_types = len(data_image_paths)\n# num_classes_idx = np.arange(len(data_image_paths.keys()), dtype=np.int32)\n# rng.shuffle(num_classes_idx)\n# keys = list(data_image_paths.keys())\n# values = 
list(data_image_paths.values())\n# new_keys = [keys[idx] for idx in num_classes_idx]\n# new_values = [values[idx] for idx in num_classes_idx]\n# data_image_paths = dict(zip(new_keys, new_values))\n# # data_image_paths = self.shuffle(data_image_paths)\n# x_train_id, x_val_id, x_test_id = int(self.train_val_test_split[0] * total_label_types), \\\n# int(np.sum(self.train_val_test_split[:2]) * total_label_types), \\\n# int(total_label_types)\n# print(x_train_id, x_val_id, x_test_id)\n# x_train_classes = (class_key for class_key in list(data_image_paths.keys())[:x_train_id])\n# x_val_classes = (class_key for class_key in list(data_image_paths.keys())[x_train_id:x_val_id])\n# x_test_classes = (class_key for class_key in list(data_image_paths.keys())[x_val_id:x_test_id])\n# x_train, x_val, x_test = {class_key: data_image_paths[class_key] for class_key in x_train_classes}, \\\n# {class_key: data_image_paths[class_key] for class_key in x_val_classes}, \\\n# {class_key: data_image_paths[class_key] for class_key in x_test_classes},\n# dataset_splits = {\"train\": x_train, \"val\":x_val , \"test\": x_test}\n\n if self.args.load_into_memory is True:\n\n print(\"Loading data into RAM\")\n x_loaded = {\"train\": [], \"val\": [], \"test\": []}\n\n for set_key, set_value in dataset_splits.items():\n print(\"Currently loading into memory the {} set\".format(set_key))\n x_loaded[set_key] = {key: np.zeros(len(value), ) for key, value in set_value.items()}\n # for class_key, class_value in set_value.items():\n with tqdm.tqdm(total=len(set_value)) as pbar_memory_load:\n with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:\n # Process the list of files, but split the work across the process pool to use all CPUs!\n for (class_label, class_images_loaded) in executor.map(self.load_parallel_batch, (set_value.items())):\n x_loaded[set_key][class_label] = class_images_loaded\n pbar_memory_load.update(1)\n\n dataset_splits = x_loaded\n self.data_loaded_in_memory = True\n\n return dataset_splits", "title": "" }, { "docid": "8a6478ba0e22a4cdcf9b403d6937c8b5", "score": "0.6003628", "text": "def _train(self):", "title": "" }, { "docid": "81d526eb8fab08d7d5b6f409bdcdbfe5", "score": "0.6001419", "text": "def gen_model(self, *args):\n # generate data grid\n yy, xx = np.indices(self.data.shape)\n xdata_tuple = (xx, yy)\n # return model\n return self.model(xdata_tuple, *args)", "title": "" }, { "docid": "c98949e5628394c51a94c88a3b70326b", "score": "0.5991787", "text": "def __init__(self):\n\n self.train = pd.read_csv(os.path.join(_DATA_DIR, _TRAIN))\n self.test = pd.read_csv(os.path.join(_DATA_DIR, _TEST))", "title": "" }, { "docid": "4ab0b4e47abc225f388ede8178508f18", "score": "0.5988505", "text": "def model(self) -> Model:\n pass", "title": "" }, { "docid": "8b48dd63b0122ba6c1628e3104db236e", "score": "0.5984528", "text": "def compile_model(self):\n raise NotImplementedError(\"train method has not been implemented\")", "title": "" }, { "docid": "9cd5b48ad16ce4ae2914321b26650389", "score": "0.59714246", "text": "def load_model(self):\n raise NotImplementedError", "title": "" }, { "docid": "82b6afc3f0644396e34b431513bfd2e1", "score": "0.5965924", "text": "def generate_models(self):\n # Get the group of connections from the id\n group_of_connections = __group_of_group_of_connections__.get_group(self.get_group_connection_id())\n\n # For each connection\n for connection in group_of_connections.get_connections():\n # Create its model. 
Remember that the connection id and the model id is the 4-tuple\n model_id = connection.get_id()\n new_model = Model(model_id)\n # Set the constructor for this model. Each model has a specific way of constructing the states\n #new_model.set_constructor(__modelsconstructors__.get_default_constructor())\n constructor_id = self.get_constructor_id()\n new_model.set_constructor(__modelsconstructors__.get_constructor(constructor_id))\n for flow in connection.get_flows():\n # Try to add the flow\n if not new_model.add_flow(flow):\n self.delete_model_by_id(new_model.get_id())\n # The flows are not ordered. Delete the truckated models\n __groupofgroupofmodels__.delete_group_of_models(self.get_id())\n return False\n self.models[model_id] = new_model", "title": "" }, { "docid": "86485f9351eb528526292ce5a7a6cfaf", "score": "0.5961841", "text": "def get_dataset(self): \n # Get lists of file paths for training and test sets, respectively.\n train_set_paths, test_set_paths = self._get_file_paths() \n\n # Build appropriate dataframes. \n train_set = self._build_dataframe(train_set_paths)\n test_set = self._build_dataframe(test_set_paths) \n\n return train_set, test_set", "title": "" }, { "docid": "2288cd41a8517323faa2ac3fd9e468c2", "score": "0.5959713", "text": "def read_model(self):\n f1 = open(self.name + '_' + 'words', 'r')\n f2 = open(self.name + '_' + 'word_lengths', 'r')\n f3 = open(self.name + '_' + 'stems', 'r')\n f4 = open(self.name + '_' + 'sentence_lengths', 'r')\n f5 = open(self.name + '_' + 'word_pair', 'r')\n d_str1 = f1.read() \n d_str2 = f2.read() \n d_str3 = f3.read() \n d_str4 = f4.read() \n d_str5 = f5.read() \n self.words = dict(eval(d_str1))\n self.word_lengths= dict(eval(d_str2))\n self.stems = dict(eval(d_str3))\n self.sentence_lengths = dict(eval(d_str4))\n self.word_pair = dict(eval(d_str5))", "title": "" }, { "docid": "2bb10a539955b78bbda751dcbb22d75a", "score": "0.5946806", "text": "def __init__(self, model_dir, models_path=settings.MODELS_FOLDER):\n self.path = os.path.join(models_path, model_dir)\n print('------', self.path)\n self.model = self.load_model()\n self.preds = None", "title": "" }, { "docid": "11f4d5018cc0e4d0824bd4ff530f94d0", "score": "0.59453404", "text": "def get_model(cls):\n if cls.encoder_model_50 is None:\n #cls.model = load_model(os.path.join(model_path, MODEL_NAME))\n #print(cls.model.summary())\n #cls.encoder_model = load_model(os.path.join(model_path, encoder_model-100.hdf5))\n #print(cls.encoder_model.summary())\n #cls.decoder_model = load_model(os.path.join(model_path, decoder_model-100.hdf5))\n #print(cls.decoder_model.summary())\n #cls.model_100 = load_model(os.path.join(model_path, 'model-100.hdf5'))\n #cls.model_50 = load_model(os.path.join(model_path, 'model-50.hdf5'))\n cls.encoder_model_100 = load_model(os.path.join(model_path, 'encoder_model-100.hdf5'))\n cls.encoder_model_50 = load_model(os.path.join(model_path, 'encoder_model-50.hdf5'))\n cls.decoder_model_100 = load_model(os.path.join(model_path, 'decoder_model-100.hdf5'))\n cls.decoder_model_50 = load_model(os.path.join(model_path, 'decoder_model-50.hdf5'))\n \n return cls.encoder_model_50, cls.decoder_model_50, cls.encoder_model_100, cls.decoder_model_100", "title": "" }, { "docid": "183f3a2dba1c5cdf236068a381e82edc", "score": "0.59435695", "text": "def main() -> None:\n data = download_dataset(DATA_URL)\n features, labels = pre_process_data(data)\n trained_model = train_model(features, labels)\n persist_model(trained_model)", "title": "" }, { "docid": "48ef13d6f5f1bf6f123fdf5a05e49dbb", 
"score": "0.59416366", "text": "def _train(self):\n\n raise NotImplementedError", "title": "" }, { "docid": "d0af7575dd2ef883653323abeface3e1", "score": "0.5941535", "text": "def _add_model_data_methods(self):\n self.inputs = self._model_data.filter_by_attrs(is_result=0)\n self.results = self._model_data.filter_by_attrs(is_result=1)\n self._add_observed_dict(\"model_config\")\n self._add_observed_dict(\"run_config\")\n self._add_observed_dict(\"math\")\n\n self.inputs = self._model_data.filter_by_attrs(is_result=0)\n results = self._model_data.filter_by_attrs(is_result=1)\n if len(results.data_vars) > 0:\n self.results = results\n log_time(\n logger,\n self._timings,\n \"model_data_loaded\",\n comment=\"Model: loaded model_data\",\n )", "title": "" }, { "docid": "722d2d1901e010c3156305a2bc9f4a18", "score": "0.5938245", "text": "def initialize_models(self):\n pass", "title": "" }, { "docid": "e537c6a984aa8fed70c87d64c05674b8", "score": "0.59382355", "text": "def load_dataset(self, name=''):\n name = name or (name(self) + '.model')\n return self.loader.load_model(name)", "title": "" }, { "docid": "c9ae5a49b1d47aee4a3700ffc22e53a3", "score": "0.5937723", "text": "def create_loaders(self):\n self.spam_data.text_to_tensors()\n print('creating dataloaders')\n train_data = TensorDataset(self.spam_data.train_inputs, \n self.spam_data.train_masks, \n self.spam_data.train_labels)\n train_sampler = RandomSampler(train_data)\n self.train_dataloader = DataLoader(train_data, \n sampler=train_sampler, \n batch_size=self.batch_size)\n\n validation_data = TensorDataset(self.spam_data.validation_inputs, \n self.spam_data.validation_masks, \n self.spam_data.validation_labels)\n validation_sampler = SequentialSampler(validation_data)\n self.validation_dataloader = DataLoader(validation_data, \n sampler=validation_sampler, \n batch_size=self.batch_size)\n \n test_data = TensorDataset(self.spam_data.test_inputs, \n self.spam_data.test_masks, \n self.spam_data.test_labels)\n test_sampler = SequentialSampler(test_data)\n self.test_dataloader = DataLoader(test_data, \n sampler=test_sampler, \n batch_size=self.batch_size)\n print('finished creating dataloaders')", "title": "" }, { "docid": "babd17f7c461638ea9e0481d6a4a4611", "score": "0.5934158", "text": "def begin_local(self):\n\n # Obtain Dataset\n\n sys.path.append(os.getcwd())\n data_module = importlib.import_module(\"data.%s.data\" % self.dataset_name)\n dataset_class = find_dataset_class(data_module)\n\n if not dataset_class:\n return\n\n dataset = dataset_class(name=self.dataset_name, trial=True, **self.kwargs)\n dataset.configure_core_arguments(self.args)\n \n # Obtain the model class\n\n model_module = importlib.import_module(\"models.%s.model\" % self.model_name)\n model_class = find_model_class(model_module)\n\n # Obtain the task class\n \n task_class = None\n\n if self.task_name is not None:\n task_module = importlib.import_module(\"tasks.%s.task\" % self.task_name)\n task_class = find_task_class(task_module)\n\n if task_class is None:\n task = None\n else:\n task = task_class(data=dataset)\n\n if not model_class:\n return\n\n model = model_class(data=dataset, task=task, **self.kwargs)\n model.args = self.args\n model.settings = self.settings\n model.cloudremote = self.cloudremote\n model.configure_core_arguments(self.args)\n\n if not self.cloudremote: # local training\n self.setup_local_training(model)\n else:\n model.trial = Trial(project_name=self.project_name, \n model_name=self.model_name, \n dataset_name=self.dataset_name, \n 
task_name=self.task_name, \n cloud=self.cloud)\n\n model.sync_trial_metadata()\n\n if not self.cloudremote: \n print(colored('\\n \\033[1m Training\\n', 'blue'))\n\n model.run()", "title": "" }, { "docid": "6a16b5b973ed21825e524b0156154c17", "score": "0.59317327", "text": "def __init__(self, models):\n self.models = models", "title": "" }, { "docid": "8aca508aa9581be29cbbc872b7941372", "score": "0.5930965", "text": "def load_model(self):\n pass", "title": "" }, { "docid": "8aca508aa9581be29cbbc872b7941372", "score": "0.5930965", "text": "def load_model(self):\n pass", "title": "" }, { "docid": "72aa2cd3d79c8913a0cdecddcc5f347d", "score": "0.5926102", "text": "def __init__(self):\n\n # The name of the models.\n # TODO: Refector to query them from Django\n self.str_force_models = ['Campaign',\n 'Deployment',\n 'Image',\n 'AUVDeployment',\n 'BRUVDeployment',\n 'DOVDeployment',\n 'TVDeployment',\n 'TIDeployment']\n\n for model in self.str_force_models:\n self.stat_fields = {model: 0}\n\n # List of model instances\n self.force_models = [Campaign,\n Deployment,\n Image,\n AUVDeployment,\n BRUVDeployment,\n DOVDeployment,\n TVDeployment,\n TIDeployment]", "title": "" }, { "docid": "d47b7541ac9c62728469b5626d9606ff", "score": "0.59258354", "text": "def buildModel(self):\n\t\tself.clf = svm.SVC()", "title": "" }, { "docid": "f497f44b1e0f04c9092eebfb91e91f3e", "score": "0.59257245", "text": "def __init__(self):\n\n self._experiment = Experiment\n self.data = DataContainer\n self.model = ModelContainer\n self.fit = Fit", "title": "" }, { "docid": "43c10a5ef02fc81fa1aed9b36c786567", "score": "0.59194636", "text": "def make_dataset(self, dir):\n pass", "title": "" }, { "docid": "55d99919e4e8fc81fc5c1119dc5cd030", "score": "0.5917552", "text": "def __init__(self, model_name):\n download_model(model_name)\n label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\n categories = label_map_util.convert_label_map_to_categories(label_map, \n max_num_classes=NUM_CLASSES, use_display_name=True)\n self.category_index = label_map_util.create_category_index(categories)\n self.load_graph(model_name)", "title": "" }, { "docid": "c77af5b494a196f832e13ab1159d055f", "score": "0.59164995", "text": "def _train(self, observations, metadata):\n pass", "title": "" }, { "docid": "e77d534d195abee70ee41131076860d4", "score": "0.5914514", "text": "def create_model(self) -> AmiciModel:", "title": "" }, { "docid": "576fa45543f130a0e5ca3d9b58a30542", "score": "0.59135574", "text": "def _load(self):\n dataset, info = tfds.load(self.name, with_info=True, as_supervised=True)\n self.input_shape = info.features['image'].shape\n self.output_shape = (info.features['label'].num_classes,)\n self.output_size = self.output_shape[-1]\n self.num_examples = {t: s.num_examples for (t, s) in info.splits.items()}\n self.ds = dict()\n for tag in ['train', 'test']:\n self.ds[tag] = (\n dataset[tag].map(self.convert).map(self.augment).shuffle(10000))", "title": "" }, { "docid": "05ff0fffc116699dc9009d00bce18da2", "score": "0.59132826", "text": "def get_data_from_model(self):\n model = self.model\n for i in range(model.rowCount()):\n section = model.item(i)\n key1 = str(section.text())\n if key1 in [\"Global attributes\"]:\n # global attributes\n for j in range(section.rowCount()):\n key2 = str(section.child(j, 0).text())\n val2 = str(section.child(j, 1).text())\n self.ds.root[\"Attributes\"][key2] = val2\n elif key1 in [\"Variables\"]:\n for j in range(section.rowCount()):\n variable_section = section.child(j)\n label = variable_section.text()\n for 
k in range(variable_section.rowCount()):\n key2 = str(variable_section.child(k, 0).text())\n val2 = str(variable_section.child(k, 1).text())\n self.ds.root[\"Variables\"][label][\"Attr\"][key2] = val2\n else:\n # this is a netCDF file with groups\n group_attributes = {}\n variables = {}\n for j in range(section.rowCount()):\n if section.child(j).text() in [\"Group attributes\"]:\n for k in range(section.child(j).rowCount()):\n key = str(section.child(j).child(k, 0).text())\n value = str(section.child(j).child(k, 1).text())\n group_attributes[key] = value\n else:\n label = section.child(j).text()\n group = getattr(self.ds, key1)\n variables[label] = {\"Data\": group[\"Variables\"][label][\"Data\"],\n \"Flag\": group[\"Variables\"][label][\"Flag\"],\n \"Attr\": group[\"Variables\"][label][\"Attr\"]}\n for k in range(section.child(j).rowCount()):\n key = str(section.child(j).child(k, 0).text())\n value = str(section.child(j).child(k, 1).text())\n variables[label][\"Attr\"][key] = value\n # create the group as an attribute in the data structure\n setattr(self.ds, key1, {\"Attributes\": group_attributes,\n \"Variables\": variables})\n #msg = \" Unrecognised object (\" + key1 + \") in netCDF file\"\n #msg += \", skipping ...\"\n #logger.warning(msg)\n return self.ds", "title": "" }, { "docid": "bf6e9ab846d54b0c7d553593fb57c978", "score": "0.59122497", "text": "def prepDatasets(self):\n self.train_ds = DataGenerator2022(generateForCellPred = False, batch_size = self.batch_size)\n self.val_ds = DataGenerator2022(train=False, generateForCellPred = False, batch_size = self.batch_size)", "title": "" }, { "docid": "8c7ce70028cae2d08538eeba8b99887b", "score": "0.590531", "text": "def get_dataset(self):\n\n train_transform = transforms.Compose([\n # you can add other transformations in this list\n # resize (256 x remaining larger size) and RandomCrop(224)\n # like in https://papers.nips.cc/paper/2012/file/c399862d3b9d6b76c8436e924a68c45b-Paper.pdf\n # https://arxiv.org/pdf/1409.1556.pdf\n transforms.Resize(256), # resize smaller size to 256\n transforms.RandomCrop(self.args.patch_size), # 224\n transforms.ToTensor()\n ])\n test_transform = transforms.Compose([\n # you can add other transformations in this list\n # resize\n transforms.Resize(256), # resize smaller size to 256\n transforms.CenterCrop((self.args.patch_size, self.args.patch_size)), # 224\n transforms.ToTensor()\n ])\n\n if self.args.compute_dataset_metrics is True:\n\n train_transform = transforms.Compose([\n transforms.Resize((self.args.patch_size, self.args.patch_size)), # resize smaller size to 256\n transforms.ToTensor()\n ])\n test_transform = transforms.Compose([\n transforms.Resize((self.args.patch_size, self.args.patch_size)), # resize smaller size to 256\n transforms.ToTensor()\n ])\n\n '''\n train_transform = transforms.Compose([\n transforms.ToTensor()\n ])\n test_transform = transforms.Compose([\n transforms.ToTensor()\n ])'''\n\n # if not already set, set batch-size to 1 for computing the metrics\n # due to different image sizes\n self.args.batch_size = 1\n self.args.workers = 0\n\n trainset = IndxImageFolder(os.path.join(self.load_path, \"train\"),\n transform=train_transform,\n target_transform=None)\n\n valset = IndxImageFolder(os.path.join(self.load_path, \"val\"),\n transform=test_transform,\n target_transform=None)\n\n # https://pytorch.org/vision/0.8/_modules/torchvision/datasets/imagenet.html#ImageNet\n # https://www.kaggle.com/c/imagenet-object-localization-challenge/data?select=LOC_synset_mapping.txt\n # 
https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a\n self.class_to_idx = trainset.class_to_idx\n self.idx_to_class = dict((v, k) for k, v in self.class_to_idx.items())\n\n if not os.path.exists(os.path.join('metrics/datasets', self.name) +'.csv'):\n train_loader = torch.utils.data.DataLoader(trainset, batch_size=1, shuffle=False, num_workers=0)\n val_loader = torch.utils.data.DataLoader(valset, batch_size=1, shuffle=False, num_workers=0)\n\n indices_data = {}\n for loader_name, loader in [('train', train_loader), ('test', val_loader)]:\n for (img, label, img_path) in tqdm(loader):\n # since img_path is a tuple, it is converted to a list\n # since batch-size is 1, the first element is taken\n indices_data[list(img_path)[0]] = label.detach().cpu().numpy()[0]\n\n # store img_paths which serves as indices and the labels for further analysis\n indices_data = collections.OrderedDict(sorted(indices_data.items()))\n\n dataframe = pd.DataFrame({'img_paths': list(indices_data.keys()),\n 'labels': list(indices_data.values())})\n DatasetMetrics.indices_paths(self.name, dataframe)\n\n return trainset, valset", "title": "" }, { "docid": "812ce1568c4c0af43cec52fda45df864", "score": "0.5903093", "text": "def __init__(self, model_name, input_shape=(256, 256, 3)):\n super().__init__(model_name=model_name)\n\n # Number of classes to segment\n # 0 -> not a building\n # 1 -> a building\n self.n_classes = 2\n # Input data shape\n self.input_shape = input_shape\n # File extensions for data to predict\n self.FILE_EXTENSIONS = [\n \"tif\",\n \"tiff\",\n \"png\",\n \"jpg\",\n \"jpeg\"\n ]\n # Files to predict\n self.filenames = list()", "title": "" } ]
80687f3cf13f915b78d9c85561dd33fb
Return readable informal exception description about checksummed exception.
[ { "docid": "1747be14a5b639183ce75ef786e8178c", "score": "0.0", "text": "def __str__(self):\n return 'Invalid {} checksum for the {} file'.format(self.algo, 'original' if self.decrypted else 'encrypted')", "title": "" } ]
[ { "docid": "9271c80dc57fab8ae115327f3df1069a", "score": "0.6199383", "text": "def exception(self):\r\n buf = traceback.format_exception_only(self.exc_type, self.exc_value)\r\n return ''.join(buf).strip().decode('utf-8', 'replace')", "title": "" }, { "docid": "fb1ae58044d5b91590512fdd3726d074", "score": "0.60794216", "text": "def format_error_message(self) -> str:\n return f\"{self.exc_type.__name__}: {self.exc_value}\"", "title": "" }, { "docid": "9bc39d440b7bf99d019f5035d908cc88", "score": "0.60473853", "text": "def __str__(self):\n return 'Database integerity error - %s' % self.reason", "title": "" }, { "docid": "c3c27ba8ab94db5472077b0e0a97113a", "score": "0.6006324", "text": "def exc_info(self):\r\n return self.exception", "title": "" }, { "docid": "7e63a9a43764baca471f52eec33819bc", "score": "0.59391123", "text": "def formatException(self, ei):\n return", "title": "" }, { "docid": "d2385ca71f53c336e0e3e76675c70df3", "score": "0.5920419", "text": "def _exc_info_to_string(self, err, test):\n return err", "title": "" }, { "docid": "9de97764282b29d2261fb095db6482ef", "score": "0.58595777", "text": "def exc_repr(e):\n return \"{}: {}\".format(type(e).__name__, str(e))", "title": "" }, { "docid": "af4df25dd30e2ad06974aed720020bf1", "score": "0.58202285", "text": "def standard_exc_info(self):\r\n return self.exc_type, self.exc_value, self.frames[0].tb", "title": "" }, { "docid": "39ced7dcdb0e9a2808e6487f967cdaa2", "score": "0.581941", "text": "def get_error_message(self):\n return self.open.query(\":SYST:ERR?\")", "title": "" }, { "docid": "754abcd3b97fa9e5b1e4aee96720ad21", "score": "0.58062327", "text": "def excused(self):\n return self._excused", "title": "" }, { "docid": "ff8c1eb51ff6dbf49bf46a5de491f9b8", "score": "0.5754992", "text": "def error_str(self):\n return u\"{0:<20} {1}\\n{3:<11} {2}\".format(\n package_helpers.color_output(\n self.get_logging_level() if self.get_logging_level() != constants.VALIDATE_LOG_LEVEL_DEBUG\n else \"PASS\", \n self.get_logging_level()), \n self.description, self.solution, \"\"\n )", "title": "" }, { "docid": "aa70429c940e19a26739526cb42b48fb", "score": "0.5741728", "text": "def generate_message(self):\n message = \"Oops! 
Assertion failed\"\n message += self.generate_expression_section()\n message += self.generate_description_section()\n message += self.generate_values_section()\n message += self.generate_where_section()\n return message", "title": "" }, { "docid": "485726a75047be5ee8daef6e226521f8", "score": "0.5719327", "text": "def exception_detail(\n self, exception_type: type, exception_value: Exception, tb\n ):\n text = \"[{}]: Unhandled WebSocket Error:{}\\n\".format(\n datetime.now().isoformat(), exception_type\n )\n text += \"LastSentText:\\n{}\\n\".format(self._last_sent_text)\n text += \"LastReceivedText:\\n{}\\n\".format(self._last_received_text)\n text += \"Exception trace: \\n\"\n text += \"\".join(\n traceback.format_exception(exception_type, exception_value, tb)\n )\n return text", "title": "" }, { "docid": "0c388fadd9d94e9a5e29cd5a80d0fb8f", "score": "0.5694837", "text": "def source_message(self):\n return ''.join(traceback.format_exception_only(self._src_exc[0], self._src_exc[1]))", "title": "" }, { "docid": "b3a950b6304c5840564791d4f38847e4", "score": "0.5693524", "text": "def _get_exception():\n exc_type, exc_value, exc_traceback = sys.exc_info()\n d = traceback.format_exception(exc_type, exc_value, exc_traceback)\n msg = ''.join(d)\n return msg", "title": "" }, { "docid": "8b7fd8fcc1fb84975659f8e63b4be81f", "score": "0.567742", "text": "def formatException(self, ei):\n exc_tb = ei[2]\n exc_type = ei[0]\n exc_value = ei[1]\n msg = []\n try:\n try:\n # log exception details\n now = datetime.datetime.now().isoformat()\n py_version = getattr(config, 'py_version', 'not found')\n wx_version = getattr(config, 'wx_version', 'not found')\n platform = getattr(config, 'platform', 'not found')\n app_version = getattr(config, 'version', 'not found')\n\n msg.append('An unexpected error occurred!\\n')\n msg.append('\\n')\n msg.append('Exception type: %s\\n' % exc_type)\n msg.append('Exception details: %s\\n' % exc_value)\n if exc_tb:\n msg.append('\\nApplication stack traceback:\\n')\n msg += traceback.format_tb(exc_tb)\n msg.append('\\n')\n msg.append('Date and time: %s\\n' % now)\n msg.append('Python version: %s\\n' % py_version)\n msg.append('wxPython version: %s\\n' % wx_version)\n msg.append('wxWidgets platform: %s\\n' % platform)\n msg.append('wxGlade version: %s\\n' % app_version)\n msg.append('\\n')\n\n except Exception as e:\n # This code should NEVER be executed!\n if config.debugging: raise\n logging.error('An exception has been raised inside the exception handler: %s', e)\n sys.exit(1)\n\n # delete local references of trace backs or part of them to avoid circular references\n finally:\n del ei, exc_tb, exc_type, exc_value\n\n if msg[-1][-1] == \"\\n\":\n msg[-1] = msg[-1][:-1]\n return \"\".join(msg)", "title": "" }, { "docid": "2368e9dffe07585bf094fb92c26606f4", "score": "0.5610206", "text": "def get_error_msg(self):\n raise NotImplementedError(\"Must be implemented\")", "title": "" }, { "docid": "56ed53ec2a88e5ef0adf74be2c61a747", "score": "0.5606838", "text": "def rdap_pretty_error_message(exc):\n\n if isinstance(exc, RdapNotFoundError):\n return _(\"This ASN is not assigned by any RIR\")\n if isinstance(exc, RdapInvalidRange):\n return _(\"ASNs in this range are private or reserved\")\n\n return _(\"{}\").format(exc)", "title": "" }, { "docid": "d9673b778f6a2c4f1f1497dd458de087", "score": "0.56065124", "text": "def getExceptionText():\n (_type,value,tback)=sys.exc_info()\n return \"\".join(traceback.format_exception(_type, value, tback))", "title": "" }, { "docid": 
"0079174b7c53c79159f216eababccaae", "score": "0.5606293", "text": "def exconly(self, tryshort: bool = False) -> str:\n lines = format_exception_only(self.type, self.value)\n text = \"\".join(lines)\n text = text.rstrip()\n if tryshort:\n if text.startswith(self._striptext):\n text = text[len(self._striptext) :]\n return text", "title": "" }, { "docid": "813627fe9f21c67dd8aa1ca76abea9ea", "score": "0.55978507", "text": "def message(self):\n if not self.enabled:\n return None\n return self._lint_message + \\\n \"\\n=======================================\" + \\\n \"\\n%s: Error:%d, Warning:%d \\n\" % \\\n (self._version, self._error_count, self._warning_count)", "title": "" }, { "docid": "996968b070f528c9786a435aeb617980", "score": "0.5596806", "text": "def err_msg(self):\n return err_msg_template.substitute(\n object_name=self.object_name,\n missing_dependency=self.missing_dependency)", "title": "" }, { "docid": "044f7377c916dadb1344d84f69197c0b", "score": "0.5580845", "text": "def format_exception(self) -> str:\n return \"\".join(\n traceback.format_exception(self.exc_type, self.exc_value, self.exc_tb)\n )", "title": "" }, { "docid": "675c6c2a6c9600313afaf4ffa87469ae", "score": "0.5577244", "text": "def format_scan_error(\n self,\n next_file: str,\n this_exception: Exception,\n show_extended_information: bool = False,\n ) -> Optional[str]:\n formatted_error = f\"{type(this_exception).__name__} encountered while scanning '{next_file}':\\n{this_exception}\"\n if show_extended_information:\n current_cause = this_exception.__cause__\n while current_cause:\n formatted_error += (\n f\"\\nCaused by: {type(current_cause).__name__}:\\n {current_cause}\"\n )\n current_cause = current_cause.__cause__\n return formatted_error", "title": "" }, { "docid": "7d79095ca7ef8edee3d27ad2fcd16eae", "score": "0.5573219", "text": "def error_details(self):\n return self._error_details", "title": "" }, { "docid": "66af77c6e6ddc576247b7a793af9d51a", "score": "0.55666035", "text": "def get_error_msg( self ):\n try:\n if (self.__dict__['status_info']['status'] != 'ERROR'):\n return None\n return self.__dict__['status_info']['message']\n except KeyError as ex:\n raise GPUdbException( \"Unknown wrapped object; could not find \"\n \" the following key: {}\"\n \"\".format( GPUdbException.stringify_exception( ex ) ) )", "title": "" }, { "docid": "5e062dca3af64d1f8901dc3afde1d4b0", "score": "0.5547183", "text": "def getErrorMsg(self):\n pass", "title": "" }, { "docid": "d2f0ac9e876a142c372f541edd1c30be", "score": "0.5544872", "text": "def exc_info(self):\r\n return self.exc_type, self.exc_value, self.frames[0]", "title": "" }, { "docid": "43b5d1f201597d263d40d5c0a28284a9", "score": "0.5541972", "text": "def error(self) -> str:\r\n if self.state == STATE_ERROR:\r\n mower_attributes = self.__get_mower_attributes()\r\n return ERRORCODES.get(mower_attributes[\"mower\"][\"errorCode\"])\r\n return \"\"", "title": "" }, { "docid": "16f3932874ef158b974ae9da07b501c6", "score": "0.5540333", "text": "def _exc_str(ex: Exception, color=_exception_color) -> str:\n if color is None:\n color = \"\"\n return_color = \"\"\n else:\n return_color = _message_color\n exception_name = ex.__name__\n use_an = exception_name[0] in 'aeiouAEIOU' and exception_name[0:4] != \"User\"\n article = 'an' if use_an else 'a'\n return f\"{article} {color}{exception_name}{return_color}\"", "title": "" }, { "docid": "e230ea07c65d236ef187a9e0b8dd1a6f", "score": "0.55307996", "text": "def generate_exception_info(self, exception: Optional[Exception]) -> str:\n\n 
return \"Exception information: \\n\" \\\n f\"\\tException type: {type(exception)}\\n\" \\\n f\"\\tException text: {str(exception)}\\n\" \\\n f\"\\tTrace: {traceback.format_exc()}\"", "title": "" }, { "docid": "cd99b8a343d94a804dba19495ef9aa29", "score": "0.55146784", "text": "def formatError(self):\n return \"(Unable to format message)\"", "title": "" }, { "docid": "95ff2699b2c93e142c29b7fc4c5241bd", "score": "0.55130976", "text": "def message(self):\n return self._error.message", "title": "" }, { "docid": "6927e41cd9eeb27fc4bd2737dbf1a2a4", "score": "0.5500051", "text": "def get_message(self):\n return self.err", "title": "" }, { "docid": "7e92240e63081dbf82917911dbec8e66", "score": "0.5492574", "text": "def repr_failure(self, excinfo):\n if excinfo.errisinstance(BadRequest):\n return str(excinfo.value)\n return super().repr_failure(excinfo)", "title": "" }, { "docid": "c498955ed77cd16990a275d1e104e3e3", "score": "0.5491929", "text": "def exc_info(self):\n return self.exc_type, self.exc_value, self.frames[0]", "title": "" }, { "docid": "0d4268b6cf48deb3c9cdca4822558339", "score": "0.549162", "text": "def _get_error_message_from_exception(self, e):\n\n try:\n if e.args:\n if len(e.args) > 1:\n error_code = e.args[0]\n error_msg = e.args[1]\n elif len(e.args) == 1:\n error_code = ERR_CODE_MSG\n error_msg = e.args[0]\n else:\n error_code = ERR_CODE_MSG\n error_msg = ERR_MSG_UNAVAILABLE\n except:\n error_code = ERR_CODE_MSG\n error_msg = ERR_MSG_UNAVAILABLE\n\n try:\n error_msg = self._handle_py_ver_compat_for_input_str(error_msg)\n except TypeError:\n error_msg = TYPE_ERR_MSG\n except:\n error_msg = ERR_MSG_UNAVAILABLE\n\n try:\n if error_code in ERR_CODE_MSG:\n error_text = \"Error Message: {0}\".format(error_msg)\n else:\n error_text = \"Error Code: {0}. Error Message: {1}\".format(error_code, error_msg)\n except:\n self.debug_print(PARSE_ERR_MSG)\n error_text = PARSE_ERR_MSG\n\n return error_text", "title": "" }, { "docid": "8c5783b74f281c0b7b0dafa60e5d86c2", "score": "0.5491152", "text": "def exc_info(cls) -> \"ErrorInfo\":\n return cls(*sys.exc_info())", "title": "" }, { "docid": "36e7a419dff15b48c6549ab6156f006a", "score": "0.5482453", "text": "def description(self) -> str:\n return \"Invalid value in column '%s'\" % self.column_name", "title": "" }, { "docid": "b7ceca5387c090ca88ca9cf58a0d9760", "score": "0.54719484", "text": "def explain(self):\n if self is PoolMaintenanceErrorCode.NO_IPC_REQUESTS:\n return (\n \"The pool will return an error on any IPC request that could \"\n \"cause a change in the pool state, for example, a request to \"\n \"rename a filesystem. 
It will still be able to respond to \"\n \"purely informational requests.\"\n )\n\n if self is PoolMaintenanceErrorCode.NO_POOL_CHANGES:\n return (\n \"The pool is unable to manage itself by reacting to events, \"\n \"such as devicemapper events, that might require it to take \"\n \"any maintenance operations.\"\n )\n\n if self is PoolMaintenanceErrorCode.READ_ONLY: # pragma: no cover\n return \"The pool is in read-only mode.\"\n\n assert False, \"impossible error code reached\" # pragma: no cover", "title": "" }, { "docid": "e4e39ecb1aa20140c9ce355a1cddeb9c", "score": "0.54719114", "text": "def error_message(self) -> str:\n return self._error_message", "title": "" }, { "docid": "8023a496044b2d547213fcab1178bd5b", "score": "0.5469984", "text": "def __str__(self):\n return 'Database access error - %s' % self.reason", "title": "" }, { "docid": "933a26dbfd66d5d54e4f45b1cc5a6522", "score": "0.5467869", "text": "def dead_letter_error_description(self) -> Optional[str]:\n if self._raw_amqp_message.application_properties:\n try:\n return self._raw_amqp_message.application_properties.get( # type: ignore\n PROPERTIES_DEAD_LETTER_ERROR_DESCRIPTION\n ).decode(\"UTF-8\")\n except AttributeError:\n pass\n return None", "title": "" }, { "docid": "aeb3f9109b6101de354ad25f0e10a729", "score": "0.5458247", "text": "def exception(self):\n if self.end_message:\n return self.end_message.contents.get(EXCEPTION_FIELD, None)", "title": "" }, { "docid": "c8e5f29072e83007eb428eb55d9aeb1b", "score": "0.54414946", "text": "def error_message(self):\n return self._error_message", "title": "" }, { "docid": "c8e5f29072e83007eb428eb55d9aeb1b", "score": "0.54414946", "text": "def error_message(self):\n return self._error_message", "title": "" }, { "docid": "307029b4ffaf78a74cd9611448a14c1e", "score": "0.54342765", "text": "def exc_info(self):\n e = self._exc_info\n if e:\n return (e[0], e[1], load_traceback(e[2]))", "title": "" }, { "docid": "fcf05a090e058a3665fdbf1ad41846d2", "score": "0.5431123", "text": "def __str__(self):\n return repr(self.error_message)", "title": "" }, { "docid": "648ae861068703e7818e4d29fc1add95", "score": "0.54292065", "text": "def format(self):\n\n return format_exception(self.type, self.value, self.traceback)", "title": "" }, { "docid": "2ee9ff6fdface7e63576571423b0e76f", "score": "0.54117876", "text": "def get_error(self):\n return '[%s]' % (self.error)", "title": "" }, { "docid": "ad55f5efb8216dc79e0f7afd7213b6a2", "score": "0.54111767", "text": "def error(self) -> Optional[str]:\n\n if self.status != \"errored\":\n return None\n\n message = next(\n iter(stage.error for stage in self.stages if stage.status == \"errored\")\n )\n return \"unknown error\" if message is None else message", "title": "" }, { "docid": "b911b986ae96bdf3521e36ba8ea2b799", "score": "0.5409309", "text": "def _err(exception):\n print(exception)", "title": "" }, { "docid": "1fad75fe980269738fbf18b9cc3f97e9", "score": "0.54014397", "text": "def __str__(self):\n return \"ParseException: %s\" % self.__msg", "title": "" }, { "docid": "3f5fcdd8e58d8b8518f906f56539ca8e", "score": "0.5401238", "text": "def exception(self):\n if self._get_hnode().get(\"UNTRANSLATED\"):\n return \"Status: error (ctx needs translation)\"\n codecell = self._get_codecell()\n exception = codecell.exception\n if exception is None and codecell.status == \"Status: OK\":\n gen_moduledict = self._get_ctx().gen_moduledict \n exc2 = gen_moduledict.exception\n if exc2 is not None:\n exception = {\n \"gen_moduledict\": exc2\n } \n return exception", "title": "" 
}, { "docid": "11152d60d7b2ac87efc54a2b053baced", "score": "0.5396628", "text": "def __str__(self):\n return self.base_message.format(filename=self.md_file_path) + ERROR_MESSAGE", "title": "" }, { "docid": "d5e191b3622185c632ea5d9d70884698", "score": "0.53854686", "text": "def _exc_info_to_string(self, err, test):\r\n exctype, value, tb = err\r\n # Skip test runner traceback levels\r\n while tb and self._is_relevant_tb_level(tb):\r\n tb = tb.tb_next\r\n if exctype is test.failureException:\r\n # Skip assert*() traceback levels\r\n length = self._count_relevant_tb_levels(tb)\r\n msgLines = traceback.format_exception(exctype, value, tb, length)\r\n else:\r\n msgLines = traceback.format_exception(exctype, value, tb)\r\n \r\n if self.buffer:\r\n output = sys.stdout.getvalue()\r\n error = sys.stderr.getvalue() \r\n if output:\r\n if not output.endswith('\\n'):\r\n output += '\\n'\r\n msgLines.append(STDOUT_LINE % output)\r\n if error:\r\n if not error.endswith('\\n'):\r\n error += '\\n'\r\n msgLines.append(STDERR_LINE % error)\r\n return ''.join(msgLines)", "title": "" }, { "docid": "84fc9d213200a3866610c9d2b86a9506", "score": "0.5379236", "text": "def message(self, *args, **kwargs):\n base_msg = super(CoconutInternalException, self).message(*args, **kwargs)\n if \"\\n\" in base_msg:\n return base_msg + \"\\n\" + report_this_text\n else:\n return base_msg + \" \" + report_this_text", "title": "" }, { "docid": "5edcb3e40f224f2fe15690f7efc9922e", "score": "0.5375226", "text": "def _error_msg(self, e):\n print(textwrap.dedent((\"\"\"\n ERROR! -- An unrecoverable error has occurred.\n\n If you believe the CUE file is correct, please send the input file to\n <%s>, along with the error message below.\n\n ---> %s\n \"\"\" % (__email__,e))), file=sys.stderr)", "title": "" }, { "docid": "27997bd68cbf0fa590fd976b081b88e6", "score": "0.537171", "text": "def message(self):\n self.process()\n # Check whether the error comes with source information\n if hasattr(self.error, \"line\"):\n args = (\n self.header(),\n self.highlight(),\n self.error_code(),\n self.hint(),\n )\n return \"{}\\n\\n{}\\n\\n{}: {}\".format(*args)\n else:\n return f\"{self.error_code()}: {self.hint()}\"", "title": "" }, { "docid": "db70d4f81e827ef6a85555102acc8c07", "score": "0.5336603", "text": "def description(self):\n return 'unknown'", "title": "" }, { "docid": "222f2ef4dcf1ac1be9810903cd1a2890", "score": "0.5332498", "text": "def stringify_exception( ex ):\n if str(ex):\n return str(ex)\n else:\n return repr(ex)", "title": "" }, { "docid": "015abafead869e36246ec25fe54a59c9", "score": "0.5312414", "text": "def get_desc(self):\n return \"The Developer of this Probe didn't provide a description\"", "title": "" }, { "docid": "3e72030f3d80804cb15de578b957bc65", "score": "0.53012925", "text": "def traceback(self):\n type, value, tb = sys.exc_info()\n return ''.join(traceback.format_exception(type, value, tb.tb_next))", "title": "" }, { "docid": "31f6f0a7d8c24175f8ebc18f522c5187", "score": "0.53003526", "text": "def standard_exc_info(self):\n tb = self.frames[0]\n # the frame will be an actual traceback (or transparent proxy) if\n # we are on pypy or a python implementation with support for tproxy\n if type(tb) is not TracebackType:\n tb = tb.tb\n return self.exc_type, self.exc_value, tb", "title": "" }, { "docid": "6c80b1e969a354f1b70bfa2a5a0b1148", "score": "0.52959055", "text": "def __str__(self):\n ex_msg = \"Feature plugin registration failed:\\n\"\n for ex in self.exceptions or []:\n ex_msg += ex + \"\\n\"\n return ex_msg", "title": 
"" }, { "docid": "b33c24c19cd83cfdc3d034fe39189ef6", "score": "0.5294852", "text": "def reason(self) -> str:\n return self.Reason", "title": "" }, { "docid": "33c79ae44030bb9903345c6eee13176d", "score": "0.528951", "text": "def invalid_reason(self):\n if self.valid():\n return ''\n else:\n if self.name is None:\n return 'No name defined'\n elif self.homepage is None:\n return 'No homepage defined'\n elif self.patterns[0].pattern is None:\n return 'No searchpattern defined'\n else:\n return 'Unknown error'", "title": "" }, { "docid": "4ab777fccc90218fbca6eccfbb89e1fb", "score": "0.52856106", "text": "def reason(self):\n r = []\n if not self.has_var:\n r.append(u'no_var')\n if not self.has_func:\n r.append(u'no_func')\n for vname in self.bad_vars:\n r.append(u'bad_var ' + vname)\n return u','.join(r)", "title": "" }, { "docid": "ab3fd85d471b1328db02500352f95d4d", "score": "0.52804965", "text": "def name(self) -> str:\n return \"Multimatic Errors\"", "title": "" }, { "docid": "061e4191a7bbe0e5db73717d9b76c715", "score": "0.5279127", "text": "def _HandleException(self, e):\n return str(e)", "title": "" }, { "docid": "28d163d18890ceb07c10fdb43f6bcfdd", "score": "0.5278666", "text": "def error(self) -> str:\n return typing.cast(\n str,\n self._properties.get(\"error\"),\n )", "title": "" }, { "docid": "ca3aab25b4be1851b5a43d1847e834f9", "score": "0.52730286", "text": "def _exc_info_to_string(self, err, test):\n exctype, value, tb = err\n # Skip test runner traceback levels\n while tb and self._is_relevant_tb_level(tb):\n tb = tb.tb_next\n\n if exctype is test.failureException:\n # Skip assert*() traceback levels\n length = self._count_relevant_tb_levels(tb)\n msgLines = value\n else:\n msgLines = value\n\n if self.buffer:\n output = sys.stdout.getvalue()\n error = sys.stderr.getvalue()\n print error\n if output:\n if not output.endswith('\\n'):\n output += '\\n'\n msgLines.append(STDOUT_LINE % output)\n if error:\n if not error.endswith('\\n'):\n error += '\\n'\n msgLines.append(STDERR_LINE % error)\n return ''.join(map(str, msgLines))", "title": "" }, { "docid": "0ee5f63a1d32974d9cbb4341a5a38e39", "score": "0.5270419", "text": "async def _get_extract_failure_reason(ex: Exception, data: Dict[str, Any]):\n\n api_message = ''\n\n if 'code' in data and data['code'] != 0:\n api_message += 'Error code {}: '.format(data['code'])\n elif 'error_code' in data:\n api_message += 'Error code {}: '.format(data['error_code'])\n\n if 'msg' in data and data['msg'] and data['msg'] != '':\n api_message += data['msg']\n if 'detailMsg' in data and data['detailMsg'] and data['detailMsg'] != '':\n api_message += '; ' + data['detailMsg']\n else:\n api_message += 'empty or missing results'\n\n return \"{} ({}: {})\".format(api_message, type(ex).__name__, ex)", "title": "" }, { "docid": "f1a97bed5ad6d3e1bba81544981662d6", "score": "0.52660227", "text": "def __str__(self):\n s = Exception.__str__(self)\n if not self.fname:\n return s\n return \"File {0}, line {1}: {2}\".format(self.fname, self.line, s)", "title": "" }, { "docid": "11817171cd255036e35e5d6f915080c6", "score": "0.5261123", "text": "def _get_fault_message(self, exception):\n try:\n return exception.failure.errors[0].message\n except AttributeError:\n try:\n return exception.details()\n except AttributeError:\n return None", "title": "" }, { "docid": "879ba82d06f5ec712a50a50b11844ce4", "score": "0.5257593", "text": "def print_exc_info():\n\n import StringIO, traceback\n \n sio = StringIO.StringIO()\n traceback.print_exc(file=sio) #thread-safe print_exception 
to string\n sio.seek(0, 0)\n \n return sio.read()", "title": "" }, { "docid": "dc54ec531b6c0dca2f6f42e2e1a3240e", "score": "0.5249521", "text": "def get_exception_message(self, exception_type, args):\n message = None\n try:\n raise exception_type(*args)\n except exception_type as exc:\n message = str(exc)\n return message", "title": "" }, { "docid": "536660c6e2a6a6c47f2ac7e94de5cdf0", "score": "0.5241921", "text": "def get_error_msg(self):\n\n status_display = None\n if self.get_status() == self.HIGH_TEMP_ERROR:\n status_display = \"High Temperature (100%cC)\" % self.DEGREE_SIGN\n elif self.get_status() == self.LOW_TEMP_ERROR:\n status_display = \"Low Temperature (-50%cC)\" % self.DEGREE_SIGN\n\n reading_datetime = datetime.datetime.strptime(self.format_datetime_string(), \"%Y/%m/%d %H:%M\")\n reading_display_datetime = reading_datetime.strftime('%Y/%m/%d %H:%M')\n\n reading_seq_num = self.get_sequence_num()\n\n error_msg = \"%s at %s, Sequence: %d\" % (\n status_display, reading_display_datetime, reading_seq_num)\n return error_msg", "title": "" }, { "docid": "c91634e65a58ee887ffabe121442accf", "score": "0.52325565", "text": "def get_fault_message(self):\n\n pass # pragma: no cover", "title": "" }, { "docid": "23614d013c8f58fe4d1047855159d98a", "score": "0.5226735", "text": "def message(self) -> str:\n if not self._message:\n raise Exception\n return self._message", "title": "" }, { "docid": "8ac0963e6d21b1b1a1cc240edd0903b3", "score": "0.52178687", "text": "def __str__(self):\n obj_str = \"Systematic uncertainty: {:s}\".format(self.name)\n return obj_str", "title": "" }, { "docid": "c3c1acb0e2cd5601687c21ffd4ff3b95", "score": "0.52152044", "text": "def formatEx3(excepInst):\n msg = \"%s:%s\" % (excepInst.__class__.__name__, str(excepInst))\n return msg", "title": "" }, { "docid": "4c883876b1822271bd01b5b8e55efe1d", "score": "0.52130306", "text": "def __str__(self):\n return (\n \"Critical errors: {CRITICAL}, errors: {ERROR}, warnings: \"\n \"{WARNING}\"\n ).format(**self.logged)", "title": "" }, { "docid": "27c0206bc4d8824ab3b65caf7548f457", "score": "0.52104795", "text": "def repr_failure(self, excinfo):\n if isinstance(excinfo.value, FunctionalTestException):\n return f\"Functional Test failed {excinfo.value.args}. See below, or see output log file for test {self.name}. 
\"\n else:\n log.error(f\"unknown failure {excinfo}\")\n print(\"Unknown failure:\", str(excinfo))\n print(excinfo.getrepr())", "title": "" }, { "docid": "713df0d6f7e9e8552aad15657b4e271f", "score": "0.5207485", "text": "def render_error(self) -> str:\n return f'{str(self.cause) if self.cause else self.msg}'", "title": "" }, { "docid": "c7000531254472b1555ac1039dcdd9ce", "score": "0.52051544", "text": "def __str__(self):\n \n return self.reason", "title": "" }, { "docid": "737605eb41ed786830dc5b6afd70fc0d", "score": "0.5194999", "text": "def repr_failure(self, excinfo):\r\n if isinstance(excinfo.value, DocoptTestException):\r\n return \"\\n\".join((\r\n \"usecase execution failed:\",\r\n self.doc.rstrip(),\r\n \"$ %s %s\" % (self.prog, self.argv),\r\n \"result> %s\" % json.dumps(excinfo.value.args[1]),\r\n \"expect> %s\" % json.dumps(self.expect),\r\n ))", "title": "" }, { "docid": "facb76f14223c41a5a6712aa59303fca", "score": "0.5192028", "text": "def __str__(self):\n return 'Failed to read file \"%s\"' % self.filepath", "title": "" }, { "docid": "04c0a8cfcc3255fdda0c0f8cb0dc989c", "score": "0.5187984", "text": "def errors_occurred(self):\n return self.logged['ERROR'] + self.logged['CRITICAL']", "title": "" }, { "docid": "a19bdb6596b3edb450a1449d60015861", "score": "0.5185789", "text": "def exception(self):\n if self._exception is not _NONE:\n return self._exception", "title": "" }, { "docid": "4ddd78eb8f69eddf6da1708ac08f689d", "score": "0.5179353", "text": "def warning_message(self) -> str:", "title": "" }, { "docid": "4835f7c69a0cf093fec8c61b7458dd5e", "score": "0.5175048", "text": "def status_reason(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status_reason\")", "title": "" }, { "docid": "45e38227c8cd8898a4423df6acc44315", "score": "0.51723176", "text": "def show_err():\n for err in list(sys.exc_info()):\n err_msg = \"[!]%s\" % str(err).strip(\"<>\")\n print(err_msg, file=sys.stderr)", "title": "" }, { "docid": "5fb692314b382cc23514cbc07302541c", "score": "0.5169245", "text": "def exception(self):\n return self._exc_info[1] if self._exc_info else None", "title": "" }, { "docid": "cff0c1ba2b82b577767f7ed0f638f008", "score": "0.5164884", "text": "def GetFailureReason(self):\n return self._failure_reason", "title": "" }, { "docid": "9cdc208aa987740317114f71f9a3bd1b", "score": "0.5159561", "text": "def get_formatted_traceback( self ):\n return self.traceback_msg", "title": "" }, { "docid": "92f7b86a8348577fd5c0b101151cb5dc", "score": "0.5154065", "text": "def reason(self) -> str:\n return pulumi.get(self, \"reason\")", "title": "" }, { "docid": "92f7b86a8348577fd5c0b101151cb5dc", "score": "0.5154065", "text": "def reason(self) -> str:\n return pulumi.get(self, \"reason\")", "title": "" }, { "docid": "b8add316c74c628b96fdc7b9c01daae8", "score": "0.5152142", "text": "def print_exc_plus( ):\r\n msg = \"\"\r\n tb = sys.exc_info( )[2]\r\n while tb.tb_next:\r\n tb = tb.tb_next\r\n stack = [ ]\r\n f = tb.tb_frame\r\n while f:\r\n if r'\\sahm\\\\' in f.f_code.co_filename:\r\n stack.append(f)\r\n f = f.f_back\r\n stack.reverse( )\r\n traceback.print_exc( )\r\n msg += \"\\n\" + \"Locals by frame, innermost last\"\r\n for frame in stack:\r\n msg += \"\\n\"\r\n msg += \"\\n\" + \"Frame %s in %s at line %s\" % (frame.f_code.co_name,\r\n frame.f_code.co_filename,\r\n frame.f_lineno)\r\n msg += \"\\n\"\r\n for key, value in frame.f_locals.items( ):\r\n msg += \"\\t%20s = \" % key\r\n # we must _absolutely_ avoid propagating exceptions, and str(value)\r\n # COULD cause any exception, so we 
MUST catch any...:\r\n try:\r\n msg += str(value)\r\n except:\r\n msg += \"<ERROR WHILE PRINTING VALUE>\"\r\n \r\n msg += \"\\n\\n\" + ' '.join([str(i) for i in sys.exc_info()[:2]])\r\n \r\n return msg", "title": "" }, { "docid": "e650b85d71c67437113c772e1c3d523c", "score": "0.51513445", "text": "def exception_info(e):\n except_type, except_class, tb = sys.exc_info()\n error_tuple = (except_type, except_class, traceback.extract_tb(tb))\n return error_tuple", "title": "" }, { "docid": "fa438e4ecdbf0606300fb3df5e741335", "score": "0.51506144", "text": "def Message(self, granular = False):\r\n if granular:\r\n granular = ''\r\n if len(self.Contents) > 0:\r\n granular = \"The following command line errors were improperly set: \\n{ %s }\" % (',\\n'.join([arg + ' : ' + msg for arg, msg in self.Contents]))\r\n # Include information about invalid arguments being passed if any occurred:\r\n if len(self.InvalidArgs) > 0:\r\n granular += ('\\n' if granular else '') + 'The following arguments are invalid: \\n{ %s }' % (','.join(self.InvalidArgs))\r\n return granular\r\n else:\r\n return CommandLineErrors.__Concise % self.ErrorCount()", "title": "" } ]
889e0e73799057cf38eacd20067fa5b8
Test case for api_v1_statuses_secrets_get
[ { "docid": "22d22c4deda381211a78dce4bd31ebf8", "score": "0.9513433", "text": "def test_api_v1_statuses_secrets_get(self):\n pass", "title": "" } ]
[ { "docid": "db56674662f0780175ab53fac68d5b6a", "score": "0.6973698", "text": "def test_get_all_secrets(self):\n pass", "title": "" }, { "docid": "233629767b99031fd14f7b095067d9ae", "score": "0.66280454", "text": "def get_secrets(self):\n secret_url = \"{}/secrets\".format(self.api_base_url)\n try:\n logger.debug(\"Retrieving secrets from API endpoint\")\n resp = self.session.get(secret_url)\n except Exception:\n logger.exception(\"Failed to get secrets. Aborting\")\n failed_exit()\n if resp.status_code != 200:\n logger.error(\"Failed to get secrets. Endpoint returned status code {}.\\nFull response: {}\"\n .format(resp.status_code, resp.text))\n failed_exit()\n return resp.json()", "title": "" }, { "docid": "8f894670456d448c3a024a1174b116c2", "score": "0.6238599", "text": "def test_read_image_openshift_io_v1_namespaced_secret_list_secrets(self):\n pass", "title": "" }, { "docid": "aee205342d00854a2ba4f0bf785f827a", "score": "0.621693", "text": "def test_api_v1_statuses_intelligence_get(self):\n pass", "title": "" }, { "docid": "3eba8f92986e4e626847bfb355b7e06f", "score": "0.6199641", "text": "def test_get_secret(self):\n pass", "title": "" }, { "docid": "d110f96de2d87a59ee9157892746f8e9", "score": "0.6166211", "text": "def test_api_can_list_secretsanta(self):\n res = self.client.get('/api/santas/', format=\"json\")\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "0201c547c3a1933476fc1ee16763f267", "score": "0.6151494", "text": "def test_get_status_secrets_error(mocker, gc):\n mocker.patch(f'{import_path}.OAuth2Connector.get_access_token', side_effect=Exception)\n assert gc.get_status().status is False", "title": "" }, { "docid": "d1013b2b802a389b31cbddc0c939bfaa", "score": "0.60816294", "text": "def available_secrets(self) -> 'outputs.SecretsResponse':\n return pulumi.get(self, \"available_secrets\")", "title": "" }, { "docid": "a8acf5fb7155e9604c10ca89bd8b9f0a", "score": "0.6064439", "text": "def _fetch_secrets(vault_url, path, token):\n url = _url_joiner(vault_url, 'v1', path)\n resp = requests.get(url, headers=VaultLoader._get_headers(token))\n resp.raise_for_status()\n data = resp.json()\n if data.get('errors'):\n raise VaultException(u'Error fetching Vault secrets from path {}: {}'\n .format(path, data['errors']))\n return data['data']", "title": "" }, { "docid": "7818e3d1e4d1f342e438c241fdc65384", "score": "0.6009855", "text": "def test_get_statuses_list(self):\r\n response = self.client.get(\r\n path=reverse('api_infrastructure_statuses'), **self.authentication_headers)\r\n self.assertEquals(response.status_code, 200)", "title": "" }, { "docid": "7db67e7331a96b727c9971648d408b17", "score": "0.5968243", "text": "def get_secret_output(secrets: Optional[pulumi.Input[Sequence[pulumi.InputType['GetSecretSecretArgs']]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSecretResult]:\n ...", "title": "" }, { "docid": "21f2a45422c397b0786b51fdaa58269b", "score": "0.5908694", "text": "def test_service_secret_not_set():\n # pylint: disable=protected-access\n config_instance = config._get()\n secret_string = \"secret string 1\"\n\n stubber = stub.Stubber(config._SECRETS_MANAGER_CLIENT)\n expected_params = {\"SecretId\": config_instance.service_secret_name}\n stubber.add_response(\n \"get_secret_value\", {\"SecretString\": secret_string}, expected_params\n )\n stubber.activate()\n\n service_secret = config_instance.service_secret\n\n stubber.assert_no_pending_responses()\n assert service_secret == secret_string.encode()\n\n 
service_secret = config_instance.service_secret\n\n assert service_secret == secret_string.encode()", "title": "" }, { "docid": "c180dd98c393001def47d65bdb505bb8", "score": "0.59069747", "text": "def get_secrets():\n # Handle local credentials.\n if \"AWS_SESSION_TOKEN\" not in os.environ:\n if \"AWS_ROLE_ARN\" in os.environ:\n session = boto3.Session(aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'], aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'])\n client = session.client(service_name='sts', region_name='us-west-2')\n creds = client.assume_role(RoleArn=os.environ['AWS_ROLE_ARN'], RoleSessionName='test')['Credentials']\n os.environ['AWS_ACCESS_KEY_ID'] = creds['AccessKeyId']\n os.environ['AWS_SECRET_ACCESS_KEY'] = creds['SecretAccessKey']\n os.environ['AWS_SESSION_TOKEN'] = creds['SessionToken']\n\n else:\n raise ValueError('Missing AWS credentials')\n\n # Create a session using the given creds\n session = boto3.Session(aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'], aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'], aws_session_token=os.environ['AWS_SESSION_TOKEN'])\n client = session.client(service_name='secretsmanager', region_name='us-west-2')\n try:\n get_secret_value_response = client.get_secret_value(\n SecretId='drivers/test'\n )\n except Exception as e:\n # For a list of exceptions thrown, see\n # https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html\n raise e\n\n # Decrypts secret using the associated KMS key.\n return json.loads(get_secret_value_response['SecretString'])", "title": "" }, { "docid": "49b34cf4f5ce5348ffd1738e9f41046f", "score": "0.57949907", "text": "def secrets(self) -> Optional[Sequence['outputs.SecretReferenceResponse']]:\n return pulumi.get(self, \"secrets\")", "title": "" }, { "docid": "5d2a52bc009031cf8bedcbb2999ed56b", "score": "0.57918", "text": "def list_(client_obj, path):\n result = client_obj.list_secrets(path=path)\n click.echo(\"\\n\".join(result))", "title": "" }, { "docid": "ce66a60a9c64c8018ae1f6f037915292", "score": "0.5768433", "text": "def test_api_v1_statuses_registry_get(self):\n pass", "title": "" }, { "docid": "44252d058e285f963b65a53e4d16c665", "score": "0.5760645", "text": "def get_secret(setting, secrets=secrets):\n try:\n return secrets[setting]\n except KeyError:\n print('Secrets not downloaded')", "title": "" }, { "docid": "63fcb48d777104f6fb27beeefc83c881", "score": "0.57218325", "text": "def get_secrets():\n\n return SECRETS", "title": "" }, { "docid": "8e2fc31ba5970ce10717d522086e8eea", "score": "0.57007414", "text": "def test_get_access_token_secret(self):\n self.assertEqual(\"Acc_tok_sec\",self._account.get_access_token_secret(),\"Access token secrets don't match\")", "title": "" }, { "docid": "bd89d70762396e7420c2e259d4eedcf1", "score": "0.5699439", "text": "def _get_secrets(secrets_location):\n with open(secrets_location, 'r') as json_file:\n secrets= json.load(json_file, object_hook=Config.decode_config)\n\n return secrets", "title": "" }, { "docid": "2687f0c3b7193d388309115c3dd31647", "score": "0.5694316", "text": "def test_get_statuses_list(self):\r\n response = self.client.get(\r\n path=reverse('api_sales_status'), **self.authentication_headers)\r\n self.assertEquals(response.status_code, 200)", "title": "" }, { "docid": "d115c18fd23e86ddfb2d7851d8968167", "score": "0.56911546", "text": "def secrets_pull(src, environment, secret, type):\n pass", "title": "" }, { "docid": "93f7e6d5ab902b265cde354fd394d4dd", "score": "0.5677414", "text": "def get_secrets(source):\n # 
Expand ~ to full path\n secrets_dict = yaml.load(source)\n\n # we need to have at least api_key in secrets\n try:\n secrets = Secrets(secrets_dict)\n except ConfigFieldMissingError as e:\n print('Failed to read values from secrets file: {}'.format(e))\n print('Exiting.')\n sys.exit(1)\n\n return secrets", "title": "" }, { "docid": "3009832fac69294eef937b315c92f498", "score": "0.5655301", "text": "def test_get_shared_secret(self):\n pass", "title": "" }, { "docid": "64567b7364f9800f1c9e3f7569e5a69e", "score": "0.5650154", "text": "def get_secret(secrets: Optional[Sequence[pulumi.InputType['GetSecretSecretArgs']]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSecretResult:\n __args__ = dict()\n __args__['secrets'] = secrets\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('aws:kms/getSecret:getSecret', __args__, opts=opts, typ=GetSecretResult).value\n\n return AwaitableGetSecretResult(\n id=pulumi.get(__ret__, 'id'),\n secrets=pulumi.get(__ret__, 'secrets'))", "title": "" }, { "docid": "6f0ccd3666e176b7ea01f424b7d03dbb", "score": "0.55011624", "text": "def test_api_v1_statuses_serverless_radar_get(self):\n pass", "title": "" }, { "docid": "fd050230796c78b14b7cd0fede2c5e58", "score": "0.5452719", "text": "def put_config_secrets(\n config_key: str,\n *,\n db: Session = Depends(deps.get_db),\n storage_secrets: Dict[str, str] = Body(...),\n verify: Optional[bool] = True,\n) -> TestStatusMessage:\n logger.info(f\"Finding storage config with key '{config_key}'\")\n\n storage_config = StorageConfig.get_by(db=db, field=\"key\", value=config_key)\n if not storage_config:\n raise HTTPException(\n status_code=HTTP_404_NOT_FOUND,\n detail=f\"No storage configuration with key {config_key}.\",\n )\n\n try:\n secrets_schema = get_schema_for_secrets(\n storage_type=storage_config.type,\n secrets=storage_secrets,\n )\n except KeyError as exc:\n raise HTTPException(\n status_code=HTTP_422_UNPROCESSABLE_ENTITY,\n detail=exc.args[0],\n )\n except ValueError as exc:\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST,\n detail=exc.args[0],\n )\n\n logger.info(f\"Updating storage config secrets for config with key '{config_key}'\")\n try:\n storage_config.set_secrets(db=db, storage_secrets=secrets_schema.dict())\n except ValueError as exc:\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST,\n detail=exc.args[0],\n )\n\n msg = f\"Secrets updated for StorageConfig with key: {config_key}.\"\n if verify:\n status = secrets_are_valid(secrets_schema, storage_config.type)\n if status:\n logger.info(f\"Storage secrets are valid for config with key '{config_key}'\")\n else:\n logger.warning(\n f\"Storage secrets are invalid for config with key '{config_key}'\"\n )\n\n return TestStatusMessage(\n msg=msg, test_status=\"succeeded\" if status else \"failed\"\n )\n\n return TestStatusMessage(msg=msg, test_status=None)", "title": "" }, { "docid": "690682bef65ab994bfc2461e73e5c817", "score": "0.5451394", "text": "def test_get_stashes_unauthorized(self):\n with self.client as c:\n \n resp = c.get(\"/api/stashes\")\n \n self.assertEqual(resp.status_code, 401)\n self.assertIn(\"Unauthorized\", str(resp.data))", "title": "" }, { "docid": "b5f2a6719b7447d058f9197b9a113eeb", "score": "0.54076934", "text": "def test_read_secret_v2_key(self):\n # given path secrets/mysecret generate v2 output\n version = {\n \"v2\": True,\n \"data\": \"secrets/data/mysecret\",\n \"metadata\": \"secrets/metadata/mysecret\",\n \"type\": \"kv\",\n }\n 
mock_version = MagicMock(return_value=version)\n mock_vault = MagicMock()\n mock_vault.return_value.status_code = 200\n v2_return = {\n \"data\": {\n \"data\": {\"akey\": \"avalue\"},\n \"metadata\": {\n \"created_time\": \"2018-10-23T20:21:55.042755098Z\",\n \"destroyed\": False,\n \"version\": 13,\n \"deletion_time\": \"\",\n },\n }\n }\n\n mock_vault.return_value.json.return_value = v2_return\n with patch.dict(\n vault.__utils__, {\"vault.make_request\": mock_vault}\n ), patch.dict(vault.__utils__, {\"vault.is_v2\": mock_version}):\n vault_return = vault.read_secret(\"/secret/my/secret\", \"akey\")\n\n self.assertEqual(vault_return, \"avalue\")", "title": "" }, { "docid": "41a61d9a53d640fd09ac30a042e188e9", "score": "0.5401017", "text": "def get_secret(setting, secrets_in=secrets):\n try:\n return secrets_in[setting]\n except KeyError:\n raise ImproperlyConfigured(\"Set the {} setting\".format(setting))", "title": "" }, { "docid": "210ef7886340d0c9d471c528598577f2", "score": "0.53979146", "text": "def test_read_secret_v2(self):\n # given path secrets/mysecret generate v2 output\n version = {\n \"v2\": True,\n \"data\": \"secrets/data/mysecret\",\n \"metadata\": \"secrets/metadata/mysecret\",\n \"type\": \"kv\",\n }\n mock_version = MagicMock(return_value=version)\n mock_vault = MagicMock()\n mock_vault.return_value.status_code = 200\n v2_return = {\n \"data\": {\n \"data\": {\"akey\": \"avalue\"},\n \"metadata\": {\n \"created_time\": \"2018-10-23T20:21:55.042755098Z\",\n \"destroyed\": False,\n \"version\": 13,\n \"deletion_time\": \"\",\n },\n }\n }\n\n mock_vault.return_value.json.return_value = v2_return\n with patch.dict(\n vault.__utils__, {\"vault.make_request\": mock_vault}\n ), patch.dict(vault.__utils__, {\"vault.is_v2\": mock_version}):\n # Validate metadata returned\n vault_return = vault.read_secret(\"/secret/my/secret\", metadata=True)\n self.assertDictContainsSubset({\"data\": {\"akey\": \"avalue\"}}, vault_return)\n # Validate just data returned\n vault_return = vault.read_secret(\"/secret/my/secret\")\n self.assertDictContainsSubset({\"akey\": \"avalue\"}, vault_return)", "title": "" }, { "docid": "42fb26bc094bdcaf4c24738e30e0f7d0", "score": "0.53950924", "text": "def list_secrets(client: hvac.Client, path: str) -> Union[dict, None]:\n secret_list = client.list(path=path)\n\n return secret_list.get('data', None)", "title": "" }, { "docid": "94d4fb16cf7cd4fdf2803326695fc826", "score": "0.53946084", "text": "def _get_secret_dict(arn, stage, token=None):\n # Only do VersionId validation against the stage if a token is passed in\n if token:\n secret = secrets_manager_client.get_secret_value(SecretId=arn, VersionId=token, VersionStage=stage)\n else:\n secret = secrets_manager_client.get_secret_value(SecretId=arn, VersionStage=stage)\n return json.loads(secret['SecretString'])", "title": "" }, { "docid": "271e95aa7210b35fb2626cb442d33a0e", "score": "0.539014", "text": "def test_get_ticket_statuses_list(self):\r\n response = self.client.get(\r\n path=reverse('api_services_status'), **self.authentication_headers)\r\n self.assertEquals(response.status_code, 200)", "title": "" }, { "docid": "27151fa91a5af259d555e0d0cdaf1d3a", "score": "0.53884006", "text": "def list_secrets(self, secret_type: SecretType) -> List[V1Secret]:\n secret_list = self._api.list_secret_for_all_namespaces(\n label_selector=f\"{KUBERNETES_TOKEN_TYPE_LABEL}={secret_type.value}\"\n )\n return secret_list.items", "title": "" }, { "docid": "93f6c14a7b385f119aee1922ee539151", "score": "0.53876984", "text": "def 
test_get_secret_encoding(self, input_secret_dto):\n secret = input_secret_dto.secret\n key_spec = input_secret_dto.key_spec\n secret_type = input_secret_dto.type\n\n decrypt_mock = self.retrieving_plugin.decrypt\n decrypt_mock.return_value = base64.decodebytes(secret)\n\n secret_model = self.context.secret_model\n secret_model.algorithm = key_spec.alg\n secret_model.bit_length = key_spec.bit_length\n secret_model.mode = key_spec.mode\n\n secret_dto = self.plugin_to_test.get_secret(\n secret_type,\n None, # Secret metadata is not relevant to store_crypto process.\n self.context)\n\n # Verify response.\n self.assertIsInstance(secret_dto, secret_store.SecretDTO)\n self.assertEqual(secret, secret_dto.secret)\n self.assertEqual(secret_type, secret_dto.type)\n self.assertIsInstance(secret_dto.key_spec, secret_store.KeySpec)\n self.assertEqual(\n secret_model.algorithm, secret_dto.key_spec.alg)\n self.assertEqual(\n secret_model.bit_length, secret_dto.key_spec.bit_length)\n self.assertEqual(\n secret_model.mode, secret_dto.key_spec.mode)", "title": "" }, { "docid": "719d6d9d865657d70aebd21657a28729", "score": "0.5378437", "text": "def test_create_secret_as_audit(self):\n resp = self.behaviors.create_secret_from_config()\n self.assertEqual(resp.status_code, 403)", "title": "" }, { "docid": "475a58beada1c3d9fc3279af922a1053", "score": "0.53765696", "text": "def get_secret(setting, secrets=secrets):\n try:\n return secrets[setting]\n except KeyError:\n raise Exception(\"Set the {} setting\".format(setting))", "title": "" }, { "docid": "12452c58422fbdb4b6b6087d745e9c47", "score": "0.5353421", "text": "def test_read_secret_v1_key(self):\n version = {\"v2\": False, \"data\": None, \"metadata\": None, \"type\": None}\n mock_version = MagicMock(return_value=version)\n mock_vault = MagicMock()\n mock_vault.return_value.status_code = 200\n mock_vault.return_value.json.return_value = {\"data\": {\"key\": \"somevalue\"}}\n with patch.dict(\n vault.__utils__, {\"vault.make_request\": mock_vault}\n ), patch.dict(vault.__utils__, {\"vault.is_v2\": mock_version}):\n vault_return = vault.read_secret(\"/secret/my/secret\", \"key\")\n\n self.assertEqual(vault_return, \"somevalue\")", "title": "" }, { "docid": "3e8d04195f978be02bb0cb5be7ea1f30", "score": "0.5353245", "text": "def secrets():\n return render_template(\"secrets.html\")", "title": "" }, { "docid": "56e36ba933bbc41fa9156ec54ec07148", "score": "0.5343", "text": "def test_config(client):\n response = client.get(\"/health/config/\")\n assert 200 <= response.status_code < 300\n assert response.json[\"SECRET_KEY\"] == \"<hidden>\"\n assert response.json[\"SENTRY_DSN\"] == \"<hidden>\"", "title": "" }, { "docid": "0c0b2353ddaddaecdf973d885b83a279", "score": "0.5338981", "text": "def get(client_obj, text, name):\n secret = client_obj.get_secret(path=name)\n if text:\n click.echo(secret)\n return\n\n click.echo(yaml.safe_dump(secret,\n default_flow_style=False,\n explicit_start=True), nl=False)", "title": "" }, { "docid": "40311a0e8e015b158eb33f82caa8d224", "score": "0.53214324", "text": "def get_secret(setting, secrets=secrets):\n try:\n return secrets[setting]\n except KeyError:\n raise ImproperlyConfigured(\"Set the {} setting\".format(setting))", "title": "" }, { "docid": "0ce051d99b526f140999d88458fe80f7", "score": "0.5318574", "text": "def get_secrets(bucket, key):\n warnings.warn(\"Use Vault for secrets\", DeprecationWarning)\n global _secrets\n if not _secrets:\n logger.debug('Obtaining secrets from S3 bucket %s, key %s...', bucket, key)\n s3 = 
get_s3(bucket)\n obj = s3.open_r(key)\n _secrets = json.load(obj)\n else:\n logger.debug('Using pre-fetched secrets...')\n return _secrets", "title": "" }, { "docid": "55f8d296b5a0c89ca355da0708546ab0", "score": "0.5313115", "text": "def secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SecretArgs']]]]:\n return pulumi.get(self, \"secrets\")", "title": "" }, { "docid": "1cebbba967213f746ea2e64b7ccfae72", "score": "0.5301734", "text": "async def test_device_diagnostics_secret_value(\n hass: HomeAssistant,\n client,\n multisensor_6_state,\n integration,\n hass_client: ClientSessionGenerator,\n version_state,\n) -> None:\n\n def _find_ultraviolet_val(data: dict) -> dict:\n \"\"\"Find ultraviolet property value in data.\"\"\"\n return next(\n val\n for val in (\n data[\"values\"]\n if isinstance(data[\"values\"], list)\n else data[\"values\"].values()\n )\n if val[\"commandClass\"] == CommandClass.SENSOR_MULTILEVEL\n and val[\"property\"] == PROPERTY_ULTRAVIOLET\n )\n\n node_state = copy.deepcopy(multisensor_6_state)\n # Force a value to be secret so we can check if it gets redacted\n secret_value = _find_ultraviolet_val(node_state)\n secret_value[\"metadata\"][\"secret\"] = True\n node = Node(client, node_state)\n client.driver.controller.nodes[node.node_id] = node\n client.driver.controller.emit(\"node added\", {\"node\": node})\n await hass.async_block_till_done()\n dev_reg = dr.async_get(hass)\n device = dev_reg.async_get_device(identifiers={get_device_id(client.driver, node)})\n assert device\n\n diagnostics_data = await get_diagnostics_for_device(\n hass, hass_client, integration, device\n )\n test_value = _find_ultraviolet_val(diagnostics_data[\"state\"])\n assert test_value[\"value\"] == REDACTED", "title": "" }, { "docid": "f5d26a9cc3e0c869a585aed2f71730ac", "score": "0.5273805", "text": "def vm_secrets(self) -> Optional[Sequence['outputs.VaultSecretGroupResponse']]:\n return pulumi.get(self, \"vm_secrets\")", "title": "" }, { "docid": "9ed57b3a306cd6ab8bb80a7ed9255eca", "score": "0.5261279", "text": "def get_secrets(self):\n credentials = ServicePrincipalCredentials(\n client_id=self._config[\"client_id\"],\n secret=self._config[\"app_secret\"],\n tenant=self._config[\"tenant_id\"],\n )\n client = KeyVaultClient(credentials)\n\n try:\n secrets = client.get_secrets(self._config[\"key_vault_uri\"])\n except KeyVaultErrorException:\n secrets = []\n\n return secrets", "title": "" }, { "docid": "724b6be2d9b850d8949594980ef468ae", "score": "0.52607644", "text": "def secrets(self) -> pulumi.Input['SasDatastoreSecretsArgs']:\n return pulumi.get(self, \"secrets\")", "title": "" }, { "docid": "c833cf01d48f445f9118ba78cb4ac270", "score": "0.5258891", "text": "def test_delete_all_secrets(self):\n pass", "title": "" }, { "docid": "e867ae0b990855da1cca591a1daf5b66", "score": "0.52417755", "text": "def test_get_status_no_secrets(gc, remove_secrets):\n assert gc.get_status().status is False", "title": "" }, { "docid": "16ca306f5db0be0dc3a84640951aff0f", "score": "0.52229756", "text": "def test_api_v1_statuses_serverless_auto_deploy_get(self):\n pass", "title": "" }, { "docid": "8c17b050f7d158017c5a21457cb78626", "score": "0.5211298", "text": "async def test_find_pets_by_status(client):\n params = [('status', 'available')]\n headers = { \n 'Accept': 'application/json',\n 'Authorization': 'Bearer special-key',\n }\n response = await client.request(\n method='GET',\n path='/v2/pet/findByStatus',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + 
(await response.read()).decode('utf-8')", "title": "" }, { "docid": "d0af2f1398b0f3c6a6e12e16b5e1e998", "score": "0.5163564", "text": "def get(self) -> Optional[Any]:\n if isinstance(prefect.context.get(\"flow\"), prefect.core.flow.Flow):\n raise ValueError(\n \"Secrets should only be retrieved during a Flow run, not while building a Flow.\"\n )\n\n secrets = prefect.context.get(\"secrets\", {})\n try:\n value = secrets[self.name]\n except KeyError:\n if prefect.config.backend != \"cloud\":\n raise ValueError(\n 'Local Secret \"{}\" was not found.'.format(self.name)\n ) from None\n if prefect.context.config.cloud.use_local_secrets is False:\n try:\n result = self.client.graphql(\n \"\"\"\n query($name: String!) {\n secret_value(name: $name)\n }\n \"\"\",\n variables=dict(name=self.name),\n )\n except ClientError as exc:\n if \"No value found for the requested key\" in str(exc):\n raise KeyError(\n f\"The secret {self.name} was not found. Please ensure that it \"\n f\"was set correctly in your tenant: https://docs.prefect.io/\"\n f\"orchestration/concepts/secrets.html\"\n ) from exc\n else:\n raise exc\n # the result object is a Box, so we recursively restore builtin\n # dict/list classes\n result_dict = result.to_dict()\n value = result_dict[\"data\"][\"secret_value\"]\n else:\n raise ValueError(\n 'Local Secret \"{}\" was not found.'.format(self.name)\n ) from None\n try:\n return json.loads(value)\n except (json.JSONDecodeError, TypeError):\n return value", "title": "" }, { "docid": "9fa47cacba80aeb4793d0ff0474b56ce", "score": "0.5159782", "text": "def secrets(self) -> pulumi.Input['ServicePrincipalDatastoreSecretsArgs']:\n return pulumi.get(self, \"secrets\")", "title": "" }, { "docid": "a63704b2b62f41f54e9d38e230c70c50", "score": "0.5145105", "text": "def _secret_not_in_order():\n abort(falcon.HTTP_400, _(\"Secret metadata expected but not received.\"))", "title": "" }, { "docid": "e22b5e04554fc667019af546e1991ee4", "score": "0.5142224", "text": "def load_twitter_credentials():\n consumer_key = \"\"\n consumer_secret = \"\"\n access_key = \"\"\n access_secret = \"\"\n with open(\"twitter_credentials.json\") as json_file:\n data = json.load(json_file)\n consumer_key = data[\"consumer_key\"]\n consumer_secret = data[\"consumer_secret\"]\n access_key = data[\"access_key\"]\n access_secret = data[\"access_secret\"]\n\n return (consumer_key, consumer_secret, access_key, access_secret)", "title": "" }, { "docid": "e1d057c9f0c5764511eec83a9a9e92d4", "score": "0.51290625", "text": "def get_app_secrets():\n resp = _get_resp(GET_SECRETS_URL)\n return (resp['app_id'], resp['app_secret'])", "title": "" }, { "docid": "e4a7c44b216e28305f6369d3f6b65ab8", "score": "0.5125082", "text": "def secrets(self) -> pulumi.Input['AccountKeyDatastoreSecretsArgs']:\n return pulumi.get(self, \"secrets\")", "title": "" }, { "docid": "29d9e0cbb70133105ae7c490a1ff3a9e", "score": "0.5084021", "text": "def test_add_secret(self):\n pass", "title": "" }, { "docid": "4f2123c2e835ef8980fb87313894d6fa", "score": "0.5082062", "text": "def test_status_list_success(self, twitter_mock):\n twitter_mock.return_value = self.status\n status_list = [self.status.id]\n\n response = self.twitter.tweet('\"Select all\" and archive your Gmail inbox. 
The page loads so much faster!')\n self.assertEqual(status_list, response, 'The status response is different')", "title": "" }, { "docid": "f1b0223a73045aa0f24a3e2dcf7d621e", "score": "0.5076851", "text": "def test_withnout_token(client):\n req = client.get(\"/api/v1/test_token\")\n\n assert req.status_code == 401", "title": "" }, { "docid": "9cc96b399e17efff14cb00391a1585ff", "score": "0.50657356", "text": "def get_secrets():\n local_file = os.path.join(CONF_ROOT, 'pillar', env.environment, 'secrets.sls')\n if os.path.exists(local_file):\n local('cp {0} {0}.bak'.format(local_file))\n remote_file = os.path.join('/srv/pillar/', env.environment, 'secrets.sls')\n get(remote_file, local_file)", "title": "" }, { "docid": "7d99f46c3a48207bf2404e10762e2de3", "score": "0.5051772", "text": "def setup_secrets():\n # get username and password\n from vmc.common.config import config\n username = config.current_profile.get('connection', 'username')\n password = config.current_profile.get('connection', 'password')\n #setup secrets\n append_entry_to_secrets(username, password)", "title": "" }, { "docid": "20c8c170357aee45f00a02417d26668e", "score": "0.50494665", "text": "def get_secrets(self):\n return SECRETS_TEMPLATE.format(\n total_words=len(dictionary.get_dict()),\n valid_words=len(valid_words),\n state=self.state,\n word=self.word,\n word_revealed=self.word_revealed,\n guesses_right=self.guesses_right,\n guesses_wrong=self.guesses_wrong,\n )", "title": "" }, { "docid": "e60a46235c5099d4fa61aa6dd57ec53e", "score": "0.5048611", "text": "def test_set_get_delete_secret(test_environment):\n plaintext = str(uuid.uuid4())\n key = str(uuid.uuid4())\n with pytest.raises(RequiresVaultError):\n test_environment.set_secret(key, plaintext)\n\n with pytest.raises(RequiresVaultError):\n test_environment.get_secret(key)\n\n with pytest.raises(RequiresVaultError):\n test_environment.delete_secret(key)", "title": "" }, { "docid": "404811833bf002d20262c69cba71dc62", "score": "0.50380886", "text": "def test_retrieve_only_suggestions_with_status(self):\n accepted = 'accepted'\n\n SuggestionFactory.create(status=accepted)\n SuggestionFactory.create(status=accepted)\n SuggestionFactory.create(status='other')\n\n response = self.client.get(self.suggestion_list_url, {'status': accepted},\n HTTP_AUTHORIZATION=self.user_1_token)\n\n data = json.loads(response.content)\n self.assertEqual(len(data), 2)\n\n self.assertEqual(data[0]['status'].lower(), accepted)\n self.assertEqual(data[1]['status'].lower(), accepted)", "title": "" }, { "docid": "663657bb42b9bb742814eae232011789", "score": "0.5036149", "text": "def credential_test(self):\n r = requests.get(self.base_uri + 'search.json?q=test')\n return r.status_code", "title": "" }, { "docid": "9e3f9281d57dc9363bda66d0d65b3e1c", "score": "0.50339514", "text": "def fetch_twitter_creds():\n try:\n cred_path = os.path.join(cfg.windbagger_data, 'credentials.json')\n return json.load(open(cred_path, 'r'))\n except IOError as e:\n logger.critcal('Credentials for twitter not found!')", "title": "" }, { "docid": "ecd5ff6867552aab5b2612e9dc72a4bf", "score": "0.50285727", "text": "def get_secret(secret_name):\n client = boto3.client(service_name=\"secretsmanager\")\n get_secret_value_response = client.get_secret_value(SecretId=secret_name)\n\n if \"SecretString\" in get_secret_value_response:\n secret = get_secret_value_response[\"SecretString\"]\n return json.loads(secret)\n else:\n decoded_binary_secret = base64.b64decode(\n get_secret_value_response[\"SecretBinary\"]\n )\n return 
decoded_binary_secret", "title": "" }, { "docid": "92b520c20d21140f258c95ec4acea07d", "score": "0.50270486", "text": "async def test_health_liveness_get(client):\n headers = { \n }\n response = await client.request(\n method='GET',\n path='/api/auth/v2/health/liveness',\n headers=headers,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "title": "" }, { "docid": "72eba3a1d63a3ff0bbbf1edf9e5743b8", "score": "0.5025915", "text": "def test_verify_token(self):\n #print \"test_verify_token\"\n resp = app.get('/auth/dropbox/%s' % userid).json\n self.assertEqual(resp[\"state\"],1)", "title": "" }, { "docid": "9cb2c3af5a444784eed992bb02de2bd0", "score": "0.5021788", "text": "def oauth_secrets(doc):\n if doc.get('type') == 'oauth_client_credentials':\n yield doc.get('client_id'), doc['_id']", "title": "" }, { "docid": "c26d3200aec33394c3ab8b53376534d5", "score": "0.5008839", "text": "def test_get_stashes(self):\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.user.id\n sess[CURR_USER_NAME] = self.user.username\n\n resp = c.get(\"/api/stashes\")\n\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\"test stash name\", str(resp.data))", "title": "" }, { "docid": "8fbab24dbc0b01506327c91bcce4a47c", "score": "0.50083536", "text": "def get_secret(setting, secrets=SECRETS):\n try:\n return secrets[setting]\n except KeyError:\n error_msg = \"Set the {0} environment variable in the secret file\".format(setting)\n logger.error(\"Set the {0} environment variable in the secret file\".format(setting))\n raise ImproperlyConfigured(error_msg)", "title": "" }, { "docid": "88f62c895819267d7a6fb37180ceac19", "score": "0.50080615", "text": "def test_api_authorization_list_units_get(self):\n pass", "title": "" }, { "docid": "5dcdf485a6c0ed977f86e8b34587fedb", "score": "0.50071317", "text": "def load_twitter_credentials(keys_filename):\n with open(keys_filename) as api_credentials:\n api_twitter = json.loads(api_credentials.read())\n consumer_key = api_twitter['Twitter'][\"consumer_key\"]\n consumer_secret = api_twitter['Twitter'][\"consumer_secret\"]\n access_token = api_twitter['Twitter'][\"access_token\"]\n access_secret = api_twitter['Twitter'][\"access_secret\"]\n logging.debug(\"{}-{}-{}-{}\".format(consumer_key, consumer_secret, access_token, access_secret))\n return consumer_key, consumer_secret, access_token, access_secret", "title": "" }, { "docid": "d64bb449cb1640794a52cd75804449a2", "score": "0.50000733", "text": "def get_secret(setting, secrets=secrets):\n try:\n return secrets[setting]\n except KeyError:\n error_msg = \"Set the {0} environment variable in the secret file\".format(setting)\n raise ImproperlyConfigured(error_msg)", "title": "" }, { "docid": "88e9cb67023e569e30335a5e9634f738", "score": "0.49848822", "text": "def get_api_secret():\n config = ConfigParser.ConfigParser()\n config.read(\"api_keys.ini\")\n secret = config.get(\"ApiKeys\", \"secret\")\n return secret", "title": "" }, { "docid": "618d50f5802a6bbad64709b3180da812", "score": "0.4984404", "text": "def api_secret_key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"api_secret_key\")", "title": "" }, { "docid": "16481a0eadcbfd374fd335f21f6a482a", "score": "0.49727523", "text": "def test_valid_key():\n\n response = sdk_instance.get_orgs()\n\n assert response.status_code != 401", "title": "" }, { "docid": "4dc8e95f93e312fad89a03275be89953", "score": "0.49555314", "text": "def test_get_call(self, dut):\n username = 
DEVICE_UNDER_TEST['username']\n passwd = DEVICE_UNDER_TEST['password']\n dut.login(username=username, password=passwd)\n url = dut.url_prefix + '/status'\n r = dut.session.get(url)\n assert r.status_code >= 200 <= 299", "title": "" }, { "docid": "8692fb7889e202b56f6a9e60e6707f17", "score": "0.49522367", "text": "def get_secret(secret_name):\n\n # Create a Secrets Manager client\n client = boto3.client('secretsmanager')\n\n try:\n get_secret_value_response = client.get_secret_value(\n SecretId=secret_name\n )\n except ClientError as e:\n if e.response['Error']['Code'] == 'DecryptionFailureException':\n # Secrets Manager can't decrypt the protected secret text using the provided KMS key.\n # Deal with the exception here, and/or rethrow at your discretion.\n logger.error(\"Secrets Manager can't decrypt the protected secret text using the provided KMS key.\")\n raise e\n elif e.response['Error']['Code'] == 'InternalServiceErrorException':\n # An error occurred on the server side.\n # Deal with the exception here, and/or rethrow at your discretion.\n logger.error(\"An error occurred on the server side.\")\n raise e\n elif e.response['Error']['Code'] == 'InvalidParameterException':\n # You provided an invalid value for a parameter.\n # Deal with the exception here, and/or rethrow at your discretion.\n logger.error(\"Invalid value for a parameter\")\n raise e\n elif e.response['Error']['Code'] == 'InvalidRequestException':\n # You provided a parameter value that is not valid for the current state of the resource.\n # Deal with the exception here, and/or rethrow at your discretion.\n logger.error(\"Provided a parameter value is not valid for the current state of the resource\")\n raise e\n elif e.response['Error']['Code'] == 'ResourceNotFoundException':\n # We can't find the resource that you asked for.\n # Deal with the exception here, and/or rethrow at your discretion.\n logger.error(f\"Unable to find secret with name {secret_name}\")\n raise e\n else:\n # Decrypts secret using the associated KMS CMK.\n # Depending on whether the secret is a string or binary, one of these fields will be populated.\n if 'SecretString' in get_secret_value_response:\n return get_secret_value_response['SecretString']\n else:\n return base64.b64decode(get_secret_value_response['SecretBinary'])", "title": "" }, { "docid": "068e8ad8639fd2702786ca446da5e58e", "score": "0.49401957", "text": "def access_secret(project_id, location_id, secret_id):\n\n # Creates an API client for the Secret Manager API.\n client = secretmanager.SecretManagerServiceClient()\n\n secret = f\"projects/{project_id}/secrets/{secret_id}/versions/latest\"\n response = client.access_secret_version(name=secret)\n\n return response.payload.data", "title": "" }, { "docid": "77b270c92bc417c6d8e0ad991ea6d95a", "score": "0.49340248", "text": "def get_secret(name):\n log.info('Fetching secret from env var. 
VAR: {}'.format(name))\n kms = boto3.session.Session().client(\"kms\")\n return kms.decrypt(CiphertextBlob=base64.b64decode(os.environ[name]))[\"Plaintext\"]", "title": "" }, { "docid": "d377fd703d16cf1ef3b9911a5d6b5661", "score": "0.4927424", "text": "def secret(self):\n return self.oauth_secret", "title": "" }, { "docid": "dfdcf60c94b9299da6376f6199ec053c", "score": "0.49247336", "text": "def list_properties_of_secrets(self, **kwargs: \"Any\") -> AsyncIterable[SecretProperties]:\n return self._client.get_secrets(\n self.vault_endpoint,\n maxresults=kwargs.pop(\"max_page_size\", None),\n cls=lambda objs: [SecretProperties._from_secret_item(x) for x in objs],\n **kwargs\n )", "title": "" }, { "docid": "97ebae09b62678fd31867f2ac30dbbdb", "score": "0.48997778", "text": "def get_api_token(credentials_file):\n logging.info(\"Leyendo las credenciales de {}\".format(credentials_file))\n credentials = read_yaml_file(credentials_file)\n token = credentials['food_inspections']\n \n return token", "title": "" }, { "docid": "9d5c3cae96164fe5b30bd35db8c06e7a", "score": "0.48990205", "text": "def test_notestatus_list_api_unauthorized(self):\n\n # get response\n response = self.client.get('/api/notestatus/')\n # compare\n self.assertEqual(response.status_code, 401)", "title": "" }, { "docid": "a13153177d5d105ce05a243599dc8bc3", "score": "0.4898741", "text": "def get_secret(setting, secrets=secrets):\n try:\n return secrets[setting]\n except KeyError:\n error_msg = \"Set the {0} environment variable\".format(setting)\n raise ImproperlyConfigured(error_msg)", "title": "" }, { "docid": "6e7a7066247baf887df82206862ec644", "score": "0.48920795", "text": "def test_get_status(self):\n pass", "title": "" }, { "docid": "6e7a7066247baf887df82206862ec644", "score": "0.48920795", "text": "def test_get_status(self):\n pass", "title": "" }, { "docid": "3949a223b2c9d9692ccf4601964b22af", "score": "0.4889985", "text": "def get(self, key):\n\n rest_utils.validate_inputs({'key': key})\n return get_storage_manager().get(models.Secret, key)", "title": "" }, { "docid": "4168e0cce2374d46ab9350f16ef45832", "score": "0.4888779", "text": "def get_secret_value_output(resource_group_name: Optional[pulumi.Input[str]] = None,\n secret_resource_name: Optional[pulumi.Input[str]] = None,\n secret_value_resource_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSecretValueResult]:\n ...", "title": "" }, { "docid": "1dec3491237df09c200c9b9088125026", "score": "0.48811728", "text": "def secrets_profile(self) -> Optional['outputs.SecretsProfileResponse']:\n return pulumi.get(self, \"secrets_profile\")", "title": "" }, { "docid": "dd58a88a05d853a5729e775f4bd839b1", "score": "0.48607022", "text": "def test_get_secret(self):\n\n secret_dto = self.plugin_to_test.get_secret(\n secret_store.SecretType.OPAQUE,\n None, # Secret metadata is not relevant to store_crypto process.\n self.context)\n\n # Verify response.\n self.assertIsInstance(secret_dto, secret_store.SecretDTO)\n self.assertEqual(secret_store.SecretType.OPAQUE, secret_dto.type)\n self.assertEqual(\n base64.encodebytes(self.decrypted_secret).rstrip(b'\\n'),\n secret_dto.secret)\n self.assertEqual(\n self.encrypted_datum_model.content_type, secret_dto.content_type)\n self.assertIsInstance(secret_dto.key_spec, secret_store.KeySpec)\n self.assertEqual(\n self.secret_model.algorithm, secret_dto.key_spec.alg)\n self.assertEqual(\n self.secret_model.bit_length, secret_dto.key_spec.bit_length)\n self.assertEqual(\n 
self.secret_model.mode, secret_dto.key_spec.mode)\n\n # Verify decrypt plugin and method where invoked.\n decrypt_mock = self.retrieving_plugin.decrypt\n self.assertEqual(1, decrypt_mock.call_count)\n args, kwargs = decrypt_mock.call_args\n (\n test_decrypt,\n test_kek_meta,\n test_kek_meta_extended,\n test_project_id\n ) = tuple(args)\n\n self.assertIsInstance(test_decrypt, base.DecryptDTO)\n self.assertEqual(\n base64.b64decode(self.encrypted_datum_model.cypher_text),\n test_decrypt.encrypted)\n\n self.assertIsInstance(test_kek_meta, base.KEKMetaDTO)\n self.assertEqual(\n self.kek_meta_project_model.plugin_name, test_kek_meta.plugin_name)\n\n self.assertEqual(\n self.encrypted_datum_model.kek_meta_extended,\n test_kek_meta_extended)\n\n self.assertEqual(self.project_id, test_project_id)", "title": "" }, { "docid": "bdf86063485160dbce61d83613f1c94f", "score": "0.48555958", "text": "def test_missing_access_token_secret(self):\n res = self.social_login(self.social_url, self.missing_token_secret)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(res.data.get('errors').get('error')[0], \"An access token secret is required for Twitter Login\")", "title": "" }, { "docid": "ffd5d75ef8cf8d01641d34a993483f21", "score": "0.48548844", "text": "def test_define_execution_list_with_secrets(self, project_function_clean):\n secrets = {\"a\": \"secret\", \"b\": \"secret02\"}\n testdir = project_function_clean.testdir\n project = project_function_clean.name\n secrets_path = os.path.join(project_function_clean.path, 'secrets.json')\n with open(secrets_path, 'w') as secrets_file:\n secrets_file.write(json.dumps(secrets, indent=True))\n execution_runner = exc_runner.ExecutionRunner()\n execution_runner.tests = ['test_001']\n execution_runner.execution.processes = 1\n execution_runner.execution.browsers = ['chrome']\n execution_runner.execution.envs = ['']\n execution_runner.project = project_function_clean.name\n execution_list = execution_runner._define_execution_list()\n expected_list = [\n SimpleNamespace(name='test_001', data_set={}, secrets={\"a\": \"secret\", \"b\": \"secret02\"}, browser='chrome', reportdir=None)\n ]\n assert execution_list == expected_list", "title": "" }, { "docid": "12805505b673627dda84a6db9270b10b", "score": "0.48485553", "text": "def get_token_secret(self):\n return self.oauth_token_secret or None", "title": "" }, { "docid": "1149f333260f4b2c901eefb57320e6e6", "score": "0.48473433", "text": "def getSwiftSecret(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" } ]
ded8dd55a74a3491147509032239a5c2
Display a dialog asking the user to input the range of frames they would like to export
[ { "docid": "911b2d5a62a538eb21b7466296923e33", "score": "0.6653546", "text": "def ask_frame_range(self, n_frames):\n valid_input = False\n _, ext = os.path.splitext(self.output.GetPath())\n show_single_btn = (ext == '.h5')\n dlg = FrameSelectDialog(n_frames, show_single_btn)\n frames = None\n increment = None\n single_file = True\n while not valid_input:\n if dlg.ShowModal() == wx.ID_OK:\n msg = \"\"\n try:\n first_frame = int(dlg.first_input.GetValue())\n last_frame = int(dlg.last_input.GetValue())\n increment = int(dlg.increment_input.GetValue())\n if not show_single_btn:\n single_file = dlg.single_btn.GetValue()\n\n if last_frame < 0 or first_frame < 0:\n msg = \"Frame values must be positive\"\n elif increment < 1:\n msg = \"Increment must be greater than or equal to 1\"\n elif first_frame > last_frame:\n msg = \"First frame must be less than last frame\"\n elif last_frame >= n_frames:\n msg = \"Last frame must be less than {}\".format(n_frames)\n else:\n valid_input = True\n except:\n valid_input = False\n msg = \"Please enter valid integer values\"\n\n if not valid_input:\n wx.PostEvent(self.parent.manager.parent,\n StatusEvent(status=msg))\n else:\n return { 'frames': [], 'inc': None, 'file': single_file }\n frames = list(range(first_frame, last_frame + 1, increment))\n return { 'frames': frames, 'inc': increment, 'file': single_file }", "title": "" } ]
[ { "docid": "ba3807dc066092db7439c382f1f79169", "score": "0.5913335", "text": "def getFrameRangeFromShotSettings(self):\r\n\t\ttry:\r\n\t\t\t#frRange = \"%d-%d\" %(int(os.environ['STARTFRAME']), int(os.environ['ENDFRAME'])) # Icarus\r\n\t\t\tfrRange = \"%d-%d\" %(int(os.environ['UHUB_STARTFRAME']), int(os.environ['UHUB_ENDFRAME'])) # U-HUB\r\n\t\t\tself.ui.frames_lineEdit.setText(frRange)\r\n\t\texcept KeyError:\r\n\t\t\tself.ui.frames_lineEdit.setText(\"\")", "title": "" }, { "docid": "10afd68795db62c4a02e1a8eaa171f3e", "score": "0.577385", "text": "def test_03_verify_page_range_screen_ui(self):\n # General Setup\n self.fc.select_a_google_drive_file_and_go_to_print_preview(file_name=\"2pages\")\n assert self.fc.fd[\"preview\"].get_option_selected_value(Preview.PAGE_RANGE) == 'All'\n self.fc.fd[\"preview\"].verify_an_element_and_click(Preview.PAGE_RANGE)\n # Validation\n assert set(self.fc.fd[\"preview\"].get_options_listed(Preview.PR_SCREEN_OPTIONS_UI)) == set(\n Preview.PAGE_RANGE_OPTIONS)\n assert self.fc.fd[\"preview\"].verify_pages_selected(Preview.PAGE_RANGE_OPTIONS[0]) is not False\n assert self.fc.fd[\"preview\"].verify_pages_selected(Preview.PAGE_RANGE_OPTIONS[1]) is not False", "title": "" }, { "docid": "4122f3623897225f316c47763538cff1", "score": "0.5509551", "text": "def on_fit_range_clicked(self):\n dialog = FitRangeConfigDialog(\n upper_lim=(0., 10., self.fit_range[1]),\n lower_lim=(-1., 10., self.fit_range[0]),\n dimension='microsecond')\n\n if dialog.exec_() == 1:\n upper_limit = dialog.get_widget_value(\"upper_limit\")\n lower_limit = dialog.get_widget_value(\"lower_limit\")\n self.fit_range = (lower_limit, upper_limit)", "title": "" }, { "docid": "50a47179eb0e1bc49eabdeabb704ee6b", "score": "0.5437391", "text": "def grabFrame(self,event):\n\n if self.extract_range_frame == True:\n num_frames_extract = self.endFrame.GetValue()\n for i in range(self.currFrame,self.currFrame+num_frames_extract):\n self.currFrame = i\n self.vid.set(1,self.currFrame)\n self.chooseFrame()\n else:\n self.vid.set(1,self.currFrame)\n self.chooseFrame()", "title": "" }, { "docid": "bb146eb6cb9f239ff77328a34d5d3d34", "score": "0.54290503", "text": "def calcFrameList(self, quiet=True):\r\n\t\ttry:\r\n\t\t\tself.numList = sequence.numList(self.ui.frames_lineEdit.text(), sort=False, quiet=quiet)\r\n\t\t\tif self.numList is False:\r\n\t\t\t\t#raise RuntimeError(\"Invalid entry for frame range.\")\r\n\t\t\t\tif not quiet:\r\n\t\t\t\t\t#verbose.warning(\"Invalid entry for frame range.\")\r\n\t\t\t\t\tprint(\"Warning: Invalid entry for frame range.\")\r\n\t\t\t\t# self.ui.frames_lineEdit.setProperty(\"mandatoryField\", True)\r\n\t\t\t\t# self.ui.frames_lineEdit.style().unpolish(self.ui.frames_lineEdit)\r\n\t\t\t\t# self.ui.frames_lineEdit.style().polish(self.ui.frames_lineEdit)\r\n\t\t\t\tself.numList = []\r\n\t\t\t\tself.taskList = [\"Unknown\", ]\r\n\r\n\t\t\t\t# msg = \"Invalid entry for frame range.\"\r\n\t\t\t\t# #mc.warning(msg)\r\n\t\t\t\t# #mc.confirmDialog(title=\"Scene not saved\", message=msg, icon=\"warning\", button=\"Close\")\r\n\t\t\t\t# self.ui.submit_pushButton.setToolTip(msg)\r\n\t\t\t\t# self.ui.submit_pushButton.setEnabled(False)\r\n\r\n\t\t\t\treturn False\r\n\r\n\t\t\telif self.numList is None:\r\n\t\t\t\t#raise RuntimeError(\"No frame range specified.\")\r\n\t\t\t\tif not quiet:\r\n\t\t\t\t\t#verbose.warning(\"No frame range specified.\")\r\n\t\t\t\t\tprint(\"Warning: No frame range specified.\")\r\n\t\t\t\t# self.ui.frames_lineEdit.setProperty(\"mandatoryField\", True)\r\n\t\t\t\t# 
self.ui.frames_lineEdit.style().unpolish(self.ui.frames_lineEdit)\r\n\t\t\t\t# self.ui.frames_lineEdit.style().polish(self.ui.frames_lineEdit)\r\n\t\t\t\tself.numList = []\r\n\t\t\t\tself.taskList = [\"Unknown\", ]\r\n\r\n\t\t\t\t# msg = \"No frame range specified.\"\r\n\t\t\t\t# #mc.warning(msg)\r\n\t\t\t\t# #mc.confirmDialog(title=\"Scene not saved\", message=msg, icon=\"warning\", button=\"Close\")\r\n\t\t\t\t# self.ui.submit_pushButton.setToolTip(msg)\r\n\t\t\t\t# self.ui.submit_pushButton.setEnabled(False)\r\n\r\n\t\t\t\treturn False\r\n\r\n\t\t\telse:\r\n\t\t\t\t# Update task size slider\r\n\t\t\t\t#self.ui.frames_lineEdit.setText(sequence.numRange(self.numList))\r\n\t\t\t\ttaskSize = self.ui.taskSize_spinBox.value()\r\n\t\t\t\tnFrames = len(self.numList)\r\n\t\t\t\tif taskSize < nFrames:\r\n\t\t\t\t\tself.ui.taskSize_slider.setMaximum(nFrames)\r\n\t\t\t\t\tself.ui.taskSize_spinBox.setMaximum(nFrames)\r\n\t\t\t\t\tself.ui.taskSize_spinBox.setValue(taskSize)\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.ui.taskSize_slider.setMaximum(nFrames)\r\n\t\t\t\t\tself.ui.taskSize_spinBox.setMaximum(nFrames)\r\n\t\t\t\t\tself.ui.taskSize_spinBox.setValue(nFrames)\r\n\t\t\t\tif nFrames == 1:\r\n\t\t\t\t\tself.ui.taskSize_slider.setEnabled(False)\r\n\t\t\t\t\tself.ui.taskSize_spinBox.setEnabled(False)\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.ui.taskSize_slider.setEnabled(True)\r\n\t\t\t\t\tself.ui.taskSize_spinBox.setEnabled(True)\r\n\r\n\t\t\t\t# Generate task list for rendering\r\n\t\t\t\tself.taskList = []\r\n\t\t\t\tsequences = list(sequence.seqRange(self.numList, gen_range=True))\r\n\t\t\t\tfor seq in sequences:\r\n\t\t\t\t\tchunks = list(sequence.chunks(seq, taskSize))\r\n\t\t\t\t\tfor chunk in chunks:\r\n\t\t\t\t\t\t#self.taskList.append(list(sequence.seqRange(chunk))[0])\r\n\t\t\t\t\t\tself.taskList.append(sequence.numRange(chunk))\r\n\r\n\t\t\t\treturn True\r\n\r\n\t\texcept (MemoryError, OverflowError):\r\n\t\t\t#verbose.warning(\"Specified frame range value(s) too large to process.\")\r\n\t\t\tprint(\"Warning: Specified frame range value(s) too large to process.\")\r\n\t\t\treturn False\r\n\r\n\t\t# except RuntimeError:\r\n\t\t# \tif not quiet:\r\n\t\t# \t\tverbose.warning(\"Invalid entry for frame range.\")\r\n\t\t# \t# self.ui.frames_lineEdit.setProperty(\"mandatoryField\", True)\r\n\t\t# \t# self.ui.frames_lineEdit.style().unpolish(self.ui.frames_lineEdit)\r\n\t\t# \t# self.ui.frames_lineEdit.style().polish(self.ui.frames_lineEdit)\r\n\t\t# \tself.numList = []\r\n\t\t# \tself.taskList = [\"Unknown\", ]\r\n\t\t# \treturn False\r\n\r\n\t\t# finally:\r\n\t\t# \tpass\r", "title": "" }, { "docid": "5cd60b7e906cace94cf2405f85043ada", "score": "0.541242", "text": "def buildTabDialog(self):\n super(Torus5dFrame, self).buildTabDialog()\n self.tab_dialog.addTab(Torus5dRangeTab(self.tab_dialog, self), \"Data Range\")", "title": "" }, { "docid": "c1a2663b701b7b4ddf6388ba7e353667", "score": "0.5410121", "text": "def export_frame():\r\n exp_frame = LabelFrame(window, text='Экспортировать в MS Excel', width=960, height=270)\r\n exp_frame.place(x=25, y=450)\r\n\r\n btn_1 = Button(exp_frame, text=\"Все банки по месяцам\", background=\"#555\", foreground=\"#ccc\",\r\n padx=\"15\", pady=\"6\", font=\"15\", command=choice_month)\r\n btn_1.pack(expand=True, fill=BOTH)\r\n\r\n btn_2 = Button(exp_frame, text=\"Панель данных\", background=\"#555\", foreground=\"#ccc\",\r\n padx=\"15\", pady=\"6\", font=\"15\", command=choice_bank)\r\n btn_2.pack(expand=True, fill=BOTH)\r\n\r\n return", "title": "" }, { "docid": 
"54d45a9687419e2c6d449c9222bb3c16", "score": "0.5384579", "text": "def on_fit_range_clicked(self):\n dialog = FitRangeConfigDialog(\n upper_lim=(0., 60., self.fit_range[1]),\n lower_lim=(-1., 60., self.fit_range[0]), dimension='ns')\n\n if dialog.exec_() == 1:\n upper_limit = dialog.get_widget_value(\"upper_limit\")\n lower_limit = dialog.get_widget_value(\"lower_limit\")\n self.fit_range = (lower_limit, upper_limit)", "title": "" }, { "docid": "49a136c235db02f2e723f9bc3780a63a", "score": "0.5343724", "text": "def activate_frame_range(self,event):\n self.checkSlider = event.GetEventObject()\n if self.checkSlider.GetValue() == True:\n self.extract_range_frame = True\n self.startFrame.Enable(True)\n self.startFrame.SetValue(self.slider.GetValue())\n self.endFrame.Enable(True)\n self.updateFrame.Enable(True)\n self.grab.Enable(False)\n else:\n self.extract_range_frame = False\n self.startFrame.Enable(False)\n self.endFrame.Enable(False)\n self.updateFrame.Enable(False)\n self.grab.Enable(True)", "title": "" }, { "docid": "ac3dc44e8d6a020617b351f6a508fd14", "score": "0.53095037", "text": "def show_pres_gen(self, event):\n\t\tPresetDialog(self, \"Preset Generator\")", "title": "" }, { "docid": "8b9e93030c3e9cfe40742bb5d974c9bc", "score": "0.5279935", "text": "def set_range_from_shot(shot):\n min_frame = shot.getAttr('startFrame')\n max_frame = shot.getAttr('endFrame')\n\n pm.playbackOptions(\n ast=min_frame,\n aet=max_frame,\n min=min_frame,\n max=max_frame\n )", "title": "" }, { "docid": "8bc3237ac8fdb028b4039f6108bd0c41", "score": "0.524625", "text": "def show_frameDelta(self, cont):\n frame = self.frames[cont]\n frame.gettext() #Displays either \"Ready\" or \"Insert stimuli\" based on experiment type\n frame.tkraise()", "title": "" }, { "docid": "195e224e426f2efca8eef92ed1a8ce14", "score": "0.520215", "text": "def trackbar(theWidth, theValue, theMin, theMax, theSegments = 1, theLabelFormat = '%.1Lf', theOptions = 0, theDiscreteStep = 1):\n\tprint('This is wrapper function to help code autocompletion.')", "title": "" }, { "docid": "4d13e15460338ffa0338d7aeb1bfd134", "score": "0.5100591", "text": "def setFrameListPreset(self):\r\n\t\t#print(self.sender().text())\r\n\t\tif self.sender().text() == \"Sequential\":\r\n\t\t\tnum_list = sequence.numList(self.ui.frames_lineEdit.text(), sort=True, quiet=True)\r\n\t\t\tif self.numList:\r\n\t\t\t\tself.ui.frames_lineEdit.setText(sequence.numRange(num_list))\r\n\t\t\telse:\r\n\t\t\t\tpass\r\n\r\n\t\telif self.sender().text() == \"Reverse order\":\r\n\t\t\tpass\r\n\r\n\t\telif self.sender().text() == \"Render first and last frames before others\":\r\n\t\t\tframes_str = self.ui.frames_lineEdit.text()\r\n\t\t\tnum_list = sequence.numList(frames_str, sort=True, quiet=True)\r\n\t\t\tif self.numList:\r\n\t\t\t\tfirst = min(num_list)\r\n\t\t\t\tlast = max(num_list)\r\n\t\t\t\tframes_str_prefix = \"%d, %d\" %(first, last)\r\n\t\t\t\tif not frames_str.startswith(frames_str_prefix):\r\n\t\t\t\t\tself.ui.frames_lineEdit.setText(\"%s, %s\" %(frames_str_prefix, frames_str))\r\n\t\t\telse:\r\n\t\t\t\tpass", "title": "" }, { "docid": "861577666521aacaf179c22e73a8c817", "score": "0.5064858", "text": "def show_frameEcho(self, cont):\n self.frames[StimPrepPg].button1.grid_remove()\n self.frames[StimPrepPg].button2.grid_remove()\n self.frames[StimPrepPg].label1.configure(text=\"Experiment in progress\")\n frame = self.frames[cont]\n\n os.makedirs(Appa.savefile) # Create general folder for experiment \n os.makedirs(Appa.savefile + \"/ExpDataPictures\") # Create folder for 
images from exp\n\n #Image capturing\n imgnum=0\n for i in range(Appa.exptime+1):\n start_time = clock()\n remaining = Appa.exptime-i # Calculate countdown\n self.frames[StimPrepPg].label2.configure(text=\"Time remaining: %d\" % remaining) # Set countdown\n self.frames[StimPrepPg].update_idletasks() # Refresh page \n if i%Appa.capturerate == 0: # Calculate if need to capture pic \n camera.capture(Appa.savefile + \"/ExpDataPictures/image\" + str(imgnum) + \".jpg\")\n Appa.expy.append(\"\") # Append empty place holder for future analyssi\n imgnum+=1\n sleep(1-(clock()-start_time))\n\n camera.stop_preview()\n frame.tkraise()", "title": "" }, { "docid": "8ddddcfe84c9630152742df5676e288c", "score": "0.5046323", "text": "def trackbar(theWhere, theX, theY, theWidth, theValue, theMin, theMax, theSegments = 1, theLabelFormat = '%.1Lf', theOptions = 0, theDiscreteStep = 1):\n\tprint('This is wrapper function to help code autocompletion.')", "title": "" }, { "docid": "3d3ba097ca1983f9d5470b46506ac474", "score": "0.503143", "text": "def choose_windows(spectrum, wstart, wend):\n\n #Define function that will obtain the cursor x position\n def onselect(vmin, vmax):\n \"\"\" Function that will obtain the cursor x position\"\"\"\n windows.append(float(Decimal(\"%.2f\" % vmin)))\n windows.append(float(Decimal(\"%.2f\" % vmax)))\n plot_windows(windows)\n plt.draw()\n #Create a plot so the user can choose the windows\n windows = [] #Set an empty window\n axis = plt.subplot(111)\n axis.plot(spectrum[:, 0], spectrum[:, 1], 'k-')\n axis.hlines(1, wstart, wend, color = 'b', linestyles = 'dashed')\n axis.set_xlim([wstart, wend])\n axis.set_ylim([min(subselect_spectra \\\n (spectrum, wstart, wend)[1]) - 0.2, 1.05])\n axis.set_xlabel(r'Wavelength $(\\AA)$')\n axis.set_ylabel('Normalized Flux')\n span = SpanSelector(axis, onselect, 'horizontal', minspan = 0.05)\n # Plot a vertical line at cursor position\n cursor = Cursor(axis, useblit = True, color = 'red', linewidth = 1 )\n cursor.horizOn = False\n\n plt.show()\n plt.clf()\n \n return windows", "title": "" }, { "docid": "287088044e0310051082f6a3186f302d", "score": "0.5019565", "text": "def show_seg_marker_dialog():\n \n from chimera import dialogs\n return dialogs.display(Segment_Marker.name)", "title": "" }, { "docid": "0b54d1e58af059846dda2adc2f0a71a7", "score": "0.4990835", "text": "def displayFrames(frames):\n framesFormat = \"%-35s %-11s %-15s %-13s %-12s %-9s %5s %7s %5s\"\n header = framesFormat % (\n \"Frame\", \"Status\", \"Host\", \"Start\", \"End\", \"Runtime\", \"Mem\", \"Retry\", \"Exit\")\n print(header + \"\\n\" + \"-\" * len(header))\n\n for frame in frames:\n startTime = cueadmin.format.formatTime(frame.data.start_time)\n stopTime = cueadmin.format.formatTime(frame.data.stop_time)\n\n if frame.data.start_time:\n duration = cueadmin.format.formatDuration(\n cueadmin.format.findDuration(frame.data.start_time, frame.data.stop_time))\n else:\n duration = \"\"\n\n memory = cueadmin.format.formatMem(frame.data.max_rss)\n exitStatus = frame.data.exit_status\n\n print(framesFormat % (\n cueadmin.format.cutoff(frame.data.name, 35),\n opencue.compiled_proto.job_pb2.FrameState.Name(frame.data.state),\n frame.data.last_resource,\n startTime,\n stopTime,\n duration,\n memory,\n frame.data.retry_count,\n exitStatus))\n\n if len(frames) == 1000:\n print(\"Warning: Only showing first 1000 matches. 
See frame query options to \"\n \"limit your results.\")", "title": "" }, { "docid": "97aab89295159bb5847a2a3f7390b775", "score": "0.49894312", "text": "def export_segment(self):\n\n msecs_from = msecs_to_hours_mins_secs(self.segment['pos0'])\n msecs_from = msecs_from.replace('.', \"H\", 1)\n msecs_from = msecs_from.replace('.', \"M\", 1) + \"S\"\n msecs_to = msecs_to_hours_mins_secs(self.segment['pos1'])\n msecs_to = msecs_to.replace('.', \"H\", 1)\n msecs_to = msecs_to.replace('.', \"M\", 1) + \"S\"\n filename = self.code_av_dialog.file_['name'][:-4] + \"_\"\n filename += msecs_from + \"_to_\" + msecs_to + \"_\"\n filename += self.code_av_dialog.file_['name'][-4:]\n export_dir = ExportDirectoryPathDialog(self.app, filename)\n filepath = export_dir.filepath\n if filepath is None:\n return\n if os.path.exists(filepath):\n os.remove(filepath)\n mediapath = \"\"\n try:\n if self.code_av_dialog.file_['mediapath'][0:6] in ('/audio', '/video'):\n mediapath = self.app.project_path + self.code_av_dialog.file_['mediapath']\n if self.code_av_dialog.file_['mediapath'][0:6] in ('audio:', 'video:'):\n mediapath = self.code_av_dialog.file_['mediapath'][6:]\n except Exception as e_:\n Message(self.app, _('Media not found'),\n str(e_) + \"\\n\" + self.app.project_path + self.code_av_dialog.file_['mediapath'],\n \"warning\").exec()\n return\n ffmpeg_command = 'ffmpeg -i \"' + mediapath + '\" -ss '\n ffmpeg_command += str(self.segment['pos0'] / 1000)\n ffmpeg_command += ' -to '\n ffmpeg_command += str(self.segment['pos1'] / 1000)\n ffmpeg_command += ' -c copy \"' + filepath + '\"'\n # print(f\"FFMPEG COMMAND\\n {ffmpeg_command}\")\n try:\n subprocess.run(ffmpeg_command, timeout=15, shell=True)\n self.code_av_dialog.parent_textEdit.append(_(\"A/V segment exported: \") + filepath)\n Message(self.app, _(\"Segment exported\"), filepath).exec()\n except Exception as e_:\n logger.error(str(e_))\n print(str(e_))\n Message(self.app, \"ffmpeg error\", str(e_)).exec()", "title": "" }, { "docid": "3196554bd258ff84cf0f8ec9a6c2b87c", "score": "0.49824834", "text": "def showVideo(self): # connected to Go to vide (btn5)\n\n# self.is_trace = False\n\n plot_with_colorbar(self.imv, self.data)\n\n# self.w.setWindowTitle('SMAnalyzer - Video - ' + self.f)\n self.meanEndEdit.setStyleSheet(\" background-color: ; \")\n\n try:\n self.translateMaxima()\n self.imv.view.addItem(self.roi)\n except:\n pass\n if self.JPG:\n self.mean = self.data", "title": "" }, { "docid": "982327f5089a668553fd3d0cc7cbb0e5", "score": "0.49745703", "text": "def trackbar2(theWidth, theValue1, theValue2, theMin, theMax, theSegments=1, theLabelFormat='%.1Lf', theOptions=0, theDiscreteStep=1):\n\tprint('This is wrapper function to help code autocompletion.')", "title": "" }, { "docid": "572a091297dea0a5cdc94e389405de48", "score": "0.497088", "text": "def show_scan_frame(self) -> None:\n src.getraenkeKasse.scanframe.ScanFrame(self)", "title": "" }, { "docid": "2ad93f72af3d15509f96237142728772", "score": "0.4968003", "text": "def show_export_events_dialog(self):\n self.export_events_dialog.update()\n self.export_events_dialog.show()", "title": "" }, { "docid": "2bfd9577c88b9603ec1b1d4534c8be49", "score": "0.49670574", "text": "def export_img(self, path, base = 'fseq-export-', figsize=(4,4),\n start = 0, stop = None, show_title = True,\n format='.png', vmin = None, vmax=None, **kwargs):\n import sys\n import matplotlib.pyplot as plt\n lib.ensure_dir(path)\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n if stop is None or stop == -1:\n stop = 
len(self)\n\tif hasattr(self, 'data'):\n\t vmin = ifnot(vmin, self.data_percentile(1)) # for scale\n\t vmax = ifnot(vmax, self.data_percentile(99)) # for scale\n\telse:\n\t vmin = ifnot(vmin, np.min(map(np.min, self.frames())))\n\t vmax = ifnot(vmax, np.min(map(np.max, self.frames())))\n kwargs.update({'vmin':vmin, 'vmax':vmax})\n\tprint path+base\n L = min(stop-start, len(self))\n\tfnames = []\n for i,frame in enumerate(self.frames()):\n if i < start: continue\n if i > stop: break\n ax.cla()\n ax.imshow(frame, aspect='equal', **kwargs)\n fname = path + base + '%06d.png'%i\n\t fnames.append(fname)\n if show_title:\n zscale = tuple(self.meta['axes'][0])\n ax.set_title('frame %06d (%3.3f %s)'%zscale)\n fig.savefig(fname)\n sys.stderr.write('\\r saving frame %06d of %06d'%(i+1, L))\n plt.close()\n\treturn fnames", "title": "" }, { "docid": "9a77f38ecfac5d612d6426a2056fc8d9", "score": "0.49639183", "text": "def getFrameRangeFromRenderSettings(self):\r\n\t\tif UI.ENVIRONMENT == \"MAYA\":\r\n\t\t\tstart_frame = int(mc.getAttr('defaultRenderGlobals.startFrame'))\r\n\t\t\tend_frame = int(mc.getAttr('defaultRenderGlobals.endFrame'))\r\n\r\n\t\telif UI.ENVIRONMENT == \"HOUDINI\":\r\n\t\t\t# Note this is the time slider (playbar) range, not the render\r\n\t\t\t# settings\r\n\t\t\tstart_frame = hou.playbar.playbackRange()[0]\r\n\t\t\tend_frame = hou.playbar.playbackRange()[1]\r\n\r\n\t\telif UI.ENVIRONMENT == \"NUKE\":\r\n\t\t\tstart_frame = nuke.Root()['first_frame'].getValue()\r\n\t\t\tend_frame = nuke.Root()['last_frame'].getValue()\r\n\r\n\t\tself.ui.frames_lineEdit.setText(\"%d-%d\" %(start_frame, end_frame))", "title": "" }, { "docid": "ab0d42cd38951741e47c8f6343f29f35", "score": "0.49546847", "text": "def trackbar2(theWhere, theX, theY, theWidth, theValue1, theValue2, theMin, theMax, theSegments=1, theLabelFormat='%.1Lf', theOptions=0, theDiscreteStep=1):\n\tprint('This is wrapper function to help code autocompletion.')", "title": "" }, { "docid": "2a1a39a68dc60fbef53764d44b54ec8d", "score": "0.4953241", "text": "def on_ok_clicked(self, obj):\n\n # Is there a filename? 
This should also test file permissions, etc.\n if not self.parse_target_frame():\n self.window.run()\n\n # Preparation\n self.parse_format_frame()\n self.parse_user_options()\n\n self.options.handler.set_paper_metric(\n self.paper_frame.get_paper_metric())\n self.options.handler.set_paper_name(self.paper_frame.get_paper_name())\n self.options.handler.set_orientation(self.paper_frame.get_orientation())\n self.options.handler.set_margins(self.paper_frame.get_paper_margins())\n self.options.handler.set_custom_paper_size(\n self.paper_frame.get_custom_paper_size())\n \n # Create the output document.\n self.make_document()\n \n # Save options\n self.options.handler.save_options()\n config.set('interface.open-with-default-viewer', \n self.open_with_app.get_active())", "title": "" }, { "docid": "26063a03b2213531eaf977069b81b84b", "score": "0.4944826", "text": "def execute(self, parameters, messages):\n\n arcpy.ImportToolbox(os.path.join(os.path.dirname(__file__), \"URB.pyt\"))\n arcpy.gp.toolbox = os.path.join(os.path.dirname(__file__), \"URB.pyt\")\n\n def extentToPoly(extent, srid=3003):\n clist = arcpy.Array()\n clist.append(arcpy.Point(extent.XMin, extent.YMin))\n clist.append(arcpy.Point(extent.XMin, extent.YMax))\n clist.append(arcpy.Point(extent.XMax, extent.YMax))\n clist.append(arcpy.Point(extent.XMax, extent.YMin))\n return arcpy.Polygon(clist)\n\n def get_best_fit_scale(fc,paper,scales=[1000,2000,2500,5000,7500,10000,20000]):\n desc = arcpy.Describe(fc)\n sheet = printOutput_templates[paper][\"size\"]\n margin = 10\n #mapUnitsPerMillimeter = [0.5,1,2,5,10]\n cx = (desc.extent.XMin + desc.extent.XMax)/2\n cy = (desc.extent.YMin + desc.extent.YMax)/2\n fc_bound = extentToPoly(desc.extent)\n\n for scale in scales:\n scaleFactor = scale / 1000\n wb = sheet[0] * scaleFactor / 2\n hb = sheet[1] * scaleFactor / 2\n wf = (sheet[0] - margin*2) * scaleFactor / 2\n hf = (sheet[1] - margin*2) * scaleFactor / 2\n \n #bound = arcpy.Polygon([arcpy.Point(cx-wb,cy-hb), arcpy.Point(cx+wb,cy-hb), arcpy.Point(cx+wb,cy+hb), arcpy.Point(cx-wb,cy+hb)])\n #frame = arcpy.Polygon([arcpy.Point(cx-wf,cy-hf), arcpy.Point(cx+wf,cy-hf), arcpy.Point(cx+wf,cy+hf), arcpy.Point(cx-wf,cy+hf)])\n bound = extentToPoly(arcpy.Extent(cx-wb, cy-hb, cx+wb, cy+hb))\n frame_extent = arcpy.Extent(cx-wf, cy-hf, cx+wf, cy+hf)\n frame = extentToPoly(frame_extent)\n\n #tempfcname = \"in_memory/output\" + uuid.uuid4().hex\n #arcpy.Intersect_analysis ([frame, fc_bound], tempfcname)\n #result = arcpy.GetCount_management(tempfcname)\n #intersections = int(result.getOutput(0))\n\n #if intersections > 0:\n if frame_extent.contains(desc.extent):\n return bound, frame, scale\n\n return bound, frame, scaleFactor\n\n def get_esri_ring(extent):\n ring = [\n [extent.XMin, extent.YMin],\n [extent.XMax, extent.YMin],\n [extent.XMax, extent.YMax],\n [extent.XMin, extent.YMax],\n [extent.XMin, extent.YMin],\n ]\n return ring\n\n probe_path = parameters[0].valueAsText\n coordinate_catastali = parameters[1].valueAsText\n paper = parameters[2].valueAsText.replace(\"'\",\"\")\n base = parameters[3].valueAsText.replace(\"'\",\"\")\n\n checkboxes = []\n #for idx in range(4,12):\n # if parameters[idx].valueAsText == \"true\":\n # checkboxes.append(idx)\n for param in parameters:\n #arcpy.AddMessage(\"param: %s %s\" % (str(param.datatype),str(param.valueAsText)))\n if param.datatype in (\"Booleano\", \"Boolean\") and param.valueAsText == \"true\":\n checkboxes.append(param.name)\n arcpy.AddMessage(\"checkboxes: %s\" % str(checkboxes))\n\n with 
open(os.path.join(os.path.dirname(__file__),\"web_map_as_json.json\"),\"r\") as jf:\n wmaj_template = jf.read()\n\n template_engine = Template(wmaj_template)\n\n decode_map = []\n\n if coordinate_catastali:\n CC_result = arcpy.gp.CC2FCtool(coordinate_catastali)\n probe_path = CC_result.getOutput(0)\n else:\n if not probe_path:\n arcpy.AddError(\"Deve essere specificata almeno un contesto, come layer o come coordinate catastali\")\n exit(0)\n \n arcpy.AddMessage(\"probe_path: %s paper:\" % probe_path)\n probe = arcpy.mapping.Layer(probe_path)\n with arcpy.da.SearchCursor(probe_path, ['SHAPE']) as cursor: \n probe_polygon = next(cursor)[0]\n\n #probe_json_path = os.path.join(tempfile.mkdtemp(), \"probe.json\")\n probe_json_path = get_jobfile(\"activity\", \"json\")\n arcpy.FeaturesToJSON_conversion(probe_path, probe_json_path, \"FORMATTED\")\n\n with open(probe_json_path ,\"r\") as jf:\n probe_json = jf.read()\n\n #arcpy.AddMessage(json.dumps(json.loads(probe_json),indent=3))\n\n json_feats = []\n probe_json_dict = json.loads(probe_json)\n for feat in probe_json_dict[\"features\"]:\n feat[\"symbol\"] = {\n \"color\": [255, 0, 0, 0],\n \"outline\": {\n \"color\": [255, 0, 0, 255],\n \"width\": 1.75,\n \"type\": \"esriSLS\",\n \"style\": \"esriSLSSolid\"\n },\n \"type\": \"esriSFS\",\n \"style\": \"esriSFSSolid\"\n }\n json_feats.append(feat)\n\n\n result_pdf = []\n\n for tema in temi:\n if not tema[\"label\"] in checkboxes:\n continue\n \n mapServices = basi[base] + tema[\"def\"]\n\n bound, frame, scale = get_best_fit_scale(probe_path, paper)\n\n printpar ={\n \"extent\": [ frame.extent.XMin, frame.extent.YMin, frame.extent.XMax, frame.extent.YMax ],\n \"scale\": scale,\n \"srid\": 3003,\n \"esri_poly\": json.dumps(json.loads(probe_json)[\"features\"][0][\"geometry\"][\"rings\"]),\n \"esri_style\": json.dumps(proto_ESRI_style), #non implementato nel template json\n \"esri_bound\": get_esri_ring(bound.extent),\n \"esri_frame\": get_esri_ring(frame.extent),\n \"title\": tema[\"label\"].upper().replace(\"_\",\"\"),\n \"dpi\": 200,\n \"auth\": \"Settore urbanistica, Servizi catastali e Mobilita'\",\n \"copyright\": \"Comune di Padova\"\n }\n\n web_map_as_json = template_engine.render(printpar = printpar)\n web_map_as_dict = json.loads(web_map_as_json)\n\n web_map_as_dict[\"operationalLayers\"][0][\"featureCollection\"][\"layers\"][0][\"featureSet\"][\"features\"] = json_feats\n web_map_as_dict[\"operationalLayers\"] = mapServices + web_map_as_dict[\"operationalLayers\"]\n web_map_as_json = json.dumps(web_map_as_dict)\n\n post_parameters = {\n \"f\": \"json\",\n \"Web_Map_as_JSON\": web_map_as_json,\n \"Format\": \"PDF\",\n \"Layout_Template\": printOutput_templates[paper][\"label\"]\n }\n\n #arcpy.AddMessage(json.dumps(post_parameters,indent=3))\n\n #pdf_file_path = os.path.join(tempfile.mkdtemp(), tema[\"label\"]+\".pdf\")\n pdf_file_path = get_jobfile(\"output\", \"pdf\")\n\n res = urllib.urlopen(base_url + \"arcgis/rest/services/Utilities/PrintingTools/GPServer/Export%20Web%20Map%20Task/execute\", urllib.urlencode(post_parameters)).read()\n\n if \"results\" in res:\n remoteFile = json.loads(res)['results'][0][\"value\"][\"url\"]\n #arcpy.AddMessage (\"REMOTE: \" + remoteFile)\n urllib.urlretrieve(remoteFile, pdf_file_path)\n arcpy.AddMessage(\"OK: %s\" % tema[\"label\"])\n result_pdf.append(pdf_file_path)\n else:\n arcpy.AddMessage(\"NO\")\n\n if parameters[-1].valueAsText:\n pdf_globale = parameters[-1].valueAsText\n else:\n #pdf_globale = os.path.join(tempfile.mkdtemp(), 
\"inquadramento.pdf\")\n pdf_globale = get_jobfile(\"output\", \"pdf\")\n\n merger = PdfFileMerger()\n for file in result_pdf:\n merger.append(PdfFileReader(file))\n merger.write(pdf_globale)\n\n parameters[-1].value = pdf_globale\n\n arcpy.AddMessage(\"OK: %s\" % pdf_globale)", "title": "" }, { "docid": "10575b6e0efc5c3e56b8f88ce0b550f0", "score": "0.4936005", "text": "def info_range(self):\n \n # If there is no data, tell the user and don't show the info dialog.\n if len(self.data) == 0:\n show_no_data_dialog(self, \"Info - %s\" % self.last_profile)\n return\n \n # Get the first and last entered dates.\n day_start = dates.split_date(self.data[0][0])\n day_end = dates.split_date(self.data[len(self.data) - 1][0])\n \n # Get a list of datetimes from the dates.\n datelist = dates.date_list_datetime(datasets.get_column(self.data, 0))\n \n # Get the starting and ending dates.\n cal_dlg = CalendarRangeDialog(self, \"Info in Range - %s\" % self.last_profile, day_start, day_end)\n response = cal_dlg.run()\n year1, month1, day1 = cal_dlg.start_cal.get_date()\n year2, month2, day2 = cal_dlg.end_cal.get_date()\n date1 = \"%d/%d/%d\" % (day1, month1 + 1, year1)\n date2 = \"%d/%d/%d\" % (day2, month2 + 1, year2)\n cal_dlg.destroy()\n \n # If the user did not click OK, don't continue.\n if response != Gtk.ResponseType.OK:\n return\n \n # Get the indices.\n dt_start = datetime.datetime(year1, month1 + 1, day1)\n start_index = dates.date_above(dt_start, datelist)\n dt_end = datetime.datetime(year2, month2 + 1, day2)\n end_index = dates.date_below(dt_end, datelist)\n \n # Check to make sure these dates are valid, and cancel the action if not.\n if start_index == DateValidation.INVALID:\n show_error_dialog(self, \"Info in Range - %s\" % self.last_profile, \"%s is not a valid date.\\n\\nThis date is not present and is not before any other dates.\" % date1)\n return\n if end_index == DateValidation.INVALID:\n show_error_dialog(self, \"Info in Range - %s\" % self.last_profile, \"%s is not a valid date.\\n\\nThis date is not present and is not after any other dates.\" % date2)\n return\n if end_index < start_index:\n show_error_dialog(self, \"Info in Range - %s\" % self.last_profile, \"The ending date must be after the starting date.\")\n return\n \n # Get the new list.\n data2 = self.data[start_index:end_index + 1]\n \n # Pass the data to the info dialog.\n self.show_info_generic(data = data2)", "title": "" }, { "docid": "6c916096b47eb99b3f25d7ca7bd57a62", "score": "0.49245515", "text": "def select_rectangle(infile, start, res_dict, fits_dict, wloc, outfil, maxim):\r\n im, header = get_fits_image(infile + str(start))\r\n im = im / np.max(im)\r\n get_fits_keys(header, fits_dict, res_dict, keyprint=False)\r\n # #===================================================================\r\n # new rect_plt\r\n # first get size of graph from tmp.png and size of image\r\n # graph coordinates are in image pixels!\r\n (imy, imx) = im.shape[:2]\r\n image_file = 'tmp.png' # scaled image\r\n imbw = np.flipud(ios.imread(image_file)) # get shape\r\n (canvasy, canvasx) = imbw.shape[:2]\r\n wlocw = (wloc[0] + 300, wloc[1] + 50)\r\n # check for old files\r\n delete_old_files(outfil, maxim, ext='.fit')\r\n image_elem_sel = [sg.Graph(\r\n canvas_size=(canvasx, canvasy),\r\n graph_bottom_left=(0, 0), # starts at top, set y-scale here\r\n graph_top_right=(imx, imy), # set x-scale here\r\n key='-GRAPH-',\r\n change_submits=True, # mouse click events\r\n drag_submits=True)]\r\n layout_select = [[sg.Text('Start File: ' + infile + 
str(start), size=(50, 1)), sg.Text(key='info', size=(40, 1)),\r\n sg.Ok(), sg.Cancel()],\r\n image_elem_sel]\r\n # ---------------------------------------------------------------------------\r\n winselect_active = True\r\n winselect = sg.Window(f'select zero order or spectral line',\r\n layout_select, finalize=True, location=wlocw,\r\n keep_on_top=True, no_titlebar=False,\r\n disable_close=False, disable_minimize=True)\r\n # get the graph element for ease of use later\r\n graph = winselect['-GRAPH-'] # type: sg.Graph\r\n graph.draw_image(image_file, location=(0, imy)) if image_file else None\r\n winselect.refresh()\r\n dragging = False\r\n start_point = end_point = prior_rect = None\r\n x0 = y0 = dx = dy = 0\r\n while winselect_active:\r\n event, values = winselect.read()\r\n idg = graph.draw_rectangle((0, 0), (imx, imy), line_color='blue')\r\n if event == \"-GRAPH-\": # if there's a \"Graph\" event, then it's a mouse\r\n x, y = (values[\"-GRAPH-\"])\r\n if not dragging:\r\n start_point = (x, y)\r\n dragging = True\r\n else:\r\n end_point = (x, y)\r\n if prior_rect:\r\n graph.delete_figure(prior_rect)\r\n if None not in (start_point, end_point):\r\n prior_rect = graph.draw_rectangle(start_point,\r\n end_point, line_color='red')\r\n elif event is not None and event.endswith('+UP'):\r\n # The drawing has ended because mouse up\r\n xy0 = [int(0.5 * (start_point[0] + end_point[0])),\r\n int(0.5 * (start_point[1] + end_point[1]))]\r\n size = (abs(start_point[0] - end_point[0]),\r\n abs(start_point[1] - end_point[1]))\r\n info = winselect[\"info\"]\r\n info.update(value=f\"grabbed rectangle at {xy0} with size {size}\")\r\n start_point, end_point = None, None # enable grabbing a new rect\r\n dragging = False\r\n if min(size[0], size[1]) > 1: # rectangle\r\n info.update(value=f\"rectangle at {xy0} with size {size}\")\r\n x0 = xy0[0]\r\n y0 = xy0[1]\r\n dx = int((size[0] + 1) / 2)\r\n dy = int((size[1] + 1) / 2)\r\n\r\n elif event in ('Ok', 'Cancel'):\r\n graph.delete_figure(idg)\r\n winselect_active = False\r\n winselect.close()\r\n return event, x0, y0, dx, dy", "title": "" }, { "docid": "59996cf468705e8938141a9c24208f86", "score": "0.4921469", "text": "def show_frameShark(self, cont, listofbuttons):\n frame = cont(self.container, self)\n self.frames[cont] = frame \n\n ExpsToGraph = []\n expnames = []\n unanalyzedlist = []\n result = True\n\n # Get list of experiments to plot\n for button in listofbuttons:\n if button.instate(['selected']):\n ExpsToGraph.append(getobject(button['text']))\n\n if len(ExpsToGraph) == 0: # Did not choose an experiment to graph\n tkMessageBox.showwarning(\"Error\", \"No experiments selected\")\n else:\n # Check to see if all experiments selected have been analyzed\n for experiment in ExpsToGraph:\n if experiment.expy[0] == \"\":\n unanalyzedlist.append(experiment.expnumber)\n else:\n experiment.expy = list(map(int, experiment.expy))\n frame.a.plot(range(len(experiment.expy)),experiment.expy, label=experiment.expnumber)\n expnames.append(experiment.expnumber)\n\n\n if len(unanalyzedlist) > 0: # If unanalyzed experiments exist warn user\n unanalyzedlist = \", \".join(unanalyzedlist)\n result = tkMessageBox.askquestion(\"Warning\", \"The following experiments have\\nnot been analyzed yet\\nand will not be graphed\\n\\n\" +unanalyzedlist+ \"\\n\\nProceed anyways?\")\n\n if result != \"no\": # Override or all experiments have been analyzed \n frame.grid(row=0, column=0, sticky=\"nsew\") \n frame.a.legend(loc='upper right', fontsize=8)\n frame.canvas.draw() \n 
frame.tkraise()", "title": "" }, { "docid": "057084b294b3f12db6cccefb5b454320", "score": "0.49202904", "text": "def menu(file_name):\n print (80 * '*')\n print ('*' + 1*' ' + 'PDB FILE ANALYZER' + 60*' ' + '*')\n print (80 * '*')\n print ('*'+ 1*' ' + 'Select an option from below:' + 49*' ' + '*')\n print ('*' + 78*' ' + '*')\n print ('*' + 6*' ' + '1) Open a PDB File' + 23*' ' + '(O)' + 28*' ' + '*')\n print ('*' + 6*' ' + '2) Information'+ 27*' ' + '(I)' + 28*' ' + '*')\n print ('*' + 6*' ' + '3) Show histogram of amino acids '+ 8*' ' + '(H)' + 28*' ' + '*')\n print ('*' + 6*' ' + '4) Display Secondary Structure'+ 11*' ' + '(S)' + 28*' ' + '*')\n print ('*' + 6*' ' + '5) Export PDB File '+ 22*' ' + '(X)' + 28*' ' + '*')\n print ('*' + 6*' ' + '6) Exit '+ 33*' ' + '(Q)' + 28*' ' + '*')\n print ('*' + 55*' ' + 'Current PDB: %s ' %file_name + '*')\n print (80 * '*')", "title": "" }, { "docid": "b559ccd5b2fc271a56d1f69417096004", "score": "0.4901829", "text": "def quicklook(SMPS):\n # create particle sizer display class\n psd = PSDisplay(SMPS)\n \n # create sample slider\n sample = widgets.IntSlider(min=0, max=len(SMPS.sample['data'])-1, continuous_update = False, description = 'sample #',layout = Layout(width = '100%'))\n \n # create fields slider containing SMPS data variables\n sfields = widgets.ToggleButtons(options=SMPS.data['variables'], value = SMPS.data['variables'][0], description='Variable:', disabled=False, button_style='')\n \n # create D50 checkbox\n D50line = widgets.Checkbox(value=False,description='D50',disabled=False,indent=False, layout = Layout(width='20%'))\n \n # create boundary line checkbox\n boundaryline = widgets.Checkbox(value=False,description='Boundaries',disabled=False, indent=False, layout = Layout(width='20%'))\n \n # create periodical indicator checkbox\n indperiods = widgets.Checkbox(value=False,description='24 hour periods',disabled=False,indent=False, layout = Layout(width='20%'))\n \n # create starttime for periodical indicator text box\n starttime = widgets.Text(value='00:00:00', placeholder='HH:MM:SS', description='Start time:', disabled=False)\n \n # create count limit box\n clim = widgets.BoundedFloatText( value=2*10**4, min=1*10**1, max=1*10**8,step=1*10**1, description='clim:',\n style = {'description_width': 'initial'}, disabled=False, continuous_update=True, orientation='horizontal',\n readout=True, readout_format='.2f', layout = Layout(width='25%'))\n \n # create diameter limit box\n ylim = widgets.BoundedFloatText( value=1*10**3, min=1*10**1, max=1*10**8,step=1*10**1, description='dlim:', disabled=False, continuous_update=True, orientation='horizontal', readout=True, readout_format='.2f', layout = Layout(width='25%'))\n \n # create histogram outplot\n outplot = widgets.interactive_output(psd.histogram, {'field':sfields,'sample':sample,'add_D50':D50line,'add_boundaries':boundaryline, 'xlim': ylim,'ylim':clim}) \n \n # create heatplot outplot\n outplot2 = widgets.interactive_output(psd.plot, {'field':sfields,'clim':clim, 'ylim':ylim, 'indicator':sample,'periods':indperiods, 'starttime':starttime})\n \n # create printed date output\n date = widgets.interactive_output(psd.dateInfo,{'sample':sample})\n \n # create organised display widgets\n ui1 = widgets.HBox([sfields])\n ui2 = widgets.HBox([D50line, boundaryline, clim, ylim])\n uidate = widgets.HBox([date])\n uiperiods = widgets.HBox([indperiods, starttime])\n ui3 = widgets.VBox([ui1, ui2, outplot, sample, uidate, outplot2, uiperiods], layout = Layout(height='100%'))\n \n return ui3", "title": "" }, 
{ "docid": "9f93d5ab254c4e19bda8c011f7267793", "score": "0.4883903", "text": "def capture(rect=None, filepath='', prompt=True, hideWindow=None):\n widget = XSnapshotWidget(QApplication.desktop())\n widget.setRegion(rect)\n widget.setHideWindow(hideWindow)\n widget.setFilepath(filepath)\n widget.move(1, 1)\n widget.resize(QApplication.desktop().size())\n \n if prompt or not filepath:\n widget.show()\n else:\n widget.save()", "title": "" }, { "docid": "9bcfa9d4763be5223839a42fd5ee1d31", "score": "0.4878805", "text": "def user_input_from_gui(min_score, head_dir, max_score=\"NO\"):\n get_tables()\n global data\n cwd = os.getcwd()\n os.chdir(head_dir)\n if max_score == \"NO\":\n data_ = data[data[\"score_wo_prcnt\"] >= min_score]\n fname = str(round(min_score, 2)) + \".tsv\"\n data_.to_csv(fname, index=False, sep=\"\\t\")\n elif isinstance(max_score, int) or isinstance(max_score, float):\n data_ = data[\n (data[\"score_wo_prcnt\"] >= min_score)\n & (data[\"score_wo_prcnt\"] <= max_score)\n ]\n fname = str(round(min_score, 2)) + \"-\" + str(round(max_score, 2)) + \".tsv\"\n data_.to_csv(fname, index=False, sep=\"\\t\")", "title": "" }, { "docid": "0e39149363016fd0e877963dc1cd63e9", "score": "0.48700252", "text": "def debug_show_window(self, original_frame, current_index, frames_results):\n cv2.namedWindow(str(self.cfg.index),\n cv2.WINDOW_NORMAL | cv2.WINDOW_FREERATIO)\n frame = original_frame.copy()\n if len(frames_results):\n for rect in frames_results[0]:\n if rect[4] > self.cfg.alg['ssd_confidence']:\n # cv2.imwrite(f'data/frames/{self.cfg.index}_{current_index}.png',\n # cv2.cvtColor(original_frame, cv2.COLOR_BGR2RGB))\n # crop_frame = cv2.cvtColor(original_frame, cv2.COLOR_BGR2RGB)[int(rect[1]):int(rect[3]), int(rect[0]):int(rect[2]), :]\n # cv2.imwrite(f'/home/jt1/Desktop/crop_images/{current_index}.png', crop_frame)\n color = np.random.randint(0, 255, size=(3,))\n color = [int(c) for c in color]\n # get a square bbox, the real bbox of width and height is universal as 224 * 224 or 448 * 448\n p1, p2 = bbox_points(self.cfg, rect, original_frame.shape)\n # write text\n frame = paint_chinese_opencv(frame, '江豚', p1)\n cv2.rectangle(\n frame, (rect[0], rect[1]), (rect[2], rect[3]), color, 2)\n cv2.putText(frame, str(round(rect[4], 2)), (p2[0], p2[1]),\n cv2.FONT_HERSHEY_SIMPLEX, 2, color, 2, cv2.LINE_AA)\n cv2.imshow(str(self.cfg.index), cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n cv2.waitKey(1)", "title": "" }, { "docid": "5ac3928f2a0e1737a982082ed9593ab6", "score": "0.48533505", "text": "def show_native_ui():\n # Set up the flipbook:\n setup_flipbook()\n\n # Set the window name:\n window_name = \"animation_flipbook\"\n\n # If it already exists, kill it:\n if cmds.window(window_name, exists=True):\n cmds.deleteUI(window_name)\n\n if cmds.windowPref(window_name, exists=True):\n cmds.windowPref(window_name, remove=True)\n\n # Set up the window and the layout:\n cmds.window(window_name, title=\"Flipbook Animation Tool\")\n cmds.columnLayout(adjustableColumn=True)\n cmds.rowColumnLayout(columnAlign=(1, \"left\"), nc=1, cw=[(1, 480)])\n\n cmds.floatFieldGrp(\"set_framerange_field\",\n numberOfFields=2,\n label=\"Start Frame\",\n el=\"End Frame\",\n pre=1,\n v1=1.0,\n v2=24.0)\n\n cmds.button(w=10, label=\"Set Framerange\", command=set_framerange)\n cmds.text(label=\"\")\n\n # Select/deselect the pencil tool:\n cmds.button(w=20, label=\"Select Pencil Tool\", command=select_pencil_tool)\n cmds.button(w=20, label=\"Deselect Pencil Tool\", command=deselect_pencil_tool)\n cmds.text(label=\"\")\n\n # Set 
the page:\n cmds.button(w=20, label=\"Set page\", command=set_page)\n cmds.button(w=20, label=\"Add to page\", command=insert_page)\n cmds.button(w=20, label=\"Delete page\", command=delete_page)\n cmds.text(label=\"\")\n\n # Go to page:\n go_to_page_field = cmds.floatFieldGrp(\"go_to_page_field\",\n numberOfFields=1,\n columnWidth=(10, 10),\n columnAlign2=(\"left\", \"left\"),\n label=\"Go to frame\",\n pre=1,\n v1=1.0)\n\n cmds.button(w=20, label=\"Go to page\", command=lambda *args: go_to_page(go_to_page_field))\n cmds.text(label=\"\")\n\n cmds.button(w=20, label=\"Display Next Page\", command=display_next_page)\n cmds.button(w=20, label=\"Display Previous Page\", command=display_previous_page)\n cmds.text(label=\"\")\n\n cmds.button(w=20, label=\"Onion skin future frames\", command=display_next_pages)\n cmds.button(w=20, label=\"Onion skin past frames\", command=display_previous_pages)\n cmds.text(label=\"\")\n\n # Set the framerange:\n cmds.floatFieldGrp(\"set_loop_field\",\n numberOfFields=2,\n w=2,\n columnAlign2=(\"left\", \"left\"),\n label=\"Number of loops\",\n el=\"Step\",\n cw2=(10, 10),\n pre=1,\n v1=0.0,\n v2=0.0)\n cmds.button(w=20, label=\"Loop Selection\", command=loop_selected)\n cmds.text(label=\"\")\n\n # Playblast:\n cmds.button(w=20, label=\"Playblast\", command=playblast_scene)\n cmds.text(label=\"\")\n\n # Save:\n cmds.button(w=20, label=\"Save\", command=save_scene)\n cmds.text(label=\"\")\n cmds.setParent(\"..\")\n cmds.showWindow(window_name)", "title": "" }, { "docid": "bd3ee6a0908ca3d6441e0ca50d55db73", "score": "0.48511147", "text": "def frame_range(self):\n\t\treturn self.end - self.start + 1", "title": "" }, { "docid": "a64c9af71ca44ea8d15b16cb3a804a11", "score": "0.48430294", "text": "def _on_export(self, sender, *args):\r\n\t\t# NOTE: Ignores timerange for now\r\n\t\tif not self.nodeselector.uses_selection() and not self.nodeselector.selected_namespaces():\r\n\t\t\traise ValueError(\"Please select what to export from the scroll list\")\r\n\t\t# END handle invalid input\r\n\t\t\r\n\t\t# GET FILEPATH\r\n\t\t# on linux, only one filter is possible - it would be good to have a \r\n\t\t# capable file dialog coming from MRV ( in 2011 maybe just an adapter to \r\n\t\t# fileDialog2 )\r\n\t\tfile_path = cmds.fileDialog(mode=1,directoryMask=\"*.mb\")\r\n\t\tif not file_path:\r\n\t\t\treturn\r\n\t\t# END bail out\r\n\t\t\r\n\t\textlist = ( \".ma\", \".mb\" )\r\n\t\tcollection = [ p.basename() for p in ui.UI(self.filetype.p_collectionItemArray) ]\r\n\t\ttarget_ext = extlist[collection.index(self.filetype.p_select)]\r\n\t\t\r\n\t\tfile_path = Path(file_path)\r\n\t\tfile_path = file_path.stripext() + target_ext\r\n\t\t\r\n\t\tlib.AnimInOutLibrary.export(file_path, self.nodeselector.iter_nodes(asNode=False))", "title": "" }, { "docid": "9bcb9f82a8b64d79d9d8ac64acde5e2a", "score": "0.48208085", "text": "def display(self, *arrays, **kwargs):\n self.win = DS9Win(self.name, doOpen=True) # creates/detects the VIP ds9\n if kwargs.has_key('keepwin'):\n if kwargs['keepwin']: pass\n else: self.delete_frame(allfr=True)\n else: \n self.delete_frame(allfr=True)\n \n self.create_frame()\n self.tile()\n for i, array in enumerate(arrays):\n if i==0: \n self.win.showArray(array) \n else: \n self.create_frame()\n self.win.showArray(array)", "title": "" }, { "docid": "512d0514dd4b051bda297005f1745949", "score": "0.48152936", "text": "def createUI():\r\n #------- check to see if the window already exists and deletes if it does.-------#\r\n if(cmds.window(\"UI\",exists=True)):\r\n 
cmds.deleteUI(\"UI\")\r\n \r\n #create the window\r\n window = cmds.window(\"UI\", title=\"Audio Driven Keyframes\")\r\n \r\n #initialize the form layout\r\n form = cmds.formLayout(numberOfDivisions=100)\r\n \r\n #initialize progress bar\r\n progress = cmds.progressBar('progress')\r\n \r\n # create open button\r\n openButton = cmds.button(label=\"Open...\", command=('openFile()')) \r\n \r\n #new column layout\r\n infoCol = cmds.columnLayout()\r\n \r\n #------- if the audio has already been imported use it -------#\r\n if(cmds.objExists('audio')):\r\n \r\n global fileName, wavFile, params, frames\r\n fileName = cmds.getAttr('audio.filename')\r\n \r\n #------- set up the stream -------#\r\n wavFile = wave.open(fileName) \r\n params = wavFile.getparams() \r\n frames = wavFile.readframes(params[3])\r\n cmds.progressBar('progress', edit=True, maxValue = params[3])\r\n \r\n #------- text label for the current audio file loaded -------#\r\n currentAudioPath = cmds.text('currentAudioPath', label=\"Currently loaded \"+fileName)\r\n currentParams = cmds.text('currentParams', label=str(params))\r\n \r\n #------- otherwise just place dummy text-------# \r\n else:\r\n currentAudioPath = cmds.text('currentAudioPath', label=\"Please choose an audio file.\")\r\n currentParams = cmds.text('currentParams', label=\"\")\r\n \r\n cmds.setParent(\"..\")\r\n \r\n # create grid layout for all the variables in the form\r\n numbers = cmds.rowColumnLayout(numberOfColumns=2, columnWidth=[(1,100)])\r\n \r\n #create channel option boxes\r\n cmds.text(label=\"Channel\")\r\n cmds.radioButtonGrp('channel',\r\n nrb=3, \r\n onc=\"changeChannel()\", \r\n numberOfRadioButtons=3, \r\n labelArray3=['Left', 'Right', 'Both (Mono)'], \r\n sl=chosenChannel )\r\n \r\n #create axis option boxes\r\n cmds.text(label=\"Axis\")\r\n cmds.radioButtonGrp('axis',\r\n nrb=3, \r\n onc=\"changeAxis()\", \r\n numberOfRadioButtons=3, \r\n labelArray3=['X', 'Y', 'Z'], \r\n sl=chosenAxis )\r\n \r\n #create detail slider\r\n cmds.text(label=\"Detail\")\r\n detailSld = cmds.intSliderGrp('detail', field=True, cc=\"changeDetail()\", value=50,minValue = 1, maxValue=100)\r\n \r\n #create range row\r\n cmds.text(label=\"Range\")\r\n cmds.rowLayout(numberOfColumns=5)\r\n \r\n #min range field\r\n cmds.floatField('min', value=minMax[0])\r\n \r\n #assign current-to-min button\r\n cmds.button('assignLeft', label='<', c=\"assignLeft()\")\r\n \r\n #initialize the current field and if the object has been selected use the objects default axis value otherwise just set it to 0\r\n if(cmds.ls(sl=True)): \r\n selection = cmds.ls(sl=True)\r\n cmds.floatField('current', rfc=\"updateCurrentValue()\", value=cmds.getAttr(selection[0]+\".\"+axis[chosenAxis-1]))\r\n else:\r\n cmds.floatField('current', rfc=\"updateCurrentValue()\", value=0)\r\n \r\n #assign current-to-max button\r\n cmds.button('assignRight', label='>', c=\"assignRight()\")\r\n \r\n #max range field\r\n cmds.floatField('max', value=minMax[1])\r\n cmds.setParent(\"..\")\r\n \r\n #autolock option box\r\n cmds.text(label=\"Auto Lock\")\r\n cmds.checkBox('autoLock', label=\"\", cc=('lock()'), value=autoLock)\r\n \r\n cmds.setParent(\"..\")\r\n \r\n #make the generate and delete buttons on one row\r\n buttonsRow = cmds.rowColumnLayout(numberOfColumns=2)\r\n createButton = cmds.button(label=\"Generate Keyframes\", command=('generateKeyframes()'))\r\n deleteButton = cmds.button(label=\"Delete Keyframes\", command=('deleteKeyframes()')) \r\n \r\n \r\n cmds.setParent(\"..\") \r\n \r\n \r\n #------ put all these 
items in the form -----#\r\n cmds.formLayout(form, edit=True, attachForm=[(openButton, 'left', 10),\r\n (openButton, 'right', 10), \r\n (openButton, 'top', 10),\r\n (infoCol, 'left', 10),\r\n (infoCol, 'right', 10),\r\n (numbers, 'left', 10),\r\n (numbers, 'right', 10),\r\n (buttonsRow, 'left', 10),\r\n (buttonsRow, 'right', 10),\r\n (progress, 'left', 10),\r\n (progress, 'right', 10),\r\n (progress, 'bottom', 10),],\r\n attachControl=[(infoCol, 'top',10, openButton),\r\n (numbers, 'top', 10, infoCol),\r\n (buttonsRow, 'bottom',0, progress)])\r\n \r\n cmds.showWindow()", "title": "" }, { "docid": "1e4bea3c5ed5df5100afb785e81b0ba0", "score": "0.48067906", "text": "def plot_roi(start_frames, end_frames):\n #reads up until start_frames if start_frames !=0\n video = read_video()\n for i in range (start_frames, end_frames):\n fig, ax = plt.subplots()\n frame = video.read()[1][:,:,1]\n ax.imshow(frame[remodeled_roi_list()[i]])\n plt.ginput(10000, timeout=0, show_clicks=False)\n plt.close()", "title": "" }, { "docid": "f6121ccdfcba5c6aa3b60e659d87f76d", "score": "0.4806571", "text": "def usage(code):\n print 'Usage: python all_frames.py frame amp'\n sys.exit(code)", "title": "" }, { "docid": "b9807cbf611fb7a3f12113dfdc45bee1", "score": "0.48044258", "text": "def OpenBrowser(self): #Not currently in use\n filename1 = QFileDialog.getSaveFileName()\n self.export_layer.lineEdit.setText(filename1)", "title": "" }, { "docid": "872982095fb26eabcc94dde4983e81d7", "score": "0.4797458", "text": "def _textToFrame(self):\n if self.help_text:\n # show Frame #currect_frame : bounding_box\n annotation = \"nan,nan,nan,nan\"\n if self.bounding_box:\n annotation = self.parser.bboxString(self.bounding_box)\n elif self.pt1:\n annotation = str(self.pt1[0]) + \",\" + str(self.pt1[1]) + \",nan,nan\"\n\n cv2.putText(self.frame, \"Frame #\" + str(self.current_frame) + \" : \" + annotation, self.TEXT_ROW1_POS, cv2.FONT_HERSHEY_SIMPLEX, self.FONT_SCALE, (250, 250, 0), self.FONT_WEIGHT)\n\n # show control help\n info = \"'Enter' = next frame, 'Backspace' = previous frame, 'Q' = quit, 'H' = hide this text\"\n cv2.putText(self.frame, info, self.TEXT_ROW2_POS, cv2.FONT_HERSHEY_SIMPLEX, self.FONT_SCALE, (0, 150, 250), self.FONT_WEIGHT)\n info2 = \"'R' = reset annotation, 'P' = previous annotation, 'D' = duplicate previous annotation\"\n cv2.putText(self.frame, info2, self.TEXT_ROW3_POS, cv2.FONT_HERSHEY_SIMPLEX, self.FONT_SCALE, (0, 150, 250), self.FONT_WEIGHT)", "title": "" }, { "docid": "d35628ad47e9422a8c5f428a86bb5326", "score": "0.47954303", "text": "def takeSnapshot(fullFileName, type=-1, hideAnnotations=False):\n # show the message even if not taking a screen shot\n lm = slicer.app.layoutManager()\n # switch on the type to get the requested window\n widget = 0\n if type == slicer.qMRMLScreenShotDialog.FullLayout:\n # full layout\n widget = lm.viewport()\n elif type == slicer.qMRMLScreenShotDialog.ThreeD:\n # just the 3D window\n widget = lm.threeDWidget(0).threeDView()\n elif type == slicer.qMRMLScreenShotDialog.Red:\n # red slice window\n widget = lm.sliceWidget(\"Red\")\n elif type == slicer.qMRMLScreenShotDialog.Yellow:\n # yellow slice window\n widget = lm.sliceWidget(\"Yellow\")\n elif type == slicer.qMRMLScreenShotDialog.Green:\n # green slice window\n widget = lm.sliceWidget(\"Green\")\n else:\n # default to using the full window\n widget = slicer.util.mainWindow()\n # reset the type so that the node is set correctly\n #type = slicer.qMRMLScreenShotDialog.FullLayout\n\n if hideAnnotations:\n 
SlicerUtil.hideAllCornerAnnotations()\n # grab and convert to vtk image data\n qpixMap = qt.QPixmap().grabWidget(widget)\n # Save as a png file\n qpixMap.save(fullFileName)\n\n return fullFileName", "title": "" }, { "docid": "94c99d463efaf984a7c15d32d1f03311", "score": "0.47950166", "text": "def setRange(self,minimum,maximum):\r\n\t\tself.ProgressBar.setRange(minimum,maximum)", "title": "" }, { "docid": "9868a2245d5956648088dd004aeb79e1", "score": "0.47900984", "text": "def printRanges(self) -> unicode:\n ...", "title": "" }, { "docid": "079e42b992c92d22ad4d6978915fd7dd", "score": "0.4784321", "text": "def displayRange(lower, upper):\n if lower <= upper:\n print(lower)\n displayRange(lower + 1, upper)", "title": "" }, { "docid": "593765919e32137d37905da4bb3a091e", "score": "0.4780807", "text": "def show_frameCharlie(self, cont):\n frame = self.frames[cont]\n frame.confirmlabels()\n frame.tkraise()", "title": "" }, { "docid": "5e83dee90e67d54cb140d5aaac56152d", "score": "0.47752175", "text": "def graph_range(self):\n \n # If there is no data, tell the user and don't show the info dialog.\n if len(self.data) == 0:\n \n # Show the dialog.\n show_no_data_dialog(self, \"Graphs - %s\" % self.last_profile)\n return\n \n # Get the first and last entered dates.\n day_start = dates.split_date(self.data[0][0])\n day_end = dates.split_date(self.data[len(self.data) - 1][0])\n \n # Get a list of datetimes from the dates.\n datelist = dates.date_list_datetime(datasets.get_column(self.data, 0))\n \n # Get the starting and ending dates.\n cal_dlg = CalendarRangeDialog(self, \"Graphs in Range - %s\" % self.last_profile, day_start, day_end)\n response = cal_dlg.run()\n year1, month1, day1 = cal_dlg.start_cal.get_date()\n year2, month2, day2 = cal_dlg.end_cal.get_date()\n date1 = \"%d/%d/%d\" % (day1, month1 + 1, year1)\n date2 = \"%d/%d/%d\" % (day2, month2 + 1, year2)\n cal_dlg.destroy()\n \n # If the user did not click OK, don't continue.\n if response != Gtk.ResponseType.OK:\n return\n \n # Get the indices.\n dt_start = datetime.datetime(year1, month1 + 1, day1)\n start_index = dates.date_above(dt_start, datelist)\n dt_end = datetime.datetime(year2, month2 + 1, day2)\n end_index = dates.date_below(dt_end, datelist)\n \n # Check to make sure these dates are valid, and cancel the action if not.\n if start_index == DateValidation.INVALID:\n show_error_dialog(self, \"Graphs in Range - %s\" % self.last_profile, \"%s is not a valid date.\\n\\nThis date is not present and is not before any other dates.\" % date1)\n return\n if end_index == DateValidation.INVALID:\n show_error_dialog(self, \"Graphs in Range - %s\" % self.last_profile, \"%s is not a valid date.\\n\\nThis date is not present and is not after any other dates.\" % date2)\n return\n if end_index < start_index:\n show_error_dialog(self, \"Graphs in Range - %s\" % self.last_profile, \"The ending date must be after the starting date.\")\n return\n \n # Get the new list.\n data2 = self.data[start_index:end_index + 1]\n \n # Pass the data to the charts dialog.\n self.show_graph_generic(data = data2)", "title": "" }, { "docid": "fea6e6b5bce8e123eb5ad16856f340e1", "score": "0.47744238", "text": "def fnirs_montage_ui():\r\n source_labels = input(\"please enter sources names with the S# format: \").split()\r\n\r\n detector_labels = input(\"please enter detectors names with the D# format: \").split()\r\n\r\n Nz = input(\"please enter 3D Coordination of tip of the nose x y z in mm: \").split()\r\n for i in range(len(Nz)):\r\n Nz[i] = float(Nz[i])\r\n\r\n RPA = 
input(\"please enter 3D Coordination of the right preauricular x y z in mm: \").split()\r\n for i in range(len(RPA)):\r\n RPA[i] = float(RPA[i])\r\n\r\n LPA = input(\"please enter 3D Coordination of the left preauricular x y z in mm: \").split()\r\n for i in range(len(LPA)):\r\n LPA[i] = float(LPA[i])\r\n\r\n head_size = float(input(\"please enter the head size in mm \"))\r\n\r\n return source_labels, detector_labels, Nz, RPA, LPA, head_size", "title": "" }, { "docid": "221e1d110dd4e0c28c5db1c02b903bf7", "score": "0.47701207", "text": "def run(self):\n # show the dialog\n self.dlg.show()", "title": "" }, { "docid": "dd1fb71b9d6c35f6e3ed1866d0b0c1a4", "score": "0.4759211", "text": "def print_out(self):\n self.app.Application.PrintOut()", "title": "" }, { "docid": "76b25b5eb5b96acb2a833cdd6de29f4e", "score": "0.47543108", "text": "def show_frameAlpha(self, cont):\n #Reset all of experiment-class variables\n Appa.expnumber = str()\n Appa.exptype = str()\n Appa.exptime = int()\n Appa.savefile = str()\n Appa.expy = []\n app.startfresh() #Reinitalize all necessary pages to starting state\n frame = self.frames[cont]\n frame.tkraise() #raise to front", "title": "" }, { "docid": "85bb3d1c5b1690ff43923a6030238051", "score": "0.4752486", "text": "def set_framerange(*args):\n start_frame = cmds.floatFieldGrp(\"set_framerange_field\", query=True, v1=True)\n end_frame = cmds.floatFieldGrp(\"set_framerange_field\", query=True, v2=True)\n animationflipbook.set_framerange(start_frame, end_frame)", "title": "" }, { "docid": "d6962c2ab3daf67b52f88cff449e8150", "score": "0.4748542", "text": "def run():\n root = tk.Tk()\n root.title('Virus Spreads as per Mutation Patterns')\n root.minsize(300, 300)\n\n # This section simply sets the scene and creates the dropdown menu. The dropdown menu is tied to the region_selected variable\n mainframe = tk.Frame(master=root, height=20, width=50)\n tableFrame = tk.Frame(master=mainframe)\n regions = get_regions()\n region_selected = tk.StringVar(master=root, name='reg')\n region_selected.set(regions[0])\n dropdown = tk.OptionMenu(mainframe, region_selected, *regions)\n\n # This function generates the table for a given area. 
It's a local method so that it can access the tableFrame\n def generate_table(area, dummy1, dummy2):\n nonlocal tableFrame\n tableFrame.destroy()\n tableFrame = tk.Frame(master=mainframe)\n tableFrame.grid(row=1, column=1)\n results = get_spreads(root.getvar(area))\n titles = ('Source Region', '# of Cases from Source')\n tab = Table(tableFrame, titles, results)\n\n # Bind updates of the dropdown to the creation of tables.\n region_selected.trace_add('write', generate_table)\n\n # Place the dropdown and the frame holding the table into their appropriate grid positions.\n mainframe.pack()\n dropdown.grid(row=0, column=1)\n tableFrame.grid(row=1, column=1)\n\n root.mainloop()", "title": "" }, { "docid": "6e292408bd292f993d539b97a848669a", "score": "0.47433022", "text": "def showOptions(self):\n message = \"P: Plot generate image in plt \"\n self.render.render_text(self.ttt, message,\n (self.x_0 + 200, self.y_max + 20))\n message = \"I: Do interpolation two points\"\n self.render.render_text(self.ttt, message,\n (self.x_0 + 200, self.y_max + 40))\n message = \"O: Insert images in the latent space\"\n self.render.render_text(self.ttt, message,\n (self.x_0 + 200, self.y_max + 60))", "title": "" }, { "docid": "65d2551073a01baf013e9f2d74ae0878", "score": "0.4731846", "text": "def run(self, frames=120):\r\n anim = FuncAnimation(self.fig, self.step, blit=False, interval=100, frames=frames)\r\n # If you want to save your animations, you can comment either\r\n # of the lines below.\r\n # NOTE: FFmpeg is needed to render a .mp4 video of the animation.\r\n anim.save('/mnt/c/Users/tirth/Desktop/Searching in AI/ucs_large_with_weights.mp4')\r\n # anim.save('animation.html')\r\n # plt.show()\r", "title": "" }, { "docid": "41b953d5f1e5b8ea6fda8339468bdc9a", "score": "0.47301832", "text": "def show_frameResults(k):\n\n global img\n global moransReport\n\n # Functions for frameResults\n def load_image(k, type): \n fileName = f'reports/{type}_{str(k).replace(\".\",\"_\")}.jpg'\n img = Image.open(fileName)\n img = img.resize((353,457))\n imgTk = ImageTk.PhotoImage(img)\n return imgTk\n\n def change_image(k, type, curMap):\n \"\"\" Change the image in the results frame \"\"\"\n imgTk = load_image(k, type)\n curMap.configure(image=imgTk)\n curMap.image = imgTk\n\n def save_as_pdf(k, type):\n saveAsPath = filedialog.asksaveasfilename(initialdir=\"/\", filetypes=[(\"pdf files\", \"*.pdf\")])\n if \".pdf\" not in saveAsPath.lower():\n saveAsPath += '.pdf'\n imagePath = f'reports/{type}_{str(k).replace(\".\",\"_\")}.jpg'\n img = Image.open(imagePath)\n img.save(saveAsPath, \"PDF\")\n\n def view_ols_report(k):\n import os\n olsFile = f'{os.getcwd()}\\\\ols_reports\\\\{k}_ols.pdf'\n subprocess.Popen(olsFile, shell=True)\n\n def view_moransI_report(path):\n if path:\n subprocess.Popen(path, shell=True)\n else:\n messagebox.showerror(\"Error loading Moran's I report\", \"There was an error loading the Moran's I report.\")\n\n # Reset frameResults to clear any previous analysis runs\n for child in frameResults.winfo_children():\n child.destroy()\n\n # Show frameResults\n frameResults.place(relheight=0.9, relwidth=0.4, relx=0.55, rely=0.05)\n\n # Show map image (default to IDW map)\n img = load_image(k, \"IDW\")\n curMap = tk.Label(frameResults, image=img)\n curMap.pack()\n\n # Show button controls\n frameButtons = tk.Frame(frameResults, bg=\"black\", height=80) \n frameButtons.pack(side='bottom', fill='x')\n\n btnIDW = ttk.Button(frameButtons, text=\"Show IDW Results\", command=lambda: change_image(k, \"IDW\", curMap))\n 
btnIDW.place(x=5, rely=0.15, relwidth=0.3, relheight=0.3)\n\n btnIDWSave = ttk.Button(frameButtons, text=\"Save IDW to PDF...\", command=lambda: save_as_pdf(k, \"IDW\"))\n btnIDWSave.place(relx=0.35, rely=0.15, relheight=0.3)\n\n btnMoransI = ttk.Button(frameButtons, text=\"View Morans I\", command=lambda: view_moransI_report(moransReport))\n btnMoransI.place(relx=0.68, rely=0.15, relwidth=0.3, relheight=0.3)\n\n btnOLS = ttk.Button(frameButtons, text=\"Show OLS Results\", command=lambda: change_image(k, \"OLS\", curMap))\n btnOLS.place(x=5, rely=0.6, relwidth=0.3, relheight=0.3)\n\n btnOLSSave = ttk.Button(frameButtons, text=\"Save OLS to PDF...\", command=lambda: save_as_pdf(k, \"OLS\"))\n btnOLSSave.place(relx=0.35, rely=0.6, relwidth=0.3, relheight=0.3)\n\n btnViewOLS = ttk.Button(frameButtons, text=\"View OLS Report\", command=lambda: view_ols_report(k))\n btnViewOLS.place(relx=0.68, rely=0.6, relwidth=0.3, relheight=0.3)", "title": "" }, { "docid": "9a8a4ddb91aefd318f52ee80d6c19cec", "score": "0.47285014", "text": "def guimain():\r\n\t\r\n\ta = gui.QApplication(sys.argv)\r\n\ta.aboutToQuit.connect(a.deleteLater)\r\n\t\r\n\t# window\r\n\tw = gui.QWidget()\r\n\tw.resize(600, 400)\r\n\tw.setWindowTitle(\"TimeSheet\")\r\n\t\r\n\t# textboxes\r\n\tname_box = gui.QLineEdit(w)\r\n\tname_box.move(100, 40)\r\n\tname_box.resize(100, 24)\r\n\tname_box.setText(\"Enter Name\")\r\n\t\r\n\t# labels\r\n\twlcm = gui.QLabel(w)\r\n\twlcm.move(10, 20)\r\n\twlcm.resize(550, 30)\r\n\twlcm.setText(\"Welcome to TimeSheet. This application allows you to create\\\r\n\tyour very own table of work hours as simply as possible.\\nJust enter your\\\r\n\tinformation below and get started. Have fun spending your time on \\\r\n\tsomething useful!\")\r\n\t\r\n\tname_label = gui.QLabel(w)\r\n\tname_label.move(10, 40)\r\n\tname_label.resize(130,24)\r\n\tname_label.setText(\"Enter your name:\")\r\n\t\r\n\tcal_label = gui.QLabel(w)\r\n\tcal_label.move(10, 100)\r\n\tcal_label.resize(150,24)\r\n\tcal_label.setText(\"Choose date to begin with:\")\r\n\t\r\n\t\r\n\t#sl = gui.QAbstractSlider(w)\r\n\t#sl.move(20, 20)\r\n\t\r\n\t# checkboxes\r\n\tchbx_tot = gui.QCheckBox(w)\r\n\tchbx_tot.move(10, 60)\r\n\tchbx_tot.setText(\"Fix target hours\")\r\n\r\n\tchbx_mean = gui.QCheckBox(w)\r\n\tchbx_mean.move(10, 80)\r\n\tchbx_mean.setText(\"Fix mean target hours only\")\r\n\t\r\n\tgrp = gui.QButtonGroup(w)\r\n\tgrp.addButton(chbx_tot)\r\n\tgrp.addButton(chbx_mean)\r\n\tgrp.exclusive = True\r\n\t\r\n\t# calendar\r\n\tcal = gui.QCalendarWidget(w)\r\n\tcal.setGridVisible(True)\r\n\tcal.move(10, 130)\r\n\tcal.resize(320, 200)\r\n\t\r\n\t# buttons\r\n\tstart_button = gui.QPushButton(\"Create Timesheet\", w)\r\n\tstart_button.move(450, 350)\r\n\tstart_button.resize(100, 24)\r\n\t\r\n\t\r\n\tw.show()\t\r\n\t#times, times_months = create_timesheet(begin_date=\"15/01/01\",end_date=\"15/01/31\")\r\n\t#print_table(times,times_months)\r\n\t\r\n\tsys.exit(a.exec_())", "title": "" }, { "docid": "d679299d341087031658d97741d74a33", "score": "0.472513", "text": "def show_em_frame(frame_data):\n for datum in np.nditer(frame_data):\n ts_val = datum['ts'].item(0)\n thr_data = thr[datum['y'].item(0), datum['x'].item(0)]\n\n if datum['p'].item(0) == 0:\n thr_data.valid = 1\n thr_data.low = ts_val\n elif thr_data.valid == 1:\n thr_data.valid = 0\n thr_data.high = ts_val - thr_data.low\n\n img = 255 * (1 - (thr.high - min_val) / (val_range))\n #thr_h = cv2.adaptiveThreshold(thr_h, 255,\n #cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, 0)\n img = 
np.piecewise(img, [img <= 0, (img > 0) & (img < 255), img >= 255], [0, lambda x: x, 255])\n img = img.astype('uint8')\n cv2.imshow('img', img)\n cv2.waitKey(1)", "title": "" }, { "docid": "f21a2c9bef853768836894912d5ce76b", "score": "0.47117522", "text": "def showBrowserDialog(self):\n fileName = QFileDialog.getSaveFileName(None, 'Save output shapefile','','Shapefiles (*.shp *.SHP)')\n self.outputPath.setText(fileName)", "title": "" }, { "docid": "412eeae54df9a71f21ea0d702ff43abf", "score": "0.470262", "text": "def show_frameSquid(self, cont):\n frame = self.frames[cont]\n frame.tkraise() \n camera.start_preview(fullscreen=False, window=(appwidth/800, appheight/4, appwidth-(2*appwidth/800), appheight*9/10)) # This line starts the preview. ", "title": "" }, { "docid": "921c0f5432a83abc7c77c5413566382f", "score": "0.4699744", "text": "def show(self):\n self.plot_frame(-1)", "title": "" }, { "docid": "10a497a000c315c1144324e3a6272372", "score": "0.469705", "text": "def callback(x):\n\n interaction_window_pointer[0] = x\n try:\n\n read_frames(interaction_window_pointer, interaction_windows_size, interaction_window, video_frames_count)\n frame = interaction_window[str(interaction_window_pointer[0])]\n cur_timestamp = str(int(int(interaction_window_pointer[0]) * 1000 / 25))\n if frame is not None:\n \n # Priting BBox and Annotations over the current image\n #frame = drawBoundingBox(frame, bounding_box_data_list)\n #frame = drawAnnotations(frame, dicts, current_complex_event, current_complex_event_detected)\n #frame = resize_image(frame)\n #cv2.imshow(app_name, frame)\n\n # Showing info to CLI\n if (not(is_exporting)):\n print(\"[Visualization System] Frame: {:06}\".format(interaction_window_pointer[0]))\n print(\"[Visualization System] Second: \" + str(int(cur_timestamp) / 1000), end=\"\\r\\n\\r\\n\")\n \n except Exception as e:\n print(\"[Exception] \" + e.args)", "title": "" }, { "docid": "3dbbec52dc3513f121f71702b163176f", "score": "0.46862304", "text": "def ui_mode():\n # Create a dialog box with the features\n fields = ['Music', 'Movies', 'Horror', 'Thriller', 'Comedy',\n 'Romantic', 'Sci-fi', 'War', 'Fantasy/Fairy tales', \n 'Animated', 'Documentary', 'Western', 'Action', 'History', \n 'Mathematics', 'Physics', 'Internet', 'PC', 'Reading', \n 'Cars', 'Shopping', 'Science and technology', 'Adrenaline sports', \n 'Flying', 'Age', 'college/bachelor degree', 'currently a primary school pupil', \n 'doctorate degree', 'masters degree', 'primary school', 'secondary school', \n 'few hours a day', 'less than an hour a day', 'most of the day', 'no time at all']\n\n layout = [[sg.Text('url'), sg.Input('', key='url')],\n [sg.Text('Music'), sg.Input(5, key='Music')],\n [sg.Text('Movies'), sg.Input(5, key='Movies')],\n [sg.Text('Horror'), sg.Input(4, key='Horror')],\n [sg.Text('Thriller'), sg.Input(2, key='Thriller')],\n [sg.Text('Comedy'), sg.Input(5, key='Comedy')],\n [sg.Text('Romantic'), sg.Input(4, key='Romantic')],\n [sg.Text('Sci-fi'), sg.Input(4, key='Sci-fi')],\n [sg.Text('War'), sg.Input(1, key='War')],\n [sg.Text('Fantasy/Fairy tales'), sg.Input(5, key='Fantasy/Fairy tales')],\n [sg.Text('Animated'), sg.Input(5, key='Animated')],\n [sg.Text('Documentary'), sg.Input(3, key='Documentary')],\n [sg.Text('Western'), sg.Input(1, key='Western')],\n [sg.Text('Action'), sg.Input(2, key='Action')],\n [sg.Text('History'), sg.Input(1, key='History')],\n [sg.Text('Mathematics'), sg.Input(3, key='Mathematics')],\n [sg.Text('Physics'), sg.Input(3, key='Physics')],\n [sg.Text('Internet'), sg.Input(5, 
key='Internet')],\n [sg.Text('PC'), sg.Input(3, key='PC')],\n [sg.Text('Reading'), sg.Input(3, key='Reading')],\n [sg.Text('Cars'), sg.Input(1, key='Cars')],\n [sg.Text('Shopping'), sg.Input(4, key='Shopping')],\n [sg.Text('Science and technology'), sg.Input(4, key='Science and technology')],\n [sg.Text('Adrenaline sports'), sg.Input(4, key='Adrenaline sports')],\n [sg.Text('Flying'), sg.Input(1, key='Flying')],\n [sg.Text('Age', text_color='red'), sg.Input(20, key='Age')],\n [sg.Text('college/bachelor degree'), sg.Input(1, key='college/bachelor degree')],\n [sg.Text('currently a primary school pupil'), sg.Input(0, key='currently a primary school pupil')],\n [sg.Text('doctorate degree'), sg.Input(0, key='doctorate degree')],\n [sg.Text('masters degree'), sg.Input(0, key='masters degree')],\n [sg.Text('primary school'), sg.Input(0, key='primary school')],\n [sg.Text('secondary school'), sg.Input(0, key='secondary school')],\n [sg.Text('few hours a day'), sg.Input(1, key='few hours a day')],\n [sg.Text('less than an hour a day'), sg.Input(0, key='less than an hour a day')],\n [sg.Text('most of the day'), sg.Input(0, key='most of the day')],\n [sg.Text('no time at all'), sg.Input(0, key='no time at all')],\n [sg.Text('Prediction: ', text_color='blue', size=(40, 2), key='_PRED_')],\n [sg.RButton('Predict'), sg.Exit()]]\n\n window = sg.Window('Enter url and feature values', layout)\n while True:\n event, values = window.Read()\n predictions = str(get_multiple_predictions(values, fields))\n predictions.replace('\"', '')\n if event == 'Predict':\n print('entered predict event')\n # change the \"output\" element to be the value of \"input\" element\n window.Element('_PRED_').Update('Age: [' + values['Age'] + ']\\n' + \n 'Prediction: ' + predictions)\n if event is None or event == 'Exit':\n break\n\n window.Close()", "title": "" }, { "docid": "17789147d32666fe3ff02f17a831f625", "score": "0.4681595", "text": "def file_dialog(self):\n file = QtGui.QFileDialog.getOpenFileName(self, 'Open Texture', '')\n\n if file != '' and file is not None:\n self.lineEdit.setText(file)\n\n self.update()", "title": "" }, { "docid": "947c70c4424b01907ffe3bced1e6ae7a", "score": "0.4673959", "text": "def create_plot_html_page(msid, descrip, pout):\n#\n#--- read javascript file\n#\n jfile = house_keeping + 'java_script_deposit'\n f = open(jfile, 'r')\n jscript = f.read()\n f.close()\n\n #file_name = msid + '_limit_table'\n file_name = './Limit_table/' + msid + '_limit_table.html'\n#\n#--- start creating html page\n#\n out = '<!DOCTYPE html>\\n<html>\\n<head>\\n\\t<title>Envelope Trending Plots: ' + msid.upper() + '</title>\\n'\n out = out + jscript + '\\n'\n out = out + '<style>\\n'\n out = out + 'body{width: 600px; height300px; background-color:#FAEBD7;\\n'\n out = out + 'font-family:Georgia, \"Times New Roman\", Times, serif;}\\n'\n out = out + '</style>\\n'\n\n out = out + '</head>\\n<body style=\"width:95%;margin-left:10px; margin-right;10px;background-color:#FAEBD7;'\n out = out + 'font-family:Georgia, \"Times New Roman\", Times, serif\">\\n\\n'\n out = out + '<a href=\"' + web_address + 'envelope_main.html\" '\n out = out + 'style=\"float:right;padding-right:50px;font-size:120%\"><b>Back to Top</b></a>\\n'\n\n if descrip == '':\n out = out + '<h2>' + msid.upper() + '</h2>'\n else:\n out = out + '<h2>' + msid.upper() + ' (' + descrip.upper() + ')</h2>' \n\n out = out + '<div style=\"paddng-top:10px\"><h3>'\n out = out + 'Open <a href=\"javascript:popitup(\\'' + file_name + '\\')\" style=\"text-align:right\">Limit 
Table</a>.'\n out = out + '</h3>\\n'\n#\n#--- add the interactive plot here\n#\n if (pout == False) or (str(pout) == 'na'):\n out = out + '<h3 style=\"padding-top:200px;padding-bottom:200px\">No Data/No Plot</h3>'\n else:\n out = out + pout\n#\n#--- add the rest\n#\n out = out + '<ul><li style=\"font-size:80%\">Click the magnifier icon and choose the area to enlarge the area.</li>'\n out = out + '<li style=\"font-size:80%\">Click the cross icon and hold the button to move around the area.</li>'\n out = out + '<li style=\"font-size:80%\">Click the cross icon and then use the roller to zoom in and out.</li>'\n out = out + '<li style=\"font-size:80%\">Click the house icon to go back to the full view.</li>'\n out = out + '<li style=\"font-size:80%\">After enlarging the area with the magnifier,'\n out = out + ' use the cross icon to see the values of each data point.</li></ul>'\n\n [lout, gname] = get_group_names(msid)\n if lout != '':\n out = out + '<h3>Other msids in this group: ' + gname + '</h3>'\n out = out + lout\n\n out = out + '<div style=\"padding-top:30px\"></div>'\n out = out + '<hr /><p style=\"text-align:left; padding-top:10px;padding-bottom:20px\">'\n out = out + 'If you have any questions, please contact '\n out = out + '<a href=\"mailto:[email protected]\">[email protected]</a>.'\n out = out + '\\n\\n\\n</body>\\n</html>\\n'\n#\n#--- write out the html data\n#\n name = web_dir + msid + '_plot.html'\n fo = open(name, 'w')\n fo.write(out)\n fo.close()", "title": "" }, { "docid": "6fad818a5bf85b40910f30d948556f6a", "score": "0.46726665", "text": "def make_interactive_plot(self):\n self.change_pars_to_fit_button = Button(\n description=\"Change Parameters to Fit Result\",\n layout = Layout(width = '300px', margin = '0 0 5ps 0')\n ) \n\n self.reset_slider_lims_button = Button(\n description=\"Reset Slider Max\",\n layout = Layout(width = '300px', margin = '0 0 5ps 0')\n ) \n\n self.data_init_widget = Dropdown(\n options=list(np.arange(0,len(self.isub))), \n value = 0,\n description='Dataset',\n style = {'description_width': 'initial'},\n disabled=False,\n layout = Layout(width = '200px', margin = '0 0 5ps 0')\n )\n\n self.reset_slider_widget = Dropdown(\n options= [par for par in self.ctrl_pars.keys() if self.ctrl_pars[par]], \n value = None,\n description='Slider Reset',\n style = {'description_width': 'initial'},\n disabled=False,\n layout = Layout(width = '200px', margin = '0 0 5ps 0')\n ) \n\n self.wlim = {}\n self.wlim[self.spectra_object.orbital] = FloatRangeSlider (\n value=[np.min(self.esub), np.max(self.esub)],\n min = np.min(self.esub),\n max =np.max(self.esub),\n step = 0.01,\n description = self.spectra_object.orbital+'_xlim',\n style = {'description_width': 'initial'},\n layout = Layout(width = '300px', margin = '0 0 5ps 0')\n ) \n if self.compound:\n for orbital in self.connected_spectra.keys():\n self.wlim[orbital] = FloatRangeSlider (\n value=[np.min(self.connected_spectra[orbital].esub), np.max(self.connected_spectra[orbital].esub)],\n min = np.min(self.connected_spectra[orbital].esub),\n max =np.max(self.connected_spectra[orbital].esub),\n step = 0.01,\n description = orbital+'_xlim',\n style = {'description_width': 'initial'},\n layout = Layout(width = '300px', margin = '0 0 5ps 0')\n ) \n\n out = Output()\n display(out)\n\n @self.change_pars_to_fit_button.on_click\n def plot_on_click(b):\n with out:\n if not self.compound:\n self.spectra_object.params = self.spectra_object.fit_results[self.data_init_widget.value].params.copy()\n for pars in 
self.paramwidgets.keys():\n self.paramwidgets[pars].update_widget_group(self.spectra_object.params[pars])\n elif self.compound:\n self.compound_object.params = self.compound_object.fit_results[self.data_init_widget.value].params.copy()\n for pars in self.paramwidgets.keys():\n self.paramwidgets[pars].update_widget_group(self.compound_object.params[pars]) \n\n @self.reset_slider_lims_button.on_click\n def plot_on_click(b):\n with out:\n if self.reset_slider_widget.value !=None:\n self.paramwidgets[self.reset_slider_widget.value].ctrl_slider.max = 2*self.paramwidgets[self.reset_slider_widget.value].ctrl_slider.value\n\n # Create the interactive plot, then build the slider/graph parameter controls\n plotkwargs = {**{pw.name:pw.ctrl_slider for pw in self.paramwidgets.values() if hasattr(pw,'ctrl_slider')},\\\n **{plotlim.description:plotlim for plotlim in self.wlim.values()}}\n self.intplot = interactive(self.interactive_plot,**plotkwargs)\n \n vb = VBox(self.intplot.children[0:-1])\n vb2 = VBox([HBox([VBox([self.data_init_widget,self.reset_slider_widget]),VBox([self.change_pars_to_fit_button,self.reset_slider_lims_button])]),self.intplot.children[-1]])\n hb = HBox([vb,vb2])\n \n display(hb)", "title": "" }, { "docid": "a718204b6da7aaf9a338c82cb3c1b555", "score": "0.46719527", "text": "def window(theWidth, theHeight, theTitle):\n\tprint('This is wrapper function to help code autocompletion.')", "title": "" }, { "docid": "43e4ff18486c1579c62a8e2f9765e2eb", "score": "0.46719238", "text": "def _frame_out(self, *args):\n if len(args) == 2:\n self._display.frame(args[0], args[1].frame)\n elif len(args) == 1:\n if type(args[0]) is int:\n return Frame(self.frame_width(), self.frame_height(),\n self._display.frame(args[0]))\n else:\n self._display.frame(args[0].frame)\n else:\n return Frame(self.frame_width(), self.frame_height(),\n self._display.frame())", "title": "" }, { "docid": "83d66882651ac3c3846c7bba9ea517ab", "score": "0.466906", "text": "def main():\n st.title(\"BioInformatics Web App\")\n\n #activity = ['Intro','DNA','DotPlot',\"About\"]\n # choice = st.sidebar.selectbox(\"Select Activity\",activity)\n # if choice == 'Intro':\n\t# st.subheader(\"Intro\")\n # elif choice == \"DNA Sequence\":\n\t# st.subheader(\"DNA Sequence Analysis\")\n\n #input sequence\n seq_input = st.text_input('Input Sequence')\n\n #check validity of sequence entered\n validateSeq(seq_input)\n\n details = st.radio(\"Functions\",(\"Sequence Length\",\n \"Frequency of each Nucleotide\",\n \"Transcription of DNA\", \n \"Reverse Transcription\", \n \"Find Complementory Strand\",\n \"GC Content Percentage\"))\n if details == \"Sequence Length\":\n st.write(len(seq_input))\n\n elif details == \"Frequency of each Nucleotide\":\n st.subheader(\"Nucleotide Frequency\")\n seq_input = Counter(seq_input)\n st.write(seq_input)\n adenine_color = st.color_picker(\"Adenine Color\")\n thymine_color = st.color_picker(\"thymine Color\")\n guanine_color = st.color_picker(\"Guanine Color\")\n cytosil_color = st.color_picker(\"cytosil Color\")\n\n if st.button(\"Plot Freq\"):\n barlist = plt.bar(seq_input.keys(),seq_input.values())\n barlist[2].set_color(adenine_color)\n barlist[3].set_color(thymine_color)\n barlist[1].set_color(guanine_color)\n barlist[0].set_color(cytosil_color)\n\n st.pyplot()\n \n elif details == \"Transcription of DNA\":\n st.write(transcription(seq_input))\n\n elif details == \"Reverse Transcription\":\n st.write(reversetranscription(seq_input))\n\n elif details == \"Find Complementory Strand\":\n st.text(f\" DNA 
String + Complement + Reverse Complement: \\n 5'{seq_input} 3' \\n {''.join(['|' for c in range(len(seq_input))])} \\n 3'{reverse_complement(seq_input)[::-1]} 5'[Complement] \\n 5'{reverse_complement(seq_input)} 3'[Reverse Complement]\")\n\n elif details == \"GC Content Percentage\":\n st.write(f\" {gc_content(seq_input)}%\")", "title": "" }, { "docid": "8fbe416b60c11f12b7be979cf35f906d", "score": "0.466795", "text": "def onselect(vmin, vmax):\n windows.append(float(Decimal(\"%.2f\" % vmin)))\n windows.append(float(Decimal(\"%.2f\" % vmax)))\n plot_windows(windows)\n plt.draw()", "title": "" }, { "docid": "ddf645714b43146b402a1481a3feaafa", "score": "0.46650136", "text": "def interactive_numerical_plot(df_num,df_Y):\n from ipywidgets import HBox,Checkbox,FloatRangeSlider,VBox,ToggleButton,interactive_output,Dropdown\n from IPython.display import display\n\n def plot_num_and_save(xlimit,save_but,col,clip_box,clip_limit):\n nonlocal df_num, df_Y\n plt.close('all')\n\n if clip_box:\n clip_df_num = df_num.copy()\n clip_df_num.loc[clip_df_num[col]>clip_limit[1],col] = np.nan\n clip_df_num.loc[clip_df_num[col]<clip_limit[0],col] = np.nan\n else:\n clip_df_num = df_num\n\n# for i,col in zip(range(clip_df_num[col].shape[1]),clip_df_num[col]):\n fig,ax = plt.subplots(1,1,figsize=(10,5))\n sns.kdeplot(clip_df_num[col][df_Y == 0], label = 'label0').set_title(clip_df_num[col].name)\n sns.kdeplot(clip_df_num[col][df_Y == 1], label = 'label1')\n ax.set_xlim(xlimit[0],xlimit[1])\n plt.show()\n\n if save_but:\n fig.savefig('./plots/{}.png'.format(clip_df_num[col].name), bbox_inches='tight')\n\n xlimit = FloatRangeSlider(value = [df_num.iloc[:,1].min(),df_num.iloc[:,1].max()],min=df_num.iloc[:,1].min(),\n max=df_num.iloc[:,1].max(),step=(df_num.iloc[:,1].max()-df_num.iloc[:,1].min())/100,\n continuous_update=False,description='X_limit')\n save_but = ToggleButton(description='Save Figure')\n col = Dropdown(options=df_num.columns.tolist())\n clip_box = Checkbox(value=False,description='Clip ?')\n clip_limit = FloatRangeSlider(value = [df_num.iloc[:,1].min(),df_num.iloc[:,1].max()],min=df_num.iloc[:,1].min(),\n max=df_num.iloc[:,1].max(),step=(df_num.iloc[:,1].max()-df_num.iloc[:,1].min())/100,\n continuous_update=False,description='X_limit')\n\n\n out = interactive_output(plot_num_and_save,{\n 'xlimit' : xlimit,\n 'save_but':save_but,\n 'col' : col,\n 'clip_box':clip_box,\n 'clip_limit':clip_limit\n })\n# save_but = Button(description='Save Fig')\n vbox1 = VBox([xlimit,save_but,col,clip_box,clip_limit])\n ui = HBox([vbox1,out])\n display(ui)\n\n def on_click(change):\n change['owner'].value = False\n\n def on_click_case(change):\n try:\n xlimit.min = df_num[change['new']].min()\n xlimit.max = df_num[change['new']].max()\n clip_limit.min = df_num[change['new']].min()\n clip_limit.max = df_num[change['new']].max()\n\n except:\n xlimit.max = df_num[change['new']].max()\n xlimit.min = df_num[change['new']].min()\n clip_limit.max = df_num[change['new']].max()\n clip_limit.min = df_num[change['new']].min()\n\n xlimit.step = (df_num[change['new']].max() - df_num[change['new']].min())/100\n xlimit.value = [df_num[change['new']].min(),df_num[change['new']].max()]\n clip_limit.step = (df_num[change['new']].max() - df_num[change['new']].min())/100\n clip_limit.value = [df_num[change['new']].min(),df_num[change['new']].max()]\n\n save_but.observe(on_click, 'value')\n col.observe(on_click_case, 'value')", "title": "" }, { "docid": "d1ce84ef174cc08a45fee80456226f00", "score": "0.46647978", "text": "def pop_gui(self):\n 
self.layout.addWidget(QLabel('Confirm properties each save?'), self.y, 0)\n groupbox = self.add_radio(name='confirm', defval=True)\n self.layout.addWidget(groupbox, self.y, 1)\n self.y += 1\n\n self.layout.addWidget(QLabel('Save frames?'), self.y, 0)\n groupbox = self.add_radio(name='save_frames', defval=self.params['save']['save_frames'])\n self.layout.addWidget(groupbox, self.y, 1)\n self.y += 1\n\n self.layout.addWidget(QLabel('Clear data after save?'), self.y, 0)\n groupbox = self.add_radio(name='clear', defval=self.params['save']['clear'])\n self.layout.addWidget(groupbox, self.y, 1)\n self.y += 1\n\n self.layout.addWidget(QLabel('filename?'), self.y, 0)\n textedit = QLineEdit('')\n if 'filename' in self.params['save']:\n defval = self.params['save']['filename']\n textedit.setText(str(defval))\n textedit.name = 'filename'\n textedit.textChanged.connect(self.text_clicked)\n self.layout.addWidget(textedit, self.y, 1)\n self.y += 1\n\n self.layout.addWidget(QLabel('Confirm frames rate [fps]?'), self.y, 0)\n textedit = QLineEdit('')\n if 'fps' in self.params['improcess']:\n defval = self.params['improcess']['fps']\n textedit.setText(str(defval))\n textedit.name = 'fps'\n textedit.textChanged.connect(self.text_clicked)\n self.layout.addWidget(textedit, self.y, 1)\n self.y += 1\n\n self.layout.addWidget(QLabel('Confirm pixel size [microns]?'), self.y, 0)\n textedit = QLineEdit('')\n if 'pixel_size' in self.params['improcess']:\n defval = self.params['improcess']['pixel_size']\n if defval is not None:\n textedit.setText(str(defval))\n textedit.name = 'pixel_size'\n textedit.textChanged.connect(self.text_clicked)\n self.layout.addWidget(textedit, self.y, 1)\n self.y += 1", "title": "" }, { "docid": "a8de185a1b169065304f2b335a1aaec2", "score": "0.4658181", "text": "def export_window():\n\n current_courses = [\"EECS 1011\", \"ENG 1101\", \"MATH 1013\", \"MATH 1025\",\n \"PHYS1800\"]\n\n current_times = []\n with shelve.open(\"course_times.db\") as db:\n print(db[\"MATH 1025\"])\n for course in current_courses:\n current_times.append(db[course])\n\n layout = []\n\n i = 0\n for course in current_courses:\n layout.append([sg.Text(course), sg.Text(str(current_times[i]))])\n i += 1\n\n layout.append([sg.Button(\"Close\", key=\"close\")])\n\n window = sg.Window(\"Window 1\", layout)\n\n while True:\n event, values = window.read()\n if event == None or event == \"close\":\n break\n\n window.close()", "title": "" }, { "docid": "90dfb0628baba5b2ab2f924331f2b424", "score": "0.46561182", "text": "def select_frame(self):\n self.time_suppress()\n \n #select a decision making window\n bbox_slice = self.bbox_buffer[-self.timeWindow_selectFrame:] # returns a slice of size of window\n frame_slice = self.frame_buffer[-self.timeWindow_selectFrame:]\n \n\n if self.is_shovel_idle(frame_slice):\n return None, None, None, None\n\n\n # main logic of frame selection\n else: \n selection_scores = self.compute_selection_scores(bbox_slice)\n\n sorted_inds, numberOfNoneScores = self.argsort(selection_scores)\n\n\n numberOfValidScores = len(sorted_inds) - numberOfNoneScores\n \n if numberOfValidScores < self.minNumber_validScoresToSlectFrame:\n return None, None, None, None\n \n\n\n #Pick the highest Score\n ind_selected_max = sorted_inds[-1]\n \n #Pick the 90thPercentile Score\n ninetieth_percentile_ind = int(round(numberOfValidScores * 0.9)) + numberOfNoneScores - 1\n ind_selected = sorted_inds[ninetieth_percentile_ind]\n\n\n\n bbox_selected = bbox_slice[ind_selected]\n frame_selected = frame_slice[ind_selected]\n 
self.frames_selected.append(frame_selected)\n \n frame_selected_max = frame_slice[ind_selected_max]\n\n return frame_selected, bbox_selected, ind_selected, ind_selected_max, frame_selected_max", "title": "" }, { "docid": "87c07eb6112eaa14d0f94d06e36980c4", "score": "0.4650582", "text": "def _onClickSetRange(self) -> None:\n\n valueRange, okClicked = SetValueRangeDialog.getValueRange(self, initialValue=self.value())\n if okClicked:\n self.setValue(valueRange)\n self.valueChangedByUser[Multivalue].emit(valueRange)", "title": "" }, { "docid": "03135e385f22bfeae854b67127e14613", "score": "0.46481696", "text": "def __init__(self):\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-p\", \"--path-to-frames\", help=\"The absolute path to the frames. For example: /home/USER/.icons/status_icons/my_frames/ The frames must follow this naming convention: frame-<NUMBER>.<FORMAT>\\nWhere <NUMBER> starts at 0 and is incremented by 1 for the next frame and so on. <FORMAT> will default to png if no format is supplied\\nFor example a 3 frame PNG sequence should have the filenames: frame-0.png, frame-1.png, frame-2.png\")\n parser.add_argument(\"-f\", \"--filetype\", help=\"Specify the image format for the frames. Valid options: png, jpg, jpeg. Default is png.\")\n parser.add_argument(\"-t\", \"--time-per-frame\", help=\"Specify the time for each frame to be shown in milliseconds. Default is 300ms.\", type=int)\n parser.add_argument(\"-b\", \"--blank-icon\", help=\"Creates a blank icon in the system tray to act as a spacer.\", action=\"store_true\")\n args = parser.parse_args()\n\n if args.blank_icon: # icon is already blank\n return\n\n if args.path_to_frames == None:\n print(\"Absolute path to the frames must be specified with the -p option.\\nAborting...\")\n exit(-1)\n\n if args.filetype == \"jpg\":\n self.frame_format = \".\" + args.filetype\n elif args.filetype == \"jpeg\":\n self.frame_format = \".\" + args.filetype\n \n self.num_of_frames = len(fnmatch.filter(os.listdir(args.path_to_frames), self.frame_filename_prefix + \"*\" + self.frame_format)) # count number of frames in the supplied directory\n if self.num_of_frames == 0:\n print(\"Error: No suitable frames found in \" + args.path_to_frames + \"\\nAborting...\")\n exit(-1)\n\n if args.time_per_frame:\n self.frame_time = args.time_per_frame\n if self.frame_time < 10:\n print(\"Warning: A low time per frame value will lead to high CPU usage.\")\n\n frame_path_prefix = args.path_to_frames + self.frame_filename_prefix\n for i in range(0, self.num_of_frames + 1): # setup correct path names\n self.frame_paths.append(frame_path_prefix + str(i) + self.frame_format)\n\n if self.num_of_frames == 1: # we don't need callbacks if only one frame has been supplied so just set icon and return\n self.icon.set_from_file(self.frame_paths[0])\n return\n\n gobject.timeout_add(self.frame_time, self.update_frame) # callback from gtk.main()\n return", "title": "" }, { "docid": "188219dc773415de1076a762fa4d3b3c", "score": "0.46479467", "text": "def main_display():\n #Display:\n print '################################################################'\n print '# XNATUPLOAD #'\n print '# #'\n print '# Developed by the masiLab Vanderbilt University, TN, USA. 
#'\n print '# If issues, please start a thread here: #'\n print '# https://groups.google.com/forum/#!forum/vuiis-cci #'\n print '# Usage: #'\n print '# Upload data to XNAT following the csv file information #'\n print '# Parameters : #'\n if vars(OPTIONS) == DEFAULT_ARGUMENTS:\n print '# No Arguments given #'\n print '# See the help bellow or use \"Xnatupload -h\" #'\n print '################################################################'\n PARSER.print_help()\n sys.exit()\n else:\n if OPTIONS.host:\n print '# %*s -> %*s#' %(-20, 'XNAT Host', -33, get_proper_str(OPTIONS.host))\n if OPTIONS.username:\n print '# %*s -> %*s#' %(-20, 'XNAT User', -33, get_proper_str(OPTIONS.username))\n if OPTIONS.printmodality:\n print '# %*s -> %*s#' %(-20, 'Print Modality', -33, 'on')\n else:\n if OPTIONS.csvfile:\n print '# %*s -> %*s#' %(-20, 'CSV file', -33, get_proper_str(OPTIONS.csvfile))\n if OPTIONS.session_type:\n print '# %*s -> %*s#' %(-20, 'Session Type', -33, get_proper_str(OPTIONS.session_type))\n if OPTIONS.report:\n print '# %*s -> %*s#' %(-20, 'Report', -33, 'on')\n if OPTIONS.force:\n print '# %*s -> %*s#' %(-20, 'Force Upload', -33, 'on')\n if OPTIONS.delete:\n print '# %*s -> %*s#' %(-20, 'Delete resources', -33, 'on')\n if OPTIONS.deleteAll:\n print '# %*s -> %*s#' %(-20, 'Delete All', -33, 'on')\n if OPTIONS.extract:\n print '# %*s -> %*s#' %(-20, 'Extract ZIP', -33, 'on')\n if OPTIONS.outputfile:\n print '# %*s -> %*s#' %(-20, 'Output file', -33, get_proper_str(OPTIONS.outputfile))\n print '################################################################'\n print \"IMPORTANT WARNING FOR ALL USERS ABOUT XNAT:\"\n print \" session_label needs to be unique for each session.\"\n print \" Two subjects can NOT have the same session_label\"\n print '================================================================'", "title": "" }, { "docid": "0cd70927af0b840bf67eede36625f40d", "score": "0.46462128", "text": "def scanner_display_scanpoints(self):\n # Get local arguments\n points_label = self.get_obj('scanner_scan_points_label')\n rows_label = self.get_obj('scanner_scan_rows_label')\n time_label = self.get_obj('scanner_scan_time_label')\n\n points_label.set_markup(self.format_dro_string('Points: {0}'.format(self.scanner.scene.points_count),11))\n rows_label.set_markup(self.format_dro_string('Rows: {0} , Columns: {1}'.format(self.scanner.scene.rows,self.scanner.scene.columns),11))\n time_data=datetime.timedelta(seconds=round(self.scanner.scene.estimated_time))\n time_label.set_markup(self.format_dro_string('Estimated Time: {0}'.format(time_data),11))", "title": "" }, { "docid": "19a62117bbd3045e503ebd340b734182", "score": "0.46367753", "text": "def confirmButtonClicked(self):\n\n self.showPopupUnmarkedPaperInput()\n\n self.problemAmount = int(self.problemNumInput.text()) # 시험의 문제 갯수\n self.testpaperAmount = int(self.paperNumInput.text()) # 한 시험지 세트의 총 페이지 수\n self.gradeWithOCR = self.check_useOCR.isChecked() # OCR로 주관식 채점 여부\n\n fileLocs = []\n while True:\n fname = QFileDialog.getOpenFileName() # 비 마킹 시험지들의 파일 읽기\n if fname[0] != '': # 아직 읽을 파일이 들어온 경우\n fileLocs.append(fname[0])\n else: # 읽을 파일이 더 없는 경우 - 루프 종료\n break\n\n self.showPopupEdgeInstruction_1()\n\n counter = 0 # 임시 변수\n for imageLoc in fileLocs: # 각 시험지 이미지마다\n\n src = cv2.imread(imageLoc, cv2.IMREAD_COLOR)\n height = src.shape[0] # 시험지 이미지 높이\n width = src.shape[1] # 시험지 이미지 너비\n\n # 시험지가 너무 커 처리가 힘든 경우, 리사이징\n if height >= width:\n resizeScale = 1000 / height\n else:\n resizeScale = 1000 / width\n src = cv2.resize(src, 
(int(width * resizeScale), int(height * resizeScale)), interpolation=cv2.INTER_AREA)\n\n print(\"Changed dimensions : \", src.shape)\n\n height, width, channel = src.shape\n\n cv2.imshow(\"Automatic Scoring Program\", src)\n cv2.setMouseCallback('Automatic Scoring Program', self.mouseCallbackSpot)\n\n print(\"Click 4 spot of the image, starting from left-upper side, clockwise\")\n print(\"After that, press any key\")\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n print(self.clickCoordinates)\n\n srcPoint = np.array(self.clickCoordinates, dtype=np.float32)\n self.clickCoordinates = []\n\n # 시험지의 각 4개 꼭짓점을 지정하고, warping 진행\n dstPoint = np.array([[0, 0], [width, 0], [width, height], [0, height]], dtype=np.float32)\n matrix = cv2.getPerspectiveTransform(srcPoint, dstPoint)\n warpedUnmarkedPaper = cv2.warpPerspective(src, matrix, (width, height))\n cv2.imshow(\"Automatic Scoring Program\", warpedUnmarkedPaper)\n cv2.waitKey(0)\n\n # 리사이징한 시험지 파일 저장\n cv2.imwrite('./buffer/unprocessedBlankPaper_{}.jpg'.format(counter), warpedUnmarkedPaper)\n\n # 마킹 안 된 시험지 Blur, 흑백화 등 이미지 정제\n # convert the images to grayscale\n unmarkedPaper = cv2.cvtColor(warpedUnmarkedPaper, cv2.COLOR_BGR2GRAY)\n\n # blur\n for i in range(10):\n unmarkedPaper = cv2.GaussianBlur(unmarkedPaper, (7, 7), 0)\n\n cv2.imwrite('./buffer/processedBlankPaper_{}.jpg'.format(counter), unmarkedPaper)\n\n cv2.destroyAllWindows()\n\n counter = counter + 1\n\n self.Form.hide()\n\n #문제 설정 창 띄우기\n self.problemSettingWindow = QtWidgets.QWidget()\n self.problemSettingWindowUI = UI_ProblemSetting()\n self.problemSettingWindowUI.setupUi(self.problemSettingWindow, [], self.problemAmount, self.testpaperAmount, self.gradeWithOCR)\n self.problemSettingWindow.show()", "title": "" }, { "docid": "d8174387bb1deea347479b087489dac6", "score": "0.4636768", "text": "def Pump_Durations_Plots():\n plt.clf()\n VALUES = []\n where_clause = (\n \"(FLOW_RATE is not NULL) AND \"\n \"(FLOW_RATE > 0) AND \"\n \"(FLOW_RATE <= 100) AND \"\n \"(DURATION is not NULL) AND \"\n \"(DURATION > 0) AND\"\n \"(DURATION <= 12) AND\"\n \"(START_MEAS is not NULL) AND \"\n \"(START_MEAS > 0) AND \"\n \"(PUMP_MEAS is not NULL) AND \"\n \"(PUMP_MEAS > 0)\"\n )\n with arcpy.da.SearchCursor(CWIPL, ['FLOW_RATE', 'DURATION', 'START_MEAS',\\\n 'PUMP_MEAS'], where_clause) as cursor:\n for row in cursor:\n down = row[3] - row[2]\n if down > 0 and down < 100 and row[3] <=200:\n stuff = [row[0], row[1], row[3], down]\n VALUES.append(stuff)\n DUR_DATA = [i[1] for i in VALUES]\n PUMP_DATA = [i[0] for i in VALUES]\n Water_Level = [i[2] for i in VALUES]\n Drawdown_data = [i[3] for i in VALUES]\n plt.figure(1)\n plt.hist(DUR_DATA, bins = 48, label = 'Duration')\n plt.xlim([0, 12])\n plt.ylim([0, 125000])\n plt.minorticks_on()\n plt.xticks(fontsize = 24)\n plt.yticks(fontsize = 24)\n plt.xlabel('Duration [hours]', fontsize = 30)\n plt.ylabel('Number of entries', fontsize = 30)\n plt.grid(True)\n \n plt.figure(2)\n plt.hist(PUMP_DATA, bins = 100, label = 'Pump Rata Data')\n plt.minorticks_on()\n plt.xlim([0, 100])\n plt.ylim([0, 60000])\n plt.xticks(fontsize = 24)\n plt.yticks(fontsize = 24)\n plt.xlabel('Pump Rate [GPM]', fontsize = 30)\n plt.ylabel('Number of entries', fontsize = 30)\n plt.grid(True)\n \n plt.figure(3)\n plt.hist(Water_Level, bins = 200, label = 'Static Level Data', edgecolor = 'k')\n plt.minorticks_on()\n plt.xticks(fontsize = 24)\n plt.yticks(fontsize = 24)\n plt.xlabel('Static Water Level [ft]', fontsize = 30)\n plt.ylabel('Number of entries', fontsize = 30)\n plt.grid(True)\n \n 
plt.figure(4)\n plt.hist(Drawdown_data, bins = 100, label = 'Drawdown Data', color = 'g' , edgecolor = 'k')\n plt.minorticks_on()\n# plt.xlim([0, 100])\n# plt.ylim([0, 100000])\n plt.xticks(fontsize = 24)\n plt.yticks(fontsize = 24)\n plt.xlabel('Drawdown [ft]', fontsize = 30)\n plt.ylabel('Number of entries', fontsize = 30)\n plt.grid(True)\n \n fig, ax = plt.subplots(1,1)\n plt.figure(5)\n plt.grid(which='minor', linestyle=':', linewidth='0.5', color='gray',\\\n zorder = 0)\n plt.grid(which='major', linestyle='-', linewidth='0.5', color='black',\\\n zorder = 0)\n plt.scatter(PUMP_DATA, DUR_DATA, zorder = 3)\n plt.xticks(fontsize = 24)\n plt.yticks(fontsize = 24)\n plt.minorticks_on()\n plt.xlim([0,101])\n plt.ylim([0,12.25])\n plt.xlabel('Pumping Rates [GPM]', fontsize = 30)\n plt.ylabel('Duration of Test [hours]', fontsize = 30)\n ax.xaxis.set_major_locator(MultipleLocator(20))\n ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))\n ax.xaxis.set_minor_locator(MultipleLocator(10))\n ax.yaxis.set_major_locator(MultipleLocator(1))\n #ax.axis.set_major_formatter(FormatStrFormatter('%d'))\n ax.yaxis.set_minor_locator(MultipleLocator(0.5))\n ax.set_axisbelow(True)", "title": "" }, { "docid": "4d5d7493607b700952d1cb63e7e3da5b", "score": "0.46324617", "text": "def show_frameLima(self, cont, chosenexp):\n result = True \n result2 = True\n global Momo # Create variable to store experiment object\n Momo = getobject(chosenexp)\n \n # Case of analyzing control\n if Momo.iscontrol: \n if Momo.exptype != \"0\": # Not first time analyzing\n result2 = tkMessageBox.askquestion(\"Warning\", \"This control experiment has already\\nbeen analyzed as %s.\\nChanges may overwrite exisiting data.\\nProceed anyways?\" % getpreviouslyanalyzed(Momo))\n if result2 != \"no\": # First time analyzing or want to override\n frame = self.frames[AnalysisTypeForNone] # Go to AnalysisTypeForNone to get type to analyze as\n for button in [frame.thermobutton, frame.chemobutton, frame.photobutton]:\n if getpreviouslyanalyzed(Momo) == button['text']: # Turn on button if that was the previous analysis\n button.state([\"focus\",\"selected\"])\n else:\n button.state([\"!focus\",'!selected'])\n frame.tkraise()\n\n # Case of not control\n else: \n if Momo.expy[0] != \"\": # Already analyzed\n result = tkMessageBox.askquestion(\"Warning\", \"The selected experiment has already been analyzed.\\nChanges may overwrite exisiting data.\\nProceed anyways?\")\n if result != \"no\": # First time analyzing or want to override\n if Momo.exptype != \"4\": # Not strunching\n frame = cont(self.container, self)\n self.frames[cont] = frame \n frame.grid(row=0, column=0, sticky=\"nsew\")\n frame.ChangePic(1) # Go to first picture\n frame.tkraise() \n else:\n tkMessageBox.showwarning(\"Scrunching analysis not implimented yet\") #show warning\n \"\"\"\n frame = SrunchingAnalysis(self.container, self) # Create fresh page in case of old data\n self.frames[cont] = frame \n frame.grid(row=0, column=0, sticky=\"nsew\")\n frame.tkraise() \n \"\"\"", "title": "" }, { "docid": "0bcf36119a5c06c0b937ddd0324ffa48", "score": "0.4628606", "text": "def apply_new_ranges(self, event=None):\n msg = (\"(Main) hit Enter/Return in Max V ({}) or Max I ({}) entry\"\n .format(self.v_range.get(), self.i_range.get()))\n log_user_action(self.ivs2.logger, msg)\n\n if self.current_run_displayed or self.results_wiz:\n run_dir = self.ivs2.hdd_output_dir\n config_dir = os.path.dirname(self.config.cfg_filename)\n\n # Replace config from saved config of displayed image\n cfg_file, 
original_cfg_file = self.swap_config(run_dir, config_dir)\n\n # Update IVS2 properties and the config options\n try:\n self.ivs2.plot_max_x = float(self.v_range.get())\n self.ivs2.plot_max_y = float(self.i_range.get())\n self.config.cfg_set(\"Plotting\", \"plot max x\", self.ivs2.plot_max_x)\n self.config.cfg_set(\"Plotting\", \"plot max y\", self.ivs2.plot_max_y)\n event.widget.tk_focusNext().focus() # move focus out\n except ValueError:\n # Silently reject invalid values\n pass\n self.update_axis_ranges()\n self.update_idletasks()\n\n if self.current_run_displayed or self.results_wiz:\n if self.axes_locked.get() == \"Unlock\":\n # Unlock the axes (redisplays image and saves config)\n self.unlock_axes()\n else:\n # Redisplay the image with the new settings (saves config)\n self.redisplay_img(reprocess_adc=False)\n\n # Restore the config file from the snapshot\n self.restore_config(run_dir, config_dir,\n cfg_file, original_cfg_file)", "title": "" }, { "docid": "9c2455e10a2ec4fdd7e7f5506b8cfbe3", "score": "0.462317", "text": "def show_frameStingray(self, cont, obj):\n saveobject(obj)\n tkMessageBox.showwarning(\"Done\", \"Data has been saved for:\" + obj.expnumber) #show warning\n frame = self.frames[cont]\n frame.tkraise() #raise to front", "title": "" }, { "docid": "cfc75b83f7eeae89ed3216a9f8d9055c", "score": "0.46208972", "text": "def set_window(self, x_start, y_start, x_end, y_end):\r\n if x_start > self.dis_column or \\\r\n x_end > self.dis_column or \\\r\n y_start > self.dis_line or \\\r\n y_end > self.dis_line:\r\n raise ValueError(\"Window setting is Out of display range.\")\r\n\r\n reg_list = [\r\n 0x15, x_start, x_end - 1,\r\n 0x75, y_start, y_end - 1,\r\n ]\r\n self.write_reg_list(reg_list)", "title": "" }, { "docid": "c7189447a9197e79ba2e331c5709d595", "score": "0.46200073", "text": "def draw_match(self, frame, max_val, thresh_max, number, articlename):\n\n startx_coord = self.startx_coord[number]\n starty_coord = self.starty_coord[number]\n endx_coord = self.endx_coord[number]\n endy_coord = self.endy_coord[number]\n\n\n max_of_all = max_val[0]\n index_of_max = 0\n iterator = 0\n is_drawn = False\n\n for i in max_val:\n if max_of_all < i:\n max_of_all = i\n index_of_max = iterator\n iterator += 1\n\n if max_of_all > thresh_max:\n is_drawn = True\n\n cv2.rectangle(frame, (startx_coord[index_of_max], starty_coord[index_of_max]), (endx_coord[\n index_of_max], endy_coord[index_of_max]), (0, 0, 255), 2)\n cv2.putText(frame, articlename, (startx_coord[index_of_max], starty_coord[index_of_max] - 3),\n Config.font, 0.9, (0, 0, 255), 1, cv2.LINE_AA)\n\n Config.fps.stop()\n cv2.putText(frame, \"Elapsed time: {:.2f}\".format(Config.fps.elapsed()), Config.position_elapsed,\n Config.font, Config.fontScale, Config.fontColor, Config.lineType)\n cv2.putText(frame, \"FPS: {:.2f}\".format(Config.fps.fps()),\n Config.position_fps, Config.font, Config.fontScale,\n Config.fontColor, Config.lineType)\n\n cv2.imshow(\"Result\", frame)\n print(\"\\rFPS: {:.2f}\".format(Config.fps.fps()), end='\\r')\n return is_drawn, index_of_max, frame", "title": "" }, { "docid": "82d6f056853f945a47a4c18d7facf051", "score": "0.46178433", "text": "def ui(dockable):\n\n # get settings\n counter = SETTINGS.get('counter')\n if counter == None:\n counter = '1'\n SETTINGS.add('counter', counter)\n\n padding = SETTINGS.get('padding')\n if padding == None:\n padding = '2'\n SETTINGS.add('padding', padding)\n\n step = SETTINGS.get('step')\n if step == None:\n step = '1'\n SETTINGS.add('step', step)\n\n name = 
SETTINGS.get('name')\n if name == None:\n name = 'example_[C]_GEP'\n SETTINGS.add('name', name)\n\n namePreview = SETTINGS.get('namePreview')\n if namePreview == None:\n namePreview = ''\n SETTINGS.add('namePreview', namePreview)\n\n searchfor = SETTINGS.get('searchfor')\n if searchfor == None:\n searchfor = ''\n SETTINGS.add('searchfor', searchfor)\n\n replaceby = SETTINGS.get('replaceby')\n if replaceby == None:\n replaceby = ''\n SETTINGS.add('replaceby', replaceby)\n\n asset = SETTINGS.get('asset')\n if asset == None:\n asset = ''\n SETTINGS.add('asset', asset)\n\n desc = SETTINGS.get('desc')\n if desc == None:\n desc = ''\n SETTINGS.add('desc', desc)\n\n category = SETTINGS.get('category')\n if category == None:\n category = ''\n SETTINGS.add('category', category)\n\n # create ui\n try:\n cmds.deleteUI(WINDOWNAME, window=True)\n except:\n pass\n try:\n cmds.deleteUI(CONTROLNAME, control=True)\n except:\n pass\n\n cmds.window(WINDOWNAME, t='Batch rename selection')\n form = cmds.formLayout(parent=WINDOWNAME)\n\n # rename with counter frame layout\n frameRename = cmds.frameLayout('rename_frameLayoutRename',\n label='Rename',\n cll=True,\n cl=True,\n bv=True)\n\n separator = cmds.separator('rename_separator', style='in')\n coubterInput = cmds.textFieldGrp('rename_counter', editable=True, l='Counter start', text=counter, fcc=True, cc=parseInputText, tcc=parseInputText)\n stepInput = cmds.textFieldGrp('rename_step', editable=True, l='Step', text=step, fcc=True, cc=parseInputText, tcc=parseInputText)\n paddingInput = cmds.textFieldGrp('rename_padding', editable=True, l='Padding', text=padding, fcc=True, cc=parseInputText, tcc=parseInputText)\n\n inputText = cmds.textFieldGrp('rename_nameField', editable=True, l='New name', text=name, fcc=True,cc=parseInputText, tcc=parseInputText)\n inputText = cmds.textFieldGrp('rename_namePreview', editable=False, l='Preview', text='')\n\n renameButton = cmds.button('rename_button', l='Rename', command=renameItems)\n cmds.setParent('..')\n\n # Search and replace frame layout\n frameReplace = cmds.frameLayout('rename_frameLayoutReplace',\n label='Search & Replace',\n cll=True,\n cl=True,\n bv=True)\n separator = cmds.separator('rename_separator', style='in') \n searchInput = cmds.textFieldGrp('rename_search', editable=True, l='Search for', text=searchfor)\n replaceInput = cmds.textFieldGrp('rename_replace', editable=True, l='Replace by', text=replaceby)\n \n renameButton = cmds.button('rename_replacebutton', l='Search and Replace', command=searchReplace)\n cmds.setParent('..')\n\n # add attributes frame layout\n frameAttr = cmds.frameLayout('rename_frameLayoutAttr',\n label='Add attributes',\n cll=True,\n cl=True,\n bv=True)\n separator = cmds.separator('rename_separator', style='in')\n\n assetField = cmds.textFieldGrp('rename_asset', editable=True, l='asset', text=asset)\n descField = cmds.textFieldGrp('rename_desc', editable=True, l='desc', text=desc)\n categoryField = cmds.textFieldGrp('rename_category', editable=True, l='category', text=category)\n \n addAttrButton = cmds.button('rename_setattrbutton', l='Add attributes', command=setArnoldAttr)\n removeAttrButton = cmds.button('rename_removeattrbutton', l='Remove attributes', command=removeArnoldAttr)\n cmds.setParent('..')\n\n # frame layout containing selection items\n frameSelection = cmds.frameLayout('selecter_frameLayoutSelection',\n label='Item Selecter',\n cll=True,\n cl=True,\n bv=True)\n #outPane = cmds.scrollField('selecter_output', editable=False, wordWrap=False, text='')\n outPane = 
cmds.textScrollList('selecter_output', append=[], sii=True, ams=True, sc=selectItem)\n inputText = cmds.textFieldGrp('selecter_nameField', editable=True, l='Filter:', text='', fcc=True, cc=parseFilterSelection, tcc=parseFilterSelection)\n nodeCheckbox = cmds.checkBoxGrp('selecter_dagcheck', numberOfCheckBoxes=3, labelArray3=['Dag objects', 'Transforms', 'Shapes'], cc=parseFilterSelection)\n typeCheckbox = cmds.checkBoxGrp('selecter_typecheck', numberOfCheckBoxes=3, labelArray3=['Meshes', 'Cameras', 'Lights'], cc=parseFilterSelection)\n\n cmds.setParent('..')\n\n separatorBottom = cmds.separator('rename_separatorBottom', style='in')\n closeButton = cmds.button('rename_close', l='Close', command=closeUI)\n \n cmds.formLayout(form, e=True,\n attachControl=[(frameReplace, 'top', 5, frameRename),\n (frameAttr, 'top', 5, frameReplace),\n (frameSelection, 'top', 5, frameAttr),\n (frameSelection, 'bottom', 5, separatorBottom),\n (separatorBottom, 'bottom', 5, closeButton),\n ])\n\n cmds.formLayout(form, e=True,\n attachForm=[(frameRename, 'right', 5),\n (frameRename, 'left', 5),\n (frameRename, 'top', 5),\n (frameReplace, 'left', 5),\n (frameReplace, 'right', 5),\n (frameAttr, 'left', 5),\n (frameAttr, 'right', 5),\n (frameSelection, 'left', 5),\n (frameSelection, 'right', 5),\n (separatorBottom, 'left', 5),\n (separatorBottom, 'right', 5),\n (closeButton, 'left', 5),\n (closeButton, 'right', 5),\n (closeButton, 'bottom', 5),\n ])\n \n \n parseInputText()\n parseFilterSelection()\n \n if dockable:\n cmds.dockControl(CONTROLNAME, label='Batch rename', floating=True, area='right', content=WINDOWNAME)\n else:\n cmds.showWindow(WINDOWNAME)", "title": "" }, { "docid": "9f971213fb0678d439f85c2a326f194c", "score": "0.46166497", "text": "def gui_validate_landscape(self):\n page_range = self.strvar_landscape_pages.get().strip()\n if len(page_range) > 0 and not tools.check_page_nums(page_range):\n self.__remove_command_argument(cst.LANDSCAPE_ARG_NAME)\n self.strvar_landscape_pages.set(\"\")\n messagebox.showerror(\n message=\"Invalide `Output in Landscape` Page Argument!\"\n )\n return False\n\n if self.is_landscape_checked.get():\n arg = cst.LANDSCAPE_ARG_NAME\n if len(page_range) > 0:\n arg += page_range # no space between -ls and page numbers\n self.__add_or_update_command_argument(cst.LANDSCAPE_ARG_NAME, arg)\n else:\n self.__remove_command_argument(cst.LANDSCAPE_ARG_NAME)\n\n return True", "title": "" }, { "docid": "77414f661b65f8cd21940fbbe71a6f30", "score": "0.4610267", "text": "def show_video(video_path, labels: List[int], size: Optional[Tuple[int, int]] = None) -> None:\n visible_color: Final[Tuple[int, int, int]] = (0, 255, 0)\n non_visible_color: Final[Tuple[int, int, int]] = (0, 0, 255)\n\n cap = cv2.VideoCapture(video_path)\n video_length: Final[int] = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n frame_nb = 0\n while frame_nb < video_length:\n\n frame_ok, frame = cap.read()\n if not frame_ok:\n break\n\n # Crop, uncomment and change the values if needed.\n # if \"video-1\" in video_path:\n # frame = frame[900:]\n\n if size:\n frame = cv2.resize(frame, size, interpolation=cv2.INTER_AREA)\n\n frame = cv2.copyMakeBorder(frame, 40, 0, 0, 0, cv2.BORDER_CONSTANT, None, 0)\n defect_text = f\"The defect is: {os.path.normpath(video_path).split(os.sep)[-4]}\"\n frame_text = f\" - Frame {frame_nb} / {video_length}\"\n frame = cv2.putText(frame, defect_text + frame_text, (20, 25),\n cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1, cv2.LINE_AA)\n\n frame = cv2.putText(frame, f\"Status: defect {'visible' if 
labels[frame_nb] != 0 else 'non-visible'}\",\n (frame.shape[1]-300, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1, cv2.LINE_AA)\n frame = cv2.circle(frame, (frame.shape[1]-100, 20), 15,\n visible_color if labels[frame_nb] != 0 else non_visible_color, -1)\n\n while True:\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(10)\n if key == 32: # Space key, next frame\n break\n elif key == ord(\"q\"): # quit\n cap.release()\n cv2.destroyAllWindows()\n exit()\n\n frame_nb += 1\n\n cv2.destroyAllWindows()\n cap.release()", "title": "" }, { "docid": "1e0c19d8925ce014c7ca0285e94852b5", "score": "0.4608936", "text": "def show_export_dataset_dialog(self):\n self.export_dataset_dialog.update()\n self.export_dataset_dialog.show()", "title": "" }, { "docid": "833cb42e92332347016e2bc71419caee", "score": "0.46062315", "text": "def outputFormatOptionsMenu(self):\n outputFormatOptions = self.settingsSudoku.\\\n getSudokuOutputFormatOptions()\n self.drawMenuOptions(\"Change output type format\",\n outputFormatOptions)\n optionSelected = self.getUserInput()\n while not self.validateUserInput(optionSelected,\n len(outputFormatOptions)):\n print(\"*****The option selected is not valid****\")\n optionSelected = self.getUserInput()\n if(optionSelected == 1):\n print(\"Console\")\n self.settingsSudoku.setSudokuOutputFormat(\"Console\")\n elif (optionSelected == 2): \n print(\"File\")\n self.settingsSudoku.setSudokuOutputFormat(\"File\")\n userInputNameFile = raw_input(\\\n \"Enter a name for the outuput file: \")\n userInputPathFile = raw_input(\\\n \"Enter the specific path to store the file: \")", "title": "" }, { "docid": "023bfe849dfaed2ef3bbc3985ab58b5d", "score": "0.45980275", "text": "def display_and_update(self):\n sg.theme(\"Reddit\")\n window = sg.Window(\"Mathcad Automation\", [\n [sg.Image(data = images.tt_logo)],\n [sg.Text(f'Version: {self.version}', size = (10,1))], \n [sg.Frame(\"Choose Excel File*\", [[sg.FileBrowse(\"1. Browse\", key=\"excel_file\", enable_events=True),\n sg.InputText(self.excel, key=\"excel_name\", size=(30, 1),\n background_color='white', enable_events=True),],\n\n [sg.Button(\"2. Choose Templates\", key = \"choose_templates\", enable_events = True)],\n ])],\n [sg.Frame(\"Choose Database File (Optional)\", [[sg.FileBrowse(key=\"database_file\", enable_events=True),\n sg.InputText(self.database, key=\"database_name\", size=(30, 1),\n background_color='white', enable_events=True)], ])],\n [sg.Checkbox(\"Save to database?\", key=\"save_to_database\", default=self.save_to_database,\n tooltip=\"If selected, details about the generated report will be saved to a database.\")],\n\n [sg.Button(\"3. 
Continue\", key=\"continue\", button_color=\"green\")],\n\n ], icon=images.ma_logo_png)\n \"\"\"Listen for events\"\"\"\n while True: #logic loop\n event, values = window.read()\n if event == 'OK' or event == sg.WIN_CLOSED:\n window.close()\n return False\n else:\n if event == \"choose_templates\":\n \"\"\"\n load the excel file and search for different mounting locations\n \"\"\"\n if helpers.check_file_type(values['excel_name'], 'xlsx'):\n #get the mounting locations\n equipment = filestream.get_eqpt_from_xl(values['excel_name'])\n if len(equipment.items) > 0:\n self.excel = values['excel_name']\n self.template_layout = []\n for idx, item in enumerate(equipment.mounting_locations):\n if item not in self.templates:\n self.templates[item] = \"\" # instanitate each templates to be empty string\n #create the layout for the choose templates window\n self.template_layout += [\n [sg.Text(\"Choose \" + item + \" mounting template:\")],\n [sg.FileBrowse(key= item, enable_events=True),\n sg.InputText(self.templates[item], key=item + \"_name\",\n size=(60, 1), background_color='white', enable_events=True)],\n ]\n if self.choose_templates():\n self.can_continue = True\n\n\n if event == \"continue\": # user has input all information\n # list of all the user errors when they input information\n errors = list()\n # validate the filepaths\n self.save_to_database = values['save_to_database']\n #check if the database is a .csv file - if not, append error list\n if helpers.check_file_type(values['database_name'], 'csv') or values['database_name'] == \"\":\n self.database = values['database_name']\n else:\n errors.append(\"Database file must be a .csv file.\")\n #check if excel file is .xlsx\n if helpers.check_file_type(values['excel_name'], 'xlsx'):\n self.excel = values['excel_name']\n else:\n errors.append(\"Excel file must be a .xlsx file.\")\n # Error handling\n if len(errors) == 0 and self.can_continue: #no errors\n window.close()\n return True\n elif self.can_continue == False:\n alert = Popup(\"Error\", \"You must choose template files.\")\n alert.alert()\n continue\n else: #show errors in a popup window\n alert = Popup(\"Errors\", \"\\n\".join(errors))\n alert.alert()\n continue", "title": "" }, { "docid": "4b440491ff0b950033118bb7f66f7f8b", "score": "0.459581", "text": "def do_range(self, args, calibration=False):\n cmds = args.split()\n if len(cmds) < 4:\n print(\"Error: not enough args!\")\n return\n self.qa_analyzer.settings.CDSP_range = True\n self.qa_analyzer.settings.CDSP_range_param['CD1'] = int(cmds[0].strip())\n self.qa_analyzer.settings.CDSP_range_param['CD2'] = int(cmds[1].strip())\n self.qa_analyzer.settings.CDSP_range_param['SP1'] = int(cmds[2].strip())\n self.qa_analyzer.settings.CDSP_range_param['SP2'] = int(cmds[3].strip())\n if len(cmds) > 4:\n self.qa_analyzer.settings.CDSP_range_param['wext'] = int(cmds[4].strip())\n else:\n self.qa_analyzer.settings.CDSP_range_param['wext'] = None\n self.qa_analyzer.run(calibration)", "title": "" }, { "docid": "6f0984e29706e01f77507efa808c2680", "score": "0.45948943", "text": "def onCaptureButton(self):\n inputBrowserNode = self.ui.inputSequenceBrowserSelector.currentNode()\n inputImage = self.ui.inputVolumeSelector.currentNode()\n outputBrowserNode = self.ui.segmentationBrowserSelector.currentNode()\n selectedSegmentation = self.ui.inputSegmentationSelector.currentNode()\n numSkip = slicer.modules.singleslicesegmentation.widgetRepresentation().self().ui.skipImagesSpinBox.value\n\n if inputBrowserNode is None:\n logging.error(\"No browser node 
selected!\")\n return\n if selectedSegmentation is None:\n logging.error(\"No segmentation selected!\")\n return\n if outputBrowserNode is None:\n logging.error(\"No segmentation sequence browser selected!\")\n return\n\n original_index_str = selectedSegmentation.GetAttribute(self.ORIGINAL_IMAGE_INDEX)\n\n # If input sequence browser is selected in the toolbar, always consider this a new segmentation. This is needed in case\n # a scene was loaded with segmentation attribute ORIGINAL_IMAGE_INDEX not None.\n\n activeBrowserNode = slicer.modules.sequences.toolBar().activeBrowserNode()\n if activeBrowserNode == inputBrowserNode:\n original_index_str = None\n\n if original_index_str is None or original_index_str == \"None\" or original_index_str == \"\": # new segmentation\n inputImageIndex = inputBrowserNode.GetSelectedItemNumber()\n selectedSegmentation.SetAttribute(self.ORIGINAL_IMAGE_INDEX, str(inputImageIndex))\n self.logic.captureSlice(outputBrowserNode, selectedSegmentation, inputImage)\n self.logic.eraseCurrentSegmentation(selectedSegmentation)\n selectedSegmentation.SetAttribute(self.ORIGINAL_IMAGE_INDEX, \"None\")\n currentItemNum = inputBrowserNode.GetSelectedItemNumber()\n newItemNum = inputBrowserNode.SelectNextItem(numSkip)\n else: # overwrite segmentation\n self.logic.captureSlice(outputBrowserNode, selectedSegmentation, inputImage)\n currentItemNum = outputBrowserNode.GetSelectedItemNumber()\n newItemNum = outputBrowserNode.SelectNextItem()\n\n # Check if sequence browser wrapped around. If yes, pop up message box to ask if user wants to continue.\n\n if newItemNum < currentItemNum:\n logging.debug(\"Sequence wrapped around!\")\n\n msgBox = qt.QMessageBox()\n msgBox.setText(\"Sequence wrapped around!\")\n msgBox.setInformativeText(\"Please save the scene before closing the application!\")\n msgBox.setStandardButtons(qt.QMessageBox.Ok)\n msgBox.setDefaultButton(qt.QMessageBox.Ok)\n msgBox.exec_()", "title": "" }, { "docid": "111694a2830db1e8913b2f4c4e782ecf", "score": "0.4589033", "text": "def show_frameFish(self, cont):\n frame = self.frames[cont]\n camera.start_preview(fullscreen=False, window=(0,appheight/4,appwidth,appheight/2)) #this line starts the preview. \n frame.tkraise()", "title": "" }, { "docid": "74cdd61fc0cfaef06c4e9bf753dbd3fb", "score": "0.45877063", "text": "def image_analysis(self): # connected to image analysis button (btn_images)\n\n self.start = int(self.meanStartEdit.text())\n\n if self.roi == None and self.JPG == False :\n self.mean = self.data[self.start,:,:]\n self.is_image = True\n else:\n self.ROI_no_mean_images()\n\n # change the name and color of the buttos to notice the way you choose\n self.btn7.setText(\"Export Intensities from frame={}\".format(self.start))\n self.btn7.setStyleSheet(\n \"QPushButton { background-color: rgb(200, 200, 10); }\")\n self.meanEndEdit.setStyleSheet(\" background-color: red; \")", "title": "" } ]
b98d9695c5f2e060e667f6137c06d0dd
isFinished(ALInterpolationArticular3D self, float const & time) -> bool
[ { "docid": "0e7f4cffb3df33943aaf7f315f84553f", "score": "0.875447", "text": "def isFinished(self, time):\n return _almathinternal.ALInterpolationArticular3D_isFinished(self, time)", "title": "" } ]
[ { "docid": "84e91adf599c004b342da77fe818c9b4", "score": "0.78961587", "text": "def isFinished(self, time):\n return _almathinternal.ALInterpolationArticular_isFinished(self, time)", "title": "" }, { "docid": "839e708c13e4da3ccd16b2b6bb407622", "score": "0.70739007", "text": "def isFinished(self):\n return _almathinternal.InterpolationTrapezoidSmooth_isFinished(self)", "title": "" }, { "docid": "c81835376ae7064583096ff8eea58b85", "score": "0.6948943", "text": "def isFinished(self):\n return _almathinternal.ALInterpolationBezier_isFinished(self)", "title": "" }, { "docid": "d20df1f69b909c6179a5881cae79fb3f", "score": "0.64873034", "text": "def isFinished(self, pTime):\n return _almathinternal.ALInterpolationQuinticSpline_isFinished(self, pTime)", "title": "" }, { "docid": "feb20b50b752124b26edbb62692ff5ff", "score": "0.61669093", "text": "def has_finished(self, action):\n\n torso_height, torso_ang = self.sim.data.qpos[1:3]\n return not (torso_height > 0.4 and torso_height < 2.1 and\n torso_ang > -1.0 and torso_ang < 1.0)", "title": "" }, { "docid": "ba6b43f0a6f3fb8584f208d8f7a0cd4f", "score": "0.5836551", "text": "def _isFinished(self):\n raise NotImplementedError", "title": "" }, { "docid": "59341ab71110ca9bd2e32442f6c74738", "score": "0.5825531", "text": "def isCompleted(self, dataLocation):\n return not self.isDefaulted(dataLocation) \\\n and dataLocation.isSurface()", "title": "" }, { "docid": "7b93fe7603f0d737e0a48f7805c548ae", "score": "0.58040255", "text": "def is_finished():\n pass", "title": "" }, { "docid": "81f46c5b9214b5586d9983d6ddaeb5eb", "score": "0.580163", "text": "def check_done(self, new_state, init_state, time_start=None):\n current_time = time.time()\n # + 0.1 because there are some noises\n if((new_state[1].position.z + 0.1) < init_state[1].position.z):\n return True\n elif (time_start is not None) and (current_time - time_start) > 180:\n print('time is up!(120s)')\n return True\n else:\n return False", "title": "" }, { "docid": "94b40fc32919fbee12abc459df616329", "score": "0.5667639", "text": "def isFinished(self):\n return (\n self.isPrepared() and\n self.isBaked() and\n self.isCut() and\n self.isBoxed())", "title": "" }, { "docid": "4a05c61bf81935f416de6e980b405709", "score": "0.56356305", "text": "def _done(observation: tuple) -> bool:\n angle, dist = observation[0], observation[1]\n return True if abs(angle) > np.deg2rad(15) or dist > 0.5 else False", "title": "" }, { "docid": "2258f98a1dda20c5adfdb9269a6fc637", "score": "0.56304795", "text": "def done(self, env):\n rot_quat = env.robot.GetBaseOrientation()\n rot_mat = env.pybullet_client.getMatrixFromQuaternion(rot_quat)\n return rot_mat[-1] < 0.85", "title": "" }, { "docid": "8f4b3ac1a4206cfc635bc45f27dd5e20", "score": "0.5608561", "text": "def isFinished(self): #$NON-NLS-1$\r", "title": "" }, { "docid": "542bb15ff0bee984ca51f210edb13e04", "score": "0.558418", "text": "def isFinished(self):\n # Get encoder count\n current = self.robot.drivetrain.get_encoder_value()\n # If abs(target - current) < threshold then return true\n return math.fabs(self._target_position - current) <= self._encoder_threshold or self.isTimedOut()", "title": "" }, { "docid": "b80316b148531a32a74b554a499f25f9", "score": "0.5571571", "text": "def IsDone(self, *args):\n return _IntRes2d.IntRes2d_Intersection_IsDone(self, *args)", "title": "" }, { "docid": "991b39cedd44525ab40c6ba905f3cd07", "score": "0.55124205", "text": "def done(self):\n return self.x == 31 and self.y == 39", "title": "" }, { "docid": "80e66cd671b593917b55c7fe453f4eb9", 
"score": "0.5493186", "text": "def isFinished(self):\n for x in range(8):\n for y in range(8):\n if self.__isEmpty([x, y]):\n return False\n return True", "title": "" }, { "docid": "3acc45ae1ca395eaaac92fa0d0d3169d", "score": "0.54439694", "text": "def _is_done(self, observations):\n current_eff_pos = observations['observation'][:3].tolist()\n\n done = self.check_if_done(\n self.movement_result, self.desired_position, current_eff_pos, self.threshold_error)\n return done", "title": "" }, { "docid": "a61eee27b90ab2b6ceaf5b59fbf245dd", "score": "0.54288363", "text": "def _check_cur_action_animaion_has_finished(self):\n return self.cur_action.finished()", "title": "" }, { "docid": "e73a6a4473b02d0544086279ca5898eb", "score": "0.54174805", "text": "def isFinished(self):\n\t\treturn False", "title": "" }, { "docid": "e4861cdd7e064516969140b7b1b1cb7b", "score": "0.540876", "text": "def _is_done(self):\n u, p = self.state\n logger.debug(\"u (actual): {}\".format(u))\n\n if u > p + self.p_diff_threshold:\n return True\n else:\n return False", "title": "" }, { "docid": "792972063b7b9d15142b93662ab4d0d6", "score": "0.5397395", "text": "def is_done(self):\n if 1 - self.fidelity() <= self.infidelity_threshold:\n return True\n return self.t >= self.T", "title": "" }, { "docid": "269fa8f82b4906b4704987c4536f51c9", "score": "0.53804135", "text": "def is_finished(self) -> bool:\n\n for i in range(self.rows):\n for j in range(self.cols):\n if self.boxes[i][j].value == 0:\n return False\n return True", "title": "" }, { "docid": "180430761a5183adb1fc3fb89457a962", "score": "0.5372404", "text": "def finished(self):\n return self.result is not None", "title": "" }, { "docid": "035773c9f3767bb25519de0cb2e076c5", "score": "0.53476006", "text": "def is_finished(self):\n return self.time_finished is not None", "title": "" }, { "docid": "d89debd0a7e0730fe3f6e61bf21f198f", "score": "0.5346567", "text": "def is_done(self, p):\n return (self.max_steps and self.steps >= self.max_steps) \\\n or p.x > self.containing_box.qx", "title": "" }, { "docid": "b260f27737a3989d6fd88ed9176f18db", "score": "0.5346386", "text": "def is_orbit_complete(self):\n return self.tsince > self.tend", "title": "" }, { "docid": "cf4563e3b99460b4f5a2d1f4fc90ecd0", "score": "0.5328927", "text": "def setIsFinished(self, pIsFinished):\n return _almathinternal.ALInterpolationArticular_setIsFinished(self, pIsFinished)", "title": "" }, { "docid": "c2fd119e9da452f8701147d9a04d863f", "score": "0.53274566", "text": "def is_finished(self):\n return len(self.get_next_game_states()) == 0", "title": "" }, { "docid": "ad6c5948c8f6828d0d782bfe6a51c81b", "score": "0.5311677", "text": "def animationComplete(self) -> bool:\n return all(self.animation_status)", "title": "" }, { "docid": "110dd01215a9ce776b8c06f958d4e768", "score": "0.5289392", "text": "def is_completed(self):\n return self.sample_count > self.max_sample", "title": "" }, { "docid": "110dd01215a9ce776b8c06f958d4e768", "score": "0.5289392", "text": "def is_completed(self):\n return self.sample_count > self.max_sample", "title": "" }, { "docid": "150d5a3a4bdc611990b44473436658f8", "score": "0.52757144", "text": "def isFinished(self):\n return False", "title": "" }, { "docid": "09112a194b439191331ca2803f47a19f", "score": "0.52754563", "text": "def has_finished(grid):\n\n if not get_cell_count(grid) and grid.generation > 0:\n return True\n\n return False", "title": "" }, { "docid": "2eaaab252bf5498d7d04898956249044", "score": "0.5274718", "text": "def is_finished(task, sent, step, unfin_idx):\n assert 
len(finalized[task][sent]) <= beam_size\n if len(finalized[task][sent]) == beam_size or step == max_len:\n return True\n return False", "title": "" }, { "docid": "1f5cc80b15767df21f6f1ef6e9a39d21", "score": "0.52622485", "text": "def isDone(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "1f5cc80b15767df21f6f1ef6e9a39d21", "score": "0.52622485", "text": "def isDone(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "1f5cc80b15767df21f6f1ef6e9a39d21", "score": "0.52622485", "text": "def isDone(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "1f5cc80b15767df21f6f1ef6e9a39d21", "score": "0.52622485", "text": "def isDone(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "1f5cc80b15767df21f6f1ef6e9a39d21", "score": "0.52622485", "text": "def isDone(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "1f5cc80b15767df21f6f1ef6e9a39d21", "score": "0.52622485", "text": "def isDone(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "1f5cc80b15767df21f6f1ef6e9a39d21", "score": "0.52622485", "text": "def isDone(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "1f5cc80b15767df21f6f1ef6e9a39d21", "score": "0.52622485", "text": "def isDone(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "1f5cc80b15767df21f6f1ef6e9a39d21", "score": "0.52622485", "text": "def isDone(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "1f5cc80b15767df21f6f1ef6e9a39d21", "score": "0.52599967", "text": "def isDone(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "1f5cc80b15767df21f6f1ef6e9a39d21", "score": "0.52599967", "text": "def isDone(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "1f5cc80b15767df21f6f1ef6e9a39d21", "score": "0.52599967", "text": "def isDone(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "8968d3c8f74f740bb70853f1b32bb05b", "score": "0.5257978", "text": "def has_finished(self):\n return self.itr >= self.max_itr", "title": "" }, { "docid": "a7e76bd7b14bc407dc955fa17f461734", "score": "0.52423304", "text": "def is_wave_finished(self):\n return self.curr_wave_time > max(self.waves[self.curr_wave].keys())", "title": "" }, { "docid": "4340ab024705eff390932c9f08a5c3da", "score": "0.5227636", "text": "def is_finished(self):\n return (self._caccount + self._sum_interest) >= 0.", "title": "" }, { "docid": "3ae8eb136f127873c48930397de5c373", "score": "0.52103645", "text": "def interval_completed(self) -> bool:\n return self.interval_frames >= self.sample_rate", "title": "" }, { "docid": "9483d7fe9eee38618a583a3dc63df9a2", "score": "0.520724", "text": "def wait_interpolation(self, controller_type=None, timeout=0):\n super(PR2TMPRobotInterface, self).wait_interpolation(\n controller_type, timeout)\n while not rospy.is_shutdown():\n self.update_robot_state(wait_until_update=True)\n if all(map(lambda j: j.name in self.ignore_joint_list or\n abs(j.joint_velocity) < 0.05\n if isinstance(j, RotationalJoint) else\n abs(j.joint_velocity) < 0.001,\n self.robot.joint_list)):\n break\n # TODO(Fix return value)\n return True", "title": "" }, { "docid": "8f5a05162762fc0e9df8eb6b4d1d51e2", "score": "0.5184158", "text": "def finished(self):\n return self.board == self.goal", "title": "" }, { "docid": "d28a2e7bba4a9c8630c783a3a0986f9e", "score": "0.51728487", "text": "def finished(self, achieved_goals, queue):\n return achieved_goals == self.goals or not queue", "title": "" }, { "docid": "19593142fc569ecf50a0838c8e118ae3", "score": "0.5168116", "text": "def is_finished(self):\n\n if self._total_time > self._global_time_limit:\n print(\n \"Exceeded 
global time limit {} / {}\".format(\n self._total_time, self._global_time_limit))\n return True\n\n for t in self._trials:\n if t.status in [Trial.PENDING, Trial.RUNNING, Trial.PAUSED]:\n return False\n return True", "title": "" }, { "docid": "5082d3087eb568d2b82b222ad35b28ad", "score": "0.51663554", "text": "def trial_finished(self):\n return self.ind+1>=len(self.words[self.word_index])", "title": "" }, { "docid": "26b1fb105b41c723e193b048e3ea79d7", "score": "0.51536345", "text": "def finished_execution(self) -> bool:\n return self._transition_state == self._FINISHED or self._transition_state == self._ABORTED", "title": "" }, { "docid": "344d22362aa070e43059344021d9bdbb", "score": "0.51531", "text": "def is_finished(self):\n for i in range(self.rows):\n for j in range(self.cols):\n if self.cells[i][j].value == 0:\n return False\n return True", "title": "" }, { "docid": "baae092925ecb85aeca50253e71431cc", "score": "0.51470554", "text": "def _is_finished(self) -> bool:\n return self._status[0] or self._status[1]", "title": "" }, { "docid": "1d58a1b8788c8381ef6220c9c0208769", "score": "0.5141703", "text": "def is_finished(self):\n return self.finished", "title": "" }, { "docid": "57a2b2d52deb773ece559672c80736c6", "score": "0.51412666", "text": "def init(self, pTimeInit, pTimeFinal, pPointInit, pPointFinal, pVelocityInit, pVelocityFinal, pPeriod):\n return _almathinternal.ALInterpolationArticular3D_init(self, pTimeInit, pTimeFinal, pPointInit, pPointFinal, pVelocityInit, pVelocityFinal, pPeriod)", "title": "" }, { "docid": "5f4a56713ac6bf1710ce412e22c6ce70", "score": "0.51185846", "text": "def setIsFinished(self, pIsFinished):\n return _almathinternal.ALInterpolationBezier_setIsFinished(self, pIsFinished)", "title": "" }, { "docid": "f80f2d934cb54daaf6a95d02999a37d1", "score": "0.5117361", "text": "def keyIsFinished(mashDic, key):\n return scipy.sum([mashDic[key][i][1] for i in range(Z)]) == 1", "title": "" }, { "docid": "20e37615526f73a32267acfcc99e7909", "score": "0.5088959", "text": "def isFinished(self):\n return self._finished", "title": "" }, { "docid": "5cfb3762f39b5fcc4be2d388efbdc67b", "score": "0.50729936", "text": "def _isFinished(self):\n return not self.currentPlayer.actions or any(self._getScores())", "title": "" }, { "docid": "ebd22f054e148f56e0a28e6d27e79faf", "score": "0.50692016", "text": "def _is_done(self, observations):\n raise NotImplementedError()", "title": "" }, { "docid": "ebd22f054e148f56e0a28e6d27e79faf", "score": "0.50692016", "text": "def _is_done(self, observations):\n raise NotImplementedError()", "title": "" }, { "docid": "ebd22f054e148f56e0a28e6d27e79faf", "score": "0.50692016", "text": "def _is_done(self, observations):\n raise NotImplementedError()", "title": "" }, { "docid": "ebd22f054e148f56e0a28e6d27e79faf", "score": "0.50692016", "text": "def _is_done(self, observations):\n raise NotImplementedError()", "title": "" }, { "docid": "50e320677d165ef09cb754322e976fa8", "score": "0.50626665", "text": "def is_finished(self):\n return os.path.exists(self.out_filename)", "title": "" }, { "docid": "2b7882a11ddda5b4c105893a794ccf7f", "score": "0.5059459", "text": "def SoGLMultiTextureImageElement_hasTransparency(state: 'SoState') -> \"SbBool\":\n return _coin.SoGLMultiTextureImageElement_hasTransparency(state)", "title": "" }, { "docid": "9b1bfd11462c7527f497b9cb9f335aa8", "score": "0.5055257", "text": "def is_dead_end(my_zoom):\n count = 0\n for value in my_zoom.values():\n count += 1 if not value else 0\n\n return True if count == 3 else False", "title": "" }, 
{ "docid": "a4e31a9deac97594465439c3c7b6e137", "score": "0.5046841", "text": "def SoMultiTextureImageElement_containsTransparency(state: 'SoState') -> \"SbBool\":\n return _coin.SoMultiTextureImageElement_containsTransparency(state)", "title": "" }, { "docid": "79a553d8561206ea2245d344d384bd6b", "score": "0.504325", "text": "def abs_done(time_remain):\n global cex_abs_depth, abs_depth, abs_depth_prev, time_abs_prev, time_abs\n## print 'checking if abs has enough time to next cex'\n frames_to_next_cex = cex_abs_depth - abs_depth\n div = time_abs - time_abs_prev\n div = max(.1,div)\n frames_per_sec = (abs_depth - abs_depth_prev)/div\n if frames_per_sec <= 0:\n return False #something wrong \n## print 'frames_per_sec = %0.2f, frames_to_next_cex = %d, time remaining = %0.2f'%(frames_per_sec, frames_to_next_cex, time_remain)\n if frames_to_next_cex > 0.2*(frames_per_sec * time_remain): #later frames will take longer so factor of 5 here\n print 'not enough abs time to next cex'\n return True\n return False", "title": "" }, { "docid": "c327d649608de635bc0d373f2a6a0f75", "score": "0.50367916", "text": "def hasTransparency(state: 'SoState') -> \"SbBool\":\n return _coin.SoGLMultiTextureImageElement_hasTransparency(state)", "title": "" }, { "docid": "81e39d786e4aaa570f04ca63c662b086", "score": "0.503623", "text": "def is_finished(self):\n return self.text and self.screenshot", "title": "" }, { "docid": "37d09cf6281a0229f456def11aa7c2bd", "score": "0.5015781", "text": "def is_finished(self):\n return self.__is_finished", "title": "" }, { "docid": "e812f3106f95e5b55fcf9f009eda4aaf", "score": "0.5014571", "text": "def prev_has_finished(self):\n return self.itr_prev >= self.max_itr", "title": "" }, { "docid": "aefeafde97e5afc93f6b197c838e1c9e", "score": "0.5008804", "text": "def finished(self) -> bool:\n return self.__finished", "title": "" }, { "docid": "8dbff4c45a3437eae89c7949ee2dfb66", "score": "0.5008314", "text": "def is_finished(self):\n return self._finished", "title": "" }, { "docid": "db44817fec881b74128300bcbb488ee3", "score": "0.50015867", "text": "def is_delaunay(self, tri):\n circumcircle=tri.circumcircle()\n count=0\n for point in (tri.p0,tri.p1,tri.p2):\n if circumcircle.covers(point):\n #print 'yes'\n count+=1\n if count==3:\n #print count\n return True\n else:\n return False\n #Your implementation here", "title": "" }, { "docid": "ddac60ff121d562006777ac79626bad9", "score": "0.49990314", "text": "def is_finished(\n num_gameweeks,\n wildcard=False,\n free_hit=False,\n triple_captain=False,\n bench_boost=False,\n):\n final_expected_num = count_expected_outputs(\n 0, num_gameweeks, wildcard, free_hit, triple_captain, bench_boost\n )\n # count the json files in the output dir\n json_count = len(os.listdir(OUTPUT_DIR))\n if json_count == final_expected_num:\n return True\n return False", "title": "" }, { "docid": "a0ad633c2e36e2e44ede103db2d01a0f", "score": "0.49938917", "text": "def _is_done(self):\n # Is the sim done ? 
\n return self.current_sim_datetime > self.end_datetime", "title": "" }, { "docid": "33d3edc5d430a3a4408e071d79f9ae8d", "score": "0.49917263", "text": "def containsTransparency(state: 'SoState') -> \"SbBool\":\n return _coin.SoMultiTextureImageElement_containsTransparency(state)", "title": "" }, { "docid": "daa388333721f0b73ad52b60502f3f9a", "score": "0.4971992", "text": "def _test_f_3(x, y, z):\n time.sleep(0.25) # test the timing function\n return x + y + z", "title": "" }, { "docid": "381a406c1888c27082016d4467de8817", "score": "0.49694067", "text": "def getFinalTime(self):\n return _almathinternal.ALInterpolationTypeIV3D_getFinalTime(self)", "title": "" }, { "docid": "92b0a07ac91248befd14db167645b16d", "score": "0.4964097", "text": "def _check_if_done(self):\n return self._check_if_all_boxes_on_target() or self._check_if_maxsteps()", "title": "" }, { "docid": "4ada6e7aa520255332cc8e94d89c3bee", "score": "0.49635655", "text": "def episode_done(self):\n if not self.hit_is_complete:\n return False\n else:\n return True", "title": "" }, { "docid": "3960a98654cc2ab604ce97729686f95e", "score": "0.49438494", "text": "def is_final_state(x):\n p, s = x\n if abs(p) > 1 or abs(s) > 3:\n return True\n else:\n return False", "title": "" }, { "docid": "b2bc415fe4e0fa7c17db8e36d5bde7c1", "score": "0.49432087", "text": "def is_finished(\n self,\n step: int,\n unfin_idx: int,\n max_len: int,\n finalized_sent_len: int,\n beam_size: int,\n ):\n assert finalized_sent_len <= beam_size\n if finalized_sent_len == beam_size or step == max_len:\n return True\n return False", "title": "" }, { "docid": "12cfd22bbf08672e94a9bba507cfa8ad", "score": "0.49417865", "text": "def is_end_state(self) -> bool:\n return len(self.finished) == self.count", "title": "" }, { "docid": "ac3eb5ae2c25a3ef82f4207baa1d5c98", "score": "0.49348676", "text": "def is_finished(self, *additional, **kwargs):\n return all((sh.is_finished() for sh in list(additional) + self if hasattr(sh, 'is_finished')))", "title": "" }, { "docid": "e84f93ef1b9f20f20c48faa1b8cfecc3", "score": "0.492795", "text": "def acceleration3_d_available(self):\n ret = self._get_attr(\"acceleration3DAvailable\")\n return ret", "title": "" }, { "docid": "a3e2a85db991a6815477ee79035b75ec", "score": "0.49094743", "text": "def completed(self):\n return self.iteration >= len(self.batches)", "title": "" }, { "docid": "07c17ac655fdcc270904296375f51832", "score": "0.4908603", "text": "def goal_reached(self, world_state, grid_world):\r\n nr_ticks = grid_world.current_nr_ticks\r\n if self.max_nr_ticks == np.inf or self.max_nr_ticks <= 0:\r\n self.is_done = False\r\n else:\r\n if nr_ticks >= self.max_nr_ticks:\r\n self.is_done = True\r\n else:\r\n self.is_done = False\r\n return self.is_done", "title": "" }, { "docid": "3a62794063561fdc1db854be2df2a0b0", "score": "0.4907297", "text": "def is_3D(self) -> bool:\n return len(self._vol_shape_xyz) == 3 and self._vol_shape_xyz[-1] > 1", "title": "" }, { "docid": "e88950c4b150e50eb9a2c24417cc3f3d", "score": "0.4890563", "text": "def is_finished(self):\n is_scored_goals = self.home_team_goal > 0 or self.away_team_goal > 0\n if (is_scored_goals and util.compare_time_to_now(self.date, 1)) \\\n or (not is_scored_goals and util.compare_time_to_now(self.date, 100)):\n return True\n\n if util.is_None(self.goal) \\\n and util.is_None(self.shoton) \\\n and util.is_None(self.shotoff) \\\n and util.is_None(self.foulcommit) \\\n and util.is_None(self.card) \\\n and util.is_None(self.cross) \\\n and util.is_None(self.corner) \\\n and 
util.is_None(self.possession):\n return False\n return True", "title": "" }, { "docid": "097f46caaab32cc253ebb00ea141aeb0", "score": "0.48850796", "text": "def isFinished(self):\n # this function should be deterministic and without side effects\n if self.env.n_actions >= self.max_number_of_actions_per_session:\n return True\n elif self.env.state.click != Click.NOT_CLICKED:\n # click ends task\n return True\n elif self.env.state.quit == Quit.HAS_QUIT:\n # quit ends task\n return True\n return False", "title": "" }, { "docid": "e07d93ab3b11d2761de5cedf60145fff", "score": "0.48749816", "text": "def is_dataset_finished(self):\n n_completed = self.dataset.processed\n\n # Dataset has reached max pipeline depth\n if self.dataset.depth >= self.max_pipeline_depth:\n LOGGER.info('Dataset {} has reached max pipeline depth!'.format(self.dataset))\n return True\n\n # No budget for dataset\n if n_completed >= self.dataset.budget:\n LOGGER.info('Algorithm budget for dataset {} has run out!'.format(self.dataset))\n return True\n\n return False", "title": "" }, { "docid": "faea9f281a5b93922708805a03825f1c", "score": "0.48738745", "text": "def _is_animated(self, input):\r\n return self.animated_gif_optimiser._is_acceptable_image(input)", "title": "" } ]
8e050be4735a3412803428f673375b18
Save the outputs from the simulation run.
[ { "docid": "cc27b8307d07d7bd03068df1bc203cd3", "score": "0.59313", "text": "def save_simulation(\n output: str,\n profile_type: str,\n simulation_outputs: Solution,\n solar_irradiance: dict[int, float],\n) -> None:\n\n # Assemble the CSV datafile structure\n output_data = simulation_outputs.as_dataframe\n output_data[\"Solar irradiance / W/m^2\"] = solar_irradiance\n\n # Write to the output file.\n os.makedirs(SIMULATION_OUTPUTS_DIRECTORY, exist_ok=True)\n with open(\n f\"{os.path.join(SIMULATION_OUTPUTS_DIRECTORY, output)}_{profile_type}\" \".csv\",\n \"w\",\n encoding=\"UTF-8\",\n ) as output_file:\n output_data.to_csv(output_file) # type: ignore [arg-type]", "title": "" } ]
[ { "docid": "62357c71511468de2da3ef87b7e3c34b", "score": "0.75556", "text": "def save(self):\n # Message #\n self.parent.log.info(\"Saving final simulations results to disk.\")\n # The classifier values #\n self['values'] = self.sim.sit.classifier_value_ids\n # All the tables that are within the SimpleNamespace of `sim.results` #\n self['area'] = self.runner.internal['pools']\n self['classifiers'] = self.runner.internal['classifiers']\n self['flux'] = self.runner.internal['flux']\n self['parameters'] = self.runner.internal['parameters']\n self['pools'] = self.runner.internal['pools']\n self['state'] = self.runner.internal['state']", "title": "" }, { "docid": "1779797ec4ebfd520561913d0f987829", "score": "0.716197", "text": "def write_outputs(self):\n self.output.write_data_to_disk()\n if not self._no_pickle:\n state_pkl_filename = os.path.join(\n self.output.output_dir, f\"calstate-{self.chain_idx}.pkl\"\n )\n pickle.dump(self, open(state_pkl_filename, \"wb\"))", "title": "" }, { "docid": "8f2f52bed922d3b259d295dcca510e6e", "score": "0.71024096", "text": "def save(self):\n save_output_residual = (time.mktime(\n Coordinator.current_date.timetuple()) - self.first_save_output_residual) % get_seconds(\n self.save_output_interval)\n if abs(save_output_residual) <= 1e-7:\n self.handle_old_dps()\n self.handle_outliers()\n self.save_model_and_files()", "title": "" }, { "docid": "2caa6be1ea311ca4aea805a52d9c33db", "score": "0.7013826", "text": "def save(self):\n pickle.dump(self, open(pSimFileName, 'wb'), pickle.HIGHEST_PROTOCOL)", "title": "" }, { "docid": "1082b4db9d1895fb14747ed61a8d51e8", "score": "0.69863796", "text": "def dump_results(self, success):\n system = self.system\n\n t, _ = elapsed()\n\n if success and (not system.files.no_output):\n\n # system.varout.dump()\n system.varout.dump_np_vars()\n _, s = elapsed(t)\n logger.info('Simulation data dumped in {:s}.'.format(s))", "title": "" }, { "docid": "a94767a84a95c897907dcc74c417572f", "score": "0.6926807", "text": "def save_results(self):\n \n df_fname = '{0}.T'.format(self.df_agents_file)\n file = open(df_fname, 'w')\n pickle.dump(self.T, file)\n file.close()\n \n df_fname = '{0}.D_agent'.format(self.df_agents_file)\n file = open(df_fname, 'w')\n pickle.dump(self.D_agent, file)\n file.close() \n\n df_fname = '{0}.D_tstep'.format(self.df_agents_file)\n file = open(df_fname, 'w')\n pickle.dump(self.D_tstep, file)\n file.close() \n \n df_fname = '{0}.D_edges'.format(self.df_agents_file)\n file = open(df_fname, 'w')\n pickle.dump(self.D_edges, file)\n file.close() \n \n print '{0}: Results saved.'.format(self.city)\n sys.stdout.flush()", "title": "" }, { "docid": "28d0b21b7a9b6304b3403edaaeacda9f", "score": "0.6892895", "text": "def test_saved_output(self):\n execute_and_test_output_images(self, CliRunner(), 3, 3, \"save_\", [\"save\"])", "title": "" }, { "docid": "29c83d959d1650939e0f56fca0476b57", "score": "0.6755299", "text": "def store_simulation_results(self, simulation_results, results_dir):\n pass", "title": "" }, { "docid": "867bb54a99ef08eb5495dc25882a2368", "score": "0.67163306", "text": "def save(self):\n if self.output_folder_path:\n\n # Creating directories if they don't exist\n makedirs(dirname(join(self.output_folder_path, \"file\")), exist_ok=True)\n\n # ### Steps data ###\n csv_array = []\n for k, v in self.step_traces[\"scores\"].items():\n csv_array.append([k] + v)\n for k, v in self.step_traces[\"additional_values\"].items():\n csv_array.append([k] + v)\n csv_array.append([\"n_replaced\"] + 
self.step_traces[\"n_replaced\"])\n csv_array.append([\"timestamps\"] + self.step_traces[\"timestamps\"])\n\n with open(join(self.output_folder_path, 'steps.csv'), \"w\", newline='') as f:\n writer = csv.writer(f)\n for row in np.array(csv_array).T:\n writer.writerow(row)\n\n # ### All inserted individuals data ###\n if self.record_all_generated_individuals:\n csv_array = [[\"step\"] + self.all_generated_individuals_step,\n [\"SMILES\"] + self.all_generated_individuals_smiles,\n [\"obj_calls\"] + self.all_generated_individuals_n_obj_calls,\n [\"obj_value\"] + self.all_generated_individuals_obj_value,\n [\"improver\"] + self.all_generated_individuals_improver]\n\n with open(join(self.output_folder_path, \"all_generated.csv\"), \"w\") as f:\n writer = csv.writer(f)\n for row in np.array(csv_array).T:\n writer.writerow(row)\n\n # ### Last step population data ###\n csv_array = []\n\n # Mutation success history\n n_success_mut_str = []\n n_fail_mut_str = []\n for i, ind in enumerate(self.pop):\n n_success_mut_str.append(str(self.n_success_mut[i]))\n n_fail_mut_str.append(str(self.n_fail_mut[i]))\n\n csv_array.append([\"smiles\"] + self.pop_tabu_list)\n\n # Mutation success and failures\n csv_array.append([\"n_success_mut\"] + n_success_mut_str)\n csv_array.append([\"n_failures_mut\"] + n_fail_mut_str)\n\n # Scores data\n self.curr_total_scores, self.curr_scores = self.evaluation_strategy.get_population_scores()\n step_scores_dict = scores_to_scores_dict(self.curr_total_scores,\n self.curr_scores,\n self.evaluation_strategy.keys())\n\n for k, scores_list in step_scores_dict.items():\n scores_list_np = np.full((self.pop_max_size,), None)\n scores_list_np[:len(scores_list)] = scores_list\n csv_array.append([k] + list(scores_list_np))\n\n # Action history data\n csv_array.append([\"history_data\"] + self.actions_history)\n\n with open(join(self.output_folder_path, 'pop.csv'), \"w\", newline='') as f:\n writer = csv.writer(f)\n for row in np.array(csv_array).T:\n writer.writerow(row)\n\n # ### Removed individuals actions recording ###\n if self.record_history:\n with open(join(self.output_folder_path, 'removed_ind_act_history.csv'), \"w\", newline='') as f:\n\n writer = csv.writer(f)\n writer.writerow([\"history_data\", \"total\"] + self.evaluation_strategy.keys() + [\"smiles\"])\n\n for removed_act_history in self.removed_actions_score_smi_tuple.keys():\n if removed_act_history != \"\":\n total_score = self.removed_actions_score_smi_tuple[removed_act_history][0]\n scores = self.removed_actions_score_smi_tuple[removed_act_history][1]\n smi = self.removed_actions_score_smi_tuple[removed_act_history][2]\n\n writer.writerow([removed_act_history, total_score] + list(scores) + [smi])\n\n # ### Errors data ###\n with open(join(self.output_folder_path, 'errors.csv'), \"w\", newline='') as f:\n writer = csv.writer(f)\n writer.writerow([\"step\", \"error\"])\n for error in self.errors:\n writer.writerow(error)", "title": "" }, { "docid": "b70932305b7c443f264eda729aacc110", "score": "0.6612021", "text": "def save(self):\n self._network.save(self._total_episodes)\n with open(f\"{self.agent_dir}/checkpoints/{self._total_episodes}_replay_buffer.p\", \"wb\") as f:\n pickle.dump(self._replay_buffer, f)", "title": "" }, { "docid": "ea49d7a488bbdd12540a4372ad270ad2", "score": "0.65874064", "text": "def write_results(self, save_dir, simulation_ID):\n outfile_name=save_dir+'/'+simulation_ID+'_'+self.simtype+'.csv'\n outfile=open(outfile_name, 'w+')\n line1=\"mu= \"+str(self.mu)+\"\\tk= \"+str(self.k)+\"\\n\"\n 
outfile.write(line1)\n line2= \"Time\\tx_1\\tx_2\\ty_1\\ty_2\\n\"\n outfile.write(line2)\n for i in range(len(self.x1)):\n line=str(self.dt*i)+';'+str(self.x1[i])+';'+str(self.x2[i])+';'+str(self.y1[i])+';'+str(self.y2[i])+'\\n'\n outfile.write(line)\n outfile.close()", "title": "" }, { "docid": "7302ce3908c441efd6aed00c3ffbc047", "score": "0.6579228", "text": "def save_data(self, path=None):\n save_file = self.save_file_name\n if path is not None:\n save_file = path+self.save_file_name\n with open(save_file, 'wb') as outfile:\n pickle.dump(np.array(self.analysis_output), outfile)\n\n return", "title": "" }, { "docid": "fcf175d563152ca3f5cea418095eea76", "score": "0.6564639", "text": "def runsim(self):\n self.randwalk3d()\n self.savetofile()\n self.outputobject.close()", "title": "" }, { "docid": "956fb372e1013a3318c908de73b2180f", "score": "0.6542381", "text": "def save_output(self):\n report_name = self.FILENAME\n time_range = self.get_time_range_str()\n self.wb.save(get_output_filepath(report_name, time_range))", "title": "" }, { "docid": "66c441935b2a747682b09ff58fad0839", "score": "0.6539715", "text": "def save_data(self):\n\n repo_path = 'experimental_data/'\n if not os.path.isdir(repo_path):\n os.mkdir(repo_path)\n time_stamp = time.strftime('%d_%B_%Y_%I:%M%p')\n file_n = repo_path + 'quads_robotarium_sim_' + time_stamp + '.pckl'\n arrays = [self.time_record, self.x_record, self.orientation_record, self.input_record]\n with open(file_n, 'wb') as file:\n pickle.dump(arrays, file, protocol=2)", "title": "" }, { "docid": "f7291f105f3fc2b1b1a8a03114668a44", "score": "0.6515443", "text": "def save_runs(runs):\n\n for i in runs:\n run = runs.get(i)\n run.save(dir0=dir0)", "title": "" }, { "docid": "069c6a42571adb6d14fcaf8b6d5de03a", "score": "0.65010774", "text": "def _save_results(self, directory, topology, positions):\n\n from simtk import unit as simtk_unit\n simtk_positions = positions.to(unit.angstrom).magnitude * simtk_unit.angstrom\n\n self.coordinate_file_path = path.join(directory, 'output.pdb')\n\n with open(self.coordinate_file_path, 'w+') as minimised_file:\n # noinspection PyTypeChecker\n app.PDBFile.writeFile(topology, simtk_positions, minimised_file)\n\n logging.info('Coordinates generated: ' + self.substance.identifier)", "title": "" }, { "docid": "4cfa34b5aab46d9b911cc227b1531336", "score": "0.64957994", "text": "def save_results(self, results):\n self.results = results\n pickle.dump(self.results, open(os.path.join(\n results_dir,\n \"backtest_{num}.pickle\".format(num=self.backtest_number)), 'wb'))", "title": "" }, { "docid": "1ac3c83dd77029c48dee2a0420f162be", "score": "0.64797264", "text": "def sim_and_record(self):\n save_items=list(self.results_df.index)\n self.op_sim.save_internal_parameters(*save_items)\n self.internal_opsim_res=self.op_sim.operating_point()\n \n for save in save_items:\n self.results_df.at[save, 'Value']=self.internal_opsim_res[save].as_ndarray()[0]\n \n if self.display_results:\n print('.op sim internal parmter results')\n display(self.results_df)", "title": "" }, { "docid": "a5b8a014c5212e7bdbc7e9e8106d512a", "score": "0.64786536", "text": "def save_data(self):\n # Unlogged initial positions (Step not updated by Webots)\n self.links[0, :, :] = self.links[1, :, :]\n self.joints[0, :, :] = self.joints[1, :, :]\n # Diff position to extract velocity\n # Save\n #os.makedirs(os.path.dirname(self.filename), exist_ok=True)\n np.savez(\n self.filename,\n links=self.links,\n joints=self.joints,\n network=self.network,\n **self.parameters\n )", "title": "" 
}, { "docid": "5a85f2e59f99c3e5fdac707fb8a08dc3", "score": "0.64685446", "text": "def save_results(self, out_dir: str):\n\n if self.graphs is None:\n self.graph_results()\n\n build_out(out_dir)\n\n for y in self.graphs.keys():\n\n # have to create & save bc subsequent plots will overwrite past plots\n self.graphs[y].create_plot()\n self.graphs[y].save(\"{0}/{1}.png\".format(out_dir, str(y)))", "title": "" }, { "docid": "79f9fe43fce359806c3f6ec37a15c171", "score": "0.64500666", "text": "def save_model_and_files(self):\n self.save_model(\n os.path.join(os.getcwd(), 'outputs/multi_agent',\n 'X' + str(Coordinator.current_date).replace(':', '_') + '--' + str(\n Coordinator.dp_counter), 'model'))\n self.write_output_to_files(\n os.path.join(os.getcwd(), 'outputs/multi_agent',\n 'X' + str(Coordinator.current_date).replace(':', '_') + '--' + str(\n Coordinator.dp_counter), 'clusters'))\n self.write_topics_to_files(\n os.path.join(os.getcwd(), 'outputs/multi_agent',\n 'X' + str(Coordinator.current_date).replace(':', '_') + '--' + str(\n Coordinator.dp_counter), 'topics'))\n self.write_tweet_ids_to_files(\n os.path.join(os.getcwd(), 'outputs/multi_agent',\n 'X' + str(Coordinator.current_date).replace(':', '_') + '--' + str(\n Coordinator.dp_counter), 'clusters_tweet_ids'))\n if self.verbose == 1:\n print(f'{Fore.YELLOW}{self.current_date} : Save Model and Outputs -> Number of agents : {len(self.agents)}')", "title": "" }, { "docid": "a62bd9d0818e1150eeb161756185fb7f", "score": "0.64475393", "text": "def save_data(self, path=None):\n save_file = self.save_file_name\n if path is not None:\n save_file = path+self.save_file_name\n with open(save_file, 'wb') as outfile:\n pickle.dump(self.analysis_output, outfile)\n\n return", "title": "" }, { "docid": "d6c3e66e81653a120bc11267d18c51ad", "score": "0.6434548", "text": "def save_output(self, model_dico, file_save, X_val):\n\n pred = {}\n\n # Prediction\n for o in self.output_names:\n pred[o] = self.predict_model(model_dico[o], X_val)\n\n # Text to save\n output_text = ':'.join(self.output_names)\n \n for i in tqdm(range(len(pred[self.output_names[0]]))):\n output_text += '\\n'\n output_text += ':'.join([str(pred[o][i]) for o in self.output_names])\n \n with open(file_save, 'w') as file:\n file.write(output_text)\n\n print(\"File saved at '{}'\".format(file_save))", "title": "" }, { "docid": "694c6d0fc67d2c65c720f0498a3f2245", "score": "0.64069724", "text": "def _store_outputs(self, hypotheses: List[str]) -> None:\n current_valid_output_file = \"{}/{}.hyps\".format(self.model_dir,\n self.steps)\n with open(current_valid_output_file, 'w') as opened_file:\n for hyp in hypotheses:\n opened_file.write(\"{}\\n\".format(hyp))", "title": "" }, { "docid": "694c6d0fc67d2c65c720f0498a3f2245", "score": "0.64069724", "text": "def _store_outputs(self, hypotheses: List[str]) -> None:\n current_valid_output_file = \"{}/{}.hyps\".format(self.model_dir,\n self.steps)\n with open(current_valid_output_file, 'w') as opened_file:\n for hyp in hypotheses:\n opened_file.write(\"{}\\n\".format(hyp))", "title": "" }, { "docid": "0fa41e8dd48094b900cffbf1e8835e59", "score": "0.6406303", "text": "def get_outputs(self):\n\n output = {'filename': None,\n 'raw_frame_numbers': self.raw_frame_numbers.isChecked(),\n 'viewer': self.open_viewer.isChecked()}\n\n save = self.save_file.isChecked()\n if not save:\n return output\n\n save_path = self.file_path.text()\n if not save_path:\n scene = tp.Dcc.scene_name()\n time_stamp = datetime.datetime.today()\n str_time_stamp = 
time_stamp.strftime(\"%d-%m-%Y_%H-%M-%S\")\n save_path = '{}_{}'.format(scene, str_time_stamp)\n\n output['filename'] = save_path or artellapipe.MediaMgr().create_temp_path('playblast')\n\n return output", "title": "" }, { "docid": "528bed151cd894a61b3bac809331807a", "score": "0.6387252", "text": "def _store_outputs(self, hypotheses: List[str]) -> None:\n current_valid_output_file = \"{}/{}.hyps\".format(self.model_dir,\n self.steps)\n with open(current_valid_output_file, 'a') as opened_file:\n for hyp in hypotheses:\n opened_file.write(\"{}\\n\".format(hyp))", "title": "" }, { "docid": "c2e0c79d31b2a798a481c6d3f65f4501", "score": "0.6350344", "text": "def loc_save(self,S):\n\n pos=nx.get_node_attributes(self,'p')\n pe=nx.get_node_attributes(self,'pe_alg')\n typ = nx.get_node_attributes(self,'typ')\n if self.idx == 0:\n entete = 'NodeID, True Position x, True Position y, Est Position x, Est Position y, Timestamp\\n'\n file=open(os.path.join(basename,pstruc['DIRNETSAVE'],'simulation.txt'),'write')\n file.write(entete)\n file.close()\n\n try:\n file=open(os.path.join(basename,pstruc['DIRNETSAVE'],'simulation.txt'),'a')\n for n in self.nodes():\n if typ[n] != 'ap':\n data = n + ',' + str(pos[n][0]) + ',' + str(pos[n][1]) + ',' + str(pe[n][0][0]) + ',' + str(pe[n][0][1]) + ',' +pyu.timestamp(S.now()) +',\\n'\n file.write(data)\n file.close()\n self.idx = self.idx +1\n except:\n pass", "title": "" }, { "docid": "a2e24c737df604fcad6943164324a619", "score": "0.6349004", "text": "def save(self) :\n\n # Write header\n self._log_header1( gammalib.TERSE , 'Save dmatter analysis results' )\n\n # Continue only if FITS file is valid\n if self._fits != None :\n\n # Get outmap parameter\n outfile = self[ 'outfile' ].filename()\n\n # Log file name\n self._log_value( gammalib.NORMAL, 'dmatter file' , outfile.url() )\n\n # Save results\n self._fits.saveto( outfile , self['clobber'].boolean() )\n\n # Return\n return", "title": "" }, { "docid": "f3cf7b57e4437b2741cea5988b7dcb4a", "score": "0.6343419", "text": "def save(self, output, data):\n return", "title": "" }, { "docid": "04a60bbf615c5451a601b0de099ad5e6", "score": "0.63416994", "text": "def save(self):\n history_filename, results_filename, best_pars_filename = self._get_result_filenames(self.state_dir)\n np.savez_compressed(history_filename, history=self.history)\n np.savez_compressed(best_pars_filename, best_pars=gather(self.best_pars))\n data = {'best_iter': self.best_iter,\n 'best_val_loss': self.best_val_loss,\n 'best_tst_loss': self.best_tst_loss,\n 'training_time': self.training_time,\n 'start_time': self.start_time,\n 'end_time': self.end_time,\n 'termination_reason': self.termination_reason,\n 'cfg': self.cfg,\n 'data': self.data}\n with open(results_filename, 'wb') as results_file:\n json.dump(data, results_file, indent=4)", "title": "" }, { "docid": "79ea47a51d28cba937456d57338f6305", "score": "0.63317704", "text": "def save_test_result(self):\n pass", "title": "" }, { "docid": "490fbee520774c138ead228d077c90dc", "score": "0.6323168", "text": "def save(self, output, data):", "title": "" }, { "docid": "490fbee520774c138ead228d077c90dc", "score": "0.6323168", "text": "def save(self, output, data):", "title": "" }, { "docid": "ea0f99539cdc89777c6c6818f9e96b9b", "score": "0.63117325", "text": "def simulation_save_temp(self):\n with open(os.path.join('tmp','simulations',(str(self.id_simulation)+'full')), 'w') as stream:\n writer = csv.writer(stream)\n for row in range(self.gui.table_preview.rowCount()):\n rowdata = []\n for column in 
range(self.gui.table_preview.columnCount()):\n item = self.gui.table_preview.item(row, column)\n if item is not None:\n rowdata.append(\n unicode(item.text()).encode('utf8'))\n else:\n rowdata.append('')\n writer.writerow(rowdata)\n with open(os.path.join('tmp','simulations',(str(self.id_simulation)+'filter')), 'w') as stream:\n writer = csv.writer(stream)\n for row in range(self.gui.table_filtered.rowCount()):\n rowdata = []\n for column in range(self.gui.table_filtered.columnCount()):\n item = self.gui.table_filtered.item(row, column)\n if item is not None:\n rowdata.append(\n unicode(item.text()).encode('utf8'))\n else:\n rowdata.append('')\n writer.writerow(rowdata)\n with open(os.path.join('tmp','simulations',(str(self.id_simulation)+'stats')), 'w') as file_save:\n item = self.gui.tree_hits.topLevelItem(self.id_simulation)\n simulation = item.text(1) + ',' + item.text(2) + ',' +\\\n item.text(3) + ',' +item.text(4) + ',' +\\\n item.text(5) + ',' +item.text(6) + ',' +item.text(7) + ',' +\\\n item.text(8) + ',' +item.text(9)\n line = simulation+self.nl\n file_save.write(line)\n child_num = item.childCount()\n for i in range(0,child_num):\n name = item.child(i)\n name = name.text(0)\n line = name+self.nl\n file_save.write('.........'+line)", "title": "" }, { "docid": "6a1ece051d7771df3822d55db9011791", "score": "0.62942624", "text": "def outputs(self):", "title": "" }, { "docid": "0576b139f1adffc3e23c6c82b059519e", "score": "0.6263089", "text": "def output(self):\n try:\n # output_save path uses output_folder parameter unless it is left as the default 'config', which then uses the output_path in config.py\n dirpath = self.output_save_path\n except:\n dirpath = toad.config.dirpath\n\n save_ = getattr(self, 'persist', [])\n output = dict([(k, self.target_class(self._getpath(dirpath, k))) for k in save_])\n if self.persist == ['data']: # 1 data shortcut\n output = output['data']\n\n self.debg(f\"{type(self).__name__} output set to {output.path.absolute()}\")\n\n return output", "title": "" }, { "docid": "5f56de9c2cc2ea5ebadac7f7d40f120f", "score": "0.6260796", "text": "def save(self, output_folder, save_name):\n CRiSP = {'M_inv': self.M_inv, 'X': self.X, 'Y': self.Y, 's': self.s, 'psi': self.psi}\n print(\n f\"Saving model in {output_folder}...\\tKernel matrix inverse size: \\t{sys.getsizeof(CRiSP['M_inv']) * 1e-9:3f}GB\")\n\n if save_name is None:\n save_name = 'CRiSP'\n pickle.dump(CRiSP, open(output_folder / (save_name + \".pickle\"), 'wb'), protocol=4)\n print(f\"Save complete!\")", "title": "" }, { "docid": "16a0df3e9b4140cbb98fa2073e5eb6ba", "score": "0.62605923", "text": "def _save_state(self):\n save_path = os.path.join(save_config['root_dir'], str(time.time())+'.pkl')\n save_dict = dict()\n save_dict['player1'] = self.player1.to_dict()\n save_dict['player2'] = self.player2.to_dict()\n save_dict['game_board'] = self.game_board.to_dict()\n save_dict['game_engine'] = self.game_engine.to_dict()\n save_dict['turns_played'] = self.turns_played\n save_dict['_run_func'] = '{}_{}'.format(self.player1.type, self.player2.type)\n save_dict['game_ended'] = self.game_ended\n save_dict['start_time'] = self.start_time\n with open(save_path, 'wb') as f:\n pickle.dump(save_dict, f)", "title": "" }, { "docid": "1bcda4f19e497647a7dbad0bf78b245e", "score": "0.62458396", "text": "def save_results_to_file(self, output_file_path, seq_name):\n self.tracking_out['conf'] = 1\n self.tracking_out['x'] = -1\n self.tracking_out['y'] = -1\n self.tracking_out['z'] = -1\n self.tracking_out['bb_left'] += 1 # 
Indexing is 1-based in the ground truth\n self.tracking_out['bb_top'] += 1\n final_out = self.tracking_out[TRACKING_OUT_COLS].sort_values(by=['frame', 'ped_id'])\n if not os.path.exists(output_file_path):\n os.mkdir(output_file_path)\n final_out.to_csv(os.path.join(output_file_path, '{}.txt'.format(seq_name)), header=False, index=False)", "title": "" }, { "docid": "3f75d46843c365ab993d05106b1eca2b", "score": "0.6227219", "text": "def auto_save(self):\n r_min = self.gui.spin_rounds_min.text()\n r_max = self.gui.spin_rounds_max.text()\n odds_level = self.gui.spin_odds_level.text()\n nets = self.gui.tree_nets.currentItem()\n nets = self.gui.tree_nets.indexOfTopLevelItem(nets)\n filters = self.gui.tree_filters.currentItem()\n filters = self.gui.tree_filters.indexOfTopLevelItem(filters)\n ranges = self.gui.tree_ranges.currentItem()\n ranges = self.gui.tree_ranges.indexOfTopLevelItem(ranges)\n bets = self.gui.tree_bets.currentItem()\n bets = self.gui.tree_bets.indexOfTopLevelItem(bets)\n\n\n elements = [r_min, r_max, odds_level, nets, filters, ranges, bets]\n with open(os.path.join('profiles','auto_save','simulator.txt'),'w') as save:\n for i in elements:\n save.write(str(i)+self.nl)\n print 'save'", "title": "" }, { "docid": "37c80842b5d0f55e6dc7c48823c5c545", "score": "0.62234414", "text": "def save_data(self, path=None):\n output = self.get_data()\n save_file = self.save_file_name\n if path is not None:\n save_file = path+self.save_file_name\n with open(save_file, 'wb') as outfile:\n pickle.dump(output, outfile)\n\n return", "title": "" }, { "docid": "9a08df27b8fa067f8bedac2bb98d3ad5", "score": "0.62129515", "text": "def save_results(self):\n self.collect_assets()\n with open(self.output_sitemap_file, 'w', encoding='utf8') as file:\n file.writelines(page + '\\n' for page in sorted(self.assets.keys()))\n with open(self.output_assets_file, 'w', encoding='utf8') as file:\n for page in sorted(self.assets):\n file.writelines(page + '\\n')\n file.writelines(asset + '\\n' for asset in self.assets[page])", "title": "" }, { "docid": "cac6617deff6f56003db3c78a890ec08", "score": "0.6175703", "text": "def save_final_results(self, filename):\n\t\tfile = open(filename, \"w\")\n\n\t\tfile.write(\"Neural Network with \" + str(self.hidden_node_count) + \" hidden nodes and momentum \" +\n\t\t\t\t str(self.momentum) + \" had a final training accuracy of \" +\n\t\t\t\t str(self.training_accuracy_history[self.epochs - 1]) + \"\\n and a test accuracy of \" +\n\t\t\t\t str(self.test_accuracy_history[self.epochs - 1]) + \" after \" + str(self.epochs) + \" epochs\")\n\t\tfile.write(\"\\nTest Confusion Matrix: \\n\")\n\t\tfile.write(str(self.test_confusion_matrix))\n\t\tfile.write(\"\\ntraining accuracy history: \\n\" + str(self.training_accuracy_history))\n\t\tfile.write(\"\\ntest accuracy history: \\n\" + str(self.test_accuracy_history))\n\t\tfile.close()", "title": "" }, { "docid": "b5019269eb6ccfb2d9161055cb211f0f", "score": "0.617093", "text": "def save_results(self, name, prefix, writer, save_filetypes, step):\n # pylint: disable=unused-argument, arguments-differ, too-many-arguments\n\n filename = _pathlib.Path(prefix).joinpath(f\"{self.name}-{step:07d}.pth\")\n time = _datetime.utcnow().replace(tzinfo=_timezone.utc).isoformat()\n\n with _io.BytesIO() as mem:\n if hasattr(self.model, \"module\"):\n _torch.jit.save(self.model.module, mem)\n else:\n _torch.jit.save(self.model, mem)\n mem.seek(0)\n\n with _lzma.open(filename, \"wb\") as fobj:\n _torch.save({\"step\": step, \"time\": time, \"model\": mem.read()}, 
fobj)\n\n self.log.info(\"Done saving model for step %d to %s\", step, str(filename))", "title": "" }, { "docid": "e8da0e400f4552e707b107bc450477d2", "score": "0.6152689", "text": "def outputs(self, outputs):\n\n self._outputs = outputs", "title": "" }, { "docid": "b1c2d11a83ee464aec05998d6ee36e6e", "score": "0.61493796", "text": "def runSimulation(self, save, experiment, subexperiment, network_name, nx_params):\n\n num_neutral_per_agent = np.empty(shape=(self.S))\n num_fake_per_agent = np.empty(shape=(self.S))\n num_retracted_per_agent = np.empty(shape=(self.S))\n neutral_per_timestep = np.empty(shape=(self.S, self.T))\n fake_per_timestep = np.empty(shape=(self.S, self.T))\n retracted_per_timestep = np.empty(shape=(self.S, self.T))\n\n directory = \"./output/\" + experiment + \"/\" + subexperiment + \"/data/\"\n name = network_name + '_' + '_'.join(['{}={}'.format(k, v) for k, v in nx_params.items()])\n path = directory + \"N{N}-T{T}-S{S}-{shr}-{dly}-{name}-data.csv\".format(\n N=self.N, T=self.T, S=self.S, shr=self.shareTimeLimit, dly=self.delay, name=name)\n\n for s in range(self.S):\n # run model\n network = self.graph(**self.nx_params) # generate network from graph and params\n logs = self.runModel(network=network)\n df_belief = pd.DataFrame.from_dict(logs[0])\n\n if save: # write raw data to output directory\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n out = pd.DataFrame(index=[x for x in range(self.T)], columns=['s', 't']+[x for x in range(self.N)])\n out.iloc[:, 0] = np.repeat(s+1, repeats=self.T) # vector of timesteps # simulation number\n out.iloc[:, 1] = np.linspace(start=1, stop=self.T, num=self.T, dtype=int) # timestep number\n out.iloc[:, 2:self.N+2] = df_belief.values\n out.to_csv(path,\n index=False,\n header=True if s == 0 else False,\n mode='a', # append df to csv\n encoding='utf-8')\n\n # eval output\n num_neutral_per_agent[s] = np.mean(np.sum(df_belief.values == 0, axis=0))\n num_fake_per_agent[s] = np.mean(np.sum(df_belief.values == 1, axis=0))\n num_retracted_per_agent[s] = np.mean(np.sum(df_belief.values == 2, axis=0))\n neutral_per_timestep[s, :] = np.mean(df_belief.values == 0, axis=1)\n fake_per_timestep[s, :] = np.mean(df_belief.values == 1, axis=1)\n retracted_per_timestep[s, :] = np.mean(df_belief.values == 2, axis=1)\n\n # aggregate beliefs over time\n neutral_per_agent_avg = np.mean(num_neutral_per_agent)\n neutral_per_agent_sd = np.std(num_neutral_per_agent)\n fake_per_agent_avg = np.mean(num_fake_per_agent)\n fake_per_agent_sd = np.std(num_fake_per_agent)\n retracted_per_agent_avg = np.mean(num_retracted_per_agent)\n retracted_per_agent_sd = np.std(num_retracted_per_agent)\n frac_neutral_per_timestep = np.mean(neutral_per_timestep, axis=0)\n frac_neutral_per_timestep_sd = np.std(neutral_per_timestep, axis=0)\n frac_fake_per_timestep = np.mean(fake_per_timestep, axis=0)\n frac_fake_per_timestep_sd = np.std(fake_per_timestep, axis=0)\n frac_retracted_per_timestep = np.mean(retracted_per_timestep, axis=0)\n frac_retracted_per_timestep_sd = np.std(retracted_per_timestep, axis=0)\n\n # aggregate final belief distributions\n neutral_dist = neutral_per_timestep[:, self.T-1]\n fake_dist = fake_per_timestep[:, self.T-1]\n retracted_dist = retracted_per_timestep[:, self.T-1]\n\n # bundle aggregated output\n avg_per_agent = (neutral_per_agent_avg, fake_per_agent_avg, retracted_per_agent_avg)\n sd_per_agent = (neutral_per_agent_sd, fake_per_agent_sd, retracted_per_agent_sd)\n frac_belief_mean = (frac_neutral_per_timestep, frac_fake_per_timestep, 
frac_retracted_per_timestep)\n frac_belief_sd = (frac_neutral_per_timestep_sd, frac_fake_per_timestep_sd, frac_retracted_per_timestep_sd)\n belief_dist = (neutral_dist, fake_dist, retracted_dist)\n\n return avg_per_agent, sd_per_agent, frac_belief_mean, frac_belief_sd, belief_dist", "title": "" }, { "docid": "7e360c9571af78dfff25bfa7eaa5d1db", "score": "0.614781", "text": "def save_data(self, path=None):\n\n\n output = self.get_data()\n save_file = self.save_file_name\n if path is not None:\n save_file = path+self.save_file_name\n with open(save_file, 'wb') as outfile:\n pickle.dump(output, outfile)\n\n return", "title": "" }, { "docid": "fce336c106c67632904e81b7fbc2a29b", "score": "0.61423445", "text": "def write_results(self, figures_merit=None):\n\n # open output file\n f = open(self.folder + \"/000_experiment_setup_and_results.txt\", 'w')\n\n # parameters and settings of the simulation\n f.writelines('# # # # # # # # # # # # # # #' + '\\n')\n f.writelines('# # # Setting # # #' + '\\n')\n f.writelines('# # # # # # # # # # # # # # #' + '\\n')\n f.writelines(self.folder + '\\n')\n f.writelines(str(self.pars))\n f.writelines(str(self.dataset))\n f.writelines('\\n')\n f.writelines(\n 'number of concluded simulation:' + str(self.no_simulations) + '\\n')\n f.writelines('seed:' + str(self.seed) + '\\n')\n f.writelines('\\n\\n')\n\n # raw results\n f.writelines('# # # # # # # # # # # # # # #' + '\\n')\n f.writelines('# # # Raw results # # #' + '\\n')\n f.writelines('# # # # # # # # # # # # # # #' + '\\n')\n f.writelines(self.string_raw_results() + '\\n')\n f.writelines('failed simulations:' + str(self.fails) + '\\n')\n f.writelines('\\n\\n')\n\n # processed rusults\n f.writelines('# # # # # # # # # # # # # # #' + '\\n')\n f.writelines('# # # Processed results # # #' + '\\n')\n f.writelines('# # # # # # # # # # # # # # #' + '\\n')\n tabularResult, printout = self.process_results(figures_merit=figures_merit)\n f.writelines(printout + '\\n')\n\n # close file\n f.close()\n\n self.log.info(tabularResult)\n return tabularResult", "title": "" }, { "docid": "402094e086e432bbd082b98b44ce3894", "score": "0.614059", "text": "def save_output(output_name, observables, lattice):\n import cPickle as pickle\n from os.path import isfile\n observables_file = ((r'data\\%s.pkl') % (output_name))\n with open(observables_file, 'wb') as output:\n pickle.dump(observables, output, pickle.HIGHEST_PROTOCOL)\n if isfile(observables_file):\n print('Observables saved to: %s' % (observables_file))\n lattice_file = ((r'data\\%s_lattice.pkl') % (output_name))\n with open(lattice_file, 'wb') as output:\n pickle.dump(lattice, output, pickle.HIGHEST_PROTOCOL)\n if isfile(lattice_file):\n print('Lattice saved to: %s' % (lattice_file))", "title": "" }, { "docid": "c85e5118065c5f818c2c4ab8fc860371", "score": "0.61385643", "text": "def SaveData(out,run_num):\n runnumber=getnumor(run_num)\n base_name = qtg_par[\"instname\"]\n\n fullname=os.getcwd()+base_name+str(runnumber)+'.spe'\n SaveSPE(out,fullname)", "title": "" }, { "docid": "d18f1342f7728f698b86854c167164d0", "score": "0.6127893", "text": "def sgnSave(self):\n\n self.uiSaveSelectedCamera()\n self.uiSaveSelectedRendersFolder()\n self.uiSaveSelectedScenesFolder()\n self.uiSaveSelectedTerminalApp()\n self.uiSaveSelectedSettings()\n self.uiSaveSelectedRenderEngine()\n\n bResult = self.uiGetCheckResults()\n\n if bResult:\n self.core.setGlobalsValue(\"sDefaultEngine\", self.sSelectedRenderEngine)\n\n self.core.setGlobalsValue(\"sOutputRenders\", self.sSelectedRendersFolder)\n 
self.core.setGlobalsValue(\"sOutputScenes\", self.sSelectedScenesFolder)\n self.core.setGlobalsValue(\"sDefaultCamera\", self.sSelectedCamera)\n\n self.core.setGlobalsValue(\"iWidth\", int(self.sSelectedWidth))\n self.core.setGlobalsValue(\"iHeight\", int(self.sSelectedHeight))\n\n self.core.setGlobalsValue(\"iStart\", int(self.sSelectedStart))\n self.core.setGlobalsValue(\"iEnd\", int(self.sSelectedEnd))\n self.core.setGlobalsValue(\"iStep\", int(self.sSelectedStep))\n\n self.core.setGlobalsValue(\"sTerminalApp\", self.sSelectedTerminalApp)\n\n self.core.setGlobalsValue(\"bSettingImportRefs\", self.bSelectedSettingImportRefs)\n\n self.core.saveGlobals()\n\n self.uiCloseWindow()", "title": "" }, { "docid": "97119e72641b6e8911836605aae3d6e5", "score": "0.6126601", "text": "def retrieve_outputs(self, event_name: str, sim_type: str):\n\n job_name = self._get_job_name(event=event_name, sim_type=sim_type, new=False)\n salvus_job = sapi.get_job(\n site_name=self.comm.project.site_name, job_name=job_name\n )\n if sim_type == \"forward\":\n destination = self.comm.lasif.find_seismograms(\n event=event_name, iteration=self.comm.project.current_iteration\n )\n\n elif sim_type == \"adjoint\":\n destination = self.comm.lasif.find_gradient(\n iteration=self.comm.project.current_iteration,\n event=event_name,\n smooth=False,\n inversion_grid=False,\n just_give_path=True,\n )\n\n else:\n raise InversionsonError(\n f\"Simulation type {sim_type} not supported in this function\"\n )\n salvus_job.copy_output(\n destination=os.path.dirname(destination),\n allow_existing_destination_folder=True,\n )", "title": "" }, { "docid": "732b8f529b534b2175f56c6362517f2e", "score": "0.6123155", "text": "def save_outputs(display_frames):\n print('Saving plots...')\n # Delete existing plots and simulation gif\n if os.path.exists('./Plots'):\n shutil.rmtree('./Plots')\n os.mkdir('./Plots')\n # Save plots in parallel using number of cpu cores - 2\n Parallel(n_jobs=os.cpu_count() - 2)(delayed(save_plot)(display_frames[i], i) for i in range(len(display_frames)))\n # Generate simulation gif\n save_animation()", "title": "" }, { "docid": "ca693c6d219cc1aeea4053281aba6d32", "score": "0.6119877", "text": "def save_results(self):\n self.rf.matrix.data.to_csv(\"rf_results_averages.csv\", index=False)\n pd.DataFrame(self.rf.matrix.matrix).to_csv(\"rf_confusion_matrix.csv\", index=False)\n pd.DataFrame(self.rf.matrix.matrix_normalised[0, :, :]).to_csv(\"rf_confusion_matrix_normalised.csv\",\n index=False)\n self.knn.matrix.data.to_csv(\"knn_results_averages.csv\", index=False)\n pd.DataFrame(self.rf.matrix.matrix).to_csv(\"knn_confusion_matrix.csv\", index=False)\n pd.DataFrame(self.rf.matrix.matrix_normalised[0, :, :]).to_csv(\"knn_confusion_matrix_normalised.csv\",\n index=False)", "title": "" }, { "docid": "4057bea0a85a8e50e9fdf832cebbf8a8", "score": "0.6119099", "text": "def save_predictor(self, save_folder):\n simple_save(\n tf.get_default_session(),\n save_folder,\n inputs={\"obs\": self.model.act_model.X},\n outputs={\n \"action\": self.model.act_model.action,\n \"value\": self.model.act_model.vf,\n \"action_probs\": self.model.act_model.action_probs\n })", "title": "" }, { "docid": "f8853d3db2a6030cf5a8476ac2590969", "score": "0.61119646", "text": "def outputs(self):\r\n\r\n return self.__outputs", "title": "" }, { "docid": "31615ca3c999a285861a6f27ca31376a", "score": "0.61114407", "text": "def store_model_outputs(self, model, iter_num: int):\n assert model and model.outputs is not None, \"No model has been run\"\n # 
outputs_df = db.store.build_outputs_table([model], run_id=iter_num, chain_id=self.chain_id)\n derived_outputs_df = db.store.build_derived_outputs_table(\n [model], run_id=iter_num, chain_id=self.chain_id\n )\n # self.db.append_df(db.store.Table.OUTPUTS, outputs_df)\n self.db.append_df(db.store.Table.DERIVED, derived_outputs_df)", "title": "" }, { "docid": "e6c3223bb4a5c13e85f55dba23dff1a3", "score": "0.6106897", "text": "def do_save(self, args):\n if not CFState.get_instance().active_target:\n self.pwarning(\"\\n [!] Not interacting with a target. Set the active target with `interact`.\\n\")\n return\n module_path = \"/\".join(CFState.get_instance().active_target.__module__.split(\".\")[:-1])\n if \"results\" not in os.listdir(module_path):\n os.mkdir(f\"{module_path}/results\")\n\n filename = f\"{module_path}/results/{CFState.get_instance().active_target.model_name}_{CFState.get_instance().active_target.target_id}.json\"\n with open(filename, \"w\") as outfile:\n json.dump(CFState.get_instance().active_target.dump(), outfile, indent=1)\n\n self.poutput(f\"\\n[+] Successfully wrote {filename}\\n\")", "title": "" }, { "docid": "df1aa5fe5018211b03cb917f00347b91", "score": "0.6091375", "text": "def outputs(self) -> Dict[str, Any]:\n if not self.is_complete:\n raise _user_exceptions.FlyteAssertion(\n \"Please wait until the node execution has completed before requesting the outputs.\"\n )\n if self.error:\n raise _user_exceptions.FlyteAssertion(\"Outputs could not be found because the execution ended in failure.\")\n return self._outputs", "title": "" }, { "docid": "dcbde3c514ab191d8f39fddcc94865d6", "score": "0.6087984", "text": "def saveSimulationResultsToFiles(mainMemory, l1Cache, l2Cache, statResults):\n mainMemory.saveMemoryToFile(config.getMainMemoryStatusOutputFilePath())\n l1Cache.saveMemoryToFile(config.getL1CacheStatusOutputFilePath())\n if l2Cache:\n l2Cache.saveMemoryToFile(config.getL2Way0CacheStatusOutputFilePath())\n l2Cache.saveMemoryToFile(config.getL2Way1CacheStatusOutputFilePath())\n\n with open(config.getStatsFileName(), 'w') as statsFile:\n for stat in statResults:\n statsFile.write(str(stat) + '\\n')", "title": "" }, { "docid": "5f875b0bf2c2e436578a4540fbaa50dc", "score": "0.60832655", "text": "def save_model(self):\n tosave = {'rmsecv':self.rmsecv,\n 'trained':self.trained,\n 'cv':self.cv,\n 'columns':self.columns,\n 'lmbda':self.lmbda,\n 'max_evals':self.max_evals,}\n\n with open(os.path.join(self.bcupath, 'misc-estimator.pkl'), 'wb') as handle:\n pickle.dump(tosave, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n #print('saved : ', self.scaler.mean_)\n joblib.dump(self.scaler, os.path.join(self.bcupath, 'scaler.pkl'))\n joblib.dump(self.pca, os.path.join(self.bcupath, 'pca.pkl'))\n joblib.dump(self.estimator, os.path.join(self.bcupath, 'estimator.pkl'))", "title": "" }, { "docid": "b3828329f70e5023d303e26af135d1d0", "score": "0.6077398", "text": "def outputs(self):\n return self._outputs", "title": "" }, { "docid": "b3828329f70e5023d303e26af135d1d0", "score": "0.6077398", "text": "def outputs(self):\n return self._outputs", "title": "" }, { "docid": "6d0bdad6221cf4402035aa38772527d4", "score": "0.6077258", "text": "def write_results(self):\n\n # Save the results file.\n dir = self.write_results_dir + 'final'\n self.interpreter.results.write(file='results', dir=dir, force=True)\n\n # The Grace plots.\n dir = self.write_results_dir + 'final' + sep + 'grace'\n self.interpreter.grace.write(x_data_type='res_num', y_data_type='s2', file='s2.agr', dir=dir, force=True)\n 
self.interpreter.grace.write(x_data_type='res_num', y_data_type='s2f', file='s2f.agr', dir=dir, force=True)\n self.interpreter.grace.write(x_data_type='res_num', y_data_type='s2s', file='s2s.agr', dir=dir, force=True)\n self.interpreter.grace.write(x_data_type='res_num', y_data_type='te', file='te.agr', dir=dir, force=True)\n self.interpreter.grace.write(x_data_type='res_num', y_data_type='tf', file='tf.agr', dir=dir, force=True)\n self.interpreter.grace.write(x_data_type='res_num', y_data_type='ts', file='ts.agr', dir=dir, force=True)\n self.interpreter.grace.write(x_data_type='res_num', y_data_type='rex', file='rex.agr', dir=dir, force=True)\n self.interpreter.grace.write(x_data_type='s2', y_data_type='te', file='s2_vs_te.agr', dir=dir, force=True)\n self.interpreter.grace.write(x_data_type='s2', y_data_type='rex', file='s2_vs_rex.agr', dir=dir, force=True)\n self.interpreter.grace.write(x_data_type='te', y_data_type='rex', file='te_vs_rex.agr', dir=dir, force=True)\n\n # Write the values to text files.\n dir = self.write_results_dir + 'final'\n self.interpreter.value.write(param='s2', file='s2.txt', dir=dir, force=True)\n self.interpreter.value.write(param='s2f', file='s2f.txt', dir=dir, force=True)\n self.interpreter.value.write(param='s2s', file='s2s.txt', dir=dir, force=True)\n self.interpreter.value.write(param='te', file='te.txt', dir=dir, force=True)\n self.interpreter.value.write(param='tf', file='tf.txt', dir=dir, force=True)\n self.interpreter.value.write(param='ts', file='ts.txt', dir=dir, force=True)\n self.interpreter.value.write(param='rex', file='rex.txt', dir=dir, force=True)\n self.interpreter.value.write(param='local_tm', file='local_tm.txt', dir=dir, force=True)\n frqs = get_frequencies()\n for i in range(len(frqs)):\n comment = \"This is the Rex value with units rad.s^-1 scaled to a magnetic field strength of %s MHz.\" % (frqs[i]/1e6)\n self.interpreter.value.write(param='rex', file='rex_%s.txt'%int(frqs[i]/1e6), dir=dir, scaling=(2.0*pi*frqs[i])**2, comment=comment, force=True)\n\n # Create the PyMOL macros.\n dir = self.write_results_dir + 'final' + sep + 'pymol'\n self.interpreter.pymol.macro_write(data_type='s2', dir=dir, force=True)\n self.interpreter.pymol.macro_write(data_type='s2f', dir=dir, force=True)\n self.interpreter.pymol.macro_write(data_type='s2s', dir=dir, force=True)\n self.interpreter.pymol.macro_write(data_type='amp_fast', dir=dir, force=True)\n self.interpreter.pymol.macro_write(data_type='amp_slow', dir=dir, force=True)\n self.interpreter.pymol.macro_write(data_type='te', dir=dir, force=True)\n self.interpreter.pymol.macro_write(data_type='tf', dir=dir, force=True)\n self.interpreter.pymol.macro_write(data_type='ts', dir=dir, force=True)\n self.interpreter.pymol.macro_write(data_type='time_fast', dir=dir, force=True)\n self.interpreter.pymol.macro_write(data_type='time_slow', dir=dir, force=True)\n self.interpreter.pymol.macro_write(data_type='rex', dir=dir, force=True)\n\n # Create the Molmol macros.\n dir = self.write_results_dir + 'final' + sep + 'molmol'\n self.interpreter.molmol.macro_write(data_type='s2', dir=dir, force=True)\n self.interpreter.molmol.macro_write(data_type='s2f', dir=dir, force=True)\n self.interpreter.molmol.macro_write(data_type='s2s', dir=dir, force=True)\n self.interpreter.molmol.macro_write(data_type='amp_fast', dir=dir, force=True)\n self.interpreter.molmol.macro_write(data_type='amp_slow', dir=dir, force=True)\n self.interpreter.molmol.macro_write(data_type='te', dir=dir, force=True)\n 
self.interpreter.molmol.macro_write(data_type='tf', dir=dir, force=True)\n self.interpreter.molmol.macro_write(data_type='ts', dir=dir, force=True)\n self.interpreter.molmol.macro_write(data_type='time_fast', dir=dir, force=True)\n self.interpreter.molmol.macro_write(data_type='time_slow', dir=dir, force=True)\n self.interpreter.molmol.macro_write(data_type='rex', dir=dir, force=True)\n\n # Create a diffusion tensor representation of the tensor, if a PDB file is present and the local tm global model has not been selected.\n if hasattr(cdp, 'structure') and hasattr(cdp, 'diff_tensor'):\n dir = self.write_results_dir + 'final'\n self.interpreter.structure.create_diff_tensor_pdb(file=\"tensor.pdb\", dir=dir, force=True)", "title": "" }, { "docid": "7342431d7f3b0be140159ce157a2f83e", "score": "0.606516", "text": "def _save_results(\n self,\n gt_rgb: torch.Tensor,\n pred_rgb: torch.Tensor,\n gt_seg: torch.Tensor,\n pred_seg: torch.Tensor,\n gt_depth: torch.Tensor,\n pred_depth: torch.Tensor,\n path: str,\n ) -> None:\n\n save_rgb_results(gt_rgb[0], pred_rgb[0], path)\n save_seg_results(gt_seg[0], pred_seg[0], path)\n save_depth_results(gt_depth[0], pred_depth[0], path)", "title": "" }, { "docid": "40e3b06e906673d4e9073b370e0cc240", "score": "0.6060154", "text": "def save_results(self):\n if self.directory is None:\n return\n tmc_dir = os.path.join(\n self.directory, \n 'mem_tmc_{}.pkl'.format(self.tmc_number.zfill(4))\n )\n\n raw_list = []\n for j in range(self.mem_tmc.shape[0]):\n row_dict = { self.idxs_tmc[j][i] : self.mem_tmc[j][i] for i in range(self.mem_tmc.shape[1]) }\n raw_list.append(row_dict)\n\n pkl.dump(raw_list, open(tmc_dir, 'wb'))\n #pkl.dump({'mem_tmc': self.mem_tmc, 'idxs_tmc': self.idxs_tmc}, \n # open(tmc_dir, 'wb'))", "title": "" }, { "docid": "06d278f67ccedad293cdd7fbca4f047a", "score": "0.6058251", "text": "def save_csv_results(self):\n # Train\n self.train_results.to_csv(f'output_{self.data_name}\\\\winnow_{self.data_name}_train_results.csv')\n\n # Test\n self.test_results.to_csv(f'output_{self.data_name}\\\\winnow_{self.data_name}_test_results.csv')", "title": "" }, { "docid": "cb9100981af5dcc2cf06a970942eb82f", "score": "0.60476255", "text": "def _save_numpy(self, iter_):\n key = self._get_numpy_count()\n\n coords_to_export = self.simulated_coords[self._npy_starting_index:iter_]\n coords_to_export = self._swap_and_export(coords_to_export)\n np.save(\"{}_coords_{}.npy\".format(\n self.filename, key), coords_to_export)\n\n if self.save_forces:\n forces_to_export = self.simulated_forces[self._npy_starting_index:iter_]\n forces_to_export = self._swap_and_export(forces_to_export)\n np.save(\"{}_forces_{}.npy\".format(\n self.filename, key), forces_to_export)\n\n if self.save_potential:\n potentials_to_export = self.simulated_potential[self._npy_starting_index:iter_]\n potentials_to_export = self._swap_and_export(potentials_to_export)\n np.save(\"{}_potential_{}.npy\".format(\n self.filename, key), potentials_to_export)\n\n if self.friction is not None:\n kinetic_energies_to_export = self.kinetic_energies[self._npy_starting_index:iter_]\n kinetic_energies_to_export = self._swap_and_export(\n kinetic_energies_to_export)\n np.save(\"{}_kineticenergy_{}.npy\".format(self.filename, key),\n kinetic_energies_to_export)\n\n self._npy_starting_index = iter_\n self._npy_file_index += 1", "title": "" }, { "docid": "ae6c6650f5d1027b88b9a29784d7207e", "score": "0.6026936", "text": "def Save_Status(self):\r\n \r\n np.save(self.Folder + \"\\\\Step_%i.npy\"%self.step, {'u':self.u,\r\n 
'RHS':self.RHS,\r\n 'time':self.t})", "title": "" }, { "docid": "6c96bbc53038e293fc458faa34457591", "score": "0.6019201", "text": "def writeSave(self):\n\n #store required data in savegame\n self.savegame.currentMap = self.player.map.fn\n self.savegame.currentPosition = self.player.position\n self.savegame.currentLevel = self.player.level\n self.savegame.currentDirection = self.player.direction\n self.savegame.party = self.player.party\n\n #open the file for writing, dump the save game, and close\n try:\n f = open(self.savegame.fn, \"w\")\n pickle.dump(self.savegame, f)\n f.close()\n except IOError:\n raise error.DittoIOException(\"Ditto main\", fn)\n\n print \"Game saved to \" + self.savegame.fn", "title": "" }, { "docid": "aacb8565baf441a45deccc33cf0ded53", "score": "0.6015083", "text": "def _save_time_step(self):\n for var_name, value_pointer in self._save_vars.items():\n self.results[var_name].append(\n operator.attrgetter(value_pointer)(self)\n )", "title": "" }, { "docid": "aacb8565baf441a45deccc33cf0ded53", "score": "0.6015083", "text": "def _save_time_step(self):\n for var_name, value_pointer in self._save_vars.items():\n self.results[var_name].append(\n operator.attrgetter(value_pointer)(self)\n )", "title": "" }, { "docid": "16c0762df48b4937dc130e7a2acdca36", "score": "0.60026264", "text": "def writeObservations(self):\n # Set model output arrays to None to initialise\n head = None\n sfr_df = None\n stream_options = ['stage', 'depth', 'discharge']\n # Write observation to file\n obs_group = self.model_data.observations.obs_group\n for obs_set in obs_group:\n obs_type = obs_group[obs_set]['obs_type']\n # Import the required model outputs for processing\n if obs_type == 'head':\n # Check if model outputs have already been imported and if not import\n if not head:\n headobj = self.import_heads()\n head = headobj.get_alldata()\n elif obs_type in stream_options:\n try:\n self.sfr_df\n except Exception:\n sfr_df = self.importSfrOut()\n # End except\n else:\n continue\n # End if\n\n group_set = obs_group[obs_set]\n obs_df = self.get_active_obs_group(group_set)\n sim_map_dict = group_set['mapped_observations']\n\n unique_obs_df_zone = obs_df['zone'].unique()\n if obs_type in stream_options:\n sfr_location = group_set['locations']['seg_loc']\n for zone in unique_obs_df_zone:\n zone_txt = obs_set if len(unique_obs_df_zone) == 1 else obs_set + zone\n with open(os.path.join(self.data_folder, 'observations_' + zone_txt + '.txt'), 'w') as f:\n obs_df_zone = obs_df[obs_df['zone'] == zone]\n for observation in obs_df_zone.index:\n interval = int(obs_df_zone['interval'].loc[observation])\n name = obs_df_zone['name'].loc[observation]\n seg = sfr_location.loc[name]\n sfr = sfr_df\n col_of_interest = obs_type\n\n if obs_type == 'discharge':\n col_of_interest = 'Qout'\n\n sim_obs = sfr[(sfr['segment'] == seg) & (sfr['time'] == interval)][col_of_interest]\n f.write('%f\\n' % sim_obs)\n # End for\n # End with\n # End for\n elif obs_type == 'head':\n for zone in unique_obs_df_zone:\n zone_txt = 'head' if len(unique_obs_df_zone) == 1 else zone\n with open(os.path.join(self.data_folder, 'observations_' + zone_txt + '.txt'), 'w') as f:\n obs_df_zone = obs_df[obs_df['zone'] == zone]\n for observation in obs_df_zone.index:\n interval = int(obs_df_zone['interval'].loc[observation])\n name = obs_df_zone['name'].loc[observation]\n name_dict = sim_map_dict[name]\n (x_cell, y_cell) = self.model_data.mesh2centroid2Dindex[(name_dict[1], name_dict[2])]\n (lay, row, col) = [name_dict[0], name_dict[1], 
name_dict[2]]\n\n sim_heads = [head[interval][lay][row][col]]\n sim_head = np.mean(sim_heads)\n f.write('%f\\n' % sim_head)\n # End for\n # End with\n # End for\n else:\n print(\"Unknown observation type!\")\n # End if\n # End for", "title": "" }, { "docid": "854c25e3ed3279c2709e53f86c6ca0f1", "score": "0.60018647", "text": "def save_data(self):\n J1 = self.get_joints_data(self.actuation1, self.actuation1_direction, self.robot.J1)\n J2 = self.get_joints_data(self.actuation2, self.actuation1_direction, self.robot.J2)\n J3 = self.get_joints_data(self.actuation2, self.actuation1_direction, self.robot.J3)\n J4 = self.get_joints_data(self.actuation1, self.actuation1_direction, self.robot.J4)\n\n x, y, z = Utils.list_coord2list(self.robot.position)\n\n robot = pd.DataFrame(\n np.array([\n self.actuation1,\n self.actuation1_direction,\n self.actuation2,\n self.actuation2_direction,\n x, y, z,\n np.array(self.robot.angle)[:, 0],\n np.array(self.robot.angle)[:, 1],\n np.array(self.robot.angle)[:, 2]\n ]).T,\n columns=[\n 'u1', 'u1_dir',\n 'u2', 'u2_dir',\n 'x', 'y', 'z',\n 'pitch', 'roll', 'yaw'\n ]\n )\n\n self.data = {}\n self.data['J1'] = J1\n self.data['J2'] = J2\n self.data['J3'] = J3\n self.data['J4'] = J4\n self.data['robot'] = robot\n self.data = pd.concat(self.data, axis=1)\n\n self.data.to_csv('{0}/results/{1}{2}{3}{4}.csv'.format(\n Path(__file__).resolve().parent,\n self.robot.J1.sequence,\n self.robot.J2.sequence,\n self.robot.J3.sequence,\n self.robot.J4.sequence\n ))\n self.data.to_pickle('{0}/results/{1}{2}{3}{4}.pkl'.format(\n Path(__file__).resolve().parent,\n self.robot.J1.sequence,\n self.robot.J2.sequence,\n self.robot.J3.sequence,\n self.robot.J4.sequence\n ))", "title": "" }, { "docid": "d45d3b29edd2e78a209096b41aeb62bd", "score": "0.5997252", "text": "def save_state(self, save_dir):\n self.replay_buffer.save(os.path.join(save_dir, 'replay'))", "title": "" }, { "docid": "812d40b69e0e78251b785e46c6b9a1ef", "score": "0.5993922", "text": "def saveTrajectory(self):\n filepath = 'output/'\n if path.exists(filepath):\n filename = 'traj.npy'\n filepath += filename \n np.save(filepath, self.camera_traj)\n print(\"Trajectory saved to: \" + filepath)\n else:\n print(\"Writing path does not exist!\")", "title": "" }, { "docid": "b4677fc5e86dee407191b50e0b61ffc7", "score": "0.5983296", "text": "def save_result(self):\n if self.episode_number == 1 or not self.do_evaluation_iterations:\n self.game_full_episode_scores.extend([self.total_episode_score_so_far])\n self.rolling_results.append(np.mean(self.game_full_episode_scores[-1 * self.rolling_score_window:]))\n self.save_max_result_seen()\n\n elif (self.episode_number - 1) % TRAINING_EPISODES_PER_EVAL_EPISODE == 0:\n self.game_full_episode_scores.extend(\n [self.total_episode_score_so_far for _ in range(TRAINING_EPISODES_PER_EVAL_EPISODE)])\n self.rolling_results.extend(\n [np.mean(self.game_full_episode_scores[-1 * self.rolling_score_window:]) for _ in\n range(TRAINING_EPISODES_PER_EVAL_EPISODE)])\n self.save_max_result_seen()", "title": "" }, { "docid": "0427888967e41816b3c10688ce67047e", "score": "0.5981149", "text": "def _save_trials(self, trials_log):\n trials_object = (self._hpopt_trials, self._evaluated_params)\n object_dump(trials_object, trials_log)\n tpe_trials_results = pd.DataFrame(self._hpopt_trials.results)\n csv_file = trials_log + '.csv'\n tpe_trials_results.to_csv(csv_file)\n tpe_trials_results = pd.DataFrame(self._evaluated_params)\n csv_file = trials_log + '.params.csv'\n tpe_trials_results.to_csv(csv_file)", "title": "" 
}, { "docid": "f28e358f6abbc05fa568ff28ae8aff20", "score": "0.5969702", "text": "def commit(self,deleteOutput=True,restore=False):\n if self.DEBUG:\n self.say.message_debug(self.CLASS_NAME+\"::commit()\")\n\n if not os.path.exists(self.directory):\n if restore:\n self.say.error(\"Set to restore, but simulation does not exist!\")\n exit(0)\n else:\n self.say.message(\"Simulation directory doesn't exist! So creating it\")\n os.makedirs(self.directory)\n self.project.writeToHistory(self.name+' created')\n\n if restore and deleteOutput:\n self.say.warning(\"You really don't want to delete output when\"\n \"restoring! So we changed it\")\n deleteOutput = False\n\n # We use .copy2 because we also want to copy permissions\n # Copy executable\n if self.compiled:\n self.copyBinary()\n\n # Update configuration files\n tools.inputWrite(self.inputDicts[0], self.directory+'/input')\n c_ = 1\n # Md config files\n if os.path.isfile(self.inputFile+'.md'):\n tools.inputWrite(self.inputDicts[c_], self.directory+'/input')\n #shutil.copy2(self.inputFile+'.md', self.directory)\n if self.template is not '':\n mdfile = self.template+\"/\"+self.inputDicts[c_]['init_file'][1:-1]\n shutil.copy2(mdfile, self.directory)\n c_ += 1\n # Elec config files\n if os.path.isfile(self.inputFile+'.elec'):\n tools.inputWrite(self.inputDicts[c_], self.directory+'/input')\n #shutil.copy2(self.inputFile+'.elec', self.directory)\n c_ += 1\n self.project.writeToHistory(\"Configuration files were written\",sim=self.name)\n\n self.project.writeToHistory(\"Simulation commited\",sim=self.name)\n\n # Create output directory\n # If it exists, do we delete the files?\n outputdir = self.inputDicts[0]['folder'][1:-1]\n fd = self.directory+\"/\"+outputdir\n if os.path.exists(fd):\n if deleteOutput:\n if len(outputdir) > 1:\n shutil.rmtree(fd)\n self.project.writeToHistory(\"Output dir was removed!\",sim=self.name)\n os.makedirs(fd)\n else:\n if not restore:\n self.say.warning(\"Output dir existed, we are adding files there\")\n else:\n os.makedirs(fd)", "title": "" }, { "docid": "6b850c6e99059e2a8fc69392fee25eec", "score": "0.5968843", "text": "def _do_outputs(self):", "title": "" }, { "docid": "878c2c1a03855c7544b274321258a64a", "score": "0.59677136", "text": "def Outputs(self):\n return self.__outputs", "title": "" }, { "docid": "18186eef48524b68affb826d3dd359df", "score": "0.5965068", "text": "def write_model(self):\n\n model_file_name = self.ml_service + '_model_' + self.model_name + '_' + self.model_version_to_train + '.pkl'\n model_full_file_name = os.path.join(self.finder.get_output_folder(), model_file_name)\n model_objects_list = [self.trained_model, self.scaler_X, self.scaler_Y, self.model_config.j_config]\n os.makedirs(self.finder.get_output_folder(), exist_ok=True)\n joblib.dump(value=model_objects_list, filename=model_full_file_name)\n\n #if self.run_type == 'run_on_azure':\n # Also save to outputs folder for auto save in Azure\n os.makedirs('outputs', exist_ok=True)\n # note file saved in the outputs folder is automatically uploaded into experiment record\n joblib.dump(value=model_objects_list, filename=os.path.join('outputs', model_file_name))\n print('\\nwriting to outputs: ' + model_file_name)", "title": "" }, { "docid": "ac69c2f975364f08ce373f1992eb5bc4", "score": "0.5952777", "text": "def save_outputs(ML_pipe, output_dir):\n stack, conjunction = add_conjunction(ML_pipe.feature_importances)\n conjunction_results = {'Features': conjunction, 'Raw': stack}\n save_xls(\n conjunction_results, os.path.join(output_dir, 
'features.xlsx'))\n\n save_xls(\n ML_pipe.predictions, os.path.join(output_dir, 'predictions.xlsx'))\n\n ML_pipe.model_performance.to_excel(\n os.path.join(output_dir, 'performance.xlsx'))\n\n try:\n ML_pipe.feature_selection_gridsearch_results.to_excel(\n os.path.join(output_dir, 'feature_selection_gridsearch.xlsx'))\n ML_pipe.regression_gridsearch_results.to_excel(\n os.path.join(output_dir, 'regression_gridsearch.xlsx'))\n except AttributeError:\n pass", "title": "" }, { "docid": "f6b78ff0749c5bcec55b4e065dfd6c59", "score": "0.59369737", "text": "def save(self):\n torch.save(self.policy.state_dict(), str(config.fc1_units)+'_'+str(config.fc2_units) + '_model.pth')", "title": "" }, { "docid": "7995c4f61d8852492e939be1507e0252", "score": "0.59348357", "text": "def save_result(self):\n for ep in range(len(self.many_episode_rewards)):\n total_reward = np.sum(self.many_episode_rewards[ep])\n self.game_full_episode_scores.append(total_reward)\n self.rolling_results.append(np.mean(self.game_full_episode_scores[-1 * self.rolling_score_window:]))\n self.save_max_result_seen()", "title": "" }, { "docid": "17ef846eaeb33dfa7d0e2863512de8dd", "score": "0.59301054", "text": "def output_results(self):\n rg = HtmlReportGenerator(self.args)\n add_plots(self, rg) # change add_plots to show different plots!\n rg.variables.update(analyse(self))\n rg.generate()\n rg.save()\n print(\"Report saved to {0}\".format(self.args[\"filename_out\"]))", "title": "" }, { "docid": "4bdd0fab4a153d32b5287139073554a8", "score": "0.5923582", "text": "def save_state_to_disk_and_return(self):\n\n backtest_state = {\n \"timestamp\": time.time(),\n \"settings\": {\n \"start\": self.start,\n \"end\": self.end,\n \"output_path\": self.output_path\n },\n \"perf\": self.perf,\n \"stats\": self.stats,\n \"portfolio\": {\n \"costs\": self.portfolio.costs,\n \"received\": self.portfolio.received,\n \"portfolio_value\": self.portfolio.portfolio_value,\n \"order_history\": self.portfolio.order_history_to_df(),\n \"signals\": self.portfolio.signals_to_df(),\n \"monthly_returns\": self.portfolio.get_monthly_returns(), # use to make histogram and return per time\n },\n \"broker\": {\n \"blotter_history\": self.broker.blotter_history_to_df(),\n \"all_trades\": self.broker.all_trades_to_df(),\n \"trade_objects\": self.broker.all_trades_as_objects(),\n \"cancelled_orders\": self.broker.cancelled_orders_to_df()\n }\n }\n timestamp = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n pickle_path = self.output_path + \"/backtest_state_\" + timestamp + \".pickle\"\n\n pickle_out = open(pickle_path,\"wb\")\n pickle.dump(backtest_state, pickle_out)\n pickle_out.close()\n\n return backtest_state", "title": "" }, { "docid": "fe4c152676b32ac078a016b917ad0dc8", "score": "0.59220284", "text": "def test_sim(write):\n with Simulation(write,'r') as sim:\n for step in sim:\n pass\n system_routing = SystemStats(sim)\n sim.report()\n print(system_routing.runoff_stats)", "title": "" }, { "docid": "4529020862521743503dbc3002c30fca", "score": "0.5921215", "text": "def save_current_state(self, save_dir=None, **kwargs):\n super().save_current_state(save_dir, **kwargs)\n if self._save_rlr_every and self.istep % self._save_rlr_every == 0:\n thread_prefix = f'thread{self._thread:02d}_' if self._thread is not None else ''\n fname = f'{thread_prefix}gen{self._istep:03d}_search_radius'\n np.save(os.path.join(self._logdir, fname), self._r)", "title": "" }, { "docid": "e5be7eccdc3bbc2202c53a3f028105b4", "score": "0.59203136", "text": "def save_result(self):\n 
self.game_full_episode_scores.append(self.total_episode_score_so_far)\n self.rolling_results.append(np.mean(self.game_full_episode_scores[-100:]))", "title": "" }, { "docid": "58a964450ee97c30bb44bc705935a429", "score": "0.5919577", "text": "def save_summary_to_file(self):\n\n # Save a csv file with the power measurements done in all the tests\n\n path = os.path.join(self.log_path, self.RESULTS_SUMMARY_FILENAME)\n\n with open(path, 'w') as csvfile:\n csvfile.write('test,avg_power')\n for test_name, value in self.power_results.items():\n csvfile.write('\\n{},{}'.format(test_name, value))\n\n # Save a csv file with the calibration table for each simulation type\n\n for sim_type in self.calibration_table:\n\n path = os.path.join(\n self.log_path, '{}_{}'.format(sim_type,\n self.CALIBRATION_TABLE_FILENAME))\n\n with open(path, 'w') as csvfile:\n csvfile.write('band,dl_pathloss, ul_pathloss')\n for band, pathloss in self.calibration_table[sim_type].items():\n csvfile.write('\\n{},{},{}'.format(\n band, pathloss.get('dl', 'Error'),\n pathloss.get('ul', 'Error')))", "title": "" }, { "docid": "2577e2e5a7d07c9b05b705f15881cab2", "score": "0.5918313", "text": "def save(self, step):\n os.chdir(self.load_dir)\n self.saver.save(self.session, self.save_dir, step+self.global_step)\n np.savetxt('iteration.dat', np.array([step+self.global_step]))", "title": "" }, { "docid": "1ba118d91fdfa28d9dcf8976c94f7722", "score": "0.59144294", "text": "def outputs(self):\r\n if self.state is None:\r\n raise RuntimeError(\"Method add_to was not called.\")\r\n return self.state.outputs", "title": "" }, { "docid": "2fd0604adee2b2a8c0fd6c37d370cb0a", "score": "0.5906942", "text": "def save(self, fileName: str) -> NoReturn:\n A = []\n B = []\n pi = []\n for i in range(self.statesNumber()):\n a = []\n for j in range(self.statesNumber()):\n a.append(self.getTransition(i, j))\n A.append(a)\n b = []\n for sym in range(self.symbolsNumber()):\n b.append(self.getEmission(i, sym))\n B.append(b)\n pi.append(self.getInitial(i))\n with open(fileName, \"w\") as f:\n json.dump({\"A\": A, \"B\": B, \"pi\": pi,\n \"symbols\": self.getSymbols()}, f)", "title": "" }, { "docid": "201fef74f56f7b4ed860c3a13d78b922", "score": "0.5896817", "text": "def save(self, out_path, out_name):\n def write_array(path, X):\n np.savetxt(path, X, fmt='%s')\n\n if not os.path.isdir(out_path):\n os.makedirs(out_path)\n\n write_array(\n os.path.join(out_path, out_name + '.data'),\n self.get_data('X'))\n write_array(\n os.path.join(out_path, out_name + '_feat.name'), \n self.feat_name)\n\n if 'y' in self.data:\n write_array(\n os.path.join(out_path, out_name + '.solution'),\n self.get_data('y'))\n\n if 'X_train' and 'X_test' in self.data:\n write_array(\n os.path.join(out_path, out_name + '_train.data'),\n self.get_data('X_train'))\n write_array(\n os.path.join(out_path, out_name + '_test.data'),\n self.get_data('X_test'))\n if 'y_train' and 'y_test' in self.data:\n write_array(\n os.path.join(out_path, out_name + '_test.solution'),\n self.get_data('y_train'))\n write_array(\n os.path.join(out_path, out_name + '_test.solution'),\n self.get_data('y_test'))\n write_array(\n os.path.join(out_path, out_name + '_label.name'),\n self.label_name)\n\n with open(os.path.join(out_path, out_name + '_public.info'), 'w') as f:\n for key, item in self.info.items():\n f.write(str(key))\n f.write(' = ')\n f.write(str(item))\n f.write('\\n')", "title": "" }, { "docid": "a70a5f0e1661986cdc6f6a1ce9aea3fc", "score": "0.58953255", "text": "def saving(self):\n print(\"Saving....\")", 
"title": "" }, { "docid": "fea2878be04bc139df4ec9d82094a205", "score": "0.5893891", "text": "def save(self, filename):\n save_dict = {'init_dict': self.init_dict,\n 'agent_params': [a.get_params() for a in self.agents]}\n torch.save(save_dict, filename)", "title": "" }, { "docid": "c5016fd7fef742480ca37f102482fc98", "score": "0.58869094", "text": "def outputs(self):\n return super().outputs", "title": "" } ]